diff --git a/go.mod b/go.mod index b34937604c..e314ccd71c 100644 --- a/go.mod +++ b/go.mod @@ -592,7 +592,7 @@ replace ( // TODO: Update when https://github.com/grafana-operator/grafana-operator/pull/1301/files is merged github.com/grafana-operator/grafana-operator/v5 => github.com/jaehnri/grafana-operator/v5 v5.0.0-20231107224428-8e66ce068452 // https://github.com/hashicorp/go-plugin/pull/251 - github.com/hashicorp/go-plugin => github.com/alexandreLamarre/go-plugin v0.1.1-0.20230417174342-eab684801be5 + github.com/hashicorp/go-plugin => github.com/alexandreLamarre/go-plugin v0.0.0-20231106165427-ede0fcce2acc github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza => github.com/rancher-sandbox/otel-collector-contrib/pkg/stanza v0.0.0-20231108205431-9f8e6699a9d4 github.com/opensearch-project/opensearch-go/v2 => github.com/dbason/opensearch-go/v2 v2.0.0-20221202021211-6aec8f80bc41 github.com/rancher/charts-build-scripts => github.com/dbason/charts-build-scripts v0.3.4-0.20220429024555-807c076e8116 diff --git a/go.sum b/go.sum index d7b6450e0f..44fb6addcc 100644 --- a/go.sum +++ b/go.sum @@ -545,8 +545,8 @@ github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk5 github.com/alecthomas/units v0.0.0-20210208195552-ff826a37aa15/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 h1:s6gZFSlWYmbqAuRjVTiNNhvNRfY2Wxp9nhfyel4rklc= github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= -github.com/alexandreLamarre/go-plugin v0.1.1-0.20230417174342-eab684801be5 h1:VByWr0MtaW/iS+MncS+5E8lCi2Hgc3JHVnMD6bCCwEk= -github.com/alexandreLamarre/go-plugin v0.1.1-0.20230417174342-eab684801be5/go.mod h1:6/1TEzT0eQznvI/gV2CM29DLSkAK/e58mUWKVsPaph0= +github.com/alexandreLamarre/go-plugin v0.0.0-20231106165427-ede0fcce2acc h1:b7BMoL6UF6kRTW3apg0XJmIG039TExK9arOKSOIY7hA= +github.com/alexandreLamarre/go-plugin v0.0.0-20231106165427-ede0fcce2acc/go.mod h1:1BphuLk8r4UASxJBCggGswD3Wn84QLOBANVCAdNoeoY= github.com/alicebob/gopher-json v0.0.0-20230218143504-906a9b012302 h1:uvdUDbHQHO85qeSydJtItA4T55Pw6BtAejd0APRJOCE= github.com/alicebob/gopher-json v0.0.0-20230218143504-906a9b012302/go.mod h1:SGnFV6hVsYE877CKEZ6tDNTjaSXYUk6QqoIK6PrAtcc= github.com/alicebob/miniredis/v2 v2.30.4 h1:8S4/o1/KoUArAGbGwPxcwf0krlzceva2XVOSchFS7Eo= diff --git a/internal/alerting/syncer/syncer_server.go b/internal/alerting/syncer/syncer_server.go index b7a9934f00..020d39febd 100644 --- a/internal/alerting/syncer/syncer_server.go +++ b/internal/alerting/syncer/syncer_server.go @@ -64,7 +64,7 @@ func NewAlertingSyncerV1( init.Store(false) server := &AlertManagerSyncerV1{ serverConfig: serverConfig, - lg: logger.NewPluginLogger().WithGroup("alerting-syncer"), + lg: logger.New().WithGroup("alerting-syncer"), tlsConfig: tlsConfig, } go func() { diff --git a/pkg/agent/v1/agent.go b/pkg/agent/v1/agent.go index b94afe22b4..849975add8 100644 --- a/pkg/agent/v1/agent.go +++ b/pkg/agent/v1/agent.go @@ -60,6 +60,7 @@ const ( type Agent struct { controlv1.UnimplementedHealthServer + controlv1.UnimplementedLogServer AgentOptions config v1beta1.AgentConfigSpec router *gin.Engine diff --git a/pkg/agent/v2/agent.go b/pkg/agent/v2/agent.go index c3f204206b..d43ac32f68 100644 --- a/pkg/agent/v2/agent.go +++ b/pkg/agent/v2/agent.go @@ -24,6 +24,7 @@ import ( "github.com/rancher/opni/pkg/ident/identserver" "github.com/rancher/opni/pkg/keyring" "github.com/rancher/opni/pkg/logger" + 
"github.com/rancher/opni/pkg/logger/remotelogs" "github.com/rancher/opni/pkg/machinery" "github.com/rancher/opni/pkg/plugins" "github.com/rancher/opni/pkg/plugins/apis/apiextensions" @@ -118,11 +119,28 @@ func WithRebootstrap(rebootstrap bool) AgentOption { func New(ctx context.Context, conf *v1beta1.AgentConfig, opts ...AgentOption) (*Agent, error) { options := AgentOptions{} options.apply(opts...) + + initCtx, initCancel := context.WithTimeout(ctx, 10*time.Second) + defer initCancel() + + ipBuilder, err := ident.GetProviderBuilder(conf.Spec.IdentityProvider) + if err != nil { + return nil, fmt.Errorf("configuration error: %w", err) + } + ip := ipBuilder() + id, err := ip.UniqueIdentifier(initCtx) + if err != nil { + return nil, fmt.Errorf("error getting unique identifier: %w", err) + } + level := logger.DefaultLogLevel.Level() if conf.Spec.LogLevel != "" { level = logger.ParseLevel(conf.Spec.LogLevel) } - lg := logger.New(logger.WithLogLevel(level)).WithGroup("agent") + + ctx = logger.WithAgentId(ctx, id) + ctx = logger.WithMode(ctx, meta.ModeAgent) + lg := logger.New(logger.WithLogLevel(level), logger.WithFileWriter(logger.WriteOnlyFile(logger.GetLogFileName(id)))).WithGroup("agent") lg.Debug(fmt.Sprintf("using log level: %s", level.String())) var pl *plugins.PluginLoader @@ -198,19 +216,6 @@ func New(ctx context.Context, conf *v1beta1.AgentConfig, opts ...AgentOption) (* return nil, fmt.Errorf("agent upgrade configuration error: %w", err) } - initCtx, initCancel := context.WithTimeout(ctx, 10*time.Second) - defer initCancel() - - ipBuilder, err := ident.GetProviderBuilder(conf.Spec.IdentityProvider) - if err != nil { - return nil, fmt.Errorf("configuration error: %w", err) - } - ip := ipBuilder() - id, err := ip.UniqueIdentifier(initCtx) - if err != nil { - return nil, fmt.Errorf("error getting unique identifier: %w", err) - } - sb, err := machinery.ConfigureStorageBackend(initCtx, &conf.Spec.Storage) if err != nil { return nil, fmt.Errorf("error configuring keyring store broker: %w", err) @@ -312,6 +317,14 @@ func New(ctx context.Context, conf *v1beta1.AgentConfig, opts ...AgentOption) (* gatewayClient.RegisterSplicedStream(cc, md.Filename()) })) + ls := remotelogs.NewLogServer() + controlv1.RegisterLogServer(gatewayClient, ls) + + pl.Hook(hooks.OnLoadMC(func(lc controlv1.LogClient, m meta.PluginMeta, cc *grpc.ClientConn) { + client := controlv1.NewLogClient(cc) + ls.AddClient(m.Filename(), client) + })) + return &Agent{ AgentOptions: options, config: conf.Spec, @@ -395,6 +408,7 @@ func (a *Agent) ListenAndServe(ctx context.Context) error { close(done) })) + ctx = logger.WithAgentId(ctx, a.tenantID) a.pluginLoader.LoadPlugins(ctx, a.config.PluginDir, plugins.AgentScheme, plugins.WithManifest(pluginManifest), ) @@ -493,6 +507,7 @@ ROUTES: func (a *Agent) runGatewayClient(ctx context.Context) error { lg := a.logger + defer logger.CloseLogStreaming(a.tenantID) isRetry := false for ctx.Err() == nil { if isRetry { diff --git a/pkg/alerting/drivers/backend/api.go b/pkg/alerting/drivers/backend/api.go index 0ed83c86c6..45b2bac53d 100644 --- a/pkg/alerting/drivers/backend/api.go +++ b/pkg/alerting/drivers/backend/api.go @@ -58,7 +58,7 @@ import ( // }, // body: nil, // values: nil, -// logger: logger.NewPluginLogger().Named("alerting"), +// logger: logger.New().Named("alerting"), // } // } diff --git a/pkg/alerting/extensions/embedded_server.go b/pkg/alerting/extensions/embedded_server.go index 39c4e53b21..c6914135f3 100644 --- a/pkg/alerting/extensions/embedded_server.go +++ 
b/pkg/alerting/extensions/embedded_server.go @@ -11,12 +11,13 @@ import ( "errors" "net/http" + "log/slog" + "github.com/rancher/opni/pkg/alerting/cache" "github.com/rancher/opni/pkg/alerting/extensions/destination" "github.com/rancher/opni/pkg/alerting/shared" alertingv1 "github.com/rancher/opni/pkg/apis/alerting/v1" "github.com/rancher/opni/pkg/logger" - "log/slog" // add profiles _ "net/http/pprof" @@ -64,7 +65,7 @@ func StartOpniEmbeddedServer( opniAddr string, sendK8s bool, ) *http.Server { - lg := logger.NewPluginLogger().WithGroup("opni.alerting") + lg := logger.New().WithGroup("opni.alerting") es := NewEmbeddedServer(lg, 125, sendK8s) mux := http.NewServeMux() diff --git a/pkg/alerting/storage/stores.go b/pkg/alerting/storage/stores.go index d04cdf8837..4acc6556cd 100644 --- a/pkg/alerting/storage/stores.go +++ b/pkg/alerting/storage/stores.go @@ -34,7 +34,7 @@ func NewDefaultAlertingBroker(js nats.JetStreamContext, opts ...storage_opts.Cli options := &storage_opts.ClientSetOptions{} options.Apply(opts...) if options.Logger == nil { - options.Logger = logger.NewPluginLogger().WithGroup("alerting-storage-client-set") + options.Logger = logger.New().WithGroup("alerting-storage-client-set") } if options.TrackerTtl == 0 { options.TrackerTtl = defaultTrackerTTLV1 diff --git a/pkg/apis/control/v1/remote.pb.go b/pkg/apis/control/v1/remote.pb.go index c7c96ae9cb..12959abf98 100644 --- a/pkg/apis/control/v1/remote.pb.go +++ b/pkg/apis/control/v1/remote.pb.go @@ -7,11 +7,11 @@ package v1 import ( - _ "github.com/kralicky/totem" v1 "github.com/rancher/opni/pkg/apis/core/v1" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" emptypb "google.golang.org/protobuf/types/known/emptypb" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" reflect "reflect" sync "sync" ) @@ -478,6 +478,321 @@ func (x *PatchList) GetItems() []*PatchSpec { return nil } +type LogStreamRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Since *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=since,proto3" json:"since,omitempty"` + Until *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=until,proto3" json:"until,omitempty"` + Filters *LogStreamFilters `protobuf:"bytes,3,opt,name=filters,proto3" json:"filters,omitempty"` + Follow bool `protobuf:"varint,4,opt,name=follow,proto3" json:"follow,omitempty"` +} + +func (x *LogStreamRequest) Reset() { + *x = LogStreamRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_rancher_opni_pkg_apis_control_v1_remote_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LogStreamRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LogStreamRequest) ProtoMessage() {} + +func (x *LogStreamRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_rancher_opni_pkg_apis_control_v1_remote_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LogStreamRequest.ProtoReflect.Descriptor instead. 
+func (*LogStreamRequest) Descriptor() ([]byte, []int) { + return file_github_com_rancher_opni_pkg_apis_control_v1_remote_proto_rawDescGZIP(), []int{7} +} + +func (x *LogStreamRequest) GetSince() *timestamppb.Timestamp { + if x != nil { + return x.Since + } + return nil +} + +func (x *LogStreamRequest) GetUntil() *timestamppb.Timestamp { + if x != nil { + return x.Until + } + return nil +} + +func (x *LogStreamRequest) GetFilters() *LogStreamFilters { + if x != nil { + return x.Filters + } + return nil +} + +func (x *LogStreamRequest) GetFollow() bool { + if x != nil { + return x.Follow + } + return false +} + +type LogStreamFilters struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + NamePattern []string `protobuf:"bytes,1,rep,name=namePattern,proto3" json:"namePattern,omitempty"` + Level *int32 `protobuf:"varint,2,opt,name=level,proto3,oneof" json:"level,omitempty"` +} + +func (x *LogStreamFilters) Reset() { + *x = LogStreamFilters{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_rancher_opni_pkg_apis_control_v1_remote_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LogStreamFilters) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LogStreamFilters) ProtoMessage() {} + +func (x *LogStreamFilters) ProtoReflect() protoreflect.Message { + mi := &file_github_com_rancher_opni_pkg_apis_control_v1_remote_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LogStreamFilters.ProtoReflect.Descriptor instead. 
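
An illustrative aside, not part of this patch: a caller might populate the new request/filter messages as sketched below. The controlv1 alias matches how the agent code later in this diff imports the generated package; the literal values are placeholders.

    package main

    import (
    	"time"

    	controlv1 "github.com/rancher/opni/pkg/apis/control/v1"
    	"google.golang.org/protobuf/proto"
    	"google.golang.org/protobuf/types/known/timestamppb"
    )

    // exampleRequest asks for the last hour of records from loggers whose
    // name matches "agent.*" at info level (slog.LevelInfo == 0). Setting
    // Follow to true would keep the stream open for new records.
    func exampleRequest() *controlv1.LogStreamRequest {
    	return &controlv1.LogStreamRequest{
    		Since: timestamppb.New(time.Now().Add(-time.Hour)),
    		Until: timestamppb.Now(),
    		Filters: &controlv1.LogStreamFilters{
    			NamePattern: []string{"agent.*"},
    			Level:       proto.Int32(0),
    		},
    	}
    }
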
+func (*LogStreamFilters) Descriptor() ([]byte, []int) { + return file_github_com_rancher_opni_pkg_apis_control_v1_remote_proto_rawDescGZIP(), []int{8} +} + +func (x *LogStreamFilters) GetNamePattern() []string { + if x != nil { + return x.NamePattern + } + return nil +} + +func (x *LogStreamFilters) GetLevel() int32 { + if x != nil && x.Level != nil { + return *x.Level + } + return 0 +} + +type StructuredLogRecord struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Time *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=time,proto3" json:"time,omitempty"` + Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` + Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` + Source string `protobuf:"bytes,4,opt,name=source,proto3" json:"source,omitempty"` + Level string `protobuf:"bytes,5,opt,name=level,proto3" json:"level,omitempty"` + Attributes []*Attr `protobuf:"bytes,6,rep,name=attributes,proto3" json:"attributes,omitempty"` +} + +func (x *StructuredLogRecord) Reset() { + *x = StructuredLogRecord{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_rancher_opni_pkg_apis_control_v1_remote_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StructuredLogRecord) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StructuredLogRecord) ProtoMessage() {} + +func (x *StructuredLogRecord) ProtoReflect() protoreflect.Message { + mi := &file_github_com_rancher_opni_pkg_apis_control_v1_remote_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StructuredLogRecord.ProtoReflect.Descriptor instead. 
+func (*StructuredLogRecord) Descriptor() ([]byte, []int) { + return file_github_com_rancher_opni_pkg_apis_control_v1_remote_proto_rawDescGZIP(), []int{9} +} + +func (x *StructuredLogRecord) GetTime() *timestamppb.Timestamp { + if x != nil { + return x.Time + } + return nil +} + +func (x *StructuredLogRecord) GetMessage() string { + if x != nil { + return x.Message + } + return "" +} + +func (x *StructuredLogRecord) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *StructuredLogRecord) GetSource() string { + if x != nil { + return x.Source + } + return "" +} + +func (x *StructuredLogRecord) GetLevel() string { + if x != nil { + return x.Level + } + return "" +} + +func (x *StructuredLogRecord) GetAttributes() []*Attr { + if x != nil { + return x.Attributes + } + return nil +} + +type StructuredLogRecords struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Items []*StructuredLogRecord `protobuf:"bytes,1,rep,name=items,proto3" json:"items,omitempty"` +} + +func (x *StructuredLogRecords) Reset() { + *x = StructuredLogRecords{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_rancher_opni_pkg_apis_control_v1_remote_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StructuredLogRecords) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StructuredLogRecords) ProtoMessage() {} + +func (x *StructuredLogRecords) ProtoReflect() protoreflect.Message { + mi := &file_github_com_rancher_opni_pkg_apis_control_v1_remote_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StructuredLogRecords.ProtoReflect.Descriptor instead. +func (*StructuredLogRecords) Descriptor() ([]byte, []int) { + return file_github_com_rancher_opni_pkg_apis_control_v1_remote_proto_rawDescGZIP(), []int{10} +} + +func (x *StructuredLogRecords) GetItems() []*StructuredLogRecord { + if x != nil { + return x.Items + } + return nil +} + +type Attr struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *Attr) Reset() { + *x = Attr{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_rancher_opni_pkg_apis_control_v1_remote_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Attr) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Attr) ProtoMessage() {} + +func (x *Attr) ProtoReflect() protoreflect.Message { + mi := &file_github_com_rancher_opni_pkg_apis_control_v1_remote_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Attr.ProtoReflect.Descriptor instead. 
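
Another aside, not part of the patch: these wire types line up one-to-one with log/slog records, so a producer-side conversion is mechanical. The helper name below is hypothetical; it assumes the imports from the previous sketch plus "log/slog".

    // toWireRecord copies an slog.Record into the generated message,
    // flattening its attributes into control.Attr key/value pairs.
    func toWireRecord(name, source string, rec slog.Record) *controlv1.StructuredLogRecord {
    	out := &controlv1.StructuredLogRecord{
    		Time:    timestamppb.New(rec.Time),
    		Message: rec.Message,
    		Name:    name,
    		Source:  source,
    		Level:   rec.Level.String(),
    	}
    	rec.Attrs(func(a slog.Attr) bool {
    		out.Attributes = append(out.Attributes, &controlv1.Attr{
    			Key:   a.Key,
    			Value: a.Value.String(),
    		})
    		return true // continue iterating over all attributes
    	})
    	return out
    }
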
+func (*Attr) Descriptor() ([]byte, []int) { + return file_github_com_rancher_opni_pkg_apis_control_v1_remote_proto_rawDescGZIP(), []int{11} +} + +func (x *Attr) GetKey() string { + if x != nil { + return x.Key + } + return "" +} + +func (x *Attr) GetValue() string { + if x != nil { + return x.Value + } + return "" +} + var File_github_com_rancher_opni_pkg_apis_control_v1_remote_proto protoreflect.FileDescriptor var file_github_com_rancher_opni_pkg_apis_control_v1_remote_proto_rawDesc = []byte{ @@ -485,75 +800,119 @@ var file_github_com_rancher_opni_pkg_apis_control_v1_remote_proto_rawDesc = []by 0x63, 0x68, 0x65, 0x72, 0x2f, 0x6f, 0x70, 0x6e, 0x69, 0x2f, 0x70, 0x6b, 0x67, 0x2f, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2f, 0x76, 0x31, 0x2f, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x07, 0x63, 0x6f, 0x6e, 0x74, - 0x72, 0x6f, 0x6c, 0x1a, 0x2a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, - 0x6b, 0x72, 0x61, 0x6c, 0x69, 0x63, 0x6b, 0x79, 0x2f, 0x74, 0x6f, 0x74, 0x65, 0x6d, 0x2f, 0x65, - 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, - 0x33, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x72, 0x61, 0x6e, 0x63, - 0x68, 0x65, 0x72, 0x2f, 0x6f, 0x70, 0x6e, 0x69, 0x2f, 0x70, 0x6b, 0x67, 0x2f, 0x61, 0x70, 0x69, - 0x73, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x22, 0x4b, 0x0a, 0x0b, 0x53, 0x79, 0x6e, 0x63, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, - 0x12, 0x3c, 0x0a, 0x0f, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x50, 0x61, 0x74, 0x63, - 0x68, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x6f, 0x6e, 0x74, - 0x72, 0x6f, 0x6c, 0x2e, 0x50, 0x61, 0x74, 0x63, 0x68, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x0f, 0x72, - 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x50, 0x61, 0x74, 0x63, 0x68, 0x65, 0x73, 0x22, 0x5b, - 0x0a, 0x13, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4d, 0x61, 0x6e, 0x69, 0x66, 0x65, 0x73, 0x74, - 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, - 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x70, - 0x61, 0x74, 0x68, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x22, 0x44, 0x0a, 0x0e, 0x55, - 0x70, 0x64, 0x61, 0x74, 0x65, 0x4d, 0x61, 0x6e, 0x69, 0x66, 0x65, 0x73, 0x74, 0x12, 0x32, 0x0a, - 0x05, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x63, - 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4d, 0x61, 0x6e, - 0x69, 0x66, 0x65, 0x73, 0x74, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x05, 0x69, 0x74, 0x65, 0x6d, - 0x73, 0x22, 0x62, 0x0a, 0x12, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x41, 0x72, 0x63, 0x68, 0x69, - 0x76, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x38, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, - 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x63, 0x6f, 0x6e, 0x74, - 0x72, 0x6f, 0x6c, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4d, 0x61, 0x6e, 0x69, 0x66, 0x65, - 0x73, 0x74, 
0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, - 0x61, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, - 0x04, 0x64, 0x61, 0x74, 0x61, 0x22, 0x42, 0x0a, 0x0d, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x41, - 0x72, 0x63, 0x68, 0x69, 0x76, 0x65, 0x12, 0x31, 0x0a, 0x05, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, - 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, - 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x41, 0x72, 0x63, 0x68, 0x69, 0x76, 0x65, 0x45, 0x6e, 0x74, - 0x72, 0x79, 0x52, 0x05, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x22, 0xab, 0x01, 0x0a, 0x09, 0x50, 0x61, - 0x74, 0x63, 0x68, 0x53, 0x70, 0x65, 0x63, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x63, 0x6b, 0x61, - 0x67, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, - 0x65, 0x12, 0x20, 0x0a, 0x02, 0x6f, 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x10, 0x2e, - 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x50, 0x61, 0x74, 0x63, 0x68, 0x4f, 0x70, 0x52, - 0x02, 0x6f, 0x70, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x0c, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, - 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x1c, 0x0a, 0x09, 0x6f, - 0x6c, 0x64, 0x44, 0x69, 0x67, 0x65, 0x73, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, - 0x6f, 0x6c, 0x64, 0x44, 0x69, 0x67, 0x65, 0x73, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x6e, 0x65, 0x77, - 0x44, 0x69, 0x67, 0x65, 0x73, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6e, 0x65, - 0x77, 0x44, 0x69, 0x67, 0x65, 0x73, 0x74, 0x22, 0x35, 0x0a, 0x09, 0x50, 0x61, 0x74, 0x63, 0x68, - 0x4c, 0x69, 0x73, 0x74, 0x12, 0x28, 0x0a, 0x05, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x01, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x50, 0x61, - 0x74, 0x63, 0x68, 0x53, 0x70, 0x65, 0x63, 0x52, 0x05, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x2a, 0x43, - 0x0a, 0x07, 0x50, 0x61, 0x74, 0x63, 0x68, 0x4f, 0x70, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x6f, 0x6e, - 0x65, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x10, 0x01, 0x12, - 0x0a, 0x0a, 0x06, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x10, 0x02, 0x12, 0x0a, 0x0a, 0x06, 0x52, - 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x10, 0x03, 0x12, 0x0a, 0x0a, 0x06, 0x52, 0x65, 0x6e, 0x61, 0x6d, - 0x65, 0x10, 0x04, 0x32, 0x3b, 0x0a, 0x06, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x12, 0x31, 0x0a, - 0x09, 0x47, 0x65, 0x74, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, - 0x74, 0x79, 0x1a, 0x0c, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, - 0x32, 0x46, 0x0a, 0x0e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, - 0x65, 0x72, 0x12, 0x34, 0x0a, 0x0c, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x48, 0x65, 0x61, 0x6c, - 0x74, 0x68, 0x12, 0x0c, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, - 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x32, 0x4b, 0x0a, 0x0a, 0x55, 0x70, 0x64, 0x61, - 0x74, 0x65, 0x53, 0x79, 0x6e, 0x63, 0x12, 0x3d, 0x0a, 0x0c, 0x53, 0x79, 0x6e, 0x63, 0x4d, 0x61, - 0x6e, 0x69, 0x66, 0x65, 0x73, 0x74, 0x12, 0x17, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, - 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 
0x65, 0x4d, 0x61, 0x6e, 0x69, 0x66, 0x65, 0x73, 0x74, 0x1a, - 0x14, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x79, 0x6e, 0x63, 0x52, 0x65, - 0x73, 0x75, 0x6c, 0x74, 0x73, 0x42, 0x2d, 0x5a, 0x2b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, - 0x63, 0x6f, 0x6d, 0x2f, 0x72, 0x61, 0x6e, 0x63, 0x68, 0x65, 0x72, 0x2f, 0x6f, 0x70, 0x6e, 0x69, - 0x2f, 0x70, 0x6b, 0x67, 0x2f, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, - 0x6c, 0x2f, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x72, 0x6f, 0x6c, 0x1a, 0x33, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, + 0x72, 0x61, 0x6e, 0x63, 0x68, 0x65, 0x72, 0x2f, 0x6f, 0x70, 0x6e, 0x69, 0x2f, 0x70, 0x6b, 0x67, + 0x2f, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x63, 0x6f, + 0x72, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x4b, 0x0a, 0x0b, 0x53, 0x79, 0x6e, 0x63, 0x52, 0x65, + 0x73, 0x75, 0x6c, 0x74, 0x73, 0x12, 0x3c, 0x0a, 0x0f, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, + 0x64, 0x50, 0x61, 0x74, 0x63, 0x68, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, + 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x50, 0x61, 0x74, 0x63, 0x68, 0x4c, 0x69, + 0x73, 0x74, 0x52, 0x0f, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x50, 0x61, 0x74, 0x63, + 0x68, 0x65, 0x73, 0x22, 0x5b, 0x0a, 0x13, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4d, 0x61, 0x6e, + 0x69, 0x66, 0x65, 0x73, 0x74, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, + 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x70, 0x61, 0x63, + 0x6b, 0x61, 0x67, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x69, 0x67, 0x65, + 0x73, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, + 0x22, 0x44, 0x0a, 0x0e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4d, 0x61, 0x6e, 0x69, 0x66, 0x65, + 0x73, 0x74, 0x12, 0x32, 0x0a, 0x05, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x1c, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x55, 0x70, 0x64, 0x61, + 0x74, 0x65, 0x4d, 0x61, 0x6e, 0x69, 0x66, 0x65, 0x73, 0x74, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, + 0x05, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x22, 0x62, 0x0a, 0x12, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, + 0x41, 0x72, 0x63, 0x68, 0x69, 0x76, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x38, 0x0a, 0x08, + 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, + 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4d, + 0x61, 0x6e, 0x69, 0x66, 0x65, 0x73, 0x74, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x08, 0x6d, 0x65, + 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x22, 0x42, 0x0a, 0x0d, 0x50, 0x6c, + 0x75, 0x67, 0x69, 0x6e, 0x41, 0x72, 0x63, 0x68, 0x69, 0x76, 0x65, 0x12, 0x31, 0x0a, 0x05, 0x69, + 0x74, 0x65, 0x6d, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x63, 
0x6f, 0x6e, + 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x41, 0x72, 0x63, 0x68, 0x69, + 0x76, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x05, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x22, 0xab, + 0x01, 0x0a, 0x09, 0x50, 0x61, 0x74, 0x63, 0x68, 0x53, 0x70, 0x65, 0x63, 0x12, 0x18, 0x0a, 0x07, + 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x70, + 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x20, 0x0a, 0x02, 0x6f, 0x70, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0e, 0x32, 0x10, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x50, 0x61, 0x74, + 0x63, 0x68, 0x4f, 0x70, 0x52, 0x02, 0x6f, 0x70, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x12, 0x12, 0x0a, 0x04, + 0x70, 0x61, 0x74, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, + 0x12, 0x1c, 0x0a, 0x09, 0x6f, 0x6c, 0x64, 0x44, 0x69, 0x67, 0x65, 0x73, 0x74, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x09, 0x6f, 0x6c, 0x64, 0x44, 0x69, 0x67, 0x65, 0x73, 0x74, 0x12, 0x1c, + 0x0a, 0x09, 0x6e, 0x65, 0x77, 0x44, 0x69, 0x67, 0x65, 0x73, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x09, 0x6e, 0x65, 0x77, 0x44, 0x69, 0x67, 0x65, 0x73, 0x74, 0x22, 0x35, 0x0a, 0x09, + 0x50, 0x61, 0x74, 0x63, 0x68, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x28, 0x0a, 0x05, 0x69, 0x74, 0x65, + 0x6d, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, + 0x6f, 0x6c, 0x2e, 0x50, 0x61, 0x74, 0x63, 0x68, 0x53, 0x70, 0x65, 0x63, 0x52, 0x05, 0x69, 0x74, + 0x65, 0x6d, 0x73, 0x22, 0xc3, 0x01, 0x0a, 0x10, 0x4c, 0x6f, 0x67, 0x53, 0x74, 0x72, 0x65, 0x61, + 0x6d, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x30, 0x0a, 0x05, 0x73, 0x69, 0x6e, 0x63, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, + 0x61, 0x6d, 0x70, 0x52, 0x05, 0x73, 0x69, 0x6e, 0x63, 0x65, 0x12, 0x30, 0x0a, 0x05, 0x75, 0x6e, + 0x74, 0x69, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, + 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x05, 0x75, 0x6e, 0x74, 0x69, 0x6c, 0x12, 0x33, 0x0a, 0x07, + 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, + 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x4c, 0x6f, 0x67, 0x53, 0x74, 0x72, 0x65, 0x61, + 0x6d, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x52, 0x07, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, + 0x73, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x6f, 0x6c, 0x6c, 0x6f, 0x77, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x06, 0x66, 0x6f, 0x6c, 0x6c, 0x6f, 0x77, 0x22, 0x59, 0x0a, 0x10, 0x4c, 0x6f, 0x67, + 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x12, 0x20, 0x0a, + 0x0b, 0x6e, 0x61, 0x6d, 0x65, 0x50, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x18, 0x01, 0x20, 0x03, + 0x28, 0x09, 0x52, 0x0b, 0x6e, 0x61, 0x6d, 0x65, 0x50, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x12, + 0x19, 0x0a, 0x05, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x48, 0x00, + 0x52, 0x05, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x88, 0x01, 0x01, 0x42, 0x08, 0x0a, 0x06, 0x5f, 0x6c, + 0x65, 0x76, 0x65, 0x6c, 0x22, 0xd0, 0x01, 0x0a, 0x13, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x75, + 0x72, 0x65, 0x64, 0x4c, 0x6f, 0x67, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x12, 0x2e, 0x0a, 0x04, + 0x74, 0x69, 
0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, + 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x04, 0x74, 0x69, 0x6d, 0x65, 0x12, 0x18, 0x0a, 0x07, + 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, + 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x05, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x2d, 0x0a, 0x0a, 0x61, 0x74, 0x74, 0x72, + 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x63, + 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x41, 0x74, 0x74, 0x72, 0x52, 0x0a, 0x61, 0x74, 0x74, + 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x22, 0x4a, 0x0a, 0x14, 0x53, 0x74, 0x72, 0x75, 0x63, + 0x74, 0x75, 0x72, 0x65, 0x64, 0x4c, 0x6f, 0x67, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x73, 0x12, + 0x32, 0x0a, 0x05, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, + 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x75, + 0x72, 0x65, 0x64, 0x4c, 0x6f, 0x67, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x52, 0x05, 0x69, 0x74, + 0x65, 0x6d, 0x73, 0x22, 0x2e, 0x0a, 0x04, 0x41, 0x74, 0x74, 0x72, 0x12, 0x10, 0x0a, 0x03, 0x6b, + 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x2a, 0x43, 0x0a, 0x07, 0x50, 0x61, 0x74, 0x63, 0x68, 0x4f, 0x70, 0x12, 0x08, + 0x0a, 0x04, 0x4e, 0x6f, 0x6e, 0x65, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x55, 0x70, 0x64, 0x61, + 0x74, 0x65, 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x10, 0x02, + 0x12, 0x0a, 0x0a, 0x06, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x10, 0x03, 0x12, 0x0a, 0x0a, 0x06, + 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x10, 0x04, 0x32, 0x3b, 0x0a, 0x06, 0x48, 0x65, 0x61, 0x6c, + 0x74, 0x68, 0x12, 0x31, 0x0a, 0x09, 0x47, 0x65, 0x74, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x12, + 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x0c, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x48, + 0x65, 0x61, 0x6c, 0x74, 0x68, 0x32, 0x46, 0x0a, 0x0e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x4c, + 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x12, 0x34, 0x0a, 0x0c, 0x55, 0x70, 0x64, 0x61, 0x74, + 0x65, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x12, 0x0c, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x48, + 0x65, 0x61, 0x6c, 0x74, 0x68, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x32, 0x4b, 0x0a, + 0x0a, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x53, 0x79, 0x6e, 0x63, 0x12, 0x3d, 0x0a, 0x0c, 0x53, + 0x79, 0x6e, 0x63, 0x4d, 0x61, 0x6e, 0x69, 0x66, 0x65, 0x73, 0x74, 0x12, 0x17, 0x2e, 0x63, 0x6f, + 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4d, 0x61, 0x6e, 0x69, + 0x66, 0x65, 0x73, 0x74, 0x1a, 0x14, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, + 0x79, 0x6e, 0x63, 0x52, 0x65, 0x73, 
0x75, 0x6c, 0x74, 0x73, 0x32, 0x4e, 0x0a, 0x03, 0x4c, 0x6f, + 0x67, 0x12, 0x47, 0x0a, 0x0a, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x4c, 0x6f, 0x67, 0x73, 0x12, + 0x19, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x4c, 0x6f, 0x67, 0x53, 0x74, 0x72, + 0x65, 0x61, 0x6d, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x63, 0x6f, 0x6e, + 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x75, 0x72, 0x65, 0x64, 0x4c, + 0x6f, 0x67, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x30, 0x01, 0x42, 0x2d, 0x5a, 0x2b, 0x67, 0x69, + 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x72, 0x61, 0x6e, 0x63, 0x68, 0x65, 0x72, + 0x2f, 0x6f, 0x70, 0x6e, 0x69, 0x2f, 0x70, 0x6b, 0x67, 0x2f, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x63, + 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2f, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x33, } var ( @@ -569,37 +928,51 @@ func file_github_com_rancher_opni_pkg_apis_control_v1_remote_proto_rawDescGZIP() } var file_github_com_rancher_opni_pkg_apis_control_v1_remote_proto_enumTypes = make([]protoimpl.EnumInfo, 1) -var file_github_com_rancher_opni_pkg_apis_control_v1_remote_proto_msgTypes = make([]protoimpl.MessageInfo, 7) +var file_github_com_rancher_opni_pkg_apis_control_v1_remote_proto_msgTypes = make([]protoimpl.MessageInfo, 12) var file_github_com_rancher_opni_pkg_apis_control_v1_remote_proto_goTypes = []interface{}{ - (PatchOp)(0), // 0: control.PatchOp - (*SyncResults)(nil), // 1: control.SyncResults - (*UpdateManifestEntry)(nil), // 2: control.UpdateManifestEntry - (*UpdateManifest)(nil), // 3: control.UpdateManifest - (*PluginArchiveEntry)(nil), // 4: control.PluginArchiveEntry - (*PluginArchive)(nil), // 5: control.PluginArchive - (*PatchSpec)(nil), // 6: control.PatchSpec - (*PatchList)(nil), // 7: control.PatchList - (*emptypb.Empty)(nil), // 8: google.protobuf.Empty - (*v1.Health)(nil), // 9: core.Health + (PatchOp)(0), // 0: control.PatchOp + (*SyncResults)(nil), // 1: control.SyncResults + (*UpdateManifestEntry)(nil), // 2: control.UpdateManifestEntry + (*UpdateManifest)(nil), // 3: control.UpdateManifest + (*PluginArchiveEntry)(nil), // 4: control.PluginArchiveEntry + (*PluginArchive)(nil), // 5: control.PluginArchive + (*PatchSpec)(nil), // 6: control.PatchSpec + (*PatchList)(nil), // 7: control.PatchList + (*LogStreamRequest)(nil), // 8: control.LogStreamRequest + (*LogStreamFilters)(nil), // 9: control.LogStreamFilters + (*StructuredLogRecord)(nil), // 10: control.StructuredLogRecord + (*StructuredLogRecords)(nil), // 11: control.StructuredLogRecords + (*Attr)(nil), // 12: control.Attr + (*timestamppb.Timestamp)(nil), // 13: google.protobuf.Timestamp + (*emptypb.Empty)(nil), // 14: google.protobuf.Empty + (*v1.Health)(nil), // 15: core.Health } var file_github_com_rancher_opni_pkg_apis_control_v1_remote_proto_depIdxs = []int32{ - 7, // 0: control.SyncResults.requiredPatches:type_name -> control.PatchList - 2, // 1: control.UpdateManifest.items:type_name -> control.UpdateManifestEntry - 2, // 2: control.PluginArchiveEntry.metadata:type_name -> control.UpdateManifestEntry - 4, // 3: control.PluginArchive.items:type_name -> control.PluginArchiveEntry - 0, // 4: control.PatchSpec.op:type_name -> control.PatchOp - 6, // 5: control.PatchList.items:type_name -> control.PatchSpec - 8, // 6: control.Health.GetHealth:input_type -> google.protobuf.Empty - 9, // 7: control.HealthListener.UpdateHealth:input_type -> core.Health - 3, // 8: control.UpdateSync.SyncManifest:input_type -> control.UpdateManifest - 9, // 9: 
control.Health.GetHealth:output_type -> core.Health - 8, // 10: control.HealthListener.UpdateHealth:output_type -> google.protobuf.Empty - 1, // 11: control.UpdateSync.SyncManifest:output_type -> control.SyncResults - 9, // [9:12] is the sub-list for method output_type - 6, // [6:9] is the sub-list for method input_type - 6, // [6:6] is the sub-list for extension type_name - 6, // [6:6] is the sub-list for extension extendee - 0, // [0:6] is the sub-list for field type_name + 7, // 0: control.SyncResults.requiredPatches:type_name -> control.PatchList + 2, // 1: control.UpdateManifest.items:type_name -> control.UpdateManifestEntry + 2, // 2: control.PluginArchiveEntry.metadata:type_name -> control.UpdateManifestEntry + 4, // 3: control.PluginArchive.items:type_name -> control.PluginArchiveEntry + 0, // 4: control.PatchSpec.op:type_name -> control.PatchOp + 6, // 5: control.PatchList.items:type_name -> control.PatchSpec + 13, // 6: control.LogStreamRequest.since:type_name -> google.protobuf.Timestamp + 13, // 7: control.LogStreamRequest.until:type_name -> google.protobuf.Timestamp + 9, // 8: control.LogStreamRequest.filters:type_name -> control.LogStreamFilters + 13, // 9: control.StructuredLogRecord.time:type_name -> google.protobuf.Timestamp + 12, // 10: control.StructuredLogRecord.attributes:type_name -> control.Attr + 10, // 11: control.StructuredLogRecords.items:type_name -> control.StructuredLogRecord + 14, // 12: control.Health.GetHealth:input_type -> google.protobuf.Empty + 15, // 13: control.HealthListener.UpdateHealth:input_type -> core.Health + 3, // 14: control.UpdateSync.SyncManifest:input_type -> control.UpdateManifest + 8, // 15: control.Log.StreamLogs:input_type -> control.LogStreamRequest + 15, // 16: control.Health.GetHealth:output_type -> core.Health + 14, // 17: control.HealthListener.UpdateHealth:output_type -> google.protobuf.Empty + 1, // 18: control.UpdateSync.SyncManifest:output_type -> control.SyncResults + 10, // 19: control.Log.StreamLogs:output_type -> control.StructuredLogRecord + 16, // [16:20] is the sub-list for method output_type + 12, // [12:16] is the sub-list for method input_type + 12, // [12:12] is the sub-list for extension type_name + 12, // [12:12] is the sub-list for extension extendee + 0, // [0:12] is the sub-list for field type_name } func init() { file_github_com_rancher_opni_pkg_apis_control_v1_remote_proto_init() } @@ -692,16 +1065,77 @@ func file_github_com_rancher_opni_pkg_apis_control_v1_remote_proto_init() { return nil } } + file_github_com_rancher_opni_pkg_apis_control_v1_remote_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LogStreamRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_rancher_opni_pkg_apis_control_v1_remote_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LogStreamFilters); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_rancher_opni_pkg_apis_control_v1_remote_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StructuredLogRecord); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_rancher_opni_pkg_apis_control_v1_remote_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := 
v.(*StructuredLogRecords); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_rancher_opni_pkg_apis_control_v1_remote_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Attr); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } } + file_github_com_rancher_opni_pkg_apis_control_v1_remote_proto_msgTypes[8].OneofWrappers = []interface{}{} type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_github_com_rancher_opni_pkg_apis_control_v1_remote_proto_rawDesc, NumEnums: 1, - NumMessages: 7, + NumMessages: 12, NumExtensions: 0, - NumServices: 3, + NumServices: 4, }, GoTypes: file_github_com_rancher_opni_pkg_apis_control_v1_remote_proto_goTypes, DependencyIndexes: file_github_com_rancher_opni_pkg_apis_control_v1_remote_proto_depIdxs, diff --git a/pkg/apis/control/v1/remote.proto b/pkg/apis/control/v1/remote.proto index 2ad0722cb9..15a8620b14 100644 --- a/pkg/apis/control/v1/remote.proto +++ b/pkg/apis/control/v1/remote.proto @@ -2,9 +2,9 @@ syntax = "proto3"; package control; -import "github.com/kralicky/totem/extensions.proto"; import "github.com/rancher/opni/pkg/apis/core/v1/core.proto"; import "google/protobuf/empty.proto"; +import "google/protobuf/timestamp.proto"; option go_package = "github.com/rancher/opni/pkg/apis/control/v1"; @@ -20,6 +20,10 @@ service UpdateSync { rpc SyncManifest(UpdateManifest) returns (SyncResults); } +service Log { + rpc StreamLogs(LogStreamRequest) returns (stream StructuredLogRecord); +} + enum PatchOp { // revisions match None = 0; @@ -70,3 +74,33 @@ message PatchSpec { message PatchList { repeated PatchSpec items = 1; } + +message LogStreamRequest { + google.protobuf.Timestamp since = 1; + google.protobuf.Timestamp until = 2; + LogStreamFilters filters = 3; + bool follow = 4; +} + +message LogStreamFilters { + repeated string namePattern = 1; + optional int32 level = 2; +} + +message StructuredLogRecord { + google.protobuf.Timestamp time = 1; + string message = 2; + string name = 3; + string source = 4; + string level = 5; + repeated Attr attributes = 6; +} + +message StructuredLogRecords { + repeated StructuredLogRecord items = 1; +} + +message Attr { + string key = 1; + string value = 2; +} \ No newline at end of file diff --git a/pkg/apis/control/v1/remote_grpc.pb.go b/pkg/apis/control/v1/remote_grpc.pb.go index 9694773eee..44aa1ed962 100644 --- a/pkg/apis/control/v1/remote_grpc.pb.go +++ b/pkg/apis/control/v1/remote_grpc.pb.go @@ -289,3 +289,120 @@ var UpdateSync_ServiceDesc = grpc.ServiceDesc{ Streams: []grpc.StreamDesc{}, Metadata: "github.com/rancher/opni/pkg/apis/control/v1/remote.proto", } + +const ( + Log_StreamLogs_FullMethodName = "/control.Log/StreamLogs" +) + +// LogClient is the client API for Log service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. 
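
One more aside before the generated stubs, not part of the patch: a minimal server implementation embeds UnimplementedLogServer and writes records to the stream. The in-memory records field is hypothetical; Validate comes from pkg/apis/control/v1/validate.go later in this diff, and the imports match the first sketch.

    type logServer struct {
    	controlv1.UnimplementedLogServer
    	records []*controlv1.StructuredLogRecord // hypothetical in-memory source
    }

    func (s *logServer) StreamLogs(req *controlv1.LogStreamRequest, stream controlv1.Log_StreamLogsServer) error {
    	if err := req.Validate(); err != nil {
    		return err
    	}
    	for _, rec := range s.records {
    		t := rec.GetTime().AsTime()
    		if t.Before(req.GetSince().AsTime()) || t.After(req.GetUntil().AsTime()) {
    			continue // outside the requested time window
    		}
    		if err := stream.Send(rec); err != nil {
    			return err
    		}
    	}
    	return nil
    }

Registration would mirror the other services in this file: controlv1.RegisterLogServer(grpcServer, &logServer{}).
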
+type LogClient interface { + StreamLogs(ctx context.Context, in *LogStreamRequest, opts ...grpc.CallOption) (Log_StreamLogsClient, error) +} + +type logClient struct { + cc grpc.ClientConnInterface +} + +func NewLogClient(cc grpc.ClientConnInterface) LogClient { + return &logClient{cc} +} + +func (c *logClient) StreamLogs(ctx context.Context, in *LogStreamRequest, opts ...grpc.CallOption) (Log_StreamLogsClient, error) { + stream, err := c.cc.NewStream(ctx, &Log_ServiceDesc.Streams[0], Log_StreamLogs_FullMethodName, opts...) + if err != nil { + return nil, err + } + x := &logStreamLogsClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type Log_StreamLogsClient interface { + Recv() (*StructuredLogRecord, error) + grpc.ClientStream +} + +type logStreamLogsClient struct { + grpc.ClientStream +} + +func (x *logStreamLogsClient) Recv() (*StructuredLogRecord, error) { + m := new(StructuredLogRecord) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// LogServer is the server API for Log service. +// All implementations must embed UnimplementedLogServer +// for forward compatibility +type LogServer interface { + StreamLogs(*LogStreamRequest, Log_StreamLogsServer) error + mustEmbedUnimplementedLogServer() +} + +// UnimplementedLogServer must be embedded to have forward compatible implementations. +type UnimplementedLogServer struct { +} + +func (UnimplementedLogServer) StreamLogs(*LogStreamRequest, Log_StreamLogsServer) error { + return status.Errorf(codes.Unimplemented, "method StreamLogs not implemented") +} +func (UnimplementedLogServer) mustEmbedUnimplementedLogServer() {} + +// UnsafeLogServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to LogServer will +// result in compilation errors. +type UnsafeLogServer interface { + mustEmbedUnimplementedLogServer() +} + +func RegisterLogServer(s grpc.ServiceRegistrar, srv LogServer) { + s.RegisterService(&Log_ServiceDesc, srv) +} + +func _Log_StreamLogs_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(LogStreamRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(LogServer).StreamLogs(m, &logStreamLogsServer{stream}) +} + +type Log_StreamLogsServer interface { + Send(*StructuredLogRecord) error + grpc.ServerStream +} + +type logStreamLogsServer struct { + grpc.ServerStream +} + +func (x *logStreamLogsServer) Send(m *StructuredLogRecord) error { + return x.ServerStream.SendMsg(m) +} + +// Log_ServiceDesc is the grpc.ServiceDesc for Log service. 
+// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var Log_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "control.Log", + HandlerType: (*LogServer)(nil), + Methods: []grpc.MethodDesc{}, + Streams: []grpc.StreamDesc{ + { + StreamName: "StreamLogs", + Handler: _Log_StreamLogs_Handler, + ServerStreams: true, + }, + }, + Metadata: "github.com/rancher/opni/pkg/apis/control/v1/remote.proto", +} diff --git a/pkg/apis/control/v1/validate.go b/pkg/apis/control/v1/validate.go index 9c543ebdcb..4189f0c45e 100644 --- a/pkg/apis/control/v1/validate.go +++ b/pkg/apis/control/v1/validate.go @@ -1,6 +1,10 @@ package v1 import ( + "fmt" + "log/slog" + "regexp" + "github.com/rancher/opni/pkg/validation" ) @@ -86,3 +90,41 @@ func (a *PatchList) Validate() error { } return nil } + +func (r *LogStreamRequest) Validate() error { + if r.GetSince() == nil { + return fmt.Errorf("%w: %s", validation.ErrMissingRequiredField, "since") + } + if r.GetUntil() == nil { + return fmt.Errorf("%w: %s", validation.ErrMissingRequiredField, "until") + } + since := r.GetSince().AsTime() + until := r.GetUntil().AsTime() + if since.After(until) { + return fmt.Errorf("%w: %s", validation.ErrInvalidValue, "start time must be before end time") + } + + if r.GetFilters() == nil { + return nil + } + + switch slog.Level(r.GetFilters().GetLevel()) { + case slog.LevelDebug: + case slog.LevelInfo: + case slog.LevelWarn: + case slog.LevelError: + default: + return fmt.Errorf("%w: %s", validation.ErrInvalidValue, "log level") + } + + for _, name := range r.GetFilters().GetNamePattern() { + if name == "" { + return fmt.Errorf("%w: %s", validation.ErrMissingRequiredField, "filter pattern") + } + + if _, err := regexp.Compile(name); err != nil { + return err + } + } + return nil +} diff --git a/pkg/apis/management/v1/management.pb.go b/pkg/apis/management/v1/management.pb.go index cfa2f52c70..5d6bb2f2b5 100644 --- a/pkg/apis/management/v1/management.pb.go +++ b/pkg/apis/management/v1/management.pb.go @@ -8,6 +8,7 @@ package v1 import ( v11 "github.com/rancher/opni/pkg/apis/capability/v1" + v12 "github.com/rancher/opni/pkg/apis/control/v1" v1 "github.com/rancher/opni/pkg/apis/core/v1" annotations "google.golang.org/genproto/googleapis/api/annotations" _ "google.golang.org/genproto/googleapis/rpc/status" @@ -1324,6 +1325,61 @@ func (x *DashboardGlobalSettings) GetDefaultTokenLabels() map[string]string { return nil } +type StreamAgentLogsRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Agent *v1.Reference `protobuf:"bytes,1,opt,name=agent,proto3" json:"agent,omitempty"` + Request *v12.LogStreamRequest `protobuf:"bytes,2,opt,name=request,proto3" json:"request,omitempty"` +} + +func (x *StreamAgentLogsRequest) Reset() { + *x = StreamAgentLogsRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_rancher_opni_pkg_apis_management_v1_management_proto_msgTypes[23] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StreamAgentLogsRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StreamAgentLogsRequest) ProtoMessage() {} + +func (x *StreamAgentLogsRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_rancher_opni_pkg_apis_management_v1_management_proto_msgTypes[23] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + 
ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StreamAgentLogsRequest.ProtoReflect.Descriptor instead. +func (*StreamAgentLogsRequest) Descriptor() ([]byte, []int) { + return file_github_com_rancher_opni_pkg_apis_management_v1_management_proto_rawDescGZIP(), []int{23} +} + +func (x *StreamAgentLogsRequest) GetAgent() *v1.Reference { + if x != nil { + return x.Agent + } + return nil +} + +func (x *StreamAgentLogsRequest) GetRequest() *v12.LogStreamRequest { + if x != nil { + return x.Request + } + return nil +} + var File_github_com_rancher_opni_pkg_apis_management_v1_management_proto protoreflect.FileDescriptor var file_github_com_rancher_opni_pkg_apis_management_v1_management_proto_rawDesc = []byte{ @@ -1339,200 +1395,211 @@ var file_github_com_rancher_opni_pkg_apis_management_v1_management_proto_rawDesc 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x72, 0x61, 0x6e, 0x63, 0x68, 0x65, 0x72, 0x2f, 0x6f, 0x70, 0x6e, 0x69, 0x2f, 0x70, 0x6b, 0x67, 0x2f, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, - 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x1a, 0x15, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x68, 0x74, - 0x74, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, - 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, - 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x72, - 0x70, 0x63, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, - 0xab, 0x02, 0x0a, 0x1b, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x42, 0x6f, 0x6f, 0x74, 0x73, 0x74, - 0x72, 0x61, 0x70, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, - 0x2b, 0x0a, 0x03, 0x74, 0x74, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, - 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x03, 0x74, 0x74, 0x6c, 0x12, 0x4b, 0x0a, 0x06, - 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x6d, - 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, - 0x42, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, - 0x79, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 
0x73, 0x12, 0x39, 0x0a, 0x0c, 0x63, 0x61, 0x70, - 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x15, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x43, 0x61, 0x70, 0x61, - 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x52, 0x0c, 0x63, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, - 0x74, 0x69, 0x65, 0x73, 0x12, 0x1c, 0x0a, 0x09, 0x6d, 0x61, 0x78, 0x55, 0x73, 0x61, 0x67, 0x65, - 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x6d, 0x61, 0x78, 0x55, 0x73, 0x61, 0x67, - 0x65, 0x73, 0x1a, 0x39, 0x0a, 0x0b, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, - 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, - 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x39, 0x0a, - 0x11, 0x43, 0x65, 0x72, 0x74, 0x73, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x12, 0x24, 0x0a, 0x05, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x43, 0x65, 0x72, 0x74, 0x49, 0x6e, 0x66, - 0x6f, 0x52, 0x05, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x22, 0x84, 0x01, 0x0a, 0x13, 0x4c, 0x69, 0x73, - 0x74, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x12, 0x35, 0x0a, 0x0b, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x4c, 0x61, 0x62, - 0x65, 0x6c, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x52, 0x0b, 0x6d, 0x61, 0x74, 0x63, - 0x68, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x12, 0x36, 0x0a, 0x0c, 0x6d, 0x61, 0x74, 0x63, 0x68, - 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x12, 0x2e, - 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x52, 0x0c, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, - 0xbe, 0x01, 0x0a, 0x12, 0x45, 0x64, 0x69, 0x74, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x29, 0x0a, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, - 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x52, - 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, - 0x72, 0x12, 0x42, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x2a, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, - 0x64, 0x69, 0x74, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x6c, - 0x61, 0x62, 0x65, 0x6c, 0x73, 0x1a, 0x39, 0x0a, 0x0b, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, - 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, - 0x22, 0x51, 0x0a, 0x14, 0x57, 0x61, 0x74, 0x63, 0x68, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, - 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x39, 0x0a, 0x0d, 0x6b, 0x6e, 0x6f, 0x77, - 0x6e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, 
0x01, 0x28, 0x0b, 0x32, - 0x13, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, - 0x4c, 0x69, 0x73, 0x74, 0x52, 0x0d, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x43, 0x6c, 0x75, 0x73, 0x74, - 0x65, 0x72, 0x73, 0x22, 0x90, 0x01, 0x0a, 0x0a, 0x57, 0x61, 0x74, 0x63, 0x68, 0x45, 0x76, 0x65, - 0x6e, 0x74, 0x12, 0x27, 0x0a, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, - 0x65, 0x72, 0x52, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x2e, 0x0a, 0x04, 0x74, - 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1a, 0x2e, 0x6d, 0x61, 0x6e, 0x61, - 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x57, 0x61, 0x74, 0x63, 0x68, 0x45, 0x76, 0x65, 0x6e, - 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x29, 0x0a, 0x08, 0x70, - 0x72, 0x65, 0x76, 0x69, 0x6f, 0x75, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e, - 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x08, 0x70, 0x72, - 0x65, 0x76, 0x69, 0x6f, 0x75, 0x73, 0x22, 0x4a, 0x0a, 0x14, 0x41, 0x50, 0x49, 0x45, 0x78, 0x74, - 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x32, - 0x0a, 0x05, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, - 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x41, 0x50, 0x49, 0x45, 0x78, - 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x05, 0x69, 0x74, 0x65, - 0x6d, 0x73, 0x22, 0x93, 0x01, 0x0a, 0x10, 0x41, 0x50, 0x49, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, - 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x49, 0x0a, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, - 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, - 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x44, 0x65, - 0x73, 0x63, 0x12, 0x34, 0x0a, 0x05, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x1e, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x48, - 0x54, 0x54, 0x50, 0x52, 0x75, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, - 0x72, 0x52, 0x05, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x22, 0x7e, 0x0a, 0x12, 0x48, 0x54, 0x54, 0x50, - 0x52, 0x75, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x12, 0x28, - 0x0a, 0x04, 0x68, 0x74, 0x74, 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x52, 0x75, - 0x6c, 0x65, 0x52, 0x04, 0x68, 0x74, 0x74, 0x70, 0x12, 0x3e, 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x68, - 0x6f, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, - 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, - 0x52, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x22, 0x53, 0x0a, 0x0d, 0x47, 0x61, 0x74, 0x65, - 0x77, 0x61, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x42, 0x0a, 0x09, 0x64, 0x6f, 0x63, - 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x6d, - 
0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x57, 0x69, 0x74, 0x68, 0x53, 0x63, 0x68, 0x65, - 0x6d, 0x61, 0x52, 0x09, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x22, 0x5a, 0x0a, - 0x18, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x57, - 0x69, 0x74, 0x68, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x12, 0x0a, 0x04, 0x6a, 0x73, 0x6f, - 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x6a, 0x73, 0x6f, 0x6e, 0x12, 0x12, 0x0a, - 0x04, 0x79, 0x61, 0x6d, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x79, 0x61, 0x6d, - 0x6c, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x0c, 0x52, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x22, 0x24, 0x0a, 0x0e, 0x43, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6a, - 0x73, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x6a, 0x73, 0x6f, 0x6e, 0x22, - 0x4f, 0x0a, 0x13, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x38, 0x0a, 0x09, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65, - 0x6e, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x6d, 0x61, 0x6e, 0x61, - 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x6f, 0x63, - 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x52, 0x09, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x73, - 0x22, 0x42, 0x0a, 0x0e, 0x43, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x4c, 0x69, - 0x73, 0x74, 0x12, 0x30, 0x0a, 0x05, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x1a, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x43, - 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x05, 0x69, - 0x74, 0x65, 0x6d, 0x73, 0x22, 0x5d, 0x0a, 0x0e, 0x43, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, - 0x74, 0x79, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x2d, 0x0a, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, - 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x63, 0x61, 0x70, 0x61, 0x62, 0x69, - 0x6c, 0x69, 0x74, 0x79, 0x2e, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x52, 0x07, 0x64, 0x65, - 0x74, 0x61, 0x69, 0x6c, 0x73, 0x12, 0x1c, 0x0a, 0x09, 0x6e, 0x6f, 0x64, 0x65, 0x43, 0x6f, 0x75, - 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x09, 0x6e, 0x6f, 0x64, 0x65, 0x43, 0x6f, - 0x75, 0x6e, 0x74, 0x22, 0x58, 0x0a, 0x1a, 0x43, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, - 0x79, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x70, - 0x69, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x70, 0x69, 0x6e, 0x22, 0x62, 0x0a, - 0x18, 0x43, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x49, 0x6e, 0x73, 0x74, 0x61, - 0x6c, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, - 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x32, 0x0a, - 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, - 0x63, 0x61, 0x70, 0x61, 
0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x2e, 0x49, 0x6e, 0x73, 0x74, 0x61, - 0x6c, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, - 0x74, 0x22, 0x37, 0x0a, 0x1b, 0x43, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x49, - 0x6e, 0x73, 0x74, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x12, 0x18, 0x0a, 0x07, 0x63, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x07, 0x63, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x22, 0x66, 0x0a, 0x1a, 0x43, 0x61, - 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x55, 0x6e, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6c, - 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x34, 0x0a, 0x06, - 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x63, - 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x73, 0x74, - 0x61, 0x6c, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, - 0x65, 0x74, 0x22, 0x58, 0x0a, 0x17, 0x43, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, - 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, - 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, - 0x65, 0x12, 0x29, 0x0a, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, - 0x6e, 0x63, 0x65, 0x52, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x22, 0x61, 0x0a, 0x20, - 0x43, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x55, 0x6e, 0x69, 0x6e, 0x73, 0x74, - 0x61, 0x6c, 0x6c, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, - 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x29, 0x0a, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x52, 0x65, 0x66, - 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x22, - 0xd6, 0x01, 0x0a, 0x11, 0x44, 0x61, 0x73, 0x68, 0x62, 0x6f, 0x61, 0x72, 0x64, 0x53, 0x65, 0x74, - 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x40, 0x0a, 0x06, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, - 0x6e, 0x74, 0x2e, 0x44, 0x61, 0x73, 0x68, 0x62, 0x6f, 0x61, 0x72, 0x64, 0x47, 0x6c, 0x6f, 0x62, - 0x61, 0x6c, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x48, 0x00, 0x52, 0x06, 0x67, 0x6c, - 0x6f, 0x62, 0x61, 0x6c, 0x88, 0x01, 0x01, 0x12, 0x3b, 0x0a, 0x04, 0x75, 0x73, 0x65, 0x72, 0x18, - 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, - 0x6e, 0x74, 0x2e, 0x44, 0x61, 0x73, 0x68, 0x62, 0x6f, 0x61, 0x72, 0x64, 0x53, 0x65, 0x74, 0x74, - 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x55, 0x73, 0x65, 0x72, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x04, - 0x75, 0x73, 0x65, 0x72, 0x1a, 0x37, 0x0a, 0x09, 0x55, 0x73, 0x65, 0x72, 0x45, 0x6e, 0x74, 0x72, - 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, - 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 
0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, 0x09, 0x0a, - 0x07, 0x5f, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x22, 0xca, 0x02, 0x0a, 0x17, 0x44, 0x61, 0x73, + 0x6f, 0x74, 0x6f, 0x1a, 0x38, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, + 0x72, 0x61, 0x6e, 0x63, 0x68, 0x65, 0x72, 0x2f, 0x6f, 0x70, 0x6e, 0x69, 0x2f, 0x70, 0x6b, 0x67, + 0x2f, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2f, 0x76, 0x31, + 0x2f, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1c, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x15, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x68, 0x74, 0x74, 0x70, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, + 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x1a, 0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x72, 0x70, 0x63, 0x2f, 0x73, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xab, 0x02, 0x0a, 0x1b, 0x43, 0x72, + 0x65, 0x61, 0x74, 0x65, 0x42, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x54, 0x6f, 0x6b, + 0x65, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2b, 0x0a, 0x03, 0x74, 0x74, 0x6c, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x03, 0x74, 0x74, 0x6c, 0x12, 0x4b, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, + 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, + 0x65, 0x6e, 0x74, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x42, 0x6f, 0x6f, 0x74, 0x73, 0x74, + 0x72, 0x61, 0x70, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, + 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x6c, 0x61, 0x62, + 0x65, 0x6c, 0x73, 0x12, 0x39, 0x0a, 0x0c, 0x63, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, + 0x69, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x63, 0x6f, 0x72, 0x65, + 0x2e, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x43, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, + 0x52, 0x0c, 0x63, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x12, 0x1c, + 0x0a, 0x09, 0x6d, 0x61, 0x78, 0x55, 0x73, 0x61, 0x67, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x03, 0x52, 0x09, 0x6d, 0x61, 0x78, 0x55, 0x73, 0x61, 0x67, 0x65, 0x73, 0x1a, 0x39, 0x0a, 0x0b, + 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 
0x10, 0x0a, 0x03, 0x6b, + 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x39, 0x0a, 0x11, 0x43, 0x65, 0x72, 0x74, 0x73, + 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x24, 0x0a, 0x05, + 0x63, 0x68, 0x61, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x6f, + 0x72, 0x65, 0x2e, 0x43, 0x65, 0x72, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x05, 0x63, 0x68, 0x61, + 0x69, 0x6e, 0x22, 0x84, 0x01, 0x0a, 0x13, 0x4c, 0x69, 0x73, 0x74, 0x43, 0x6c, 0x75, 0x73, 0x74, + 0x65, 0x72, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x35, 0x0a, 0x0b, 0x6d, 0x61, + 0x74, 0x63, 0x68, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x13, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x53, 0x65, 0x6c, 0x65, + 0x63, 0x74, 0x6f, 0x72, 0x52, 0x0b, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x4c, 0x61, 0x62, 0x65, 0x6c, + 0x73, 0x12, 0x36, 0x0a, 0x0c, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x12, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x4d, + 0x61, 0x74, 0x63, 0x68, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x0c, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xbe, 0x01, 0x0a, 0x12, 0x45, 0x64, + 0x69, 0x74, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x29, 0x0a, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x0f, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, + 0x63, 0x65, 0x52, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x42, 0x0a, 0x06, 0x6c, + 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x6d, 0x61, + 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x43, 0x6c, 0x75, + 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x4c, 0x61, 0x62, 0x65, + 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x1a, + 0x39, 0x0a, 0x0b, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, + 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, + 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x51, 0x0a, 0x14, 0x57, 0x61, + 0x74, 0x63, 0x68, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x39, 0x0a, 0x0d, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x43, 0x6c, 0x75, 0x73, 0x74, + 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x63, 0x6f, 0x72, 0x65, + 0x2e, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x0d, + 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x22, 0x90, 0x01, + 0x0a, 0x0a, 0x57, 0x61, 0x74, 0x63, 0x68, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x27, 0x0a, 0x07, + 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e, + 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x07, 0x63, 0x6c, + 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x2e, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, + 
0x01, 0x28, 0x0e, 0x32, 0x1a, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, + 0x2e, 0x57, 0x61, 0x74, 0x63, 0x68, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, + 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x29, 0x0a, 0x08, 0x70, 0x72, 0x65, 0x76, 0x69, 0x6f, 0x75, + 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x43, + 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x08, 0x70, 0x72, 0x65, 0x76, 0x69, 0x6f, 0x75, 0x73, + 0x22, 0x4a, 0x0a, 0x14, 0x41, 0x50, 0x49, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x49, 0x6e, 0x66, 0x6f, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x32, 0x0a, 0x05, 0x69, 0x74, 0x65, 0x6d, + 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, + 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x41, 0x50, 0x49, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x05, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x22, 0x93, 0x01, 0x0a, + 0x10, 0x41, 0x50, 0x49, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, + 0x6f, 0x12, 0x49, 0x0a, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, + 0x0b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x12, 0x34, 0x0a, 0x05, + 0x72, 0x75, 0x6c, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x6d, 0x61, + 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x48, 0x54, 0x54, 0x50, 0x52, 0x75, 0x6c, + 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x05, 0x72, 0x75, 0x6c, + 0x65, 0x73, 0x22, 0x7e, 0x0a, 0x12, 0x48, 0x54, 0x54, 0x50, 0x52, 0x75, 0x6c, 0x65, 0x44, 0x65, + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x12, 0x28, 0x0a, 0x04, 0x68, 0x74, 0x74, 0x70, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x61, 0x70, 0x69, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x52, 0x75, 0x6c, 0x65, 0x52, 0x04, 0x68, 0x74, + 0x74, 0x70, 0x12, 0x3e, 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x06, 0x6d, 0x65, 0x74, 0x68, + 0x6f, 0x64, 0x22, 0x53, 0x0a, 0x0d, 0x47, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x12, 0x42, 0x0a, 0x09, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x73, + 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, + 0x65, 0x6e, 0x74, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, + 0x6e, 0x74, 0x57, 0x69, 0x74, 0x68, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x09, 0x64, 0x6f, + 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x22, 0x5a, 0x0a, 0x18, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x57, 0x69, 0x74, 0x68, 0x53, 0x63, 0x68, + 0x65, 0x6d, 0x61, 0x12, 0x12, 0x0a, 0x04, 0x6a, 0x73, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0c, 0x52, 0x04, 0x6a, 0x73, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x79, 0x61, 0x6d, 0x6c, 0x18, + 0x02, 0x20, 0x01, 0x28, 
0x0c, 0x52, 0x04, 0x79, 0x61, 0x6d, 0x6c, 0x12, 0x16, 0x0a, 0x06, 0x73, + 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x73, 0x63, 0x68, + 0x65, 0x6d, 0x61, 0x22, 0x24, 0x0a, 0x0e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x6f, 0x63, + 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6a, 0x73, 0x6f, 0x6e, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0c, 0x52, 0x04, 0x6a, 0x73, 0x6f, 0x6e, 0x22, 0x4f, 0x0a, 0x13, 0x55, 0x70, 0x64, + 0x61, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x38, 0x0a, 0x09, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x01, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, + 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x52, + 0x09, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x22, 0x42, 0x0a, 0x0e, 0x43, 0x61, + 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x30, 0x0a, 0x05, + 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x6d, 0x61, + 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x43, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, + 0x69, 0x74, 0x79, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x05, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x22, 0x5d, + 0x0a, 0x0e, 0x43, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x49, 0x6e, 0x66, 0x6f, + 0x12, 0x2d, 0x0a, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x13, 0x2e, 0x63, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x2e, 0x44, + 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x52, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x12, + 0x1c, 0x0a, 0x09, 0x6e, 0x6f, 0x64, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x05, 0x52, 0x09, 0x6e, 0x6f, 0x64, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x58, 0x0a, + 0x1a, 0x43, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x49, 0x6e, 0x73, 0x74, 0x61, + 0x6c, 0x6c, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, + 0x14, 0x0a, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, + 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x70, 0x69, 0x6e, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x03, 0x70, 0x69, 0x6e, 0x22, 0x62, 0x0a, 0x18, 0x43, 0x61, 0x70, 0x61, 0x62, + 0x69, 0x6c, 0x69, 0x74, 0x79, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6c, 0x6c, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x32, 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, + 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x63, 0x61, 0x70, 0x61, 0x62, 0x69, + 0x6c, 0x69, 0x74, 0x79, 0x2e, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6c, 0x6c, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x22, 0x37, 0x0a, 0x1b, 0x43, + 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6c, 0x6c, + 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x63, 0x6f, + 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x63, 0x6f, 0x6d, + 0x6d, 0x61, 0x6e, 0x64, 0x22, 0x66, 0x0a, 0x1a, 0x43, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, + 0x74, 0x79, 0x55, 0x6e, 0x69, 0x6e, 0x73, 0x74, 
0x61, 0x6c, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x34, 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x63, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, + 0x69, 0x74, 0x79, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6c, 0x6c, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x22, 0x58, 0x0a, 0x17, + 0x43, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x29, 0x0a, 0x07, 0x63, + 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x63, + 0x6f, 0x72, 0x65, 0x2e, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x07, 0x63, + 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x22, 0x61, 0x0a, 0x20, 0x43, 0x61, 0x70, 0x61, 0x62, 0x69, + 0x6c, 0x69, 0x74, 0x79, 0x55, 0x6e, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6c, 0x6c, 0x43, 0x61, 0x6e, + 0x63, 0x65, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x29, + 0x0a, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x0f, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, + 0x52, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x22, 0xd6, 0x01, 0x0a, 0x11, 0x44, 0x61, + 0x73, 0x68, 0x62, 0x6f, 0x61, 0x72, 0x64, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, + 0x40, 0x0a, 0x06, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x23, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x44, 0x61, 0x73, 0x68, 0x62, 0x6f, 0x61, 0x72, 0x64, 0x47, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x53, 0x65, 0x74, 0x74, - 0x69, 0x6e, 0x67, 0x73, 0x12, 0x36, 0x0a, 0x16, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x49, - 0x6d, 0x61, 0x67, 0x65, 0x52, 0x65, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x6f, 0x72, 0x79, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x16, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x49, 0x6d, 0x61, - 0x67, 0x65, 0x52, 0x65, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x6f, 0x72, 0x79, 0x12, 0x43, 0x0a, 0x0f, - 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x54, 0x74, 0x6c, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x52, 0x0f, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x54, 0x74, - 0x6c, 0x12, 0x6b, 0x0a, 0x12, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x54, 0x6f, 0x6b, 0x65, - 0x6e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3b, 0x2e, - 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x44, 0x61, 0x73, 0x68, 0x62, - 0x6f, 0x61, 0x72, 0x64, 0x47, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, - 0x67, 0x73, 0x2e, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x4c, - 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x12, 0x64, 0x65, 0x66, 0x61, - 0x75, 0x6c, 0x74, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x4c, 0x61, 0x62, 0x65, 
0x6c, 0x73, 0x1a, 0x45, - 0x0a, 0x17, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x4c, 0x61, - 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x3a, 0x02, 0x38, 0x01, 0x2a, 0x25, 0x0a, 0x0e, 0x57, 0x61, 0x74, 0x63, 0x68, 0x45, 0x76, + 0x69, 0x6e, 0x67, 0x73, 0x48, 0x00, 0x52, 0x06, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x88, 0x01, + 0x01, 0x12, 0x3b, 0x0a, 0x04, 0x75, 0x73, 0x65, 0x72, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x27, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x44, 0x61, 0x73, + 0x68, 0x62, 0x6f, 0x61, 0x72, 0x64, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x55, + 0x73, 0x65, 0x72, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x04, 0x75, 0x73, 0x65, 0x72, 0x1a, 0x37, + 0x0a, 0x09, 0x55, 0x73, 0x65, 0x72, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, + 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, 0x09, 0x0a, 0x07, 0x5f, 0x67, 0x6c, 0x6f, 0x62, + 0x61, 0x6c, 0x22, 0xca, 0x02, 0x0a, 0x17, 0x44, 0x61, 0x73, 0x68, 0x62, 0x6f, 0x61, 0x72, 0x64, + 0x47, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x36, + 0x0a, 0x16, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x49, 0x6d, 0x61, 0x67, 0x65, 0x52, 0x65, + 0x70, 0x6f, 0x73, 0x69, 0x74, 0x6f, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x16, + 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x49, 0x6d, 0x61, 0x67, 0x65, 0x52, 0x65, 0x70, 0x6f, + 0x73, 0x69, 0x74, 0x6f, 0x72, 0x79, 0x12, 0x43, 0x0a, 0x0f, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, + 0x74, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x54, 0x74, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0f, 0x64, 0x65, 0x66, 0x61, + 0x75, 0x6c, 0x74, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x54, 0x74, 0x6c, 0x12, 0x6b, 0x0a, 0x12, 0x64, + 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x4c, 0x61, 0x62, 0x65, 0x6c, + 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3b, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, + 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x44, 0x61, 0x73, 0x68, 0x62, 0x6f, 0x61, 0x72, 0x64, 0x47, 0x6c, + 0x6f, 0x62, 0x61, 0x6c, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x44, 0x65, 0x66, + 0x61, 0x75, 0x6c, 0x74, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, + 0x6e, 0x74, 0x72, 0x79, 0x52, 0x12, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x54, 0x6f, 0x6b, + 0x65, 0x6e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x1a, 0x45, 0x0a, 0x17, 0x44, 0x65, 0x66, 0x61, + 0x75, 0x6c, 0x74, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, + 0x74, 0x0a, 0x16, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x4c, 0x6f, + 
0x67, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x25, 0x0a, 0x05, 0x61, 0x67, 0x65, + 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, + 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x05, 0x61, 0x67, 0x65, 0x6e, 0x74, + 0x12, 0x33, 0x0a, 0x07, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x19, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x4c, 0x6f, 0x67, 0x53, + 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x07, 0x72, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x2a, 0x25, 0x0a, 0x0e, 0x57, 0x61, 0x74, 0x63, 0x68, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x07, 0x0a, 0x03, 0x50, 0x75, 0x74, 0x10, 0x00, - 0x12, 0x0a, 0x0a, 0x06, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x10, 0x02, 0x32, 0xb7, 0x1b, 0x0a, + 0x12, 0x0a, 0x0a, 0x06, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x10, 0x02, 0x32, 0xb0, 0x1c, 0x0a, 0x0a, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x69, 0x0a, 0x14, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x42, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x27, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, @@ -1752,10 +1819,18 @@ var file_github_com_rancher_opni_pkg_apis_management_v1_management_proto_rawDesc 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x1e, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x18, 0x3a, 0x01, 0x2a, 0x1a, 0x13, 0x2f, 0x64, 0x61, 0x73, 0x68, 0x62, 0x6f, 0x61, 0x72, 0x64, 0x2f, 0x73, 0x65, - 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x42, 0x30, 0x5a, 0x2e, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, - 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x72, 0x61, 0x6e, 0x63, 0x68, 0x65, 0x72, 0x2f, 0x6f, 0x70, 0x6e, - 0x69, 0x2f, 0x70, 0x6b, 0x67, 0x2f, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x6d, 0x61, 0x6e, 0x61, 0x67, - 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2f, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x77, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x41, 0x67, 0x65, + 0x6e, 0x74, 0x4c, 0x6f, 0x67, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x12, 0x22, 0x2e, 0x6d, 0x61, + 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x41, + 0x67, 0x65, 0x6e, 0x74, 0x4c, 0x6f, 0x67, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x1c, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, + 0x75, 0x72, 0x65, 0x64, 0x4c, 0x6f, 0x67, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x22, 0x1e, 0x82, + 0xd3, 0xe4, 0x93, 0x02, 0x18, 0x3a, 0x01, 0x2a, 0x12, 0x13, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, + 0x65, 0x72, 0x73, 0x2f, 0x7b, 0x69, 0x64, 0x7d, 0x2f, 0x6c, 0x6f, 0x67, 0x73, 0x30, 0x01, 0x42, + 0x30, 0x5a, 0x2e, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x72, 0x61, + 0x6e, 0x63, 0x68, 0x65, 0x72, 0x2f, 0x6f, 0x70, 0x6e, 0x69, 0x2f, 0x70, 0x6b, 0x67, 0x2f, 0x61, + 0x70, 0x69, 0x73, 0x2f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2f, 0x76, + 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -1771,7 +1846,7 @@ func file_github_com_rancher_opni_pkg_apis_management_v1_management_proto_rawDes } var file_github_com_rancher_opni_pkg_apis_management_v1_management_proto_enumTypes = make([]protoimpl.EnumInfo, 1) -var file_github_com_rancher_opni_pkg_apis_management_v1_management_proto_msgTypes = make([]protoimpl.MessageInfo, 27) +var 
file_github_com_rancher_opni_pkg_apis_management_v1_management_proto_msgTypes = make([]protoimpl.MessageInfo, 28) var file_github_com_rancher_opni_pkg_apis_management_v1_management_proto_goTypes = []interface{}{ (WatchEventType)(0), // 0: management.WatchEventType (*CreateBootstrapTokenRequest)(nil), // 1: management.CreateBootstrapTokenRequest @@ -1797,144 +1872,151 @@ var file_github_com_rancher_opni_pkg_apis_management_v1_management_proto_goTypes (*CapabilityUninstallCancelRequest)(nil), // 21: management.CapabilityUninstallCancelRequest (*DashboardSettings)(nil), // 22: management.DashboardSettings (*DashboardGlobalSettings)(nil), // 23: management.DashboardGlobalSettings - nil, // 24: management.CreateBootstrapTokenRequest.LabelsEntry - nil, // 25: management.EditClusterRequest.LabelsEntry - nil, // 26: management.DashboardSettings.UserEntry - nil, // 27: management.DashboardGlobalSettings.DefaultTokenLabelsEntry - (*durationpb.Duration)(nil), // 28: google.protobuf.Duration - (*v1.TokenCapability)(nil), // 29: core.TokenCapability - (*v1.CertInfo)(nil), // 30: core.CertInfo - (*v1.LabelSelector)(nil), // 31: core.LabelSelector - (v1.MatchOptions)(0), // 32: core.MatchOptions - (*v1.Reference)(nil), // 33: core.Reference - (*v1.ReferenceList)(nil), // 34: core.ReferenceList - (*v1.Cluster)(nil), // 35: core.Cluster - (*descriptorpb.ServiceDescriptorProto)(nil), // 36: google.protobuf.ServiceDescriptorProto - (*annotations.HttpRule)(nil), // 37: google.api.HttpRule - (*descriptorpb.MethodDescriptorProto)(nil), // 38: google.protobuf.MethodDescriptorProto - (*v11.Details)(nil), // 39: capability.Details - (*v11.InstallRequest)(nil), // 40: capability.InstallRequest - (*v11.UninstallRequest)(nil), // 41: capability.UninstallRequest - (*emptypb.Empty)(nil), // 42: google.protobuf.Empty - (*v1.Role)(nil), // 43: core.Role - (*v1.RoleBinding)(nil), // 44: core.RoleBinding - (*v1.SubjectAccessRequest)(nil), // 45: core.SubjectAccessRequest - (*v1.BootstrapToken)(nil), // 46: core.BootstrapToken - (*v1.BootstrapTokenList)(nil), // 47: core.BootstrapTokenList - (*v1.ClusterList)(nil), // 48: core.ClusterList - (*v1.HealthStatus)(nil), // 49: core.HealthStatus - (*v1.ClusterHealthStatus)(nil), // 50: core.ClusterHealthStatus - (*v1.RoleList)(nil), // 51: core.RoleList - (*v1.RoleBindingList)(nil), // 52: core.RoleBindingList - (*v11.InstallResponse)(nil), // 53: capability.InstallResponse - (*v11.NodeCapabilityStatus)(nil), // 54: capability.NodeCapabilityStatus - (*v1.TaskStatus)(nil), // 55: core.TaskStatus + (*StreamAgentLogsRequest)(nil), // 24: management.StreamAgentLogsRequest + nil, // 25: management.CreateBootstrapTokenRequest.LabelsEntry + nil, // 26: management.EditClusterRequest.LabelsEntry + nil, // 27: management.DashboardSettings.UserEntry + nil, // 28: management.DashboardGlobalSettings.DefaultTokenLabelsEntry + (*durationpb.Duration)(nil), // 29: google.protobuf.Duration + (*v1.TokenCapability)(nil), // 30: core.TokenCapability + (*v1.CertInfo)(nil), // 31: core.CertInfo + (*v1.LabelSelector)(nil), // 32: core.LabelSelector + (v1.MatchOptions)(0), // 33: core.MatchOptions + (*v1.Reference)(nil), // 34: core.Reference + (*v1.ReferenceList)(nil), // 35: core.ReferenceList + (*v1.Cluster)(nil), // 36: core.Cluster + (*descriptorpb.ServiceDescriptorProto)(nil), // 37: google.protobuf.ServiceDescriptorProto + (*annotations.HttpRule)(nil), // 38: google.api.HttpRule + (*descriptorpb.MethodDescriptorProto)(nil), // 39: google.protobuf.MethodDescriptorProto + (*v11.Details)(nil), // 40: 
capability.Details + (*v11.InstallRequest)(nil), // 41: capability.InstallRequest + (*v11.UninstallRequest)(nil), // 42: capability.UninstallRequest + (*v12.LogStreamRequest)(nil), // 43: control.LogStreamRequest + (*emptypb.Empty)(nil), // 44: google.protobuf.Empty + (*v1.Role)(nil), // 45: core.Role + (*v1.RoleBinding)(nil), // 46: core.RoleBinding + (*v1.SubjectAccessRequest)(nil), // 47: core.SubjectAccessRequest + (*v1.BootstrapToken)(nil), // 48: core.BootstrapToken + (*v1.BootstrapTokenList)(nil), // 49: core.BootstrapTokenList + (*v1.ClusterList)(nil), // 50: core.ClusterList + (*v1.HealthStatus)(nil), // 51: core.HealthStatus + (*v1.ClusterHealthStatus)(nil), // 52: core.ClusterHealthStatus + (*v1.RoleList)(nil), // 53: core.RoleList + (*v1.RoleBindingList)(nil), // 54: core.RoleBindingList + (*v11.InstallResponse)(nil), // 55: capability.InstallResponse + (*v11.NodeCapabilityStatus)(nil), // 56: capability.NodeCapabilityStatus + (*v1.TaskStatus)(nil), // 57: core.TaskStatus + (*v12.StructuredLogRecord)(nil), // 58: control.StructuredLogRecord } var file_github_com_rancher_opni_pkg_apis_management_v1_management_proto_depIdxs = []int32{ - 28, // 0: management.CreateBootstrapTokenRequest.ttl:type_name -> google.protobuf.Duration - 24, // 1: management.CreateBootstrapTokenRequest.labels:type_name -> management.CreateBootstrapTokenRequest.LabelsEntry - 29, // 2: management.CreateBootstrapTokenRequest.capabilities:type_name -> core.TokenCapability - 30, // 3: management.CertsInfoResponse.chain:type_name -> core.CertInfo - 31, // 4: management.ListClustersRequest.matchLabels:type_name -> core.LabelSelector - 32, // 5: management.ListClustersRequest.matchOptions:type_name -> core.MatchOptions - 33, // 6: management.EditClusterRequest.cluster:type_name -> core.Reference - 25, // 7: management.EditClusterRequest.labels:type_name -> management.EditClusterRequest.LabelsEntry - 34, // 8: management.WatchClustersRequest.knownClusters:type_name -> core.ReferenceList - 35, // 9: management.WatchEvent.cluster:type_name -> core.Cluster + 29, // 0: management.CreateBootstrapTokenRequest.ttl:type_name -> google.protobuf.Duration + 25, // 1: management.CreateBootstrapTokenRequest.labels:type_name -> management.CreateBootstrapTokenRequest.LabelsEntry + 30, // 2: management.CreateBootstrapTokenRequest.capabilities:type_name -> core.TokenCapability + 31, // 3: management.CertsInfoResponse.chain:type_name -> core.CertInfo + 32, // 4: management.ListClustersRequest.matchLabels:type_name -> core.LabelSelector + 33, // 5: management.ListClustersRequest.matchOptions:type_name -> core.MatchOptions + 34, // 6: management.EditClusterRequest.cluster:type_name -> core.Reference + 26, // 7: management.EditClusterRequest.labels:type_name -> management.EditClusterRequest.LabelsEntry + 35, // 8: management.WatchClustersRequest.knownClusters:type_name -> core.ReferenceList + 36, // 9: management.WatchEvent.cluster:type_name -> core.Cluster 0, // 10: management.WatchEvent.type:type_name -> management.WatchEventType - 35, // 11: management.WatchEvent.previous:type_name -> core.Cluster + 36, // 11: management.WatchEvent.previous:type_name -> core.Cluster 8, // 12: management.APIExtensionInfoList.items:type_name -> management.APIExtensionInfo - 36, // 13: management.APIExtensionInfo.serviceDesc:type_name -> google.protobuf.ServiceDescriptorProto + 37, // 13: management.APIExtensionInfo.serviceDesc:type_name -> google.protobuf.ServiceDescriptorProto 9, // 14: management.APIExtensionInfo.rules:type_name -> 
management.HTTPRuleDescriptor - 37, // 15: management.HTTPRuleDescriptor.http:type_name -> google.api.HttpRule - 38, // 16: management.HTTPRuleDescriptor.method:type_name -> google.protobuf.MethodDescriptorProto + 38, // 15: management.HTTPRuleDescriptor.http:type_name -> google.api.HttpRule + 39, // 16: management.HTTPRuleDescriptor.method:type_name -> google.protobuf.MethodDescriptorProto 11, // 17: management.GatewayConfig.documents:type_name -> management.ConfigDocumentWithSchema 12, // 18: management.UpdateConfigRequest.documents:type_name -> management.ConfigDocument 15, // 19: management.CapabilityList.items:type_name -> management.CapabilityInfo - 39, // 20: management.CapabilityInfo.details:type_name -> capability.Details - 40, // 21: management.CapabilityInstallRequest.target:type_name -> capability.InstallRequest - 41, // 22: management.CapabilityUninstallRequest.target:type_name -> capability.UninstallRequest - 33, // 23: management.CapabilityStatusRequest.cluster:type_name -> core.Reference - 33, // 24: management.CapabilityUninstallCancelRequest.cluster:type_name -> core.Reference + 40, // 20: management.CapabilityInfo.details:type_name -> capability.Details + 41, // 21: management.CapabilityInstallRequest.target:type_name -> capability.InstallRequest + 42, // 22: management.CapabilityUninstallRequest.target:type_name -> capability.UninstallRequest + 34, // 23: management.CapabilityStatusRequest.cluster:type_name -> core.Reference + 34, // 24: management.CapabilityUninstallCancelRequest.cluster:type_name -> core.Reference 23, // 25: management.DashboardSettings.global:type_name -> management.DashboardGlobalSettings - 26, // 26: management.DashboardSettings.user:type_name -> management.DashboardSettings.UserEntry - 28, // 27: management.DashboardGlobalSettings.defaultTokenTtl:type_name -> google.protobuf.Duration - 27, // 28: management.DashboardGlobalSettings.defaultTokenLabels:type_name -> management.DashboardGlobalSettings.DefaultTokenLabelsEntry - 1, // 29: management.Management.CreateBootstrapToken:input_type -> management.CreateBootstrapTokenRequest - 33, // 30: management.Management.RevokeBootstrapToken:input_type -> core.Reference - 42, // 31: management.Management.ListBootstrapTokens:input_type -> google.protobuf.Empty - 33, // 32: management.Management.GetBootstrapToken:input_type -> core.Reference - 3, // 33: management.Management.ListClusters:input_type -> management.ListClustersRequest - 5, // 34: management.Management.WatchClusters:input_type -> management.WatchClustersRequest - 33, // 35: management.Management.DeleteCluster:input_type -> core.Reference - 42, // 36: management.Management.CertsInfo:input_type -> google.protobuf.Empty - 33, // 37: management.Management.GetCluster:input_type -> core.Reference - 33, // 38: management.Management.GetClusterHealthStatus:input_type -> core.Reference - 42, // 39: management.Management.WatchClusterHealthStatus:input_type -> google.protobuf.Empty - 4, // 40: management.Management.EditCluster:input_type -> management.EditClusterRequest - 43, // 41: management.Management.CreateRole:input_type -> core.Role - 43, // 42: management.Management.UpdateRole:input_type -> core.Role - 33, // 43: management.Management.DeleteRole:input_type -> core.Reference - 33, // 44: management.Management.GetRole:input_type -> core.Reference - 44, // 45: management.Management.CreateRoleBinding:input_type -> core.RoleBinding - 44, // 46: management.Management.UpdateRoleBinding:input_type -> core.RoleBinding - 33, // 47: 
management.Management.DeleteRoleBinding:input_type -> core.Reference - 33, // 48: management.Management.GetRoleBinding:input_type -> core.Reference - 42, // 49: management.Management.ListRoles:input_type -> google.protobuf.Empty - 42, // 50: management.Management.ListRoleBindings:input_type -> google.protobuf.Empty - 45, // 51: management.Management.SubjectAccess:input_type -> core.SubjectAccessRequest - 42, // 52: management.Management.APIExtensions:input_type -> google.protobuf.Empty - 42, // 53: management.Management.GetConfig:input_type -> google.protobuf.Empty - 13, // 54: management.Management.UpdateConfig:input_type -> management.UpdateConfigRequest - 42, // 55: management.Management.ListCapabilities:input_type -> google.protobuf.Empty - 16, // 56: management.Management.CapabilityInstaller:input_type -> management.CapabilityInstallerRequest - 17, // 57: management.Management.InstallCapability:input_type -> management.CapabilityInstallRequest - 19, // 58: management.Management.UninstallCapability:input_type -> management.CapabilityUninstallRequest - 20, // 59: management.Management.CapabilityStatus:input_type -> management.CapabilityStatusRequest - 20, // 60: management.Management.CapabilityUninstallStatus:input_type -> management.CapabilityStatusRequest - 21, // 61: management.Management.CancelCapabilityUninstall:input_type -> management.CapabilityUninstallCancelRequest - 42, // 62: management.Management.GetDashboardSettings:input_type -> google.protobuf.Empty - 22, // 63: management.Management.UpdateDashboardSettings:input_type -> management.DashboardSettings - 46, // 64: management.Management.CreateBootstrapToken:output_type -> core.BootstrapToken - 42, // 65: management.Management.RevokeBootstrapToken:output_type -> google.protobuf.Empty - 47, // 66: management.Management.ListBootstrapTokens:output_type -> core.BootstrapTokenList - 46, // 67: management.Management.GetBootstrapToken:output_type -> core.BootstrapToken - 48, // 68: management.Management.ListClusters:output_type -> core.ClusterList - 6, // 69: management.Management.WatchClusters:output_type -> management.WatchEvent - 42, // 70: management.Management.DeleteCluster:output_type -> google.protobuf.Empty - 2, // 71: management.Management.CertsInfo:output_type -> management.CertsInfoResponse - 35, // 72: management.Management.GetCluster:output_type -> core.Cluster - 49, // 73: management.Management.GetClusterHealthStatus:output_type -> core.HealthStatus - 50, // 74: management.Management.WatchClusterHealthStatus:output_type -> core.ClusterHealthStatus - 35, // 75: management.Management.EditCluster:output_type -> core.Cluster - 42, // 76: management.Management.CreateRole:output_type -> google.protobuf.Empty - 42, // 77: management.Management.UpdateRole:output_type -> google.protobuf.Empty - 42, // 78: management.Management.DeleteRole:output_type -> google.protobuf.Empty - 43, // 79: management.Management.GetRole:output_type -> core.Role - 42, // 80: management.Management.CreateRoleBinding:output_type -> google.protobuf.Empty - 42, // 81: management.Management.UpdateRoleBinding:output_type -> google.protobuf.Empty - 42, // 82: management.Management.DeleteRoleBinding:output_type -> google.protobuf.Empty - 44, // 83: management.Management.GetRoleBinding:output_type -> core.RoleBinding - 51, // 84: management.Management.ListRoles:output_type -> core.RoleList - 52, // 85: management.Management.ListRoleBindings:output_type -> core.RoleBindingList - 34, // 86: management.Management.SubjectAccess:output_type -> core.ReferenceList - 
7, // 87: management.Management.APIExtensions:output_type -> management.APIExtensionInfoList - 10, // 88: management.Management.GetConfig:output_type -> management.GatewayConfig - 42, // 89: management.Management.UpdateConfig:output_type -> google.protobuf.Empty - 14, // 90: management.Management.ListCapabilities:output_type -> management.CapabilityList - 18, // 91: management.Management.CapabilityInstaller:output_type -> management.CapabilityInstallerResponse - 53, // 92: management.Management.InstallCapability:output_type -> capability.InstallResponse - 42, // 93: management.Management.UninstallCapability:output_type -> google.protobuf.Empty - 54, // 94: management.Management.CapabilityStatus:output_type -> capability.NodeCapabilityStatus - 55, // 95: management.Management.CapabilityUninstallStatus:output_type -> core.TaskStatus - 42, // 96: management.Management.CancelCapabilityUninstall:output_type -> google.protobuf.Empty - 22, // 97: management.Management.GetDashboardSettings:output_type -> management.DashboardSettings - 42, // 98: management.Management.UpdateDashboardSettings:output_type -> google.protobuf.Empty - 64, // [64:99] is the sub-list for method output_type - 29, // [29:64] is the sub-list for method input_type - 29, // [29:29] is the sub-list for extension type_name - 29, // [29:29] is the sub-list for extension extendee - 0, // [0:29] is the sub-list for field type_name + 27, // 26: management.DashboardSettings.user:type_name -> management.DashboardSettings.UserEntry + 29, // 27: management.DashboardGlobalSettings.defaultTokenTtl:type_name -> google.protobuf.Duration + 28, // 28: management.DashboardGlobalSettings.defaultTokenLabels:type_name -> management.DashboardGlobalSettings.DefaultTokenLabelsEntry + 34, // 29: management.StreamAgentLogsRequest.agent:type_name -> core.Reference + 43, // 30: management.StreamAgentLogsRequest.request:type_name -> control.LogStreamRequest + 1, // 31: management.Management.CreateBootstrapToken:input_type -> management.CreateBootstrapTokenRequest + 34, // 32: management.Management.RevokeBootstrapToken:input_type -> core.Reference + 44, // 33: management.Management.ListBootstrapTokens:input_type -> google.protobuf.Empty + 34, // 34: management.Management.GetBootstrapToken:input_type -> core.Reference + 3, // 35: management.Management.ListClusters:input_type -> management.ListClustersRequest + 5, // 36: management.Management.WatchClusters:input_type -> management.WatchClustersRequest + 34, // 37: management.Management.DeleteCluster:input_type -> core.Reference + 44, // 38: management.Management.CertsInfo:input_type -> google.protobuf.Empty + 34, // 39: management.Management.GetCluster:input_type -> core.Reference + 34, // 40: management.Management.GetClusterHealthStatus:input_type -> core.Reference + 44, // 41: management.Management.WatchClusterHealthStatus:input_type -> google.protobuf.Empty + 4, // 42: management.Management.EditCluster:input_type -> management.EditClusterRequest + 45, // 43: management.Management.CreateRole:input_type -> core.Role + 45, // 44: management.Management.UpdateRole:input_type -> core.Role + 34, // 45: management.Management.DeleteRole:input_type -> core.Reference + 34, // 46: management.Management.GetRole:input_type -> core.Reference + 46, // 47: management.Management.CreateRoleBinding:input_type -> core.RoleBinding + 46, // 48: management.Management.UpdateRoleBinding:input_type -> core.RoleBinding + 34, // 49: management.Management.DeleteRoleBinding:input_type -> core.Reference + 34, // 50: 
management.Management.GetRoleBinding:input_type -> core.Reference + 44, // 51: management.Management.ListRoles:input_type -> google.protobuf.Empty + 44, // 52: management.Management.ListRoleBindings:input_type -> google.protobuf.Empty + 47, // 53: management.Management.SubjectAccess:input_type -> core.SubjectAccessRequest + 44, // 54: management.Management.APIExtensions:input_type -> google.protobuf.Empty + 44, // 55: management.Management.GetConfig:input_type -> google.protobuf.Empty + 13, // 56: management.Management.UpdateConfig:input_type -> management.UpdateConfigRequest + 44, // 57: management.Management.ListCapabilities:input_type -> google.protobuf.Empty + 16, // 58: management.Management.CapabilityInstaller:input_type -> management.CapabilityInstallerRequest + 17, // 59: management.Management.InstallCapability:input_type -> management.CapabilityInstallRequest + 19, // 60: management.Management.UninstallCapability:input_type -> management.CapabilityUninstallRequest + 20, // 61: management.Management.CapabilityStatus:input_type -> management.CapabilityStatusRequest + 20, // 62: management.Management.CapabilityUninstallStatus:input_type -> management.CapabilityStatusRequest + 21, // 63: management.Management.CancelCapabilityUninstall:input_type -> management.CapabilityUninstallCancelRequest + 44, // 64: management.Management.GetDashboardSettings:input_type -> google.protobuf.Empty + 22, // 65: management.Management.UpdateDashboardSettings:input_type -> management.DashboardSettings + 24, // 66: management.Management.GetAgentLogStream:input_type -> management.StreamAgentLogsRequest + 48, // 67: management.Management.CreateBootstrapToken:output_type -> core.BootstrapToken + 44, // 68: management.Management.RevokeBootstrapToken:output_type -> google.protobuf.Empty + 49, // 69: management.Management.ListBootstrapTokens:output_type -> core.BootstrapTokenList + 48, // 70: management.Management.GetBootstrapToken:output_type -> core.BootstrapToken + 50, // 71: management.Management.ListClusters:output_type -> core.ClusterList + 6, // 72: management.Management.WatchClusters:output_type -> management.WatchEvent + 44, // 73: management.Management.DeleteCluster:output_type -> google.protobuf.Empty + 2, // 74: management.Management.CertsInfo:output_type -> management.CertsInfoResponse + 36, // 75: management.Management.GetCluster:output_type -> core.Cluster + 51, // 76: management.Management.GetClusterHealthStatus:output_type -> core.HealthStatus + 52, // 77: management.Management.WatchClusterHealthStatus:output_type -> core.ClusterHealthStatus + 36, // 78: management.Management.EditCluster:output_type -> core.Cluster + 44, // 79: management.Management.CreateRole:output_type -> google.protobuf.Empty + 44, // 80: management.Management.UpdateRole:output_type -> google.protobuf.Empty + 44, // 81: management.Management.DeleteRole:output_type -> google.protobuf.Empty + 45, // 82: management.Management.GetRole:output_type -> core.Role + 44, // 83: management.Management.CreateRoleBinding:output_type -> google.protobuf.Empty + 44, // 84: management.Management.UpdateRoleBinding:output_type -> google.protobuf.Empty + 44, // 85: management.Management.DeleteRoleBinding:output_type -> google.protobuf.Empty + 46, // 86: management.Management.GetRoleBinding:output_type -> core.RoleBinding + 53, // 87: management.Management.ListRoles:output_type -> core.RoleList + 54, // 88: management.Management.ListRoleBindings:output_type -> core.RoleBindingList + 35, // 89: management.Management.SubjectAccess:output_type -> 
core.ReferenceList + 7, // 90: management.Management.APIExtensions:output_type -> management.APIExtensionInfoList + 10, // 91: management.Management.GetConfig:output_type -> management.GatewayConfig + 44, // 92: management.Management.UpdateConfig:output_type -> google.protobuf.Empty + 14, // 93: management.Management.ListCapabilities:output_type -> management.CapabilityList + 18, // 94: management.Management.CapabilityInstaller:output_type -> management.CapabilityInstallerResponse + 55, // 95: management.Management.InstallCapability:output_type -> capability.InstallResponse + 44, // 96: management.Management.UninstallCapability:output_type -> google.protobuf.Empty + 56, // 97: management.Management.CapabilityStatus:output_type -> capability.NodeCapabilityStatus + 57, // 98: management.Management.CapabilityUninstallStatus:output_type -> core.TaskStatus + 44, // 99: management.Management.CancelCapabilityUninstall:output_type -> google.protobuf.Empty + 22, // 100: management.Management.GetDashboardSettings:output_type -> management.DashboardSettings + 44, // 101: management.Management.UpdateDashboardSettings:output_type -> google.protobuf.Empty + 58, // 102: management.Management.GetAgentLogStream:output_type -> control.StructuredLogRecord + 67, // [67:103] is the sub-list for method output_type + 31, // [31:67] is the sub-list for method input_type + 31, // [31:31] is the sub-list for extension type_name + 31, // [31:31] is the sub-list for extension extendee + 0, // [0:31] is the sub-list for field type_name } func init() { file_github_com_rancher_opni_pkg_apis_management_v1_management_proto_init() } @@ -2219,6 +2301,18 @@ func file_github_com_rancher_opni_pkg_apis_management_v1_management_proto_init() return nil } } + file_github_com_rancher_opni_pkg_apis_management_v1_management_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StreamAgentLogsRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } } file_github_com_rancher_opni_pkg_apis_management_v1_management_proto_msgTypes[21].OneofWrappers = []interface{}{} type x struct{} @@ -2227,7 +2321,7 @@ func file_github_com_rancher_opni_pkg_apis_management_v1_management_proto_init() GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_github_com_rancher_opni_pkg_apis_management_v1_management_proto_rawDesc, NumEnums: 1, - NumMessages: 27, + NumMessages: 28, NumExtensions: 0, NumServices: 1, }, diff --git a/pkg/apis/management/v1/management.proto b/pkg/apis/management/v1/management.proto index d32bc0d558..3567c5e9b4 100644 --- a/pkg/apis/management/v1/management.proto +++ b/pkg/apis/management/v1/management.proto @@ -4,6 +4,7 @@ package management; import "github.com/rancher/opni/pkg/apis/capability/v1/capability.proto"; import "github.com/rancher/opni/pkg/apis/core/v1/core.proto"; +import "github.com/rancher/opni/pkg/apis/control/v1/remote.proto"; import "google/api/annotations.proto"; import "google/api/http.proto"; import "google/protobuf/descriptor.proto"; @@ -205,6 +206,12 @@ service Management { body: "*" }; } + rpc GetAgentLogStream(StreamAgentLogsRequest) returns (stream control.StructuredLogRecord) { + option (google.api.http) = { + get: "/clusters/{id}/logs" + body: "*" + }; + } } message CreateBootstrapTokenRequest { @@ -324,3 +331,8 @@ message DashboardGlobalSettings { google.protobuf.Duration defaultTokenTtl = 2; map defaultTokenLabels = 3; } + +message StreamAgentLogsRequest { + core.Reference 
agent = 1; + control.LogStreamRequest request = 2; +} diff --git a/pkg/apis/management/v1/management_grpc.pb.go b/pkg/apis/management/v1/management_grpc.pb.go index cc47202157..81cb42ec19 100644 --- a/pkg/apis/management/v1/management_grpc.pb.go +++ b/pkg/apis/management/v1/management_grpc.pb.go @@ -9,6 +9,7 @@ package v1 import ( context "context" v11 "github.com/rancher/opni/pkg/apis/capability/v1" + v12 "github.com/rancher/opni/pkg/apis/control/v1" v1 "github.com/rancher/opni/pkg/apis/core/v1" grpc "google.golang.org/grpc" codes "google.golang.org/grpc/codes" @@ -57,6 +58,7 @@ const ( Management_CancelCapabilityUninstall_FullMethodName = "/management.Management/CancelCapabilityUninstall" Management_GetDashboardSettings_FullMethodName = "/management.Management/GetDashboardSettings" Management_UpdateDashboardSettings_FullMethodName = "/management.Management/UpdateDashboardSettings" + Management_GetAgentLogStream_FullMethodName = "/management.Management/GetAgentLogStream" ) // ManagementClient is the client API for Management service. @@ -100,6 +102,7 @@ type ManagementClient interface { CancelCapabilityUninstall(ctx context.Context, in *CapabilityUninstallCancelRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) GetDashboardSettings(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*DashboardSettings, error) UpdateDashboardSettings(ctx context.Context, in *DashboardSettings, opts ...grpc.CallOption) (*emptypb.Empty, error) + GetAgentLogStream(ctx context.Context, in *StreamAgentLogsRequest, opts ...grpc.CallOption) (Management_GetAgentLogStreamClient, error) } type managementClient struct { @@ -472,6 +475,38 @@ func (c *managementClient) UpdateDashboardSettings(ctx context.Context, in *Dash return out, nil } +func (c *managementClient) GetAgentLogStream(ctx context.Context, in *StreamAgentLogsRequest, opts ...grpc.CallOption) (Management_GetAgentLogStreamClient, error) { + stream, err := c.cc.NewStream(ctx, &Management_ServiceDesc.Streams[2], Management_GetAgentLogStream_FullMethodName, opts...) + if err != nil { + return nil, err + } + x := &managementGetAgentLogStreamClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type Management_GetAgentLogStreamClient interface { + Recv() (*v12.StructuredLogRecord, error) + grpc.ClientStream +} + +type managementGetAgentLogStreamClient struct { + grpc.ClientStream +} + +func (x *managementGetAgentLogStreamClient) Recv() (*v12.StructuredLogRecord, error) { + m := new(v12.StructuredLogRecord) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + // ManagementServer is the server API for Management service. 
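
The generated client above is the stock grpc-go server-streaming pattern: NewStream opens the stream, the single request is sent, the send direction is closed, and the caller then reads typed messages until io.EOF. A minimal consumer sketch, assuming an already-dialed ManagementClient and "context", "errors", "fmt", and "io" in the import set (printAgentLogs is an illustrative name, not part of this change):

// printAgentLogs drains a GetAgentLogStream call using only the generated API above.
func printAgentLogs(ctx context.Context, client ManagementClient, req *StreamAgentLogsRequest) error {
	stream, err := client.GetAgentLogStream(ctx, req)
	if err != nil {
		return err
	}
	for {
		rec, err := stream.Recv()
		if errors.Is(err, io.EOF) {
			return nil // the server ended the stream normally
		}
		if err != nil {
			return err
		}
		fmt.Println(rec.GetTime().AsTime(), rec.GetLevel(), rec.GetName(), rec.GetMessage())
	}
}
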
// All implementations must embed UnimplementedManagementServer // for forward compatibility @@ -513,6 +548,7 @@ type ManagementServer interface { CancelCapabilityUninstall(context.Context, *CapabilityUninstallCancelRequest) (*emptypb.Empty, error) GetDashboardSettings(context.Context, *emptypb.Empty) (*DashboardSettings, error) UpdateDashboardSettings(context.Context, *DashboardSettings) (*emptypb.Empty, error) + GetAgentLogStream(*StreamAgentLogsRequest, Management_GetAgentLogStreamServer) error mustEmbedUnimplementedManagementServer() } @@ -625,6 +661,9 @@ func (UnimplementedManagementServer) GetDashboardSettings(context.Context, *empt func (UnimplementedManagementServer) UpdateDashboardSettings(context.Context, *DashboardSettings) (*emptypb.Empty, error) { return nil, status.Errorf(codes.Unimplemented, "method UpdateDashboardSettings not implemented") } +func (UnimplementedManagementServer) GetAgentLogStream(*StreamAgentLogsRequest, Management_GetAgentLogStreamServer) error { + return status.Errorf(codes.Unimplemented, "method GetAgentLogStream not implemented") +} func (UnimplementedManagementServer) mustEmbedUnimplementedManagementServer() {} // UnsafeManagementServer may be embedded to opt out of forward compatibility for this service. @@ -1274,6 +1313,27 @@ func _Management_UpdateDashboardSettings_Handler(srv interface{}, ctx context.Co return interceptor(ctx, in, info, handler) } +func _Management_GetAgentLogStream_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(StreamAgentLogsRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(ManagementServer).GetAgentLogStream(m, &managementGetAgentLogStreamServer{stream}) +} + +type Management_GetAgentLogStreamServer interface { + Send(*v12.StructuredLogRecord) error + grpc.ServerStream +} + +type managementGetAgentLogStreamServer struct { + grpc.ServerStream +} + +func (x *managementGetAgentLogStreamServer) Send(m *v12.StructuredLogRecord) error { + return x.ServerStream.SendMsg(m) +} + // Management_ServiceDesc is the grpc.ServiceDesc for Management service. 
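
On the server side, the generated handler decodes the single StreamAgentLogsRequest and hands the implementation a send-only Management_GetAgentLogStreamServer; returning nil ends the stream, which the client observes as io.EOF. The records being streamed are the same frames the new protoHandler later in this diff writes to the agent's log file: a 4-byte little-endian size header followed by a proto-marshaled StructuredLogRecord. A sketch of an implementation that replays such a file over the stream, assuming encoding/binary, errors, io, and google.golang.org/protobuf/proto are imported (the server type and openLogFile helper are hypothetical; the real implementation in pkg/management/log.go below proxies to the agent instead of reading a local file):

func (s *server) GetAgentLogStream(req *StreamAgentLogsRequest, stream Management_GetAgentLogStreamServer) error {
	f, err := s.openLogFile(req.GetAgent().GetId()) // hypothetical local frame source
	if err != nil {
		return err
	}
	defer f.Close()
	header := make([]byte, 4)
	for {
		if _, err := io.ReadFull(f, header); err != nil {
			if errors.Is(err, io.EOF) {
				return nil // clean end of file; the client sees io.EOF
			}
			return err
		}
		// the same 4-byte little-endian prefix that protoHandler.Handle writes
		body := make([]byte, binary.LittleEndian.Uint32(header))
		if _, err := io.ReadFull(f, body); err != nil {
			return err
		}
		rec := &v12.StructuredLogRecord{}
		if err := proto.Unmarshal(body, rec); err != nil {
			return err
		}
		if err := stream.Send(rec); err != nil {
			return err
		}
	}
}
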
// It's only intended for direct use with grpc.RegisterService, // and not to be introspected or modified (even as a copy) @@ -1425,6 +1485,11 @@ var Management_ServiceDesc = grpc.ServiceDesc{ Handler: _Management_WatchClusterHealthStatus_Handler, ServerStreams: true, }, + { + StreamName: "GetAgentLogStream", + Handler: _Management_GetAgentLogStream_Handler, + ServerStreams: true, + }, }, Metadata: "github.com/rancher/opni/pkg/apis/management/v1/management.proto", } diff --git a/pkg/apis/management/v1/validation.go b/pkg/apis/management/v1/validation.go index c6fb9a3229..4b2271ae81 100644 --- a/pkg/apis/management/v1/validation.go +++ b/pkg/apis/management/v1/validation.go @@ -110,3 +110,15 @@ func (r *CapabilityUninstallCancelRequest) Validate() error { } return nil } + +func (r *StreamAgentLogsRequest) Validate() error { + if err := validation.Validate(r.Agent); err != nil { + return err + } + + if err := validation.Validate(r.Request); err != nil { + return err + } + + return nil +} diff --git a/pkg/gateway/delegate.go b/pkg/gateway/delegate.go index cd7c41ef34..ab4c9d88b9 100644 --- a/pkg/gateway/delegate.go +++ b/pkg/gateway/delegate.go @@ -170,3 +170,14 @@ func (d *DelegateServer) Broadcast(ctx context.Context, req *streamv1.BroadcastM return reply, nil } + +func (d *DelegateServer) UseClient(target *corev1.Reference, fn func(cc grpc.ClientConnInterface)) error { + d.mu.RLock() + defer d.mu.RUnlock() + if t, ok := d.activeAgents[target.Id]; ok { + fn(t.ClientConnInterface) + return nil + } else { + return status.Error(codes.NotFound, "target not found") + } +} diff --git a/pkg/gateway/gateway.go b/pkg/gateway/gateway.go index f14f6286ff..698c383b78 100644 --- a/pkg/gateway/gateway.go +++ b/pkg/gateway/gateway.go @@ -57,6 +57,7 @@ type Gateway struct { logger *slog.Logger httpServer *GatewayHTTPServer grpcServer *GatewayGRPCServer + delegate *DelegateServer statusQuerier health.HealthStatusQuerier storageBackend storage.Backend @@ -301,6 +302,7 @@ func NewGateway(ctx context.Context, conf *config.GatewayConfig, pl plugins.Load capBackendStore: capBackendStore, httpServer: httpServer, grpcServer: grpcServer, + delegate: delegate, statusQuerier: monitor, } @@ -359,7 +361,7 @@ func (g *Gateway) CapabilitiesStore() capabilities.BackendStore { return g.capBackendStore } -// Implements management.HealthStatusDataSource +// Implements management.AgentControlDataSource func (g *Gateway) GetClusterHealthStatus(ref *corev1.Reference) (*corev1.HealthStatus, error) { hs := g.statusQuerier.GetHealthStatus(ref.Id) if hs.Health == nil && hs.Status == nil { @@ -368,7 +370,27 @@ func (g *Gateway) GetClusterHealthStatus(ref *corev1.Reference) (*corev1.HealthS return hs, nil } -// Implements management.HealthStatusDataSource +// Implements management.AgentControlDataSource +func (g *Gateway) StreamLogs(ctx context.Context, id *corev1.Reference, req *controlv1.LogStreamRequest) (controlv1.Log_StreamLogsClient, error) { + var stream controlv1.Log_StreamLogsClient + var err error + + if uErr := g.delegate.UseClient(id, func(cc grpc.ClientConnInterface) { + logsClient := controlv1.NewLogClient(cc) + stream, err = logsClient.StreamLogs(ctx, req) + }); uErr != nil { + return nil, uErr + } + + if err != nil { + g.logger.Error("failed to fetch remote logs", logger.Err(err)) + return nil, err + } + + return stream, nil +} + +// Implements management.AgentControlDataSource func (g *Gateway) WatchClusterHealthStatus(ctx context.Context) <-chan *corev1.ClusterHealthStatus { return g.statusQuerier.WatchHealthStatus(ctx) } diff --git a/pkg/logger/color_handler.go 
b/pkg/logger/color_handler.go index 127b8106d0..0cfac8997a 100644 --- a/pkg/logger/color_handler.go +++ b/pkg/logger/color_handler.go @@ -9,6 +9,7 @@ import ( "path/filepath" "runtime" "strconv" + "strings" "sync" "time" "unicode" @@ -35,6 +36,7 @@ const ( ) type colorHandler struct { + slog.Handler level slog.Leveler addSource bool replaceAttr func([]string, slog.Attr) slog.Attr @@ -188,22 +190,22 @@ func (h *colorHandler) appendLevel(buf *buffer, level slog.Level) { switch { case level < slog.LevelInfo: buf.WriteStringIf(h.colorEnabled, ansiBrightMagenta) - buf.WriteString("DEBUG") + buf.WriteString(levelString[0]) appendLevelDelta(buf, level-slog.LevelDebug) buf.WriteStringIf(h.colorEnabled, ansiReset) case level < slog.LevelWarn: buf.WriteStringIf(h.colorEnabled, ansiBrightBlue) - buf.WriteString("INFO") + buf.WriteString(levelString[1]) appendLevelDelta(buf, level-slog.LevelInfo) buf.WriteStringIf(h.colorEnabled, ansiReset) case level < slog.LevelError: buf.WriteStringIf(h.colorEnabled, ansiBrightYellow) - buf.WriteString("WARN") + buf.WriteString(levelString[2]) appendLevelDelta(buf, level-slog.LevelWarn) buf.WriteStringIf(h.colorEnabled, ansiReset) default: buf.WriteStringIf(h.colorEnabled, ansiBrightRed) - buf.WriteString("ERROR") + buf.WriteString(levelString[3]) appendLevelDelta(buf, level-slog.LevelError) buf.WriteStringIf(h.colorEnabled, ansiReset) } @@ -223,7 +225,7 @@ func (h *colorHandler) writeGroups(buf *buffer) { last := len(h.groups) - 1 for i, group := range h.groups { if i == 0 { - if group == pluginGroupPrefix { + if strings.HasPrefix(group, forwardedPluginPrefix) || group == pluginGroupPrefix { buf.WriteStringIf(h.colorEnabled, ansiBrightCyan) } else { buf.WriteStringIf(h.colorEnabled, ansiBrightGreen) diff --git a/pkg/logger/color_handler_test.go b/pkg/logger/color_handler_test.go index 6f913476a3..3e4de0950c 100644 --- a/pkg/logger/color_handler_test.go +++ b/pkg/logger/color_handler_test.go @@ -33,7 +33,12 @@ func Example() { // Run test with "faketime" tag: // // TZ="" go test -tags=faketime -func TestHandler(t *testing.T) { +func TestColorHandler(t *testing.T) { + slog.SetDefault(slog.New(newColorHandler(os.Stderr, &LoggerOptions{ + Level: slog.LevelDebug, + ColorEnabled: true, + }))) + if !faketime.Equal(time.Now()) { t.Skip(`skipping test; run with "-tags=faketime"`) } @@ -98,7 +103,7 @@ func TestHandler(t *testing.T) { F: func(l *slog.Logger) { l.Info("test", "key", "val") }, - Want: `2009 Nov 10 23:00:00 INFO logger/color_handler_test.go:98 test key=val`, + Want: `2009 Nov 10 23:00:00 INFO logger/color_handler_test.go:104 test key=val`, }, { Opts: &LoggerOptions{ @@ -161,7 +166,7 @@ func TestHandler(t *testing.T) { F: func(l *slog.Logger) { l.WithGroup("group").Info("test", "key", "val", "key2", "val2") }, - Want: `2009 Nov 10 23:00:00 INFO group test key=val key2=val2`, + Want: `2009 Nov 10 23:00:00 INFO group test key2=val2`, }, { Opts: &LoggerOptions{ @@ -324,7 +329,7 @@ func TestHandler(t *testing.T) { F: func(l *slog.Logger) { l.Info("test") }, - Want: `2009 Nov 10 23:00:00 INFO logger/color_handler_test.go:324 test`, + Want: `2009 Nov 10 23:00:00 INFO logger/color_handler_test.go:330 test`, }, { F: func(l *slog.Logger) { @@ -387,7 +392,7 @@ func replace(new slog.Value, keys ...string) func([]string, slog.Attr) slog.Attr // // Run e.g.: // -// go test -bench=. 
-count=10 | benchstat -col /h /dev/stdin +// go test -bench=BenchmarkLogAttrs -count=10 | benchstat -col /h /dev/stdin func BenchmarkLogAttrs(b *testing.B) { handler := []struct { Name string diff --git a/pkg/logger/logger.go b/pkg/logger/logger.go index a7d595eebb..aa708b3f1c 100644 --- a/pkg/logger/logger.go +++ b/pkg/logger/logger.go @@ -10,9 +10,15 @@ import ( "github.com/go-logr/logr" "github.com/go-logr/logr/slogr" - "github.com/kralicky/gpkg/sync" + gpkgsync "github.com/kralicky/gpkg/sync" slogmulti "github.com/samber/slog-multi" slogsampling "github.com/samber/slog-sampling" + "github.com/spf13/afero" +) + +const ( + NoRepeatInterval = 3600 * time.Hour // arbitrarily long time to denote one-time sampling + errKey = "err" ) var ( @@ -24,17 +30,19 @@ var ( /_/ Observability + AIOps for Kubernetes ` - DefaultLogLevel = slog.LevelDebug DefaultWriter io.Writer DefaultAddSource = true - pluginGroupPrefix = "plugin" - NoRepeatInterval = 3600 * time.Hour // arbitrarily long time to denote one-time sampling + logFs afero.Fs DefaultTimeFormat = "2006 Jan 02 15:04:05" - errKey = "err" + logSampler = &sampler{} + levelString = []string{"DEBUG", "INFO", "WARN", "ERROR"} + fileDesc gpkgsync.Map[string, *FileWriter] ) -var logSampler = &sampler{} +func init() { + logFs = afero.NewMemMapFs() +} func AsciiLogo() string { return asciiLogo @@ -46,10 +54,11 @@ type LoggerOptions struct { ReplaceAttr func(groups []string, a slog.Attr) slog.Attr Writer io.Writer ColorEnabled bool - Sampling *slogsampling.ThresholdSamplingOption TimeFormat string TotemFormatEnabled bool + Sampling *slogsampling.ThresholdSamplingOption OmitLoggerName bool + FileWriter io.Writer } func ParseLevel(lvl string) slog.Level { @@ -85,6 +94,12 @@ func WithWriter(w io.Writer) LoggerOption { } } +func WithFileWriter(w io.Writer) LoggerOption { + return func(o *LoggerOptions) { + o.FileWriter = w + } +} + func WithColor(color bool) LoggerOption { return func(o *LoggerOptions) { o.ColorEnabled = color @@ -176,9 +191,22 @@ func colorHandlerWithOptions(opts ...LoggerOption) slog.Handler { handler = chain.Handler(handler) } + if options.FileWriter != nil { + logFileHandler := newProtoHandler(options.FileWriter, ConfigureProtoOptions(options)) + // distribute logs to handlers in parallel + return slogmulti.Fanout(handler, logFileHandler) + } + return handler } +func ConfigureProtoOptions(opts *LoggerOptions) *slog.HandlerOptions { + return &slog.HandlerOptions{ + Level: opts.Level, + AddSource: opts.AddSource, + } +} + func New(opts ...LoggerOption) *slog.Logger { return slog.New(colorHandlerWithOptions(opts...)) } @@ -191,12 +219,8 @@ func NewNop() *slog.Logger { return slog.New(slog.NewTextHandler(io.Discard, &slog.HandlerOptions{})) } -func NewPluginLogger(opts ...LoggerOption) *slog.Logger { - return New(opts...).WithGroup(pluginGroupPrefix) -} - type sampler struct { - dropped sync.Map[string, uint64] + dropped gpkgsync.Map[string, uint64] } func (s *sampler) onDroppedHook(_ context.Context, r slog.Record) { diff --git a/pkg/logger/plugin_logger.go b/pkg/logger/plugin_logger.go new file mode 100644 index 0000000000..c69dc17d88 --- /dev/null +++ b/pkg/logger/plugin_logger.go @@ -0,0 +1,262 @@ +package logger + +import ( + "context" + "fmt" + "io" + "log/slog" + "os" + "path" + "sync" + "testing" + + controlv1 "github.com/rancher/opni/pkg/apis/control/v1" + "github.com/rancher/opni/pkg/plugins/meta" + "github.com/spf13/afero" + "google.golang.org/protobuf/proto" +) + +const ( + pluginGroupPrefix = "plugin" + forwardedPluginPrefix = 
"plugin." + pluginLoggerKey pluginLoggerKeyType = "plugin_logger" + pluginModeKey pluginModeKeyType = "plugin_logger_mode" + pluginAgentKey pluginAgentKeyType = "plugin_logger_agent" +) + +type ( + pluginLoggerKeyType string + pluginModeKeyType string + pluginAgentKeyType string +) + +func NewPluginLogger(ctx context.Context, opts ...LoggerOption) *slog.Logger { + options := &LoggerOptions{ + Level: DefaultLogLevel, + AddSource: true, + } + options.apply(opts...) + + if options.Writer == nil { + options.Writer = newPluginWriter(ctx) + } + + return slog.New(newProtoHandler(options.Writer, ConfigureProtoOptions(options))).WithGroup(pluginGroupPrefix) +} + +func WithPluginLogger(ctx context.Context, lg *slog.Logger) context.Context { + return context.WithValue(ctx, pluginLoggerKey, lg) +} + +func PluginLoggerFromContext(ctx context.Context) *slog.Logger { + logger := ctx.Value(pluginLoggerKey) + if logger == nil { + return NewPluginLogger(ctx) + } + return logger.(*slog.Logger) +} + +func ReadFile(filename string) afero.File { + f, err := logFs.OpenFile(filename, os.O_CREATE|os.O_RDONLY, 0666) + if err != nil { + panic(err) + } + return f +} + +func WriteOnlyFile(filename string) *FileWriter { + fileWriter, ok := fileDesc.Load(filename) + if ok { + return fileWriter + } + + newFile, err := logFs.OpenFile(filename, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0666) + if err != nil { + panic(err) + } + newFileWriter := newFileWriter(newFile) + fileDesc.Store(filename, newFileWriter) + + return newFileWriter +} + +func GetLogFileName(agentId string) string { + return fmt.Sprintf("plugin_%s_%s", meta.ModeAgent, agentId) +} + +func WithMode(ctx context.Context, mode meta.PluginMode) context.Context { + return context.WithValue(ctx, pluginModeKey, mode) +} + +func WithAgentId(ctx context.Context, agentId string) context.Context { + return context.WithValue(ctx, pluginAgentKey, agentId) +} + +// writer used for agent loggers and plugin loggers +type RemotePluginWriter struct { + textWriter *slog.Logger + fileWriter *FileWriter + protoWriter io.Writer +} + +func newPluginWriter(ctx context.Context) *RemotePluginWriter { + if isInProcessPluginLogger() { + mode := getMode(ctx) + if mode == meta.ModeAgent { + return NewPluginFileWriter(ctx) + } else { + return newTestGatewayPluginWriter() + } + } else { + return newSubprocPluginWriter() + } +} + +func NewPluginFileWriter(ctx context.Context) *RemotePluginWriter { + return &RemotePluginWriter{ + textWriter: New(WithWriter(os.Stderr), WithDisableCaller()), + fileWriter: WriteOnlyFile(GetLogFileName(getAgentId(ctx))), + protoWriter: io.Discard, + } +} + +func newTestGatewayPluginWriter() *RemotePluginWriter { + return &RemotePluginWriter{ + textWriter: New(WithWriter(os.Stderr), WithDisableCaller()), + fileWriter: newFileWriter(nil), + protoWriter: io.Discard, + } +} + +func newSubprocPluginWriter() *RemotePluginWriter { + return &RemotePluginWriter{ + textWriter: New(WithWriter(io.Discard), WithDisableCaller()), + fileWriter: newFileWriter(nil), + protoWriter: os.Stderr, + } +} + +func (w *RemotePluginWriter) Write(b []byte) (int, error) { + if w.fileWriter == nil || w.textWriter == nil { + return 0, nil + } + + n, err := w.writeProtoToText(b) + if err != nil { + // not a proto message. 
log as is + w.textWriter.Info(string(b)) + return n, nil + } + + n, err = w.fileWriter.Write(b) + if err != nil { + return n, err + } + n, err = w.protoWriter.Write(b) + if err != nil { + return n, err + } + return n, err +} + +func CloseLogStreaming(agentId string) { + fileWriter, ok := fileDesc.Load(GetLogFileName(agentId)) + if ok { + fileWriter.file.Close() + } +} + +func (w *RemotePluginWriter) writeProtoToText(b []byte) (int, error) { + unsafeN := len(b) + record := &controlv1.StructuredLogRecord{} + + if unsafeN < 4 { + return 0, io.ErrUnexpectedEOF + } + + size := uint32(b[0]) | + uint32(b[1])<<8 | + uint32(b[2])<<16 | + uint32(b[3])<<24 + + invalidHeader := size > 65535 + if invalidHeader { + return 0, io.ErrUnexpectedEOF + } + + if err := proto.Unmarshal(b[4:size+4], record); err != nil { + w.textWriter.Error("malformed plugin log", "log", b) + return 0, err + } + + lg := w.textWriter.WithGroup(record.GetName()) + + attrs := []any{slog.SourceKey, record.GetSource()} + for _, attr := range record.GetAttributes() { + attrs = append(attrs, attr.Key, attr.Value) + } + + switch record.GetLevel() { + case levelString[0]: + lg.Debug(record.Message, attrs...) + case levelString[1]: + lg.Info(record.Message, attrs...) + case levelString[2]: + lg.Warn(record.Message, attrs...) + case levelString[3]: + lg.Error(record.Message, attrs...) + default: + lg.Debug(record.Message, attrs...) + } + + return int(size), nil +} + +// stores agent and agent plugin logs, retrieved with debug cli +type FileWriter struct { + file afero.File + mu *sync.RWMutex +} + +func newFileWriter(f afero.File) *FileWriter { + return &FileWriter{ + file: f, + mu: &sync.RWMutex{}, + } +} + +func (f *FileWriter) Write(b []byte) (int, error) { + if f.file == nil { + return 0, nil + } + f.mu.Lock() + defer f.mu.Unlock() + return f.file.Write(b) +} + +func getAgentId(ctx context.Context) string { + id := ctx.Value(pluginAgentKey) + if id != nil { + return id.(string) + } + + return "" +} + +func isInProcessPluginLogger() bool { + return getModuleBasename() == "testenv" || testing.Testing() +} + +func getModuleBasename() string { + md := meta.ReadMetadata() + return path.Base(md.Module) +} + +func getMode(ctx context.Context) meta.PluginMode { + mode := ctx.Value(pluginModeKey) + + if mode != nil { + return mode.(meta.PluginMode) + } + return meta.ModeGateway +} diff --git a/pkg/logger/proto_handler.go b/pkg/logger/proto_handler.go new file mode 100644 index 0000000000..425f069c57 --- /dev/null +++ b/pkg/logger/proto_handler.go @@ -0,0 +1,283 @@ +package logger + +import ( + "context" + "encoding" + "fmt" + "io" + "log/slog" + "path/filepath" + "runtime" + "strconv" + "strings" + "sync" + "time" + + controlv1 "github.com/rancher/opni/pkg/apis/control/v1" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/types/known/timestamppb" +) + +// protoHandler outputs a size-prefixed []byte for every log message +const atomicChunkSize = 4096 +const headerLen = 4 + +type protoHandler struct { + slog.Handler + level slog.Leveler + addSource bool + replaceAttr func([]string, slog.Attr) slog.Attr + attrs []*controlv1.Attr // attrs started from With + groups []string // all groups started from WithGroup + groupPrefix string // groups started from Group + mu sync.Mutex + w io.Writer +} + +func newProtoHandler(w io.Writer, opts *slog.HandlerOptions) *protoHandler { + if opts.Level == nil { + opts.Level = DefaultLogLevel + } + + return &protoHandler{ + w: w, + level: opts.Level, + addSource: opts.AddSource, + replaceAttr: 
opts.ReplaceAttr, + } +} + +func (h *protoHandler) Enabled(_ context.Context, level slog.Level) bool { + return level >= h.level.Level() +} + +func (h *protoHandler) clone() *protoHandler { + return &protoHandler{ + level: h.level, + addSource: h.addSource, + replaceAttr: h.replaceAttr, + attrs: h.attrs, + groups: h.groups, + groupPrefix: h.groupPrefix, + w: h.w, + } +} + +func (h *protoHandler) WithGroup(name string) slog.Handler { + if name == "" { + return h + } + h2 := h.clone() + h2.groupPrefix += name + "." + h2.groups = append(h2.groups, name) + return h2 +} + +func (h *protoHandler) WithAttrs(attrs []slog.Attr) slog.Handler { + if len(attrs) == 0 { + return h + } + h2 := h.clone() + + for _, attr := range attrs { + if h.replaceAttr != nil { + attr = h.replaceAttr(h.groups, attr) + } + h2.attrs = h2.appendAttr(h.attrs, attr) + } + return h2 +} + +func (h *protoHandler) Handle(_ context.Context, r slog.Record) error { + // get a buffer from the sync pool + buf := newBuffer() + defer buf.Free() + + replace := h.replaceAttr + + // write time + var timestamp *timestamppb.Timestamp + if replace == nil { + timestamp = timestamppb.New(r.Time) + } else if a := replace(nil /* groups */, slog.Time(slog.TimeKey, r.Time)); a.Key != "" { + if t, ok := a.Value.Any().(*timestamppb.Timestamp); ok { + timestamp = t + } else if t, ok := a.Value.Any().(time.Time); ok { + timestamp = timestamppb.New(t) + } else { + timestamp = timestamppb.Now() // overwrites invalid timestamps created from replaceAttr + } + } + + // write level + var lvl string + if replace == nil { + lvl = r.Level.String() + } else if a := replace(nil /* groups */, slog.Any(slog.LevelKey, r.Level)); a.Key != "" { + lvl = h.toString(a.Value) + } + + // write logger name + var name strings.Builder + last := len(h.groups) - 1 + for i, group := range h.groups { + name.WriteString(group) + if i < last { + name.WriteString(".") + } + } + + // write source + var src strings.Builder + if h.addSource { + fs := runtime.CallersFrames([]uintptr{r.PC}) + f, _ := fs.Next() + if f.File != "" { + if h.replaceAttr == nil { + h.appendSource(&src, f.File, f.Line) + } else if a := h.replaceAttr(nil /* groups */, slog.Any(slog.SourceKey, &slog.Source{ + Function: f.Function, + File: f.File, + Line: f.Line, + })); a.Key != "" { + src.WriteString(h.toString(a.Value)) + } + } + } + + // write message + var msg string + if replace == nil { + msg = r.Message + } else if a := replace(nil /* groups */, slog.String(slog.MessageKey, r.Message)); a.Key != "" { + msg = a.Value.String() + } + + var attrs []*controlv1.Attr + // write handler attributes + if h.attrs != nil { + attrs = h.attrs + } + + // write attributes + r.Attrs(func(attr slog.Attr) bool { + if h.replaceAttr != nil { + attr = h.replaceAttr(h.groups, attr) + } + attrs = h.appendAttr(attrs, attr) + return true + }) + + structuredLogRecord := &controlv1.StructuredLogRecord{ + Time: timestamp, + Message: msg, + Name: name.String(), + Source: src.String(), + Level: lvl, + Attributes: attrs, + } + + bytes, err := proto.Marshal(structuredLogRecord) + if err != nil { + return err + } + + // prefix each record with its size + size := len(bytes) + sizeBuf := make([]byte, 4) + sizeBuf[0] = byte(size) + sizeBuf[1] = byte(size >> 8) + sizeBuf[2] = byte(size >> 16) + sizeBuf[3] = byte(size >> 24) + + totalSize := size + headerLen + + // write the record + buf.Write(sizeBuf) + buf.Write(bytes) + + h.mu.Lock() + defer h.mu.Unlock() + + for i := 0; i < totalSize; i += atomicChunkSize { + end := i + atomicChunkSize + + 
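+ // clamp the final chunk: the framed record is flushed in atomicChunkSize (4 KiB) pieces while h.mu is held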
if end > totalSize { + end = totalSize + } + _, err = h.w.Write((*buf)[i:end]) + if err != nil { + return err + } + } + return nil +} + +func (h *protoHandler) appendSource(src *strings.Builder, file string, line int) { + dir, file := filepath.Split(file) + + src.WriteString(filepath.Base(dir)) + src.WriteByte('/') + src.WriteString(file) + src.WriteByte(':') + src.WriteString(strconv.Itoa(line)) +} + +func (h *protoHandler) appendAttr(attrs []*controlv1.Attr, attr slog.Attr) []*controlv1.Attr { + if attr.Equal(slog.Attr{}) { + return attrs + } + + attr.Value = attr.Value.Resolve() + + if attr.Value.Kind() == slog.KindGroup { + for _, groupAttr := range attr.Value.Group() { + attrs = h.appendAttr(attrs, groupAttr) + } + return attrs + } + + protoAttr := &controlv1.Attr{ + Key: attr.Key, + Value: h.toString(attr.Value), + } + + return append(attrs, protoAttr) +} + +func (h *protoHandler) toString(v slog.Value) string { + switch v.Kind() { + case slog.KindString: + return v.String() + case slog.KindInt64: + return strconv.FormatInt(v.Int64(), 10) + case slog.KindUint64: + return strconv.FormatUint(v.Uint64(), 10) + case slog.KindFloat64: + return strconv.FormatFloat(v.Float64(), 'g', -1, 64) + case slog.KindBool: + return fmt.Sprintf("%v", v.Bool()) + case slog.KindDuration: + return v.Duration().String() + case slog.KindTime: + return v.Time().String() + case slog.KindAny: + switch cv := v.Any().(type) { + case slog.Level: + return cv.Level().String() + case encoding.TextMarshaler: + data, err := cv.MarshalText() + if err != nil { + break + } + return string(data) + case *slog.Source: + var src strings.Builder + h.appendSource(&src, cv.File, cv.Line) + return src.String() + default: + return fmt.Sprint(v.Any()) + } + } + return "BADVALUE" +} diff --git a/pkg/logger/proto_handler_test.go b/pkg/logger/proto_handler_test.go new file mode 100644 index 0000000000..e3a8887f59 --- /dev/null +++ b/pkg/logger/proto_handler_test.go @@ -0,0 +1,667 @@ +package logger + +import ( + "bytes" + "context" + "errors" + "io" + "log/slog" + "os" + "strconv" + "testing" + "time" + + controlv1 "github.com/rancher/opni/pkg/apis/control/v1" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/types/known/timestamppb" +) + +// modified from https://github.com/lmittmann/tint to match protoHandler's expected behaviour + +// Run test with "faketime" tag: +// +// TZ="" go test -tags=faketime +func TestProtoHandler(t *testing.T) { + slog.SetDefault(slog.New(newProtoHandler(os.Stderr, &slog.HandlerOptions{ + Level: slog.LevelDebug, + }))) + + if !faketime.Equal(time.Now()) { + t.Skip(`skipping test; run with "-tags=faketime"`) + } + + tests := []struct { + Opts *slog.HandlerOptions + F func(l *slog.Logger) + Want *controlv1.StructuredLogRecord + }{ + { + F: func(l *slog.Logger) { + l.Info("test", "key", "val") + }, + Want: toStructuredLog(faketime, "INFO", "", "", "test", + []*controlv1.Attr{ + { + Key: "key", + Value: "val", + }, + }), + }, + { + F: func(l *slog.Logger) { + l.Error("test", Err(errors.New("fail"))) + }, + Want: toStructuredLog(faketime, "ERROR", "", "", "test", + []*controlv1.Attr{ + { + Key: "err", + Value: "fail", + }, + }), + }, + { + F: func(l *slog.Logger) { + l.Info("test", slog.Group("group", slog.String("key", "val"), Err(errors.New("fail")))) + }, + Want: toStructuredLog(faketime, "INFO", "", "", "test", + []*controlv1.Attr{ + { + Key: "key", + Value: "val", + }, + { + Key: "err", + Value: "fail", + }, + }), + }, + { + F: func(l *slog.Logger) { + l.WithGroup("group").Info("test", 
"key", "val") + }, + Want: toStructuredLog(faketime, "INFO", "group", "", "test", + []*controlv1.Attr{ + { + Key: "key", + Value: "val", + }, + }), + }, + { + F: func(l *slog.Logger) { + l.With("key", "val").Info("test", "key2", "val2") + }, + Want: toStructuredLog(faketime, "INFO", "", "", "test", + []*controlv1.Attr{ + { + Key: "key", + Value: "val", + }, + { + Key: "key2", + Value: "val2", + }, + }), + }, + { + F: func(l *slog.Logger) { + l.Info("test", "k e y", "v a l") + }, + Want: toStructuredLog(faketime, "INFO", "", "", "test", + []*controlv1.Attr{ + { + Key: "k e y", + Value: "v a l", + }, + }), + }, + { + F: func(l *slog.Logger) { + l.WithGroup("g r o u p").Info("test", "key", "val") + }, + Want: toStructuredLog(faketime, "INFO", "g r o u p", "", "test", + []*controlv1.Attr{ + { + Key: "key", + Value: "val", + }, + }), + }, + { + F: func(l *slog.Logger) { + l.Info("test", "slice", []string{"a", "b", "c"}, "map", map[string]int{"a": 1, "b": 2, "c": 3}) + }, + Want: toStructuredLog(faketime, "INFO", "", "", "test", + []*controlv1.Attr{ + { + Key: "slice", + Value: "[a b c]", + }, + { + Key: "map", + Value: "map[a:1 b:2 c:3]", + }, + }), + }, + { + Opts: &slog.HandlerOptions{ + AddSource: true, + }, + F: func(l *slog.Logger) { + l.Info("test", "key", "val") + }, + Want: toStructuredLog(faketime, "INFO", "", "logger/proto_handler_test.go:151", "test", + []*controlv1.Attr{ + { + Key: "key", + Value: "val", + }, + }), + }, + { + Opts: &slog.HandlerOptions{ + ReplaceAttr: drop(slog.TimeKey), + }, + F: func(l *slog.Logger) { + l.Info("test", "key", "val") + }, + Want: toStructuredLog(time.Time{}, "INFO", "", "", "test", + []*controlv1.Attr{ + { + Key: "key", + Value: "val", + }, + }), + }, + { + Opts: &slog.HandlerOptions{ + ReplaceAttr: drop(slog.LevelKey), + }, + F: func(l *slog.Logger) { + l.Info("test", "key", "val") + }, + Want: toStructuredLog(faketime, "", "", "", "test", + []*controlv1.Attr{ + { + Key: "key", + Value: "val", + }, + }), + }, + { + Opts: &slog.HandlerOptions{ + ReplaceAttr: drop(slog.MessageKey), + }, + F: func(l *slog.Logger) { + l.Info("test", "key", "val") + }, + Want: toStructuredLog(faketime, "INFO", "", "", "", + []*controlv1.Attr{ + { + Key: "key", + Value: "val", + }, + }), + }, + { + Opts: &slog.HandlerOptions{ + ReplaceAttr: drop(slog.TimeKey, slog.LevelKey, slog.MessageKey), + }, + F: func(l *slog.Logger) { + l.Info("test", "key", "val") + }, + Want: toStructuredLog(time.Time{}, "", "", "", "", + []*controlv1.Attr{ + { + Key: "key", + Value: "val", + }, + }), + }, + { + Opts: &slog.HandlerOptions{ + ReplaceAttr: drop("key"), + }, + F: func(l *slog.Logger) { + l.Info("test", "key", "val") + }, + Want: toStructuredLog(faketime, "INFO", "", "", "test", + []*controlv1.Attr{}), + }, + { + Opts: &slog.HandlerOptions{ + ReplaceAttr: drop("key"), + }, + F: func(l *slog.Logger) { + l.WithGroup("group").Info("test", "key", "val", "key2", "val2") + }, + Want: toStructuredLog(faketime, "INFO", "group", "", "test", + []*controlv1.Attr{ + { + Key: "key2", + Value: "val2", + }, + }), + }, + { + Opts: &slog.HandlerOptions{ + ReplaceAttr: func(groups []string, a slog.Attr) slog.Attr { + if a.Key == "key" && len(groups) == 1 && groups[0] == "group" { + return slog.Attr{} + } + return a + }, + }, + F: func(l *slog.Logger) { + l.WithGroup("group").Info("test", "key", "val", "key2", "val2") + }, + Want: toStructuredLog(faketime, "INFO", "group", "", "test", + []*controlv1.Attr{ + { + Key: "key2", + Value: "val2", + }, + }), + }, + { + Opts: &slog.HandlerOptions{ + 
ReplaceAttr: replace(slog.StringValue("INF"), slog.LevelKey), + }, + F: func(l *slog.Logger) { + l.Info("test", "key", "val") + }, + Want: toStructuredLog(faketime, "INF", "", "", "test", + []*controlv1.Attr{ + { + Key: "key", + Value: "val", + }, + }), + }, + { + Opts: &slog.HandlerOptions{ + ReplaceAttr: replace(slog.IntValue(42), slog.MessageKey), + }, + F: func(l *slog.Logger) { + l.Info("test", "key", "val") + }, + Want: toStructuredLog(faketime, "INFO", "", "", "42", + []*controlv1.Attr{ + { + Key: "key", + Value: "val", + }, + }), + }, + { + Opts: &slog.HandlerOptions{ + ReplaceAttr: replace(slog.IntValue(42), "key"), + }, + F: func(l *slog.Logger) { + l.With("key", "val").Info("test", "key2", "val2") + }, + Want: toStructuredLog(faketime, "INFO", "", "", "test", + []*controlv1.Attr{ + { + Key: "key", + Value: "42", + }, + { + Key: "key2", + Value: "val2", + }, + }), + }, + { + Opts: &slog.HandlerOptions{ + ReplaceAttr: func(groups []string, a slog.Attr) slog.Attr { + return slog.Attr{} + }, + }, + F: func(l *slog.Logger) { + l.Info("test", "key", "val") + }, + Want: toStructuredLog(time.Time{}, "", "", "", "", + []*controlv1.Attr{}), + }, + { + F: func(l *slog.Logger) { + l.Info("test", "key", "") + }, + Want: toStructuredLog(faketime, "INFO", "", "", "test", + []*controlv1.Attr{ + { + Key: "key", + Value: "", + }, + }), + }, + { + F: func(l *slog.Logger) { + l.Info("test", "", "val") + }, + Want: toStructuredLog(faketime, "INFO", "", "", "test", + []*controlv1.Attr{ + { + Key: "", + Value: "val", + }, + }), + }, + { + F: func(l *slog.Logger) { + l.Info("test", "", "") + }, + Want: toStructuredLog(faketime, "INFO", "", "", "test", + []*controlv1.Attr{ + { + Key: "", + Value: "", + }, + }), + }, + + { + F: func(l *slog.Logger) { + l.Log(context.TODO(), slog.LevelInfo+1, "test") + }, + Want: toStructuredLog(faketime, "INFO+1", "", "", "test", + []*controlv1.Attr{}), + }, + { + Opts: &slog.HandlerOptions{ + Level: slog.LevelDebug - 1, + }, + F: func(l *slog.Logger) { + l.Log(context.TODO(), slog.LevelDebug-1, "test") + }, + Want: toStructuredLog(faketime, "DEBUG-1", "", "", "test", + []*controlv1.Attr{}), + }, + { + F: func(l *slog.Logger) { + l.Error("test", slog.Any("ERROR", errors.New("fail"))) + }, + Want: toStructuredLog(faketime, "ERROR", "", "", "test", + []*controlv1.Attr{ + { + Key: "ERROR", + Value: "fail", + }, + }), + }, + { + F: func(l *slog.Logger) { + l.Error("test", Err(nil)) + }, + Want: toStructuredLog(faketime, "ERROR", "", "", "test", + []*controlv1.Attr{ + { + Key: "err", + Value: "", + }, + }), + }, + { + Opts: &slog.HandlerOptions{ + ReplaceAttr: func(groups []string, a slog.Attr) slog.Attr { + if a.Key == slog.TimeKey && len(groups) == 0 { + return slog.Time(slog.TimeKey, a.Value.Time().Add(24*time.Hour)) + } + return a + }, + }, + F: func(l *slog.Logger) { + l.Error("test") + }, + Want: toStructuredLog(faketime.Add(24*time.Hour), "ERROR", "", "", "test", + []*controlv1.Attr{}), + }, + { + F: func(l *slog.Logger) { + l.Info("test", "a", "b", slog.Group("", slog.String("c", "d")), "e", "f") + }, + Want: toStructuredLog(faketime, "INFO", "", "", "test", + []*controlv1.Attr{ + { + Key: "a", + Value: "b", + }, + { + Key: "c", + Value: "d", + }, + { + Key: "e", + Value: "f", + }, + }), + }, + { + Opts: &slog.HandlerOptions{ + ReplaceAttr: drop(slog.TimeKey, slog.LevelKey, slog.MessageKey, slog.SourceKey), + AddSource: true, + }, + F: func(l *slog.Logger) { + l.WithGroup("group").Info("test", "key", "val") + }, + Want: toStructuredLog(time.Time{}, "", "group", "", 
"", + []*controlv1.Attr{ + { + Key: "key", + Value: "val", + }, + }), + }, + { + Opts: &slog.HandlerOptions{ + ReplaceAttr: func(g []string, a slog.Attr) slog.Attr { + if len(g) == 0 && a.Key == slog.LevelKey { + _ = a.Value.Any().(slog.Level) + } + return a + }, + }, + F: func(l *slog.Logger) { + l.Info("test") + }, + Want: toStructuredLog(faketime, "INFO", "", "", "test", + []*controlv1.Attr{}), + }, + { + Opts: &slog.HandlerOptions{ + AddSource: true, + ReplaceAttr: func(g []string, a slog.Attr) slog.Attr { + return a + }, + }, + F: func(l *slog.Logger) { + l.Info("test") + }, + Want: toStructuredLog(faketime, "INFO", "", "logger/proto_handler_test.go:479", "test", + []*controlv1.Attr{}), + }, + { + F: func(l *slog.Logger) { + l = l.WithGroup("group") + l.Error("test", Err(errTest)) + }, + Want: toStructuredLog(faketime, "ERROR", "group", "", "test", + []*controlv1.Attr{ + { + Key: "err", + Value: "fail", + }, + }), + }, + } + + for i, test := range tests { + t.Run(strconv.Itoa(i), func(t *testing.T) { + var buf bytes.Buffer + if test.Opts == nil { + test.Opts = &slog.HandlerOptions{ + Level: slog.LevelDebug, + } + } + + l := slog.New(newProtoHandler(&buf, test.Opts)) + test.F(l) + + sizeBytes := buf.Next(4) + size := int(sizeBytes[0]) | + int(sizeBytes[1])<<8 | + int(sizeBytes[2])<<16 | + int(sizeBytes[3])<<24 + recordBytes := buf.Next(size) + got := &controlv1.StructuredLogRecord{} + + if err := proto.Unmarshal(recordBytes, got); err != nil { + t.Errorf("failed to unmarshal log record bytes: %s", err.Error()) + } + + if !proto.Equal(test.Want, got) { + t.Fatalf("(-want +got)\n- %s\n+ %s", test.Want, got) + } + }) + } +} + +func toStructuredLog(t time.Time, level string, name string, src string, msg string, attrs []*controlv1.Attr) *controlv1.StructuredLogRecord { + timestamppb := timestamppb.New(t) + if t == (time.Time{}) { + timestamppb = nil + } + return &controlv1.StructuredLogRecord{ + Time: timestamppb, + Message: msg, + Name: name, + Source: src, + Level: level, + Attributes: attrs, + } +} + +// See https://github.com/golang/exp/blob/master/slog/benchmarks/benchmarks_test.go#L25 +// +// Run e.g.: +// +// go test -bench=BenchmarkProtoLogAttrs -count=10 | benchstat -col /h /dev/stdin +func BenchmarkProtoLogAttrs(b *testing.B) { + handler := []struct { + Name string + H slog.Handler + }{ + {"protoHandler", newProtoHandler(io.Discard, &slog.HandlerOptions{AddSource: false})}, // slog handlers omit source by default + {"text", slog.NewTextHandler(io.Discard, nil)}, + {"json", slog.NewJSONHandler(io.Discard, nil)}, + {"discard", new(discarder)}, + } + + benchmarks := []struct { + Name string + F func(*slog.Logger) + }{ + { + "5 args", + func(logger *slog.Logger) { + logger.LogAttrs(context.TODO(), slog.LevelInfo, testMessage, + slog.String("string", testString), + slog.Int("status", testInt), + slog.Duration("duration", testDuration), + slog.Time("time", testTime), + slog.Any("ERROR", errTest)) + }, + }, + { + "5 args custom level", + func(logger *slog.Logger) { + logger.LogAttrs(context.TODO(), slog.LevelInfo+1, testMessage, + slog.String("string", testString), + slog.Int("status", testInt), + slog.Duration("duration", testDuration), + slog.Time("time", testTime), + slog.Any("ERROR", errTest), + ) + }, + }, + { + "10 args", + func(logger *slog.Logger) { + logger.LogAttrs(context.TODO(), slog.LevelInfo, testMessage, + slog.String("string", testString), + slog.Int("status", testInt), + slog.Duration("duration", testDuration), + slog.Time("time", testTime), + slog.Any("ERROR", errTest), 
+ slog.String("string", testString), + slog.Int("status", testInt), + slog.Duration("duration", testDuration), + slog.Time("time", testTime), + slog.Any("ERROR", errTest), + ) + }, + }, + { + "40 args", + func(logger *slog.Logger) { + logger.LogAttrs(context.TODO(), slog.LevelInfo, testMessage, + slog.String("string", testString), + slog.Int("status", testInt), + slog.Duration("duration", testDuration), + slog.Time("time", testTime), + slog.Any("ERROR", errTest), + slog.String("string", testString), + slog.Int("status", testInt), + slog.Duration("duration", testDuration), + slog.Time("time", testTime), + slog.Any("ERROR", errTest), + slog.String("string", testString), + slog.Int("status", testInt), + slog.Duration("duration", testDuration), + slog.Time("time", testTime), + slog.Any("ERROR", errTest), + slog.String("string", testString), + slog.Int("status", testInt), + slog.Duration("duration", testDuration), + slog.Time("time", testTime), + slog.Any("ERROR", errTest), + slog.String("string", testString), + slog.Int("status", testInt), + slog.Duration("duration", testDuration), + slog.Time("time", testTime), + slog.Any("ERROR", errTest), + slog.String("string", testString), + slog.Int("status", testInt), + slog.Duration("duration", testDuration), + slog.Time("time", testTime), + slog.Any("ERROR", errTest), + slog.String("string", testString), + slog.Int("status", testInt), + slog.Duration("duration", testDuration), + slog.Time("time", testTime), + slog.Any("ERROR", errTest), + slog.String("string", testString), + slog.Int("status", testInt), + slog.Duration("duration", testDuration), + slog.Time("time", testTime), + slog.Any("ERROR", errTest), + ) + }, + }, + } + + for _, h := range handler { + b.Run("h="+h.Name, func(b *testing.B) { + for _, bench := range benchmarks { + b.Run(bench.Name, func(b *testing.B) { + b.ReportAllocs() + logger := slog.New(h.H) + for i := 0; i < b.N; i++ { + bench.F(logger) + } + }) + } + }) + } +} diff --git a/pkg/logger/remotelogs/server.go b/pkg/logger/remotelogs/server.go new file mode 100644 index 0000000000..bfba941e70 --- /dev/null +++ b/pkg/logger/remotelogs/server.go @@ -0,0 +1,142 @@ +package remotelogs + +import ( + "io" + "log/slog" + "regexp" + "sync" + "time" + + controlv1 "github.com/rancher/opni/pkg/apis/control/v1" + "github.com/rancher/opni/pkg/auth/cluster" + "github.com/rancher/opni/pkg/logger" + "github.com/spf13/afero" + "google.golang.org/protobuf/proto" +) + +type LogServer struct { + controlv1.UnsafeLogServer + LogServerOptions + logger *slog.Logger + + clientsMu sync.RWMutex + clients map[string]controlv1.LogClient +} + +type LogServerOptions struct{} + +type LogServerOption func(*LogServerOptions) + +func (o *LogServerOptions) apply(opts ...LogServerOption) { + for _, op := range opts { + op(o) + } +} + +func NewLogServer(opts ...LogServerOption) *LogServer { + options := &LogServerOptions{} + options.apply(opts...) 
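+ // LogServerOptions currently has no fields; the functional options are applied here only to reserve the pattern for future use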
+ + return &LogServer{ + clients: make(map[string]controlv1.LogClient), + logger: logger.New().WithGroup("agent-log-server"), + } +} + +func (ls *LogServer) AddClient(name string, client controlv1.LogClient) { + ls.clientsMu.Lock() + defer ls.clientsMu.Unlock() + ls.clients[name] = client +} + +func (ls *LogServer) RemoveClient(name string) { + ls.clientsMu.Lock() + defer ls.clientsMu.Unlock() + delete(ls.clients, name) +} + +func (ls *LogServer) StreamLogs(req *controlv1.LogStreamRequest, server controlv1.Log_StreamLogsServer) error { + since := req.Since.AsTime() + until := req.Until.AsTime() + minLevel := req.Filters.Level + nameFilters := req.Filters.NamePattern + follow := req.Follow + + filename := logger.GetLogFileName(cluster.StreamAuthorizedID(server.Context())) + f := logger.ReadFile(filename) + defer f.Close() + + for { + msg, err := ls.getLogMessage(f) + + done := err == io.EOF || err == io.ErrUnexpectedEOF + keepFollowing := done && follow + if keepFollowing { + time.Sleep(time.Second) + continue + } else if done { + return nil + } + if err != nil { + ls.logger.Warn("malformed log record, skipping", logger.Err(err)) + continue + } + + if minLevel != nil && logger.ParseLevel(msg.Level) < slog.Level(*minLevel) { + continue + } + + time := msg.Time.AsTime() + if time.Before(since) { + continue + } + if !follow && time.After(until) { + continue + } + + if nameFilters != nil && !matchesNameFilter(nameFilters, msg.Name) { + continue + } + + err = server.Send(msg) + if err != nil { + return err + } + } +} + +func (ls *LogServer) getLogMessage(f afero.File) (*controlv1.StructuredLogRecord, error) { + sizeBuf := make([]byte, 4) + record := &controlv1.StructuredLogRecord{} + _, err := io.ReadFull(f, sizeBuf) + if err != nil { + return nil, err + } + + size := int32(sizeBuf[0]) | + int32(sizeBuf[1])<<8 | + int32(sizeBuf[2])<<16 | + int32(sizeBuf[3])<<24 + + recordBytes := make([]byte, size) + _, err = io.ReadFull(f, recordBytes) + if err != nil { + return nil, err + } + + if err := proto.Unmarshal(recordBytes, record); err != nil { + return nil, err + } + + return record, nil +} + +func matchesNameFilter(patterns []string, name string) bool { + for _, pattern := range patterns { + matched, _ := regexp.MatchString(pattern, name) + if matched { + return true + } + } + return false +} diff --git a/pkg/management/health.go b/pkg/management/health.go index 8a4f041072..e66c81b08b 100644 --- a/pkg/management/health.go +++ b/pkg/management/health.go @@ -15,25 +15,25 @@ func (m *Server) GetClusterHealthStatus( _ context.Context, ref *corev1.Reference, ) (*corev1.HealthStatus, error) { - if m.healthStatusDataSource == nil { + if m.agentControlDataSource == nil { return nil, status.Error(codes.Unavailable, "health API not configured") } if err := validation.Validate(ref); err != nil { return nil, err } - return m.healthStatusDataSource.GetClusterHealthStatus(ref) + return m.agentControlDataSource.GetClusterHealthStatus(ref) } func (m *Server) WatchClusterHealthStatus( _ *emptypb.Empty, stream managementv1.Management_WatchClusterHealthStatusServer, ) error { - if m.healthStatusDataSource == nil { + if m.agentControlDataSource == nil { return status.Error(codes.Unavailable, "health API not configured") } - healthStatusC := m.healthStatusDataSource.WatchClusterHealthStatus(stream.Context()) + healthStatusC := m.agentControlDataSource.WatchClusterHealthStatus(stream.Context()) for { select { case <-stream.Context().Done(): diff --git a/pkg/management/log.go b/pkg/management/log.go new file mode 100644 
index 0000000000..517cb32bd5 --- /dev/null +++ b/pkg/management/log.go @@ -0,0 +1,46 @@ +package management + +import ( + "io" + + managementv1 "github.com/rancher/opni/pkg/apis/management/v1" + "github.com/rancher/opni/pkg/logger" + "github.com/rancher/opni/pkg/validation" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +func (m *Server) GetAgentLogStream(req *managementv1.StreamAgentLogsRequest, server managementv1.Management_GetAgentLogStreamServer) error { + if m.agentControlDataSource == nil { + return status.Error(codes.Unavailable, "agent control API not configured") + } + + if err := validation.Validate(req); err != nil { + return err + } + + logStream, err := m.agentControlDataSource.StreamLogs(server.Context(), req.Agent, req.Request) + if err != nil || logStream == nil { + m.logger.Error("error streaming agent logs", logger.Err(err)) + return err + } + + for { + log, err := logStream.Recv() + done := err == io.EOF + keepFollowing := done && req.Request.Follow + if keepFollowing { + continue + } else if done { + return nil + } else if err != nil { + return err + } + + err = server.Send(log) + if err != nil { + return err + } + } + +} diff --git a/pkg/management/server.go b/pkg/management/server.go index 6b630ccc03..36cb40ab77 100644 --- a/pkg/management/server.go +++ b/pkg/management/server.go @@ -15,6 +15,7 @@ import ( "github.com/grpc-ecosystem/grpc-gateway/v2/runtime" "github.com/jhump/protoreflect/desc" + controlv1 "github.com/rancher/opni/pkg/apis/control/v1" corev1 "github.com/rancher/opni/pkg/apis/core/v1" managementv1 "github.com/rancher/opni/pkg/apis/management/v1" "github.com/rancher/opni/pkg/caching" @@ -54,9 +55,10 @@ type CapabilitiesDataSource interface { CapabilitiesStore() capabilities.BackendStore } -type HealthStatusDataSource interface { +type AgentControlDataSource interface { GetClusterHealthStatus(ref *corev1.Reference) (*corev1.HealthStatus, error) WatchClusterHealthStatus(ctx context.Context) <-chan *corev1.ClusterHealthStatus + StreamLogs(ctx context.Context, id *corev1.Reference, req *controlv1.LogStreamRequest) (controlv1.Log_StreamLogsClient, error) } type apiExtension struct { @@ -85,7 +87,7 @@ var _ managementv1.ManagementServer = (*Server)(nil) type managementServerOptions struct { lifecycler config.Lifecycler capabilitiesDataSource CapabilitiesDataSource - healthStatusDataSource HealthStatusDataSource + agentControlDataSource AgentControlDataSource } type ManagementServerOption func(*managementServerOptions) @@ -108,9 +110,9 @@ func WithCapabilitiesDataSource(src CapabilitiesDataSource) ManagementServerOpti } } -func WithHealthStatusDataSource(src HealthStatusDataSource) ManagementServerOption { +func WithAgentControlDataSource(src AgentControlDataSource) ManagementServerOption { return func(o *managementServerOptions) { - o.healthStatusDataSource = src + o.agentControlDataSource = src } } diff --git a/pkg/opni/commands/admin.go b/pkg/opni/commands/admin.go index eb45cbc076..a8766fa5fd 100644 --- a/pkg/opni/commands/admin.go +++ b/pkg/opni/commands/admin.go @@ -313,7 +313,7 @@ func parseTimeOrDie(timeStr string) time.Time { } t, err := when.EN.Parse(timeStr, time.Now()) if err != nil || t == nil { - lg.Error("could not interpret start time") + lg.Error("could not interpret start/end time") os.Exit(1) } return t.Time diff --git a/pkg/opni/commands/debug.go b/pkg/opni/commands/debug.go index deb03386e8..421f3bf4ba 100644 --- a/pkg/opni/commands/debug.go +++ b/pkg/opni/commands/debug.go @@ -14,11 +14,13 @@ import ( "time" 
channelzcmd "github.com/kazegusuri/channelzcli/cmd" + controlv1 "github.com/rancher/opni/pkg/apis/control/v1" corev1 "github.com/rancher/opni/pkg/apis/core/v1" managementv1 "github.com/rancher/opni/pkg/apis/management/v1" "github.com/rancher/opni/pkg/clients" "github.com/rancher/opni/pkg/config/v1beta1" "github.com/rancher/opni/pkg/keyring" + "github.com/rancher/opni/pkg/logger" "github.com/rancher/opni/pkg/machinery" "github.com/rancher/opni/pkg/opni/cliutil" "github.com/rancher/opni/pkg/storage" @@ -27,8 +29,10 @@ import ( "go.etcd.io/etcd/etcdctl/v3/ctlv3" channelzgrpc "google.golang.org/grpc/channelz/grpc_channelz_v1" "google.golang.org/protobuf/encoding/protojson" + "google.golang.org/protobuf/proto" "google.golang.org/protobuf/types/known/durationpb" "google.golang.org/protobuf/types/known/emptypb" + "google.golang.org/protobuf/types/known/timestamppb" "sigs.k8s.io/yaml" ) @@ -43,6 +47,7 @@ func BuildDebugCmd() *cobra.Command { debugCmd.AddCommand(BuildDebugChannelzCmd()) debugCmd.AddCommand(BuildDebugDashboardSettingsCmd()) debugCmd.AddCommand(BuildDebugImportAgentCmd()) + debugCmd.AddCommand(BuildDebugAgentLogStreamGetCmd()) ConfigureManagementCommand(debugCmd) return debugCmd } @@ -462,6 +467,91 @@ func BuildDebugImportAgentCmd() *cobra.Command { return cmd } +func BuildDebugAgentLogStreamGetCmd() *cobra.Command { + var since, until, level, output string + var follow bool + var names []string + cmd := &cobra.Command{ + Use: "agent-logs <cluster-id>", + Short: "Get agent logs", + Args: cobra.ExactArgs(1), + ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { + return completeClusters(cmd, args, toComplete) + }, + RunE: func(cmd *cobra.Command, args []string) error { + if len(args) == 0 { + cl, err := mgmtClient.ListClusters(cmd.Context(), &managementv1.ListClustersRequest{}) + if err != nil { + lg.Error("fatal", logger.Err(err)) + os.Exit(1) + } + for _, c := range cl.Items { + args = append(args, c.Id) + } + } + streamRequest := &managementv1.StreamAgentLogsRequest{ + Agent: &corev1.Reference{ + Id: args[0], + }, + Request: &controlv1.LogStreamRequest{ + Filters: &controlv1.LogStreamFilters{ + NamePattern: names, + Level: lo.ToPtr(int32(logger.ParseLevel(level))), + }, + Follow: follow, + }, + } + + startTime := parseTimeOrDie(since) + endTime := parseTimeOrDie(until) + streamRequest.Request.Since = timestamppb.New(startTime) + streamRequest.Request.Until = timestamppb.New(endTime) + + stream, err := mgmtClient.GetAgentLogStream(cmd.Context(), streamRequest) + if err != nil { + return err + } + for { + log, err := stream.Recv() + if err != nil { + return err + } + + done := (proto.Equal(log, &controlv1.StructuredLogRecord{})) + keepFollowing := done && follow + if keepFollowing { + continue + } else if done { + return nil + } + + var attrs strings.Builder + for _, attr := range log.Attributes { + attrs.WriteString(attr.Key) + attrs.WriteString("=") + attrs.WriteString(attr.Value) + attrs.WriteString(" ") + } + fmt.Println(log.Time.AsTime().Format(logger.DefaultTimeFormat), log.Level, log.Name, log.Source, log.Message, attrs.String()) + } + }, + } + + cmd.Flags().StringVar(&since, "since", "1 day ago", "Start time") + cmd.Flags().StringVar(&until, "until", "now", "End time") + cmd.Flags().StringSliceVar(&names, "name", nil, "Pattern filter(s) by component name") + cmd.Flags().StringVar(&level, "level", "info", "Minimum log level severity (debug, info, warn, error)") + cmd.Flags().StringVar(&output, "output", "text", "Output format") + 
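+ // the --output flag is parsed and given completions below, but RunE above currently always prints the text format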
 	cmd.Flags().BoolVar(&follow, "follow", false, "Follow logs")
+	cmd.RegisterFlagCompletionFunc("level", func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
+		return []string{"debug", "info", "warn", "error"}, cobra.ShellCompDirectiveDefault
+	})
+	cmd.RegisterFlagCompletionFunc("output", func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
+		return []string{"text", "json"}, cobra.ShellCompDirectiveDefault
+	})
+	return cmd
+}
+
 func init() {
 	AddCommandsToGroup(Debug, BuildDebugCmd())
 }
diff --git a/pkg/opni/commands/gateway.go b/pkg/opni/commands/gateway.go
index 0210a871b5..15a8355291 100644
--- a/pkg/opni/commands/gateway.go
+++ b/pkg/opni/commands/gateway.go
@@ -22,6 +22,7 @@ import (
 	"github.com/rancher/opni/pkg/opni/cliutil"
 	"github.com/rancher/opni/pkg/plugins"
 	"github.com/rancher/opni/pkg/plugins/hooks"
+	"github.com/rancher/opni/pkg/plugins/meta"
 	"github.com/rancher/opni/pkg/tracing"
 	"github.com/rancher/opni/pkg/update/noop"
 	"github.com/samber/lo"
@@ -99,7 +100,7 @@ func BuildGatewayCmd() *cobra.Command {
 			m := management.NewServer(ctx, &gatewayConfig.Spec.Management, g, pluginLoader,
 				management.WithCapabilitiesDataSource(g),
-				management.WithHealthStatusDataSource(g),
+				management.WithAgentControlDataSource(g),
 				management.WithLifecycler(lifecycler),
 			)
@@ -110,6 +111,7 @@ func BuildGatewayCmd() *cobra.Command {
 				lg.Info(fmt.Sprintf("loaded %d plugins", numLoaded))
 				close(doneLoadingPlugins)
 			}))
+			ctx = logger.WithMode(ctx, meta.ModeGateway)
 			pluginLoader.LoadPlugins(ctx, gatewayConfig.Spec.Plugins.Dir, plugins.GatewayScheme)
 			select {
 			case <-doneLoadingPlugins:
diff --git a/pkg/plugins/apis/apiextensions/stream/plugin.go b/pkg/plugins/apis/apiextensions/stream/plugin.go
index e6c35aa8d8..ee61acc5f8 100644
--- a/pkg/plugins/apis/apiextensions/stream/plugin.go
+++ b/pkg/plugins/apis/apiextensions/stream/plugin.go
@@ -76,6 +76,6 @@ func (p *streamApiExtensionPlugin[T]) GRPCClient(
 }
 
 func init() {
-	plugins.GatewayScheme.Add(StreamAPIExtensionPluginID, NewGatewayPlugin(nil))
-	plugins.AgentScheme.Add(StreamAPIExtensionPluginID, NewAgentPlugin(nil))
+	plugins.GatewayScheme.Add(StreamAPIExtensionPluginID, NewGatewayPlugin(context.TODO(), nil))
+	plugins.AgentScheme.Add(StreamAPIExtensionPluginID, NewAgentPlugin(context.TODO(), nil))
 }
diff --git a/pkg/plugins/apis/apiextensions/stream/plugin_agent.go b/pkg/plugins/apis/apiextensions/stream/plugin_agent.go
index ca1a214484..ef7194963a 100644
--- a/pkg/plugins/apis/apiextensions/stream/plugin_agent.go
+++ b/pkg/plugins/apis/apiextensions/stream/plugin_agent.go
@@ -11,8 +11,6 @@ import (
 	"sync"
 	"time"
 
-	"log/slog"
-
 	"github.com/google/uuid"
 	"github.com/hashicorp/go-plugin"
 	"github.com/jhump/protoreflect/grpcreflect"
@@ -40,7 +38,7 @@ var (
 	discoveryTimeout = atomic.NewDuration(10 * time.Second)
 )
 
-func NewAgentPlugin(p StreamAPIExtension) plugin.Plugin {
+func NewAgentPlugin(ctx context.Context, p StreamAPIExtension) plugin.Plugin {
 	pc, _, _, ok := runtime.Caller(1)
 	fn := runtime.FuncForPC(pc)
 	name := "unknown"
@@ -50,9 +48,12 @@
 		name = fmt.Sprintf("plugin_%s", parts[slices.Index(parts, "plugins")+1])
 	}
 
+	lg := logger.NewPluginLogger(ctx).WithGroup(name).WithGroup("stream")
+	ctx = logger.WithPluginLogger(ctx, lg)
+
 	ext := &agentStreamExtensionServerImpl{
+		ctx:           ctx,
 		name:          name,
-		logger:        logger.NewPluginLogger().WithGroup(name).WithGroup("stream"),
 		activeStreams: make(map[string]chan struct{}),
 	}
 	if p != nil {
@@ -80,10 +81,10 @@ type agentStreamExtensionServerImpl struct {
 	streamv1.UnsafeStreamServer
 	apiextensions.UnimplementedStreamAPIExtensionServer
 
+	ctx           context.Context
 	name          string
 	servers       []*richServer
 	clientHandler StreamClientHandler
-	logger        *slog.Logger
 
 	activeStreamsMu sync.Mutex
 	activeStreams   map[string]chan struct{}
@@ -91,7 +92,8 @@
 
 // Implements streamv1.StreamServer
 func (e *agentStreamExtensionServerImpl) Connect(stream streamv1.Stream_ConnectServer) error {
-	e.logger.Debug("stream connected")
+	lg := logger.PluginLoggerFromContext(e.ctx)
+	lg.Debug("stream connected")
 	correlationId := uuid.NewString()
 	stream.SendHeader(metadata.Pairs(CorrelationIDHeader, correlationId))
@@ -117,7 +119,7 @@ func (e *agentStreamExtensionServerImpl) Connect(stream streamv1.Stream_ConnectS
 	ts, err := totem.NewServer(stream, opts...)
 	if err != nil {
-		e.logger.With(
+		lg.With(
 			logger.Err(err),
 		).Error("failed to create stream server")
 		return err
@@ -145,32 +147,32 @@ func (e *agentStreamExtensionServerImpl) Connect(stream streamv1.Stream_ConnectS
 		// and reconnect.
 		return status.Errorf(codes.DeadlineExceeded, "stream client discovery timed out after %s", timeout)
 	case <-stream.Context().Done():
-		e.logger.With(stream.Context().Err()).Error("stream disconnected while waiting for discovery")
+		lg.With(stream.Context().Err()).Error("stream disconnected while waiting for discovery")
 		return stream.Context().Err()
 	}
 
 	select {
 	case <-notifyC:
-		e.logger.Debug("stream client is now available")
+		lg.Debug("stream client is now available")
 		if e.clientHandler != nil {
 			e.clientHandler.UseStreamClient(cc)
 		}
 	case err := <-errC:
 		if err != nil {
-			e.logger.With(stream.Context().Err()).Error("stream encountered an error while waiting for discovery")
+			lg.With(stream.Context().Err()).Error("stream encountered an error while waiting for discovery")
 			return status.Errorf(codes.Internal, "stream encountered an error while waiting for discovery: %v", err)
 		}
 	}
 
-	e.logger.Debug("stream server started")
+	lg.Debug("stream server started")
 
 	err = <-errC
 	if errors.Is(err, io.EOF) {
-		e.logger.Debug("stream disconnected")
+		lg.Debug("stream disconnected")
 	} else if status.Code(err) == codes.Canceled {
-		e.logger.Debug("stream closed")
+		lg.Debug("stream closed")
 	} else {
-		e.logger.With(
+		lg.With(
 			logger.Err(err),
 		).Warn("stream disconnected with error")
 	}
@@ -178,14 +180,15 @@ func (e *agentStreamExtensionServerImpl) Connect(stream streamv1.Stream_ConnectS
 }
 
 func (e *agentStreamExtensionServerImpl) Notify(_ context.Context, event *streamv1.StreamEvent) (*emptypb.Empty, error) {
-	e.logger.With(
+	lg := logger.PluginLoggerFromContext(e.ctx)
+	lg.With(
 		"type", event.Type.String(),
 	).Debug(fmt.Sprintf("received notify event for '%s'", e.name))
 	e.activeStreamsMu.Lock()
 	defer e.activeStreamsMu.Unlock()
 
 	if event.Type == streamv1.EventType_DiscoveryComplete {
-		e.logger.Debug("processing discovery complete event")
+		lg.Debug("processing discovery complete event")
 		correlationId := event.GetCorrelationId()
 		if correlationId == "" {
diff --git a/pkg/plugins/apis/apiextensions/stream/plugin_gateway.go b/pkg/plugins/apis/apiextensions/stream/plugin_gateway.go
index a0a0d56843..12e7636881 100644
--- a/pkg/plugins/apis/apiextensions/stream/plugin_gateway.go
+++ b/pkg/plugins/apis/apiextensions/stream/plugin_gateway.go
@@ -9,8 +9,6 @@ import (
 	"slices"
 	"strings"
 
-	"log/slog"
-
 	"github.com/hashicorp/go-plugin"
 	"github.com/jhump/protoreflect/grpcreflect"
 	"github.com/kralicky/totem"
@@ -54,7 +52,7 @@ func WithMetrics(conf GatewayStreamMetricsConfig) GatewayStreamApiExtensionPlugi
 	}
 }
 
-func NewGatewayPlugin(p StreamAPIExtension, opts ...GatewayStreamApiExtensionPluginOption) plugin.Plugin {
+func NewGatewayPlugin(ctx context.Context, p StreamAPIExtension, opts ...GatewayStreamApiExtensionPluginOption) plugin.Plugin {
 	options := GatewayStreamApiExtensionPluginOptions{}
 	options.apply(opts...)
@@ -67,9 +65,11 @@ func NewGatewayPlugin(p StreamAPIExtension, opts ...GatewayStreamApiExtensionPlu
 		name = fmt.Sprintf("plugin_%s", parts[slices.Index(parts, "plugins")+1])
 	}
 
+	lg := logger.NewPluginLogger(ctx).WithGroup(name).WithGroup("stream")
+	ctx = logger.WithPluginLogger(ctx, lg)
 	ext := &gatewayStreamExtensionServerImpl{
+		ctx:           ctx,
 		name:          name,
-		logger:        logger.NewPluginLogger().WithGroup(name).WithGroup("stream"),
 		metricsConfig: options.metricsConfig,
 	}
 	if p != nil {
@@ -105,19 +105,20 @@ type gatewayStreamExtensionServerImpl struct {
 	streamv1.UnimplementedStreamServer
 	apiextensions.UnsafeStreamAPIExtensionServer
 
+	ctx           context.Context
 	name          string
 	servers       []*richServer
 	clientHandler StreamClientHandler
-	logger        *slog.Logger
 	metricsConfig GatewayStreamMetricsConfig
 	meterProvider *metric.MeterProvider
 }
 
 // Implements streamv1.StreamServer
 func (e *gatewayStreamExtensionServerImpl) Connect(stream streamv1.Stream_ConnectServer) error {
+	lg := logger.PluginLoggerFromContext(e.ctx)
 	id := cluster.StreamAuthorizedID(stream.Context())
-	e.logger.With(
+	lg.With(
 		"id", id,
 	).Debug("stream connected")
@@ -144,7 +145,7 @@ func (e *gatewayStreamExtensionServerImpl) Connect(stream streamv1.Stream_Connec
 	ts, err := totem.NewServer(stream, opts...)
 	if err != nil {
-		e.logger.With(
+		lg.With(
 			logger.Err(err),
 		).Error("failed to create stream server")
 		return err
@@ -155,15 +156,15 @@ func (e *gatewayStreamExtensionServerImpl) Connect(stream streamv1.Stream_Connec
 
 	_, errC := ts.Serve()
 
-	e.logger.Debug("stream server started")
+	lg.Debug("stream server started")
 
 	err = <-errC
 	if errors.Is(err, io.EOF) || status.Code(err) == codes.OK {
-		e.logger.Debug("stream server exited")
+		lg.Debug("stream server exited")
 	} else if status.Code(err) == codes.Canceled {
-		e.logger.Debug("stream server closed")
+		lg.Debug("stream server closed")
 	} else {
-		e.logger.With(
+		lg.With(
 			logger.Err(err),
 		).Warn("stream server exited with error")
 	}
@@ -172,13 +173,15 @@ func (e *gatewayStreamExtensionServerImpl) Connect(stream streamv1.Stream_Connec
 
 // ConnectInternal implements apiextensions.StreamAPIExtensionServer
 func (e *gatewayStreamExtensionServerImpl) ConnectInternal(stream apiextensions.StreamAPIExtension_ConnectInternalServer) error {
+	lg := logger.PluginLoggerFromContext(e.ctx)
+
 	if e.clientHandler == nil {
 		stream.SendHeader(metadata.Pairs("accept-internal-stream", "false"))
 		return nil
 	}
 	stream.SendHeader(metadata.Pairs("accept-internal-stream", "true"))
 
-	e.logger.Debug("internal gateway stream connected")
+	lg.Debug("internal gateway stream connected")
 
 	ts, err := totem.NewServer(
 		stream,
@@ -197,11 +200,11 @@ func (e *gatewayStreamExtensionServerImpl) ConnectInternal(stream apiextensions.
 	select {
 	case err := <-errC:
 		if errors.Is(err, io.EOF) {
-			e.logger.Debug("stream disconnected")
+			lg.Debug("stream disconnected")
 		} else if status.Code(err) == codes.Canceled {
-			e.logger.Debug("stream closed")
+			lg.Debug("stream closed")
 		} else {
-			e.logger.With(
+			lg.With(
 				logger.Err(err),
 			).Warn("stream disconnected with error")
 		}
@@ -209,7 +212,7 @@ func (e *gatewayStreamExtensionServerImpl) ConnectInternal(stream apiextensions.
 	default:
 	}
 
-	e.logger.Debug("calling client handler")
+	lg.Debug("calling client handler")
 	go e.clientHandler.UseStreamClient(cc)
 
 	return <-errC
diff --git a/pkg/plugins/apis/apiextensions/stream/plugin_test.go b/pkg/plugins/apis/apiextensions/stream/plugin_test.go
index 54445063f2..f829823708 100644
--- a/pkg/plugins/apis/apiextensions/stream/plugin_test.go
+++ b/pkg/plugins/apis/apiextensions/stream/plugin_test.go
@@ -41,12 +41,12 @@ var _ = Describe("Stream API Extensions Plugin", Ordered, Label("unit"), func()
 		pluginImpl = mock_stream.NewMockStreamAPIExtensionWithHandlers(ctrl)
 		agentMode = func(ctx context.Context) meta.Scheme {
 			agentScheme := meta.NewScheme(meta.WithMode(meta.ModeAgent))
-			agentScheme.Add(stream.StreamAPIExtensionPluginID, stream.NewAgentPlugin(pluginImpl))
+			agentScheme.Add(stream.StreamAPIExtensionPluginID, stream.NewAgentPlugin(ctx, pluginImpl))
 			return agentScheme
 		}
 		gatewayMode = func(ctx context.Context) meta.Scheme {
 			gatewayScheme := meta.NewScheme(meta.WithMode(meta.ModeGateway))
-			gatewayScheme.Add(stream.StreamAPIExtensionPluginID, stream.NewGatewayPlugin(pluginImpl))
+			gatewayScheme.Add(stream.StreamAPIExtensionPluginID, stream.NewGatewayPlugin(ctx, pluginImpl))
 			return gatewayScheme
 		}
diff --git a/pkg/plugins/client.go b/pkg/plugins/client.go
index 8e4efc3c10..44faa098ff 100644
--- a/pkg/plugins/client.go
+++ b/pkg/plugins/client.go
@@ -1,8 +1,9 @@
 package plugins
 
 import (
+	"context"
 	"fmt"
-	"os"
+	"io"
 	"os/exec"
 
 	"github.com/hashicorp/go-hclog"
@@ -10,6 +11,7 @@ import (
 	"github.com/rancher/opni/pkg/auth/cluster"
 	"github.com/rancher/opni/pkg/auth/session"
 	"github.com/rancher/opni/pkg/caching"
+	"github.com/rancher/opni/pkg/logger"
 	"github.com/rancher/opni/pkg/plugins/meta"
 	"github.com/rancher/opni/pkg/util/streams"
 	"go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
@@ -41,17 +43,23 @@ func WithSecureConfig(sc *plugin.SecureConfig) ClientOption {
 	}
 }
 
-func ClientConfig(md meta.PluginMeta, scheme meta.Scheme, opts ...ClientOption) *plugin.ClientConfig {
+func ClientConfig(ctx context.Context, md meta.PluginMeta, scheme meta.Scheme, opts ...ClientOption) *plugin.ClientConfig {
 	options := &ClientOptions{}
 	options.apply(opts...)
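The context-scoped helpers this patch leans on (`logger.WithMode`, `logger.WithPluginLogger`, `logger.PluginLoggerFromContext`, `logger.WithAgentId`) live in pkg/logger and are not part of the diff itself. A minimal sketch of how helpers like these are typically built on context values (the key types, the string-typed mode, and the fallback logger are assumptions for illustration, not the actual pkg/logger code):

package logger

import (
	"context"
	"log/slog"
	"os"
)

// hypothetical unexported key types; the real package may differ
type pluginLoggerKey struct{}
type modeKey struct{}

// WithPluginLogger stores a logger on the context so call sites can
// resolve it later instead of keeping a *slog.Logger struct field.
func WithPluginLogger(ctx context.Context, lg *slog.Logger) context.Context {
	return context.WithValue(ctx, pluginLoggerKey{}, lg)
}

// PluginLoggerFromContext returns the stored logger, falling back to a
// default text handler when the context carries none.
func PluginLoggerFromContext(ctx context.Context) *slog.Logger {
	if lg, ok := ctx.Value(pluginLoggerKey{}).(*slog.Logger); ok {
		return lg
	}
	return slog.New(slog.NewTextHandler(os.Stderr, nil))
}

// WithMode tags the context with the plugin mode (agent or gateway) so
// writers such as NewPluginFileWriter can route output by mode.
func WithMode(ctx context.Context, mode string) context.Context {
	return context.WithValue(ctx, modeKey{}, mode)
}

The property this buys, visible in the ClientConfig hunk continuing below, is that a plugin's identity and mode travel with the context, so everything from a stream handler down to the hclog stderr writer can recover a correctly scoped logger without extra plumbing.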
+	if md.ExtendedMetadata != nil {
+		mode := md.ExtendedMetadata.ModeList.Modes[0]
+		ctx = logger.WithMode(ctx, mode)
+	}
+
 	cc := &plugin.ClientConfig{
 		Plugins:          scheme.PluginMap(),
 		HandshakeConfig:  Handshake,
 		AllowedProtocols: []plugin.Protocol{plugin.ProtocolGRPC},
 		Managed:          true,
 		Logger: hclog.New(&hclog.LoggerOptions{
-			Level: hclog.Error,
+			Level:  hclog.Error,
+			Output: io.Discard,
 		}),
 		GRPCDialOptions: []grpc.DialOption{
 			grpc.WithChainUnaryInterceptor(
@@ -61,8 +69,7 @@ func ClientConfig(md meta.PluginMeta, scheme meta.Scheme, opts ...ClientOption)
 			grpc.WithPerRPCCredentials(cluster.ClusterIDKey),
 			grpc.WithPerRPCCredentials(session.AttributesKey),
 		},
-		SyncStderr: os.Stderr,
-		Stderr:     os.Stderr,
+		Stderr: logger.NewPluginFileWriter(ctx),
 	}
 
 	if options.reattach != nil {
diff --git a/pkg/plugins/loader.go b/pkg/plugins/loader.go
index 6ea085a5ef..e5d7958315 100644
--- a/pkg/plugins/loader.go
+++ b/pkg/plugins/loader.go
@@ -297,7 +297,7 @@ func (p *PluginLoader) LoadPlugins(ctx context.Context, pluginDir string, scheme
 				continue
 			}
 		}
-		cc := ClientConfig(md, scheme, clientOpts...)
+		cc := ClientConfig(ctx, md, scheme, clientOpts...)
 		wg.Add(1)
 		go func() {
diff --git a/pkg/resources/collector/collector.go b/pkg/resources/collector/collector.go
index f05eb77651..5cc0e4da51 100644
--- a/pkg/resources/collector/collector.go
+++ b/pkg/resources/collector/collector.go
@@ -45,7 +45,7 @@ func NewReconciler(
 		collector:           instance,
 		tmpl:                otel.OTELTemplates,
 		ctx:                 ctx,
-		lg:                  logger.New().WithGroup("plugin").WithGroup("collector-controller"),
+		lg:                  logger.New().WithGroup("collector-controller"),
 		PrometheusDiscovery: nil,
 	}
 }
diff --git a/pkg/test/environment.go b/pkg/test/environment.go
index b18a940ee4..cd7a78f9c3 100644
--- a/pkg/test/environment.go
+++ b/pkg/test/environment.go
@@ -1885,7 +1885,7 @@ func (e *Environment) startGateway() {
 	m := management.NewServer(e.ctx, &e.gatewayConfig.Spec.Management, g, pluginLoader,
 		management.WithCapabilitiesDataSource(g),
-		management.WithHealthStatusDataSource(g),
+		management.WithAgentControlDataSource(g),
 		management.WithLifecycler(lifecycler),
 	)
 	g.MustRegisterCollector(m)
@@ -2160,6 +2160,7 @@ func (e *Environment) StartAgent(id string, token *corev1.BootstrapToken, pins [
 	var a agent.AgentInterface
 	mu := &sync.Mutex{}
 	agentCtx, cancel := context.WithCancel(options.ctx)
+
 	go func() {
 		defer cancel()
 		mu.Lock()
@@ -2179,7 +2180,8 @@ func (e *Environment) StartAgent(id string, token *corev1.BootstrapToken, pins [
 			mu.Unlock()
 			return
 		}
-		globalTestPlugins.LoadPlugins(e.ctx, pl, pluginmeta.ModeAgent)
+		ctx := logger.WithAgentId(e.ctx, id)
+		globalTestPlugins.LoadPlugins(ctx, pl, pluginmeta.ModeAgent)
 		agentListMu.Lock()
 		agentList[id] = cancel
 		agentListMu.Unlock()
diff --git a/pkg/test/plugin.go b/pkg/test/plugin.go
index 7358f7c213..62d9e52a90 100644
--- a/pkg/test/plugin.go
+++ b/pkg/test/plugin.go
@@ -11,6 +11,7 @@ import (
 	"github.com/hashicorp/go-hclog"
 	"github.com/hashicorp/go-plugin"
 	"github.com/rancher/opni/pkg/config/v1beta1"
+	"github.com/rancher/opni/pkg/logger"
 	"github.com/rancher/opni/pkg/plugins"
 	"github.com/rancher/opni/pkg/plugins/apis/apiextensions"
 	managementext "github.com/rancher/opni/pkg/plugins/apis/apiextensions/management"
@@ -117,6 +118,7 @@ func (tp TestPluginSet) LoadPlugins(ctx context.Context, loader *plugins.PluginL
 		RootCAs:    caPool,
 		ServerName: "localhost",
 	}
+	ctx = logger.WithMode(ctx, mode)
 
 	wg := &sync.WaitGroup{}
 	for _, p := range tp[mode] {
@@ -131,7 +133,7 @@ func (tp TestPluginSet) LoadPlugins(ctx context.Context, loader *plugins.PluginL
 			return tlsConfig, nil
 		}
 		go plugin.Serve(sc)
-		cc := plugins.ClientConfig(p.Metadata, scheme, plugins.WithReattachConfig(<-ch))
+		cc := plugins.ClientConfig(ctx, p.Metadata, scheme, plugins.WithReattachConfig(<-ch))
 		cc.TLSConfig = tlsConfig
 		wg.Add(1)
 		go func() {
@@ -156,6 +158,11 @@ func (tp TestPluginSet) EnablePlugin(pkgName, pluginName string, mode meta.Plugi
 			BinaryPath: pluginName,
 			GoVersion:  runtime.Version(),
 			Module:     pkgName,
+			ExtendedMetadata: &meta.ExtendedPluginMeta{
+				ModeList: meta.ModeList{
+					Modes: []meta.PluginMode{mode},
+				},
+			},
 		},
 	})
 }
diff --git a/pkg/tracing/tracing.go b/pkg/tracing/tracing.go
index 481082c6d6..848d5f29d9 100644
--- a/pkg/tracing/tracing.go
+++ b/pkg/tracing/tracing.go
@@ -52,5 +52,5 @@ func Configure(serviceName string) {
 	otel.SetTracerProvider(tracesdk.NewTracerProvider(opts...))
 	otel.SetTextMapPropagator(autoprop.NewTextMapPropagator())
-	otel.SetLogger(logger.NewLogr().WithName("tracing"))
+	otel.SetLogger(logger.NewLogr(logger.WithLogLevel(slog.LevelError)).WithName("tracing"))
 }
diff --git a/pkg/util/nats/client.go b/pkg/util/nats/client.go
index ff376e9d00..3d1a30ad1c 100644
--- a/pkg/util/nats/client.go
+++ b/pkg/util/nats/client.go
@@ -94,7 +94,7 @@ func AcquireNATSConnection(
 	opts ...NatsAcquireOption,
 ) (*nats.Conn, error) {
 	options := &natsAcquireOptions{
-		lg:      logger.NewPluginLogger().WithGroup("nats-conn"),
+		lg:      logger.New().WithGroup("nats-conn"),
 		retrier: backoffv2.Exponential(
 			backoffv2.WithMaxRetries(0),
 			backoffv2.WithMinInterval(5*time.Second),
diff --git a/plugins/aiops/pkg/gateway/admin.go b/plugins/aiops/pkg/gateway/admin.go
index d7865131c4..b768b3d591 100644
--- a/plugins/aiops/pkg/gateway/admin.go
+++ b/plugins/aiops/pkg/gateway/admin.go
@@ -153,6 +153,7 @@ func (s *AIOpsPlugin) putPretrainedModel(
 }
 
 func (s *AIOpsPlugin) GetAISettings(ctx context.Context, _ *emptypb.Empty) (*admin.AISettings, error) {
+	lg := logger.PluginLoggerFromContext(s.ctx)
 	opni := &aiv1beta1.OpniCluster{}
 	err := s.k8sClient.Get(ctx, types.NamespacedName{
 		Name:      OpniServicesName,
@@ -188,7 +189,7 @@ func (s *AIOpsPlugin) GetAISettings(ctx context.Context, _ *emptypb.Empty) (*adm
 			Namespace: opni.Spec.S3.External.Credentials.Namespace,
 		}, secret)
 		if err != nil {
-			s.Logger.Error("failed to get s3 secret", logger.Err(err))
+			lg.Error("failed to get s3 secret", logger.Err(err))
 		}
 		s3Settings = &admin.S3Settings{
 			Endpoint: opni.Spec.S3.External.Endpoint,
@@ -325,7 +326,8 @@ func (s *AIOpsPlugin) PutAISettings(ctx context.Context, settings *admin.AISetti
 }
 
 func (s *AIOpsPlugin) deleteAIOpsResources(ctx context.Context) error {
-	s.Logger.Info("deleting aiops resources...")
+	lg := logger.PluginLoggerFromContext(s.ctx)
+	lg.Info("deleting aiops resources...")
 	err := s.k8sClient.Delete(ctx, &aiv1beta1.OpniCluster{
 		ObjectMeta: metav1.ObjectMeta{
 			Name:      OpniServicesName,
@@ -354,16 +356,17 @@ func (s *AIOpsPlugin) deleteAIOpsResources(ctx context.Context) error {
 }
 
 func (s *AIOpsPlugin) DeleteAISettings(ctx context.Context, options *admin.DeleteOptions) (*emptypb.Empty, error) {
+	lg := logger.PluginLoggerFromContext(s.ctx)
 	if lo.FromPtrOr(options.PurgeModelTrainingData, false) {
 		ctxca, ca := context.WithTimeout(ctx, 10*time.Second)
 		defer ca()
 		if err := s.deleteTrainingJobInfo(ctxca); err != nil {
-			s.Logger.Error(fmt.Sprintf("failed to purge %s", err))
+			lg.Error(fmt.Sprintf("failed to purge %s", err))
 			return nil, err
 		}
 	}
 	if err := s.deleteAIOpsResources(ctx); err != nil {
-		s.Logger.Error(fmt.Sprintf("failed to delete AiOps resources : %s", err))
+		lg.Error(fmt.Sprintf("failed to delete AiOps resources : %s", err))
 		return nil, err
 	}
 	return &emptypb.Empty{}, nil
diff --git a/plugins/aiops/pkg/gateway/aggregation.go b/plugins/aiops/pkg/gateway/aggregation.go
index d005bd539f..b60825170d 100644
--- a/plugins/aiops/pkg/gateway/aggregation.go
+++ b/plugins/aiops/pkg/gateway/aggregation.go
@@ -5,6 +5,8 @@ import (
 	"encoding/json"
 	"fmt"
 	"time"
+
+	"github.com/rancher/opni/pkg/logger"
 )
 
 type Key struct {
@@ -73,6 +75,7 @@ func (a *Aggregations) Add(bucket Bucket) {
 }
 
 func (p *AIOpsPlugin) aggregateWorkloadLogs() {
+	lg := logger.PluginLoggerFromContext(p.ctx)
 	request := map[string]any{
 		"size": 0,
 		"query": map[string]any{
@@ -131,7 +134,7 @@ func (p *AIOpsPlugin) aggregateWorkloadLogs() {
 	for {
 		var buf bytes.Buffer
 		if err := json.NewEncoder(&buf).Encode(request); err != nil {
-			p.Logger.Error(fmt.Sprintf("Error: Unable to encode request: %s", err))
+			lg.Error(fmt.Sprintf("Error: Unable to encode request: %s", err))
 			return
 		}
 		res, err := p.osClient.Get().Search(
@@ -142,17 +145,17 @@ func (p *AIOpsPlugin) aggregateWorkloadLogs() {
 			p.osClient.Get().Search.WithPretty(),
 		)
 		if err != nil {
-			p.Logger.Error(fmt.Sprintf("Unable to connect to Opensearch %s", err))
+			lg.Error(fmt.Sprintf("Unable to connect to Opensearch %s", err))
 			return
 		}
 		defer res.Body.Close()
 		if res.IsError() {
-			p.Logger.Error(fmt.Sprintf("Error: %s", res.String()))
+			lg.Error(fmt.Sprintf("Error: %s", res.String()))
 			return
 		}
 		var result SearchResponse
 		if err := json.NewDecoder(res.Body).Decode(&result); err != nil {
-			p.Logger.Error(fmt.Sprintf("Error parsing the response body: %s", err))
+			lg.Error(fmt.Sprintf("Error parsing the response body: %s", err))
 			return
 		}
 		for _, b := range result.Aggregations.Bucket.Buckets {
@@ -167,12 +170,12 @@ func (p *AIOpsPlugin) aggregateWorkloadLogs() {
 	}
 	aggregatedResults, err := json.Marshal(resultAgg)
 	if err != nil {
-		p.Logger.Error(fmt.Sprintf("Error: %s", err))
+		lg.Error(fmt.Sprintf("Error: %s", err))
 		return
 	}
 	bytesAggregation := []byte(aggregatedResults)
 	p.aggregationKv.Get().Put("aggregation", bytesAggregation)
-	p.Logger.Info("Updated aggregation of deployments to Jetstream.")
+	lg.Info("Updated aggregation of deployments to Jetstream.")
 }
 
 func (p *AIOpsPlugin) runAggregation() {
diff --git a/plugins/aiops/pkg/gateway/modeltraining.go b/plugins/aiops/pkg/gateway/modeltraining.go
index dc10b8a29b..b5b450d91c 100644
--- a/plugins/aiops/pkg/gateway/modeltraining.go
+++ b/plugins/aiops/pkg/gateway/modeltraining.go
@@ -12,6 +12,7 @@ import (
 	backoffv2 "github.com/lestrrat-go/backoff/v2"
 	"github.com/nats-io/nats.go"
 	corev1 "github.com/rancher/opni/pkg/apis/core/v1"
+	"github.com/rancher/opni/pkg/logger"
 	"github.com/rancher/opni/plugins/aiops/apis/admin"
 	"github.com/rancher/opni/plugins/aiops/apis/modeltraining"
 	"google.golang.org/grpc/codes"
@@ -116,6 +117,7 @@ func (p *AIOpsPlugin) persistInitialJobInfo(ctx context.Context, in *modeltraini
 }
 
 func (p *AIOpsPlugin) TrainModel(ctx context.Context, in *modeltraining.ModelTrainingParametersList) (*modeltraining.ModelTrainingResponse, error) {
+	lg := logger.PluginLoggerFromContext(p.ctx)
 	parametersBytes, err := p.persistInitialJobInfo(ctx, in)
 	if err != nil {
 		return nil, err
@@ -123,7 +125,7 @@ func (p *AIOpsPlugin) TrainModel(ctx context.Context, in *modeltraining.ModelTra
 	_, err = p.LaunchAIServices(ctx)
 	if err != nil {
 		delErr := errors.Join(p.deleteTrainingJobInfo(ctx), err)
-		p.Logger.Error(fmt.Sprintf("failed to launch AI services : %s", delErr))
+		lg.Error(fmt.Sprintf("failed to launch AI services : %s", delErr))
 		return nil, status.Error(codes.FailedPrecondition, fmt.Sprintf("failed to launch AI services : %s", delErr))
 	}
 	resp, err := p.requestModelTraining(ctx, parametersBytes)
@@ -133,7 +135,7 @@ func (p *AIOpsPlugin) TrainModel(ctx context.Context, in *modeltraining.ModelTra
 		ctxca, ca := context.WithTimeout(ctx, 10*time.Second)
 		defer ca()
 		delErr := errors.Join(p.deleteTrainingJobInfo(ctxca), err)
-		p.Logger.Error(fmt.Sprintf("failed to request model training : %s", delErr))
+		lg.Error(fmt.Sprintf("failed to request model training : %s", delErr))
 		return nil, delErr
 	}
@@ -177,7 +179,8 @@ func (p *AIOpsPlugin) PutModelTrainingStatus(ctx context.Context, in *modeltrain
 }
 
 func (p *AIOpsPlugin) deleteModelTrainingStatus(ctx context.Context) error {
-	p.Logger.Info("Deleting model training status...")
+	lg := logger.PluginLoggerFromContext(p.ctx)
+	lg.Info("Deleting model training status...")
 	statisticsKv, err := p.statisticsKv.GetContext(ctx)
 	if err != nil {
 		return err
@@ -186,7 +189,8 @@ func (p *AIOpsPlugin) deleteModelTrainingStatus(ctx context.Context) error {
 }
 
 func (p *AIOpsPlugin) deleteModelTrainingParams(ctx context.Context) error {
-	p.Logger.Info("Deleting model training parameters...")
+	lg := logger.PluginLoggerFromContext(p.ctx)
+	lg.Info("Deleting model training parameters...")
 	modelTrainingKv, err := p.modelTrainingKv.GetContext(ctx)
 	if err != nil {
 		return err
diff --git a/plugins/aiops/pkg/gateway/plugin.go b/plugins/aiops/pkg/gateway/plugin.go
index 500fc56846..6139b7a906 100644
--- a/plugins/aiops/pkg/gateway/plugin.go
+++ b/plugins/aiops/pkg/gateway/plugin.go
@@ -4,8 +4,6 @@ import (
 	"context"
 	"os"
 
-	"log/slog"
-
 	"github.com/nats-io/nats.go"
 	"github.com/opensearch-project/opensearch-go"
 	"github.com/rancher/opni/apis"
@@ -30,7 +28,6 @@ type AIOpsPlugin struct {
 	admin.UnsafeAIAdminServer
 	system.UnimplementedSystemPluginClient
 	ctx            context.Context
-	Logger         *slog.Logger
 	k8sClient      client.Client
 	osClient       future.Future[*opensearch.Client]
 	natsConnection future.Future[*nats.Conn]
@@ -109,9 +106,10 @@ func NewPlugin(ctx context.Context, opts ...PluginOption) *AIOpsPlugin {
 		panic(err)
 	}
 
+	lg := logger.NewPluginLogger(ctx).WithGroup("modeltraining")
+	ctx = logger.WithPluginLogger(ctx, lg)
 	return &AIOpsPlugin{
 		PluginOptions:  options,
-		Logger:         logger.NewPluginLogger().WithGroup("modeltraining"),
 		ctx:            ctx,
 		natsConnection: future.New[*nats.Conn](),
 		aggregationKv:  future.New[nats.KeyValue](),
@@ -123,7 +121,7 @@ func NewPlugin(ctx context.Context, opts ...PluginOption) *AIOpsPlugin {
 }
 
 func (p *AIOpsPlugin) UseManagementAPI(_ managementv1.ManagementClient) {
-	lg := p.Logger
+	lg := logger.PluginLoggerFromContext(p.ctx)
 	nc, err := newNatsConnection()
 	if err != nil {
 		lg.Error("fatal", logger.Err(err))
diff --git a/plugins/aiops/pkg/gateway/system.go b/plugins/aiops/pkg/gateway/system.go
index 5a186958ff..2336654c4a 100644
--- a/plugins/aiops/pkg/gateway/system.go
+++ b/plugins/aiops/pkg/gateway/system.go
@@ -11,6 +11,7 @@ import (
 	backoffv2 "github.com/lestrrat-go/backoff/v2"
 	"github.com/nats-io/nats.go"
 	opensearch "github.com/opensearch-project/opensearch-go"
+	"github.com/rancher/opni/pkg/logger"
 	k8serrors "k8s.io/apimachinery/pkg/api/errors"
 	"k8s.io/apimachinery/pkg/types"
 	opsterv1 "opensearch.opster.io/api/v1"
@@ -43,6 +44,7 @@ func newNatsConnection() (*nats.Conn, error) {
 }
 
 func (s *AIOpsPlugin) setOpensearchConnection() {
+	lg := logger.PluginLoggerFromContext(s.ctx)
 	esEndpoint := fmt.Sprintf("https://opni-opensearch-svc.%s.svc:9200", s.storageNamespace)
 	retrier := backoffv2.Exponential(
 		backoffv2.WithMaxRetries(0),
@@ -56,7 +58,7 @@ FETCH:
 	for {
 		select {
 		case <-b.Done():
-			s.Logger.Warn("plugin context cancelled before Opensearch object created")
+			lg.Warn("plugin context cancelled before Opensearch object created")
 		case <-b.Next():
 			err := s.k8sClient.Get(s.ctx, types.NamespacedName{
 				Name:      "opni",
@@ -66,7 +68,7 @@ FETCH:
 				if k8serrors.IsNotFound(err) {
 					continue
 				}
-				s.Logger.Error(fmt.Sprintf("failed to check k8s object: %v", err))
+				lg.Error(fmt.Sprintf("failed to check k8s object: %v", err))
 				continue
 			}
 			break FETCH
diff --git a/plugins/alerting/pkg/agent/drivers/default_driver/driver.go b/plugins/alerting/pkg/agent/drivers/default_driver/driver.go
index eb2b3ca9ac..1053378ea0 100644
--- a/plugins/alerting/pkg/agent/drivers/default_driver/driver.go
+++ b/plugins/alerting/pkg/agent/drivers/default_driver/driver.go
@@ -7,8 +7,6 @@ import (
 
 	node_drivers "github.com/rancher/opni/plugins/alerting/pkg/agent/drivers"
 
-	"log/slog"
-
 	promoperatorv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
 	corev1 "github.com/rancher/opni/pkg/apis/core/v1"
 	"github.com/rancher/opni/pkg/logger"
@@ -23,8 +21,8 @@ import (
 )
 
 type NodeDriverOptions struct {
+	ctx       context.Context
 	K8sClient client.Client `option:"k8sClient"`
-	Logger    *slog.Logger  `option:"logger"`
 }
 
 type Driver struct {
@@ -116,9 +114,10 @@ var _ node_drivers.NodeDriver = (*Driver)(nil)
 
 func init() {
 	node_drivers.NodeDrivers.Register("k8s_driver", func(ctx context.Context, opts ...driverutil.Option) (node_drivers.NodeDriver, error) {
+		lg := logger.PluginLoggerFromContext(ctx).WithGroup("alerting").WithGroup("rule-discovery")
 		driverOptions := &NodeDriverOptions{
+			ctx:       logger.WithPluginLogger(ctx, lg),
 			K8sClient: nil,
-			Logger:    logger.NewPluginLogger().WithGroup("alerting").WithGroup("rule-discovery"),
 		}
 		if err := driverutil.ApplyOptions(driverOptions, opts...); err != nil {
 			return nil, err
diff --git a/plugins/alerting/pkg/agent/node.go b/plugins/alerting/pkg/agent/node.go
index 4da50770f8..2887066314 100644
--- a/plugins/alerting/pkg/agent/node.go
+++ b/plugins/alerting/pkg/agent/node.go
@@ -10,8 +10,6 @@ import (
 
 	"slices"
 
-	"log/slog"
-
 	capabilityv1 "github.com/rancher/opni/pkg/apis/capability/v1"
 	controlv1 "github.com/rancher/opni/pkg/apis/control/v1"
 	corev1 "github.com/rancher/opni/pkg/apis/core/v1"
@@ -35,7 +33,6 @@ type AlertingNode struct {
 	controlv1.UnsafeHealthServer
 
 	ctx        context.Context
-	lg         *slog.Logger
 	capability string
 
 	configMu sync.RWMutex
@@ -56,12 +53,10 @@ type AlertingNode struct {
 
 func NewAlertingNode(
 	ctx context.Context,
-	lg *slog.Logger,
 	ct health.ConditionTracker,
) *AlertingNode {
 	node := &AlertingNode{
 		ctx:        ctx,
-		lg:         lg,
 		conditions: ct,
 		capability: wellknown.CapabilityAlerting,
 		listeners:  []ConfigPropagator{},
@@ -102,12 +97,14 @@ func (s *AlertingNode) SetClients(
 }
 
 func (s *AlertingNode) SyncNow(_ context.Context, req *capabilityv1.Filter) (*emptypb.Empty, error) {
+	lg := logger.PluginLoggerFromContext(s.ctx)
+
 	if len(req.CapabilityNames) > 0 {
 		if !slices.Contains(req.CapabilityNames, s.capability) {
-			s.lg.Debug(fmt.Sprintf("ignoring sync request due to capability filter '%s'", s.capability))
+			lg.Debug(fmt.Sprintf("ignoring sync request due to capability filter '%s'", s.capability))
 			return &emptypb.Empty{}, nil
 		}
 	}
-	s.lg.Debug(fmt.Sprintf("received %s node sync request", s.capability))
+	lg.Debug(fmt.Sprintf("received %s node sync request", s.capability))
 	if !s.hasNodeSyncClient() {
 		return nil, status.Error(codes.Unavailable, "not connected to node server")
@@ -125,7 +122,9 @@ func (s *AlertingNode) SyncNow(_ context.Context, req *capabilityv1.Filter) (*em
 }
 
 func (s *AlertingNode) doSync(ctx context.Context) {
-	s.lg.Debug(fmt.Sprintf("syncing %s node", s.capability))
+	lg := logger.PluginLoggerFromContext(s.ctx)
+
+	lg.Debug(fmt.Sprintf("syncing %s node", s.capability))
 	if !s.hasNodeSyncClient() && !s.hasRemoteHealthClient() {
 		s.conditions.Set(health.CondConfigSync, health.StatusPending, "no clients set, skipping")
 		return
@@ -143,9 +142,9 @@ func (s *AlertingNode) doSync(ctx context.Context) {
 	s.conditions.Clear(health.CondConfigSync)
 	switch syncResp.GetConfigStatus() {
 	case corev1.ConfigStatus_UpToDate:
-		s.lg.Info(fmt.Sprintf("%s node config is up to date", s.capability))
+		lg.Info(fmt.Sprintf("%s node config is up to date", s.capability))
 	case corev1.ConfigStatus_NeedsUpdate:
-		s.lg.Info(fmt.Sprintf("%s updating node config", s.capability))
+		lg.Info(fmt.Sprintf("%s updating node config", s.capability))
 		if err := s.updateConfig(ctx, syncResp.GetUpdatedConfig()); err != nil {
 			s.conditions.Set(health.CondNodeDriver, health.StatusFailure, err.Error())
 			return
@@ -156,10 +155,12 @@ func (s *AlertingNode) doSync(ctx context.Context) {
 }
 
 func (s *AlertingNode) updateConfig(ctx context.Context, config *node.AlertingCapabilityConfig) error {
+	lg := logger.PluginLoggerFromContext(s.ctx)
+
 	s.idMu.RLock()
 	id, err := s.identityClient.Whoami(ctx, &emptypb.Empty{})
 	if err != nil {
-		s.lg.With(logger.Err(err)).Error(fmt.Sprintf("failed to fetch %s node id %s", s.capability, err))
+		lg.With(logger.Err(err)).Error(fmt.Sprintf("failed to fetch %s node id %s", s.capability, err))
 		return err
 	}
 	s.idMu.RUnlock()
@@ -183,7 +184,7 @@ func (s *AlertingNode) updateConfig(ctx context.Context, config *node.AlertingCa
 
 	if err := eg.Error(); err != nil {
 		s.config.Conditions = (append(s.config.GetConditions(), err.Error()))
-		s.lg.With(logger.Err(err)).Error(fmt.Sprintf("%s node configuration error", s.capability))
+		lg.With(logger.Err(err)).Error(fmt.Sprintf("%s node configuration error", s.capability))
 		return err
 	} else {
 		s.config = config
@@ -222,23 +223,25 @@ func (s *AlertingNode) GetHealth(_ context.Context, _ *emptypb.Empty) (*corev1.H
 }
 
 func (s *AlertingNode) sendHealthUpdate() {
+	lg := logger.PluginLoggerFromContext(s.ctx)
+
 	s.healthMu.RLock()
 	defer s.healthMu.RUnlock()
 	if !s.hasRemoteHealthClient() {
-		s.lg.Warn(fmt.Sprintf("failed to send %s node health update, remote health client not set", s.capability))
+		lg.Warn(fmt.Sprintf("failed to send %s node health update, remote health client not set", s.capability))
 		return
 	}
 
 	health, err := s.GetHealth(s.ctx, &emptypb.Empty{})
 	if err != nil {
-		s.lg.With(logger.Err(err)).Warn(fmt.Sprintf("failed to get %s node health", s.capability))
+		lg.With(logger.Err(err)).Warn(fmt.Sprintf("failed to get %s node health", s.capability))
 		return
 	}
 
 	if _, err := s.healthListenerClient.UpdateHealth(s.ctx, health); err != nil {
-		s.lg.With(logger.Err(err)).Warn(fmt.Sprintf("failed to send %s node health updates", s.capability))
+		lg.With(logger.Err(err)).Warn(fmt.Sprintf("failed to send %s node health updates", s.capability))
 	} else {
-		s.lg.Debug("send node health update")
+		lg.Debug("send node health update")
 	}
 }
diff --git a/plugins/alerting/pkg/agent/plugin.go b/plugins/alerting/pkg/agent/plugin.go
index b4d073a712..8c9033d906 100644
--- a/plugins/alerting/pkg/agent/plugin.go
+++ b/plugins/alerting/pkg/agent/plugin.go
@@ -4,8 +4,6 @@ import (
 	"context"
 	"fmt"
 
-	"log/slog"
-
 	healthpkg "github.com/rancher/opni/pkg/health"
 	"github.com/rancher/opni/pkg/logger"
 	"github.com/rancher/opni/pkg/plugins/apis/apiextensions/stream"
@@ -16,7 +14,6 @@ import (
 )
 
 type Plugin struct {
-	lg  *slog.Logger
 	ctx context.Context
 
 	ruleStreamer *RuleStreamer
@@ -25,17 +22,18 @@
 }
 
 func NewPlugin(ctx context.Context) *Plugin {
-	lg := logger.NewPluginLogger().WithGroup("alerting")
+	lg := logger.NewPluginLogger(ctx).WithGroup("alerting")
+	healthConfSyncLg := lg.With("component", "health-cfg-sync")
+	ruleStreamerLg := lg.With("component", "rule-streamer")
+	ctx = logger.WithPluginLogger(ctx, lg)
 
 	ct := healthpkg.NewDefaultConditionTracker(lg)
 	p := &Plugin{
 		ctx: ctx,
-		lg:  lg,
 	}
 
 	p.node = NewAlertingNode(
-		ctx,
-		p.lg.With("component", "health-cfg-sync"),
+		logger.WithPluginLogger(ctx, healthConfSyncLg),
 		ct,
 	)
@@ -58,8 +56,7 @@ func NewPlugin(ctx context.Context) *Plugin {
 		panic("no driver set")
 	}
 	p.ruleStreamer = NewRuleStreamer(
-		ctx,
-		lg.With("component", "rule-streamer"),
+		logger.WithPluginLogger(ctx, ruleStreamerLg),
 		ct,
 		p.driver,
 	)
@@ -69,9 +66,10 @@ func NewPlugin(ctx context.Context) *Plugin {
 
 func Scheme(ctx context.Context) meta.Scheme {
 	scheme := meta.NewScheme(meta.WithMode(meta.ModeAgent))
+
 	p := NewPlugin(ctx)
 	scheme.Add(capability.CapabilityBackendPluginID, capability.NewAgentPlugin(p.node))
 	scheme.Add(health.HealthPluginID, health.NewPlugin(p.node))
-	scheme.Add(stream.StreamAPIExtensionPluginID, stream.NewAgentPlugin(p))
+	scheme.Add(stream.StreamAPIExtensionPluginID, stream.NewAgentPlugin(ctx, p))
 	return scheme
 }
diff --git a/plugins/alerting/pkg/agent/rules.go b/plugins/alerting/pkg/agent/rules.go
index 91b5a51936..251057ad9a 100644
--- a/plugins/alerting/pkg/agent/rules.go
+++ b/plugins/alerting/pkg/agent/rules.go
@@ -6,8 +6,6 @@ import (
 	"sync"
 	"time"
 
-	"log/slog"
-
 	backoffv2 "github.com/lestrrat-go/backoff/v2"
 	healthpkg "github.com/rancher/opni/pkg/health"
 	"github.com/rancher/opni/pkg/logger"
@@ -24,7 +22,6 @@ const (
 
 type RuleStreamer struct {
 	parentCtx context.Context
-	lg        *slog.Logger
 
 	stopRuleStream context.CancelFunc
 
@@ -39,13 +36,11 @@ var _ drivers.ConfigPropagator = (*RuleStreamer)(nil)
 
 func NewRuleStreamer(
 	ctx context.Context,
-	lg *slog.Logger,
 	ct healthpkg.ConditionTracker,
 	nodeDriver drivers.NodeDriver,
) *RuleStreamer {
 	return &RuleStreamer{
 		parentCtx:      ctx,
-		lg:             lg,
 		conditions:     ct,
 		nodeDriver:     nodeDriver,
 		ruleSyncClient: nil,
@@ -71,7 +66,7 @@ func (r *RuleStreamer) ConfigureNode(nodeId string, cfg *node.AlertingCapability
 }
 
 func (r *RuleStreamer) configureRuleStreamer(nodeId string, cfg *node.AlertingCapabilityConfig) error {
-	lg := r.lg.With("nodeId", nodeId)
+	lg := logger.PluginLoggerFromContext(r.parentCtx).With("nodeId", nodeId)
 	lg.Debug("alerting capability updated")
 
 	currentlyRunning := r.stopRuleStream != nil
@@ -101,6 +96,8 @@ func (r *RuleStreamer) configureRuleStreamer(nodeId string, cfg *node.AlertingCa
 }
 
 func (r *RuleStreamer) sync(ctx context.Context) {
+	lg := logger.PluginLoggerFromContext(r.parentCtx)
+
 	r.conditions.Set(CondRuleSync, healthpkg.StatusPending, "")
 
 	retrier := backoffv2.Exponential(
@@ -113,12 +110,12 @@ func (r *RuleStreamer) sync(ctx context.Context) {
 	for backoffv2.Continue(b) {
 		if !r.isSet() {
 			r.conditions.Set(CondRuleSync, healthpkg.StatusFailure, "Rule sync client not set")
-			r.lg.Warn("rule sync client not yet set")
+			lg.Warn("rule sync client not yet set")
 			continue
 		}
 		ruleManifest, err := r.nodeDriver.DiscoverRules(ctx)
 		if err != nil {
-			r.lg.Warn("failed to discover rules", logger.Err(err))
+			lg.Warn("failed to discover rules",
				logger.Err(err))
 			r.conditions.Set(CondRuleSync, healthpkg.StatusFailure, fmt.Sprintf("Failed to discover rules : %s", err))
 			continue
 		}
@@ -128,16 +125,18 @@ func (r *RuleStreamer) sync(ctx context.Context) {
 		r.clientMu.RUnlock()
 		if err == nil {
 			r.conditions.Clear(CondRuleSync)
-			r.lg.Info(fmt.Sprintf("successfully synced (%d) rules with gateway", len(ruleManifest.GetRules())))
+			lg.Info(fmt.Sprintf("successfully synced (%d) rules with gateway", len(ruleManifest.GetRules())))
 			break
 		}
-		r.lg.Warn("failed to sync rules with gateway", logger.Err(err))
+		lg.Warn("failed to sync rules with gateway", logger.Err(err))
 		r.conditions.Set(CondRuleSync, healthpkg.StatusFailure, fmt.Sprintf("Failed to sync rules : %s", err))
 	}
 }
 
 func (r *RuleStreamer) run(ctx context.Context) {
-	r.lg.Info("starting initial sync...")
+	lg := logger.PluginLoggerFromContext(r.parentCtx)
+
+	lg.Info("starting initial sync...")
 	r.sync(ctx)
 	t := time.NewTicker(RuleSyncInterval)
 	defer t.Stop()
@@ -146,7 +145,7 @@ func (r *RuleStreamer) run(ctx context.Context) {
 		case <-t.C:
 			r.sync(ctx)
 		case <-ctx.Done():
-			r.lg.Info("Exiting rule sync loop")
+			lg.Info("Exiting rule sync loop")
 			return
 		}
 	}
diff --git a/plugins/alerting/pkg/alerting/admin.go b/plugins/alerting/pkg/alerting/admin.go
index 2a697326d9..e463f5a935 100644
--- a/plugins/alerting/pkg/alerting/admin.go
+++ b/plugins/alerting/pkg/alerting/admin.go
@@ -7,8 +7,6 @@ import (
 	"sync"
 	"time"
 
-	"log/slog"
-
 	"github.com/google/uuid"
 	"github.com/rancher/opni/pkg/alerting/shared"
 	"github.com/rancher/opni/pkg/alerting/storage/spec"
@@ -45,7 +43,7 @@ type RemoteInfo struct {
 }
 
 type SyncController struct {
-	lg *slog.Logger
+	ctx context.Context
 
 	hashMu sync.Mutex
 	syncMu sync.RWMutex
@@ -95,6 +93,7 @@ func (s *SyncController) ListRemoteInfo() map[string]RemoteInfo {
 }
 
 func (s *SyncController) PushSyncReq(payload *syncPayload) {
+	lg := logger.PluginLoggerFromContext(s.ctx)
 	for id, syncer := range s.syncPushers {
 		id := id
 		syncer := syncer
@@ -110,12 +109,14 @@ func (s *SyncController) PushSyncReq(payload *syncPayload) {
 			},
 		}:
 		default:
-			s.lg.With("syncer-id", id).Error("failed to push sync request : buffer already full")
+			lg.With("syncer-id", id).Error("failed to push sync request : buffer already full")
 		}
 	}
 }
 
 func (s *SyncController) PushOne(lifecycleId string, payload *syncPayload) {
+	lg := logger.PluginLoggerFromContext(s.ctx)
+
 	if _, ok := s.syncPushers[lifecycleId]; ok {
 		select {
 		case s.syncPushers[lifecycleId] <- &alertops.SyncRequest{
@@ -129,14 +130,14 @@ func (s *SyncController) PushOne(lifecycleId string, payload *syncPayload) {
 			},
 		}:
 		default:
-			s.lg.With("syncer-id", lifecycleId).Error("failed to push sync request : buffer already full")
+			lg.With("syncer-id", lifecycleId).Error("failed to push sync request : buffer already full")
 		}
 	}
 }
 
-func NewSyncController(lg *slog.Logger) SyncController {
+func NewSyncController(ctx context.Context) SyncController {
 	return SyncController{
-		lg:          lg,
+		ctx:         ctx,
 		syncPushers: map[string]chan *alertops.SyncRequest{},
 		remoteInfo:  map[string]RemoteInfo{},
 		syncMu:      sync.RWMutex{},
@@ -209,6 +210,7 @@ func (p *Plugin) InstallCluster(ctx context.Context, _ *emptypb.Empty) (*emptypb
 
 func (p *Plugin) UninstallCluster(ctx context.Context, request *alertops.UninstallRequest) (*emptypb.Empty, error) {
 	ctxTimeout, cancel := context.WithTimeout(ctx, 1*time.Second)
+	lg := logger.PluginLoggerFromContext(ctxTimeout)
 	defer cancel()
 	driver, err := p.clusterDriver.GetContext(ctxTimeout)
 	if err != nil {
@@ -218,7 +220,7 @@ func (p *Plugin) UninstallCluster(ctx context.Context, request *alertops.Uninsta
 	go func() {
 		err := p.storageClientSet.Get().Purge(context.Background())
 		if err != nil {
-			p.logger.Warn(fmt.Sprintf("failed to purge data %s", err))
+			lg.Warn(fmt.Sprintf("failed to purge data %s", err))
 		}
 	}()
 }
@@ -274,7 +276,8 @@ func (p *Plugin) constructManualSync() (*syncPayload, error) {
 
 func (p *Plugin) SyncConfig(server alertops.ConfigReconciler_SyncConfigServer) error {
 	assignedLifecycleUuid := uuid.New().String()
-	lg := p.logger.With("method", "SyncConfig", "assignedId", assignedLifecycleUuid)
+	lg := logger.PluginLoggerFromContext(p.ctx).With("method", "SyncConfig", "assignedId", assignedLifecycleUuid)
+
 	lg.Info(" remote syncer connected, performing initial sync...")
 	syncChan := make(chan *alertops.SyncRequest, 16)
 	defer close(syncChan)
@@ -354,7 +357,7 @@ func (p *Plugin) constructPartialSyncRequest(
 	p.syncController.hashMu.Lock()
 	defer p.syncController.hashMu.Unlock()
 
-	lg := p.logger.With("method", "constructSyncRequest")
+	lg := logger.PluginLoggerFromContext(p.ctx).With("method", "constructSyncRequest")
 	hash, err := hashRing.GetHash(ctx, shared.SingleConfigId)
 	if err != nil {
 		lg.Error(fmt.Sprintf("failed to get hash for %s: %s", shared.SingleConfigId, err))
@@ -392,7 +395,7 @@ func (p *Plugin) doConfigSync(ctx context.Context, syncInfo alertingSync.SyncInf
 	if !syncInfo.ShouldSync {
 		return nil
 	}
-	lg := p.logger.With("method", "doSync")
+	lg := logger.PluginLoggerFromContext(p.ctx).With("method", "doSync")
 	p.syncController.syncMu.Lock()
 	defer p.syncController.syncMu.Unlock()
 
@@ -448,7 +451,7 @@ func (p *Plugin) doConfigForceSync(ctx context.Context, syncInfo alertingSync.Sy
 	if !syncInfo.ShouldSync {
 		return nil
 	}
-	lg := p.logger.With("method", "doForceSync")
+	lg := logger.PluginLoggerFromContext(p.ctx).With("method", "doForceSync")
 	p.syncController.syncMu.Lock()
 	defer p.syncController.syncMu.Unlock()
 	ctxTimeout, ca := context.WithTimeout(ctx, 5*time.Second)
@@ -502,7 +505,7 @@ func (p *Plugin) getSyncInfo(ctx context.Context) (alertingSync.SyncInfo, error)
 }
 
 func (p *Plugin) runSyncTasks(tasks []alertingSync.SyncTask) (retErr error) {
-	lg := p.logger.With("action", "runSyncTasks")
+	lg := logger.PluginLoggerFromContext(p.ctx).With("action", "runSyncTasks")
 	ctx, ca := context.WithTimeout(p.ctx, 10*time.Second)
 	defer ca()
 	start := time.Now()
@@ -547,6 +550,7 @@ func (p *Plugin) runSyncTasks(tasks []alertingSync.SyncTask) (retErr error) {
 }
 
 func (p *Plugin) runSync() {
+	lg := logger.PluginLoggerFromContext(p.ctx)
 	ticker := p.syncController.heartbeatTicker
 	longTicker := p.syncController.forceSyncTicker
 	defer ticker.Stop()
@@ -560,15 +564,15 @@ func (p *Plugin) runSync() {
 	for {
 		select {
 		case <-p.ctx.Done():
-			p.logger.Info("exiting main sync loop")
+			lg.Info("exiting main sync loop")
 			return
 		case <-ticker.C:
 			if err := p.runSyncTasks(syncTasks); err != nil {
-				p.logger.Error(fmt.Sprintf("failed to successfully run all alerting sync tasks : %s", err))
+				lg.Error(fmt.Sprintf("failed to successfully run all alerting sync tasks : %s", err))
 			}
 		case <-longTicker.C:
 			if err := p.runSyncTasks(forceSyncTasks); err != nil {
-				p.logger.Error(fmt.Sprintf("failed to successfully run all alerting force sync tasks : %s", err))
+				lg.Error(fmt.Sprintf("failed to successfully run all alerting force sync tasks : %s", err))
 			}
 		}
 	}
diff --git a/plugins/alerting/pkg/alerting/alarms/v1/component.go b/plugins/alerting/pkg/alerting/alarms/v1/component.go
index 707b952a8d..8d4980f44e 100644
--- a/plugins/alerting/pkg/alerting/alarms/v1/component.go
+++ b/plugins/alerting/pkg/alerting/alarms/v1/component.go
@@ -5,14 +5,13 @@ import (
 	"fmt"
 	"sync"
 
-	"log/slog"
-
 	"github.com/nats-io/nats.go"
 	"github.com/rancher/opni/pkg/alerting/server"
 	alertingSync "github.com/rancher/opni/pkg/alerting/server/sync"
 	"github.com/rancher/opni/pkg/alerting/storage/spec"
 	alertingv1 "github.com/rancher/opni/pkg/apis/alerting/v1"
 	managementv1 "github.com/rancher/opni/pkg/apis/management/v1"
+	"github.com/rancher/opni/pkg/logger"
 	"github.com/rancher/opni/pkg/util"
 	"github.com/rancher/opni/pkg/util/future"
 	"github.com/rancher/opni/plugins/alerting/pkg/alerting/metrics"
@@ -30,14 +29,12 @@ type AlarmServerComponent struct {
 	alertingv1.UnsafeAlertConditionsServer
 	rules.UnsafeRuleSyncServer
 
-	util.Initializer
 	ctx context.Context
+	util.Initializer
 
 	mu sync.RWMutex
 	server.Config
 
-	logger *slog.Logger
-
 	runner        *Runner
 	notifications *notifications.NotificationServerComponent
@@ -58,12 +55,11 @@ type AlarmServerComponent struct {
 
 func NewAlarmServerComponent(
 	ctx context.Context,
-	logger *slog.Logger,
 	notifications *notifications.NotificationServerComponent,
) *AlarmServerComponent {
+
 	comp := &AlarmServerComponent{
 		ctx:              ctx,
-		logger:           logger,
 		runner:           NewRunner(),
 		notifications:    notifications,
 		conditionStorage: future.New[spec.ConditionStorage](),
@@ -117,6 +113,7 @@ func (a *AlarmServerComponent) SetConfig(conf server.Config) {
 }
 
 func (a *AlarmServerComponent) Sync(ctx context.Context, syncInfo alertingSync.SyncInfo) error {
+	lg := logger.PluginLoggerFromContext(a.ctx)
 	conditionStorage, err := a.conditionStorage.GetContext(a.ctx)
 	if err != nil {
 		return err
@@ -134,7 +131,7 @@ func (a *AlarmServerComponent) Sync(ctx context.Context, syncInfo alertingSync.S
 		conds = append(conds, groupConds...)
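The recurring shape of these changes is now visible: a component keeps a context rather than a logger, the constructor derives a scoped child logger and stores it back on a child context, and every method re-resolves the logger at the call site. A rough sketch of that wiring, assuming pkg/logger's helpers behave as they are used above (the `component` type here is hypothetical, not plugin code):

package main

import (
	"context"

	"github.com/rancher/opni/pkg/logger"
)

type component struct {
	ctx context.Context
}

func newComponent(ctx context.Context, name string) *component {
	// derive a per-component child logger and carry it on a child context,
	// mirroring NewPlugin's health-cfg-sync / rule-streamer wiring
	lg := logger.PluginLoggerFromContext(ctx).With("component", name)
	return &component{ctx: logger.WithPluginLogger(ctx, lg)}
}

func (c *component) doWork() {
	// re-resolve at the call site, as in the diff's
	// lg := logger.PluginLoggerFromContext(...) idiom
	logger.PluginLoggerFromContext(c.ctx).Info("working")
}

func main() {
	newComponent(context.Background(), "rule-streamer").doWork()
}

One trade-off worth noting: methods that take a request context but resolve their logger from the struct's context (as several handlers above do) keep their scoped attributes, at the cost of their log calls not observing request-scoped values.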
} eg := &util.MultiErrGroup{} - a.logger.Debug(fmt.Sprintf("syncing (%v) %d conditions", syncInfo.ShouldSync, len(conds))) + lg.Debug(fmt.Sprintf("syncing (%v) %d conditions", syncInfo.ShouldSync, len(conds))) for _, cond := range conds { cond := cond if syncInfo.ShouldSync { @@ -182,7 +179,7 @@ func (a *AlarmServerComponent) Sync(ctx context.Context, syncInfo alertingSync.S } eg.Wait() if len(eg.Errors()) > 0 { - a.logger.Error(fmt.Sprintf("successfully synced (%d/%d) conditions", len(conds)-len(eg.Errors()), len(conds))) + lg.Error(fmt.Sprintf("successfully synced (%d/%d) conditions", len(conds)-len(eg.Errors()), len(conds))) } if err := eg.Error(); err != nil { return err diff --git a/plugins/alerting/pkg/alerting/alarms/v1/server.go b/plugins/alerting/pkg/alerting/alarms/v1/server.go index ca1d2795fc..86bef2d7ef 100644 --- a/plugins/alerting/pkg/alerting/alarms/v1/server.go +++ b/plugins/alerting/pkg/alerting/alarms/v1/server.go @@ -15,6 +15,7 @@ import ( corev1 "github.com/rancher/opni/pkg/apis/core/v1" managementv1 "github.com/rancher/opni/pkg/apis/management/v1" "github.com/rancher/opni/pkg/caching" + "github.com/rancher/opni/pkg/logger" "github.com/rancher/opni/pkg/metrics/compat" "github.com/rancher/opni/pkg/util" "github.com/rancher/opni/pkg/validation" @@ -110,7 +111,7 @@ func (a *AlarmServerComponent) UpdateAlertCondition(ctx context.Context, req *al if err := req.Validate(); err != nil { return nil, err } - lg := a.logger.With("handler", "UpdateAlertCondition") + lg := logger.PluginLoggerFromContext(a.ctx).With("handler", "UpdateAlertCondition") lg.Debug(fmt.Sprintf("Updating alert condition %s", req.Id)) conditionStorage := a.conditionStorage.Get() conditionId := req.Id.Id @@ -169,7 +170,7 @@ func (a *AlarmServerComponent) AlertConditionStatus(ctx context.Context, ref *al if !a.Initialized() { return nil, status.Error(codes.Unavailable, "Alarm server is not yet available") } - lg := a.logger.With("handler", "AlertConditionStatus") + lg := logger.PluginLoggerFromContext(a.ctx).With("handler", "AlertConditionStatus") // required info cond, err := a.conditionStorage.Get().Group(ref.GroupId).Get(ctx, ref.Id) @@ -341,7 +342,7 @@ func (a *AlarmServerComponent) ListAlertConditionsWithStatus(ctx context.Context } func (a *AlarmServerComponent) CloneTo(ctx context.Context, req *alertingv1.CloneToRequest) (*emptypb.Empty, error) { - lg := a.logger.With("handler", "CloneTo") + lg := logger.PluginLoggerFromContext(a.ctx).With("handler", "CloneTo") if err := req.Validate(); err != nil { return nil, err } @@ -450,7 +451,7 @@ func (a *AlarmServerComponent) Timeline(ctx context.Context, req *alertingv1.Tim if err := req.Validate(); err != nil { return nil, err } - lg := a.logger.With("handler", "Timeline") + lg := logger.PluginLoggerFromContext(a.ctx).With("handler", "Timeline") conditions := []*alertingv1.AlertCondition{} var groupIds []string if req.Filters == nil || len(req.Filters.GroupIds) == 0 { @@ -491,7 +492,7 @@ func (a *AlarmServerComponent) Timeline(ctx context.Context, req *alertingv1.Tim if alertingv1.IsInternalCondition(cond) { activeWindows, err := a.incidentStorage.Get().GetActiveWindowsFromIncidentTracker(ctx, cond.Id, start, end) if err != nil { - a.logger.Error(fmt.Sprintf("failed to get active windows from agent incident tracker : %s", err)) + lg.Error(fmt.Sprintf("failed to get active windows from agent incident tracker : %s", err)) return } for _, w := range activeWindows { diff --git a/plugins/alerting/pkg/alerting/alarms/v1/setup.go 
b/plugins/alerting/pkg/alerting/alarms/v1/setup.go index dbea522890..7b7bb27367 100644 --- a/plugins/alerting/pkg/alerting/alarms/v1/setup.go +++ b/plugins/alerting/pkg/alerting/alarms/v1/setup.go @@ -13,6 +13,7 @@ import ( "github.com/rancher/opni/pkg/alerting/shared" alertingv1 "github.com/rancher/opni/pkg/apis/alerting/v1" corev1 "github.com/rancher/opni/pkg/apis/core/v1" + "github.com/rancher/opni/pkg/logger" "github.com/rancher/opni/plugins/metrics/apis/cortexadmin" "github.com/samber/lo" "gopkg.in/yaml.v3" @@ -108,7 +109,7 @@ func (p *AlarmServerComponent) activateCondition( cond *alertingv1.AlertCondition, conditionId string, ) (ref *corev1.Reference, retErr error) { - lg := p.logger.With("condition", cond.GetName(), "id", cond.GetId(), "cond-group", cond.GroupId) + lg := logger.PluginLoggerFromContext(p.ctx).With("condition", cond.GetName(), "id", cond.GetId(), "cond-group", cond.GroupId) lg.Info("activating alarm") conditionStorage, err := p.conditionStorage.GetContext(ctx) if err != nil { @@ -173,9 +174,10 @@ func (p *AlarmServerComponent) handleSystemAlertCreation( conditionName string, namespace string, ) error { + lg := logger.PluginLoggerFromContext(p.ctx) err := p.onSystemConditionCreate(newConditionId, conditionName, namespace, k) if err != nil { - p.logger.Error(fmt.Sprintf("failed to create agent condition %s", err)) + lg.Error(fmt.Sprintf("failed to create agent condition %s", err)) } return nil } @@ -187,9 +189,10 @@ func (p *AlarmServerComponent) handleDownstreamCapabilityAlertCreation( conditionName string, namespace string, ) error { + lg := logger.PluginLoggerFromContext(p.ctx) err := p.onDownstreamCapabilityConditionCreate(newConditionId, conditionName, namespace, k) if err != nil { - p.logger.Error(fmt.Sprintf("failed to create agent condition %s", err)) + lg.Error(fmt.Sprintf("failed to create agent condition %s", err)) } return nil } @@ -201,9 +204,10 @@ func (p *AlarmServerComponent) handleMonitoringBackendAlertCreation( conditionName string, namespace string, ) error { + lg := logger.PluginLoggerFromContext(p.ctx) err := p.onCortexClusterStatusCreate(newConditionId, conditionName, namespace, k) if err != nil { - p.logger.Error(fmt.Sprintf("failed to create cortex cluster condition %s", err)) + lg.Error(fmt.Sprintf("failed to create cortex cluster condition %s", err)) } return nil } diff --git a/plugins/alerting/pkg/alerting/alarms/v1/streams.go b/plugins/alerting/pkg/alerting/alarms/v1/streams.go index 97cdc05c99..1267fa346a 100644 --- a/plugins/alerting/pkg/alerting/alarms/v1/streams.go +++ b/plugins/alerting/pkg/alerting/alarms/v1/streams.go @@ -13,8 +13,6 @@ import ( corev1 "github.com/rancher/opni/pkg/apis/core/v1" "github.com/rancher/opni/pkg/logger" - "log/slog" - "github.com/nats-io/nats.go" "github.com/rancher/opni/pkg/alerting/fingerprint" "github.com/rancher/opni/pkg/alerting/message" @@ -82,7 +80,8 @@ func NewCortexStatusSubject() string { } func (p *AlarmServerComponent) onSystemConditionCreate(conditionId, conditionName, namespace string, condition *alertingv1.AlertCondition) error { - lg := p.logger.With("onSystemConditionCreate", conditionId) + lg := logger.PluginLoggerFromContext(p.ctx).With("onSystemConditionCreate", conditionId) + ctx := logger.WithPluginLogger(p.ctx, lg) lg.Debug(fmt.Sprintf("received condition update: %v", condition)) disconnect := condition.GetAlertType().GetSystem() jsCtx, cancel := context.WithCancel(p.ctx) @@ -92,12 +91,11 @@ func (p *AlarmServerComponent) onSystemConditionCreate(conditionId, conditionNam 
&internalConditionMetadata{ conditionId: conditionId, conditionName: conditionName, - lg: lg, clusterId: agentId, alertmanagerlabels: map[string]string{}, }, &internalConditionContext{ - parentCtx: p.ctx, + parentCtx: ctx, evaluationCtx: jsCtx, evaluateInterval: DisconnectStreamEvaluateInterval, cancelEvaluation: cancel, @@ -166,7 +164,8 @@ func (p *AlarmServerComponent) onSystemConditionCreate(conditionId, conditionNam } func (p *AlarmServerComponent) onDownstreamCapabilityConditionCreate(conditionId, conditionName, namespace string, condition *alertingv1.AlertCondition) error { - lg := p.logger.With("onCapabilityStatusCreate", conditionId) + lg := logger.PluginLoggerFromContext(p.ctx).With("onCapabilityStatusCreate", conditionId) + ctx := logger.WithPluginLogger(p.ctx, lg) capability := condition.GetAlertType().GetDownstreamCapability() lg.Debug(fmt.Sprintf("received condition update: %v", condition)) jsCtx, cancel := context.WithCancel(p.ctx) @@ -176,12 +175,11 @@ func (p *AlarmServerComponent) onDownstreamCapabilityConditionCreate(conditionId &internalConditionMetadata{ conditionId: conditionId, conditionName: conditionName, - lg: lg, clusterId: agentId, alertmanagerlabels: map[string]string{}, }, &internalConditionContext{ - parentCtx: p.ctx, + parentCtx: ctx, evaluationCtx: jsCtx, evaluateInterval: CapabilityStreamEvaluateInterval, cancelEvaluation: cancel, @@ -488,7 +486,8 @@ func reduceCortexAdminStates(componentsToTrack []string, cStatus *cortexadmin.Co } func (p *AlarmServerComponent) onCortexClusterStatusCreate(conditionId, conditionName, namespace string, condition *alertingv1.AlertCondition) error { - lg := p.logger.With("onCortexClusterStatusCreate", conditionId) + lg := logger.PluginLoggerFromContext(p.ctx).With("onCortexClusterStatusCreate", conditionId) + ctx := logger.WithPluginLogger(p.ctx, lg) cortex := condition.GetAlertType().GetMonitoringBackend() lg.Debug(fmt.Sprintf("received condition update: %v", condition)) jsCtx, cancel := context.WithCancel(p.ctx) @@ -498,12 +497,11 @@ func (p *AlarmServerComponent) onCortexClusterStatusCreate(conditionId, conditio &internalConditionMetadata{ conditionId: conditionId, conditionName: conditionName, - lg: lg, clusterId: "", // unused here alertmanagerlabels: map[string]string{}, }, &internalConditionContext{ - parentCtx: p.ctx, + parentCtx: ctx, evaluationCtx: jsCtx, evaluateInterval: CortexStreamEvaluateInterval, cancelEvaluation: cancel, @@ -566,7 +564,6 @@ func (p *AlarmServerComponent) onCortexClusterStatusCreate(conditionId, conditio } type internalConditionMetadata struct { - lg *slog.Logger conditionName string conditionId string clusterId string @@ -632,6 +629,7 @@ type InternalConditionEvaluator[T proto.Message] struct { // infinite & blocking : must be run in a goroutine func (c *InternalConditionEvaluator[T]) SubscriberLoop() { + lg := logger.PluginLoggerFromContext(c.parentCtx) defer c.cancelEvaluation() // replay consumer if it exists t := time.NewTicker(c.evaluateInterval) @@ -644,7 +642,7 @@ func (c *InternalConditionEvaluator[T]) SubscriberLoop() { case <-t.C: subStream, err := c.js.ChanSubscribe(c.streamSubject, c.msgCh) if err != nil { - c.lg.Warn("failed to subscribe to stream %s", err) + lg.Warn("failed to subscribe to stream %s", err) continue } defer subStream.Unsubscribe() @@ -658,16 +656,16 @@ func (c *InternalConditionEvaluator[T]) SubscriberLoop() { for { select { case <-c.parentCtx.Done(): - c.lg.Info("parent context is exiting, exiting evaluation loop") + lg.Info("parent context is exiting, exiting 
evaluation loop") return case <-c.evaluationCtx.Done(): - c.lg.Info("evaluation context is exiting, exiting evaluation loop") + lg.Info("evaluation context is exiting, exiting evaluation loop") return case msg := <-c.msgCh: var status T err := json.Unmarshal(msg.Data, &status) if err != nil { - c.lg.Error("error", logger.Err(err)) + lg.Error("error", logger.Err(err)) } healthy, md, ts := c.healthOnMessage(status) incomingState := alertingv1.CachedState{ @@ -684,25 +682,27 @@ func (c *InternalConditionEvaluator[T]) SubscriberLoop() { // infinite & blocking : must be run in a goroutine func (c *InternalConditionEvaluator[T]) EvaluateLoop() { + lg := logger.PluginLoggerFromContext(c.parentCtx) + defer c.cancelEvaluation() // cancel parent context, if we return (non-recoverable) ticker := time.NewTicker(c.evaluateInterval) defer ticker.Stop() for { select { case <-c.parentCtx.Done(): - c.lg.Info("parent context is exiting, exiting evaluation loop") + lg.Info("parent context is exiting, exiting evaluation loop") return case <-c.evaluationCtx.Done(): - c.lg.Info("evaluation context is exiting, exiting evaluation loop") + lg.Info("evaluation context is exiting, exiting evaluation loop") return case <-ticker.C: lastKnownState, err := c.stateStorage.Get(c.evaluationCtx, c.conditionId) if err != nil { - c.lg.With("id", c.conditionId, "name", c.conditionName).Error(fmt.Sprintf("failed to get last internal condition state %s", err)) + lg.With("id", c.conditionId, "name", c.conditionName).Error(fmt.Sprintf("failed to get last internal condition state %s", err)) continue } if !lastKnownState.Healthy { - c.lg.Debug(fmt.Sprintf("condition %s is unhealthy", c.conditionName)) + lg.Debug(fmt.Sprintf("condition %s is unhealthy", c.conditionName)) interval := timestamppb.Now().AsTime().Sub(lastKnownState.Timestamp.AsTime()) if interval > c.evaluateDuration { // then we must fire an alert if !c.IsFiring() { @@ -715,11 +715,11 @@ func (c *InternalConditionEvaluator[T]) EvaluateLoop() { Metadata: lastKnownState.Metadata, }) if err != nil { - c.lg.Error("error", logger.Err(err)) + lg.Error("error", logger.Err(err)) } err = c.incidentStorage.OpenInterval(c.evaluationCtx, c.conditionId, string(c.fingerprint), timestamppb.Now()) if err != nil { - c.lg.Error("error", logger.Err(err)) + lg.Error("error", logger.Err(err)) } } alertLabels := map[string]string{ @@ -735,17 +735,17 @@ func (c *InternalConditionEvaluator[T]) EvaluateLoop() { ) } - c.lg.Debug(fmt.Sprintf("triggering alert for condition %s", c.conditionName)) + lg.Debug(fmt.Sprintf("triggering alert for condition %s", c.conditionName)) c.triggerHook(c.evaluationCtx, c.conditionId, alertLabels, alertAnnotations) } } else if lastKnownState.Healthy && c.IsFiring() && // avoid potential noise from api streams & replays lastKnownState.Timestamp.AsTime().Add(-c.evaluateInterval).Before(time.Now()) { - c.lg.Debug(fmt.Sprintf("condition %s is now healthy again after having fired", c.conditionName)) + lg.Debug(fmt.Sprintf("condition %s is now healthy again after having fired", c.conditionName)) c.SetFiring(false) err = c.incidentStorage.CloseInterval(c.evaluationCtx, c.conditionId, string(c.fingerprint), timestamppb.Now()) if err != nil { - c.lg.Error("error", logger.Err(err)) + lg.Error("error", logger.Err(err)) } c.resolveHook(c.evaluationCtx, c.conditionId, map[string]string{ message.NotificationPropertyFingerprint: string(c.fingerprint), @@ -780,12 +780,14 @@ func (c *InternalConditionEvaluator[T]) UpdateState(ctx context.Context, s *aler } func (c 
*InternalConditionEvaluator[T]) CalculateInitialState() { + lg := logger.PluginLoggerFromContext(c.parentCtx) + incomingState := alertingv1.DefaultCachedState() if _, getErr := c.incidentStorage.Get(c.evaluationCtx, c.conditionId); getErr != nil { if status, ok := status.FromError(getErr); ok && status.Code() == codes.NotFound { err := c.incidentStorage.Put(c.evaluationCtx, c.conditionId, alertingv1.NewIncidentIntervals()) if err != nil { - c.lg.Error("error", logger.Err(err)) + lg.Error("error", logger.Err(err)) c.cancelEvaluation() return } @@ -794,7 +796,7 @@ func (c *InternalConditionEvaluator[T]) CalculateInitialState() { return } } else if getErr != nil { - c.lg.Error("error", logger.Err(getErr)) + lg.Error("error", logger.Err(getErr)) } if st, getErr := c.stateStorage.Get(c.evaluationCtx, c.conditionId); getErr != nil { if code, ok := status.FromError(getErr); ok && code.Code() == codes.NotFound { diff --git a/plugins/alerting/pkg/alerting/alarms/v1/templates.go b/plugins/alerting/pkg/alerting/alarms/v1/templates.go index f2435c788f..419e64d07e 100644 --- a/plugins/alerting/pkg/alerting/alarms/v1/templates.go +++ b/plugins/alerting/pkg/alerting/alarms/v1/templates.go @@ -157,7 +157,7 @@ func (p *AlarmServerComponent) fetchDownstreamCapabilityInfo(ctx context.Context } func (p *AlarmServerComponent) fetchKubeStateInfo(ctx context.Context) (*alertingv1.ListAlertTypeDetails, error) { - lg := p.logger.With("handler", "fetchKubeStateInfo") + lg := logger.PluginLoggerFromContext(p.ctx).With("handler", "fetchKubeStateInfo") resKubeState := &alertingv1.ListAlertConditionKubeState{ Clusters: map[string]*alertingv1.KubeObjectGroups{}, States: shared.KubeStates, @@ -268,7 +268,7 @@ type clusterCpuSaturation struct { } func (p *AlarmServerComponent) fetchCPUSaturationInfo(ctx context.Context) (*alertingv1.ListAlertTypeDetails, error) { - lg := p.logger.With("handler", "fetchCPUSaturationInfo") + lg := logger.PluginLoggerFromContext(p.ctx).With("handler", "fetchCPUSaturationInfo") clusters, err := p.mgmtClient.Get().ListClusters( caching.WithGrpcClientCaching(ctx, 1*time.Minute), &managementv1.ListClustersRequest{}, ) @@ -390,7 +390,7 @@ type clusterMemorySaturation struct { } func (p *AlarmServerComponent) fetchMemorySaturationInfo(ctx context.Context) (*alertingv1.ListAlertTypeDetails, error) { - lg := p.logger.With("handler", "fetchMemorySaturationInfo") + lg := logger.PluginLoggerFromContext(p.ctx).With("handler", "fetchMemorySaturationInfo") clusters, err := p.mgmtClient.Get().ListClusters( caching.WithGrpcClientCaching(ctx, 1*time.Minute), &managementv1.ListClustersRequest{}) @@ -516,7 +516,7 @@ type clusterFilesystemSaturation struct { } func (p *AlarmServerComponent) fetchFsSaturationInfo(ctx context.Context) (*alertingv1.ListAlertTypeDetails, error) { - lg := p.logger.With("handler", "fetchMemorySaturationInfo") + lg := logger.PluginLoggerFromContext(p.ctx).With("handler", "fetchFsSaturationInfo") clusters, err := p.mgmtClient.Get().ListClusters( caching.WithGrpcClientCaching(ctx, 1*time.Minute), &managementv1.ListClustersRequest{}, ) diff --git a/plugins/alerting/pkg/alerting/certs.go b/plugins/alerting/pkg/alerting/certs.go index 8fac689550..087bfe27f6 100644 --- a/plugins/alerting/pkg/alerting/certs.go +++ b/plugins/alerting/pkg/alerting/certs.go @@ -7,14 +7,17 @@ import ( "fmt" "os" "time" + + "github.com/rancher/opni/pkg/logger" ) func (p *Plugin) loadCerts() *tls.Config { - ctx, ca := context.WithTimeout(context.Background(), 10*time.Second) + ctx, ca := context.WithTimeout(p.ctx, 
10*time.Second) + lg := logger.PluginLoggerFromContext(p.ctx) defer ca() gwConfig, err := p.gatewayConfig.GetContext(ctx) if err != nil { - p.logger.Error(fmt.Sprintf("plugin startup failed : config was not loaded: %s", err)) + lg.Error(fmt.Sprintf("plugin startup failed : config was not loaded: %s", err)) os.Exit(1) } alertingServerCa := gwConfig.Spec.Alerting.Certs.ServerCA @@ -22,7 +25,7 @@ func (p *Plugin) loadCerts() *tls.Config { alertingClientCert := gwConfig.Spec.Alerting.Certs.ClientCert alertingClientKey := gwConfig.Spec.Alerting.Certs.ClientKey - p.logger.With( + lg.With( "alertingServerCa", alertingServerCa, "alertingClientCa", alertingClientCa, "alertingClientCert", alertingClientCert, @@ -31,31 +34,31 @@ func (p *Plugin) loadCerts() *tls.Config { clientCert, err := tls.LoadX509KeyPair(alertingClientCert, alertingClientKey) if err != nil { - p.logger.Error(fmt.Sprintf("failed to load alerting client key id : %s", err)) + lg.Error(fmt.Sprintf("failed to load alerting client key id : %s", err)) os.Exit(1) } serverCaPool := x509.NewCertPool() serverCaData, err := os.ReadFile(alertingServerCa) if err != nil { - p.logger.Error(fmt.Sprintf("failed to read alerting server CA %s", err)) + lg.Error(fmt.Sprintf("failed to read alerting server CA %s", err)) os.Exit(1) } if ok := serverCaPool.AppendCertsFromPEM(serverCaData); !ok { - p.logger.Error(fmt.Sprintf("failed to load alerting server CA %s", err)) + lg.Error("failed to load alerting server CA") os.Exit(1) } clientCaPool := x509.NewCertPool() clientCaData, err := os.ReadFile(alertingClientCa) if err != nil { - p.logger.Error(fmt.Sprintf("failed to load alerting client CA : %s", err)) + lg.Error(fmt.Sprintf("failed to load alerting client CA : %s", err)) os.Exit(1) } if ok := clientCaPool.AppendCertsFromPEM(clientCaData); !ok { - p.logger.Error("failed to load alerting client Ca") + lg.Error("failed to load alerting client CA") os.Exit(1) } diff --git a/plugins/alerting/pkg/alerting/drivers/alerting_manager/cluster_driver.go b/plugins/alerting/pkg/alerting/drivers/alerting_manager/cluster_driver.go index 355c1d9119..1658aca3e0 100644 --- a/plugins/alerting/pkg/alerting/drivers/alerting_manager/cluster_driver.go +++ b/plugins/alerting/pkg/alerting/drivers/alerting_manager/cluster_driver.go @@ -85,7 +85,7 @@ func (a *AlertingClusterManager) newAlertingClusterCrd() *corev1beta1.AlertingCl } func (a *AlertingClusterManager) InstallCluster(ctx context.Context, _ *emptypb.Empty) (*emptypb.Empty, error) { - lg := a.Logger.With("action", "install-cluster") + lg := logger.PluginLoggerFromContext(a.Context).With("action", "install-cluster") mutator := func(cl *corev1beta1.AlertingCluster) { cl.Spec.Alertmanager.Enable = true cl.Spec.Alertmanager.ApplicationSpec.ExtraArgs = []string{ @@ -177,7 +177,7 @@ func (a *AlertingClusterManager) ConfigureCluster(ctx context.Context, conf *ale if err := conf.Validate(); err != nil { return nil, err } - lg := a.Logger.With("action", "configure-cluster") + lg := logger.PluginLoggerFromContext(a.Context).With("action", "configure-cluster") cpuLimit, err := resource.ParseQuantity(conf.ResourceLimits.Cpu) if err != nil { return nil, err @@ -376,7 +376,9 @@ func listPeers(replicas int) []alertingClient.AlertingPeer { } func init() { - drivers.Drivers.Register("alerting-manager", func(_ context.Context, opts ...driverutil.Option) (drivers.ClusterDriver, error) { + drivers.Drivers.Register("alerting-manager", func(ctx context.Context, opts ...driverutil.Option) (drivers.ClusterDriver, error) 
{ + lg := logger.NewPluginLogger(ctx).WithGroup("alerting").WithGroup("alerting-manager") + options := AlertingDriverOptions{ GatewayRef: types.NamespacedName{ Namespace: os.Getenv("POD_NAMESPACE"), @@ -384,7 +386,7 @@ func init() { }, ConfigKey: shared.AlertManagerConfigKey, InternalRoutingKey: shared.InternalRoutingConfigKey, - Logger: logger.NewPluginLogger().WithGroup("alerting").WithGroup("alerting-manager"), + Context: logger.WithPluginLogger(ctx, lg), } driverutil.ApplyOptions(&options, opts...) return NewAlertingClusterManager(options) diff --git a/plugins/alerting/pkg/alerting/drivers/alerting_manager/driver_test.go b/plugins/alerting/pkg/alerting/drivers/alerting_manager/driver_test.go index 3ba7fef338..87afb3136d 100644 --- a/plugins/alerting/pkg/alerting/drivers/alerting_manager/driver_test.go +++ b/plugins/alerting/pkg/alerting/drivers/alerting_manager/driver_test.go @@ -1,6 +1,7 @@ package alerting_manager_test import ( + "context" "crypto/tls" . "github.com/onsi/ginkgo/v2" @@ -20,10 +21,13 @@ var _ = Describe("", Label("unit"), func() { driverutil.NewOption("tlsConfig", tlsConfig), } + ctx := context.Background() + lg := logger.NewPluginLogger(ctx).WithGroup("alerting").WithGroup("alerting-manager") + options := alerting_manager.AlertingDriverOptions{ ConfigKey: shared.AlertManagerConfigKey, InternalRoutingKey: shared.InternalRoutingConfigKey, - Logger: logger.NewPluginLogger().WithGroup("alerting").WithGroup("alerting-manager"), + Context: logger.WithPluginLogger(ctx, lg), } driverutil.ApplyOptions(&options, opts...) Expect(options.TlsConfig).NotTo(BeNil()) @@ -35,11 +39,13 @@ var _ = Describe("", Label("unit"), func() { opts := []driverutil.Option{ driverutil.NewOption("subscribers", []chan client.AlertingClient{subscriberA, subscriberB}), } + ctx := context.Background() + lg := logger.NewPluginLogger(ctx).WithGroup("alerting").WithGroup("alerting-manager") options := alerting_manager.AlertingDriverOptions{ ConfigKey: shared.AlertManagerConfigKey, InternalRoutingKey: shared.InternalRoutingConfigKey, - Logger: logger.NewPluginLogger().WithGroup("alerting").WithGroup("alerting-manager"), + Context: logger.WithPluginLogger(ctx, lg), } driverutil.ApplyOptions(&options, opts...) 
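
The hunks above and below replace stored *slog.Logger fields with loggers carried on a context.Context. For reference, a minimal sketch of what the pkg/logger helpers presumably look like, assuming the conventional unexported-context-key pattern (only the names WithPluginLogger and PluginLoggerFromContext come from this change; the bodies are illustrative):

package logger

import (
	"context"
	"log/slog"
)

// pluginLoggerKey is unexported so other packages cannot collide with it.
type pluginLoggerKey struct{}

// WithPluginLogger returns a child context carrying lg.
func WithPluginLogger(ctx context.Context, lg *slog.Logger) context.Context {
	return context.WithValue(ctx, pluginLoggerKey{}, lg)
}

// PluginLoggerFromContext recovers the logger stored by WithPluginLogger,
// falling back to slog.Default so callers never receive nil.
func PluginLoggerFromContext(ctx context.Context) *slog.Logger {
	if lg, ok := ctx.Value(pluginLoggerKey{}).(*slog.Logger); ok {
		return lg
	}
	return slog.Default()
}

A non-nil fallback keeps every call site total: code reading from a context that was never wrapped still logs, just without the plugin-scoped groups and fields.
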
Expect(options.Subscribers).To(HaveLen(2)) diff --git a/plugins/alerting/pkg/alerting/drivers/alerting_manager/options.go b/plugins/alerting/pkg/alerting/drivers/alerting_manager/options.go index c0bb937b25..883651b3bd 100644 --- a/plugins/alerting/pkg/alerting/drivers/alerting_manager/options.go +++ b/plugins/alerting/pkg/alerting/drivers/alerting_manager/options.go @@ -1,8 +1,8 @@ package alerting_manager import ( + "context" "crypto/tls" - "log/slog" alertingClient "github.com/rancher/opni/pkg/alerting/client" "github.com/rancher/opni/pkg/alerting/shared" @@ -11,7 +11,7 @@ import ( ) type AlertingDriverOptions struct { - Logger *slog.Logger `option:"logger"` + Context context.Context `option:"context"` K8sClient client.Client `option:"k8sClient"` GatewayRef types.NamespacedName `option:"gatewayRef"` ConfigKey string `option:"configKey"` diff --git a/plugins/alerting/pkg/alerting/endpoints/v1/component.go b/plugins/alerting/pkg/alerting/endpoints/v1/component.go index 012fcad64f..0363bf5035 100644 --- a/plugins/alerting/pkg/alerting/endpoints/v1/component.go +++ b/plugins/alerting/pkg/alerting/endpoints/v1/component.go @@ -11,7 +11,6 @@ import ( "github.com/rancher/opni/pkg/util" "github.com/rancher/opni/pkg/util/future" notifications "github.com/rancher/opni/plugins/alerting/pkg/alerting/notifications/v1" - "log/slog" ) type manualSync func(ctx context.Context, hashRing spec.HashRing, routers spec.RouterStorage) error @@ -27,8 +26,6 @@ type EndpointServerComponent struct { notifications *notifications.NotificationServerComponent - logger *slog.Logger - endpointStorage future.Future[spec.EndpointStorage] conditionStorage future.Future[spec.ConditionStorage] routerStorage future.Future[spec.RouterStorage] @@ -39,12 +36,10 @@ var _ server.ServerComponent = (*EndpointServerComponent)(nil) func NewEndpointServerComponent( ctx context.Context, - logger *slog.Logger, notifications *notifications.NotificationServerComponent, ) *EndpointServerComponent { return &EndpointServerComponent{ ctx: ctx, - logger: logger, notifications: notifications, endpointStorage: future.New[spec.EndpointStorage](), conditionStorage: future.New[spec.ConditionStorage](), diff --git a/plugins/alerting/pkg/alerting/http.go b/plugins/alerting/pkg/alerting/http.go index e04513f6b0..b74270af29 100644 --- a/plugins/alerting/pkg/alerting/http.go +++ b/plugins/alerting/pkg/alerting/http.go @@ -13,7 +13,8 @@ import ( var _ httpext.HTTPAPIExtension = (*Plugin)(nil) func (p *Plugin) ConfigureRoutes(router *gin.Engine) { - router.Use(logger.GinLogger(p.logger.With("component", "http-proxy")), gin.Recovery()) + lg := logger.PluginLoggerFromContext(p.ctx) + router.Use(logger.GinLogger(lg.With("component", "http-proxy")), gin.Recovery()) p.hsServer.ConfigureRoutes(router) p.httpProxy.ConfigureRoutes(router) } diff --git a/plugins/alerting/pkg/alerting/impl.go b/plugins/alerting/pkg/alerting/impl.go index e2ed324402..5f60ba03f5 100644 --- a/plugins/alerting/pkg/alerting/impl.go +++ b/plugins/alerting/pkg/alerting/impl.go @@ -4,6 +4,7 @@ import ( "context" "fmt" + "github.com/rancher/opni/pkg/logger" "github.com/rancher/opni/pkg/management" "github.com/rancher/opni/plugins/alerting/pkg/alerting/alarms/v1" @@ -13,6 +14,7 @@ import ( ) func (p *Plugin) newClusterWatcherHooks(ctx context.Context, ingressStream *nats.StreamConfig) *management.ManagementWatcherHooks[*managementv1.WatchEvent] { + lg := logger.PluginLoggerFromContext(p.ctx) err := natsutil.NewPersistentStream(p.js.Get(), ingressStream) if err != nil { panic(err) @@ -22,7 
+24,7 @@ func (p *Plugin) newClusterWatcherHooks(ctx context.Context, ingressStream *nats createClusterEvent, func(ctx context.Context, event *managementv1.WatchEvent) error { err := natsutil.NewDurableReplayConsumer(p.js.Get(), ingressStream.Name, alarms.NewAgentDurableReplayConsumer(event.Cluster.Id)) - p.logger.Info(fmt.Sprintf("added durable ordered push consumer for cluster %s", event.Cluster.Id)) + lg.Info(fmt.Sprintf("added durable ordered push consumer for cluster %s", event.Cluster.Id)) if err != nil { panic(err) } diff --git a/plugins/alerting/pkg/alerting/management.go b/plugins/alerting/pkg/alerting/management.go index 2238c2b6cc..c0fb208944 100644 --- a/plugins/alerting/pkg/alerting/management.go +++ b/plugins/alerting/pkg/alerting/management.go @@ -26,13 +26,14 @@ import ( ) func (p *Plugin) configureDriver(ctx context.Context, opts ...driverutil.Option) { + lg := logger.PluginLoggerFromContext(p.ctx) priorityOrder := []string{"alerting-manager", "gateway-manager", "local-alerting", "test-environment", "noop"} for _, name := range priorityOrder { if builder, ok := drivers.Drivers.Get(name); ok { - p.logger.With("driver", name).Info("using cluster driver") + lg.With("driver", name).Info("using cluster driver") driver, err := builder(ctx, opts...) if err != nil { - p.logger.With( + lg.With( "driver", name, logger.Err(err), ).Error("failed to initialize cluster driver") @@ -46,7 +47,7 @@ func (p *Plugin) configureDriver(ctx context.Context, opts ...driverutil.Option) // blocking func (p *Plugin) watchCortexClusterStatus() { - lg := p.logger.With("watcher", "cortex-cluster-status") + lg := logger.PluginLoggerFromContext(p.ctx).With("watcher", "cortex-cluster-status") err := natsutil.NewPersistentStream(p.js.Get(), alarms.NewCortexStatusStream()) if err != nil { panic(err) @@ -96,11 +97,11 @@ func (p *Plugin) watchCortexClusterStatus() { go func() { cortexStatusData, err := json.Marshal(ccStatus) if err != nil { - p.logger.Error(fmt.Sprintf("failed to marshal cortex cluster status: %s", err)) + lg.Error(fmt.Sprintf("failed to marshal cortex cluster status: %s", err)) } _, err = p.js.Get().PublishAsync(alarms.NewCortexStatusSubject(), cortexStatusData) if err != nil { - p.logger.Error(fmt.Sprintf("failed to publish cortex cluster status : %s", err)) + lg.Error(fmt.Sprintf("failed to publish cortex cluster status : %s", err)) } }() } @@ -112,9 +113,10 @@ func (p *Plugin) watchGlobalCluster( client managementv1.ManagementClient, watcher *management.ManagementWatcherHooks[*managementv1.WatchEvent], ) { + lg := logger.PluginLoggerFromContext(p.ctx) clusterClient, err := client.WatchClusters(p.ctx, &managementv1.WatchClustersRequest{}) if err != nil { - p.logger.Error("failed to watch clusters, exiting...") + lg.Error("failed to watch clusters, exiting...") os.Exit(1) } for { @@ -124,7 +126,7 @@ func (p *Plugin) watchGlobalCluster( default: event, err := clusterClient.Recv() if err != nil { - p.logger.Error(fmt.Sprintf("failed to receive cluster event : %s", err)) + lg.Error(fmt.Sprintf("failed to receive cluster event : %s", err)) continue } watcher.HandleEvent(event) @@ -137,6 +139,7 @@ func (p *Plugin) publishInitialStatus( cl *corev1.Cluster, ingressStream *nats.StreamConfig, ) { + lg := logger.PluginLoggerFromContext(p.ctx) retries := 10 for i := retries; i > 0; i-- { select { @@ -145,7 +148,7 @@ func (p *Plugin) publishInitialStatus( if err == nil { clusterStatusData, err := json.Marshal(clusterStatus) if err != nil { - p.logger.Error(fmt.Sprintf("failed to marshal cluster health 
status: %s", err)) + lg.Error(fmt.Sprintf("failed to marshal cluster health status: %s", err)) continue } @@ -154,14 +157,14 @@ func (p *Plugin) publishInitialStatus( return } if err != nil { - p.logger.Error(fmt.Sprintf("failed to publish cluster health status : %s", err)) + lg.Error(fmt.Sprintf("failed to publish cluster health status : %s", err)) } } else { - p.logger.Warn(fmt.Sprintf("failed to read cluster health status on startup for cluster %s : %s, retrying...", cl.GetId(), err.Error())) + lg.Warn(fmt.Sprintf("failed to read cluster health status on startup for cluster %s : %s, retrying...", cl.GetId(), err.Error())) } } } - p.logger.Info(fmt.Sprintf("manually setting %s cluster's status to disconnected", cl.GetId())) + lg.Info(fmt.Sprintf("manually setting %s cluster's status to disconnected", cl.GetId())) msg := &corev1.ClusterHealthStatus{ Cluster: &corev1.Reference{ Id: cl.GetId(), @@ -183,7 +186,7 @@ func (p *Plugin) publishInitialStatus( data, err := json.Marshal(msg) if err != nil { - p.logger.Error(fmt.Sprintf("failed to marshal default message %s", err)) + lg.Error(fmt.Sprintf("failed to marshal default message %s", err)) return } p.js.Get().PublishAsync(alarms.NewAgentStreamSubject(cl.GetId()), data) @@ -191,19 +194,20 @@ func (p *Plugin) publishInitialStatus( // blocking func (p *Plugin) watchGlobalClusterHealthStatus(client managementv1.ManagementClient, ingressStream *nats.StreamConfig) { + lg := logger.PluginLoggerFromContext(p.ctx) err := natsutil.NewPersistentStream(p.js.Get(), ingressStream) if err != nil { panic(err) } clusterStatusClient, err := client.WatchClusterHealthStatus(p.ctx, &emptypb.Empty{}) if err != nil { - p.logger.Error("failed to watch cluster health status, exiting...") + lg.Error("failed to watch cluster health status, exiting...") os.Exit(1) } // on startup always send a manual read in case the gateway was down when the agent status changed cls, err := client.ListClusters(p.ctx, &managementv1.ListClustersRequest{}) if err != nil { - p.logger.Error("failed to list clusters, exiting...") + lg.Error("failed to list clusters, exiting...") os.Exit(1) } for _, cl := range cls.Items { @@ -228,17 +232,17 @@ func (p *Plugin) watchGlobalClusterHealthStatus(client managementv1.ManagementCl default: clusterStatus, err := clusterStatusClient.Recv() if err != nil { - p.logger.Warn("failed to receive cluster health status from grpc stream, retrying...") + lg.Warn("failed to receive cluster health status from grpc stream, retrying...") continue } clusterStatusData, err := json.Marshal(clusterStatus) if err != nil { - p.logger.Error(fmt.Sprintf("failed to marshal cluster health status: %s", err)) + lg.Error(fmt.Sprintf("failed to marshal cluster health status: %s", err)) continue } _, err = p.js.Get().PublishAsync(alarms.NewAgentStreamSubject(clusterStatus.Cluster.Id), clusterStatusData) if err != nil { - p.logger.Error(fmt.Sprintf("failed to publish cluster health status : %s", err)) + lg.Error(fmt.Sprintf("failed to publish cluster health status : %s", err)) } } } diff --git a/plugins/alerting/pkg/alerting/notifications/v1/component.go b/plugins/alerting/pkg/alerting/notifications/v1/component.go index 397eb34ce4..6c63aedec1 100644 --- a/plugins/alerting/pkg/alerting/notifications/v1/component.go +++ b/plugins/alerting/pkg/alerting/notifications/v1/component.go @@ -10,19 +10,17 @@ import ( alertingv1 "github.com/rancher/opni/pkg/apis/alerting/v1" "github.com/rancher/opni/pkg/util" "github.com/rancher/opni/pkg/util/future" - "log/slog" ) type 
NotificationServerComponent struct { alertingv1.UnsafeAlertNotificationsServer + ctx context.Context util.Initializer mu sync.Mutex server.Config - logger *slog.Logger - conditionStorage future.Future[spec.ConditionStorage] endpointStorage future.Future[spec.EndpointStorage] } @@ -30,10 +28,10 @@ type NotificationServerComponent struct { var _ server.ServerComponent = (*NotificationServerComponent)(nil) func NewNotificationServerComponent( - logger *slog.Logger, + ctx context.Context, ) *NotificationServerComponent { return &NotificationServerComponent{ - logger: logger, + ctx: ctx, conditionStorage: future.New[spec.ConditionStorage](), endpointStorage: future.New[spec.EndpointStorage](), } diff --git a/plugins/alerting/pkg/alerting/plugin.go b/plugins/alerting/pkg/alerting/plugin.go index a53af154b5..cf81d9150e 100644 --- a/plugins/alerting/pkg/alerting/plugin.go +++ b/plugins/alerting/pkg/alerting/plugin.go @@ -28,7 +28,6 @@ import ( "github.com/rancher/opni/plugins/alerting/pkg/alerting/endpoints/v1" "github.com/rancher/opni/plugins/alerting/pkg/alerting/notifications/v1" "github.com/rancher/opni/plugins/alerting/pkg/node_backend" - "log/slog" managementv1 "github.com/rancher/opni/pkg/apis/management/v1" "github.com/rancher/opni/pkg/logger" @@ -56,8 +55,7 @@ type Plugin struct { alertops.ConfigReconcilerServer system.UnimplementedSystemPluginClient - ctx context.Context - logger *slog.Logger + ctx context.Context storageClientSet future.Future[spec.AlertingClientSet] @@ -98,7 +96,9 @@ var ( ) func NewPlugin(ctx context.Context) *Plugin { - lg := logger.NewPluginLogger().WithGroup("alerting") + lg := logger.NewPluginLogger(ctx).WithGroup("alerting") + ctx = logger.WithPluginLogger(ctx, lg) + storageClientSet := future.New[spec.AlertingClientSet]() metricReader := metricsdk.NewManualReader() metricsExporter.RegisterMeterProvider(metricsdk.NewMeterProvider( @@ -106,8 +106,7 @@ func NewPlugin(ctx context.Context) *Plugin { )) collector := collector.NewCollectorServer(metricReader) p := &Plugin{ - ctx: ctx, - logger: lg, + ctx: ctx, storageClientSet: storageClientSet, @@ -131,29 +130,34 @@ func NewPlugin(ctx context.Context) *Plugin { CollectorServer: collector, } - p.syncController = NewSyncController(p.logger.With("component", "sync-controller")) + syncCtrlLg := lg.With("component", "sync-controller") + p.syncController = NewSyncController(logger.WithPluginLogger(ctx, syncCtrlLg)) p.hsServer = newHealthStatusServer( p.ready, p.healthy, ) + httpProxyLg := lg.With("component", "http-proxy") p.httpProxy = proxy.NewProxyServer( - lg.With("component", "http-proxy"), + logger.WithPluginLogger(ctx, httpProxyLg), ) + nodeLg := lg.With("component", "node-backend") p.node = *node_backend.NewAlertingNodeBackend( - p.logger.With("component", "node-backend"), + logger.WithPluginLogger(ctx, nodeLg), ) + + notificationLg := lg.With("component", "notifications") p.NotificationServerComponent = notifications.NewNotificationServerComponent( - p.logger.With("component", "notifications"), + logger.WithPluginLogger(ctx, notificationLg), ) + endpointLg := lg.With("component", "endpoints") p.EndpointServerComponent = endpoints.NewEndpointServerComponent( - p.ctx, - p.logger.With("component", "endpoints"), + logger.WithPluginLogger(ctx, endpointLg), p.NotificationServerComponent, ) + alarmLg := lg.With("component", "alarms") p.AlarmServerComponent = alarms.NewAlarmServerComponent( - p.ctx, - p.logger.With("component", "alarms"), + logger.WithPluginLogger(ctx, alarmLg), p.NotificationServerComponent, ) @@ -251,6 
+255,7 @@ var ( func Scheme(ctx context.Context) meta.Scheme { scheme := meta.NewScheme() + p := NewPlugin(ctx) scheme.Add(system.SystemPluginID, system.NewPlugin(p)) scheme.Add(httpext.HTTPAPIExtensionPluginID, httpext.NewPlugin(p)) @@ -289,6 +294,6 @@ func Scheme(ctx context.Context) meta.Scheme { scheme.Add(metrics.MetricsPluginID, metrics.NewPlugin(p)) scheme.Add(capability.CapabilityBackendPluginID, capability.NewPlugin(&p.node)) - scheme.Add(streamext.StreamAPIExtensionPluginID, streamext.NewGatewayPlugin(p)) + scheme.Add(streamext.StreamAPIExtensionPluginID, streamext.NewGatewayPlugin(ctx, p)) return scheme } diff --git a/plugins/alerting/pkg/alerting/proxy/proxy.go b/plugins/alerting/pkg/alerting/proxy/proxy.go index effd78d41b..4121a98f79 100644 --- a/plugins/alerting/pkg/alerting/proxy/proxy.go +++ b/plugins/alerting/pkg/alerting/proxy/proxy.go @@ -4,7 +4,6 @@ import ( "context" "crypto/tls" "fmt" - "log/slog" "net/http" "net/http/httputil" "sync" @@ -13,6 +12,7 @@ import ( "github.com/gin-gonic/gin" "github.com/rancher/opni/pkg/alerting/server" ssync "github.com/rancher/opni/pkg/alerting/server/sync" + "github.com/rancher/opni/pkg/logger" httpext "github.com/rancher/opni/pkg/plugins/apis/apiextensions/http" "github.com/rancher/opni/pkg/util" "github.com/rancher/opni/pkg/util/future" @@ -21,7 +21,7 @@ import ( const proxyPath = "/plugin_alerting/alertmanager" type ProxyServer struct { - lg *slog.Logger + ctx context.Context *alertmanagerProxy } @@ -55,11 +55,11 @@ func (p *ProxyServer) SetConfig(cfg server.Config) { } func NewProxyServer( - lg *slog.Logger, + ctx context.Context, ) *ProxyServer { return &ProxyServer{ - lg: lg, - alertmanagerProxy: newAlertmanagerProxy(lg), + ctx: ctx, + alertmanagerProxy: newAlertmanagerProxy(ctx), } } @@ -80,7 +80,7 @@ var _ httpext.HTTPAPIExtension = (*ProxyServer)(nil) type alertmanagerProxy struct { util.Initializer - lg *slog.Logger + ctx context.Context tlsConfig future.Future[*tls.Config] configMu sync.RWMutex @@ -94,20 +94,21 @@ func (a *alertmanagerProxy) Initialize(tlsConfig *tls.Config) { } func (a *alertmanagerProxy) SetConfig(config server.Config) { + lg := logger.PluginLoggerFromContext(a.ctx) a.configMu.Lock() defer a.configMu.Unlock() if config.Client == nil { - a.lg.Info("disabling alertmanager proxy...") + lg.Info("disabling alertmanager proxy...") a.reverseProxy = nil return } targetURL := config.Client.ProxyClient().ProxyURL() - a.lg.Info(fmt.Sprintf("configuring alertmanager proxy to : %s", targetURL.String())) + lg.Info(fmt.Sprintf("configuring alertmanager proxy to : %s", targetURL.String())) ctxca, ca := context.WithTimeout(context.Background(), time.Second) defer ca() tlsConfig, err := a.tlsConfig.GetContext(ctxca) if err != nil { - a.lg.Error("tls config for alertmanager reverse proxy is not initialized") + lg.Error("tls config for alertmanager reverse proxy is not initialized") a.reverseProxy = nil return } @@ -119,10 +120,10 @@ func (a *alertmanagerProxy) SetConfig(config server.Config) { a.reverseProxy = reverseProxy } -func newAlertmanagerProxy(lg *slog.Logger) *alertmanagerProxy { +func newAlertmanagerProxy(ctx context.Context) *alertmanagerProxy { return &alertmanagerProxy{ + ctx: ctx, reverseProxy: nil, - lg: lg, tlsConfig: future.New[*tls.Config](), } } diff --git a/plugins/alerting/pkg/alerting/sync.go b/plugins/alerting/pkg/alerting/sync.go index a2e90cd5a9..a9e86c7688 100644 --- a/plugins/alerting/pkg/alerting/sync.go +++ b/plugins/alerting/pkg/alerting/sync.go @@ -9,6 +9,7 @@ import ( 
"github.com/rancher/opni/pkg/alerting/storage/opts" "github.com/rancher/opni/pkg/capabilities/wellknown" "github.com/rancher/opni/pkg/health" + "github.com/rancher/opni/pkg/logger" "google.golang.org/protobuf/types/known/durationpb" alertingv1 "github.com/rancher/opni/pkg/apis/alerting/v1" @@ -108,9 +109,10 @@ var ( ) func (p *Plugin) createDefaultDisconnect(clusterId string) error { + lg := logger.PluginLoggerFromContext(p.ctx) conditions, err := p.storageClientSet.Get().Conditions().Group("").List(p.ctx, opts.WithUnredacted()) if err != nil { - p.logger.Error(fmt.Sprintf("failed to list alert conditions : %s", err)) + lg.Error(fmt.Sprintf("failed to list alert conditions : %s", err)) return err } disconnectExists := false @@ -127,12 +129,12 @@ func (p *Plugin) createDefaultDisconnect(clusterId string) error { } _, err = p.CreateAlertCondition(p.ctx, DefaultDisconnectAlarm(clusterId)) if err != nil { - p.logger.Warn(fmt.Sprintf( + lg.Warn(fmt.Sprintf( "could not create a downstream agent disconnect condition on cluster creation for cluster %s", clusterId)) } else { - p.logger.Debug(fmt.Sprintf( + lg.Debug(fmt.Sprintf( "downstream agent disconnect condition on cluster creation for cluster %s is now active", clusterId)) @@ -141,9 +143,11 @@ func (p *Plugin) createDefaultDisconnect(clusterId string) error { } func (p *Plugin) onDeleteClusterAgentDisconnectHook(ctx context.Context, clusterId string) error { + lg := logger.PluginLoggerFromContext(p.ctx) + conditions, err := p.storageClientSet.Get().Conditions().Group("").List(p.ctx, opts.WithUnredacted()) if err != nil { - p.logger.Error(fmt.Sprintf("failed to list conditions from storage : %s", err)) + lg.Error(fmt.Sprintf("failed to list conditions from storage : %s", err)) } var wg sync.WaitGroup for _, cond := range conditions { @@ -157,7 +161,7 @@ func (p *Plugin) onDeleteClusterAgentDisconnectHook(ctx context.Context, cluster Id: cond.Id, }) if err != nil { - p.logger.Error(fmt.Sprintf("failed to delete condition %s : %s", cond.Id, err)) + lg.Error(fmt.Sprintf("failed to delete condition %s : %s", cond.Id, err)) } }() } @@ -168,9 +172,11 @@ func (p *Plugin) onDeleteClusterAgentDisconnectHook(ctx context.Context, cluster } func (p *Plugin) createDefaultCapabilityHealth(clusterId string) error { + lg := logger.PluginLoggerFromContext(p.ctx) + items, err := p.storageClientSet.Get().Conditions().Group("").List(p.ctx, opts.WithUnredacted()) if err != nil { - p.logger.Error(fmt.Sprintf("failed to list alert conditions : %s", err)) + lg.Error(fmt.Sprintf("failed to list alert conditions : %s", err)) return err } healthExists := false @@ -189,12 +195,12 @@ func (p *Plugin) createDefaultCapabilityHealth(clusterId string) error { _, err = p.CreateAlertCondition(p.ctx, DefaultCapabilityHealthAlarm(clusterId)) if err != nil { - p.logger.Warn(fmt.Sprintf( + lg.Warn(fmt.Sprintf( "could not create a default downstream capability health condition on cluster creation for cluster %s", clusterId)) } else { - p.logger.Debug(fmt.Sprintf( + lg.Debug(fmt.Sprintf( "downstream agent disconnect condition on cluster creation for cluster %s is now active", clusterId)) @@ -203,9 +209,11 @@ func (p *Plugin) createDefaultCapabilityHealth(clusterId string) error { } func (p *Plugin) onDeleteClusterCapabilityHook(ctx context.Context, clusterId string) error { + lg := logger.PluginLoggerFromContext(p.ctx) + conditions, err := p.storageClientSet.Get().Conditions().Group("").List(p.ctx, opts.WithUnredacted()) if err != nil { - p.logger.Error(fmt.Sprintf("failed to list 
conditions from storage : %s", err)) + lg.Error(fmt.Sprintf("failed to list conditions from storage : %s", err)) } var wg sync.WaitGroup for _, cond := range conditions { @@ -219,7 +227,7 @@ func (p *Plugin) onDeleteClusterCapabilityHook(ctx context.Context, clusterId st Id: cond.Id, }) if err != nil { - p.logger.Error(fmt.Sprintf("failed to delete condition %s : %s", cond.Id, err)) + lg.Error(fmt.Sprintf("failed to delete condition %s : %s", cond.Id, err)) } }() } diff --git a/plugins/alerting/pkg/alerting/system.go b/plugins/alerting/pkg/alerting/system.go index 615bd816de..5a49e13034 100644 --- a/plugins/alerting/pkg/alerting/system.go +++ b/plugins/alerting/pkg/alerting/system.go @@ -39,19 +39,21 @@ import ( ) func (p *Plugin) UseManagementAPI(client managementv1.ManagementClient) { + lg := logger.PluginLoggerFromContext(p.ctx) + opt := &shared.AlertingClusterOptions{} p.mgmtClient.Set(client) cfg, err := client.GetConfig(context.Background(), &emptypb.Empty{}, grpc.WaitForReady(true)) if err != nil { - p.logger.With( + lg.With( "err", err, ).Error("Failed to get mgmnt config") os.Exit(1) } objectList, err := machinery.LoadDocuments(cfg.Documents) if err != nil { - p.logger.With( + lg.With( "err", err, ).Error("failed to load config") os.Exit(1) @@ -60,7 +62,7 @@ func (p *Plugin) UseManagementAPI(client managementv1.ManagementClient) { p.gatewayConfig.Set(config) backend, err := machinery.ConfigureStorageBackend(p.ctx, &config.Spec.Storage) if err != nil { - p.logger.With(logger.Err(err)).Error("failed to configure storage backend") + lg.With(logger.Err(err)).Error("failed to configure storage backend") os.Exit(1) } p.storageBackend.Set(backend) @@ -81,7 +83,7 @@ func (p *Plugin) UseManagementAPI(client managementv1.ManagementClient) { p.configureDriver( p.ctx, driverutil.NewOption("alertingOptions", opt), - driverutil.NewOption("logger", p.logger.WithGroup("alerting-manager")), + driverutil.NewOption("logger", lg.WithGroup("alerting-manager")), driverutil.NewOption("subscribers", []chan alertingClient.AlertingClient{p.clusterNotifier}), driverutil.NewOption("tlsConfig", tlsConfig), ) @@ -94,6 +96,8 @@ func (p *Plugin) UseManagementAPI(client managementv1.ManagementClient) { // UseKeyValueStore Alerting Condition & Alert Endpoints are stored in K,V stores func (p *Plugin) UseKeyValueStore(client system.KeyValueStoreClient) { + lg := logger.PluginLoggerFromContext(p.ctx) + p.capabilitySpecStore.Set(node_backend.CapabilitySpecKV{ DefaultCapabilitySpec: kvutil.WithKey(system.NewKVStoreClient[*node.AlertingCapabilitySpec](client), "/alerting/config/capability/default"), NodeCapabilitySpecs: kvutil.WithPrefix(system.NewKVStoreClient[*node.AlertingCapabilitySpec](client), "/alerting/config/capability/nodes"), @@ -119,19 +123,19 @@ func (p *Plugin) UseKeyValueStore(client system.KeyValueStoreClient) { nc, err = natsutil.AcquireNATSConnection( p.ctx, cfg, - natsutil.WithLogger(p.logger), + natsutil.WithLogger(lg), natsutil.WithNatsOptions([]nats.Option{ nats.ErrorHandler(func(nc *nats.Conn, s *nats.Subscription, err error) { if s != nil { - p.logger.Error("nats : async error in %q/%q: %v", s.Subject, s.Queue, err) + lg.Error(fmt.Sprintf("nats : async error in %q/%q: %v", s.Subject, s.Queue, err)) } else { - p.logger.Warn("nats : async error outside subscription") + lg.Warn("nats : async error outside subscription") } }), }), ) if err != nil { - p.logger.With(logger.Err(err)).Error("fatal error connecting to NATs") + lg.With(logger.Err(err)).Error("fatal error connecting to NATS") } p.natsConn.Set(nc) mgr, 
err := p.natsConn.Get().JetStream() @@ -149,13 +153,13 @@ func (p *Plugin) UseKeyValueStore(client system.KeyValueStoreClient) { } clStatus, err := p.GetClusterStatus(p.ctx, &emptypb.Empty{}) if err != nil { - p.logger.With(logger.Err(err)).Error("failed to get cluster status") + lg.With(logger.Err(err)).Error("failed to get cluster status") return } if clStatus.State == alertops.InstallState_Installed || clStatus.State == alertops.InstallState_InstallUpdating { syncInfo, err := p.getSyncInfo(p.ctx) if err != nil { - p.logger.With(logger.Err(err)).Error("failed to get sync info") + lg.With(logger.Err(err)).Error("failed to get sync info") } else { for _, comp := range p.Components() { comp.Sync(p.ctx, syncInfo) @@ -163,16 +167,16 @@ func (p *Plugin) UseKeyValueStore(client system.KeyValueStoreClient) { } conf, err := p.GetClusterConfiguration(p.ctx, &emptypb.Empty{}) if err != nil { - p.logger.With(logger.Err(err)).Error("failed to get cluster configuration") + lg.With(logger.Err(err)).Error("failed to get cluster configuration") return } peers := listPeers(int(conf.GetNumReplicas())) - p.logger.Info(fmt.Sprintf("reindexing known alerting peers to : %v", peers)) + lg.Info(fmt.Sprintf("reindexing known alerting peers to : %v", peers)) ctxca, ca := context.WithTimeout(context.Background(), 5*time.Second) defer ca() alertingClient, err := p.alertingClient.GetContext(ctxca) if err != nil { - p.logger.Error(err.Error()) + lg.Error(err.Error()) return } @@ -192,10 +196,12 @@ func UseCachingProvider(c caching.CachingProvider[proto.Message]) { } func (p *Plugin) UseAPIExtensions(intf system.ExtensionClientInterface) { + lg := logger.PluginLoggerFromContext(p.ctx) + services := []string{"CortexAdmin", "CortexOps"} cc, err := intf.GetClientConn(p.ctx, services...) 
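
A recurring idiom in these hunks: fetch the plugin logger from the component's context, attach call-scoped fields with With, then re-wrap the result into a child context so goroutines and callees inherit those fields without any struct plumbing. Sketched in isolation (handleCondition and the field name are illustrative):

package alerting

import (
	"context"

	"github.com/rancher/opni/pkg/logger"
)

func handleCondition(ctx context.Context, conditionId string) {
	// derive a call-scoped logger, then rewrap it into a child context
	lg := logger.PluginLoggerFromContext(ctx).With("conditionId", conditionId)
	ctx = logger.WithPluginLogger(ctx, lg)

	go func() {
		// downstream code recovers the scoped logger from the context alone
		logger.PluginLoggerFromContext(ctx).Info("evaluating condition")
	}()
}
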
if err != nil { - p.logger.With(logger.Err(err)).Error(fmt.Sprintf("failed to get required clients for alerting : %s", strings.Join(services, ","))) + lg.With(logger.Err(err)).Error(fmt.Sprintf("failed to get required clients for alerting : %s", strings.Join(services, ","))) if p.ctx.Err() != nil { // Plugin is shutting down, don't exit return @@ -207,13 +213,15 @@ func (p *Plugin) UseAPIExtensions(intf system.ExtensionClientInterface) { } func (p *Plugin) handleDriverNotifications() { + lg := logger.PluginLoggerFromContext(p.ctx) + for { select { case <-p.ctx.Done(): - p.logger.Info("shutting down cluster driver update handler") + lg.Info("shutting down cluster driver update handler") return case client := <-p.clusterNotifier: - p.logger.Info("updating alerting client based on cluster status") + lg.Info("updating alerting client based on cluster status") serverCfg := server.Config{ Client: client.Clone(), } diff --git a/plugins/alerting/pkg/node_backend/backend.go b/plugins/alerting/pkg/node_backend/backend.go index 8d6ecdc4b7..5c10f3b8dd 100644 --- a/plugins/alerting/pkg/node_backend/backend.go +++ b/plugins/alerting/pkg/node_backend/backend.go @@ -6,8 +6,6 @@ import ( "sync" "sync/atomic" - "log/slog" - "github.com/google/go-cmp/cmp" "github.com/rancher/opni/pkg/agent" capabilityv1 "github.com/rancher/opni/pkg/apis/capability/v1" @@ -45,7 +43,7 @@ type AlertingNodeBackend struct { node.UnsafeNodeAlertingCapabilityServer node.UnsafeAlertingNodeConfigurationServer - lg *slog.Logger + ctx context.Context nodeStatusMu sync.RWMutex nodeStatus map[string]*capabilityv1.NodeCapabilityStatus @@ -58,10 +56,10 @@ type AlertingNodeBackend struct { } func NewAlertingNodeBackend( - lg *slog.Logger, + ctx context.Context, ) *AlertingNodeBackend { return &AlertingNodeBackend{ - lg: lg, + ctx: ctx, delegate: future.New[streamext.StreamDelegate[agent.ClientSet]](), mgmtClient: future.New[managementv1.ManagementClient](), storageBackend: future.New[storage.Backend](), @@ -118,6 +116,8 @@ func (a *AlertingNodeBackend) requestNodeSync(ctx context.Context, target *corev func (a *AlertingNodeBackend) broadcastNodeSync(ctx context.Context) { // keep any metadata in the context, but don't propagate cancellation ctx = context.WithoutCancel(ctx) + lg := logger.PluginLoggerFromContext(a.ctx) + var errs []error a.delegate.Get(). 
WithBroadcastSelector(&corev1.ClusterSelector{}, func(reply any, msg *streamv1.BroadcastReplyList) error { @@ -134,7 +134,7 @@ func (a *AlertingNodeBackend) broadcastNodeSync(ctx context.Context) { CapabilityNames: []string{wellknown.CapabilityAlerting}, }) if len(errs) > 0 { - a.lg.With( + lg.With( logger.Err(errors.Join(errs...)), ).Warn("one or more agents failed to sync; they may not be updated immediately") } @@ -236,17 +236,21 @@ func (a *AlertingNodeBackend) getDefaultNodeSpec(ctx context.Context) (*node.Ale } func (a *AlertingNodeBackend) getNodeSpecOrDefault(ctx context.Context, id string) (*node.AlertingCapabilitySpec, error) { + lg := logger.PluginLoggerFromContext(a.ctx) + nodeSpec, err := a.capabilityKV.Get().NodeCapabilitySpecs.Get(ctx, id) if status.Code(err) == codes.NotFound { return a.getDefaultNodeSpec(ctx) } else if err != nil { - a.lg.With(logger.Err(err)).Error("failed to get node capability spec") + lg.With(logger.Err(err)).Error("failed to get node capability spec") return nil, status.Errorf(codes.Unavailable, "failed to get node capability spec: %v", err) } return nodeSpec, nil } func (a *AlertingNodeBackend) Sync(ctx context.Context, req *node.AlertingCapabilityConfig) (*node.SyncResponse, error) { + lg := logger.PluginLoggerFromContext(a.ctx) + if !a.Initialized() { return nil, status.Error(codes.Unavailable, "Alerting Node Backend is not yet initialized") } @@ -278,7 +282,7 @@ func (a *AlertingNodeBackend) Sync(ctx context.Context, req *node.AlertingCapabi status.Conditions = req.GetConditions() status.LastSync = timestamppb.Now() - a.lg.With( + lg.With( "id", id, "time", status.LastSync.AsTime(), ).Debug("synced node") diff --git a/plugins/alerting/test/test_drivers.go b/plugins/alerting/test/test_drivers.go index 1accd5721b..15b4e56be5 100644 --- a/plugins/alerting/test/test_drivers.go +++ b/plugins/alerting/test/test_drivers.go @@ -22,8 +22,6 @@ import ( "slices" - "log/slog" - "github.com/prometheus/common/model" "github.com/rancher/opni/pkg/alerting/client" "github.com/rancher/opni/pkg/alerting/drivers/config" @@ -77,12 +75,11 @@ type TestEnvAlertingClusterDriverOptions struct { type TestEnvAlertingClusterDriver struct { env *test.Environment + ctx context.Context managedInstances []AlertingServerUnit enabled *atomic.Bool ConfigFile string stateMu *sync.RWMutex - logger *slog.Logger - *shared.AlertingClusterOptions *alertops.ClusterConfiguration @@ -106,8 +103,7 @@ func NewTestEnvAlertingClusterDriver(env *test.Environment, options TestEnvAlert panic(err) } configFile := path.Join(dir, "alertmanager.yaml") - lg := logger.NewPluginLogger().WithGroup("alerting-test-cluster-driver") - lg = lg.With("config-file", configFile) + lg := logger.NewPluginLogger(env.Context()).WithGroup("alerting-test-cluster-driver").With("config-file", configFile) initial := &atomic.Bool{} initial.Store(false) @@ -133,6 +129,7 @@ func NewTestEnvAlertingClusterDriver(env *test.Environment, options TestEnvAlert return &TestEnvAlertingClusterDriver{ env: env, + ctx: logger.WithPluginLogger(env.Context(), lg), managedInstances: []AlertingServerUnit{}, enabled: initial, ConfigFile: configFile, @@ -140,7 +137,6 @@ func NewTestEnvAlertingClusterDriver(env *test.Environment, options TestEnvAlert ClusterConfiguration: &alertops.ClusterConfiguration{ ResourceLimits: &alertops.ResourceLimitSpec{}, }, - logger: lg, subscribers: options.Subscribers, stateMu: &sync.RWMutex{}, embdServerAddress: opniAddr, @@ -209,6 +205,7 @@ func (l *TestEnvAlertingClusterDriver) ConfigureCluster(_ 
context.Context, confi } func (l *TestEnvAlertingClusterDriver) GetClusterStatus(ctx context.Context, _ *emptypb.Empty) (*alertops.InstallStatus, error) { + lg := logger.PluginLoggerFromContext(l.ctx) if !l.enabled.Load() { return &alertops.InstallStatus{ State: alertops.InstallState_NotInstalled, @@ -222,7 +219,7 @@ func (l *TestEnvAlertingClusterDriver) GetClusterStatus(ctx context.Context, _ * }, nil } if err := l.AlertingClient.StatusClient().Ready(ctx); err != nil { - l.logger.Error("error", logger.Err(err)) + lg.Error("error", logger.Err(err)) return &alertops.InstallStatus{ State: alertops.InstallState_InstallUpdating, }, nil @@ -336,6 +333,7 @@ func (l *TestEnvAlertingClusterDriver) StartAlertingBackendServer( ctx context.Context, configFilePath string, ) AlertingServerUnit { + lg := logger.PluginLoggerFromContext(l.ctx) opniBin := path.Join(l.env.TestBin, "../../bin/opni") webPort := freeport.GetFreePort() opniPort := freeport.GetFreePort() @@ -391,8 +389,8 @@ func (l *TestEnvAlertingClusterDriver) StartAlertingBackendServer( ctxCa, cancelFunc := context.WithCancel(ctx) alertmanagerCmd := exec.CommandContext(ctxCa, opniBin, alertmanagerArgs...) plugins.ConfigureSysProcAttr(alertmanagerCmd) - l.logger.Debug("Starting opni alertmanagwer with : " + strings.Join(alertmanagerArgs, " ")) - l.logger.With("alertmanager-port", webPort, "opni-port", opniPort).Info("Starting AlertManager") + lg.Debug("Starting opni alertmanager with : " + strings.Join(alertmanagerArgs, " ")) + lg.With("alertmanager-port", webPort, "opni-port", opniPort).Info("Starting AlertManager") session, err := testutil.StartCmd(alertmanagerCmd) if err != nil { if !errors.Is(ctx.Err(), context.Canceled) { @@ -418,14 +416,14 @@ func (l *TestEnvAlertingClusterDriver) StartAlertingBackendServer( if err == nil { defer resp.Body.Close() if resp.StatusCode == http.StatusOK { - l.logger.Info("Alertmanager successfully started") + lg.Info("Alertmanager successfully started") break } else { body, err := io.ReadAll(resp.Body) if err != nil { - l.logger.Warn(err.Error()) + lg.Warn(err.Error()) } - l.logger. + lg. With("code", resp.StatusCode, "resp", string(body)). Warn(fmt.Sprintf("Alertmanager not ready yet : %d", resp.StatusCode)) } @@ -437,10 +435,10 @@ func (l *TestEnvAlertingClusterDriver) StartAlertingBackendServer( } } - l.logger.Debug("Syncer starting with : " + strings.Join(syncerArgs, " ")) + lg.Debug("Syncer starting with : " + strings.Join(syncerArgs, " ")) syncerCmd := exec.CommandContext(ctxCa, opniBin, syncerArgs...) 
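
The teardown a few lines below uses context.AfterFunc (Go 1.21+), which registers a callback that runs once when the context ends. A standalone illustration of that shutdown pattern, with a sleep command standing in for the alertmanager process:

package main

import (
	"context"
	"os/exec"
	"time"
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())

	cmd := exec.Command("sleep", "60")
	if err := cmd.Start(); err != nil {
		panic(err)
	}

	// fires exactly once, as soon as ctx is done
	stop := context.AfterFunc(ctx, func() {
		_ = cmd.Process.Kill()
	})
	defer stop()

	time.Sleep(100 * time.Millisecond)
	cancel()       // triggers the AfterFunc, which kills the child
	_ = cmd.Wait() // reaps the process; the error is the kill signal
}
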
plugins.ConfigureSysProcAttr(syncerCmd) - l.logger.With("port", syncerPort).Info("Starting AlertManager Syncer") + lg.With("port", syncerPort).Info("Starting AlertManager Syncer") _, err = testutil.StartCmd(syncerCmd) if err != nil { if !errors.Is(ctx.Err(), context.Canceled) { @@ -450,7 +448,7 @@ func (l *TestEnvAlertingClusterDriver) StartAlertingBackendServer( } } - l.logger.With("address", fmt.Sprintf("http://127.0.0.1:%d", webPort)).Info("AlertManager started") + lg.With("address", fmt.Sprintf("http://127.0.0.1:%d", webPort)).Info("AlertManager started") context.AfterFunc(ctx, func() { cmd, _ := session.G() if cmd != nil { diff --git a/plugins/example/pkg/example/plugin.go b/plugins/example/pkg/example/plugin.go index 5f661d8726..63351ad8f4 100644 --- a/plugins/example/pkg/example/plugin.go +++ b/plugins/example/pkg/example/plugin.go @@ -45,8 +45,7 @@ type ExamplePlugin struct { UnsafeExampleUnaryExtensionServer capabilityv1.UnsafeBackendServer system.UnimplementedSystemPluginClient - ctx context.Context - logger *slog.Logger + ctx context.Context storageBackend future.Future[storage.Backend] uninstallController future.Future[*task.Controller] @@ -85,21 +84,22 @@ func (s *ExamplePlugin) UseCachingProvider(cacheProvider caching.CachingProvider } func (s *ExamplePlugin) UseManagementAPI(client managementv1.ManagementClient) { + lg := logger.PluginLoggerFromContext(s.ctx) cfg, err := client.GetConfig(context.Background(), &emptypb.Empty{}, grpc.WaitForReady(true)) if err != nil { - s.logger.With(logger.Err(err)).Error("failed to get config") + lg.With(logger.Err(err)).Error("failed to get config") return } objectList, err := machinery.LoadDocuments(cfg.Documents) if err != nil { - s.logger.With(logger.Err(err)).Error("failed to load config") + lg.With(logger.Err(err)).Error("failed to load config") return } machinery.LoadAuthProviders(s.ctx, objectList) objectList.Visit(func(config *v1beta1.GatewayConfig) { backend, err := machinery.ConfigureStorageBackend(s.ctx, &config.Spec.Storage) if err != nil { - s.logger.With(logger.Err(err)).Error("failed to configure storage backend") + lg.With(logger.Err(err)).Error("failed to configure storage backend") return } s.storageBackend.Set(backend) @@ -112,11 +112,12 @@ func (s *ExamplePlugin) UseManagementAPI(client managementv1.ManagementClient) { } func (s *ExamplePlugin) UseKeyValueStore(client system.KeyValueStoreClient) { + lg := logger.PluginLoggerFromContext(s.ctx) ctrl, err := task.NewController(s.ctx, "uninstall", system.NewKVStoreClient[*corev1.TaskStatus](client), &uninstallTaskRunner{ storageBackend: s.storageBackend.Get(), }) if err != nil { - s.logger.With(logger.Err(err)).Error("failed to create uninstall controller") + lg.With(logger.Err(err)).Error("failed to create uninstall controller") return } s.uninstallController.Set(ctrl) @@ -132,8 +133,9 @@ func (s *ExamplePlugin) UseKeyValueStore(client system.KeyValueStoreClient) { } func (s *ExamplePlugin) ConfigureRoutes(app *gin.Engine) { + lg := logger.PluginLoggerFromContext(s.ctx) app.GET("/example", func(c *gin.Context) { - s.logger.Debug("handling /example") + lg.Debug("handling /example") c.JSON(http.StatusOK, map[string]string{ "message": "hello world", }) @@ -222,9 +224,10 @@ func (s *ExamplePlugin) InstallerTemplate(context.Context, *emptypb.Empty) (*cap func Scheme(ctx context.Context) meta.Scheme { scheme := meta.NewScheme() + lg := logger.NewPluginLogger(ctx).WithGroup("example") + ctx = logger.WithPluginLogger(ctx, lg) p := &ExamplePlugin{ ctx: ctx, - logger: 
logger.NewPluginLogger().WithGroup("example"), storageBackend: future.New[storage.Backend](), uninstallController: future.New[*task.Controller](), } diff --git a/plugins/logging/pkg/agent/drivers/events/event_collector.go b/plugins/logging/pkg/agent/drivers/events/event_collector.go index 17c3e548f5..9e405057fc 100644 --- a/plugins/logging/pkg/agent/drivers/events/event_collector.go +++ b/plugins/logging/pkg/agent/drivers/events/event_collector.go @@ -8,8 +8,6 @@ import ( "sync" "time" - "log/slog" - "github.com/opensearch-project/opensearch-go/opensearchutil" "github.com/rancher/opni/pkg/logger" "github.com/rancher/opni/pkg/util" @@ -42,10 +40,10 @@ type timestampedEvent struct { type EventCollector struct { EventCollectorOptions + ctx context.Context clientset kubernetes.Interface queue workqueue.RateLimitingInterface informer informercorev1.EventInformer - logger *slog.Logger state scrapeState namespace string } @@ -82,7 +80,7 @@ func WithRestConfig(restConfig *rest.Config) EventCollectorOption { } func NewEventCollector( - logger *slog.Logger, + ctx context.Context, opts ...EventCollectorOption, ) (*EventCollector, error) { options := EventCollectorOptions{ @@ -112,10 +110,10 @@ func NewEventCollector( informer := factory.Core().V1().Events() return &EventCollector{ + ctx: ctx, EventCollectorOptions: options, clientset: clientset, informer: informer, - logger: logger, namespace: namespace, }, nil } @@ -127,6 +125,7 @@ func (c *EventCollector) Name() string { } func (c *EventCollector) ConfigureNode(config *node.LoggingCapabilityConfig) { + lg := logger.PluginLoggerFromContext(c.ctx) c.state.Lock() defer c.state.Unlock() if config.GetEnabled() { @@ -138,7 +137,7 @@ func (c *EventCollector) ConfigureNode(config *node.LoggingCapabilityConfig) { go func() { err := c.run(c.state.stopCh) if err != nil { - c.logger.Error("failed to start events", logger.Err(err)) + lg.Error("failed to start events", logger.Err(err)) c.state.Lock() close(c.state.stopCh) c.state.running = false @@ -158,6 +157,7 @@ func (c *EventCollector) ConfigureNode(config *node.LoggingCapabilityConfig) { } func (c *EventCollector) run(stopCh <-chan struct{}) error { + lg := logger.PluginLoggerFromContext(c.ctx) defer utilruntime.HandleCrash() defer c.queue.ShutDown() @@ -178,16 +178,16 @@ func (c *EventCollector) run(stopCh <-chan struct{}) error { }, }) - c.logger.Info("starting event collector") + lg.Info("starting event collector") go c.informer.Informer().Run(stopCh) if ok := cache.WaitForCacheSync(stopCh, c.informer.Informer().HasSynced); !ok { return fmt.Errorf("failed to wait for caches to sync") } - c.logger.Info("collector started") + lg.Info("collector started") wait.Until(c.runWorker, time.Second, stopCh) - c.logger.Info("shutting down collector") + lg.Info("shutting down collector") return nil } @@ -200,9 +200,10 @@ func (c *EventCollector) runWorker() { } func (c *EventCollector) enqueueEvent(obj interface{}) { + lg := logger.PluginLoggerFromContext(c.ctx) key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj) if err != nil { - c.logger.Error(fmt.Sprintf("could't get key for event %+v: %s", obj, err)) + lg.Error(fmt.Sprintf("couldn't get key for event %+v: %s", obj, err)) } c.queue.Add(timestampedEvent{ key: key, @@ -211,9 +212,10 @@ func (c *EventCollector) enqueueEvent(obj interface{}) { } func (c *EventCollector) processNextItem() bool { + lg := logger.PluginLoggerFromContext(c.ctx) event, shutdown := c.queue.Get() if shutdown { - c.logger.Info("queue shutdown, halting event shipping") + lg.Info("queue 
shutdown, halting event shipping") return false } defer c.queue.Done(event) @@ -224,18 +226,19 @@ func (c *EventCollector) processNextItem() bool { return true } if c.maxRetries == 0 || c.queue.NumRequeues(event) < c.maxRetries { - c.logger.Warn(fmt.Sprintf("failed to process event %s, requeueing: %v", event, err)) + lg.Warn(fmt.Sprintf("failed to process event %s, requeueing: %v", event, err)) c.queue.AddRateLimited(event) return true } - c.logger.Error(fmt.Sprintf("failed to process event %s, giving up: %v", event, err)) + lg.Error(fmt.Sprintf("failed to process event %s, giving up: %v", event, err)) c.queue.Forget(event) utilruntime.HandleError(err) return true } func (c *EventCollector) processItem(obj interface{}) error { + lg := logger.PluginLoggerFromContext(c.ctx) eventObj := obj.(timestampedEvent) event, _, err := c.informer.Informer().GetIndexer().GetByKey(eventObj.key) if err != nil { @@ -243,7 +246,7 @@ func (c *EventCollector) processItem(obj interface{}) error { } if event == nil || util.IsInterfaceNil(event) { - c.logger.Info("nil event, skipping") + lg.Info("nil event, skipping") return nil } diff --git a/plugins/logging/pkg/agent/drivers/kubernetes_manager/kubernetes_manager.go b/plugins/logging/pkg/agent/drivers/kubernetes_manager/kubernetes_manager.go index 6b0ced9409..0094556c8b 100644 --- a/plugins/logging/pkg/agent/drivers/kubernetes_manager/kubernetes_manager.go +++ b/plugins/logging/pkg/agent/drivers/kubernetes_manager/kubernetes_manager.go @@ -4,7 +4,6 @@ import ( "context" "errors" "fmt" - "log/slog" "os" "sync" @@ -61,9 +60,9 @@ type reconcilerState struct { } type KubernetesManagerDriverOptions struct { - Namespace string `option:"namespace"` - RestConfig *rest.Config `option:"restConfig"` - Logger *slog.Logger `option:"logger"` + Namespace string `option:"namespace"` + RestConfig *rest.Config `option:"restConfig"` + Context context.Context `option:"context"` } func NewKubernetesManagerDriver(options KubernetesManagerDriverOptions) (*KubernetesManagerDriver, error) { @@ -105,6 +104,7 @@ func NewKubernetesManagerDriver(options KubernetesManagerDriverOptions) (*Kubern var _ drivers.LoggingNodeDriver = (*KubernetesManagerDriver)(nil) func (m *KubernetesManagerDriver) ConfigureNode(config *node.LoggingCapabilityConfig) { + lg := logger.PluginLoggerFromContext(m.Context) m.state.Lock() if m.state.running { m.state.backoffCancel() @@ -122,7 +122,7 @@ BACKOFF: for backoff.Continue(b) { logCollectorConf := m.buildLoggingCollectorConfig() if err := m.reconcileObject(logCollectorConf, config.Enabled); err != nil { - m.Logger.With( + lg.With( "object", client.ObjectKeyFromObject(logCollectorConf).String(), logger.Err(err), ).Error("error reconciling object") @@ -130,7 +130,7 @@ BACKOFF: } if err := m.reconcileCollector(config.Enabled); err != nil { - m.Logger.With( + lg.With( "object", "opni collector", logger.Err(err), ).Error("error reconciling object") @@ -141,9 +141,9 @@ BACKOFF: } if !success { - m.Logger.Error("timed out reconciling objects") + lg.Error("timed out reconciling objects") } else { - m.Logger.Info("objects reconciled successfully") + lg.Info("objects reconciled successfully") } } @@ -166,7 +166,7 @@ func (m *KubernetesManagerDriver) buildLoggingCollectorConfig() *opniloggingv1be func (m *KubernetesManagerDriver) reconcileObject(desired client.Object, shouldExist bool) error { // get the object key := client.ObjectKeyFromObject(desired) - lg := m.Logger.With("object", key) + lg := logger.PluginLoggerFromContext(m.Context).With("object", key) 
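
The retry handling in processNextItem above is the standard client-go workqueue pattern. Reduced to its skeleton (drain, maxRetries, and process are illustrative names; the queue API is real):

package main

import (
	"fmt"

	"k8s.io/client-go/util/workqueue"
)

// drain processes items until shutdown, requeueing failures with
// rate-limited backoff and giving up after maxRetries attempts.
func drain(q workqueue.RateLimitingInterface, maxRetries int, process func(any) error) {
	for {
		item, shutdown := q.Get()
		if shutdown {
			return
		}
		if err := process(item); err == nil {
			q.Forget(item) // success: reset the backoff counter
		} else if q.NumRequeues(item) < maxRetries {
			q.AddRateLimited(item) // transient failure: retry with backoff
		} else {
			q.Forget(item) // persistent failure: drop the item
		}
		q.Done(item)
	}
}

func main() {
	q := workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter())
	q.Add("event-key")
	q.ShutDown() // queued items are still drained before Get reports shutdown
	drain(q, 3, func(item any) error { fmt.Println("processing", item); return nil })
}
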
lg.Info("reconciling object") // get the agent statefulset @@ -282,22 +282,24 @@ func (m *KubernetesManagerDriver) getAgentService() (*corev1.Service, error) { } func (m *KubernetesManagerDriver) patchObject(current client.Object, desired client.Object) error { + lg := logger.PluginLoggerFromContext(m.Context) + // update the object patchResult, err := patch.DefaultPatchMaker.Calculate(current, desired, patch.IgnoreStatusFields()) if err != nil { - m.Logger.With( + lg.With( logger.Err(err), ).Warn("could not match objects") return err } if patchResult.IsEmpty() { - m.Logger.Info("resource is in sync") + lg.Info("resource is in sync") return nil } - m.Logger.Info("resource diff") + lg.Info("resource diff") if err := patch.DefaultAnnotator.SetLastAppliedAnnotation(desired); err != nil { - m.Logger.With( + lg.With( logger.Err(err), ).Error("failed to set last applied annotation") } @@ -312,7 +314,7 @@ func (m *KubernetesManagerDriver) patchObject(current client.Object, desired cli return err } - m.Logger.Info("updating resource") + lg.Info("updating resource") return m.k8sClient.Update(context.TODO(), desired) } @@ -340,10 +342,12 @@ func (m *KubernetesManagerDriver) buildEmptyCollector() *opnicorev1beta1.Collect } func init() { - drivers.NodeDrivers.Register("kubernetes-manager", func(_ context.Context, opts ...driverutil.Option) (drivers.LoggingNodeDriver, error) { + drivers.NodeDrivers.Register("kubernetes-manager", func(ctx context.Context, opts ...driverutil.Option) (drivers.LoggingNodeDriver, error) { + lg := logger.NewPluginLogger(ctx).WithGroup("logging").WithGroup("kubernetes-manager") + options := KubernetesManagerDriverOptions{ Namespace: os.Getenv("POD_NAMESPACE"), - Logger: logger.NewPluginLogger().WithGroup("logging").WithGroup("kubernetes-manager"), + Context: logger.WithPluginLogger(ctx, lg), } driverutil.ApplyOptions(&options, opts...) 
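
Both this registration and the alerting-manager one follow the same migration: the options struct's Logger field becomes a Context field, and the driver factory wraps its scoped logger into that context before applying options. The shape in isolation (ExampleDriverOptions and newExampleDriver are illustrative stand-ins):

package drivers

import (
	"context"
	"os"

	"github.com/rancher/opni/pkg/logger"
)

type ExampleDriverOptions struct {
	Namespace string          `option:"namespace"`
	Context   context.Context `option:"context"`
}

func newExampleDriver(options ExampleDriverOptions) error {
	// the logger now travels with the context instead of its own field
	lg := logger.PluginLoggerFromContext(options.Context)
	lg.With("namespace", options.Namespace).Info("driver initialized")
	return nil
}

func registerExample(ctx context.Context) {
	lg := logger.NewPluginLogger(ctx).WithGroup("example-driver")
	_ = newExampleDriver(ExampleDriverOptions{
		Namespace: os.Getenv("POD_NAMESPACE"),
		Context:   logger.WithPluginLogger(ctx, lg),
	})
}
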
return NewKubernetesManagerDriver(options) diff --git a/plugins/logging/pkg/agent/node.go b/plugins/logging/pkg/agent/node.go index bb6aa39dcc..04d3741fdb 100644 --- a/plugins/logging/pkg/agent/node.go +++ b/plugins/logging/pkg/agent/node.go @@ -14,6 +14,7 @@ import ( corev1 "github.com/rancher/opni/pkg/apis/core/v1" "github.com/rancher/opni/pkg/capabilities/wellknown" "github.com/rancher/opni/pkg/health" + "github.com/rancher/opni/pkg/logger" "github.com/rancher/opni/pkg/util" "github.com/rancher/opni/plugins/logging/apis/node" "github.com/rancher/opni/plugins/logging/pkg/agent/drivers" @@ -21,14 +22,13 @@ import ( "google.golang.org/grpc/status" "google.golang.org/protobuf/types/known/emptypb" "google.golang.org/protobuf/types/known/timestamppb" - "log/slog" ) type LoggingNode struct { capabilityv1.UnsafeNodeServer controlv1.UnsafeHealthServer - logger *slog.Logger + ctx context.Context clientMu sync.RWMutex client node.NodeLoggingCapabilityClient @@ -40,9 +40,9 @@ type LoggingNode struct { conditions health.ConditionTracker } -func NewLoggingNode(ct health.ConditionTracker, lg *slog.Logger) *LoggingNode { +func NewLoggingNode(ctx context.Context, ct health.ConditionTracker) *LoggingNode { return &LoggingNode{ - logger: lg, + ctx: ctx, conditions: ct, } } @@ -70,13 +70,14 @@ func (l *LoggingNode) Info(_ context.Context, _ *emptypb.Empty) (*capabilityv1.D // Implements capabilityv1.NodeServer func (l *LoggingNode) SyncNow(_ context.Context, req *capabilityv1.Filter) (*emptypb.Empty, error) { + lg := logger.PluginLoggerFromContext(l.ctx) if len(req.CapabilityNames) > 0 { if !slices.Contains(req.CapabilityNames, wellknown.CapabilityLogs) { - l.logger.Debug("ignoring sync request due to capability filter") + lg.Debug("ignoring sync request due to capability filter") return &emptypb.Empty{}, nil } } - l.logger.Debug("received sync request") + lg.Debug("received sync request") l.clientMu.RLock() defer l.clientMu.RUnlock() @@ -114,7 +115,9 @@ func (l *LoggingNode) GetHealth(_ context.Context, _ *emptypb.Empty) (*corev1.He } func (l *LoggingNode) doSync(ctx context.Context) { - l.logger.Debug("syncing logging node") + lg := logger.PluginLoggerFromContext(l.ctx) + + lg.Debug("syncing logging node") l.clientMu.RLock() defer l.clientMu.RUnlock() @@ -137,9 +140,9 @@ func (l *LoggingNode) doSync(ctx context.Context) { switch syncResp.ConfigStatus { case node.ConfigStatus_UpToDate: - l.logger.Info("logging node config is up to date") + lg.Info("logging node config is up to date") case node.ConfigStatus_NeedsUpdate: - l.logger.Info("updating logging node config") + lg.Info("updating logging node config") l.updateConfig(syncResp.GetUpdatedConfig()) } @@ -147,6 +150,8 @@ func (l *LoggingNode) doSync(ctx context.Context) { } func (l *LoggingNode) updateConfig(config *node.LoggingCapabilityConfig) { + lg := logger.PluginLoggerFromContext(l.ctx) + l.configMu.Lock() defer l.configMu.Unlock() @@ -163,7 +168,7 @@ func (l *LoggingNode) updateConfig(config *node.LoggingCapabilityConfig) { select { case ch <- clone: default: - l.logger.Warn("slow config update listener detected") + lg.Warn("slow config update listener detected") ch <- clone } } diff --git a/plugins/logging/pkg/agent/plugin.go b/plugins/logging/pkg/agent/plugin.go index f8eb4e2760..27200c5660 100644 --- a/plugins/logging/pkg/agent/plugin.go +++ b/plugins/logging/pkg/agent/plugin.go @@ -2,7 +2,6 @@ package agent import ( "context" - "log/slog" healthpkg "github.com/rancher/opni/pkg/health" "github.com/rancher/opni/pkg/logger" @@ -20,26 +19,27 @@ 
import ( type Plugin struct { ctx context.Context - logger *slog.Logger node *LoggingNode otelForwarder *otel.Forwarder } func NewPlugin(ctx context.Context) *Plugin { - lg := logger.NewPluginLogger().WithGroup("logging") + lg := logger.NewPluginLogger(ctx).WithGroup("logging") + ctx = logger.WithPluginLogger(ctx, lg) + logForwarderLg := lg.WithGroup("otel-logs-forwarder") + traceForwarderLg := lg.WithGroup("otel-trace-forwarder") ct := healthpkg.NewDefaultConditionTracker(lg) p := &Plugin{ - ctx: ctx, - logger: lg, - node: NewLoggingNode(ct, lg), + ctx: ctx, + node: NewLoggingNode(ctx, ct), otelForwarder: otel.NewForwarder( otel.NewLogsForwarder( - otel.WithLogger(lg.WithGroup("otel-logs-forwarder")), + logger.WithPluginLogger(ctx, logForwarderLg), otel.WithPrivileged(true)), otel.NewTraceForwarder( - otel.WithLogger(lg.WithGroup("otel-trace-forwarder")), + logger.WithPluginLogger(ctx, traceForwarderLg), otel.WithPrivileged(true)), ), } @@ -69,10 +69,11 @@ var ( func Scheme(ctx context.Context) meta.Scheme { scheme := meta.NewScheme(meta.WithMode(meta.ModeAgent)) + p := NewPlugin(ctx) scheme.Add(capability.CapabilityBackendPluginID, capability.NewAgentPlugin(p.node)) scheme.Add(health.HealthPluginID, health.NewPlugin(p.node)) - scheme.Add(stream.StreamAPIExtensionPluginID, stream.NewAgentPlugin(p)) + scheme.Add(stream.StreamAPIExtensionPluginID, stream.NewAgentPlugin(ctx, p)) scheme.Add(httpext.HTTPAPIExtensionPluginID, httpext.NewPlugin(p.otelForwarder)) return scheme } diff --git a/plugins/logging/pkg/backend/logging.go b/plugins/logging/pkg/backend/logging.go index cf13b671f0..f7d183aa1f 100644 --- a/plugins/logging/pkg/backend/logging.go +++ b/plugins/logging/pkg/backend/logging.go @@ -5,8 +5,6 @@ import ( "slices" "sync" - "log/slog" - "github.com/rancher/opni/pkg/agent" capabilityv1 "github.com/rancher/opni/pkg/apis/capability/v1" opnicorev1 "github.com/rancher/opni/pkg/apis/core/v1" @@ -37,7 +35,7 @@ type LoggingBackend struct { } type LoggingBackendConfig struct { - Logger *slog.Logger `validate:"required"` + Context context.Context `validate:"required"` StorageBackend storage.Backend `validate:"required"` MgmtClient managementv1.ManagementClient `validate:"required"` Delegate streamext.StreamDelegate[agent.ClientSet] `validate:"required"` @@ -50,6 +48,7 @@ var _ node.NodeLoggingCapabilityServer = (*LoggingBackend)(nil) // TODO: set up watches on underlying k8s objects to dynamically request a sync func (b *LoggingBackend) Initialize(conf LoggingBackendConfig) { + lg := logger.PluginLoggerFromContext(conf.Context) b.InitOnce(func() { if err := loggingutil.Validate.Struct(conf); err != nil { panic(err) } @@ -66,14 +65,14 @@ func (b *LoggingBackend) Initialize(conf LoggingBackendConfig) { go func() { clusters, err := b.MgmtClient.ListClusters(context.Background(), &managementv1.ListClustersRequest{}) if err != nil { - b.Logger.With( + lg.With( logger.Err(err), ).Error("could not list clusters for reconciliation") return } if err := b.reconcileClusterMetadata(context.Background(), clusters.Items); err != nil { - b.Logger.With(logger.Err(err)).Error("could not reconcile opni agents with metadata index, some agents may not be included") + lg.With(logger.Err(err)).Error("could not reconcile opni agents with metadata index, some agents may not be included") return } diff --git a/plugins/logging/pkg/backend/metadata.go b/plugins/logging/pkg/backend/metadata.go index 47a0b5dd41..2153a4172b 100644 --- a/plugins/logging/pkg/backend/metadata.go +++
b/plugins/logging/pkg/backend/metadata.go @@ -10,6 +10,7 @@ import ( ) func (b *LoggingBackend) updateClusterMetadata(ctx context.Context, event *managementv1.WatchEvent) error { + lg := logger.PluginLoggerFromContext(b.Context) incomingLabels := event.GetCluster().GetMetadata().GetLabels() previousLabels := event.GetPrevious().GetMetadata().GetLabels() var newName, oldName string @@ -20,19 +21,19 @@ func (b *LoggingBackend) updateClusterMetadata(ctx context.Context, event *manag oldName = previousLabels[opnicorev1.NameLabel] } if newName == oldName { - b.Logger.With( + lg.With( "oldName", oldName, "newName", newName, ).Debug("cluster was not renamed") return nil } - b.Logger.With( + lg.With( "oldName", oldName, "newName", newName, ).Debug("cluster was renamed") if err := b.ClusterDriver.StoreClusterMetadata(ctx, event.Cluster.GetId(), newName); err != nil { - b.Logger.With( + lg.With( logger.Err(err), "cluster", event.Cluster.Id, ).Debug("could not update cluster metadata") @@ -43,24 +44,26 @@ func (b *LoggingBackend) updateClusterMetadata(ctx context.Context, event *manag } func (b *LoggingBackend) watchClusterEvents(ctx context.Context) { + lg := logger.PluginLoggerFromContext(b.Context) + clusterClient, err := b.MgmtClient.WatchClusters(ctx, &managementv1.WatchClustersRequest{}) if err != nil { - b.Logger.With(logger.Err(err)).Error("failed to watch clusters, existing") + lg.With(logger.Err(err)).Error("failed to watch clusters, exiting") os.Exit(1) } - b.Logger.Info("watching cluster events") + lg.Info("watching cluster events") outer: for { select { case <-clusterClient.Context().Done(): - b.Logger.Info("context cancelled, stoping cluster event watcher") + lg.Info("context cancelled, stopping cluster event watcher") break outer default: event, err := clusterClient.Recv() if err != nil { - b.Logger.With(logger.Err(err)).Error("failed to receive cluster event") + lg.With(logger.Err(err)).Error("failed to receive cluster event") continue } @@ -70,10 +73,12 @@ outer: } func (b *LoggingBackend) reconcileClusterMetadata(ctx context.Context, clusters []*opnicorev1.Cluster) (retErr error) { + lg := logger.PluginLoggerFromContext(b.Context) + for _, cluster := range clusters { err := b.ClusterDriver.StoreClusterMetadata(ctx, cluster.GetId(), cluster.Metadata.Labels[opnicorev1.NameLabel]) if err != nil { - b.Logger.With( + lg.With( logger.Err(err), "cluster", cluster.Id, ).Warn("could not update cluster metadata") diff --git a/plugins/logging/pkg/backend/sync.go b/plugins/logging/pkg/backend/sync.go index b9bbe847bf..4e1a57c7b8 100644 --- a/plugins/logging/pkg/backend/sync.go +++ b/plugins/logging/pkg/backend/sync.go @@ -112,6 +112,8 @@ func (b *LoggingBackend) shouldDisableNode(ctx context.Context) bool { } func (b *LoggingBackend) requestNodeSync(ctx context.Context, cluster *opnicorev1.Reference) { + lg := logger.PluginLoggerFromContext(b.Context) + _, err := b.Delegate.WithTarget(cluster).SyncNow(ctx, &capabilityv1.Filter{ CapabilityNames: []string{wellknown.CapabilityLogs}, }) @@ -121,14 +123,14 @@ func (b *LoggingBackend) requestNodeSync(ctx context.Context, cluster *opnicorev name = "(all)" } if err != nil { - b.Logger.With( + lg.With( "cluster", name, "capability", wellknown.CapabilityLogs, logger.Err(err), ).Warn("failed to request node sync; nodes may not be updated immediately") return } - b.Logger.With( + lg.With( "cluster", name, "capability", wellknown.CapabilityLogs, ).Info("node sync requested") diff --git a/plugins/logging/pkg/gateway/admin_v2.go
b/plugins/logging/pkg/gateway/admin_v2.go index 2a2838611c..1221e6300c 100644 --- a/plugins/logging/pkg/gateway/admin_v2.go +++ b/plugins/logging/pkg/gateway/admin_v2.go @@ -8,9 +8,8 @@ import ( "strings" "time" - "log/slog" - "github.com/lestrrat-go/backoff/v2" + "github.com/rancher/opni/pkg/logger" "github.com/rancher/opni/pkg/versions" "github.com/rancher/opni/plugins/logging/apis/loggingadmin" loggingerrors "github.com/rancher/opni/plugins/logging/pkg/errors" @@ -63,9 +62,9 @@ var defaultIndices = []string{ type LoggingManagerV2 struct { loggingadmin.UnsafeLoggingAdminV2Server + ctx context.Context managementDriver management.ClusterDriver backendDriver backend.ClusterDriver - logger *slog.Logger alertingServer *alerting.AlertingManagementServer opensearchManager *opensearchdata.Manager otelForwarder *otel.Forwarder @@ -83,6 +82,8 @@ func (m *LoggingManagerV2) GetOpensearchCluster(ctx context.Context, _ *emptypb. } func (m *LoggingManagerV2) DeleteOpensearchCluster(ctx context.Context, _ *emptypb.Empty) (*emptypb.Empty, error) { + lg := logger.PluginLoggerFromContext(m.ctx) + // Check that it is safe to delete the cluster m.opensearchManager.UnsetClient() @@ -96,7 +97,7 @@ func (m *LoggingManagerV2) DeleteOpensearchCluster(ctx context.Context, _ *empty err = m.managementDriver.DeleteCluster(ctx) if err != nil { if errors.Is(err, loggingerrors.ErrLoggingCapabilityExists) { - m.logger.Error("can not delete opensearch until logging capability is uninstalled from all clusters") + lg.Error("can not delete opensearch until logging capability is uninstalled from all clusters") } return nil, err } @@ -186,9 +187,10 @@ func (m *LoggingManagerV2) DoUpgrade(ctx context.Context, options *loggingadmin. } func (m *LoggingManagerV2) GetStorageClasses(ctx context.Context, _ *emptypb.Empty) (*loggingadmin.StorageClassResponse, error) { + lg := logger.PluginLoggerFromContext(m.ctx) storageClassNames, err := m.managementDriver.GetStorageClasses(ctx) if err != nil { - m.logger.Error(fmt.Sprintf("failed to list storageclasses: %v", err)) + lg.Error(fmt.Sprintf("failed to list storageclasses: %v", err)) return nil, err } @@ -289,25 +291,31 @@ func (m *LoggingManagerV2) ListSnapshotSchedules(ctx context.Context, _ *emptypb } func (m *LoggingManagerV2) validDurationString(duration string) bool { + lg := logger.PluginLoggerFromContext(m.ctx) + match, err := regexp.MatchString(`^\d+[dMmyh]`, duration) if err != nil { - m.logger.Error(fmt.Sprintf("could not run regexp: %v", err)) + lg.Error(fmt.Sprintf("could not run regexp: %v", err)) return false } return match } func (m *LoggingManagerV2) validateStorage(dataNodes *loggingadmin.DataDetails) error { + lg := logger.PluginLoggerFromContext(m.ctx) + if dataNodes.GetReplicas() < 2 && !dataNodes.GetPersistence().GetEnabled() { - m.logger.Error("minimum of 2 data nodes required if no persistent storage") + lg.Error("minimum of 2 data nodes required if no persistent storage") return loggingerrors.ErrInvalidDataPersistence } return nil } func (m *LoggingManagerV2) opensearchClusterReady() bool { + lg := logger.PluginLoggerFromContext(m.ctx) + absentRetriesMax := 3 - ctx := context.TODO() + ctx := m.ctx expBackoff := backoff.Exponential( backoff.WithMaxRetries(0), backoff.WithMinInterval(5*time.Second), @@ -321,18 +329,18 @@ FETCH: absentRetries := 0 select { case <-b.Done(): - m.logger.Warn("plugin context cancelled before Opensearch object created") + lg.Warn("plugin context cancelled before Opensearch object created") return true case <-b.Next(): state := 
m.backendDriver.GetInstallStatus(ctx) switch state { case backend.Error: - m.logger.Error("failed to fetch opensearch cluster, can't check readiness") + lg.Error("failed to fetch opensearch cluster, can't check readiness") return true case backend.Absent: absentRetries++ if absentRetries > absentRetriesMax { - m.logger.Error("failed to fetch opensearch cluster, can't check readiness") + lg.Error("failed to fetch opensearch cluster, can't check readiness") return true } continue diff --git a/plugins/logging/pkg/gateway/drivers/backend/kubernetes_manager/kubernetes_manager.go b/plugins/logging/pkg/gateway/drivers/backend/kubernetes_manager/kubernetes_manager.go index 281dfd8da7..d1436fc5b4 100644 --- a/plugins/logging/pkg/gateway/drivers/backend/kubernetes_manager/kubernetes_manager.go +++ b/plugins/logging/pkg/gateway/drivers/backend/kubernetes_manager/kubernetes_manager.go @@ -12,6 +12,7 @@ import ( loggingv1beta1 "github.com/rancher/opni/apis/logging/v1beta1" capabilityv1 "github.com/rancher/opni/pkg/apis/capability/v1" corev1 "github.com/rancher/opni/pkg/apis/core/v1" + "github.com/rancher/opni/pkg/logger" "github.com/rancher/opni/pkg/plugins/driverutil" "github.com/rancher/opni/pkg/resources" k8sutilerrors "github.com/rancher/opni/pkg/util/errors/k8sutil" @@ -24,7 +25,6 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/util/retry" - "log/slog" opsterv1 "opensearch.opster.io/api/v1" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" @@ -40,7 +40,7 @@ type KubernetesManagerDriverOptions struct { K8sClient client.Client `option:"k8sClient"` Namespace string `option:"namespace"` OpensearchCluster *opnimeta.OpensearchClusterRef `option:"opensearchCluster"` - Logger *slog.Logger `option:"logger"` + Context context.Context `option:"context"` } func NewKubernetesManagerDriver(options KubernetesManagerDriverOptions) (*KubernetesManagerDriver, error) { @@ -89,6 +89,7 @@ func (d *KubernetesManagerDriver) GetInstallStatus(ctx context.Context) backend. } func (d *KubernetesManagerDriver) StoreCluster(ctx context.Context, req *corev1.Reference, friendlyName string) error { + lg := logger.PluginLoggerFromContext(d.Context) _, err := d.getCluster(ctx, req.GetId()) if err == nil { return loggingerrors.ErrAlreadyExists @@ -115,13 +116,14 @@ func (d *KubernetesManagerDriver) StoreCluster(ctx context.Context, req *corev1. 
} if err := d.K8sClient.Create(ctx, loggingCluster); err != nil { - d.Logger.Error(fmt.Sprintf("failed to store cluster: %v", err)) + lg.Error(fmt.Sprintf("failed to store cluster: %v", err)) k8sutilerrors.GRPCFromK8s(err) } return nil } func (d *KubernetesManagerDriver) getCluster(ctx context.Context, id string) (*opnicorev1beta1.LoggingCluster, error) { + lg := logger.PluginLoggerFromContext(d.Context) loggingClusterList := &opnicorev1beta1.LoggingClusterList{} if err := d.K8sClient.List( ctx, @@ -129,7 +131,7 @@ func (d *KubernetesManagerDriver) getCluster(ctx context.Context, id string) (*o client.InNamespace(d.Namespace), client.MatchingLabels{resources.OpniClusterID: id}, ); err != nil { - d.Logger.Error(fmt.Sprintf("failed to list logging clusters: %v", err)) + lg.Error(fmt.Sprintf("failed to list logging clusters: %v", err)) return nil, k8sutilerrors.GRPCFromK8s(err) } @@ -145,6 +147,7 @@ func (d *KubernetesManagerDriver) getCluster(ctx context.Context, id string) (*o } func (d *KubernetesManagerDriver) StoreClusterMetadata(ctx context.Context, id, name string) error { + lg := logger.PluginLoggerFromContext(d.Context) cluster, err := d.getCluster(ctx, id) if err != nil { return err @@ -164,13 +167,14 @@ func (d *KubernetesManagerDriver) StoreClusterMetadata(ctx context.Context, id, }) if err != nil { - d.Logger.Error(fmt.Sprintf("failed to update cluster data: %v", err)) + lg.Error(fmt.Sprintf("failed to update cluster data: %v", err)) return k8sutilerrors.GRPCFromK8s(err) } return nil } func (d *KubernetesManagerDriver) DeleteCluster(ctx context.Context, id string) error { + lg := logger.PluginLoggerFromContext(d.Context) loggingClusterList := &opnicorev1beta1.LoggingClusterList{} if err := d.K8sClient.List( ctx, @@ -178,7 +182,7 @@ func (d *KubernetesManagerDriver) DeleteCluster(ctx context.Context, id string) client.InNamespace(d.OpensearchCluster.Namespace), client.MatchingLabels{resources.OpniClusterID: id}, ); err != nil { - d.Logger.Error(fmt.Sprintf("failed to list logging clusters: %v", err)) + lg.Error(fmt.Sprintf("failed to list logging clusters: %v", err)) return k8sutilerrors.GRPCFromK8s(err) } @@ -189,7 +193,7 @@ func (d *KubernetesManagerDriver) DeleteCluster(ctx context.Context, id string) loggingCluster := &loggingClusterList.Items[0] err := d.K8sClient.Delete(ctx, loggingCluster) if err != nil { - d.Logger.Error(fmt.Sprintf("failed to delete cluster: %v", err)) + lg.Error(fmt.Sprintf("failed to delete cluster: %v", err)) return k8sutilerrors.GRPCFromK8s(err) } return nil @@ -199,6 +203,7 @@ func (d *KubernetesManagerDriver) DeleteCluster(ctx context.Context, id string) } func (d *KubernetesManagerDriver) SetClusterStatus(ctx context.Context, id string, enabled bool) error { + lg := logger.PluginLoggerFromContext(d.Context) syncTime := time.Now() loggingClusterList := &opnicorev1beta1.LoggingClusterList{} if err := d.K8sClient.List( @@ -207,7 +212,7 @@ func (d *KubernetesManagerDriver) SetClusterStatus(ctx context.Context, id strin client.InNamespace(d.Namespace), client.MatchingLabels{resources.OpniClusterID: id}, ); err != nil { - d.Logger.Error(fmt.Sprintf("failed to list logging clusters: %v", err)) + lg.Error(fmt.Sprintf("failed to list logging clusters: %v", err)) return k8sutilerrors.GRPCFromK8s(err) } @@ -228,13 +233,14 @@ func (d *KubernetesManagerDriver) SetClusterStatus(ctx context.Context, id strin }) if err != nil { - d.Logger.Error(fmt.Sprintf("failed to update logging cluster: %v", err)) + lg.Error(fmt.Sprintf("failed to update logging cluster: %v", 
err)) return k8sutilerrors.GRPCFromK8s(err) } return nil } func (d *KubernetesManagerDriver) GetClusterStatus(ctx context.Context, id string) (*capabilityv1.NodeCapabilityStatus, error) { + lg := logger.PluginLoggerFromContext(d.Context) loggingClusterList := &opnicorev1beta1.LoggingClusterList{} if err := d.K8sClient.List( ctx, @@ -242,7 +248,7 @@ func (d *KubernetesManagerDriver) GetClusterStatus(ctx context.Context, id strin client.InNamespace(d.Namespace), client.MatchingLabels{resources.OpniClusterID: id}, ); err != nil { - d.Logger.Error(fmt.Sprintf("failed to list logging clusters: %v", err)) + lg.Error(fmt.Sprintf("failed to list logging clusters: %v", err)) return nil, k8sutilerrors.GRPCFromK8s(err) } @@ -268,6 +274,7 @@ func (d *KubernetesManagerDriver) SetSyncTime() { } func (d *KubernetesManagerDriver) StoreClusterReadUser(ctx context.Context, username, password, id string) error { + lg := logger.PluginLoggerFromContext(d.Context) user := &loggingv1beta1.MulticlusterUser{ ObjectMeta: metav1.ObjectMeta{ Name: username, @@ -311,7 +318,7 @@ func (d *KubernetesManagerDriver) StoreClusterReadUser(ctx context.Context, user err = client.IgnoreAlreadyExists(d.K8sClient.Create(ctx, binding)) if err != nil { - d.Logger.Error(fmt.Sprintf("failed to store logging cluster binding: %v", err)) + lg.Error(fmt.Sprintf("failed to store logging cluster binding: %v", err)) return k8sutilerrors.GRPCFromK8s(err) } return nil diff --git a/plugins/logging/pkg/gateway/drivers/management/kubernetes_manager/helpers.go b/plugins/logging/pkg/gateway/drivers/management/kubernetes_manager/helpers.go index f0ec706d22..5e171e7fcf 100644 --- a/plugins/logging/pkg/gateway/drivers/management/kubernetes_manager/helpers.go +++ b/plugins/logging/pkg/gateway/drivers/management/kubernetes_manager/helpers.go @@ -6,6 +6,7 @@ import ( "slices" loggingv1beta1 "github.com/rancher/opni/apis/logging/v1beta1" + "github.com/rancher/opni/pkg/logger" "github.com/rancher/opni/pkg/util" k8sutilerrors "github.com/rancher/opni/pkg/util/errors/k8sutil" "github.com/rancher/opni/plugins/logging/apis/loggingadmin" @@ -380,6 +381,7 @@ func convertProtobufToDashboards( } func (d *KubernetesManagerDriver) storeS3Credentials(ctx context.Context, credentials *loggingadmin.S3Credentials) error { + lg := logger.PluginLoggerFromContext(d.Context) if credentials == nil { return nil } @@ -400,12 +402,12 @@ func (d *KubernetesManagerDriver) storeS3Credentials(ctx context.Context, creden } err = d.K8sClient.Create(ctx, secret) if err != nil { - d.Logger.Error(fmt.Sprintf("failed to create s3 credentials secret: %v", err)) + lg.Error(fmt.Sprintf("failed to create s3 credentials secret: %v", err)) return k8sutilerrors.GRPCFromK8s(err) } return nil } - d.Logger.Error(fmt.Sprintf("failed to get existing s3 credentials: %v", err)) + lg.Error(fmt.Sprintf("failed to get existing s3 credentials: %v", err)) return k8sutilerrors.GRPCFromK8s(err) } @@ -422,13 +424,14 @@ func (d *KubernetesManagerDriver) storeS3Credentials(ctx context.Context, creden return d.K8sClient.Update(ctx, secret) }) if err != nil { - d.Logger.Error(fmt.Sprintf("failed to update s3 credentials: %v", err)) + lg.Error(fmt.Sprintf("failed to update s3 credentials: %v", err)) return k8sutilerrors.GRPCFromK8s(err) } return nil } func (d *KubernetesManagerDriver) getS3Credentials(ctx context.Context) (*loggingadmin.S3Credentials, error) { + lg := logger.PluginLoggerFromContext(d.Context) secret := &corev1.Secret{} err := d.K8sClient.Get(ctx, types.NamespacedName{ Name: s3CredentialsSecret, 
@@ -438,7 +441,7 @@ func (d *KubernetesManagerDriver) getS3Credentials(ctx context.Context) (*loggin if k8serrors.IsNotFound(err) { return nil, nil } - d.Logger.Error(fmt.Sprintf("failed to get s3 credentials: %v", err)) + lg.Error(fmt.Sprintf("failed to get s3 credentials: %v", err)) return nil, k8sutilerrors.GRPCFromK8s(err) } return &loggingadmin.S3Credentials{ @@ -486,6 +489,7 @@ func (d *KubernetesManagerDriver) createOrUpdateRecurringSnapshot( defaultIndices []string, owner metav1.Object, ) error { + lg := logger.PluginLoggerFromContext(d.Context) k8sSnapshot := &loggingv1beta1.RecurringSnapshot{ ObjectMeta: metav1.ObjectMeta{ Name: snapshot.GetRef().GetName(), @@ -496,14 +500,14 @@ func (d *KubernetesManagerDriver) createOrUpdateRecurringSnapshot( err := d.K8sClient.Get(ctx, client.ObjectKeyFromObject(k8sSnapshot), k8sSnapshot) if err != nil { if !k8serrors.IsNotFound(err) { - d.Logger.Error(fmt.Sprintf("failed to check if snapshot exists: %v", err)) + lg.Error(fmt.Sprintf("failed to check if snapshot exists: %v", err)) return k8sutilerrors.GRPCFromK8s(err) } controllerutil.SetOwnerReference(owner, k8sSnapshot, d.K8sClient.Scheme()) d.updateRecurringSnapshot(k8sSnapshot, snapshot, defaultIndices) err = d.K8sClient.Create(ctx, k8sSnapshot) if err != nil { - d.Logger.Error(fmt.Sprintf("failed to create snapshot: %v", err)) + lg.Error(fmt.Sprintf("failed to create snapshot: %v", err)) return k8sutilerrors.GRPCFromK8s(err) } } @@ -518,7 +522,7 @@ func (d *KubernetesManagerDriver) createOrUpdateRecurringSnapshot( }) if err != nil { - d.Logger.Error(fmt.Sprintf("failed to update snapshot: %v", err)) + lg.Error(fmt.Sprintf("failed to update snapshot: %v", err)) return k8sutilerrors.GRPCFromK8s(err) } return nil @@ -553,10 +557,11 @@ func (d *KubernetesManagerDriver) updateRecurringSnapshot( } func (d *KubernetesManagerDriver) listRecurringSnapshots(ctx context.Context) (retSlice []*loggingadmin.SnapshotStatus, retErr error) { + lg := logger.PluginLoggerFromContext(d.Context) list := &loggingv1beta1.RecurringSnapshotList{} retErr = d.K8sClient.List(ctx, list, client.InNamespace(d.OpensearchCluster.Namespace)) if retErr != nil { - d.Logger.Error(fmt.Sprintf("failed to list recurring snapshots: %v", retErr)) + lg.Error(fmt.Sprintf("failed to list recurring snapshots: %v", retErr)) retErr = k8sutilerrors.GRPCFromK8s(retErr) return } diff --git a/plugins/logging/pkg/gateway/drivers/management/kubernetes_manager/kubernetes_manager.go b/plugins/logging/pkg/gateway/drivers/management/kubernetes_manager/kubernetes_manager.go index 3f10f22103..101240371f 100644 --- a/plugins/logging/pkg/gateway/drivers/management/kubernetes_manager/kubernetes_manager.go +++ b/plugins/logging/pkg/gateway/drivers/management/kubernetes_manager/kubernetes_manager.go @@ -6,12 +6,11 @@ import ( "os" "time" - "log/slog" - "github.com/lestrrat-go/backoff/v2" "github.com/rancher/opni/apis" opnicorev1beta1 "github.com/rancher/opni/apis/core/v1beta1" loggingv1beta1 "github.com/rancher/opni/apis/logging/v1beta1" + "github.com/rancher/opni/pkg/logger" "github.com/rancher/opni/pkg/opensearch/certs" "github.com/rancher/opni/pkg/opensearch/opensearch" "github.com/rancher/opni/pkg/plugins/driverutil" @@ -54,7 +53,7 @@ type KubernetesManagerDriver struct { type KubernetesManagerDriverOptions struct { OpensearchCluster *opnimeta.OpensearchClusterRef `option:"opensearchCluster"` K8sClient client.Client `option:"k8sClient"` - Logger *slog.Logger `option:"logger"` + Context context.Context `option:"context"` } func 
NewKubernetesManagerDriver(options KubernetesManagerDriverOptions) (*KubernetesManagerDriver, error) { @@ -74,6 +73,8 @@ func NewKubernetesManagerDriver(options KubernetesManagerDriverOptions) (*Kubern } func (d *KubernetesManagerDriver) AdminPassword(ctx context.Context) (password []byte, retErr error) { + lg := logger.PluginLoggerFromContext(d.Context) + k8sOpensearchCluster := &loggingv1beta1.OpniOpensearch{} retErr = d.K8sClient.Get(ctx, types.NamespacedName{ @@ -81,7 +82,7 @@ func (d *KubernetesManagerDriver) AdminPassword(ctx context.Context) (password [ Namespace: d.OpensearchCluster.Namespace, }, k8sOpensearchCluster) if retErr != nil { - d.Logger.Error("failed to get opensearch cluster") + lg.Error("failed to get opensearch cluster") retErr = k8sutilerrors.GRPCFromK8s(retErr) return } @@ -101,13 +102,13 @@ func (d *KubernetesManagerDriver) AdminPassword(ctx context.Context) (password [ retErr = d.K8sClient.Create(ctx, secret) if retErr != nil { if !k8serrors.IsAlreadyExists(retErr) { - d.Logger.Error(fmt.Sprintf("failed to create secret: %v", retErr)) + lg.Error(fmt.Sprintf("failed to create secret: %v", retErr)) retErr = k8sutilerrors.GRPCFromK8s(retErr) return } retErr = d.K8sClient.Get(ctx, client.ObjectKeyFromObject(secret), secret) if retErr != nil { - d.Logger.Error(fmt.Sprintf("failed to get existing secret: %v", retErr)) + lg.Error(fmt.Sprintf("failed to get existing secret: %v", retErr)) retErr = k8sutilerrors.GRPCFromK8s(retErr) return } @@ -173,10 +174,11 @@ FETCH: } func (d *KubernetesManagerDriver) DeleteCluster(ctx context.Context) error { + lg := logger.PluginLoggerFromContext(d.Context) loggingClusters := &opnicorev1beta1.LoggingClusterList{} err := d.K8sClient.List(ctx, loggingClusters, client.InNamespace(d.OpensearchCluster.Namespace)) if err != nil { - d.Logger.Error(fmt.Sprintf("failed to list logging clusters: %v", err)) + lg.Error(fmt.Sprintf("failed to list logging clusters: %v", err)) return k8sutilerrors.GRPCFromK8s(err) } @@ -193,13 +195,14 @@ func (d *KubernetesManagerDriver) DeleteCluster(ctx context.Context) error { err = d.K8sClient.Delete(ctx, cluster) if err != nil { - d.Logger.Error(fmt.Sprintf("failed to delete cluster: %v", err)) + lg.Error(fmt.Sprintf("failed to delete cluster: %v", err)) return k8sutilerrors.GRPCFromK8s(err) } return nil } func (d *KubernetesManagerDriver) GetCluster(ctx context.Context) (*loggingadmin.OpensearchClusterV2, error) { + lg := logger.PluginLoggerFromContext(d.Context) cluster := &loggingv1beta1.OpniOpensearch{} if err := d.K8sClient.Get(ctx, types.NamespacedName{ Name: d.OpensearchCluster.Name, @@ -208,7 +211,7 @@ func (d *KubernetesManagerDriver) GetCluster(ctx context.Context) (*loggingadmin if k8serrors.IsNotFound(err) { return &loggingadmin.OpensearchClusterV2{}, nil } - d.Logger.Error(fmt.Sprintf("failed to fetch cluster: %v", err)) + lg.Error(fmt.Sprintf("failed to fetch cluster: %v", err)) return nil, k8sutilerrors.GRPCFromK8s(err) } @@ -239,6 +242,7 @@ func (d *KubernetesManagerDriver) CreateOrUpdateCluster( opniVersion string, natName string, ) error { + lg := logger.PluginLoggerFromContext(d.Context) err := d.storeS3Credentials(ctx, cluster.GetS3().GetCredentials()) if err != nil { return err @@ -252,7 +256,7 @@ func (d *KubernetesManagerDriver) CreateOrUpdateCluster( }, k8sOpensearchCluster) if err != nil { if !k8serrors.IsNotFound(err) { - d.Logger.Error(fmt.Sprintf("failed to fetch cluster: %v", err)) + lg.Error(fmt.Sprintf("failed to fetch cluster: %v", err)) return k8sutilerrors.GRPCFromK8s(err) } 
exists = false @@ -301,7 +305,7 @@ func (d *KubernetesManagerDriver) CreateOrUpdateCluster( err = d.K8sClient.Create(ctx, k8sOpensearchCluster) if err != nil { - d.Logger.Error(fmt.Sprintf("failed to create cluster: %v", err)) + lg.Error(fmt.Sprintf("failed to create cluster: %v", err)) return k8sutilerrors.GRPCFromK8s(err) } return nil @@ -331,13 +335,14 @@ func (d *KubernetesManagerDriver) CreateOrUpdateCluster( }) if err != nil { - d.Logger.Error(fmt.Sprintf("failed to update cluster: %v", err)) + lg.Error(fmt.Sprintf("failed to update cluster: %v", err)) return k8sutilerrors.GRPCFromK8s(err) } return nil } func (d *KubernetesManagerDriver) UpgradeAvailable(ctx context.Context, opniVersion string) (bool, error) { + lg := logger.PluginLoggerFromContext(d.Context) k8sOpensearchCluster := &loggingv1beta1.OpniOpensearch{} err := d.K8sClient.Get(ctx, types.NamespacedName{ @@ -346,10 +351,10 @@ func (d *KubernetesManagerDriver) UpgradeAvailable(ctx context.Context, opniVers }, k8sOpensearchCluster) if err != nil { if k8serrors.IsNotFound(err) { - d.Logger.Error("opensearch cluster does not exist") + lg.Error("opensearch cluster does not exist") return false, loggingerrors.WrappedGetPrereqFailed(err) } - d.Logger.Error(fmt.Sprintf("failed to fetch opensearch cluster: %v", err)) + lg.Error(fmt.Sprintf("failed to fetch opensearch cluster: %v", err)) return false, k8sutilerrors.GRPCFromK8s(err) } @@ -368,6 +373,7 @@ func (d *KubernetesManagerDriver) UpgradeAvailable(ctx context.Context, opniVers } func (d *KubernetesManagerDriver) DoUpgrade(ctx context.Context, opniVersion string) error { + lg := logger.PluginLoggerFromContext(d.Context) k8sOpensearchCluster := &loggingv1beta1.OpniOpensearch{ ObjectMeta: metav1.ObjectMeta{ Name: d.OpensearchCluster.Name, @@ -396,16 +402,17 @@ func (d *KubernetesManagerDriver) DoUpgrade(ctx context.Context, opniVersion str return d.K8sClient.Update(ctx, k8sOpensearchCluster) }) if err != nil { - d.Logger.Error(fmt.Sprintf("failed to update opensearch cluster: %v", err)) + lg.Error(fmt.Sprintf("failed to update opensearch cluster: %v", err)) return k8sutilerrors.GRPCFromK8s(err) } return nil } func (d *KubernetesManagerDriver) GetStorageClasses(ctx context.Context) ([]string, error) { + lg := logger.PluginLoggerFromContext(d.Context) storageClasses := &storagev1.StorageClassList{} if err := d.K8sClient.List(ctx, storageClasses); err != nil { - d.Logger.Error(fmt.Sprintf("failed to list storageclasses: %v", err)) + lg.Error(fmt.Sprintf("failed to list storageclasses: %v", err)) return nil, k8sutilerrors.GRPCFromK8s(err) } @@ -422,6 +429,7 @@ func (d *KubernetesManagerDriver) CreateOrUpdateSnapshotSchedule( snapshot *loggingadmin.SnapshotSchedule, defaultIndices []string, ) error { + lg := logger.PluginLoggerFromContext(d.Context) repo := &loggingv1beta1.OpensearchRepository{} err := d.K8sClient.Get(ctx, types.NamespacedName{ Name: d.OpensearchCluster.Name, @@ -429,10 +437,10 @@ func (d *KubernetesManagerDriver) CreateOrUpdateSnapshotSchedule( }, repo) if err != nil { if k8serrors.IsNotFound(err) { - d.Logger.Error("opensearch repository does not exist") + lg.Error("opensearch repository does not exist") return loggingerrors.WrappedGetPrereqFailed(err) } - d.Logger.Error(fmt.Sprintf("failed to list opensearch repositories: %v", err)) + lg.Error(fmt.Sprintf("failed to list opensearch repositories: %v", err)) return k8sutilerrors.GRPCFromK8s(err) } @@ -448,13 +456,14 @@ func (d *KubernetesManagerDriver) GetSnapshotSchedule( ref *loggingadmin.SnapshotReference, 
defaultIndices []string, ) (*loggingadmin.SnapshotSchedule, error) { + lg := logger.PluginLoggerFromContext(d.Context) snapshot := &loggingv1beta1.RecurringSnapshot{} err := d.K8sClient.Get(ctx, types.NamespacedName{ Name: ref.GetName(), Namespace: d.OpensearchCluster.Namespace, }, snapshot) if err != nil { - d.Logger.Error(fmt.Sprintf("failed to fetch snapshot: %v", err)) + lg.Error(fmt.Sprintf("failed to fetch snapshot: %v", err)) return nil, k8sutilerrors.GRPCFromK8s(err) } @@ -480,6 +489,7 @@ func (d *KubernetesManagerDriver) GetSnapshotSchedule( } func (d *KubernetesManagerDriver) DeleteSnapshotSchedule(ctx context.Context, ref *loggingadmin.SnapshotReference) error { + lg := logger.PluginLoggerFromContext(d.Context) err := d.K8sClient.Delete(ctx, &loggingv1beta1.Snapshot{ ObjectMeta: metav1.ObjectMeta{ Name: ref.GetName(), @@ -487,7 +497,7 @@ func (d *KubernetesManagerDriver) DeleteSnapshotSchedule(ctx context.Context, re }, }) if client.IgnoreNotFound(err) != nil { - d.Logger.Error(fmt.Sprintf("failed to delete snapshot: %v", err)) + lg.Error(fmt.Sprintf("failed to delete snapshot: %v", err)) return k8sutilerrors.GRPCFromK8s(err) } @@ -498,7 +508,7 @@ func (d *KubernetesManagerDriver) DeleteSnapshotSchedule(ctx context.Context, re }, }) if client.IgnoreNotFound(err) != nil { - d.Logger.Error(fmt.Sprintf("failed to delete snapshot: %v", err)) + lg.Error(fmt.Sprintf("failed to delete snapshot: %v", err)) return k8sutilerrors.GRPCFromK8s(err) } @@ -517,12 +527,13 @@ func (d *KubernetesManagerDriver) ListAllSnapshotSchedules(ctx context.Context) } func init() { - management.Drivers.Register("kubernetes-manager", func(_ context.Context, opts ...driverutil.Option) (management.ClusterDriver, error) { + management.Drivers.Register("kubernetes-manager", func(ctx context.Context, opts ...driverutil.Option) (management.ClusterDriver, error) { options := KubernetesManagerDriverOptions{ OpensearchCluster: &opnimeta.OpensearchClusterRef{ Name: "opni", Namespace: os.Getenv("POD_NAMESPACE"), }, + Context: ctx, } driverutil.ApplyOptions(&options, opts...) return NewKubernetesManagerDriver(options) diff --git a/plugins/logging/pkg/gateway/drivers/management/kubernetes_manager/kubernetes_manager_test.go b/plugins/logging/pkg/gateway/drivers/management/kubernetes_manager/kubernetes_manager_test.go index 081bc3b170..fcf76b8be1 100644 --- a/plugins/logging/pkg/gateway/drivers/management/kubernetes_manager/kubernetes_manager_test.go +++ b/plugins/logging/pkg/gateway/drivers/management/kubernetes_manager/kubernetes_manager_test.go @@ -10,6 +10,7 @@ import ( . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" loggingv1beta1 "github.com/rancher/opni/apis/logging/v1beta1" + "github.com/rancher/opni/pkg/logger" "github.com/rancher/opni/pkg/test/testlog" opnimeta "github.com/rancher/opni/pkg/util/meta" "github.com/rancher/opni/plugins/logging/apis/loggingadmin" @@ -134,7 +135,7 @@ var _ = Describe("Opensearch Admin V2", Ordered, Label("integration"), func() { kubernetes_manager.KubernetesManagerDriverOptions{ K8sClient: k8sClient, OpensearchCluster: opniCluster, - Logger: testlog.Log, + Context: logger.WithPluginLogger(context.Background(), testlog.Log), }, ) Expect(err).NotTo(HaveOccurred()) diff --git a/plugins/logging/pkg/gateway/plugin.go b/plugins/logging/pkg/gateway/plugin.go index dc35b951da..fe2fe389d7 100644 --- a/plugins/logging/pkg/gateway/plugin.go +++ b/plugins/logging/pkg/gateway/plugin.go @@ -4,7 +4,6 @@ import ( "context" "errors" "fmt" - "log/slog" "os" "github.com/dbason/featureflags" @@ -55,7 +54,6 @@ type Plugin struct { opensearch.UnsafeOpensearchServer system.UnimplementedSystemPluginClient ctx context.Context - logger *slog.Logger storageBackend future.Future[storage.Backend] kv future.Future[system.KeyValueStoreClient] mgmtApi future.Future[managementv1.ManagementClient] @@ -127,27 +125,31 @@ func NewPlugin(ctx context.Context, opts ...PluginOption) *Plugin { } } - lg := logger.NewPluginLogger().WithGroup("logging") + lg := logger.NewPluginLogger(ctx).WithGroup("logging") + ctx = logger.WithPluginLogger(ctx, lg) + logForwarderLg := lg.WithGroup("otel-logs-forwarder") + traceForwarderLg := lg.WithGroup("otel-trace-forwarder") + backendLg := lg.WithGroup("logging-backend") + opensearchMgrLg := lg.WithGroup("opensearch-manager") kv := future.New[system.KeyValueStoreClient]() p := &Plugin{ PluginOptions: options, ctx: ctx, - logger: lg, storageBackend: future.New[storage.Backend](), mgmtApi: future.New[managementv1.ManagementClient](), uninstallController: future.New[*task.Controller](), kv: kv, alertingServer: alerting.NewAlertingManagementServer(), opensearchManager: opensearchdata.NewManager( - lg.WithGroup("opensearch-manager"), + logger.WithPluginLogger(ctx, opensearchMgrLg), kv, ), delegate: future.New[streamext.StreamDelegate[agent.ClientSet]](), otelForwarder: otel.NewForwarder( otel.NewLogsForwarder( - otel.WithLogger(lg.WithGroup("otel-logs-forwarder")), + logger.WithPluginLogger(ctx, logForwarderLg), otel.WithAddress(fmt.Sprintf( "%s:%d", preprocessor.PreprocessorServiceName(opniopensearch.OpniPreprocessingInstanceName), @@ -157,7 +159,7 @@ func NewPlugin(ctx context.Context, opts ...PluginOption) *Plugin { otel.WithPrivileged(true), ), otel.NewTraceForwarder( - otel.WithLogger(lg.WithGroup("otel-trace-forwarder")), + logger.WithPluginLogger(ctx, traceForwarderLg), otel.WithAddress(fmt.Sprintf( "%s:%d", preprocessor.PreprocessorServiceName(opniopensearch.OpniPreprocessingInstanceName), @@ -176,7 +178,7 @@ func NewPlugin(ctx context.Context, opts ...PluginOption) *Plugin { delegate streamext.StreamDelegate[agent.ClientSet], ) { p.logging.Initialize(backend.LoggingBackendConfig{ - Logger: p.logger.WithGroup("logging-backend"), + Context: logger.WithPluginLogger(p.ctx, backendLg), // context nil, causes panic StorageBackend: storageBackend, UninstallController: uninstallController, MgmtClient: mgmtClient, @@ -200,12 +202,13 @@ func Scheme(ctx context.Context) meta.Scheme { scheme := meta.NewScheme(meta.WithMode(meta.ModeGateway)) p := NewPlugin(ctx) - p.logger.Info("logging plugin enabled") + lg := logger.PluginLoggerFromContext(p.ctx) + 
lg.Info("logging plugin enabled") restconfig, err := rest.InClusterConfig() if err != nil { if !errors.Is(err, rest.ErrNotInCluster) { - p.logger.Error(fmt.Sprintf("failed to create config: %s", err)) + lg.Error(fmt.Sprintf("failed to create config: %s", err)) os.Exit(1) } } @@ -226,12 +229,12 @@ func Scheme(ctx context.Context) meta.Scheme { var ok bool backendDriverBuilder, ok := backenddriver.Drivers.Get(driverName) if !ok { - p.logger.Error(fmt.Sprintf("could not find backend driver %q", driverName)) + lg.Error(fmt.Sprintf("could not find backend driver %q", driverName)) os.Exit(1) } managementDriverBuilder, ok := managementdriver.Drivers.Get(driverName) if !ok { - p.logger.Error(fmt.Sprintf("could not find management driver %q", driverName)) + lg.Error(fmt.Sprintf("could not find management driver %q", driverName)) os.Exit(1) } @@ -239,17 +242,17 @@ func Scheme(ctx context.Context) meta.Scheme { driverutil.NewOption("restConfig", p.restconfig), driverutil.NewOption("namespace", p.storageNamespace), driverutil.NewOption("opensearchCluster", p.opensearchCluster), - driverutil.NewOption("logger", p.logger), + driverutil.NewOption("logger", lg), } p.backendDriver, err = backendDriverBuilder(ctx, driverOptions...) if err != nil { - p.logger.Error(fmt.Sprintf("failed to create backend driver: %v", err)) + lg.Error(fmt.Sprintf("failed to create backend driver: %v", err)) os.Exit(1) } p.managementDriver, err = managementDriverBuilder(ctx, driverOptions...) if err != nil { - p.logger.Error(fmt.Sprintf("failed to create management driver: %v", err)) + lg.Error(fmt.Sprintf("failed to create management driver: %v", err)) os.Exit(1) } @@ -260,14 +263,14 @@ func Scheme(ctx context.Context) meta.Scheme { go p.alertingServer.SetClient(loggingManager.managementDriver.NewOpensearchClientForCluster) err = loggingManager.createInitialAdmin() if err != nil { - p.logger.Warn(fmt.Sprintf("failed to create initial admin: %v", err)) + lg.Warn(fmt.Sprintf("failed to create initial admin: %v", err)) } p.otelForwarder.BackgroundInitClient() } scheme.Add(system.SystemPluginID, system.NewPlugin(p)) scheme.Add(capability.CapabilityBackendPluginID, capability.NewPlugin(&p.logging)) - scheme.Add(streamext.StreamAPIExtensionPluginID, streamext.NewGatewayPlugin(p)) + scheme.Add(streamext.StreamAPIExtensionPluginID, streamext.NewGatewayPlugin(ctx, p)) scheme.Add( managementext.ManagementAPIExtensionPluginID, managementext.NewPlugin( @@ -282,10 +285,11 @@ func Scheme(ctx context.Context) meta.Scheme { } func (p *Plugin) NewLoggingManagerForPlugin() *LoggingManagerV2 { + lg := logger.PluginLoggerFromContext(p.ctx).WithGroup("opensearch-manager") return &LoggingManagerV2{ + ctx: logger.WithPluginLogger(p.ctx, lg), managementDriver: p.managementDriver, backendDriver: p.backendDriver, - logger: p.logger.WithGroup("opensearch-manager"), alertingServer: p.alertingServer, opensearchManager: p.opensearchManager, storageNamespace: p.storageNamespace, diff --git a/plugins/logging/pkg/gateway/system.go b/plugins/logging/pkg/gateway/system.go index be1914d615..63d5346a29 100644 --- a/plugins/logging/pkg/gateway/system.go +++ b/plugins/logging/pkg/gateway/system.go @@ -7,6 +7,7 @@ import ( opnicorev1 "github.com/rancher/opni/pkg/apis/core/v1" managementv1 "github.com/rancher/opni/pkg/apis/management/v1" "github.com/rancher/opni/pkg/config/v1beta1" + "github.com/rancher/opni/pkg/logger" "github.com/rancher/opni/pkg/machinery" "github.com/rancher/opni/pkg/plugins/apis/system" "github.com/rancher/opni/pkg/task" @@ -18,10 +19,11 @@ 
import ( ) func (p *Plugin) UseManagementAPI(client managementv1.ManagementClient) { + lg := logger.PluginLoggerFromContext(p.ctx) p.mgmtApi.Set(client) cfg, err := client.GetConfig(context.Background(), &emptypb.Empty{}, grpc.WaitForReady(true)) if err != nil { - p.logger.With( + lg.With( "err", err, ).Error("failed to get config") os.Exit(1) @@ -29,7 +31,7 @@ func (p *Plugin) UseManagementAPI(client managementv1.ManagementClient) { objectList, err := machinery.LoadDocuments(cfg.Documents) if err != nil { - p.logger.With( + lg.With( "err", err, ).Error("failed to load config") os.Exit(1) @@ -40,7 +42,7 @@ func (p *Plugin) UseManagementAPI(client managementv1.ManagementClient) { objectList.Visit(func(config *v1beta1.GatewayConfig) { backend, err := machinery.ConfigureStorageBackend(p.ctx, &config.Spec.Storage) if err != nil { - p.logger.With( + lg.With( "err", err, ).Error("failed to configure storage backend") os.Exit(1) @@ -52,15 +54,16 @@ func (p *Plugin) UseManagementAPI(client managementv1.ManagementClient) { func (p *Plugin) UseKeyValueStore(client system.KeyValueStoreClient) { p.kv.Set(client) + lg := logger.PluginLoggerFromContext(p.ctx).WithGroup("uninstaller") ctrl, err := task.NewController(p.ctx, "uninstall", system.NewKVStoreClient[*opnicorev1.TaskStatus](client), &UninstallTaskRunner{ + ctx: logger.WithPluginLogger(p.ctx, lg), storageNamespace: p.storageNamespace, opensearchManager: p.opensearchManager, backendDriver: p.backendDriver, storageBackend: p.storageBackend, - logger: p.logger.WithGroup("uninstaller"), }) if err != nil { - p.logger.With( + lg.With( "err", err, ).Error("failed to create task controller") os.Exit(1) diff --git a/plugins/logging/pkg/gateway/uninstall.go b/plugins/logging/pkg/gateway/uninstall.go index 8d29da8bd5..6f5623f66f 100644 --- a/plugins/logging/pkg/gateway/uninstall.go +++ b/plugins/logging/pkg/gateway/uninstall.go @@ -21,11 +21,11 @@ import ( type UninstallTaskRunner struct { uninstall.DefaultPendingHandler + ctx context.Context storageNamespace string opensearchManager *opensearchdata.Manager backendDriver backenddriver.ClusterDriver storageBackend future.Future[storage.Backend] - logger *slog.Logger } func (a *UninstallTaskRunner) OnTaskRunning(ctx context.Context, ti task.ActiveTask) error { diff --git a/plugins/logging/pkg/opensearchdata/admin.go b/plugins/logging/pkg/opensearchdata/admin.go index b0cdb3bb8a..9f45952670 100644 --- a/plugins/logging/pkg/opensearchdata/admin.go +++ b/plugins/logging/pkg/opensearchdata/admin.go @@ -7,6 +7,7 @@ import ( "github.com/lestrrat-go/backoff/v2" "github.com/opensearch-project/opensearch-go/opensearchutil" + "github.com/rancher/opni/pkg/logger" opensearchtypes "github.com/rancher/opni/pkg/opensearch/opensearch/types" "github.com/rancher/opni/pkg/plugins/apis/system" ) @@ -18,6 +19,7 @@ const ( ) func (m *Manager) CreateInitialAdmin(password []byte, readyFunc ...ReadyFunc) { + lg := logger.PluginLoggerFromContext(m.ctx) m.WaitForInit() //Check if it's been created already for idempotence @@ -31,14 +33,14 @@ func (m *Manager) CreateInitialAdmin(password []byte, readyFunc ...ReadyFunc) { Value: []byte(initialAdminPending), }) if err != nil { - m.logger.Warn(fmt.Sprintf("failed to store initial admin state: %v", err)) + lg.Warn(fmt.Sprintf("failed to store initial admin state: %v", err)) } m.adminInitStateRW.Unlock() for _, r := range readyFunc { exitEarly := r() if exitEarly { - m.logger.Warn("opensearch cluster is never able to receive queries") + lg.Warn("opensearch cluster is never able to receive 
queries") return } @@ -68,12 +70,12 @@ CREATE: for { select { case <-b.Done(): - m.logger.Warn("context cancelled before admin user created") + lg.Warn("context cancelled before admin user created") return case <-b.Next(): err := m.maybeCreateUser(ctx, user) if err != nil { - m.logger.Error(fmt.Sprintf("failed to create admin user: %v", err)) + lg.Error(fmt.Sprintf("failed to create admin user: %v", err)) continue } break CREATE @@ -86,7 +88,7 @@ CREATE: Value: []byte(initialAdminCreated), }) if err != nil { - m.logger.Warn(fmt.Sprintf("failed to store initial admin state: %v", err)) + lg.Warn(fmt.Sprintf("failed to store initial admin state: %v", err)) } m.adminInitStateRW.Unlock() } @@ -108,13 +110,14 @@ func (m *Manager) userExists(ctx context.Context, name string) (bool, error) { } func (m *Manager) maybeCreateUser(ctx context.Context, user opensearchtypes.UserSpec) error { - m.logger.Debug("creating opensearch admin user") + lg := logger.PluginLoggerFromContext(m.ctx) + lg.Debug("creating opensearch admin user") exists, err := m.userExists(ctx, user.UserName) if err != nil { return err } if exists { - m.logger.Debug("user already exists, doing nothing") + lg.Debug("user already exists, doing nothing") return nil } @@ -126,22 +129,23 @@ func (m *Manager) maybeCreateUser(ctx context.Context, user opensearchtypes.User if resp.IsError() { return fmt.Errorf("failed to create user: %s", resp.String()) } - m.logger.Debug(fmt.Sprintf("user successfully created: %s", resp.String())) + lg.Debug(fmt.Sprintf("user successfully created: %s", resp.String())) return nil } func (m *Manager) shouldCreateInitialAdmin() bool { + lg := logger.PluginLoggerFromContext(m.ctx) m.adminInitStateRW.RLock() defer m.adminInitStateRW.RUnlock() idExists, err := m.keyExists(initialAdminKey) if err != nil { - m.logger.Error(fmt.Sprintf("failed to check initial admin state: %v", err)) + lg.Error(fmt.Sprintf("failed to check initial admin state: %v", err)) return false } if !idExists { - m.logger.Debug("user creation not started, will install") + lg.Debug("user creation not started, will install") return true } @@ -149,19 +153,19 @@ func (m *Manager) shouldCreateInitialAdmin() bool { Key: fmt.Sprintf("%s%s", opensearchPrefix, initialAdminKey), }) if err != nil { - m.logger.Error(fmt.Sprintf("failed to check initial admin state: %v", err)) + lg.Error(fmt.Sprintf("failed to check initial admin state: %v", err)) return false } switch string(adminState.GetValue()) { case initialAdminPending: - m.logger.Debug("admin user creation is pending, restarting") + lg.Debug("admin user creation is pending, restarting") return true case initialAdminCreated: - m.logger.Debug("admin user already created, not restarting") + lg.Debug("admin user already created, not restarting") return false default: - m.logger.Error("invalid initial admin state returned") + lg.Error("invalid initial admin state returned") return false } } diff --git a/plugins/logging/pkg/opensearchdata/delete.go b/plugins/logging/pkg/opensearchdata/delete.go index d022aa5782..89148fbcdf 100644 --- a/plugins/logging/pkg/opensearchdata/delete.go +++ b/plugins/logging/pkg/opensearchdata/delete.go @@ -5,6 +5,7 @@ import ( "fmt" "strings" + "github.com/rancher/opni/pkg/logger" "github.com/rancher/opni/pkg/plugins/apis/system" "github.com/rancher/opni/pkg/util" loggingerrors "github.com/rancher/opni/plugins/logging/pkg/errors" @@ -16,12 +17,13 @@ import ( // to the generic task controller func (m *Manager) DoClusterDataDelete(ctx context.Context, id string, readyFunc 
...ReadyFunc) error { + lg := logger.PluginLoggerFromContext(m.ctx) m.WaitForInit() for _, r := range readyFunc { exitEarly := r() if exitEarly { - m.logger.Warn("opensearch cluster is never able to receive queries") + lg.Warn("opensearch cluster is never able to receive queries") return nil } @@ -65,13 +67,13 @@ func (m *Manager) DoClusterDataDelete(ctx context.Context, id string, readyFunc defer resp.Body.Close() if resp.IsError() { - m.logger.Error(fmt.Sprintf("opensearch request failed: %s", resp.String())) + lg.Error(fmt.Sprintf("opensearch request failed: %s", resp.String())) return loggingerrors.ErrOpensearchResponse } respString := util.ReadString(resp.Body) taskID := gjson.Get(respString, "task").String() - m.logger.Debug(fmt.Sprintf("opensearch taskID is :%s", taskID)) + lg.Debug(fmt.Sprintf("opensearch taskID is: %s", taskID)) _, err = m.systemKV.Get().Put(ctx, &system.PutRequest{ Key: fmt.Sprintf("%s%s", opensearchPrefix, id), Value: []byte(taskID), @@ -85,12 +87,13 @@ } func (m *Manager) DeleteTaskStatus(ctx context.Context, id string, readyFunc ...ReadyFunc) (DeleteStatus, error) { + lg := logger.PluginLoggerFromContext(m.ctx) m.WaitForInit() for _, r := range readyFunc { exitEarly := r() if exitEarly { - m.logger.Warn("opensearch cluster is never able to receive queries") + lg.Warn("opensearch cluster is never able to receive queries") return DeleteFinishedWithErrors, nil } } @@ -104,7 +107,7 @@ func (m *Manager) DeleteTaskStatus(ctx context.Context, id string, readyFunc ... } // If ID doesn't exist in KV set task to finished with errors if !idExists { - m.logger.Warn("could not find cluster id in KV store") + lg.Warn("could not find cluster id in KV store") return DeleteFinishedWithErrors, nil } @@ -118,7 +121,7 @@ func (m *Manager) DeleteTaskStatus(ctx context.Context, id string, readyFunc ... taskID := string(value.GetValue()) if taskID == pendingValue { - m.logger.Debug("kv status is pending") + lg.Debug("kv status is pending") return DeletePending, nil } @@ -139,7 +142,7 @@ func (m *Manager) DeleteTaskStatus(ctx context.Context, id string, readyFunc
case resp.IsError(): return DeleteError, loggingerrors.ErrOpensearchResponse case !gjson.Get(body, "completed").Bool(): - m.logger.Debug(body) + lg.Debug(body) return DeleteRunning, nil case len(gjson.Get(body, "response.failures").Array()) > 0: status = DeleteFinishedWithErrors diff --git a/plugins/logging/pkg/opensearchdata/opensearchdata.go b/plugins/logging/pkg/opensearchdata/opensearchdata.go index f50848010c..1717ef3590 100644 --- a/plugins/logging/pkg/opensearchdata/opensearchdata.go +++ b/plugins/logging/pkg/opensearchdata/opensearchdata.go @@ -8,7 +8,6 @@ import ( "github.com/rancher/opni/pkg/plugins/apis/system" "github.com/rancher/opni/pkg/util/future" loggingutil "github.com/rancher/opni/plugins/logging/pkg/util" - "log/slog" ) const ( @@ -44,16 +43,16 @@ type Manager struct { *loggingutil.AsyncOpensearchClient systemKV future.Future[system.KeyValueStoreClient] - logger *slog.Logger + ctx context.Context adminInitStateRW sync.RWMutex } -func NewManager(logger *slog.Logger, kv future.Future[system.KeyValueStoreClient]) *Manager { +func NewManager(ctx context.Context, kv future.Future[system.KeyValueStoreClient]) *Manager { return &Manager{ AsyncOpensearchClient: loggingutil.NewAsyncOpensearchClient(), systemKV: kv, - logger: logger, + ctx: ctx, } } diff --git a/plugins/logging/pkg/opensearchdata/snapshot.go b/plugins/logging/pkg/opensearchdata/snapshot.go index ff038d8d85..cc686e4395 100644 --- a/plugins/logging/pkg/opensearchdata/snapshot.go +++ b/plugins/logging/pkg/opensearchdata/snapshot.go @@ -7,11 +7,13 @@ import ( "time" "github.com/opensearch-project/opensearch-go/opensearchutil" + "github.com/rancher/opni/pkg/logger" opensearchtypes "github.com/rancher/opni/pkg/opensearch/opensearch/types" loggingerrors "github.com/rancher/opni/plugins/logging/pkg/errors" ) func (m *Manager) DoSnapshot(ctx context.Context, repository string, indices []string) error { + lg := logger.PluginLoggerFromContext(m.ctx) m.WaitForInit() snapshotName := fmt.Sprintf("upgrade-%s", time.Now().Format(time.UnixDate)) @@ -28,7 +30,7 @@ func (m *Manager) DoSnapshot(ctx context.Context, repository string, indices []s defer resp.Body.Close() if resp.IsError() { - m.logger.Error(fmt.Sprintf("opensearch request failed: %s", resp.String())) + lg.Error(fmt.Sprintf("opensearch request failed: %s", resp.String())) return loggingerrors.ErrOpensearchResponse } diff --git a/plugins/logging/pkg/opensearchdata/status.go b/plugins/logging/pkg/opensearchdata/status.go index 28c581b935..d7eb897f56 100644 --- a/plugins/logging/pkg/opensearchdata/status.go +++ b/plugins/logging/pkg/opensearchdata/status.go @@ -10,6 +10,7 @@ import ( ) func (m *Manager) GetClusterStatus() ClusterStatus { + lg := logger.PluginLoggerFromContext(m.ctx) if !m.IsInitialized() { return ClusterStatusNoClient } @@ -19,13 +20,13 @@ func (m *Manager) GetClusterStatus() ClusterStatus { resp, err := m.Client.Cluster.GetClusterHealth(context.TODO()) if err != nil { - m.logger.With(logger.Err(err)).Error("failed to fetch opensearch cluster status") + lg.With(logger.Err(err)).Error("failed to fetch opensearch cluster status") return ClusterStatusError } defer resp.Body.Close() if resp.IsError() { - m.logger.With("resp", resp.String).Error("failure response from cluster status") + lg.With("resp", resp.String()).Error("failure response from cluster status") return ClusterStatusError } @@ -39,7 +40,7 @@ func (m *Manager) GetClusterStatus() ClusterStatus { case "red": return ClusterStatusRed default: - m.logger.Error(fmt.Sprintf("unknown status: %s", status))
+ lg.Error(fmt.Sprintf("unknown status: %s", status)) return ClusterStatusError } } diff --git a/plugins/logging/pkg/otel/forwarder.go b/plugins/logging/pkg/otel/forwarder.go index ccbe4dbd58..e780db6201 100644 --- a/plugins/logging/pkg/otel/forwarder.go +++ b/plugins/logging/pkg/otel/forwarder.go @@ -1,10 +1,11 @@ package otel import ( + "context" + "github.com/gin-contrib/pprof" "github.com/gin-gonic/gin" "google.golang.org/grpc" - "log/slog" ) type Forwarder struct { @@ -22,7 +23,7 @@ func NewForwarder(logsForwarder *LogsForwarder, traceForwarder *TraceForwarder) type forwarderOptions struct { collectorAddressOverride string cc grpc.ClientConnInterface - lg *slog.Logger + ctx context.Context dialOptions []grpc.DialOption // privileged marks if the agent has a stream authorized clusterID available in @@ -50,9 +51,9 @@ func WithClientConn(cc grpc.ClientConnInterface) ForwarderOption { } } -func WithLogger(lg *slog.Logger) ForwarderOption { +func WithContext(ctx context.Context) ForwarderOption { return func(o *forwarderOptions) { - o.lg = lg + o.ctx = ctx } } diff --git a/plugins/logging/pkg/otel/forwarder_test.go b/plugins/logging/pkg/otel/forwarder_test.go index c051f245c2..6b7bf12cd0 100644 --- a/plugins/logging/pkg/otel/forwarder_test.go +++ b/plugins/logging/pkg/otel/forwarder_test.go @@ -4,6 +4,13 @@ import ( "bytes" "context" "fmt" + "net" + "net/http" + "os" + "path" + "text/template" + "time" + "github.com/google/uuid" "github.com/rancher/opni/pkg/test" "github.com/rancher/opni/plugins/logging/pkg/otel" @@ -15,12 +22,6 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/keepalive" - "net" - "net/http" - "os" - "path" - "text/template" - "time" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" @@ -82,6 +83,7 @@ var _ = Describe("OTEL forwarder", Ordered, Label("integration"), func() { By("setting up the forwarder") forwarder := otel.NewTraceForwarder( + ctx, otel.WithAddress(fmt.Sprintf("localhost:%d", preprocessorCfg.PreprocessorPort)), otel.WithDialOptions(grpc.WithTransportCredentials(insecure.NewCredentials())), otel.WithPrivileged(false), diff --git a/plugins/logging/pkg/otel/logs.go b/plugins/logging/pkg/otel/logs.go index d24d7807aa..1dbb7c06cb 100644 --- a/plugins/logging/pkg/otel/logs.go +++ b/plugins/logging/pkg/otel/logs.go @@ -36,10 +36,15 @@ type LogsForwarder struct { clientMu sync.RWMutex } -func NewLogsForwarder(opts ...ForwarderOption) *LogsForwarder { +func NewLogsForwarder(ctx context.Context, opts ...ForwarderOption) *LogsForwarder { + lg := logger.PluginLoggerFromContext(ctx) + if lg == nil { + lg = logger.NewPluginLogger(ctx).WithGroup("default-otel") + } + options := forwarderOptions{ collectorAddressOverride: defaultAddress, - lg: logger.NewPluginLogger().WithGroup("default-otel"), + ctx: logger.WithPluginLogger(ctx, lg), } options.apply(opts...) 
return &LogsForwarder{ @@ -57,6 +62,7 @@ func (f *LogsForwarder) SetClient(cc grpc.ClientConnInterface) { } func (f *LogsForwarder) initializeLogsForwarder() collogspb.LogsServiceClient { + lg := logger.PluginLoggerFromContext(f.ctx) if f.cc == nil { ctx := context.Background() expBackoff := backoff.Exponential( @@ -70,7 +76,7 @@ func (f *LogsForwarder) initializeLogsForwarder() collogspb.LogsServiceClient { for { select { case <-b.Done(): - f.lg.Warn("plugin context cancelled before gRPC client created") + lg.Warn("plugin context cancelled before gRPC client created") return nil case <-b.Next(): conn, err := grpc.Dial( @@ -78,7 +84,7 @@ func (f *LogsForwarder) initializeLogsForwarder() collogspb.LogsServiceClient { f.dialOptions..., ) if err != nil { - f.lg.Error(fmt.Sprintf("failed dial grpc: %v", err)) + lg.Error(fmt.Sprintf("failed dial grpc: %v", err)) continue } return collogspb.NewLogsServiceClient(conn) @@ -92,8 +98,9 @@ func (f *LogsForwarder) Export( ctx context.Context, request *collogspb.ExportLogsServiceRequest, ) (*collogspb.ExportLogsServiceResponse, error) { + lg := logger.PluginLoggerFromContext(f.ctx) if !f.Client.IsSet() { - f.lg.Error("collector is unavailable") + lg.Error("collector is unavailable") return nil, status.Errorf(codes.Unavailable, "collector is unavailable") } clusterID := cluster.StreamAuthorizedID(ctx) @@ -111,7 +118,7 @@ func (f *LogsForwarder) Export( } if len(values)%2 != 0 { - f.lg.Warn(fmt.Sprintf("invalid number of attribute values: %d", len(values))) + lg.Warn(fmt.Sprintf("invalid number of attribute values: %d", len(values))) return f.forwardLogs(ctx, request) } @@ -155,9 +162,10 @@ func (f *LogsForwarder) forwardLogs( ctx context.Context, request *collogspb.ExportLogsServiceRequest, ) (*collogspb.ExportLogsServiceResponse, error) { + lg := logger.PluginLoggerFromContext(f.ctx) resp, err := f.Client.Client.Export(ctx, request) if err != nil { - f.lg.Error("failed to forward logs: %v", logger.Err(err)) + lg.With(logger.Err(err)).Error("failed to forward logs") return nil, err } return resp, nil diff --git a/plugins/logging/pkg/otel/render.go b/plugins/logging/pkg/otel/render.go index 919f7c1ee7..fba58f207f 100644 --- a/plugins/logging/pkg/otel/render.go +++ b/plugins/logging/pkg/otel/render.go @@ -8,6 +8,7 @@ import ( "github.com/gin-gonic/gin" "github.com/gin-gonic/gin/render" + "github.com/rancher/opni/pkg/logger" "google.golang.org/protobuf/encoding/protojson" "google.golang.org/protobuf/proto" "google.golang.org/protobuf/reflect/protoreflect" @@ -45,9 +46,10 @@ func (p protoJSON) Render(w http.ResponseWriter) error { } func (f *LogsForwarder) renderProto(c *gin.Context) { + lg := logger.PluginLoggerFromContext(f.ctx) body, err := readBody(c) if err != nil { - f.lg.Error(fmt.Sprintf("failed to read body: %v", err)) + lg.Error(fmt.Sprintf("failed to read body: %v", err)) c.Status(http.StatusBadRequest) return } @@ -55,8 +57,8 @@ func (f *LogsForwarder) renderProto(c *gin.Context) { req := &collogspb.ExportLogsServiceRequest{} err = proto.Unmarshal(body, req) if err != nil { - f.lg.Error(fmt.Sprintf("failed to unmarshal body: %v", err)) - f.lg.Debug(fmt.Sprintf("body: %x", body)) + lg.Error(fmt.Sprintf("failed to unmarshal body: %v", err)) + lg.Debug(fmt.Sprintf("body: %x", body)) c.Status(http.StatusBadRequest) return } @@ -72,9 +74,10 @@ func (f *LogsForwarder) renderProto(c *gin.Context) { } func (f *LogsForwarder) renderProtoJSON(c *gin.Context) { + lg := logger.PluginLoggerFromContext(f.ctx) body, err := readBody(c) if err != nil { - 
f.lg.Error(fmt.Sprintf("failed to read body: %v", err)) + lg.Error(fmt.Sprintf("failed to read body: %v", err)) c.Status(http.StatusBadRequest) return } @@ -82,7 +85,7 @@ func (f *LogsForwarder) renderProtoJSON(c *gin.Context) { req := &collogspb.ExportLogsServiceRequest{} err = protojson.Unmarshal(body, req) if err != nil { - f.lg.Error(fmt.Sprintf("failed to unmarshal body: %v", err)) + lg.Error(fmt.Sprintf("failed to unmarshal body: %v", err)) c.Status(http.StatusBadRequest) return } @@ -98,9 +101,10 @@ func (f *LogsForwarder) renderProtoJSON(c *gin.Context) { } func (f *TraceForwarder) renderProto(c *gin.Context) { + lg := logger.PluginLoggerFromContext(f.ctx) body, err := readBody(c) if err != nil { - f.lg.Error("failed to read body: %v", err) + lg.Error(fmt.Sprintf("failed to read body: %v", err)) c.Status(http.StatusBadRequest) return } @@ -108,8 +112,8 @@ func (f *TraceForwarder) renderProto(c *gin.Context) { req := &coltracepb.ExportTraceServiceRequest{} err = proto.Unmarshal(body, req) if err != nil { - f.lg.Error("failed to unmarshal body: %v", err) - f.lg.Debug(fmt.Sprintf("body: %x", body)) + lg.Error(fmt.Sprintf("failed to unmarshal body: %v", err)) + lg.Debug(fmt.Sprintf("body: %x", body)) c.Status(http.StatusBadRequest) return } @@ -125,9 +129,10 @@ func (f *TraceForwarder) renderProto(c *gin.Context) { } func (f *TraceForwarder) renderProtoJSON(c *gin.Context) { + lg := logger.PluginLoggerFromContext(f.ctx) body, err := readBody(c) if err != nil { - f.lg.Error("failed to read body: %v", err) + lg.Error(fmt.Sprintf("failed to read body: %v", err)) c.Status(http.StatusBadRequest) return } @@ -135,7 +140,7 @@ func (f *TraceForwarder) renderProtoJSON(c *gin.Context) { req := &coltracepb.ExportTraceServiceRequest{} err = protojson.Unmarshal(body, req) if err != nil { - f.lg.Error("failed to unmarshal body: %v", err) + lg.Error(fmt.Sprintf("failed to unmarshal body: %v", err)) c.Status(http.StatusBadRequest) return } diff --git a/plugins/logging/pkg/otel/trace.go b/plugins/logging/pkg/otel/trace.go index 340240f894..a53fe758da 100644 --- a/plugins/logging/pkg/otel/trace.go +++ b/plugins/logging/pkg/otel/trace.go @@ -29,10 +29,15 @@ type TraceForwarder struct { clientMu sync.RWMutex } -func NewTraceForwarder(opts ...ForwarderOption) *TraceForwarder { +func NewTraceForwarder(ctx context.Context, opts ...ForwarderOption) *TraceForwarder { + lg := logger.PluginLoggerFromContext(ctx) + if lg == nil { + lg = logger.NewPluginLogger(ctx).WithGroup("default-otel") + } + options := forwarderOptions{ collectorAddressOverride: defaultAddress, - lg: logger.NewPluginLogger().WithGroup("default-otel"), + ctx: logger.WithPluginLogger(ctx, lg), } options.apply(opts...) 
return &TraceForwarder{ @@ -50,6 +55,7 @@ func (f *TraceForwarder) SetClient(cc grpc.ClientConnInterface) { } func (f *TraceForwarder) InitializeTraceForwarder() coltracepb.TraceServiceClient { + lg := logger.PluginLoggerFromContext(f.ctx) if f.cc == nil { ctx := context.Background() expBackoff := backoff.Exponential( @@ -63,7 +69,7 @@ func (f *TraceForwarder) InitializeTraceForwarder() coltracepb.TraceServiceClien for { select { case <-b.Done(): - f.lg.Warn("plugin context cancelled before gRPC client created") + lg.Warn("plugin context cancelled before gRPC client created") return nil case <-b.Next(): conn, err := grpc.Dial( @@ -71,7 +77,7 @@ func (f *TraceForwarder) InitializeTraceForwarder() coltracepb.TraceServiceClien f.dialOptions..., ) if err != nil { - f.lg.Error("failed dial grpc: %v", err) + lg.Error(fmt.Sprintf("failed dial grpc: %v", err)) continue } return coltracepb.NewTraceServiceClient(conn) @@ -85,8 +91,10 @@ func (f *TraceForwarder) Export( ctx context.Context, request *coltracepb.ExportTraceServiceRequest, ) (*coltracepb.ExportTraceServiceResponse, error) { + lg := logger.PluginLoggerFromContext(f.ctx) + if !f.Client.IsSet() { - f.lg.Error("collector is unavailable") + lg.Error("collector is unavailable") return nil, status.Errorf(codes.Unavailable, "collector is unavailable") } @@ -107,7 +115,7 @@ func (f *TraceForwarder) Export( } if len(values)%2 != 0 { - f.lg.Warn(fmt.Sprintf("invalid number of attribute values: %d", len(values))) + lg.Warn(fmt.Sprintf("invalid number of attribute values: %d", len(values))) return f.forwardTrace(ctx, request) } @@ -148,9 +156,11 @@ func (f *TraceForwarder) forwardTrace( ctx context.Context, request *coltracepb.ExportTraceServiceRequest, ) (*coltracepb.ExportTraceServiceResponse, error) { + lg := logger.PluginLoggerFromContext(f.ctx) + resp, err := f.Client.Client.Export(ctx, request) if err != nil { - f.lg.Error("failed to forward traces: %v", err) + lg.Error(fmt.Sprintf("failed to forward traces: %v", err)) return nil, err } return resp, nil diff --git a/plugins/metrics/pkg/agent/drivers/opni_manager_otel/otel_driver.go b/plugins/metrics/pkg/agent/drivers/opni_manager_otel/otel_driver.go index f8b3e89bdf..205a82a827 100644 --- a/plugins/metrics/pkg/agent/drivers/opni_manager_otel/otel_driver.go +++ b/plugins/metrics/pkg/agent/drivers/opni_manager_otel/otel_driver.go @@ -7,8 +7,6 @@ import ( "fmt" "os" - "log/slog" - "github.com/lestrrat-go/backoff/v2" opnicorev1beta1 "github.com/rancher/opni/apis/core/v1beta1" monitoringv1beta1 "github.com/rancher/opni/apis/monitoring/v1beta1" @@ -46,9 +44,9 @@ func (*OTELNodeDriver) ConfigureRuleGroupFinder(_ *v1beta1.RulesSpec) notifier.F var _ drivers.MetricsNodeDriver = (*OTELNodeDriver)(nil) type OTELNodeDriverOptions struct { - K8sClient client.Client `option:"k8sClient"` - Logger *slog.Logger `option:"logger"` - Namespace string `option:"namespace"` + K8sClient client.Client `option:"k8sClient"` + Context context.Context `option:"context"` + Namespace string `option:"namespace"` } func NewOTELDriver(options OTELNodeDriverOptions) (*OTELNodeDriver, error) { @@ -72,7 +70,7 @@ func NewOTELDriver(options OTELNodeDriverOptions) (*OTELNodeDriver, error) { } func (o *OTELNodeDriver) ConfigureNode(nodeId string, conf *node.MetricsCapabilityConfig) error { - lg := o.Logger.With("nodeId", nodeId) + lg := logger.PluginLoggerFromContext(o.Context).With("nodeId", nodeId) if o.state.GetRunning() { o.state.Cancel() } @@ -101,7 +99,7 @@ BACKOFF: client.ObjectKeyFromObject(obj.A).String(), obj.B)) - if err := 
reconcilerutil.ReconcileObject(lg, o.K8sClient, o.Namespace, obj); err != nil { + if err := reconcilerutil.ReconcileObject(o.Context, o.K8sClient, o.Namespace, obj); err != nil { lg.With( "object", client.ObjectKeyFromObject(obj.A).String(), logger.Err(err), @@ -139,6 +137,7 @@ func (o *OTELNodeDriver) DiscoverPrometheuses(_ context.Context, _ string) ([]*r func (o *OTELNodeDriver) buildMonitoringCollectorConfig( incomingSpec *node.OTELSpec, ) *monitoringv1beta1.CollectorConfig { + lg := logger.PluginLoggerFromContext(o.Context) collectorConfig := &monitoringv1beta1.CollectorConfig{ ObjectMeta: metav1.ObjectMeta{ Name: otel.MetricsCrdName, @@ -149,12 +148,14 @@ func (o *OTELNodeDriver) buildMonitoringCollectorConfig( OtelSpec: lo.FromPtrOr(node.CompatOTELStruct(incomingSpec), otel.OTELSpec{}), }, } - o.Logger.Debug(fmt.Sprintf("building %s", string(util.Must(json.Marshal(collectorConfig))))) + lg.Debug(fmt.Sprintf("building %s", string(util.Must(json.Marshal(collectorConfig))))) return collectorConfig } func (o *OTELNodeDriver) reconcileCollector(shouldExist bool) error { - o.Logger.Debug("reconciling collector") + lg := logger.PluginLoggerFromContext(o.Context) + + lg.Debug("reconciling collector") coll := &opnicorev1beta1.Collector{ ObjectMeta: metav1.ObjectMeta{ Name: otel.CollectorName, @@ -171,30 +172,30 @@ func (o *OTELNodeDriver) reconcileCollector(shouldExist bool) error { switch { case !collectorExists && shouldExist: - o.Logger.Debug("collector does not exist and should exist, creating") + lg.Debug("collector does not exist and should exist, creating") coll = o.buildEmptyCollector() coll.Spec.MetricsConfig = &corev1.LocalObjectReference{ Name: otel.MetricsCrdName, } return o.K8sClient.Create(context.TODO(), coll) case !collectorExists && !shouldExist: - o.Logger.Debug("collector does not exist and should not exist, skipping") + lg.Debug("collector does not exist and should not exist, skipping") return nil } err = retry.RetryOnConflict(retry.DefaultRetry, func() error { - o.Logger.Debug("updating collector with metrics config") + lg.Debug("updating collector with metrics config") err := o.K8sClient.Get(context.TODO(), client.ObjectKeyFromObject(coll), coll) if err != nil { return err } if shouldExist { - o.Logger.Debug("setting metrics config") + lg.Debug("setting metrics config") coll.Spec.MetricsConfig = &corev1.LocalObjectReference{ Name: otel.MetricsCrdName, } } else { - o.Logger.Debug("removing metrics config") + lg.Debug("removing metrics config") coll.Spec.MetricsConfig = nil } return o.K8sClient.Update(context.TODO(), coll) @@ -253,10 +254,12 @@ func (o *OTELNodeDriver) getAgentService() (*corev1.Service, error) { } func init() { - drivers.NodeDrivers.Register("opni-manager-otel", func(_ context.Context, opts ...driverutil.Option) (drivers.MetricsNodeDriver, error) { + drivers.NodeDrivers.Register("opni-manager-otel", func(ctx context.Context, opts ...driverutil.Option) (drivers.MetricsNodeDriver, error) { + lg := logger.PluginLoggerFromContext(ctx).WithGroup("metrics").WithGroup("otel") + options := OTELNodeDriverOptions{ Namespace: os.Getenv("POD_NAMESPACE"), - Logger: logger.NewPluginLogger().WithGroup("metrics").WithGroup("otel"), + Context: logger.WithPluginLogger(ctx, lg), } if err := driverutil.ApplyOptions(&options, opts...); err != nil { return nil, err diff --git a/plugins/metrics/pkg/agent/drivers/prometheus_operator/external_operator.go b/plugins/metrics/pkg/agent/drivers/prometheus_operator/external_operator.go index 35a1578009..48193a7489 100644 --- 
a/plugins/metrics/pkg/agent/drivers/prometheus_operator/external_operator.go +++ b/plugins/metrics/pkg/agent/drivers/prometheus_operator/external_operator.go @@ -5,8 +5,6 @@ import ( "fmt" "os" - "log/slog" - "github.com/lestrrat-go/backoff/v2" monitoringcoreosv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" monitoringcoreosv1alpha1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1alpha1" @@ -35,9 +33,9 @@ type ExternalPromOperatorDriver struct { } type ExternalPromOperatorDriverOptions struct { - K8sClient client.Client `option:"k8sClient"` - Logger *slog.Logger `option:"logger"` - Namespace string `option:"namespace"` + K8sClient client.Client `option:"k8sClient"` + Context context.Context `option:"context"` + Namespace string `option:"namespace"` } func NewExternalPromOperatorDriver(options ExternalPromOperatorDriverOptions) (*ExternalPromOperatorDriver, error) { @@ -60,7 +58,7 @@ func NewExternalPromOperatorDriver(options ExternalPromOperatorDriverOptions) (* } func (d *ExternalPromOperatorDriver) ConfigureNode(nodeId string, conf *node.MetricsCapabilityConfig) error { - lg := d.Logger.With("nodeId", nodeId) + lg := logger.PluginLoggerFromContext(d.Context).With("nodeId", nodeId) if d.state.GetRunning() { d.state.Cancel() } @@ -104,7 +102,7 @@ BACKOFF: for backoff.Continue(b) { for _, obj := range objList { lg.Debug(fmt.Sprintf("object : %s, should exist : %t", client.ObjectKeyFromObject(obj.A).String(), obj.B)) - if err := reconcilerutil.ReconcileObject(lg, d.K8sClient, d.Namespace, obj); err != nil { + if err := reconcilerutil.ReconcileObject(d.Context, d.K8sClient, d.Namespace, obj); err != nil { lg.With( "object", client.ObjectKeyFromObject(obj.A).String(), logger.Err(err), @@ -214,6 +212,8 @@ func (d *ExternalPromOperatorDriver) buildAdditionalScrapeConfigsSecret() *corev } func (d *ExternalPromOperatorDriver) serviceName() string { + lg := logger.PluginLoggerFromContext(d.Context) + list := &corev1.ServiceList{} err := d.K8sClient.List(context.TODO(), list, client.InNamespace(d.Namespace), @@ -222,11 +222,11 @@ func (d *ExternalPromOperatorDriver) serviceName() string { }, ) if err != nil { - d.Logger.Error("unable to list services, defaulting to opni-agent") + lg.Error("unable to list services, defaulting to opni-agent") return "opni-agent" } if len(list.Items) != 1 { - d.Logger.Error("unable to fetch service name, defaulting to opni-agent") + lg.Error("unable to fetch service name, defaulting to opni-agent") return "opni-agent" } return list.Items[0].Name @@ -308,8 +308,10 @@ func (d *ExternalPromOperatorDriver) DiscoverPrometheuses(ctx context.Context, n } func (d *ExternalPromOperatorDriver) ConfigureRuleGroupFinder(config *v1beta1.RulesSpec) notifier.Finder[rules.RuleGroup] { + lg := logger.PluginLoggerFromContext(d.Context) + if config.Discovery.PrometheusRules != nil { - opts := []prometheusrule.PrometheusRuleFinderOption{prometheusrule.WithLogger(d.Logger)} + opts := []prometheusrule.PrometheusRuleFinderOption{prometheusrule.WithLogger(lg)} if len(config.Discovery.PrometheusRules.SearchNamespaces) > 0 { opts = append(opts, prometheusrule.WithNamespaces(config.Discovery.PrometheusRules.SearchNamespaces...)) } @@ -319,10 +321,12 @@ func (d *ExternalPromOperatorDriver) ConfigureRuleGroupFinder(config *v1beta1.Ru } func init() { - drivers.NodeDrivers.Register("prometheus-operator", func(_ context.Context, opts ...driverutil.Option) (drivers.MetricsNodeDriver, error) { + drivers.NodeDrivers.Register("prometheus-operator", 
func(ctx context.Context, opts ...driverutil.Option) (drivers.MetricsNodeDriver, error) { + lg := logger.PluginLoggerFromContext(ctx).WithGroup("metrics").WithGroup("prometheus-operator") + options := ExternalPromOperatorDriverOptions{ Namespace: os.Getenv("POD_NAMESPACE"), - Logger: logger.NewPluginLogger().WithGroup("metrics").WithGroup("prometheus-operator"), + Context: logger.WithPluginLogger(ctx, lg), } if err := driverutil.ApplyOptions(&options, opts...); err != nil { return nil, err diff --git a/plugins/metrics/pkg/agent/drivers/util/util.go b/plugins/metrics/pkg/agent/drivers/util/util.go index d03b008857..7f0c415cea 100644 --- a/plugins/metrics/pkg/agent/drivers/util/util.go +++ b/plugins/metrics/pkg/agent/drivers/util/util.go @@ -5,10 +5,8 @@ import ( "errors" "sync" - "log/slog" - "github.com/cisco-open/k8s-objectmatcher/patch" - opnilogger "github.com/rancher/opni/pkg/logger" + "github.com/rancher/opni/pkg/logger" "github.com/samber/lo" appsv1 "k8s.io/api/apps/v1" k8serrors "k8s.io/apimachinery/pkg/api/errors" @@ -54,11 +52,11 @@ func (r *ReconcilerState) SetBackoffCtx(ctx context.Context, cancel context.Canc // (obj, shouldExist)) type ReconcileItem lo.Tuple2[client.Object, bool] -func ReconcileObject(logger *slog.Logger, k8sClient client.Client, namespace string, item ReconcileItem) error { +func ReconcileObject(ctx context.Context, k8sClient client.Client, namespace string, item ReconcileItem) error { desired, shouldExist := item.A, item.B // get the object key := client.ObjectKeyFromObject(desired) - lg := logger.With("object", key) + lg := logger.PluginLoggerFromContext(ctx).With("object", key) lg.Info("reconciling object") // get the agent statefulset @@ -103,21 +101,21 @@ func ReconcileObject(logger *slog.Logger, k8sClient client.Client, namespace str // update the object patchResult, err := patch.DefaultPatchMaker.Calculate(current, desired, patch.IgnoreStatusFields()) if err != nil { - logger.With( - opnilogger.Err(err), + lg.With( + logger.Err(err), ).Warn("could not match objects") return err } if patchResult.IsEmpty() { - logger.Info("resource is in sync") + lg.Info("resource is in sync") return nil } - logger.Info("resource diff") + lg.Info("resource diff") if err := patch.DefaultAnnotator.SetLastAppliedAnnotation(desired); err != nil { - logger.With( - opnilogger.Err(err), + lg.With( + logger.Err(err), ).Error("failed to set last applied annotation") } @@ -131,7 +129,7 @@ func ReconcileObject(logger *slog.Logger, k8sClient client.Client, namespace str return err } - logger.Info("updating resource") + lg.Info("updating resource") return k8sClient.Update(context.TODO(), desired) } diff --git a/plugins/metrics/pkg/agent/http.go b/plugins/metrics/pkg/agent/http.go index 599a9ffc08..567b31f8eb 100644 --- a/plugins/metrics/pkg/agent/http.go +++ b/plugins/metrics/pkg/agent/http.go @@ -13,7 +13,6 @@ import ( "github.com/rancher/opni/plugins/metrics/apis/remotewrite" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" - "log/slog" "github.com/rancher/opni/pkg/clients" "github.com/rancher/opni/pkg/health" @@ -23,7 +22,7 @@ import ( type HttpServer struct { apiextensions.UnsafeHTTPAPIExtensionServer - logger *slog.Logger + ctx context.Context remoteWriteClientMu sync.RWMutex remoteWriteClient clients.Locker[remotewrite.RemoteWriteClient] @@ -33,9 +32,9 @@ type HttpServer struct { enabled atomic.Bool } -func NewHttpServer(ct health.ConditionTracker, lg *slog.Logger) *HttpServer { +func NewHttpServer(ctx context.Context, ct health.ConditionTracker) *HttpServer { 
return &HttpServer{ - logger: lg, + ctx: ctx, conditions: ct, } } diff --git a/plugins/metrics/pkg/agent/node.go b/plugins/metrics/pkg/agent/node.go index 1bf9c55d89..a93829ca12 100644 --- a/plugins/metrics/pkg/agent/node.go +++ b/plugins/metrics/pkg/agent/node.go @@ -18,8 +18,6 @@ import ( "slices" - "log/slog" - capabilityv1 "github.com/rancher/opni/pkg/apis/capability/v1" controlv1 "github.com/rancher/opni/pkg/apis/control/v1" corev1 "github.com/rancher/opni/pkg/apis/core/v1" @@ -40,7 +38,7 @@ type MetricsNode struct { // we only need a subset of the methods remoteread.UnsafeRemoteReadAgentServer - logger *slog.Logger + ctx context.Context nodeClientMu sync.RWMutex nodeClient node.NodeMetricsCapabilityClient @@ -64,11 +62,11 @@ type MetricsNode struct { nodeDrivers []drivers.MetricsNodeDriver } -func NewMetricsNode(ct health.ConditionTracker, lg *slog.Logger) *MetricsNode { +func NewMetricsNode(ctx context.Context, ct health.ConditionTracker) *MetricsNode { mn := &MetricsNode{ - logger: lg, + ctx: ctx, conditions: ct, - targetRunner: NewTargetRunner(lg), + targetRunner: NewTargetRunner(ctx), } mn.conditions.AddListener(mn.sendHealthUpdate) mn.targetRunner.SetRemoteReaderClient(NewRemoteReader(&http.Client{})) @@ -77,23 +75,24 @@ } func (m *MetricsNode) sendHealthUpdate() { + lg := logger.PluginLoggerFromContext(m.ctx) // TODO this can be optimized to de-duplicate rapid updates m.healthListenerClientMu.RLock() defer m.healthListenerClientMu.RUnlock() if m.healthListenerClient != nil { health, err := m.GetHealth(context.TODO(), &emptypb.Empty{}) if err != nil { - m.logger.With( + lg.With( logger.Err(err), ).Warn("failed to get node health") return } if _, err := m.healthListenerClient.UpdateHealth(context.TODO(), health); err != nil { - m.logger.With( + lg.With( logger.Err(err), ).Warn("failed to send node health update") } else { - m.logger.Debug("sent node health update") + lg.Debug("sent node health update") } } } @@ -150,13 +149,15 @@ func (m *MetricsNode) Info(_ context.Context, _ *emptypb.Empty) (*capabilityv1.D // Implements capabilityv1.NodeServer func (m *MetricsNode) SyncNow(_ context.Context, req *capabilityv1.Filter) (*emptypb.Empty, error) { + lg := logger.PluginLoggerFromContext(m.ctx) + if len(req.CapabilityNames) > 0 { if !slices.Contains(req.CapabilityNames, wellknown.CapabilityMetrics) { - m.logger.Debug("ignoring sync request due to capability filter") + lg.Debug("ignoring sync request due to capability filter") return &emptypb.Empty{}, nil } } - m.logger.Debug("received sync request") + lg.Debug("received sync request") m.nodeClientMu.RLock() defer m.nodeClientMu.RUnlock() @@ -224,11 +225,13 @@ func (m *MetricsNode) GetTargetStatus(_ context.Context, request *remoteread.Tar } func (m *MetricsNode) Discover(ctx context.Context, request *remoteread.DiscoveryRequest) (*remoteread.DiscoveryResponse, error) { + lg := logger.PluginLoggerFromContext(m.ctx) + m.nodeDriverMu.RLock() defer m.nodeDriverMu.RUnlock() if len(m.nodeDrivers) == 0 { - m.logger.Warn("no node driver available for discvoery") + lg.Warn("no node driver available for discovery") return &remoteread.DiscoveryResponse{ Entries: []*remoteread.DiscoveryEntry{}, @@ -252,7 +255,9 @@ func (m *MetricsNode) Discover(ctx context.Context, request *remoteread.Discover } func (m *MetricsNode) doSync(ctx context.Context) { - m.logger.Debug("syncing metrics node") + lg := logger.PluginLoggerFromContext(m.ctx) + + lg.Debug("syncing metrics 
node") m.nodeClientMu.RLock() defer m.nodeClientMu.RUnlock() m.identityClientMu.RLock() @@ -283,9 +288,9 @@ func (m *MetricsNode) doSync(ctx context.Context) { switch syncResp.ConfigStatus { case node.ConfigStatus_UpToDate: - m.logger.Info("metrics node config is up to date") + lg.Info("metrics node config is up to date") case node.ConfigStatus_NeedsUpdate: - m.logger.Info("updating metrics node config") + lg.Info("updating metrics node config") if err := m.updateConfig(ctx, syncResp.UpdatedConfig); err != nil { m.conditions.Set(health.CondNodeDriver, health.StatusFailure, err.Error()) return @@ -297,14 +302,16 @@ func (m *MetricsNode) doSync(ctx context.Context) { // requires identityClientMu to be held (either R or W) func (m *MetricsNode) updateConfig(ctx context.Context, config *node.MetricsCapabilityConfig) error { + lg := logger.PluginLoggerFromContext(m.ctx) + id, err := m.identityClient.Whoami(ctx, &emptypb.Empty{}) if err != nil { - m.logger.With(logger.Err(err)).Error("error fetching node id", err) + lg.With(logger.Err(err)).Error("error fetching node id", err) return err } if !m.configMu.TryLock() { - m.logger.Debug("waiting on a previous config update to finish...") + lg.Debug("waiting on a previous config update to finish...") m.configMu.Lock() } defer m.configMu.Unlock() @@ -326,7 +333,7 @@ func (m *MetricsNode) updateConfig(ctx context.Context, config *node.MetricsCapa if err := eg.Error(); err != nil { m.config.Conditions = append(config.Conditions, err.Error()) - m.logger.With(logger.Err(err)).Error("node configuration error") + lg.With(logger.Err(err)).Error("node configuration error") return err } diff --git a/plugins/metrics/pkg/agent/plugin.go b/plugins/metrics/pkg/agent/plugin.go index ad7609790e..6de6cc0ff5 100644 --- a/plugins/metrics/pkg/agent/plugin.go +++ b/plugins/metrics/pkg/agent/plugin.go @@ -4,8 +4,6 @@ import ( "context" "fmt" - "log/slog" - healthpkg "github.com/rancher/opni/pkg/health" "github.com/rancher/opni/pkg/logger" httpext "github.com/rancher/opni/pkg/plugins/apis/apiextensions/http" @@ -20,8 +18,7 @@ import ( ) type Plugin struct { - ctx context.Context - logger *slog.Logger + ctx context.Context httpServer *HttpServer ruleStreamer *RuleStreamer @@ -31,16 +28,16 @@ type Plugin struct { } func NewPlugin(ctx context.Context) *Plugin { - lg := logger.NewPluginLogger().WithGroup("metrics") + lg := logger.NewPluginLogger(ctx).WithGroup("metrics") + ctx = logger.WithPluginLogger(ctx, lg) ct := healthpkg.NewDefaultConditionTracker(lg) p := &Plugin{ ctx: ctx, - logger: lg, - httpServer: NewHttpServer(ct, lg), - ruleStreamer: NewRuleStreamer(ct, lg), - node: NewMetricsNode(ct, lg), + httpServer: NewHttpServer(ctx, ct), + ruleStreamer: NewRuleStreamer(ctx, ct), + node: NewMetricsNode(ctx, ct), } for _, name := range drivers.NodeDrivers.List() { @@ -65,7 +62,7 @@ func NewPlugin(ctx context.Context) *Plugin { } func (p *Plugin) ConfigureNode(nodeId string, cfg *node.MetricsCapabilityConfig) error { - lg := p.logger.With("nodeId", nodeId) + lg := logger.PluginLoggerFromContext(p.ctx).With("nodeId", nodeId) lg.Debug("metrics capability config updated") // at this point, we know the config has been updated @@ -90,22 +87,22 @@ func (p *Plugin) ConfigureNode(nodeId string, cfg *node.MetricsCapabilityConfig) switch { case currentlyRunning && shouldRun: - p.logger.Debug("reconfiguring rule sync") + lg.Debug("reconfiguring rule sync") p.stopRuleStreamer() startRuleStreamer() case currentlyRunning && !shouldRun: - p.logger.Debug("stopping rule sync") + 
lg.Debug("stopping rule sync") p.stopRuleStreamer() p.stopRuleStreamer = nil - p.logger.Debug("disabling http server") + lg.Debug("disabling http server") p.httpServer.SetEnabled(false) case !currentlyRunning && shouldRun: - p.logger.Debug("starting rule sync") + lg.Debug("starting rule sync") startRuleStreamer() - p.logger.Debug("enabling http server") + lg.Debug("enabling http server") p.httpServer.SetEnabled(true) case !currentlyRunning && !shouldRun: - p.logger.Debug("rule sync is disabled") + lg.Debug("rule sync is disabled") } return nil @@ -113,10 +110,11 @@ func (p *Plugin) ConfigureNode(nodeId string, cfg *node.MetricsCapabilityConfig) func Scheme(ctx context.Context) meta.Scheme { scheme := meta.NewScheme(meta.WithMode(meta.ModeAgent)) + p := NewPlugin(ctx) scheme.Add(capability.CapabilityBackendPluginID, capability.NewAgentPlugin(p.node)) scheme.Add(health.HealthPluginID, health.NewPlugin(p.node)) - scheme.Add(stream.StreamAPIExtensionPluginID, stream.NewAgentPlugin(p)) + scheme.Add(stream.StreamAPIExtensionPluginID, stream.NewAgentPlugin(ctx, p)) scheme.Add(httpext.HTTPAPIExtensionPluginID, httpext.NewPlugin(p.httpServer)) return scheme } diff --git a/plugins/metrics/pkg/agent/rules.go b/plugins/metrics/pkg/agent/rules.go index fe40f5afff..3ddf9cdfeb 100644 --- a/plugins/metrics/pkg/agent/rules.go +++ b/plugins/metrics/pkg/agent/rules.go @@ -7,8 +7,6 @@ import ( "sync" "time" - "log/slog" - "github.com/rancher/opni/pkg/config/v1beta1" "github.com/rancher/opni/pkg/health" "github.com/rancher/opni/pkg/logger" @@ -21,15 +19,15 @@ import ( ) type RuleStreamer struct { - logger *slog.Logger + ctx context.Context remoteWriteClientMu sync.Mutex remoteWriteClient remotewrite.RemoteWriteClient conditions health.ConditionTracker } -func NewRuleStreamer(ct health.ConditionTracker, lg *slog.Logger) *RuleStreamer { +func NewRuleStreamer(ctx context.Context, ct health.ConditionTracker) *RuleStreamer { return &RuleStreamer{ - logger: lg, + ctx: ctx, conditions: ct, } } @@ -44,7 +42,7 @@ func (s *RuleStreamer) Run(ctx context.Context, config *v1beta1.RulesSpec, finde s.conditions.Set(node.CondRuleSync, health.StatusPending, "") defer s.conditions.Clear(node.CondRuleSync) - lg := s.logger + lg := logger.PluginLoggerFromContext(s.ctx) updateC, err := s.streamRuleGroupUpdates(ctx, config, finder) if err != nil { return err @@ -124,8 +122,9 @@ func (s *RuleStreamer) streamRuleGroupUpdates( config *v1beta1.RulesSpec, finder notifier.Finder[rules.RuleGroup], ) (<-chan [][]byte, error) { - s.logger.Debug("configuring rule discovery") - s.logger.Debug("rule discovery configured") + lg := logger.PluginLoggerFromContext(s.ctx) + lg.Debug("configuring rule discovery") + lg.Debug("rule discovery configured") searchInterval := time.Minute * 15 if interval := config.GetDiscovery().GetInterval(); interval != "" { duration, err := time.ParseDuration(interval) @@ -135,22 +134,22 @@ func (s *RuleStreamer) streamRuleGroupUpdates( searchInterval = duration } notifier := notifier.NewPeriodicUpdateNotifier(ctx, finder, searchInterval) - s.logger.With( + lg.With( "interval", searchInterval.String(), ).Debug("rule discovery notifier configured") notifierC := notifier.NotifyC(ctx) - s.logger.Debug("starting rule group update notifier") + lg.Debug("starting rule group update notifier") groupYamlDocs := make(chan [][]byte, cap(notifierC)) go func() { defer close(groupYamlDocs) for { ruleGroups, ok := <-notifierC if !ok { - s.logger.Debug("rule discovery channel closed") + lg.Debug("rule discovery channel closed") return 
} - s.logger.Debug("received updated rule groups from discovery") + lg.Debug("received updated rule groups from discovery") go func() { groupYamlDocs <- s.marshalRuleGroups(ruleGroups) }() @@ -160,11 +159,12 @@ func (s *RuleStreamer) streamRuleGroupUpdates( } func (s *RuleStreamer) marshalRuleGroups(ruleGroups []rules.RuleGroup) [][]byte { + lg := logger.PluginLoggerFromContext(s.ctx) yamlDocs := make([][]byte, 0, len(ruleGroups)) for _, ruleGroup := range ruleGroups { doc, err := yaml.Marshal(ruleGroup) if err != nil { - s.logger.With( + lg.With( logger.Err(err), "group", ruleGroup.Name, ).Error("failed to marshal rule group") diff --git a/plugins/metrics/pkg/agent/runner.go b/plugins/metrics/pkg/agent/runner.go index 4700dd666f..6783312981 100644 --- a/plugins/metrics/pkg/agent/runner.go +++ b/plugins/metrics/pkg/agent/runner.go @@ -136,10 +136,11 @@ type taskRunner struct { backoffPolicy backoff.Policy - logger *slog.Logger + ctx context.Context } -func newTaskRunner(logger *slog.Logger) *taskRunner { +func newTaskRunner(ctx context.Context) *taskRunner { + lg := logger.PluginLoggerFromContext(ctx).WithGroup("task-runner") return &taskRunner{ backoffPolicy: backoff.Exponential( backoff.WithMaxRetries(0), @@ -147,7 +148,7 @@ func newTaskRunner(logger *slog.Logger) *taskRunner { backoff.WithMaxInterval(5*time.Minute), backoff.WithMultiplier(1.1), ), - logger: logger.WithGroup("task-runner"), + ctx: logger.WithPluginLogger(ctx, lg), } } @@ -167,6 +168,7 @@ func (tr *taskRunner) OnTaskPending(_ context.Context, _ task.ActiveTask) error } func (tr *taskRunner) doPush(ctx context.Context, writeRequest *prompb.WriteRequest) error { + lg := logger.PluginLoggerFromContext(tr.ctx) expbackoff := tr.backoffPolicy.Start(ctx) for { @@ -194,7 +196,7 @@ func (tr *taskRunner) doPush(ctx context.Context, writeRequest *prompb.WriteRequ switch { case strings.Contains(err.Error(), "ingestion rate limit"): - tr.logger.With( + lg.With( logger.Err(err), ).Warn("failed to push to remote write, retrying...") default: @@ -316,7 +318,7 @@ type TargetRunner interface { } type taskingTargetRunner struct { - logger *slog.Logger + ctx context.Context runnerMu sync.RWMutex runner *taskRunner @@ -324,10 +326,10 @@ type taskingTargetRunner struct { controller *task.Controller } -func NewTargetRunner(logger *slog.Logger) TargetRunner { +func NewTargetRunner(ctx context.Context) TargetRunner { store := inmemory.NewKeyValueStore[*corev1.TaskStatus](util.ProtoClone) - runner := newTaskRunner(logger) + runner := newTaskRunner(ctx) controller, err := task.NewController(context.Background(), "target-runner", store, runner) if err != nil { @@ -335,13 +337,15 @@ func NewTargetRunner(logger *slog.Logger) TargetRunner { } return &taskingTargetRunner{ - logger: logger, + ctx: ctx, runner: runner, controller: controller, } } func (runner *taskingTargetRunner) Start(target *remoteread.Target, query *remoteread.Query) error { + lg := logger.PluginLoggerFromContext(runner.ctx) + if status, err := runner.controller.TaskStatus(target.Meta.Name); err != nil { if !strings.Contains(err.Error(), "not found") { return fmt.Errorf("error checking for target status: %s", err) @@ -362,12 +366,14 @@ func (runner *taskingTargetRunner) Start(target *remoteread.Target, query *remot return fmt.Errorf("could not run target: %w", err) } - runner.logger.Info(fmt.Sprintf("started target '%s'", target.Meta.Name)) + lg.Info(fmt.Sprintf("started target '%s'", target.Meta.Name)) return nil } func (runner *taskingTargetRunner) Stop(name string) error { + lg := 
logger.PluginLoggerFromContext(runner.ctx) + status, err := runner.controller.TaskStatus(name) if err != nil { return fmt.Errorf("target not found") @@ -380,7 +386,7 @@ func (runner *taskingTargetRunner) Stop(name string) error { runner.controller.CancelTask(name) - runner.logger.Info(fmt.Sprintf("stopped target '%s'", name)) + lg.Info(fmt.Sprintf("stopped target '%s'", name)) return nil } diff --git a/plugins/metrics/pkg/backend/capability.go b/plugins/metrics/pkg/backend/capability.go index f9ee4672b9..90543bd2ff 100644 --- a/plugins/metrics/pkg/backend/capability.go +++ b/plugins/metrics/pkg/backend/capability.go @@ -106,6 +106,7 @@ func (m *MetricsBackend) Status(_ context.Context, req *corev1.Reference) (*v1.N } func (m *MetricsBackend) Uninstall(ctx context.Context, req *v1.UninstallRequest) (*emptypb.Empty, error) { + lg := logger.PluginLoggerFromContext(m.Context) m.WaitForInit() cluster, err := m.MgmtClient.GetCluster(ctx, req.Cluster) @@ -166,7 +167,7 @@ func (m *MetricsBackend) Uninstall(ctx context.Context, req *v1.UninstallRequest return nil, fmt.Errorf("failed to update cluster metadata: %v", err) } if err := m.requestNodeSync(ctx, req.Cluster); err != nil { - m.Logger.With( + lg.With( logger.Err(err), "agent", req.Cluster, ).Warn("sync request failed; agent may not be updated immediately") diff --git a/plugins/metrics/pkg/backend/metrics.go b/plugins/metrics/pkg/backend/metrics.go index 5c3bacca25..e4ffd913e2 100644 --- a/plugins/metrics/pkg/backend/metrics.go +++ b/plugins/metrics/pkg/backend/metrics.go @@ -13,8 +13,6 @@ import ( "github.com/rancher/opni/plugins/metrics/apis/node" "github.com/rancher/opni/plugins/metrics/apis/remoteread" - "log/slog" - streamext "github.com/rancher/opni/pkg/plugins/apis/apiextensions/stream" "google.golang.org/grpc" "google.golang.org/grpc/codes" @@ -63,7 +61,7 @@ type MetricsAgentClientSet interface { } type MetricsBackendConfig struct { - Logger *slog.Logger `validate:"required"` + Context context.Context `validate:"required"` StorageBackend storage.Backend `validate:"required"` MgmtClient managementv1.ManagementClient `validate:"required"` UninstallController *task.Controller `validate:"required"` @@ -100,6 +98,7 @@ func (m *MetricsBackend) requestNodeSync(ctx context.Context, target *corev1.Ref } func (m *MetricsBackend) broadcastNodeSync(ctx context.Context) { + lg := logger.PluginLoggerFromContext(m.Context) // keep any metadata in the context, but don't propagate cancellation ctx = context.WithoutCancel(ctx) var errs []error @@ -118,7 +117,7 @@ func (m *MetricsBackend) broadcastNodeSync(ctx context.Context) { CapabilityNames: []string{wellknown.CapabilityMetrics}, }) if len(errs) > 0 { - m.Logger.With( + lg.With( logger.Err(errors.Join(errs...)), ).Warn("one or more agents failed to sync; they may not be updated immediately") } @@ -126,6 +125,7 @@ func (m *MetricsBackend) broadcastNodeSync(ctx context.Context) { // Implements node.NodeMetricsCapabilityServer func (m *MetricsBackend) Sync(ctx context.Context, req *node.SyncRequest) (*node.SyncResponse, error) { + lg := logger.PluginLoggerFromContext(m.Context) m.WaitForInit() id := cluster.StreamAuthorizedID(ctx) @@ -147,7 +147,7 @@ func (m *MetricsBackend) Sync(ctx context.Context, req *node.SyncRequest) (*node // auto-disable if cortex is not installed if err := m.ClusterDriver.ShouldDisableNode(cluster.Reference()); err != nil { reason := status.Convert(err).Message() - m.Logger.With( + lg.With( "reason", reason, ).Info("disabling metrics capability for node") enabled = false @@ 
-167,7 +167,7 @@ func (m *MetricsBackend) Sync(ctx context.Context, req *node.SyncRequest) (*node status.Enabled = req.GetCurrentConfig().GetEnabled() status.Conditions = req.GetCurrentConfig().GetConditions() status.LastSync = timestamppb.Now() - m.Logger.With( + lg.With( "id", id, "time", status.LastSync.AsTime(), ).Debug("synced node") @@ -204,11 +204,12 @@ func init() { } func (m *MetricsBackend) getDefaultNodeSpec(ctx context.Context) (*node.MetricsCapabilitySpec, error) { + lg := logger.PluginLoggerFromContext(m.Context) nodeSpec, err := m.KV.DefaultCapabilitySpec.Get(ctx) if status.Code(err) == codes.NotFound { nodeSpec = FallbackDefaultNodeSpec.Load() } else if err != nil { - m.Logger.With(logger.Err(err)).Error("failed to get default capability spec") + lg.With(logger.Err(err)).Error("failed to get default capability spec") return nil, status.Errorf(codes.Unavailable, "failed to get default capability spec: %v", err) } grpc.SetTrailer(ctx, node.DefaultConfigMetadata()) @@ -216,11 +217,12 @@ func (m *MetricsBackend) getDefaultNodeSpec(ctx context.Context) (*node.MetricsC } func (m *MetricsBackend) getNodeSpecOrDefault(ctx context.Context, id string) (*node.MetricsCapabilitySpec, error) { + lg := logger.PluginLoggerFromContext(m.Context) nodeSpec, err := m.KV.NodeCapabilitySpecs.Get(ctx, id) if status.Code(err) == codes.NotFound { return m.getDefaultNodeSpec(ctx) } else if err != nil { - m.Logger.With(logger.Err(err)).Error("failed to get node capability spec") + lg.With(logger.Err(err)).Error("failed to get node capability spec") return nil, status.Errorf(codes.Unavailable, "failed to get node capability spec: %v", err) } // handle the case where an older config is now invalid: reset to factory default diff --git a/plugins/metrics/pkg/backend/remoteread.go b/plugins/metrics/pkg/backend/remoteread.go index 780eb3d5e0..b6e49cc1da 100644 --- a/plugins/metrics/pkg/backend/remoteread.go +++ b/plugins/metrics/pkg/backend/remoteread.go @@ -32,6 +32,7 @@ func getIdFromTargetMeta(meta *remoteread.TargetMeta) string { } func (m *MetricsBackend) AddTarget(_ context.Context, request *remoteread.TargetAddRequest) (*emptypb.Empty, error) { + lg := logger.PluginLoggerFromContext(m.Context) m.WaitForInit() m.remoteReadTargetMu.Lock() @@ -53,7 +54,7 @@ func (m *MetricsBackend) AddTarget(_ context.Context, request *remoteread.Target m.remoteReadTargets[targetId] = request.Target - m.Logger.With( + lg.With( "cluster", request.Target.Meta.ClusterId, "target", request.Target.Meta.Name, "capability", wellknown.CapabilityMetrics, @@ -63,6 +64,7 @@ func (m *MetricsBackend) AddTarget(_ context.Context, request *remoteread.Target } func (m *MetricsBackend) EditTarget(ctx context.Context, request *remoteread.TargetEditRequest) (*emptypb.Empty, error) { + lg := logger.PluginLoggerFromContext(m.Context) m.WaitForInit() status, err := m.GetTargetStatus(ctx, &remoteread.TargetStatusRequest{ @@ -104,7 +106,7 @@ func (m *MetricsBackend) EditTarget(ctx context.Context, request *remoteread.Tar target.Spec.Endpoint = diff.Endpoint } - m.Logger.With( + lg.With( "cluster", request.Meta.ClusterId, "target", request.Meta.Name, "capability", wellknown.CapabilityMetrics, @@ -114,6 +116,7 @@ func (m *MetricsBackend) EditTarget(ctx context.Context, request *remoteread.Tar } func (m *MetricsBackend) RemoveTarget(ctx context.Context, request *remoteread.TargetRemoveRequest) (*emptypb.Empty, error) { + lg := logger.PluginLoggerFromContext(m.Context) m.WaitForInit() status, err := m.GetTargetStatus(ctx, 
&remoteread.TargetStatusRequest{ @@ -139,7 +142,7 @@ func (m *MetricsBackend) RemoveTarget(ctx context.Context, request *remoteread.T delete(m.remoteReadTargets, targetId) - m.Logger.With( + lg.With( "cluster", request.Meta.ClusterId, "target", request.Meta.Name, "capability", wellknown.CapabilityMetrics, @@ -149,6 +152,7 @@ func (m *MetricsBackend) RemoveTarget(ctx context.Context, request *remoteread.T } func (m *MetricsBackend) ListTargets(ctx context.Context, request *remoteread.TargetListRequest) (*remoteread.TargetList, error) { + lg := logger.PluginLoggerFromContext(m.Context) m.WaitForInit() m.remoteReadTargetMu.RLock() @@ -164,7 +168,7 @@ func (m *MetricsBackend) ListTargets(ctx context.Context, request *remoteread.Ta eg.Go(func() error { newStatus, err := m.GetTargetStatus(ctx, &remoteread.TargetStatusRequest{Meta: target.Meta}) if err != nil { - m.Logger.Info(fmt.Sprintf("could not get newStatus for target '%s/%s': %s", target.Meta.ClusterId, target.Meta.Name, err)) + lg.Info(fmt.Sprintf("could not get newStatus for target '%s/%s': %s", target.Meta.ClusterId, target.Meta.Name, err)) newStatus.State = remoteread.TargetState_Unknown } @@ -180,7 +184,7 @@ func (m *MetricsBackend) ListTargets(ctx context.Context, request *remoteread.Ta } if err := eg.Wait(); err != nil { - m.Logger.Error(fmt.Sprintf("error waiting for status to update: %s", err)) + lg.Error(fmt.Sprintf("error waiting for status to update: %s", err)) } list := &remoteread.TargetList{Targets: inner} @@ -189,6 +193,7 @@ func (m *MetricsBackend) ListTargets(ctx context.Context, request *remoteread.Ta } func (m *MetricsBackend) GetTargetStatus(ctx context.Context, request *remoteread.TargetStatusRequest) (*remoteread.TargetStatus, error) { + lg := logger.PluginLoggerFromContext(m.Context) m.WaitForInit() targetId := getIdFromTargetMeta(request.Meta) @@ -208,7 +213,7 @@ func (m *MetricsBackend) GetTargetStatus(ctx context.Context, request *remoterea }, nil } - m.Logger.With( + lg.With( "cluster", request.Meta.ClusterId, "capability", wellknown.CapabilityMetrics, "target", request.Meta.Name, @@ -222,6 +227,7 @@ func (m *MetricsBackend) GetTargetStatus(ctx context.Context, request *remoterea } func (m *MetricsBackend) Start(ctx context.Context, request *remoteread.StartReadRequest) (*emptypb.Empty, error) { + lg := logger.PluginLoggerFromContext(m.Context) m.WaitForInit() if m.Delegate == nil { @@ -245,7 +251,7 @@ func (m *MetricsBackend) Start(ctx context.Context, request *remoteread.StartRea _, err := m.Delegate.WithTarget(&corev1.Reference{Id: request.Target.Meta.ClusterId}).Start(ctx, request) if err != nil { - m.Logger.With( + lg.With( "cluster", request.Target.Meta.ClusterId, "capability", wellknown.CapabilityMetrics, "target", request.Target.Meta.Name, @@ -255,7 +261,7 @@ func (m *MetricsBackend) Start(ctx context.Context, request *remoteread.StartRea return nil, err } - m.Logger.With( + lg.With( "cluster", request.Target.Meta.ClusterId, "capability", wellknown.CapabilityMetrics, "target", request.Target.Meta.Name, @@ -265,6 +271,7 @@ func (m *MetricsBackend) Start(ctx context.Context, request *remoteread.StartRea } func (m *MetricsBackend) Stop(ctx context.Context, request *remoteread.StopReadRequest) (*emptypb.Empty, error) { + lg := logger.PluginLoggerFromContext(m.Context) m.WaitForInit() if m.Delegate == nil { @@ -274,7 +281,7 @@ func (m *MetricsBackend) Stop(ctx context.Context, request *remoteread.StopReadR _, err := m.Delegate.WithTarget(&corev1.Reference{Id: request.Meta.ClusterId}).Stop(ctx, request) if err 
!= nil { - m.Logger.With( + lg.With( "cluster", request.Meta.ClusterId, "capability", wellknown.CapabilityMetrics, "target", request.Meta.Name, @@ -284,7 +291,7 @@ func (m *MetricsBackend) Stop(ctx context.Context, request *remoteread.StopReadR return nil, err } - m.Logger.With( + lg.With( "cluster", request.Meta.Name, "capability", wellknown.CapabilityMetrics, "target", request.Meta.Name, @@ -294,6 +301,7 @@ func (m *MetricsBackend) Stop(ctx context.Context, request *remoteread.StopReadR } func (m *MetricsBackend) Discover(ctx context.Context, request *remoteread.DiscoveryRequest) (*remoteread.DiscoveryResponse, error) { + lg := logger.PluginLoggerFromContext(m.Context) m.WaitForInit() response, err := m.Delegate.WithBroadcastSelector(&corev1.ClusterSelector{ ClusterIDs: request.ClusterIds, @@ -305,7 +313,7 @@ func (m *MetricsBackend) Discover(ctx context.Context, request *remoteread.Disco discoverResponse := &remoteread.DiscoveryResponse{} if err := proto.Unmarshal(response.Reply.GetResponse().Response, discoverResponse); err != nil { - m.Logger.Error(fmt.Sprintf("failed to unmarshal for aggregated DiscoveryResponse: %s", err)) + lg.Error(fmt.Sprintf("failed to unmarshal for aggregated DiscoveryResponse: %s", err)) } // inject the cluster id gateway-side @@ -321,7 +329,7 @@ func (m *MetricsBackend) Discover(ctx context.Context, request *remoteread.Disco }).Discover(ctx, request) if err != nil { - m.Logger.With( + lg.With( "capability", wellknown.CapabilityMetrics, logger.Err(err), ).Error("failed to run import discovery") diff --git a/plugins/metrics/pkg/cortex/admin.go b/plugins/metrics/pkg/cortex/admin.go index 672dd67e58..74ea88fad0 100644 --- a/plugins/metrics/pkg/cortex/admin.go +++ b/plugins/metrics/pkg/cortex/admin.go @@ -15,8 +15,6 @@ import ( "sync" "time" - "log/slog" - "github.com/cortexproject/cortex/pkg/cortexpb" "github.com/cortexproject/cortex/pkg/distributor" "github.com/rancher/opni/plugins/metrics/apis/cortexadmin" @@ -44,7 +42,7 @@ type CortexAdminServer struct { type CortexAdminServerConfig struct { CortexClientSet ClientSet `validate:"required"` Config *v1beta1.GatewayConfigSpec `validate:"required"` - Logger *slog.Logger `validate:"required"` + Context context.Context `validate:"required"` } func (p *CortexAdminServer) Initialize(conf CortexAdminServerConfig) { @@ -59,6 +57,8 @@ func (p *CortexAdminServer) Initialize(conf CortexAdminServerConfig) { var _ cortexadmin.CortexAdminServer = (*CortexAdminServer)(nil) func (p *CortexAdminServer) AllUserStats(ctx context.Context, _ *emptypb.Empty) (*cortexadmin.UserIDStatsList, error) { + lg := logger.PluginLoggerFromContext(p.Context) + if !p.Initialized() { return nil, util.StatusError(codes.Unavailable) } @@ -76,7 +76,7 @@ func (p *CortexAdminServer) AllUserStats(ctx context.Context, _ *emptypb.Empty) defer func(Body io.ReadCloser) { err := Body.Close() if err != nil { - p.Logger.With( + lg.With( "err", err, ).Error("failed to close response body") } @@ -147,7 +147,7 @@ func (p *CortexAdminServer) WriteMetrics(ctx context.Context, in *cortexadmin.Wr if !p.Initialized() { return nil, util.StatusError(codes.Unavailable) } - lg := p.Logger.With( + lg := logger.PluginLoggerFromContext(p.Context).With( "clusterID", in.ClusterID, "seriesCount", len(in.Timeseries), ) @@ -162,7 +162,7 @@ func (p *CortexAdminServer) WriteMetrics(ctx context.Context, in *cortexadmin.Wr lg.Debug("writing metrics to cortex") _, err := p.CortexClientSet.Distributor().Push(outgoingContext(ctx, in), cortexReq) if err != nil { - 
p.Logger.With(logger.Err(err)).Error("failed to write metrics") + lg.With(logger.Err(err)).Error("failed to write metrics") return nil, err } return &cortexadmin.WriteResponse{}, nil @@ -185,7 +185,7 @@ func (p *CortexAdminServer) Query( if !p.Initialized() { return nil, util.StatusError(codes.Unavailable) } - lg := p.Logger.With( + lg := logger.PluginLoggerFromContext(p.Context).With( "query", in.Query, ) lg.Debug("handling query") @@ -241,7 +241,7 @@ func (p *CortexAdminServer) QueryRange( if !p.Initialized() { return nil, util.StatusError(codes.Unavailable) } - lg := p.Logger.With( + lg := logger.PluginLoggerFromContext(p.Context).With( "query", in.Query, ) client := p.CortexClientSet @@ -324,7 +324,7 @@ func (p *CortexAdminServer) GetRule(ctx context.Context, if !p.Initialized() { return nil, util.StatusError(codes.Unavailable) } - lg := p.Logger.With( + lg := logger.PluginLoggerFromContext(p.Context).With( "group name", in.GroupName, ) @@ -377,7 +377,7 @@ func (p *CortexAdminServer) ListRules(ctx context.Context, req *cortexadmin.List if !p.Initialized() { return nil, util.StatusError(codes.Unavailable) } - lg := p.Logger.With( + lg := logger.PluginLoggerFromContext(p.Context).With( "cluster id", req.ClusterId, ) if err := req.Validate(); err != nil { @@ -436,7 +436,7 @@ func (p *CortexAdminServer) LoadRules(ctx context.Context, if !p.Initialized() { return nil, util.StatusError(codes.Unavailable) } - lg := p.Logger.With( + lg := logger.PluginLoggerFromContext(p.Context).With( "cluster", in.ClusterId, ) if err := in.Validate(); err != nil { @@ -476,7 +476,7 @@ func (p *CortexAdminServer) DeleteRule( if !p.Initialized() { return nil, util.StatusError(codes.Unavailable) } - lg := p.Logger.With( + lg := logger.PluginLoggerFromContext(p.Context).With( "group", in.GroupName, "namespace", in.Namespace, "cluster", in.ClusterId, @@ -552,7 +552,7 @@ func (p *CortexAdminServer) GetSeriesMetrics(ctx context.Context, request *corte } func (p *CortexAdminServer) ExtractRawSeries(ctx context.Context, request *cortexadmin.MatcherRequest) (*cortexadmin.QueryResponse, error) { - lg := p.Logger.With("series matcher", request.MatchExpr) + lg := logger.PluginLoggerFromContext(p.Context).With("series matcher", request.MatchExpr) lg.Debug("fetching raw series") return p.Query(ctx, &cortexadmin.QueryRequest{ Tenants: []string{request.Tenant}, @@ -632,7 +632,7 @@ func (p *CortexAdminServer) FlushBlocks( body, _ := io.ReadAll(resp.Body) err = resp.Body.Close() if err != nil { - p.Logger.Error("failed to close response body") + logger.PluginLoggerFromContext(p.Context).Error("failed to close response body") } if err := json.NewDecoder(bytes.NewReader(body)).Decode(&ring); err != nil { return nil, err @@ -641,7 +641,7 @@ func (p *CortexAdminServer) FlushBlocks( // flush all active ingesters wg := errgroup.Group{} for _, ingester := range ring.Ingesters { - lg := p.Logger.With( + lg := logger.PluginLoggerFromContext(p.Context).With( "id", ingester.ID, ) if ingester.State != "ACTIVE" { @@ -800,13 +800,13 @@ func (p *CortexAdminServer) proxyCortexToPrometheus( req.Header.Set(orgIDCodec.Key(), orgIDCodec.Encode([]string{tenant})) resp, err := p.CortexClientSet.HTTP().Do(req) if err != nil { - p.Logger.With( + logger.PluginLoggerFromContext(p.Context).With( "request", url, ).Error("failed with %v", err) return nil, err } if resp.StatusCode != http.StatusOK { - p.Logger.With( + logger.PluginLoggerFromContext(p.Context).With( "request", url, ).Error(fmt.Sprintf("request failed with %s", resp.Status)) return nil, 
fmt.Errorf("request failed with: %s", resp.Status) diff --git a/plugins/metrics/pkg/cortex/api.go b/plugins/metrics/pkg/cortex/api.go index c0d4d0f96f..cf6cfc0671 100644 --- a/plugins/metrics/pkg/cortex/api.go +++ b/plugins/metrics/pkg/cortex/api.go @@ -7,7 +7,6 @@ import ( "github.com/gin-contrib/pprof" "github.com/gin-gonic/gin" - "log/slog" managementv1 "github.com/rancher/opni/pkg/apis/management/v1" "github.com/rancher/opni/pkg/auth" @@ -43,7 +42,6 @@ type HttpApiServerConfig struct { CortexClientSet ClientSet `validate:"required"` Config *v1beta1.GatewayConfigSpec `validate:"required"` CortexTLSConfig *tls.Config `validate:"required"` - Logger *slog.Logger `validate:"required"` StorageBackend storage.Backend `validate:"required"` AuthMiddlewares map[string]auth.Middleware `validate:"required"` } @@ -61,25 +59,25 @@ var _ httpext.HTTPAPIExtension = (*HttpApiServer)(nil) func (p *HttpApiServer) ConfigureRoutes(router *gin.Engine) { p.WaitForInit() + lg := logger.NewPluginLogger(p.PluginContext) + lg.Info("configuring http api server") - p.Logger.Info("configuring http api server") - - router.Use(logger.GinLogger(p.Logger), gin.Recovery()) + router.Use(logger.GinLogger(lg), gin.Recovery()) rbacProvider := storage.NewRBACProvider(p.StorageBackend) rbacMiddleware := rbac.NewMiddleware(rbacProvider, orgIDCodec) authMiddleware, ok := p.AuthMiddlewares[p.Config.AuthProvider] if !ok { - p.Logger.With( + lg.With( "name", p.Config.AuthProvider, ).Error("auth provider not found") os.Exit(1) } fwds := &forwarders{ - QueryFrontend: fwd.To(p.Config.Cortex.QueryFrontend.HTTPAddress, fwd.WithLogger(p.Logger), fwd.WithTLS(p.CortexTLSConfig), fwd.WithName("query-frontend")), - Alertmanager: fwd.To(p.Config.Cortex.Alertmanager.HTTPAddress, fwd.WithLogger(p.Logger), fwd.WithTLS(p.CortexTLSConfig), fwd.WithName("alertmanager")), - Ruler: fwd.To(p.Config.Cortex.Ruler.HTTPAddress, fwd.WithLogger(p.Logger), fwd.WithTLS(p.CortexTLSConfig), fwd.WithName("ruler")), + QueryFrontend: fwd.To(p.Config.Cortex.QueryFrontend.HTTPAddress, fwd.WithLogger(lg), fwd.WithTLS(p.CortexTLSConfig), fwd.WithName("query-frontend")), + Alertmanager: fwd.To(p.Config.Cortex.Alertmanager.HTTPAddress, fwd.WithLogger(lg), fwd.WithTLS(p.CortexTLSConfig), fwd.WithName("alertmanager")), + Ruler: fwd.To(p.Config.Cortex.Ruler.HTTPAddress, fwd.WithLogger(lg), fwd.WithTLS(p.CortexTLSConfig), fwd.WithName("ruler")), } mws := &middlewares{ @@ -97,11 +95,13 @@ func (p *HttpApiServer) ConfigureRoutes(router *gin.Engine) { } func (p *HttpApiServer) configureAlertmanager(router *gin.Engine, f *forwarders, m *middlewares) { + lg := logger.NewPluginLogger(p.PluginContext) + orgIdLimiter := func(c *gin.Context) { ids := rbac.AuthorizedClusterIDs(c) if len(ids) > 1 { user, _ := rbac.AuthorizedUserID(c) - p.Logger.With( + lg.With( "request", c.FullPath(), "user", user, ).Debug("multiple org ids found, limiting to first") diff --git a/plugins/metrics/pkg/cortex/remotewrite.go b/plugins/metrics/pkg/cortex/remotewrite.go index d54cd67c8c..51ba8c57c8 100644 --- a/plugins/metrics/pkg/cortex/remotewrite.go +++ b/plugins/metrics/pkg/cortex/remotewrite.go @@ -11,6 +11,7 @@ import ( "github.com/rancher/opni/pkg/auth/cluster" "github.com/rancher/opni/pkg/auth/session" "github.com/rancher/opni/pkg/config/v1beta1" + "github.com/rancher/opni/pkg/logger" "github.com/rancher/opni/pkg/metrics" "github.com/rancher/opni/pkg/util" "github.com/rancher/opni/plugins/metrics/apis/remotewrite" @@ -23,7 +24,6 @@ import ( "google.golang.org/grpc/codes" 
"google.golang.org/grpc/status" "google.golang.org/protobuf/types/known/emptypb" - "log/slog" ) type RemoteWriteForwarder struct { @@ -39,7 +39,7 @@ var _ remotewrite.RemoteWriteServer = (*RemoteWriteForwarder)(nil) type RemoteWriteForwarderConfig struct { CortexClientSet ClientSet `validate:"required"` Config *v1beta1.GatewayConfigSpec `validate:"required"` - Logger *slog.Logger `validate:"required"` + Context context.Context `validate:"required"` } func (f *RemoteWriteForwarder) Initialize(conf RemoteWriteForwarderConfig) { @@ -139,6 +139,8 @@ func (f *RemoteWriteForwarder) Push(ctx context.Context, writeReq *cortexpb.Writ } func (f *RemoteWriteForwarder) SyncRules(ctx context.Context, payload *remotewrite.Payload) (_ *emptypb.Empty, syncErr error) { + lg := logger.PluginLoggerFromContext(f.Context) + if !f.Initialized() { return nil, util.StatusError(codes.Unavailable) } @@ -150,7 +152,7 @@ func (f *RemoteWriteForwarder) SyncRules(ctx context.Context, payload *remotewri defer func() { if syncErr != nil { - f.Logger.With( + lg.With( "err", syncErr, "clusterId", clusterId, ).Error("error syncing rules to cortex") diff --git a/plugins/metrics/pkg/gateway/certs.go b/plugins/metrics/pkg/gateway/certs.go index 6be49977c6..00cfcc8e1e 100644 --- a/plugins/metrics/pkg/gateway/certs.go +++ b/plugins/metrics/pkg/gateway/certs.go @@ -7,14 +7,18 @@ import ( "fmt" "os" "time" + + "github.com/rancher/opni/pkg/logger" ) func (p *Plugin) loadCortexCerts() *tls.Config { + lg := logger.PluginLoggerFromContext(p.ctx) + ctx, ca := context.WithTimeout(context.Background(), 10*time.Second) defer ca() config, err := p.config.GetContext(ctx) if err != nil { - p.logger.Error(fmt.Sprintf("plugin startup failed: config was not loaded: %v", err)) + lg.Error(fmt.Sprintf("plugin startup failed: config was not loaded: %v", err)) os.Exit(1) } cortexServerCA := config.Spec.Cortex.Certs.ServerCA @@ -24,27 +28,27 @@ func (p *Plugin) loadCortexCerts() *tls.Config { clientCert, err := tls.LoadX509KeyPair(cortexClientCert, cortexClientKey) if err != nil { - p.logger.Error(fmt.Sprintf("failed to load cortex client keypair: %v", err)) + lg.Error(fmt.Sprintf("failed to load cortex client keypair: %v", err)) os.Exit(1) } serverCAPool := x509.NewCertPool() serverCAData, err := os.ReadFile(cortexServerCA) if err != nil { - p.logger.Error(fmt.Sprintf("failed to read cortex server CA: %v", err)) + lg.Error(fmt.Sprintf("failed to read cortex server CA: %v", err)) os.Exit(1) } if ok := serverCAPool.AppendCertsFromPEM(serverCAData); !ok { - p.logger.Error("failed to load cortex server CA") + lg.Error("failed to load cortex server CA") os.Exit(1) } clientCAPool := x509.NewCertPool() clientCAData, err := os.ReadFile(cortexClientCA) if err != nil { - p.logger.Error(fmt.Sprintf("failed to read cortex client CA: %v", err)) + lg.Error(fmt.Sprintf("failed to read cortex client CA: %v", err)) os.Exit(1) } if ok := clientCAPool.AppendCertsFromPEM(clientCAData); !ok { - p.logger.Error("failed to load cortex client CA") + lg.Error("failed to load cortex client CA") os.Exit(1) } return &tls.Config{ diff --git a/plugins/metrics/pkg/gateway/plugin.go b/plugins/metrics/pkg/gateway/plugin.go index 206de5b0a0..5ec5a4ce8d 100644 --- a/plugins/metrics/pkg/gateway/plugin.go +++ b/plugins/metrics/pkg/gateway/plugin.go @@ -4,8 +4,6 @@ import ( "context" "crypto/tls" - "log/slog" - managementv1 "github.com/rancher/opni/pkg/apis/management/v1" "github.com/rancher/opni/pkg/auth" "github.com/rancher/opni/pkg/config/v1beta1" @@ -38,8 +36,7 @@ type Plugin struct 
{ system.UnimplementedSystemPluginClient collector.CollectorServer - ctx context.Context - logger *slog.Logger + ctx context.Context cortexAdmin cortex.CortexAdminServer cortexHttp cortex.HttpApiServer @@ -69,10 +66,17 @@ func NewPlugin(ctx context.Context) *Plugin { cortex.RegisterMeterProvider(mp) collector := collector.NewCollectorServer(cortexReader) + + lg := logger.NewPluginLogger(ctx).WithGroup("metrics") + adminLg := lg.WithGroup("cortex-admin") + rwLg := lg.WithGroup("cortex-rw") + metricsBackendLg := lg.WithGroup("metrics-backend") + httpLg := lg.WithGroup("cortex-http") + ctx = logger.WithPluginLogger(ctx, lg) + p := &Plugin{ CollectorServer: collector, ctx: ctx, - logger: logger.NewPluginLogger().WithGroup("metrics"), config: future.New[*v1beta1.GatewayConfig](), authMw: future.New[map[string]auth.Middleware](), @@ -93,7 +97,7 @@ func NewPlugin(ctx context.Context) *Plugin { p.cortexAdmin.Initialize(cortex.CortexAdminServerConfig{ CortexClientSet: cortexClientSet, Config: &config.Spec, - Logger: p.logger.WithGroup("cortex-admin"), + Context: logger.WithPluginLogger(ctx, adminLg), }) }) @@ -102,7 +106,7 @@ func NewPlugin(ctx context.Context) *Plugin { p.cortexRemoteWrite.Initialize(cortex.RemoteWriteForwarderConfig{ CortexClientSet: cortexClientSet, Config: &config.Spec, - Logger: p.logger.WithGroup("cortex-rw"), + Context: logger.WithPluginLogger(ctx, rwLg), }) }) @@ -120,11 +124,11 @@ func NewPlugin(ctx context.Context) *Plugin { ) { driverName := config.Spec.Cortex.Management.ClusterDriver if driverName == "" { - p.logger.Warn("no cluster driver configured") + lg.Warn("no cluster driver configured") } builder, ok := drivers.ClusterDrivers.Get(driverName) if !ok { - p.logger.With( + lg.With( "driver", driverName, ).Error("unknown cluster driver, using fallback noop driver") builder, ok = drivers.ClusterDrivers.Get("noop") @@ -136,14 +140,13 @@ func NewPlugin(ctx context.Context) *Plugin { driverutil.NewOption("defaultConfigStore", backendKvClients.DefaultClusterConfigurationSpec), ) if err != nil { - p.logger.With( + lg.With( "driver", driverName, logger.Err(err), ).Error("failed to initialize cluster driver") panic("failed to initialize cluster driver") - return } - p.logger.With( + lg.With( "driver", driverName, ).Info("initialized cluster driver") p.clusterDriver.Set(driver) @@ -158,7 +161,7 @@ func NewPlugin(ctx context.Context) *Plugin { backendKvClients *backend.KVClients, ) { p.metrics.Initialize(backend.MetricsBackendConfig{ - Logger: p.logger.WithGroup("metrics-backend"), + Context: logger.WithPluginLogger(p.ctx, metricsBackendLg), StorageBackend: storageBackend, MgmtClient: mgmtClient, UninstallController: uninstallController, @@ -178,12 +181,11 @@ func NewPlugin(ctx context.Context) *Plugin { authMiddlewares map[string]auth.Middleware, ) { p.cortexHttp.Initialize(cortex.HttpApiServerConfig{ - PluginContext: p.ctx, + PluginContext: logger.WithPluginLogger(p.ctx, httpLg), ManagementClient: mgmtApi, CortexClientSet: cortexClientSet, Config: &config.Spec, CortexTLSConfig: tlsConfig, - Logger: p.logger.WithGroup("cortex-http"), StorageBackend: storageBackend, AuthMiddlewares: authMiddlewares, }) @@ -193,12 +195,13 @@ func NewPlugin(ctx context.Context) *Plugin { func Scheme(ctx context.Context) meta.Scheme { scheme := meta.NewScheme(meta.WithMode(meta.ModeGateway)) + p := NewPlugin(ctx) scheme.Add(system.SystemPluginID, system.NewPlugin(p)) scheme.Add(httpext.HTTPAPIExtensionPluginID, httpext.NewPlugin(&p.cortexHttp)) streamMetricReader := metric.NewManualReader() 
 	p.CollectorServer.AppendReader(streamMetricReader)
-	scheme.Add(streamext.StreamAPIExtensionPluginID, streamext.NewGatewayPlugin(p,
+	scheme.Add(streamext.StreamAPIExtensionPluginID, streamext.NewGatewayPlugin(ctx, p,
 		streamext.WithMetrics(streamext.GatewayStreamMetricsConfig{
 			Reader:          streamMetricReader,
 			LabelsForStream: p.labelsForStreamMetrics,
diff --git a/plugins/metrics/pkg/gateway/system.go b/plugins/metrics/pkg/gateway/system.go
index 7f55f26e81..31aededb72 100644
--- a/plugins/metrics/pkg/gateway/system.go
+++ b/plugins/metrics/pkg/gateway/system.go
@@ -25,17 +25,18 @@ import (
 )
 
 func (p *Plugin) UseManagementAPI(client managementv1.ManagementClient) {
+	lg := logger.PluginLoggerFromContext(p.ctx)
 	p.mgmtClient.Set(client)
 	cfg, err := client.GetConfig(context.Background(), &emptypb.Empty{}, grpc.WaitForReady(true))
 	if err != nil {
-		p.logger.With(
+		lg.With(
 			logger.Err(err),
 		).Error("failed to get config")
 		os.Exit(1)
 	}
 	objectList, err := machinery.LoadDocuments(cfg.Documents)
 	if err != nil {
-		p.logger.With(
+		lg.With(
 			logger.Err(err),
 		).Error("failed to load config")
 		os.Exit(1)
@@ -44,7 +45,7 @@ func (p *Plugin) UseManagementAPI(client managementv1.ManagementClient) {
 	objectList.Visit(func(config *v1beta1.GatewayConfig) {
 		backend, err := machinery.ConfigureStorageBackend(p.ctx, &config.Spec.Storage)
 		if err != nil {
-			p.logger.With(
+			lg.With(
 				logger.Err(err),
 			).Error("failed to configure storage backend")
 			os.Exit(1)
@@ -55,7 +56,7 @@ func (p *Plugin) UseManagementAPI(client managementv1.ManagementClient) {
 		p.cortexTlsConfig.Set(tlsConfig)
 		clientset, err := cortex.NewClientSet(p.ctx, &config.Spec.Cortex, tlsConfig)
 		if err != nil {
-			p.logger.With(
+			lg.With(
 				logger.Err(err),
 			).Error("failed to configure cortex clientset")
 			os.Exit(1)
@@ -68,9 +69,10 @@ func (p *Plugin) UseManagementAPI(client managementv1.ManagementClient) {
 }
 
 func (p *Plugin) UseKeyValueStore(client system.KeyValueStoreClient) {
+	lg := logger.PluginLoggerFromContext(p.ctx)
 	ctrl, err := task.NewController(p.ctx, "uninstall", system.NewKVStoreClient[*corev1.TaskStatus](client), &p.uninstallRunner)
 	if err != nil {
-		p.logger.With(
+		lg.With(
 			logger.Err(err),
 		).Error("failed to create task controller")
 		os.Exit(1)
diff --git a/plugins/slo/pkg/slo/api.go b/plugins/slo/pkg/slo/api.go
index 915c3bc675..cee2de1320 100644
--- a/plugins/slo/pkg/slo/api.go
+++ b/plugins/slo/pkg/slo/api.go
@@ -9,6 +9,7 @@ import (
 
 	corev1 "github.com/rancher/opni/pkg/apis/core/v1"
 	managementv1 "github.com/rancher/opni/pkg/apis/management/v1"
+	"github.com/rancher/opni/pkg/logger"
 	"github.com/rancher/opni/pkg/slo/shared"
 	"github.com/rancher/opni/pkg/storage"
 	"github.com/rancher/opni/pkg/validation"
@@ -127,7 +128,7 @@ func (p *Plugin) UpdateSLO(ctx context.Context, req *sloapi.SLOData) (*emptypb.E
 }
 
 func (p *Plugin) DeleteSLO(ctx context.Context, req *corev1.Reference) (*emptypb.Empty, error) {
-	lg := p.logger
+	lg := logger.PluginLoggerFromContext(p.ctx)
 	existing, err := p.storage.Get().SLOs.Get(ctx, path.Join("/slos", req.Id))
 	if err != nil {
 		lg.With("delete slo", req.Id).Error("failed to get slo to delete in K,V store")
diff --git a/plugins/slo/pkg/slo/cortex_info.go b/plugins/slo/pkg/slo/cortex_info.go
index ae31315c59..7c092276d3 100644
--- a/plugins/slo/pkg/slo/cortex_info.go
+++ b/plugins/slo/pkg/slo/cortex_info.go
@@ -3,7 +3,6 @@ package slo
 import (
 	"context"
 	"fmt"
-	"log/slog"
 	"time"
 
 	"emperror.dev/errors"
@@ -11,6 +10,7 @@ import (
 	"github.com/prometheus/common/model"
 	"github.com/prometheus/prometheus/model/rulefmt"
+	"github.com/rancher/opni/pkg/logger"
 	"github.com/rancher/opni/pkg/metrics/compat"
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/status"
@@ -22,7 +22,9 @@ import (
 var instantMaskDisabled = true
 
 func createGrafanaSLOMask(ctx context.Context, p *Plugin, clusterId string, ruleId string) error {
-	p.logger.With("sloId", ruleId, "clusterId", clusterId).Debug("creating grafana mask")
+	lg := logger.PluginLoggerFromContext(p.ctx)
+
+	lg.With("sloId", ruleId, "clusterId", clusterId).Debug("creating grafana mask")
 	if !instantMaskDisabled {
 		_, err := p.adminClient.Get().WriteMetrics(ctx, &cortexadmin.WriteRequest{
 			ClusterID: clusterId,
@@ -49,17 +51,16 @@ func createGrafanaSLOMask(ctx context.Context, p *Plugin, clusterId string, rule
 func tryApplyThenDeleteCortexRules(
 	ctx context.Context,
 	p *Plugin,
-	lg *slog.Logger,
 	clusterId string,
 	ruleId *string,
 	toApply []rulefmt.RuleGroup,
 ) error {
+	lg := logger.PluginLoggerFromContext(ctx)
 	var errArr []error
 	for _, rules := range toApply {
 		err := applyCortexSLORules(
 			ctx,
 			p,
-			lg,
 			clusterId,
 			rules,
 		)
@@ -72,7 +73,6 @@ func tryApplyThenDeleteCortexRules(
 			err := deleteCortexSLORules(
 				ctx,
 				p,
-				lg,
 				clusterId,
 				rules.Name,
 			)
@@ -99,10 +99,11 @@ func tryApplyThenDeleteCortexRules(
 func applyCortexSLORules(
 	ctx context.Context,
 	p *Plugin,
-	lg *slog.Logger,
 	clusterId string,
 	ruleSpec rulefmt.RuleGroup,
 ) error {
+	lg := logger.PluginLoggerFromContext(ctx)
+
 	out, err := yaml.Marshal(ruleSpec)
 	if err != nil {
 		return err
@@ -125,7 +126,6 @@ func applyCortexSLORules(
 func deleteCortexSLORules(
 	ctx context.Context,
 	p *Plugin,
-	_ *slog.Logger,
 	clusterId string,
 	groupName string,
 ) error {
diff --git a/plugins/slo/pkg/slo/filters.go b/plugins/slo/pkg/slo/filters.go
index 2b0a038148..3b41624bde 100644
--- a/plugins/slo/pkg/slo/filters.go
+++ b/plugins/slo/pkg/slo/filters.go
@@ -1,17 +1,19 @@
 package slo
 
 import (
+	"context"
 	"embed"
 	"fmt"
 	"io/fs"
 	"path/filepath"
 	"regexp"
 
+	"log/slog"
+
 	"github.com/rancher/opni/pkg/logger"
 	"github.com/rancher/opni/plugins/metrics/apis/cortexadmin"
 	sloapi "github.com/rancher/opni/plugins/slo/apis/slo"
 	"gopkg.in/yaml.v3"
-	"log/slog"
 )
 
 //go:embed metricgroups/*.yaml
@@ -22,7 +24,7 @@ var ServiceGroups embed.FS
 
 // map of directory names to their embed.FS
 var EnabledFilters = map[string]embed.FS{"metricgroups": MetricGroups, "servicegroups": ServiceGroups}
-var filters = constructFilters(logger.NewPluginLogger().WithGroup("slo"))
+var filters = constructFilters(logger.NewPluginLogger(context.Background()).WithGroup("slo"))
 
 // Regexp adds unmarshalling from json for regexp.Regexp
 type Regexp struct {
diff --git a/plugins/slo/pkg/slo/filters_test.go b/plugins/slo/pkg/slo/filters_test.go
index 86cf60a533..4f8fd37b9e 100644
--- a/plugins/slo/pkg/slo/filters_test.go
+++ b/plugins/slo/pkg/slo/filters_test.go
@@ -1,6 +1,8 @@
 package slo_test
 
 import (
+	"context"
+
 	. "github.com/onsi/ginkgo/v2"
 	. "github.com/onsi/gomega"
 	"github.com/rancher/opni/pkg/logger"
@@ -13,7 +15,9 @@ var _ = Describe("SLO Filter tests", Ordered, Label("unit", "slow"), func() {
 	When("We use SLO filters", func() {
 		It("should get parse them from our embedded directory definitions", func() {
 			for dirName, embedFs := range slo.EnabledFilters {
-				filters := slo.GetGroupConfigsFromEmbed(logger.NewPluginLogger().WithGroup("slo"), dirName, embedFs)
+				lg := logger.NewPluginLogger(context.Background()).WithGroup("slo")
+
+				filters := slo.GetGroupConfigsFromEmbed(lg, dirName, embedFs)
 				Expect(filters).NotTo(HaveLen(0))
 				for _, filter := range filters {
 					Expect(filter.Name).NotTo(Equal(""))
diff --git a/plugins/slo/pkg/slo/impl.go b/plugins/slo/pkg/slo/impl.go
index 1369ff7597..8b2cdc9afb 100644
--- a/plugins/slo/pkg/slo/impl.go
+++ b/plugins/slo/pkg/slo/impl.go
@@ -16,6 +16,7 @@ import (
 	"github.com/prometheus/prometheus/promql/parser"
 	corev1 "github.com/rancher/opni/pkg/apis/core/v1"
 	managementv1 "github.com/rancher/opni/pkg/apis/management/v1"
+	"github.com/rancher/opni/pkg/logger"
 	"github.com/rancher/opni/pkg/util"
 	"github.com/rancher/opni/plugins/metrics/apis/cortexadmin"
 	sloapi "github.com/rancher/opni/plugins/slo/apis/slo"
@@ -37,7 +38,7 @@ func (s SLOMonitoring) Create() (*corev1.Reference, error) {
 	rrecording, rmetadata, ralerting := slo.ConstructCortexRules(nil)
 	toApply := []rulefmt.RuleGroup{rrecording, rmetadata, ralerting}
 	ruleId := slo.GetId()
-	err := tryApplyThenDeleteCortexRules(s.ctx, s.p, s.p.logger, req.GetSlo().GetClusterId(), &ruleId, toApply)
+	err := tryApplyThenDeleteCortexRules(s.ctx, s.p, req.GetSlo().GetClusterId(), &ruleId, toApply)
 	if err != nil {
 		return nil, err
 	}
@@ -45,17 +46,18 @@ func (s SLOMonitoring) Create() (*corev1.Reference, error) {
 }
 
 func (s SLOMonitoring) Update(existing *sloapi.SLOData) (*sloapi.SLOData, error) {
+	lg := logger.PluginLoggerFromContext(s.p.ctx)
 	incomingSLO := (s.req).(*sloapi.SLOData) // Create is the same as Update if within the same cluster
 	newSlo := SLODataToStruct(incomingSLO)
 	rrecording, rmetadata, ralerting := newSlo.ConstructCortexRules(nil)
 	toApply := []rulefmt.RuleGroup{rrecording, rmetadata, ralerting}
-	err := tryApplyThenDeleteCortexRules(s.ctx, s.p, s.p.logger, incomingSLO.GetSLO().GetClusterId(), nil, toApply)
+	err := tryApplyThenDeleteCortexRules(s.ctx, s.p, incomingSLO.GetSLO().GetClusterId(), nil, toApply)
 	// successfully applied rules to another cluster
 	if err == nil && existing.SLO.ClusterId != incomingSLO.SLO.ClusterId {
 		_, err := s.p.DeleteSLO(s.ctx, &corev1.Reference{Id: existing.Id})
 		if err != nil {
-			s.lg.With("sloId", existing.Id).Error(fmt.Sprintf(
+			lg.With("sloId", existing.Id).Error(fmt.Sprintf(
 				"Unable to delete SLO when updating between clusters : %v",
 				err))
 		}
@@ -64,8 +66,10 @@ func (s SLOMonitoring) Update(existing *sloapi.SLOData) (*sloapi.SLOData, error)
 }
 
 func (s SLOMonitoring) Delete(existing *sloapi.SLOData) error {
+	lg := logger.PluginLoggerFromContext(s.p.ctx)
+
 	id, clusterId := existing.Id, existing.SLO.ClusterId
-	//err := deleteCortexSLORules(s.p, id, clusterId, s.ctx, s.lg)
+	//err := deleteCortexSLORules(s.p, id, clusterId, s.ctx, lg)
 	errArr := []error{}
 	slo := SLODataToStruct(existing)
 	rrecording, rmetadata, ralerting := slo.ConstructCortexRules(nil)
@@ -76,7 +80,6 @@ func (s SLOMonitoring) Delete(existing *sloapi.SLOData) error {
 			err := deleteCortexSLORules(
 				s.ctx,
 				s.p,
-				s.p.logger,
 				clusterId,
 				rule.Alert.Value,
 			)
@@ -88,7 +91,6 @@ func (s SLOMonitoring) Delete(existing *sloapi.SLOData) error {
 			err := deleteCortexSLORules(
 				s.ctx,
 				s.p,
-				s.p.logger,
 				clusterId,
 				rule.Record.Value,
 			)
@@ -100,7 +102,7 @@ func (s SLOMonitoring) Delete(existing *sloapi.SLOData) error {
 	}
 	err := createGrafanaSLOMask(s.ctx, s.p, clusterId, id)
 	if err != nil {
-		s.p.logger.Error(fmt.Sprintf("creating grafana mask failed %s", err))
+		lg.Error(fmt.Sprintf("creating grafana mask failed %s", err))
 		errArr = append(errArr, err)
 	}
 	return errors.Combine(errArr...)
@@ -115,7 +117,7 @@ func (s SLOMonitoring) Clone(clone *sloapi.SLOData) (*corev1.Reference, *sloapi.
 	rrecording, rmetadata, ralerting := slo.ConstructCortexRules(nil)
 	toApply := []rulefmt.RuleGroup{rrecording, rmetadata, ralerting}
 	ruleId := slo.GetId()
-	err := tryApplyThenDeleteCortexRules(s.ctx, s.p, s.p.logger, sloData.GetClusterId(), &ruleId, toApply)
+	err := tryApplyThenDeleteCortexRules(s.ctx, s.p, sloData.GetClusterId(), &ruleId, toApply)
 	clonedData.SLO.Name = sloData.Name + "-clone"
 	clonedData.Id = slo.GetId()
 	return &corev1.Reference{Id: slo.GetId()}, clonedData, err
@@ -196,7 +198,7 @@ func (s SLOMonitoring) MultiClusterClone(
 			)
 			continue
 		}
-		errArr[idx] = tryApplyThenDeleteCortexRules(s.ctx, s.p, s.p.logger, clusterId.Id, &ruleId, toApply)
+		errArr[idx] = tryApplyThenDeleteCortexRules(s.ctx, s.p, clusterId.Id, &ruleId, toApply)
 		clonedData.SLO.Name = sloData.Name + "-clone-" + strconv.Itoa(idx)
 		clonedData.Id = slo.GetId()
 		clusterDefinitions[idx] = clonedData
@@ -211,10 +213,11 @@ func (s SLOMonitoring) MultiClusterClone(
 // - If it has Data, check if it is within budget
 // - If is within budget, check if any alerts are firing
 func (s SLOMonitoring) Status(existing *sloapi.SLOData) (*sloapi.SLOStatus, error) {
+	lg := logger.PluginLoggerFromContext(s.p.ctx)
 	now := time.Now()
 
 	if now.Sub(existing.CreatedAt.AsTime()) <= sloapi.MinEvaluateInterval*2 {
-		s.lg.Debug("SLO status is not ready to be evaluated : ", "sloId", existing.Id, "status", (&sloapi.SLOStatus{State: sloapi.SLOStatusState_Creating}).String())
+		lg.Debug("SLO status is not ready to be evaluated : ", "sloId", existing.Id, "status", (&sloapi.SLOStatus{State: sloapi.SLOStatusState_Creating}).String())
 		return &sloapi.SLOStatus{State: sloapi.SLOStatusState_Creating}, nil
 	}
@@ -234,7 +237,7 @@ func (s SLOMonitoring) Status(existing *sloapi.SLOData) (*sloapi.SLOStatus, erro
 	if sliDataVector == nil || sliDataVector.Len() == 0 {
 		return &sloapi.SLOStatus{State: sloapi.SLOStatusState_NoData}, nil
 	}
-	s.lg.With("sloId", slo.GetId()).Debug(fmt.Sprintf("sli status response vector : %s", sliDataVector.String()))
+	lg.With("sloId", slo.GetId()).Debug(fmt.Sprintf("sli status response vector : %s", sliDataVector.String()))
 	// ======================= error budget =======================
 	// race condition can cause initial evaluation to fail with empty vector, resulting in no data state
 	// this is why we return creating state with two intervals
@@ -250,7 +253,7 @@ func (s SLOMonitoring) Status(existing *sloapi.SLOData) (*sloapi.SLOStatus, erro
 	if metadataBudget <= 0 {
 		return &sloapi.SLOStatus{State: sloapi.SLOStatusState_Breaching}, nil
 	}
-	s.lg.With("sloId", slo.GetId()).Debug(fmt.Sprintf("sli status %s", metadataVector.String()))
+	lg.With("sloId", slo.GetId()).Debug(fmt.Sprintf("sli status %s", metadataVector.String()))
 	//
 	//// ======================= alert =======================
@@ -270,7 +273,7 @@ func (s SLOMonitoring) Status(existing *sloapi.SLOData) (*sloapi.SLOStatus, erro
 	if (*alertDataVector1)[len(*alertDataVector1)-1].Value > 0 || (*alertDataVector2)[len(*alertDataVector2)-1].Value > 0 {
 		return &sloapi.SLOStatus{State: sloapi.SLOStatusState_Warning}, nil
 	}
-	s.lg.With("sloId", slo.GetId()).Debug("alert status response vector ", alertDataVector1.String(), alertDataVector2.String())
+	lg.With("sloId", slo.GetId()).Debug("alert status response vector ", alertDataVector1.String(), alertDataVector2.String())
 	return &sloapi.SLOStatus{
 		State: state,
 	}, nil
diff --git a/plugins/slo/pkg/slo/interfaces.go b/plugins/slo/pkg/slo/interfaces.go
index 0fe3829636..65494faa42 100644
--- a/plugins/slo/pkg/slo/interfaces.go
+++ b/plugins/slo/pkg/slo/interfaces.go
@@ -6,7 +6,6 @@ import (
 
 	corev1 "github.com/rancher/opni/pkg/apis/core/v1"
 	sloapi "github.com/rancher/opni/plugins/slo/apis/slo"
-	"log/slog"
 
 	"google.golang.org/protobuf/proto"
 )
@@ -54,7 +53,6 @@ type RequestBase struct {
 	req proto.Message
 	p   *Plugin
 	ctx context.Context
-	lg  *slog.Logger
 }
 
 type SLOMonitoring struct {
@@ -69,24 +67,22 @@ type MonitoringServiceBackend struct {
 	RequestBase
 }
 
-func NewSLOMonitoringStore(p *Plugin, lg *slog.Logger) SLOStore {
+func NewSLOMonitoringStore(p *Plugin) SLOStore {
 	return &SLOMonitoring{
 		RequestBase{
 			req: nil,
 			p:   p,
 			ctx: context.Background(),
-			lg:  lg,
 		},
 	}
 }
 
-func NewMonitoringServiceBackend(p *Plugin, lg *slog.Logger) ServiceBackend {
+func NewMonitoringServiceBackend(p *Plugin) ServiceBackend {
 	return &MonitoringServiceBackend{
 		RequestBase{
 			req: nil,
 			p:   p,
-			ctx: context.TODO(),
-			lg:  lg,
+			ctx: p.ctx,
 		},
 	}
 }
diff --git a/plugins/slo/pkg/slo/plugin.go b/plugins/slo/pkg/slo/plugin.go
index dfc675f25b..fe5b3a4cd4 100644
--- a/plugins/slo/pkg/slo/plugin.go
+++ b/plugins/slo/pkg/slo/plugin.go
@@ -5,7 +5,6 @@ import (
 
 	"github.com/rancher/opni/plugins/metrics/apis/cortexadmin"
 	"github.com/rancher/opni/plugins/slo/apis/slo"
-	"log/slog"
 
 	alertingv1 "github.com/rancher/opni/pkg/apis/alerting/v1"
 	managementv1 "github.com/rancher/opni/pkg/apis/management/v1"
@@ -22,8 +21,7 @@ type Plugin struct {
 	slo.UnsafeSLOServer
 	system.UnimplementedSystemPluginClient
 
-	ctx    context.Context
-	logger *slog.Logger
+	ctx context.Context
 
 	storage    future.Future[StorageAPIs]
 	mgmtClient future.Future[managementv1.ManagementClient]
@@ -38,9 +36,10 @@ type StorageAPIs struct {
 }
 
 func NewPlugin(ctx context.Context) *Plugin {
+	lg := logger.NewPluginLogger(ctx).WithGroup("slo")
+	ctx = logger.WithPluginLogger(ctx, lg)
 	return &Plugin{
 		ctx:         ctx,
-		logger:      logger.NewPluginLogger().WithGroup("slo"),
 		storage:     future.New[StorageAPIs](),
 		mgmtClient:  future.New[managementv1.ManagementClient](),
 		adminClient: future.New[cortexadmin.CortexAdminClient](),
@@ -52,6 +51,7 @@ var _ slo.SLOServer = (*Plugin)(nil)
 
 func Scheme(ctx context.Context) meta.Scheme {
 	scheme := meta.NewScheme()
+
 	p := NewPlugin(ctx)
 	scheme.Add(system.SystemPluginID, system.NewPlugin(p))
 	scheme.Add(managementext.ManagementAPIExtensionPluginID,
diff --git a/plugins/slo/pkg/slo/system.go b/plugins/slo/pkg/slo/system.go
index 8b8a74a0df..998a0f44da 100644
--- a/plugins/slo/pkg/slo/system.go
+++ b/plugins/slo/pkg/slo/system.go
@@ -5,6 +5,7 @@ import (
 
 	alertingv1 "github.com/rancher/opni/pkg/apis/alerting/v1"
 	managementv1 "github.com/rancher/opni/pkg/apis/management/v1"
+	"github.com/rancher/opni/pkg/logger"
 	"github.com/rancher/opni/pkg/plugins/apis/system"
 	"github.com/rancher/opni/pkg/slo/shared"
 	"github.com/rancher/opni/plugins/metrics/apis/cortexadmin"
@@ -26,9 +27,10 @@ func (p *Plugin) UseKeyValueStore(client system.KeyValueStoreClient) {
 }
 
 func (p *Plugin) UseAPIExtensions(intf system.ExtensionClientInterface) {
+	lg := logger.PluginLoggerFromContext(p.ctx)
 	cc, err := intf.GetClientConn(p.ctx, "CortexAdmin", "AlertEndpoints")
 	if err != nil {
-		p.logger.Error("failed to get cortex admin client", "error", err)
+		lg.Error("failed to get cortex admin client", "error", err)
 		if p.ctx.Err() != nil {
 			// Plugin is shutting down, don't exit
 			return
@@ -42,7 +44,7 @@ func (p *Plugin) UseAPIExtensions(intf system.ExtensionClientInterface) {
 	p.alertEndpointClient.Set(alertingEndpointClient)
 	RegisterDatasource(
 		shared.MonitoringDatasource,
-		NewSLOMonitoringStore(p, p.logger),
-		NewMonitoringServiceBackend(p, p.logger),
+		NewSLOMonitoringStore(p),
+		NewMonitoringServiceBackend(p),
 	)
 }
diff --git a/plugins/topology/pkg/backend/topology.go b/plugins/topology/pkg/backend/topology.go
index 24a33cc360..f8d8464a65 100644
--- a/plugins/topology/pkg/backend/topology.go
+++ b/plugins/topology/pkg/backend/topology.go
@@ -6,8 +6,6 @@ import (
 	"sync"
 	"time"
 
-	"log/slog"
-
 	"github.com/google/go-cmp/cmp"
 	"github.com/rancher/opni/pkg/agent"
 	capabilityv1 "github.com/rancher/opni/pkg/apis/capability/v1"
@@ -33,7 +31,7 @@ import (
 )
 
 type TopologyBackendConfig struct {
-	Logger              *slog.Logger                              `validate:"required"`
+	Context             context.Context                           `validate:"required"`
 	StorageBackend      storage.Backend                           `validate:"required"`
 	MgmtClient          managementv1.ManagementClient             `validate:"required"`
 	Delegate            streamext.StreamDelegate[agent.ClientSet] `validate:"required"`
@@ -113,6 +111,7 @@ func (t *TopologyBackend) canInstall(ctx context.Context) error {
 }
 
 func (t *TopologyBackend) requestNodeSync(ctx context.Context, cluster *corev1.Reference) {
+	lg := logger.PluginLoggerFromContext(t.Context)
 	_, err := t.Delegate.WithTarget(cluster).SyncNow(ctx, &capabilityv1.Filter{
 		CapabilityNames: []string{wellknown.CapabilityTopology},
 	})
@@ -121,14 +120,14 @@ func (t *TopologyBackend) requestNodeSync(ctx context.Context, cluster *corev1.R
 		name = "(all)"
 	}
 	if err != nil {
-		t.Logger.With(
+		lg.With(
 			"cluster", name,
 			"capability", wellknown.CapabilityTopology,
 			logger.Err(err),
 		).Warn("failed to request node sync; nodes may not be updated immediately")
 		return
 	}
-	t.Logger.With(
+	lg.With(
 		"cluster", name,
 		"capability", wellknown.CapabilityTopology,
 	).Info("node sync requested")
@@ -138,7 +137,7 @@ func (t *TopologyBackend) Install(ctx context.Context, req *capabilityv1.Install
 	ctxTimeout, ca := context.WithTimeout(ctx, time.Second*60)
 	defer ca()
 	if err := t.WaitForInitContext(ctxTimeout); err != nil {
-		// !! t.logger is not initialized if the deadline is exceeded
+		// !! lg is not initialized if the deadline is exceeded
 		return nil, err
 	}
@@ -276,6 +275,7 @@ func (t *TopologyBackend) InstallerTemplate(_ context.Context, _ *emptypb.Empty)
 }
 
 func (t *TopologyBackend) Sync(ctx context.Context, req *node.SyncRequest) (*node.SyncResponse, error) {
+	lg := logger.PluginLoggerFromContext(t.Context)
 	t.WaitForInit()
 
 	id := cluster.StreamAuthorizedID(ctx)
@@ -297,7 +297,7 @@ func (t *TopologyBackend) Sync(ctx context.Context, req *node.SyncRequest) (*nod
 	if enabled {
 		if err := t.ClusterDriver.ShouldDisableNode(cluster.Reference()); err != nil {
 			reason := status.Convert(err).Message()
-			t.Logger.With(
+			lg.With(
 				"reason", reason,
 			)
 		}
@@ -311,7 +311,7 @@ func (t *TopologyBackend) Sync(ctx context.Context, req *node.SyncRequest) (*nod
 
 	status := t.nodeStatus[id]
 	if status == nil {
-		t.Logger.Debug("No current status found, setting to default")
+		lg.Debug("No current status found, setting to default")
 		t.nodeStatus[id] = &capabilityv1.NodeCapabilityStatus{}
 		status = t.nodeStatus[id]
 	}
diff --git a/plugins/topology/pkg/topology/agent/collector.go b/plugins/topology/pkg/topology/agent/collector.go
index e0b01f9a93..7b8dc2232e 100644
--- a/plugins/topology/pkg/topology/agent/collector.go
+++ b/plugins/topology/pkg/topology/agent/collector.go
@@ -8,8 +8,6 @@ import (
 	"sync"
 	"time"
 
-	"log/slog"
-
 	controlv1 "github.com/rancher/opni/pkg/apis/control/v1"
 	"github.com/rancher/opni/pkg/health"
 	"github.com/rancher/opni/pkg/logger"
@@ -27,7 +25,7 @@ type BatchingConfig struct {
 }
 
 type TopologyStreamer struct {
-	logger     *slog.Logger
+	ctx        context.Context
 	conditions health.ConditionTracker
 
 	v chan client.Object
@@ -39,7 +37,7 @@ type TopologyStreamer struct {
 	topologyStreamClient stream.RemoteTopologyClient
 }
 
-func NewTopologyStreamer(ct health.ConditionTracker, lg *slog.Logger) *TopologyStreamer {
+func NewTopologyStreamer(ctx context.Context, ct health.ConditionTracker) *TopologyStreamer {
 	return &TopologyStreamer{
 		// FIXME: reintroduce this when we want to monitor kubernetes events
 		// eventWatchClient: util.Must(client.NewWithWatch(
@@ -47,7 +45,7 @@ func NewTopologyStreamer(ct health.ConditionTracker, lg *slog.Logger) *TopologyS
 		// 	client.Options{
 		// 		Scheme: apis.NewScheme(),
 		// 	})),
-		logger:     lg,
+		ctx:        ctx,
 		conditions: ct,
 	}
 }
@@ -66,7 +64,7 @@ func (s *TopologyStreamer) SetIdentityClient(identityClient controlv1.IdentityCl
 }
 
 func (s *TopologyStreamer) Run(ctx context.Context, spec *node.TopologyCapabilitySpec) error {
-	lg := s.logger
+	lg := logger.PluginLoggerFromContext(s.ctx)
 	if spec == nil {
 		lg.With("stream", "topology").Warn("no topology capability spec provided, setting defaults")
diff --git a/plugins/topology/pkg/topology/agent/node.go b/plugins/topology/pkg/topology/agent/node.go
index 589a11f59f..a3846753c5 100644
--- a/plugins/topology/pkg/topology/agent/node.go
+++ b/plugins/topology/pkg/topology/agent/node.go
@@ -13,20 +13,19 @@ import (
 	corev1 "github.com/rancher/opni/pkg/apis/core/v1"
 	"github.com/rancher/opni/pkg/capabilities/wellknown"
 	"github.com/rancher/opni/pkg/health"
+	"github.com/rancher/opni/pkg/logger"
 	"github.com/rancher/opni/pkg/util"
 	"github.com/rancher/opni/plugins/topology/apis/node"
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/status"
 	"google.golang.org/protobuf/types/known/emptypb"
-	"log/slog"
 )
 
 type TopologyNode struct {
 	capabilityv1.UnsafeNodeServer
 	controlv1.UnsafeHealthServer
 
-	logger *slog.Logger
-
+	ctx      context.Context
 	clientMu sync.RWMutex
 	client   node.NodeTopologyCapabilityClient
@@ -40,9 +39,9 @@ type TopologyNode struct {
 var _ capabilityv1.NodeServer = &TopologyNode{}
 var _ controlv1.HealthServer = &TopologyNode{}
 
-func NewTopologyNode(ct health.ConditionTracker, lg *slog.Logger) *TopologyNode {
+func NewTopologyNode(ctx context.Context, ct health.ConditionTracker) *TopologyNode {
 	return &TopologyNode{
-		logger:     lg,
+		ctx:        ctx,
 		conditions: ct,
 		config: &node.TopologyCapabilityConfig{
 			Enabled: false,
@@ -63,7 +62,8 @@ func (t *TopologyNode) AddConfigListener(ch chan<- *node.TopologyCapabilityConfi
 }
 
 func (t *TopologyNode) doSync(ctx context.Context) {
-	t.logger.Debug("syncing topology node")
+	lg := logger.PluginLoggerFromContext(t.ctx)
+	lg.Debug("syncing topology node")
 	t.clientMu.RLock()
 	defer t.clientMu.RUnlock()
@@ -86,9 +86,9 @@ func (t *TopologyNode) doSync(ctx context.Context) {
 
 	switch syncResp.ConfigStatus {
 	case node.ConfigStatus_UpToDate:
-		t.logger.Info("topology node is up to date")
+		lg.Info("topology node is up to date")
 	case node.ConfigStatus_NeedsUpdate:
-		t.logger.Info("topology node needs update")
+		lg.Info("topology node needs update")
 		t.updateConfig(syncResp.UpdatedConfig)
 	}
@@ -97,13 +97,15 @@ func (t *TopologyNode) doSync(ctx context.Context) {
 
 // Implements capabilityv1.NodeServer
 func (t *TopologyNode) SyncNow(_ context.Context, req *capabilityv1.Filter) (*emptypb.Empty, error) {
+	lg := logger.PluginLoggerFromContext(t.ctx)
+
 	if len(req.GetCapabilityNames()) > 0 {
 		if !slices.Contains(req.CapabilityNames, wellknown.CapabilityTopology) {
-			t.logger.Debug("ignoring sync request due to capability filter")
+			lg.Debug("ignoring sync request due to capability filter")
 			return &emptypb.Empty{}, nil
 		}
 	}
-	t.logger.Debug("received sync request")
+	lg.Debug("received sync request")
 	t.clientMu.RLock()
 	defer t.clientMu.RUnlock()
@@ -137,6 +139,8 @@ func (t *TopologyNode) GetHealth(_ context.Context, _ *emptypb.Empty) (*corev1.H
 }
 
 func (t *TopologyNode) updateConfig(config *node.TopologyCapabilityConfig) {
+	lg := logger.PluginLoggerFromContext(t.ctx)
+
 	t.configMu.Lock()
 	defer t.configMu.Unlock()
 
@@ -147,7 +151,7 @@ func (t *TopologyNode) updateConfig(config *node.TopologyCapabilityConfig) {
 		select {
 		case ch <- clone:
 		default:
-			t.logger.Warn("slow config update listener detected")
+			lg.Warn("slow config update listener detected")
 			ch <- clone
 		}
 	}
diff --git a/plugins/topology/pkg/topology/agent/plugin.go b/plugins/topology/pkg/topology/agent/plugin.go
index df9953e58d..4e405dc83f 100644
--- a/plugins/topology/pkg/topology/agent/plugin.go
+++ b/plugins/topology/pkg/topology/agent/plugin.go
@@ -3,8 +3,6 @@ package agent
 import (
 	"context"
 
-	"log/slog"
-
 	healthpkg "github.com/rancher/opni/pkg/health"
 	"github.com/rancher/opni/pkg/logger"
 	"github.com/rancher/opni/pkg/plugins/apis/apiextensions/stream"
@@ -18,8 +16,7 @@ import (
 )
 
 type Plugin struct {
-	ctx    context.Context
-	logger *slog.Logger
+	ctx context.Context
 
 	node             *TopologyNode
 	topologyStreamer *TopologyStreamer
@@ -30,13 +27,13 @@ type Plugin struct {
 }
 
 func NewPlugin(ctx context.Context) *Plugin {
-	lg := logger.NewPluginLogger().WithGroup("topology")
+	lg := logger.NewPluginLogger(ctx).WithGroup("topology")
+	ctx = logger.WithPluginLogger(ctx, lg)
 	ct := healthpkg.NewDefaultConditionTracker(lg)
 
 	p := &Plugin{
 		ctx:              ctx,
-		logger:           lg,
-		node:             NewTopologyNode(ct, lg),
-		topologyStreamer: NewTopologyStreamer(ct, lg),
+		node:             NewTopologyNode(ctx, ct),
+		topologyStreamer: NewTopologyStreamer(ctx, ct),
 		k8sClient:        future.New[client.Client](),
 	}
@@ -61,7 +58,8 @@ func NewPlugin(ctx context.Context) *Plugin {
 }
 
 func (p *Plugin) onConfigUpdated(cfg *node.TopologyCapabilityConfig) {
-	p.logger.Debug("topology capability config updated")
+	lg := logger.PluginLoggerFromContext(p.ctx)
+	lg.Debug("topology capability config updated")
 
 	// at this point we know the config has been updated
 	currentlyRunning := (p.stopStreaming != nil)
@@ -75,26 +73,27 @@ func (p *Plugin) onConfigUpdated(cfg *node.TopologyCapabilityConfig) {
 
 	switch {
 	case currentlyRunning && shouldRun:
-		p.logger.Debug("reconfiguring topology stream")
+		lg.Debug("reconfiguring topology stream")
 		p.stopStreaming()
 		startTopologyStream()
 	case currentlyRunning && !shouldRun:
-		p.logger.Debug("stopping topology stream")
+		lg.Debug("stopping topology stream")
 		p.stopStreaming()
 		p.stopStreaming = nil
 	case !currentlyRunning && shouldRun:
-		p.logger.Debug("starting topology stream")
+		lg.Debug("starting topology stream")
 		startTopologyStream()
 	case !currentlyRunning && !shouldRun:
-		p.logger.Debug("topology streaming is disabled")
+		lg.Debug("topology streaming is disabled")
 	}
 }
 
 func Scheme(ctx context.Context) meta.Scheme {
 	scheme := meta.NewScheme(meta.WithMode(meta.ModeAgent))
+
 	p := NewPlugin(ctx)
 	scheme.Add(health.HealthPluginID, health.NewPlugin(p.node))
 	scheme.Add(capability.CapabilityBackendPluginID, capability.NewAgentPlugin(p.node))
-	scheme.Add(stream.StreamAPIExtensionPluginID, stream.NewAgentPlugin(p))
+	scheme.Add(stream.StreamAPIExtensionPluginID, stream.NewAgentPlugin(ctx, p))
 	return scheme
 }
diff --git a/plugins/topology/pkg/topology/agent/stream.go b/plugins/topology/pkg/topology/agent/stream.go
index 46dcf13229..d6ae865a5f 100644
--- a/plugins/topology/pkg/topology/agent/stream.go
+++ b/plugins/topology/pkg/topology/agent/stream.go
@@ -22,7 +22,8 @@ func (p *Plugin) StreamServers() []streamext.Server {
 }
 
 func (p *Plugin) UseStreamClient(cc grpc.ClientConnInterface) {
-	p.topologyStreamer.SetTopologyStreamClient(stream.NewRemoteTopologyClient(cc))
 	p.topologyStreamer.SetIdentityClient(controlv1.NewIdentityClient(cc))
+
+	p.topologyStreamer.SetTopologyStreamClient(stream.NewRemoteTopologyClient(cc))
 	p.node.SetClient(node.NewNodeTopologyCapabilityClient(cc))
 }
diff --git a/plugins/topology/pkg/topology/gateway/management.go b/plugins/topology/pkg/topology/gateway/management.go
index 8926db12ed..1d379821ef 100644
--- a/plugins/topology/pkg/topology/gateway/management.go
+++ b/plugins/topology/pkg/topology/gateway/management.go
@@ -6,6 +6,7 @@ import (
 )
 
 func (p *Plugin) configureTopologyManagement() {
+	lg := logger.PluginLoggerFromContext(p.ctx)
 	drivers.ResetClusterDrivers()
 
 	if kcd, err := drivers.NewTopologyManagerClusterDriver(); err == nil {
@@ -16,7 +17,7 @@ func (p *Plugin) configureTopologyManagement() {
 	name := "topology-manager"
 	driver, err := drivers.GetClusterDriver(name)
 	if err != nil {
-		p.logger.With(
+		lg.With(
 			"driver", name,
 			logger.Err(err),
 		).Error("failed to load cluster driver, using fallback no-op driver")
diff --git a/plugins/topology/pkg/topology/gateway/plugin.go b/plugins/topology/pkg/topology/gateway/plugin.go
index 5e9a417b2c..816cc691a1 100644
--- a/plugins/topology/pkg/topology/gateway/plugin.go
+++ b/plugins/topology/pkg/topology/gateway/plugin.go
@@ -27,7 +27,6 @@ import (
 	"github.com/rancher/opni/plugins/topology/pkg/topology/gateway/drivers"
 	"github.com/rancher/opni/plugins/topology/pkg/topology/gateway/stream"
 	"google.golang.org/protobuf/proto"
-	"log/slog"
 )
 
 type Plugin struct {
@@ -35,8 +34,7 @@ type Plugin struct {
 	system.UnimplementedSystemPluginClient
 	uninstallRunner TopologyUninstallTaskRunner
 
-	ctx    context.Context
-	logger *slog.Logger
+	ctx context.Context
 
 	topologyRemoteWrite stream.TopologyStreamWriter
 	topologyBackend     backend.TopologyBackend
@@ -54,9 +52,14 @@ type Plugin struct {
 }
 
 func NewPlugin(ctx context.Context) *Plugin {
+	lg := logger.NewPluginLogger(ctx).WithGroup("topology")
+	streamWriteLg := lg.With("component", "stream")
+	uninstallRunnerLg := lg.WithGroup("topology-uninstall-runner")
+	backendLg := lg.WithGroup("topology-backend")
+
+	ctx = logger.WithPluginLogger(ctx, lg)
 	p := &Plugin{
 		ctx:              ctx,
-		logger:           logger.NewPluginLogger().WithGroup("topology"),
 		nc:               future.New[*nats.Conn](),
 		storage:          future.New[ConfigStorageAPIs](),
 		mgmtClient:       future.New[managementv1.ManagementClient](),
@@ -69,18 +72,18 @@ func NewPlugin(ctx context.Context) *Plugin {
 	}
 	future.Wait1(p.nc, func(nc *nats.Conn) {
 		p.topologyRemoteWrite.Initialize(stream.TopologyStreamWriteConfig{
-			Logger: p.logger.With("component", "stream"),
-			Nc:     nc,
+			Context: logger.WithPluginLogger(ctx, streamWriteLg),
+			Nc:      nc,
 		})
 	})
 
 	future.Wait2(p.storageBackend, p.uninstallController,
 		func(storageBackend storage.Backend, uninstallController *task.Controller) {
-			p.uninstallRunner.logger = p.logger.WithGroup("topology-uninstall-runner")
+			p.uninstallRunner.ctx = logger.WithPluginLogger(ctx, uninstallRunnerLg)
 			p.uninstallRunner.storageBackend = storageBackend
 		})
 
-	p.logger.Debug("waiting for async requirements for starting topology backend")
+	lg.Debug("waiting for async requirements for starting topology backend")
 
 	future.Wait5(p.storageBackend, p.mgmtClient, p.delegate, p.uninstallController, p.clusterDriver,
 		func(
 			storageBackend storage.Backend,
@@ -89,7 +92,7 @@ func NewPlugin(ctx context.Context) *Plugin {
 			uninstallController *task.Controller,
 			clusterDriver drivers.ClusterDriver,
 		) {
-			p.logger.With(
+			lg.With(
 				"storageBackend", storageBackend,
 				"mgmtClient", mgmtClient,
 				"delegate", delegate,
@@ -97,14 +100,14 @@ func NewPlugin(ctx context.Context) *Plugin {
 				"clusterDriver", clusterDriver,
 			).Debug("async requirements for starting topology backend are ready")
 			p.topologyBackend.Initialize(backend.TopologyBackendConfig{
-				Logger:              p.logger.WithGroup("topology-backend"),
+				Context:             logger.WithPluginLogger(ctx, backendLg),
 				StorageBackend:      storageBackend,
 				MgmtClient:          mgmtClient,
 				Delegate:            delegate,
 				UninstallController: uninstallController,
 				ClusterDriver:       clusterDriver,
 			})
-			p.logger.Debug("initialized topology backend")
+			lg.Debug("initialized topology backend")
 		})
 
 	return p
@@ -119,6 +122,7 @@ type ConfigStorageAPIs struct {
 
 func Scheme(ctx context.Context) meta.Scheme {
 	scheme := meta.NewScheme(meta.WithMode(meta.ModeGateway))
+
 	p := NewPlugin(ctx)
 	scheme.Add(system.SystemPluginID, system.NewPlugin(p))
 	scheme.Add(managementext.ManagementAPIExtensionPluginID,
@@ -133,7 +137,7 @@ func Scheme(ctx context.Context) meta.Scheme {
 			),
 		),
 	)
-	scheme.Add(streamext.StreamAPIExtensionPluginID, streamext.NewGatewayPlugin(p))
+	scheme.Add(streamext.StreamAPIExtensionPluginID, streamext.NewGatewayPlugin(ctx, p))
 	scheme.Add(capability.CapabilityBackendPluginID, capability.NewPlugin(&p.topologyBackend))
 	return scheme
 }
diff --git a/plugins/topology/pkg/topology/gateway/stream/stream.go b/plugins/topology/pkg/topology/gateway/stream/stream.go
index 67ca7722ba..d9403befc3 100644
--- a/plugins/topology/pkg/topology/gateway/stream/stream.go
+++ b/plugins/topology/pkg/topology/gateway/stream/stream.go
@@ -15,18 +15,18 @@ import (
 
 	"github.com/nats-io/nats.go"
 	corev1 "github.com/rancher/opni/pkg/apis/core/v1"
+	"github.com/rancher/opni/pkg/logger"
"github.com/rancher/opni/pkg/slo/shared" "github.com/rancher/opni/pkg/topology/store" "github.com/rancher/opni/pkg/util" "github.com/rancher/opni/plugins/topology/apis/stream" "google.golang.org/grpc/codes" "google.golang.org/protobuf/types/known/emptypb" - "log/slog" ) type TopologyStreamWriteConfig struct { - Logger *slog.Logger - Nc *nats.Conn + Context context.Context + Nc *nats.Conn } type TopologyStreamWriter struct { @@ -42,9 +42,10 @@ var _ stream.RemoteTopologyServer = (*TopologyStreamWriter)(nil) func (t *TopologyStreamWriter) Initialize(conf TopologyStreamWriteConfig) { t.InitOnce(func() { + lg := logger.PluginLoggerFromContext(conf.Context) objStore, err := store.NewTopologyObjectStore(conf.Nc) if err != nil { - conf.Logger.With("error", err).Error("failed to initialize topology object store") + lg.With("error", err).Error("failed to initialize topology object store") os.Exit(1) } t.topologyObjectStore = objStore @@ -66,6 +67,8 @@ func (t *TopologyStreamWriter) objectDef(clusterId *corev1.Reference, repr strea } func (t *TopologyStreamWriter) Push(_ context.Context, payload *stream.Payload) (*emptypb.Empty, error) { + lg := logger.PluginLoggerFromContext(t.Context) + if !t.Initialized() { return nil, util.StatusError(codes.Unavailable) } @@ -77,7 +80,7 @@ func (t *TopologyStreamWriter) Push(_ context.Context, payload *stream.Payload) if err != nil { return nil, err } - t.Logger.With("info", info).Debug("successfully pushed topology data") + lg.With("info", info).Debug("successfully pushed topology data") return &emptypb.Empty{}, nil } diff --git a/plugins/topology/pkg/topology/gateway/system.go b/plugins/topology/pkg/topology/gateway/system.go index 891a54674a..5e9d25595a 100644 --- a/plugins/topology/pkg/topology/gateway/system.go +++ b/plugins/topology/pkg/topology/gateway/system.go @@ -21,6 +21,7 @@ import ( ) func (p *Plugin) UseManagementAPI(client managementv1.ManagementClient) { + lg := logger.PluginLoggerFromContext(p.ctx) p.mgmtClient.Set(client) cfg, err := client.GetConfig( context.Background(), @@ -28,20 +29,20 @@ func (p *Plugin) UseManagementAPI(client managementv1.ManagementClient) { grpc.WaitForReady(true), ) if err != nil { - p.logger.With(logger.Err(err)).Error("failed to get config") + lg.With(logger.Err(err)).Error("failed to get config") os.Exit(1) } objectList, err := machinery.LoadDocuments(cfg.Documents) if err != nil { - p.logger.With(logger.Err(err)).Error("failed to load config") + lg.With(logger.Err(err)).Error("failed to load config") os.Exit(1) } machinery.LoadAuthProviders(p.ctx, objectList) objectList.Visit(func(config *v1beta1.GatewayConfig) { backend, err := machinery.ConfigureStorageBackend(p.ctx, &config.Spec.Storage) if err != nil { - p.logger.With( + lg.With( "err", err, ).Error("failed to configure storage backend") os.Exit(1) @@ -54,6 +55,8 @@ func (p *Plugin) UseManagementAPI(client managementv1.ManagementClient) { } func (p *Plugin) UseKeyValueStore(client system.KeyValueStoreClient) { + lg := logger.PluginLoggerFromContext(p.ctx) + // set other futures before trying to acquire NATS connection ctrl, err := task.NewController( p.ctx, @@ -62,7 +65,7 @@ func (p *Plugin) UseKeyValueStore(client system.KeyValueStoreClient) { &p.uninstallRunner) if err != nil { - p.logger.With( + lg.With( logger.Err(err), ).Error("failed to create uninstall task controller") } @@ -85,7 +88,7 @@ func (p *Plugin) UseKeyValueStore(client system.KeyValueStoreClient) { } nc, err := natsutil.AcquireNATSConnection(p.ctx, cfg) if err != nil { - p.logger.With( + 
lg.With( logger.Err(err), ).Error("fatal : failed to acquire NATS connection") os.Exit(1) diff --git a/plugins/topology/pkg/topology/gateway/uninstall.go b/plugins/topology/pkg/topology/gateway/uninstall.go index 0ab86388f7..fd21f6e746 100644 --- a/plugins/topology/pkg/topology/gateway/uninstall.go +++ b/plugins/topology/pkg/topology/gateway/uninstall.go @@ -16,7 +16,7 @@ import ( ) type TopologyUninstallTaskRunner struct { - logger *slog.Logger + ctx context.Context storageBackend storage.Backend } diff --git a/test/plugins/metrics/runner_test.go b/test/plugins/metrics/runner_test.go index e7cd044130..f0fd1d0487 100644 --- a/test/plugins/metrics/runner_test.go +++ b/test/plugins/metrics/runner_test.go @@ -1,6 +1,7 @@ package metrics_test import ( + "context" "fmt" "math" "time" @@ -79,11 +80,13 @@ var _ = Describe("Target Runner", Ordered, Label("unit"), func() { ) BeforeEach(func() { - lg := logger.NewPluginLogger().WithGroup("test-runner") + ctx := context.Background() + lg := logger.NewPluginLogger(ctx).WithGroup("test-runner") + ctx = logger.WithPluginLogger(ctx, lg) writerClient = &mockRemoteWriteClient{} - runner = agent.NewTargetRunner(lg) + runner = agent.NewTargetRunner(ctx) runner.SetRemoteWriteClient(clients.NewLocker(nil, func(connInterface grpc.ClientConnInterface) remotewrite.RemoteWriteClient { return writerClient }))
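The recurring change across these plugins is that stored *slog.Logger struct fields are removed: the logger now travels on a context.Context via logger.WithPluginLogger and is recovered at the call site with logger.PluginLoggerFromContext. The pkg/logger implementations of these helpers are not part of this diff; the following is a minimal sketch of what they presumably look like, assuming they wrap context.WithValue (the key type and the slog.Default fallback are inferred, not confirmed by the diff):

package logger

import (
	"context"
	"log/slog"
)

// pluginLoggerKey is an unexported key type so values stored by this
// package cannot collide with other context values.
type pluginLoggerKey struct{}

// WithPluginLogger returns a child context carrying lg.
func WithPluginLogger(ctx context.Context, lg *slog.Logger) context.Context {
	return context.WithValue(ctx, pluginLoggerKey{}, lg)
}

// PluginLoggerFromContext recovers the logger stored by WithPluginLogger.
// Falling back to slog.Default() is an assumption; the real helper may
// construct a fresh plugin logger instead.
func PluginLoggerFromContext(ctx context.Context) *slog.Logger {
	if lg, ok := ctx.Value(pluginLoggerKey{}).(*slog.Logger); ok {
		return lg
	}
	return slog.Default()
}

One design consequence visible in the hunks above: helpers such as tryApplyThenDeleteCortexRules lose their *slog.Logger parameter, and config structs like RemoteWriteForwarderConfig, TopologyBackendConfig, and TopologyStreamWriteConfig swap a Logger field for a Context field, so each component reads its (possibly group-scoped) logger from the context it was initialized with.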
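The construction side follows the same shape in the metrics, slo, and topology plugins: build one group logger per plugin, stash it on the plugin context before anything else captures that context, and derive per-component contexts for the config structs. A condensed usage sketch of that pattern, with a hypothetical "example" plugin standing in for the real ones:

package example

import (
	"context"

	"github.com/rancher/opni/pkg/logger"
)

type Plugin struct {
	ctx context.Context
}

func NewPlugin(ctx context.Context) *Plugin {
	// One logger per plugin, attached to the context that every
	// component of the plugin will receive.
	lg := logger.NewPluginLogger(ctx).WithGroup("example")
	ctx = logger.WithPluginLogger(ctx, lg)
	return &Plugin{ctx: ctx}
}

func (p *Plugin) doWork() {
	// Methods recover the logger from the context instead of
	// reading a struct field.
	lg := logger.PluginLoggerFromContext(p.ctx)
	lg.Info("doing work")
}

Because Scheme now calls NewPlugin after the context is prepared (and passes ctx into streamext.NewGatewayPlugin / stream.NewAgentPlugin), the stream extensions see the same logger-carrying context as the plugin itself; that appears to be the motivation for the added ctx parameters in those constructors.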