diff --git a/lib/auth/init.go b/lib/auth/init.go
index 215dd4aaae512..c3707012cccf9 100644
--- a/lib/auth/init.go
+++ b/lib/auth/init.go
@@ -1080,7 +1080,7 @@ func createPresetRoles(ctx context.Context, rm PresetRoleManager) error {
 				return trace.Wrap(err)
 			}
 
-			role, err := services.AddRoleDefaults(currentRole)
+			role, err := services.AddRoleDefaults(gctx, currentRole)
 			if trace.IsAlreadyExists(err) {
 				return nil
 			}
diff --git a/lib/services/access_checker.go b/lib/services/access_checker.go
index 1d3639bc742c8..acea13514adea 100644
--- a/lib/services/access_checker.go
+++ b/lib/services/access_checker.go
@@ -22,13 +22,13 @@ import (
 	"cmp"
 	"context"
 	"fmt"
+	"log/slog"
 	"net"
 	"slices"
 	"strings"
 	"time"
 
 	"github.com/gravitational/trace"
-	log "github.com/sirupsen/logrus"
 	"golang.org/x/crypto/ssh"
 
 	"github.com/gravitational/teleport/api/constants"
@@ -39,6 +39,7 @@ import (
 	"github.com/gravitational/teleport/lib/services/readonly"
 	"github.com/gravitational/teleport/lib/tlsca"
 	"github.com/gravitational/teleport/lib/utils"
+	logutils "github.com/gravitational/teleport/lib/utils/log"
 )
 
 // AccessChecker interface checks access to resources based on roles, traits,
@@ -406,10 +407,11 @@ func (a *accessChecker) checkAllowedResources(r AccessCheckable) error {
 		return nil
 	}
 
-	// Note: logging in this function only happens in debug mode. This is because
+	// Note: logging in this function only happens in trace mode. This is because
 	// adding logging to this function (which is called on every resource returned
 	// by the backend) can slow down this function by 50x for large clusters!
-	isDebugEnabled, debugf := rbacDebugLogger()
+	ctx := context.Background()
+	isLoggingEnabled := rbacLogger.Enabled(ctx, logutils.TraceLevel)
 
 	for _, resourceID := range a.info.AllowedResourceIDs {
 		if resourceID.ClusterName == a.localCluster &&
@@ -421,23 +423,33 @@ func (a *accessChecker) checkAllowedResources(r AccessCheckable) error {
 			(resourceID.Kind == r.GetKind() || (slices.Contains(types.KubernetesResourcesKinds, resourceID.Kind) && r.GetKind() == types.KindKubernetesCluster)) &&
 			resourceID.Name == r.GetName() {
 			// Allowed to access this resource by resource ID, move on to role checks.
-			if isDebugEnabled {
-				debugf("Matched allowed resource ID %q", types.ResourceIDToString(resourceID))
+
+			if isLoggingEnabled {
+				rbacLogger.LogAttrs(ctx, logutils.TraceLevel, "Matched allowed resource ID",
+					slog.String("resource_id", types.ResourceIDToString(resourceID)),
+				)
 			}
+
 			return nil
 		}
 	}
 
-	if isDebugEnabled {
+	if isLoggingEnabled {
 		allowedResources, err := types.ResourceIDsToString(a.info.AllowedResourceIDs)
 		if err != nil {
 			return trace.Wrap(err)
 		}
-		err = trace.AccessDenied("access to %v denied, %q not in allowed resource IDs %s",
+
+		rbacLogger.LogAttrs(ctx, logutils.TraceLevel, "Access to resource denied, not in allowed resource IDs",
+			slog.String("resource_kind", r.GetKind()),
+			slog.String("resource_name", r.GetName()),
+			slog.String("allowed_resources", allowedResources),
+		)
+
+		return trace.AccessDenied("access to %v denied, %q not in allowed resource IDs %s",
 			r.GetKind(), r.GetName(), allowedResources)
-		debugf("Access denied: %v", err)
-		return err
 	}
+
 	return trace.AccessDenied("access to %v denied, not in allowed resource IDs", r.GetKind())
 }
@@ -875,10 +887,11 @@ func (a *accessChecker) CheckAccessToRemoteCluster(rc types.RemoteCluster) error
 		return trace.AccessDenied("access to cluster denied")
 	}
 
-	// Note: logging in this function only happens in debug mode, this is because
+	// Note: logging in this function only happens in trace mode, this is because
 	// adding logging to this function (which is called on every server returned
 	// by GetRemoteClusters) can slow down this function by 50x for large clusters!
-	isDebugEnabled, debugf := rbacDebugLogger()
+	ctx := context.Background()
+	isLoggingEnabled := rbacLogger.Enabled(ctx, logutils.TraceLevel)
 
 	rcLabels := rc.GetMetadata().Labels
@@ -898,8 +911,10 @@ func (a *accessChecker) CheckAccessToRemoteCluster(rc types.RemoteCluster) error
 	}
 
 	if !usesLabels && len(rcLabels) == 0 {
-		debugf("Grant access to cluster %v - no role in %v uses cluster labels and the cluster is not labeled.",
-			rc.GetName(), a.RoleNames())
+		rbacLogger.LogAttrs(ctx, logutils.TraceLevel, "Grant access to cluster - no role uses cluster labels and the cluster is not labeled",
+			slog.String("cluster_name", rc.GetName()),
+			slog.Any("roles", a.RoleNames()),
+		)
 		return nil
 	}
 
@@ -907,21 +922,24 @@ func (a *accessChecker) CheckAccessToRemoteCluster(rc types.RemoteCluster) error
 	// the deny role set prohibits access.
 	var errs []error
 	for _, role := range a.RoleSet {
-		matchLabels, labelsMessage, err := checkRoleLabelsMatch(types.Deny, role, a.info.Traits, rc, isDebugEnabled)
+		matchLabels, labelsMessage, err := checkRoleLabelsMatch(types.Deny, role, a.info.Traits, rc, isLoggingEnabled)
 		if err != nil {
 			return trace.Wrap(err)
 		}
 		if matchLabels {
 			// This condition avoids formatting calls on large scale.
-			debugf("Access to cluster %v denied, deny rule in %v matched; match(%s)",
-				rc.GetName(), role.GetName(), labelsMessage)
+			rbacLogger.LogAttrs(ctx, logutils.TraceLevel, "Access to cluster denied, deny rule matched",
+				slog.String("cluster", rc.GetName()),
+				slog.String("role", role.GetName()),
+				slog.String("label_message", labelsMessage),
+			)
 			return trace.AccessDenied("access to cluster denied")
 		}
 	}
 
 	// Check allow rules: label has to match in any role in the role set to be granted access.
 	for _, role := range a.RoleSet {
-		matchLabels, labelsMessage, err := checkRoleLabelsMatch(types.Allow, role, a.info.Traits, rc, isDebugEnabled)
+		matchLabels, labelsMessage, err := checkRoleLabelsMatch(types.Allow, role, a.info.Traits, rc, isLoggingEnabled)
 		if err != nil {
 			return trace.Wrap(err)
 		}
@@ -929,22 +947,32 @@ func (a *accessChecker) CheckAccessToRemoteCluster(rc types.RemoteCluster) error
 		if err != nil {
 			return trace.Wrap(err)
 		}
-		debugf("Check access to role(%v) rc(%v, labels=%v) matchLabels=%v, msg=%v, err=%v allow=%v rcLabels=%v",
-			role.GetName(), rc.GetName(), rcLabels, matchLabels, labelsMessage, err, labelMatchers, rcLabels)
+		rbacLogger.LogAttrs(ctx, logutils.TraceLevel, "Check access to role",
+			slog.String("role", role.GetName()),
+			slog.String("cluster", rc.GetName()),
+			slog.Any("cluster_labels", rcLabels),
+			slog.Any("match_labels", matchLabels),
+			slog.String("labels_message", labelsMessage),
+			slog.Any("error", err),
+			slog.Any("allow", labelMatchers),
+		)
 		if err != nil {
 			return trace.Wrap(err)
 		}
 		if matchLabels {
 			return nil
 		}
-		if isDebugEnabled {
+		if isLoggingEnabled {
 			deniedError := trace.AccessDenied("role=%v, match(%s)",
 				role.GetName(), labelsMessage)
 			errs = append(errs, deniedError)
 		}
 	}
 
-	debugf("Access to cluster %v denied, no allow rule matched; %v", rc.GetName(), errs)
+	rbacLogger.LogAttrs(ctx, logutils.TraceLevel, "Access to cluster denied, no allow rule matched",
+		slog.String("cluster", rc.GetName()),
+		slog.Any("error", errs),
+	)
 	return trace.AccessDenied("access to cluster denied")
 }
@@ -1095,7 +1123,10 @@ func (a *accessChecker) HostUsers(s types.Server) (*HostUsersInfo, error) {
 			fmt.Fprintf(b, "%s=%v ", shell, roles)
 		}
 
-		log.Warnf("Host user shell resolution is ambiguous due to conflicting roles. %q will be used, but consider unifying roles around a single shell. Current shell assignments: %s", shell, b)
+		slog.WarnContext(context.Background(), "Host user shell resolution is ambiguous due to conflicting roles, consider unifying roles around a single shell",
+			"selected_shell", shell,
+			"shell_assignments", b,
+		)
 	}
 
 	for _, role := range a.RoleSet {
@@ -1247,8 +1278,11 @@ func AccessInfoFromRemoteCertificate(cert *ssh.Certificate, roleMap types.RoleMa
 	if len(roles) == 0 {
 		return nil, trace.AccessDenied("no roles mapped for user with remote roles %v", unmappedRoles)
 	}
-	log.Debugf("Mapped remote roles %v to local roles %v and traits %v.",
-		unmappedRoles, roles, traits)
+	slog.DebugContext(context.Background(), "Mapped remote roles to local roles and traits",
+		"remote_roles", unmappedRoles,
+		"local_roles", roles,
+		"traits", traits,
+	)
 
 	allowedResourceIDs, err := ExtractAllowedResourcesFromCert(cert)
 	if err != nil {
@@ -1281,10 +1315,10 @@ func AccessInfoFromLocalIdentity(identity tlsca.Identity, access UserGetter) (*A
 			return nil, trace.Wrap(err)
 		}
 
-		log.Warnf("Failed to find roles in x509 identity for %v. Fetching "+
-			"from backend. If the identity provider allows username changes, this can "+
-			"potentially allow an attacker to change the role of the existing user.",
-			identity.Username)
+		const msg = "Failed to find roles in x509 identity. Fetching " +
+			"from backend. If the identity provider allows username changes, this can " +
+			"potentially allow an attacker to change the role of the existing user."
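+		// The username is attached as a structured attribute below rather than
+		// interpolated into msg, so the message itself can stay constant.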
+		slog.WarnContext(context.Background(), msg, "username", identity.Username)
 		roles = u.GetRoles()
 		traits = u.GetTraits()
 	}
@@ -1335,8 +1369,12 @@ func AccessInfoFromRemoteIdentity(identity tlsca.Identity, roleMap types.RoleMap
 	if len(roles) == 0 {
 		return nil, trace.AccessDenied("no roles mapped for remote user %q from cluster %q with remote roles %v", identity.Username, identity.TeleportCluster, unmappedRoles)
 	}
-	log.Debugf("Mapped roles %v of remote user %q to local roles %v and traits %v.",
-		unmappedRoles, identity.Username, roles, traits)
+	slog.DebugContext(context.Background(), "Mapped roles of remote user to local roles and traits",
+		"remote_roles", unmappedRoles,
+		"user", identity.Username,
+		"local_roles", roles,
+		"traits", traits,
+	)
 
 	allowedResourceIDs := identity.AllowedResourceIDs
diff --git a/lib/services/access_request.go b/lib/services/access_request.go
index 67da35cde7381..2d5c407f05e48 100644
--- a/lib/services/access_request.go
+++ b/lib/services/access_request.go
@@ -41,6 +41,7 @@ import (
 	apiutils "github.com/gravitational/teleport/api/utils"
 	"github.com/gravitational/teleport/lib/tlsca"
 	"github.com/gravitational/teleport/lib/utils"
+	logutils "github.com/gravitational/teleport/lib/utils/log"
 	"github.com/gravitational/teleport/lib/utils/parse"
 	"github.com/gravitational/teleport/lib/utils/typical"
 )
@@ -2170,10 +2171,9 @@ func (m *RequestValidator) pruneResourceRequestRoles(
 
 	for _, resourceID := range resourceIDs {
 		if resourceID.ClusterName != localClusterName {
-			_, debugf := rbacDebugLogger()
-			debugf("Requested resource %q is in a foreign cluster, unable to prune roles. "+
-				`All available "search_as_roles" will be requested.`,
-				types.ResourceIDToString(resourceID))
+			rbacLogger.LogAttrs(ctx, logutils.TraceLevel, `Requested resource is in a foreign cluster, unable to prune roles - All available "search_as_roles" will be requested`,
+				slog.Any("requested_resources", types.ResourceIDToString(resourceID)),
+			)
 			return roles, nil
 		}
 	}
diff --git a/lib/services/database.go b/lib/services/database.go
index a151f2956fc6f..9a3aba1ad9560 100644
--- a/lib/services/database.go
+++ b/lib/services/database.go
@@ -21,6 +21,7 @@ package services
 
 import (
 	"context"
 	"errors"
+	"log/slog"
 	"net"
 	"net/netip"
 	"net/url"
@@ -28,7 +29,6 @@ import (
 	"strings"
 
 	"github.com/gravitational/trace"
-	log "github.com/sirupsen/logrus"
 	"go.mongodb.org/mongo-driver/mongo/readpref"
 	"go.mongodb.org/mongo-driver/x/mongo/driver/connstring"
 
@@ -277,7 +277,11 @@ func validateMongoDB(db types.Database) error {
 	// DNS errors here by replacing the scheme and then ParseAndValidate again
 	// to validate as much as we can.
 	if isDNSError(err) {
-		log.Warnf("MongoDB database %q (connection string %q) failed validation with DNS error: %v.", db.GetName(), db.GetURI(), err)
+		slog.WarnContext(context.Background(), "MongoDB database failed validation with DNS error",
+			"database_name", db.GetName(),
+			"database_uri", db.GetURI(),
+			"error", err,
+		)
 
 		connString, err = connstring.ParseAndValidate(strings.Replace(
 			db.GetURI(),
diff --git a/lib/services/local/access.go b/lib/services/local/access.go
index eb467e76ed815..55f53a1715268 100644
--- a/lib/services/local/access.go
+++ b/lib/services/local/access.go
@@ -20,11 +20,11 @@ package local
 
 import (
 	"context"
+	"log/slog"
 	"strings"
 	"time"
 
 	"github.com/gravitational/trace"
-	"github.com/sirupsen/logrus"
 
 	"github.com/gravitational/teleport"
 	"github.com/gravitational/teleport/api/client/proto"
@@ -36,14 +36,14 @@ import (
 // AccessService manages roles
 type AccessService struct {
 	backend.Backend
-	log *logrus.Entry
+	logger *slog.Logger
 }
 
 // NewAccessService returns new access service instance
 func NewAccessService(backend backend.Backend) *AccessService {
 	return &AccessService{
 		Backend: backend,
-		log:     logrus.WithFields(logrus.Fields{teleport.ComponentKey: "AccessService"}),
+		logger:  slog.With(teleport.ComponentKey, "AccessService"),
 	}
 }
@@ -124,7 +124,10 @@ func (s *AccessService) ListRoles(ctx context.Context, req *proto.ListRolesReque
 			services.WithRevision(item.Revision),
 		)
 		if err != nil {
-			s.log.Warnf("Failed to unmarshal role at %q: %v", item.Key, err)
+			s.logger.WarnContext(ctx, "Failed to unmarshal role",
+				"key", item.Key,
+				"error", err,
+			)
 			continue
 		}
diff --git a/lib/services/local/access_list.go b/lib/services/local/access_list.go
index 611d9de23f094..955b691d503bc 100644
--- a/lib/services/local/access_list.go
+++ b/lib/services/local/access_list.go
@@ -27,9 +27,7 @@ import (
 	"github.com/google/uuid"
 	"github.com/gravitational/trace"
 	"github.com/jonboulle/clockwork"
-	"github.com/sirupsen/logrus"
 
-	"github.com/gravitational/teleport"
 	accesslistv1 "github.com/gravitational/teleport/api/gen/proto/go/teleport/accesslist/v1"
 	"github.com/gravitational/teleport/api/types"
 	"github.com/gravitational/teleport/api/types/accesslist"
@@ -75,7 +73,6 @@ const (
 
 // consistent view to the rest of the Teleport application. It makes no decisions
 // about granting or withholding list membership.
 type AccessListService struct {
-	log           logrus.FieldLogger
 	clock         clockwork.Clock
 	service       *generic.Service[*accesslist.AccessList]
 	memberService *generic.Service[*accesslist.AccessListMember]
@@ -144,7 +141,6 @@ func NewAccessListService(b backend.Backend, clock clockwork.Clock, opts ...Serv
 	}
 
 	return &AccessListService{
-		log:           logrus.WithFields(logrus.Fields{teleport.ComponentKey: "access-list:local-service"}),
 		clock:         clock,
 		service:       service,
 		memberService: memberService,
diff --git a/lib/services/local/dynamic_access.go b/lib/services/local/dynamic_access.go
index e11aa5a51b16a..cdacfbecd566b 100644
--- a/lib/services/local/dynamic_access.go
+++ b/lib/services/local/dynamic_access.go
@@ -20,11 +20,11 @@ package local
 
 import (
 	"context"
+	"log/slog"
 	"slices"
 	"time"
 
 	"github.com/gravitational/trace"
-	"github.com/sirupsen/logrus"
 
 	"github.com/gravitational/teleport"
 	"github.com/gravitational/teleport/api/client/proto"
@@ -38,14 +38,14 @@ import (
 // DynamicAccessService manages dynamic RBAC
 type DynamicAccessService struct {
 	backend.Backend
-	log *logrus.Entry
+	logger *slog.Logger
 }
 
 // NewDynamicAccessService returns new dynamic access service instance
 func NewDynamicAccessService(backend backend.Backend) *DynamicAccessService {
 	return &DynamicAccessService{
 		Backend: backend,
-		log:     logrus.WithFields(logrus.Fields{teleport.ComponentKey: "DynamicAccess"}),
+		logger:  slog.With(teleport.ComponentKey, "DynamicAccess"),
 	}
 }
@@ -348,7 +348,10 @@ func (s *DynamicAccessService) ListAccessRequests(ctx context.Context, req *prot
 
 		accessRequest, err := itemToAccessRequest(item)
 		if err != nil {
-			s.log.Warnf("Failed to unmarshal access request at %q: %v", item.Key, err)
+			s.logger.WarnContext(ctx, "Failed to unmarshal access request",
+				"key", item.Key,
+				"error", err,
+			)
 			continue
 		}
diff --git a/lib/services/local/events.go b/lib/services/local/events.go
index 9931b80857500..58e56ada292a4 100644
--- a/lib/services/local/events.go
+++ b/lib/services/local/events.go
@@ -20,11 +20,11 @@ package local
 
 import (
 	"context"
+	"log/slog"
 	"strings"
 
 	"github.com/gravitational/trace"
 	"github.com/jonboulle/clockwork"
-	"github.com/sirupsen/logrus"
 
 	"github.com/gravitational/teleport"
 	apidefaults "github.com/gravitational/teleport/api/defaults"
@@ -48,14 +48,14 @@ import (
 // EventsService implements service to watch for events
 type EventsService struct {
-	*logrus.Entry
+	logger  *slog.Logger
 	backend backend.Backend
 }
 
 // NewEventsService returns new events service instance
 func NewEventsService(b backend.Backend) *EventsService {
 	return &EventsService{
-		Entry:   logrus.WithFields(logrus.Fields{teleport.ComponentKey: "Events"}),
+		logger:  slog.With(teleport.ComponentKey, "Events"),
 		backend: b,
 	}
 }
@@ -287,13 +287,13 @@ func (e *EventsService) NewWatcher(ctx context.Context, watch types.Watch) (type
 	if err != nil {
 		return nil, trace.Wrap(err)
 	}
-	return newWatcher(w, e.Entry, parsers, validKinds), nil
+	return newWatcher(w, e.logger, parsers, validKinds), nil
 }
 
-func newWatcher(backendWatcher backend.Watcher, l *logrus.Entry, parsers []resourceParser, kinds []types.WatchKind) *watcher {
+func newWatcher(backendWatcher backend.Watcher, l *slog.Logger, parsers []resourceParser, kinds []types.WatchKind) *watcher {
 	w := &watcher{
 		backendWatcher: backendWatcher,
-		Entry:          l,
+		logger:         l,
 		parsers:        parsers,
 		eventsC:        make(chan types.Event),
 		kinds:          kinds,
@@ -303,7 +303,7 @@ func newWatcher(backendWatcher backend.Watcher, l *logrus.Entry, parsers []resou
 }
 
 type watcher struct {
-	*logrus.Entry
+	logger         *slog.Logger
 	parsers        []resourceParser
 	backendWatcher backend.Watcher
 	eventsC        chan types.Event
@@ -350,7 +350,7 @@ func (w *watcher) forwardEvents() {
 			// node events as well, and there could be no
 			// handler registered for nodes, only for namespaces
 			if !trace.IsNotFound(err) {
-				w.Warning(trace.DebugReport(err))
+				w.logger.WarnContext(context.Background(), "Failed to parse event", "error", err)
 			}
 		}
 		for _, c := range converted {
@@ -2944,7 +2944,7 @@ func WaitForEvent(ctx context.Context, watcher types.Watcher, m EventMatcher, cl
 				return res, nil
 			}
 			if !trace.IsCompareFailed(err) {
-				logrus.WithError(err).Debug("Failed to match event.")
+				slog.DebugContext(ctx, "Failed to match event", "error", err)
 			}
 		case <-watcher.Done():
 			// Watcher closed, probably due to a network error.
diff --git a/lib/services/local/externalauditstorage.go b/lib/services/local/externalauditstorage.go
index 96cd050bfd5be..fdb59e827d03a 100644
--- a/lib/services/local/externalauditstorage.go
+++ b/lib/services/local/externalauditstorage.go
@@ -22,9 +22,7 @@ import (
 	"context"
 
 	"github.com/gravitational/trace"
-	"github.com/sirupsen/logrus"
 
-	"github.com/gravitational/teleport"
 	"github.com/gravitational/teleport/api/types/externalauditstorage"
 	"github.com/gravitational/teleport/api/types/header"
 	"github.com/gravitational/teleport/lib/backend"
@@ -45,14 +43,12 @@ var (
 // ExternalAuditStorageService manages External Audit Storage resources in the Backend.
 type ExternalAuditStorageService struct {
 	backend backend.Backend
-	logger  *logrus.Entry
 }
 
 // NewExternalAuditStorageService returns a new *ExternalAuditStorageService or an error if it fails.
 func NewExternalAuditStorageService(backend backend.Backend) *ExternalAuditStorageService {
 	return &ExternalAuditStorageService{
 		backend: backend,
-		logger:  logrus.WithField(teleport.ComponentKey, "ExternalAuditStorage.backend"),
 	}
 }
diff --git a/lib/services/local/externalauditstorage_watcher.go b/lib/services/local/externalauditstorage_watcher.go
index 633e8a5c53f3e..f962e2e5cdcce 100644
--- a/lib/services/local/externalauditstorage_watcher.go
+++ b/lib/services/local/externalauditstorage_watcher.go
@@ -21,11 +21,11 @@ package local
 
 import (
 	"context"
 	"errors"
+	"log/slog"
 	"sync"
 
 	"github.com/gravitational/trace"
 	"github.com/jonboulle/clockwork"
-	"github.com/sirupsen/logrus"
 
 	"github.com/gravitational/teleport"
 	"github.com/gravitational/teleport/api/types"
@@ -38,8 +38,8 @@ import (
 type ClusterExternalAuditStorageWatcherConfig struct {
 	// Backend is the storage backend used to create watchers.
 	Backend backend.Backend
-	// Log is a logger.
-	Log logrus.FieldLogger
+	// Logger is a logger.
+	Logger *slog.Logger
 	// Clock is used to control time.
 	Clock clockwork.Clock
 	// OnChange is the action to take when the cluster ExternalAuditStorage
@@ -52,8 +52,8 @@ func (cfg *ClusterExternalAuditStorageWatcherConfig) CheckAndSetDefaults() error
 	if cfg.Backend == nil {
 		return trace.BadParameter("missing parameter Backend")
 	}
-	if cfg.Log == nil {
-		cfg.Log = logrus.StandardLogger().WithField(teleport.ComponentKey, "ExternalAuditStorage.watcher")
+	if cfg.Logger == nil {
+		cfg.Logger = slog.With(teleport.ComponentKey, "ExternalAuditStorage.watcher")
 	}
 	if cfg.Clock == nil {
 		cfg.Clock = cfg.Backend.Clock()
@@ -67,7 +67,7 @@ func (cfg *ClusterExternalAuditStorageWatcherConfig) CheckAndSetDefaults() error
 
 // ClusterExternalAuditWatcher is a light weight backend watcher for the cluster external audit resource.
 type ClusterExternalAuditWatcher struct {
 	backend  backend.Backend
-	log      logrus.FieldLogger
+	logger   *slog.Logger
 	clock    clockwork.Clock
 	onChange func()
 	retry    retryutils.Retry
@@ -97,7 +97,7 @@ func NewClusterExternalAuditWatcher(ctx context.Context, cfg ClusterExternalAudi
 
 	w := &ClusterExternalAuditWatcher{
 		backend:  cfg.Backend,
-		log:      cfg.Log,
+		logger:   cfg.Logger,
 		clock:    cfg.Clock,
 		onChange: cfg.OnChange,
 		retry:    retry,
@@ -137,7 +137,10 @@ func (w *ClusterExternalAuditWatcher) runWatchLoop(ctx context.Context) {
 		startedWaiting := w.clock.Now()
 		select {
 		case t := <-w.retry.After():
-			w.log.Warningf("Restarting watch on error after waiting %v. Error: %v.", t.Sub(startedWaiting), err)
+			w.logger.WarnContext(ctx, "Restarting watch on error",
+				"backoff", t.Sub(startedWaiting),
+				"error", err,
+			)
 			w.retry.Inc()
 		case <-ctx.Done():
 			return
@@ -156,7 +159,7 @@ func (w *ClusterExternalAuditWatcher) watch(ctx context.Context) error {
 	for {
 		select {
 		case <-watcher.Events():
-			w.log.Infof("Detected change to cluster ExternalAuditStorage config")
+			w.logger.InfoContext(ctx, "Detected change to cluster ExternalAuditStorage config")
 			w.onChange()
 		case w.running <- struct{}{}:
 		case <-watcher.Done():
diff --git a/lib/services/local/headlessauthn_watcher.go b/lib/services/local/headlessauthn_watcher.go
index d3ded3d60439c..dee036fa363df 100644
--- a/lib/services/local/headlessauthn_watcher.go
+++ b/lib/services/local/headlessauthn_watcher.go
@@ -21,12 +21,12 @@ package local
 
 import (
 	"context"
 	"errors"
+	"log/slog"
 	"sync"
 	"time"
 
 	"github.com/gravitational/trace"
 	"github.com/jonboulle/clockwork"
-	"github.com/sirupsen/logrus"
 
 	"github.com/gravitational/teleport/api/types"
 	apiutils "github.com/gravitational/teleport/api/utils"
@@ -51,8 +51,8 @@ var ErrHeadlessAuthenticationWatcherClosed = errors.New("headless authentication
 type HeadlessAuthenticationWatcherConfig struct {
 	// Backend is the storage backend used to create watchers.
 	Backend backend.Backend
-	// Log is a logger.
-	Log logrus.FieldLogger
+	// Logger is a logger.
+	Logger *slog.Logger
 	// Clock is used to control time.
 	Clock clockwork.Clock
 	// MaxRetryPeriod is the maximum retry period on failed watchers.
@@ -64,9 +64,8 @@ func (cfg *HeadlessAuthenticationWatcherConfig) CheckAndSetDefaults() error {
 	if cfg.Backend == nil {
 		return trace.BadParameter("missing parameter Backend")
 	}
-	if cfg.Log == nil {
-		cfg.Log = logrus.StandardLogger()
-		cfg.Log.WithField("resource-kind", types.KindHeadlessAuthentication)
+	if cfg.Logger == nil {
+		cfg.Logger = slog.With("resource_kind", types.KindHeadlessAuthentication)
 	}
 	if cfg.MaxRetryPeriod == 0 {
 		// On watcher failure, we eagerly retry in order to avoid login delays.
@@ -159,12 +158,15 @@ func (h *HeadlessAuthenticationWatcher) runWatchLoop(ctx context.Context) {
 		startedWaiting := h.Clock.Now()
 		select {
 		case t := <-h.retry.After():
-			h.Log.Warningf("Restarting watch on error after waiting %v. Error: %v.", t.Sub(startedWaiting), err)
+			h.Logger.WarnContext(ctx, "Restarting watch on error",
+				"backoff", t.Sub(startedWaiting),
+				"error", err,
+			)
 			h.retry.Inc()
 		case <-ctx.Done():
 			return
 		case <-h.closed:
-			h.Log.Debug("Watcher closed. Returning from watch loop.")
+			h.Logger.DebugContext(ctx, "Watcher closed, terminating watch loop")
 			return
 		}
 	}
@@ -191,7 +193,7 @@ func (h *HeadlessAuthenticationWatcher) watch(ctx context.Context) error {
 		case types.OpPut:
 			headlessAuthn, err := unmarshalHeadlessAuthenticationFromItem(&event.Item)
 			if err != nil {
-				h.Log.WithError(err).Debug("failed to unmarshal headless authentication from put event")
+				h.Logger.DebugContext(ctx, "failed to unmarshal headless authentication from put event", "error", err)
 			} else {
 				h.notify(headlessAuthn)
 			}
diff --git a/lib/services/local/inventory.go b/lib/services/local/inventory.go
index 2b488e49e58aa..0eba8cadd618b 100644
--- a/lib/services/local/inventory.go
+++ b/lib/services/local/inventory.go
@@ -54,11 +54,17 @@ func (s *PresenceService) GetInstances(ctx context.Context, req types.InstanceFi
 	return stream.FilterMap(items, func(item backend.Item) (types.Instance, bool) {
 		instance, err := generic.FastUnmarshal[*types.InstanceV1](item)
 		if err != nil {
-			s.log.Warnf("Skipping instance at %s, failed to unmarshal: %v", item.Key, err)
+			s.logger.WarnContext(ctx, "Skipping instance that failed to unmarshal",
+				"key", item.Key,
+				"error", err,
+			)
 			return nil, false
 		}
 		if err := instance.CheckAndSetDefaults(); err != nil {
-			s.log.Warnf("Skipping instance at %s: %v", item.Key, err)
+			s.logger.WarnContext(ctx, "Skipping invalid instance",
+				"key", item.Key,
+				"error", err,
+			)
 			return nil, false
 		}
 		if !req.Match(instance) {
diff --git a/lib/services/local/okta.go b/lib/services/local/okta.go
index b1ab4e8f3b65d..dfbca6b4d29ee 100644
--- a/lib/services/local/okta.go
+++ b/lib/services/local/okta.go
@@ -24,9 +24,7 @@ import (
 
 	"github.com/gravitational/trace"
 	"github.com/jonboulle/clockwork"
-	"github.com/sirupsen/logrus"
 
-	"github.com/gravitational/teleport"
 	"github.com/gravitational/teleport/api/types"
 	"github.com/gravitational/teleport/lib/backend"
 	"github.com/gravitational/teleport/lib/services"
@@ -43,7 +41,6 @@ const (
 
 // OktaService manages Okta resources in the Backend.
 type OktaService struct {
-	log           logrus.FieldLogger
 	clock         clockwork.Clock
 	importRuleSvc *generic.Service[types.OktaImportRule]
 	assignmentSvc *generic.Service[types.OktaAssignment]
@@ -76,7 +73,6 @@ func NewOktaService(b backend.Backend, clock clockwork.Clock) (*OktaService, err
 	}
 
 	return &OktaService{
-		log:           logrus.WithFields(logrus.Fields{teleport.ComponentKey: "okta:local-service"}),
 		clock:         clock,
 		importRuleSvc: importRuleSvc,
 		assignmentSvc: assignmentSvc,
diff --git a/lib/services/local/presence.go b/lib/services/local/presence.go
index ff95884151b34..520eae0f33c87 100644
--- a/lib/services/local/presence.go
+++ b/lib/services/local/presence.go
@@ -26,7 +26,6 @@ import (
 
 	"github.com/google/uuid"
 	"github.com/gravitational/trace"
-	"github.com/sirupsen/logrus"
 
 	"github.com/gravitational/teleport"
 	"github.com/gravitational/teleport/api/client/proto"
@@ -45,7 +44,7 @@ import (
 
 // PresenceService records and reports the presence of all components
 // of the cluster - Nodes, Proxies and SSH nodes
 type PresenceService struct {
-	log    *logrus.Entry
+	logger *slog.Logger
 	jitter retryutils.Jitter
 	backend.Backend
 }
@@ -57,7 +56,7 @@ type backendItemToResourceFunc func(item backend.Item) (types.ResourceWithLabels
 
 // NewPresenceService returns new presence service instance
 func NewPresenceService(b backend.Backend) *PresenceService {
 	return &PresenceService{
-		log:     logrus.WithFields(logrus.Fields{teleport.ComponentKey: "Presence"}),
+		logger:  slog.With(teleport.ComponentKey, "Presence"),
 		jitter:  retryutils.FullJitter,
 		Backend: b,
 	}
 }
@@ -160,7 +159,10 @@ func (s *PresenceService) GetServerInfos(ctx context.Context) stream.Stream[type
 			services.WithRevision(item.Revision),
 		)
 		if err != nil {
-			s.log.Warnf("Skipping server info at %s, failed to unmarshal: %v", item.Key, err)
+			s.logger.WarnContext(ctx, "Failed to unmarshal server info",
+				"key", item.Key,
+				"error", err,
+			)
 			return nil, false
 		}
 		return si, true
diff --git a/lib/services/local/saml_idp_service_provider.go b/lib/services/local/saml_idp_service_provider.go
index 6b08cf084afd9..eadeb347367ab 100644
--- a/lib/services/local/saml_idp_service_provider.go
+++ b/lib/services/local/saml_idp_service_provider.go
@@ -22,6 +22,7 @@ import (
 	"context"
 	"encoding/xml"
 	"fmt"
+	"log/slog"
 	"net/http"
 	"net/url"
 	"time"
@@ -29,7 +30,6 @@ import (
 	"github.com/crewjam/saml"
 	"github.com/crewjam/saml/samlsp"
 	"github.com/gravitational/trace"
-	"github.com/sirupsen/logrus"
 
 	"github.com/gravitational/teleport"
 	"github.com/gravitational/teleport/api/types"
@@ -55,7 +55,7 @@ type SAMLIdPServiceProviderService struct {
 	// backend is used to spawn Plugins storage service so that
 	// it can be queried from the SAML service.
 	backend    backend.Backend
-	log        logrus.FieldLogger
+	logger     *slog.Logger
 	httpClient *http.Client
 }
@@ -86,7 +86,7 @@ func NewSAMLIdPServiceProviderService(b backend.Backend, opts ...SAMLIdPOption)
 	samlSPService := &SAMLIdPServiceProviderService{
 		svc:     *svc,
 		backend: b,
-		log:     logrus.WithFields(logrus.Fields{teleport.ComponentKey: "saml-idp"}),
+		logger:  slog.With(teleport.ComponentKey, "saml-idp"),
 	}
 
 	for _, opt := range opts {
@@ -120,13 +120,17 @@ func (s *SAMLIdPServiceProviderService) CreateSAMLIdPServiceProvider(ctx context
 	if err := services.ValidateSAMLIdPACSURLAndRelayStateInputs(sp); err != nil {
 		// logging instead of returning an error cause we do not want to break cache writes on a cluster
 		// that already has a service provider with unsupported characters/scheme in the acs_url or relay_state.
-		s.log.Warn(err)
+		s.logger.WarnContext(ctx, "Provided SAML IdP service provider is invalid", "error", err)
 	}
 
 	if sp.GetEntityDescriptor() == "" {
 		if err := s.configureEntityDescriptorPerPreset(sp); err != nil {
 			errMsg := fmt.Errorf("failed to configure entity descriptor with the given entity_id %q and acs_url %q: %w",
 				sp.GetEntityID(), sp.GetACSURL(), err)
-			s.log.Errorf(errMsg.Error())
+			s.logger.ErrorContext(ctx, "failed to configure entity descriptor",
+				"entity_id", sp.GetEntityID(),
+				"acs_url", sp.GetACSURL(),
+				"error", err,
+			)
 			return trace.BadParameter(errMsg.Error())
 		}
 	}
@@ -166,7 +170,7 @@ func (s *SAMLIdPServiceProviderService) UpdateSAMLIdPServiceProvider(ctx context
 	if err := services.ValidateSAMLIdPACSURLAndRelayStateInputs(sp); err != nil {
 		// logging instead of returning an error cause we do not want to break cache writes on a cluster
 		// that already has a service provider with unsupported characters/scheme in the acs_url or relay_state.
-		s.log.Warn(err)
+		s.logger.WarnContext(ctx, "Provided SAML IdP service provider is invalid", "error", err)
 	}
 
 	// we only verify if the entity ID field in the spec matches with the entity descriptor.
@@ -250,7 +254,10 @@ func (s *SAMLIdPServiceProviderService) configureEntityDescriptorPerPreset(sp ty
 	// fetchAndSetEntityDescriptor is expected to return error if it fails
 	// to fetch a valid entity descriptor.
 	if err := s.fetchAndSetEntityDescriptor(sp); err != nil {
-		s.log.Debugf("Failed to fetch entity descriptor from %q: %v.", sp.GetEntityID(), err)
+		s.logger.DebugContext(context.Background(), "Failed to fetch entity descriptor",
+			"entity_id", sp.GetEntityID(),
+			"error", err,
+		)
 		// We aren't interested in checking error type as any occurrence of error
 		// mean entity descriptor was not set.
 		return trace.Wrap(s.generateAndSetEntityDescriptor(sp))
@@ -295,7 +302,10 @@ func (s *SAMLIdPServiceProviderService) fetchAndSetEntityDescriptor(sp types.SAM
 
 // generateAndSetEntityDescriptor generates and sets Service Provider entity descriptor
 // with ACS URL, Entity ID and unspecified NameID format.
 func (s *SAMLIdPServiceProviderService) generateAndSetEntityDescriptor(sp types.SAMLIdPServiceProvider) error {
-	s.log.Infof("Generating a default entity_descriptor with entity_id %q and acs_url %q.", sp.GetEntityID(), sp.GetACSURL())
+	s.logger.InfoContext(context.Background(), "Generating a default entity_descriptor",
+		"entity_id", sp.GetEntityID(),
+		"acs_url", sp.GetACSURL(),
+	)
 
 	acsURL, err := url.Parse(sp.GetACSURL())
 	if err != nil {
@@ -335,7 +345,9 @@ func (s *SAMLIdPServiceProviderService) embedAttributeMapping(sp types.SAMLIdPSe
 	switch attrMapLen := len(sp.GetAttributeMapping()); {
 	case attrMapLen == 0:
 		if teleportSPSSODescriptorIndex == 0 {
-			s.log.Debugf("No custom attribute mapping values provided for %s. SAML assertion will default to uid and eduPersonAffiliate", sp.GetEntityID())
+			s.logger.DebugContext(context.Background(), "No custom attribute mapping values provided, SAML assertion will default to uid and eduPersonAffiliate",
+				"entity_id", sp.GetEntityID(),
+			)
 			return nil
 		} else {
 			// delete Teleport SPSSODescriptor
diff --git a/lib/services/local/secreports.go b/lib/services/local/secreports.go
index c08ac60fe3a36..36307ce17d528 100644
--- a/lib/services/local/secreports.go
+++ b/lib/services/local/secreports.go
@@ -23,9 +23,7 @@ import (
 
 	"github.com/gravitational/trace"
 	"github.com/jonboulle/clockwork"
-	"github.com/sirupsen/logrus"
 
-	"github.com/gravitational/teleport"
 	"github.com/gravitational/teleport/api/types"
 	"github.com/gravitational/teleport/api/types/secreports"
 	"github.com/gravitational/teleport/lib/backend"
@@ -46,7 +44,6 @@ var (
 
 // SecReportsService is the local implementation of the SecReports service.
 type SecReportsService struct {
-	log               logrus.FieldLogger
 	clock             clockwork.Clock
 	auditQuerySvc     *generic.Service[*secreports.AuditQuery]
 	securityReportSvc *generic.Service[*secreports.Report]
@@ -99,7 +96,6 @@ func NewSecReportsService(backend backend.Backend, clock clockwork.Clock) (*SecR
 	}
 
 	return &SecReportsService{
-		log:               logrus.WithFields(logrus.Fields{teleport.ComponentKey: "secreports:local-service"}),
 		clock:             clock,
 		auditQuerySvc:     auditQuerySvc,
 		securityReportSvc: securityReportSvc,
diff --git a/lib/services/local/session.go b/lib/services/local/session.go
index c2fde121ddce7..ce032ca6fc3b2 100644
--- a/lib/services/local/session.go
+++ b/lib/services/local/session.go
@@ -23,7 +23,6 @@ import (
 	"slices"
 
 	"github.com/gravitational/trace"
-	"github.com/sirupsen/logrus"
 
 	"github.com/gravitational/teleport/api/client/proto"
 	"github.com/gravitational/teleport/api/types"
@@ -315,7 +314,7 @@ func (s *IdentityService) DeleteAllSAMLIdPSessions(ctx context.Context) error {
 
 // WebSessions returns the web sessions manager.
 func (s *IdentityService) WebSessions() types.WebSessionInterface {
-	return &webSessions{backend: s.Backend, log: s.log}
+	return &webSessions{backend: s.Backend}
 }
 
 // Get returns the web session state described with req.
@@ -423,12 +422,11 @@ func (r *webSessions) listLegacySessions(ctx context.Context) ([]types.WebSessio
 
 type webSessions struct {
 	backend backend.Backend
-	log     logrus.FieldLogger
 }
 
 // WebTokens returns the web token manager.
 func (s *IdentityService) WebTokens() types.WebTokenInterface {
-	return &webTokens{backend: s.Backend, log: s.log}
+	return &webTokens{backend: s.Backend}
 }
 
 // Get returns the web token described with req.
@@ -504,7 +502,6 @@ func (r *webTokens) DeleteAll(ctx context.Context) error {
 
 type webTokens struct {
 	backend backend.Backend
-	log     logrus.FieldLogger
 }
 
 func webSessionKey(sessionID string) backend.Key {
diff --git a/lib/services/local/sessiontracker.go b/lib/services/local/sessiontracker.go
index ad6fc5e9d06f1..95ec34895d78e 100644
--- a/lib/services/local/sessiontracker.go
+++ b/lib/services/local/sessiontracker.go
@@ -20,10 +20,10 @@ package local
 
 import (
 	"context"
+	"log/slog"
 	"time"
 
 	"github.com/gravitational/trace"
-	"github.com/sirupsen/logrus"
 
 	"github.com/gravitational/teleport/api/client/proto"
 	"github.com/gravitational/teleport/api/types"
@@ -160,7 +160,7 @@ func (s *sessionTracker) getActiveSessionTrackers(ctx context.Context, filter *t
 	for _, item := range noExpiry {
 		if err := s.bk.Delete(ctx, item.Key); err != nil {
 			if !trace.IsNotFound(err) {
-				logrus.WithError(err).Error("Failed to remove stale session tracker")
+				slog.ErrorContext(ctx, "Failed to remove stale session tracker", "error", err)
 			}
 		}
 	}
diff --git a/lib/services/local/status.go b/lib/services/local/status.go
index 9af78ee0d4f2b..b6046e9bde34a 100644
--- a/lib/services/local/status.go
+++ b/lib/services/local/status.go
@@ -20,10 +20,10 @@ package local
 
 import (
 	"context"
+	"log/slog"
 	"time"
 
 	"github.com/gravitational/trace"
-	"github.com/sirupsen/logrus"
 
 	"github.com/gravitational/teleport"
 	"github.com/gravitational/teleport/api/client/proto"
@@ -35,13 +35,13 @@ import (
 // StatusService manages cluster status info.
 type StatusService struct {
 	backend.Backend
-	log logrus.FieldLogger
+	logger *slog.Logger
 }
 
 func NewStatusService(bk backend.Backend) *StatusService {
 	return &StatusService{
 		Backend: bk,
-		log:     logrus.WithField(teleport.ComponentKey, "status"),
+		logger:  slog.With(teleport.ComponentKey, "status"),
 	}
 }
@@ -68,7 +68,7 @@ func (s *StatusService) GetClusterAlerts(ctx context.Context, query types.GetClu
 	filtered := alerts[:0]
 	for _, alert := range alerts {
 		if err := alert.CheckAndSetDefaults(); err != nil {
-			s.log.Warnf("Skipping invalid cluster alert: %v", err)
+			s.logger.WarnContext(ctx, "Skipping invalid cluster alert", "error", err)
 		}
 
 		if !query.Match(alert) {
diff --git a/lib/services/local/user_login_state.go b/lib/services/local/user_login_state.go
index 76e12b6b4c95d..a73c77b238dc4 100644
--- a/lib/services/local/user_login_state.go
+++ b/lib/services/local/user_login_state.go
@@ -22,9 +22,7 @@ import (
 	"context"
 
 	"github.com/gravitational/trace"
-	"github.com/sirupsen/logrus"
 
-	"github.com/gravitational/teleport"
 	"github.com/gravitational/teleport/api/types"
 	"github.com/gravitational/teleport/api/types/userloginstate"
 	"github.com/gravitational/teleport/lib/backend"
@@ -38,7 +36,6 @@ const (
 
 // UserLoginStateService manages user login state resources in the Backend.
 type UserLoginStateService struct {
-	log logrus.FieldLogger
 	svc *generic.Service[*userloginstate.UserLoginState]
 }
 
@@ -56,7 +53,6 @@ func NewUserLoginStateService(b backend.Backend) (*UserLoginStateService, error)
 	}
 
 	return &UserLoginStateService{
-		log: logrus.WithFields(logrus.Fields{teleport.ComponentKey: "user-login-state:local-service"}),
 		svc: svc,
 	}, nil
 }
diff --git a/lib/services/local/users.go b/lib/services/local/users.go
index 760634dfa65b3..1c4790bcda886 100644
--- a/lib/services/local/users.go
+++ b/lib/services/local/users.go
@@ -26,6 +26,7 @@ import (
 	"encoding/base64"
 	"encoding/json"
 	"io"
+	"log/slog"
 	"sort"
 	"strings"
 	"sync"
@@ -37,7 +38,6 @@ import (
 	"github.com/google/uuid"
 	"github.com/gravitational/trace"
 	"github.com/jonboulle/clockwork"
-	"github.com/sirupsen/logrus"
 	"golang.org/x/crypto/bcrypt"
 	"golang.org/x/sync/errgroup"
 
@@ -65,43 +65,30 @@ var GlobalSessionDataMaxEntries = 5000 // arbitrary
 // user accounts as well
 type IdentityService struct {
 	backend.Backend
-	log              logrus.FieldLogger
+	logger           *slog.Logger
 	bcryptCost       int
 	notificationsSvc *NotificationsService
 }
 
-// TODO(rudream): Rename to NewIdentityService.
+// TODO(tross): DELETE ONCE e is updated to use NewIdentityService
 // NewIdentityServiceV2 returns a new instance of IdentityService object
 func NewIdentityServiceV2(backend backend.Backend) (*IdentityService, error) {
-	notificationsSvc, err := NewNotificationsService(backend, backend.Clock())
-	if err != nil {
-		return nil, trace.Wrap(err)
-	}
-
-	return &IdentityService{
-		Backend:          backend,
-		log:              logrus.WithField(teleport.ComponentKey, "identity"),
-		bcryptCost:       bcrypt.DefaultCost,
-		notificationsSvc: notificationsSvc,
-	}, nil
+	return NewIdentityService(backend)
 }
 
-// TODO(rudream): Remove once NewIdentityServiceV2 is merged.
 // NewIdentityService returns a new instance of IdentityService object
-func NewIdentityService(backend backend.Backend) *IdentityService {
+func NewIdentityService(backend backend.Backend) (*IdentityService, error) {
 	notificationsSvc, err := NewNotificationsService(backend, backend.Clock())
-
-	log := logrus.WithField(teleport.ComponentKey, "identity")
 	if err != nil {
-		log.Warnf("error initializing notifications service with identity service: %v", err)
+		return nil, trace.Wrap(err)
 	}
 
 	return &IdentityService{
 		Backend:          backend,
-		log:              log,
+		logger:           slog.With(teleport.ComponentKey, "identity"),
 		bcryptCost:       bcrypt.DefaultCost,
 		notificationsSvc: notificationsSvc,
-	}
+	}, nil
 }
 
 // NewTestIdentityService returns a new instance of IdentityService object to be
@@ -201,7 +188,10 @@ func (s *IdentityService) streamUsersWithSecrets(itemStream stream.Stream[backen
 	collectorStream := stream.FilterMap(itemStream, func(item backend.Item) (collector, bool) {
 		name, suffix, err := splitUsernameAndSuffix(item.Key)
 		if err != nil {
-			s.log.Warnf("Failed to extract name/suffix for user item at %q: %v", item.Key, err)
+			s.logger.WarnContext(context.Background(), "Failed to extract name/suffix for user item",
+				"key", item.Key,
+				"error", err,
+			)
 			return collector{}, false
 		}
 
@@ -242,7 +232,10 @@ func (s *IdentityService) streamUsersWithSecrets(itemStream stream.Stream[backen
 	userStream := stream.FilterMap(collectorStream, func(c collector) (*types.UserV2, bool) {
 		user, err := userFromUserItems(c.name, c.items)
 		if err != nil {
-			s.log.Warnf("Failed to build user %q from user item aggregator: %v", c.name, err)
+			s.logger.WarnContext(context.Background(), "Failed to build user from user item aggregator",
+				"user", c.name,
+				"error", err,
+			)
 			return nil, false
 		}
 
@@ -263,7 +256,10 @@ func (s *IdentityService) streamUsersWithoutSecrets(itemStream stream.Stream[bac
 		user, err := services.UnmarshalUser(item.Value, services.WithRevision(item.Revision))
 		if err != nil {
-			s.log.Warnf("Failed to unmarshal user at %q: %v", item.Key, err)
+			s.logger.WarnContext(context.Background(), "Failed to unmarshal user",
+				"key", item.Key,
+				"error", err,
+			)
 			return nil, false
 		}
 
@@ -874,6 +870,7 @@ func (s *IdentityService) DeleteUserLoginAttempts(user string) error {
 // `PasswordState` status flag accordingly. Returns an error if the user doesn't
 // exist.
 func (s *IdentityService) UpsertPassword(user string, password []byte) error {
+	ctx := context.TODO()
 	if user == "" {
 		return trace.BadParameter("missing username")
 	}
@@ -891,7 +888,7 @@ func (s *IdentityService) UpsertPassword(user string, password []byte) error {
 	}
 
 	_, err = s.UpdateAndSwapUser(
-		context.TODO(),
+		ctx,
 		user,
 		false, /*withSecrets*/
 		func(u types.User) (bool, error) {
@@ -900,10 +897,10 @@ func (s *IdentityService) UpsertPassword(user string, password []byte) error {
 		})
 	if err != nil {
 		// Don't let the password state flag change fail the entire operation.
-		s.log.
-			WithError(err).
-			WithField("user", user).
-			Warn("Failed to set password state")
+		s.logger.WarnContext(ctx, "Failed to set password state",
+			"user", user,
+			"error", err,
+		)
 	}
 
 	return nil
@@ -924,7 +921,7 @@ func (s *IdentityService) DeletePassword(ctx context.Context, user string) error
 	}
 
 	if _, err := s.UpdateAndSwapUser(
-		context.TODO(),
+		ctx,
 		user,
 		false, /*withSecrets*/
 		func(u types.User) (bool, error) {
@@ -933,10 +930,10 @@ func (s *IdentityService) DeletePassword(ctx context.Context, user string) error
 		},
 	); err != nil {
 		// Don't let the password state flag change fail the entire operation.
-		s.log.
-			WithError(err).
-			WithField("user", user).
-			Warn("Failed to set password state")
+		s.logger.WarnContext(ctx, "Failed to set password state",
+			"user", user,
+			"error", err,
+		)
 	}
 
 	// Now is the time to return the delete operation, if any.
@@ -987,7 +984,7 @@ func (s *IdentityService) UpsertWebauthnLocalAuth(ctx context.Context, user stri
 		// lib/auth/webauthn is prepared to deal with eventual inconsistencies
 		// between "web/users/.../webauthnlocalauth" and "webauthn/users/" keys.
 		if err := s.Delete(ctx, wlaKey); err != nil {
-			s.log.WithError(err).Warn("Failed to undo WebauthnLocalAuth update")
+			s.logger.WarnContext(ctx, "Failed to undo WebauthnLocalAuth update", "error", err)
 		}
 		return trace.Wrap(err, "writing webauthn user")
 	}
@@ -1208,7 +1205,7 @@ func (s *IdentityService) UpsertMFADevice(ctx context.Context, user string, d *t
 		return trace.Wrap(err)
 	}
 	if err := s.upsertUserStatusMFADevice(ctx, user); err != nil {
-		s.log.WithError(err).Warn("Unable to update user status after adding MFA device")
+		s.logger.WarnContext(ctx, "Unable to update user status after adding MFA device", "error", err)
 	}
 	return nil
 }
@@ -1308,7 +1305,7 @@ func (s *IdentityService) buildAndSetWeakestMFADeviceKind(ctx context.Context, u
 	}
 	state, err := s.buildWeakestMFADeviceKind(ctx, user.GetName(), upsertingMFA...)
 	if err != nil {
-		s.log.WithError(err).Warn("Failed to determine weakest mfa device kind for user")
+		s.logger.WarnContext(ctx, "Failed to determine weakest mfa device kind for user", "error", err)
 		return
 	}
 	user.SetWeakestDevice(state)
@@ -1370,7 +1367,7 @@ func (s *IdentityService) DeleteMFADevice(ctx context.Context, user, id string)
 		return trace.Wrap(err)
 	}
 	if err := s.upsertUserStatusMFADevice(ctx, user); err != nil {
-		s.log.WithError(err).Warn("Unable to update user status after deleting MFA device")
+		s.logger.WarnContext(ctx, "Unable to update user status after deleting MFA device", "error", err)
 	}
 	return nil
 }
@@ -1607,10 +1604,10 @@ func (s *IdentityService) GetOIDCConnectors(ctx context.Context, withSecrets boo
 	for _, item := range result.Items {
 		conn, err := services.UnmarshalOIDCConnector(item.Value, services.WithExpires(item.Expires), services.WithRevision(item.Revision))
 		if err != nil {
-			logrus.
-				WithError(err).
-				WithField("key", item.Key).
-				Errorf("Error unmarshaling OIDC Connector")
+			s.logger.ErrorContext(ctx, "Error unmarshaling OIDC Connector",
+				"key", item.Key,
+				"error", err,
+			)
 			continue
 		}
 		if !withSecrets {
@@ -1775,10 +1772,10 @@ func (s *IdentityService) GetSAMLConnectors(ctx context.Context, withSecrets boo
 	for _, item := range result.Items {
 		conn, err := services.UnmarshalSAMLConnector(item.Value, services.WithExpires(item.Expires), services.WithRevision(item.Revision))
 		if err != nil {
-			logrus.
-				WithError(err).
-				WithField("key", item.Key).
-				Errorf("Error unmarshaling SAML Connector")
+			s.logger.ErrorContext(ctx, "Error unmarshaling SAML Connector",
+				"key", item.Key,
+				"error", err,
+			)
 			continue
 		}
 		if !withSecrets {
@@ -2016,10 +2013,10 @@ func (s *IdentityService) GetGithubConnectors(ctx context.Context, withSecrets b
 	for _, item := range result.Items {
 		connector, err := services.UnmarshalGithubConnector(item.Value, services.WithRevision(item.Revision))
 		if err != nil {
-			logrus.
-				WithError(err).
-				WithField("key", item.Key).
-				Errorf("Error unmarshaling GitHub Connector")
+			s.logger.ErrorContext(ctx, "Error unmarshaling GitHub Connector",
+				"key", item.Key,
+				"error", err,
+			)
 			continue
 		}
 		if !withSecrets {
diff --git a/lib/services/matchers.go b/lib/services/matchers.go
index e625d4ca59fab..84e8ba87bd793 100644
--- a/lib/services/matchers.go
+++ b/lib/services/matchers.go
@@ -19,10 +19,11 @@ package services
 
 import (
+	"context"
+	"log/slog"
 	"slices"
 
 	"github.com/gravitational/trace"
-	"github.com/sirupsen/logrus"
 
 	"github.com/gravitational/teleport/api/types"
 	apiutils "github.com/gravitational/teleport/api/utils"
@@ -115,8 +116,11 @@ func MatchResourceLabels(matchers []ResourceMatcher, labels map[string]string) b
 		}
 		match, _, err := MatchLabels(matcher.Labels, labels)
 		if err != nil {
-			logrus.WithError(err).Errorf("Failed to match labels %v: %v.",
-				matcher.Labels, labels)
+			slog.ErrorContext(context.Background(), "Failed to match labels",
+				"error", err,
+				"matcher_labels", matcher.Labels,
+				"resource_labels", labels,
+			)
 			return false
 		}
 		if match {
diff --git a/lib/services/parser.go b/lib/services/parser.go
index 8d70f0f53b8fe..1ea6b6d7cdd60 100644
--- a/lib/services/parser.go
+++ b/lib/services/parser.go
@@ -19,14 +19,14 @@ package services
 
 import (
+	"context"
 	"fmt"
-	"io"
+	"log/slog"
 	"slices"
 	"strings"
 	"time"
 
 	"github.com/gravitational/trace"
-	log "github.com/sirupsen/logrus"
 	"github.com/vulcand/predicate"
 	"github.com/vulcand/predicate/builder"
 
@@ -35,6 +35,7 @@ import (
 	"github.com/gravitational/teleport/api/types/events"
 	"github.com/gravitational/teleport/api/types/wrappers"
 	"github.com/gravitational/teleport/lib/session"
+	logutils "github.com/gravitational/teleport/lib/utils/log"
 	"github.com/gravitational/teleport/lib/utils/typical"
 )
 
@@ -231,34 +232,41 @@ func NewActionsParser(ctx RuleContext) (predicate.Parser, error) {
 // NewLogActionFn creates logger functions
 func NewLogActionFn(ctx RuleContext) interface{} {
 	l := &LogAction{ctx: ctx}
-	writer, ok := ctx.(io.Writer)
-	if ok && writer != nil {
-		l.writer = writer
-	}
+
 	return l.Log
 }
 
 // LogAction represents action that will emit log entry
 // when specified in the actions of a matched rule
type LogAction struct {
-	ctx    RuleContext
-	writer io.Writer
+	ctx RuleContext
 }
 
-// Log logs with specified level and formatting string with arguments
-func (l *LogAction) Log(level, format string, args ...interface{}) predicate.BoolPredicate {
+// Log logs with specified level and message string and attributes
+func (l *LogAction) Log(level, msg string, args ...any) predicate.BoolPredicate {
 	return func() bool {
-		ilevel, err := log.ParseLevel(level)
-		if err != nil {
-			ilevel = log.DebugLevel
+		slevel := slog.LevelDebug
+		switch strings.ToLower(level) {
+		case "error":
+			slevel = slog.LevelError
+		case "warn", "warning":
+			slevel = slog.LevelWarn
+		case "info":
+			slevel = slog.LevelInfo
+		case "debug":
+			slevel = slog.LevelDebug
+		case "trace":
+			slevel = logutils.TraceLevel
 		}
-		var writer io.Writer
-		if l.writer != nil {
-			writer = l.writer
-		} else {
-			writer = log.StandardLogger().WriterLevel(ilevel)
+
+		ctx := context.Background()
+		// Explicitly check whether logging is enabled for the level
+		// to avoid formatting the message if the log won't be emitted.
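+		// LogAction has no logger of its own, so both the level check and
+		// the log record go through the default logger and its handler.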
+		if slog.Default().Handler().Enabled(ctx, slevel) {
+			//nolint:sloglint // msg cannot be constant
+			slog.Log(ctx, slevel, fmt.Sprintf(msg, args...))
+		}
-		writer.Write([]byte(fmt.Sprintf(format, args...)))
+
 		return true
 	}
 }
diff --git a/lib/services/presets.go b/lib/services/presets.go
index ec3f8ad529c9d..9f59ff004073e 100644
--- a/lib/services/presets.go
+++ b/lib/services/presets.go
@@ -19,10 +19,11 @@ package services
 
 import (
+	"context"
+	"log/slog"
 	"slices"
 
 	"github.com/gravitational/trace"
-	log "github.com/sirupsen/logrus"
 
 	"github.com/gravitational/teleport"
 	"github.com/gravitational/teleport/api/constants"
@@ -801,7 +802,7 @@ func defaultAllowAccountAssignments(enterprise bool) map[string][]types.Identity
 
 // AddRoleDefaults adds default role attributes to a preset role.
 // Only attributes whose resources are not already defined (either allowing or denying) are added.
-func AddRoleDefaults(role types.Role) (types.Role, error) {
+func AddRoleDefaults(ctx context.Context, role types.Role) (types.Role, error) {
 	changed := false
 
 	oldLabels := role.GetAllLabels()
@@ -855,7 +856,10 @@ func AddRoleDefaults(role types.Role) (types.Role, error) {
 			continue
 		}
 
-		log.Debugf("Adding default allow rule %v for role %q", defaultRule, role.GetName())
+		slog.DebugContext(ctx, "Adding default allow rule to role",
+			"rule", defaultRule,
+			"role", role.GetName(),
+		)
 		rules := role.GetRules(types.Allow)
 		rules = append(rules, defaultRule)
 		role.SetRules(types.Allow, rules)
diff --git a/lib/services/presets_test.go b/lib/services/presets_test.go
index 3e4f12c5d4084..8e0490c82f714 100644
--- a/lib/services/presets_test.go
+++ b/lib/services/presets_test.go
@@ -19,6 +19,7 @@ package services
 
 import (
+	"context"
 	"testing"
 
 	"github.com/google/go-cmp/cmp"
@@ -639,7 +640,7 @@ func TestAddRoleDefaults(t *testing.T) {
 				})
 			}
 
-			role, err := AddRoleDefaults(test.role)
+			role, err := AddRoleDefaults(context.Background(), test.role)
 			test.expectedErr(t, err)
 
 			require.Empty(t, cmp.Diff(role, test.expected))
diff --git a/lib/services/role.go b/lib/services/role.go
index 37418d27c41a0..b9e3e04d83816 100644
--- a/lib/services/role.go
+++ b/lib/services/role.go
@@ -35,7 +35,6 @@ import (
 	"github.com/google/uuid"
 	"github.com/gravitational/trace"
 	jsoniter "github.com/json-iterator/go"
-	log "github.com/sirupsen/logrus"
 	"github.com/vulcand/predicate"
 	"golang.org/x/crypto/ssh"
 
@@ -51,6 +50,7 @@ import (
 	"github.com/gravitational/teleport/lib/tlsca"
 	"github.com/gravitational/teleport/lib/utils"
 	awsutils "github.com/gravitational/teleport/lib/utils/aws"
+	logutils "github.com/gravitational/teleport/lib/utils/log"
 	"github.com/gravitational/teleport/lib/utils/parse"
 )
 
@@ -1598,7 +1598,7 @@ func (set RoleSet) CheckGCPServiceAccounts(ttl time.Duration, overrideTTL bool)
 //
 //nolint:revive // Because we want this to be IdP.
 func (set RoleSet) CheckAccessToSAMLIdP(authPref readonly.AuthPreference, state AccessState) error {
-	_, debugf := rbacDebugLogger()
+	ctx := context.Background()
 
 	if authPref != nil {
 		if !authPref.IsSAMLIdPEnabled() {
@@ -1607,7 +1607,7 @@ func (set RoleSet) CheckAccessToSAMLIdP(authPref readonly.AuthPreference, state
 	}
 
 	if state.MFARequired == MFARequiredAlways && !state.MFAVerified {
-		debugf("Access to SAML IdP denied, cluster requires per-session MFA")
+		rbacLogger.LogAttrs(ctx, logutils.TraceLevel, "Access to SAML IdP denied, cluster requires per-session MFA")
 		return trace.Wrap(ErrSessionMFARequired)
 	}
 
@@ -1627,7 +1627,10 @@ func (set RoleSet) CheckAccessToSAMLIdP(authPref readonly.AuthPreference, state
 		}
 
 		if !mfaAllowed && options.RequireMFAType.IsSessionMFARequired() {
-			debugf("Access to SAML IdP denied, role %q requires per-session MFA", role.GetName())
+			rbacLogger.LogAttrs(ctx, logutils.TraceLevel, "Access to SAML IdP denied, role requires per-session MFA",
+				slog.String("role", role.GetName()),
+			)
+
 			return trace.Wrap(ErrSessionMFARequired)
 		}
 	}
@@ -2539,19 +2542,7 @@ type AccessCheckable interface {
 	GetAllLabels() map[string]string
 }
 
-// rbacDebugLogger creates a debug logger for Teleport's RBAC component.
-// It also returns a flag indicating whether debug logging is enabled,
-// allowing the RBAC system to generate more verbose errors in debug mode.
-func rbacDebugLogger() (debugEnabled bool, debugf func(format string, args ...interface{})) {
-	debugEnabled = log.IsLevelEnabled(log.TraceLevel)
-	debugf = func(format string, args ...interface{}) {}
-
-	if debugEnabled {
-		debugf = log.WithField(teleport.ComponentKey, teleport.ComponentRBAC).Tracef
-	}
-
-	return
-}
+var rbacLogger = logutils.NewPackageLogger(teleport.ComponentKey, teleport.ComponentRBAC)
 
 // resourceRequiresLabelMatching decides if a resource requires lapel matching
 // when making RBAC access decisions.
@@ -2567,13 +2558,18 @@ func resourceRequiresLabelMatching(r AccessCheckable) bool {
 }
 
 func (set RoleSet) checkAccess(r AccessCheckable, traits wrappers.Traits, state AccessState, matchers ...RoleMatcher) error {
-	// Note: logging in this function only happens in debug mode. This is because
+	// Note: logging in this function only happens in trace mode. This is because
 	// adding logging to this function (which is called on every resource returned
 	// by the backend) can slow down this function by 50x for large clusters!
-	isDebugEnabled, debugf := rbacDebugLogger()
+	ctx := context.Background()
+	logger := rbacLogger
+	isLoggingEnabled := logger.Handler().Enabled(ctx, logutils.TraceLevel)
+	if isLoggingEnabled {
+		logger = logger.With("resource_kind", r.GetKind(), "resource_name", r.GetName())
+	}
 
 	if !state.MFAVerified && state.MFARequired == MFARequiredAlways {
-		debugf("Access to %v %q denied, cluster requires per-session MFA", r.GetKind(), r.GetName())
+		logger.LogAttrs(ctx, logutils.TraceLevel, "Access to resource denied, cluster requires per-session MFA")
 		return ErrSessionMFARequired
 	}
 
@@ -2601,18 +2597,21 @@ func (set RoleSet) checkAccess(r AccessCheckable, traits wrappers.Traits, state
 			continue
 		}
 		if requiresLabelMatching {
-			matchLabels, labelsMessage, err := checkRoleLabelsMatch(types.Deny, role, traits, r, isDebugEnabled)
+			matchLabels, labelsMessage, err := checkRoleLabelsMatch(types.Deny, role, traits, r, isLoggingEnabled)
 			if err != nil {
 				return trace.Wrap(err)
 			}
 			if matchLabels {
-				debugf("Access to %v %q denied, deny rule in role %q matched; match(namespace=%v, %s)",
-					r.GetKind(), r.GetName(), role.GetName(), namespaceMessage, labelsMessage)
+				logger.LogAttrs(ctx, logutils.TraceLevel, "Access to resource denied, deny rule in role matched",
+					slog.String("role", role.GetName()),
+					slog.String("namespace_message", namespaceMessage),
+					slog.String("label_message", labelsMessage),
+				)
 				return trace.AccessDenied("access to %v denied. User does not have permissions. %v",
 					r.GetKind(), additionalDeniedMessage)
 			}
 		} else {
-			debugf("Role label matching skipped for %v %q", r.GetKind(), r.GetName())
+			logger.LogAttrs(ctx, logutils.TraceLevel, "Role label matching skipped")
 		}
 		// Deny rules are greedy on purpose. They will always match if
 		// at least one of the matchers returns true.
@@ -2621,8 +2620,10 @@ func (set RoleSet) checkAccess(r AccessCheckable, traits wrappers.Traits, state
 			return trace.Wrap(err)
 		}
 		if matchMatchers {
-			debugf("Access to %v %q denied, deny rule in role %q matched; match(matcher=%v)",
-				r.GetKind(), r.GetName(), role.GetName(), matchersMessage)
+			logger.LogAttrs(ctx, logutils.TraceLevel, "Access to resource denied, deny rule in role matched",
+				slog.String("role", role.GetName()),
+				slog.Any("matcher_message", matchersMessage),
+			)
 			return trace.AccessDenied("access to %v denied. User does not have permissions. %v",
 				r.GetKind(), additionalDeniedMessage)
 		}
@@ -2640,7 +2641,7 @@ func (set RoleSet) checkAccess(r AccessCheckable, traits wrappers.Traits, state
 	for _, role := range set {
 		matchNamespace, namespaceMessage := MatchNamespace(role.GetNamespaces(types.Allow), namespace)
 		if !matchNamespace {
-			if isDebugEnabled {
+			if isLoggingEnabled {
 				errs = append(errs, trace.AccessDenied("role=%v, match(namespace=%v)",
 					role.GetName(), namespaceMessage))
 			}
@@ -2648,20 +2649,20 @@ func (set RoleSet) checkAccess(r AccessCheckable, traits wrappers.Traits, state
 		}
 		if requiresLabelMatching {
-			matchLabels, labelsMessage, err := checkRoleLabelsMatch(types.Allow, role, traits, r, isDebugEnabled)
+			matchLabels, labelsMessage, err := checkRoleLabelsMatch(types.Allow, role, traits, r, isLoggingEnabled)
 			if err != nil {
 				return trace.Wrap(err)
 			}
 			if !matchLabels {
-				if isDebugEnabled {
+				if isLoggingEnabled {
 					errs = append(errs, trace.AccessDenied("role=%v, match(%s)",
 						role.GetName(), labelsMessage))
 				}
 				continue
 			}
 		} else {
-			debugf("Role label matching skipped for %v %q", r.GetKind(), r.GetName())
+			logger.LogAttrs(ctx, logutils.TraceLevel, "Role label matching skipped for resource")
 		}
 		// Allow rules are not greedy. They will match only if all of the
@@ -2671,7 +2672,7 @@ func (set RoleSet) checkAccess(r AccessCheckable, traits wrappers.Traits, state
 			return trace.Wrap(err)
 		}
 		if !matchMatchers {
-			if isDebugEnabled {
+			if isLoggingEnabled {
 				errs = append(errs, fmt.Errorf("role=%v, match(matchers=%v)",
 					role.GetName(), matchers))
 			}
@@ -2689,37 +2690,43 @@ func (set RoleSet) checkAccess(r AccessCheckable, traits wrappers.Traits, state
 		// ensure the access is permitted.
 
 		if mfaAllowed && deviceAllowed {
-			debugf("Access to %v %q granted, allow rule in role %q matched.",
-				r.GetKind(), r.GetName(), role.GetName())
+			logger.LogAttrs(ctx, logutils.TraceLevel, "Access to resource granted, allow rule in role matched",
+				slog.String("role", role.GetName()),
+			)
 			return nil
 		}
 
 		// MFA verification.
 		if !mfaAllowed && role.GetOptions().RequireMFAType.IsSessionMFARequired() {
-			debugf("Access to %v %q denied, role %q requires per-session MFA",
-				r.GetKind(), r.GetName(), role.GetName())
+			logger.LogAttrs(ctx, logutils.TraceLevel, "Access to resource denied, role requires per-session MFA",
+				slog.String("role", role.GetName()),
+			)
 			return ErrSessionMFARequired
 		}
 
 		// Device verification.
 		if !deviceAllowed && role.GetOptions().DeviceTrustMode == constants.DeviceTrustModeRequired {
-			debugf("Access to %v %q denied, role %q requires a trusted device",
-				r.GetKind(), r.GetName(), role.GetName())
+			logger.LogAttrs(ctx, logutils.TraceLevel, "Access to resource denied, role requires a trusted device",
+				slog.String("role", role.GetName()),
+			)
 			return ErrTrustedDeviceRequired
 		}
 
 		// Current role allows access, but keep looking for a more restrictive
 		// setting.
 		allowed = true
-		debugf("Access to %v %q granted, allow rule in role %q matched.",
-			r.GetKind(), r.GetName(), role.GetName())
+		logger.LogAttrs(ctx, logutils.TraceLevel, "Access to resource granted, allow rule in role matched",
+			slog.String("role", role.GetName()),
+		)
 	}
 
 	if allowed {
 		return nil
 	}
 
-	debugf("Access to %v %q denied, no allow rule matched; %v", r.GetKind(), r.GetName(), errs)
+	logger.LogAttrs(ctx, logutils.TraceLevel, "Access to resource denied, no allow rule matched",
+		slog.Any("errors", errs),
+	)
 	return trace.AccessDenied("access to %v denied. User does not have permissions. %v",
 		r.GetKind(), additionalDeniedMessage)
 }
@@ -3224,6 +3231,8 @@ func (a *accessExplicitlyDenied) Unwrap() error {
 }
 
 func (set RoleSet) checkAccessToRuleImpl(p checkAccessParams) (err error) {
+	ctx := context.Background()
+
 	// Every unknown error, which could be due to a bad role or an expression
 	// that can't parse, should be considered an explicit denial.
 	explicitDeny := true
@@ -3247,10 +3256,13 @@ func (set RoleSet) checkAccessToRuleImpl(p checkAccessParams) (err error) {
 				return trace.Wrap(err)
 			}
 			if matched {
-				log.WithFields(log.Fields{
-					teleport.ComponentKey: teleport.ComponentRBAC,
-				}).Tracef("Access to %v %v in namespace %v denied to %v: deny rule matched.",
-					p.verb, p.resource, p.namespace, role.GetName())
+				rbacLogger.LogAttrs(ctx, logutils.TraceLevel, "Access denied, deny rule matched",
+					slog.String("verb", p.verb),
+					slog.String("resource", p.resource),
+					slog.String("namespace", p.namespace),
+					slog.String("role", role.GetName()),
+				)
+
 				return trace.AccessDenied("access denied to perform action %q on %q", p.verb, p.resource)
 			}
 		}
@@ -3270,10 +3282,12 @@ func (set RoleSet) checkAccessToRuleImpl(p checkAccessParams) (err error) {
 		}
 	}
 
-	log.WithFields(log.Fields{
-		teleport.ComponentKey: teleport.ComponentRBAC,
-	}).Tracef("Access to %v %v in namespace %v denied to %v: no allow rule matched.",
-		p.verb, p.resource, p.namespace, set)
+	rbacLogger.LogAttrs(ctx, logutils.TraceLevel, "Access denied, no allow rule matched",
+		slog.String("verb", p.verb),
+		slog.String("resource", p.resource),
+		slog.String("namespace", p.namespace),
+		slog.Any("set", set),
+	)
 
 	// At this point no deny rule has matched and there are no more unknown
 	// errors, so this is only an implicit denial.
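The checkAccess conversion above follows a hot-path pattern: probe the handler once per call, bind the common attributes only when tracing is on, and emit records through LogAttrs with pre-built slog.Attr values. A minimal, self-contained sketch of that pattern, assuming a custom trace level one step below Debug (the TraceLevel constant and checkResource function are illustrative stand-ins, not Teleport code):

package main

import (
	"context"
	"log/slog"
	"os"
)

// slog ships no trace level; a level below Debug stands in for
// logutils.TraceLevel (an assumption for this sketch).
const TraceLevel = slog.LevelDebug - 1

func checkResource(ctx context.Context, logger *slog.Logger, kind, name string) {
	// One Enabled probe per call keeps the per-resource cost near zero
	// when tracing is off, instead of paying it at every log site.
	if logger.Handler().Enabled(ctx, TraceLevel) {
		logger = logger.With("resource_kind", kind, "resource_name", name)
	}
	// LogAttrs takes pre-built attrs and skips the variadic
	// any-to-Attr conversion done by the higher-level helpers.
	logger.LogAttrs(ctx, TraceLevel, "Access to resource denied, deny rule in role matched",
		slog.String("role", "example-role"),
	)
}

func main() {
	handler := slog.NewTextHandler(os.Stderr, &slog.HandlerOptions{Level: TraceLevel})
	checkResource(context.Background(), slog.New(handler), "node", "web-1")
}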
diff --git a/lib/services/role_test.go b/lib/services/role_test.go
index 7c85f6a07e0a8..13f2cc3f18c72 100644
--- a/lib/services/role_test.go
+++ b/lib/services/role_test.go
@@ -19,7 +19,6 @@ package services
 
 import (
-	"bytes"
 	"cmp"
 	"context"
 	"encoding/json"
@@ -2180,27 +2179,13 @@ func makeAccessCheckerWithRoleSet(roleSet RoleSet) AccessChecker {
 	return NewAccessCheckerWithRoleSet(accessInfo, "clustername", roleSet)
 }
 
-// testContext overrides context and captures log writes in action
-type testContext struct {
-	Context
-	// Buffer captures log writes
-	buffer *bytes.Buffer
-}
-
-// Write is implemented explicitly to avoid collision
-// of String methods when embedding
-func (t *testContext) Write(data []byte) (int, error) {
-	return t.buffer.Write(data)
-}
-
 func TestCheckRuleAccess(t *testing.T) {
 	type check struct {
-		hasAccess   bool
-		verb        string
-		namespace   string
-		rule        string
-		context     testContext
-		matchBuffer string
+		hasAccess bool
+		verb      string
+		namespace string
+		rule      string
+		context   Context
 	}
 	testCases := []struct {
 		name  string
@@ -2320,9 +2305,6 @@ func TestCheckRuleAccess(t *testing.T) {
 						Resources: []string{types.KindSession},
 						Verbs:     []string{types.VerbRead},
 						Where:     `contains(user.spec.traits["group"], "prod")`,
-						Actions: []string{
-							`log("info", "4 - tc match for user %v", user.metadata.name)`,
-						},
 					},
 				},
 			},
@@ -2333,17 +2315,14 @@ func TestCheckRuleAccess(t *testing.T) {
 				{rule: types.KindSession, verb: types.VerbRead, namespace: apidefaults.Namespace, hasAccess: false},
 				{rule: types.KindSession, verb: types.VerbList, namespace: apidefaults.Namespace, hasAccess: false},
 				{
-					context: testContext{
-						buffer: &bytes.Buffer{},
-						Context: Context{
-							User: &types.UserV2{
-								Metadata: types.Metadata{
-									Name: "bob",
-								},
-								Spec: types.UserSpecV2{
-									Traits: map[string][]string{
-										"group": {"dev", "prod"},
-									},
+					context: Context{
+						User: &types.UserV2{
+							Metadata: types.Metadata{
+								Name: "bob",
+							},
+							Spec: types.UserSpecV2{
+								Traits: map[string][]string{
+									"group": {"dev", "prod"},
 								},
 							},
 						},
@@ -2354,14 +2333,11 @@ func TestCheckRuleAccess(t *testing.T) {
 					hasAccess: true,
 				},
 				{
-					context: testContext{
-						buffer: &bytes.Buffer{},
-						Context: Context{
-							User: &types.UserV2{
-								Spec: types.UserSpecV2{
-									Traits: map[string][]string{
-										"group": {"dev"},
-									},
+					context: Context{
+						User: &types.UserV2{
+							Spec: types.UserSpecV2{
+								Traits: map[string][]string{
+									"group": {"dev"},
 								},
 							},
 						},
@@ -2389,9 +2365,6 @@ func TestCheckRuleAccess(t *testing.T) {
 						Resources: []string{types.KindRole},
 						Verbs:     []string{types.VerbRead},
 						Where:     `equals(resource.metadata.labels["team"], "dev")`,
-						Actions: []string{
-							`log("error", "4 - tc match")`,
-						},
 					},
 				},
 			},
@@ -2402,13 +2375,10 @@ func TestCheckRuleAccess(t *testing.T) {
 				{rule: types.KindRole, verb: types.VerbRead, namespace: apidefaults.Namespace, hasAccess: false},
 				{rule: types.KindRole, verb: types.VerbList, namespace: apidefaults.Namespace, hasAccess: false},
 				{
-					context: testContext{
-						buffer: &bytes.Buffer{},
-						Context: Context{
-							Resource: &types.RoleV6{
-								Metadata: types.Metadata{
-									Labels: map[string]string{"team": "dev"},
-								},
+					context: Context{
+						Resource: &types.RoleV6{
+							Metadata: types.Metadata{
+								Labels: map[string]string{"team": "dev"},
 							},
 						},
 					},
@@ -2439,9 +2409,6 @@ func TestCheckRuleAccess(t *testing.T) {
 						Resources: []string{types.KindRole},
 						Verbs:     []string{types.VerbRead},
 						Where:     `equals(resource.metadata.labels["team"], "dev")`,
-						Actions: []string{
-							`log("info", "matched more specific rule")`,
-						},
 					},
 				},
 			},
@@ -2450,21 +2417,17 @@ func TestCheckRuleAccess(t *testing.T) {
 			},
 			checks: []check{
 				{
-					context: testContext{
-						buffer: &bytes.Buffer{},
-						Context: Context{
-							Resource: &types.RoleV6{
-								Metadata: types.Metadata{
-									Labels: map[string]string{"team": "dev"},
-								},
+					context: Context{
+						Resource: &types.RoleV6{
+							Metadata: types.Metadata{
+								Labels: map[string]string{"team": "dev"},
 							},
 						},
 					},
-					rule:        types.KindRole,
-					verb:        types.VerbRead,
-					namespace:   apidefaults.Namespace,
-					hasAccess:   true,
-					matchBuffer: "more specific rule",
+					rule:      types.KindRole,
+					verb:      types.VerbRead,
+					namespace: apidefaults.Namespace,
+					hasAccess: true,
 				},
 			},
 		},
@@ -2486,9 +2449,6 @@ func TestCheckRuleAccess(t *testing.T) {
 			} else {
 				require.True(t, trace.IsAccessDenied(result), comment)
 			}
-			if check.matchBuffer != "" {
-				require.Contains(t, check.context.buffer.String(), check.matchBuffer, comment)
-			}
 		}
 	}
 }
@@ -2498,7 +2458,7 @@ func TestDefaultImplicitRules(t *testing.T) {
 		hasAccess bool
 		verb      string
 		rule      string
-		context   testContext
+		context   Context
 	}
 	testCases := []struct {
 		name  string
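With the log-capturing testContext removed, the table tests assert denial purely on the returned error. A generic sketch of that assertion style (stand-in names, not this package's API):

package main

import (
	"errors"
	"fmt"
)

var errAccessDenied = errors.New("access denied")

// checkAccessToRule stands in for the rule check under test.
func checkAccessToRule(verb string) error {
	if verb != "read" {
		return fmt.Errorf("verb %q not allowed: %w", verb, errAccessDenied)
	}
	return nil
}

func main() {
	// Assert on the error value; nothing is scraped from log output.
	if err := checkAccessToRule("list"); !errors.Is(err, errAccessDenied) {
		panic("expected an access-denied error")
	}
	fmt.Println("denied as expected")
}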
diff --git a/lib/services/saml.go b/lib/services/saml.go
index ad8e2dccaa36c..ab6988047e0a6 100644
--- a/lib/services/saml.go
+++ b/lib/services/saml.go
@@ -24,6 +24,7 @@ import (
 	"crypto/x509/pkix"
 	"encoding/base64"
 	"encoding/xml"
+	"log/slog"
 	"net/http"
 	"strings"
 	"time"
@@ -33,7 +34,6 @@ import (
 	saml2 "github.com/russellhaering/gosaml2"
 	samltypes "github.com/russellhaering/gosaml2/types"
 	dsig "github.com/russellhaering/goxmldsig"
-	log "github.com/sirupsen/logrus"
 
 	"github.com/gravitational/teleport"
 	"github.com/gravitational/teleport/api/types"
@@ -87,7 +87,11 @@ func ValidateSAMLConnector(sc types.SAMLConnector, rg RoleGetter) error {
 		}
 		sc.SetEntityDescriptor(entityDescriptor)
-		log.Debugf("[SAML] Successfully fetched entity descriptor from %v for connector %v", url, sc.GetName())
+		slog.DebugContext(context.Background(), "Successfully fetched entity descriptor for connector",
+			teleport.ComponentKey, teleport.ComponentSAML,
+			"entity_descriptor_url", url,
+			"connector", sc.GetName(),
+		)
 	}
 
 	if ed := sc.GetEntityDescriptor(); ed != "" {
@@ -173,9 +177,12 @@ func ValidateSAMLConnector(sc types.SAMLConnector, rg RoleGetter) error {
 		sc.SetMFASettings(mfa)
 	}
 
-	log.Debugf("[SAML] SSO: %v", sc.GetSSO())
-	log.Debugf("[SAML] Issuer: %v", sc.GetIssuer())
-	log.Debugf("[SAML] ACS: %v", sc.GetAssertionConsumerService())
+	slog.DebugContext(context.Background(), "Connector validated",
+		teleport.ComponentKey, teleport.ComponentSAML,
+		"sso", sc.GetSSO(),
+		"issuer", sc.GetIssuer(),
+		"acs", sc.GetAssertionConsumerService(),
+	)
 
 	return nil
 }
@@ -271,7 +278,9 @@ func GetSAMLServiceProvider(sc types.SAMLConnector, clock clockwork.Clock) (*sam
 		// Case 1: Only the signing key pair is set. This means that SAML encryption is not expected
 		// and we therefore configure the main key that gets used for all operations as the signing key.
 		// This is done because gosaml2 mandates an encryption key even if not used.
-		log.Info("No assertion_key_pair was detected. Falling back to signing key for all SAML operations.")
+		slog.InfoContext(context.Background(), "No assertion_key_pair was detected, falling back to signing key for all SAML operations",
+			teleport.ComponentKey, teleport.ComponentSAML,
+		)
 		keyStore, err = utils.ParseKeyStorePEM(signingKeyPair.PrivateKey, signingKeyPair.Cert)
 		signingKeyStore = keyStore
 		if err != nil {
@@ -281,7 +290,9 @@ func GetSAMLServiceProvider(sc types.SAMLConnector, clock clockwork.Clock) (*sam
 		// Case 2: An encryption keypair is configured. This means that encrypted SAML responses are expected.
 		// Since gosaml2 always uses the main key for encryption, we set it to assertion_key_pair.
 		// To handle signing correctly, we now instead set the optional signing key in gosaml2 to signing_key_pair.
-		log.Info("Detected assertion_key_pair and configured it to decrypt SAML responses.")
+		slog.InfoContext(context.Background(), "Detected assertion_key_pair and configured it to decrypt SAML responses",
+			teleport.ComponentKey, teleport.ComponentSAML,
+		)
 		keyStore, err = utils.ParseKeyStorePEM(encryptionKeyPair.PrivateKey, encryptionKeyPair.Cert)
 		if err != nil {
 			return nil, trace.Wrap(err, "failed to parse certificate or private key defined in assertion_key_pair")
@@ -313,9 +324,7 @@ func GetSAMLServiceProvider(sc types.SAMLConnector, clock clockwork.Clock) (*sam
 	// be used.
 	switch sc.GetProvider() {
 	case teleport.ADFS, teleport.JumpCloud:
-		log.WithFields(log.Fields{
-			teleport.ComponentKey: teleport.ComponentSAML,
-		}).Debug("Setting ADFS/JumpCloud values.")
+		slog.DebugContext(context.Background(), "Setting ADFS/JumpCloud values", teleport.ComponentKey, teleport.ComponentSAML)
 		if sp.SignAuthnRequests {
 			sp.SignAuthnRequestsCanonicalizer = dsig.MakeC14N10ExclusiveCanonicalizerWithPrefixList(dsig.DefaultPrefix)
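The saml.go call sites attach teleport.ComponentKey on every call; binding it once with slog.With is the equivalent one-time form. A small sketch (the componentKey constant and "saml" value are assumptions standing in for Teleport's):

package main

import (
	"context"
	"log/slog"
	"os"
)

// componentKey mirrors the role of teleport.ComponentKey in the hunks above.
const componentKey = "component"

func main() {
	slog.SetDefault(slog.New(slog.NewTextHandler(os.Stderr, &slog.HandlerOptions{Level: slog.LevelDebug})))

	// Bind the component once; every record from this logger carries it.
	samlLogger := slog.With(componentKey, "saml")
	samlLogger.DebugContext(context.Background(), "Connector validated",
		"sso", "https://idp.example.com/sso",
		"issuer", "https://idp.example.com",
	)
}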
diff --git a/lib/services/saml_idp_service_provider.go b/lib/services/saml_idp_service_provider.go
index 3d5882c5e5e67..51c60fa1e807f 100644
--- a/lib/services/saml_idp_service_provider.go
+++ b/lib/services/saml_idp_service_provider.go
@@ -21,6 +21,7 @@ package services
 import (
 	"context"
 	"fmt"
+	"log/slog"
 	"net/url"
 	"slices"
 	"strings"
@@ -28,7 +29,6 @@ import (
 	"github.com/crewjam/saml"
 	"github.com/crewjam/saml/samlsp"
 	"github.com/gravitational/trace"
-	log "github.com/sirupsen/logrus"
 
 	"github.com/gravitational/teleport/api/types"
 	"github.com/gravitational/teleport/lib/utils"
@@ -139,7 +139,10 @@ func FilterSAMLEntityDescriptor(ed *saml.EntityDescriptor, quiet bool) error {
 		filtered := slices.DeleteFunc(ed.SPSSODescriptors[i].AssertionConsumerServices, func(acs saml.IndexedEndpoint) bool {
 			if err := ValidateAssertionConsumerService(acs); err != nil {
 				if !quiet {
-					log.Warnf("AssertionConsumerService binding for entity %q is invalid and will be ignored: %v", ed.EntityID, err)
+					slog.WarnContext(context.Background(), "AssertionConsumerService binding for entity is invalid and will be ignored",
+						"entity_id", ed.EntityID,
+						"error", err,
+					)
 				}
 				return true
 			}
diff --git a/lib/services/semaphore.go b/lib/services/semaphore.go
index 52211acbfa33c..4e1a672423ced 100644
--- a/lib/services/semaphore.go
+++ b/lib/services/semaphore.go
@@ -20,12 +20,12 @@ package services
 
 import (
 	"context"
+	"log/slog"
 	"sync"
 	"time"
 
 	"github.com/gravitational/trace"
 	"github.com/jonboulle/clockwork"
-	log "github.com/sirupsen/logrus"
 
 	"github.com/gravitational/teleport/api/types"
 	"github.com/gravitational/teleport/api/utils/retryutils"
@@ -200,10 +200,17 @@ func (l *SemaphoreLock) keepAlive(ctx context.Context) {
 			defer cancel()
 			err = l.cfg.Service.CancelSemaphoreLease(cancelContext, lease)
 			if err != nil {
-				log.Warnf("Failed to cancel semaphore lease %s/%s: %v", lease.SemaphoreKind, lease.SemaphoreName, err)
+				slog.WarnContext(cancelContext, "Failed to cancel semaphore lease",
+					"semaphore_kind", lease.SemaphoreKind,
+					"semaphore_name", lease.SemaphoreName,
+					"error", err,
+				)
 			}
 		} else {
-			log.Errorf("Semaphore lease expired: %s/%s", lease.SemaphoreKind, lease.SemaphoreName)
+			slog.ErrorContext(context.Background(), "Semaphore lease expired",
+				"semaphore_kind", lease.SemaphoreKind,
+				"semaphore_name", lease.SemaphoreName,
+			)
 		}
 	}()
 Outer:
@@ -219,7 +226,11 @@ Outer:
 				leaseCancel()
 				// semaphore and/or lease no longer exist; best to log the error
 				// and exit immediately.
-				log.Warnf("Halting keepalive on semaphore %s/%s early: %v", lease.SemaphoreKind, lease.SemaphoreName, err)
+				slog.WarnContext(leaseContext, "Halting keepalive on semaphore",
+					"semaphore_kind", lease.SemaphoreKind,
+					"semaphore_name", lease.SemaphoreName,
+					"error", err,
+				)
 				nodrop = true
 				return
 			}
@@ -233,7 +244,11 @@ Outer:
 				}
 				continue Outer
 			}
-			log.Debugf("Failed to renew semaphore lease %s/%s: %v", lease.SemaphoreKind, lease.SemaphoreName, err)
+			slog.DebugContext(leaseContext, "Failed to renew semaphore lease",
+				"semaphore_kind", lease.SemaphoreKind,
+				"semaphore_name", lease.SemaphoreName,
+				"error", err,
+			)
 			l.retry.Inc()
 			select {
 			case <-l.retry.After():
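A detail worth noting in the semaphore conversion: slog does not interpolate printf verbs, so a message like "Failed to cancel semaphore lease %s/%s: %v" would be emitted verbatim; identifiers belong in attributes. A minimal sketch (lease values are placeholders):

package main

import (
	"context"
	"errors"
	"log/slog"
)

func main() {
	ctx := context.Background()
	kind, name := "connection", "node-1" // placeholder lease identifiers
	err := errors.New("lease not found")

	// The message stays a constant string; slog renders attrs as
	// key=value pairs rather than expanding %s/%v verbs.
	slog.WarnContext(ctx, "Failed to cancel semaphore lease",
		"semaphore_kind", kind,
		"semaphore_name", name,
		"error", err,
	)
}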
- log.Warnf("Halting keepalive on semaphore %s/%s early: %v", lease.SemaphoreKind, lease.SemaphoreName, err) + slog.WarnContext(leaseContext, "Halting keepalive on semaphore", + "semaphore_kind", lease.SemaphoreKind, + "semaphore_name", lease.SemaphoreName, + "error", err, + ) nodrop = true return } @@ -233,7 +244,11 @@ Outer: } continue Outer } - log.Debugf("Failed to renew semaphore lease %s/%s: %v", lease.SemaphoreKind, lease.SemaphoreName, err) + slog.DebugContext(leaseContext, "Failed to renew semaphore lease", + "semaphore_kind", lease.SemaphoreKind, + "semaphore_name", lease.SemaphoreName, + "error", err, + ) l.retry.Inc() select { case <-l.retry.After(): diff --git a/lib/services/simple/access_list.go b/lib/services/simple/access_list.go index 9effb4475c973..f8b8b78bdbe44 100644 --- a/lib/services/simple/access_list.go +++ b/lib/services/simple/access_list.go @@ -23,9 +23,7 @@ import ( "time" "github.com/gravitational/trace" - "github.com/sirupsen/logrus" - "github.com/gravitational/teleport" "github.com/gravitational/teleport/api/types" "github.com/gravitational/teleport/api/types/accesslist" "github.com/gravitational/teleport/lib/backend" @@ -46,7 +44,6 @@ const ( // AccessListService is a simple access list backend service for use specifically by the cache. type AccessListService struct { - log logrus.FieldLogger service *generic.Service[*accesslist.AccessList] memberService *generic.Service[*accesslist.AccessListMember] reviewService *generic.Service[*accesslist.Review] @@ -93,7 +90,6 @@ func NewAccessListService(b backend.Backend) (*AccessListService, error) { } return &AccessListService{ - log: logrus.WithFields(logrus.Fields{teleport.ComponentKey: "access-list:simple-service"}), service: service, memberService: memberService, reviewService: reviewService, diff --git a/lib/services/suite/suite.go b/lib/services/suite/suite.go index ad2cb4695b3f2..3c1a445480166 100644 --- a/lib/services/suite/suite.go +++ b/lib/services/suite/suite.go @@ -22,6 +22,7 @@ import ( "context" "crypto/x509/pkix" "fmt" + "log/slog" "sort" "sync" "sync/atomic" @@ -33,7 +34,6 @@ import ( "github.com/google/uuid" "github.com/gravitational/trace" "github.com/jonboulle/clockwork" - log "github.com/sirupsen/logrus" "github.com/stretchr/testify/require" "golang.org/x/crypto/bcrypt" protobuf "google.golang.org/protobuf/proto" @@ -2140,7 +2140,7 @@ skiploop: for { select { case event := <-w.Events(): - log.Debugf("Skipping pre-test event: %v", event) + slog.DebugContext(ctx, "Skipping pre-test event", "event", event) continue skiploop default: break skiploop @@ -2215,11 +2215,11 @@ waitLoop: t.Fatalf("Watcher exited with error %v", w.Error()) case event := <-w.Events(): if event.Type != types.OpPut { - log.Debugf("Skipping event %+v", event) + slog.DebugContext(context.Background(), "Skipping event", "event", event) continue } if resource.GetName() != event.Resource.GetName() || resource.GetKind() != event.Resource.GetKind() || resource.GetSubKind() != event.Resource.GetSubKind() { - log.Debugf("Skipping event %v resource %v, expecting %v", event.Type, event.Resource.GetMetadata(), event.Resource.GetMetadata()) + slog.DebugContext(context.Background(), "Skipping event", "event", event) continue waitLoop } require.Empty(t, cmp.Diff(resource, event.Resource)) @@ -2240,7 +2240,10 @@ waitLoop: t.Fatalf("Watcher exited with error %v", w.Error()) case event := <-w.Events(): if event.Type != types.OpDelete { - log.Debugf("Skipping stale event %v %v", event.Type, event.Resource.GetName()) + 
slog.DebugContext(context.Background(), "Skipping stale event", + "event_type", event.Type, + "resource_name", event.Resource.GetName(), + ) continue } diff --git a/lib/services/traits.go b/lib/services/traits.go index 4c6fa2294dbf1..fb27619f98539 100644 --- a/lib/services/traits.go +++ b/lib/services/traits.go @@ -19,11 +19,12 @@ package services import ( + "context" "fmt" + "log/slog" "regexp" "github.com/gravitational/trace" - log "github.com/sirupsen/logrus" "github.com/gravitational/teleport/api/types" apiutils "github.com/gravitational/teleport/api/utils" @@ -144,13 +145,19 @@ TraitMappingLoop: // show at most maxMismatchedTraitValuesLogged trait values to prevent huge log lines switch l := len(mismatched); { case l > maxMismatchedTraitValuesLogged: - log.WithField("expression", mapping.Value). - WithField("values", mismatched[0:maxMismatchedTraitValuesLogged]). - Debugf("%d trait value(s) did not match (showing first %d values)", len(mismatched), maxMismatchedTraitValuesLogged) + slog. + DebugContext(context.Background(), "trait value(s) did not match (showing first %d values)", + "mismatch_count", len(mismatched), + "max_mismatch_logged", maxMismatchedTraitValuesLogged, + "expression", mapping.Value, + "values", mismatched[0:maxMismatchedTraitValuesLogged], + ) case l > 0: - log.WithField("expression", mapping.Value). - WithField("values", mismatched). - Debugf("%d trait value(s) did not match", len(mismatched)) + slog.DebugContext(context.Background(), "trait value(s) did not match", + "mismatch_count", len(mismatched), + "expression", mapping.Value, + "values", mismatched, + ) } } } diff --git a/lib/services/unified_resource.go b/lib/services/unified_resource.go index 2cf2e1c89995f..1bf8bd95747c0 100644 --- a/lib/services/unified_resource.go +++ b/lib/services/unified_resource.go @@ -20,6 +20,7 @@ package services import ( "context" + "log/slog" "strings" "sync" "time" @@ -28,7 +29,6 @@ import ( "github.com/google/btree" "github.com/gravitational/trace" "github.com/jonboulle/clockwork" - log "github.com/sirupsen/logrus" "github.com/gravitational/teleport" "github.com/gravitational/teleport/api/client/proto" @@ -37,6 +37,7 @@ import ( "github.com/gravitational/teleport/api/types" "github.com/gravitational/teleport/lib/backend" "github.com/gravitational/teleport/lib/utils" + logutils "github.com/gravitational/teleport/lib/utils/log" "github.com/gravitational/teleport/lib/utils/pagination" ) @@ -68,9 +69,9 @@ type UnifiedResourceCacheConfig struct { // UnifiedResourceCache contains a representation of all resources that are displayable in the UI type UnifiedResourceCache struct { - rw sync.RWMutex - log *log.Entry - cfg UnifiedResourceCacheConfig + rw sync.RWMutex + logger *slog.Logger + cfg UnifiedResourceCacheConfig // nameTree is a BTree with items sorted by (hostname)/name/type nameTree *btree.BTreeG[*item] // typeTree is a BTree with items sorted by type/(hostname)/name @@ -101,10 +102,8 @@ func NewUnifiedResourceCache(ctx context.Context, cfg UnifiedResourceCacheConfig } m := &UnifiedResourceCache{ - log: log.WithFields(log.Fields{ - teleport.ComponentKey: cfg.Component, - }), - cfg: cfg, + logger: slog.With(teleport.ComponentKey, cfg.Component), + cfg: cfg, nameTree: btree.NewG(cfg.BTreeDegree, func(a, b *item) bool { return a.Less(b) }), @@ -267,7 +266,10 @@ func (c *UnifiedResourceCache) getRange(ctx context.Context, startKey backend.Ke } if len(res) == backend.DefaultRangeLimit { - c.log.Warnf("Range query hit backend limit. (this is a bug!) 
diff --git a/lib/services/unified_resource.go b/lib/services/unified_resource.go
index 2cf2e1c89995f..1bf8bd95747c0 100644
--- a/lib/services/unified_resource.go
+++ b/lib/services/unified_resource.go
@@ -20,6 +20,7 @@ package services
 
 import (
 	"context"
+	"log/slog"
 	"strings"
 	"sync"
 	"time"
@@ -28,7 +29,6 @@ import (
 	"github.com/google/btree"
 	"github.com/gravitational/trace"
 	"github.com/jonboulle/clockwork"
-	log "github.com/sirupsen/logrus"
 
 	"github.com/gravitational/teleport"
 	"github.com/gravitational/teleport/api/client/proto"
@@ -37,6 +37,7 @@ import (
 	"github.com/gravitational/teleport/api/types"
 	"github.com/gravitational/teleport/lib/backend"
 	"github.com/gravitational/teleport/lib/utils"
+	logutils "github.com/gravitational/teleport/lib/utils/log"
 	"github.com/gravitational/teleport/lib/utils/pagination"
 )
 
@@ -68,9 +69,9 @@ type UnifiedResourceCacheConfig struct {
 
 // UnifiedResourceCache contains a representation of all resources that are displayable in the UI
 type UnifiedResourceCache struct {
-	rw  sync.RWMutex
-	log *log.Entry
-	cfg UnifiedResourceCacheConfig
+	rw     sync.RWMutex
+	logger *slog.Logger
+	cfg    UnifiedResourceCacheConfig
 	// nameTree is a BTree with items sorted by (hostname)/name/type
 	nameTree *btree.BTreeG[*item]
 	// typeTree is a BTree with items sorted by type/(hostname)/name
@@ -101,10 +102,8 @@ func NewUnifiedResourceCache(ctx context.Context, cfg UnifiedResourceCacheConfig
 	}
 
 	m := &UnifiedResourceCache{
-		log: log.WithFields(log.Fields{
-			teleport.ComponentKey: cfg.Component,
-		}),
-		cfg: cfg,
+		logger: slog.With(teleport.ComponentKey, cfg.Component),
+		cfg:    cfg,
 		nameTree: btree.NewG(cfg.BTreeDegree, func(a, b *item) bool {
 			return a.Less(b)
 		}),
@@ -267,7 +266,10 @@ func (c *UnifiedResourceCache) getRange(ctx context.Context, startKey backend.Ke
 	}
 
 	if len(res) == backend.DefaultRangeLimit {
-		c.log.Warnf("Range query hit backend limit. (this is a bug!) startKey=%q,limit=%d", startKey, backend.DefaultRangeLimit)
+		c.logger.WarnContext(ctx, "Range query hit backend limit. (this is a bug!)",
+			"start_key", startKey,
+			"range_limit", backend.DefaultRangeLimit,
+		)
 	}
 
 	return res, nextKey, nil
@@ -747,7 +749,11 @@ func (c *UnifiedResourceCache) processEventsAndUpdateCurrent(ctx context.Context
 
 	for _, event := range events {
 		if event.Resource == nil {
-			c.log.Warnf("Unexpected event: %v.", event)
+			c.logger.WarnContext(ctx, "Unexpected event with nil resource",
+				"event_type", event.Type,
+			)
 			continue
 		}
 
@@ -775,15 +781,15 @@ func (c *UnifiedResourceCache) processEventsAndUpdateCurrent(ctx context.Context
 					c.putLocked(types.Resource153ToUnifiedResource(unwrapped))
 				default:
-					c.log.Warnf("unsupported Resource153 type %T.", unwrapped)
+					c.logger.WarnContext(ctx, "unsupported Resource153 type", "resource_type", logutils.TypeAttr(unwrapped))
 				}
 			default:
-				c.log.Warnf("unsupported Resource type %T.", r)
+				c.logger.WarnContext(ctx, "unsupported Resource type", "resource_type", logutils.TypeAttr(r))
 			}
 		default:
-			c.log.Warnf("unsupported event type %s.", event.Type)
+			c.logger.WarnContext(ctx, "unsupported event type", "event_type", event.Type)
 			continue
 		}
 	}
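The processEventsAndUpdateCurrent guard above must not touch event.Resource inside its own nil check, which is why the warning logs only the event type. A sketch of the corrected ordering with stand-in types:

package main

import (
	"context"
	"log/slog"
)

type resource struct{ kind, name string }

func (r *resource) GetKind() string { return r.kind }
func (r *resource) GetName() string { return r.name }

type event struct {
	Type     string
	Resource *resource
}

func main() {
	ctx := context.Background()
	events := []event{{Type: "init"}, {Type: "put", Resource: &resource{"node", "web-1"}}}

	for _, e := range events {
		// Dereferencing e.Resource here would panic for the init event;
		// log only what is known to be non-nil.
		if e.Resource == nil {
			slog.WarnContext(ctx, "Unexpected event with nil resource", "event_type", e.Type)
			continue
		}
		slog.InfoContext(ctx, "Processing event",
			"event_type", e.Type,
			"resource_kind", e.Resource.GetKind(),
			"resource_name", e.Resource.GetName(),
		)
	}
}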