diff --git a/pkg/controller/argocdregister/argocdregister_controller.go b/pkg/controller/argocdregister/argocdregister_controller.go
index 4346a6874d5..015cd0d159c 100644
--- a/pkg/controller/argocdregister/argocdregister_controller.go
+++ b/pkg/controller/argocdregister/argocdregister_controller.go
@@ -89,7 +89,7 @@ func NewReconciler(mgr manager.Manager, logger log.FieldLogger, rateLimiter flow
 }
 
 // AddToManager adds a new Controller to mgr with r as the reconcile.Reconciler
-func AddToManager(mgr manager.Manager, r reconcile.Reconciler, concurrentReconciles int, rateLimiter workqueue.RateLimiter) error {
+func AddToManager(mgr manager.Manager, r reconcile.Reconciler, concurrentReconciles int, rateLimiter workqueue.TypedRateLimiter[reconcile.Request]) error {
 	// Create a new controller
 	c, err := controller.New("argocdregister-controller", mgr, controller.Options{
 		Reconciler:              controllerutils.NewDelayingReconciler(r, log.WithField("controller", ControllerName)),
diff --git a/pkg/controller/argocdregister/argocdregister_controller_test.go b/pkg/controller/argocdregister/argocdregister_controller_test.go
index 16abc021392..d597b8d04f4 100644
--- a/pkg/controller/argocdregister/argocdregister_controller_test.go
+++ b/pkg/controller/argocdregister/argocdregister_controller_test.go
@@ -94,10 +94,7 @@ func TestArgoCDRegisterReconcile(t *testing.T) {
 				testSecret(corev1.SecretTypeDockerConfigJson, constants.GetMergedPullSecretName(testClusterDeployment()), corev1.DockerConfigJsonKey, "{}"),
 				testSecret(corev1.SecretTypeDockerConfigJson, credsSecret, corev1.DockerConfigJsonKey, "{}"),
 				testSecret(corev1.SecretTypeDockerConfigJson, "foo-lqmsh-admin-kubeconfig", "kubeconfig", "{}"),
-				testServiceAccount("argocd-server", argoCDDefaultNamespace,
-					corev1.ObjectReference{Kind: "Secret",
-						Name:      "argocd-token",
-						Namespace: argoCDDefaultNamespace}),
+				testServiceAccount("argocd-server", corev1.ObjectReference{Kind: "Secret", Name: "argocd-token", Namespace: argoCDDefaultNamespace}),
 				testSecretWithNamespace(corev1.SecretTypeDockerConfigJson, "argocd-token", argoCDDefaultNamespace, "token", "{}"),
 			},
 			argoCDEnabled: true,
@@ -117,10 +114,7 @@ func TestArgoCDRegisterReconcile(t *testing.T) {
 				testSecret(corev1.SecretTypeDockerConfigJson, constants.GetMergedPullSecretName(testClusterDeployment()), corev1.DockerConfigJsonKey, "{}"),
 				testSecret(corev1.SecretTypeDockerConfigJson, credsSecret, corev1.DockerConfigJsonKey, "{}"),
 				testSecret(corev1.SecretTypeDockerConfigJson, "foo-lqmsh-admin-kubeconfig", "kubeconfig", "{}"),
-				testServiceAccount("argocd-server", argoCDDefaultNamespace,
-					corev1.ObjectReference{Kind: "Secret",
-						Name:      "argocd-token",
-						Namespace: argoCDDefaultNamespace}),
+				testServiceAccount("argocd-server", corev1.ObjectReference{Kind: "Secret", Name: "argocd-token", Namespace: argoCDDefaultNamespace}),
 				testSecretWithNamespace(corev1.SecretTypeDockerConfigJson, "argocd-token", argoCDDefaultNamespace, "token", "{}"),
 				// Existing ArgoCD cluster secret
 				testSecretWithNamespace(corev1.SecretTypeDockerConfigJson, "cluster-test-api.test.com-2774145043", argoCDDefaultNamespace, "test", "{}"),
@@ -148,10 +142,7 @@ func TestArgoCDRegisterReconcile(t *testing.T) {
 				testSecret(corev1.SecretTypeDockerConfigJson, constants.GetMergedPullSecretName(testClusterDeployment()), corev1.DockerConfigJsonKey, "{}"),
 				testSecret(corev1.SecretTypeDockerConfigJson, credsSecret, corev1.DockerConfigJsonKey, "{}"),
 				testSecret(corev1.SecretTypeDockerConfigJson, "foo-lqmsh-admin-kubeconfig", "kubeconfig", "{}"),
-				testServiceAccount("argocd-server", argoCDDefaultNamespace,
testServiceAccount("argocd-server", argoCDDefaultNamespace, - corev1.ObjectReference{Kind: "Secret", - Name: "argocd-token", - Namespace: argoCDDefaultNamespace}), + testServiceAccount("argocd-server", []corev1.ObjectReference{{Kind: "Secret", Name: "argocd-token", Namespace: argoCDDefaultNamespace}}...), testSecretWithNamespace(corev1.SecretTypeDockerConfigJson, "argocd-token", argoCDDefaultNamespace, "token", "{}"), // Existing ArgoCD cluster secret testSecretWithNamespace(corev1.SecretTypeDockerConfigJson, "cluster-test-api.test.com-2774145043", argoCDDefaultNamespace, corev1.DockerConfigJsonKey, "{}"), @@ -173,10 +164,7 @@ func TestArgoCDRegisterReconcile(t *testing.T) { testSecret(corev1.SecretTypeDockerConfigJson, constants.GetMergedPullSecretName(testClusterDeployment()), corev1.DockerConfigJsonKey, "{}"), testSecret(corev1.SecretTypeDockerConfigJson, credsSecret, corev1.DockerConfigJsonKey, "{}"), testSecret(corev1.SecretTypeDockerConfigJson, "foo-lqmsh-admin-kubeconfig", "kubeconfig", "{}"), - testServiceAccount("argocd-server", argoCDDefaultNamespace, - corev1.ObjectReference{Kind: "Secret", - Name: "argocd-token", - Namespace: argoCDDefaultNamespace}), + testServiceAccount("argocd-server", []corev1.ObjectReference{{Kind: "Secret", Name: "argocd-token", Namespace: argoCDDefaultNamespace}}...), testSecretWithNamespace(corev1.SecretTypeDockerConfigJson, "argocd-token", argoCDDefaultNamespace, "token", "{}"), }, argoCDEnabled: false, @@ -329,7 +317,7 @@ func testSecretWithNamespace(secretType corev1.SecretType, name, namespace, key, return s } -func testServiceAccount(name, namespace string, secrets ...corev1.ObjectReference) *corev1.ServiceAccount { +func testServiceAccount(name string, secrets ...corev1.ObjectReference) *corev1.ServiceAccount { return &corev1.ServiceAccount{ ObjectMeta: metav1.ObjectMeta{ Name: name, diff --git a/pkg/controller/awsprivatelink/awsprivatelink_controller.go b/pkg/controller/awsprivatelink/awsprivatelink_controller.go index 55784c848a1..aeb0b99e142 100644 --- a/pkg/controller/awsprivatelink/awsprivatelink_controller.go +++ b/pkg/controller/awsprivatelink/awsprivatelink_controller.go @@ -94,7 +94,7 @@ func NewReconciler(mgr manager.Manager, rateLimiter flowcontrol.RateLimiter) (*R } // AddToManager adds a new Controller to mgr with r as the reconcile.Reconciler -func AddToManager(mgr manager.Manager, r *ReconcileAWSPrivateLink, concurrentReconciles int, rateLimiter workqueue.RateLimiter) error { +func AddToManager(mgr manager.Manager, r *ReconcileAWSPrivateLink, concurrentReconciles int, rateLimiter workqueue.TypedRateLimiter[reconcile.Request]) error { // Create a new controller c, err := controller.New("awsprivatelink-controller", mgr, controller.Options{ Reconciler: controllerutils.NewDelayingReconciler(r, log.WithField("controller", ControllerName)), @@ -546,7 +546,7 @@ func (r *ReconcileAWSPrivateLink) reconcilePrivateLink(cd *hivev1.ClusterDeploym } // Create the Private Hosted Zone for the VPC Endpoint. - hzModified, hostedZoneID, err := r.reconcileHostedZone(awsClient, cd, clusterMetadata, vpcEndpoint, apiDomain, logger) + hzModified, hostedZoneID, err := r.reconcileHostedZone(awsClient, cd, vpcEndpoint, apiDomain, logger) if err != nil { logger.WithError(err).Error("could not reconcile the Hosted Zone") @@ -568,7 +568,7 @@ func (r *ReconcileAWSPrivateLink) reconcilePrivateLink(cd *hivev1.ClusterDeploym } // Associate the VPCs to the hosted zone. 
-	associationsModified, err := r.reconcileHostedZoneAssociations(awsClient, cd, hostedZoneID, vpcEndpoint, logger)
+	associationsModified, err := r.reconcileHostedZoneAssociations(awsClient, hostedZoneID, vpcEndpoint, logger)
 	if err != nil {
 		logger.WithError(err).Error("could not reconcile the associations of the Hosted Zone")
 
@@ -763,7 +763,7 @@ func (r *ReconcileAWSPrivateLink) reconcileVPCEndpointService(awsClient *awsClie
 		cd.Status.Platform.AWS.PrivateLink.VPCEndpointService.AdditionalAllowedPrincipals = &desiredPermsSlice
 	}
 	cd.Status.Platform.AWS.PrivateLink.VPCEndpointService.DefaultAllowedPrincipal = &defaultARN
-	if err := r.updatePrivateLinkStatus(cd, logger); err != nil {
+	if err := r.updatePrivateLinkStatus(cd); err != nil {
 		logger.WithError(err).Error("error updating clusterdeployment status with vpcEndpointService additionalAllowedPrincipals")
 		return modified, nil, err
 	}
@@ -787,7 +787,7 @@ func (r *ReconcileAWSPrivateLink) ensureVPCEndpointService(awsClient awsclient.C
 	}
 	if len(resp.ServiceConfigurations) == 0 {
 		modified = true
-		serviceConfig, err = createVPCEndpointService(awsClient, cd, metadata, clusterNLB, logger)
+		serviceConfig, err = createVPCEndpointService(awsClient, metadata, clusterNLB, logger)
 		if err != nil {
 			logger.WithError(err).Error("failed to create VPC Endpoint Service for cluster")
 			return modified, nil, errors.Wrap(err, "failed to create VPC Endpoint Service for cluster")
@@ -801,7 +801,7 @@ func (r *ReconcileAWSPrivateLink) ensureVPCEndpointService(awsClient awsclient.C
 		ID:   *serviceConfig.ServiceId,
 		Name: *serviceConfig.ServiceName,
 	}
-	if err := r.updatePrivateLinkStatus(cd, logger); err != nil {
+	if err := r.updatePrivateLinkStatus(cd); err != nil {
 		logger.WithError(err).Error("error updating clusterdeployment status with vpcEndpointService")
 		return modified, nil, err
 	}
@@ -809,7 +809,7 @@ func (r *ReconcileAWSPrivateLink) ensureVPCEndpointService(awsClient awsclient.C
 	return modified, serviceConfig, nil
 }
 
-func createVPCEndpointService(awsClient awsclient.Client, cd *hivev1.ClusterDeployment, metadata *hivev1.ClusterMetadata, clusterNLB string, logger log.FieldLogger) (*ec2.ServiceConfiguration, error) {
+func createVPCEndpointService(awsClient awsclient.Client, metadata *hivev1.ClusterMetadata, clusterNLB string, logger log.FieldLogger) (*ec2.ServiceConfiguration, error) {
 	resp, err := awsClient.CreateVpcEndpointServiceConfiguration(&ec2.CreateVpcEndpointServiceConfigurationInput{
 		AcceptanceRequired:      aws.Bool(false),
 		NetworkLoadBalancerArns: aws.StringSlice([]string{clusterNLB}),
@@ -875,7 +875,7 @@ func (r *ReconcileAWSPrivateLink) reconcileVPCEndpoint(awsClient *awsClient,
 
 	initPrivateLinkStatus(cd)
 	cd.Status.Platform.AWS.PrivateLink.VPCEndpointID = *vpcEndpoint.VpcEndpointId
-	if err := r.updatePrivateLinkStatus(cd, logger); err != nil {
+	if err := r.updatePrivateLinkStatus(cd); err != nil {
 		logger.WithError(err).Error("error updating clusterdeployment status with vpcEndpointID")
 		return modified, nil, err
 	}
@@ -931,7 +931,7 @@ func (r *ReconcileAWSPrivateLink) createVPCEndpoint(awsClient awsclient.Client,
 // where VPC endpoint was created. It also make sure the DNS zone has an ALIAS record pointing
 // to the regional DNS name of the VPC endpoint.
 func (r *ReconcileAWSPrivateLink) reconcileHostedZone(awsClient *awsClient,
-	cd *hivev1.ClusterDeployment, metadata *hivev1.ClusterMetadata,
+	cd *hivev1.ClusterDeployment,
 	vpcEndpoint *ec2.VpcEndpoint,
 	apiDomain string, logger log.FieldLogger) (bool, string, error) {
 	modified, hostedZoneID, err := r.ensureHostedZone(awsClient.hub, cd, vpcEndpoint, apiDomain, logger)
@@ -1015,7 +1015,7 @@ func (r *ReconcileAWSPrivateLink) ensureHostedZone(awsClient awsclient.Client,
 	endpoint *ec2.VpcEndpoint,
 	apiDomain string, logger log.FieldLogger) (bool, string, error) {
 	modified := false
-	hzID, err := findHostedZone(awsClient, *endpoint.VpcId, cd.Spec.Platform.AWS.Region, apiDomain, logger)
+	hzID, err := findHostedZone(awsClient, *endpoint.VpcId, cd.Spec.Platform.AWS.Region, apiDomain)
 	if err != nil && errors.Is(err, errNoHostedZoneFoundForVPC) {
 		modified = true
 		hzID, err = r.createHostedZone(awsClient, cd, endpoint, apiDomain, logger)
@@ -1030,7 +1030,7 @@ func (r *ReconcileAWSPrivateLink) ensureHostedZone(awsClient awsclient.Client,
 
 	initPrivateLinkStatus(cd)
 	cd.Status.Platform.AWS.PrivateLink.HostedZoneID = hzID
-	if err := r.updatePrivateLinkStatus(cd, logger); err != nil {
+	if err := r.updatePrivateLinkStatus(cd); err != nil {
 		logger.WithError(err).Error("failed to update the hosted zone ID for cluster deployment")
 		return modified, "", err
 	}
@@ -1043,7 +1043,7 @@ var errNoHostedZoneFoundForVPC = errors.New("no hosted zone found")
 // findHostedZone finds a Private Hosted Zone for apiDomain that is associated with the given
 // VPC.
 // If no such hosted zone exists, it return an errNoHostedZoneFoundForVPC error.
-func findHostedZone(awsClient awsclient.Client, vpcID, vpcRegion, apiDomain string, logger log.FieldLogger) (string, error) {
+func findHostedZone(awsClient awsclient.Client, vpcID, vpcRegion, apiDomain string) (string, error) {
 	input := &route53.ListHostedZonesByVPCInput{
 		VPCId:     aws.String(vpcID),
 		VPCRegion: aws.String(vpcRegion),
@@ -1097,7 +1097,6 @@ func (r *ReconcileAWSPrivateLink) createHostedZone(awsClient awsclient.Client,
 // reconcileHostedZoneAssociations ensures that the all the VPCs in the associatedVPCs list from
 // the controller config are associated to the PHZ hostedZoneID.
 func (r *ReconcileAWSPrivateLink) reconcileHostedZoneAssociations(awsClient *awsClient,
-	cd *hivev1.ClusterDeployment,
 	hostedZoneID string,
 	vpcEndpoint *ec2.VpcEndpoint, logger log.FieldLogger) (bool, error) {
 	hzLog := logger.WithField("hostedZoneID", hostedZoneID)
@@ -1362,7 +1361,7 @@ var retryBackoff = wait.Backoff{
 	Jitter:   0.1,
 }
 
-func (r *ReconcileAWSPrivateLink) updatePrivateLinkStatus(cd *hivev1.ClusterDeployment, logger log.FieldLogger) error {
+func (r *ReconcileAWSPrivateLink) updatePrivateLinkStatus(cd *hivev1.ClusterDeployment) error {
 	return retry.RetryOnConflict(retryBackoff, func() error {
 		curr := &hivev1.ClusterDeployment{}
 		err := r.Client.Get(context.TODO(), types.NamespacedName{Namespace: cd.Namespace, Name: cd.Name}, curr)
diff --git a/pkg/controller/awsprivatelink/cleanup.go b/pkg/controller/awsprivatelink/cleanup.go
index d458a634272..b502f782405 100644
--- a/pkg/controller/awsprivatelink/cleanup.go
+++ b/pkg/controller/awsprivatelink/cleanup.go
@@ -105,18 +105,18 @@ func (r *ReconcileAWSPrivateLink) cleanupPrivateLink(cd *hivev1.ClusterDeploymen
 		logger.WithError(err).Error("error cleaning up Hosted Zone")
 		return err
 	}
-	if err := r.cleanupVPCEndpoint(awsClient.hub, cd, metadata, logger); err != nil {
+	if err := r.cleanupVPCEndpoint(awsClient.hub, metadata, logger); err != nil {
 		logger.WithError(err).Error("error cleaning up VPCEndpoint")
 		return err
 	}
-	if err := r.cleanupVPCEndpointService(awsClient.user, cd, metadata, logger); err != nil {
+	if err := r.cleanupVPCEndpointService(awsClient.user, metadata, logger); err != nil {
 		logger.WithError(err).Error("error cleaning up VPCEndpoint Service")
 		return err
 	}
 
 	initPrivateLinkStatus(cd)
 	cd.Status.Platform.AWS.PrivateLink = nil
-	if err := r.updatePrivateLinkStatus(cd, logger); err != nil {
+	if err := r.updatePrivateLinkStatus(cd); err != nil {
 		logger.WithError(err).Error("error updating clusterdeployment after cleanup of private link")
 		return err
 	}
@@ -160,7 +160,7 @@ func (r *ReconcileAWSPrivateLink) cleanupHostedZone(awsClient awsclient.Client,
 	}
 	vpcEndpoint := endpointResp.VpcEndpoints[0]
 
-	hzID, err = findHostedZone(awsClient, *vpcEndpoint.VpcId, cd.Spec.Platform.AWS.Region, apiDomain, logger)
+	hzID, err = findHostedZone(awsClient, *vpcEndpoint.VpcId, cd.Spec.Platform.AWS.Region, apiDomain)
 	if err != nil && errors.Is(err, errNoHostedZoneFoundForVPC) {
 		return nil // no work
 	}
@@ -214,7 +214,7 @@ func (r *ReconcileAWSPrivateLink) cleanupHostedZone(awsClient awsclient.Client,
 }
 
 func (r *ReconcileAWSPrivateLink) cleanupVPCEndpoint(awsClient awsclient.Client,
-	cd *hivev1.ClusterDeployment, metadata *hivev1.ClusterMetadata,
+	metadata *hivev1.ClusterMetadata,
 	logger log.FieldLogger) error {
 	idLog := logger.WithField("infraID", metadata.InfraID)
 	resp, err := awsClient.DescribeVpcEndpoints(&ec2.DescribeVpcEndpointsInput{
@@ -243,7 +243,7 @@ func (r *ReconcileAWSPrivateLink) cleanupVPCEndpoint(awsClient awsclient.Client,
 }
 
 func (r *ReconcileAWSPrivateLink) cleanupVPCEndpointService(awsClient awsclient.Client,
-	cd *hivev1.ClusterDeployment, metadata *hivev1.ClusterMetadata,
+	metadata *hivev1.ClusterMetadata,
 	logger log.FieldLogger) error {
 	idLog := logger.WithField("infraID", metadata.InfraID)
 	resp, err := awsClient.DescribeVpcEndpointServiceConfigurations(&ec2.DescribeVpcEndpointServiceConfigurationsInput{
diff --git a/pkg/controller/clusterclaim/clusterclaim_controller.go b/pkg/controller/clusterclaim/clusterclaim_controller.go
index ca93620d79a..014055c6317 100644
--- a/pkg/controller/clusterclaim/clusterclaim_controller.go
+++ b/pkg/controller/clusterclaim/clusterclaim_controller.go
@@ -62,7 +62,7 @@ func NewReconciler(mgr manager.Manager, rateLimiter flowcontrol.RateLimiter) *Re
 }
 
 // AddToManager adds a new Controller to mgr with r as the reconcile.Reconciler
-func AddToManager(mgr manager.Manager, r *ReconcileClusterClaim, concurrentReconciles int, rateLimiter workqueue.RateLimiter) error {
+func AddToManager(mgr manager.Manager, r *ReconcileClusterClaim, concurrentReconciles int, rateLimiter workqueue.TypedRateLimiter[reconcile.Request]) error {
 	// Create a new controller
 	c, err := controller.New("clusterclaim-controller", mgr, controller.Options{
 		Reconciler:              controllerutils.NewDelayingReconciler(r, r.logger),
@@ -122,7 +122,7 @@ func requestsForClusterDeployment(ctx context.Context, cd *hivev1.ClusterDeploym
 	return []reconcile.Request{{NamespacedName: *claim}}
 }
 
-func requestsForRBACResourcesRole(c client.Client, resourceName string, logger log.FieldLogger) handler.TypedMapFunc[*rbacv1.Role] {
+func requestsForRBACResourcesRole(c client.Client, resourceName string, logger log.FieldLogger) handler.TypedMapFunc[*rbacv1.Role, reconcile.Request] {
 	return func(ctx context.Context, o *rbacv1.Role) []reconcile.Request {
 		if o.GetName() != resourceName {
 			return nil
@@ -141,7 +141,7 @@ func requestsForRBACResourcesRole(c client.Client, resourceName string, logger l
 	}
 }
 
-func requestsForRBACResourcesRoleBinding(c client.Client, resourceName string, logger log.FieldLogger) handler.TypedMapFunc[*rbacv1.RoleBinding] {
+func requestsForRBACResourcesRoleBinding(c client.Client, resourceName string, logger log.FieldLogger) handler.TypedMapFunc[*rbacv1.RoleBinding, reconcile.Request] {
 	return func(ctx context.Context, o *rbacv1.RoleBinding) []reconcile.Request {
 		if o.GetName() != resourceName {
 			return nil
@@ -534,7 +534,7 @@ func (r *ReconcileClusterClaim) createRBAC(claim *hivev1.ClusterClaim, cd *hivev
 	if cd.Spec.ClusterMetadata == nil {
 		return errors.New("ClusterDeployment does not have ClusterMetadata")
 	}
-	if err := r.applyHiveClaimOwnerRole(claim, cd, logger); err != nil {
+	if err := r.applyHiveClaimOwnerRole(cd, logger); err != nil {
 		return err
 	}
 	if err := r.applyHiveClaimOwnerRoleBinding(claim, cd, logger); err != nil {
@@ -543,7 +543,7 @@ func (r *ReconcileClusterClaim) createRBAC(claim *hivev1.ClusterClaim, cd *hivev
 	return nil
 }
 
-func (r *ReconcileClusterClaim) applyHiveClaimOwnerRole(claim *hivev1.ClusterClaim, cd *hivev1.ClusterDeployment, logger log.FieldLogger) error {
+func (r *ReconcileClusterClaim) applyHiveClaimOwnerRole(cd *hivev1.ClusterDeployment, logger log.FieldLogger) error {
 	desiredRole := &rbacv1.Role{
 		ObjectMeta: metav1.ObjectMeta{
 			Namespace: cd.Namespace,
diff --git a/pkg/controller/clusterclaim/clusterclaim_controller_test.go b/pkg/controller/clusterclaim/clusterclaim_controller_test.go
index 9f8e7efe002..9fb7b5ef275 100644
--- a/pkg/controller/clusterclaim/clusterclaim_controller_test.go
+++ b/pkg/controller/clusterclaim/clusterclaim_controller_test.go
@@ -802,10 +802,7 @@ func Test_getClaimLifetime(t *testing.T) {
 		t.Run(test.name, func(t *testing.T) {
 			var poolLifetime *hivev1.ClusterPoolClaimLifetime
 			if test.defaultLifetime != nil {
-				if poolLifetime == nil {
-					poolLifetime = &hivev1.ClusterPoolClaimLifetime{}
-				}
-				poolLifetime.Default = test.defaultLifetime
+				poolLifetime = &hivev1.ClusterPoolClaimLifetime{Default: test.defaultLifetime}
 			}
 			if test.maximumLifetime != nil {
 				if poolLifetime == nil {
diff --git a/pkg/controller/clusterdeployment/clusterdeployment_controller.go b/pkg/controller/clusterdeployment/clusterdeployment_controller.go
index c8147fd57c7..be2d5b2e0cf 100644
--- a/pkg/controller/clusterdeployment/clusterdeployment_controller.go
+++ b/pkg/controller/clusterdeployment/clusterdeployment_controller.go
@@ -131,7 +131,7 @@ func Add(mgr manager.Manager) error {
 	}
 	// Register the metrics. This is done here to ensure we define the metrics with optional label support after we have
 	// read the hiveconfig, and we register them only once.
-	registerMetrics(mConfig, logger)
+	registerMetrics(mConfig)
 	return AddToManager(mgr, NewReconciler(mgr, logger, clientRateLimiter), concurrentReconciles, queueRateLimiter)
 }
 
@@ -168,7 +168,7 @@ func NewReconciler(mgr manager.Manager, logger log.FieldLogger, rateLimiter flow
 }
 
 // AddToManager adds a new Controller to mgr with r as the reconcile.Reconciler
-func AddToManager(mgr manager.Manager, r reconcile.Reconciler, concurrentReconciles int, rateLimiter workqueue.RateLimiter) error {
+func AddToManager(mgr manager.Manager, r reconcile.Reconciler, concurrentReconciles int, rateLimiter workqueue.TypedRateLimiter[reconcile.Request]) error {
 	cdReconciler, ok := r.(*ReconcileClusterDeployment)
 	if !ok {
 		return errors.New("reconciler supplied is not a ReconcileClusterDeployment")
@@ -697,7 +697,7 @@ func (r *ReconcileClusterDeployment) reconcile(request reconcile.Request, cd *hi
 
 	// Sanity check the platform/cloud credentials and set hivev1.AuthenticationFailureClusterDeploymentCondition
 	validCreds, authError := r.validatePlatformCreds(cd, cdLog)
-	_, err := r.setAuthenticationFailure(cd, validCreds, authError, cdLog)
+	_, err := r.setAuthenticationFailure(cd, validCreds, authError)
 	if err != nil {
 		cdLog.WithError(err).Error("unable to update clusterdeployment")
 		return reconcile.Result{}, err
@@ -717,7 +717,7 @@ func (r *ReconcileClusterDeployment) reconcile(request reconcile.Request, cd *hi
 		return reconcile.Result{}, err
 	}
 
-	releaseImage := r.getReleaseImage(cd, imageSet, cdLog)
+	releaseImage := r.getReleaseImage(cd, imageSet)
 
 	cdLog.Debug("loading pull secrets")
 	pullSecret, err := r.mergePullSecrets(cd, cdLog)
@@ -929,7 +929,7 @@ func dnsZoneNotReadyMaybeTimedOut(cd *hivev1.ClusterDeployment, logger log.Field
 // getReleaseImage looks for a a release image in clusterdeployment or its corresponding imageset in the following order:
 // 1 - specified in the cluster deployment spec.images.releaseImage
 // 2 - referenced in the cluster deployment spec.imageSet
-func (r *ReconcileClusterDeployment) getReleaseImage(cd *hivev1.ClusterDeployment, imageSet *hivev1.ClusterImageSet, cdLog log.FieldLogger) string {
+func (r *ReconcileClusterDeployment) getReleaseImage(cd *hivev1.ClusterDeployment, imageSet *hivev1.ClusterImageSet) string {
 	if cd.Spec.Provisioning != nil && cd.Spec.Provisioning.ReleaseImage != "" {
 		return cd.Spec.Provisioning.ReleaseImage
 	}
@@ -1147,7 +1147,7 @@ func (r *ReconcileClusterDeployment) updateCondition(
 	return r.Status().Update(context.TODO(), cd)
 }
 
-func (r *ReconcileClusterDeployment) setAuthenticationFailure(cd *hivev1.ClusterDeployment, authSuccessful bool, authError error, cdLog log.FieldLogger) (bool, error) {
+func (r *ReconcileClusterDeployment) setAuthenticationFailure(cd *hivev1.ClusterDeployment, authSuccessful bool, authError error) (bool, error) {
 	var status corev1.ConditionStatus
 	var reason, message string
 
@@ -1437,7 +1437,7 @@ func (r *ReconcileClusterDeployment) syncDeletedClusterDeployment(cd *hivev1.Clu
 		return reconcile.Result{}, errors.Wrap(err, "could not determine relocate status")
 	case relocateStatus == hivev1.RelocateComplete:
 		cdLog.Info("clusterdeployment relocated, removing finalizer")
-		err := r.removeClusterDeploymentFinalizer(cd, cdLog)
+		err := r.removeClusterDeploymentFinalizer(cd)
 		if err != nil {
 			cdLog.WithError(err).Log(controllerutils.LogLevel(err), "error removing finalizer")
 		}
@@ -1484,7 +1484,7 @@ func (r *ReconcileClusterDeployment) syncDeletedClusterDeployment(cd *hivev1.Clu
 		return reconcile.Result{RequeueAfter: defaultRequeueTime}, nil
 	default:
 		cdLog.Infof("DNSZone gone, customization gone and deprovision request completed, removing deprovision finalizer")
-		if err := r.removeClusterDeploymentFinalizer(cd, cdLog); err != nil {
+		if err := r.removeClusterDeploymentFinalizer(cd); err != nil {
 			cdLog.WithError(err).Log(controllerutils.LogLevel(err), "error removing finalizer")
 			return reconcile.Result{}, err
 		}
@@ -1498,7 +1498,7 @@ func (r *ReconcileClusterDeployment) addClusterDeploymentFinalizer(cd *hivev1.Cl
 	return r.Update(context.TODO(), cd)
 }
 
-func (r *ReconcileClusterDeployment) removeClusterDeploymentFinalizer(cd *hivev1.ClusterDeployment, cdLog log.FieldLogger) error {
+func (r *ReconcileClusterDeployment) removeClusterDeploymentFinalizer(cd *hivev1.ClusterDeployment) error {
 	cd = cd.DeepCopy()
 	controllerutils.DeleteFinalizer(cd, hivev1.FinalizerDeprovision)
 
@@ -2054,7 +2054,7 @@ func (r *ReconcileClusterDeployment) updatePullSecretInfo(pullSecret string, cd
 	return true, nil
 }
 
-func configureTrustedCABundleConfigMap(cm *corev1.ConfigMap, cd *hivev1.ClusterDeployment) bool {
+func configureTrustedCABundleConfigMap(cm *corev1.ConfigMap) bool {
 	modified := false
 	if cm.Labels == nil {
 		cm.Labels = make(map[string]string)
@@ -2087,7 +2087,7 @@ func (r *ReconcileClusterDeployment) ensureTrustedCABundleConfigMap(cd *hivev1.C
 			if cm.Labels == nil {
 				cm.Labels = make(map[string]string)
 			}
-			configureTrustedCABundleConfigMap(cm, cd)
+			configureTrustedCABundleConfigMap(cm)
 			if err := r.Create(context.TODO(), cm); err != nil {
 				deleted, err2 := r.namespaceTerminated(cd.Namespace)
 				if deleted {
@@ -2106,7 +2106,7 @@ func (r *ReconcileClusterDeployment) ensureTrustedCABundleConfigMap(cd *hivev1.C
 			return errors.Wrap(err, "Failed to retrieve trusted CA bundle ConfigMap")
 		}
 	}
-	if configureTrustedCABundleConfigMap(cm, cd) {
+	if configureTrustedCABundleConfigMap(cm) {
 		if err := r.Update(context.TODO(), cm); err != nil {
 			return errors.Wrap(err, "Failed to update the trusted CA bundle ConfigMap")
 		}
@@ -2114,7 +2114,7 @@ func (r *ReconcileClusterDeployment) ensureTrustedCABundleConfigMap(cd *hivev1.C
 	return nil
 }
 
-func calculateNextProvisionTime(failureTime time.Time, retries int, cdLog log.FieldLogger) time.Time {
+func calculateNextProvisionTime(failureTime time.Time, retries int) time.Time {
 	// (2^currentRetries) * 60 seconds up to a max of 24 hours.
 	const sleepCap = 24 * time.Hour
 	const retryCap = 11 // log_2_(24*60)
diff --git a/pkg/controller/clusterdeployment/clusterdeployment_controller_test.go b/pkg/controller/clusterdeployment/clusterdeployment_controller_test.go
index ff3aaf17622..4f650a0d89c 100644
--- a/pkg/controller/clusterdeployment/clusterdeployment_controller_test.go
+++ b/pkg/controller/clusterdeployment/clusterdeployment_controller_test.go
@@ -94,7 +94,7 @@ func init() {
 	log.SetLevel(log.DebugLevel)
 	// While the metrics need not be registered for this test suite, they still need to be defined to avoid panics
 	// during the tests
-	registerMetrics(&metricsconfig.MetricsConfig{}, log.WithField("controller", "clusterDeployment"))
+	registerMetrics(&metricsconfig.MetricsConfig{})
 }
 
 func fakeReadFile(content string) func(string) ([]byte, error) {
@@ -3576,7 +3576,7 @@ func TestCalculateNextProvisionTime(t *testing.T) {
 	}
 	for _, tc := range cases {
 		t.Run(tc.name, func(t *testing.T) {
-			actualNextTime := calculateNextProvisionTime(tc.failureTime, tc.attempt, log.WithField("controller", "clusterDeployment"))
+			actualNextTime := calculateNextProvisionTime(tc.failureTime, tc.attempt)
 			assert.Equal(t, tc.expectedNextTime.String(), actualNextTime.String(), "unexpected next provision time")
 		})
 	}
diff --git a/pkg/controller/clusterdeployment/clusterprovisions.go b/pkg/controller/clusterdeployment/clusterprovisions.go
index e8f5efb3a23..6b65bcf7e19 100644
--- a/pkg/controller/clusterdeployment/clusterprovisions.go
+++ b/pkg/controller/clusterdeployment/clusterprovisions.go
@@ -442,7 +442,7 @@ func (r *ReconcileClusterDeployment) reconcileExistingProvision(cd *hivev1.Clust
 	case hivev1.ClusterProvisionStageInitializing:
 		return r.reconcileInitializingProvision(cd, provision, logger)
 	case hivev1.ClusterProvisionStageProvisioning:
-		return r.reconcileProvisioningProvision(cd, provision, logger)
+		return r.reconcileProvisioningProvision(cd, logger)
 	case hivev1.ClusterProvisionStageFailed:
 		return r.reconcileFailedProvision(cd, provision, logger)
 	case hivev1.ClusterProvisionStageComplete:
@@ -502,7 +502,7 @@ func (r *ReconcileClusterDeployment) reconcileInitializingProvision(cd *hivev1.C
 	return reconcile.Result{}, nil
 }
 
-func (r *ReconcileClusterDeployment) reconcileProvisioningProvision(cd *hivev1.ClusterDeployment, provision *hivev1.ClusterProvision, cdLog log.FieldLogger) (reconcile.Result, error) {
+func (r *ReconcileClusterDeployment) reconcileProvisioningProvision(cd *hivev1.ClusterDeployment, cdLog log.FieldLogger) (reconcile.Result, error) {
 	cdLog.Debug("still provisioning")
 	if err := r.updateCondition(cd, hivev1.InstallLaunchErrorCondition, corev1.ConditionFalse, "InstallLaunchSuccessful", "Successfully launched install pod", cdLog); err != nil {
 		cdLog.WithError(err).Log(controllerutils.LogLevel(err), "could not update InstallLaunchErrorCondition")
@@ -528,7 +528,7 @@ func (r *ReconcileClusterDeployment) reconcileFailedProvision(cd *hivev1.Cluster
 
 	failedCond := controllerutils.FindCondition(provision.Status.Conditions, hivev1.ClusterProvisionFailedCondition)
 	if failedCond != nil && failedCond.Status == corev1.ConditionTrue {
-		nextProvisionTime = calculateNextProvisionTime(failedCond.LastTransitionTime.Time, cd.Status.InstallRestarts, cdLog)
+		nextProvisionTime = calculateNextProvisionTime(failedCond.LastTransitionTime.Time, cd.Status.InstallRestarts)
 		reason = failedCond.Reason
 		message = failedCond.Message
 	} else {
@@ -781,15 +781,15 @@ func (r *ReconcileClusterDeployment) watchClusterProvisions(mgr manager.Manager,
 	return c.Watch(source.Kind(mgr.GetCache(), &hivev1.ClusterProvision{}, handler))
 }
 
-var _ handler.TypedEventHandler[*hivev1.ClusterProvision] = &clusterProvisionEventHandler{}
+var _ handler.TypedEventHandler[*hivev1.ClusterProvision, reconcile.Request] = &clusterProvisionEventHandler{}
 
 type clusterProvisionEventHandler struct {
-	handler.TypedEventHandler[*hivev1.ClusterProvision]
+	handler.TypedEventHandler[*hivev1.ClusterProvision, reconcile.Request]
 	reconciler *ReconcileClusterDeployment
 }
 
 // Create implements handler.EventHandler
-func (h *clusterProvisionEventHandler) Create(ctx context.Context, e event.TypedCreateEvent[*hivev1.ClusterProvision], q workqueue.RateLimitingInterface) {
+func (h *clusterProvisionEventHandler) Create(ctx context.Context, e event.TypedCreateEvent[*hivev1.ClusterProvision], q workqueue.TypedRateLimitingInterface[reconcile.Request]) {
 	h.reconciler.logger.Info("ClusterProvision created")
 	h.reconciler.trackClusterProvisionAdd(e.Object)
 	h.TypedEventHandler.Create(ctx, e, q)
diff --git a/pkg/controller/clusterdeployment/metrics.go b/pkg/controller/clusterdeployment/metrics.go
index ed668ed1468..e8d42dbc916 100644
--- a/pkg/controller/clusterdeployment/metrics.go
+++ b/pkg/controller/clusterdeployment/metrics.go
@@ -2,7 +2,6 @@ package clusterdeployment
 
 import (
 	"github.com/prometheus/client_golang/prometheus"
-	log "github.com/sirupsen/logrus"
 
 	"sigs.k8s.io/controller-runtime/pkg/metrics"
 
@@ -69,7 +68,7 @@ func incProvisionFailedTerminal(cd *hivev1.ClusterDeployment) {
 	metricProvisionFailedTerminal.Observe(cd, fixedLabels, 1)
 }
 
-func registerMetrics(mConfig *metricsconfig.MetricsConfig, log log.FieldLogger) {
+func registerMetrics(mConfig *metricsconfig.MetricsConfig) {
 	mapClusterTypeLabelToValue := hivemetrics.GetOptionalClusterTypeLabels(mConfig)
 
 	metricCompletedInstallJobRestarts = *hivemetrics.NewHistogramVecWithDynamicLabels(
diff --git a/pkg/controller/clusterdeprovision/clusterdeprovision_controller.go b/pkg/controller/clusterdeprovision/clusterdeprovision_controller.go
index a991b8fc218..2c3c0663579 100644
--- a/pkg/controller/clusterdeprovision/clusterdeprovision_controller.go
+++ b/pkg/controller/clusterdeprovision/clusterdeprovision_controller.go
@@ -103,7 +103,7 @@ func newReconciler(mgr manager.Manager, rateLimiter flowcontrol.RateLimiter) (re
 }
 
 // add adds a new Controller to mgr with r as the reconcile.Reconciler
-func add(mgr manager.Manager, r reconcile.Reconciler, concurrentReconciles int, rateLimiter workqueue.RateLimiter) error {
+func add(mgr manager.Manager, r reconcile.Reconciler, concurrentReconciles int, rateLimiter workqueue.TypedRateLimiter[reconcile.Request]) error {
 	// Create a new controller
 	c, err := controller.New("clusterdeprovision-controller", mgr, controller.Options{
 		Reconciler:              controllerutils.NewDelayingReconciler(r, log.WithField("controller", ControllerName)),
diff --git a/pkg/controller/clusterpool/clusterdeploymentexpectations.go b/pkg/controller/clusterpool/clusterdeploymentexpectations.go
index 801f257c73d..d879d1589c6 100644
--- a/pkg/controller/clusterpool/clusterdeploymentexpectations.go
+++ b/pkg/controller/clusterpool/clusterdeploymentexpectations.go
@@ -32,15 +32,15 @@ func (r *ReconcileClusterPool) watchClusterDeployments(mgr manager.Manager, c co
 	return c.Watch(source.Kind(mgr.GetCache(), &hivev1.ClusterDeployment{},
 		controllerutils.NewTypedRateLimitedUpdateEventHandler(h, controllerutils.IsClusterDeploymentErrorUpdateEvent)))
 }
 
-var _ handler.TypedEventHandler[*hivev1.ClusterDeployment] = &clusterDeploymentEventHandler{}
+var _ handler.TypedEventHandler[*hivev1.ClusterDeployment, reconcile.Request] = &clusterDeploymentEventHandler{}
 
 type clusterDeploymentEventHandler struct {
-	handler.TypedEventHandler[*hivev1.ClusterDeployment]
+	handler.TypedEventHandler[*hivev1.ClusterDeployment, reconcile.Request]
 	reconciler *ReconcileClusterPool
 }
 
 // Create implements handler.EventHandler
-func (h *clusterDeploymentEventHandler) Create(ctx context.Context, e event.TypedCreateEvent[*hivev1.ClusterDeployment], q workqueue.RateLimitingInterface) {
+func (h *clusterDeploymentEventHandler) Create(ctx context.Context, e event.TypedCreateEvent[*hivev1.ClusterDeployment], q workqueue.TypedRateLimitingInterface[reconcile.Request]) {
 	h.reconciler.logger.Info("ClusterDeployment created")
 	h.trackClusterDeploymentAdd(e.Object)
 	h.TypedEventHandler.Create(context.TODO(), e, q)
diff --git a/pkg/controller/clusterpool/clusterpool_controller.go b/pkg/controller/clusterpool/clusterpool_controller.go
index 667eacc5a1d..0fb03d0dc83 100644
--- a/pkg/controller/clusterpool/clusterpool_controller.go
+++ b/pkg/controller/clusterpool/clusterpool_controller.go
@@ -107,7 +107,7 @@ func indexClusterClaimsByClusterPool(o client.Object) []string {
 }
 
 // AddToManager adds a new Controller to mgr with r as the reconcile.Reconciler
-func AddToManager(mgr manager.Manager, r *ReconcileClusterPool, concurrentReconciles int, rateLimiter workqueue.RateLimiter) error {
+func AddToManager(mgr manager.Manager, r *ReconcileClusterPool, concurrentReconciles int, rateLimiter workqueue.TypedRateLimiter[reconcile.Request]) error {
 	// Create a new controller
 	c, err := controller.New("clusterpool-controller", mgr, controller.Options{
 		Reconciler:              controllerutils.NewDelayingReconciler(r, r.logger),
@@ -187,7 +187,7 @@ func AddToManager(mgr manager.Manager, r *ReconcileClusterPool, concurrentReconc
 	return nil
 }
 
-func requestsForCDCResources(c client.Client, logger log.FieldLogger) handler.TypedMapFunc[*hivev1.ClusterDeploymentCustomization] {
+func requestsForCDCResources(c client.Client, logger log.FieldLogger) handler.TypedMapFunc[*hivev1.ClusterDeploymentCustomization, reconcile.Request] {
 	return func(ctx context.Context, cdc *hivev1.ClusterDeploymentCustomization) []reconcile.Request {
 		cpList := &hivev1.ClusterPoolList{}
 		if err := c.List(context.Background(), cpList, client.InNamespace(cdc.GetNamespace())); err != nil {
@@ -218,7 +218,7 @@ func requestsForCDCResources(c client.Client, logger log.FieldLogger) handler.Ty
 	}
 }
 
-func requestsForCDRBACResources(c client.Client, resourceName string, logger log.FieldLogger) handler.TypedMapFunc[*rbacv1.RoleBinding] {
+func requestsForCDRBACResources(c client.Client, resourceName string, logger log.FieldLogger) handler.TypedMapFunc[*rbacv1.RoleBinding, reconcile.Request] {
 	return func(ctx context.Context, o *rbacv1.RoleBinding) []reconcile.Request {
 		if o.GetName() != resourceName {
 			return nil
@@ -237,7 +237,7 @@ func requestsForCDRBACResources(c client.Client, resourceName string, logger log
 	}
 }
 
-func requestsForRBACResources(c client.Client, logger log.FieldLogger) handler.TypedMapFunc[*rbacv1.RoleBinding] {
+func requestsForRBACResources(c client.Client, logger log.FieldLogger) handler.TypedMapFunc[*rbacv1.RoleBinding, reconcile.Request] {
 	return func(ctx context.Context, binding *rbacv1.RoleBinding) []reconcile.Request {
 		if binding.RoleRef.Kind != "ClusterRole" ||
 			binding.RoleRef.Name != clusterPoolAdminRoleName {
 			return nil
@@ -456,7 +456,7 @@ func (r *ReconcileClusterPool) Reconcile(ctx context.Context, request reconcile.
 	// One more (possible) status update: wait until the end to detect whether all unassigned CDs
 	// are current with the pool config, since assigning or deleting CDs may eliminate the last
 	// one that isn't.
-	if err := setCDsCurrentCondition(r.Client, cds, clp, poolVersion); err != nil {
+	if err := setCDsCurrentCondition(r.Client, cds, clp); err != nil {
 		log.WithError(err).Error("error updating 'ClusterDeployments current' status")
 		return reconcile.Result{}, err
 	}
diff --git a/pkg/controller/clusterpool/collections.go b/pkg/controller/clusterpool/collections.go
index 355df9c6212..ede7d13991a 100644
--- a/pkg/controller/clusterpool/collections.go
+++ b/pkg/controller/clusterpool/collections.go
@@ -1128,7 +1128,7 @@ func (cdcs *invalidCDCCollection) MarshalJSON() ([]byte, error) {
 
 // setCDsCurrentCondition idempotently sets the ClusterDeploymentsCurrent condition on the
 // ClusterPool according to whether all unassigned CDs have the same PoolVersion as the pool.
-func setCDsCurrentCondition(c client.Client, cds *cdCollection, clp *hivev1.ClusterPool, poolVersion string) error {
+func setCDsCurrentCondition(c client.Client, cds *cdCollection, clp *hivev1.ClusterPool) error {
 	var status corev1.ConditionStatus
 	var reason, message string
 	names := func(cdList []*hivev1.ClusterDeployment) string {
diff --git a/pkg/controller/clusterpoolnamespace/clusterpoolnamespace_controller.go b/pkg/controller/clusterpoolnamespace/clusterpoolnamespace_controller.go
index d67d19c6163..7c8409d1def 100644
--- a/pkg/controller/clusterpoolnamespace/clusterpoolnamespace_controller.go
+++ b/pkg/controller/clusterpoolnamespace/clusterpoolnamespace_controller.go
@@ -53,7 +53,7 @@ func NewReconciler(mgr manager.Manager, rateLimiter flowcontrol.RateLimiter) rec
 }
 
 // AddToManager adds a new Controller to mgr with r as the reconcile.Reconciler
-func AddToManager(mgr manager.Manager, r reconcile.Reconciler, concurrentReconciles int, rateLimiter workqueue.RateLimiter) error {
+func AddToManager(mgr manager.Manager, r reconcile.Reconciler, concurrentReconciles int, rateLimiter workqueue.TypedRateLimiter[reconcile.Request]) error {
 	// Create a new controller
 	c, err := controller.New(
 		fmt.Sprintf("%s-controller", ControllerName),
diff --git a/pkg/controller/clusterprovision/clusterprovision_controller.go b/pkg/controller/clusterprovision/clusterprovision_controller.go
index ad43f9717dc..5eff98e11dc 100644
--- a/pkg/controller/clusterprovision/clusterprovision_controller.go
+++ b/pkg/controller/clusterprovision/clusterprovision_controller.go
@@ -70,7 +70,7 @@ func Add(mgr manager.Manager) error {
 	}
 	// Register the metrics. This is done here to ensure we define the metrics with optional label support after we have
 	// read the hiveconfig, and we register them only once.
-	registerMetrics(mConfig, logger)
+	registerMetrics(mConfig)
 	return add(mgr, newReconciler(mgr, clientRateLimiter), concurrentReconciles, queueRateLimiter)
 }
 
@@ -87,7 +87,7 @@ func newReconciler(mgr manager.Manager, rateLimiter flowcontrol.RateLimiter) rec
 }
 
 // add adds a new Controller to mgr with r as the reconcile.Reconciler
-func add(mgr manager.Manager, r reconcile.Reconciler, concurrentReconciles int, rateLimiter workqueue.RateLimiter) error {
+func add(mgr manager.Manager, r reconcile.Reconciler, concurrentReconciles int, rateLimiter workqueue.TypedRateLimiter[reconcile.Request]) error {
 	provisionReconciler, ok := r.(*ReconcileClusterProvision)
 	if !ok {
 		return errors.New("reconciler supplied is not a ReconcileClusterProvision")
@@ -297,7 +297,7 @@ func (r *ReconcileClusterProvision) reconcileRunningJob(instance *hivev1.Cluster
 			pLog.Error("install job completed without completing initialization")
 			return r.transitionStage(instance, hivev1.ClusterProvisionStageFailed, "InitializationNotComplete", "Install job completed without completing initialization", pLog)
 		}
-		return r.reconcileSuccessfulJob(instance, job, pLog)
+		return r.reconcileSuccessfulJob(instance, pLog)
 	case controllerutils.IsFailed(job):
 		return r.reconcileFailedJob(instance, job, pLog)
 	}
@@ -384,7 +384,7 @@ func (r *ReconcileClusterProvision) getInstallPod(job *batchv1.Job, pLog log.Fie
 	}
 }
 
-func (r *ReconcileClusterProvision) reconcileSuccessfulJob(instance *hivev1.ClusterProvision, job *batchv1.Job, pLog log.FieldLogger) (reconcile.Result, error) {
+func (r *ReconcileClusterProvision) reconcileSuccessfulJob(instance *hivev1.ClusterProvision, pLog log.FieldLogger) (reconcile.Result, error) {
 	pLog.Info("install job succeeded")
 	result, err := r.transitionStage(instance, hivev1.ClusterProvisionStageComplete, "InstallComplete", "Install job has completed successfully", pLog)
 	if err == nil {
diff --git a/pkg/controller/clusterprovision/clusterprovision_controller_test.go b/pkg/controller/clusterprovision/clusterprovision_controller_test.go
index 0bb2a71037f..d79bb186a9b 100644
--- a/pkg/controller/clusterprovision/clusterprovision_controller_test.go
+++ b/pkg/controller/clusterprovision/clusterprovision_controller_test.go
@@ -43,7 +43,7 @@ func init() {
 	log.SetLevel(log.DebugLevel)
 	// While the metrics need not be registered for this test suite, they still need to be defined to avoid panics
 	// during the tests
-	registerMetrics(&metricsconfig.MetricsConfig{}, log.WithField("controller", "clusterProvision"))
+	registerMetrics(&metricsconfig.MetricsConfig{})
 }
 
 func TestClusterProvisionReconcile(t *testing.T) {
diff --git a/pkg/controller/clusterprovision/jobexpectations.go b/pkg/controller/clusterprovision/jobexpectations.go
index cd55e42bbd0..7a0934ad989 100644
--- a/pkg/controller/clusterprovision/jobexpectations.go
+++ b/pkg/controller/clusterprovision/jobexpectations.go
@@ -11,6 +11,7 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/event"
 	"sigs.k8s.io/controller-runtime/pkg/handler"
 	"sigs.k8s.io/controller-runtime/pkg/manager"
+	"sigs.k8s.io/controller-runtime/pkg/reconcile"
 	"sigs.k8s.io/controller-runtime/pkg/source"
 
 	hivev1 "github.com/openshift/hive/apis/hive/v1"
@@ -24,22 +25,22 @@ func (r *ReconcileClusterProvision) watchJobs(mgr manager.Manager, c controller.
 	return c.Watch(source.Kind(mgr.GetCache(), &batchv1.Job{}, handler))
 }
 
-var _ handler.TypedEventHandler[*batchv1.Job] = &jobEventHandler{}
+var _ handler.TypedEventHandler[*batchv1.Job, reconcile.Request] = &jobEventHandler{}
 
 type jobEventHandler struct {
-	handler.TypedEventHandler[*batchv1.Job]
+	handler.TypedEventHandler[*batchv1.Job, reconcile.Request]
 	reconciler *ReconcileClusterProvision
 }
 
 // Create implements handler.TypedEventHandler
-func (h *jobEventHandler) Create(ctx context.Context, e event.TypedCreateEvent[*batchv1.Job], q workqueue.RateLimitingInterface) {
+func (h *jobEventHandler) Create(ctx context.Context, e event.TypedCreateEvent[*batchv1.Job], q workqueue.TypedRateLimitingInterface[reconcile.Request]) {
 	h.reconciler.logger.Info("Job created")
 	h.reconciler.trackJobAdd(e.Object)
 	h.TypedEventHandler.Create(ctx, e, q)
 }
 
 // Delete implements handler.TypedEventHandler
-func (h *jobEventHandler) Delete(ctx context.Context, e event.TypedDeleteEvent[*batchv1.Job], q workqueue.RateLimitingInterface) {
+func (h *jobEventHandler) Delete(ctx context.Context, e event.TypedDeleteEvent[*batchv1.Job], q workqueue.TypedRateLimitingInterface[reconcile.Request]) {
 	h.reconciler.logger.Info("Job deleted")
 	h.TypedEventHandler.Delete(ctx, e, q)
 }
diff --git a/pkg/controller/clusterprovision/metrics.go b/pkg/controller/clusterprovision/metrics.go
index 9987c47df05..e4eac8e242e 100644
--- a/pkg/controller/clusterprovision/metrics.go
+++ b/pkg/controller/clusterprovision/metrics.go
@@ -2,7 +2,6 @@ package clusterprovision
 
 import (
 	"github.com/prometheus/client_golang/prometheus"
-	log "github.com/sirupsen/logrus"
 
 	"github.com/openshift/hive/apis/hive/v1/metricsconfig"
 	hivemetrics "github.com/openshift/hive/pkg/controller/metrics"
@@ -18,7 +17,7 @@ var (
 	metricInstallSuccessSeconds hivemetrics.HistogramVecWithDynamicLabels
 )
 
-func registerMetrics(mConfig *metricsconfig.MetricsConfig, log log.FieldLogger) {
+func registerMetrics(mConfig *metricsconfig.MetricsConfig) {
 	mapClusterTypeLabelToValue := hivemetrics.GetOptionalClusterTypeLabels(mConfig)
 
 	metricClusterProvisionsTotal = *hivemetrics.NewCounterVecWithDynamicLabels(
diff --git a/pkg/controller/clusterstate/clusterstate_controller.go b/pkg/controller/clusterstate/clusterstate_controller.go
index 5ca78659d21..898db48a76d 100644
--- a/pkg/controller/clusterstate/clusterstate_controller.go
+++ b/pkg/controller/clusterstate/clusterstate_controller.go
@@ -65,7 +65,7 @@ func NewReconciler(mgr manager.Manager, rateLimiter flowcontrol.RateLimiter) rec
 }
 
 // AddToManager adds a new Controller to mgr with r as the reconcile.Reconciler
-func AddToManager(mgr manager.Manager, r reconcile.Reconciler, concurrentReconciles int, rateLimiter workqueue.RateLimiter) error {
+func AddToManager(mgr manager.Manager, r reconcile.Reconciler, concurrentReconciles int, rateLimiter workqueue.TypedRateLimiter[reconcile.Request]) error {
 	c, err := controller.New("clusterstate-controller", mgr, controller.Options{
 		Reconciler:              controllerutils.NewDelayingReconciler(r, log.WithField("controller", ControllerName)),
 		MaxConcurrentReconciles: concurrentReconciles,
diff --git a/pkg/controller/clustersync/clustersync_controller.go b/pkg/controller/clustersync/clustersync_controller.go
index 20fa5868597..0562a178e37 100644
--- a/pkg/controller/clustersync/clustersync_controller.go
+++ b/pkg/controller/clustersync/clustersync_controller.go
@@ -176,7 +176,7 @@ func resourceHelperBuilderFunc(
 }
 
 // AddToManager adds a new Controller to mgr with r as the reconcile.Reconciler
-func AddToManager(mgr manager.Manager, r *ReconcileClusterSync, concurrentReconciles int, rateLimiter workqueue.RateLimiter) error {
+func AddToManager(mgr manager.Manager, r *ReconcileClusterSync, concurrentReconciles int, rateLimiter workqueue.TypedRateLimiter[reconcile.Request]) error {
 	// Create a new controller
 	c, err := controller.New("clusterSync-controller", mgr, controller.Options{
 		Reconciler:              controllerutils.NewDelayingReconciler(r, r.logger),
@@ -230,7 +230,7 @@ func requestsForSyncSet(ctx context.Context, ss *hivev1.SyncSet) []reconcile.Req
 	return requests
 }
 
-func requestsForSelectorSyncSet(c client.Client, logger log.FieldLogger) handler.TypedMapFunc[*hivev1.SelectorSyncSet] {
+func requestsForSelectorSyncSet(c client.Client, logger log.FieldLogger) handler.TypedMapFunc[*hivev1.SelectorSyncSet, reconcile.Request] {
 	return func(ctx context.Context, sss *hivev1.SelectorSyncSet) []reconcile.Request {
 		logger := logger.WithField("selectorSyncSet", sss.Name)
 		labelSelector, err := metav1.LabelSelectorAsSelector(&sss.Spec.ClusterDeploymentSelector)
@@ -721,7 +721,7 @@ func decodeResources(syncSet CommonSyncSet, cd *hivev1.ClusterDeployment, logger
 		}
 		// Apply templates, if enabled
 		if syncSet.GetSpec().EnableResourceTemplates {
-			if err := processParameters(u, cd, logger); err != nil {
+			if err := processParameters(u, cd); err != nil {
 				logger.WithField("resourceIndex", i).WithError(err).Warn("error parameterizing object")
 				decodeErrors = append(decodeErrors, errors.Wrapf(err, "failed to parameterize resource %d", i))
 				continue
diff --git a/pkg/controller/clustersync/templates.go b/pkg/controller/clustersync/templates.go
index 6e618d87595..05cc830609d 100644
--- a/pkg/controller/clustersync/templates.go
+++ b/pkg/controller/clustersync/templates.go
@@ -6,7 +6,6 @@ import (
 	"text/template"
 
 	"github.com/pkg/errors"
-	log "github.com/sirupsen/logrus"
 
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 
@@ -14,7 +13,7 @@ import (
 )
 
 // processParameters modifies `u`, appling text/template parameters found in string values therein.
-func processParameters(u *unstructured.Unstructured, cd *hivev1.ClusterDeployment, logger log.FieldLogger) error {
+func processParameters(u *unstructured.Unstructured, cd *hivev1.ClusterDeployment) error {
 	resourceParamTemplate := template.New("resourceParams").Funcs(
 		template.FuncMap{
 			"fromCDLabel": fromCDLabel(cd),
diff --git a/pkg/controller/clusterversion/clusterversion_controller.go b/pkg/controller/clusterversion/clusterversion_controller.go
index 1f599323055..6c5b5e51b56 100644
--- a/pkg/controller/clusterversion/clusterversion_controller.go
+++ b/pkg/controller/clusterversion/clusterversion_controller.go
@@ -70,7 +70,7 @@ func Add(mgr manager.Manager) error {
 }
 
 // AddToManager adds a new Controller to mgr with r as the reconcile.Reconciler
-func AddToManager(mgr manager.Manager, r reconcile.Reconciler, concurrentReconciles int, rateLimiter workqueue.RateLimiter) error {
+func AddToManager(mgr manager.Manager, r reconcile.Reconciler, concurrentReconciles int, rateLimiter workqueue.TypedRateLimiter[reconcile.Request]) error {
 	// Create a new controller
 	c, err := controller.New("clusterversion-controller", mgr, controller.Options{
 		Reconciler:              controllerutils.NewDelayingReconciler(r, log.WithField("controller", ControllerName)),
diff --git a/pkg/controller/controlplanecerts/controlplanecerts_controller.go b/pkg/controller/controlplanecerts/controlplanecerts_controller.go
index d9a8440d921..a1ae4c51b67 100644
--- a/pkg/controller/controlplanecerts/controlplanecerts_controller.go
+++ b/pkg/controller/controlplanecerts/controlplanecerts_controller.go
@@ -97,7 +97,7 @@ func NewReconciler(mgr manager.Manager, rateLimiter flowcontrol.RateLimiter) rec
 }
 
 // AddToManager adds a new Controller to mgr with r as the reconcile.Reconciler
-func AddToManager(mgr manager.Manager, r reconcile.Reconciler, concurrentReconciles int, rateLimiter workqueue.RateLimiter) error {
+func AddToManager(mgr manager.Manager, r reconcile.Reconciler, concurrentReconciles int, rateLimiter workqueue.TypedRateLimiter[reconcile.Request]) error {
 	// Create a new controller
 	c, err := controller.New("controlplanecerts-controller", mgr, controller.Options{
 		Reconciler:              controllerutils.NewDelayingReconciler(r, log.WithField("controller", ControllerName)),
@@ -196,7 +196,7 @@ func (r *ReconcileControlPlaneCerts) Reconcile(ctx context.Context, request reco
 	}
 
 	// clear condition if certs were found
-	updated, err := r.setCertsNotFoundCondition(cd, !secretsAvailable, cdLog)
+	updated, err := r.setCertsNotFoundCondition(cd, !secretsAvailable)
 	if err != nil {
 		cdLog.WithError(err).Log(controllerutils.LogLevel(err), "cannot update cluster deployment secrets not found condition")
 		return reconcile.Result{}, err
@@ -395,7 +395,7 @@ func (r *ReconcileControlPlaneCerts) getServingCertificatesJSONPatch(cd *hivev1.
 }
 
-func (r *ReconcileControlPlaneCerts) setCertsNotFoundCondition(cd *hivev1.ClusterDeployment, notFound bool, cdLog log.FieldLogger) (bool, error) {
+func (r *ReconcileControlPlaneCerts) setCertsNotFoundCondition(cd *hivev1.ClusterDeployment, notFound bool) (bool, error) {
 	status := corev1.ConditionFalse
 	reason := certsFoundReason
 	message := certsFoundMessage
diff --git a/pkg/controller/dnszone/dnszone_controller.go b/pkg/controller/dnszone/dnszone_controller.go
index f9f8a5619f9..4f815ebdd0d 100644
--- a/pkg/controller/dnszone/dnszone_controller.go
+++ b/pkg/controller/dnszone/dnszone_controller.go
@@ -87,7 +87,7 @@ func newReconciler(mgr manager.Manager, rateLimiter flowcontrol.RateLimiter) *Re
 }
 
 // add adds a new Controller to mgr with r as the reconcile.Reconciler
-func add(mgr manager.Manager, r *ReconcileDNSZone, concurrentReconciles int, rateLimiter workqueue.RateLimiter) error {
+func add(mgr manager.Manager, r *ReconcileDNSZone, concurrentReconciles int, rateLimiter workqueue.TypedRateLimiter[reconcile.Request]) error {
 	// Create a new controller
 	c, err := controller.New(ControllerName.String(), mgr, controller.Options{
 		Reconciler:              controllerutils.NewDelayingReconciler(r, r.logger),
diff --git a/pkg/controller/fakeclusterinstall/fakeclusterinstall_controller.go b/pkg/controller/fakeclusterinstall/fakeclusterinstall_controller.go
index 8935cda8c4a..bff316070c7 100644
--- a/pkg/controller/fakeclusterinstall/fakeclusterinstall_controller.go
+++ b/pkg/controller/fakeclusterinstall/fakeclusterinstall_controller.go
@@ -55,7 +55,7 @@ func NewReconciler(mgr manager.Manager, rateLimiter flowcontrol.RateLimiter) rec
 }
 
 // AddToManager adds a new Controller to mgr with r as the reconcile.Reconciler
-func AddToManager(mgr manager.Manager, r reconcile.Reconciler, concurrentReconciles int, rateLimiter workqueue.RateLimiter) error {
+func AddToManager(mgr manager.Manager, r reconcile.Reconciler, concurrentReconciles int, rateLimiter workqueue.TypedRateLimiter[reconcile.Request]) error {
 	c, err := controller.New("fakeclusterinstall-controller", mgr, controller.Options{
 		Reconciler:              controllerutils.NewDelayingReconciler(r, log.WithField("controller", ControllerName)),
 		MaxConcurrentReconciles: concurrentReconciles,
diff --git a/pkg/controller/hibernation/hibernation_controller.go b/pkg/controller/hibernation/hibernation_controller.go
index aa5c5cbf86b..d9964117e39 100644
--- a/pkg/controller/hibernation/hibernation_controller.go
+++ b/pkg/controller/hibernation/hibernation_controller.go
@@ -127,7 +127,7 @@ func NewReconciler(mgr manager.Manager, rateLimiter flowcontrol.RateLimiter) *hi
 }
 
 // AddToManager adds a new Controller to the controller manager
-func AddToManager(mgr manager.Manager, r *hibernationReconciler, concurrentReconciles int, rateLimiter workqueue.RateLimiter) error {
+func AddToManager(mgr manager.Manager, r *hibernationReconciler, concurrentReconciles int, rateLimiter workqueue.TypedRateLimiter[reconcile.Request]) error {
 	c, err := controller.New("hibernation-controller", mgr, controller.Options{
 		Reconciler:              controllerutils.NewDelayingReconciler(r, log.WithField("controller", ControllerName)),
 		MaxConcurrentReconciles: concurrentReconciles,
@@ -366,7 +366,7 @@ func (r *hibernationReconciler) Reconcile(ctx context.Context, request reconcile
 		if shouldStopMachines(cd, hibernatingCondition) {
 			return r.stopMachines(cd, cdLog)
 		}
-		return r.checkClusterStopped(cd, false, cdLog)
+		return r.checkClusterStopped(cd, cdLog)
 	}
 
 	// If we get here, we're not supposed to be hibernating
 	if isFakeCluster {
@@ -449,7 +449,7 @@ func (r *hibernationReconciler) stopMachines(cd *hivev1.ClusterDeployment, logge
 	return reconcile.Result{}, err
 }
 
-func (r *hibernationReconciler) checkClusterStopped(cd *hivev1.ClusterDeployment, expectRunning bool, logger log.FieldLogger) (reconcile.Result, error) {
+func (r *hibernationReconciler) checkClusterStopped(cd *hivev1.ClusterDeployment, logger log.FieldLogger) (reconcile.Result, error) {
 	actuator := r.getActuator(cd)
 	if actuator == nil {
 		logger.Warning("No compatible actuator found to check machine status")
diff --git a/pkg/controller/machinepool/leaseexceptions.go b/pkg/controller/machinepool/leaseexceptions.go
index 77c2f622b2f..d0e66acd41a 100644
--- a/pkg/controller/machinepool/leaseexceptions.go
+++ b/pkg/controller/machinepool/leaseexceptions.go
@@ -29,22 +29,22 @@ func (r *ReconcileMachinePool) watchMachinePoolNameLeases(mgr manager.Manager, c
 	return c.Watch(source.Kind(mgr.GetCache(), &hivev1.MachinePoolNameLease{}, h))
 }
 
-var _ handler.TypedEventHandler[*hivev1.MachinePoolNameLease] = &machinePoolNameLeaseEventHandler{}
+var _ handler.TypedEventHandler[*hivev1.MachinePoolNameLease, reconcile.Request] = &machinePoolNameLeaseEventHandler{}
 
 type machinePoolNameLeaseEventHandler struct {
-	handler.TypedEventHandler[*hivev1.MachinePoolNameLease]
+	handler.TypedEventHandler[*hivev1.MachinePoolNameLease, reconcile.Request]
 	reconciler *ReconcileMachinePool
 }
 
 // Create implements handler.EventHandler
-func (h *machinePoolNameLeaseEventHandler) Create(ctx context.Context, e event.TypedCreateEvent[*hivev1.MachinePoolNameLease], q workqueue.RateLimitingInterface) {
+func (h *machinePoolNameLeaseEventHandler) Create(ctx context.Context, e event.TypedCreateEvent[*hivev1.MachinePoolNameLease], q workqueue.TypedRateLimitingInterface[reconcile.Request]) {
 	h.reconciler.logger.Info("running Create handler for MachinePoolNameLease")
 	h.reconciler.trackLeaseAdd(e.Object)
 	h.TypedEventHandler.Create(ctx, e, q)
 }
 
 // Delete implements handler.EventHandler
-func (h *machinePoolNameLeaseEventHandler) Delete(ctx context.Context, e event.TypedDeleteEvent[*hivev1.MachinePoolNameLease], q workqueue.RateLimitingInterface) {
+func (h *machinePoolNameLeaseEventHandler) Delete(ctx context.Context, e event.TypedDeleteEvent[*hivev1.MachinePoolNameLease], q workqueue.TypedRateLimitingInterface[reconcile.Request]) {
 	logger := h.reconciler.logger
 	logger.Info("running Delete handler for MachinePoolNameLease, requeuing all pools for cluster")
 	// FIXME Handle deletion for non MachinePoolNameLease event
diff --git a/pkg/controller/privatelink/privatelink_controller.go b/pkg/controller/privatelink/privatelink_controller.go
index b6d91cfdffa..8644039a43d 100644
--- a/pkg/controller/privatelink/privatelink_controller.go
+++ b/pkg/controller/privatelink/privatelink_controller.go
@@ -78,7 +78,7 @@ func NewReconciler(mgr manager.Manager, rateLimiter flowcontrol.RateLimiter) (*P
 }
 
 // AddToManager adds a new Controller to mgr with r as the reconcile.Reconciler
-func AddToManager(mgr manager.Manager, r *PrivateLinkReconciler, concurrentReconciles int, rateLimiter workqueue.RateLimiter) error {
+func AddToManager(mgr manager.Manager, r *PrivateLinkReconciler, concurrentReconciles int, rateLimiter workqueue.TypedRateLimiter[reconcile.Request]) error {
 	logger := log.WithField("controller", ControllerName)
 	// Create a new controller
 	c, err := controller.New(string(ControllerName+"-controller"), mgr, controller.Options{
diff --git a/pkg/controller/remoteingress/remoteingress_controller.go b/pkg/controller/remoteingress/remoteingress_controller.go
diff --git a/pkg/controller/privatelink/privatelink_controller.go b/pkg/controller/privatelink/privatelink_controller.go
index b6d91cfdffa..8644039a43d 100644
--- a/pkg/controller/privatelink/privatelink_controller.go
+++ b/pkg/controller/privatelink/privatelink_controller.go
@@ -78,7 +78,7 @@ func NewReconciler(mgr manager.Manager, rateLimiter flowcontrol.RateLimiter) (*P
 }
 
 // AddToManager adds a new Controller to mgr with r as the reconcile.Reconciler
-func AddToManager(mgr manager.Manager, r *PrivateLinkReconciler, concurrentReconciles int, rateLimiter workqueue.RateLimiter) error {
+func AddToManager(mgr manager.Manager, r *PrivateLinkReconciler, concurrentReconciles int, rateLimiter workqueue.TypedRateLimiter[reconcile.Request]) error {
 	logger := log.WithField("controller", ControllerName)
 	// Create a new controller
 	c, err := controller.New(string(ControllerName+"-controller"), mgr, controller.Options{
diff --git a/pkg/controller/remoteingress/remoteingress_controller.go b/pkg/controller/remoteingress/remoteingress_controller.go
index e425be9eaaa..2e5b8b709c6 100644
--- a/pkg/controller/remoteingress/remoteingress_controller.go
+++ b/pkg/controller/remoteingress/remoteingress_controller.go
@@ -97,7 +97,7 @@ func NewReconciler(mgr manager.Manager, rateLimiter flowcontrol.RateLimiter) rec
 }
 
 // AddToManager adds a new Controller to mgr with r as the reconcile.Reconciler
-func AddToManager(mgr manager.Manager, r reconcile.Reconciler, concurrentReconciles int, rateLimiter workqueue.RateLimiter) error {
+func AddToManager(mgr manager.Manager, r reconcile.Reconciler, concurrentReconciles int, rateLimiter workqueue.TypedRateLimiter[reconcile.Request]) error {
 	// Create a new controller
 	c, err := controller.New("remoteingress-controller", mgr, controller.Options{
 		Reconciler: controllerutils.NewDelayingReconciler(r, log.WithField("controller", ControllerName)),
@@ -251,7 +251,7 @@ func rawExtensionsFromClusterDeployment(rContext *reconcileContext) []runtime.Ra
 	rawList := []runtime.RawExtension{}
 
 	for _, ingress := range rContext.clusterDeployment.Spec.Ingress {
-		ingressObj := createIngressController(rContext.clusterDeployment, ingress, rContext.certBundleSecrets)
+		ingressObj := createIngressController(rContext.clusterDeployment, ingress)
 		raw := runtime.RawExtension{Object: ingressObj}
 		rawList = append(rawList, raw)
 	}
@@ -333,7 +333,7 @@ func (r *ReconcileRemoteClusterIngress) syncSyncSet(rContext *reconcileContext,
 // createIngressController will return an ingressController based on a clusterDeployment's
 // spec.Ingress object
-func createIngressController(cd *hivev1.ClusterDeployment, ingress hivev1.ClusterIngress, secrets []*corev1.Secret) *ingresscontroller.IngressController {
+func createIngressController(cd *hivev1.ClusterDeployment, ingress hivev1.ClusterIngress) *ingresscontroller.IngressController {
 	newIngress := ingresscontroller.IngressController{
 		TypeMeta: metav1.TypeMeta{
 			Kind: "IngressController",
diff --git a/pkg/controller/syncidentityprovider/syncidentityprovider_controller.go b/pkg/controller/syncidentityprovider/syncidentityprovider_controller.go
index eb7036608db..a5946a588e5 100644
--- a/pkg/controller/syncidentityprovider/syncidentityprovider_controller.go
+++ b/pkg/controller/syncidentityprovider/syncidentityprovider_controller.go
@@ -67,7 +67,7 @@ func NewReconciler(mgr manager.Manager, rateLimiter flowcontrol.RateLimiter) rec
 }
 
 // AddToManager adds a new Controller to mgr with r as the reconcile.Reconciler
-func AddToManager(mgr manager.Manager, r reconcile.Reconciler, concurrentReconciles int, rateLimiter workqueue.RateLimiter) error {
+func AddToManager(mgr manager.Manager, r reconcile.Reconciler, concurrentReconciles int, rateLimiter workqueue.TypedRateLimiter[reconcile.Request]) error {
 	// Create a new controller
 	c, err := controller.New(ControllerName.String()+"-controller", mgr, controller.Options{
 		Reconciler: controllerutils.NewDelayingReconciler(r, log.WithField("controller", ControllerName)),
diff --git a/pkg/controller/syncidentityprovider/syncidentityprovider_controller_test.go b/pkg/controller/syncidentityprovider/syncidentityprovider_controller_test.go
index 3ff9dc4aa7e..d543a18a174 100644
--- a/pkg/controller/syncidentityprovider/syncidentityprovider_controller_test.go
+++ b/pkg/controller/syncidentityprovider/syncidentityprovider_controller_test.go
@@ -477,7 +477,7 @@ func TestReconcile(t *testing.T) {
 			assert.Equal(t, test.expectedResult.RequeueAfter, result.RequeueAfter)
 			assert.Equal(t, test.expectedError, err)
 			assert.Nil(t, ssListErr)
-			assert.True(t, areSyncSetSpecsEqual(t, &test.expectedSyncSetList, ssList))
+			assert.True(t, areSyncSetSpecsEqual(&test.expectedSyncSetList, ssList))
 			assertSyncSetLabelsCorrect(t, ssList)
 		})
 	}
@@ -491,7 +491,7 @@ func assertSyncSetLabelsCorrect(t *testing.T, actual *hivev1.SyncSetList) {
 	}
 }
 
-func areSyncSetSpecsEqual(t *testing.T, expected, actual *hivev1.SyncSetList) bool {
+func areSyncSetSpecsEqual(expected, actual *hivev1.SyncSetList) bool {
 	if len(expected.Items) != len(actual.Items) {
 		// They aren't the same size, they can't be the same.
 		return false
diff --git a/pkg/controller/unreachable/unreachable_controller.go b/pkg/controller/unreachable/unreachable_controller.go
index 8c546348d9b..51504dd5d65 100644
--- a/pkg/controller/unreachable/unreachable_controller.go
+++ b/pkg/controller/unreachable/unreachable_controller.go
@@ -86,7 +86,7 @@ func NewReconciler(mgr manager.Manager, rateLimiter flowcontrol.RateLimiter) rec
 }
 
 // AddToManager adds a new Controller to mgr with r as the reconcile.Reconciler
-func AddToManager(mgr manager.Manager, r reconcile.Reconciler, concurrentReconciles int, rateLimiter workqueue.RateLimiter) error {
+func AddToManager(mgr manager.Manager, r reconcile.Reconciler, concurrentReconciles int, rateLimiter workqueue.TypedRateLimiter[reconcile.Request]) error {
 	// Create a new controller
 	c, err := controller.New("unreachable-controller", mgr, controller.Options{
 		Reconciler: controllerutils.NewDelayingReconciler(r, log.WithField("controller", ControllerName)),
diff --git a/pkg/controller/utils/dnszone.go b/pkg/controller/utils/dnszone.go
index e6d672ce262..7520e35e6c1 100644
--- a/pkg/controller/utils/dnszone.go
+++ b/pkg/controller/utils/dnszone.go
@@ -14,7 +14,7 @@ import (
 	"github.com/openshift/hive/pkg/constants"
 )
 
-func EnqueueDNSZonesOwnedByClusterDeployment(c client.Client, logger log.FieldLogger) handler.TypedEventHandler[*hivev1.ClusterDeployment] {
+func EnqueueDNSZonesOwnedByClusterDeployment(c client.Client, logger log.FieldLogger) handler.TypedEventHandler[*hivev1.ClusterDeployment, reconcile.Request] {
 	return handler.TypedEnqueueRequestsFromMapFunc(func(ctx context.Context, mapObj *hivev1.ClusterDeployment) []reconcile.Request {
 		dnsZones := &hivev1.DNSZoneList{}
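Note: handler.TypedEnqueueRequestsFromMapFunc infers both the object and the request type from the map function, so the returned handler now carries reconcile.Request in its type, as in the dnszone.go hunk above. A minimal sketch against a core type; the mapper below is hypothetical:

    package example

    import (
    	"context"

    	corev1 "k8s.io/api/core/v1"
    	"k8s.io/apimachinery/pkg/types"
    	"sigs.k8s.io/controller-runtime/pkg/handler"
    	"sigs.k8s.io/controller-runtime/pkg/reconcile"
    )

    // enqueueOwnSecret is a mapper in the same shape as
    // EnqueueDNSZonesOwnedByClusterDeployment: both type arguments are
    // inferred from the map function's signature.
    func enqueueOwnSecret() handler.TypedEventHandler[*corev1.Secret, reconcile.Request] {
    	return handler.TypedEnqueueRequestsFromMapFunc(func(ctx context.Context, s *corev1.Secret) []reconcile.Request {
    		return []reconcile.Request{{NamespacedName: types.NamespacedName{Namespace: s.Namespace, Name: s.Name}}}
    	})
    }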
diff --git a/pkg/controller/utils/logtagger.go b/pkg/controller/utils/logtagger.go
index 7c3b85e8010..21ad0020f9b 100644
--- a/pkg/controller/utils/logtagger.go
+++ b/pkg/controller/utils/logtagger.go
@@ -79,7 +79,7 @@ func AddLogFields[O AdditionalLogFieldHavinThing](obj O, logger *log.Entry) *log
 		logger.WithError(err).Warning("failed to extract additional log fields -- ignoring")
 	case kvmap == nil:
 		logger.Debug("no additional log fields found")
-	case kvmap != nil:
+	default:
 		logger = logger.WithFields(log.Fields(kvmap))
 	}
 	return logger
diff --git a/pkg/controller/utils/ratelimitedeventhandler.go b/pkg/controller/utils/ratelimitedeventhandler.go
index df1963f1aa3..36b3be21fe0 100644
--- a/pkg/controller/utils/ratelimitedeventhandler.go
+++ b/pkg/controller/utils/ratelimitedeventhandler.go
@@ -7,6 +7,7 @@ import (
 	"k8s.io/client-go/util/workqueue"
 	"sigs.k8s.io/controller-runtime/pkg/event"
 	"sigs.k8s.io/controller-runtime/pkg/handler"
+	"sigs.k8s.io/controller-runtime/pkg/reconcile"
 )
 
 // NewRateLimitedUpdateEventHandler wraps the specified event handler inside a new
@@ -31,7 +32,7 @@ type rateLimitedUpdateEventHandler struct {
 
 var _ handler.EventHandler = &rateLimitedUpdateEventHandler{}
 
 // Update implements handler.EventHandler
-func (h *rateLimitedUpdateEventHandler) Update(ctx context.Context, e event.UpdateEvent, q workqueue.RateLimitingInterface) {
+func (h *rateLimitedUpdateEventHandler) Update(ctx context.Context, e event.UpdateEvent, q workqueue.TypedRateLimitingInterface[reconcile.Request]) {
 	nq := q
 	if h.shouldRateLimit(e) {
 		nq = &rateLimitedAddQueue{q}
@@ -42,21 +43,21 @@ func (h *rateLimitedUpdateEventHandler) Update(ctx context.Context, e event.Upda
 
 // rateLimitedAddQueue add queue wraps RateLimitingInterface queue
 // such that the Add call also becomes rate limited.
 type rateLimitedAddQueue struct {
-	workqueue.RateLimitingInterface
+	workqueue.TypedRateLimitingInterface[reconcile.Request]
 }
 
-var _ workqueue.RateLimitingInterface = &rateLimitedAddQueue{}
+var _ workqueue.TypedRateLimitingInterface[reconcile.Request] = &rateLimitedAddQueue{}
 
 // Add implements workqueue.Interface
-func (q *rateLimitedAddQueue) Add(item interface{}) {
-	q.RateLimitingInterface.AddRateLimited(item)
+func (q *rateLimitedAddQueue) Add(item reconcile.Request) {
+	q.TypedRateLimitingInterface.AddRateLimited(item)
 }
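Note: with the embedded queue now typed, the Add override takes a concrete reconcile.Request instead of interface{}. A minimal sketch of the same wrapper idea in isolation (rateLimitedAddQueue itself is unexported, so the sketch redeclares it; the constructor mirrors the typed queue construction used in the test file below):

    package example

    import (
    	"k8s.io/client-go/util/workqueue"
    	"sigs.k8s.io/controller-runtime/pkg/reconcile"
    )

    // rateLimitedAdd routes plain Add calls through the underlying queue's
    // rate limiter, which is all rateLimitedAddQueue does.
    type rateLimitedAdd struct {
    	workqueue.TypedRateLimitingInterface[reconcile.Request]
    }

    func (q *rateLimitedAdd) Add(item reconcile.Request) {
    	q.TypedRateLimitingInterface.AddRateLimited(item)
    }

    func wrapQueue(q workqueue.TypedRateLimitingInterface[reconcile.Request]) workqueue.TypedRateLimitingInterface[reconcile.Request] {
    	return &rateLimitedAdd{q}
    }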
 
 // NewTypedRateLimitedUpdateEventHandler wraps the specified typed event handler inside a new
 // event handler that will rate limit the incoming UPDATE events when the provided
 // shouldRateLimit function returns true.
-func NewTypedRateLimitedUpdateEventHandler[T runtime.Object](typedEventHandler handler.TypedEventHandler[T], shouldRateLimitFunc func(event.UpdateEvent) bool) handler.TypedEventHandler[T] {
-	return &typedRateLimitedUpdateEventHandler[T]{
+func NewTypedRateLimitedUpdateEventHandler[T runtime.Object, C comparable](typedEventHandler handler.TypedEventHandler[T, C], shouldRateLimitFunc func(event.UpdateEvent) bool) handler.TypedEventHandler[T, C] {
+	return &typedRateLimitedUpdateEventHandler[T, C]{
 		TypedEventHandler: typedEventHandler,
 		shouldRateLimit: shouldRateLimitFunc,
 	}
@@ -65,8 +66,8 @@ func NewTypedRateLimitedUpdateEventHandler[T runtime.Object](typedEventHandler h
 // typedRateLimitedUpdateEventHandler wraps the specified typed event handler such
 // that it will rate limit the incoming UPDATE events when the provided
 // shouldRateLimit function returns true.
-type typedRateLimitedUpdateEventHandler[T runtime.Object] struct {
-	handler.TypedEventHandler[T]
+type typedRateLimitedUpdateEventHandler[T runtime.Object, C comparable] struct {
+	handler.TypedEventHandler[T, C]
 
 	shouldRateLimit func(event.UpdateEvent) bool
 }
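Note: callers of the generic constructor now supply the request type as well as the object type; spelling out both type arguments keeps the instantiation unambiguous. A hypothetical call site, sketched as if it lived in the same utils package:

    // newAlwaysLimitedHandler instantiates the wrapper with both type
    // arguments explicit; the wrapped base handler enqueues a
    // reconcile.Request for the object itself.
    func newAlwaysLimitedHandler() handler.TypedEventHandler[*corev1.ConfigMap, reconcile.Request] {
    	return NewTypedRateLimitedUpdateEventHandler[*corev1.ConfigMap, reconcile.Request](
    		&handler.TypedEnqueueRequestForObject[*corev1.ConfigMap]{},
    		func(event.UpdateEvent) bool { return true },
    	)
    }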
diff --git a/pkg/controller/utils/ratelimitedeventhandler_test.go b/pkg/controller/utils/ratelimitedeventhandler_test.go
index b4ba5370b1d..0747fb5344d 100644
--- a/pkg/controller/utils/ratelimitedeventhandler_test.go
+++ b/pkg/controller/utils/ratelimitedeventhandler_test.go
@@ -2,7 +2,6 @@ package utils
 
 import (
 	"context"
-	"fmt"
 	"testing"
 
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -21,7 +20,7 @@ func TestRateLimitedEventHandler(t *testing.T) {
 	o := &hivev1.DNSZone{ObjectMeta: metav1.ObjectMeta{Namespace: "test-ns", Name: "test-name"}}
 
 	// always not rate limited
-	q := &trackedQueue{RateLimitingInterface: workqueue.NewRateLimitingQueue(workqueue.DefaultItemBasedRateLimiter())}
+	q := &trackedQueue{TypedRateLimitingInterface: workqueue.NewTypedRateLimitingQueue(workqueue.DefaultTypedItemBasedRateLimiter[reconcile.Request]())}
 	h := NewRateLimitedUpdateEventHandler(&handler.EnqueueRequestForObject{}, func(_ event.UpdateEvent) bool { return false })
 	h.Update(context.TODO(), event.UpdateEvent{ObjectOld: o, ObjectNew: o}, q)
@@ -29,7 +28,7 @@ func TestRateLimitedEventHandler(t *testing.T) {
 	require.Equal(t, 0, len(q.ratelimitAdded))
 
 	// always rate limited
-	q = &trackedQueue{RateLimitingInterface: workqueue.NewRateLimitingQueue(workqueue.DefaultItemBasedRateLimiter())}
+	q = &trackedQueue{TypedRateLimitingInterface: workqueue.NewTypedRateLimitingQueue(workqueue.DefaultTypedItemBasedRateLimiter[reconcile.Request]())}
 	h = NewRateLimitedUpdateEventHandler(&handler.EnqueueRequestForObject{}, func(_ event.UpdateEvent) bool { return true })
 	h.Update(context.TODO(), event.UpdateEvent{ObjectOld: o, ObjectNew: o}, q)
@@ -37,7 +36,7 @@ func TestRateLimitedEventHandler(t *testing.T) {
 	require.Equal(t, 1, len(q.ratelimitAdded))
 
 	// always rate limited not UPDATE
-	q = &trackedQueue{RateLimitingInterface: workqueue.NewRateLimitingQueue(workqueue.DefaultItemBasedRateLimiter())}
+	q = &trackedQueue{TypedRateLimitingInterface: workqueue.NewTypedRateLimitingQueue(workqueue.DefaultTypedItemBasedRateLimiter[reconcile.Request]())}
 	h = NewRateLimitedUpdateEventHandler(&handler.EnqueueRequestForObject{}, func(_ event.UpdateEvent) bool { return true })
 	h.Generic(context.TODO(), event.GenericEvent{Object: o}, q)
@@ -45,7 +44,7 @@ func TestRateLimitedEventHandler(t *testing.T) {
 	require.Equal(t, 0, len(q.ratelimitAdded))
 
 	// always rate limited with complex handler
-	q = &trackedQueue{RateLimitingInterface: workqueue.NewRateLimitingQueue(workqueue.DefaultItemBasedRateLimiter())}
+	q = &trackedQueue{TypedRateLimitingInterface: workqueue.NewTypedRateLimitingQueue(workqueue.DefaultTypedItemBasedRateLimiter[reconcile.Request]())}
 	h = NewRateLimitedUpdateEventHandler(handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, _ client.Object) []reconcile.Request {
 		return []reconcile.Request{{
 			NamespacedName: types.NamespacedName{
@@ -72,22 +71,22 @@ func TestRateLimitedEventHandler(t *testing.T) {
 }
 
 type trackedQueue struct {
-	workqueue.RateLimitingInterface
+	workqueue.TypedRateLimitingInterface[reconcile.Request]
 
 	added []string
 	ratelimitAdded []string
 }
 
-var _ workqueue.RateLimitingInterface = &trackedQueue{}
+var _ workqueue.TypedRateLimitingInterface[reconcile.Request] = &trackedQueue{}
 
 // Add implements workqueue.Interface
-func (q *trackedQueue) Add(item interface{}) {
-	q.added = append(q.added, fmt.Sprintf("%s", item))
-	q.RateLimitingInterface.Add(item)
+func (q *trackedQueue) Add(item reconcile.Request) {
+	q.added = append(q.added, item.String())
+	q.TypedRateLimitingInterface.Add(item)
 }
 
 // AddRateLimited implements workqueue.RateLimitingInterface
-func (q *trackedQueue) AddRateLimited(item interface{}) {
-	q.ratelimitAdded = append(q.ratelimitAdded, fmt.Sprintf("%s", item))
-	q.RateLimitingInterface.AddRateLimited(item)
+func (q *trackedQueue) AddRateLimited(item reconcile.Request) {
+	q.ratelimitAdded = append(q.ratelimitAdded, item.String())
+	q.TypedRateLimitingInterface.AddRateLimited(item)
 }
diff --git a/pkg/controller/utils/utils.go b/pkg/controller/utils/utils.go
index 079aef156fe..28a425737d1 100644
--- a/pkg/controller/utils/utils.go
+++ b/pkg/controller/utils/utils.go
@@ -129,7 +129,7 @@ func getClientRateLimiter(controllerName hivev1.ControllerName) (flowcontrol.Rat
 }
 
 // getQueueRateLimiter returns the workqueue rate limiter for the controller
-func getQueueRateLimiter(controllerName hivev1.ControllerName) (workqueue.RateLimiter, error) {
+func getQueueRateLimiter(controllerName hivev1.ControllerName) (workqueue.TypedRateLimiter[reconcile.Request], error) {
 	var err error
 	qps := defaultQueueQPS
 	if value, ok := getValueFromEnvVariable(controllerName, QueueQPSEnvVariableFormat); ok {
@@ -147,13 +147,13 @@ func getQueueRateLimiter(controllerName hivev1.ControllerName) (workqueue.RateLi
 		}
 	}
 
-	return workqueue.NewMaxOfRateLimiter(
-		workqueue.NewItemExponentialFailureRateLimiter(5*time.Millisecond, 1000*time.Second),
-		&workqueue.BucketRateLimiter{Limiter: rate.NewLimiter(rate.Limit(qps), burst)},
+	return workqueue.NewTypedMaxOfRateLimiter(
+		workqueue.NewTypedItemExponentialFailureRateLimiter[reconcile.Request](5*time.Millisecond, 1000*time.Second),
+		&workqueue.TypedBucketRateLimiter[reconcile.Request]{Limiter: rate.NewLimiter(rate.Limit(qps), burst)},
 	), nil
 }
 
-func GetControllerConfig(client client.Client, controllerName hivev1.ControllerName) (int, flowcontrol.RateLimiter, workqueue.RateLimiter, error) {
+func GetControllerConfig(client client.Client, controllerName hivev1.ControllerName) (int, flowcontrol.RateLimiter, workqueue.TypedRateLimiter[reconcile.Request], error) {
 	concurrentReconciles, err := getConcurrentReconciles(controllerName)
 	if err != nil {
 		return 0, nil, nil, err
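Note: the composition in getQueueRateLimiter carries over one-for-one; only the constructors gained type parameters. A standalone sketch with the qps/burst knobs fixed for illustration (per-item exponential backoff capped at 1000s, plus an overall 10 QPS / burst 100 token bucket; the longer of the two delays wins):

    package example

    import (
    	"time"

    	"golang.org/x/time/rate"
    	"k8s.io/client-go/util/workqueue"
    	"sigs.k8s.io/controller-runtime/pkg/reconcile"
    )

    // newQueueRateLimiter mirrors the patched getQueueRateLimiter with
    // hard-coded numbers in place of the env-derived qps and burst.
    func newQueueRateLimiter() workqueue.TypedRateLimiter[reconcile.Request] {
    	return workqueue.NewTypedMaxOfRateLimiter(
    		workqueue.NewTypedItemExponentialFailureRateLimiter[reconcile.Request](5*time.Millisecond, 1000*time.Second),
    		&workqueue.TypedBucketRateLimiter[reconcile.Request]{Limiter: rate.NewLimiter(rate.Limit(10), 100)},
    	)
    }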
diff --git a/pkg/controller/velerobackup/velerobackup_controller.go b/pkg/controller/velerobackup/velerobackup_controller.go
index 2d60cea87fd..a2400f7c60e 100644
--- a/pkg/controller/velerobackup/velerobackup_controller.go
+++ b/pkg/controller/velerobackup/velerobackup_controller.go
@@ -125,7 +125,7 @@ func NewReconciler(mgr manager.Manager, rateLimiter flowcontrol.RateLimiter) (re
 }
 
 // AddToManager adds a new Controller to mgr with r as the reconcile.Reconciler
-func AddToManager(mgr manager.Manager, r reconcile.Reconciler, concurrentReconciles int, rateLimiter workqueue.RateLimiter) error {
+func AddToManager(mgr manager.Manager, r reconcile.Reconciler, concurrentReconciles int, rateLimiter workqueue.TypedRateLimiter[reconcile.Request]) error {
 	// Create a new controller
 	c, err := controller.New(ControllerName.String()+"-controller", mgr, controller.Options{
 		Reconciler: controllerutils.NewDelayingReconciler(r, log.WithField("controller", ControllerName)),
diff --git a/pkg/operator/hive/hive_controller.go b/pkg/operator/hive/hive_controller.go
index e8c9c6f8d8b..5a32ca9661c 100644
--- a/pkg/operator/hive/hive_controller.go
+++ b/pkg/operator/hive/hive_controller.go
@@ -628,11 +628,11 @@ func (r *ReconcileHiveConfig) establishSecretWatch(hLog *log.Entry, hiveNSName s
 		Informer: secretsInformer,
 	}
 	src.Handler = handler.Funcs{
-		CreateFunc: func(ctx context.Context, e event.CreateEvent, q workqueue.RateLimitingInterface) {
+		CreateFunc: func(ctx context.Context, e event.CreateEvent, q workqueue.TypedRateLimitingInterface[reconcile.Request]) {
			hLog.Debug("eventHandler CreateFunc")
 			q.Add(reconcile.Request{NamespacedName: types.NamespacedName{Name: constants.HiveConfigName}})
 		},
-		UpdateFunc: func(ctx context.Context, e event.UpdateEvent, q workqueue.RateLimitingInterface) {
+		UpdateFunc: func(ctx context.Context, e event.UpdateEvent, q workqueue.TypedRateLimitingInterface[reconcile.Request]) {
 			hLog.Debug("eventHandler UpdateFunc")
 			q.Add(reconcile.Request{NamespacedName: types.NamespacedName{Name: constants.HiveConfigName}})
 		},
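Note: under the controller-runtime version this patch targets, handler.Funcs is an alias for the typed handler.TypedFuncs[client.Object, reconcile.Request], so its callbacks receive a typed queue and q.Add takes a reconcile.Request directly rather than an interface{}. A minimal standalone sketch of the same enqueue-a-singleton pattern; the "hive-config" name is a placeholder:

    package example

    import (
    	"context"

    	"k8s.io/apimachinery/pkg/types"
    	"k8s.io/client-go/util/workqueue"
    	"sigs.k8s.io/controller-runtime/pkg/event"
    	"sigs.k8s.io/controller-runtime/pkg/handler"
    	"sigs.k8s.io/controller-runtime/pkg/reconcile"
    )

    // singletonEnqueuer maps every create event to one fixed request,
    // mirroring the establishSecretWatch callbacks above.
    func singletonEnqueuer() handler.Funcs {
    	return handler.Funcs{
    		CreateFunc: func(ctx context.Context, e event.CreateEvent, q workqueue.TypedRateLimitingInterface[reconcile.Request]) {
    			q.Add(reconcile.Request{NamespacedName: types.NamespacedName{Name: "hive-config"}})
    		},
    	}
    }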