
Commit e302364

Merge pull request #1290 from slashpai/remove_ctx_struct
Remove context field from structs
2 parents 136dcf2 + d3bc73d commit e302364

37 files changed (+831 / -744 lines)

pkg/client/client.go

Lines changed: 194 additions & 196 deletions
Large diffs are not rendered by default.

pkg/client/client_test.go

Lines changed: 57 additions & 57 deletions
Large diffs are not rendered by default.
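
The client.go and client_test.go diffs are collapsed above, but they carry the same pattern as the rest of the PR: the stored context.Context field is dropped and every method takes a ctx argument instead. A minimal sketch of that pattern, assuming client-go's typed clientset; the oldClient/newClient types and the GetConfigMap method are illustrative, not the actual pkg/client API:

```go
package clientsketch

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// Before: the context was captured once at construction time and silently
// reused for every API call the client ever made.
type oldClient struct {
	ctx        context.Context
	kubeClient kubernetes.Interface
}

// After: the client only holds long-lived dependencies; each call receives
// its own context, so callers control cancellation and deadlines per request.
type newClient struct {
	kubeClient kubernetes.Interface
}

func (c *newClient) GetConfigMap(ctx context.Context, namespace, name string) (*corev1.ConfigMap, error) {
	return c.kubeClient.CoreV1().ConfigMaps(namespace).Get(ctx, name, metav1.GetOptions{})
}
```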

pkg/client/status_reporter.go

Lines changed: 19 additions & 21 deletions
```diff
@@ -27,24 +27,22 @@ import (
 )
 
 const (
-	unavailableMessage string = "Rollout of the monitoring stack failed and is degraded. Please investigate the degraded status error."
-	asExpectedReason string = "AsExpected"
-	StorageNotConfiguredMessage = "Prometheus is running without persistent storage which can lead to data loss during upgrades and cluster disruptions. Please refer to the official documentation to see how to configure storage for Prometheus: https://docs.openshift.com/container-platform/4.8/monitoring/configuring-the-monitoring-stack.html"
-	StorageNotConfiguredReason = "PrometheusDataPersistenceNotConfigured"
+	unavailableMessage string = "Rollout of the monitoring stack failed and is degraded. Please investigate the degraded status error."
+	asExpectedReason string = "AsExpected"
+	StorageNotConfiguredMessage = "Prometheus is running without persistent storage which can lead to data loss during upgrades and cluster disruptions. Please refer to the official documentation to see how to configure storage for Prometheus: https://docs.openshift.com/container-platform/4.8/monitoring/configuring-the-monitoring-stack.html"
+	StorageNotConfiguredReason = "PrometheusDataPersistenceNotConfigured"
 )
 
 type StatusReporter struct {
 	client clientv1.ClusterOperatorInterface
 	clusterOperatorName string
-	ctx context.Context
 	namespace string
 	userWorkloadNamespace string
 	version string
 }
 
-func NewStatusReporter(ctx context.Context, client clientv1.ClusterOperatorInterface, name string, namespace, userWorkloadNamespace, version string) *StatusReporter {
+func NewStatusReporter(client clientv1.ClusterOperatorInterface, name, namespace, userWorkloadNamespace, version string) *StatusReporter {
 	return &StatusReporter{
-		ctx: ctx,
 		client: client,
 		clusterOperatorName: name,
 		namespace: namespace,
@@ -69,11 +67,11 @@ func (r *StatusReporter) relatedObjects() []v1.ObjectReference {
 	}
 }
 
-func (r *StatusReporter) SetDone(degradedConditionMessage string, degradedConditionReason string) error {
-	co, err := r.client.Get(r.ctx, r.clusterOperatorName, metav1.GetOptions{})
+func (r *StatusReporter) SetDone(ctx context.Context, degradedConditionMessage string, degradedConditionReason string) error {
+	co, err := r.client.Get(ctx, r.clusterOperatorName, metav1.GetOptions{})
 	if apierrors.IsNotFound(err) {
 		co = r.newClusterOperator()
-		co, err = r.client.Create(r.ctx, co, metav1.CreateOptions{})
+		co, err = r.client.Create(ctx, co, metav1.CreateOptions{})
 	}
 	if err != nil && !apierrors.IsNotFound(err) {
 		return err
@@ -102,7 +100,7 @@ func (r *StatusReporter) SetDone(degradedConditionMessage string, degradedConditionReason string) error {
 		co.Status.Versions = nil
 	}
 
-	_, err = r.client.UpdateStatus(r.ctx, co, metav1.UpdateOptions{})
+	_, err = r.client.UpdateStatus(ctx, co, metav1.UpdateOptions{})
 	return err
 }
 
@@ -113,11 +111,11 @@ func (r *StatusReporter) SetDone(degradedConditionMessage string, degradedConditionReason string) error {
 // This will ensure that the progressing state will be only set initially or in case of failure.
 // Once controller operator versions are available, an additional check will be introduced that toggles
 // the OperatorProgressing state in case of version upgrades.
-func (r *StatusReporter) SetInProgress() error {
-	co, err := r.client.Get(r.ctx, r.clusterOperatorName, metav1.GetOptions{})
+func (r *StatusReporter) SetInProgress(ctx context.Context) error {
+	co, err := r.client.Get(ctx, r.clusterOperatorName, metav1.GetOptions{})
 	if apierrors.IsNotFound(err) {
 		co = r.newClusterOperator()
-		co, err = r.client.Create(r.ctx, co, metav1.CreateOptions{})
+		co, err = r.client.Create(ctx, co, metav1.CreateOptions{})
 	}
 	if err != nil && !apierrors.IsNotFound(err) {
 		return err
@@ -131,19 +129,19 @@ func (r *StatusReporter) SetInProgress() error {
 	co.Status.Conditions = conditions.entries()
 	co.Status.RelatedObjects = r.relatedObjects()
 
-	_, err = r.client.UpdateStatus(r.ctx, co, metav1.UpdateOptions{})
+	_, err = r.client.UpdateStatus(ctx, co, metav1.UpdateOptions{})
 	return err
 }
 
-func (r *StatusReporter) Get() (*v1.ClusterOperator, error) {
-	return r.client.Get(r.ctx, r.clusterOperatorName, metav1.GetOptions{})
+func (r *StatusReporter) Get(ctx context.Context) (*v1.ClusterOperator, error) {
+	return r.client.Get(ctx, r.clusterOperatorName, metav1.GetOptions{})
 }
 
-func (r *StatusReporter) SetFailed(statusErr error, reason string) error {
-	co, err := r.client.Get(r.ctx, r.clusterOperatorName, metav1.GetOptions{})
+func (r *StatusReporter) SetFailed(ctx context.Context, statusErr error, reason string) error {
+	co, err := r.client.Get(ctx, r.clusterOperatorName, metav1.GetOptions{})
 	if apierrors.IsNotFound(err) {
 		co = r.newClusterOperator()
-		co, err = r.client.Create(r.ctx, co, metav1.CreateOptions{})
+		co, err = r.client.Create(ctx, co, metav1.CreateOptions{})
 	}
 	if err != nil && !apierrors.IsNotFound(err) {
 		return err
@@ -160,7 +158,7 @@ func (r *StatusReporter) SetFailed(statusErr error, reason string) error {
 	conditions.setCondition(v1.OperatorUpgradeable, v1.ConditionTrue, "", asExpectedReason, time)
 	co.Status.Conditions = conditions.entries()
 
-	_, err = r.client.UpdateStatus(r.ctx, co, metav1.UpdateOptions{})
+	_, err = r.client.UpdateStatus(ctx, co, metav1.UpdateOptions{})
 	return err
 }
```
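
With the context removed from StatusReporter, callers construct the reporter once and supply a context per status update. A rough usage sketch, assuming the refactored signatures above; the reportRollout helper, operator name, namespaces, and timeout are illustrative assumptions, not code from this PR:

```go
package statussketch

import (
	"context"
	"time"

	clientv1 "github.com/openshift/client-go/config/clientset/versioned/typed/config/v1"
	"k8s.io/klog/v2"

	"github.com/openshift/cluster-monitoring-operator/pkg/client"
)

// reportRollout shows the post-refactor call shape: the reporter holds no
// context of its own, so each status update can carry its own deadline.
func reportRollout(coClient clientv1.ClusterOperatorInterface, version string) {
	sr := client.NewStatusReporter(coClient, "monitoring", "openshift-monitoring", "openshift-user-workload-monitoring", version)

	// Bound every status update independently instead of reusing a single
	// long-lived context captured at construction time.
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
	defer cancel()

	if err := sr.SetInProgress(ctx); err != nil {
		klog.Errorf("setting ClusterOperator status to in progress: %v", err)
	}
	if err := sr.SetDone(ctx, "", ""); err != nil {
		klog.Errorf("setting ClusterOperator status to done: %v", err)
	}
}
```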

pkg/client/status_reporter_test.go

Lines changed: 3 additions & 6 deletions
```diff
@@ -101,7 +101,6 @@ func TestStatusReporterSetDone(t *testing.T) {
 			mock := &clusterOperatorMock{}
 
 			sr := NewStatusReporter(
-				ctx,
 				mock,
 				tc.given.operatorName,
 				tc.given.namespace,
@@ -113,7 +112,7 @@
 				w(mock)
 			}
 
-			got := sr.SetDone("", "")
+			got := sr.SetDone(ctx, "", "")
 
 			for _, check := range tc.check {
 				if err := check(mock, got); err != nil {
@@ -194,7 +193,6 @@ func TestStatusReporterSetInProgress(t *testing.T) {
 			mock := &clusterOperatorMock{}
 
 			sr := NewStatusReporter(
-				ctx,
 				mock,
 				tc.given.operatorName,
 				tc.given.namespace,
@@ -206,7 +204,7 @@
 				w(mock)
 			}
 
-			got := sr.SetInProgress()
+			got := sr.SetInProgress(ctx)
 
 			for _, check := range tc.check {
 				if err := check(mock, got); err != nil {
@@ -292,7 +290,6 @@ func TestStatusReporterSetFailed(t *testing.T) {
 			mock := &clusterOperatorMock{}
 
 			sr := NewStatusReporter(
-				ctx,
 				mock,
 				tc.given.operatorName,
 				tc.given.namespace,
@@ -304,7 +301,7 @@
 				w(mock)
 			}
 
-			got := sr.SetFailed(tc.given.err, "")
+			got := sr.SetFailed(ctx, tc.given.err, "")
 
 			for _, check := range tc.check {
 				if err := check(mock, got); err != nil {
```
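
The tests follow suit: the reporter is built without a context and the test's own ctx flows into each call. A minimal sketch of that shape, assuming the openshift/client-go fake clientset in place of the package's clusterOperatorMock; the operator name, namespaces, and version literals are placeholders:

```go
package client_test

import (
	"context"
	"testing"

	fakeconfig "github.com/openshift/client-go/config/clientset/versioned/fake"

	"github.com/openshift/cluster-monitoring-operator/pkg/client"
)

// TestStatusReporterSketch mirrors the post-refactor test shape: no context
// is passed to NewStatusReporter, and each status call receives ctx directly.
func TestStatusReporterSketch(t *testing.T) {
	ctx := context.Background()
	co := fakeconfig.NewSimpleClientset().ConfigV1().ClusterOperators()

	sr := client.NewStatusReporter(co, "monitoring", "openshift-monitoring", "openshift-user-workload-monitoring", "0.0.1")

	if err := sr.SetInProgress(ctx); err != nil {
		t.Fatalf("SetInProgress: %v", err)
	}
	if err := sr.SetDone(ctx, "", ""); err != nil {
		t.Fatalf("SetDone: %v", err)
	}
}
```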

pkg/operator/operator.go

Lines changed: 31 additions & 33 deletions
```diff
@@ -132,8 +132,6 @@ const (
 )
 
 type Operator struct {
-	ctx context.Context
-
 	namespace, namespaceUserWorkload string
 
 	configMapName string
@@ -177,7 +175,6 @@ func New(
 	}
 
 	o := &Operator{
-		ctx: ctx,
 		images: images,
 		telemetryMatches: telemetryMatches,
 		configMapName: configMapName,
@@ -250,7 +247,7 @@ func New(
 	o.informers = append(o.informers, informer)
 
 	informer = cache.NewSharedIndexInformer(
-		o.client.InfrastructureListWatchForResource(o.ctx, clusterResourceName),
+		o.client.InfrastructureListWatchForResource(ctx, clusterResourceName),
 		&configv1.Infrastructure{}, resyncPeriod, cache.Indexers{},
 	)
 	informer.AddEventHandler(cache.ResourceEventHandlerFuncs{
@@ -373,7 +370,7 @@ func (o *Operator) Run(ctx context.Context) error {
 		go r(ctx, 1)
 	}
 
-	go o.worker()
+	go o.worker(ctx)
 
 	ticker := time.NewTicker(5 * time.Minute)
 	defer ticker.Stop()
@@ -449,20 +446,20 @@ func (o *Operator) handleEvent(obj interface{}) {
 	o.enqueue(cmoConfigMap)
 }
 
-func (o *Operator) worker() {
-	for o.processNextWorkItem() {
+func (o *Operator) worker(ctx context.Context) {
+	for o.processNextWorkItem(ctx) {
 	}
 }
 
-func (o *Operator) processNextWorkItem() bool {
+func (o *Operator) processNextWorkItem(ctx context.Context) bool {
 	key, quit := o.queue.Get()
 	if quit {
 		return false
 	}
 	defer o.queue.Done(key)
 
 	o.reconcileAttempts.Inc()
-	err := o.sync(key.(string))
+	err := o.sync(ctx, key.(string))
 	if err == nil {
 		o.reconcileStatus.Set(1)
 		o.queue.Forget(key)
@@ -493,23 +490,23 @@ func (o *Operator) enqueue(obj interface{}) {
 	o.queue.Add(key)
 }
 
-func (o *Operator) sync(key string) error {
-	config, err := o.Config(key)
+func (o *Operator) sync(ctx context.Context, key string) error {
+	config, err := o.Config(ctx, key)
 	if err != nil {
-		o.reportError(err, "InvalidConfiguration")
+		o.reportError(ctx, err, "InvalidConfiguration")
 		return err
 	}
 	config.SetImages(o.images)
 	config.SetTelemetryMatches(o.telemetryMatches)
 	config.SetRemoteWrite(o.remoteWrite)
 
 	var proxyConfig manifests.ProxyReader
-	proxyConfig, err = o.loadProxyConfig()
+	proxyConfig, err = o.loadProxyConfig(ctx)
 	if err != nil {
 		klog.Warningf("using proxy config from CMO configmap: %v", err)
 		proxyConfig = config
 	}
-	factory := manifests.NewFactory(o.namespace, o.namespaceUserWorkload, config, o.loadInfrastructureConfig(), proxyConfig, o.assets)
+	factory := manifests.NewFactory(o.namespace, o.namespaceUserWorkload, config, o.loadInfrastructureConfig(ctx), proxyConfig, o.assets)
 
 	tl := tasks.NewTaskRunner(
 		o.client,
@@ -530,7 +527,7 @@ func (o *Operator) sync(key string) error {
 		tasks.NewTaskSpec("Updating node-exporter", tasks.NewNodeExporterTask(o.client, factory)),
 		tasks.NewTaskSpec("Updating kube-state-metrics", tasks.NewKubeStateMetricsTask(o.client, factory)),
 		tasks.NewTaskSpec("Updating openshift-state-metrics", tasks.NewOpenShiftStateMetricsTask(o.client, factory)),
-		tasks.NewTaskSpec("Updating prometheus-adapter", tasks.NewPrometheusAdapterTask(o.ctx, o.namespace, o.client, factory)),
+		tasks.NewTaskSpec("Updating prometheus-adapter", tasks.NewPrometheusAdapterTask(ctx, o.namespace, o.client, factory)),
 		tasks.NewTaskSpec("Updating Telemeter client", tasks.NewTelemeterClientTask(o.client, factory, config)),
 		tasks.NewTaskSpec("Updating configuration sharing", tasks.NewConfigSharingTask(o.client, factory, config)),
 		tasks.NewTaskSpec("Updating Thanos Querier", tasks.NewThanosQuerierTask(o.client, factory, config)),
@@ -539,14 +536,14 @@
 		}),
 	)
 	klog.Info("Updating ClusterOperator status to in progress.")
-	err = o.client.StatusReporter().SetInProgress()
+	err = o.client.StatusReporter().SetInProgress(ctx)
 	if err != nil {
 		klog.Errorf("error occurred while setting status to in progress: %v", err)
 	}
 
-	taskName, err := tl.RunAll()
+	taskName, err := tl.RunAll(ctx)
 	if err != nil {
-		o.reportError(err, taskName)
+		o.reportError(ctx, err, taskName)
 		return err
 	}
 
@@ -557,32 +554,33 @@
 	}
 	klog.Info("Updating ClusterOperator status to done.")
 	o.failedReconcileAttempts = 0
-	err = o.client.StatusReporter().SetDone(degradedConditionMessage, degradedConditionReason)
+	err = o.client.StatusReporter().SetDone(ctx, degradedConditionMessage, degradedConditionReason)
+
 	if err != nil {
 		klog.Errorf("error occurred while setting status to done: %v", err)
 	}
 
 	return nil
 }
 
-func (o *Operator) reportError(err error, taskName string) {
+func (o *Operator) reportError(ctx context.Context, err error, taskName string) {
 	klog.Infof("ClusterOperator reconciliation failed (attempt %d), retrying. Err: %v", o.failedReconcileAttempts+1, err)
 	if o.failedReconcileAttempts == 2 {
 		// Only update the ClusterOperator status after 3 retries have been attempted to avoid flapping status.
 		klog.Infof("Updating ClusterOperator status to failed after %d attempts. Err: %v", o.failedReconcileAttempts+1, err)
 		failedTaskReason := strings.Join(strings.Fields(taskName+"Failed"), "")
-		reportErr := o.client.StatusReporter().SetFailed(err, failedTaskReason)
+		reportErr := o.client.StatusReporter().SetFailed(ctx, err, failedTaskReason)
 		if reportErr != nil {
 			klog.Errorf("error occurred while setting status to failed: %v", reportErr)
 		}
 	}
 	o.failedReconcileAttempts++
 }
 
-func (o *Operator) loadInfrastructureConfig() *InfrastructureConfig {
+func (o *Operator) loadInfrastructureConfig(ctx context.Context) *InfrastructureConfig {
 	var infrastructureConfig *InfrastructureConfig
 
-	infrastructure, err := o.client.GetInfrastructure(clusterResourceName)
+	infrastructure, err := o.client.GetInfrastructure(ctx, clusterResourceName)
 	if err != nil {
 		klog.Warningf("Error getting cluster infrastructure: %v", err)
 
@@ -602,10 +600,10 @@ func (o *Operator) loadInfrastructureConfig() *InfrastructureConfig {
 	return o.lastKnowInfrastructureConfig
 }
 
-func (o *Operator) loadProxyConfig() (*ProxyConfig, error) {
+func (o *Operator) loadProxyConfig(ctx context.Context) (*ProxyConfig, error) {
 	var proxyConfig *ProxyConfig
 
-	proxy, err := o.client.GetProxy(clusterResourceName)
+	proxy, err := o.client.GetProxy(ctx, clusterResourceName)
 	if err != nil {
 		klog.Warningf("Error getting cluster proxy configuration: %v", err)
 
@@ -622,10 +620,10 @@ func (o *Operator) loadProxyConfig() (*ProxyConfig, error) {
 	return o.lastKnowProxyConfig, nil
 }
 
-func (o *Operator) loadUserWorkloadConfig() (*manifests.UserWorkloadConfiguration, error) {
+func (o *Operator) loadUserWorkloadConfig(ctx context.Context) (*manifests.UserWorkloadConfiguration, error) {
 	cmKey := fmt.Sprintf("%s/%s", o.namespaceUserWorkload, o.userWorkloadConfigMapName)
 
-	userCM, err := o.client.GetConfigmap(o.namespaceUserWorkload, o.userWorkloadConfigMapName)
+	userCM, err := o.client.GetConfigmap(ctx, o.namespaceUserWorkload, o.userWorkloadConfigMapName)
 	if err != nil {
 		if apierrors.IsNotFound(err) {
 			klog.Warningf("User Workload Monitoring %q ConfigMap not found. Using defaults.", cmKey)
@@ -676,7 +674,7 @@ func (o *Operator) loadConfig(key string) (*manifests.Config, error) {
 	return cParsed, nil
 }
 
-func (o *Operator) Config(key string) (*manifests.Config, error) {
+func (o *Operator) Config(ctx context.Context, key string) (*manifests.Config, error) {
 	c, err := o.loadConfig(key)
 	if err != nil {
 		return nil, err
@@ -687,7 +685,7 @@ func (o *Operator) Config(key string) (*manifests.Config, error) {
 	// loadConfig() already initializes the structs with nil values for
 	// UserWorkloadConfiguration struct.
 	if *c.ClusterMonitoringConfiguration.UserWorkloadEnabled {
-		c.UserWorkloadConfiguration, err = o.loadUserWorkloadConfig()
+		c.UserWorkloadConfiguration, err = o.loadUserWorkloadConfig(ctx)
 		if err != nil {
 			return nil, err
 		}
@@ -696,29 +694,29 @@ func (o *Operator) Config(key string) (*manifests.Config, error) {
 	// Only fetch the token and cluster ID if they have not been specified in the config.
 	if c.ClusterMonitoringConfiguration.TelemeterClientConfig.ClusterID == "" || c.ClusterMonitoringConfiguration.TelemeterClientConfig.Token == "" {
 		err := c.LoadClusterID(func() (*configv1.ClusterVersion, error) {
-			return o.client.GetClusterVersion("version")
+			return o.client.GetClusterVersion(ctx, "version")
 		})
 
 		if err != nil {
 			klog.Warningf("Could not fetch cluster version from API. Proceeding without it: %v", err)
 		}
 
 		err = c.LoadToken(func() (*v1.Secret, error) {
-			return o.client.KubernetesInterface().CoreV1().Secrets("openshift-config").Get(o.ctx, "pull-secret", metav1.GetOptions{})
+			return o.client.KubernetesInterface().CoreV1().Secrets("openshift-config").Get(ctx, "pull-secret", metav1.GetOptions{})
 		})
 
 		if err != nil {
 			klog.Warningf("Error loading token from API. Proceeding without it: %v", err)
 		}
 	}
 
-	cm, err := o.client.GetConfigmap("openshift-config", "etcd-metric-serving-ca")
+	cm, err := o.client.GetConfigmap(ctx, "openshift-config", "etcd-metric-serving-ca")
 	if err != nil {
 		klog.Warningf("Error loading etcd CA certificates for Prometheus. Proceeding with etcd disabled. Error: %v", err)
 		return c, nil
 	}
 
-	s, err := o.client.GetSecret("openshift-config", "etcd-metric-client")
+	s, err := o.client.GetSecret(ctx, "openshift-config", "etcd-metric-client")
 	if err != nil {
 		klog.Warningf("Error loading etcd client secrets for Prometheus. Proceeding with etcd disabled. Error: %v", err)
 		return c, nil
```