diff --git a/pkg/hub/addon/healthcheck_controller.go b/pkg/hub/addon/healthcheck_controller.go
index 063b3d762..4a6ba7b3b 100644
--- a/pkg/hub/addon/healthcheck_controller.go
+++ b/pkg/hub/addon/healthcheck_controller.go
@@ -4,6 +4,7 @@ import (
 	"context"
 
 	operatorhelpers "github.com/openshift/library-go/pkg/operator/v1helpers"
+	addonv1alpha1 "open-cluster-management.io/api/addon/v1alpha1"
 	addonclient "open-cluster-management.io/api/client/addon/clientset/versioned"
 	addoninformerv1alpha1 "open-cluster-management.io/api/client/addon/informers/externalversions/addon/v1alpha1"
 	addonlisterv1alpha1 "open-cluster-management.io/api/client/addon/listers/addon/v1alpha1"
@@ -22,8 +23,6 @@ import (
 	"k8s.io/apimachinery/pkg/runtime"
 )
 
-const addOnAvailableConditionType = "Available" //TODO add this to ManagedClusterAddOn api
-
 // managedClusterAddonHealthCheckController udpates managed cluster addons status through watching the managed cluster status on
 // the hub cluster.
 type managedClusterAddOnHealthCheckController struct {
@@ -95,7 +94,7 @@ func (c *managedClusterAddOnHealthCheckController) sync(ctx context.Context, syn
 			addOn.Namespace,
 			addOn.Name,
 			helpers.UpdateManagedClusterAddOnStatusFn(metav1.Condition{
-				Type:    addOnAvailableConditionType,
+				Type:    addonv1alpha1.ManagedClusterAddOnConditionAvailable,
 				Status:  managedClusterAvailableCondition.Status,
 				Reason:  managedClusterAvailableCondition.Reason,
 				Message: managedClusterAvailableCondition.Message,
diff --git a/pkg/hub/clusterrole/manifests/managedcluster-registration-clusterrole.yaml b/pkg/hub/clusterrole/manifests/managedcluster-registration-clusterrole.yaml
index cd945d237..dc58e1c51 100644
--- a/pkg/hub/clusterrole/manifests/managedcluster-registration-clusterrole.yaml
+++ b/pkg/hub/clusterrole/manifests/managedcluster-registration-clusterrole.yaml
@@ -6,9 +6,7 @@ rules:
 # Allow spoke registration agent to get/update coordination.k8s.io/lease
 - apiGroups: ["coordination.k8s.io"]
   resources: ["leases"]
-  #TODO: for backward compatible, we do not limit the resource name in release 2.3.
-  #After release 2.3, we will limit the resource name.
-  #resourceNames: ["managed-cluster-lease"]
+  resourceNames: ["managed-cluster-lease"]
   verbs: ["get", "update"]
 # Allow agent to get/list/watch managed cluster addons
 - apiGroups: ["addon.open-cluster-management.io"]
diff --git a/pkg/hub/managedcluster/controller.go b/pkg/hub/managedcluster/controller.go
index ef754e23b..e81363c0a 100644
--- a/pkg/hub/managedcluster/controller.go
+++ b/pkg/hub/managedcluster/controller.go
@@ -140,9 +140,8 @@ func (c *managedClusterController) sync(ctx context.Context, syncCtx factory.Syn
 		return err
 	}
 
-	// TODO: we will add the managedcluster-namespace.yaml back to staticFiles
-	// in next release, currently, we need keep the namespace after the managed
-	// cluster is deleted.
+	// TODO: consider adding the managedcluster-namespace.yaml back to staticFiles;
+	// currently we keep the namespace after the managed cluster is deleted.
 	applyFiles := []string{"manifests/managedcluster-namespace.yaml"}
 	applyFiles = append(applyFiles, staticFiles...)
 
diff --git a/pkg/spoke/addon/lease_controller.go b/pkg/spoke/addon/lease_controller.go
index accce5632..4f203d88d 100644
--- a/pkg/spoke/addon/lease_controller.go
+++ b/pkg/spoke/addon/lease_controller.go
@@ -19,15 +19,15 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/apimachinery/pkg/runtime"
-	"k8s.io/apimachinery/pkg/util/clock"
 	coordv1client "k8s.io/client-go/kubernetes/typed/coordination/v1"
 	"k8s.io/client-go/tools/cache"
+	"k8s.io/utils/clock"
 )
 
 const leaseDurationTimes = 5
 
 // AddOnLeaseControllerLeaseDurationSeconds is exposed so that integration tests can crank up the lease update speed.
-// TODO: we may add this to ManagedClusterAddOn API to allow addon to adjust its own lease duration seconds
+// TODO we may add this to ManagedClusterAddOn API to allow addon to adjust its own lease duration seconds
 var AddOnLeaseControllerLeaseDurationSeconds = 60
 
 // managedClusterAddOnLeaseController updates the managed cluster addons status on the hub cluster through checking the add-on
@@ -37,7 +37,6 @@ type managedClusterAddOnLeaseController struct {
 	clock                 clock.Clock
 	addOnClient           addonclient.Interface
 	addOnLister           addonlisterv1alpha1.ManagedClusterAddOnLister
-	hubLeaseClient        coordv1client.CoordinationV1Interface
 	managementLeaseClient coordv1client.CoordinationV1Interface
 	spokeLeaseClient      coordv1client.CoordinationV1Interface
 }
@@ -46,7 +45,6 @@ type managedClusterAddOnLeaseController struct {
 func NewManagedClusterAddOnLeaseController(clusterName string,
 	addOnClient addonclient.Interface,
 	addOnInformer addoninformerv1alpha1.ManagedClusterAddOnInformer,
-	hubLeaseClient coordv1client.CoordinationV1Interface,
 	managementLeaseClient coordv1client.CoordinationV1Interface,
 	spokeLeaseClient coordv1client.CoordinationV1Interface,
 	resyncInterval time.Duration,
@@ -56,7 +54,6 @@ func NewManagedClusterAddOnLeaseController(clusterName string,
 		clock:                 clock.RealClock{},
 		addOnClient:           addOnClient,
 		addOnLister:           addOnInformer.Lister(),
-		hubLeaseClient:        hubLeaseClient,
 		managementLeaseClient: managementLeaseClient,
 		spokeLeaseClient:      spokeLeaseClient,
 	}
@@ -129,31 +126,6 @@ func (c *managedClusterAddOnLeaseController) syncSingle(ctx context.Context,
 	var condition metav1.Condition
 	switch {
 	case errors.IsNotFound(err):
-		// for backward compatible, before release-2.3, addons update their leases on hub cluster,
-		// so if we cannot find addon lease on managed/management cluster, we will try to use addon hub lease.
-		// TODO: after release-2.3, we will remove these code
-		observedLease, err = c.hubLeaseClient.Leases(addOn.Namespace).Get(ctx, addOn.Name, metav1.GetOptions{})
-		if err == nil {
-			if now.Before(observedLease.Spec.RenewTime.Add(gracePeriod)) {
-				// the lease is constantly updated, update its addon status to available
-				condition = metav1.Condition{
-					Type:    addonv1alpha1.ManagedClusterAddOnConditionAvailable,
-					Status:  metav1.ConditionTrue,
-					Reason:  "ManagedClusterAddOnLeaseUpdated",
-					Message: fmt.Sprintf("%s add-on is available.", addOn.Name),
-				}
-				break
-			}
-
-			// the lease is not constantly updated, update its addon status to unavailable
-			condition = metav1.Condition{
-				Type:    addonv1alpha1.ManagedClusterAddOnConditionAvailable,
-				Status:  metav1.ConditionFalse,
-				Reason:  "ManagedClusterAddOnLeaseUpdateStopped",
-				Message: fmt.Sprintf("%s add-on is not available.", addOn.Name),
-			}
-			break
-		}
 		condition = metav1.Condition{
 			Type:   addonv1alpha1.ManagedClusterAddOnConditionAvailable,
 			Status: metav1.ConditionUnknown,
diff --git a/pkg/spoke/addon/lease_controller_test.go b/pkg/spoke/addon/lease_controller_test.go
index 336ed5103..29cbb870e 100644
--- a/pkg/spoke/addon/lease_controller_test.go
+++ b/pkg/spoke/addon/lease_controller_test.go
@@ -14,9 +14,9 @@ import (
 	"k8s.io/apimachinery/pkg/api/meta"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
-	"k8s.io/apimachinery/pkg/util/clock"
 	kubefake "k8s.io/client-go/kubernetes/fake"
 	clienttesting "k8s.io/client-go/testing"
+	clocktesting "k8s.io/utils/clock/testing"
 )
 
 var now = time.Now()
@@ -100,7 +100,6 @@ func TestSync(t *testing.T) {
 		name             string
 		queueKey         string
 		addOns           []runtime.Object
-		hubLeases        []runtime.Object
 		managementLeases []runtime.Object
 		spokeLeases      []runtime.Object
 		validateActions  func(t *testing.T, ctx *testinghelpers.FakeSyncContext, actions []clienttesting.Action)
@@ -109,7 +108,6 @@ func TestSync(t *testing.T) {
 			name:     "bad queue key",
 			queueKey: "test/test/test",
 			addOns:   []runtime.Object{},
-			hubLeases:   []runtime.Object{},
 			spokeLeases: []runtime.Object{},
 			validateActions: func(t *testing.T, ctx *testinghelpers.FakeSyncContext, actions []clienttesting.Action) {
 				testinghelpers.AssertNoActions(t, actions)
@@ -119,7 +117,6 @@ func TestSync(t *testing.T) {
 			name:     "no addons",
 			queueKey: "test/test",
 			addOns:   []runtime.Object{},
-			hubLeases:   []runtime.Object{},
 			spokeLeases: []runtime.Object{},
 			validateActions: func(t *testing.T, ctx *testinghelpers.FakeSyncContext, actions []clienttesting.Action) {
 				testinghelpers.AssertNoActions(t, actions)
@@ -137,7 +134,6 @@ func TestSync(t *testing.T) {
 					InstallNamespace: "test",
 				},
 			}},
-			hubLeases:   []runtime.Object{},
 			spokeLeases: []runtime.Object{},
 			validateActions: func(t *testing.T, ctx *testinghelpers.FakeSyncContext, actions []clienttesting.Action) {
 				testinghelpers.AssertActions(t, actions, "get", "patch")
@@ -169,7 +165,6 @@ func TestSync(t *testing.T) {
 					InstallNamespace: "test",
 				},
 			}},
-			hubLeases: []runtime.Object{},
 			spokeLeases: []runtime.Object{
 				testinghelpers.NewAddOnLease("test", "test", now.Add(-5*time.Minute)),
 			},
@@ -203,7 +198,6 @@ func TestSync(t *testing.T) {
 					InstallNamespace: "test",
 				},
 			}},
-			hubLeases: []runtime.Object{},
 			spokeLeases: []runtime.Object{
 				testinghelpers.NewAddOnLease("test", "test", now),
 			},
@@ -247,7 +241,6 @@ func TestSync(t *testing.T) {
 					},
 				},
 			}},
-			hubLeases: []runtime.Object{},
 			spokeLeases: []runtime.Object{
 				testinghelpers.NewAddOnLease("test", "test", now),
 			},
@@ -275,7 +268,6 @@ func TestSync(t *testing.T) {
 					},
 				},
 			},
-			hubLeases: []runtime.Object{},
 			spokeLeases: []runtime.Object{
 				testinghelpers.NewAddOnLease("test1", "test1", now.Add(-5*time.Minute)),
 			},
@@ -300,7 +292,6 @@ func TestSync(t *testing.T) {
 					InstallNamespace: "test",
 				},
 			}},
-			hubLeases: []runtime.Object{},
 			managementLeases: []runtime.Object{
 				testinghelpers.NewAddOnLease("test", "test", now),
 			},
@@ -322,35 +313,6 @@ func TestSync(t *testing.T) {
 				}
 			},
 		},
-		{
-			name:     "addon update its lease constantly (compatibility)",
-			queueKey: "test/test",
-			addOns: []runtime.Object{&addonv1alpha1.ManagedClusterAddOn{
-				ObjectMeta: metav1.ObjectMeta{
-					Namespace: testinghelpers.TestManagedClusterName,
-					Name:      "test",
-				},
-			}},
-			hubLeases:   []runtime.Object{testinghelpers.NewAddOnLease(testinghelpers.TestManagedClusterName, "test", now)},
-			spokeLeases: []runtime.Object{},
-			validateActions: func(t *testing.T, ctx *testinghelpers.FakeSyncContext, actions []clienttesting.Action) {
-				testinghelpers.AssertActions(t, actions, "get", "patch")
-				patch := actions[1].(clienttesting.PatchAction).GetPatch()
-				addOn := &addonv1alpha1.ManagedClusterAddOn{}
-				err := json.Unmarshal(patch, addOn)
-				if err != nil {
-					t.Fatal(err)
-				}
-				addOnCond := meta.FindStatusCondition(addOn.Status.Conditions, "Available")
-				if addOnCond == nil {
-					t.Errorf("expected addon available condition, but failed")
-					return
-				}
-				if addOnCond.Status != metav1.ConditionTrue {
-					t.Errorf("expected addon available condition is available, but failed")
-				}
-			},
-		},
 		{
 			name:     "addon has customized health check",
 			queueKey: "test/test",
@@ -365,7 +327,6 @@ func TestSync(t *testing.T) {
 			addOns: []runtime.Object{&addonv1alpha1.ManagedClusterAddOn{
 				ObjectMeta: metav1.ObjectMeta{
 					Namespace: testinghelpers.TestManagedClusterName,
 					Name:      "test",
 				},
 				Spec: addonv1alpha1.ManagedClusterAddOnSpec{
 					InstallNamespace: "test",
 				},
 				Status: addonv1alpha1.ManagedClusterAddOnStatus{
 					HealthCheck: addonv1alpha1.HealthCheck{
 						Mode: addonv1alpha1.HealthCheckModeCustomized,
 					},
 				},
 			}},
-			hubLeases:   []runtime.Object{},
 			spokeLeases: []runtime.Object{},
 			validateActions: func(t *testing.T, ctx *testinghelpers.FakeSyncContext, actions []clienttesting.Action) {
 				testinghelpers.AssertNoActions(t, actions)
@@ -384,14 +345,12 @@ func TestSync(t *testing.T) {
 				}
 			}
 
-			hubClient := kubefake.NewSimpleClientset(c.hubLeases...)
 			managementLeaseClient := kubefake.NewSimpleClientset(c.managementLeases...)
 			spokeLeaseClient := kubefake.NewSimpleClientset(c.spokeLeases...)
 
 			ctrl := &managedClusterAddOnLeaseController{
 				clusterName:           testinghelpers.TestManagedClusterName,
-				clock:                 clock.NewFakeClock(time.Now()),
-				hubLeaseClient:        hubClient.CoordinationV1(),
+				clock:                 clocktesting.NewFakeClock(time.Now()),
 				addOnClient:           addOnClient,
 				addOnLister:           addOnInformerFactory.Addon().V1alpha1().ManagedClusterAddOns().Lister(),
 				managementLeaseClient: managementLeaseClient.CoordinationV1(),
diff --git a/pkg/spoke/spokeagent.go b/pkg/spoke/spokeagent.go
index 60cb8ab0a..e63154a94 100644
--- a/pkg/spoke/spokeagent.go
+++ b/pkg/spoke/spokeagent.go
@@ -356,7 +356,6 @@ func (o *SpokeAgentOptions) RunSpokeAgent(ctx context.Context, controllerContext
 		o.ClusterName,
 		addOnClient,
 		addOnInformerFactory.Addon().V1alpha1().ManagedClusterAddOns(),
-		hubKubeClient.CoordinationV1(),
 		managementKubeClient.CoordinationV1(),
 		spokeKubeClient.CoordinationV1(),
 		AddOnLeaseControllerSyncInterval, //TODO: this interval time should be allowed to change from outside
diff --git a/vendor/k8s.io/apimachinery/pkg/util/clock/clock.go b/vendor/k8s.io/apimachinery/pkg/util/clock/clock.go
deleted file mode 100644
index ff97612df..000000000
--- a/vendor/k8s.io/apimachinery/pkg/util/clock/clock.go
+++ /dev/null
@@ -1,86 +0,0 @@
-/*
-Copyright 2014 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package clock
-
-import (
-	"time"
-
-	clocks "k8s.io/utils/clock"
-	testclocks "k8s.io/utils/clock/testing"
-)
-
-// PassiveClock allows for injecting fake or real clocks into code
-// that needs to read the current time but does not support scheduling
-// activity in the future.
-//
-// Deprecated: Use k8s.io/utils/clock.PassiveClock instead.
-type PassiveClock = clocks.PassiveClock
-
-// Clock allows for injecting fake or real clocks into code that
-// needs to do arbitrary things based on time.
-//
-// Deprecated: Use k8s.io/utils/clock.WithTickerAndDelayedExecution instead.
-type Clock = clocks.WithTickerAndDelayedExecution
-
-// Deprecated: Use k8s.io/utils/clock.RealClock instead.
-type RealClock = clocks.RealClock
-
-// FakePassiveClock implements PassiveClock, but returns an arbitrary time.
-//
-// Deprecated: Use k8s.io/utils/clock/testing.FakePassiveClock instead.
-type FakePassiveClock = testclocks.FakePassiveClock
-
-// FakeClock implements Clock, but returns an arbitrary time.
-//
-// Deprecated: Use k8s.io/utils/clock/testing.FakeClock instead.
-type FakeClock = testclocks.FakeClock
-
-// NewFakePassiveClock returns a new FakePassiveClock.
-//
-// Deprecated: Use k8s.io/utils/clock/testing.NewFakePassiveClock instead.
-func NewFakePassiveClock(t time.Time) *testclocks.FakePassiveClock {
-	return testclocks.NewFakePassiveClock(t)
-}
-
-// NewFakeClock returns a new FakeClock.
-//
-// Deprecated: Use k8s.io/utils/clock/testing.NewFakeClock instead.
-func NewFakeClock(t time.Time) *testclocks.FakeClock {
-	return testclocks.NewFakeClock(t)
-}
-
-// IntervalClock implements Clock, but each invocation of Now steps
-// the clock forward the specified duration.
-//
-// WARNING: most of the Clock methods just `panic`;
-// only PassiveClock is honestly implemented.
-// The alternative, SimpleIntervalClock, has only the
-// PassiveClock methods.
-//
-// Deprecated: Use k8s.io/utils/clock/testing.SimpleIntervalClock instead.
-type IntervalClock = testclocks.IntervalClock
-
-// Timer allows for injecting fake or real timers into code that
-// needs to do arbitrary things based on time.
-//
-// Deprecated: Use k8s.io/utils/clock.Timer instead.
-type Timer = clocks.Timer
-
-// Ticker defines the Ticker interface.
-//
-// Deprecated: Use k8s.io/utils/clock.Ticker instead.
-type Ticker = clocks.Ticker
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 375efbd8e..ae4c05565 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -740,7 +740,6 @@ k8s.io/apimachinery/pkg/runtime/serializer/yaml
 k8s.io/apimachinery/pkg/selection
 k8s.io/apimachinery/pkg/types
 k8s.io/apimachinery/pkg/util/cache
-k8s.io/apimachinery/pkg/util/clock
 k8s.io/apimachinery/pkg/util/diff
 k8s.io/apimachinery/pkg/util/errors
 k8s.io/apimachinery/pkg/util/framer
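
Note for reviewers applying the same clock migration elsewhere: production code now takes its clock interfaces from k8s.io/utils/clock, and tests build fake clocks from k8s.io/utils/clock/testing, replacing the deprecated k8s.io/apimachinery/pkg/util/clock aliases deleted from vendor above. The sketch below is a minimal illustration of that pattern, not part of this patch; the leaseFresh helper and its parameters are hypothetical, while clock.RealClock{}, clocktesting.NewFakeClock, and FakeClock.Step are the real library pieces used the same way as in the controller and test changes in this diff.

package main

import (
	"time"

	"k8s.io/utils/clock"                      // replaces k8s.io/apimachinery/pkg/util/clock
	clocktesting "k8s.io/utils/clock/testing" // fake clocks for tests
)

// leaseFresh is a hypothetical helper that mirrors how the lease controller
// decides availability: a lease counts as fresh while renewTime plus the
// grace period is still in the future according to the injected clock.
func leaseFresh(c clock.PassiveClock, renewTime time.Time, gracePeriod time.Duration) bool {
	return c.Now().Before(renewTime.Add(gracePeriod))
}

func main() {
	renew := time.Now()

	// Production code injects the real clock.
	_ = leaseFresh(clock.RealClock{}, renew, 5*time.Minute)

	// Tests inject a fake clock and advance it deterministically,
	// so lease expiry can be exercised without sleeping.
	fake := clocktesting.NewFakeClock(time.Now())
	fake.Step(10 * time.Minute) // the lease now looks stale to the helper
	_ = leaseFresh(fake, renew, 5*time.Minute)
}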