Skip to content

Commit 189d25b

Browse files
committed
Add Unit Tests
1 parent 312756a commit 189d25b

File tree

5 files changed

+57
-7
lines changed

5 files changed

+57
-7
lines changed

cluster-autoscaler/cloudprovider/clusterapi/clusterapi_nodegroup_test.go

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1703,6 +1703,7 @@ func TestNodeGroupGetOptions(t *testing.T) {
17031703
ScaleDownUnneededTime: time.Second,
17041704
ScaleDownUnreadyTime: time.Minute,
17051705
MaxNodeProvisionTime: 15 * time.Minute,
1706+
MaxNodeStartupTime: 35 * time.Minute,
17061707
}
17071708

17081709
cases := []struct {
@@ -1723,13 +1724,15 @@ func TestNodeGroupGetOptions(t *testing.T) {
17231724
config.DefaultScaleDownUnneededTimeKey: "1h",
17241725
config.DefaultScaleDownUnreadyTimeKey: "30m",
17251726
config.DefaultMaxNodeProvisionTimeKey: "60m",
1727+
config.DefaultMaxNodeStartupTimeKey: "35m",
17261728
},
17271729
expected: &config.NodeGroupAutoscalingOptions{
17281730
ScaleDownGpuUtilizationThreshold: 0.6,
17291731
ScaleDownUtilizationThreshold: 0.7,
17301732
ScaleDownUnneededTime: time.Hour,
17311733
ScaleDownUnreadyTime: 30 * time.Minute,
17321734
MaxNodeProvisionTime: 60 * time.Minute,
1735+
MaxNodeStartupTime: 35 * time.Minute,
17331736
},
17341737
},
17351738
{
@@ -1744,6 +1747,7 @@ func TestNodeGroupGetOptions(t *testing.T) {
17441747
ScaleDownUnneededTime: time.Minute,
17451748
ScaleDownUnreadyTime: defaultOptions.ScaleDownUnreadyTime,
17461749
MaxNodeProvisionTime: 15 * time.Minute,
1750+
MaxNodeStartupTime: 35 * time.Minute,
17471751
},
17481752
},
17491753
{

cluster-autoscaler/clusterstate/clusterstate_test.go

Lines changed: 35 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -423,7 +423,7 @@ func TestTooManyUnready(t *testing.T) {
423423
clusterstate := NewClusterStateRegistry(provider, ClusterStateRegistryConfig{
424424
MaxTotalUnreadyPercentage: 10,
425425
OkTotalUnreadyCount: 1,
426-
}, fakeLogRecorder, newBackoff(), nodegroupconfig.NewDefaultNodeGroupConfigProcessor(config.NodeGroupAutoscalingOptions{MaxNodeProvisionTime: 15 * time.Minute}), asyncnodegroups.NewDefaultAsyncNodeGroupStateChecker())
426+
}, fakeLogRecorder, newBackoff(), nodegroupconfig.NewDefaultNodeGroupConfigProcessor(config.NodeGroupAutoscalingOptions{MaxNodeProvisionTime: 15 * time.Minute, MaxNodeStartupTime: 35 * time.Minute}), asyncnodegroups.NewDefaultAsyncNodeGroupStateChecker())
427427
err := clusterstate.UpdateNodes([]*apiv1.Node{ng1_1, ng2_1}, nil, now)
428428
assert.NoError(t, err)
429429
assert.False(t, clusterstate.IsClusterHealthy())
@@ -462,6 +462,37 @@ func TestUnreadyLongAfterCreation(t *testing.T) {
462462
assert.Empty(t, upcomingRegistered["ng1"])
463463
}
464464

465+
func TestUnreadyAfterCreationWithIncreasedStartupTime(t *testing.T) {
466+
now := time.Now()
467+
468+
ng1_1 := BuildTestNode("ng1-1", 1000, 1000)
469+
SetNodeReadyState(ng1_1, true, now.Add(-time.Minute))
470+
ng2_1 := BuildTestNode("ng2-1", 1000, 1000)
471+
SetNodeReadyState(ng2_1, false, now.Add(-time.Minute))
472+
ng2_1.CreationTimestamp = metav1.Time{Time: now.Add(-30 * time.Minute)}
473+
474+
provider := testprovider.NewTestCloudProviderBuilder().Build()
475+
provider.AddNodeGroup("ng1", 1, 10, 1)
476+
provider.AddNodeGroup("ng2", 1, 10, 1)
477+
provider.AddNode("ng1", ng1_1)
478+
provider.AddNode("ng2", ng2_1)
479+
480+
assert.NotNil(t, provider)
481+
fakeClient := &fake.Clientset{}
482+
fakeLogRecorder, _ := utils.NewStatusMapRecorder(fakeClient, "kube-system", kube_record.NewFakeRecorder(5), false, "some-map")
483+
clusterstate := NewClusterStateRegistry(provider, ClusterStateRegistryConfig{
484+
MaxTotalUnreadyPercentage: 10,
485+
OkTotalUnreadyCount: 1,
486+
}, fakeLogRecorder, newBackoff(), nodegroupconfig.NewDefaultNodeGroupConfigProcessor(config.NodeGroupAutoscalingOptions{MaxNodeProvisionTime: 15 * time.Minute, MaxNodeStartupTime: 35 * time.Minute}), asyncnodegroups.NewDefaultAsyncNodeGroupStateChecker())
487+
err := clusterstate.UpdateNodes([]*apiv1.Node{ng1_1, ng2_1}, nil, now)
488+
assert.NoError(t, err)
489+
assert.Equal(t, 0, len(clusterstate.GetClusterReadiness().Unready))
490+
assert.Equal(t, 1, len(clusterstate.GetClusterReadiness().NotStarted))
491+
upcoming, upcomingRegistered := clusterstate.GetUpcomingNodes()
492+
assert.Equal(t, 0, upcoming["ng1"])
493+
assert.Empty(t, upcomingRegistered["ng1"])
494+
}
495+
465496
func TestNotStarted(t *testing.T) {
466497
now := time.Now()
467498

@@ -484,7 +515,7 @@ func TestNotStarted(t *testing.T) {
484515
clusterstate := NewClusterStateRegistry(provider, ClusterStateRegistryConfig{
485516
MaxTotalUnreadyPercentage: 10,
486517
OkTotalUnreadyCount: 1,
487-
}, fakeLogRecorder, newBackoff(), nodegroupconfig.NewDefaultNodeGroupConfigProcessor(config.NodeGroupAutoscalingOptions{MaxNodeProvisionTime: 15 * time.Minute}), asyncnodegroups.NewDefaultAsyncNodeGroupStateChecker())
518+
}, fakeLogRecorder, newBackoff(), nodegroupconfig.NewDefaultNodeGroupConfigProcessor(config.NodeGroupAutoscalingOptions{MaxNodeProvisionTime: 15 * time.Minute, MaxNodeStartupTime: 35 * time.Minute}), asyncnodegroups.NewDefaultAsyncNodeGroupStateChecker())
488519
err := clusterstate.UpdateNodes([]*apiv1.Node{ng1_1, ng2_1}, nil, now)
489520
assert.NoError(t, err)
490521
assert.Equal(t, 1, len(clusterstate.GetClusterReadiness().NotStarted))
@@ -546,7 +577,7 @@ func TestRegisterScaleDown(t *testing.T) {
546577
clusterstate := NewClusterStateRegistry(provider, ClusterStateRegistryConfig{
547578
MaxTotalUnreadyPercentage: 10,
548579
OkTotalUnreadyCount: 1,
549-
}, fakeLogRecorder, newBackoff(), nodegroupconfig.NewDefaultNodeGroupConfigProcessor(config.NodeGroupAutoscalingOptions{MaxNodeProvisionTime: 15 * time.Minute}), asyncnodegroups.NewDefaultAsyncNodeGroupStateChecker())
580+
}, fakeLogRecorder, newBackoff(), nodegroupconfig.NewDefaultNodeGroupConfigProcessor(config.NodeGroupAutoscalingOptions{MaxNodeProvisionTime: 15 * time.Minute, MaxNodeStartupTime: 35 * time.Minute}), asyncnodegroups.NewDefaultAsyncNodeGroupStateChecker())
550581
now := time.Now()
551582
clusterstate.RegisterScaleDown(provider.GetNodeGroup("ng1"), "ng1-1", now.Add(time.Minute), now)
552583
assert.Equal(t, 1, len(clusterstate.scaleDownRequests))
@@ -639,7 +670,7 @@ func TestUpcomingNodes(t *testing.T) {
639670
clusterstate := NewClusterStateRegistry(provider, ClusterStateRegistryConfig{
640671
MaxTotalUnreadyPercentage: 10,
641672
OkTotalUnreadyCount: 1,
642-
}, fakeLogRecorder, newBackoff(), nodegroupconfig.NewDefaultNodeGroupConfigProcessor(config.NodeGroupAutoscalingOptions{MaxNodeProvisionTime: 15 * time.Minute}), asyncnodegroups.NewDefaultAsyncNodeGroupStateChecker())
673+
}, fakeLogRecorder, newBackoff(), nodegroupconfig.NewDefaultNodeGroupConfigProcessor(config.NodeGroupAutoscalingOptions{MaxNodeProvisionTime: 15 * time.Minute, MaxNodeStartupTime: 15 * time.Minute}), asyncnodegroups.NewDefaultAsyncNodeGroupStateChecker())
643674
err := clusterstate.UpdateNodes([]*apiv1.Node{ng1_1, ng2_1, ng3_1, ng4_1, ng5_1, ng5_2}, nil, now)
644675
assert.NoError(t, err)
645676
assert.Empty(t, clusterstate.GetScaleUpFailures())

cluster-autoscaler/core/static_autoscaler_test.go

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -2257,7 +2257,7 @@ func TestStaticAutoscalerUpcomingScaleDownCandidates(t *testing.T) {
22572257

22582258
// Create CSR with unhealthy cluster protection effectively disabled, to guarantee we reach the tested logic.
22592259
csrConfig := clusterstate.ClusterStateRegistryConfig{OkTotalUnreadyCount: nodeGroupCount * unreadyNodesCount}
2260-
csr := clusterstate.NewClusterStateRegistry(provider, csrConfig, ctx.LogRecorder, NewBackoff(), nodegroupconfig.NewDefaultNodeGroupConfigProcessor(config.NodeGroupAutoscalingOptions{MaxNodeProvisionTime: 15 * time.Minute}), processors.AsyncNodeGroupStateChecker)
2260+
csr := clusterstate.NewClusterStateRegistry(provider, csrConfig, ctx.LogRecorder, NewBackoff(), nodegroupconfig.NewDefaultNodeGroupConfigProcessor(config.NodeGroupAutoscalingOptions{MaxNodeProvisionTime: 15 * time.Minute, MaxNodeStartupTime: 15 * time.Minute}), processors.AsyncNodeGroupStateChecker)
22612261

22622262
// Setting the Actuator is necessary for testing any scale-down logic, it shouldn't have anything to do in this test.
22632263
actuator := actuation.NewActuator(&ctx, csr, deletiontracker.NewNodeDeletionTracker(0*time.Second), options.NodeDeleteOptions{}, nil, processorstest.NewTestProcessors(&ctx).NodeGroupConfigProcessor)

cluster-autoscaler/processors/nodegroupconfig/node_group_config_processor.go

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -110,11 +110,11 @@ func (p *DelegatingNodeGroupConfigProcessor) GetMaxNodeProvisionTime(nodeGroup c
110110
return ngConfig.MaxNodeProvisionTime, nil
111111
}
112112

113-
// GetMaxNodeProvisionTime returns MaxNodeStartupTime value that should be used for a given NodeGroup.
113+
// GetMaxNodeStartupTime returns MaxNodeStartupTime value that should be used for a given NodeGroup.
114114
func (p *DelegatingNodeGroupConfigProcessor) GetMaxNodeStartupTime(nodeGroup cloudprovider.NodeGroup) (time.Duration, error) {
115115
ngConfig, err := nodeGroup.GetOptions(p.nodeGroupDefaults)
116116
if err != nil && err != cloudprovider.ErrNotImplemented {
117-
return time.Duration(0), err
117+
return 15 * time.Minute, err
118118
}
119119
if ngConfig == nil || err == cloudprovider.ErrNotImplemented {
120120
return p.nodeGroupDefaults.MaxNodeStartupTime, nil

cluster-autoscaler/processors/nodegroupconfig/node_group_config_processor_test.go

Lines changed: 15 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -47,6 +47,7 @@ func TestDelegatingNodeGroupConfigProcessor(t *testing.T) {
4747
ScaleDownGpuUtilizationThreshold: 0.6,
4848
ScaleDownUtilizationThreshold: 0.5,
4949
MaxNodeProvisionTime: 15 * time.Minute,
50+
MaxNodeStartupTime: 15 * time.Minute,
5051
IgnoreDaemonSetsUtilization: true,
5152
}
5253
ngOpts := &config.NodeGroupAutoscalingOptions{
@@ -55,6 +56,7 @@ func TestDelegatingNodeGroupConfigProcessor(t *testing.T) {
5556
ScaleDownGpuUtilizationThreshold: 0.85,
5657
ScaleDownUtilizationThreshold: 0.75,
5758
MaxNodeProvisionTime: 60 * time.Minute,
59+
MaxNodeStartupTime: 35 * time.Minute,
5860
IgnoreDaemonSetsUtilization: false,
5961
}
6062

@@ -109,6 +111,17 @@ func TestDelegatingNodeGroupConfigProcessor(t *testing.T) {
109111
assert.Equal(t, res, results[w])
110112
}
111113

114+
testMaxNodeStartupTime := func(t *testing.T, p NodeGroupConfigProcessor, ng cloudprovider.NodeGroup, w Want, we error) {
115+
res, err := p.GetMaxNodeStartupTime(ng)
116+
assert.Equal(t, err, we)
117+
results := map[Want]time.Duration{
118+
NIL: 15 * time.Minute,
119+
GLOBAL: 15 * time.Minute,
120+
NG: 35 * time.Minute,
121+
}
122+
assert.Equal(t, res, results[w])
123+
}
124+
112125
// for IgnoreDaemonSetsUtilization
113126
testIgnoreDSUtilization := func(t *testing.T, p NodeGroupConfigProcessor, ng cloudprovider.NodeGroup, w Want, we error) {
114127
res, err := p.GetIgnoreDaemonSetsUtilization(ng)
@@ -127,13 +140,15 @@ func TestDelegatingNodeGroupConfigProcessor(t *testing.T) {
127140
"ScaleDownUtilizationThreshold": testUtilizationThreshold,
128141
"ScaleDownGpuUtilizationThreshold": testGpuThreshold,
129142
"MaxNodeProvisionTime": testMaxNodeProvisionTime,
143+
"MaxNodeStartupTime": testMaxNodeStartupTime,
130144
"IgnoreDaemonSetsUtilization": testIgnoreDSUtilization,
131145
"MultipleOptions": func(t *testing.T, p NodeGroupConfigProcessor, ng cloudprovider.NodeGroup, w Want, we error) {
132146
testUnneededTime(t, p, ng, w, we)
133147
testUnreadyTime(t, p, ng, w, we)
134148
testUtilizationThreshold(t, p, ng, w, we)
135149
testGpuThreshold(t, p, ng, w, we)
136150
testMaxNodeProvisionTime(t, p, ng, w, we)
151+
testMaxNodeStartupTime(t, p, ng, w, we)
137152
testIgnoreDSUtilization(t, p, ng, w, we)
138153
},
139154
"RepeatingTheSameCallGivesConsistentResults": func(t *testing.T, p NodeGroupConfigProcessor, ng cloudprovider.NodeGroup, w Want, we error) {

0 commit comments

Comments (0)