WIP: Implement DRA support in Cluster Autoscaler #7350

Closed
wants to merge 21 commits into from
4992123
DRA: extract interacting with the scheduler framework out of Predicat…
towca Sep 26, 2024
2d55ff2
DRA: introduce internal NodeInfo/PodInfo with DRA objects attached
towca Sep 27, 2024
7c1f8d5
DRA: migrate all of CA to use the new internal NodeInfo/PodInfo
towca Sep 27, 2024
dfd0234
DRA: remove AddNodeWithPods from ClusterSnapshot, replace uses with A…
towca Sep 30, 2024
fafb78a
DRA: add Initialize to ClusterSnapshot, remove AddNodes
towca Sep 30, 2024
c249f46
DRA: remove redundant IsPVCUsedByPods from ClusterSnapshot
towca Sep 30, 2024
f876a51
DRA: remove AddNode from ClusterSnapshot
towca Sep 30, 2024
bb87555
DRA: refactor utils related to NodeInfos
towca Sep 30, 2024
fad6868
DRA: propagate schedulerframework handle and DRA feature flag to Clus…
towca Sep 30, 2024
26e4787
DRA: Implement a Snapshot of DRA objects, its Provider, and utils
towca Sep 26, 2024
9e32e07
DRA: grab a snapshot of DRA objects and plumb to ClusterSnapshot.Init…
towca Sep 30, 2024
006685c
DRA: propagate DRA objects through NodeInfos in node_info utils
towca Sep 30, 2024
c5edd3b
DRA: rename ClusterSnapshot methods to better reflect their purpose
towca Oct 1, 2024
bdef0a7
DRA: extend ClusterSnapshot.SchedulePod, propagate scheduling state f…
towca Oct 1, 2024
0e055c4
DRA: plumb the DRA snapshot into scheduler framework through ClusterS…
towca Oct 1, 2024
0a11e9c
DRA: implement calculating utilization for DRA resources
towca Oct 1, 2024
ef9d420
DRA: integrate BasicClusterSnapshot with the DRA snapshot
towca Oct 1, 2024
38fb034
DRA: add integration tests
towca Sep 26, 2024
7e70b41
DRA: handle expendable pods using DRA
towca Oct 3, 2024
3544bb4
DRA: handle duplicating unschedulable pods using DRA
towca Oct 4, 2024
2e7eeea
DRA TMP: vendor in the required scheduler framework changes
towca Oct 7, 2024
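
Taken together, these commits replace ClusterSnapshot's AddNode/AddNodes/AddNodeWithPods entry points with Initialize and AddNodeInfo, and thread a scheduler framework handle plus a DRA feature flag through construction. A minimal sketch of the resulting API, pieced together from the diffs below (the surrounding test scaffolding is assumed):

    // Construct a snapshot from a framework handle and the DRA flag
    // (constructor signature as introduced in this PR).
    fwHandle := framework.TestFrameworkHandleOrDie(t)
    snapshot := clustersnapshot.NewBasicClusterSnapshot(fwHandle, true /* draEnabled */)

    // Initialize replaces the removed AddNodes-style entry points.
    if err := snapshot.Initialize(nodes, scheduledPods); err != nil {
        t.Fatal(err)
    }

    // Individual nodes are added as NodeInfos rather than bare Node objects.
    node := BuildTestNode("sketch-node", 1000, 1000)
    if err := snapshot.AddNodeInfo(framework.NewTestNodeInfo(node)); err != nil {
        t.Fatal(err)
    }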
2 changes: 2 additions & 0 deletions cluster-autoscaler/config/autoscaling_options.go
@@ -301,6 +301,8 @@ type AutoscalingOptions struct {
ProvisioningRequestMaxBackoffTime time.Duration
// ProvisioningRequestMaxCacheSize is the max size for ProvisioningRequest cache that is stored for retry backoff.
ProvisioningRequestMaxBackoffCacheSize int
// EnableDynamicResources configures whether logic for handling DRA objects is enabled.
EnableDynamicResources bool
Review comment (Contributor): Would calling this EnableDynamicResourceAllocation maybe be a bit more clear?

}

// KubeClientOptions specify options for kube client
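
A hedged sketch of how this option might be surfaced to users — the flag name below is an illustration only; this diff confirms just the AutoscalingOptions field itself:

    // Hypothetical wiring; the flag name is an assumption, not part of this PR.
    enableDRA := flag.Bool("enable-dynamic-resources", false,
        "Whether logic for handling DRA objects is enabled.")
    flag.Parse()
    autoscalingOptions := config.AutoscalingOptions{
        EnableDynamicResources: *enableDRA,
    }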
6 changes: 5 additions & 1 deletion cluster-autoscaler/context/autoscaling_context.go
@@ -27,6 +27,7 @@ import (
"k8s.io/autoscaler/cluster-autoscaler/expander"
processor_callbacks "k8s.io/autoscaler/cluster-autoscaler/processors/callbacks"
"k8s.io/autoscaler/cluster-autoscaler/simulator/clustersnapshot"
"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
"k8s.io/autoscaler/cluster-autoscaler/simulator/predicatechecker"
kube_util "k8s.io/autoscaler/cluster-autoscaler/utils/kubernetes"
"k8s.io/client-go/informers"
@@ -44,7 +45,8 @@ type AutoscalingContext struct {
AutoscalingKubeClients
// CloudProvider used in CA.
CloudProvider cloudprovider.CloudProvider
// TODO(kgolab) - move away too as it's not config
Review comment (Contributor): Is this comment no longer necessary?

// FrameworkHandle can be used to interact with the scheduler framework.
FrameworkHandle *framework.Handle
// PredicateChecker to check if a pod can fit into a node.
PredicateChecker predicatechecker.PredicateChecker
// ClusterSnapshot denotes cluster snapshot used for predicate checking.
@@ -98,6 +100,7 @@ func NewResourceLimiterFromAutoscalingOptions(options config.AutoscalingOptions)
// NewAutoscalingContext returns an autoscaling context from all the necessary parameters passed via arguments
func NewAutoscalingContext(
options config.AutoscalingOptions,
fwHandle *framework.Handle,
predicateChecker predicatechecker.PredicateChecker,
clusterSnapshot clustersnapshot.ClusterSnapshot,
autoscalingKubeClients *AutoscalingKubeClients,
@@ -112,6 +115,7 @@
AutoscalingOptions: options,
CloudProvider: cloudProvider,
AutoscalingKubeClients: *autoscalingKubeClients,
FrameworkHandle: fwHandle,
PredicateChecker: predicateChecker,
ClusterSnapshot: clusterSnapshot,
ExpanderStrategy: expanderStrategy,
15 changes: 14 additions & 1 deletion cluster-autoscaler/core/autoscaler.go
@@ -34,6 +34,7 @@ import (
ca_processors "k8s.io/autoscaler/cluster-autoscaler/processors"
"k8s.io/autoscaler/cluster-autoscaler/simulator/clustersnapshot"
"k8s.io/autoscaler/cluster-autoscaler/simulator/drainability/rules"
"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
"k8s.io/autoscaler/cluster-autoscaler/simulator/options"
"k8s.io/autoscaler/cluster-autoscaler/simulator/predicatechecker"
"k8s.io/autoscaler/cluster-autoscaler/utils/backoff"
@@ -49,6 +50,7 @@ type AutoscalerOptions struct {
InformerFactory informers.SharedInformerFactory
AutoscalingKubeClients *context.AutoscalingKubeClients
CloudProvider cloudprovider.CloudProvider
FrameworkHandle *framework.Handle
PredicateChecker predicatechecker.PredicateChecker
ClusterSnapshot clustersnapshot.ClusterSnapshot
ExpanderStrategy expander.Strategy
@@ -86,6 +88,7 @@ func NewAutoscaler(opts AutoscalerOptions, informerFactory informers.SharedInfor
}
return NewStaticAutoscaler(
opts.AutoscalingOptions,
opts.FrameworkHandle,
opts.PredicateChecker,
opts.ClusterSnapshot,
opts.AutoscalingKubeClients,
@@ -114,8 +117,18 @@ func initializeDefaultOptions(opts *AutoscalerOptions, informerFactory informers
if opts.AutoscalingKubeClients == nil {
opts.AutoscalingKubeClients = context.NewAutoscalingKubeClients(opts.AutoscalingOptions, opts.KubeClient, opts.InformerFactory)
}
if opts.FrameworkHandle == nil {
fwHandle, err := framework.NewHandle(opts.InformerFactory, opts.SchedulerConfig)
if err != nil {
return err
}
opts.FrameworkHandle = fwHandle
}
if opts.ClusterSnapshot == nil {
opts.ClusterSnapshot = clustersnapshot.NewBasicClusterSnapshot()
opts.ClusterSnapshot = clustersnapshot.NewBasicClusterSnapshot(opts.FrameworkHandle, opts.EnableDynamicResources)
}
if opts.PredicateChecker == nil {
opts.PredicateChecker = predicatechecker.NewSchedulerBasedPredicateChecker(opts.FrameworkHandle)
}
if opts.RemainingPdbTracker == nil {
opts.RemainingPdbTracker = pdb.NewBasicRemainingPdbTracker()
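
The net effect of this defaulting is that a caller leaving FrameworkHandle, ClusterSnapshot, and PredicateChecker unset gets all three derived from a single framework handle. A minimal sketch of that chain, assuming informerFactory and schedulerConfig are already constructed (signatures as used in the hunks above):

    fwHandle, err := framework.NewHandle(informerFactory, schedulerConfig)
    if err != nil {
        return err
    }
    // The snapshot and the predicate checker share the same handle, keeping
    // predicate checking consistent with the snapshot's scheduler state.
    snapshot := clustersnapshot.NewBasicClusterSnapshot(fwHandle, opts.EnableDynamicResources)
    checker := predicatechecker.NewSchedulerBasedPredicateChecker(fwHandle)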
@@ -26,6 +26,7 @@ import (
"k8s.io/autoscaler/cluster-autoscaler/core/scaledown"
"k8s.io/autoscaler/cluster-autoscaler/core/scaledown/status"
"k8s.io/autoscaler/cluster-autoscaler/simulator/clustersnapshot"
"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
"k8s.io/autoscaler/cluster-autoscaler/utils/errors"
. "k8s.io/autoscaler/cluster-autoscaler/utils/test"
)
@@ -267,7 +268,7 @@ func TestCurrentlyDrainedNodesPodListProcessor(t *testing.T) {
t.Run(tc.name, func(t *testing.T) {
ctx := context.AutoscalingContext{
ScaleDownActuator: &mockActuator{&mockActuationStatus{tc.drainedNodes}},
ClusterSnapshot: clustersnapshot.NewBasicClusterSnapshot(),
ClusterSnapshot: clustersnapshot.NewBasicClusterSnapshot(framework.TestFrameworkHandleOrDie(t), true),
}
clustersnapshot.InitializeClusterSnapshotOrDie(t, ctx.ClusterSnapshot, tc.nodes, tc.pods)

@@ -25,6 +25,7 @@ import (
"k8s.io/autoscaler/cluster-autoscaler/config"
"k8s.io/autoscaler/cluster-autoscaler/context"
"k8s.io/autoscaler/cluster-autoscaler/simulator/clustersnapshot"
"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
kube_util "k8s.io/autoscaler/cluster-autoscaler/utils/kubernetes"
"k8s.io/autoscaler/cluster-autoscaler/utils/test"
)
@@ -108,7 +109,7 @@ func TestFilterOutExpendable(t *testing.T) {
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
processor := NewFilterOutExpendablePodListProcessor()
snapshot := clustersnapshot.NewBasicClusterSnapshot()
snapshot := clustersnapshot.NewBasicClusterSnapshot(framework.TestFrameworkHandleOrDie(t), true)
err := snapshot.Initialize(tc.nodes, nil)
assert.NoError(t, err)

@@ -172,9 +172,9 @@ func TestFilterOutSchedulable(t *testing.T) {

for tn, tc := range testCases {
t.Run(tn, func(t *testing.T) {
clusterSnapshot := clustersnapshot.NewBasicClusterSnapshot()
predicateChecker, err := predicatechecker.NewTestPredicateChecker()
assert.NoError(t, err)
fwHandle := framework.TestFrameworkHandleOrDie(t)
clusterSnapshot := clustersnapshot.NewBasicClusterSnapshot(fwHandle, true)
predicateChecker := predicatechecker.NewSchedulerBasedPredicateChecker(fwHandle)

var allExpectedScheduledPods []*apiv1.Pod
allExpectedScheduledPods = append(allExpectedScheduledPods, tc.expectedScheduledPods...)
@@ -248,9 +248,13 @@ func BenchmarkFilterOutSchedulable(b *testing.B) {
pendingPods: 12000,
},
}
snapshots := map[string]func() clustersnapshot.ClusterSnapshot{
"basic": func() clustersnapshot.ClusterSnapshot { return clustersnapshot.NewBasicClusterSnapshot() },
"delta": func() clustersnapshot.ClusterSnapshot { return clustersnapshot.NewDeltaClusterSnapshot() },
snapshots := map[string]func(fwHandle *framework.Handle) clustersnapshot.ClusterSnapshot{
"basic": func(fwHandle *framework.Handle) clustersnapshot.ClusterSnapshot {
return clustersnapshot.NewBasicClusterSnapshot(fwHandle, true)
},
"delta": func(fwHandle *framework.Handle) clustersnapshot.ClusterSnapshot {
return clustersnapshot.NewDeltaClusterSnapshot(fwHandle, true)
},
}
for snapshotName, snapshotFactory := range snapshots {
for _, tc := range tests {
@@ -275,10 +279,11 @@ func BenchmarkFilterOutSchedulable(b *testing.B) {
}
}

predicateChecker, err := predicatechecker.NewTestPredicateChecker()
fwHandle, err := framework.TestFrameworkHandle()
assert.NoError(b, err)
predicateChecker := predicatechecker.NewSchedulerBasedPredicateChecker(fwHandle)

clusterSnapshot := snapshotFactory()
clusterSnapshot := snapshotFactory(fwHandle)
if err := clusterSnapshot.Initialize(nodes, scheduledPods); err != nil {
assert.NoError(b, err)
}
2 changes: 1 addition & 1 deletion cluster-autoscaler/core/scaledown/actuation/actuator.go
@@ -356,7 +356,7 @@ func (a *Actuator) taintNode(node *apiv1.Node) error {
}

func (a *Actuator) createSnapshot(nodes []*apiv1.Node) (clustersnapshot.ClusterSnapshot, error) {
snapshot := clustersnapshot.NewBasicClusterSnapshot()
snapshot := clustersnapshot.NewBasicClusterSnapshot(a.ctx.FrameworkHandle, a.ctx.EnableDynamicResources)
pods, err := a.ctx.AllPodLister().List()
if err != nil {
return nil, err
2 changes: 1 addition & 1 deletion cluster-autoscaler/core/scaledown/actuation/drain_test.go
@@ -611,7 +611,7 @@ func TestPodsToEvict(t *testing.T) {
},
} {
t.Run(tn, func(t *testing.T) {
snapshot := clustersnapshot.NewBasicClusterSnapshot()
snapshot := clustersnapshot.NewBasicClusterSnapshot(framework.TestFrameworkHandleOrDie(t), true)
node := BuildTestNode("test-node", 1000, 1000)
err := snapshot.AddNodeInfo(framework.NewTestNodeInfo(node, tc.pods...))
if err != nil {
2 changes: 2 additions & 0 deletions cluster-autoscaler/core/static_autoscaler.go
@@ -131,6 +131,7 @@ func (callbacks *staticAutoscalerProcessorCallbacks) reset() {
// NewStaticAutoscaler creates an instance of Autoscaler filled with provided parameters
func NewStaticAutoscaler(
opts config.AutoscalingOptions,
fwHandle *framework.Handle,
predicateChecker predicatechecker.PredicateChecker,
clusterSnapshot clustersnapshot.ClusterSnapshot,
autoscalingKubeClients *context.AutoscalingKubeClients,
@@ -154,6 +155,7 @@
processorCallbacks := newStaticAutoscalerProcessorCallbacks()
autoscalingContext := context.NewAutoscalingContext(
opts,
fwHandle,
predicateChecker,
clusterSnapshot,
autoscalingKubeClients,
5 changes: 3 additions & 2 deletions cluster-autoscaler/core/test/common.go
@@ -214,15 +214,16 @@ func NewScaleTestAutoscalingContext(
if err != nil {
return context.AutoscalingContext{}, err
}
predicateChecker, err := predicatechecker.NewTestPredicateChecker()
fwHandle, err := framework.TestFrameworkHandle()
if err != nil {
return context.AutoscalingContext{}, err
}
predicateChecker := predicatechecker.NewSchedulerBasedPredicateChecker(fwHandle)
remainingPdbTracker := pdb.NewBasicRemainingPdbTracker()
if debuggingSnapshotter == nil {
debuggingSnapshotter = debuggingsnapshot.NewDebuggingSnapshotter(false)
}
clusterSnapshot := clustersnapshot.NewBasicClusterSnapshot()
clusterSnapshot := clustersnapshot.NewBasicClusterSnapshot(fwHandle, options.EnableDynamicResources)
return context.AutoscalingContext{
AutoscalingOptions: options,
AutoscalingKubeClients: context.AutoscalingKubeClients{
15 changes: 8 additions & 7 deletions cluster-autoscaler/estimator/binpacking_estimator_test.go
@@ -209,13 +209,13 @@ func TestBinpackingEstimate(t *testing.T) {
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
clusterSnapshot := clustersnapshot.NewBasicClusterSnapshot()
fwHandle := framework.TestFrameworkHandleOrDie(t)
clusterSnapshot := clustersnapshot.NewBasicClusterSnapshot(fwHandle, true)
// Add one node in different zone to trigger topology spread constraints
err := clusterSnapshot.AddNodeInfo(framework.NewTestNodeInfo(makeNode(100, 100, 10, "oldnode", "zone-jupiter")))
assert.NoError(t, err)

predicateChecker, err := predicatechecker.NewTestPredicateChecker()
assert.NoError(t, err)
predicateChecker := predicatechecker.NewSchedulerBasedPredicateChecker(fwHandle)
limiter := NewThresholdBasedEstimationLimiter([]Threshold{NewStaticThreshold(tc.maxNodes, time.Duration(0))})
processor := NewDecreasingPodOrderer()
estimator := NewBinpackingNodeEstimator(predicateChecker, clusterSnapshot, limiter, processor, nil /* EstimationContext */, nil /* EstimationAnalyserFunc */)
@@ -265,12 +265,13 @@ func BenchmarkBinpackingEstimate(b *testing.B) {
}

for i := 0; i < b.N; i++ {
clusterSnapshot := clustersnapshot.NewBasicClusterSnapshot()
err := clusterSnapshot.AddNodeInfo(framework.NewTestNodeInfo(makeNode(100, 100, 10, "oldnode", "zone-jupiter")))
fwHandle, err := framework.TestFrameworkHandle()
assert.NoError(b, err)

predicateChecker, err := predicatechecker.NewTestPredicateChecker()
clusterSnapshot := clustersnapshot.NewBasicClusterSnapshot(fwHandle, true)
err = clusterSnapshot.AddNodeInfo(framework.NewTestNodeInfo(makeNode(100, 100, 10, "oldnode", "zone-jupiter")))
assert.NoError(b, err)

predicateChecker := predicatechecker.NewSchedulerBasedPredicateChecker(fwHandle)
limiter := NewThresholdBasedEstimationLimiter([]Threshold{NewStaticThreshold(maxNodes, time.Duration(0))})
processor := NewDecreasingPodOrderer()
estimator := NewBinpackingNodeEstimator(predicateChecker, clusterSnapshot, limiter, processor, nil /* EstimationContext */, nil /* EstimationAnalyserFunc */)
5 changes: 3 additions & 2 deletions cluster-autoscaler/main.go
@@ -495,11 +495,12 @@ func buildAutoscaler(debuggingSnapshotter debuggingsnapshot.DebuggingSnapshotter

opts := core.AutoscalerOptions{
AutoscalingOptions: autoscalingOptions,
ClusterSnapshot: clustersnapshot.NewDeltaClusterSnapshot(),
FrameworkHandle: fwHandle,
ClusterSnapshot: clustersnapshot.NewDeltaClusterSnapshot(fwHandle, autoscalingOptions.EnableDynamicResources),
PredicateChecker: predicatechecker.NewSchedulerBasedPredicateChecker(fwHandle),
KubeClient: kubeClient,
InformerFactory: informerFactory,
DebuggingSnapshotter: debuggingSnapshotter,
PredicateChecker: predicatechecker.NewSchedulerBasedPredicateChecker(fwHandle),
DeleteOptions: deleteOptions,
DrainabilityRules: drainabilityRules,
ScaleUpOrchestrator: orchestrator.New(),
@@ -76,12 +76,12 @@ func TestGetNodeInfosForGroups(t *testing.T) {
podLister := kube_util.NewTestPodLister([]*apiv1.Pod{})
registry := kube_util.NewListerRegistry(nil, nil, podLister, nil, nil, nil, nil, nil, nil)

predicateChecker, err := predicatechecker.NewTestPredicateChecker()
assert.NoError(t, err)
fwHandle := framework.TestFrameworkHandleOrDie(t)
predicateChecker := predicatechecker.NewSchedulerBasedPredicateChecker(fwHandle)

nodes := []*apiv1.Node{justReady5, unready4, unready3, ready2, ready1}
snapshot := clustersnapshot.NewBasicClusterSnapshot()
err = snapshot.Initialize(nodes, nil)
snapshot := clustersnapshot.NewBasicClusterSnapshot(fwHandle, true)
err := snapshot.Initialize(nodes, nil)
assert.NoError(t, err)

ctx := context.AutoscalingContext{
@@ -114,7 +114,7 @@
// Test for a nodegroup without nodes and TemplateNodeInfo not implemented by cloud provider
ctx = context.AutoscalingContext{
CloudProvider: provider2,
ClusterSnapshot: clustersnapshot.NewBasicClusterSnapshot(),
ClusterSnapshot: clustersnapshot.NewBasicClusterSnapshot(fwHandle, true),
PredicateChecker: predicateChecker,
AutoscalingKubeClients: context.AutoscalingKubeClients{
ListerRegistry: registry,
@@ -167,12 +167,12 @@ func TestGetNodeInfosForGroupsCache(t *testing.T) {
podLister := kube_util.NewTestPodLister([]*apiv1.Pod{})
registry := kube_util.NewListerRegistry(nil, nil, podLister, nil, nil, nil, nil, nil, nil)

predicateChecker, err := predicatechecker.NewTestPredicateChecker()
assert.NoError(t, err)
fwHandle := framework.TestFrameworkHandleOrDie(t)
predicateChecker := predicatechecker.NewSchedulerBasedPredicateChecker(fwHandle)

nodes := []*apiv1.Node{unready4, unready3, ready2, ready1}
snapshot := clustersnapshot.NewBasicClusterSnapshot()
err = snapshot.Initialize(nodes, nil)
snapshot := clustersnapshot.NewBasicClusterSnapshot(fwHandle, true)
err := snapshot.Initialize(nodes, nil)
assert.NoError(t, err)

// Fill cache
@@ -261,12 +261,13 @@ func TestGetNodeInfosCacheExpired(t *testing.T) {
provider := testprovider.NewTestAutoprovisioningCloudProvider(nil, nil, nil, nil, nil, nil)
podLister := kube_util.NewTestPodLister([]*apiv1.Pod{})
registry := kube_util.NewListerRegistry(nil, nil, podLister, nil, nil, nil, nil, nil, nil)
predicateChecker, err := predicatechecker.NewTestPredicateChecker()
assert.NoError(t, err)

fwHandle := framework.TestFrameworkHandleOrDie(t)
predicateChecker := predicatechecker.NewSchedulerBasedPredicateChecker(fwHandle)

nodes := []*apiv1.Node{ready1}
snapshot := clustersnapshot.NewBasicClusterSnapshot()
err = snapshot.Initialize(nodes, nil)
snapshot := clustersnapshot.NewBasicClusterSnapshot(fwHandle, true)
err := snapshot.Initialize(nodes, nil)
assert.NoError(t, err)

ctx := context.AutoscalingContext{
@@ -112,7 +112,7 @@ func TestTargetCountInjectionPodListProcessor(t *testing.T) {
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
p := NewPodInjectionPodListProcessor(podinjectionbackoff.NewFakePodControllerRegistry())
clusterSnapshot := clustersnapshot.NewDeltaClusterSnapshot()
clusterSnapshot := clustersnapshot.NewDeltaClusterSnapshot(framework.TestFrameworkHandleOrDie(t), true)
err := clusterSnapshot.AddNodeInfo(framework.NewTestNodeInfo(node, tc.scheduledPods...))
assert.NoError(t, err)
ctx := context.AutoscalingContext{
9 changes: 5 additions & 4 deletions cluster-autoscaler/simulator/cluster_test.go
@@ -22,6 +22,7 @@ import (
"time"

"k8s.io/autoscaler/cluster-autoscaler/simulator/clustersnapshot"
"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
"k8s.io/autoscaler/cluster-autoscaler/simulator/options"
"k8s.io/autoscaler/cluster-autoscaler/simulator/predicatechecker"
"k8s.io/autoscaler/cluster-autoscaler/utils/drain"
@@ -56,7 +57,7 @@ func TestFindEmptyNodes(t *testing.T) {
types.ConfigMirrorAnnotationKey: "",
}

clusterSnapshot := clustersnapshot.NewBasicClusterSnapshot()
clusterSnapshot := clustersnapshot.NewBasicClusterSnapshot(framework.TestFrameworkHandleOrDie(t), true)
clustersnapshot.InitializeClusterSnapshotOrDie(t, clusterSnapshot, []*apiv1.Node{nodes[0], nodes[1], nodes[2], nodes[3]}, []*apiv1.Pod{pod1, pod2})
testTime := time.Date(2020, time.December, 18, 17, 0, 0, 0, time.UTC)
r := NewRemovalSimulator(nil, clusterSnapshot, nil, testDeleteOptions(), nil, false)
@@ -143,9 +144,9 @@ func TestFindNodesToRemove(t *testing.T) {
PodsToReschedule: []*apiv1.Pod{pod1, pod2},
}

clusterSnapshot := clustersnapshot.NewBasicClusterSnapshot()
predicateChecker, err := predicatechecker.NewTestPredicateChecker()
assert.NoError(t, err)
fwHandle := framework.TestFrameworkHandleOrDie(t)
clusterSnapshot := clustersnapshot.NewBasicClusterSnapshot(fwHandle, true)
predicateChecker := predicatechecker.NewSchedulerBasedPredicateChecker(fwHandle)

tests := []findNodesToRemoveTestConfig{
{
8 changes: 5 additions & 3 deletions cluster-autoscaler/simulator/clustersnapshot/basic.go
@@ -28,7 +28,9 @@ import (
// BasicClusterSnapshot is a simple reference implementation of ClusterSnapshot.
// It is inefficient, but hopefully bug-free and good for initial testing.
type BasicClusterSnapshot struct {
data []*internalBasicSnapshotData
data []*internalBasicSnapshotData
draEnabled bool
fwHandle *framework.Handle
}

type internalBasicSnapshotData struct {
@@ -194,8 +196,8 @@ func (data *internalBasicSnapshotData) removePod(namespace, podName, nodeName st
}

// NewBasicClusterSnapshot creates instances of BasicClusterSnapshot.
func NewBasicClusterSnapshot() *BasicClusterSnapshot {
snapshot := &BasicClusterSnapshot{}
func NewBasicClusterSnapshot(fwHandle *framework.Handle, draEnabled bool) *BasicClusterSnapshot {
snapshot := &BasicClusterSnapshot{fwHandle: fwHandle, draEnabled: draEnabled}
snapshot.Clear()
return snapshot
}
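
The tests and benchmarks in this PR feed this two-argument constructor through two helper patterns; a sketch of both, as they appear in the diffs above:

    // In tests: build the handle or fail the test immediately.
    testSnapshot := clustersnapshot.NewBasicClusterSnapshot(framework.TestFrameworkHandleOrDie(t), true)

    // In benchmarks: TestFrameworkHandle returns an error instead of taking *testing.T.
    fwHandle, err := framework.TestFrameworkHandle()
    assert.NoError(b, err)
    benchSnapshot := clustersnapshot.NewBasicClusterSnapshot(fwHandle, true)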