Commit

Merge pull request containers#282 from klihub/devel/topology-aware/configurable-allocation-priority

topology-aware: configurable allocation priority
marquiz authored Mar 28, 2024
2 parents 1144de7 + b285f08 commit 8e4c550
Showing 25 changed files with 478 additions and 65 deletions.
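
In short, the change lets a pod annotation steer which CPU-allocator priority class the topology-aware policy prefers when it carves out CPUs. As a rough illustration only: the diff below confirms the `prefer-cpu-priority` key prefix and the special value `default`; the effective key suffix (`kubernetes.ResmgrKeyNamespace`) and the other accepted values (presumably the allocator's priority names) live outside this diff, so the sketch hedges on both.

```go
package example

// Hypothetical pod annotation opting in to the new preference. The
// "resource-policy.nri.io" suffix and the value "high" are assumptions;
// only the "prefer-cpu-priority" prefix and the value "default" appear
// in the diff itself.
var podAnnotations = map[string]string{
	"prefer-cpu-priority.resource-policy.nri.io": "high",
}
```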
3 changes: 2 additions & 1 deletion cmd/plugins/topology-aware/policy/coldstart_test.go
@@ -95,7 +95,8 @@ func TestColdStart(t *testing.T) {
allocations: allocations{
grants: make(map[string]Grant, 0),
},
options: &policyapi.BackendOptions{},
options: &policyapi.BackendOptions{},
cpuAllocator: &mockCPUAllocator{},
}
policy.allocations.policy = policy
policy.options.SendEvent = sendEvent
84 changes: 84 additions & 0 deletions cmd/plugins/topology-aware/policy/mocks_test.go
@@ -20,7 +20,9 @@ import (

nri "github.com/containerd/nri/pkg/api"
resmgr "github.com/containers/nri-plugins/pkg/apis/resmgr/v1alpha1"
"github.com/containers/nri-plugins/pkg/cpuallocator"
"github.com/containers/nri-plugins/pkg/resmgr/cache"
"github.com/containers/nri-plugins/pkg/sysfs"
system "github.com/containers/nri-plugins/pkg/sysfs"
"github.com/containers/nri-plugins/pkg/topology"
"github.com/containers/nri-plugins/pkg/utils/cpuset"
@@ -103,6 +105,22 @@ func (p *mockCPUPackage) DieNodeIDs(idset.ID) []idset.ID {
return []idset.ID{}
}

func (p *mockCPUPackage) DieClusterIDs(idset.ID) []idset.ID {
return []idset.ID{}
}

func (p *mockCPUPackage) DieClusterCPUSet(idset.ID, idset.ID) cpuset.CPUSet {
return cpuset.New()
}

func (p *mockCPUPackage) LogicalDieClusterIDs(idset.ID) []idset.ID {
return []idset.ID{}
}

func (p *mockCPUPackage) LogicalDieClusterCPUSet(idset.ID, idset.ID) cpuset.CPUSet {
return cpuset.New()
}

func (p *mockCPUPackage) SstInfo() *sst.SstPackageInfo {
return &sst.SstPackageInfo{}
}
@@ -156,6 +174,33 @@ func (c *mockCPU) SstClos() int {
return -1
}

func (c *mockCPU) CacheCount() int {
return 0
}
func (c *mockCPU) GetCaches() []*sysfs.Cache {
panic("unimplemented")
}
func (c *mockCPU) GetCachesByLevel(int) []*sysfs.Cache {
panic("unimplemented")
}
func (c *mockCPU) GetCacheByIndex(int) *sysfs.Cache {
panic("unimplemented")
}
func (c *mockCPU) GetLastLevelCaches() []*sysfs.Cache {
panic("unimplemented")
}
func (c *mockCPU) GetLastLevelCacheCPUSet() cpuset.CPUSet {
panic("unimplemented")
}

func (c *mockCPU) ClusterID() int {
return 0
}

func (c *mockCPU) CoreKind() sysfs.CoreKind {
return sysfs.PerformanceCore
}

type mockSystem struct {
isolatedCPU int
nodes []system.Node
@@ -188,6 +233,27 @@ func (fake *mockSystem) Discover(flags system.DiscoveryFlag) error {
func (fake *mockSystem) Package(idset.ID) system.CPUPackage {
return &mockCPUPackage{}
}
func (fake *mockSystem) PossibleCPUs() cpuset.CPUSet {
return fake.CPUSet()
}
func (fake *mockSystem) PresentCPUs() cpuset.CPUSet {
return fake.CPUSet()
}
func (fake *mockSystem) OnlineCPUs() cpuset.CPUSet {
return fake.CPUSet()
}
func (fake *mockSystem) IsolatedCPUs() cpuset.CPUSet {
return fake.Isolated()
}
func (fake *mockSystem) OfflineCPUs() cpuset.CPUSet {
return cpuset.New()
}
func (fake *mockSystem) CoreKindCPUs(sysfs.CoreKind) cpuset.CPUSet {
return cpuset.New()
}
func (fake *mockSystem) AllThreadsForCPUs(cpuset.CPUSet) cpuset.CPUSet {
return cpuset.New()
}
func (fake *mockSystem) Offlined() cpuset.CPUSet {
return cpuset.New()
}
@@ -629,3 +695,21 @@ func (m *mockCache) OpenFile(string, string, os.FileMode) (*os.File, error) {
func (m *mockCache) WriteFile(string, string, os.FileMode, []byte) error {
panic("unimplemented")
}

type mockCPUAllocator struct{}

func (m *mockCPUAllocator) AllocateCpus(from *cpuset.CPUSet, cnt int, prefer cpuallocator.CPUPriority) (cpuset.CPUSet, error) {
return cpuset.New(0), nil
}

func (m *mockCPUAllocator) ReleaseCpus(from *cpuset.CPUSet, cnt int, prefer cpuallocator.CPUPriority) (cpuset.CPUSet, error) {
return cpuset.New(0), nil
}

func (m *mockCPUAllocator) GetCPUPriorities() map[cpuallocator.CPUPriority]cpuset.CPUSet {
return map[cpuallocator.CPUPriority]cpuset.CPUSet{}
}

var (
_ cpuallocator.CPUAllocator = &mockCPUAllocator{}
)
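
For orientation, the `mockCPUAllocator` above mocks the interface that the new priority preference ultimately feeds. A minimal sketch of that hand-off follows; the helper name is illustrative, and only the `AllocateCpus` signature is taken from the interface mocked in this commit.

```go
package example

import (
	"github.com/containers/nri-plugins/pkg/cpuallocator"
	"github.com/containers/nri-plugins/pkg/utils/cpuset"
)

// allocateExclusive is an illustrative helper, not part of the commit: it shows
// how a priority resolved from the pod annotation would be passed on when
// exclusive CPUs are carved out of the free set.
func allocateExclusive(a cpuallocator.CPUAllocator, free cpuset.CPUSet, cores int,
	prio cpuallocator.CPUPriority) (cpuset.CPUSet, error) {
	// AllocateCpus removes 'cores' CPUs from 'free', preferring CPUs of the
	// requested priority class, and returns the chosen set.
	return a.AllocateCpus(&free, cores, prio)
}
```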
67 changes: 52 additions & 15 deletions cmd/plugins/topology-aware/policy/pod-preferences.go
@@ -42,6 +42,8 @@ const (
keyColdStartPreference = "cold-start"
// annotation key for reserved pools
keyReservedCPUsPreference = "prefer-reserved-cpus"
// annotation key for CPU Priority preference
keyCpuPriorityPreference = "prefer-cpu-priority"

// effective annotation key for isolated CPU preference
preferIsolatedCPUsKey = keyIsolationPreference + "." + kubernetes.ResmgrKeyNamespace
@@ -53,6 +55,8 @@ const (
preferColdStartKey = keyColdStartPreference + "." + kubernetes.ResmgrKeyNamespace
// annotation key for reserved pools
preferReservedCPUsKey = keyReservedCPUsPreference + "." + kubernetes.ResmgrKeyNamespace
// effective annotation key for CPU priority preference
preferCpuPriorityKey = keyCpuPriorityPreference + "." + kubernetes.ResmgrKeyNamespace
)

// cpuClass is a type of CPU to allocate
@@ -153,6 +157,36 @@ func sharedCPUsPreference(pod cache.Pod, container cache.Container) (bool, bool)
return preference, true
}

// cpuPrioPreference returns the CPU priority preference for the given container,
// falling back to the given default when no valid annotation is present.
func cpuPrioPreference(pod cache.Pod, container cache.Container, fallback cpuPrio) cpuPrio {
key := preferCpuPriorityKey
value, ok := pod.GetEffectiveAnnotation(key, container.GetName())

if !ok {
prio := fallback
log.Debug("%s: implicit CPU priority preference %q", container.PrettyName(), prio)
return prio
}

if value == "default" {
prio := defaultPrio
log.Debug("%s: explicit default CPU priority preference %q", container.PrettyName(), prio)
return prio
}

prio, ok := cpuPrioByName[value]
if !ok {
log.Error("%s: invalid CPU priority preference %q", container.PrettyName(), value)
prio := fallback
log.Debug("%s: implicit CPU priority preference %q", container.PrettyName(), prio)
return prio
}

log.Debug("%s: explicit CPU priority preference %q", container.PrettyName(), prio)
return prio
}

// memoryTypePreference returns what type of memory should be allocated for the container.
//
// If the effective annotations are not found, this function falls back to
@@ -370,7 +404,8 @@ func checkReservedCPUsAnnotations(c cache.Container) (bool, bool) {
// 2. fraction: amount of fractional CPU in milli-CPU
// 3. isolate: (bool) whether to prefer isolated full CPUs
// 4. cpuType: (cpuClass) class of CPU to allocate (reserved vs. normal)
func cpuAllocationPreferences(pod cache.Pod, container cache.Container) (int, int, bool, cpuClass) {
// 5. cpuPrio: preferred CPU allocator priority for CPU allocation.
func cpuAllocationPreferences(pod cache.Pod, container cache.Container) (int, int, bool, cpuClass, cpuPrio) {
//
// CPU allocation preferences for a container consist of
//
@@ -439,60 +474,62 @@ func cpuAllocationPreferences(pod cache.Pod, container cache.Container) (int, in
request := reqs.Requests[corev1.ResourceCPU]
qosClass := pod.GetQOSClass()
fraction := int(request.MilliValue())
prio := defaultPrio // ignored for fractional allocations

// easy cases: kube-system namespace, Burstable or BestEffort QoS class containers
preferReserved, explicitReservation := checkReservedCPUsAnnotations(container)
switch {
case container.PreserveCpuResources():
return 0, fraction, false, cpuPreserve
return 0, fraction, false, cpuPreserve, prio
case preferReserved == true:
return 0, fraction, false, cpuReserved
return 0, fraction, false, cpuReserved, prio
case checkReservedPoolNamespaces(namespace) && !explicitReservation:
return 0, fraction, false, cpuReserved
return 0, fraction, false, cpuReserved, prio
case qosClass == corev1.PodQOSBurstable:
return 0, fraction, false, cpuNormal
return 0, fraction, false, cpuNormal, prio
case qosClass == corev1.PodQOSBestEffort:
return 0, 0, false, cpuNormal
return 0, 0, false, cpuNormal, prio
}

// complex case: Guaranteed QoS class containers
cores := fraction / 1000
fraction = fraction % 1000
preferIsolated, explicitIsolated := isolatedCPUsPreference(pod, container)
preferShared, explicitShared := sharedCPUsPreference(pod, container)
prio = cpuPrioPreference(pod, container, defaultPrio) // ignored for fractional allocations

switch {
// sub-core CPU request
case cores == 0:
return 0, fraction, false, cpuNormal
return 0, fraction, false, cpuNormal, prio
// 1 <= CPU request < 2
case cores < 2:
// fractional allocation, potentially mixed
if fraction > 0 {
if preferShared {
return 0, 1000*cores + fraction, false, cpuNormal
return 0, 1000*cores + fraction, false, cpuNormal, prio
}
return cores, fraction, preferIsolated, cpuNormal
return cores, fraction, preferIsolated, cpuNormal, prio
}
// non-fractional allocation
if preferShared && explicitShared {
return 0, 1000*cores + fraction, false, cpuNormal
return 0, 1000*cores + fraction, false, cpuNormal, prio
}
return cores, fraction, preferIsolated, cpuNormal
return cores, fraction, preferIsolated, cpuNormal, prio
// CPU request >= 2
default:
// fractional allocation, only mixed if explicitly annotated as unshared
if fraction > 0 {
if !preferShared && explicitShared {
return cores, fraction, preferIsolated && explicitIsolated, cpuNormal
return cores, fraction, preferIsolated && explicitIsolated, cpuNormal, prio
}
return 0, 1000*cores + fraction, false, cpuNormal
return 0, 1000*cores + fraction, false, cpuNormal, prio
}
// non-fractional allocation
if preferShared && explicitShared {
return 0, 1000 * cores, false, cpuNormal
return 0, 1000 * cores, false, cpuNormal, prio
}
return cores, fraction, preferIsolated && explicitIsolated, cpuNormal
return cores, fraction, preferIsolated && explicitIsolated, cpuNormal, prio
}
}

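To make the widened five-value contract of `cpuAllocationPreferences` concrete, here is a hedged sketch of a call site in the same package; the helper name is hypothetical, the real callers live in parts of the policy not shown in this commit, and the test change below simply discards the new value with `_`.

```go
// decideCPUAllocation is a hypothetical in-package caller, for illustration only.
func decideCPUAllocation(pod cache.Pod, container cache.Container) {
	full, fraction, isolate, cpuType, prio := cpuAllocationPreferences(pod, container)
	log.Debug("%s: allocate %d exclusive CPUs + %dm shared (isolate=%v, class=%v, prio=%v)",
		container.PrettyName(), full, fraction, isolate, cpuType, prio)
}
```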
2 changes: 1 addition & 1 deletion cmd/plugins/topology-aware/policy/pod-preferences_test.go
@@ -1038,7 +1038,7 @@ func TestCpuAllocationPreferences(t *testing.T) {
}
opt.PreferIsolated, opt.PreferShared = tc.preferIsolated, tc.preferShared
opt.ReservedPoolNamespaces = tc.reservedPoolNamespaces
full, fraction, isolate, cpuType := cpuAllocationPreferences(tc.pod, tc.container)
full, fraction, isolate, cpuType, _ := cpuAllocationPreferences(tc.pod, tc.container)
if full != tc.expectedFull || fraction != tc.expectedFraction ||
isolate != tc.expectedIsolate || cpuType != tc.expectedCpuType {
t.Errorf("Expected (%v, %v, %v, %s), but got (%v, %v, %v, %s)",