diff --git a/pkg/controllers/provisioning/provisioner.go b/pkg/controllers/provisioning/provisioner.go
index a51b04a4f..ca6f3b3cd 100644
--- a/pkg/controllers/provisioning/provisioner.go
+++ b/pkg/controllers/provisioning/provisioner.go
@@ -21,6 +21,7 @@ import (
 	"errors"
 	"fmt"
 	"strings"
+	"sync"
 	"time"
 
 	"github.com/awslabs/operatorpkg/option"
@@ -176,33 +177,52 @@ func (p *Provisioner) GetPendingPods(ctx context.Context) ([]*corev1.Pod, error)
 func (p *Provisioner) consolidationWarnings(ctx context.Context, pods []*corev1.Pod) {
 	// We have pending pods that have preferred anti-affinity or topology spread constraints. These can interact
 	// unexpectedly with consolidation, so we warn once per hour when we see these pods.
-	antiAffinityPods := lo.FilterMap(pods, func(po *corev1.Pod, _ int) (client.ObjectKey, bool) {
-		if po.Spec.Affinity != nil && po.Spec.Affinity.PodAntiAffinity != nil {
-			if len(po.Spec.Affinity.PodAntiAffinity.PreferredDuringSchedulingIgnoredDuringExecution) != 0 {
-				if p.cm.HasChanged(string(po.UID), "pod-antiaffinity") {
-					return client.ObjectKeyFromObject(po), true
+
+	wg := sync.WaitGroup{}
+
+	wg.Add(2)
+
+	go func() {
+		antiAffinityPods := lo.FilterMap(pods, func(po *corev1.Pod, _ int) (client.ObjectKey, bool) {
+			if po.Spec.Affinity != nil && po.Spec.Affinity.PodAntiAffinity != nil {
+				if len(po.Spec.Affinity.PodAntiAffinity.PreferredDuringSchedulingIgnoredDuringExecution) != 0 {
+					if p.cm.HasChanged(string(po.UID), "pod-antiaffinity") {
+						return client.ObjectKeyFromObject(po), true
+					}
 				}
 			}
+			return client.ObjectKey{}, false
+		})
+
+		// We reduce the amount of logging that we do per-pod by grouping log lines like this together
+		if len(antiAffinityPods) > 0 {
+			log.FromContext(ctx).WithValues("pods", pretty.Slice(antiAffinityPods, 10)).Info("pod(s) have a preferred Anti-Affinity which can prevent consolidation")
 		}
-		return client.ObjectKey{}, false
-	})
-	topologySpreadPods := lo.FilterMap(pods, func(po *corev1.Pod, _ int) (client.ObjectKey, bool) {
-		for _, tsc := range po.Spec.TopologySpreadConstraints {
-			if tsc.WhenUnsatisfiable == corev1.ScheduleAnyway {
-				if p.cm.HasChanged(string(po.UID), "pod-topology-spread") {
-					return client.ObjectKeyFromObject(po), true
+
+		wg.Done()
+	}()
+
+	go func() {
+		topologySpreadPods := lo.FilterMap(pods, func(po *corev1.Pod, _ int) (client.ObjectKey, bool) {
+			for _, tsc := range po.Spec.TopologySpreadConstraints {
+				if tsc.WhenUnsatisfiable == corev1.ScheduleAnyway {
+					if p.cm.HasChanged(string(po.UID), "pod-topology-spread") {
+						return client.ObjectKeyFromObject(po), true
+					}
 				}
 			}
+			return client.ObjectKey{}, false
+		})
+
+		// We reduce the amount of logging that we do per-pod by grouping log lines like this together
+		if len(topologySpreadPods) > 0 {
+			log.FromContext(ctx).WithValues("pods", pretty.Slice(topologySpreadPods, 10)).Info("pod(s) have a preferred TopologySpreadConstraint which can prevent consolidation")
 		}
-		return client.ObjectKey{}, false
-	})
-	// We reduce the amount of logging that we do per-pod by grouping log lines like this together
-	if len(antiAffinityPods) > 0 {
-		log.FromContext(ctx).WithValues("pods", pretty.Slice(antiAffinityPods, 10)).Info("pod(s) have a preferred Anti-Affinity which can prevent consolidation")
-	}
-	if len(topologySpreadPods) > 0 {
-		log.FromContext(ctx).WithValues("pods", pretty.Slice(topologySpreadPods, 10)).Info("pod(s) have a preferred TopologySpreadConstraint which can prevent consolidation")
-	}
+
+		wg.Done()
+	}()
+
+	wg.Wait()
 }
 
 var ErrNodePoolsNotFound = errors.New("no nodepools found")