Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

CNF-14084: controller: update MachineConfig reconciliation #1025

Merged
merged 10 commits into from
Oct 9, 2024
80 changes: 54 additions & 26 deletions controllers/numaresourcesoperator_controller.go
Original file line number Diff line number Diff line change
Expand Up @@ -22,10 +22,12 @@ import (
"reflect"
"time"

"github.com/k8stopologyawareschedwg/deployer/pkg/assets/selinux"
"github.com/k8stopologyawareschedwg/deployer/pkg/deployer/platform"
"github.com/k8stopologyawareschedwg/deployer/pkg/manifests"
apimanifests "github.com/k8stopologyawareschedwg/deployer/pkg/manifests/api"
rtemanifests "github.com/k8stopologyawareschedwg/deployer/pkg/manifests/rte"
k8swgrteupdate "github.com/k8stopologyawareschedwg/deployer/pkg/objectupdate/rte"
securityv1 "github.com/openshift/api/security/v1"
machineconfigv1 "github.com/openshift/machine-config-operator/pkg/apis/machineconfiguration.openshift.io/v1"
"github.com/pkg/errors"
Expand All @@ -52,6 +54,7 @@ import (

nropv1 "github.com/openshift-kni/numaresources-operator/api/numaresourcesoperator/v1"
nodegroupv1 "github.com/openshift-kni/numaresources-operator/api/numaresourcesoperator/v1/helper/nodegroup"
"github.com/openshift-kni/numaresources-operator/internal/api/annotations"
"github.com/openshift-kni/numaresources-operator/internal/relatedobjects"
"github.com/openshift-kni/numaresources-operator/pkg/apply"
"github.com/openshift-kni/numaresources-operator/pkg/hash"
Expand Down Expand Up @@ -81,6 +84,8 @@ type NUMAResourcesOperatorReconciler struct {
ForwardMCPConds bool
}

// mcpWaitForUpdatedFunc reports whether the given MachineConfigPool is
// considered up to date for the NUMAResourcesOperator instance with the
// given name. Implementations differ depending on whether the instance's
// MachineConfig was applied or deleted.
type mcpWaitForUpdatedFunc func(string, *machineconfigv1.MachineConfigPool) bool

// TODO: narrow down

// Namespace Scoped
Expand Down Expand Up @@ -211,16 +216,16 @@ func (r *NUMAResourcesOperatorReconciler) reconcileResourceAPI(ctx context.Conte
func (r *NUMAResourcesOperatorReconciler) reconcileResourceMachineConfig(ctx context.Context, instance *nropv1.NUMAResourcesOperator, trees []nodegroupv1.Tree) (bool, ctrl.Result, string, error) {
// we need to sync machine configs first and wait for the MachineConfigPool updates
// before checking additional components for updates
_, err := r.syncMachineConfigs(ctx, instance, trees)
mcpUpdatedFunc, err := r.syncMachineConfigs(ctx, instance, trees)
if err != nil {
r.Recorder.Eventf(instance, corev1.EventTypeWarning, "FailedMCSync", "Failed to set up machine configuration for worker nodes: %v", err)
return true, ctrl.Result{}, status.ConditionDegraded, errors.Wrapf(err, "failed to sync machine configs")
}
r.Recorder.Eventf(instance, corev1.EventTypeNormal, "SuccessfulMCSync", "Enabled machine configuration for worker nodes")

// MCO need to update SELinux context and other stuff, and need to trigger a reboot.
// MCO needs to update the SELinux context removal and other stuff, and need to trigger a reboot.
// It can take a while.
mcpStatuses, allMCPsUpdated := syncMachineConfigPoolsStatuses(instance.Name, trees, r.ForwardMCPConds)
mcpStatuses, allMCPsUpdated := syncMachineConfigPoolsStatuses(instance.Name, trees, r.ForwardMCPConds, mcpUpdatedFunc)
instance.Status.MachineConfigPools = mcpStatuses
if !allMCPsUpdated {
// the Machine Config Pool still did not apply the machine config, wait for one minute
Expand Down Expand Up @@ -318,41 +323,54 @@ func (r *NUMAResourcesOperatorReconciler) syncNodeResourceTopologyAPI(ctx contex
return (updatedCount == len(objStates)), err
}

func (r *NUMAResourcesOperatorReconciler) syncMachineConfigs(ctx context.Context, instance *nropv1.NUMAResourcesOperator, trees []nodegroupv1.Tree) (bool, error) {
func (r *NUMAResourcesOperatorReconciler) syncMachineConfigs(ctx context.Context, instance *nropv1.NUMAResourcesOperator, trees []nodegroupv1.Tree) (mcpWaitForUpdatedFunc, error) {
klog.V(4).InfoS("Machine Config Sync start", "trees", len(trees))
defer klog.V(4).Info("Machine Config Sync stop")

existing := rtestate.FromClient(ctx, r.Client, r.Platform, r.RTEManifests, instance, trees, r.Namespace)

var err error
var updatedCount int
var mcpUpdatedFunc mcpWaitForUpdatedFunc
objStates := existing.MachineConfigsState(r.RTEManifests)
for _, objState := range objStates {
if err2 := controllerutil.SetControllerReference(instance, objState.Desired, r.Scheme); err2 != nil {
err = errors.Wrapf(err2, "failed to set controller reference to %s %s", objState.Desired.GetNamespace(), objState.Desired.GetName())
break
// Since 4.18 we're using a built-in SELinux policy,
// so the MachineConfig which applies the custom policy is no longer necessary.
// In case of operator upgrade from 4.1X → 4.18, it's necessary to remove the old MachineConfig,
// unless an emergency annotation is provided which forces the operator to use custom policy
if !annotations.IsCustomPolicyEnabled(instance.Annotations) {
for _, objState := range objStates {
if !objState.IsNotFoundError() {
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

this looks fragile. We want to target only the MachineConfig. So it's probably better to add new functionalities to pkg/objectstate/rte

Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I'm also thinking about deleteUnusedMachineConfigs. We can perhaps reuse that function - or even better get rid of it.

Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

...or not? #1029

Copy link
Collaborator Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

What you did in #1029 is true, but we still need to remove the machine config explicitly when moving to built-in policy, because the CR remain in the cluster.
And as long as it persists on the cluster, the machine config won't go anywhere.

Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

OK, let's try to remove them in the reconciliation loop rather than explicitly in the deleteXXX functions.

Copy link
Collaborator Author

@Tal-or Tal-or Oct 7, 2024

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

this looks fragile. We want to target only the MachineConfig. So it's probably better to add new functionalities to pkg/objectstate/rte

We do. You have `objStates := existing.MachineConfigsState(r.RTEManifests)`, which filters the MachineConfigs out of all the objects for you. (line 331 in the code)

OK, let's try to remove them in the reconciliation loop rather than explicitly in the deleteXXX functions.

This code is part of the reconciliation loop
it's called in syncMachineConfigs

Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Yes, I commented before starting the effort in #1029. It seems this approach of yours is the best one on the table, and the removal of the deleteXXX functions should be tried (and preferably done!) on the side.

klog.V(4).InfoS("delete Machine Config", "MachineConfig", objState.Desired.GetName())
if err2 := r.Client.Delete(ctx, objState.Desired); err2 != nil {
err = errors.Wrapf(err2, "could not delete MachineConfig %s", objState.Desired.GetName())
}
klog.V(4).InfoS("Machine Config deleted successfully", "MachineConfig", objState.Desired.GetName())
} // if not found, it's a fresh installation of 4.18+ (no upgrade)
}
mcpUpdatedFunc = IsMachineConfigPoolUpdatedAfterDeletion
} else {
for _, objState := range objStates {
if err2 := controllerutil.SetControllerReference(instance, objState.Desired, r.Scheme); err2 != nil {
err = errors.Wrapf(err2, "failed to set controller reference to %s %s", objState.Desired.GetNamespace(), objState.Desired.GetName())
break
}

if err2 := validateMachineConfigLabels(objState.Desired, trees); err2 != nil {
err = errors.Wrapf(err2, "machine conig %q labels validation failed", objState.Desired.GetName())
break
}
if err2 := validateMachineConfigLabels(objState.Desired, trees); err2 != nil {
err = errors.Wrapf(err2, "machine conig %q labels validation failed", objState.Desired.GetName())
break
}

_, updated, err2 := apply.ApplyObject(ctx, r.Client, objState)
if err2 != nil {
err = errors.Wrapf(err2, "could not apply (%s) %s/%s", objState.Desired.GetObjectKind().GroupVersionKind(), objState.Desired.GetNamespace(), objState.Desired.GetName())
break
}
if !updated {
continue
_, _, err2 := apply.ApplyObject(ctx, r.Client, objState)
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

let's see if we can add apply.DeleteObject. I'll think about it

if err2 != nil {
err = errors.Wrapf(err2, "could not apply (%s) %s/%s", objState.Desired.GetObjectKind().GroupVersionKind(), objState.Desired.GetNamespace(), objState.Desired.GetName())
break
}
}
updatedCount++
mcpUpdatedFunc = IsMachineConfigPoolUpdated
Tal-or marked this conversation as resolved.
Show resolved Hide resolved
}

return (updatedCount == len(objStates)), err
return mcpUpdatedFunc, err
}

func syncMachineConfigPoolsStatuses(instanceName string, trees []nodegroupv1.Tree, forwardMCPConds bool) ([]nropv1.MachineConfigPool, bool) {
func syncMachineConfigPoolsStatuses(instanceName string, trees []nodegroupv1.Tree, forwardMCPConds bool, updatedFunc mcpWaitForUpdatedFunc) ([]nropv1.MachineConfigPool, bool) {
klog.V(4).InfoS("Machine Config Status Sync start", "trees", len(trees))
defer klog.V(4).Info("Machine Config Status Sync stop")

Expand All @@ -361,7 +379,7 @@ func syncMachineConfigPoolsStatuses(instanceName string, trees []nodegroupv1.Tre
for _, mcp := range tree.MachineConfigPools {
mcpStatuses = append(mcpStatuses, extractMCPStatus(mcp, forwardMCPConds))

isUpdated := IsMachineConfigPoolUpdated(instanceName, mcp)
isUpdated := updatedFunc(instanceName, mcp)
klog.V(5).InfoS("Machine Config Pool state", "name", mcp.Name, "instance", instanceName, "updated", isUpdated)

if !isUpdated {
Expand Down Expand Up @@ -461,9 +479,10 @@ func (r *NUMAResourcesOperatorReconciler) syncNUMAResourcesOperatorResources(ctx
}
rteupdate.DaemonSetHashAnnotation(r.RTEManifests.DaemonSet, cmHash)
}
rteupdate.SecurityContextConstraint(r.RTEManifests.SecurityContextConstraint, annotations.IsCustomPolicyEnabled(instance.Annotations))

existing := rtestate.FromClient(ctx, r.Client, r.Platform, r.RTEManifests, instance, trees, r.Namespace)
for _, objState := range existing.State(r.RTEManifests, daemonsetUpdater) {
for _, objState := range existing.State(r.RTEManifests, daemonsetUpdater, annotations.IsCustomPolicyEnabled(instance.Annotations)) {
if objState.Error != nil {
// We are likely in the bootstrap scenario. In this case, which is expected once, everything is fine.
// If it happens past bootstrap, still carry on. We know what to do, and we do want to enforce the desired state.
Expand Down Expand Up @@ -756,6 +775,15 @@ func daemonsetUpdater(mcpName string, gdm *rtestate.GeneratedDesiredManifest) er
klog.V(5).InfoS("DaemonSet update: cannot update config", "mcp", mcpName, "daemonset", gdm.DaemonSet.Name, "error", err)
return err
}
if gdm.ClusterPlatform != platform.Kubernetes {
if gdm.IsCustomPolicyEnabled && gdm.ClusterPlatform == platform.OpenShift {
k8swgrteupdate.SecurityContext(gdm.DaemonSet, selinux.RTEContextTypeLegacy)
klog.V(5).InfoS("DaemonSet update: selinux options", "container", manifests.ContainerNameRTE, "context", selinux.RTEContextTypeLegacy)
} else {
k8swgrteupdate.SecurityContext(gdm.DaemonSet, selinux.RTEContextType)
klog.V(5).InfoS("DaemonSet update: selinux options", "container", manifests.ContainerNameRTE, "context", selinux.RTEContextType)
}
}
return nil
}

Expand Down
149 changes: 148 additions & 1 deletion controllers/numaresourcesoperator_controller_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -18,6 +18,7 @@ package controllers

import (
"context"
"github.com/openshift-kni/numaresources-operator/internal/api/annotations"
"time"

apierrors "k8s.io/apimachinery/pkg/api/errors"
Expand Down Expand Up @@ -67,7 +68,7 @@ func NewFakeNUMAResourcesOperatorReconciler(plat platform.Platform, platVersion
return nil, err
}

rteManifests, err := rtemanifests.GetManifests(plat, platVersion, testNamespace, true)
rteManifests, err := rtemanifests.GetManifests(plat, platVersion, testNamespace, false, true)
if err != nil {
return nil, err
}
Expand Down Expand Up @@ -149,6 +150,7 @@ var _ = Describe("Test NUMAResourcesOperator Reconcile", func() {
{MatchLabels: label1},
{MatchLabels: label2},
})
nro.Annotations = map[string]string{annotations.SELinuxPolicyConfigAnnotation: annotations.SELinuxPolicyCustom}

mcp1 = testobjs.NewMachineConfigPool("test1", label1, &metav1.LabelSelector{MatchLabels: label1}, &metav1.LabelSelector{MatchLabels: label1})
mcp2 = testobjs.NewMachineConfigPool("test2", label2, &metav1.LabelSelector{MatchLabels: label2}, &metav1.LabelSelector{MatchLabels: label2})
Expand Down Expand Up @@ -210,6 +212,41 @@ var _ = Describe("Test NUMAResourcesOperator Reconcile", func() {
}
Expect(reconciler.Client.Get(context.TODO(), mcp2DSKey, ds)).To(Succeed())
})
When("NRO updated to remove the custom policy annotation", func() {
BeforeEach(func() {
// check we have at least two NodeGroups
Expect(len(nro.Spec.NodeGroups)).To(BeNumerically(">", 1))

By("Update NRO to have both NodeGroups")
key := client.ObjectKeyFromObject(nro)
nro := &nropv1.NUMAResourcesOperator{}
Expect(reconciler.Client.Get(context.TODO(), key, nro)).NotTo(HaveOccurred())

nro.Annotations = map[string]string{}
Expect(reconciler.Client.Update(context.TODO(), nro)).NotTo(HaveOccurred())

thirdLoopResult, err := reconciler.Reconcile(context.TODO(), reconcile.Request{NamespacedName: key})
Expect(err).ToNot(HaveOccurred())
Expect(thirdLoopResult).To(Equal(reconcile.Result{RequeueAfter: time.Minute}))
})
It("should not create a machine config", func() {
mc := &machineconfigv1.MachineConfig{}

// Check mc1 not created
mc1Key := client.ObjectKey{
Name: objectnames.GetMachineConfigName(nro.Name, mcp1.Name),
}
err := reconciler.Client.Get(context.TODO(), mc1Key, mc)
Expect(apierrors.IsNotFound(err)).To(BeTrue(), "MachineConfig %s is expected to not be found", mc1Key.String())

// Check mc1 not created
mc2Key := client.ObjectKey{
Name: objectnames.GetMachineConfigName(nro.Name, mcp2.Name),
}
err = reconciler.Client.Get(context.TODO(), mc2Key, mc)
Expect(apierrors.IsNotFound(err)).To(BeTrue(), "MachineConfig %s is expected to not be found", mc2Key.String())
})
})
When("a NodeGroup is deleted", func() {
BeforeEach(func() {
// check we have at least two NodeGroups
Expand All @@ -223,6 +260,7 @@ var _ = Describe("Test NUMAResourcesOperator Reconcile", func() {
nro.Spec.NodeGroups = []nropv1.NodeGroup{{
MachineConfigPoolSelector: &metav1.LabelSelector{MatchLabels: label1},
}}
nro.Annotations = map[string]string{annotations.SELinuxPolicyConfigAnnotation: annotations.SELinuxPolicyCustom}
Expect(reconciler.Client.Update(context.TODO(), nro)).NotTo(HaveOccurred())

thirdLoopResult, err := reconciler.Reconcile(context.TODO(), reconcile.Request{NamespacedName: key})
Expand Down Expand Up @@ -319,6 +357,7 @@ var _ = Describe("Test NUMAResourcesOperator Reconcile", func() {
{MatchLabels: label1},
{MatchLabels: label2},
})
nro.Annotations = map[string]string{annotations.SELinuxPolicyConfigAnnotation: annotations.SELinuxPolicyCustom}

mcp1 = testobjs.NewMachineConfigPool("test1", label1, &metav1.LabelSelector{MatchLabels: label1}, &metav1.LabelSelector{MatchLabels: label1})
mcp2 = testobjs.NewMachineConfigPool("test2", label2, &metav1.LabelSelector{MatchLabels: label2}, &metav1.LabelSelector{MatchLabels: label2})
Expand Down Expand Up @@ -558,6 +597,7 @@ var _ = Describe("Test NUMAResourcesOperator Reconcile", func() {
},
},
}
nro.Annotations = map[string]string{annotations.SELinuxPolicyConfigAnnotation: annotations.SELinuxPolicyCustom}
})

When("machine config selector matches machine config labels", func() {
Expand Down Expand Up @@ -1088,6 +1128,113 @@ var _ = Describe("Test NUMAResourcesOperator Reconcile", func() {
Expect(ds.Spec.Template.Spec.Tolerations).To(Equal(reconciler.RTEManifests.DaemonSet.Spec.Template.Spec.Tolerations), "DS tolerations not restored to defaults")
})
})
Context("emulating upgrade from 4.1X to 4.18 which has a built-in selinux policy for RTE pods", func() {
var nro *nropv1.NUMAResourcesOperator
var mcp1 *machineconfigv1.MachineConfigPool
var mcp2 *machineconfigv1.MachineConfigPool

var reconciler *NUMAResourcesOperatorReconciler
var label1, label2 map[string]string

BeforeEach(func() {
label1 = map[string]string{
"test1": "test1",
}
label2 = map[string]string{
"test2": "test2",
}

nro = testobjs.NewNUMAResourcesOperator(objectnames.DefaultNUMAResourcesOperatorCrName, []*metav1.LabelSelector{
{MatchLabels: label1},
{MatchLabels: label2},
})
// reconciling NRO object with custom policy, emulates the old behavior version
nro.Annotations = map[string]string{annotations.SELinuxPolicyConfigAnnotation: annotations.SELinuxPolicyCustom}

mcp1 = testobjs.NewMachineConfigPool("test1", label1, &metav1.LabelSelector{MatchLabels: label1}, &metav1.LabelSelector{MatchLabels: label1})
mcp2 = testobjs.NewMachineConfigPool("test2", label2, &metav1.LabelSelector{MatchLabels: label2}, &metav1.LabelSelector{MatchLabels: label2})

var err error
reconciler, err = NewFakeNUMAResourcesOperatorReconciler(platform.OpenShift, defaultOCPVersion, nro, mcp1, mcp2)
Expect(err).ToNot(HaveOccurred())

key := client.ObjectKeyFromObject(nro)
firstLoopResult, err := reconciler.Reconcile(context.TODO(), reconcile.Request{NamespacedName: key})
Expect(err).ToNot(HaveOccurred())
Expect(firstLoopResult).To(Equal(reconcile.Result{RequeueAfter: time.Minute}))

// Ensure mcp1 is ready
Expect(reconciler.Client.Get(context.TODO(), client.ObjectKeyFromObject(mcp1), mcp1)).To(Succeed())
mcp1.Status.Configuration.Source = []corev1.ObjectReference{
{
Name: objectnames.GetMachineConfigName(nro.Name, mcp1.Name),
},
}
mcp1.Status.Conditions = []machineconfigv1.MachineConfigPoolCondition{
{
Type: machineconfigv1.MachineConfigPoolUpdated,
Status: corev1.ConditionTrue,
},
}
Expect(reconciler.Client.Update(context.TODO(), mcp1)).To(Succeed())

// ensure mcp2 is ready
Expect(reconciler.Client.Get(context.TODO(), client.ObjectKeyFromObject(mcp2), mcp2)).To(Succeed())
mcp2.Status.Configuration.Source = []corev1.ObjectReference{
{
Name: objectnames.GetMachineConfigName(nro.Name, mcp2.Name),
},
}
mcp2.Status.Conditions = []machineconfigv1.MachineConfigPoolCondition{
{
Type: machineconfigv1.MachineConfigPoolUpdated,
Status: corev1.ConditionTrue,
},
}
Expect(reconciler.Client.Update(context.TODO(), mcp2)).To(Succeed())

secondLoopResult, err := reconciler.Reconcile(context.TODO(), reconcile.Request{NamespacedName: key})
Expect(err).ToNot(HaveOccurred())
Expect(secondLoopResult).To(Equal(reconcile.Result{RequeueAfter: 5 * time.Second}))

By("Check DaemonSets are created")
mcp1DSKey := client.ObjectKey{
Name: objectnames.GetComponentName(nro.Name, mcp1.Name),
Namespace: testNamespace,
}
ds := &appsv1.DaemonSet{}
Expect(reconciler.Client.Get(context.TODO(), mcp1DSKey, ds)).ToNot(HaveOccurred())

mcp2DSKey := client.ObjectKey{
Name: objectnames.GetComponentName(nro.Name, mcp2.Name),
Namespace: testNamespace,
}
Expect(reconciler.Client.Get(context.TODO(), mcp2DSKey, ds)).To(Succeed())

By("upgrading from 4.1X to 4.18")
Expect(reconciler.Client.Get(context.TODO(), client.ObjectKeyFromObject(nro), nro)).To(Succeed())
nro.Annotations = map[string]string{}
Expect(reconciler.Client.Update(context.TODO(), nro)).To(Succeed())

thirdLoopResult, err := reconciler.Reconcile(context.TODO(), reconcile.Request{NamespacedName: key})
Expect(err).ToNot(HaveOccurred())
Expect(thirdLoopResult).To(Equal(reconcile.Result{RequeueAfter: time.Minute}))
})
It("should delete existing mc", func() {
mc1Key := client.ObjectKey{
Name: objectnames.GetMachineConfigName(nro.Name, mcp1.Name),
}
mc := &machineconfigv1.MachineConfig{}
err := reconciler.Client.Get(context.TODO(), mc1Key, mc)
Expect(apierrors.IsNotFound(err)).To(BeTrue(), "MachineConfig %s expected to be deleted; err=%v", mc1Key.Name, err)

mc2Key := client.ObjectKey{
Name: objectnames.GetMachineConfigName(nro.Name, mcp2.Name),
}
err = reconciler.Client.Get(context.TODO(), mc2Key, mc)
Expect(apierrors.IsNotFound(err)).To(BeTrue(), "MachineConfig %s expected to be deleted; err=%v", mc2Key.Name, err)
})
})
})

func getConditionByType(conditions []metav1.Condition, conditionType string) *metav1.Condition {
Expand Down
2 changes: 1 addition & 1 deletion go.mod
Original file line number Diff line number Diff line change
Expand Up @@ -9,7 +9,7 @@ require (
github.com/go-logr/logr v1.4.2
github.com/google/go-cmp v0.6.0
github.com/jaypipes/ghw v0.12.0
github.com/k8stopologyawareschedwg/deployer v0.20.4
github.com/k8stopologyawareschedwg/deployer v0.21.0
github.com/k8stopologyawareschedwg/noderesourcetopology-api v0.1.2
github.com/k8stopologyawareschedwg/podfingerprint v0.2.2
github.com/k8stopologyawareschedwg/resource-topology-exporter v0.16.1
Expand Down
4 changes: 2 additions & 2 deletions go.sum
Original file line number Diff line number Diff line change
Expand Up @@ -1177,8 +1177,8 @@ github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7V
github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
github.com/jung-kurt/gofpdf v1.0.0/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes=
github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes=
github.com/k8stopologyawareschedwg/deployer v0.20.4 h1:aDpGoYHTEab8v+isLCqehgYy3ZafAattrAt6oLI6Jv0=
github.com/k8stopologyawareschedwg/deployer v0.20.4/go.mod h1:hnPU2dPLclkKXU28H+RkRrS21LFpmMTAU55mISsPuMk=
github.com/k8stopologyawareschedwg/deployer v0.21.0 h1:GA2HbYTIxynCc+R8ceGxnQo9AdQZBFUgNjHm3gJVRFQ=
github.com/k8stopologyawareschedwg/deployer v0.21.0/go.mod h1:hnPU2dPLclkKXU28H+RkRrS21LFpmMTAU55mISsPuMk=
github.com/k8stopologyawareschedwg/noderesourcetopology-api v0.1.2 h1:uAwqOtyrFYggq3pVf3hs1XKkBxrQ8dkgjWz3LCLJsiY=
github.com/k8stopologyawareschedwg/noderesourcetopology-api v0.1.2/go.mod h1:LBzS4n6GX1C69tzSd5EibZ9cGOXFuHP7GxEMDYVe1sM=
github.com/k8stopologyawareschedwg/podfingerprint v0.2.2 h1:iFHPfZInM9pz2neye5RdmORMp1hPmte1EGJYpOOzZVg=
Expand Down
Loading