diff --git a/api/core/v1alpha2/vmbdacondition/condition.go b/api/core/v1alpha2/vmbdacondition/condition.go
index eda617681c..19d30e18cd 100644
--- a/api/core/v1alpha2/vmbdacondition/condition.go
+++ b/api/core/v1alpha2/vmbdacondition/condition.go
@@ -63,6 +63,8 @@ const (
 	// or the virtual disk is already attached to the virtual machine spec.
 	// Only the one that was created or started sooner can be processed.
 	Conflict AttachedReason = "Conflict"
+	// DeviceNotAvailableOnNode indicates that the block device's PersistentVolume is not available on the node where the virtual machine is running.
+	DeviceNotAvailableOnNode AttachedReason = "DeviceNotAvailableOnNode"
 
 	// CapacityAvailable signifies that the capacity not reached and attaching available.
 	CapacityAvailable DiskAttachmentCapacityAvailableReason = "CapacityAvailable"
diff --git a/images/virtualization-artifact/pkg/controller/service/attachment_service.go b/images/virtualization-artifact/pkg/controller/service/attachment_service.go
index 6fda25d52c..c81bebc54a 100644
--- a/images/virtualization-artifact/pkg/controller/service/attachment_service.go
+++ b/images/virtualization-artifact/pkg/controller/service/attachment_service.go
@@ -24,6 +24,7 @@ import (
 
 	corev1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/types"
+	"k8s.io/component-helpers/scheduling/corev1/nodeaffinity"
 	virtv1 "kubevirt.io/api/core/v1"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 
@@ -250,6 +251,10 @@ func (s AttachmentService) GetPersistentVolumeClaim(ctx context.Context, ad *Att
 	return object.FetchObject(ctx, types.NamespacedName{Namespace: ad.Namespace, Name: ad.PVCName}, s.client, &corev1.PersistentVolumeClaim{})
 }
 
+func (s AttachmentService) GetPersistentVolume(ctx context.Context, pvName string) (*corev1.PersistentVolume, error) {
+	return object.FetchObject(ctx, types.NamespacedName{Name: pvName}, s.client, &corev1.PersistentVolume{})
+}
+
 func (s AttachmentService) GetVirtualMachine(ctx context.Context, name, namespace string) (*v1alpha2.VirtualMachine, error) {
 	return object.FetchObject(ctx, types.NamespacedName{Namespace: namespace, Name: name}, s.client, &v1alpha2.VirtualMachine{})
 }
@@ -262,6 +267,48 @@ func (s AttachmentService) GetKVVMI(ctx context.Context, vm *v1alpha2.VirtualMac
 	return object.FetchObject(ctx, types.NamespacedName{Namespace: vm.Namespace, Name: vm.Name}, s.client, &virtv1.VirtualMachineInstance{})
 }
 
+func (s AttachmentService) IsPVAvailableOnVMNode(ctx context.Context, pvc *corev1.PersistentVolumeClaim, kvvmi *virtv1.VirtualMachineInstance) (bool, error) {
+	if pvc == nil {
+		return false, errors.New("pvc is nil")
+	}
+	if kvvmi == nil {
+		return false, errors.New("kvvmi is nil")
+	}
+	if pvc.Spec.VolumeName == "" || kvvmi.Status.NodeName == "" {
+		return true, nil
+	}
+
+	pv, err := s.GetPersistentVolume(ctx, pvc.Spec.VolumeName)
+	if err != nil {
+		return false, fmt.Errorf("failed to get PersistentVolume %q: %w", pvc.Spec.VolumeName, err)
+	}
+	if pv == nil {
+		return false, fmt.Errorf("PersistentVolume %q not found", pvc.Spec.VolumeName)
+	}
+
+	if pv.Spec.NodeAffinity == nil || pv.Spec.NodeAffinity.Required == nil {
+		return true, nil
+	}
+
+	nodeName := kvvmi.Status.NodeName
+	node := &corev1.Node{}
+	err = s.client.Get(ctx, types.NamespacedName{Name: nodeName}, node)
+	if err != nil {
+		return false, fmt.Errorf("failed to get Node %q: %w", nodeName, err)
+	}
+
+	selector, err := nodeaffinity.NewNodeSelector(pv.Spec.NodeAffinity.Required)
+	if err != nil {
+		return false, fmt.Errorf("failed to get node selector: %w", err)
+	}
+
+	if !selector.Match(node) {
+		return false, nil
+	}
+
+	return true, nil
+}
+
 func isSameBlockDeviceRefs(a, b v1alpha2.VMBDAObjectRef) bool {
 	return a.Kind == b.Kind && a.Name == b.Name
 }
diff --git a/images/virtualization-artifact/pkg/controller/vmbda/internal/life_cycle.go b/images/virtualization-artifact/pkg/controller/vmbda/internal/life_cycle.go
index 36d14e0b17..52b3c92f7b 100644
--- a/images/virtualization-artifact/pkg/controller/vmbda/internal/life_cycle.go
+++ b/images/virtualization-artifact/pkg/controller/vmbda/internal/life_cycle.go
@@ -235,6 +235,29 @@ func (h LifeCycleHandler) Handle(ctx context.Context, vmbda *v1alpha2.VirtualMac
 		return reconcile.Result{}, nil
 	}
 
+	if ad.PVCName != "" {
+		pvc, err := h.attacher.GetPersistentVolumeClaim(ctx, ad)
+		if err != nil {
+			return reconcile.Result{}, err
+		}
+
+		if pvc != nil {
+			available, err := h.attacher.IsPVAvailableOnVMNode(ctx, pvc, kvvmi)
+			if err != nil {
+				return reconcile.Result{}, err
+			}
+
+			if !available {
+				vmbda.Status.Phase = v1alpha2.BlockDeviceAttachmentPhaseFailed
+				cb.
+					Status(metav1.ConditionFalse).
+					Reason(vmbdacondition.DeviceNotAvailableOnNode).
+					Message(fmt.Sprintf("PersistentVolume %q is not available on node %q where the virtual machine is running", pvc.Spec.VolumeName, kvvmi.Status.NodeName))
+				return reconcile.Result{}, nil
+			}
+		}
+	}
+
 	log.Info("Send attachment request")
 	err = h.attacher.HotPlugDisk(ctx, ad, vm, kvvm)