From 1ae2ae7ebcb7228d50fa05df33de699edd300d50 Mon Sep 17 00:00:00 2001
From: Martin Necas
Date: Mon, 24 Feb 2025 15:05:32 +0900
Subject: [PATCH] MTV-2093 | Sort the disks by bus

Issue:
When migrating a VM whose disks sit on several bus controllers, the PVs
are not populated with the correct disks, so some larger disks end up in
smaller PVs. This happens because of a mismatch between the libvirt
domain and the VMware API: virt-v2v outputs the disks in the order in
which they appear in the libvirt domain, whereas Forklift uses the order
from the VMware API and sorts the disks by key. libvirt orders the
devices SCSI, SATA and then IDE, whereas sorting by the VMware API key
yields the order IDE, SATA and then SCSI.

Fix:
Gather information about the disk buses and sort the disks in the order
SCSI, SATA and IDE; within each bus, sort the disks by key, from lowest
to highest.
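
To illustrate the intended ordering, here is a minimal standalone
sketch. It uses a simplified Disk stand-in and a single-pass comparator
rather than the filter-per-bus implementation in this patch:

    package main

    import (
        "fmt"
        "sort"
    )

    // Disk is a simplified stand-in for the vsphere model type.
    type Disk struct {
        Key int32
        Bus string
    }

    func main() {
        disks := []Disk{
            {Key: 1, Bus: "ide"},
            {Key: 2, Bus: "scsi"},
            {Key: 1, Bus: "sata"},
            {Key: 1, Bus: "scsi"},
        }
        // Rank the buses in libvirt order: SCSI, then SATA, then IDE.
        rank := map[string]int{"scsi": 0, "sata": 1, "ide": 2}
        sort.SliceStable(disks, func(i, j int) bool {
            if rank[disks[i].Bus] != rank[disks[j].Bus] {
                return rank[disks[i].Bus] < rank[disks[j].Bus]
            }
            return disks[i].Key < disks[j].Key
        })
        fmt.Println(disks) // [{1 scsi} {2 scsi} {1 sata} {1 ide}]
    }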
Ref: https://issues.redhat.com/browse/MTV-2093

Signed-off-by: Martin Necas
---
 .../plan/adapter/vsphere/builder.go           | 34 +++++++-
 .../plan/adapter/vsphere/builder_test.go      | 84 +++++++++++++++++++
 .../provider/container/vsphere/model.go       | 84 +++++++++++++++++++
 .../provider/model/vsphere/model.go           |  9 ++
 4 files changed, 207 insertions(+), 4 deletions(-)

diff --git a/pkg/controller/plan/adapter/vsphere/builder.go b/pkg/controller/plan/adapter/vsphere/builder.go
index 22453918e..eb485b480 100644
--- a/pkg/controller/plan/adapter/vsphere/builder.go
+++ b/pkg/controller/plan/adapter/vsphere/builder.go
@@ -730,15 +730,41 @@ func (r *Builder) mapFirmware(vm *model.VM, object *cnv.VirtualMachineSpec) {
 	object.Template.Spec.Domain.Firmware = firmware
 }
 
+func (r *Builder) filterDisksWithBus(disks []vsphere.Disk, bus string) []vsphere.Disk {
+	var resp []vsphere.Disk
+	for _, disk := range disks {
+		if disk.Bus == bus {
+			resp = append(resp, disk)
+		}
+	}
+	return resp
+}
+
+// The disks are first sorted by bus, in the order SCSI, SATA and IDE, and within each bus the
+// disks are sorted by key. This is needed because virt-v2v outputs the disk files in the order
+// it gets them from libvirt, and libvirt orders the devices SCSI, SATA and then IDE.
+// Sorting by key alone produced the order IDE, SATA and SCSI, which caused some PVs to be
+// populated with incorrect disks.
+// https://github.com/libvirt/libvirt/blob/master/src/vmx/vmx.c#L1713
+func (r *Builder) sortedDisks(disks []vsphere.Disk) []vsphere.Disk {
+	var buses = []string{container.SCSI, container.SATA, container.IDE}
+	var resp []vsphere.Disk
+	for _, bus := range buses {
+		disksWithBus := r.filterDisksWithBus(disks, bus)
+		sort.Slice(disksWithBus, func(i, j int) bool {
+			return disksWithBus[i].Key < disksWithBus[j].Key
+		})
+		resp = append(resp, disksWithBus...)
+	}
+	return resp
+}
+
 func (r *Builder) mapDisks(vm *model.VM, vmRef ref.Ref, persistentVolumeClaims []*core.PersistentVolumeClaim, object *cnv.VirtualMachineSpec) {
 	var kVolumes []cnv.Volume
 	var kDisks []cnv.Disk
 
 	var templateErr error
-	disks := vm.Disks
-	sort.Slice(disks, func(i, j int) bool {
-		return disks[i].Key < disks[j].Key
-	})
+	disks := r.sortedDisks(vm.Disks)
 	pvcMap := make(map[string]*core.PersistentVolumeClaim)
 	for i := range persistentVolumeClaims {
 		pvc := persistentVolumeClaims[i]
diff --git a/pkg/controller/plan/adapter/vsphere/builder_test.go b/pkg/controller/plan/adapter/vsphere/builder_test.go
index a0e1e495d..8e42d6639 100644
--- a/pkg/controller/plan/adapter/vsphere/builder_test.go
+++ b/pkg/controller/plan/adapter/vsphere/builder_test.go
@@ -3,6 +3,7 @@ package vsphere
 import (
 	v1beta1 "github.com/konveyor/forklift-controller/pkg/apis/forklift/v1beta1"
 	plancontext "github.com/konveyor/forklift-controller/pkg/controller/plan/context"
+	container "github.com/konveyor/forklift-controller/pkg/controller/provider/container/vsphere"
 	"github.com/konveyor/forklift-controller/pkg/controller/provider/model/vsphere"
 	model "github.com/konveyor/forklift-controller/pkg/controller/provider/web/vsphere"
 	"github.com/konveyor/forklift-controller/pkg/lib/logging"
@@ -131,6 +132,89 @@ var _ = Describe("vSphere builder", func() {
 		}},
 		}, "00:50:56:83:25:47:ip:172.29.3.193,172.29.3.1,16"),
 	)
+
+	DescribeTable("should", func(disks []vsphere.Disk, output []vsphere.Disk) {
+		Expect(builder.sortedDisks(disks)).Should(Equal(output))
+	},
+		Entry("sort all disks by buses",
+			[]vsphere.Disk{
+				{Key: 1, Bus: container.IDE},
+				{Key: 1, Bus: container.SATA},
+				{Key: 1, Bus: container.SCSI},
+				{Key: 2, Bus: container.SCSI},
+			},
+			[]vsphere.Disk{
+				{Key: 1, Bus: container.SCSI},
+				{Key: 2, Bus: container.SCSI},
+				{Key: 1, Bus: container.SATA},
+				{Key: 1, Bus: container.IDE},
+			},
+		),
+		Entry("sort IDE and SATA disks by buses",
+			[]vsphere.Disk{
+				{Key: 1, Bus: container.IDE},
+				{Key: 1, Bus: container.SATA},
+			},
+			[]vsphere.Disk{
+				{Key: 1, Bus: container.SATA},
+				{Key: 1, Bus: container.IDE},
+			},
+		),
+		Entry("sort multiple SATA disks by key",
+			[]vsphere.Disk{
+				{Key: 3, Bus: container.SATA},
+				{Key: 1, Bus: container.SATA},
+				{Key: 2, Bus: container.SATA},
+			},
+			[]vsphere.Disk{
+				{Key: 1, Bus: container.SATA},
+				{Key: 2, Bus: container.SATA},
+				{Key: 3, Bus: container.SATA},
+			},
+		),
+		Entry("sort multiple SATA and multiple SCSI disks by buses",
+			[]vsphere.Disk{
+				{Key: 3, Bus: container.SATA},
+				{Key: 3, Bus: container.SCSI},
+				{Key: 2, Bus: container.SCSI},
+				{Key: 1, Bus: container.SATA},
+				{Key: 2, Bus: container.SATA},
+				{Key: 1, Bus: container.SCSI},
+			},
+			[]vsphere.Disk{
+				{Key: 1, Bus: container.SCSI},
+				{Key: 2, Bus: container.SCSI},
+				{Key: 3, Bus: container.SCSI},
+				{Key: 1, Bus: container.SATA},
+				{Key: 2, Bus: container.SATA},
+				{Key: 3, Bus: container.SATA},
+			},
+		),
+		Entry("sort multiple disks on all buses",
+			[]vsphere.Disk{
+				{Key: 2, Bus: container.IDE},
+				{Key: 3, Bus: container.SATA},
+				{Key: 3, Bus: container.SCSI},
+				{Key: 2, Bus: container.SCSI},
+				{Key: 3, Bus: container.IDE},
+				{Key: 1, Bus: container.SATA},
+				{Key: 2, Bus: container.SATA},
+				{Key: 1, Bus: container.SCSI},
+				{Key: 1, Bus: container.IDE},
+			},
+			[]vsphere.Disk{
+				{Key: 1, Bus: container.SCSI},
+				{Key: 2, Bus: container.SCSI},
+				{Key: 3, Bus: container.SCSI},
+				{Key: 1, Bus: container.SATA},
+				{Key: 2, Bus: container.SATA},
+				{Key: 3, Bus: container.SATA},
+				{Key: 1, Bus: container.IDE},
+				{Key: 2, Bus: container.IDE},
+				{Key: 3, Bus: container.IDE},
+			},
+		),
+	)
 })
 
 //nolint:errcheck
diff --git a/pkg/controller/provider/container/vsphere/model.go b/pkg/controller/provider/container/vsphere/model.go
index 1c40ba047..113abf5bb 100644
--- a/pkg/controller/provider/container/vsphere/model.go
+++ b/pkg/controller/provider/container/vsphere/model.go
@@ -12,6 +12,15 @@ import (
 	"github.com/vmware/govmomi/vim25/types"
 )
 
+// Bus types
+const (
+	NVME = "nvme"
+	USB  = "usb"
+	SATA = "sata"
+	SCSI = "scsi"
+	IDE  = "ide"
+)
+
 // Model adapter.
 // Each adapter provides provider-specific management of a model.
 type Adapter interface {
@@ -762,6 +771,7 @@ func (v *VmAdapter) Apply(u types.ObjectUpdate) {
 				}
 				v.model.Devices = devList
 				v.model.NICs = nicList
+				v.updateControllers(&devArray)
 				v.updateDisks(&devArray)
 			}
 		}
@@ -769,6 +779,75 @@ func (v *VmAdapter) Apply(u types.ObjectUpdate) {
 	}
 }
 
+// Update the virtual controllers.
+func (v *VmAdapter) updateControllers(devArray *types.ArrayOfVirtualDevice) {
+	controllers := []model.Controller{}
+	for _, dev := range devArray.VirtualDevice {
+		var md model.Controller
+		switch controller := dev.(type) {
+		case *types.VirtualIDEController:
+			md = model.Controller{
+				Bus:   IDE,
+				Disks: controller.Device,
+				Key:   controller.Key,
+			}
+		case *types.VirtualBusLogicController:
+			md = model.Controller{
+				Bus:   SCSI,
+				Disks: controller.Device,
+				Key:   controller.Key,
+			}
+		case *types.VirtualLsiLogicController:
+			md = model.Controller{
+				Bus:   SCSI,
+				Disks: controller.Device,
+				Key:   controller.Key,
+			}
+		case *types.VirtualLsiLogicSASController:
+			md = model.Controller{
+				Bus:   SCSI,
+				Disks: controller.Device,
+				Key:   controller.Key,
+			}
+		case *types.ParaVirtualSCSIController:
+			md = model.Controller{
+				Bus:   SCSI,
+				Disks: controller.Device,
+				Key:   controller.Key,
+			}
+		case *types.VirtualAHCIController:
+			md = model.Controller{
+				Bus:   SATA,
+				Disks: controller.Device,
+				Key:   controller.Key,
+			}
+		case *types.VirtualUSBController:
+			md = model.Controller{
+				Bus:   USB,
+				Disks: controller.Device,
+				Key:   controller.Key,
+			}
+		case *types.VirtualNVMEController:
+			md = model.Controller{
+				Bus:   NVME,
+				Disks: controller.Device,
+				Key:   controller.Key,
+			}
+		}
+		controllers = append(controllers, md)
+	}
+	v.model.Controllers = controllers
+}
+
+func (v *VmAdapter) getDiskController(key int32) model.Controller {
+	for _, controller := range v.model.Controllers {
+		if controller.Key == key {
+			return controller
+		}
+	}
+	return model.Controller{}
+}
+
 // Update virtual disk devices.
func (v *VmAdapter) updateDisks(devArray *types.ArrayOfVirtualDevice) { disks := []model.Disk{} @@ -776,6 +855,7 @@ func (v *VmAdapter) updateDisks(devArray *types.ArrayOfVirtualDevice) { switch dev.(type) { case *types.VirtualDisk: disk := dev.(*types.VirtualDisk) + controller := v.getDiskController(disk.ControllerKey) switch backing := disk.Backing.(type) { case *types.VirtualDiskFlatVer1BackingInfo: md := model.Disk{ @@ -783,6 +863,7 @@ func (v *VmAdapter) updateDisks(devArray *types.ArrayOfVirtualDevice) { File: backing.FileName, Capacity: disk.CapacityInBytes, Mode: backing.DiskMode, + Bus: controller.Bus, } if backing.Datastore != nil { datastoreId, _ := sanitize(backing.Datastore.Value) @@ -799,6 +880,7 @@ func (v *VmAdapter) updateDisks(devArray *types.ArrayOfVirtualDevice) { Capacity: disk.CapacityInBytes, Shared: backing.Sharing != "sharingNone", Mode: backing.DiskMode, + Bus: controller.Bus, } if backing.Datastore != nil { datastoreId, _ := sanitize(backing.Datastore.Value) @@ -816,6 +898,7 @@ func (v *VmAdapter) updateDisks(devArray *types.ArrayOfVirtualDevice) { Shared: backing.Sharing != "sharingNone", Mode: backing.DiskMode, RDM: true, + Bus: controller.Bus, } if backing.Datastore != nil { datastoreId, _ := sanitize(backing.Datastore.Value) @@ -832,6 +915,7 @@ func (v *VmAdapter) updateDisks(devArray *types.ArrayOfVirtualDevice) { Capacity: disk.CapacityInBytes, Shared: backing.Sharing != "sharingNone", RDM: true, + Bus: controller.Bus, } disks = append(disks, md) } diff --git a/pkg/controller/provider/model/vsphere/model.go b/pkg/controller/provider/model/vsphere/model.go index 1069ce68c..cbcbaf167 100644 --- a/pkg/controller/provider/model/vsphere/model.go +++ b/pkg/controller/provider/model/vsphere/model.go @@ -264,6 +264,7 @@ type VM struct { Devices []Device `sql:""` NICs []NIC `sql:""` Disks []Disk `sql:""` + Controllers []Controller `sql:""` Networks []Ref `sql:""` Concerns []Concern `sql:""` GuestNetworks []GuestNetwork `sql:""` @@ -276,6 +277,13 @@ func (m *VM) Validated() bool { return m.RevisionValidated == m.Revision } +// Virtual Controller. +type Controller struct { + Key int32 `json:"key"` + Bus string `json:"bus"` + Disks []int32 `sql:""` +} + // Virtual Disk. type Disk struct { Key int32 `json:"key"` @@ -284,6 +292,7 @@ type Disk struct { Capacity int64 `json:"capacity"` Shared bool `json:"shared"` RDM bool `json:"rdm"` + Bus string `json:"bus"` Mode string `json:"mode,omitempty"` }