Merge pull request #5217 from Yingshun/viommu_mig
vIOMMU: Add a case of migration
chloerh committed Oct 13, 2023
2 parents 3670a2e + 89a6b06 commit a2a9856
Showing 3 changed files with 148 additions and 0 deletions.
56 changes: 56 additions & 0 deletions libvirt/tests/cfg/sriov/vIOMMU/migration_iommu_device.cfg
@@ -0,0 +1,56 @@
- vIOMMU.migration.iommu_device:
    type = migration_iommu_device
    # Migrating non-started VM causes undefined behavior
    start_vm = yes
    # Console output can only be monitored via virsh console output
    only_pty = True
    take_regular_screendumps = no
    # Options to pass to virsh migrate command before <domain> <desturi>
    virsh_migrate_options = ""
    # Extra options to pass after <domain> <desturi>
    virsh_migrate_extra = ""
    # SSH connection time out
    ssh_timeout = 60
    migration_setup = "yes"
    storage_type = 'nfs'
    setup_local_nfs = 'yes'
    disk_type = "file"
    disk_source_protocol = "netfs"
    mnt_path_name = ${nfs_mount_dir}
    image_convert = 'no'
    virsh_migrate_dest_state = "running"
    virsh_migrate_src_state = "shut off"
    virsh_migrate_options = "--p2p --live --verbose --persistent"
    virsh_migrate_desturi = "qemu+ssh://${migrate_dest_host}/system"
    virsh_migrate_connect_uri = "qemu:///system"
    check_network_accessibility_after_mig = "yes"
    disk_driver = {'name': 'qemu', 'type': 'qcow2', 'iommu': 'on'}
    variants:
        - virtio:
            only q35, aarch64
            func_supported_since_libvirt_ver = (8, 3, 0)
            iommu_dict = {'model': 'virtio'}
        - intel:
            only q35
            start_vm = "yes"
            enable_guest_iommu = "yes"
            iommu_dict = {'model': 'intel', 'driver': {'intremap': 'on', 'caching_mode': 'on', 'eim': 'on', 'iotlb': 'on', 'aw_bits': '48'}}
        - smmuv3:
            only aarch64
            func_supported_since_libvirt_ver = (5, 5, 0)
            iommu_dict = {'model': 'smmuv3'}
    variants:
        - virtio_muti_devices:
            disk_dict = {'target': {'dev': 'vda', 'bus': 'virtio'}, 'device': 'disk', 'driver': ${disk_driver}}
            video_dict = {'primary': 'yes', 'model_heads': '1', 'model_type': 'virtio', 'driver': {'iommu': 'on'}}
            variants:
                - vhost_on:
                    interface_driver_name = "vhost"
                - vhost_off:
                    interface_driver_name = "qemu"
            interface_driver = {'driver_attr': {'name': '${interface_driver_name}', 'iommu': 'on'}}
            iface_dict = {'type_name': 'network', 'model': 'virtio', 'driver': ${interface_driver}, 'source': {'network': 'default'}}
        - scsi_controller:
            controller_dicts = [{'type': 'scsi', 'model': 'virtio-scsi', 'driver': {'iommu': 'on'}}]
            disk_dict = {'target': {'dev': 'sda', 'bus': 'scsi'}}
            cleanup_ifaces = no
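Note: the *_dict values above are Python-literal strings that the test module below parses at runtime with eval(). A minimal standalone sketch of that parsing (illustration only, not part of the diff), using ast.literal_eval as a safer equivalent and the intel variant's iommu_dict as the example value:

import ast

iommu_dict_str = ("{'model': 'intel', 'driver': {'intremap': 'on', "
                  "'caching_mode': 'on', 'eim': 'on', 'iotlb': 'on', "
                  "'aw_bits': '48'}}")
iommu_dict = ast.literal_eval(iommu_dict_str)   # dict with a nested 'driver' dict
print(iommu_dict['model'])                      # intel
print(iommu_dict['driver']['intremap'])         # on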
89 changes: 89 additions & 0 deletions libvirt/tests/src/sriov/vIOMMU/migration_iommu_device.py
@@ -0,0 +1,89 @@
from virttest import libvirt_version
from virttest import virsh

from virttest.libvirt_xml import vm_xml
from virttest.utils_libvirt import libvirt_vmxml

from provider.sriov import sriov_base
from provider.migration import base_steps


def run(test, params, env):
"""
Test vm migration with iommu device
This case starts vm with different iommu device settings then migrate it
to and back to check network works well.
"""
def check_iommu_xml(vm, params):
"""
Check the iommu xml of the migrated vm
:param vm: VM object
:param params: Dictionary with the test parameters
"""
iommu_dict = eval(params.get('iommu_dict', '{}'))
if not iommu_dict:
return
server_ip = params.get("server_ip")
server_user = params.get("server_user", "root")
server_pwd = params.get("server_pwd")
remote_virsh_dargs = {'remote_ip': server_ip,
'remote_user': server_user,
'remote_pwd': server_pwd,
'unprivileged_user': None,
'ssh_remote_auth': True}
virsh_session_remote = virsh.VirshPersistent(**remote_virsh_dargs)
migrated_vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(
vm.name, virsh_instance=virsh_session_remote)
vm_iommu = migrated_vmxml.devices.by_device_tag(
'iommu')[0].fetch_attrs()
for attr, value in iommu_dict.items():
if vm_iommu.get(attr) != value:
test.fail('iommu xml(%s) comparison failed.'
'Expected "%s",got "%s".'
% (attr, value, vm_iommu))

def setup_test():
"""
Setup test
"""
test.log.info("TEST_SETUP: Prepare a VM with different iommu devices.")
test_obj.setup_iommu_test(iommu_dict=iommu_dict,
cleanup_ifaces=cleanup_ifaces)
test_obj.prepare_controller()
test.log.debug(vm_xml.VMXML.new_from_dumpxml(vm.name))
for dev in ["disk", "video"]:
dev_dict = eval(params.get('%s_dict' % dev, '{}'))
if dev == "disk":
dev_dict = test_obj.update_disk_addr(dev_dict)
test.log.debug(dev_dict)
libvirt_vmxml.modify_vm_device(
vm_xml.VMXML.new_from_dumpxml(vm.name), dev, dev_dict)
if cleanup_ifaces:
libvirt_vmxml.modify_vm_device(
vm_xml.VMXML.new_from_dumpxml(vm.name),
"interface", iface_dict)
migration_obj.setup_default()

libvirt_version.is_libvirt_feature_supported(params)
cleanup_ifaces = "yes" == params.get("cleanup_ifaces", "yes")
iommu_dict = eval(params.get('iommu_dict', '{}'))
iface_dict = eval(params.get('iface_dict', '{}'))

vm_name = params.get("main_vm", "avocado-vt-vm1")
vm = env.get_vm(vm_name)
test_obj = sriov_base.SRIOVTest(vm, test, params)
migration_obj = base_steps.MigrationBase(test, vm, params)

try:
setup_test()
test.log.info("TEST_STEP: Migrate the VM to the target host.")
migration_obj.run_migration()
migration_obj.verify_default()
check_iommu_xml(vm, params)

test.log.info("TEST_STEP: Migrate back the VM to the source host.")
migration_obj.run_migration_back()
migration_obj.migration_test.ping_vm(vm, params)
finally:
test_obj.teardown_iommu_test()
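Note: check_iommu_xml() compares only the keys present in iommu_dict, so extra attributes reported by fetch_attrs() on the destination do not cause a failure; for the intel variant the nested 'driver' dict must match as a whole. A minimal standalone sketch of that comparison pattern (illustration only; the fetched values here are made-up assumptions, not output from a real run):

expected = {'model': 'virtio'}                               # virtio variant's iommu_dict
fetched = {'model': 'virtio', 'alias': {'name': 'iommu0'}}   # assumed fetch_attrs() result

for attr, value in expected.items():
    if fetched.get(attr) != value:
        raise AssertionError('iommu xml(%s) comparison failed. '
                             'Expected "%s", got "%s".'
                             % (attr, value, fetched.get(attr)))
print("iommu attributes match")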
3 changes: 3 additions & 0 deletions provider/sriov/sriov_base.py
@@ -168,6 +168,9 @@ def update_disk_addr(self, disk_dict):
        dev_attrs.update({'slot': self.dev_slot})

        disk_dict.update({"address": {'attrs': dev_attrs}})
        if disk_dict['target']['bus'] == "scsi":
            disk_dict['address']['attrs'].update({'type': 'drive'})

        if self.controller_dicts[-1]['model'] == 'pcie-root-port':
            self.controller_dicts.pop()
        return disk_dict
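A hypothetical illustration of the new SCSI branch in update_disk_addr() (illustration only, input values are made up): when the disk target bus is scsi, the generated address attributes additionally get type='drive', so the disk is not left with a plain PCI-style address:

disk_dict = {'target': {'dev': 'sda', 'bus': 'scsi'},
             'address': {'attrs': {'bus': '0', 'unit': '0'}}}  # assumed attrs
if disk_dict['target']['bus'] == "scsi":
    disk_dict['address']['attrs'].update({'type': 'drive'})
# disk_dict['address'] is now {'attrs': {'bus': '0', 'unit': '0', 'type': 'drive'}}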