diff --git a/libvirt/tests/cfg/scsi/scsi_device.cfg b/libvirt/tests/cfg/scsi/scsi_device.cfg index 40ea5035ec0..da5636396c5 100644 --- a/libvirt/tests/cfg/scsi/scsi_device.cfg +++ b/libvirt/tests/cfg/scsi/scsi_device.cfg @@ -2,6 +2,7 @@ type = scsi_device status_error = "no" start_vm = "no" + vms = avocado-vt-vm1 vm1 variants scsi_type: - scsi_hostdev: variants: @@ -28,6 +29,12 @@ controller_type = "scsi" controller_model = "virtio-scsi" controller_index = "0" + - shared_by_two_guests: + only hotplug + - qemu_pr_helper: + only coldplug + managed = "yes" + target_device = "vdb" - negative: variants test_scenario: - max_sectors_controller: diff --git a/libvirt/tests/src/scsi/scsi_device.py b/libvirt/tests/src/scsi/scsi_device.py index f9dfe4b95f7..f9f9959bdad 100644 --- a/libvirt/tests/src/scsi/scsi_device.py +++ b/libvirt/tests/src/scsi/scsi_device.py @@ -12,6 +12,8 @@ import logging import os +from avocado.utils import process + from virttest import virt_vm from virttest import virsh @@ -438,6 +440,109 @@ def check_scsi_controller(test, params, env): test.fail("Get index=0 scsi controller although detached") +def test_hotplug_scsi_hostdev_shared_by_two_guests(test, params, env): + """ + Test hotplug scsi device into two guests + + :param test: one test object instance + :param params: dict wrapped with params + :param env: environment instance + """ + block_device1 = setup_iscsi_block_device() + hostdev_xml = create_host_dev(params, block_device1) + hostdev_xml.shareable = True + vm_names = params.get("vms").split() + for vm_name in vm_names: + vm = env.get_vm(vm_name) + if vm.is_dead(): + vm.start() + vm.wait_for_login().close() + virsh.attach_device(vm_name, hostdev_xml.xml, flagstr="--live", + ignore_status=False) + + +def check_hostdev_shareable_attr(test, params, env): + """ + check scsi shareable attribute + + :param test: one test object instance + :param params: dict wrapped with params + :param env: environment instance + """ + vm_names = 
params.get("vms").split() + for vm_name in vm_names: + vm_output = virsh.dumpxml(vm_name).stdout_text + if 'shareable' not in vm_output: + test.fail("VM: % failed to find shareable attribute in output: %s" % (vm_name, vm_output)) + + +def test_coldplug_scsi_hostdev_qemu_pr_helper(test, params, env): + """ + Test coldplug scsi hostdev and check qemu-pr-helper status + + :param test: one test object instance + :param params: dict wrapped with params + :param env: environment instance + """ + vm_name = params.get("main_vm") + block_device = setup_scsi_debug_block_device() + + disk_src_dict = {"attrs": {"dev": block_device}} + target_device = params.get("target_device") + + customized_disk = libvirt_disk.create_primitive_disk_xml( + "block", "lun", + target_device, 'scsi', + 'raw', disk_src_dict, None) + + # update reservation attributes + reservations_dict = {"reservations_managed": "yes"} + disk_source = customized_disk.source + disk_source.reservations = customized_disk.new_reservations(**reservations_dict) + customized_disk.source = disk_source + + LOG.info("disk xml is: %s", customized_disk) + xml_dump = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) + xml_dump.add_device(customized_disk) + xml_dump.sync() + + +def check_qemu_pr_helper(test, params, env): + """ + check qemu_qr_helper process can be restarted when VM issue pr cmds + + :param test: one test object instance + :param params: dict wrapped with params + :param env: environment instance + """ + old_qr_pid = process.run("pidof qemu-pr-helper", + ignore_status=True, shell=True).stdout_text.strip() + if old_qr_pid is None: + test.fail("qemu-pr-helper is not started after VM is started") + process.system("killall qemu-pr-helper && sleep 2", + ignore_status=True, shell=True) + + vm_name = params.get("main_vm") + vm = env.get_vm(vm_name) + session = vm.wait_for_login() + _, cmd_o = session.cmd_status_output("lsscsi|grep scsi_debug|awk '{print $6}'") + # send series of pr commands to VM + sg_cmd_list = 
["sg_persist --no-inquiry -v --out --register-ignore --param-sark 123aaa %s && sleep 1" % cmd_o, + "sg_persist --no-inquiry --in -k %s && sleep 1" % cmd_o, + "sg_persist --no-inquiry -v --out --reserve --param-rk 123aaa --prout-type 5 %s && sleep 1" % cmd_o, + "sg_persist --no-inquiry --in -r %s && sleep 1" % cmd_o, + "sg_persist --no-inquiry -v --out --release --param-rk 123aaa --prout-type 5 %s && sleep 1" % cmd_o, + "sg_persist --no-inquiry --in -r %s && sleep 1" % cmd_o, + "sg_persist --no-inquiry -v --out --register --param-rk 123aaa --prout-type 5 %s && sleep 1" % cmd_o, + "sg_persist --no-inquiry --in -k %s && sleep 1" % cmd_o] + for sg_cmd in sg_cmd_list: + session.cmd_status_output(sg_cmd) + new_qr_pid = process.run("pidof qemu-pr-helper", + ignore_status=True, shell=True).stdout_text.strip() + if new_qr_pid is None: + test.fail("qemu-pr-helper is not restarted after issuing pr commands to VM") + + def run(test, params, env): """ Test manipulate scsi device. @@ -451,13 +556,21 @@ def run(test, params, env): vm_name = params.get("main_vm") vm = env.get_vm(vm_name) - # Back up xml file - if vm.is_alive(): - vm.destroy(gracefully=False) + vm_names = params.get("vms").split() + if len(vm_names) < 2: + test.cancel("No multi vms provided.") + + # Backup vm xml files. + vms_backup = [] + # it need use 2 VMs for testing. 
+ for i in list(range(2)): + if virsh.is_alive(vm_names[i]): + virsh.destroy(vm_names[i], gracefully=False) + vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_names[i]) + vms_backup.append(vmxml_backup) coldplug = "yes" == params.get("coldplug") define_error = "yes" == params.get("define_error", "no") - xml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) plug_mode = params.get("plug_mode") scsi_type = params.get("scsi_type") @@ -494,15 +607,21 @@ check_vdisk_hostdev_address_unit(test, params, env) elif test_scenario == "unplug_scsi_controller": check_scsi_controller(test, params, env) + elif test_scenario == "shared_by_two_guests": + check_hostdev_shareable_attr(test, params, env) + elif test_scenario == "qemu_pr_helper": + check_qemu_pr_helper(test, params, env) finally: - # Recover VM. - if vm.is_alive(): - vm.destroy(gracefully=False) - LOG.info("Restoring vm...") - xml_backup.sync() + # Recover VMs. + for i in list(range(2)): + if virsh.is_alive(vm_names[i]): + virsh.destroy(vm_names[i], gracefully=False) + LOG.info("Restoring vms...") + for vmxml_backup in vms_backup: + vmxml_backup.sync() # Delete the tmp files. libvirt.setup_or_cleanup_iscsi(is_setup=False) - if test_scenario == "boot_order": + if test_scenario in ["boot_order", "same_hostdev_address", "tap_library", "qemu_pr_helper"]: try: libvirt.delete_scsi_disk() except Exception as e: