diff --git a/provider/storage_benchmark.py b/provider/storage_benchmark.py index 06b71b0493..62dfaea142 100644 --- a/provider/storage_benchmark.py +++ b/provider/storage_benchmark.py @@ -134,8 +134,9 @@ def __kill_procs(self, session): :param session: vm session :type session: aexpect.client.ShellSession """ - LOG_JOB.info("Killing all %s processes by force.", self.name) - session.cmd_output(self._kill_pid % self.name, timeout=120) + proc_name = self.name if self.os_type == "linux" else f"{self.name}.exe" + LOG_JOB.info("Killing all %s processes by force.", proc_name) + session.cmd_output(self._kill_pid % proc_name, timeout=120) def __remove_env_files(self, session, timeout=300): """ diff --git a/qemu/tests/cfg/virtio_fs_nfs_migration.cfg b/qemu/tests/cfg/virtio_fs_nfs_migration.cfg new file mode 100644 index 0000000000..456478e5cc --- /dev/null +++ b/qemu/tests/cfg/virtio_fs_nfs_migration.cfg @@ -0,0 +1,120 @@ +- virtio_fs_nfs_migration: + no RHEL.6 RHEL.7 RHEL.8.0 RHEL.8.1 + no Win2008 Win7 Win2012 Win2012..r2 Win8 Win8.1 + + type = virtio_fs_nfs_migration + virt_test_type = qemu + required_qemu = [9.0.0,) + kill_vm = yes + start_vm = yes + not_preprocess = yes + + # local nfs config + setup_local_nfs = yes + export_options = 'rw,insecure,no_root_squash,async' + export_dir = /var/tmp/virtio_fs_test_nfs + nfs_mount_options = rw + nfs_mount_dir = /var/mnt/fs + nfs_mount_dir_targetfs = /var/mnt/targetfs + + filesystems = fs + filesystems_migration = targetfs + fs_driver = virtio-fs + fs_source_type = mount + fs_source_dir_fs = ${nfs_mount_dir} + fs_source_dir_targetfs = ${nfs_mount_dir_targetfs} + fs_target = myfs + fs_dest = /mnt/${fs_target} + force_create_fs_source = no + remove_fs_source = no + fs_driver_props = {"queue-size": 1024} + fs_binary_extra_options = "" + fs_binary_extra_options_targetfs = " --modcaps=+dac_read_search" + driver_name = viofs + + test_file = test_file + guest_file = "%s/${test_file}" + pre_command = "mkdir -p ${nfs_mount_dir_targetfs} 
${nfs_mount_dir} ${export_dir}" + post_command = "rm -rf ${nfs_mount_dir_targetfs} ${nfs_mount_dir} ${export_dir}" + cmd_md5 = 'md5sum %s/${test_file}' + cmd_dd = 'dd if=/dev/urandom of=%s bs=1M count=200 oflag=direct' + fio_name = fiotest_fs + fio_options = '--name=${fio_name} --filename=%s/${test_file}.fio --ioengine=libaio --rw=rw ' + fio_options += '--bs=4k --size=1G --runtime=1800 --time_based' + cmd_chk_fio = "ps aux | grep fio | grep ${fio_name} | grep -v grep" + + share_mem = yes + vm_mem_share = yes + vm_mem_backend = memory-backend-memfd + Win10.i386: + mem = 4096 + !s390, s390x: + mem_devs = mem1 + backend_mem_mem1 = memory-backend-memfd + size_mem1 = ${mem}M + use_mem_mem1 = no + guest_numa_nodes = shm0 + numa_memdev_shm0 = mem-mem1 + numa_nodeid_shm0 = 0 + Windows: + # install winfsp tool + i386, i686: + install_winfsp_path = 'C:\Program Files' + devcon_dirname = 'x86' + x86_64: + install_winfsp_path = 'C:\Program Files (x86)' + devcon_dirname = 'amd64' + install_winfsp_cmd = 'msiexec /i WIN_UTILS:\winfsp.msi /qn' + check_installed_cmd = 'dir "%s" |findstr /I winfsp' + viofs_log_file = C:\viofs_log.txt + viofs_svc_name = VirtioFsSvc + viofs_exe_path = C:\virtiofs.exe + viofs_exe_copy_cmd = xcopy %s C:\ /Y + viofs_sc_create_cmd = 'sc create ${viofs_svc_name} binpath=${viofs_exe_path} start=auto' + viofs_sc_create_cmd += ' depend="WinFsp.Launcher/VirtioFsDrv" DisplayName="Virtio FS Service"' + viofs_sc_start_cmd = 'sc start ${viofs_svc_name}' + viofs_sc_query_cmd = 'sc query ${viofs_svc_name}' + viofs_sc_delete_cmd = 'sc delete ${viofs_svc_name}' + debug_log_operation = 'enable' + viofs_debug_enable_cmd = 'reg add HKLM\Software\VirtIO-FS /v DebugFlags /d 0xFFFFFFFF /t REG_DWORD' + viofs_log_enable_cmd = 'reg add HKLM\Software\VirtIO-FS /v DebugLogFile /d ${viofs_log_file} /t REG_SZ' + viofs_debug_delete_cmd = 'reg delete HKLM\Software\VirtIO-FS /v DebugFlags /f' + viofs_log_delete_cmd = 'reg delete HKLM\Software\VirtIO-FS /v DebugLogFile /f' + 
viofs_reg_query_cmd = 'reg query HKLM\Software\VirtIO-FS' + virtio_win_media_type = iso + cdroms += " virtio" + cmd_md5 = "%s && md5sum.exe ${test_file}" + cmd_dd = 'dd if=/dev/random of=%s bs=1M count=200' + guest_file = "%s\${test_file}" + fio_options = '--name=fiotest --filename=%s\${test_file}.fio --ioengine=windowsaio --rw=rw ' + fio_options += '--bs=4k --size=1G --runtime=1800 --time_based --thread' + fio_name = fio.exe + cmd_chk_fio = 'TASKLIST /FI "IMAGENAME eq FIO.EXE"' + variants: + - cache_mode_auto: + fs_binary_extra_options += " --cache auto" + fs_binary_extra_options_targetfs += " --cache auto" + - cache_mode_always: + fs_binary_extra_options += " --cache always" + fs_binary_extra_options_targetfs += " --cache always" + - cache_mode_never: + fs_binary_extra_options += " --cache never" + fs_binary_extra_options_targetfs += " --cache never" + - cache_mode_metadata: + fs_binary_extra_options += " --cache metadata" + fs_binary_extra_options_targetfs += " --cache metadata" + variants: + - @default: + - writeback: + only cache_mode_auto cache_mode_always + fs_binary_extra_options += " --writeback" + fs_binary_extra_options_targetfs += " --writeback" + - dio: + only cache_mode_never + fs_binary_extra_options += " --allow-direct-io" + fs_binary_extra_options_targetfs += " --allow-direct-io" + variants: + - file_handles_never: + fs_binary_extra_options += " --inode-file-handles=never --migration-mode=file-handles --modcaps=+dac_read_search" + - file_handles_mandatory: + fs_binary_extra_options += " --inode-file-handles=mandatory --migration-mode=file-handles --modcaps=+dac_read_search" diff --git a/qemu/tests/virtio_fs_nfs_migration.py b/qemu/tests/virtio_fs_nfs_migration.py new file mode 100644 index 0000000000..a70c1595e8 --- /dev/null +++ b/qemu/tests/virtio_fs_nfs_migration.py @@ -0,0 +1,279 @@ +import re + +from virttest import env_process, error_context, nfs, utils_disk, utils_misc, utils_test + +from provider import virtio_fs_utils +from 
provider.storage_benchmark import generate_instance + + +@error_context.context_aware +def run(test, params, env): + """ + Basic migration test with different cache modes and i/o over nfs + Steps: + 1. Setup a local nfs server and mount it to dir fs and targetfs + 2. Run the virtiofsd daemon to share fs with different cache modes + 3. Boot the source guest with the virtiofs device in step2 + 4. Mount the virtiofs targets inside the guest + 5. Create a file and get its md5, then run fio + 6. Run the virtiofsd daemon to share targetfs + 7. Boot the target guest with the virtiofs device in step6 + 8. Do migration from the source guest to the target guest + 9. No error occurs, the virtiofs is mounted automatically and + the file md5 should keep the same, fio is still running + + :param test: QEMU test object. + :param params: Dictionary with the test parameters. + :param env: Dictionary with test environment. + """ + + def create_service(session): + if os_type == "windows": + error_context.context("Create virtiofs service in guest.", test.log.info) + + driver_name = params["driver_name"] + session = utils_test.qemu.windrv_check_running_verifier( + session, vm, test, driver_name + ) + viofs_svc_name = params["viofs_svc_name"] + virtio_fs_utils.create_viofs_service( + test, params, session, service=viofs_svc_name + ) + return session + + def delete_service(): + if os_type == "windows": + error_context.context("Delete virtiofs service in guest.", test.log.info) + session = vm.wait_for_login() + virtio_fs_utils.delete_viofs_serivce(test, params, session) + session.close() + + def start_service(session): + def start_multifs_instance(fs_tag, fs_target, fs_volume_label): + """ + Only for windows and only for multiple shared directory. + """ + error_context.context( + "MultiFS-%s: Start virtiofs instance with" + " tag %s to %s." 
% (fs_tag, fs_target, fs_volume_label), + test.log.info, + ) + instance_start_cmd = params["instance_start_cmd"] + output = session.cmd_output( + instance_start_cmd % (fs_target, fs_target, fs_volume_label) + ) + if re.search("KO.*error", output, re.I): + test.fail( + "MultiFS-%s: Start virtiofs instance failed, " + "output is %s." % (fs_tag, output) + ) + + for fs in params.objects("filesystems"): + fs_params = params.object_params(fs) + + fs_target = fs_params["fs_target"] + fs_dest = fs_params["fs_dest"] + + if os_type == "linux": + utils_misc.make_dirs(fs_dest, session) + error_context.context( + "Mount virtiofs target %s to %s inside" + " guest." % (fs_target, fs_dest), + test.log.info, + ) + if not utils_disk.mount( + fs_target, fs_dest, "virtiofs", session=session + ): + utils_misc.safe_rmdir(fs_dest, session=session) + test.fail(f"Failed to mount virtiofs {fs_target}") + else: + if params["viofs_svc_name"] == "VirtioFsSvc": + error_context.context( + "Start virtiofs service in guest.", test.log.info + ) + debug_log_operation = params.get("debug_log_operation") + if debug_log_operation: + session = virtio_fs_utils.operate_debug_log( + test, params, session, vm, debug_log_operation + ) + virtio_fs_utils.start_viofs_service(test, params, session) + else: + error_context.context( + "Start winfsp.launcher instance in guest.", test.log.info + ) + fs_volume_label = fs_params["volume_label"] + start_multifs_instance(fs, fs_target, fs_volume_label) + + fs_dest = "%s:" % virtio_fs_utils.get_virtiofs_driver_letter( + test, fs_target, session + ) + + guest_mnts[fs_target] = fs_dest + return session + + def stop_service(session): + error_context.context("Stop virtiofs service in guest.", test.log.info) + if os_type == "linux": + for fs_target, fs_dest in guest_mnts.items(): + utils_disk.umount(fs_target, fs_dest, "virtiofs", session=session) + utils_misc.safe_rmdir(fs_dest, session=session) + else: + if params["viofs_svc_name"] == "WinFSP.Launcher": + for fs_target in 
guest_mnts.keys(): + error_context.context( + "Unmount fs with WinFsp.Launcher.z", test.log.info + ) + instance_stop_cmd = params["instance_stop_cmd"] + session.cmd(instance_stop_cmd % fs_target) + else: + if guest_mnts: + virtio_fs_utils.stop_viofs_service(test, params, session) + session.close() + + def start_io(session): + def do_fio(): + fs_dest = guest_mnts[fs_target] + error_context.context("Start fio on %s." % fs_dest, test.log.info) + + fio = generate_instance(params, vm, "fio") + guest_fio_objects[fs_target] = fio + tmo = params.get_numeric("fio_runtime", 1800) + bg_test = utils_test.BackgroundTest( + fio.run, (params["fio_options"] % fs_dest, tmo) + ) + bg_test.start() + + def dd_file(): + error_context.context("Create a file and get its md5", test.log.info) + fs_dest = guest_mnts[fs_target] + guest_file = fs_params["guest_file"] % fs_dest + io_timeout = params.get_numeric("io_timeout", 300) + session.cmd(params["cmd_dd"] % guest_file, io_timeout) + + cmd_md5 = params["cmd_md5"] % fs_dest + md5 = session.cmd_output(cmd_md5, io_timeout).strip().split()[0] + guest_files_md5[fs_target] = md5 + test.log.debug("The guest file md5: %s", md5) + + for fs in params.objects("filesystems"): + fs_params = params.object_params(fs) + fs_target = fs_params["fs_target"] + dd_file() + do_fio() + + def stop_io(): + error_context.context("Stop fio", test.log.info) + for fio in guest_fio_objects.values(): + fio.clean(force=True) + + def test_migration(): + def check_fio_running(): + error_context.context("Check fio is running after migration", test.log.info) + for fs in params.objects("filesystems"): + fs_params = params.object_params(fs) + fio_name = fs_params["fio_name"] + out = session.cmd_output(params["cmd_chk_fio"]) + test.log.debug("Status of fio process: %s", out) + + if fio_name not in out: + test.fail(f"Failed to get a running fio process for {fs}") + + def check_service_activated(): + error_context.context( + "Check virtiofs service activated after migration", + 
test.log.info, + ) + tmo = params.get_numeric("active_timeout", 10) + if os_type == "linux": + for fs_target, fs_dest in guest_mnts.items(): + if not utils_misc.wait_for( + lambda: utils_disk.is_mount( + fs_target, fs_dest, "virtiofs", None, True, session + ), + tmo, + ): + test.fail(f"Failed to mount {fs_target}") + else: + for fs_target in guest_mnts.keys(): + vol_lable = virtio_fs_utils.get_virtiofs_driver_letter( + test, fs_target, session + ) + test.log.debug( + "Fs target %s mounted on volume %s", fs_target, vol_lable + ) + + def check_file_md5(): + error_context.context("Check file md5 after migration", test.log.info) + for fs_target, original_md5 in guest_files_md5.items(): + fs_dest = guest_mnts[fs_target] + cmd_md5 = params["cmd_md5"] % fs_dest + md5 = session.cmd_output(cmd_md5).strip().split()[0] + test.log.debug("File md5: %s", md5) + + if md5 != original_md5: + test.fail(f"Wrong file md5 found: {md5}") + + # FIXME: Replace the vm's params to use a different shared virtio fs + vm.params["filesystems"] = vm.params["filesystems_migration"] + vm.migrate() + session = vm.wait_for_login() + + check_service_activated() + check_fio_running() + check_file_md5() + + return session + + def setup_local_nfs(): + error_context.context("Setup nfs server, mount it to two dirs", test.log.info) + + # Setup the local nfs server and mount it to nfs_mount_dir + nfs_obj = nfs.Nfs(params) + nfs_obj.setup() + + # Mount the local nfs server to nfs_mount_dir_targetfs + target_params = params.copy() + target_params["nfs_mount_dir"] = params["nfs_mount_dir_targetfs"] + target_params["setup_local_nfs"] = "no" + nfs_target = nfs.Nfs(target_params) + nfs_target.mount() + + return nfs_obj, nfs_target + + def cleanup_local_nfs(): + error_context.context("Umount all and stop nfs server", test.log.info) + if target_nfs: + target_nfs.umount() + if local_nfs: + local_nfs.cleanup() + + guest_mnts = dict() + guest_files_md5 = dict() + guest_fio_objects = dict() + os_type = 
params["os_type"] + local_nfs = None + target_nfs = None + vm = None + session = None + + try: + local_nfs, target_nfs = setup_local_nfs() + env_process.process( + test, params, env, env_process.preprocess_image, env_process.preprocess_vm + ) + vm = env.get_vm(params.get("main_vm")) + vm.verify_alive() + session = vm.wait_for_login() + session = create_service(session) + session = start_service(session) + start_io(session) + session = test_migration() + finally: + try: + stop_io() + stop_service(session) + delete_service() + finally: + if vm: + vm.destroy() + cleanup_local_nfs()