Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

virtio_fs_migration_on_error: Migration test with migration-on-error #4196

Open
wants to merge 1 commit into
base: master
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
88 changes: 88 additions & 0 deletions qemu/tests/cfg/virtio_fs_migration_on_error.cfg
Original file line number Diff line number Diff line change
@@ -0,0 +1,88 @@
- virtio_fs_migration_on_error:
no RHEL.6 RHEL.7 RHEL.8.0 RHEL.8.1
no Win2008 Win7 Win2012 Win2012..r2 Win8 Win8.1

type = virtio_fs_migration_on_error
virt_test_type = qemu
required_qemu = [9.0.0,)
kill_vm = yes
start_vm = yes

filesystems = fs
filesystems_migration = targetfs
fs_driver = virtio-fs
fs_source_type = mount
fs_source_dir_fs = /var/tmp/virtio_fs_source
fs_source_dir_targetfs = /var/tmp/virtio_fs_target
fs_target = myfs
fs_dest = /mnt/${fs_target}
force_create_fs_source = no
remove_fs_source = no
fs_driver_props = {"queue-size": 1024}
driver_name = viofs

test_file = test_file
test_data = hello_virtiofs
pre_command = mkdir -p ${fs_source_dir_fs} ${fs_source_dir_targetfs}
post_command = rm -rf ${fs_source_dir_fs} ${fs_source_dir_targetfs}
read_file_cmd = "cat %s/${test_file}"
fs_binary_extra_options = " --cache auto"
on_error = abort

share_mem = yes
vm_mem_share = yes
vm_mem_backend = memory-backend-memfd
Win10.i386:
mem = 4096
!s390, s390x:
mem_devs = mem1
backend_mem_mem1 = memory-backend-memfd
size_mem1 = ${mem}M
use_mem_mem1 = no
guest_numa_nodes = shm0
numa_memdev_shm0 = mem-mem1
numa_nodeid_shm0 = 0
Windows:
# install winfsp tool
i386, i686:
install_winfsp_path = 'C:\Program Files'
devcon_dirname = 'x86'
x86_64:
install_winfsp_path = 'C:\Program Files (x86)'
devcon_dirname = 'amd64'
install_winfsp_cmd = 'msiexec /i WIN_UTILS:\winfsp.msi /qn'
check_installed_cmd = 'dir "%s" |findstr /I winfsp'
viofs_log_file = C:\viofs_log.txt
viofs_svc_name = VirtioFsSvc
viofs_exe_path = C:\virtiofs.exe
viofs_exe_copy_cmd = xcopy %s C:\ /Y
viofs_sc_create_cmd = 'sc create ${viofs_svc_name} binpath=${viofs_exe_path} start=auto'
viofs_sc_create_cmd += ' depend="WinFsp.Launcher/VirtioFsDrv" DisplayName="Virtio FS Service"'
viofs_sc_start_cmd = 'sc start ${viofs_svc_name}'
viofs_sc_query_cmd = 'sc query ${viofs_svc_name}'
viofs_sc_delete_cmd = 'sc delete ${viofs_svc_name}'
debug_log_operation = 'enable'
viofs_debug_enable_cmd = 'reg add HKLM\Software\VirtIO-FS /v DebugFlags /d 0xFFFFFFFF /t REG_DWORD'
viofs_log_enable_cmd = 'reg add HKLM\Software\VirtIO-FS /v DebugLogFile /d ${viofs_log_file} /t REG_SZ'
viofs_debug_delete_cmd = 'reg delete HKLM\Software\VirtIO-FS /v DebugFlags /f'
viofs_log_delete_cmd = 'reg delete HKLM\Software\VirtIO-FS /v DebugLogFile /f'
viofs_reg_query_cmd = 'reg query HKLM\Software\VirtIO-FS'
virtio_win_media_type = iso
cdroms += " virtio"
read_file_cmd = "type %s\${test_file}"
variants:
- @default:
chk_msg = Error loading back-end state of virtio-user-fs device.+\(tag: "${fs_target}"\): Back-end failed to process its internal state
pre_command += " && echo -e ${test_data} > ${fs_source_dir_fs}/${test_file}"
- abort:
on_error = abort
chk_msg = Error loading back-end state of virtio-user-fs device.+\(tag: "${fs_target}"\): Back-end failed to process its internal state
fs_binary_extra_options_targetfs = "${fs_binary_extra_options} --migration-on-error abort"
pre_command += " && echo -e ${test_data} > ${fs_source_dir_fs}/${test_file}"
- guest_error:
on_error = guest_error
fs_binary_extra_options_targetfs = "${fs_binary_extra_options} --migration-on-error guest-error"
variants:
- diff_dir:
pre_command += " && touch ${fs_source_dir_fs}/${test_file}.src1 ${fs_source_dir_fs}/${test_file}.src2"
pre_command += " && touch ${fs_source_dir_targetfs}/${test_file}.tgt1 ${fs_source_dir_targetfs}/${test_file}.tgt2"
220 changes: 220 additions & 0 deletions qemu/tests/virtio_fs_migration_on_error.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,220 @@
import os
import re

from virttest import error_context, utils_disk, utils_misc, utils_test, virt_vm

from provider import virtio_fs_utils


@error_context.context_aware
def run(test, params, env):
    """
    Basic migration test with different cache modes over localfs.

    Steps:
    1. Create a shared directory on the host and write a file
    2. Run the virtiofsd daemon on the host with different cache modes
    3. Boot the source guest with the virtiofs device in step1
    4. Mount the virtiofs targets inside the guest
    5. Create a different directory on the host and write a file
    6. Run the virtiofsd daemon to share the directory in step5
    7. Boot the target guest with the virtiofs device in step5
    8. Do migration from the source guest to the target guest
    9. No error occurs, the virtiofs is mounted automatically and
       the file content keeps the same on the target guest

    :param test: QEMU test object.
    :param params: Dictionary with the test parameters.
    :param env: Dictionary with test environment.
    """

    def create_service(session):
        # Windows only: make sure the viofs driver is running, then create
        # the virtiofs service. Linux guests need no service setup.
        if os_type == "windows":
            error_context.context("Create virtiofs service in guest.", test.log.info)

            driver_name = params["driver_name"]
            session = utils_test.qemu.windrv_check_running_verifier(
                session, vm, test, driver_name
            )
            viofs_svc_name = params["viofs_svc_name"]
            virtio_fs_utils.create_viofs_service(
                test, params, session, service=viofs_svc_name
            )
        return session

    def delete_service():
        # Windows only: remove the virtiofs service created by create_service.
        if os_type == "windows":
            error_context.context("Delete virtiofs service in guest.", test.log.info)
            session = vm.wait_for_login()
            virtio_fs_utils.delete_viofs_serivce(test, params, session)
            session.close()

    def start_service(session):
        def start_multifs_instance(fs_tag, fs_target, fs_volume_label):
            """
            Only for windows and only for multiple shared directory.
            """
            error_context.context(
                "MultiFS-%s: Start virtiofs instance with"
                " tag %s to %s." % (fs_tag, fs_target, fs_volume_label),
                test.log.info,
            )
            instance_start_cmd = params["instance_start_cmd"]
            output = session.cmd_output(
                instance_start_cmd % (fs_target, fs_target, fs_volume_label)
            )
            if re.search("KO.*error", output, re.I):
                test.fail(
                    "MultiFS-%s: Start virtiofs instance failed, "
                    "output is %s." % (fs_tag, output)
                )

        # Mount (Linux) or start the service/instance (Windows) for every
        # shared filesystem, recording target -> mount point in guest_mnts.
        for fs in params.objects("filesystems"):
            fs_params = params.object_params(fs)

            fs_target = fs_params["fs_target"]
            fs_dest = fs_params["fs_dest"]

            if os_type == "linux":
                utils_misc.make_dirs(fs_dest, session)
                error_context.context(
                    "Mount virtiofs target %s to %s inside"
                    " guest." % (fs_target, fs_dest),
                    test.log.info,
                )
                if not utils_disk.mount(
                    fs_target, fs_dest, "virtiofs", session=session
                ):
                    utils_misc.safe_rmdir(fs_dest, session=session)
                    # BUGFIX: the original message was missing the f-prefix,
                    # so "{fs_target}" was emitted literally.
                    test.fail(f"Failed to mount virtiofs {fs_target}.")
            else:
                if params["viofs_svc_name"] == "VirtioFsSvc":
                    error_context.context(
                        "Start virtiofs service in guest.", test.log.info
                    )
                    debug_log_operation = params.get("debug_log_operation")
                    if debug_log_operation:
                        session = virtio_fs_utils.operate_debug_log(
                            test, params, session, vm, debug_log_operation
                        )
                    virtio_fs_utils.start_viofs_service(test, params, session)
                else:
                    error_context.context(
                        "Start winfsp.launcher instance in guest.", test.log.info
                    )
                    fs_volume_label = fs_params["volume_label"]
                    start_multifs_instance(fs, fs_target, fs_volume_label)

                # On Windows the mount point is the drive letter assigned to
                # the virtiofs target, e.g. "Z:".
                fs_dest = "%s:" % virtio_fs_utils.get_virtiofs_driver_letter(
                    test, fs_target, session
                )

            guest_mnts[fs_target] = fs_dest
        return session

    def stop_service():
        # Undo start_service: umount targets on Linux, stop the launcher
        # instances or the virtiofs service on Windows.
        error_context.context("Stop virtiofs service in guest.", test.log.info)

        session = vm.wait_for_login()
        if os_type == "linux":
            for fs_target, fs_dest in guest_mnts.items():
                utils_disk.umount(fs_target, fs_dest, "virtiofs", session=session)
                utils_misc.safe_rmdir(fs_dest, session=session)
        else:
            if params["viofs_svc_name"] == "WinFSP.Launcher":
                for fs_target in guest_mnts.keys():
                    error_context.context(
                        "Unmount fs with WinFsp.Launcher.z", test.log.info
                    )
                    instance_stop_cmd = params["instance_stop_cmd"]
                    session.cmd(instance_stop_cmd % fs_target)
            else:
                if guest_mnts:
                    virtio_fs_utils.stop_viofs_service(test, params, session)
        session.close()

    def check_message():
        # Search the test debug log for the expected migration error message
        # (params["chk_msg"] is a regex). Returns True once found; used as a
        # wait_for predicate because the message may appear asynchronously.
        log_file = os.path.join(
            test.resultsdir, params.get("debug_log_file", "debug.log")
        )
        with open(log_file, "r") as f:
            out = f.read().strip()
        m = re.search(params["chk_msg"], out, re.M)
        if m is not None:
            test.log.debug("Expected message: %s", m.group())
            return True
        return False

    def check_service_activated():
        # After migration, verify each virtiofs target is active again:
        # mounted on Linux, or assigned a drive letter on Windows.
        error_context.context(
            "Check virtiofs service activated after migration.",
            test.log.info,
        )
        session = vm.wait_for_login()
        tmo = params.get_numeric("active_timeout", 10)
        if os_type == "linux":
            for fs_target, fs_dest in guest_mnts.items():
                if not utils_misc.wait_for(
                    lambda: utils_disk.is_mount(
                        fs_target, fs_dest, "virtiofs", None, True, session
                    ),
                    tmo,
                ):
                    # BUGFIX: the original called test.log.fail, which does
                    # not exist on a logger and would raise AttributeError.
                    test.fail(f"Failed to mount {fs_target}")
        else:
            for fs_target in guest_mnts.keys():
                vol_label = virtio_fs_utils.get_virtiofs_driver_letter(
                    test, fs_target, session
                )
                test.log.debug(
                    "Fs target %s mounted on volume %s", fs_target, vol_label
                )
        session.close()

    def check_file_content():
        # The shared file must contain exactly the data written by
        # pre_command on the host (params["test_data"]).
        error_context.context("Check file content", test.log.info)
        for fs_dest in guest_mnts.values():
            out = session.cmd_output(params["read_file_cmd"] % fs_dest).strip()
            test.log.debug("File content: %s", out)
            if out != params["test_data"]:
                test.fail(f"Wrong file content found: {out}")

    def test_migration_abort():
        # With --migration-on-error abort the migration is expected to fail;
        # the proof of correct behavior is the error message in the log.
        check_file_content()
        try:
            vm.migrate()
        except virt_vm.VMMigrateFailedError:
            # Sometimes we got status: failed, mostly we got status: completed
            test.log.debug("Expected migration failure")

        error_context.context("Check error message after migration", test.log.info)
        tmo = params.get_numeric("chk_msg_timeout", 600)
        if not utils_misc.wait_for(check_message, tmo, step=30):
            test.fail("Failed to get the expected message")

    def test_migration_guest_error():
        # With --migration-on-error guest-error the migration completes and
        # the virtiofs service must come back up inside the guest.
        vm.migrate()
        check_service_activated()

    guest_mnts = dict()
    os_type = params["os_type"]
    on_error = params["on_error"]
    # Dispatch table: the "on_error" param selects the scenario to run.
    test_funcs = {
        "abort": test_migration_abort,
        "guest_error": test_migration_guest_error,
    }

    vm = env.get_vm(params.get("main_vm"))
    vm.verify_alive()
    session = vm.wait_for_login()

    try:
        session = create_service(session)
        session = start_service(session)
        # FIXME: Replace the vm's params to use a different shared virtio fs
        vm.params["filesystems"] = vm.params["filesystems_migration"]
        test_funcs[on_error]()
    finally:
        if not vm.is_dead():
            stop_service()
        delete_service()
Loading