diff --git a/libvirt/tests/cfg/migration/migration_uri/migration_network_data_transport_tcp_migration_address.cfg b/libvirt/tests/cfg/migration/migration_uri/migration_network_data_transport_tcp_migration_address.cfg
new file mode 100644
index 0000000000..6842595b84
--- /dev/null
+++ b/libvirt/tests/cfg/migration/migration_uri/migration_network_data_transport_tcp_migration_address.cfg
@@ -0,0 +1,92 @@
+- migration.migration_uri.network_data_transport.tcp.migration_address:
+    type = migration_network_data_transport_tcp
+    migration_setup = 'yes'
+    storage_type = 'nfs'
+    setup_local_nfs = 'yes'
+    disk_type = "file"
+    disk_source_protocol = "netfs"
+    mnt_path_name = ${nfs_mount_dir}
+    # Console output can only be monitored via virsh console output
+    only_pty = True
+    take_regular_screendumps = no
+    # Extra options to pass to virsh migrate
+    virsh_migrate_extra = ''
+    # SSH connection timeout
+    ssh_timeout = 60
+    # Local URI
+    virsh_migrate_connect_uri = 'qemu:///system'
+    virsh_migrate_dest_state = "running"
+    virsh_migrate_src_state = "shut off"
+    image_convert = 'no'
+    server_ip = "${migrate_dest_host}"
+    server_user = "root"
+    server_pwd = "${migrate_dest_pwd}"
+    status_error = "no"
+    check_network_accessibility_after_mig = "yes"
+    migrate_desturi_port = "16509"
+    migrate_desturi_type = "tcp"
+    virsh_migrate_desturi = "qemu+tcp://${migrate_dest_host}/system"
+    test_case = "migration_address"
+    variants:
+        - p2p:
+            virsh_migrate_options = '--live --p2p --verbose'
+        - non_p2p:
+            virsh_migrate_options = '--live --verbose'
+    variants:
+        - all_ipv6_dest_and_ipv4_src:
+            qemu_conf_dest = '{r".*migration_address\s*=.*": "migration_address='::'"}'
+            virsh_migrate_extra = "--migrateuri tcp://${migrate_dest_host}"
+        - all_ipv6_dest_and_ipv6_src:
+            ipv6_addr_des = "ENTER.YOUR.IPv6.TARGET"
+            qemu_conf_dest = '{r".*migration_address\s*=.*": "migration_address='::'"}'
+            ipv6_config = "yes"
+            virsh_migrate_extra = "--migrateuri tcp://[${ipv6_addr_des}]"
+        - all_ipv4_dest_and_ipv6_src:
+            qemu_conf_dest = '{r".*migration_address\s*=.*": "migration_address='0.0.0.0'"}'
+            ipv6_config = "yes"
+            status_error = "yes"
+            ipv6_addr_des = "ENTER.YOUR.IPv6.TARGET"
+            virsh_migrate_extra = "--migrateuri tcp://[${ipv6_addr_des}]"
+        - all_ipv4_dest_and_ipv4_src:
+            qemu_conf_dest = '{r".*migration_address\s*=.*": "migration_address='0.0.0.0'"}'
+            virsh_migrate_extra = "--migrateuri tcp://${migrate_dest_host}"
+        - ipv4_dest_and_ipv6_src:
+            ipv6_addr_des = "ENTER.YOUR.IPv6.TARGET"
+            qemu_conf_dest = '{r".*migration_address\s*=.*": "migration_address='${migrate_dest_host}'"}'
+            ipv6_config = "yes"
+            status_error = "yes"
+            virsh_migrate_extra = "--migrateuri tcp://[${ipv6_addr_des}]"
+        - ipv4_dest_and_ipv4_src:
+            qemu_conf_dest = '{r".*migration_address\s*=.*": "migration_address='${migrate_dest_host}'"}'
+            virsh_migrate_extra = "--migrateuri tcp://${migrate_dest_host}"
+        - ipv6_dest_and_ipv6_src:
+            ipv6_addr_des = "ENTER.YOUR.IPv6.TARGET"
+            ipv6_config = "yes"
+            qemu_conf_dest = '{r".*migration_address\s*=.*": "migration_address='[${ipv6_addr_des}]'"}'
+            virsh_migrate_extra = "--migrateuri tcp://[${ipv6_addr_des}]"
+        - ipv6_dest_and_ipv4_src:
+            ipv6_addr_des = "ENTER.YOUR.IPv6.TARGET"
+            ipv6_config = "yes"
+            status_error = "yes"
+            qemu_conf_dest = '{r".*migration_address\s*=.*": "migration_address='[${ipv6_addr_des}]'"}'
+            virsh_migrate_extra = "--migrateuri tcp://${migrate_dest_host}"
+        - default_dest_in_ipv4_env_and_ipv4_src:
+            default_qemu_conf = "yes"
+            ipv4_env_on_target = "yes"
+            server_ifname = "eno1"
+            ip_addr_suffix = "64"
virsh_migrate_extra = "--migrateuri tcp://${migrate_dest_host}" + - default_dest_in_ipv6_env_and_ipv4_src: + default_qemu_conf = "yes" + ipv6_addr_des = "ENTER.YOUR.IPv6.TRAGET" + ipv6_config = "yes" + virsh_migrate_extra = "--migrateuri tcp://${migrate_dest_host}" + - default_dest_in_ipv6_env_and_ipv6_src: + default_qemu_conf = "yes" + ipv6_addr_des = "ENTER.YOUR.IPv6.TRAGET" + ipv6_config = "yes" + virsh_migrate_extra = "--migrateuri tcp://[${ipv6_addr_des}]" + - invalid_listen_dest: + qemu_conf_dest = '{r".*migration_address\s*=.*": "migration_address='01.3.4.4'"}' + status_error = "yes" + err_msg = "Failed to bind socket: Cannot assign requested address" diff --git a/libvirt/tests/src/migration/migration_uri/migration_network_data_transport_tcp.py b/libvirt/tests/src/migration/migration_uri/migration_network_data_transport_tcp.py index dc95ad65a6..c0a28b85e3 100644 --- a/libvirt/tests/src/migration/migration_uri/migration_network_data_transport_tcp.py +++ b/libvirt/tests/src/migration/migration_uri/migration_network_data_transport_tcp.py @@ -3,14 +3,65 @@ from virttest import libvirt_remote from virttest import libvirt_version from virttest import remote +from virttest import utils_config from virttest import utils_net from virttest.utils_libvirt import libvirt_config from provider.migration import base_steps +from provider.migration import migration_base qemu_conf_remote = None remove_key_remote = None +ipv6_obj = None +NM_service = None + + +def setup_ipv4_env(params): + """ + Setup ipv4 env on target host + + :param params: dictionary with the test parameter + """ + ip_addr_suffix = params.get("ip_addr_suffix", "64") + + global NM_service + # Check NetworkManager service, if running, will stop this service. + if migration_base.check_NM(params, remote_host=True): + NM_service = migration_base.get_NM_service(params, remote_host=True) + if NM_service and NM_service.status(): + NM_service.stop() + + global ipv6_obj, ipv6_list + # Delete all ipv6 addr on target host + ipv6_obj = utils_net.IPv6Manager(params) + ipv6_obj.session = ipv6_obj.get_session() + runner = ipv6_obj.session.cmd_output + ipv6_list = ipv6_obj.get_addr_list(runner=runner) + for ipv6_addr in ipv6_list: + utils_net.del_net_if_ip(ipv6_obj.server_ifname, (ipv6_addr + '/' + ip_addr_suffix), runner) + ipv6_obj.close_session() + + +def cleanup_ipv4_env(params): + """ + Cleanup ipv4 env on target host + + :param params: dictionary with the test parameter + """ + ip_addr_suffix = params.get("ip_addr_suffix", "64") + + global NM_service + # If NM_sercice exists, start NetworkManager service + if NM_service and not NM_service.status(): + NM_service.start() + global ipv6_obj, ipv6_list + # Recover all ipv6 addr on target host + ipv6_obj.session = ipv6_obj.get_session() + runner = ipv6_obj.session.cmd_output + for ipv6_addr in ipv6_list: + utils_net.set_net_if_ip(ipv6_obj.server_ifname, (ipv6_addr + '/' + ip_addr_suffix), runner) + ipv6_obj.close_session() def run(test, params, env): @@ -29,11 +80,11 @@ def setup_migration_host(): """ set_migration_host = "yes" == params.get("set_migration_host", "no") src_hosts_dict = eval(params.get("src_hosts_conf", "{}")) - qemu_conf_path = params.get("qemu_conf_path") qemu_conf_dest = params.get("qemu_conf_dest", "{}") server_params = {'server_ip': params.get("migrate_dest_host"), 'server_user': params.get("remote_user", "root"), 'server_pwd': params.get("migrate_dest_pwd")} + qemu_conf_path = utils_config.LibvirtQemuConfig().conf_path test.log.info("Setup for migration_host.") if 
         if set_migration_host:
@@ -55,6 +106,35 @@ def setup_migration_host():
                                                              server_params,
                                                              qemu_conf_dest,
                                                              qemu_conf_path)
         migration_obj.setup_connection()
 
+    def setup_migration_address():
+        """
+        Setup for migration_address case
+        """
+        qemu_conf_path = utils_config.LibvirtQemuConfig().conf_path
+        qemu_conf_dest = params.get("qemu_conf_dest", "{}")
+        default_qemu_conf = "yes" == params.get("default_qemu_conf", "no")
+        ipv4_env_on_target = "yes" == params.get("ipv4_env_on_target", "no")
+
+        test.log.info("Setup for migration_address case.")
+        if default_qemu_conf:
+            server_params = {'server_ip': params.get("migrate_dest_host"),
+                             'server_user': params.get("remote_user", "root"),
+                             'server_pwd': params.get("migrate_dest_pwd"),
+                             'file_path': qemu_conf_path}
+            global remove_key_remote
+            remove_key_remote = libvirt_config.remove_key_in_conf(["migration_address"],
+                                                                  "qemu",
+                                                                  remote_params=server_params)
+        else:
+            global qemu_conf_remote
+            qemu_conf_remote = libvirt_remote.update_remote_file(params,
+                                                                 qemu_conf_dest,
+                                                                 qemu_conf_path)
+        if ipv4_env_on_target:
+            test.log.info("Set ipv4 env on target host.")
+            setup_ipv4_env(params)
+        migration_obj.setup_connection()
+
     def run_again_for_migration_completion():
         """
         Run migration again for migration_completion case
         """
@@ -93,6 +173,24 @@ def cleanup_migration_host():
         if remove_key_remote:
             del remove_key_remote
 
+    def cleanup_migration_address():
+        """
+        Cleanup for migration_address case
+        """
+        ipv4_env_on_target = "yes" == params.get("ipv4_env_on_target", "no")
+
+        test.log.info("Cleanup for migration_address case.")
+        migration_obj.cleanup_connection()
+        global qemu_conf_remote
+        if qemu_conf_remote:
+            test.log.info("Recover remote qemu configurations")
+            del qemu_conf_remote
+        global remove_key_remote
+        if remove_key_remote:
+            del remove_key_remote
+        if ipv4_env_on_target:
+            cleanup_ipv4_env(params)
+
     libvirt_version.is_libvirt_feature_supported(params)
 
     test_case = params.get('test_case', '')
diff --git a/provider/migration/migration_base.py b/provider/migration/migration_base.py
index c92c3e57a3..16b93921b3 100644
--- a/provider/migration/migration_base.py
+++ b/provider/migration/migration_base.py
@@ -5,7 +5,9 @@
 import time
 
 from avocado.core import exceptions
+from avocado.utils import process
 
+from virttest import remote
 from virttest import virsh  # pylint: disable=W0611
 from virttest import utils_misc  # pylint: disable=W0611
 from virttest import utils_libvirtd  # pylint: disable=W0611
@@ -19,6 +21,7 @@ from virttest.utils_libvirt import libvirt_service  # pylint: disable=W0611
 from virttest.utils_test import libvirt_domjobinfo  # pylint: disable=W0611
 from virttest.utils_test import libvirt
+from virttest.staging import service
 
 from provider.migration import base_steps  # pylint: disable=W0611
 
 
@@ -630,3 +633,42 @@ def destroy_dest_vm(params):
     dest_uri = params.get("virsh_migrate_desturi")
     vm_name = params.get("main_vm")
     virsh.destroy(vm_name, ignore_status=False, debug=True, uri=dest_uri)
+
+
+def check_NM(params, remote_host=False):
+    """
+    Check whether NetworkManager is installed
+
+    :param params: dictionary with the test parameter
+    :param remote_host: if True, will check the NetworkManager service of target host
+    :return: True if NetworkManager is installed, otherwise False
+    """
+    cmd = "rpm -q NetworkManager"
+    # Ignore the command status: a non-zero exit status simply means
+    # that NetworkManager is not installed.
+    if remote_host:
+        ret = remote.run_remote_cmd(cmd, params, ignore_status=True)
+    else:
+        ret = process.run(cmd, ignore_status=True, shell=True)
+    if ret.exit_status:
+        return False
+    return True
+
+
+def get_NM_service(params=None, remote_host=False):
""" + Get NetworkManager service object + + :param params: dictionary with the test parameter + :param remote_host: if True, will get the NetworkManager service of target host + :return: NetworkManager service object + """ + if remote_host: + server_ip = params.get("server_ip") + server_user = params.get("server_user", "root") + server_pwd = params.get("server_pwd") + remote_runner = remote.RemoteRunner(host=server_ip, + username=server_user, + password=server_pwd) + runner = remote_runner.run + else: + runner = process.run + return service.Factory.create_service("NetworkManager", run=runner)