
Add storage and image management support
Added pool_selectors and image_pool_name

Signed-off-by: Zhenchao Liu <[email protected]>
zhencliu committed Aug 1, 2024
1 parent 8df2176 commit d0a1ebe
Showing 60 changed files with 3,823 additions and 24 deletions.
15 changes: 7 additions & 8 deletions avocado_vt/plugins/vt_cluster.py
@@ -7,6 +7,8 @@
from avocado.core.plugin_interfaces import JobPreTests as Pre
from avocado.utils.stacktrace import log_exc_info
from virttest.vt_cluster import cluster, node_metadata
from virttest.vt_imgr import vt_imgr
from virttest.vt_resmgr import startup_resmgr, teardown_resmgr, cleanup_resmgr


class ClusterSetupError(Exception):
@@ -50,21 +52,18 @@ def _pre_node_setup():
def _pre_mgr_setup():
try:
# Pre-setup the cluster manager
# e.g:
# startup_resmgr()
# vt_imgr.startup()
pass
startup_resmgr()
vt_imgr.startup()
except Exception as err:
raise ClusterManagerSetupError(err)

@staticmethod
def _post_mgr_cleanup():
try:
# Post-cleanup the cluster manager
# e.g:
# teardown_resmgr()
# vt_imgr.teardown()
pass
vt_imgr.teardown()
teardown_resmgr()
cleanup_resmgr()
except Exception as err:
raise ClusterManagerCleanupError(err)
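
The setup and cleanup hooks are intended to stay symmetric: whatever _pre_mgr_setup starts, _post_mgr_cleanup tears down in reverse order, followed by a final resource-manager cleanup. A minimal sketch of that contract, reusing the imports above (the try/finally framing is illustrative, not the plugin's actual control flow):

def _managed_lifecycle():
    # Illustrative pairing only: start the managers, run the job, then
    # tear down in reverse order and clean up residual state.
    startup_resmgr()
    vt_imgr.startup()
    try:
        pass  # the job's tests run in between
    finally:
        vt_imgr.teardown()
        teardown_resmgr()
        cleanup_resmgr()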

18 changes: 18 additions & 0 deletions avocado_vt/test.py
@@ -37,6 +37,7 @@
)
from virttest._wrappers import load_source
from virttest.vt_cluster import cluster, logger, selector
from virttest.vt_resmgr import get_all_resource_pools

# avocado-vt no longer needs autotest for the majority of its functionality,
# except by:
@@ -365,6 +366,9 @@ def _runTest(self):

def _init_partition(self):
self._cluster_partition = cluster.create_partition()
# Add the partition uuid as a case-specific global param; it is the
# only easy way for a test case to find its running environment
self.params["cluster_partition_uuid"] = self._cluster_partition.uuid

def _setup_partition(self):
for node in self.params.objects("nodes"):
@@ -378,6 +382,19 @@ def _setup_partition(self):
_node.tag = node
self._cluster_partition.add_node(_node)

for pool_tag in self.params.objects("pools"):
pool_params = self.params.object_params(pool_tag)
pool_selectors = pool_params.get("pool_selectors")

pools = set(get_all_resource_pools()) - set(self._cluster_partition.pools.values())
pool_id = selector.select_resource_pool(list(pools), pool_selectors)
if not pool_id:
raise selector.SelectorError(
f'No available pool for "{pool_tag}" with "{pool_selectors}"'
)
self._cluster_partition.pools[pool_tag] = pool_id
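
selector.select_resource_pool itself is not shown in this diff; as an illustration only, the loop above asks for the first not-yet-assigned pool that satisfies the tag's pool_selectors. A hypothetical sketch of that contract (the helper and predicate below are invented):

# Hypothetical sketch, not the real selector implementation.
def pick_pool(candidate_pool_ids, matches):
    """Return the first pool id satisfying the selectors, else None."""
    for pool_id in candidate_pool_ids:
        if matches(pool_id):
            return pool_id
    return None  # the caller raises SelectorError in this case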


def _clear_partition(self):
cluster_dir = os.path.join(self.resultsdir, "cluster")
if self._cluster_partition.nodes:
@@ -388,6 +405,7 @@ def _clear_partition(self):
node.upload_logs(node_dir)
cluster.clear_partition(self._cluster_partition)
self._cluster_partition = None
self.params["cluster_partition_uuid"] = None

def _start_logger_client(self):
if self._cluster_partition.nodes:
23 changes: 23 additions & 0 deletions virttest/bootstrap.py
@@ -12,6 +12,7 @@
from avocado.utils import process

from virttest.vt_cluster import cluster, node
from virttest.vt_resmgr import setup_resmgr

from . import arch, asset, cartesian_config, data_dir, defaults, utils_selinux
from .compat import get_opt
@@ -895,6 +896,27 @@ def _register_hosts(hosts_configs):
LOG.debug("Host %s registered", host)


def _setup_managers(pools_params):
def _verify_pools_params():
# Check if the pools' params are set correctly
required_options = ["type", "access.nodes"]
for params in pools_params.values():
for pool_name, pool_params in params.items():
for option in required_options:
opts = option.split('.')
i, sub_params = 0, pool_params
while i < len(opts):
if opts[i] not in sub_params:
raise ValueError(f"Missed '{opts[i]}' for '{pool_name}'")
elif not sub_params[opts[i]]:
raise ValueError(f"Missed a value for '{opts[i]}' for '{pool_name}'")
sub_params = sub_params.get(opts[i])
i += 1

_verify_pools_params()
setup_resmgr(pools_params)
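
For reference, a pools configuration passes the check above only if every pool carries a "type" and a non-empty "access.nodes". A hypothetical example (the group and pool names are invented; only the two required options come from the code):

pools_params = {
    "cluster1": {                       # hypothetical group key
        "ceph_pool": {
            "type": "ceph",             # required: pool type
            "access": {
                "nodes": ["node1"],     # required: non-empty node list
            },
        },
    },
}
_setup_managers(pools_params)  # verifies the params, then setup_resmgr()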


def _config_master_server(master_config):
"""Configure the master server."""
if master_config:
@@ -1084,6 +1106,7 @@ def bootstrap(options, interactive=False):
cluster_config = _load_cluster_config(vt_cluster_config)
_register_hosts(cluster_config.get("hosts"))
_config_master_server(cluster_config.get("master"))
_setup_managers(cluster_config.get("pools"))

LOG.info("")
LOG.info("VT-BOOTSTRAP FINISHED")
76 changes: 60 additions & 16 deletions virttest/env_process.py
@@ -60,6 +60,8 @@
)
from virttest.test_setup.storage import StorageConfig
from virttest.utils_version import VersionInterval
from virttest.vt_imgr import vt_imgr


utils_libvirtd = lazy_import("virttest.utils_libvirtd")
virsh = lazy_import("virttest.virsh")
@@ -132,32 +134,55 @@ def preprocess_image(test, params, image_name, vm_process_status=None):
only to keep it working with process_images()
:note: Currently this function just creates an image if requested.
"""
# FIXME:
image_id = None
if params.get_boolean("multihost"):
image_config = vt_imgr.define_image_config(image_name, params)
image_id = vt_imgr.create_image_object(image_config)

params = params.object_params(image_name)
base_dir = params.get("images_base_dir", data_dir.get_data_dir())

if not storage.preprocess_image_backend(base_dir, params, image_name):
LOG.error("Backend can't be prepared correctly.")

image_filename = storage.get_image_filename(params, base_dir)
image_filename = None
if not params.get_boolean("multihost"):
image_filename = storage.get_image_filename(params, base_dir)

create_image = False
if params.get("force_create_image") == "yes":
create_image = True
elif params.get("create_image") == "yes" and not storage.file_exists(
params, image_filename
):
create_image = True
elif params.get("create_image") == "yes":
# FIXME: check all volumes allocated
if params.get_boolean("multihost"):
volume = vt_imgr.query_image(
image_id,
request=f"spec.virt-images.{image_name}.spec.volume.meta"
)
create_image = not volume["meta"]["allocated"]
else:
create_image = not storage.file_exists(params, image_filename)
else:
# FIXME: sync all volumes configurations
if params.get_boolean("multihost"):
vt_imgr.query_image(image_id)

if params.get("backup_image_before_testing", "no") == "yes":
# FIXME: add backup_image
image = qemu_storage.QemuImg(params, base_dir, image_name)
image.backup_image(params, base_dir, "backup", True, True)
if create_image:
if storage.file_exists(params, image_filename):
# An rbd image cannot be overwritten, so remove the existing image
# when we need to force-create a new one.
storage.file_remove(params, image_filename)
image = qemu_storage.QemuImg(params, base_dir, image_name)
LOG.info("Create image on %s." % image.storage_type)
image.create(params)
if params.get_boolean("multihost"):
vt_imgr.handle_image(image_id, {"create": {}})
else:
if storage.file_exists(params, image_filename):
# An rbd image cannot be overwritten, so remove the existing image
# when we need to force-create a new one.
storage.file_remove(params, image_filename)
image = qemu_storage.QemuImg(params, base_dir, image_name)
LOG.info("Create image on %s." % image.storage_type)
image.create(params)
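
Taken together, the multihost branches above drive the vt_imgr API in a define → create-object → query → handle sequence; a condensed sketch of that flow (the request path is copied from the code above):

image_config = vt_imgr.define_image_config(image_name, params)
image_id = vt_imgr.create_image_object(image_config)
volume = vt_imgr.query_image(
    image_id, request=f"spec.virt-images.{image_name}.spec.volume.meta"
)
if not volume["meta"]["allocated"]:
    vt_imgr.handle_image(image_id, {"create": {}})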


def preprocess_fs_source(test, params, fs_name, vm_process_status=None):
@@ -537,6 +562,16 @@ def postprocess_image(test, params, image_name, vm_process_status=None):
)
return

# FIXME: multihost
image_id = None
if params.get_boolean("multihost"):
image_id = vt_imgr.get_image_by_tag(image_name)
if image_id is None:
LOG.warning(f"Cannot find image {image_name}")
image_config = vt_imgr.define_image_config(image_name, params)
image_id = vt_imgr.create_image_object(image_config)
params = params.object_params(image_name)

restored, removed = (False, False)
clone_master = params.get("clone_master", None)
base_dir = params.get("images_base_dir", data_dir.get_data_dir())
@@ -594,10 +629,18 @@
)
LOG.info("Remove image on %s." % image.storage_type)
if clone_master is None:
image.remove()
if params.get_boolean("multihost"):
vt_imgr.handle_image(image_id, {"destroy": {}})
vt_imgr.destroy_image_object(image_id)
else:
image.remove()
elif clone_master == "yes":
if image_name in params.get("master_images_clone").split():
image.remove()
if params.get_boolean("multihost"):
vt_imgr.handle_image(image_id, {"destroy": {}})
vt_imgr.destroy_image_object(image_id)
else:
image.remove()


def postprocess_fs_source(test, params, fs_name, vm_process_status=None):
Expand Down Expand Up @@ -827,8 +870,9 @@ def _process_images_serial(
or None if no VM exists.
"""
for image_name in images:
image_params = params.object_params(image_name)
image_func(test, image_params, image_name, vm_process_status)
# NOTE: pass the full params here; the per-image object_params()
# filtering now happens inside the image functions themselves
image_func(test, params, image_name, vm_process_status)
if exit_event and exit_event.is_set():
LOG.error("Received exit_event, stop processing of images.")
break
3 changes: 3 additions & 0 deletions virttest/vt_agent/core/data_dir.py
@@ -4,3 +4,6 @@
LOG_DIR = os.path.join(BASE_DIR, "log")
AGENT_LOG_FILENAME = os.path.join(LOG_DIR, "agent.log")
SERVICE_LOG_FILENAME = os.path.join(LOG_DIR, "service.log")

# ENV_DIR = os.path.join(BASE_DIR, "env")
BACKING_MGR_ENV_FILENAME = os.path.join(BASE_DIR, "backing_mgr.env")
4 changes: 4 additions & 0 deletions virttest/vt_agent/managers/__init__.py
@@ -1,5 +1,9 @@
from .connect import ConnectManager
from .console import ConsoleManager
from .image import ImageHandlerManager
from .resource_backing import ResourceBackingManager

connect_mgr = ConnectManager()
console_mgr = ConsoleManager()
resbacking_mgr = ResourceBackingManager()
image_handler_mgr = ImageHandlerManager()
26 changes: 26 additions & 0 deletions virttest/vt_agent/managers/image.py
@@ -0,0 +1,26 @@
import logging

from .images import get_image_handler


LOG = logging.getLogger("avocado.service." + __name__)


class ImageHandlerManager(object):

def __init__(self):
pass

def handle_image(self, image_config, config):
r, o = 0, dict()
try:
cmd, arguments = config.popitem()
image_type = image_config["meta"]["type"]
handler = get_image_handler(image_type, cmd)
ret = handler(image_config, arguments)
if ret:
o["out"] = ret
except Exception as e:
r, o["out"] = 1, str(e)
LOG.debug("Failed to handle image(%s): %s", str(e))
return r, o
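
Unlike the master-side vt_imgr calls, the agent manager reports failures through its (r, o) return value instead of raising. A usage sketch (the image_config shape beyond meta.type is an assumption):

image_config = {"meta": {"type": "qemu"}, "spec": {}}  # shape assumed
r, o = image_handler_mgr.handle_image(image_config, {"create": {}})
if r != 0:
    # the error text is returned in o["out"] rather than raised
    raise RuntimeError(o["out"])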
16 changes: 16 additions & 0 deletions virttest/vt_agent/managers/images/__init__.py
@@ -0,0 +1,16 @@
from .qemu import get_qemu_image_handler
# from .xen import get_xen_image_handler


_image_handler_getters = {
"qemu": get_qemu_image_handler,
#"xen": get_xen_image_handler,
}


def get_image_handler(image_type, cmd):
getter = _image_handler_getters.get(image_type)
if getter is None:
raise ValueError(f"Unsupported image type: {image_type}")
return getter(cmd)


__all__ = ["get_image_handler"]
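
New image backends plug in by registering another getter in _image_handler_getters, as the commented-out xen entry suggests. A hypothetical sketch:

# Hypothetical: wiring up an additional backend next to qemu.
# from .lvm import get_lvm_image_handler
# _image_handler_getters["lvm"] = get_lvm_image_handler
# handler = get_image_handler("lvm", "create")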
1 change: 1 addition & 0 deletions virttest/vt_agent/managers/images/qemu/__init__.py
@@ -0,0 +1 @@
from .qemu_image_handlers import get_qemu_image_handler
