diff --git a/virttest/vt_agent/agents/__init__.py b/virttest/vt_agent/agents/__init__.py new file mode 100644 index 0000000000..a3de443c46 --- /dev/null +++ b/virttest/vt_agent/agents/__init__.py @@ -0,0 +1,2 @@ +from .image_agent import image_agent +from .resource_backing_agent import resbacking_agent diff --git a/virttest/vt_agent/agents/image_agent.py b/virttest/vt_agent/agents/image_agent.py new file mode 100644 index 0000000000..bc9daef43e --- /dev/null +++ b/virttest/vt_agent/agents/image_agent.py @@ -0,0 +1,19 @@ +from .images import get_image_handler + + +class _ImageAgent(object): + + def __init__(self): + self._images = dict() + + def info_image(self, image_config): + pass + + def update_image(self, image_config, update_config): + cmd, arguments = update_config.popitems() + image_type = image_config["meta"]["type"] + handler = get_image_handler(image_type, cmd) + handler(image_config, arguments) + + +image_agent = _ImageAgent() diff --git a/virttest/vt_agent/agents/images/__init__.py b/virttest/vt_agent/agents/images/__init__.py new file mode 100644 index 0000000000..a202217960 --- /dev/null +++ b/virttest/vt_agent/agents/images/__init__.py @@ -0,0 +1,15 @@ +from .qemu import get_qemu_image_handler +#from .libvirt import get_libvirt_image_handler + + +_image_handler_getters = dict() +_image_handler_getters["qemu"] = get_qemu_image_handler +#_image_handler_getters["libvirt"] = get_libvirt_image_handler + + +def get_image_handler(image_type, cmd): + getter = _image_handler_getters.get(image_type) + return getter(cmd) + + +__all__ = ["get_image_handler"] diff --git a/virttest/vt_agent/agents/images/qemu/__init__.py b/virttest/vt_agent/agents/images/qemu/__init__.py new file mode 100644 index 0000000000..6c0f8dea34 --- /dev/null +++ b/virttest/vt_agent/agents/images/qemu/__init__.py @@ -0,0 +1,14 @@ +import qemu_image_handlers + + +_qemu_image_handlers = { + "create": qemu_image_handlers.create, + "destroy": qemu_image_handlers.destroy, +} + + +def get_qemu_image_handler(cmd): + return _qemu_image_handlers.get(cmd) + + +__all__ = ["get_qemu_image_handler"] diff --git a/virttest/vt_agent/agents/images/qemu/qemu_image_handlers.py b/virttest/vt_agent/agents/images/qemu/qemu_image_handlers.py new file mode 100644 index 0000000000..e326e85299 --- /dev/null +++ b/virttest/vt_agent/agents/images/qemu/qemu_image_handlers.py @@ -0,0 +1,373 @@ +import collections +import json + +from avocado.core import exceptions +from avocado.utils import path as utils_path +from avocado.utils import process + +from virttest import utils_numeric + + +QEMU_IMG_BINARY = utils_path.find_command("qemu-img") + + +def _get_base_image(image_tag, topology): + base = None + topo, images = list(topology.items())[0] + + if topo == "chain": + idx = images.index(image_tag) + base = images[idx-1] if idx > 0 else None + + return base + + +def _get_dir_volume_info(volume_config): + auth = None + info = { + "driver": "file", + "filename": volume_config["spec"]["uri"], + } + + return auth, info + + +def _get_nfs_volume_info(volume_config): + auth = None + info = { + "driver": "file", + "filename": volume_config["spec"]["uri"], + } + + return auth, info + + +def _get_ceph_volume_info(volume_config): + auth = dict() + + pool_config = volume_config["meta"]["pool"] + pool_meta = pool_config["meta"] + pool_spec = pool_config["spec"] + + volume_opts = { + "driver": "rbd", + "pool": pool_spec["pool"], + "image": pool_spec["image"], + } + + if pool_spec.get("conf") is not None: + volume_opts["conf"] = pool_spec["conf"] + if 
pool_spec.get("namespace") is not None:
+        volume_opts["namespace"] = pool_spec["namespace"]
+
+    return auth, volume_opts
+
+
+def _get_iscsi_direct_volume_info(volume_config):
+    auth = dict()
+
+    pool_config = volume_config["meta"]["pool"]
+    pool_meta = pool_config["meta"]
+    pool_spec = pool_config["spec"]
+
+    # required options for iscsi
+    volume_opts = {
+        "driver": "iscsi",
+        "transport": pool_spec["transport"],
+        "portal": pool_spec["portal"],
+        "target": pool_spec["target"],
+    }
+
+    # optional option
+    if pool_spec["user"] is not None:
+        volume_opts["user"] = pool_spec["user"]
+
+    return auth, volume_opts
+
+
+def _get_nbd_volume_info(volume_config):
+    auth = dict()
+
+    volume_meta = volume_config["meta"]
+    volume_spec = volume_config["spec"]
+    pool_config = volume_meta["pool"]
+    pool_meta = pool_config["meta"]
+    pool_spec = pool_config["spec"]
+
+    volume_opts = {"driver": "nbd"}
+    if pool_spec.get("host"):
+        volume_opts.update({
+            "server.type": "inet",
+            "server.host": pool_spec["host"],
+            "server.port": volume_spec.get("port", 10809),
+        })
+    elif pool_spec.get("path"):
+        volume_opts.update({
+            "server.type": "unix",
+            "server.path": pool_spec["path"],
+        })
+    else:
+        raise ValueError("Either 'host' or 'path' is required for an nbd pool")
+
+    if volume_spec.get("export"):
+        volume_opts["export"] = volume_spec["export"]
+
+    return auth, volume_opts
+
+
+def get_qemu_virt_image_volume_info(volume_config):
+    volume_info_getters = {
+        "filesystem": _get_dir_volume_info,
+        "nfs": _get_nfs_volume_info,
+        "ceph": _get_ceph_volume_info,
+        "iscsi-direct": _get_iscsi_direct_volume_info,
+        "nbd": _get_nbd_volume_info,
+    }
+
+    pool_config = volume_config["meta"]["pool"]
+    pool_type = pool_config["meta"]["type"]
+    volume_info_getter = volume_info_getters[pool_type]
+
+    return volume_info_getter(volume_config)
+
+
+def get_qemu_virt_image_info(virt_image_config):
+    info = collections.OrderedDict()
+    info["file"] = collections.OrderedDict()
+
+    volume_config = virt_image_config["spec"]["volume"]
+    access_auth, volume_info = get_qemu_virt_image_volume_info(volume_config)
+    info["file"].update(volume_info)
+    info["driver"] = image_format = virt_image_config["spec"]["format"]
+
+    encryption = collections.OrderedDict()
+    encryption_config = virt_image_config["spec"].get("encryption")
+    if encryption_config:
+        encryption = {
+            "type": encryption_config["type"],
+            "id": encryption_config["name"],
+            "format": encryption_config["format"],
+            "data": encryption_config["data"],
+            "file": encryption_config["file"],
+        }
+        if image_format == "luks":
+            info["key-secret"] = encryption_config["name"]
+        elif image_format == "qcow2" and encryption_config["format"] == "luks":
+            info["encrypt.key-secret"] = encryption_config["name"]
+            info["encrypt.format"] = encryption_config["encrypt"]["format"]
+
+    # TODO: Add filters here
+
+    return access_auth, encryption, info
+
+
+def get_qemu_virt_image_object(object_opts):
+    def _get_qemu_virt_image_tls_x509_object(auth):
+        obj = "--object tls-creds-x509,id={name},endpoint=client,dir={dir}"
+        opts = {"tls-creds": auth["name"]}
+        return obj.format(**auth), opts
+
+    def _get_qemu_virt_image_secret_object(encryption):
+        obj = "--object secret,id={name},format={format}"
+
+        if encryption["stored"] == "file":
+            obj += ",file={file}"
+            opts = {"password-secret": encryption["name"]}
+        else:
+            # TODO: cookie-secret
+            obj += ",data={data}"
+            opts = {"key-secret": encryption["name"]}
+
+        # luks in qcow2
+        encrypt = encryption.get("encrypt")
+        if encrypt:
+            opts = {f"encrypt.{k}": v for k, v in opts.items()}
+            opts.update({f"encrypt.{k}": encrypt[k] for k in encrypt if
encrypt[k]}) + + return obj.format(**encryption), opts + + mapping = { + "secret": _get_qemu_virt_image_secret_object, + "tls-creds-x509": _get_qemu_virt_image_tls_x509_object, + } + + getter = mapping.get(object_opts["object-type"]) + return getter(object_opts) if getter else None, None + + +def get_qemu_virt_image_json(virt_image_opts): + """Generate image json representation.""" + return "'json:%s'" % json.dumps(virt_image_opts) + + +def get_qemu_virt_image_opts(virt_image_opts): + """Generate image-opts.""" + + def _dict_to_dot(dct): + """Convert dictionary to dot representation.""" + flat = [] + prefix = [] + stack = [dct.items()] + while stack: + it = stack[-1] + try: + key, value = next(it) + except StopIteration: + if prefix: + prefix.pop() + stack.pop() + continue + if isinstance(value, collections.Mapping): + prefix.append(key) + stack.append(value.items()) + else: + flat.append((".".join(prefix + [key]), value)) + return flat + + return ",".join(["%s=%s" % (attr, value) for attr, value in _dict_to_dot(virt_image_opts)]) + + +def get_qemu_virt_image_repr(virt_image_config, output=None): + def _parse_virt_image_options(): + options = [ + "preallocation", "cluster_size", "lazy_refcounts", + "compat", "extent_size_hint", "compression_type", + ] + opts = {k: v for k in options if k in virt_image_spec and virt_image_spec[k]} + + # TODO: data_file, backing_file + + return opts + + virt_image_spec = virt_image_config["spec"] + + mapping = { + "uri": lambda i: virt_image_spec["volume"]["spec"]["uri"], + "json": get_qemu_virt_image_json, + "opts": get_qemu_virt_image_opts, + } + + func = mapping.get(output) + if func is None: + func = mapping["json"] if auth or sec else mapping["uri"] + + auth, sec, info = get_qemu_virt_image_info(virt_image_config) + + auth_repr, auth_opts = get_qemu_virt_image_object(auth) if auth else "", {} + sec_repr, sec_opts = get_qemu_virt_image_object(sec) if sec else "", {} + image_repr = func(info) + + opts = _parse_virt_image_options() + if auth_opts: + opts.update(auth_opts) + if sec_opts: + opts.update(sec_opts) + if image_opts: + opts.update(image_opts) + + return auth_repr, sec_repr, opts, image_repr + + +def create(image_config, arguments): + create_cmd = ( + "create {secret_object} {image_format} " + "{backing_file} {backing_format} {unsafe!b} {options} " + "{image_filename} {image_size}" + ) + + def _dd(image_tag): + qemu_img_cmd = "" + virt_image_config = image_spec["virt-images"][image_tag] + volume_config = virt_image_config["spec"]["volume"] + + if virt_image_config["spec"]["format"] == "raw": + count = utils_numeric.normalize_data_size( + volume_config["spec"]["size"] + order_magnitude="M" + ) + qemu_img_cmd = "dd if=/dev/zero of=%s count=%s bs=1M" % ( + volume_config["spec"]["path"], + count, + block_size, + ) + else: + raise + + def _qemu_img_create(virt_image_tag): + qemu_img_cmd = "" + virt_image_config = image_spec["virt-images"][virt_image_tag] + volume_config = virt_image_config["spec"]["volume"] + + cmd_dict = {} + cmd_dict["image_format"] = image_spec["format"] + cmd_dict["secret_objects"] = secret_objects = list() + + base_tag = _get_base_image(virt_image_tag, image_meta["topology"]) + if base_tag is not None: + base_virt_image_config = image_spec["virt-images"][base_tag] + auth_repr, sec_repr, _, cmd_dict["backing_file"] = get_qemu_virt_image_repr(base_virt_image_config, image_repr_format) + if auth_repr: + secret_objects.append(auth_repr) + if sec_repr: + secret_objects.append(sec_repr) + if opts: + options.update(opts) + 
cmd_dict["backing_format"] = base_virt_image_config["spec"]["format"] + + auth_repr, sec_repr, options, image_uri = get_qemu_virt_image_repr(virt_image_config, "uri") + if auth_repr: + secret_objects.append(auth_repr) + if sec_repr: + secret_objects.append(sec_repr) + + cmd_dict["image_filename"] = image_uri + cmd_dict["image_size"] = virt_image_spec["size"] + if options: + cmd_dict["options"] = ",".join(options) + + qemu_img_cmd = ( + qemu_image_binary + + " " + + cmd_formatter.format(create_cmd, **cmd_dict) + ) + + LOG.info("Create image by command: %s", qemu_img_cmd) + cmd_result = process.run( + qemu_img_cmd, shell=True, verbose=False, ignore_status=True + ) + + if cmd_result.exit_status != 0: + LOG.warning( + "Failed to create image %s\n%s" % (virt_image_tag, cmd_result) + ) + return cmd_result.exit_status + + + + qemu_image_binary = arguments.get("qemu_img_binary", QEMU_IMG_BINARY) + image_repr_format = arguments.get("source_repr") + image_meta = image_config["meta"] + image_spec = image_config["spec"] + + for tag in image_spec["virt-images"]: + _qemu_img_create(tag) + + +def destroy(image_config, arguments): + LOG.debug("To destroy the qemu image %s" % image_config["meta"]["name"]) + + +def snapshot(image_config, arguments): + pass + + +def rebase(image_config, arguments): + pass + + +def commit(image_config, arguments): + pass + + +def convert(image_config, arguments): + pass diff --git a/virttest/vt_agent/agents/resource_backing_agent.py b/virttest/vt_agent/agents/resource_backing_agent.py new file mode 100644 index 0000000000..5b9e4965ca --- /dev/null +++ b/virttest/vt_agent/agents/resource_backing_agent.py @@ -0,0 +1,60 @@ +from .resource_backings import ( + get_resource_backing_class, + get_pool_connection_class, +) + + +class _ResourceBackingAgent(object): + + def __init__(self): + self._pool_connections = dict() + self._backings = dict() + + def create_pool_connection(self, pool_id, pool_params): + pool_id = pool_params["uuid"] + pool_type = pool_params["type"] + pool_conn_class = get_pool_connection_class(pool_type) + pool_conn = pool_conn_class(pool_params) + pool_conn.startup() + self._pool_connections[pool_id] = pool_conn + + def destroy_pool_connection(self, pool_id): + pool_conn = self._pool_connections[pool_id] + pool_conn.shutdown() + del(self._pool_connections[pool_id]) + + def info_pool_connection(self, pool_id): + pool_conn = self._pool_connections[pool_id] + return pool_conn.info() + + def create_backing(self, backing_config): + pool_id = backing_config["pool"] + pool_conn = self._pool_connections[pool_id] + pool_type = pool_conn.connect_pool_type + res_type = backing_config["type"] + backing_class = get_resource_backing_class(pool_type, res_type) + backing = backing_class(backing_config) + backing.create(pool_conn) + self._backings[backing.backing_id] = backing + return backing.backing_id + + def destroy_backing(self, backing_id): + backing = self._backings[backing_id] + pool_conn = self._pool_connections[backing.source_pool] + backing.destroy(pool_conn) + del(self._backings[backing_id]) + + def update_backing(self, backing_id, new_config): + backing = self._backings[backing_id] + pool_conn = self._pool_connections[backing.source_pool] + cmd, arguments = new_config.popitem() + handler = backing.get_update_handler(cmd) + handler(arguments) + + def info_backing(self, backing_id, request, verbose=False): + backing = self._backings[backing_id] + pool_conn = self._pool_connections[backing.source_pool] + return backing.query(pool_conn, request, verbose) + + 
+resbacking_agent = _ResourceBackingAgent() diff --git a/virttest/vt_agent/agents/resource_backings/__init__.py b/virttest/vt_agent/agents/resource_backings/__init__.py new file mode 100644 index 0000000000..cd96e38a3b --- /dev/null +++ b/virttest/vt_agent/agents/resource_backings/__init__.py @@ -0,0 +1,21 @@ +from .storage import _DirPoolConnection, _DirVolumeBacking + + +_pool_conn_classes = dict() +_pool_conn_classes[_DirPoolConnection.connect_pool_type] = _DirPoolConnection + +_backing_classes = dict() +_backing_classes[_DirVolumeBacking.source_pool_type] = {_DirVolumeBacking.binding_resource_type: _DirVolumeBacking} + + +def get_resource_backing_class(pool_type, resource_type): + backing_classes = _backing_classes.get(pool_type, {}) + return backing_classes.get(resource_type) + + +def get_pool_connection_class(pool_type): + return _pool_conn_classes.get(pool_type) + + +__all__ = ["get_pool_connection_class", + "get_resource_backing_class"] diff --git a/virttest/vt_agent/agents/resource_backings/backing.py b/virttest/vt_agent/agents/resource_backings/backing.py new file mode 100644 index 0000000000..79ea8fea34 --- /dev/null +++ b/virttest/vt_agent/agents/resource_backings/backing.py @@ -0,0 +1,51 @@ +import uuid +from abc import ABC, abstractmethod + + +class _ResourceBacking(ABC): + _BINDING_RESOURCE_TYPE = None + _SOURCE_POOL_TYPE = None + + def __init__(self, backing_config): + self._uuid = uuid.uuid4() + self._source_pool = backing_config["pool"] + self._resource_id = backing_config["uuid"] + self._handlers = { + "allocate": self.allocate_resource, + "release": self.release_resource, + } + + @classmethod + def binding_resource_type(cls): + return cls._BINDING_RESOURCE_TYPE + + @property + def binding_resource_id(self): + return self._resource_id + + @property + def source_pool_id(self): + return self._source_pool + + @classmethod + def source_pool_type(cls): + return cls._SOURCE_POOL_TYPE + + @property + def backing_id(self): + return self._uuid + + def get_update_handler(self, cmd): + return self._handlers.get(cmd) + + @abstractmethod + def allocate_resource(self, pool_connection, arguments): + pass + + @abstractmethod + def release_resource(self, pool_connection, arguments): + pass + + @abstractmethod + def info_resource(self, pool_connection, verbose=False): + pass diff --git a/virttest/vt_agent/agents/resource_backings/backing_mgr.py b/virttest/vt_agent/agents/resource_backings/backing_mgr.py new file mode 100644 index 0000000000..50f70db203 --- /dev/null +++ b/virttest/vt_agent/agents/resource_backings/backing_mgr.py @@ -0,0 +1,47 @@ +from abc import ABC, abstractmethod +from .resbackings import get_backing_class +from .resbackings import get_pool_connection_class + + +class _ResourceBackingManager(ABC): + _ATTACHED_POOL_TYPE = None + + def __init__(self): + self._pool_connections = dict() + self._backings = dict() + + @abstractmethod + def create_pool_connection(self, pool_params): + pool_id = pool_params["uuid"] + pool_type = pool_params["type"] + pool_conn_class = get_pool_connection_class(pool_type) + pool_conn = pool_conn_class(pool_params) + pool_conn.startup() + self._pool_connections[pool_id] = pool_conn + + def destroy_pool_connection(self, pool_id): + pool_conn = self._pool_connections[pool_id] + pool_conn.shutdown() + del self._pool_connections[pool_id] + + @abstractmethod + def create_backing(self, config, need_allocate=False): + pass + + def destroy_backing(self, backing_id, need_release=False): + backing = self._backings[backing_id] + pool_conn = 
self._pool_connections[backing.source_pool] + if need_release: + backing.release_resource(pool_conn) + del self._backings[backing_id] + + def update_backing(self, backing_id, config): + backing = self._backings[backing_id] + pool_conn = self._pool_connections[backing.source_pool] + backing.update(pool_conn, config) + + def get_backing(self, backing_id): + return self._backings.get(backing_id) + + def info_backing(self, backing_id): + return self._backings[backing_id].to_specs() diff --git a/virttest/vt_agent/agents/resource_backings/cvm/__init__.py b/virttest/vt_agent/agents/resource_backings/cvm/__init__.py new file mode 100644 index 0000000000..d31c959cc0 --- /dev/null +++ b/virttest/vt_agent/agents/resource_backings/cvm/__init__.py @@ -0,0 +1,27 @@ +from .. import _CVMResBackingMgr +from .. import _all_subclasses + + +class LaunchSecurity(object): + _instance = None + + @classmethod + def dispatch(cls): + return cls._instance + + @classmethod + def startup(cls, config): + if cls._instance is not None: + return cls._instance + + for mgr_cls in _all_subclasses(_CVMResBackingMgr): + if mgr_cls.get_platform_flags() is not None: + cls._instance = mgr_cls(config) + cls._instance.startup() + return cls._instance + + raise + + @classmethod + def teardown(cls): + cls._instance.teardown() diff --git a/virttest/vt_agent/agents/resource_backings/cvm/_sev_resmgr.py b/virttest/vt_agent/agents/resource_backings/cvm/_sev_resmgr.py new file mode 100644 index 0000000000..fcb2982cd8 --- /dev/null +++ b/virttest/vt_agent/agents/resource_backings/cvm/_sev_resmgr.py @@ -0,0 +1,98 @@ +from .. import _ResBacking +from .. import _ResBackingCaps +from .. import _CVMResBackingMgr + + +class _SEVCommResBacking(_ResBacking): + + def __init__(self, requests): + super().__init__(requests) + self._cbitpos = None + self._reduced_phys_bits = None + # self._sev_device = '/dev/sev' + # self._kernel_hashes = None + + def to_specs(self): + return {"cbitpos": self._cbitpos, "reduced-phys-bits": self._reduced_phys_bits} + + +class _SEVResBacking(_SEVCommResBacking): + RESOURCE_TYPE = "sev" + + def __init__(self): + super().__init__() + self._dh_cert = None + self._session = None + + def allocate(self, requests): + pass + + def free(self): + pass + + def to_specs(self): + pass + + +class _SNPResBacking(_SEVCommResBacking): + RESOURCE_TYPE = "snp" + + def __init__(self): + super().__init__() + + def allocate(self, requests): + pass + + def free(self): + pass + + def to_specs(self): + pass + + +class _SEVResBackingCaps(_ResBackingCaps): + + def __init__(self, params): + self._cbitpos = None + self._reduced_phys_bits = None + self._sev_device = None + self._max_sev_guests = None + self._max_snp_guests = None + self._pdh = None + self._cert_chain = None + self._cpu0_id = None + + def load(self): + pass + + def is_capable(self, requests): + pass + + def increase(self, backing): + pass + + def decrease(self, backing): + pass + + @property + def max_sev_guests(self): + return self._max_sev_guests + + @property + def max_snp_guests(self): + return self._max_snp_guests + + +class _SEVResBackingMgr(_CVMResBackingMgr): + + def __init__(self, config): + super().__init__(config) + self._caps = _SEVResBackingCaps(config) + _SEVResBackingMgr._platform_flags = config + + def startup(self): + reset_sev_platform() + super().startup() + + def teardown(self): + reset_sev_platform() diff --git a/virttest/vt_agent/agents/resource_backings/cvm_platform_mgr/_sev_platform_mgr.py 
b/virttest/vt_agent/agents/resource_backings/cvm_platform_mgr/_sev_platform_mgr.py new file mode 100644 index 0000000000..ad162be551 --- /dev/null +++ b/virttest/vt_agent/agents/resource_backings/cvm_platform_mgr/_sev_platform_mgr.py @@ -0,0 +1,6 @@ +def reset_platform(): + pass + + +def rotate_pdh(): + pass diff --git a/virttest/vt_agent/agents/resource_backings/cvm_platform_mgr/_tdx_platform_mgr.py b/virttest/vt_agent/agents/resource_backings/cvm_platform_mgr/_tdx_platform_mgr.py new file mode 100644 index 0000000000..ad162be551 --- /dev/null +++ b/virttest/vt_agent/agents/resource_backings/cvm_platform_mgr/_tdx_platform_mgr.py @@ -0,0 +1,6 @@ +def reset_platform(): + pass + + +def rotate_pdh(): + pass diff --git a/virttest/vt_agent/agents/resource_backings/cvm_platform_mgr/cvm_platform_mgr.py b/virttest/vt_agent/agents/resource_backings/cvm_platform_mgr/cvm_platform_mgr.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/virttest/vt_agent/agents/resource_backings/dispatcher.py b/virttest/vt_agent/agents/resource_backings/dispatcher.py new file mode 100644 index 0000000000..801240f063 --- /dev/null +++ b/virttest/vt_agent/agents/resource_backings/dispatcher.py @@ -0,0 +1,48 @@ +from .storage import _dir_backing_mgr +#from .storage import _nfs_backing_mgr +# from .storage import _ceph_backing_mgr +# from .cvm import _sev_backing_mgr +# from .cvm import _tdx_backing_mgr + + +class _BackingMgrDispatcher(object): + + def __init__(self): + self._managers_mapping = dict() + self._backings_mapping = dict() + self._pools_mapping = dict() + + def dispatch_by_pool(self, pool_id): + return self._pools_mapping.get(pool_id, None) + + def dispatch_by_backing(self, backing_id): + return self._backings_mapping.get(backing_id, None) + + @classmethod + def register(cls, mgr): + self._managers_mapping[mgr.attached_pool_type] = mgr + + def map_pool(self, pool_id, pool_type): + backing_mgr = self._managers_mapping[pool_type] + self._pools_mapping[pool_id] = backing_mgr + + def unmap_pool(self, pool_id): + del self._pools_mapping[pool_id] + + def map_backing(self, backing_id, backing_mgr): + self._backings_mapping[backing_id] = backing_mgr + + def unmap_backing(self, backing_id): + del self._backings_mapping[backing_id] + + +_backing_mgr_dispatcher = _BackingMgrDispatcher() + +# Register storage backing managers +_backing_mgr_dispatcher.register(_dir_backing_mgr) +#_backing_mgr_dispatcher.register(_nfs_backing_mgr) +# _backing_mgr_dispatcher.register(_ceph_backing_mgr) + +# Register cvm backing managers +# _backing_mgr_dispatcher.register(_sev_backing_mgr) +# _backing_mgr_dispatcher.register(_tdx_backing_mgr) diff --git a/virttest/vt_agent/agents/resource_backings/pool_connection.py b/virttest/vt_agent/agents/resource_backings/pool_connection.py new file mode 100644 index 0000000000..2d8727b6b4 --- /dev/null +++ b/virttest/vt_agent/agents/resource_backings/pool_connection.py @@ -0,0 +1,37 @@ +from abc import ABC, abstractmethod + + +class _ResourcePoolAccess(ABC): + + @abstractmethod + def __init__(self, pool_access_params): + pass + + +class _ResourcePoolConnection(ABC): + _CONNECT_POOL_TYPE = None + + def __init__(self, pool_params): + self._pool_id = pool_params["uuid"] + + @classmethod + def connect_pool_type(cls): + return cls._CONNECT_POOL_TYPE + + @abstractmethod + def startup(self): + pass + + @abstractmethod + def shutdown(self): + pass + + @property + @abstractmethod + def connected(self): + return False + + @property + @abstractmethod + def info(self): + return False diff --git 
a/virttest/vt_agent/agents/resource_backings/storage/__init__.py b/virttest/vt_agent/agents/resource_backings/storage/__init__.py new file mode 100644 index 0000000000..46e0228ab4 --- /dev/null +++ b/virttest/vt_agent/agents/resource_backings/storage/__init__.py @@ -0,0 +1,2 @@ +from .dir import _DirPoolConnection, _DirVolumeBacking +#from .nfs import _NfsPoolConnection diff --git a/virttest/vt_agent/agents/resource_backings/storage/dir/__init__.py b/virttest/vt_agent/agents/resource_backings/storage/dir/__init__.py new file mode 100644 index 0000000000..dd0d5847c5 --- /dev/null +++ b/virttest/vt_agent/agents/resource_backings/storage/dir/__init__.py @@ -0,0 +1,2 @@ +from .dir_backing import _DirVolumeBacking +from .dir_pool_connection import _DirPoolConnection diff --git a/virttest/vt_agent/agents/resource_backings/storage/dir/dir_backing.py b/virttest/vt_agent/agents/resource_backings/storage/dir/dir_backing.py new file mode 100644 index 0000000000..19e378c66e --- /dev/null +++ b/virttest/vt_agent/agents/resource_backings/storage/dir/dir_backing.py @@ -0,0 +1,46 @@ +import os +from virttest import utils_io +from ...backing import _ResourceBacking + + +class _DirVolumeBacking(_ResourceBacking): + _SOURCE_POOL_TYPE = "filesystem" + _BINDING_RESOURCE_TYPE = "volume" + + def __init__(self, config): + super().__init__(config) + self._allocation = 0 + self._handlers.update({ + "resize": self.resize_volume, + }) + + + def _allocate_volume(self, pool_connection, arguments): + path = os.path.join(pool_connection.root_dir, self._filename) + utils_io.dd(path, self._size) + + def _release_volume(self, pool_connection, arguments): + path = os.path.join(pool_connection.root_dir, self._filename) + os.unlink(path) + + def allocate_resource(self, pool_connection, arguments): + self._allocate_volume(pool_connection, arguments) + + def release_resource(self, pool_connection, arguments): + self._release_volume(pool_connection, arguments) + + def resize_volume(self, pool_connection, arguments): + pass + + def info_resource(self, pool_connection, verbose=False): + info = { + "spec": { + "path": os.path.join(pool_connection.root_dir, self._filename), + } + } + + if verbose: + s = os.stat(path) + info["spec"].update({"allocation": s.st_size}) + + return info diff --git a/virttest/vt_agent/agents/resource_backings/storage/dir/dir_backing_mgr.py b/virttest/vt_agent/agents/resource_backings/storage/dir/dir_backing_mgr.py new file mode 100644 index 0000000000..825ca60fee --- /dev/null +++ b/virttest/vt_agent/agents/resource_backings/storage/dir/dir_backing_mgr.py @@ -0,0 +1,46 @@ +from ...backing_mgr import _ResourceBackingManager +from .dir_backing import _get_backing_class +from .dir_pool_connection import _DirPoolConnection + + +class _DirBackingManager(_ResourceBackingManager): + _ATTACHED_POOL_TYPE = "filesystem" + + def __init__(self): + super().__init__() + + def create_pool_connection(self, pool_config): + pool_conn = _DirPoolConnection(pool_config) + pool_conn.startup() + self._pool_connections[pool_id] = pool_conn + + def destroy_pool_connection(self, pool_id): + pool_conn = self._pool_connections[pool_id] + pool_conn.shutdown() + del self._pool_connections[pool_id] + + def create_backing(self, config, need_allocate=False): + pool_id = config["pool_id"] + pool_conn = self._pool_connections[pool_id] + backing_class = _get_backing_class(config["resource_type"]) + backing = backing_class(config) + self._backings[backing.uuid] = backing + if need_allocate: + backing.allocate(pool_conn) + + def 
destroy_backing(self, backing_id, need_release=False): + backing = self._backings[backing_id] + pool_conn = self._pool_connections[backing.source_pool] + if need_release: + backing.release(pool_conn) + del self._backings[backing_id] + + def update_backing(self, backing_id, new_backing_spec): + backing = self._allocated_backings[backing_id] + pool_conn = self._pool_connections[backing.source_pool] + backing.update(pool_conn, new_backing_spec) + + def info_backing(self, backing_id): + backing = self._allocated_backings[backing_id] + pool_conn = self._pool_connections[backing.source_pool] + return backing.info(pool_conn) diff --git a/virttest/vt_agent/agents/resource_backings/storage/dir/dir_pool_connection.py b/virttest/vt_agent/agents/resource_backings/storage/dir/dir_pool_connection.py new file mode 100644 index 0000000000..9adfa8dc21 --- /dev/null +++ b/virttest/vt_agent/agents/resource_backings/storage/dir/dir_pool_connection.py @@ -0,0 +1,30 @@ +import os + +from avocado.utils.path import init_dir +from ...pool_connection import _ResourcePoolConnection + + +class _DirPoolConnection(_ResourcePoolConnection): + + def __init__(self, pool_params): + super().__init__(pool_params) + self._root_dir = pool_params["pool_dir_root"] + + def startup(self): + init_dir(self.root_dir) + + def shutdown(self): + if not os.listdir(self.root_dir): + os.removedirs(self.root_dir) + + @property + def connected(self): + return os.path.exists(self.root_dir) + + @property + def root_dir(self): + return self._root_dir + + @property + def info(self): + return dict() diff --git a/virttest/vt_agent/agents/resource_backings/storage/nfs/__init__.py b/virttest/vt_agent/agents/resource_backings/storage/nfs/__init__.py new file mode 100644 index 0000000000..0eb8062f6e --- /dev/null +++ b/virttest/vt_agent/agents/resource_backings/storage/nfs/__init__.py @@ -0,0 +1 @@ +from .nfs_backing_mgr import _nfs_backing_mgr diff --git a/virttest/vt_agent/agents/resource_backings/storage/nfs/nfs_backing.py b/virttest/vt_agent/agents/resource_backings/storage/nfs/nfs_backing.py new file mode 100644 index 0000000000..fa4eeec747 --- /dev/null +++ b/virttest/vt_agent/agents/resource_backings/storage/nfs/nfs_backing.py @@ -0,0 +1,47 @@ +import os +from virttest import utils_io +from ...backing import _ResourceBacking + + +class _NfsVolumeBacking(_ResourceBacking): + _SOURCE_POOL_TYPE = "nfs" + _BINDING_RESOURCE_TYPE = "volume" + + def __init__(self, config): + super().__init__(config) + self._allocation = 0 + self._handlers.update({ + "resize": self.resize_volume, + }) + + def allocate_resource(self, pool_connection, arguments): + path = os.path.join(pool_connection.mnt, self._name) + utils_io.dd(path, self._size) + + def release_resource(self, pool_connection, arguments): + path = os.path.join(pool_connection.mnt, self._name) + os.unlink(path) + + def resize_volume(self, pool_connection, arguments): + pass + + def info_resource(self, pool_connection, verbose=False): + info = { + "spec": { + "path": os.path.join(pool_connection.mnt, self._filename), + } + } + + if verbose: + s = os.stat(path) + info["spec"].update({"allocation": s.st_size}) + + return info + + +def _get_backing_class(resource_type): + """ + Get the backing class for a given resource type in case there are + more than one resources are supported by a nfs pool + """ + return _NfsVolumeBacking diff --git a/virttest/vt_agent/agents/resource_backings/storage/nfs/nfs_backing_mgr.py b/virttest/vt_agent/agents/resource_backings/storage/nfs/nfs_backing_mgr.py new file mode 
100644
index 0000000000..91abd364a9
--- /dev/null
+++ b/virttest/vt_agent/agents/resource_backings/storage/nfs/nfs_backing_mgr.py
@@ -0,0 +1,47 @@
+from ...backing_mgr import _ResourceBackingManager
+from .nfs_backing import _get_backing_class
+from .nfs_pool_connection import _NfsPoolConnection
+
+
+class _NfsBackingManager(_ResourceBackingManager):
+    _ATTACHED_POOL_TYPE = "nfs"
+
+    def __init__(self):
+        super().__init__()
+
+    def create_pool_connection(self, pool_config):
+        pool_id = pool_config["pool_id"]
+        pool_conn = _NfsPoolConnection(pool_config)
+        pool_conn.startup()
+        self._pool_connections[pool_id] = pool_conn
+
+    def destroy_pool_connection(self, pool_id):
+        pool_conn = self._pool_connections[pool_id]
+        pool_conn.shutdown()
+        del self._pool_connections[pool_id]
+
+    def create_backing(self, config, need_allocate=False):
+        pool_id = config["pool_id"]
+        pool_conn = self._pool_connections[pool_id]
+        backing_class = _get_backing_class(config["resource_type"])
+        backing = backing_class(config)
+        self._backings[backing.uuid] = backing
+        if need_allocate:
+            backing.allocate(pool_conn)
+
+    def destroy_backing(self, backing_id, need_release=False):
+        backing = self._backings[backing_id]
+        pool_conn = self._pool_connections[backing.source_pool]
+        if need_release:
+            backing.release(pool_conn)
+        del self._backings[backing_id]
+
+    def update_backing(self, backing_id, new_backing_spec):
+        backing = self._backings[backing_id]
+        pool_conn = self._pool_connections[backing.source_pool]
+        backing.update(pool_conn, new_backing_spec)
+
+    def info_backing(self, backing_id):
+        backing = self._backings[backing_id]
+        pool_conn = self._pool_connections[backing.source_pool]
+        return backing.info(pool_conn)
diff --git a/virttest/vt_agent/agents/resource_backings/storage/nfs/nfs_pool_connection.py b/virttest/vt_agent/agents/resource_backings/storage/nfs/nfs_pool_connection.py
new file mode 100644
index 0000000000..7dc935103c
--- /dev/null
+++ b/virttest/vt_agent/agents/resource_backings/storage/nfs/nfs_pool_connection.py
@@ -0,0 +1,62 @@
+import os
+
+from virttest import utils_disk
+
+from avocado.utils.path import init_dir
+
+from ...pool_connection import _ResourcePoolAccess
+from ...pool_connection import _ResourcePoolConnection
+
+
+class _NfsPoolAccess(_ResourcePoolAccess):
+    """
+    Mount options
+    """
+
+    def __init__(self, pool_access_config):
+        self._options = pool_access_config["nfs_options"]
+
+    def __str__(self):
+        return self._options
+
+
+class _NfsPoolConnection(_ResourcePoolConnection):
+    _CONNECT_POOL_TYPE = "nfs"
+
+    def __init__(self, pool_params):
+        super().__init__(pool_params)
+        self._connected_pool = pool_params["pool_id"]
+        self._nfs_server = pool_params["nfs_server"]
+        self._export_dir = pool_params["nfs_export_dir"]
+        self._nfs_access = _NfsPoolAccess(pool_params["access"])
+        self._mnt = pool_params["nfs_mnt_dir"]
+        self._create_mnt = not os.path.exists(self.mnt)
+
+    def startup(self):
+        src = "{host}:{export}".format(host=self._nfs_server, export=self._export_dir)
+        dst = self.mnt
+        if self._create_mnt:
+            init_dir(dst)
+        options = str(self._nfs_access)
+        utils_disk.mount(src, dst, fstype="nfs", options=options)
+
+    def shutdown(self):
+        src = "{host}:{export}".format(host=self._nfs_server, export=self._export_dir)
+        dst = self._mnt
+        utils_disk.umount(src, dst, fstype="nfs")
+        if self._create_mnt:
+            os.removedirs(self.mnt)
+
+    @property
+    def connected(self):
+        src = "{host}:{export}".format(host=self._nfs_server, export=self._export_dir)
+        dst = self.mnt
+        return utils_disk.is_mount(src, dst, fstype="nfs")
+
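    # For reference, a pool_params dict consistent with the lookups above and in
    # _NfsPoolAccess might look like the following; the key names come from this
    # module, while the values are purely illustrative:
    #
    #     {
    #         "pool_id": "nfs_pool1",
    #         "nfs_server": "nfs.example.com",
    #         "nfs_export_dir": "/export/vt",
    #         "nfs_mnt_dir": "/mnt/vt_nfs",
    #         "access": {"nfs_options": "rw,noatime"},
    #     }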
@property + def mnt(self): + return self._mnt + + @property + def info(self): + return dict() diff --git a/virttest/vt_agent/services/__init__.py b/virttest/vt_agent/services/__init__.py index e69de29bb2..357f63b3f6 100644 --- a/virttest/vt_agent/services/__init__.py +++ b/virttest/vt_agent/services/__init__.py @@ -0,0 +1,2 @@ +from .image import * +from .resource_backing import * diff --git a/virttest/vt_agent/services/image.py b/virttest/vt_agent/services/image.py new file mode 100644 index 0000000000..2a68635817 --- /dev/null +++ b/virttest/vt_agent/services/image.py @@ -0,0 +1,52 @@ +from ..agents import image_agent +from .resource_backing import info_backing + + +def info_image(image_config): + """ + Get all the configuration of the image. + + This function will call info_backing to get the volume's configuration + """ + def _update_dict(src, new): + for k, v in new.items(): + if k not in src: + # a new key, add it + src[k] = v + else: + # an existed key + if isinstance(v, dict): + # a dict value, update the dict + _update_dict(src[k], v) + elif not src[k]: + # assign the new value if source is not assigned + src[k] = v + else: + # we should not be here + raise + + def _update_config(config, new_config): + if "meta" in new_config: + _update_dict(config["meta"], new_config["meta"]) + if "spec" in new_config: + _update_dict(config["spec"], new_config["spec"]) + + for virt_image_config in image_config["spec"]["virt-images"].values(): + volume_config = virt_image_config["spec"]["volume"] + backing_id = volume_config["meta"]["backing"] + backing_config = info_backing(backing_id) + _update_config(volume_config, backing_config) + + return image_config + + +def update_image(image_config, update_config): + """ + Update the upper-level image. + + For a qemu image, this function mainly executes qemu-img command, + such as create/rebase/commit etc. + """ + # Get all the configuration of the image + image_config = info_image(image_config) + image_agent.update_image(image_config, update_config) diff --git a/virttest/vt_agent/services/resource_backing.py b/virttest/vt_agent/services/resource_backing.py new file mode 100644 index 0000000000..35f2530b15 --- /dev/null +++ b/virttest/vt_agent/services/resource_backing.py @@ -0,0 +1,99 @@ +from ..agents import resbacking_agent + + +def connect_pool(pool_id, pool_config): + """ + Connect to a specified resource pool. + + :param pool_id: The resource pool id + :type pool_id: string + :param pool_config: The resource pool configuration + :type pool_config: dict + """ + resbacking_agent.create_pool_connection(pool_id, pool_config) + + +def disconnect_pool(pool_id): + """ + Disconnect from a specified resource pool. + + :param pool_id: The resource pool id + :type pool_id: string + :param pool_config: The resource pool configuration + :type pool_config: dict + """ + resbacking_agent.destroy_pool_connection(pool_id) + + +def info_pool_connection(pool_id): + """ + Get a specified pool connection information. 
+ + :param pool_id: The resource pool id + :type pool_id: string + :return: The resource pool connection description + :rtype: dict + """ + resbacking_agent.info_pool_connection(pool_id) + + +def create_backing(backing_config): + """ + Create a resource backing object on the worker node, which is bound + to one resource only + + :param backing_config: The resource backing configuration, usually, + it's a snippet of the resource configuration, + required for allocating the resource + :type backing_config: dict + :return: The resource backing id + :rtype: string + """ + return resbacking_agent.create_backing(backing_config) + + +def destroy_backing(backing_id): + """ + Destroy the backing + + :param backing_id: The cluster resource id + :type backing_id: string + """ + resbacking_agent.destroy_backing(backing_id) + + +def info_backing(backing_id, verbose=False): + """ + Get the information of a resource with a specified backing + + We need not get all the information of the resource, because we can + get the static information by the resource object from the master + node, e.g. size, here we only get the dynamic information, such as + file based volume path and allocation. + + :param resource_id: The backing id + :type resource_id: string + :param verbose: Get all information if verbose is True while + Get the required information if verbose is False + :type verbose: boolean + :return: The information of a resource, e.g. + { + 'spec':{ + 'allocation': 12345, + 'path': '/p1/f1', + } + } + :rtype: dict + """ + return resbacking_agent.info_backing(backing_id, verbose) + + +def update_backing(backing_id, config): + """ + :param backing_id: The resource backing id + :type backing_id: string + :param config: The specified action and the snippet of + the resource's spec and meta info used for update + :type config: dict + """ + resbacking_agent.update_backing(backing_id, config) diff --git a/virttest/vt_imgr/.api.py b/virttest/vt_imgr/.api.py new file mode 100644 index 0000000000..948768e7ae --- /dev/null +++ b/virttest/vt_imgr/.api.py @@ -0,0 +1,106 @@ +from .vt_resmgr import vt_resmgr + + +class ImageNotFound(Exception): + def __init__(self, image_id): + self._id = image_id + + def __str__(self): + return 'Cannot find the pool(id="%s)"' % self._id + + +class UnknownImageType(Exception): + def __init__(self, image_type): + self._type = image_type + + def __str__(self): + return 'Unknown image type "%s"' % self._type + + +def create_image(config): + """ + Create a logical image without any specific storage allocation, + + :param config: The image's meta and spec data + :type config: dict + :return: The image id + :rtype: string + """ + pass + + +def destroy_image(image_id): + """ + Destroy the logical image, the specific storage allocation + will be released, note the image's backing image will not be + touched + + :param image_id: The resource id + :type image_id: string + """ + pass + + +def get_image(image_id): + """ + Get all information for a specified image + + :param image_id: The image id + :type image_id: string + :return: All the information of an image, e.g. 
+ { + 'meta': { + 'id': 'image1', + 'backing': 'image2' + }, + 'spec': { + 'name': 'stg', + 'format': 'qcow2', + 'backing': { + The backing's information here + }, + 'volume': { + 'meta': { + 'id': 'nfs_vol1' + }, + 'spec': { + 'pool': 'nfs_pool1', + 'type': 'volume', + 'size': 65536, + 'name': 'stg.qcow2', + 'path': [{'node1': '/mnt1/stg.qcow2'}, + {'node2': '/mnt2/stg.qcow2'}], + } + } + } + } + :rtype: dict + """ + pass + + +def update_image(image_id, config): + """ + Update an image, the command format: + {'action': arguments}, in which + the 'action' can be the following for a qemu image: + 'create': qemu-img create + 'destroy': Remove the allocated resource + 'convert': qemu-img convert + 'snapshot': qemu-img snapshot + 'resize': qemu-img resize + arguments is a dict object which contains all related settings for a + specific action + + Examples: + qemu-img create + {'create': } + qemu-img convert + {'convert': } + + :param image_id: The image id + :type image_id: string + :param config: The specified action and its arguments + :type config: dict + """ + pass diff --git a/virttest/vt_imgr/__init__.py b/virttest/vt_imgr/__init__.py new file mode 100644 index 0000000000..73fcb2e24a --- /dev/null +++ b/virttest/vt_imgr/__init__.py @@ -0,0 +1 @@ +from vt_imgr import vt_imgr diff --git a/virttest/vt_imgr/images/__init__.py b/virttest/vt_imgr/images/__init__.py new file mode 100644 index 0000000000..eee9605c4e --- /dev/null +++ b/virttest/vt_imgr/images/__init__.py @@ -0,0 +1,15 @@ +from .qemu import _QemuImage + + +_image_classes = dict() +_image_classes[_QemuImage.image_type] = _QemuImage + + +def get_image_class(image_type): + for t, cls in _image_classes.items(): + if t == image_type: + return cls + return None + + +__all__ = ["get_image_class"] diff --git a/virttest/vt_imgr/images/image.py b/virttest/vt_imgr/images/image.py new file mode 100644 index 0000000000..f55b1d54d6 --- /dev/null +++ b/virttest/vt_imgr/images/image.py @@ -0,0 +1,78 @@ +import uuid +from abc import ABC, abstractmethod + + +class _Image(ABC): + """ + This is the upper-level image, in the context of a VM, it's mapping + to the VM's disk. It can have one or more lower-level images, + e.g. 
A qemu image can have a lower-level image chain: + base ---> sn + in which "sn" is the top lower-level image name while "base" is the + backing lower-level image name of "sn" + """ + + # Supported image types: qemu + _IMAGE_TYPE = None + + def __init__(self, image_config): + self._config = image_config + self.image_meta["uuid"] = uuid.uuid4() + self._virt_images = dict() + self._handlers = dict() + + @classmethod + def image_type(cls): + return cls._IMAGE_TYPE + + @property + def image_id(self): + return self.image_meta["uuid"] + + @property + def image_config(self): + return self._config + + @property + def image_meta(self): + return self.image_config["meta"] + + @property + def image_spec(self): + return self.image_config["spec"] + + @classmethod + def define_config(cls, image_name, params): + image_type = params.object_params(image_name).get("image_type", "qemu") + + return { + "meta": { + "uuid": None, + "name": image_name, + "type": image_type, + "topology": None, + }, + "spec": { + "virt-images": {} + }, + } + + @abstractmethod + def create(self): + raise NotImplemented + + @abstractmethod + def destroy(self): + raise NotImplemented + + @abstractmethod + def query(self): + raise NotImplemented + + @abstractmethod + def backup(self): + raise NotImplemented + + def get_image_handler(self, cmd): + return self._handlers.get(cmd) + diff --git a/virttest/vt_imgr/images/image_handlers.py b/virttest/vt_imgr/images/image_handlers.py new file mode 100644 index 0000000000..f1e5557b57 --- /dev/null +++ b/virttest/vt_imgr/images/image_handlers.py @@ -0,0 +1,14 @@ +from abc import ABC, abstractmethod + + +class _ImageUpdateCommand(ABC): + _UPDATE_ACTION = None + + @staticmethod + @abstractmethod + def execute(image, arguments): + raise NotImplemented + + @classmethod + def action(cls): + return cls._UPDATE_ACTION diff --git a/virttest/vt_imgr/images/qemu/__init__.py b/virttest/vt_imgr/images/qemu/__init__.py new file mode 100644 index 0000000000..a9808db67e --- /dev/null +++ b/virttest/vt_imgr/images/qemu/__init__.py @@ -0,0 +1 @@ +from .qemu_image import _QemuImage diff --git a/virttest/vt_imgr/images/qemu/qemu_image.py b/virttest/vt_imgr/images/qemu/qemu_image.py new file mode 100644 index 0000000000..c4d29be0be --- /dev/null +++ b/virttest/vt_imgr/images/qemu/qemu_image.py @@ -0,0 +1,140 @@ +from virttest.vt_cluster import cluster + +from ..image import _Image +from .qemu_virt_image import get_virt_image_class + + +class _QemuImage(_Image): + + # The upper-level image type + _IMAGE_TYPE = "qemu" + + def __init__(self, image_config): + super().__init__(image_config) + # Store images with the same order as tags defined in image_chain + self._handlers = { + "create": self.qemu_img_create, + "destroy": self.qemu_img_destroy, + "rebase": self.qemu_img_rebase, + "commit": self.qemu_img_commit, + "snapshot": self.qemu_img_snapshot, + } + + @classmethod + def _define_virt_image_config(cls, image_tag, image_params): + image_format = image_params.get("image_format", "qcow2") + virt_image_class = get_virt_image_class(image_format) + return virt_image_class.define_config(image_tag, image_params) + + @classmethod + def _define_config_legacy(cls, image_name, params): + def _define_topo_chain_config(): + images = config["spec"]["virt-images"] + for image_tag in image_chain: + image_params = params.object_params(image_tag) + images[image_tag] = cls._define_virt_image_config(image_tag, + image_params) + + def _define_topo_none_config(): + images = config["spec"]["virt-images"] + image_params = 
params.object_params(image_name) + images[image_tag] = cls._define_virt_image_config(image_name, image_params) + + config = super().define_config(image_name, params) + config["meta"]["name"] = image_name + + image_chain = params.object_params(image_name).objects("image_chain") + if image_chain: + config["meta"]["topology"] = {"chain": image_chain} + _define_topo_chain_config() + else: + config["meta"]["topology"] = {"none": image_name} + _define_topo_none_config() + + return config + + @classmethod + def define_config(cls, image_name, params): + """ + Define the image configuration by its cartesian params + """ + return cls._define_config_legacy(image_name, params) + + @property + def virt_image_names(self): + topo_name, images = list(self.image_meta["topology"].items())[0] + if topo_name == "none": + return [images] + elif topo_name == "chain": + return images + raise ValueError("Unknown topology name %s" % topo_name) + + @property + def virt_image_volumes(self): + volumes = {} + for image_tag in self.virt_image_names: + volumes[image_tag] = self._virt_images[image_tag].volume_id + return volumes + + def create(self): + """ + Create the image objects, all its lower-level image objects and + their volume objects will be created. + """ + for image_tag in self.virt_image_names: + config = self.image_spec["virt-images"][image_tag] + image_format = config["spec"]["format"] + virt_image_class = get_virt_image_class(image_format) + virt_image = virt_image_class(config) + virt_image.create() + self._virt_images[image_tag] = virt_image + + def destroy(self): + """ + Destroy the image object, all its lower-level image objects + will be destroyed. + """ + for image_tag in self.virt_image_names[::-1]: + virt_image = self._virt_images[image_tag] + if not virt_image.keep(): + virt_image.destroy() + del(self._virt_images[image_tag]) + + def query(self, request, verbose=False): + pass + + def qemu_img_create(self, arguments): + """ + qemu-img create + + Allocate storage by resource management system first. + """ + for image_tag in self.virt_image_names: + virt_image = self._virt_images[image_tag] + virt_image.allocate_volume(arguments) + + node_id = arguments["nodes"][0] + node = cluster.get_node(node_id) + # call rpc to create image on a worker node + node.update_image(self.image_config, {"create": arguments}) + + def qemu_img_destroy(self, arguments): + """ + Release storage by resource management system. + + Note the image object still exists, i.e. all the lower-level + image objects and their volume objects will not be destroyed. 
+ """ + for image_tag in self.virt_image_names[::-1]: + virt_image = self._virt_images[image_tag] + virt_image.release_volume(arguments) + + def qemu_img_rebase(self, arguments): + update_image(self.image_config, {"rebase": arguments}) + + def qemu_img_commit(self, arguments): + update_image(self.image_config, {"commit": arguments}) + + def qemu_img_snapshot(self, arguments): + image_config = dict() + update_image(self.image_config, {"snapshot": arguments}) diff --git a/virttest/vt_imgr/images/qemu/qemu_image_handlers.py b/virttest/vt_imgr/images/qemu/qemu_image_handlers.py new file mode 100644 index 0000000000..0dda4797a2 --- /dev/null +++ b/virttest/vt_imgr/images/qemu/qemu_image_handlers.py @@ -0,0 +1,117 @@ +from ..image_handlers import _ImageUpdateCommand + + +class _QemuImageCreate(_ImageUpdateCommand): + """ + qemu-img create [--object OBJECTDEF] [-q] [-f FMT] [-b BACKING_FILE [-F BACKING_FMT]] [-u] [-o OPTIONS] FILENAME [SIZE] + {"create": {}} + {"create": {"images": [stg]}} + + """ + + _UPDATE_ACTION = "create" + + @staticmethod + def execute(image, arguments): + image.spec["topology"] + if params.get("create_with_dd") == "yes" and self.image_format == "raw": + # maps K,M,G,T => (count, bs) + human = { + "K": (1, 1), + "M": (1, 1024), + "G": (1024, 1024), + "T": (1024, 1048576), + } + if self.size[-1] in human: + block_size = human[self.size[-1]][1] + size = int(self.size[:-1]) * human[self.size[-1]][0] + qemu_img_cmd = "dd if=/dev/zero of=%s count=%s bs=%sK" % ( + self.image_filename, + size, + block_size, + ) + else: + cmd_dict = {} + cmd_dict["image_format"] = self.image_format + if self.base_tag: + # if base image has secret, use json representation + base_key_secrets = self.encryption_config.base_key_secrets + if self.base_tag in [ + s.image_id for s in base_key_secrets + ] or self._need_auth_info(self.base_tag): + base_params = params.object_params(self.base_tag) + cmd_dict["backing_file"] = "'%s'" % get_image_json( + self.base_tag, base_params, self.root_dir + ) + else: + cmd_dict["backing_file"] = self.base_image_filename + cmd_dict["backing_format"] = self.base_format + + # secret objects of the backing images + secret_objects = self._backing_access_secret_objects + + # secret object of the image itself + if self._image_access_secret_object: + secret_objects.extend(self._image_access_secret_object) + + image_secret_objects = self._secret_objects + if image_secret_objects: + secret_objects.extend(image_secret_objects) + if secret_objects: + cmd_dict["secret_object"] = " ".join(secret_objects) + + # tls creds objects of the backing images of the source + tls_creds_objects = self._backing_access_tls_creds_objects + + # tls creds object of the source image itself + if self._image_access_tls_creds_object: + tls_creds_objects.append(self._image_access_tls_creds_object) + + if tls_creds_objects: + cmd_dict["tls_creds_object"] = " ".join(tls_creds_objects) + + cmd_dict["image_filename"] = self.image_filename + cmd_dict["image_size"] = self.size + options = self._parse_options(params) + if options: + cmd_dict["options"] = ",".join(options) + qemu_img_cmd = ( + self.image_cmd + + " " + + self._cmd_formatter.format(self.create_cmd, **cmd_dict) + ) + + if params.get("image_backend", "filesystem") == "filesystem": + image_dirname = os.path.dirname(self.image_filename) + if image_dirname and not os.path.isdir(image_dirname): + e_msg = ( + "Parent directory of the image file %s does " + "not exist" % self.image_filename + ) + LOG.error(e_msg) + LOG.error("This usually means a serious 
setup exceptions.") + LOG.error( + "Please verify if your data dir contains the " + "expected directory structure" + ) + LOG.error("Backing data dir: %s", data_dir.get_backing_data_dir()) + LOG.error("Directory structure:") + for root, _, _ in os.walk(data_dir.get_backing_data_dir()): + LOG.error(root) + + LOG.warning( + "We'll try to proceed by creating the dir. " + "Other errors may ensue" + ) + os.makedirs(image_dirname) + + msg = "Create image by command: %s" % qemu_img_cmd + error_context.context(msg, LOG.info) + cmd_result = process.run( + qemu_img_cmd, shell=True, verbose=False, ignore_status=True + ) + if cmd_result.exit_status != 0 and not ignore_errors: + raise exceptions.TestError( + "Failed to create image %s\n%s" % (self.image_filename, cmd_result) + ) + pass diff --git a/virttest/vt_imgr/images/qemu/qemu_virt_image/__init__.py b/virttest/vt_imgr/images/qemu/qemu_virt_image/__init__.py new file mode 100644 index 0000000000..3d353afb0a --- /dev/null +++ b/virttest/vt_imgr/images/qemu/qemu_virt_image/__init__.py @@ -0,0 +1,16 @@ +from .raw_qemu_virt_image import _RawQemuVirtImage +from .qcow2_qemu_virt_image import _Qcow2QemuVirtImage +from .luks_qemu_virt_image import _LuksQemuVirtImage + + +_image_classes= dict() +_image_classes[_RawQemuVirtImage.image_format] = _RawQemuVirtImage +_image_classes[_Qcow2QemuVirtImage.image_format] = _Qcow2QemuVirtImage +_image_classes[_LuksQemuVirtImage.image_format] = _LuksQemuVirtImage + + +def get_virt_image_class(image_format): + return _image_classes.get(image_format) + + +__all__ = ['get_virt_image_class'] diff --git a/virttest/vt_imgr/images/qemu/qemu_virt_image/luks_qemu_virt_image.py b/virttest/vt_imgr/images/qemu/qemu_virt_image/luks_qemu_virt_image.py new file mode 100644 index 0000000000..ed5a5f527a --- /dev/null +++ b/virttest/vt_imgr/images/qemu/qemu_virt_image/luks_qemu_virt_image.py @@ -0,0 +1,27 @@ +from virttest.utils_misc import generate_random_string + +from .qemu_virt_image import _QemuVirtImage + + +class _LuksQemuVirtImage(_QemuVirtImage): + _IMAGE_FORMAT = "luks" + + @classmethod + def _define_config_legacy(cls, image_name, image_params): + config = super()._define_config_legacy(image_name, image_params) + spec = config["spec"] + spec.update({ + "preallocation": image_params.get("preallocated"), + "extent_size_hint": image_params.get("image_extent_size_hint"), + }) + spec["encryption"].update({ + "name": "secret_{s}".format(s=generate_random_string(6)), + "data": image_params.get("image_secret", "redhat"), + "format": image_params.get("image_secret_format", "raw"), + "stored": image_params.get("image_secret_stored", "data"), + "file": None, + "type": "luks", + "object": "secret", + }) + + return config diff --git a/virttest/vt_imgr/images/qemu/qemu_virt_image/qcow2_qemu_virt_image.py b/virttest/vt_imgr/images/qemu/qemu_virt_image/qcow2_qemu_virt_image.py new file mode 100644 index 0000000000..e0da5de9f2 --- /dev/null +++ b/virttest/vt_imgr/images/qemu/qemu_virt_image/qcow2_qemu_virt_image.py @@ -0,0 +1,34 @@ +from virttest.utils_misc import generate_random_string + +from .qemu_virt_image import _QemuVirtImage + + +class _Qcow2QemuVirtImage(_QemuVirtImage): + + _IMAGE_FORMAT = "qcow2" + + @classmethod + def _define_config_legacy(cls, image_name, image_params): + config = super()._define_config_legacy(image_name, image_params) + spec = config["spec"] + spec.update({ + "cluster-size": image_params.get("image_cluster_size"), + "lazy-refcounts": image_params.get("lazy_refcounts"), + "compat": 
image_params.get("qcow2_compatible"), + "preallocation": image_params.get("preallocated"), + "extent_size_hint": image_params.get("image_extent_size_hint"), + "compression_type": image_params.get("image_compression_type"), + }) + spec["encryption"] = { + "name": "secret_{s}".format(s=generate_random_string(6)), + "data": image_params.get("image_secret", "redhat"), + "format": image_params.get("image_secret_format", "raw"), + "stored": image_params.get("image_secret_stored", "data"), + "file": None, + "encrypt": { + "format": "luks", + } + "object": "secret", + } + + return config diff --git a/virttest/vt_imgr/images/qemu/qemu_virt_image/qemu_virt_image.py b/virttest/vt_imgr/images/qemu/qemu_virt_image/qemu_virt_image.py new file mode 100644 index 0000000000..36f8e9f6f9 --- /dev/null +++ b/virttest/vt_imgr/images/qemu/qemu_virt_image/qemu_virt_image.py @@ -0,0 +1,48 @@ +from abc import abstractmethod + +from ...virt_image import _VirtImage +from virttest.vt_resmgr import * + + +class _QemuVirtImage(_VirtImage): + """ + A virt image has one storage resource(volume), take qemu virt image + as an example, the cartesian params beginning with 'image_', e.g. + 'image_size' describe this object + """ + + @classmethod + def _define_config_legacy(cls, image_name, image_params): + config = super().define_config(image_name, image_params) + config["spec"]["volume"] = define_resource_config("volume", image_params) + return config + + @classmethod + def define_config(cls, image_name, image_params): + """ + Define the raw image configuration by its cartesian params. + Currently use the existing image params, in future, we'll design + a new set of params to describe an lower-level image. + """ + #TODO: Design new image params + #if image_params.get_boolean("image_new_params_enabled"): + # return cls._define_config_new(image_name, image_params) + return cls._define_config_legacy(image_name, image_params) + + def create(self): + volume_config = self.virt_image_spec["volume"] + create_resource(volume_config) + + def destroy(self): + destroy_resource(self.volume_id) + + def info(self, force_share=False, output="human"): + pass + + def allocate_volume(self, arguments): + update_resource(self.volume_id, {"bind": arguments}) + update_resource(self.volume_id, {"allocate": arguments}) + + def release_volume(self, arguments): + update_resource(self.volume_id, {"release": arguments}) + update_resource(self.volume_id, {"unbind": arguments}) diff --git a/virttest/vt_imgr/images/qemu/qemu_virt_image/raw_qemu_virt_image.py b/virttest/vt_imgr/images/qemu/qemu_virt_image/raw_qemu_virt_image.py new file mode 100644 index 0000000000..84a63ea409 --- /dev/null +++ b/virttest/vt_imgr/images/qemu/qemu_virt_image/raw_qemu_virt_image.py @@ -0,0 +1,17 @@ +from .qemu_virt_image import _QemuVirtImage +from virttest.vt_resmgr import * + + +class _RawQemuVirtImage(_QemuVirtImage): + _IMAGE_FORMAT = "raw" + + @classmethod + def _define_config_legacy(cls, image_name, image_params): + config = super()._define_config_legacy(image_name, image_params) + spec = config["spec"] + spec.update({ + "preallocation": image_params.get("preallocated"), + "extent_size_hint": image_params.get("image_extent_size_hint"), + }) + + return config diff --git a/virttest/vt_imgr/images/virt_image.py b/virttest/vt_imgr/images/virt_image.py new file mode 100644 index 0000000000..2bbcef8536 --- /dev/null +++ b/virttest/vt_imgr/images/virt_image.py @@ -0,0 +1,75 @@ +from abc import ABC, abstractmethod + + +class _VirtImage(ABC): + """ + The lower-level image, which 
has a storage resource(volume), is + defined by the cartesian params beginning with 'image_'. One or + more lower-level images can represent a upper-level image. + """ + + _IMAGE_FORMAT = None + + def __init__(self, config): + self._config = config + + @classmethod + def format(cls): + return cls._IMAGE_FORMAT + + @classmethod + def define_config(cls, image_name, image_params): + return { + "meta": { + "name": image_name, + }, + "spec": { + "format": cls.format(), + "volume": {}, + }, + } + + @property + def volume_id(self): + return self.virt_image_spec["volume"]["meta"]["uuid"] + + @property + def name(self): + return self.virt_image_meta["name"] + + @property + def virt_image_config(self): + return self._config + + @property + def virt_image_spec(self): + return self.virt_image_config["spec"] + + @property + def virt_image_meta(self): + return self.virt_image_config["meta"] + + @property + @abstractmethod + def keep(self): + raise NotImplemented + + @abstractmethod + def create(self): + raise NotImplemented + + @abstractmethod + def destroy(self): + raise NotImplemented + + @abstractmethod + def info(self): + raise NotImplemented + + @abstractmethod + def allocate_volume(self, arguments): + raise NotImplemented + + @abstractmethod + def release_volume(self, arguments): + raise NotImplemented diff --git a/virttest/vt_imgr/vt_imgr.py b/virttest/vt_imgr/vt_imgr.py new file mode 100644 index 0000000000..684902ecbc --- /dev/null +++ b/virttest/vt_imgr/vt_imgr.py @@ -0,0 +1,211 @@ +""" +The upper-level image manager. + +from virttest.vt_imgr import vt_imgr + +# Define the image configuration +image_config = vt_imgr.define_image_config(image_name, params) + +# Create the upper-level image object +image_id = vt_imgr.create_image(image_config) + +# qemu-img create +vt_imgr.update_image(image_id, {"create":{"nodes": ["n1", "n2"]}}) + +# Query the summary config of the "image1" +vt_imgr.query_image(image_id, query="config", verbose=False) +returned: + {"meta": {"uuid": "uuid-sn" + "name": "sn", + "type": "qemu", + "topology": {"chain": ["base", "sn"]}}, + "spec": {"images": {"base": {"meta": {}, + "spec": {"format": "raw", + "volume": "volume-uuid1"}}, + "sn": {"meta": {}, + "spec": {"format": "qcow2", + "volume": "volume-uuid2"} + } + } + } + } +""" + + +import logging + +from .images import get_image_class + +LOG = logging.getLogger("avocado." + __name__) + + +# TODO: +# Add drivers for diff handlers +# Add access permission for images +# serialize +class _VTImageManager(object): + + def __init__(self): + self._images = dict() + + def define_image_config(self, image_name, params): + """ + Define the upper-level image(e.g. in the context of a VM, it's + mapping to a VM's disk) configuration by its cartesian params. + E.g. An upper-level qemu image has an lower-level image chain + base ---> sn + | | + resource resource + :param image_name: The image tag defined in cartesian params, + e.g. for a qemu image, the tag should be the + top image("sn" in the example above) if the + "image_chain" is defined, usually it is + defined in the "images" param, e.g. "image1" + :type image_name: string + :param params: The params for all the lower-level images + Note it's *NOT* an image-specific params like + params.object_params("sn") + *BUT* the params for both "sn" and "base" + Examples: + 1. images_vm1 = "image1 sn" + image_chain_sn = "base sn" + image_name = "sn" + params = the_case_params.object_params('vm1') + 2. 
images = "image1 stg" + image_name = "image1" + params = the_case_params + :type params: A Params object + :return: The image configuration + :rtype: dict + """ + image_params = params.object_params(image_name) + image_type = image_params.get("image_type", "qemu") + image_class = get_image_class(image_type) + return image_class.define_config(image_name, params) + + def create_image(self, image_config): + """ + Create an upper-level image(e.g. in the context of a VM, it's + mapping to a VM's disk) object by its configuration without + any storage allocation. All its lower-level images and their + mapping storage resource objects will be created. + :param image_config: The image configuration. + Call define_image_config to get it. + :type image_config: dict + :return: The image object id + :rtype: string + """ + image_type = image_config["meta"]["type"] + image_class = get_image_class(image_type) + image = image_class(image_config) + image.create() + self._images[image.image_id] = image + return image.image_id + + def destroy_image(self, image_id): + """ + Destroy a specified image. All its storage allocation should + be released. Note if 'remove_image=no', then don't release the + storage allocation. + :param image_id: The image id + :type image_id: string + """ + image = self._images.get(image_id) + image.destroy() + if image.virt_images: + LOG.info("Keep the image") + else: + del(self._images[image_id]) + + def update_image(self, image_id, config): + """ + Update a specified upper-level image + + config format: + {command: arguments} + + Supported commands for a qemu image: + create: qemu-img create + destroy: Destroy the specified lower-level images + resize: qemu-img resize + map: qemu-img map + convert: qemu-img convert + commit: qemu-img commit + snapshot: qemu-img snapshot + rebase: qemu-img rebase + add: Add a lower-level image object + delete: Delete a lower-level image object + + Note: Not all images support the above operations + The arguments is a dict object which contains all related settings + for a specific command + :param image_id: The image id + :type image_id: string + """ + image = self._images.get(image_id) + cmd, arguments = list(config.items())[0] + image_handler = image.get_image_handler(cmd) + image_handler(arguments) + + def backup_image(self, image_id): + image = self._images.get(image_id) + image.backup() + + def query_image(self, image_id, request=None, verbose=False): + """ + Query the configuration of a specified upper-level image, the + general format of the image configuration: + {"meta": {"uuid": "zzz" + "name": "xxx", + "type": "yyy" + "topology": {}}, + "spec": {"virt-images":{}} + } + E.g. A qemu image having an image chain: + {"meta": {"uuid": "uuid-sn" + "name": "sn", + "type": "qemu", + "topology": {"chain": ["base", "sn"]}}, + "spec": {"virt-images": {"base": {"meta": {}, + "spec": {"format": "raw", + "volume": {"meta": {"uuid": "id1"}, + "spec": {"size": 5678}} + } + }, + "sn": {"meta": {}, + "spec": {"format": "qcow2", + "volume": {"meta": {"uuid": "id2"}, + "spec": {"size": 5678}} + } + } + } + } + } + :param request: The query content, format: + None + meta[.] + spec[.virt-images.[.meta[.]]] + spec[.virt-images.[.spec[.]]] + Note the prefix "config.spec.virt-images" can omitted when + querying a specific 's configuration: + [.meta[.]] + [.spec[.]] + Examples: + 1. Query the image's configuration + request=None + 2. Query the image's lower images' configurations + request=spec.virt-images + 3. 
Query sn's volume configuration + request=spec.virt-images.sn.spec.volume + :type request: string + :param verbose: False: Return a summary of the configuration + True: Return all the configuration + :type verbose: boolean + :return: The upper-level image's configuration, or a snippet of + :rtype: dict + """ + image = self._images.get(image_id) + return image.query(request, verbose) + + +vt_imgr = _VTImageManager() diff --git a/virttest/vt_resmgr/__init__.py b/virttest/vt_resmgr/__init__.py new file mode 100644 index 0000000000..0a0e47b0b0 --- /dev/null +++ b/virttest/vt_resmgr/__init__.py @@ -0,0 +1 @@ +from .api import * diff --git a/virttest/vt_resmgr/api.py b/virttest/vt_resmgr/api.py new file mode 100644 index 0000000000..79c1f85edf --- /dev/null +++ b/virttest/vt_resmgr/api.py @@ -0,0 +1,274 @@ +""" +The resource management APIs, open to the test cases. + +A test case can call these APIs to handle the resource directly, e.g. +allocate a volume from a nfs pool, or call the upper-level APIs to +handle a volume indirectly, e.g. create a qcow2 qemu image, the image +manager call these APIs to allocate a volume. + +# Register a nfs pool +pool_config = define_pool_config("nfs", nfs_pool_params) +pool_id = register_resouce_pool(pool_config) + +# Connect the nfs pool from worker nodes +attach_resource_pool(pool_id) + +# Create a volume object +image_params = params.object_params("stg") +config = define_resource_config("volume", image_params) +res_id = create_resource(config) + +# Bind the nfs resource to worker nodes +config = {'bind': {'nodes': ['node1', 'node2']}} +update_resource(res_id, config) + +# Allocate the nfs file based volumes +config = {'allocate': {}} +update_resource(res_id, config) + +# Unbind the nfs resource from node1 +config = {'unbind': {'nodes': ['node1']}} +update_resource(res_id, config) + +# Release the nfs file based volumes +config = {'release': {}} +update_resource(res_id, config) + +# Unbind the nfs resource +config = {'unbind': {}} +update_resource(res_id, config) + +# Destroy the nfs resource +destroy_resource(res_id) +""" +from .vt_resmgr import vt_resmgr + + +class PoolNotFound(Exception): + def __init__(self, pool_id): + self._id = pool_id + + def __str__(self): + return 'Cannot find the pool(id="%s)"' % self._id + + +class UnknownPoolType(Exception): + def __init__(self, pool_type): + self._type = pool_type + + def __str__(self): + return 'Unknown pool type "%s"' % self._type + + +class ResourceNotFound(Exception): + pass + + +class ResourceBusy(Exception): + pass + + +class ResourceNotAvailable(Exception): + pass + + +class UnknownResourceType(Exception): + pass + + +def define_pool_config(pool_type, pool_params): + """ + Define a resource pool's configuration by its cartesian params. 
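+
+    A rough sketch of the returned structure for an nfs pool (the exact
+    keys depend on the specific pool implementation; the node names are
+    illustrative):
+
+        {"meta": {"uuid": None, "type": "nfs", "auth": None,
+                  "nodes": ["node1", "node2"]},
+         "spec": {"server": ..., "export": ..., "mount": ...,
+                  "mount-options": ...}}
+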
+ + :param pool_type: The resource pool type, defined by "pool_type" + :type pool_type: string + :param pool_params: The resource pool's specific params + :type pool_params: Param + :return: The resource pool's configuration, + format: {"meta":{...}, "spec":{...}} + The specific attributes depend on the specific pool + :rtype: dict + """ + return vt_resmgr.define_pool_config(pool_type, pool_params) + + +def register_resouce_pool(pool_config): + """ + Register a resource pool, the pool should be ready for use before + registration, i.e, the users need to setup the resource pool first + + :param pool_config: The pool's configuration, generated by + define_pool_config function + :type pool_config: dict + :return: The resource pool id + :rtype: string + """ + pool_id = vt_resmgr.register_pool(pool_config) + if pool_id is None: + raise UnknownPoolType(pool_config["meta"]["type"]) + return pool_id + + +def unregister_resouce_pool(pool_id): + """ + Unregister a resource pool + + :param pool_id: The id of the pool to unregister + :type pool_id: string + """ + vt_resmgr.unregister_pool(pool_id) + + +def attach_resource_pool(pool_id): + """ + Attach the registered pool to worker nodes, then the pool can be + accessed by the worker nodes + + :param pool_id: The id of the pool to attach + :type pool_id: string + """ + vt_resmgr.attach_pool(pool_id) + + +def detach_resource_pool(pool_id): + """ + Detach the pool from the worker nodes, after that, the pool cannot + be accessed + + :param pool_id: The id of the pool to detach + :type pool_id: string + """ + vt_resmgr.detach_pool(pool_id) + + +def define_resource_config(resource_type, resource_params): + """ + Define a resource's configuration by its cartesian params + + :param resource_type: The resource type, it's usually implied, e.g. + the image's storage resource is a "volume", + supported: "volume" + :type resource_type: string + :param resource_params: The resource's specific params, usually + defined by an upper-level object, e.g. + "image1" has a storage resource, so + resource_params = image1's params + i.e. use image1's params to define its + storage resource's configuration + :type resource_params: Param + :return: The resource's configuration, + format: {"meta":{...}, "spec":{...}} + The specific attributes depend on the specific resource + :rtype: dict + """ + pool_id = vt_resmgr.select_pool(resource_type, resource_params) + if pool_id is None: + raise ResourceNotAvailable() + pool = vt_resmgr.get_pool_by_id(pool_id) + return pool.define_resource_config(resource_type, resource_params) + + +def create_resource(resource_config): + """ + Create a resource object without any specific resource allocation. 
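+
+    A minimal usage sketch (it mirrors the module-level example above;
+    "stg" is just an image tag from the cartesian params):
+
+        image_params = params.object_params("stg")
+        config = define_resource_config("volume", image_params)
+        res_id = create_resource(config)
+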
+ + :param resource_config: The resource configuration, generated by + define_resource_config function + :type resource_config: dict + :return: The resource id + :rtype: string + """ + pool_id = resource_config["spec"]["pool"] + pool = vt_resmgr.get_pool_by_id(pool_id) + if pool is None: + raise PoolNotFound(pool_id) + return pool.create_resource(resouce_config) + + +def destroy_resource(resource_id): + """ + Destroy the resource object, the specific resource allocation + will be released + + :param resource_id: The resource id + :type resource_id: string + """ + pool = vt_resmgr.get_pool_by_resource(resource_id) + pool.destroy_resource(resource_id) + + +def query_resource(resource_id, request): + """ + Query the configuration of a specified resource, the general format + of the resource configuration: + {"meta": {"uuid": "xxx" + "type": "yyy", + "bindings": []} + "spec": {"pool": "xxx", ...}} + E.g. A storage volume resource + {'meta': { + 'uuid': 'res_id1', + 'type': 'volume', + 'bindings': [{'node': 'node1', + 'backing': 'ref1'}, + ] + }, + 'spec': { + 'pool': 'nfs_pool1', + 'size': 65536, + 'path': '/mnt/sn.qcow2', + } + } + :param resource_id: The resource id + :type resource_id: string + :param request: The query content, format: + config[.meta[.]] + config[.spec[.]] + Examples: + config + config.meta + config.spec.pool + :type request: string + :return: The resource's configuration, it can be either the whole + one or a snippet + :rtype: dict + """ + pool = vt_resmgr.get_pool_by_resource(resource_id) + return pool.info_resource(resource_id) + + +def update_resource(resource_id, config): + """ + Update a resource, the config format: + {'command': arguments} + Supported commands: + 'bind': Bind a specified resource to one or more worker nodes in order + to access the specific resource allocation, note the resource + is *NOT* allocated with the bind command + 'unbind': Unbind a specified resource from one or more worker nodes, + the specific resource will be released only when all bindings + are gone + 'allocate': Allocate the resource + 'release': Release the resource + The arguments is a dict object which contains all related settings for a + specific action + + Examples: + Bind a resource to one or more nodes + {'bind': {'nodes': ['node1']}} + {'bind': {'nodes': ['node1', 'node2']}} + Unbind a resource from one or more nodes + {'unbind': {'nodes': []}} + {'unbind': {'nodes': ['node1', 'node2']}} + Allocate the resource + {'allocate': {'nodes': ['node1']}} + Release the resource + {'release': {'nodes': []}} + + :param resource_id: The resource id + :type resource_id: string + :param config: The specified action and its arguments + :type config: dict + """ + pool = vt_resmgr.get_pool_by_resource(resource_id) + pool.update_resource(resource_id, config) diff --git a/virttest/vt_resmgr/resources/__init__.py b/virttest/vt_resmgr/resources/__init__.py new file mode 100644 index 0000000000..f2c0ae3f59 --- /dev/null +++ b/virttest/vt_resmgr/resources/__init__.py @@ -0,0 +1,17 @@ +#from .cvm import _SnpPool +#from .cvm import _TdxPool +from .storage import _CephPool +from .storage import _DirPool + +_pool_classes = dict() +#_pool_classes[_SnpPool.pool_type] = _SnpPool +#_pool_classes[_TdxPool.pool_type] = _TdxPool +_pool_classes[_CephPool.pool_type] = _CephPool +_pool_classes[_DirPool.pool_type] = _DirPool + + +def get_resource_pool_class(pool_type): + return _pool_classes.get(pool_type) + + +__all__ = ["get_resource_pool_class"] diff --git a/virttest/vt_resmgr/resources/cvm/__init__.py 
b/virttest/vt_resmgr/resources/cvm/__init__.py new file mode 100644 index 0000000000..0a0e47b0b0 --- /dev/null +++ b/virttest/vt_resmgr/resources/cvm/__init__.py @@ -0,0 +1 @@
+from .api import *
diff --git a/virttest/vt_resmgr/resources/pool.py b/virttest/vt_resmgr/resources/pool.py new file mode 100644 index 0000000000..7f8c544082 --- /dev/null +++ b/virttest/vt_resmgr/resources/pool.py @@ -0,0 +1,122 @@
+import uuid
+from abc import ABC, abstractmethod
+
+from virttest.vt_cluster import cluster
+
+
+class _ResourcePool(ABC):
+    """
+    A resource pool is used to manage resources. A resource must be
+    allocated from a specific pool, and a pool can hold many resources.
+    """
+
+    _POOL_TYPE = None
+
+    def __init__(self, pool_config):
+        self._config = pool_config
+        self.pool_meta["uuid"] = str(uuid.uuid4())
+        self._resources = dict()  # {resource id: resource object}
+        self._caps = dict()
+
+        # "*" means attach the pool to all worker nodes in the cluster
+        if not set(self.attaching_nodes).difference(set(["*"])):
+            self.attaching_nodes = [n.name for n in cluster.get_all_nodes()]
+
+    @property
+    def pool_id(self):
+        return self.pool_meta["uuid"]
+
+    @property
+    def pool_config(self):
+        return self._config
+
+    @property
+    def pool_meta(self):
+        return self._config["meta"]
+
+    @property
+    def pool_spec(self):
+        return self._config["spec"]
+
+    @classmethod
+    def define_config(cls, pool_params):
+        return {
+            "meta": {
+                "uuid": None,
+                "type": None,
+                "auth": pool_params.get("auth"),
+                "nodes": pool_params.objects("nodes") or ["*"],
+            },
+            "spec": {},
+        }
+
+    @abstractmethod
+    def meet_resource_request(self, resource_type, resource_params):
+        """
+        Check if the pool can support a resource's allocation
+        """
+        raise NotImplementedError
+
+    def define_resource_config(self, resource_type, resource_params):
+        """
+        Define the resource configuration, format:
+            {"meta": {...}, "spec": {...}}
+        It depends on the specific resource.
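+
+        A rough sketch for a "volume" resource (the concrete keys come
+        from the resource class; the owning pool's config is attached
+        under "meta"):
+
+            {"meta": {"type": "volume", "bindings": {},
+                      "pool": {"meta": {...}, "spec": {...}}},
+             "spec": {"size": ..., "uri": ..., ...}}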
+ """ + res_cls = self.get_resource_class(resource_type) + config = res_cls.define_config(resource_params) + config["meta"]["pool"] = self.pool_config + return config + + @classmethod + @abstractmethod + def get_resource_class(cls, resource_type): + raise NotImplementedError + + def create_resource(self, resource_config): + """ + Create a resource object, no real resource allocated + """ + meta = resource_config["meta"] + res_cls = self.get_resource_class(meta["type"]) + res = res_cls(resource_config) + self._resources[res.resource_id] = res + return res.resource_id + + def destroy_resource(self, resource_id): + """ + Destroy the resource object, all its backings should be released + """ + res = self._resources[resource_id] + res.destroy() + del(self._resources[resource_id]) + + def update_resource(self, resource_id, config): + resource = self._resources.get(resource_id) + cmd, arguments = config.popitem() + handler = resource.get_update_handler(cmd) + handler(arguments) + + def info_resource(self, resource_id): + """ + Get the reference of a specified resource + """ + res = self._resources.get(resource_id) + return res.resource_info + + @property + def attaching_nodes(self): + return self.pool_meta["nodes"] + + @attaching_nodes.setter + def attaching_nodes(self, nodes): + self.pool_meta["nodes"] = nodes + + """ + @property + def pool_capability(self): + node_name = self.attaching_nodes[0] + node = cluster.get_node(node_name) + return node.proxy.get_pool_capability() + """ + + @classmethod + def pool_type(cls): + return cls._POOL_TYPE diff --git a/virttest/vt_resmgr/resources/resource.py b/virttest/vt_resmgr/resources/resource.py new file mode 100644 index 0000000000..182adfa9c9 --- /dev/null +++ b/virttest/vt_resmgr/resources/resource.py @@ -0,0 +1,147 @@ +import uuid +from abc import ABC, abstractmethod + + +class _ResourceBinding(object): + """ + A binding binds a resource to an allocated resource backing + at a worker node. A resource can have many bindings, but one + binding can only bind one backing at one worker node. + """ + + def __init__(self, node_name): + self._node_name = node_id + self._backing_id = None + + def create_backing(self, resource_config, need_allocate=False): + """ + Create a resource backing object via RPC + """ + node = get_node(self._node_name) + self._backing_id = node.proxy.create_backing(resource_config, need_allocate) + + def destroy_backing(self, need_release=False): + """ + Destroy the resource backing object via RPC + """ + node = get_node(self._node_name) + node.proxy.destroy_backing(self._backing_id, need_release) + + def update_backing(self, spec): + node = get_node(self._node_name) + node.proxy.update_backing(self._backing_id, spec) + + @property + def reference(self): + return {"node": self.node_name, "id": self.backing_id} + + @property + def node_name(self): + """ + Get the node id of the resource backing + """ + return self._node_name + + @property + def backing_id(self): + """ + Get the resource backing id + """ + return self._backing_id + + +class _Resource(ABC): + """ + A resource defines what users request, it's independent of a VM, + users can request kinds of resources for any purpose. The resource + can be bound to several backings on different worker nodes. + + Note: A resource can bind to only one backing on a worker node. 
+ """ + + _RESOURCE_TYPE = None + + def __init__(self, resource_config): + self._config = resource_config + self.mata["uuid"] = uuid.uuid4() + self._handlers = { + "bind": self.bind, + "unbind": self.unbind, + "allocate": self.allocate, + "release": self.release, + } + + @classmethod + def resource_type(cls): + raise cls._RESOURCE_TYPE + + @property + def resource_config(self): + return self._config + + @property + def resource_spec(self): + return self.resource_config["spec"] + + @property + def resource_meta(self): + return self.resource_config["meta"] + + @property + def resource_id(self): + return self.resource_meta["uuid"] + + @property + def resource_pool(self): + return self.resource_meta["pool"]["meta"]["uuid"] + + @property + def resource_bindings(self): + return self.resource_meta["bindings"] + + @classmethod + def define_config(cls, resource_params): + return { + "meta": { + "uuid": None, + "type": None, + "bindings": {}, + }, + "spec": { + "pool": None, + } + } + + @property + @abstractmethod + def backing_config(self): + """ + Define the required information of the resource, used + for allocating the resource on the worker nodes + """ + config = dict() + config["uuid"] = self.resource_id + config["pool"] = self.resource_pool + return config + + def get_update_handler(self, command): + return self._handlers.get(command) + + @abstractmethod + def bind(self, arguments): + """ + Bind the resource to one or more worker nodes + """ + raise NotImplemented + + @abstractmethod + def unbind(self, arguments): + raise NotImplemented + + @abstractmethod + def allocate(self, arguments): + raise NotImplemented + + @abstractmethod + def release(self, arguments): + raise NotImplemented diff --git a/virttest/vt_resmgr/resources/storage/__init__.py b/virttest/vt_resmgr/resources/storage/__init__.py new file mode 100644 index 0000000000..116d616d9b --- /dev/null +++ b/virttest/vt_resmgr/resources/storage/__init__.py @@ -0,0 +1,14 @@ +from .dir import _DirPool +from .ceph import _CephPool +#from .nfs import _NfsPool +#from .nbd import _NbdPool +#from .iscsi_direct import _IscsiDirectPool + + +__all__ = ( + _CephPool, + _DirPool, +# _NfsPool, +# _NbdPool, +# _IscsiDirectPool, +) diff --git a/virttest/vt_resmgr/resources/storage/ceph/__init__.py b/virttest/vt_resmgr/resources/storage/ceph/__init__.py new file mode 100644 index 0000000000..8ec3b25a7a --- /dev/null +++ b/virttest/vt_resmgr/resources/storage/ceph/__init__.py @@ -0,0 +1 @@ +from .ceph_pool import _CephPool diff --git a/virttest/vt_resmgr/resources/storage/dir/__init__.py b/virttest/vt_resmgr/resources/storage/dir/__init__.py new file mode 100644 index 0000000000..c09faaf942 --- /dev/null +++ b/virttest/vt_resmgr/resources/storage/dir/__init__.py @@ -0,0 +1 @@ +from .dir_pool import _DirPool diff --git a/virttest/vt_resmgr/resources/storage/dir/dir_pool.py b/virttest/vt_resmgr/resources/storage/dir/dir_pool.py new file mode 100644 index 0000000000..72bde4d13a --- /dev/null +++ b/virttest/vt_resmgr/resources/storage/dir/dir_pool.py @@ -0,0 +1,44 @@ +import logging + +from ...pool import _ResourcePool +from .dir_resources import get_dir_resource_class + + +LOG = logging.getLogger("avocado." 
+ __name__) + + +# cartesian params for a Directory pool +#[pool1] +#pool_type = "filesystem" +#pool_dir_root = "/root/avocado/images" + +class _DirPool(_ResourcePool): + _POOL_TYPE = "filesystem" + + def __init__(self, pool_config): + super().__init__(pool_config) + + @classmethod + def define_config(cls, pool_params): + config = super().define_config(pool_params) + config["meta"]["type"] = cls._POOL_TYPE + config["spec"]["root"] = pool_params["pool_dir_root"] + + @classmethod + def get_resource_class(cls, resource_type): + return get_dir_resource_class(resource_type) + + def meet_resource_request(self, resource_type, resource_params): + # Check if the pool can supply a resource with a specified type + cls = self.get_resource_class(resource_type) + if cls is None: + return False + + # Check if this is the pool with the specified type + storage_type = resource_params.get("storage_type") + if storage_type: + if storage_type != self.pool_type: + return False + + # TODO: Check if the pool has capacity to allocate the resource + return True diff --git a/virttest/vt_resmgr/resources/storage/dir/dir_resources.py b/virttest/vt_resmgr/resources/storage/dir/dir_resources.py new file mode 100644 index 0000000000..776a70a2df --- /dev/null +++ b/virttest/vt_resmgr/resources/storage/dir/dir_resources.py @@ -0,0 +1,84 @@ +import logging + +from virttest import utils_numeric +from virttest.vt_cluster import cluster + +from ..volume import _FileVolume + + +LOG = logging.getLogger("avocado." + __name__) + + +class _DirFileVolume(_FileVolume): + """ + The directory file-based volume + """ + + @classmethod + def _define_config_legacy(cls, resource_params): + return super().define_config(resource_params) + + @classmethod + def define_config(cls, resource_params): + return cls._define_config_legacy(resource_params) + + def bind(self, arguments): + """ + Bind the resource to a backing on a worker node. + Note: A local dir resource has one and only one binding in the cluster + """ + if not self.resource_bindings: + LOG.warning("The dir resource has already bound to %s," + "it can bind to only one worker node in the cluster", + list(self.resource_bindings.keys())[0]) + else: + nodes = arguments["nodes"] + if len(nodes) != 1: + LOG.debug("A dir resource must bind to only one worker node.") + raise + node = cluster.get_node(nodes[0]) + backing_id = node.proxy.create_backing(self.backing_config) + self.resource_bindings[nodes[0]] = backing_id + + def unbind(self, arguments): + """ + Unbind the resource from a worker node. 
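+
+        A sketch of the expected call order through the resource
+        management API (res_id is the volume's resource id; release
+        first, then unbind, see the note below):
+
+            update_resource(res_id, {"release": {}})
+            update_resource(res_id, {"unbind": {}})
+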
+ Note: A dir resource must be released before unbinding + because it has only one binding + """ + node_name, backing_id = list(self.resource_bindings.items())[0] + node = cluster.get_node(node_name) + node.proxy.update_backing(backing_id, {"release": arguments}) + node.proxy.destroy_backing(backing_id) + self.resource_bindings.pop(node_name) + + def allocate(self, arguments): + node_name, backing_id = list(self.resource_bindings.items())[0] + node = cluster.get_node(node_name) + node.proxy.update_backing(backing_id, {"allocate": arguments}) + + def release(self, arguments): + node_name, backing_id = list(self.resource_bindings.items())[0] + node = cluster.get_node(node_name) + node.proxy.update_backing(backing_id, {"release": arguments}) + + def resize(self, arguments): + """ + Resize the local dir volume resource + """ + new = int(utils_numeric.normalize_data_size(arguments["size"], "B")) + if new != self.resource_spec["size"]: + node_name, backing_id = list(self.resource_bindings.items())[0] + node = cluster.get_node(node_name) + node.proxy.update_backing(backing_id, {"resize": arguments}) + self.resource_spec["size"] = new + else: + LOG.debug("Updated size(%s) is the same with the original", new) + + +def get_dir_resource_class(resource_type): + mapping = { + "volume": _DirFileVolume, + } + + return mapping.get(resource_type) diff --git a/virttest/vt_resmgr/resources/storage/iscsi_direct/__init__.py b/virttest/vt_resmgr/resources/storage/iscsi_direct/__init__.py new file mode 100644 index 0000000000..4631b968fa --- /dev/null +++ b/virttest/vt_resmgr/resources/storage/iscsi_direct/__init__.py @@ -0,0 +1 @@ +from .iscsi_direct_pool import _IscsiDirectPool diff --git a/virttest/vt_resmgr/resources/storage/iscsi_direct/iscsi_direct_pool.py b/virttest/vt_resmgr/resources/storage/iscsi_direct/iscsi_direct_pool.py new file mode 100644 index 0000000000..33738d1af9 --- /dev/null +++ b/virttest/vt_resmgr/resources/storage/iscsi_direct/iscsi_direct_pool.py @@ -0,0 +1,20 @@ +import logging + +from ...resource import _Resource +from ...pool import _ResourcePool + + +LOG = logging.getLogger("avocado." + __name__) + + +class _IscsiDirectResource(_Resource): + """ + The iscsi-direct pool resource + """ + + def _initialize(self, config): + self._lun = config["lun"] + + +class _IscsiDirectPool(_ResourcePool): + POOL_TYPE = "iscsi-direct" diff --git a/virttest/vt_resmgr/resources/storage/nbd/__init__.py b/virttest/vt_resmgr/resources/storage/nbd/__init__.py new file mode 100644 index 0000000000..8a29e248f3 --- /dev/null +++ b/virttest/vt_resmgr/resources/storage/nbd/__init__.py @@ -0,0 +1 @@ +from .nbd_pool import _NbdPool diff --git a/virttest/vt_resmgr/resources/storage/nfs/__init__.py b/virttest/vt_resmgr/resources/storage/nfs/__init__.py new file mode 100644 index 0000000000..a0e90ec573 --- /dev/null +++ b/virttest/vt_resmgr/resources/storage/nfs/__init__.py @@ -0,0 +1 @@ +from .nfs_pool import _NfsPool diff --git a/virttest/vt_resmgr/resources/storage/nfs/nfs_pool.py b/virttest/vt_resmgr/resources/storage/nfs/nfs_pool.py new file mode 100644 index 0000000000..b0dcdc716c --- /dev/null +++ b/virttest/vt_resmgr/resources/storage/nfs/nfs_pool.py @@ -0,0 +1,50 @@ +import logging + +from ...pool import _ResourcePool +from .nfs_resources import get_nfs_resource_class + + +LOG = logging.getLogger("avocado." 
+ __name__)
+
+
+# Configure two nfs pools
+#[nfs_pool1]
+#pool_type = "nfs"
+#pool_access_nodes = "node1 node2"
+#pool_nfs_server =
+#pool_nfs_export =
+#pool_nfs_mount =
+#pool_nfs_mount_options =
+
+class _NfsPool(_ResourcePool):
+    _POOL_TYPE = "nfs"
+
+    @classmethod
+    def define_config(cls, pool_params):
+        config = super().define_config(pool_params)
+        config["meta"]["type"] = cls._POOL_TYPE
+        config["spec"].update({
+            "server": pool_params["pool_nfs_server"],
+            "export": pool_params["pool_nfs_export"],
+            "mount": pool_params["pool_nfs_mount"],
+            "mount-options": pool_params.get("pool_nfs_mount_options"),
+        })
+        return config
+
+    @classmethod
+    def get_resource_class(cls, resource_type):
+        return get_nfs_resource_class(resource_type)
+
+    def meet_resource_request(self, resource_type, resource_params):
+        # Check if the pool can supply a resource of the specified type
+        res_cls = self.get_resource_class(resource_type)
+        if res_cls is None:
+            return False
+
+        # Check if this is the pool with the specified storage type
+        storage_type = resource_params.get("storage_type")
+        if storage_type:
+            if storage_type != self.pool_type():
+                return False
+
+        # TODO: Check if the pool has capacity to allocate the resource
+        return True
diff --git a/virttest/vt_resmgr/resources/storage/nfs/nfs_resources.py b/virttest/vt_resmgr/resources/storage/nfs/nfs_resources.py new file mode 100644 index 0000000000..dbff7fa0ca --- /dev/null +++ b/virttest/vt_resmgr/resources/storage/nfs/nfs_resources.py @@ -0,0 +1,55 @@
+import logging
+
+from ...resource import _ResourceBinding
+from ..volume import _FileVolume
+
+
+LOG = logging.getLogger("avocado." + __name__)
+
+
+class _NfsFileVolume(_FileVolume):
+    """
+    The nfs file-based volume
+    """
+
+    def __init__(self, resource_config):
+        super().__init__(resource_config)
+        self._bindings = dict()  # {node name: _ResourceBinding}
+
+    @classmethod
+    def _define_config_legacy(cls, resource_params):
+        return super().define_config(resource_params)
+
+    @classmethod
+    def define_config(cls, resource_params):
+        return cls._define_config_legacy(resource_params)
+
+    def _create_binding(self, node_name, need_allocate=False):
+        binding = _ResourceBinding(node_name)
+        binding.create_backing(self.backing_config, need_allocate)
+        self._bindings[node_name] = binding
+
+    def _destroy_binding(self, node_name):
+        need_release = len(self._bindings) == 1
+        binding = self._bindings[node_name]
+        binding.destroy_backing(need_release)
+        del self._bindings[node_name]
+
+    def bind(self, arguments):
+        nodes = arguments["nodes"]
+        for node_name in nodes:
+            self._create_binding(node_name)
+
+    def unbind(self, arguments):
+        nodes = arguments.get("nodes")
+        nodes = nodes or list(self._bindings.keys())
+        for node_name in nodes:
+            self._destroy_binding(node_name)
+
+    def resize(self, arguments):
+        pass
+
+
+def get_nfs_resource_class(resource_type):
+    mapping = {
+        "volume": _NfsFileVolume,
+    }
+
+    return mapping.get(resource_type)
diff --git a/virttest/vt_resmgr/resources/storage/storage_pool.py b/virttest/vt_resmgr/resources/storage/storage_pool.py new file mode 100644 index 0000000000..d038be7c1e --- /dev/null +++ b/virttest/vt_resmgr/resources/storage/storage_pool.py @@ -0,0 +1,47 @@
+import logging
+
+from ..pool import _ResourcePool
+
+
+LOG = logging.getLogger("avocado."
+ __name__) + + +class _StoragePool(_ResourcePool): + + @classmethod + def define_resource_config(cls, resource_name, resource_type, resource_params): + cls = get_resource_class(resource_type) + config = cls.define_config(resource_name, resource_params) + config["spec"]["pool"] = self.pool_id + return config + + def meet_resource_request(self, resource_type, resource_params): + # Check if the pool is the specified one + pool_name = resource_params.get("image_pool_name") + if pool_name == self.pool_name: + return True + + # Check if the pool can supply a resource with a specified type + cls = get_resource_class(resource_type) + if cls is None: + return False + + # Check if the is the pool with the specified type + storage_type = resource_params.get("storage_type") + if storage_type: + if storage_type != self.pool_type: + return False + + # TODO: Check if the pool has capacity to allocate the resource + return True + + def create_resource(self, resource_config): + meta = resource_config["meta"] + cls = get_resource_class(meta["type"]) + res = cls(resource_config) + self._resources[res.resource_id] = res + return res.resource_id + + @classmethod + def get_resource_class(cls, resource_type): + pass diff --git a/virttest/vt_resmgr/resources/storage/volume.py b/virttest/vt_resmgr/resources/storage/volume.py new file mode 100644 index 0000000000..65b6703a5a --- /dev/null +++ b/virttest/vt_resmgr/resources/storage/volume.py @@ -0,0 +1,122 @@ +import os + +from virttest import utils_numeric + +from ..resource import _Resource + + +class _Volume(_Resource): + """ + Storage volumes are abstractions of physical partitions, + LVM logical volumes, file-based disk images + """ + + _RESOURCE_TYPE = "volume" + _VOLUME_TYPE = None + + @classmethod + def volume_type(cls): + return cls._VOLUME_TYPE + + @classmethod + def define_config(cls, resource_params): + size = int(utils_numeric.normalize_data_size( + resource_params.get("image_size", "20G"), order_magnitude="B" + )) + + config = super().define_config(resource_params) + config["meta"].update({ + "type": cls._RESOURCE_TYPE, + "raw": resource_params.get_boolean("image_raw_device"), + }) + config["spec"].update({ + "size": size, + "allocation": None, + }) + + return config + + @property + def backing_config(self): + config = super().backing_config + config.update({ + "size": self.resource_spec["size"], + "type": self.resource_type, + "volume-type": self.volume_type, + }) + return config + + +class _FileVolume(_Volume): + """For file based volumes""" + + _VOLUME_TYPE = "file" + + def __init__(self, resource_config): + super().__init__(resource_config) + self._handlers.update({ + "resize": self.resize, + }) + + @classmethod + def define_config(cls, resource_params): + config = super().define_config(resource_params) + + config["meta"].update({ + "volume-type": cls._VOLUME_TYPE, + }) + + image_name = resource_params.get("image_name", "image") + if os.path.isabs(image_name): + config["spec"]["uri"] = image_name + config["spec"]["filename"] = os.path.basename(image_name) + else: + image_format = resource_params.get("image_format", "qcow2") + config["spec"]["filename"] = "%s.%s" % (image_name, image_format) + config["spec"]["uri"] = None + + return config + + @property + def backing_config(self): + config = super().backing_config + config.update({ + "uri": self.resource_spec["uri"], + }) + return config + + def resize(self, arguments): + raise NotImplemented + + +class _BlockVolume(_Volume): + """For disk, lvm, iscsi based volumes""" + + _VOLUME_TYPE = "block" + + 
@classmethod + def define_config(cls, resource_params): + config = super().define_config(resource_params) + + config["meta"].update({ + "volume-type": cls._VOLUME_TYPE, + }) + config["spec"].update({ + "uri": None, + }) + + return config + + @property + def backing_config(self): + config = super().backing_config + config.update({ + "uri": self.resource_spec["uri"], + }) + return config + + +class _NetworkVolume(_Volume): + """For rbd, iscsi-direct based volumes""" + + _VOLUME_TYPE = "network" diff --git a/virttest/vt_resmgr/vt_resmgr.py b/virttest/vt_resmgr/vt_resmgr.py new file mode 100644 index 0000000000..d9985ccf54 --- /dev/null +++ b/virttest/vt_resmgr/vt_resmgr.py @@ -0,0 +1,122 @@ +from .resources import get_resource_pool_class + +from virttest.vt_cluster import cluster + + +class _VTResourceManager(object): + + def __init__(self): + self._pools = dict() # {pool id: pool object} + + def setup(self, pool_config_list): + for config in pool_config_list: + pool_id = self.register_pool(config) + self.attach_pool(pool_id) + + def teardown(self): + for pool_id in self.pools: + self.detach_pool(pool_id) + self.unregister_pool(pool_id) + + def get_pool_by_name(self, pool_name): + pools = [p for p in self.pools.values() if p.pool_name == pool_name] + return pools[0] if pools else None + + def get_pool_by_id(self, pool_id): + return self.pools.get(pool_id) + + def get_pool_by_resource(self, resource_id): + pools = [p for p in self.pools.values() if resource_id in p.resources] + return pools[0] if pools else None + + def select_pool(self, resource_type, resource_params): + """ + Select the resource pool by its cartesian params + + :param resource_type: The resource's type, supported: + "volume" + :type resource_type: string + :param resource_params: The resource's specific params, e.g. + params.object_params('image1') + :type resource_params: dict or Param + :return: The resource pool id + :rtype: string + """ + for pool_id, pool in self.pools.items(): + if pool.meet_resource_request(resource_type, resource_params): + return pool_id + return None + + def define_pool_config(self, pool_type, pool_params): + pool_class = get_resource_pool_class(pool_type) + return pool_class.define_config(pool_params) + + def register_pool(self, pool_config): + pool_type = pool_config["meta"]["type"] + pool_class = get_resource_pool_class(pool_type) + pool = pool_class(pool_config) + self._pools[pool.pool_id] = pool + return pool.pool_id + + def unregister_pool(self, pool_id): + """ + The pool should be detached from all worker nodes + """ + pool = self.pools[pool_id] + if pool.is_attached(): + raise + del self._pools[pool_id] + + def attach_pool_to(self, pool, node): + """ + Attach a pool to a specific node + """ + node.proxy.connect_pool(pool.pool_id, pool.pool_config) + + def attach_pool(self, pool_id): + pool = self.get_pool_by_id(pool_id) + for node_name in pool.attaching_nodes: + node = cluster.get_node(node_name) + self.attach_pool_to(pool, node) + + def detach_pool_from(self, pool, node): + """ + Detach a pool from a specific node + """ + node.proxy.disconnect_pool(pool.pool_id) + + def detach_pool(self, pool_id): + pool = self.get_pool_by_id(pool_id) + for node_name in pool.attaching_nodes: + node = cluster.get_node(node_name) + self.detach_pool_from(pool, node) + + def info_pool(self, pool_id): + """ + Get the pool's information, including 'meta' and 'spec': + meta: + e.g. version for tdx, 1.0 or 1.5 + spec: + common specific attributes + e.g. nfs_server for nfs pool + node-specific attributes + e.g. 
[node1:{path:/mnt1,permission:rw}, node2:{}]
+        """
+        info = dict()
+        pool = self.get_pool_by_id(pool_id)
+        info.update(pool.pool_config)
+        for node_name in pool.attaching_nodes:
+            node = cluster.get_node(node_name)
+            access_info = node.proxy.get_pool_connection(pool_id)
+            info.update(access_info)
+        return info
+
+    def pool_capability(self, pool_id):
+        pool = self.get_pool_by_id(pool_id)
+        return pool.capability
+
+    @property
+    def pools(self):
+        return self._pools
+
+
+vt_resmgr = _VTResourceManager()
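Taken together, the manager, pool, and resource pieces above are meant to be driven roughly as follows. This is a sketch assembled from the api.py and vt_imgr docstrings, assuming `params` is the test's cartesian Params object and that the pool section "nfs_pool1" and the worker nodes "node1"/"node2" exist in the cluster configuration:

    from virttest.vt_resmgr import (
        attach_resource_pool,
        create_resource,
        define_pool_config,
        define_resource_config,
        destroy_resource,
        register_resouce_pool,
        update_resource,
    )

    # Register and attach an nfs pool defined in the cartesian config
    pool_params = params.object_params("nfs_pool1")
    pool_config = define_pool_config("nfs", pool_params)
    pool_id = register_resouce_pool(pool_config)
    attach_resource_pool(pool_id)

    # Define and create a volume resource for the "stg" image
    image_params = params.object_params("stg")
    res_config = define_resource_config("volume", image_params)
    res_id = create_resource(res_config)

    # Bind the resource to worker nodes, then allocate the storage
    update_resource(res_id, {"bind": {"nodes": ["node1", "node2"]}})
    update_resource(res_id, {"allocate": {}})

    # Tear down: release, unbind and destroy
    update_resource(res_id, {"release": {}})
    update_resource(res_id, {"unbind": {}})
    destroy_resource(res_id)

Note that binding and allocation stay separate steps: bind only creates the per-node backing objects, while allocate is what actually provisions the storage on the worker nodes.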