From 977900002a1d5ff7e8175da4c9d784d533bb33c5 Mon Sep 17 00:00:00 2001 From: Zhenchao Liu Date: Fri, 8 Mar 2024 15:53:07 +0800 Subject: [PATCH] Add storage and image management support Signed-off-by: Zhenchao Liu --- virttest/vt_agent/managers/image/__init__.py | 1 + .../vt_agent/managers/image/dispatcher.py | 20 ++ .../vt_agent/managers/image/image_handler.py | 40 +++ .../vt_agent/managers/image/qemu/__init__.py | 1 + .../managers/image/qemu/qemu_image_handler.py | 16 ++ .../vt_agent/managers/resbackings/__init__.py | 1 + .../vt_agent/managers/resbackings/backing.py | 38 +++ .../managers/resbackings/backing_mgr.py | 40 +++ .../managers/resbackings/cvm/__init__.py | 27 ++ .../managers/resbackings/cvm/_sev_resmgr.py | 98 +++++++ .../cvm_platform_mgr/_sev_platform_mgr.py | 6 + .../cvm_platform_mgr/_tdx_platform_mgr.py | 6 + .../cvm_platform_mgr/cvm_platform_mgr.py | 0 .../managers/resbackings/dispatcher.py | 49 ++++ .../managers/resbackings/pool_connection.py | 27 ++ .../managers/resbackings/storage/__init__.py | 2 + .../resbackings/storage/dir/__init__.py | 1 + .../resbackings/storage/dir/dir_backing.py | 33 +++ .../storage/dir/dir_backing_mgr.py | 46 ++++ .../storage/dir/dir_pool_connection.py | 25 ++ .../resbackings/storage/nfs/__init__.py | 1 + .../resbackings/storage/nfs/nfs_backing.py | 33 +++ .../storage/nfs/nfs_backing_mgr.py | 46 ++++ .../storage/nfs/nfs_pool_connection.py | 49 ++++ virttest/vt_agent/services/__init__.py | 0 virttest/vt_agent/services/virt/image_api.py | 13 + .../vt_agent/services/virt/resbacking_api.py | 99 +++++++ virttest/vt_imgr/.api.py | 106 ++++++++ virttest/vt_imgr/__init__.py | 1 + virttest/vt_imgr/images/__init__.py | 15 + virttest/vt_imgr/images/image.py | 69 +++++ virttest/vt_imgr/images/qemu/__init__.py | 1 + virttest/vt_imgr/images/qemu/qemu_image.py | 83 ++++++ .../images/qemu/qemu_virt_image/__init__.py | 16 ++ .../qemu_virt_image/luks_qemu_virt_image.py | 15 + .../qemu_virt_image/qcow2_qemu_virt_image.py | 16 ++ .../qemu/qemu_virt_image/qemu_virt_image.py | 33 +++ .../qemu_virt_image/raw_qemu_virt_image.py | 13 + virttest/vt_imgr/images/virt_image.py | 71 +++++ virttest/vt_imgr/vt_imgr.py | 184 +++++++++++++ virttest/vt_resmgr/__init__.py | 1 + virttest/vt_resmgr/api.py | 256 ++++++++++++++++++ virttest/vt_resmgr/resources/__init__.py | 14 + virttest/vt_resmgr/resources/cvm/__init__.py | 1 + virttest/vt_resmgr/resources/cvm/api.py | 73 +++++ .../vt_resmgr/resources/cvm/conductor.py.bak | 58 ++++ virttest/vt_resmgr/resources/pool.py | 95 +++++++ virttest/vt_resmgr/resources/resource.py | 144 ++++++++++ .../vt_resmgr/resources/resource_handlers.py | 33 +++ .../vt_resmgr/resources/storage/__init__.py | 14 + .../resources/storage/ceph/__init__.py | 1 + .../resources/storage/dir/__init__.py | 1 + .../resources/storage/dir/dir_pool.py | 25 ++ .../resources/storage/dir/dir_resource.py | 66 +++++ .../storage/iscsi_direct/__init__.py | 1 + .../storage/iscsi_direct/iscsi_direct_pool.py | 20 ++ .../resources/storage/nbd/__init__.py | 1 + .../resources/storage/nfs/__init__.py | 1 + .../resources/storage/nfs/nfs_pool.py | 53 ++++ .../resources/storage/nfs/nfs_resource.py | 94 +++++++ .../storage/nfs/nfs_resource_handlers.py | 15 + .../vt_resmgr/resources/storage/volume.py | 45 +++ virttest/vt_resmgr/vt_resmgr.py | 115 ++++++++ 63 files changed, 2468 insertions(+) create mode 100644 virttest/vt_agent/managers/image/__init__.py create mode 100644 virttest/vt_agent/managers/image/dispatcher.py create mode 100644 virttest/vt_agent/managers/image/image_handler.py 
create mode 100644 virttest/vt_agent/managers/image/qemu/__init__.py create mode 100644 virttest/vt_agent/managers/image/qemu/qemu_image_handler.py create mode 100644 virttest/vt_agent/managers/resbackings/__init__.py create mode 100644 virttest/vt_agent/managers/resbackings/backing.py create mode 100644 virttest/vt_agent/managers/resbackings/backing_mgr.py create mode 100644 virttest/vt_agent/managers/resbackings/cvm/__init__.py create mode 100644 virttest/vt_agent/managers/resbackings/cvm/_sev_resmgr.py create mode 100644 virttest/vt_agent/managers/resbackings/cvm_platform_mgr/_sev_platform_mgr.py create mode 100644 virttest/vt_agent/managers/resbackings/cvm_platform_mgr/_tdx_platform_mgr.py create mode 100644 virttest/vt_agent/managers/resbackings/cvm_platform_mgr/cvm_platform_mgr.py create mode 100644 virttest/vt_agent/managers/resbackings/dispatcher.py create mode 100644 virttest/vt_agent/managers/resbackings/pool_connection.py create mode 100644 virttest/vt_agent/managers/resbackings/storage/__init__.py create mode 100644 virttest/vt_agent/managers/resbackings/storage/dir/__init__.py create mode 100644 virttest/vt_agent/managers/resbackings/storage/dir/dir_backing.py create mode 100644 virttest/vt_agent/managers/resbackings/storage/dir/dir_backing_mgr.py create mode 100644 virttest/vt_agent/managers/resbackings/storage/dir/dir_pool_connection.py create mode 100644 virttest/vt_agent/managers/resbackings/storage/nfs/__init__.py create mode 100644 virttest/vt_agent/managers/resbackings/storage/nfs/nfs_backing.py create mode 100644 virttest/vt_agent/managers/resbackings/storage/nfs/nfs_backing_mgr.py create mode 100644 virttest/vt_agent/managers/resbackings/storage/nfs/nfs_pool_connection.py create mode 100644 virttest/vt_agent/services/__init__.py create mode 100644 virttest/vt_agent/services/virt/image_api.py create mode 100644 virttest/vt_agent/services/virt/resbacking_api.py create mode 100644 virttest/vt_imgr/.api.py create mode 100644 virttest/vt_imgr/__init__.py create mode 100644 virttest/vt_imgr/images/__init__.py create mode 100644 virttest/vt_imgr/images/image.py create mode 100644 virttest/vt_imgr/images/qemu/__init__.py create mode 100644 virttest/vt_imgr/images/qemu/qemu_image.py create mode 100644 virttest/vt_imgr/images/qemu/qemu_virt_image/__init__.py create mode 100644 virttest/vt_imgr/images/qemu/qemu_virt_image/luks_qemu_virt_image.py create mode 100644 virttest/vt_imgr/images/qemu/qemu_virt_image/qcow2_qemu_virt_image.py create mode 100644 virttest/vt_imgr/images/qemu/qemu_virt_image/qemu_virt_image.py create mode 100644 virttest/vt_imgr/images/qemu/qemu_virt_image/raw_qemu_virt_image.py create mode 100644 virttest/vt_imgr/images/virt_image.py create mode 100644 virttest/vt_imgr/vt_imgr.py create mode 100644 virttest/vt_resmgr/__init__.py create mode 100644 virttest/vt_resmgr/api.py create mode 100644 virttest/vt_resmgr/resources/__init__.py create mode 100644 virttest/vt_resmgr/resources/cvm/__init__.py create mode 100644 virttest/vt_resmgr/resources/cvm/api.py create mode 100644 virttest/vt_resmgr/resources/cvm/conductor.py.bak create mode 100644 virttest/vt_resmgr/resources/pool.py create mode 100644 virttest/vt_resmgr/resources/resource.py create mode 100644 virttest/vt_resmgr/resources/resource_handlers.py create mode 100644 virttest/vt_resmgr/resources/storage/__init__.py create mode 100644 virttest/vt_resmgr/resources/storage/ceph/__init__.py create mode 100644 virttest/vt_resmgr/resources/storage/dir/__init__.py create mode 100644 
virttest/vt_resmgr/resources/storage/dir/dir_pool.py create mode 100644 virttest/vt_resmgr/resources/storage/dir/dir_resource.py create mode 100644 virttest/vt_resmgr/resources/storage/iscsi_direct/__init__.py create mode 100644 virttest/vt_resmgr/resources/storage/iscsi_direct/iscsi_direct_pool.py create mode 100644 virttest/vt_resmgr/resources/storage/nbd/__init__.py create mode 100644 virttest/vt_resmgr/resources/storage/nfs/__init__.py create mode 100644 virttest/vt_resmgr/resources/storage/nfs/nfs_pool.py create mode 100644 virttest/vt_resmgr/resources/storage/nfs/nfs_resource.py create mode 100644 virttest/vt_resmgr/resources/storage/nfs/nfs_resource_handlers.py create mode 100644 virttest/vt_resmgr/resources/storage/volume.py create mode 100644 virttest/vt_resmgr/vt_resmgr.py diff --git a/virttest/vt_agent/managers/image/__init__.py b/virttest/vt_agent/managers/image/__init__.py new file mode 100644 index 0000000000..50ee313c63 --- /dev/null +++ b/virttest/vt_agent/managers/image/__init__.py @@ -0,0 +1 @@ +from .dispatcher import _image_handler_dispatcher diff --git a/virttest/vt_agent/managers/image/dispatcher.py b/virttest/vt_agent/managers/image/dispatcher.py new file mode 100644 index 0000000000..861f702b8f --- /dev/null +++ b/virttest/vt_agent/managers/image/dispatcher.py @@ -0,0 +1,20 @@ +from .qemu import _qemu_image_handler + +# from .xen import _xen_image_handler + + +class _ImageHandlerDispatcher(object): + + def __init__(self): + self._managers_mapping = dict() + self._backings_mapping = dict() + self._pools_mapping = dict() + + def dispatch(self, key): + return + + +_image_handler_dispatcher = _ImageHandlerDispatcher() + +_image_handler_dispatcher.register(_qemu_image_handler) +# _image_handler_dispatcher.register(_xen_image_handler) diff --git a/virttest/vt_agent/managers/image/image_handler.py b/virttest/vt_agent/managers/image/image_handler.py new file mode 100644 index 0000000000..907c95e8b2 --- /dev/null +++ b/virttest/vt_agent/managers/image/image_handler.py @@ -0,0 +1,40 @@ +from abc import ABC, abstractmethod + + +class _ResourceBackingManager(ABC): + _ATTACHED_POOL_TYPE = None + + def __init__(self): + self._pool_connections = dict() + self._backings = dict() + + @abstractmethod + def create_pool_connection(self, pool_config, pool_access_config): + pass + + def destroy_pool_connection(self, pool_id): + pool_conn = self._pool_connections[pool_id] + pool_conn.shutdown() + del self._pool_connections[pool_id] + + @abstractmethod + def create_backing(self, config, need_allocate=False): + pass + + def destroy_backing(self, backing_id, need_release=False): + backing = self._backings[backing_id] + pool_conn = self._pool_connections[backing.source_pool] + if need_release: + backing.release_resource(pool_conn) + del self._backings[backing_id] + + def update_backing(self, backing_id, config): + backing = self._backings[backing_id] + pool_conn = self._pool_connections[backing.source_pool] + backing.update(pool_conn, config) + + def get_backing(self, backing_id): + return self._backings.get(backing_id) + + def info_backing(self, backing_id): + return self._backings[backing_id].to_specs() diff --git a/virttest/vt_agent/managers/image/qemu/__init__.py b/virttest/vt_agent/managers/image/qemu/__init__.py new file mode 100644 index 0000000000..8b5f895ef4 --- /dev/null +++ b/virttest/vt_agent/managers/image/qemu/__init__.py @@ -0,0 +1 @@ +from .dir import _dir_backing_mgr diff --git a/virttest/vt_agent/managers/image/qemu/qemu_image_handler.py 
b/virttest/vt_agent/managers/image/qemu/qemu_image_handler.py new file mode 100644 index 0000000000..b28789d00a --- /dev/null +++ b/virttest/vt_agent/managers/image/qemu/qemu_image_handler.py @@ -0,0 +1,16 @@ +class _QemuImageHandler(_ImageHandler): + + def convert(self, source_spec, target_spec): + pass + + def commit(self, top_spec, backing_spec): + pass + + def create(self, image_spec): + pass + + def destroy(self, image_spec): + pass + + def info(self, image_spec): + pass diff --git a/virttest/vt_agent/managers/resbackings/__init__.py b/virttest/vt_agent/managers/resbackings/__init__.py new file mode 100644 index 0000000000..39e82a8165 --- /dev/null +++ b/virttest/vt_agent/managers/resbackings/__init__.py @@ -0,0 +1 @@ +from .dispatcher import _resbacking_mgr_dispatcher diff --git a/virttest/vt_agent/managers/resbackings/backing.py b/virttest/vt_agent/managers/resbackings/backing.py new file mode 100644 index 0000000000..38adf44684 --- /dev/null +++ b/virttest/vt_agent/managers/resbackings/backing.py @@ -0,0 +1,38 @@ +import uuid +from abc import ABC, abstractmethod + + +class _ResourceBacking(ABC): + _RESOURCE_TYPE = None + + def __init__(self, config): + self._uuid = uuid.uuid4() + self._source_pool = config["spec"]["pool_id"] + + @property + def uuid(self): + return self._uuid + + @abstractmethod + def allocate(self, pool_connection): + pass + + @abstractmethod + def release(self, pool_connection): + pass + + @abstractmethod + def update(self, pool_connection, new_spec): + pass + + @abstractmethod + def info(self, pool_connection): + pass + + @property + def resource_type(self): + return self._RESOURCE_TYPE + + @property + def source_pool(self): + return self._source_pool diff --git a/virttest/vt_agent/managers/resbackings/backing_mgr.py b/virttest/vt_agent/managers/resbackings/backing_mgr.py new file mode 100644 index 0000000000..907c95e8b2 --- /dev/null +++ b/virttest/vt_agent/managers/resbackings/backing_mgr.py @@ -0,0 +1,40 @@ +from abc import ABC, abstractmethod + + +class _ResourceBackingManager(ABC): + _ATTACHED_POOL_TYPE = None + + def __init__(self): + self._pool_connections = dict() + self._backings = dict() + + @abstractmethod + def create_pool_connection(self, pool_config, pool_access_config): + pass + + def destroy_pool_connection(self, pool_id): + pool_conn = self._pool_connections[pool_id] + pool_conn.shutdown() + del self._pool_connections[pool_id] + + @abstractmethod + def create_backing(self, config, need_allocate=False): + pass + + def destroy_backing(self, backing_id, need_release=False): + backing = self._backings[backing_id] + pool_conn = self._pool_connections[backing.source_pool] + if need_release: + backing.release_resource(pool_conn) + del self._backings[backing_id] + + def update_backing(self, backing_id, config): + backing = self._backings[backing_id] + pool_conn = self._pool_connections[backing.source_pool] + backing.update(pool_conn, config) + + def get_backing(self, backing_id): + return self._backings.get(backing_id) + + def info_backing(self, backing_id): + return self._backings[backing_id].to_specs() diff --git a/virttest/vt_agent/managers/resbackings/cvm/__init__.py b/virttest/vt_agent/managers/resbackings/cvm/__init__.py new file mode 100644 index 0000000000..d31c959cc0 --- /dev/null +++ b/virttest/vt_agent/managers/resbackings/cvm/__init__.py @@ -0,0 +1,27 @@ +from .. import _CVMResBackingMgr +from .. 
import _all_subclasses + + +class LaunchSecurity(object): + _instance = None + + @classmethod + def dispatch(cls): + return cls._instance + + @classmethod + def startup(cls, config): + if cls._instance is not None: + return cls._instance + + for mgr_cls in _all_subclasses(_CVMResBackingMgr): + if mgr_cls.get_platform_flags() is not None: + cls._instance = mgr_cls(config) + cls._instance.startup() + return cls._instance + + raise + + @classmethod + def teardown(cls): + cls._instance.teardown() diff --git a/virttest/vt_agent/managers/resbackings/cvm/_sev_resmgr.py b/virttest/vt_agent/managers/resbackings/cvm/_sev_resmgr.py new file mode 100644 index 0000000000..fcb2982cd8 --- /dev/null +++ b/virttest/vt_agent/managers/resbackings/cvm/_sev_resmgr.py @@ -0,0 +1,98 @@ +from .. import _ResBacking +from .. import _ResBackingCaps +from .. import _CVMResBackingMgr + + +class _SEVCommResBacking(_ResBacking): + + def __init__(self, requests): + super().__init__(requests) + self._cbitpos = None + self._reduced_phys_bits = None + # self._sev_device = '/dev/sev' + # self._kernel_hashes = None + + def to_specs(self): + return {"cbitpos": self._cbitpos, "reduced-phys-bits": self._reduced_phys_bits} + + +class _SEVResBacking(_SEVCommResBacking): + RESOURCE_TYPE = "sev" + + def __init__(self): + super().__init__() + self._dh_cert = None + self._session = None + + def allocate(self, requests): + pass + + def free(self): + pass + + def to_specs(self): + pass + + +class _SNPResBacking(_SEVCommResBacking): + RESOURCE_TYPE = "snp" + + def __init__(self): + super().__init__() + + def allocate(self, requests): + pass + + def free(self): + pass + + def to_specs(self): + pass + + +class _SEVResBackingCaps(_ResBackingCaps): + + def __init__(self, params): + self._cbitpos = None + self._reduced_phys_bits = None + self._sev_device = None + self._max_sev_guests = None + self._max_snp_guests = None + self._pdh = None + self._cert_chain = None + self._cpu0_id = None + + def load(self): + pass + + def is_capable(self, requests): + pass + + def increase(self, backing): + pass + + def decrease(self, backing): + pass + + @property + def max_sev_guests(self): + return self._max_sev_guests + + @property + def max_snp_guests(self): + return self._max_snp_guests + + +class _SEVResBackingMgr(_CVMResBackingMgr): + + def __init__(self, config): + super().__init__(config) + self._caps = _SEVResBackingCaps(config) + _SEVResBackingMgr._platform_flags = config + + def startup(self): + reset_sev_platform() + super().startup() + + def teardown(self): + reset_sev_platform() diff --git a/virttest/vt_agent/managers/resbackings/cvm_platform_mgr/_sev_platform_mgr.py b/virttest/vt_agent/managers/resbackings/cvm_platform_mgr/_sev_platform_mgr.py new file mode 100644 index 0000000000..ad162be551 --- /dev/null +++ b/virttest/vt_agent/managers/resbackings/cvm_platform_mgr/_sev_platform_mgr.py @@ -0,0 +1,6 @@ +def reset_platform(): + pass + + +def rotate_pdh(): + pass diff --git a/virttest/vt_agent/managers/resbackings/cvm_platform_mgr/_tdx_platform_mgr.py b/virttest/vt_agent/managers/resbackings/cvm_platform_mgr/_tdx_platform_mgr.py new file mode 100644 index 0000000000..ad162be551 --- /dev/null +++ b/virttest/vt_agent/managers/resbackings/cvm_platform_mgr/_tdx_platform_mgr.py @@ -0,0 +1,6 @@ +def reset_platform(): + pass + + +def rotate_pdh(): + pass diff --git a/virttest/vt_agent/managers/resbackings/cvm_platform_mgr/cvm_platform_mgr.py b/virttest/vt_agent/managers/resbackings/cvm_platform_mgr/cvm_platform_mgr.py new file mode 100644 index 
0000000000..e69de29bb2 diff --git a/virttest/vt_agent/managers/resbackings/dispatcher.py b/virttest/vt_agent/managers/resbackings/dispatcher.py new file mode 100644 index 0000000000..df0c6b201f --- /dev/null +++ b/virttest/vt_agent/managers/resbackings/dispatcher.py @@ -0,0 +1,49 @@ +from .storage import _dir_backing_mgr +from .storage import _nfs_backing_mgr + +# from .storage import _ceph_backing_mgr +# from .cvm import _sev_backing_mgr +# from .cvm import _tdx_backing_mgr + + +class _BackingMgrDispatcher(object): + + def __init__(self): + self._managers_mapping = dict() + self._backings_mapping = dict() + self._pools_mapping = dict() + + def dispatch_by_pool(self, pool_id): + return self._pools_mapping.get(pool_id, None) + + def dispatch_by_backing(self, backing_id): + return self._backings_mapping.get(backing_id, None) + + def register(self, mgr): + self._managers_mapping[mgr.attached_pool_type] = mgr + + def map_pool(self, pool_id, pool_type): + backing_mgr = self._managers_mapping[pool_type] + self._pools_mapping[pool_id] = backing_mgr + + def unmap_pool(self, pool_id): + del self._pools_mapping[pool_id] + + def map_backing(self, backing_id, backing_mgr): + self._backings_mapping[backing_id] = backing_mgr + + def unmap_backing(self, backing_id): + del self._backings_mapping[backing_id] + + +_backing_mgr_dispatcher = _BackingMgrDispatcher() + +# Register storage backing managers +_backing_mgr_dispatcher.register(_dir_backing_mgr) +_backing_mgr_dispatcher.register(_nfs_backing_mgr) +# _backing_mgr_dispatcher.register(_ceph_backing_mgr) + +# Register cvm backing managers +# _backing_mgr_dispatcher.register(_sev_backing_mgr) +# _backing_mgr_dispatcher.register(_tdx_backing_mgr) diff --git a/virttest/vt_agent/managers/resbackings/pool_connection.py b/virttest/vt_agent/managers/resbackings/pool_connection.py new file mode 100644 index 0000000000..fbc14f5448 --- /dev/null +++ b/virttest/vt_agent/managers/resbackings/pool_connection.py @@ -0,0 +1,27 @@ +import uuid +from abc import ABC, abstractmethod + + +class _ResourcePoolAccess(ABC): + + @abstractmethod + def __init__(self, pool_access_config): + pass + + +class _ResourcePoolConnection(ABC): + + def __init__(self, pool_config, pool_access_config): + self._connected_pool = pool_config["pool_id"] + + @abstractmethod + def startup(self): + pass + + @abstractmethod + def shutdown(self): + pass + + @abstractmethod + def connected(self): + return False diff --git a/virttest/vt_agent/managers/resbackings/storage/__init__.py b/virttest/vt_agent/managers/resbackings/storage/__init__.py new file mode 100644 index 0000000000..2b7d4383f5 --- /dev/null +++ b/virttest/vt_agent/managers/resbackings/storage/__init__.py @@ -0,0 +1,2 @@ +from .dir import _dir_backing_mgr +from .nfs import _nfs_backing_mgr diff --git a/virttest/vt_agent/managers/resbackings/storage/dir/__init__.py b/virttest/vt_agent/managers/resbackings/storage/dir/__init__.py new file mode 100644 index 0000000000..995ae5f7e7 --- /dev/null +++ b/virttest/vt_agent/managers/resbackings/storage/dir/__init__.py @@ -0,0 +1 @@ +from .dir_backing_mgr import _dir_backing_mgr diff --git a/virttest/vt_agent/managers/resbackings/storage/dir/dir_backing.py b/virttest/vt_agent/managers/resbackings/storage/dir/dir_backing.py new file mode 100644 index 0000000000..c4e72711ed --- /dev/null +++ b/virttest/vt_agent/managers/resbackings/storage/dir/dir_backing.py @@ -0,0 +1,33 @@ +import os +from virttest import utils_io +from ...backing import _ResourceBacking + + +class
_DirVolumeBacking(_ResourceBacking): + + def __init__(self, config): + super().__init__(config) + self._size = config["size"] + self._name = config["name"] + + def allocate(self, pool_connection): + path = os.path.join(pool_connection.dir, self._name) + utils_io.dd(path, self._size) + + def release(self, pool_connection): + path = os.path.join(pool_connection.dir, self._name) + os.unlink(path) + + def info(self, pool_connection): + path = os.path.join(pool_connection.dir, self._name) + s = os.stat(path) + return {"path": path, "allocation": s.st_size} + + +def _get_backing_class(resource_type): + """ + Get the backing class for a given resource type in case more + than one resource type is supported by a dir pool + """ + return _DirVolumeBacking diff --git a/virttest/vt_agent/managers/resbackings/storage/dir/dir_backing_mgr.py b/virttest/vt_agent/managers/resbackings/storage/dir/dir_backing_mgr.py new file mode 100644 index 0000000000..e069e605e6 --- /dev/null +++ b/virttest/vt_agent/managers/resbackings/storage/dir/dir_backing_mgr.py @@ -0,0 +1,46 @@ +from ...backing_mgr import _ResourceBackingManager +from .dir_backing import _get_backing_class +from .dir_pool_connection import _DirPoolConnection + + +class _DirBackingManager(_ResourceBackingManager): + _ATTACHED_POOL_TYPE = "dir" + + def __init__(self): + super().__init__() + + def create_pool_connection(self, pool_config, pool_access_config): + pool_conn = _DirPoolConnection(pool_config, pool_access_config) + pool_conn.startup() + self._pool_connections[pool_config["pool_id"]] = pool_conn + + def destroy_pool_connection(self, pool_id): + pool_conn = self._pool_connections[pool_id] + pool_conn.shutdown() + del self._pool_connections[pool_id] + + def create_backing(self, config, need_allocate=False): + pool_id = config["pool_id"] + pool_conn = self._pool_connections[pool_id] + backing_class = _get_backing_class(config["resource_type"]) + backing = backing_class(config) + self._backings[backing.uuid] = backing + if need_allocate: + backing.allocate(pool_conn) + return backing.uuid + + def destroy_backing(self, backing_id, need_release=False): + backing = self._backings[backing_id] + pool_conn = self._pool_connections[backing.source_pool] + if need_release: + backing.release(pool_conn) + del self._backings[backing_id] + + def update_backing(self, backing_id, new_backing_spec): + backing = self._backings[backing_id] + pool_conn = self._pool_connections[backing.source_pool] + backing.update(pool_conn, new_backing_spec) + + def info_backing(self, backing_id): + backing = self._backings[backing_id] + pool_conn = self._pool_connections[backing.source_pool] + return backing.info(pool_conn) diff --git a/virttest/vt_agent/managers/resbackings/storage/dir/dir_pool_connection.py b/virttest/vt_agent/managers/resbackings/storage/dir/dir_pool_connection.py new file mode 100644 index 0000000000..04b73f3be5 --- /dev/null +++ b/virttest/vt_agent/managers/resbackings/storage/dir/dir_pool_connection.py @@ -0,0 +1,25 @@ +import os + +from ...pool_connection import _ResourcePoolConnection + + +class _DirPoolConnection(_ResourcePoolConnection): + + def __init__(self, pool_config, pool_access_config): + super().__init__(pool_config, pool_access_config) + self._dir = pool_config.get("root_dir") + if self._dir is None: + self._create_default_dir() + + def startup(self): + pass + + def shutdown(self): + pass + + def connected(self): + return os.path.exists(self.dir) + + @property + def dir(self): + return self._dir diff --git
a/virttest/vt_agent/managers/resbackings/storage/nfs/__init__.py b/virttest/vt_agent/managers/resbackings/storage/nfs/__init__.py new file mode 100644 index 0000000000..0eb8062f6e --- /dev/null +++ b/virttest/vt_agent/managers/resbackings/storage/nfs/__init__.py @@ -0,0 +1 @@ +from .nfs_backing_mgr import _nfs_backing_mgr diff --git a/virttest/vt_agent/managers/resbackings/storage/nfs/nfs_backing.py b/virttest/vt_agent/managers/resbackings/storage/nfs/nfs_backing.py new file mode 100644 index 0000000000..e5f864b6b3 --- /dev/null +++ b/virttest/vt_agent/managers/resbackings/storage/nfs/nfs_backing.py @@ -0,0 +1,33 @@ +import os +from virttest import utils_io +from ...backing import _ResourceBacking + + +class _NfsVolumeBacking(_ResourceBacking): + + def __init__(self, config): + super().__init__(config) + self._size = config["size"] + self._name = config["name"] + + def allocate(self, pool_connection): + path = os.path.join(pool_connection.mnt, self._name) + utils_io.dd(path, self._size) + + def release(self, pool_connection): + path = os.path.join(pool_connection.mnt, self._name) + os.unlink(path) + + def info(self, pool_connection): + path = os.path.join(pool_connection.mnt, self._name) + s = os.stat(path) + return {"path": path, "allocation": s.st_size} + + +def _get_backing_class(resource_type): + """ + Get the backing class for a given resource type in case more + than one resource type is supported by an nfs pool + """ + return _NfsVolumeBacking diff --git a/virttest/vt_agent/managers/resbackings/storage/nfs/nfs_backing_mgr.py b/virttest/vt_agent/managers/resbackings/storage/nfs/nfs_backing_mgr.py new file mode 100644 index 0000000000..91abd364a9 --- /dev/null +++ b/virttest/vt_agent/managers/resbackings/storage/nfs/nfs_backing_mgr.py @@ -0,0 +1,46 @@ +from ...backing_mgr import _ResourceBackingManager +from .nfs_backing import _get_backing_class +from .nfs_pool_connection import _NfsPoolConnection + + +class _NfsBackingManager(_ResourceBackingManager): + _ATTACHED_POOL_TYPE = "nfs" + + def __init__(self): + super().__init__() + + def create_pool_connection(self, pool_config, pool_access_config): + pool_conn = _NfsPoolConnection(pool_config, pool_access_config) + pool_conn.startup() + self._pool_connections[pool_config["pool_id"]] = pool_conn + + def destroy_pool_connection(self, pool_id): + pool_conn = self._pool_connections[pool_id] + pool_conn.shutdown() + del self._pool_connections[pool_id] + + def create_backing(self, config, need_allocate=False): + pool_id = config["pool_id"] + pool_conn = self._pool_connections[pool_id] + backing_class = _get_backing_class(config["resource_type"]) + backing = backing_class(config) + self._backings[backing.uuid] = backing + if need_allocate: + backing.allocate(pool_conn) + return backing.uuid + + def destroy_backing(self, backing_id, need_release=False): + backing = self._backings[backing_id] + pool_conn = self._pool_connections[backing.source_pool] + if need_release: + backing.release(pool_conn) + del self._backings[backing_id] + + def update_backing(self, backing_id, new_backing_spec): + backing = self._backings[backing_id] + pool_conn = self._pool_connections[backing.source_pool] + backing.update(pool_conn, new_backing_spec) + + def info_backing(self, backing_id): + backing = self._backings[backing_id] + pool_conn = self._pool_connections[backing.source_pool] + return backing.info(pool_conn) diff --git a/virttest/vt_agent/managers/resbackings/storage/nfs/nfs_pool_connection.py
b/virttest/vt_agent/managers/resbackings/storage/nfs/nfs_pool_connection.py new file mode 100644 index 0000000000..56258930ea --- /dev/null +++ b/virttest/vt_agent/managers/resbackings/storage/nfs/nfs_pool_connection.py @@ -0,0 +1,49 @@ +import utils_disk + +from ...pool_connection import _ResourcePoolAccess +from ...pool_connection import _ResourcePoolConnection + + +class _NfsPoolAccess(_ResourcePoolAccess): + """ + Mount options + """ + + def __init__(self, pool_access_config): + self._options = pool_access_config["nfs_options"] + + def __str__(self): + return self._options + + +class _NfsPoolConnection(_ResourcePoolConnection): + + def __init__(self, pool_config, pool_access_config): + super().__init__(pool_config, pool_access_config) + self._connected_pool = pool_config["pool_id"] + self._nfs_server = pool_config["nfs_server"] + self._export_dir = pool_config["export_dir"] + self._nfs_access = _NfsPoolAccess(pool_access_config) + self._mnt = pool_config.get("nfs_mnt_dir") + if self._mnt is None: + self._create_default_mnt() + + def startup(self): + src = "{host}:{export}".format(host=self._nfs_server, export=self._export_dir) + dst = self._mnt + options = str(self._nfs_access) + utils_disk.mount(src, dst, fstype="nfs", options=options) + + def shutdown(self): + src = "{host}:{export}".format(host=self._nfs_server, export=self._export_dir) + dst = self._mnt + utils_disk.umount(src, dst, fstype="nfs") + + def connected(self): + src = "{host}:{export}".format(host=self._nfs_server, export=self._export_dir) + dst = self._mnt + return utils_disk.is_mount(src, dst, fstype="nfs") + + @property + def mnt(self): + return self._mnt diff --git a/virttest/vt_agent/services/__init__.py b/virttest/vt_agent/services/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/virttest/vt_agent/services/virt/image_api.py b/virttest/vt_agent/services/virt/image_api.py new file mode 100644 index 0000000000..6848e76c41 --- /dev/null +++ b/virttest/vt_agent/services/virt/image_api.py @@ -0,0 +1,13 @@ +from ...managers.image import _image_handler_dispatcher + + +def handle_image(config): + """ + :param config: The specified action and the snippet of + the image's spec and meta info used for the handling + :type config: dict + """ + image_handler = _image_handler_dispatcher.dispatch(config) + image_handler.do(config) diff --git a/virttest/vt_agent/services/virt/resbacking_api.py b/virttest/vt_agent/services/virt/resbacking_api.py new file mode 100644 index 0000000000..2ea2b39bd9 --- /dev/null +++ b/virttest/vt_agent/services/virt/resbacking_api.py @@ -0,0 +1,99 @@ +from ...managers.resbackings import _backing_mgr_dispatcher + + +def create_pool_connection(pool_config, pool_access): + pool_id = pool_config["pool_id"] + pool_type = pool_config["pool_type"] + backing_mgr = _backing_mgr_dispatcher.dispatch_by_pool(pool_id) + if backing_mgr is None: + _backing_mgr_dispatcher.map_pool(pool_id, pool_type) + backing_mgr = _backing_mgr_dispatcher.dispatch_by_pool(pool_id) + backing_mgr.create_pool_connection(pool_config, pool_access) + + +def destroy_pool_connection(pool_id): + backing_mgr = _backing_mgr_dispatcher.dispatch_by_pool(pool_id) + backing_mgr.destroy_pool_connection(pool_id) + _backing_mgr_dispatcher.unmap_pool(pool_id) + + +""" +def allocate(backing_id): + backing_mgr = _backing_mgr_dispatcher.dispatch_by_backing(backing_id) + backing_mgr.allocate(backing_id) + + +def release(backing_id): + backing_mgr = _backing_mgr_dispatcher.dispatch_by_backing(backing_id)
+ backing_mgr.release(backing_id) +""" + + +# def create_backing(config): +def create_backing(config, need_allocate=False): + """ + Create a resource backing on the worker node, which is bound to one and + only one resource, VT can access the specific resource allocation with + the backing when starting VM on the worker node + + :param config: The config including the resource's meta and spec data + :type config: dict + :return: The resource id + :rtype: string + """ + pool_id = config["spec"]["pool"] + backing_mgr = _backing_mgr_dispatcher.dispatch_by_pool(pool_id) + # backing_id = backing_mgr.create_backing(config) + backing_id = backing_mgr.create_backing(config, need_allocate) + _backing_mgr_dispatcher.map_backing(backing_id, backing_mgr) + return backing_id + + +# def destroy_backing(backing_id): +def destroy_backing(backing_id, need_release=False): + """ + Destroy the backing, all resources allocated on worker nodes will be + released. + + :param backing_id: The cluster resource id + :type backing_id: string + """ + backing_mgr = _backing_mgr_dispatcher.dispatch_by_backing(backing_id) + # backing_mgr.destroy_backing(backing_id) + backing_mgr.destroy_backing(backing_id, need_release) + _backing_mgr_dispatcher.unmap_backing(backing_id) + + +def info_backing(backing_id): + """ + Get the information of a resource with a specified backing + + We need not get all the information of the resource, because the + static can be got by the resource object, e.g. size, here we only + get the information which is dynamic, such as path and allocation + + :param resource_id: The backing id + :type resource_id: string + :return: The information of a resource, e.g. + { + 'spec':{ + 'allocation': 12, + 'path': [{'node1': '/p1/f1'},{'node2': '/p2/f1'}], + } + } + :rtype: dict + """ + backing_mgr = _backing_mgr_dispatcher.dispatch_by_backing(backing_id) + return backing_mgr.info_backing(backing_id) + + +def update_backing(backing_id, config): + """ + :param backing_id: The resource backing id + :type backing_id: string + :param config: The specified action and the snippet of + the resource's spec and meta info used for update + :type config: dict + """ + backing_mgr = _backing_mgr_dispatcher.dispatch_by_backing(backing_id) + backing_mgr.update_backing(backing_id, config) diff --git a/virttest/vt_imgr/.api.py b/virttest/vt_imgr/.api.py new file mode 100644 index 0000000000..948768e7ae --- /dev/null +++ b/virttest/vt_imgr/.api.py @@ -0,0 +1,106 @@ +from .vt_resmgr import vt_resmgr + + +class ImageNotFound(Exception): + def __init__(self, image_id): + self._id = image_id + + def __str__(self): + return 'Cannot find the pool(id="%s)"' % self._id + + +class UnknownImageType(Exception): + def __init__(self, image_type): + self._type = image_type + + def __str__(self): + return 'Unknown image type "%s"' % self._type + + +def create_image(config): + """ + Create a logical image without any specific storage allocation, + + :param config: The image's meta and spec data + :type config: dict + :return: The image id + :rtype: string + """ + pass + + +def destroy_image(image_id): + """ + Destroy the logical image, the specific storage allocation + will be released, note the image's backing image will not be + touched + + :param image_id: The resource id + :type image_id: string + """ + pass + + +def get_image(image_id): + """ + Get all information for a specified image + + :param image_id: The image id + :type image_id: string + :return: All the information of an image, e.g. 
+ { + 'meta': { + 'id': 'image1', + 'backing': 'image2' + }, + 'spec': { + 'name': 'stg', + 'format': 'qcow2', + 'backing': { + The backing's information here + }, + 'volume': { + 'meta': { + 'id': 'nfs_vol1' + }, + 'spec': { + 'pool': 'nfs_pool1', + 'type': 'volume', + 'size': 65536, + 'name': 'stg.qcow2', + 'path': [{'node1': '/mnt1/stg.qcow2'}, + {'node2': '/mnt2/stg.qcow2'}], + } + } + } + } + :rtype: dict + """ + pass + + +def update_image(image_id, config): + """ + Update an image, the command format: + {'action': arguments}, in which + the 'action' can be the following for a qemu image: + 'create': qemu-img create + 'destroy': Remove the allocated resource + 'convert': qemu-img convert + 'snapshot': qemu-img snapshot + 'resize': qemu-img resize + The arguments value is a dict object that contains all related settings + for a specific action + + Examples: + qemu-img create + {'create': } + qemu-img convert + {'convert': } + + :param image_id: The image id + :type image_id: string + :param config: The specified action and its arguments + :type config: dict + """ + pass diff --git a/virttest/vt_imgr/__init__.py b/virttest/vt_imgr/__init__.py new file mode 100644 index 0000000000..73fcb2e24a --- /dev/null +++ b/virttest/vt_imgr/__init__.py @@ -0,0 +1 @@ +from .vt_imgr import vt_imgr diff --git a/virttest/vt_imgr/images/__init__.py b/virttest/vt_imgr/images/__init__.py new file mode 100644 index 0000000000..eee9605c4e --- /dev/null +++ b/virttest/vt_imgr/images/__init__.py @@ -0,0 +1,15 @@ +from .qemu import _QemuImage + + +_image_classes = dict() +_image_classes[_QemuImage.image_type()] = _QemuImage + + +def get_image_class(image_type): + for t, cls in _image_classes.items(): + if t == image_type: + return cls + return None + + +__all__ = ["get_image_class"] diff --git a/virttest/vt_imgr/images/image.py b/virttest/vt_imgr/images/image.py new file mode 100644 index 0000000000..54c37a5d9f --- /dev/null +++ b/virttest/vt_imgr/images/image.py @@ -0,0 +1,69 @@ +import uuid +from abc import ABC, abstractmethod + + +class _Image(ABC): + """ + This is the upper-level image; in the context of a VM, it maps + to the VM's disk. It can have one or more lower-level images, + e.g.
A qemu image can have a lower-level image chain: + base ---> sn + in which "sn" is the top lower-level image name while "base" is the + backing lower-level image name of "sn" + """ + + _IMAGE_TYPE = None + + # The upper-level image configuration template + _CONF_TEMP = { + "meta": { + "uuid": None, + # The image tag, usually defined in "images" param + "name": None, + "type": _IMAGE_TYPE, + }, + "spec": {}, + } + + def __init__(self, image_tag, params): + self._id = uuid.uuid4() + self._image_name = image_tag + self._params = params + self._virt_images = None + + @classmethod + def image_type(cls): + return cls._IMAGE_TYPE + + @property + def image_id(self): + return self._id + + @property + @abstractmethod + def image_spec(self): + raise NotImplementedError + + @abstractmethod + def define_conf(self): + raise NotImplementedError + + @abstractmethod + def create_virt_images(self): + raise NotImplementedError + + @abstractmethod + def destroy_virt_images(self): + raise NotImplementedError + + @abstractmethod + def update_virt_images(self): + raise NotImplementedError + + @abstractmethod + def info_virt_images(self): + raise NotImplementedError + + @abstractmethod + def backup_virt_images(self): + raise NotImplementedError diff --git a/virttest/vt_imgr/images/qemu/__init__.py b/virttest/vt_imgr/images/qemu/__init__.py new file mode 100644 index 0000000000..a9808db67e --- /dev/null +++ b/virttest/vt_imgr/images/qemu/__init__.py @@ -0,0 +1 @@ +from .qemu_image import _QemuImage diff --git a/virttest/vt_imgr/images/qemu/qemu_image.py b/virttest/vt_imgr/images/qemu/qemu_image.py new file mode 100644 index 0000000000..12551de419 --- /dev/null +++ b/virttest/vt_imgr/images/qemu/qemu_image.py @@ -0,0 +1,83 @@ +import copy +from collections import OrderedDict + +from ..image import _Image +from .qemu_virt_image import get_virt_image_class + + +class _QemuImage(_Image): + + # The upper-level image type + _IMAGE_TYPE = "qemu" + + # The qemu upper-level image configuration template + # The topology of the lower-level images, which construct + # the upper-level qemu image + _CONF_TEMP = {**_Image._CONF_TEMP, "spec": {"topology": None}} + + def __init__(self, image_tag, params): + super().__init__(image_tag, params) + # Store images with the same order as tags defined in image_chain + self._virt_images = OrderedDict() + self.create_virt_images() + self._conf = self.define_conf() + + @property + def conf(self): + return self._conf + + @property + def image_spec(self): + pass + + @classmethod + def _define_virt_image_config(cls, image_tag, image_params): + config = dict(meta=dict(), spec=dict()) + image_format = image_params.get("image_format", "qcow2") + virt_image_class = get_virt_image_class(image_format) + config = virt_image_class.define_config(image_tag, image_params) + return {image_tag: config} + + @classmethod + def _define_image_chain_config(cls, image_chain_list, params): + image_tag = image_chain_list[0] + image_params = params.object_params(image_tag) + config = cls._define_virt_image_config(image_tag, image_params) + if len(image_chain_list) > 1: + config[image_tag]["spec"]["backing"] = cls._define_image_chain_config(image_chain_list[1:], params) + return config + + def define_conf(self): + config = copy.deepcopy(self._CONF_TEMP) + config["meta"]["name"] = self._image_name + + top_image_params = self._params.object_params(self._image_name) + top_image_chain = top_image_params.objects("image_chain") + if top_image_chain: + config["spec"]["topology"] = self._define_image_chain_config(top_image_chain, self._params) + else: + config["spec"]["topology"] =
self._define_virt_image_config(self._image_name, top_image_params) + return config + + def create_virt_images(self): + top_image_params = self._params.object_params(self._image_name) + top_image_chain = top_image_params.objects("image_chain") + if not top_image_chain: + top_image_chain = [self._image_name] + for image_tag in top_image_chain: + image_params = self._params.object_params(image_tag) + image_format = image_params.get("image_format", "qcow2") + virt_image_class = get_virt_image_class(image_format) + virt_image = virt_image_class(image_tag, image_params) + #virt_image.create() + self._virt_images[image_tag] = virt_image + + def destroy_virt_images(self): + for image_tag, virt_image in list(self._virt_images.items()): + if not virt_image.keep: + virt_image.destroy() + del self._virt_images[image_tag] + + def update_virt_images(self): + pass + + def info_virt_images(self): + pass diff --git a/virttest/vt_imgr/images/qemu/qemu_virt_image/__init__.py b/virttest/vt_imgr/images/qemu/qemu_virt_image/__init__.py new file mode 100644 index 0000000000..3d353afb0a --- /dev/null +++ b/virttest/vt_imgr/images/qemu/qemu_virt_image/__init__.py @@ -0,0 +1,16 @@ +from .raw_qemu_virt_image import _RawQemuVirtImage +from .qcow2_qemu_virt_image import _Qcow2QemuVirtImage +from .luks_qemu_virt_image import _LuksQemuVirtImage + + +_image_classes = dict() +_image_classes[_RawQemuVirtImage.format()] = _RawQemuVirtImage +_image_classes[_Qcow2QemuVirtImage.format()] = _Qcow2QemuVirtImage +_image_classes[_LuksQemuVirtImage.format()] = _LuksQemuVirtImage + + +def get_virt_image_class(image_format): + return _image_classes.get(image_format) + + +__all__ = ['get_virt_image_class'] diff --git a/virttest/vt_imgr/images/qemu/qemu_virt_image/luks_qemu_virt_image.py b/virttest/vt_imgr/images/qemu/qemu_virt_image/luks_qemu_virt_image.py new file mode 100644 index 0000000000..c7ae10b51a --- /dev/null +++ b/virttest/vt_imgr/images/qemu/qemu_virt_image/luks_qemu_virt_image.py @@ -0,0 +1,15 @@ +from .qemu_virt_image import _QemuVirtImage +from virttest.vt_resmgr import * + + +class _LuksQemuVirtImage(_QemuVirtImage): + _IMAGE_FORMAT = "luks" + + _CONFIG_TEMP = {**_QemuVirtImage._CONFIG_TEMP, + "spec": {"format": "luks", + "preallocation": None, + "extent_size_hint": None, + "encryption": {"data": None, + "file": None}} + } diff --git a/virttest/vt_imgr/images/qemu/qemu_virt_image/qcow2_qemu_virt_image.py b/virttest/vt_imgr/images/qemu/qemu_virt_image/qcow2_qemu_virt_image.py new file mode 100644 index 0000000000..6a77450264 --- /dev/null +++ b/virttest/vt_imgr/images/qemu/qemu_virt_image/qcow2_qemu_virt_image.py @@ -0,0 +1,16 @@ +from .qemu_virt_image import _QemuVirtImage +from virttest.vt_resmgr import * + + +class _Qcow2QemuVirtImage(_QemuVirtImage): + _IMAGE_FORMAT = "qcow2" + _CONFIG_TEMP = {**_QemuVirtImage._CONFIG_TEMP, + "spec": {"format": "qcow2", + "preallocation": None, + "extent_size_hint": None, + "cluster_size": None, + "lazy_refcounts": None, + "compat": None, + "compression_type": None} + } diff --git a/virttest/vt_imgr/images/qemu/qemu_virt_image/qemu_virt_image.py b/virttest/vt_imgr/images/qemu/qemu_virt_image/qemu_virt_image.py new file mode 100644 index 0000000000..4615bd69c5 --- /dev/null +++ b/virttest/vt_imgr/images/qemu/qemu_virt_image/qemu_virt_image.py @@ -0,0 +1,33 @@ +from ...virt_image import _VirtImage +from virttest.vt_resmgr import * + + +class _QemuVirtImage(_VirtImage): + """ + A virt image has one storage resource(volume), take a qemu virt image + as an example, the cartesian params beginning
with 'image_', e.g. + 'image_size', describe this object + """ + + @_VirtImage.config.setter + def config(self, image_params): + resource_type = "volume" + self.spec["volume"] = define_resource_config(resource_type, image_params) + + @property + def keep(self): + return self._keep_alive + + def create_volume(self): + volume_config = self.spec["volume"] + volume_id = create_resource(volume_config) + self.spec["volume"]["meta"]["uuid"] = volume_id + + def destroy_volume(self): + pass + + def info(self, force_share=False, output="human"): + pass + + def update(self, config): + pass diff --git a/virttest/vt_imgr/images/qemu/qemu_virt_image/raw_qemu_virt_image.py b/virttest/vt_imgr/images/qemu/qemu_virt_image/raw_qemu_virt_image.py new file mode 100644 index 0000000000..66bef22284 --- /dev/null +++ b/virttest/vt_imgr/images/qemu/qemu_virt_image/raw_qemu_virt_image.py @@ -0,0 +1,13 @@ +from .qemu_virt_image import _QemuVirtImage +from virttest.vt_resmgr import * + + +class _RawQemuVirtImage(_QemuVirtImage): + _IMAGE_FORMAT = "raw" + + _CONFIG_TEMP = {**_QemuVirtImage._CONFIG_TEMP, + "spec": {"format": _IMAGE_FORMAT, + "preallocation": None, + "extent_size_hint": None} + } diff --git a/virttest/vt_imgr/images/virt_image.py b/virttest/vt_imgr/images/virt_image.py new file mode 100644 index 0000000000..756fa63c6f --- /dev/null +++ b/virttest/vt_imgr/images/virt_image.py @@ -0,0 +1,71 @@ +from abc import ABC, abstractmethod + + +class _VirtImage(ABC): + """ + The lower-level image, which has a storage resource(volume), is + defined by the cartesian params beginning with 'image_'. One or + more lower-level images can represent an upper-level image. + """ + + _IMAGE_FORMAT = None + + _CONFIG_TEMP = { + "meta": None, + "spec": {"name": None, + "format": _IMAGE_FORMAT, + "volume": None} + } + + def __init__(self, image_tag, image_params): + self._name = image_tag + self._params = image_params + self._config = None + self.config = image_params + + @classmethod + def format(cls): + return cls._IMAGE_FORMAT + + @property + def name(self): + return self._name + + @property + def config(self): + return self._config + + @config.setter + @abstractmethod + def config(self, image_params): + raise NotImplementedError + + @property + def spec(self): + return self.config["spec"] + + @property + def meta(self): + return self.config["meta"] + + @property + @abstractmethod + def keep(self): + raise NotImplementedError + + @abstractmethod + def create(self): + raise NotImplementedError + + @abstractmethod + def destroy(self): + raise NotImplementedError + + @abstractmethod + def info(self): + raise NotImplementedError + + @abstractmethod + def update(self, config): + raise NotImplementedError diff --git a/virttest/vt_imgr/vt_imgr.py b/virttest/vt_imgr/vt_imgr.py new file mode 100644 index 0000000000..e16379fb00 --- /dev/null +++ b/virttest/vt_imgr/vt_imgr.py @@ -0,0 +1,184 @@ +""" +The upper-level image manager.
+ +# Create the upper-level image object +image_id = vt_imgr.create_image("image1", params) + +# qemu-img create +vt_imgr.update_image(image_id, {"qemu-img-create":{}}) + +# Query the summary spec info of the "image1" +image_spec = vt_imgr.query_image(image_id, request="config", verbose=False) + {"meta": {"uuid": "uuid-sn", + "name": "sn", + "type": "qemu", + "topology": {"chain": ["base", "sn"]}}, + "spec": {"images": ["base": {"meta": {}, + "spec": {"format": "raw", + "volume": "volume-uuid1"} + }, + "sn": {"meta": {}, + "spec": {"format": "qcow2", + "volume": "volume-uuid2"} + } + ] + } + } + +""" + +from .images import get_image_class + + +# TODO: +# Add drivers for diff handlers +# Add access permission for images +# serialize +class _VTImageManager(object): + + def __init__(self): + self._images = dict() + + def create_image(self, image_tag, params): + """ + Create an upper-level image (e.g. in the context of a VM, it + maps to a VM's disk) object by its cartesian params without + any storage allocation. All its lower-level images and their + mapping storage resource objects will be created. + E.g. An upper-level qemu image has a lower-level image chain + base ---> sn + | | + resource resource + :param image_tag: The image tag defined in cartesian params, + e.g. for a qemu image, the tag should be + the one of the top image("sn" in the example + above) if an image chain is defined, often + it's the tag defined in the images param, + e.g. "image1" + :type image_tag: string + :param params: The params for all the lower-level images + Note it's *NOT* an image-specific params like + params.object_params("sn") + *BUT* the params for both "sn" and "base" + Examples: + 1. images_vm1 = "image1 sn" + image_chain_sn = "base sn" + image_tag = "sn" + params = the_case_params.object_params('vm1') + 2. images = 'image1 stg' + image_tag = 'image1' + params = the_case_params + :type params: A Params object + :return: The image id + :rtype: string + """ + image_params = params.object_params(image_tag) + image_type = image_params.get('image_type', 'qemu') + image_class = get_image_class(image_type) + image = image_class(image_tag, params) + #image.create_virt_images() + self._images[image.image_id] = image + return image.image_id + + def destroy_image(self, image_id): + """ + Destroy a specified image. All its storage allocation should + be released. Note if 'remove_image=no', then don't release the + storage allocation. + :param image_id: The image id + :type image_id: string + """ + image = self._images.get(image_id) + image.destroy_virt_images() + if not image.virt_images: + del(self._images[image_id]) + + def update_image(self, image_id, config): + """ + Update a specified upper-level image + Config format: + {cmd: arguments} + Supported commands for a qemu image: + create: Create the specified lower-level images(qemu-img create) + destroy: Destroy the specified lower-level images + resize: Resize the specified qcow2 lower-level images(qemu-img resize) + map: Map the qcow2 virt image, e.g. qemu-img map + convert: Convert the specified virt image to another, e.g. qemu-img convert + commit: Commit the specified virt image, e.g. qemu-img commit + snapshot: Create a snapshot, e.g. qemu-img snapshot + rebase: Rebase the virt image, e.g.
qemu-img rebase + add: Add a lower-level image + delete: Delete a lower-level image + Note: Not all images support the above operations + The arguments value is a dict object that contains all related settings + for a specific command + :param image_id: The image id + :type image_id: string + """ + image = self._images.get(image_id) + image.update_virt_images(config) + + def backup_image(self, image_id): + image = self._images.get(image_id) + image.backup_virt_images() + + def query_image(self, image_id, request, verbose=False): + """ + Query the configuration of a specified upper-level image, the + general format of the image configuration: + {"meta": {"uuid": "zzz", + "name": "xxx", + "type": "yyy", + "topology": {}}, + "spec": {"images":[]} + } + E.g. A qemu image having an image chain: + {"meta": {"uuid": "uuid-sn", + "name": "sn", + "type": "qemu", + "topology": {"chain": ["base", "sn"]}}, + "spec": {"images": ["base": {"meta": {}, + "spec": {"format": "raw", + "volume": {"meta": {"uuid": "id1"}, + "spec": {"size": 5678}} + } + }, + "sn": {"meta": {}, + "spec": {"format": "qcow2", + "volume": {"meta": {"uuid": "id2"}, + "spec": {"size": 5678}} + } + } + ] + } + } + :param request: The query content, format: + config[.meta[.]] + config[.spec[.images.[.meta[.]]]] + config[.spec[.images.[.spec[.]]]] + Note the prefix "config.spec.images" can be omitted when + querying a specific image's configuration: + [.meta[.]] + [.spec[.]] + Examples: + config + config.spec.images + config.spec.images.sn.spec.volume + sn.spec + sn.spec.volume.spec.size + :type request: string + :param verbose: False: Return a summary of the configuration + E.g. request = "sn.spec" + response = {"format": "qcow2", + "volume": "id1"} + True: Return the detailed configuration + :type verbose: boolean + :return: The upper-level image's configuration, it can be either + the whole one or a snippet + :rtype: dict + """ + image = self._images.get(image_id) + return image.info_virt_images() + + +vt_imgr = _VTImageManager() diff --git a/virttest/vt_resmgr/__init__.py b/virttest/vt_resmgr/__init__.py new file mode 100644 index 0000000000..0a0e47b0b0 --- /dev/null +++ b/virttest/vt_resmgr/__init__.py @@ -0,0 +1 @@ +from .api import * diff --git a/virttest/vt_resmgr/api.py b/virttest/vt_resmgr/api.py new file mode 100644 index 0000000000..d3cac988b7 --- /dev/null +++ b/virttest/vt_resmgr/api.py @@ -0,0 +1,256 @@ +""" +The lower-level resource management APIs, open to the test cases. +A test case can call these APIs to handle the resource directly, e.g. +allocate a volume from an nfs pool, or call the upper-level APIs to +handle a volume indirectly, e.g. create a qcow2 qemu image, where the image +manager calls these APIs to allocate a volume.
+ +# Create a volume from a nfs pool +image_params = params.object_params("stg") +config = define_resource_config("volume", image_params) +res_id = create_resource(config) + +# Bind the nfs resource to worker nodes(resource allocated) +config = {'bind': {'nodes': ['node1', 'node2'], 'pool': 'nfspool1'}} +update_resource(res_id, config) + +# Unbind the nfs resource to node1 +config = {'unbind': {'nodes': ['node1']}} +update_resource(res_id, config) + +# Unbind the nfs resource(resource released) +config = {'unbind': {}} +update_resource(res_id, config) + +# Destroy the nfs resource +destroy_resource(res_id) +""" +from .vt_resmgr import vt_resmgr + + +class PoolNotFound(Exception): + def __init__(self, pool_id): + self._id = pool_id + + def __str__(self): + return 'Cannot find the pool(id="%s)"' % self._id + + +class UnknownPoolType(Exception): + def __init__(self, pool_type): + self._type = pool_type + + def __str__(self): + return 'Unknown pool type "%s"' % self._type + + +class ResourceNotFound(Exception): + pass + + +class ResourceBusy(Exception): + pass + + +class ResourceNotAvailable(Exception): + pass + + +class UnknownResourceType(Exception): + pass + + +def register_resouce_pool(pool_params): + """ + Register a resource pool, the pool should be ready for + use before registration + + :param pool_params: The pool's cartesian params, e.g. + :type pool_params: dict or Param + :return: The resource pool id + :rtype: string + """ + pool_id = vt_resmgr.register_pool(pool_params) + if pool_id is None: + raise UnknownPoolType(pool_params["pool_type"]) + return pool_id + + +def unregister_resouce_pool(pool_id): + """ + Unregister a resource pool + + :param pool_id: The id of the pool to unregister + :type pool_id: string + """ + vt_resmgr.unregister_pool(pool_id) + + +def attach_resource_pool(pool_id): + """ + Attach the registered pool to worker nodes, then the pool can be + accessed by the worker nodes + + :param pool_id: The id of the pool to attach + :type pool_id: string + """ + vt_resmgr.attach_pool(pool_id) + + +def detach_resource_pool(pool_id): + """ + Detach the pool from the worker nodes, after that, the pool cannot + be accessed + + :param pool_id: The id of the pool to detach + :type pool_id: string + """ + vt_resmgr.detach_pool(pool_id) + + +def define_resource_config(resource_type, resource_params): + """ + Define a resource's configuration by its cartesian params + + :param resource_type: The resource type, it's usually implied, e.g. + the image's storage resource is a "volume", + supported: "volume" + :type resource_type: string + :param resource_params: The resource's specific params, usually + defined by an upper-level object, e.g. + "image1" has a storage resource, so + resource_params = image1's params + i.e. 
use image1's params to define its + storage resource's configuration + :type resource_params: Param + :return: The resource's configuration, + format: {"meta":{...}, "spec":{...}} + The specific attributes depend on the specific resource + :rtype: dict + """ + pool_id = vt_resmgr.select_pool(resource_type, resource_params) + if pool_id is None: + raise ResourceNotAvailable() + pool = vt_resmgr.get_pool_by_id(pool_id) + return pool.define_resource_config(resource_type, resource_params) + + +def create_resource(resource_type, resource_params): + pool_id = vt_resmgr.select_pool(resource_type, resource_params) + pool = vt_resmgr.get_pool_by_id(pool_id) + config = pool.define_resource_config(resource_type, resource_params) + return pool.create_resource(config) + + +def create_resource(config): + """ + Create a logical resource without any specific resource allocation, + the following is required to create a new resource: + 'meta': + It depends on the specific resource + 'spec': + 'type': The resource type, e.g. 'volume' + 'pool': The id of the pool where the resource will be allocated + The other attributes of a specific resource, e.g. the 'size' of + a file-based volume + Example: + {'meta':{},'spec':{'size':123,'pool':'nfs_pool1','name':'stg'}} + + :param config: The config includes the resource's meta and spec data, + this is generated by define_resource function + :type config: dict + :return: The resource id + :rtype: string + """ + pool_id = config["spec"]["pool"] + pool = vt_resmgr.get_pool_by_id(pool_id) + if pool is None: + raise PoolNotFound(pool_id) + return pool.create_resource(config) + + +def destroy_resource(resource_id): + """ + Destroy the logical resource, the specific resource allocation + will be released + + :param resource_id: The resource id + :type resource_id: string + """ + pool = vt_resmgr.get_pool_by_resource(resource_id) + pool.destroy_resource(resource_id) + + +def query_resource(resource_id, request): + """ + Query the configuration of a specified resource, the general format + of the resource configuration: + {"meta": {"uuid": "xxx" + "type": "yyy", + "bindings": []} + "spec": {"pool": "xxx", ...}} + E.g. 
A storage volume resource + {'meta': { + 'uuid': 'res_id1', + 'type': 'volume', + 'bindings': [{'node': 'node1', + 'backing': 'ref1'}, + ] + }, + 'spec': { + 'pool': 'nfs_pool1', + 'size': 65536, + 'path': '/mnt/sn.qcow2', + } + } + :param resource_id: The resource id + :type resource_id: string + :param request: The query content, format: + config[.meta[.]] + config[.spec[.]] + Examples: + config + config.meta + config.spec.pool + :type request: string + :return: The resource's configuration, it can be either the whole + one or a snippet + :rtype: dict + """ + pool = vt_resmgr.get_pool_by_resource(resource_id) + return pool.info_resource(resource_id) + + +def update_resource(resource_id, config): + """ + Update a resource, the config format: + {'action': arguments} + Supported actions: + 'bind': Bind a specified resource to one or more worker nodes in order + to access the specific resource allocation, note the allocation + is done within the bind command + 'unbind': Unbind a specified resource from one or more worker nodes, + the specific resource allocation will be released only when + all bindings are gone + 'resize': Resize a resource, it's only available for the storage volume + resource currently + The arguments is a dict object which contains all related settings for a + specific action + + Examples: + Bind a resource to one or more nodes + {'bind': {'nodes': ['node1'], 'pool': 'nfspool1'}} + {'bind': {'nodes': ['node1', 'node2'], 'pool': 'nfspool1'}} + Unbind a resource from one or more nodes + {'unbind': {'nodes': ['node1']}} + {'unbind': {'nodes': ['node1', 'node2']}} + Resize a specified storage volume resource + {'resize': {'spec': {'size': 123456}}} + + :param resource_id: The resource id + :type resource_id: string + :param config: The specified action and its arguments + :type config: dict + """ + pool = vt_resmgr.get_pool_by_resource(resource_id) + pool.update_resource(resource_id, config) diff --git a/virttest/vt_resmgr/resources/__init__.py b/virttest/vt_resmgr/resources/__init__.py new file mode 100644 index 0000000000..54b18b8118 --- /dev/null +++ b/virttest/vt_resmgr/resources/__init__.py @@ -0,0 +1,14 @@ +from .storage import _CephPool +from .storage import _DirPool + + +_pool_classes = dict() +_pool_classes[_CephPool.pool_type] = _CephPool +_pool_classes[_DirPool.pool_type] = _DirPool + + +def get_resource_pool_class(pool_type): + return _pool_classes.get(pool_type) + + +__all__ = ["get_resource_pool_class"] diff --git a/virttest/vt_resmgr/resources/cvm/__init__.py b/virttest/vt_resmgr/resources/cvm/__init__.py new file mode 100644 index 0000000000..389da0b231 --- /dev/null +++ b/virttest/vt_resmgr/resources/cvm/__init__.py @@ -0,0 +1 @@ +from .api import _cvm_resmgr diff --git a/virttest/vt_resmgr/resources/cvm/api.py b/virttest/vt_resmgr/resources/cvm/api.py new file mode 100644 index 0000000000..df858f695b --- /dev/null +++ b/virttest/vt_resmgr/resources/cvm/api.py @@ -0,0 +1,73 @@ +import logging + +from ...resmgr import Resource, ResMgr + + +LOG = logging.getLogger("avocado." 
+ __name__) + + +class CVMResMgrError(Exception): + pass + + +class SEVResource(Resource): + TYPE = "sev" + + def _to_attributes(self, resource_params): + pass + + @property + def requests(self): + return {"type": self.TYPE} + + +class SNPResource(Resource): + TYPE = "snp" + + def _to_attributes(self, resource_params): + pass + + @property + def requests(self): + return {"type": self.TYPE} + + +class TDXResource(Resource): + TYPE = "tdx" + + def _to_attributes(self, resource_params): + pass + + @property + def requests(self): + return {"type": self.TYPE} + + +class CVMResMgr(ResMgr): + + def _initialize(self, config): + pass + + def check_resource_managed(self, spec): + pass + + def _get_resource_type(self, spec): + return spec["type"] + + def is_cvm_supported(node_uuid): + """ + Check if the platform supports CVM + """ + node = get_node(node_uuid) + return node.proxy.is_cvm_supported() + + def enabled(self, resource_type, node_uuid): + """ + Check if the platform supports a specific CVM type + e.g. a AMD SEV/SNP machine cannot allocate a TDX resource + """ + node = get_node(node_uuid) + return node.proxy.enabled(resource_type) + + +_cvm_resmgr = CVMResMgr() diff --git a/virttest/vt_resmgr/resources/cvm/conductor.py.bak b/virttest/vt_resmgr/resources/cvm/conductor.py.bak new file mode 100644 index 0000000000..6c5a3ddd4c --- /dev/null +++ b/virttest/vt_resmgr/resources/cvm/conductor.py.bak @@ -0,0 +1,58 @@ +class Conductor(object): + CHANNEL_TYPE = None + + def __init__(self, node_id): + self._channel = None + self._node_id = node_id + + def _worker(self, node_id): + return get_node(node_id) + + def create(self): + pass + + def destroy(self): + pass + + @property + def channel(self): + return self._channel + + +class RPCConductor(Conductor): + CHANNEL_TYPE = 'rpc' + + def __init__(self, node_id): + super().__init__(node_id) + + def create(self): + node = self._worker(self._node_id) + self._channel = node.proxy.virt + + def destroy(self): + self._channel = None + + +class SSHConductor(Conductor): + CHANNEL_TYPE = 'ssh' + + def __init__(self, node_id): + super().__init__(node_id) + + def create(self): + node = self._worker(self._node_id) + self._channel = node.connect(node.connection_auth) + + def destroy(self): + self._channel.close() + self._channel = None + + +class Channel(object): + @staticmethod + def channel(node_id, channel_type): + for cls in Conductor.__subclasses__: + if cls.CHANNEL_TYPE = channel_type: + return cls(node_id) + break + return None diff --git a/virttest/vt_resmgr/resources/pool.py b/virttest/vt_resmgr/resources/pool.py new file mode 100644 index 0000000000..d854c44f00 --- /dev/null +++ b/virttest/vt_resmgr/resources/pool.py @@ -0,0 +1,95 @@ +import uuid +from abc import ABC, abstractmethod + +from .resource import _Resource + + +class _ResourcePool(ABC): + """ + A resource pool is used to manage resources. 
A resource must be
+    allocated from a specific pool, and a pool can hold many resources
+    """
+
+    _POOL_TYPE = None
+
+    def __init__(self, pool_params):
+        self._id = uuid.uuid4()
+        self._resources = dict()  # {resource id: resource object}
+        self._managed_resource_types = list()
+        self._accesses = dict()  # {node id: pool access object}
+        self._initialize(pool_params)
+
+    def _initialize(self, pool_params):
+        self._name = pool_params.get("pool_name")
+
+    def check_resource_managed(self, spec):
+        """
+        Check if this is the pool which manages the specified resource
+        """
+        res_type = self._get_resource_type(spec)
+        return res_type in self._managed_resource_types
+
+    def _get_resource_type(self, spec):
+        raise NotImplementedError
+
+    @abstractmethod
+    def meet_resource_request(self, resource_type, resource_params):
+        """
+        Check if the pool can support a resource's allocation
+        """
+        raise NotImplementedError
+
+    @abstractmethod
+    def define_resource_config(self, resource_type, resource_params):
+        """
+        Define the resource's configuration, including both "meta" and "spec"
+        """
+        raise NotImplementedError
+
+    @abstractmethod
+    def create_resource(self, config):
+        """
+        Create a resource, no real resource allocated
+        """
+        raise NotImplementedError
+
+    def destroy_resource(self, resource_id):
+        """
+        Destroy the resource, all its backings should be released
+        """
+        res = self._resources[resource_id]
+        res.destroy_bindings()
+        del self._resources[resource_id]
+
+    @abstractmethod
+    def update_resource(self, resource_id, config):
+        raise NotImplementedError
+
+    def info_resource(self, resource_id):
+        """
+        Get the configuration of a specified resource
+        """
+        res = self._resources.get(resource_id)
+        return res.resource_info
+
+    @property
+    def attaching_nodes(self):
+        return self._accesses
+
+    @property
+    def pool_capability(self):
+        node_id = list(self.attaching_nodes)[0]
+        node = get_node(node_id)
+        return node.proxy.get_pool_capability()
+
+    @property
+    def pool_name(self):
+        return self._name
+
+    @property
+    def pool_id(self):
+        return self._id
+
+    @property
+    def resources(self):
+        return self._resources
+
+    @classmethod
+    def pool_type(cls):
+        return cls._POOL_TYPE
+
+    @property
+    def pool_config(self):
+        pass
diff --git a/virttest/vt_resmgr/resources/resource.py b/virttest/vt_resmgr/resources/resource.py
new file mode 100644
index 0000000000..aba31a6a9c
--- /dev/null
+++ b/virttest/vt_resmgr/resources/resource.py
@@ -0,0 +1,144 @@
+import uuid
+from abc import ABC, abstractmethod
+
+
+class _ResourceBinding(object):
+    """
+    A binding binds a resource to an allocated resource backing
+    at a worker node. A resource can have many bindings, but one
+    binding can only bind one backing at one worker node.
+    """
+
+    def __init__(self, pool_id, node_id):
+        self._pool_id = pool_id
+        self._node_id = node_id
+        self._backing_id = None
+
+    def create_backing(self, resource_config, need_allocate=False):
+        """
+        Create a resource backing object via RPC
+        """
+        node = get_node(self._node_id)
+        self._backing_id = node.proxy.create_backing(resource_config, need_allocate)
+
+    def destroy_backing(self, need_release=False):
+        """
+        Destroy the resource backing object via RPC
+        """
+        node = get_node(self._node_id)
+        node.proxy.destroy_backing(self._backing_id, need_release)
+
+    def update_backing(self, spec):
+        node = get_node(self._node_id)
+        node.proxy.update_backing(self._backing_id, spec)
+
+    @property
+    def reference(self):
+        return {"node": self.node_id, "id": self.backing_id}
+
+    @property
+    def node_id(self):
+        """
+        Get the node id of the resource backing
+        """
+        return self._node_id
+
+    @property
+    def backing_id(self):
+        """
+        Get the resource backing id
+        """
+        return self._backing_id
+
+
+class _Resource(ABC):
+    """
+    A resource defines what users request; it is independent of a VM.
+    Users can request a kind of resource for any purpose, and a resource
+    can bind several allocated resource backings at different worker nodes.
+
+    The common attributes of a resource:
+      meta:
+        resource id
+        access:
+          nodes:
+          permission:
+        references:
+          node id
+          backing id
+      spec:
+        resource pool id
+        specific attributes
+    """
+
+    _RESOURCE_TYPE = None
+
+    def __init__(self, resource_config):
+        self._id = uuid.uuid4()
+        self._name = None
+        self._bindings = dict()
+        self._initialize(resource_config)
+
+    def _initialize(self, resource_config):
+        self._pool_id = resource_config.get("pool")
+
+    @property
+    def resource_type(self):
+        return self._RESOURCE_TYPE
+
+    @property
+    def resource_id(self):
+        return self._id
+
+    @property
+    def resource_pool(self):
+        return self._pool_id
+
+    @property
+    @abstractmethod
+    def resource_info(self):
+        """
+        Static resource configurations as well as the dynamic ones,
+        the former comes from users' settings while the latter comes
+        from the resource allocation accessed from the worker nodes
+        """
+        raise NotImplementedError
+
+    @abstractmethod
+    def info_bindings(self):
+        raise NotImplementedError
+
+    @abstractmethod
+    def create_bindings(self, nodes):
+        """
+        Create the bindings on the specified worker nodes
+        """
+        raise NotImplementedError
+
+    @abstractmethod
+    def destroy_bindings(self, nodes):
+        """
+        Destroy the bindings on the specified worker nodes
+        """
+        raise NotImplementedError
+
+    @abstractmethod
+    def update_bindings(self, config):
+        raise NotImplementedError
+
+    @abstractmethod
+    def _update_meta(self, new_meta):
+        raise NotImplementedError
+
+    @abstractmethod
+    def _update_spec(self, new_spec):
+        raise NotImplementedError
+
+    def update_config(self, new_config):
+        meta = new_config.get("meta")
+        if meta is not None:
+            self._update_meta(meta)
+
+        spec = new_config.get("spec")
+        if spec is not None:
+            self._update_spec(spec)
diff --git a/virttest/vt_resmgr/resources/resource_handlers.py b/virttest/vt_resmgr/resources/resource_handlers.py
new file mode 100644
index 0000000000..f718fc55df
--- /dev/null
+++ b/virttest/vt_resmgr/resources/resource_handlers.py
@@ -0,0 +1,33 @@
+from abc import ABC, abstractmethod
+
+
+class _UpdateCommand(ABC):
+    _UPDATE_ACTION = None
+
+    @staticmethod
+    @abstractmethod
+    def execute(resource, arguments):
+        raise NotImplementedError
+
+    @classmethod
+    def action(cls):
+        return cls._UPDATE_ACTION
+
+
+class _BindCommand(_UpdateCommand):
+    _UPDATE_ACTION = "bind"
+
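+    # Per the update_resource() API, the 'bind' arguments are expected to
+    # carry both the target worker nodes and the pool, e.g.
+    # {'nodes': ['node1', 'node2'], 'pool': 'nfspool1'}.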
+    @staticmethod
+    def execute(resource, arguments):
+        pool = arguments["pool"]
+        nodes = arguments["nodes"]
+        resource.create_bindings(pool, nodes)
+
+
+class _UnbindCommand(_UpdateCommand):
+    _UPDATE_ACTION = "unbind"
+
+    @staticmethod
+    def execute(resource, arguments):
+        nodes = arguments.get("nodes")
+        resource.destroy_bindings(nodes)
diff --git a/virttest/vt_resmgr/resources/storage/__init__.py b/virttest/vt_resmgr/resources/storage/__init__.py
new file mode 100644
index 0000000000..116d616d9b
--- /dev/null
+++ b/virttest/vt_resmgr/resources/storage/__init__.py
@@ -0,0 +1,14 @@
+from .dir import _DirPool
+from .ceph import _CephPool
+#from .nfs import _NfsPool
+#from .nbd import _NbdPool
+#from .iscsi_direct import _IscsiDirectPool
+
+
+__all__ = (
+    "_CephPool",
+    "_DirPool",
+#    "_NfsPool",
+#    "_NbdPool",
+#    "_IscsiDirectPool",
+)
diff --git a/virttest/vt_resmgr/resources/storage/ceph/__init__.py b/virttest/vt_resmgr/resources/storage/ceph/__init__.py
new file mode 100644
index 0000000000..8ec3b25a7a
--- /dev/null
+++ b/virttest/vt_resmgr/resources/storage/ceph/__init__.py
@@ -0,0 +1 @@
+from .ceph_pool import _CephPool
diff --git a/virttest/vt_resmgr/resources/storage/dir/__init__.py b/virttest/vt_resmgr/resources/storage/dir/__init__.py
new file mode 100644
index 0000000000..c09faaf942
--- /dev/null
+++ b/virttest/vt_resmgr/resources/storage/dir/__init__.py
@@ -0,0 +1 @@
+from .dir_pool import _DirPool
diff --git a/virttest/vt_resmgr/resources/storage/dir/dir_pool.py b/virttest/vt_resmgr/resources/storage/dir/dir_pool.py
new file mode 100644
index 0000000000..a08f994709
--- /dev/null
+++ b/virttest/vt_resmgr/resources/storage/dir/dir_pool.py
@@ -0,0 +1,25 @@
+import logging
+
+from ...pool import _ResourcePool
+from .dir_resource import _get_resource_class
+
+
+LOG = logging.getLogger("avocado." + __name__)
+
+
+class _DirPool(_ResourcePool):
+    _POOL_TYPE = "dir"
+
+    def _initialize(self, pool_config):
+        super()._initialize(pool_config)
+        self._root_dir = pool_config["root_dir"]
+
+    def create_resource(self, resource_config):
+        spec = resource_config["spec"]
+        cls = _get_resource_class(spec["type"])
+        res = cls(resource_config)
+        self._resources[res.resource_id] = res
+        return res.resource_id
+
+    def meet_resource_request(self, resource_type, resource_params):
+        # Check if a specific pool is requested by name
+        pool_name = resource_params.get("image_pool_name")
+        if pool_name == self.pool_name:
+            return True
+
+        # Check if the pool can supply a resource with the specified type
+        if _get_resource_class(resource_type) is None:
+            return False
+
+        # Check if this is the pool with the specified storage type
+        storage_type = resource_params.get("storage_type")
+        if storage_type and storage_type != self.pool_type():
+            return False
+
+        # TODO: Check if the pool has capacity to allocate the resource
+        return True
diff --git a/virttest/vt_resmgr/resources/storage/dir/dir_resource.py b/virttest/vt_resmgr/resources/storage/dir/dir_resource.py
new file mode 100644
index 0000000000..d97cb4fb34
--- /dev/null
+++ b/virttest/vt_resmgr/resources/storage/dir/dir_resource.py
@@ -0,0 +1,66 @@
+import logging
+
+from ...resource import _ResourceBinding
+from ..volume import _FileVolume
+
+
+LOG = logging.getLogger("avocado." + __name__)
+
+
+class _DirFileVolume(_FileVolume):
+    """
+    The dir file-based volume
+
+    Resource attributes:
+      meta:
+        resource id
+        references:
+          node id
+          reference id
+      spec:
+        size
+        name
+        path
+    """
+
+    def _initialize(self, resource_config):
+        super()._initialize(resource_config)
+        meta = resource_config["meta"]
+        spec = resource_config["spec"]
+        self._name = spec["name"]
+        self._capacity = spec["size"]
+        self._allocation = 0
+
+    def create_bindings(self, pool_id, nodes):
+        """
+        A local dir resource has only one binding,
+        it is allocated when creating the binding
+        """
+        if len(nodes) != 1:
+            LOG.warning("A dir resource should have one binding only")
+
+        binding = _ResourceBinding(pool_id, nodes[0])
+        binding.create_backing(self.resource_info, True)
+        self._bindings[nodes[0]] = binding
+
+    def destroy_bindings(self, nodes=None):
+        """
+        Always release the resource when destroying its binding
+        """
+        node_id = list(self._bindings.keys())[0]
+        self._bindings[node_id].destroy_backing(True)
+        del self._bindings[node_id]
+
+    def _update_binding(self, node_id, config):
+        pass
+
+    def update_bindings(self, config):
+        for node_id, binding in self._bindings.items():
+            self._update_binding(node_id, config)
+
+    @property
+    def resource_info(self):
+        pass
+
+
+def _get_resource_class(resource_type):
+    return _DirFileVolume
diff --git a/virttest/vt_resmgr/resources/storage/iscsi_direct/__init__.py b/virttest/vt_resmgr/resources/storage/iscsi_direct/__init__.py
new file mode 100644
index 0000000000..4631b968fa
--- /dev/null
+++ b/virttest/vt_resmgr/resources/storage/iscsi_direct/__init__.py
@@ -0,0 +1 @@
+from .iscsi_direct_pool import _IscsiDirectPool
diff --git a/virttest/vt_resmgr/resources/storage/iscsi_direct/iscsi_direct_pool.py b/virttest/vt_resmgr/resources/storage/iscsi_direct/iscsi_direct_pool.py
new file mode 100644
index 0000000000..33738d1af9
--- /dev/null
+++ b/virttest/vt_resmgr/resources/storage/iscsi_direct/iscsi_direct_pool.py
@@ -0,0 +1,20 @@
+import logging
+
+from ...resource import _Resource
+from ...pool import _ResourcePool
+
+
+LOG = logging.getLogger("avocado." + __name__)
+
+
+class _IscsiDirectResource(_Resource):
+    """
+    The iscsi-direct pool resource
+    """
+
+    def _initialize(self, config):
+        self._lun = config["lun"]
+
+
+class _IscsiDirectPool(_ResourcePool):
+    _POOL_TYPE = "iscsi-direct"
diff --git a/virttest/vt_resmgr/resources/storage/nbd/__init__.py b/virttest/vt_resmgr/resources/storage/nbd/__init__.py
new file mode 100644
index 0000000000..8a29e248f3
--- /dev/null
+++ b/virttest/vt_resmgr/resources/storage/nbd/__init__.py
@@ -0,0 +1 @@
+from .nbd_pool import _NbdPool
diff --git a/virttest/vt_resmgr/resources/storage/nfs/__init__.py b/virttest/vt_resmgr/resources/storage/nfs/__init__.py
new file mode 100644
index 0000000000..a0e90ec573
--- /dev/null
+++ b/virttest/vt_resmgr/resources/storage/nfs/__init__.py
@@ -0,0 +1 @@
+from .nfs_pool import _NfsPool
diff --git a/virttest/vt_resmgr/resources/storage/nfs/nfs_pool.py b/virttest/vt_resmgr/resources/storage/nfs/nfs_pool.py
new file mode 100644
index 0000000000..1bde48a481
--- /dev/null
+++ b/virttest/vt_resmgr/resources/storage/nfs/nfs_pool.py
@@ -0,0 +1,53 @@
+import logging
+
+from ...pool import _ResourcePool
+from .nfs_resource import get_resource_class
+
+
+LOG = logging.getLogger("avocado." + __name__)
+
+
+class _NfsPool(_ResourcePool):
+    _POOL_TYPE = "nfs"
+
+    def _initialize(self, pool_config):
+        super()._initialize(pool_config)
+        self._nfs_server = pool_config["nfs_server_ip"]
+        self._export_dir = pool_config["nfs_mount_src"]
+
+    def define_resource_config(self, resource_name, resource_type, resource_params):
+        res_cls = get_resource_class(resource_type)
+        config = res_cls.define_config(resource_name, resource_params)
+        config["spec"]["pool"] = self.pool_id
+        return config
+
+    def meet_resource_request(self, resource_type, resource_params):
+        # Check if the pool is the specified one
+        pool_name = resource_params.get("image_pool_name")
+        if pool_name == self.pool_name:
+            return True
+
+        # Check if the pool can supply a resource with the specified type
+        cls = get_resource_class(resource_type)
+        if cls is None:
+            return False
+
+        # Check if this is the pool with the specified storage type
+        storage_type = resource_params.get("storage_type")
+        if storage_type:
+            if storage_type != self.pool_type():
+                return False
+
+        # TODO: Check if the pool has capacity to allocate the resource
+        return True
+
+    def create_resource(self, resource_config):
+        meta = resource_config["meta"]
+        cls = get_resource_class(meta["type"])
+        res = cls(resource_config)
+        self._resources[res.resource_id] = res
+        return res.resource_id
+
+    def update_resource(self, resource_id, config):
+        res = self._resources[resource_id]
diff --git a/virttest/vt_resmgr/resources/storage/nfs/nfs_resource.py b/virttest/vt_resmgr/resources/storage/nfs/nfs_resource.py
new file mode 100644
index 0000000000..12447ca9d3
--- /dev/null
+++ b/virttest/vt_resmgr/resources/storage/nfs/nfs_resource.py
@@ -0,0 +1,94 @@
+import logging
+
+from ...resource import _ResourceBinding
+from ..volume import _FileVolume
+
+
+LOG = logging.getLogger("avocado." + __name__)
+
+
+class _NfsFileVolume(_FileVolume):
+    """
+    The nfs file-based volume
+
+    Resource attributes:
+      meta:
+        resource id
+        references:
+          node id
+          reference id
+      spec:
+        size
+        name
+        path
+    """
+
+    _RESOURCE_CONFIG_TEMP = {}
+
+    def _initialize(self, resource_config):
+        super()._initialize(resource_config)
+        meta = resource_config["meta"]
+        spec = resource_config["spec"]
+        self._name = spec["name"]
+        # self._path = spec.get('filename')
+        self._capacity = spec["size"]
+        self._allocation = 0
+
+    @classmethod
+    def define_config(cls, volume_name, volume_params):
+        pass
+
+    def _create_binding(self, pool_id, node_id, need_allocate=False):
+        binding = _ResourceBinding(pool_id, node_id)
+        binding.create_backing(self.resource_info, need_allocate)
+        self._bindings[node_id] = binding
+
+    def create_bindings(self, pool_id, nodes):
+        """
+        Create the bindings for an NFS resource.
+        An NFS resource is only allocated once, when the first binding is
+        created; the other bindings reuse that allocation
+        """
+        allocated = bool(self._bindings)
+        bindings = list()
+        node_list = nodes.copy()
+        try:
+            # Create the first binding with allocation
+            if not allocated:
+                node_id = node_list.pop(0)
+                self._create_binding(pool_id, node_id, True)
+                bindings.append(node_id)
+
+            # Create the remaining bindings without allocation
+            for node_id in node_list:
+                self._create_binding(pool_id, node_id, False)
+                bindings.append(node_id)
+        except Exception:
+            # Remove the created bindings when an error occurs
+            for node_id in bindings:
+                self._destroy_binding(node_id)
+            raise
+
+    def _destroy_binding(self, node_id):
+        need_release = len(self._bindings) == 1
+        binding = self._bindings[node_id]
+        binding.destroy_backing(need_release)
+        del self._bindings[node_id]
+
+    def destroy_bindings(self, nodes=None):
+        nodes = list(self._bindings.keys()) if not nodes else nodes
+        for node_id in nodes:
+            self._destroy_binding(node_id)
+
+    def _update_binding(self, node_id, config):
+        binding = self._bindings[node_id]
+        binding.update_backing(config)
+
+    def update_bindings(self, config):
+        for node_id, binding in self._bindings.items():
+            self._update_binding(node_id, config)
+
+    @property
+    def resource_info(self):
+        pass
+
+
+def get_resource_class(resource_type):
+    return _NfsFileVolume
diff --git a/virttest/vt_resmgr/resources/storage/nfs/nfs_resource_handlers.py b/virttest/vt_resmgr/resources/storage/nfs/nfs_resource_handlers.py
new file mode 100644
index 0000000000..c2acbfb0ba
--- /dev/null
+++ b/virttest/vt_resmgr/resources/storage/nfs/nfs_resource_handlers.py
@@ -0,0 +1,15 @@
+import logging
+
+from ...resource_handlers import _UpdateCommand
+
+
+LOG = logging.getLogger("avocado." + __name__)
+
+
+class _NfsResourceResizeCommand(_UpdateCommand):
+    _UPDATE_ACTION = "resize"
+
+    @staticmethod
+    def execute(resource, arguments):
+        # Per the update_resource() API, the resize arguments carry the new
+        # spec, e.g. {'spec': {'size': 123456}}
+        resource.update_bindings(arguments)
diff --git a/virttest/vt_resmgr/resources/storage/volume.py b/virttest/vt_resmgr/resources/storage/volume.py
new file mode 100644
index 0000000000..0a7be7df38
--- /dev/null
+++ b/virttest/vt_resmgr/resources/storage/volume.py
@@ -0,0 +1,45 @@
+from ..resource import _Resource
+
+
+class _Volume(_Resource):
+    """
+    Storage volumes are abstractions of physical partitions,
+    LVM logical volumes, file-based disk images
+    """
+
+    _RESOURCE_TYPE = "volume"
+    _VOLUME_TYPE = None
+
+    @property
+    def volume_type(self):
+        return self._VOLUME_TYPE
+
+
+class _FileVolume(_Volume):
+    """For file-based volumes"""
+
+    _VOLUME_TYPE = "file"
+
+    def __init__(self, config):
+        self._path = None
+        self._capacity = 0
+        self._allocation = 0
+        super().__init__(config)
+
+
+class _BlockVolume(_Volume):
+    """For disk, lvm, iscsi based volumes"""
+
+    _VOLUME_TYPE = "block"
+
+    def __init__(self, config):
+        self._path = None
+        self._capacity = 0
+        self._allocation = 0
+        super().__init__(config)
+
+
+class _NetworkVolume(_Volume):
+    """For rbd, iscsi-direct based volumes"""
+
+    _VOLUME_TYPE = "network"
diff --git a/virttest/vt_resmgr/vt_resmgr.py b/virttest/vt_resmgr/vt_resmgr.py
new file mode 100644
index 0000000000..adb39a9a6c
--- /dev/null
+++ b/virttest/vt_resmgr/vt_resmgr.py
@@ -0,0 +1,115 @@
+from .resources import get_resource_pool_class
+
+
+class _VTResourceManager(object):
+
+    def __init__(self):
+        self._pools = dict()  # {pool id: pool object}
+
+    def initialize(self, pool_config_list):
+        for config in pool_config_list:
+            pool_id = self.register_pool(config)
+            self.attach_pool(pool_id)
+
+    def deinitialize(self):
+        for pool_id in list(self.pools):
+            self.unregister_pool(pool_id)
+
+    def get_pool_by_name(self, pool_name):
+        pools = [p for p in self.pools.values() if p.pool_name == pool_name]
+        return pools[0] if pools else None
+
+    def get_pool_by_id(self, pool_id):
+        return self.pools.get(pool_id, None)
+
+    def get_pool_by_resource(self, resource_id):
+        pools = [p for p in self.pools.values() if resource_id in p.resources]
+        return pools[0] if pools else None
+
+    def select_pool(self, resource_type, resource_params):
+        """
+        Select the resource pool by its cartesian params
+
+        :param resource_type: The resource's type, supported:
+                              "volume"
+        :type resource_type: string
+        :param resource_params: The resource's specific params, e.g.
+                                params.object_params('image1')
+        :type resource_params: dict or Param
+        :return: The resource pool id
+        :rtype: string
+        """
+        for pool_id, pool in self.pools.items():
+            if pool.meet_resource_request(resource_type, resource_params):
+                return pool_id
+        return None
+
+    def register_pool(self, pool_params):
+        pool_type = pool_params["pool_type"]
+        pool_class = get_resource_pool_class(pool_type)
+        if pool_class is None:
+            return None
+        pool = pool_class(pool_params)
+        self._pools[pool.pool_id] = pool
+        return pool.pool_id
+
+    def unregister_pool(self, pool_id):
+        """
+        The pool should be detached from all worker nodes
+        """
+        self.detach_pool(pool_id)
+        del self._pools[pool_id]
+
+    def attach_pool_to(self, pool, node):
+        """
+        Attach a pool to a specific node
+        """
+        access_config = pool.attaching_nodes[node.node_id]
+        node.proxy.create_pool_connection(pool.pool_config, access_config)
+
+    def attach_pool(self, pool_id):
+        pool = self.get_pool_by_id(pool_id)
+        for node_id in pool.attaching_nodes:
+            node = get_node(node_id)
+            self.attach_pool_to(pool, node)
+
+    def detach_pool_from(self, pool, node):
+        """
+        Detach a pool from a specific node
+        """
+        node.proxy.destroy_pool_connection(pool.pool_id)
+
+    def detach_pool(self, pool_id):
+        pool = self.get_pool_by_id(pool_id)
+        for node_id in pool.attaching_nodes:
+            node = get_node(node_id)
+            self.detach_pool_from(pool, node)
+
+    def info_pool(self, pool_id):
+        """
+        Get the pool's information, including 'meta' and 'spec':
+          meta:
+            e.g. version for tdx, 1.0 or 1.5
+          spec:
+            common specific attributes
+              e.g. nfs_server for nfs pool
+            node-specific attributes
+              e.g. [node1:{path:/mnt1,permission:rw}, node2:{}]
+        """
+        info = dict()
+        pool = self.get_pool_by_id(pool_id)
+        info.update(pool.pool_config)
+        for node_id in pool.attaching_nodes:
+            node = get_node(node_id)
+            access_info = node.proxy.get_pool_connection(pool_id)
+            info.update(access_info)
+        return info
+
+    def pool_capability(self, pool_id):
+        pool = self.get_pool_by_id(pool_id)
+        return pool.pool_capability
+
+    @property
+    def pools(self):
+        return self._pools
+
+
+vt_resmgr = _VTResourceManager()
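
Usage sketch (reviewer note, not part of the patch): a minimal end-to-end flow
for the resource manager above. It assumes the cluster layer supplies the node
proxies reached via get_node(), that the commented-out _NfsPool registration in
resources/storage/__init__.py is enabled, and that a cartesian `params` object
is available as in the api.py module docstring. Several called methods are
still stubs in this revision, so this shows the intended call sequence rather
than a runnable test; all names and values are illustrative.

    from virttest.vt_resmgr.vt_resmgr import vt_resmgr

    # Register an NFS-backed pool and attach it to the worker nodes
    nfs_pool_params = {
        "pool_type": "nfs",
        "pool_name": "nfspool1",
        "nfs_server_ip": "192.168.1.10",
        "nfs_mount_src": "/export/images",
    }
    pool_id = vt_resmgr.register_pool(nfs_pool_params)
    vt_resmgr.attach_pool(pool_id)

    # Let the manager pick a pool for image1's storage resource ("volume")
    image_params = params.object_params("image1")
    pool_id = vt_resmgr.select_pool("volume", image_params)
    pool = vt_resmgr.get_pool_by_id(pool_id)

    # Define, create and later destroy the volume resource
    config = pool.define_resource_config("stg", "volume", image_params)
    res_id = pool.create_resource(config)
    pool.destroy_resource(res_id)

    # Detach and drop the pool at job teardown
    vt_resmgr.unregister_pool(pool_id)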