# NOTE(review): SOURCE is a git patch whose newline structure was mangled.
# The new modules introduced in this span are reconstructed below in reading
# order, separated by path banners; diff metadata is dropped.

# ---- virttest/vt_agent/managers/image/__init__.py ----
from .dispatcher import _image_handler_dispatcher


# ---- virttest/vt_agent/managers/image/dispatcher.py ----
from .qemu import _qemu_image_handler
# from .xen import _xen_image_handler


class _ImageHandlerDispatcher(object):
    """Route image requests to the handler registered for an image type."""

    def __init__(self):
        # {image type: handler object}.  The original initialized three
        # mappings copy-pasted from the backing dispatcher, none used here.
        self._handlers_mapping = dict()

    def register(self, handler):
        # BUGFIX: register() was called at module scope below but never
        # defined, so importing this module raised AttributeError.
        self._handlers_mapping[handler.image_type] = handler

    def dispatch(self, key):
        # BUGFIX: the original unconditionally returned None.
        # `key` is presumably an image type string -- TODO confirm.
        return self._handlers_mapping.get(key)


_image_handler_dispatcher = _ImageHandlerDispatcher()

_image_handler_dispatcher.register(_qemu_image_handler)
# _image_handler_dispatcher.register(_xen_image_handler)


# ---- virttest/vt_agent/managers/image/image_handler.py ----
# NOTE(review): this module is byte-identical to resbackings/backing_mgr.py;
# it looks like a copy-paste placeholder for a future _ImageHandler ABC --
# confirm with the author.  The concrete defect is fixed below.
from abc import ABC, abstractmethod


class _ResourceBackingManager(ABC):
    """Manage pool connections and the resource backings built on them."""

    _ATTACHED_POOL_TYPE = None  # pool type this manager serves

    def __init__(self):
        self._pool_connections = dict()  # {pool id: pool connection}
        self._backings = dict()          # {backing uuid: backing object}

    @property
    def attached_pool_type(self):
        # Dispatchers key registered manager instances by this value.
        return self._ATTACHED_POOL_TYPE

    @abstractmethod
    def create_pool_connection(self, pool_config, pool_access_config):
        pass

    def destroy_pool_connection(self, pool_id):
        """Shut down and forget the connection to pool `pool_id`."""
        pool_conn = self._pool_connections[pool_id]
        pool_conn.shutdown()
        del self._pool_connections[pool_id]

    @abstractmethod
    def create_backing(self, config, need_allocate=False):
        pass

    def destroy_backing(self, backing_id, need_release=False):
        """Drop a backing, optionally releasing its allocation first."""
        backing = self._backings[backing_id]
        pool_conn = self._pool_connections[backing.source_pool]
        if need_release:
            # BUGFIX: the backing interface defines release(), not
            # release_resource() -- the original raised AttributeError.
            backing.release(pool_conn)
        del self._backings[backing_id]

    def update_backing(self, backing_id, config):
        backing = self._backings[backing_id]
        pool_conn = self._pool_connections[backing.source_pool]
        backing.update(pool_conn, config)

    def get_backing(self, backing_id):
        return self._backings.get(backing_id)

    def info_backing(self, backing_id):
        return self._backings[backing_id].to_specs()


# ---- virttest/vt_agent/managers/image/qemu/__init__.py ----
# BUGFIX: the original re-exported _dir_backing_mgr from a non-existent
# `.dir` sub-package (copy-paste from the storage tree); dispatcher.py
# actually imports _qemu_image_handler from this package.
from .qemu_image_handler import _qemu_image_handler


# ---- virttest/vt_agent/managers/image/qemu/qemu_image_handler.py ----
class _QemuImageHandler(object):
    """qemu-img backed implementation of the image handler interface.

    NOTE(review): the original subclassed _ImageHandler, a name neither
    defined nor imported anywhere in this patch (NameError at import time);
    the base is dropped until that interface module exists -- confirm.
    """

    _IMAGE_TYPE = 'qemu'

    @property
    def image_type(self):
        # Used by the dispatcher's registration mapping.
        return self._IMAGE_TYPE

    def convert(self, source_spec, target_spec):
        pass

    def commit(self, top_spec, backing_spec):
        pass

    def create(self, image_spec):
        pass

    def destroy(self, image_spec):
        pass

    def info(self, image_spec):
        pass


# The dispatcher registers handler *instances*; the original module exported
# no instance, so dispatcher.py failed at import time.
_qemu_image_handler = _QemuImageHandler()


# ---- virttest/vt_agent/managers/resbackings/__init__.py ----
# BUGFIX: dispatcher.py defines _backing_mgr_dispatcher (and
# services/virt/resbacking_api.py imports it under that name); the original
# `from .dispatcher import _resbacking_mgr_dispatcher` raised ImportError.
from .dispatcher import _backing_mgr_dispatcher


# ---- virttest/vt_agent/managers/resbackings/backing.py ----
import uuid
from abc import ABC, abstractmethod


class _ResourceBacking(ABC):
    """A node-local allocation bound to one cluster-level resource."""

    _RESOURCE_TYPE = None

    def __init__(self, config):
        # config is {'meta': ..., 'spec': {...}}; only the source pool id is
        # required here, subclasses read the rest of the spec.
        self._uuid = uuid.uuid4()
        self._source_pool = config['spec']['pool_id']

    @property
    def uuid(self):
        return self._uuid

    @abstractmethod
    def allocate(self, pool_connection):
        pass

    @abstractmethod
    def release(self, pool_connection):
        pass

    @abstractmethod
    def update(self, pool_connection, new_spec):
        pass

    @abstractmethod
    def info(self, pool_connection):
        pass

    @property
    def resource_type(self):
        return self._RESOURCE_TYPE

    @property
    def source_pool(self):
        return self._source_pool


# ---- virttest/vt_agent/managers/resbackings/backing_mgr.py ----
from abc import ABC, abstractmethod


class _ResourceBackingManager(ABC):
    """Manage pool connections and the resource backings built on them."""

    _ATTACHED_POOL_TYPE = None  # pool type this manager serves

    def __init__(self):
        self._pool_connections = dict()  # {pool id: pool connection}
        self._backings = dict()          # {backing uuid: backing object}

    @property
    def attached_pool_type(self):
        # Added: dispatcher.register() reads mgr.attached_pool_type, which
        # no attribute previously provided.
        return self._ATTACHED_POOL_TYPE

    @abstractmethod
    def create_pool_connection(self, pool_config, pool_access_config):
        pass

    def destroy_pool_connection(self, pool_id):
        """Shut down and forget the connection to pool `pool_id`."""
        pool_conn = self._pool_connections[pool_id]
        pool_conn.shutdown()
        del self._pool_connections[pool_id]

    @abstractmethod
    def create_backing(self, config, need_allocate=False):
        pass

    def destroy_backing(self, backing_id, need_release=False):
        """Drop a backing, optionally releasing its allocation first."""
        backing = self._backings[backing_id]
        pool_conn = self._pool_connections[backing.source_pool]
        if need_release:
            # BUGFIX: _ResourceBacking defines release(), not
            # release_resource() -- the original raised AttributeError.
            backing.release(pool_conn)
        del self._backings[backing_id]

    def update_backing(self, backing_id, config):
        backing = self._backings[backing_id]
        pool_conn = self._pool_connections[backing.source_pool]
        backing.update(pool_conn, config)

    def get_backing(self, backing_id):
        return self._backings.get(backing_id)

    def info_backing(self, backing_id):
        return self._backings[backing_id].to_specs()
# ---- virttest/vt_agent/managers/resbackings/cvm/__init__.py ----
# NOTE(review): _CVMResBackingMgr and _all_subclasses are imported from the
# resbackings package, but this patch never defines them there -- confirm
# they land in a follow-up change.
from .. import _CVMResBackingMgr
from .. import _all_subclasses


class LaunchSecurity(object):
    """Singleton holder for the host's CVM (confidential VM) backing manager."""

    _instance = None

    @classmethod
    def dispatch(cls):
        return cls._instance

    @classmethod
    def startup(cls, config):
        if cls._instance is not None:
            return cls._instance

        # Pick the first concrete manager whose platform flags are set,
        # i.e. the one matching the host platform (SEV, TDX, ...).
        for mgr_cls in _all_subclasses(_CVMResBackingMgr):
            if mgr_cls.get_platform_flags() is not None:
                cls._instance = mgr_cls(config)
                cls._instance.startup()
                return cls._instance

        # BUGFIX: a bare `raise` with no active exception is itself a
        # RuntimeError about no active exception; raise something meaningful.
        raise RuntimeError('No supported CVM platform found on this host')

    @classmethod
    def teardown(cls):
        cls._instance.teardown()


# ---- virttest/vt_agent/managers/resbackings/cvm/_sev_resmgr.py ----
from .. import _ResBacking
from .. import _ResBackingCaps
from .. import _CVMResBackingMgr
# BUGFIX: reset_sev_platform() was called below but never imported or
# defined; map it to the SEV platform manager's reset -- TODO confirm.
from ..cvm_platform_mgr._sev_platform_mgr import reset_platform as reset_sev_platform


class _SEVCommResBacking(_ResBacking):
    """State common to SEV and SEV-SNP backings."""

    def __init__(self, requests):
        super().__init__(requests)
        self._cbitpos = None
        self._reduced_phys_bits = None
        # self._sev_device = '/dev/sev'
        # self._kernel_hashes = None

    def to_specs(self):
        return {'cbitpos': self._cbitpos,
                'reduced-phys-bits': self._reduced_phys_bits}


class _SEVResBacking(_SEVCommResBacking):
    RESOURCE_TYPE = 'sev'

    def __init__(self, requests):
        # BUGFIX: the original took no argument and called super().__init__()
        # without the required `requests`, a guaranteed TypeError.
        super().__init__(requests)
        self._dh_cert = None
        self._session = None

    def allocate(self, requests):
        pass

    def free(self):
        pass

    def to_specs(self):
        pass


class _SNPResBacking(_SEVCommResBacking):
    RESOURCE_TYPE = 'snp'

    def __init__(self, requests):
        # BUGFIX: same missing-argument defect as _SEVResBacking.
        super().__init__(requests)

    def allocate(self, requests):
        pass

    def free(self):
        pass

    def to_specs(self):
        pass


class _SEVResBackingCaps(_ResBackingCaps):
    """Host SEV capabilities and per-platform guest-count accounting."""

    def __init__(self, params):
        self._cbitpos = None
        self._reduced_phys_bits = None
        self._sev_device = None
        self._max_sev_guests = None
        self._max_snp_guests = None
        self._pdh = None
        self._cert_chain = None
        self._cpu0_id = None

    def load(self):
        pass

    def is_capable(self, requests):
        pass

    def increase(self, backing):
        pass

    def decrease(self, backing):
        pass

    @property
    def max_sev_guests(self):
        return self._max_sev_guests

    @property
    def max_snp_guests(self):
        return self._max_snp_guests


class _SEVResBackingMgr(_CVMResBackingMgr):

    def __init__(self, config):
        super().__init__(config)
        self._caps = _SEVResBackingCaps(config)
        # NOTE(review): storing the whole config as the class-level platform
        # flag looks like a placeholder -- confirm intended flag value.
        _SEVResBackingMgr._platform_flags = config

    def startup(self):
        # Reset the SEV platform to a clean state before serving requests.
        reset_sev_platform()
        super().startup()

    def teardown(self):
        reset_sev_platform()


# ---- virttest/vt_agent/managers/resbackings/cvm_platform_mgr/_sev_platform_mgr.py ----
def reset_platform():
    """Reset the SEV firmware/platform state (stub)."""
    pass


def rotate_pdh():
    """Rotate the platform Diffie-Hellman key (stub)."""
    pass


# ---- virttest/vt_agent/managers/resbackings/cvm_platform_mgr/_tdx_platform_mgr.py ----
def reset_platform():
    """Reset the TDX platform state (stub)."""
    pass


def rotate_pdh():
    """TDX counterpart of the SEV PDH rotation (stub)."""
    pass


# ---- virttest/vt_agent/managers/resbackings/cvm_platform_mgr/cvm_platform_mgr.py ----
# (empty module in the original patch -- intentionally left without content)


# ---- virttest/vt_agent/managers/resbackings/dispatcher.py ----
from .storage import _dir_backing_mgr
from .storage import _nfs_backing_mgr
# from .storage import _ceph_backing_mgr
# from .cvm import _sev_backing_mgr
# from .cvm import _tdx_backing_mgr


class _BackingMgrDispatcher(object):
    """Map pools and backings to the manager instance that owns them."""

    def __init__(self):
        self._managers_mapping = dict()  # {pool type: manager}
        self._backings_mapping = dict()  # {backing id: manager}
        self._pools_mapping = dict()     # {pool id: manager}

    def dispatch_by_pool(self, pool_id):
        return self._pools_mapping.get(pool_id, None)

    def dispatch_by_backing(self, backing_id):
        return self._backings_mapping.get(backing_id, None)

    def register(self, mgr):
        # BUGFIX: this was a @classmethod whose body used `self`, raising
        # NameError on the module-scope register() calls below; it mutates
        # instance state, so it must be an instance method.
        self._managers_mapping[mgr.attached_pool_type] = mgr

    def map_pool(self, pool_id, pool_type):
        self._pools_mapping[pool_id] = self._managers_mapping[pool_type]

    def unmap_pool(self, pool_id):
        del self._pools_mapping[pool_id]

    def map_backing(self, backing_id, backing_mgr):
        self._backings_mapping[backing_id] = backing_mgr

    def unmap_backing(self, backing_id):
        del self._backings_mapping[backing_id]


_backing_mgr_dispatcher = _BackingMgrDispatcher()

# Register storage backing managers
_backing_mgr_dispatcher.register(_dir_backing_mgr)
_backing_mgr_dispatcher.register(_nfs_backing_mgr)
# _backing_mgr_dispatcher.register(_ceph_backing_mgr)

# Register cvm backing managers
# _backing_mgr_dispatcher.register(_sev_backing_mgr)
# _backing_mgr_dispatcher.register(_tdx_backing_mgr)


# ---- virttest/vt_agent/managers/resbackings/pool_connection.py ----
import uuid
from abc import ABC, abstractmethod


class _ResourcePoolAccess(ABC):
    """Access credentials/options needed to attach a resource pool."""

    @abstractmethod
    def __init__(self, pool_access_config):
        pass


class _ResourcePoolConnection(ABC):
    """A node-local connection (e.g. a mount) to a resource pool."""

    def __init__(self, pool_config, pool_access_config):
        self._connected_pool = pool_config['pool_id']

    @abstractmethod
    def startup(self):
        pass

    @abstractmethod
    def shutdown(self):
        # BUGFIX: the original declared shutdown(self, backing), but every
        # implementation and caller in this patch uses shutdown(self).
        pass

    @abstractmethod
    def connected(self):
        pass


# ---- virttest/vt_agent/managers/resbackings/storage/__init__.py ----
from .dir import _dir_backing_mgr
from .nfs import _nfs_backing_mgr


# ---- virttest/vt_agent/managers/resbackings/storage/dir/__init__.py ----
from .dir_backing_mgr import _dir_backing_mgr


# ---- virttest/vt_agent/managers/resbackings/storage/dir/dir_backing.py ----
import os
from virttest import utils_io
from ...backing import _ResourceBacking


class _DirVolumeBacking(_ResourceBacking):
    """A file allocated inside a directory pool."""

    def __init__(self, config):
        super().__init__(config)
        # NOTE(review): size/name are read from the top level of config while
        # the base class reads config['spec'] -- confirm the config schema.
        self._size = config['size']
        self._name = config['name']

    def allocate(self, pool_connection):
        # BUGFIX: the original decorated this with @property, which makes a
        # method taking an argument uncallable through normal attribute
        # access; allocate(pool_connection) is the backing interface.
        path = os.path.join(pool_connection.dir, self._name)
        utils_io.dd(path, self._size)

    def release(self, pool_connection):
        path = os.path.join(pool_connection.dir, self._name)
        os.unlink(path)

    def info(self, pool_connection):
        path = os.path.join(pool_connection.dir, self._name)
        return {'path': path, 'allocation': os.stat(path).st_size}


def _get_backing_class(resource_type):
    """
    Get the backing class for a given resource type in case more than one
    resource type is supported by a dir pool
    """
    return _DirVolumeBacking


# ---- virttest/vt_agent/managers/resbackings/storage/dir/dir_backing_mgr.py ----
from ...backing_mgr import _ResourceBackingManager
from .dir_backing import _get_backing_class
from .dir_pool_connection import _DirPoolConnection


class _DirBackingManager(_ResourceBackingManager):
    # BUGFIX: was 'nfs' (copy-paste from the nfs manager); both managers
    # registering under one pool type would clobber each other in the
    # dispatcher's {pool type: manager} mapping.
    _ATTACHED_POOL_TYPE = 'dir'

    def create_pool_connection(self, pool_config, pool_access_config):
        pool_conn = _DirPoolConnection(pool_config, pool_access_config)
        pool_conn.startup()
        # BUGFIX: pool_id was an undefined name in the original.
        self._pool_connections[pool_config['pool_id']] = pool_conn

    def create_backing(self, config, need_allocate=False):
        """Create (and optionally allocate) a backing; return its id."""
        pool_conn = self._pool_connections[config['pool_id']]
        backing_class = _get_backing_class(config['resource_type'])
        backing = backing_class(config)
        self._backings[backing.uuid] = backing
        if need_allocate:
            backing.allocate(pool_conn)
        # BUGFIX: resbacking_api.create_backing() expects the new backing's
        # id; the original returned None.
        return backing.uuid

    def destroy_backing(self, backing_id, need_release=False):
        backing = self._backings[backing_id]
        pool_conn = self._pool_connections[backing.source_pool]
        if need_release:
            backing.release(pool_conn)
        del self._backings[backing_id]

    def update_backing(self, backing_id, new_backing_spec):
        # BUGFIX: self._allocated_backings never existed; backings live in
        # self._backings (inherited from _ResourceBackingManager).
        backing = self._backings[backing_id]
        pool_conn = self._pool_connections[backing.source_pool]
        backing.update(pool_conn, new_backing_spec)

    def info_backing(self, backing_id):
        backing = self._backings[backing_id]  # BUGFIX: same as above
        pool_conn = self._pool_connections[backing.source_pool]
        return backing.info(pool_conn)


# Module-level singleton re-exported by storage/dir/__init__.py; the
# original never instantiated it, so that import raised ImportError.
_dir_backing_mgr = _DirBackingManager()
# ---- virttest/vt_agent/managers/resbackings/storage/dir/dir_pool_connection.py ----
import os
# NOTE(review): the original bare `import utils_disk` lacks the `virttest`
# package prefix used elsewhere in this tree; kept for parity -- confirm.
import utils_disk

from ...pool_connection import _ResourcePoolConnection


class _DirPoolConnection(_ResourcePoolConnection):
    """Pool connection for a plain-directory pool on the local node."""

    def __init__(self, pool_config, pool_access_config):
        super().__init__(pool_config, pool_access_config)
        self._dir = pool_config.get('root_dir')
        # BUGFIX: the original tested self._mnt, an attribute this class
        # never sets (copy-paste from the nfs connection).
        if self._dir is None:
            self._create_default_dir()

    def _create_default_dir(self):
        # BUGFIX: called above but never defined.  The default location is a
        # guess -- TODO confirm the intended layout with the author.
        self._dir = os.path.join('/var/tmp', str(self._connected_pool))

    def startup(self):
        # Ensure the pool directory exists before backings allocate in it.
        os.makedirs(self._dir, exist_ok=True)

    def shutdown(self):
        pass

    def connected(self):
        # BUGFIX: the original used os without importing it.
        return os.path.exists(self.dir)

    @property
    def dir(self):
        return self._dir


# ---- virttest/vt_agent/managers/resbackings/storage/nfs/__init__.py ----
from .nfs_backing_mgr import _nfs_backing_mgr


# ---- virttest/vt_agent/managers/resbackings/storage/nfs/nfs_backing.py ----
import os
from virttest import utils_io
from ...backing import _ResourceBacking


class _NfsVolumeBacking(_ResourceBacking):
    """A file allocated inside the mount point of an NFS pool."""

    def __init__(self, config):
        super().__init__(config)
        self._size = config['size']
        self._name = config['name']

    def allocate(self, pool_connection):
        # BUGFIX: the original decorated this with @property, which makes a
        # method taking an argument uncallable; allocate(pool_connection) is
        # the backing interface.
        path = os.path.join(pool_connection.mnt, self._name)
        utils_io.dd(path, self._size)

    def release(self, pool_connection):
        path = os.path.join(pool_connection.mnt, self._name)
        os.unlink(path)

    def info(self, pool_connection):
        path = os.path.join(pool_connection.mnt, self._name)
        return {'path': path, 'allocation': os.stat(path).st_size}


def _get_backing_class(resource_type):
    """
    Get the backing class for a given resource type in case there are
    more than one resources are supported by a nfs pool
    """
    return _NfsVolumeBacking


# ---- virttest/vt_agent/managers/resbackings/storage/nfs/nfs_backing_mgr.py ----
from ...backing_mgr import _ResourceBackingManager
from .nfs_backing import _get_backing_class
from .nfs_pool_connection import _NfsPoolConnection


class _NfsBackingManager(_ResourceBackingManager):
    _ATTACHED_POOL_TYPE = 'nfs'

    def create_pool_connection(self, pool_config, pool_access_config):
        pool_conn = _NfsPoolConnection(pool_config, pool_access_config)
        pool_conn.startup()
        # BUGFIX: pool_id was an undefined name in the original.
        self._pool_connections[pool_config['pool_id']] = pool_conn

    def create_backing(self, config, need_allocate=False):
        """Create (and optionally allocate) a backing; return its id."""
        pool_conn = self._pool_connections[config['pool_id']]
        backing_class = _get_backing_class(config['resource_type'])
        backing = backing_class(config)
        self._backings[backing.uuid] = backing
        if need_allocate:
            backing.allocate(pool_conn)
        # BUGFIX: resbacking_api.create_backing() expects the new backing's
        # id; the original returned None.
        return backing.uuid

    def destroy_backing(self, backing_id, need_release=False):
        backing = self._backings[backing_id]
        pool_conn = self._pool_connections[backing.source_pool]
        if need_release:
            backing.release(pool_conn)
        del self._backings[backing_id]

    def update_backing(self, backing_id, new_backing_spec):
        # BUGFIX: self._allocated_backings never existed; backings live in
        # self._backings (inherited from _ResourceBackingManager).
        backing = self._backings[backing_id]
        pool_conn = self._pool_connections[backing.source_pool]
        backing.update(pool_conn, new_backing_spec)

    def info_backing(self, backing_id):
        backing = self._backings[backing_id]  # BUGFIX: same as above
        pool_conn = self._pool_connections[backing.source_pool]
        return backing.info(pool_conn)


# Module-level singleton re-exported by storage/nfs/__init__.py; the
# original never instantiated it, so that import raised ImportError.
_nfs_backing_mgr = _NfsBackingManager()


# ---- virttest/vt_agent/managers/resbackings/storage/nfs/nfs_pool_connection.py ----
import utils_disk

from ...pool_connection import _ResourcePoolAccess
from ...pool_connection import _ResourcePoolConnection


class _NfsPoolAccess(_ResourcePoolAccess):
    """
    Mount options
    """

    def __init__(self, pool_access_config):
        self._options = pool_access_config['nfs_options']

    def __str__(self):
        return self._options


class _NfsPoolConnection(_ResourcePoolConnection):
    """Pool connection that mounts an NFS export on the worker node."""

    def __init__(self, pool_config, pool_access_config):
        super().__init__(pool_config, pool_access_config)
        self._connected_pool = pool_config['pool_id']
        self._nfs_server = pool_config['nfs_server']
        self._export_dir = pool_config['export_dir']
        self._nfs_access = _NfsPoolAccess(pool_access_config)
        # BUGFIX: nfs_mnt_dir was an undefined bare name; the config key is
        # a string.
        self._mnt = pool_config.get('nfs_mnt_dir')
        if self._mnt is None:
            self._create_default_mnt()

    def _create_default_mnt(self):
        # BUGFIX: called above but never defined.  Default mount point is a
        # guess -- TODO confirm the intended layout with the author.
        self._mnt = '/var/tmp/%s' % self._connected_pool

    @property
    def _src(self):
        # BUGFIX: str.format() with named {host}/{export} fields was given
        # positional arguments in the original, raising KeyError at runtime.
        return '{host}:{export}'.format(host=self._nfs_server,
                                        export=self._export_dir)

    def startup(self):
        utils_disk.mount(self._src, self._mnt, fstype='nfs',
                         options=str(self._nfs_access))

    def shutdown(self):
        utils_disk.umount(self._src, self._mnt, fstype='nfs')

    def connected(self):
        return utils_disk.is_mount(self._src, self._mnt, fstype='nfs')

    @property
    def mnt(self):
        return self._mnt
# ---- virttest/vt_agent/services/__init__.py ----
# (empty module -- package marker only)


# ---- virttest/vt_agent/services/virt/image_api.py ----
# BUGFIX: the original imported _backing_mgr_dispatcher (a resbackings name
# that managers.image does not export) yet used _image_handler_dispatcher
# below, so this module failed at import time.
from ...managers.image import _image_handler_dispatcher


def handle_image(config):
    """
    Dispatch an image operation to the handler registered for it.

    :param config: The specified action and the snippet of
                   the image's spec and meta info used for the action
    :type config: dict
    """
    image_handler = _image_handler_dispatcher.dispatch(config)
    image_handler.do(config)


# ---- virttest/vt_agent/services/virt/resbacking_api.py ----
from ...managers.resbackings import _backing_mgr_dispatcher


def create_pool_connection(pool_config, pool_access):
    """
    Attach this worker node to a pool, creating the {pool: manager}
    mapping on first use.
    """
    pool_id = pool_config['pool_id']
    pool_type = pool_config['pool_type']
    backing_mgr = _backing_mgr_dispatcher.dispatch_by_pool(pool_id)
    if backing_mgr is None:
        _backing_mgr_dispatcher.map_pool(pool_id, pool_type)
        backing_mgr = _backing_mgr_dispatcher.dispatch_by_pool(pool_id)
    backing_mgr.create_pool_connection(pool_config, pool_access)


def destroy_pool_connection(pool_id):
    """Detach this worker node from the pool and drop its mapping."""
    backing_mgr = _backing_mgr_dispatcher.dispatch_by_pool(pool_id)
    backing_mgr.destroy_pool_connection(pool_id)
    _backing_mgr_dispatcher.unmap_pool(pool_id)


def create_backing(config, need_allocate=False):
    """
    Create a resource backing on the worker node, which is bound to one and
    only one resource, VT can access the specific resource allocation with
    the backing when starting VM on the worker node

    :param config: The config including the resource's meta and spec data
    :type config: dict
    :param need_allocate: Allocate the resource storage too when True
    :type need_allocate: bool
    :return: The backing id
    :rtype: string
    """
    # NOTE(review): the pool id is read from config['spec']['pool'] here
    # while the managers read config['pool_id'] -- confirm the schema and
    # unify; kept as-is to preserve behavior.
    pool_id = config['spec']['pool']
    backing_mgr = _backing_mgr_dispatcher.dispatch_by_pool(pool_id)
    backing_id = backing_mgr.create_backing(config, need_allocate)
    _backing_mgr_dispatcher.map_backing(backing_id, backing_mgr)
    return backing_id


def destroy_backing(backing_id, need_release=False):
    """
    Destroy the backing, all resources allocated on worker nodes will be
    released.

    :param backing_id: The cluster resource id
    :type backing_id: string
    :param need_release: Release the allocated resource too when True
    :type need_release: bool
    """
    backing_mgr = _backing_mgr_dispatcher.dispatch_by_backing(backing_id)
    backing_mgr.destroy_backing(backing_id, need_release)
    _backing_mgr_dispatcher.unmap_backing(backing_id)


def info_backing(backing_id):
    """
    Get the information of a resource with a specified backing

    We need not get all the information of the resource, because the
    static can be got by the resource object, e.g. size, here we only
    get the information which is dynamic, such as path and allocation

    :param backing_id: The backing id
    :type backing_id: string
    :return: The information of a resource, e.g.
             {
               'spec':{
                 'allocation': 12,
                 'path': [{'node1': '/p1/f1'},{'node2': '/p2/f1'}],
               }
             }
    :rtype: dict
    """
    backing_mgr = _backing_mgr_dispatcher.dispatch_by_backing(backing_id)
    return backing_mgr.info_backing(backing_id)


def update_backing(backing_id, config):
    """
    :param backing_id: The resource backing id
    :type backing_id: string
    :param config: The specified action and the snippet of
                   the resource's spec and meta info used for update
    :type config: dict
    """
    backing_mgr = _backing_mgr_dispatcher.dispatch_by_backing(backing_id)
    backing_mgr.update_backing(backing_id, config)


# ---- virttest/vt_imgr/.api.py ----
# NOTE(review): the leading dot keeps this file unimportable as a module;
# it looks like a scratch copy -- confirm whether it should be api.py.
# BUGFIX: vt_resmgr is a sibling of vt_imgr, not a submodule, so the
# relative import needs two dots.
from ..vt_resmgr import vt_resmgr


class ImageNotFound(Exception):
    def __init__(self, image_id):
        self._id = image_id

    def __str__(self):
        # BUGFIX: the message said "pool" for an image lookup and misplaced
        # the closing quote: 'pool(id="%s)"'.
        return 'Cannot find the image(id="%s")' % self._id


class UnknownImageType(Exception):
    def __init__(self, image_type):
        self._type = image_type

    def __str__(self):
        return 'Unknown image type "%s"' % self._type


def create_image(config):
    """
    Create a logical image without any specific storage allocation,

    :param config: The image's meta and spec data
    :type config: dict
    :return: The image id
    :rtype: string
    """
    pass


def destroy_image(image_id):
    """
    Destroy the logical image, the specific storage allocation
    will be released, note the image's backing image will not be
    touched

    :param image_id: The resource id
    :type image_id: string
    """
    pass


def get_image(image_id):
    """
    Get all information for a specified image

    :param image_id: The image id
    :type image_id: string
    :return: All the information of an image, e.g.
             {
               'meta': {'id': 'image1', 'backing': 'image2'},
               'spec': {
                 'name': 'stg',
                 'format': 'qcow2',
                 'backing': {...},
                 'volume': {
                   'meta': {'id': 'nfs_vol1'},
                   'spec': {
                     'pool': 'nfs_pool1',
                     'type': 'volume',
                     'size': 65536,
                     'name': 'stg.qcow2',
                     'path': [{'node1': '/mnt1/stg.qcow2'},
                              {'node2': '/mnt2/stg.qcow2'}],
                   }
                 }
               }
             }
    :rtype: dict
    """
    pass
+ { + 'meta': { + 'id': 'image1', + 'backing': 'image2' + }, + 'spec': { + 'name': 'stg', + 'format': 'qcow2', + 'backing': { + The backing's information here + }, + 'volume': { + 'meta': { + 'id': 'nfs_vol1' + }, + 'spec': { + 'pool': 'nfs_pool1', + 'type': 'volume', + 'size': 65536, + 'name': 'stg.qcow2', + 'path': [{'node1': '/mnt1/stg.qcow2'}, + {'node2': '/mnt2/stg.qcow2'}], + } + } + } + } + :rtype: dict + """ + pass + + +def update_image(image_id, config): + """ + Update an image, the command format: + {'action': arguments}, in which + the 'action' can be the following for a qemu image: + 'create': qemu-img create + 'destroy': Remove the allocated resource + 'convert': qemu-img convert + 'snapshot': qemu-img snapshot + 'resize': qemu-img resize + arguments is a dict object which contains all related settings for a + specific action + + Examples: + qemu-img create + {'create': } + qemu-img convert + {'convert': } + + :param image_id: The image id + :type image_id: string + :param config: The specified action and its arguments + :type config: dict + """ + pass diff --git a/virttest/vt_imgr/__init__.py b/virttest/vt_imgr/__init__.py new file mode 100644 index 0000000000..317bce95d2 --- /dev/null +++ b/virttest/vt_imgr/__init__.py @@ -0,0 +1 @@ +from vt_imgr import vt_image_manager diff --git a/virttest/vt_imgr/image.py b/virttest/vt_imgr/image.py new file mode 100644 index 0000000000..6bf64839ec --- /dev/null +++ b/virttest/vt_imgr/image.py @@ -0,0 +1,66 @@ +import uuid +from abc import ABC, abstractmethod + + +class _LogicalImage(ABC): + """ + A logical image could have one or more _Image objects, take qemu + logical image as an example, it can contain a top _Image object + and its backing _Image object, in the context of a VM's disk, a + logical image is the media of the disk, i.e. 
one logical image + for one VM's disk + """ + + def __init__(self, top_image_tag): + self._id = uuid.uuid4() + + @property + def image_id(self): + return self._id + + @property + def image_spec(self): + pass + + def create_image(self): + pass + + def destroy_image(self): + pass + + def handle_image(self): + pass + + + +class _Image(ABC): + """ + An image has one storage resource(volume), the cartesian params of + a image describes this object + """ + + _IMAGE_TYPE = None + + def __init__(self, image_id, image_params): + self._tag = image_id + self._initialize(image_params) + + def _initialize(self, image_params): + pass + + def image_type(cls): + raise cls._IMAGE_TYPE + + @property + def image_id(self): + return self._id + + @property + def image_spec(self): + pass + + def create(self): + pass + + def destroy(self): + pass diff --git a/virttest/vt_imgr/qemu/qemu_image.py b/virttest/vt_imgr/qemu/qemu_image.py new file mode 100644 index 0000000000..5cf1a227dd --- /dev/null +++ b/virttest/vt_imgr/qemu/qemu_image.py @@ -0,0 +1,83 @@ +from ..image import _Image + + +class _QemuImage(_Image): + + _IMAGE_TYPE = 'qemu' + + def _initialize(self, config): + super()._initialize(config) + spec = config['spec'] + self._size = spec['size'] + self._format = spec['format'] + + def create(self): + pass + + def destroy(self): + pass + + def convert(self, target_id): + pass + + def rebase(self, backing_id): + pass + + def commit(self, backing_id): + pass + + def snapshot_create(self): + pass + + def snapshot_del(self, blkdebug_cfg=""): + pass + + def snapshot_list(self, force_share=False): + pass + + def snapshot_apply(self): + pass + + def bitmap_add(self, bitmap_name): + pass + + def bitmap_remove(self, bitmap_name): + pass + + def bitmap_clear(self, bitmap_name): + pass + + def bitmap_enable(self, bitmap_name): + pass + + def bitmap_disable(self, bitmap_name): + pass + + def bitmap_merge(self, bitmap_name_source, + bitmap_name_target, bitmap_image_source): + pass + + def 
info(self, force_share=False, output="human"): + pass + + def compare(self, target_id, strict_mode=False, + verbose=True, force_share=False): + pass + + def check(self, force_share=False): + pass + + def amend(self, cache_mode=None, ignore_status=False): + pass + + def resize(self, size, shrink=False, preallocation=None): + pass + + def map(self, output="human"): + pass + + def measure(self, target_fmt, size=None, output="human"): + pass + + def dd(self, target_id, bs=None, count=None, skip=None): + pass diff --git a/virttest/vt_imgr/qemu/qemu_image_manager.py b/virttest/vt_imgr/qemu/qemu_image_manager.py new file mode 100644 index 0000000000..efb167cc97 --- /dev/null +++ b/virttest/vt_imgr/qemu/qemu_image_manager.py @@ -0,0 +1,82 @@ +from ..image_manager import _ImageManager + + +class _VTQemuImageManager(_ImageManager): + + def __init__(self): + self._images = dict() # {image id: image object} + + @classmethod + def _get_image_class(cls, pool_type): + pass + + def create(self, image_spec): + pass + + def destroy(self): + pass + + def convert(self, source_id, target_id): + pass + + def rebase(self, top_id, backing_id): + pass + + def commit(self, top_id, backing_id): + pass + + def snapshot_create(self): + pass + + def snapshot_del(self, blkdebug_cfg=""): + pass + + def snapshot_list(self, force_share=False): + pass + + def snapshot_apply(self): + pass + + def bitmap_add(self, image_id, bitmap_name): + pass + + def bitmap_remove(self, image_id, bitmap_name): + pass + + def bitmap_clear(self, image_id, bitmap_name): + pass + + def bitmap_enable(self, image_id, bitmap_name): + pass + + def bitmap_disable(self, image_id, bitmap_name): + pass + + def bitmap_merge(self, image_id, bitmap_name_source, + bitmap_name_target, bitmap_image_source): + pass + + def info(self, image_id, force_share=False, output="human"): + pass + + def compare(self, source_id, target_id, strict_mode=False, + verbose=True, force_share=False): + pass + + def check(self, image_id, 
force_share=False): + pass + + def amend(self, image_id, cache_mode=None, ignore_status=False): + pass + + def resize(self, image_id, size, shrink=False, preallocation=None): + pass + + def map(self, image_id, output="human"): + pass + + def measure(self, target_fmt, size=None, output="human"): + pass + + def dd(self, source_id, target_id, bs=None, count=None, skip=None): + pass diff --git a/virttest/vt_imgr/vt_imgr.py b/virttest/vt_imgr/vt_imgr.py new file mode 100644 index 0000000000..d76d6b12ec --- /dev/null +++ b/virttest/vt_imgr/vt_imgr.py @@ -0,0 +1,35 @@ +class _LogicalImageManager(object): + + def __init__(self): + """ + :param params: the reference to the original cartesian params + """ + self._images = dict() + + def create_logical_image(self, image_tag, params): + """ + Create a logical image without any storage allocation, based on + the cartesian params, create all its image objects, e.g. for a + qemu logical image which has a image chain: + top image('top') --> backing image('backing') + | | + resource resource + """ + img_cls = self.get_logical_image_class() + return image_id + + def destroy_logical_image(self, logical_image_id): + pass + + def clone_logical_image(self, logical_image_id): + pass + + def update_logical_image(self, logical_image_id, arguments): + logical_image = self._images[image_id] + logical_image.update(arguments) + + +# Add drivers for diff handlers +# Add access permission for images +# serialize +vt_imgr = _LogicalImageManager() diff --git a/virttest/vt_resmgr/__init__.py b/virttest/vt_resmgr/__init__.py new file mode 100644 index 0000000000..0a0e47b0b0 --- /dev/null +++ b/virttest/vt_resmgr/__init__.py @@ -0,0 +1 @@ +from .api import * diff --git a/virttest/vt_resmgr/api.py b/virttest/vt_resmgr/api.py new file mode 100644 index 0000000000..1b3f54180b --- /dev/null +++ b/virttest/vt_resmgr/api.py @@ -0,0 +1,220 @@ +""" +# Create a cluster level nfs resource +config = 
class PoolNotFound(Exception):
    """Raised when a pool id cannot be resolved to a registered pool."""

    def __init__(self, pool_id):
        self._id = pool_id

    def __str__(self):
        # Bug fix: the closing quote was misplaced in the original
        # format string ('...(id="%s)"'), garbling the message
        return 'Cannot find the pool(id="%s")' % self._id
def attach_resource_pool(pool_id):
    """
    Attach the registered pool to worker nodes, then the pool can be
    accessed by the worker nodes

    :param pool_id: The id of the pool to attach
    :type pool_id: string
    :raises PoolNotFound: if the pool id is not registered
    """
    # Guard clause: resolve the pool first so unknown ids fail fast
    if vt_resmgr.get_pool_by_id(pool_id) is None:
        raise PoolNotFound(pool_id)
    vt_resmgr.attach_pool(pool_id)
def destroy_resource(resource_id):
    """
    Destroy the logical resource, the specific resource allocation
    will be released

    :param resource_id: The resource id
    :type resource_id: string
    :raises ResourceNotFound: if no pool holds the resource
    """
    pool = vt_resmgr.get_pool_by_resource(resource_id)
    # Bug fix: get_pool_by_resource returns None for an unknown
    # resource; raise the module's exception instead of hitting an
    # AttributeError on None (consistent with the pool-id APIs above)
    if pool is None:
        raise ResourceNotFound(resource_id)
    pool.destroy_resource(resource_id)
object which contains all related settings for a + specific action + + Examples: + Bind a resource to one or more nodes + {'bind': {'nodes': ['node1'], 'pool': 'nfspool1'}} + {'bind': {'nodes': ['node1', 'node2'], 'pool': 'nfspool1'}} + Unbind a resource from one or more nodes + {'unbind': {'nodes': ['node1']}} + {'unbind': {'nodes': ['node1', 'node2']}} + Resize a specified storage volume resource + {'resize': {'spec': {'size': 123456}}} + + :param resource_id: The resource id + :type resource_id: string + :param config: The specified action and its arguments + :type config: dict + """ + pool = vt_resmgr.get_pool_by_resource(resource_id) + pool.update_resource(resource_id, config) diff --git a/virttest/vt_resmgr/cvm/__init__.py b/virttest/vt_resmgr/cvm/__init__.py new file mode 100644 index 0000000000..389da0b231 --- /dev/null +++ b/virttest/vt_resmgr/cvm/__init__.py @@ -0,0 +1 @@ +from .api import _cvm_resmgr diff --git a/virttest/vt_resmgr/cvm/api.py b/virttest/vt_resmgr/cvm/api.py new file mode 100644 index 0000000000..681e6d7571 --- /dev/null +++ b/virttest/vt_resmgr/cvm/api.py @@ -0,0 +1,73 @@ +import logging + +from ...resmgr import Resource, ResMgr + + +LOG = logging.getLogger('avocado.' 
class SEVResource(Resource):
    """AMD SEV confidential-VM resource."""

    TYPE = 'sev'

    def _to_attributes(self, resource_params):
        # Subclass hook: map cartesian params to attributes (stub)
        pass

    @property
    def requests(self):
        # The request descriptor identifying this resource type
        return dict(type=self.TYPE)
a AMD SEV/SNP machine cannot allocate a TDX resource + """ + node = get_node(node_uuid) + return node.proxy.enabled(resource_type) + + +_cvm_resmgr = CVMResMgr() diff --git a/virttest/vt_resmgr/cvm/conductor.py.bak b/virttest/vt_resmgr/cvm/conductor.py.bak new file mode 100644 index 0000000000..6c5a3ddd4c --- /dev/null +++ b/virttest/vt_resmgr/cvm/conductor.py.bak @@ -0,0 +1,58 @@ +class Conductor(object): + CHANNEL_TYPE = None + + def __init__(self, node_id): + self._channel = None + self._node_id = node_id + + def _worker(self, node_id): + return get_node(node_id) + + def create(self): + pass + + def destroy(self): + pass + + @property + def channel(self): + return self._channel + + +class RPCConductor(Conductor): + CHANNEL_TYPE = 'rpc' + + def __init__(self, node_id): + super().__init__(node_id) + + def create(self): + node = self._worker(self._node_id) + self._channel = node.proxy.virt + + def destroy(self): + self._channel = None + + +class SSHConductor(Conductor): + CHANNEL_TYPE = 'ssh' + + def __init__(self, node_id): + super().__init__(node_id) + + def create(self): + node = self._worker(self._node_id) + self._channel = node.connect(node.connection_auth) + + def destroy(self): + self._channel.close() + self._channel = None + + +class Channel(object): + @staticmethod + def channel(node_id, channel_type): + for cls in Conductor.__subclasses__: + if cls.CHANNEL_TYPE = channel_type: + return cls(node_id) + break + return None diff --git a/virttest/vt_resmgr/pool.py b/virttest/vt_resmgr/pool.py new file mode 100644 index 0000000000..d44164a5b0 --- /dev/null +++ b/virttest/vt_resmgr/pool.py @@ -0,0 +1,134 @@ +import uuid +from abc import ABC, abstractmethod + +from .resource import _Resource + + +class _UpdateCommand(ABC): + _UPDATE_ACTION = None + + @abstractmethod + @staticmethod + def execute(resource, arguments): + raise NotImplemented + + @property + @classmethod + def action(cls): + return cls._UPDATE_ACTION + + +class _BindCommand(_UpdateCommand): + 
class _classproperty(object):
    """Descriptor exposing a read-only value on *class* access."""

    def __init__(self, fget):
        self._fget = fget

    def __get__(self, instance, owner):
        return self._fget(owner)


class _ResourcePool(ABC):
    """
    A resource pool is used to manage resources. A resource must be
    allocated from a specific pool, and a pool can hold many resources
    """

    _POOL_TYPE = None
    _UPDATE_HANDLERS = dict()

    def __init__(self, pool_config):
        self._id = uuid.uuid4()
        self._name = None
        self._resources = dict()  # {resource id: resource object}
        self._managed_resource_types = list()
        self._accesses = dict()  # {node id: pool access object}
        self._register_update_handlers()
        self._initialize(pool_config)

    def _initialize(self, pool_config):
        # Subclass hook: parse the pool config into attributes
        self._name = pool_config.get('name')

    @classmethod
    def _register_update_handlers(cls):
        # Bug fix: a classmethod has no `self`; the original referenced
        # self._UPDATE_HANDLERS and raised NameError
        for handler_cls in _UpdateCommand.__subclasses__():
            cls._UPDATE_HANDLERS[handler_cls.action] = handler_cls

    def check_resource_managed(self, spec):
        """
        Check if this is the manager which is managing the specified resource
        """
        return self._get_resource_type(spec) in self._managed_resource_types

    def _get_resource_type(self, spec):
        # Bug fix: instance method was missing `self`
        raise NotImplementedError

    @abstractmethod
    def create_resource(self, config):
        """
        Create a resource, no real resource allocated
        """
        raise NotImplementedError

    def destroy_resource(self, resource_id):
        """
        Destroy the resource, all its backings should be released
        """
        res = self._resources[resource_id]
        res.destroy_bindings()
        del self._resources[resource_id]

    def update_resource(self, resource_id, update_arguments):
        # The arguments dict has exactly one {action: arguments} pair
        conf = update_arguments.copy()
        action, arguments = conf.popitem()
        res = self._resources[resource_id]
        self._UPDATE_HANDLERS[action].execute(res, arguments)

    def info_resource(self, resource_id):
        """
        Get the reference of a specified resource
        """
        res = self._resources.get(resource_id)
        return res.resource_info

    @property
    def pool_id(self):
        # Added: vt_resmgr registers pools keyed by `pool.pool_id`,
        # which the original class never exposed
        return self._id

    @property
    def resources(self):
        # Added: vt_resmgr locates pools via `resource_id in p.resources`
        return self._resources

    @property
    def attaching_nodes(self):
        return self._accesses.keys()

    @property
    def pool_capability(self):
        # Bug fix: dict_keys has no .keys() and is not subscriptable;
        # take the first attached node via the iterator protocol
        node_id = next(iter(self.attaching_nodes))
        node = get_node(node_id)
        return node.proxy.get_pool_capability()

    @property
    def pool_name(self):
        return self._name

    # Bug fix: the original @property+@classmethod stack yielded the
    # property object on class access; pool_collections keys its
    # registry by `pool_class.pool_type`, so class access must work
    @_classproperty
    def pool_type(cls):
        return cls._POOL_TYPE

    @property
    def pool_config(self):
        # NOTE(review): vt_resmgr reads `pool.config` in a couple of
        # places — confirm whether that should be this property
        pass
+#PoolCollections.register_pool_class(_VirtioNicPool) diff --git a/virttest/vt_resmgr/resource.py b/virttest/vt_resmgr/resource.py new file mode 100644 index 0000000000..26f8beeb6d --- /dev/null +++ b/virttest/vt_resmgr/resource.py @@ -0,0 +1,159 @@ +import uuid +from abc import ABC, abstractmethod + + +class _ResourceBinding(object): + """ + A binding binds a resource to an allocated resource backing + at a worker node. A resource can have many bindings, but one + binding can only bind one backing at one worker node. + """ + + def __init__(self, pool_id, node_id): + self._pool_id = pool_id + self._node_id = node_id + self._backing_id = None + + def create_backing(self, resource_config, need_allocate=False): + """ + Create a resource backing object via RPC + """ + node = get_node(self._node_id) + self._backing_id = node.proxy.create_backing(resource_config, + need_allocate) + + def destroy_backing(self, need_release=False): + """ + Destroy the resource backing object via RPC + """ + node = get_node(self._node_id) + node.proxy.destroy_backing(self._backing_id, need_release) + + def update_backing(self, spec): + node = get_node(self._node_id) + node.proxy.update(self._backing_id, spec) + + def bind_backing(self): + """ + Bind a resource backing object via RPC + """ + node = get_node(self._node_id) + self._backing_id = node.proxy.bind(config) + + def unbind_backing(self): + """ + Create a resource backing object via RPC + """ + node = get_node(self._node_id) + self._backing_id = node.proxy.unbind(config) + + @property + def reference(self): + return {'node': self.node_id, 'id': self.backing_id} + + @property + def node_id(self): + """ + Get the node id of the resource backing + """ + return self._node_id + + @property + def backing_id(self): + """ + Get the resource backing id + """ + return self._backing_id + + +class _Resource(ABC): + """ + A resource defines what users request, it's independent of a VM, + users can request a kind of resources for any purpose, it 
can bind + several allocated resource backings at different worker nodes. + + The common attributes of a resource: + meta: + resource id + access: + nodes: + permission: + references: + node id + backing id + spec: + resource pood id + specific attributes + """ + + _RESOURCE_TYPE = None + + def __init__(self, resource_config): + self._id = uuid.uuid4() + self._name = None + self._bindings = dict() + self._initialize(resource_config) + + def _initialize(self, resource_config): + self._pool_id = resource_config.get('pool_id') + + @property + def resource_type(cls): + raise cls._RESOURCE_TYPE + + @property + def resource_id(self): + return self._id + + @property + def resource_pool(self): + return self._pool_id + + @property + @abstractmethod + def resource_info(self): + """ + Static resource configurations as well as the dynamic ones, + the former comes from users' settings while the latter comes + from the allocation accessed via the worker nodes + """ + raise NotImplemented + + @abstractmethod + def info_bindings(self): + raise NotImplemented + + @abstractmethod + def create_bindings(self, nodes): + """ + Create the bindings on the specified worker nodes + """ + raise NotImplemented + + @abstractmethod + def destroy_bindings(self, nodes): + """ + Destroy the bindings on the specified worker nodes + """ + raise NotImplemented + + @abstractmethod + def update_bindings(self, config): + raise NotImplementedError + + @abstractmethod + def _update_meta(self, new_meta): + raise NotImplementedError + + @abstractmethod + def _update_spec(self, new_spec): + raise NotImplementedError + + def update_config(self, new_config): + meta = new_config.get('meta') + if meta is not None: + self._update_meta(meta) + + spec = new_config.get('spec') + if spec is not None: + self._update_spec(spec) diff --git a/virttest/vt_resmgr/storage/__init__.py b/virttest/vt_resmgr/storage/__init__.py new file mode 100644 index 0000000000..cf760f8000 --- /dev/null +++ 
class _DirPool(_ResourcePool):
    """Local directory backed storage pool."""

    # Bug fix: the pool type was copy-pasted as 'nfs', which collided
    # with _NfsPool in the PoolCollections registry (the later
    # registration overwrote this class, making _DirPool unreachable)
    _POOL_TYPE = 'dir'

    def _initialize(self, pool_config):
        # Bug fix: calling super().__init__ here recursed forever
        # (base __init__ -> _initialize -> __init__ -> ...); chain the
        # parent _initialize hook instead
        super()._initialize(pool_config)
        self._root_dir = pool_config['root_dir']

    def create_resource(self, resource_config):
        """
        Create (but do not allocate) a resource in this pool.

        :return: the id of the created resource
        """
        spec = resource_config['spec']
        cls = _get_resource_class(spec['type'])
        res = cls(resource_config)
        self._resources[res.resource_id] = res
        return res.resource_id
class _DirFileVolume(_FileVolume):
    """
    The dir file-based volume (the original docstring said "nfs" —
    copy-paste from the nfs module)

    Resource attributes:
      meta:
        resource id
        references: node id, reference id
      spec:
        size, name, path
    """

    def _initialize(self, resource_config):
        super()._initialize(resource_config)
        spec = resource_config['spec']
        self._name = spec['name']
        self._capacity = spec['size']
        self._allocation = 0

    def create_bindings(self, pool_id, nodes):
        """
        A local dir resource has only one binding,
        it is allocated when creating the binding
        """
        if len(nodes) != 1:
            LOG.warning('A dir resource should have one binding only')

        # Bug fix: the original keyed the mapping by the undefined
        # name `node_id`; bind to the first (only) node
        node_id = nodes[0]
        # NOTE(review): _ResourceBinding is not imported in this
        # module — confirm the import against the resource module
        binding = _ResourceBinding(pool_id, node_id)
        binding.create_backing(self.resource_info, True)
        self._bindings[node_id] = binding

    def destroy_bindings(self, nodes=None):
        """
        Always release the resource when destroying its binding
        """
        node_id = list(self._bindings.keys())[0]
        self._bindings[node_id].destroy_backing(True)
        del self._bindings[node_id]

    def _update_binding(self, binding, config):
        pass

    def update_bindings(self, config):
        for node_id, binding in self._bindings.items():
            # Bug fix: the helper expects the binding object, the
            # original passed the node id
            self._update_binding(binding, config)

    @property
    def resource_info(self):
        pass


def _get_resource_class(resource_type):
    # Only one resource type lives in a dir pool
    return _DirFileVolume
class _NfsPool(_ResourcePool):
    """NFS backed storage pool."""

    _POOL_TYPE = 'nfs'

    def _initialize(self, pool_config):
        # Bug fix: calling super().__init__ here recursed forever
        # (base __init__ -> _initialize -> __init__ -> ...); chain the
        # parent _initialize hook instead
        super()._initialize(pool_config)
        self._nfs_server = pool_config['nfs_server_ip']
        self._export_dir = pool_config['nfs_mount_src']

    def create_resource(self, resource_config):
        """
        Create (but do not allocate) a resource in this pool.

        :return: the id of the created resource
        """
        spec = resource_config['spec']
        cls = get_resource_class(spec['type'])
        res = cls(resource_config)
        self._resources[res.resource_id] = res
        return res.resource_id
class _NfsFileVolume(_FileVolume):
    """
    The nfs file-based volume

    Resource attributes:
      meta:
        resource id
        references: node id, reference id
      spec:
        size, name, path
    """

    def _initialize(self, resource_config):
        super()._initialize(resource_config)
        spec = resource_config['spec']
        self._name = spec['name']
        self._capacity = spec['size']
        self._allocation = 0

    # Note: the original file interposed a module-level triple-quoted
    # block (dead allocate/release code) here, which dedented to
    # column 0 and cut the class body in two; it has been dropped.

    def _create_binding(self, pool_id, node_id, need_allocate=False):
        # One binding per node; allocation happens inside the backing
        binding = _ResourceBinding(pool_id, node_id)
        binding.create_backing(self.resource_info, need_allocate)
        self._bindings[node_id] = binding

    def create_bindings(self, pool_id, nodes):
        """
        Create the bindings for a nfs resource
        A NFS resource will only be allocated once when creating the
        first binding, for the other bindings, there's no allocation
        """
        allocated = bool(self._bindings)
        created = list()
        node_list = list(nodes)
        try:
            # Create the first binding with allocation
            if not allocated:
                node_id = node_list.pop(0)
                self._create_binding(pool_id, node_id, True)
                created.append(node_id)

            # Create the remaining bindings without allocation
            for node_id in node_list:
                self._create_binding(pool_id, node_id, False)
                created.append(node_id)
        except Exception:
            # Roll back the bindings created so far
            for node_id in created:
                self._destroy_binding(node_id)
            # Bug fix: the original swallowed the error and returned
            # as if it had succeeded; propagate it after cleanup
            raise

    def _destroy_binding(self, node_id):
        # Release the allocation together with the last binding
        need_release = len(self._bindings) == 1
        binding = self._bindings[node_id]
        binding.destroy_backing(need_release)
        del self._bindings[node_id]

    def destroy_bindings(self, nodes=None):
        # Bug fix: the original call read
        # `self._destroy_binding(self, node_id):` — a stray trailing
        # colon (SyntaxError) and a redundant explicit `self`
        nodes = list(self._bindings.keys()) if not nodes else nodes
        for node_id in nodes:
            self._destroy_binding(node_id)

    def _update_binding(self, binding, config):
        pass

    def update_bindings(self, config):
        for node_id, binding in self._bindings.items():
            # Bug fix: the helper expects the binding object, the
            # original passed the node id
            self._update_binding(binding, config)

    @property
    def resource_info(self):
        pass


def get_resource_class(resource_type):
    # Only one resource type lives in an nfs pool
    return _NfsFileVolume
def __init__(self): + self._pools = dict() # {pool id: pool object} + + @classmethod + def _get_pool_class(cls, pool_type): + return cls._POOL_CLASSES.get(pool_type) + + def initialize(self, pool_config_list): + for config in pool_config_list: + pool_id = self.register_pool(config) + self.attach_pool(pool_id) + + def deinitialize(self): + for pool_id in self.pools: + self.unregister_pool(pool_id) + + def get_pool_by_name(self, pool_name): + pools = [p for p in self.pools.values() if p.pool_name == pool_name] + return pools[0] if pools else None + + def get_pool_by_id(self, pool_id): + return self.pools.get(pool_id, None) + + def get_pool_by_resource(self, resource_id): + pools = [p for p in self.pools.values() if resource_id in p.resources] + return pools[0] if pools else None + + def register_pool(self, pool_config): + pool_type = pool_config['type'] + pool_class = PoolCollections.get_pool_class(pool_type) + pool = pool_class(pool_config) + self._pools[pool.pool_id] = pool + return pool.pool_id + + def unregister_pool(self, pool_id): + """ + The pool should be detached from all worker nodes + """ + pool = self.pools[pool_id] + self.detach_pool(pool_id) + del(self._pools[pool_id]) + + def attach_pool_to(self, pool, node): + """ + Attach a pool to a specific node + """ + access_config = pool.attaching_nodes[node.node_id] + node.proxy.create_pool_connection(pool.config, access_config) + + def attach_pool(self, pool_id): + pool = self.get_pool_by_id(pool_id) + for node_id in pool.attaching_nodes: + node = get_node(node_id) + self.attach_pool_to(pool, node) + + def detach_pool_from(self, pool, node): + """ + Detach a pool from a specific node + """ + node.proxy.destroy_pool_connection(pool.pool_id) + + def detach_pool(self, pool_id): + pool = self.get_pool_by_id(pool_id) + for node_id in pool.attaching_nodes: + node = get_node(node_id) + self.detach_pool_from(pool, node) + + def info_pool(self, pool_id): + """ + Get the pool's information, including 'meta' and 'spec': 
+ meta: + e.g. version for tdx, 1.0 or 1.5 + spec: + common specific attributes + e.g. nfs_server for nfs pool + node-specific attributes + e.g. [node1:{path:/mnt1,permission:rw}, node2:{}] + """ + info = dict() + pool = self.get_pool_by_id(pool_id) + info.update(pool.config) + for node_id in pool.attaching_nodes: + node = get_node(node_id) + access_info = node.proxy.get_pool_connection(pool_id) + info.update(access_info) + + def pool_capability(self, pool_id): + pool = self.get_pool_by_id(pool_id) + return pool.capability + + @property + def pools(self): + return self._pools + + +vt_resmgr = _VTResourceManager()