From adf2dd568039e08132af8070f7c81a9eb227fe16 Mon Sep 17 00:00:00 2001
From: Bartlomiej Gmerek <42570669+Gmerold@users.noreply.github.com>
Date: Tue, 9 Jan 2024 17:03:02 +0100
Subject: [PATCH] feat: Adding DPDK support in UPF (#57)

---
 README.md                                   |  23 +-
 config.yaml                                 |  53 +-
 .../kubernetes_charm_libraries/v0/multus.py |  27 +-
 requirements.txt                            |   3 +
 src/charm.py                                | 321 ++++++-
 src/dpdk.py                                 | 180 ++++
 tests/unit/test_charm.py                    | 822 +++++++++++++++++-
 tests/unit/test_dpdk.py                     | 305 +++++++
 8 files changed, 1648 insertions(+), 86 deletions(-)
 create mode 100644 src/dpdk.py
 create mode 100644 tests/unit/test_dpdk.py

diff --git a/README.md b/README.md
index 249d9eb9..7787c162 100644
--- a/README.md
+++ b/README.md
@@ -32,7 +32,28 @@ juju deploy sdcore-upf-k8s --trust --channel=edge
 
 ### Exposing the UPF externally
 
-If a load balancer such as `metallb` is present, the charm will configure an externally accessible service port with the load balancer upon install of the charm.
+If a load balancer such as `metallb` is present, the charm will configure an externally accessible
+service port with the load balancer upon install of the charm.
+
+### Running UPF in DPDK mode
+
+By default, UPF runs in `af_packet` mode. To run UPF in `dpdk` mode, use the `upf-mode` config
+option, for example:
+
+```shell
+juju deploy sdcore-upf-k8s --trust --channel=edge --config upf-mode="dpdk" --config enable-hugepages=True --config access-interface-mac-address="00:b0:d0:63:c2:26" --config core-interface-mac-address="00:b0:d0:63:c2:36"
+```
+
+As shown in the example above, when running UPF in `dpdk` mode, it is necessary to enable
+HugePages and pass the MAC addresses of the `Access` and `Core` interfaces.
+
+```{note}
+The example command above assumes the default network configuration. If needed, the network
+configs should be changed to match the actual configuration.
+```
+
+For detailed instructions on running UPF in `dpdk` mode, please visit
+[How-to: Running UPF in DPDK mode](https://canonical-charmed-5g.readthedocs-hosted.com/en/latest/how-to/running_upf_in_dpdk_mode/).
 
 ## Image
 
diff --git a/config.yaml b/config.yaml
index 1136e7e2..2cea8d32 100644
--- a/config.yaml
+++ b/config.yaml
@@ -1,4 +1,9 @@
 options:
+  upf-mode:
+    type: string
+    default: af_packet
+    description: |
+      Either `af_packet` (default) or `dpdk`.
   dnn:
     type: string
     default: internet
@@ -7,25 +12,14 @@
     type: string
     default: 192.168.251.0/24
     description: gNodeB subnet.
-  core-interface:
-    type: string
-    description: Interface on the host to use for the Core Network.
-  core-ip:
-    type: string
-    default: 192.168.250.3/24
-    description: IP address used by the UPF's Core interface.
-  core-gateway-ip:
-    type: string
-    default: 192.168.250.1
-    description: Gateway IP address to the Core Network.
-  core-interface-mtu-size:
-    type: int
-    description: |
-      MTU for the core interface (1200 <= MTU <= 65535) in bytes.
-      If not specified, Multus will use its default value (typically 1500).
   access-interface:
     type: string
     description: Interface on the host to use for the Access Network.
+  access-interface-mac-address:
+    type: string
+    description: |
+      MAC address of the UPF's Access interface.
+      Required only if `upf-mode` is `dpdk`.
   access-ip:
     type: string
     default: 192.168.252.3/24
@@ -39,9 +33,30 @@
     description: |
       MTU for the access interface (1200 <= MTU <= 65535) in bytes.
      If not specified, Multus will use its default value (typically 1500).
+  core-interface:
+    type: string
+    description: Interface on the host to use for the Core Network.
+ core-interface-mac-address: + type: string + description: | + MAC address of the UPF's Core interface. + Required only if `upf-mode` is `dpdk`. + core-ip: + type: string + default: 192.168.250.3/24 + description: IP address used by the UPF's Core interface. + core-gateway-ip: + type: string + default: 192.168.250.1 + description: Gateway IP address to the Core Network. + core-interface-mtu-size: + type: int + description: | + MTU for the core interface (1200 <= MTU <= 65535) in bytes. + If not specified, Multus will use its default value (typically 1500). external-upf-hostname: type: string - description: |- + description: | Externally accessible FQDN for the UPF. If not provided, it will default to the LoadBalancer Service hostname. If that is not available, it will default to the internal @@ -49,4 +64,6 @@ options: enable-hugepages: type: boolean default: false - description: When enabled, HugePages of 1Gi will be used for a total of 2Gi HugePages. + description: | + When enabled, HugePages of 1Gi will be used for a total of 2Gi HugePages. + HugePages must be enabled if `upf-mode` is `dpdk`. diff --git a/lib/charms/kubernetes_charm_libraries/v0/multus.py b/lib/charms/kubernetes_charm_libraries/v0/multus.py index 4b25dde9..25e83c26 100644 --- a/lib/charms/kubernetes_charm_libraries/v0/multus.py +++ b/lib/charms/kubernetes_charm_libraries/v0/multus.py @@ -123,7 +123,7 @@ def _on_config_changed(self, event: EventBase): # Increment this PATCH version before using `charmcraft publish-lib` or reset # to 0 if you are raising the major API version -LIBPATCH = 12 +LIBPATCH = 13 logger = logging.getLogger(__name__) @@ -148,6 +148,8 @@ def __eq__(self, other): class NetworkAnnotation: """NetworkAnnotation.""" + NETWORK_ANNOTATION_RESOURCE_KEY = "k8s.v1.cni.cncf.io/networks" + name: str interface: str mac: Optional[str] = None @@ -359,7 +361,7 @@ def patch_statefulset( template=PodTemplateSpec( metadata=ObjectMeta( annotations={ - "k8s.v1.cni.cncf.io/networks": json.dumps( + NetworkAnnotation.NETWORK_ANNOTATION_RESOURCE_KEY: json.dumps( [ network_annotation.dict() for network_annotation in network_annotations @@ -414,7 +416,9 @@ def unpatch_statefulset( selector=statefulset.spec.selector, # type: ignore[attr-defined] serviceName=statefulset.spec.serviceName, # type: ignore[attr-defined] template=PodTemplateSpec( - metadata=ObjectMeta(annotations={"k8s.v1.cni.cncf.io/networks": "[]"}), + metadata=ObjectMeta( + annotations={NetworkAnnotation.NETWORK_ANNOTATION_RESOURCE_KEY: "[]"} + ), spec=PodSpec(containers=[container]), ), ) @@ -506,10 +510,10 @@ def _pod_is_patched( def _annotations_contains_multus_networks( annotations: dict, network_annotations: list[NetworkAnnotation] ) -> bool: - if "k8s.v1.cni.cncf.io/networks" not in annotations: + if NetworkAnnotation.NETWORK_ANNOTATION_RESOURCE_KEY not in annotations: return False try: - if json.loads(annotations["k8s.v1.cni.cncf.io/networks"]) != [ + if json.loads(annotations[NetworkAnnotation.NETWORK_ANNOTATION_RESOURCE_KEY]) != [ network_annotation.dict() for network_annotation in network_annotations ]: return False @@ -568,7 +572,7 @@ def __init__( self, charm: CharmBase, network_attachment_definitions_func: Callable[[], list[NetworkAttachmentDefinition]], - network_annotations: list[NetworkAnnotation], + network_annotations_func: Callable[[], list[NetworkAnnotation]], container_name: str, refresh_event: BoundEvent, cap_net_admin: bool = False, @@ -580,7 +584,8 @@ def __init__( charm: Charm object network_attachment_definitions_func: A callable to a 
function returning a list of `NetworkAttachmentDefinition` to be created. - network_annotations: List of NetworkAnnotation. + network_annotations_func: A callable to a function returning a list + of `NetworkAnnotation` to be added to the container. container_name: Container name cap_net_admin: Container requires NET_ADMIN capability privileged: Container requires privileged security context @@ -590,7 +595,7 @@ def __init__( super().__init__(charm, "kubernetes-multus") self.kubernetes = KubernetesClient(namespace=self.model.name) self.network_attachment_definitions_func = network_attachment_definitions_func - self.network_annotations = network_annotations + self.network_annotations_func = network_annotations_func self.container_name = container_name self.cap_net_admin = cap_net_admin self.privileged = privileged @@ -608,7 +613,7 @@ def _configure_multus(self, event: BoundEvent) -> None: if not self._statefulset_is_patched(): self.kubernetes.patch_statefulset( name=self.model.app.name, - network_annotations=self.network_annotations, + network_annotations=self.network_annotations_func(), container_name=self.container_name, cap_net_admin=self.cap_net_admin, privileged=self.privileged, @@ -682,7 +687,7 @@ def _statefulset_is_patched(self) -> bool: """Returns whether statefuset is patched with network annotations and capabilities.""" return self.kubernetes.statefulset_is_patched( name=self.model.app.name, - network_annotations=self.network_annotations, + network_annotations=self.network_annotations_func(), container_name=self.container_name, cap_net_admin=self.cap_net_admin, privileged=self.privileged, @@ -692,7 +697,7 @@ def _pod_is_ready(self) -> bool: """Returns whether pod is ready with network annotations and capabilities.""" return self.kubernetes.pod_is_ready( pod_name=self._pod, - network_annotations=self.network_annotations, + network_annotations=self.network_annotations_func(), container_name=self.container_name, cap_net_admin=self.cap_net_admin, privileged=self.privileged, diff --git a/requirements.txt b/requirements.txt index 54ab53ca..65ee728c 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,7 +1,10 @@ +httpx +ipaddress ops jinja2 lightkube lightkube-models +macaddress pydantic pytest-interface-tester PyYAML>=6.0.1 diff --git a/src/charm.py b/src/charm.py index 0faa8858..471c3fb6 100755 --- a/src/charm.py +++ b/src/charm.py @@ -9,8 +9,9 @@ import logging import time from subprocess import check_output -from typing import Any, Dict, Optional +from typing import Any, Dict, List, Optional +import macaddress # type: ignore[import] from charms.kubernetes_charm_libraries.v0.hugepages_volumes_patch import ( # type: ignore[import] HugePagesVolume, KubernetesHugePagesPatchCharmLib, @@ -37,6 +38,8 @@ from ops.model import ActiveStatus, BlockedStatus, Container, ModelError, WaitingStatus from ops.pebble import ExecError, Layer +from dpdk import DPDK + logger = logging.getLogger(__name__) BESSD_CONTAINER_CONFIG_PATH = "/etc/bess/conf" @@ -48,14 +51,16 @@ CORE_INTERFACE_NAME = "core" ACCESS_INTERFACE_BRIDGE_NAME = "access-br" CORE_INTERFACE_BRIDGE_NAME = "core-br" +DPDK_ACCESS_INTERFACE_RESOURCE_NAME = "intel.com/intel_sriov_vfio_access" +DPDK_CORE_INTERFACE_RESOURCE_NAME = "intel.com/intel_sriov_vfio_core" CONFIG_FILE_NAME = "upf.json" BESSCTL_CONFIGURE_EXECUTED_FILE_NAME = "bessctl_configure_executed" -UPF_MODE = "af_packet" BESSD_PORT = 10514 PROMETHEUS_PORT = 8080 PFCP_PORT = 8805 REQUIRED_CPU_EXTENSIONS = ["avx2", "rdrand"] REQUIRED_CPU_EXTENSIONS_HUGEPAGES = ["pdpe1gb"] 
+SUPPORTED_UPF_MODES = ["af_packet", "dpdk"] # The default field manager set when using kubectl to create resources DEFAULT_FIELD_MANAGER = "controller" @@ -117,16 +122,7 @@ def __init__(self, *args): charm=self, container_name=self._bessd_container_name, cap_net_admin=True, - network_annotations=[ - NetworkAnnotation( - name=ACCESS_NETWORK_ATTACHMENT_DEFINITION_NAME, - interface=ACCESS_INTERFACE_NAME, - ), - NetworkAnnotation( - name=CORE_NETWORK_ATTACHMENT_DEFINITION_NAME, - interface=CORE_INTERFACE_NAME, - ), - ], + network_annotations_func=self._generate_network_annotations, network_attachment_definitions_func=self._network_attachment_definitions_from_config, refresh_event=self.on.nad_config_changed, ) @@ -298,38 +294,153 @@ def _volumes_request_func_from_config(self) -> list[HugePagesVolume]: return [HugePagesVolume(mount_path="/dev/hugepages", size="1Gi", limit="2Gi")] return [] - def _network_attachment_definitions_from_config( - self, - ) -> list[NetworkAttachmentDefinition]: + def _generate_network_annotations(self) -> List[NetworkAnnotation]: + """Generates a list of NetworkAnnotations to be used by UPF's StatefulSet. + + Returns: + List[NetworkAnnotation]: List of NetworkAnnotations + """ + access_network_annotation = NetworkAnnotation( + name=ACCESS_NETWORK_ATTACHMENT_DEFINITION_NAME, + interface=ACCESS_INTERFACE_NAME, + ) + core_network_annotation = NetworkAnnotation( + name=CORE_NETWORK_ATTACHMENT_DEFINITION_NAME, + interface=CORE_INTERFACE_NAME, + ) + if self._get_upf_mode() == "dpdk": + access_network_annotation.mac = self._get_access_interface_mac_address() + access_network_annotation.ips = [self._get_access_network_ip_config()] + core_network_annotation.mac = self._get_core_interface_mac_address() + core_network_annotation.ips = [self._get_core_network_ip_config()] + return [access_network_annotation, core_network_annotation] + + def _network_attachment_definitions_from_config(self) -> list[NetworkAttachmentDefinition]: """Returns list of Multus NetworkAttachmentDefinitions to be created based on config. Returns: network_attachment_definitions: list[NetworkAttachmentDefinition] """ - access_nad_config = self._get_access_nad_config() + if self._get_upf_mode() == "dpdk": + access_nad = self._create_dpdk_access_nad_from_config() + core_nad = self._create_dpdk_core_nad_from_config() + else: + access_nad = self._create_access_nad_from_config() + core_nad = self._create_core_nad_from_config() + + return [access_nad, core_nad] + def _create_access_nad_from_config(self) -> NetworkAttachmentDefinition: + """Returns a NetworkAttachmentDefinition for the Access interface. + + Returns: + NetworkAttachmentDefinition: NetworkAttachmentDefinition object + """ + access_nad_config = self._get_access_nad_base_config() + access_nad_config["ipam"].update( + {"addresses": [{"address": self._get_access_network_ip_config()}]} + ) if access_interface := self._get_access_interface_config(): access_nad_config.update({"type": "macvlan", "master": access_interface}) else: access_nad_config.update({"type": "bridge", "bridge": ACCESS_INTERFACE_BRIDGE_NAME}) - core_nad_config = self._get_core_nad_config() + return NetworkAttachmentDefinition( + metadata=ObjectMeta(name=ACCESS_NETWORK_ATTACHMENT_DEFINITION_NAME), + spec={"config": json.dumps(access_nad_config)}, + ) + def _create_core_nad_from_config(self) -> NetworkAttachmentDefinition: + """Returns a NetworkAttachmentDefinition for the Core interface. 
+ + Returns: + NetworkAttachmentDefinition: NetworkAttachmentDefinition object + """ + core_nad_config = self._get_core_nad_base_config() + core_nad_config["ipam"].update( + {"addresses": [{"address": self._get_core_network_ip_config()}]} + ) if core_interface := self._get_core_interface_config(): core_nad_config.update({"type": "macvlan", "master": core_interface}) else: core_nad_config.update({"type": "bridge", "bridge": CORE_INTERFACE_BRIDGE_NAME}) - return [ - NetworkAttachmentDefinition( - metadata=ObjectMeta(name=ACCESS_NETWORK_ATTACHMENT_DEFINITION_NAME), - spec={"config": json.dumps(access_nad_config)}, + return NetworkAttachmentDefinition( + metadata=ObjectMeta(name=CORE_NETWORK_ATTACHMENT_DEFINITION_NAME), + spec={"config": json.dumps(core_nad_config)}, + ) + + def _create_dpdk_access_nad_from_config(self) -> NetworkAttachmentDefinition: + """Returns a DPDK-compatible NetworkAttachmentDefinition for the Access interface. + + Returns: + NetworkAttachmentDefinition: NetworkAttachmentDefinition object + """ + access_nad_config = self._get_access_nad_base_config() + access_nad_config.update({"type": "vfioveth"}) + + return NetworkAttachmentDefinition( + metadata=ObjectMeta( + name=ACCESS_NETWORK_ATTACHMENT_DEFINITION_NAME, + annotations={ + "k8s.v1.cni.cncf.io/resourceName": DPDK_ACCESS_INTERFACE_RESOURCE_NAME, + }, ), - NetworkAttachmentDefinition( - metadata=ObjectMeta(name=CORE_NETWORK_ATTACHMENT_DEFINITION_NAME), - spec={"config": json.dumps(core_nad_config)}, + spec={"config": json.dumps(access_nad_config)}, + ) + + def _create_dpdk_core_nad_from_config(self) -> NetworkAttachmentDefinition: + """Returns a DPDK-compatible NetworkAttachmentDefinition for the Core interface. + + Returns: + NetworkAttachmentDefinition: NetworkAttachmentDefinition object + """ + core_nad_config = self._get_core_nad_base_config() + core_nad_config.update({"type": "vfioveth"}) + + return NetworkAttachmentDefinition( + metadata=ObjectMeta( + name=CORE_NETWORK_ATTACHMENT_DEFINITION_NAME, + annotations={ + "k8s.v1.cni.cncf.io/resourceName": DPDK_CORE_INTERFACE_RESOURCE_NAME, + }, ), - ] + spec={"config": json.dumps(core_nad_config)}, + ) + + def _get_access_nad_base_config(self) -> Dict[Any, Any]: + """Base Access NetworkAttachmentDefinition config to be extended according to charm config. + + Returns: + config (dict): Base Access NAD config + """ + base_access_nad_config = { + "cniVersion": "0.3.1", + "ipam": { + "type": "static", + }, + "capabilities": {"mac": True}, + } + if access_mtu := self._get_access_interface_mtu_config(): + base_access_nad_config.update({"mtu": access_mtu}) + return base_access_nad_config + + def _get_core_nad_base_config(self) -> Dict[Any, Any]: + """Base Core NetworkAttachmentDefinition config to be extended according to charm config. + + Returns: + config (dict): Base Core NAD config + """ + base_core_nad_config = { + "cniVersion": "0.3.1", + "ipam": { + "type": "static", + }, + "capabilities": {"mac": True}, + } + if core_mtu := self._get_core_interface_mtu_config(): + base_core_nad_config.update({"mtu": core_mtu}) + return base_core_nad_config def _write_bessd_config_file(self, content: str) -> None: """Write the configuration file for the 5G UPF service. 
@@ -389,6 +500,8 @@ def _on_config_changed(self, event: EventBase): return self.on.nad_config_changed.emit() self.on.hugepages_volumes_config_changed.emit() + if self._get_upf_mode() == "dpdk": + self._configure_bessd_for_dpdk() if not self._bessd_container.can_connect(): self.unit.status = WaitingStatus("Waiting for bessd container to be ready") return @@ -400,6 +513,11 @@ def _on_bessd_pebble_ready(self, event: EventBase) -> None: """Handle Pebble ready event.""" if not self.unit.is_leader(): return + if invalid_configs := self._get_invalid_configs(): + self.unit.status = BlockedStatus( + f"The following configurations are not valid: {invalid_configs}" + ) + return if not self._kubernetes_multus.is_ready(): self.unit.status = WaitingStatus("Waiting for Multus to be ready") return @@ -429,7 +547,7 @@ def _configure_bessd_workload(self) -> None: core_ip_address = self._get_core_network_ip_config() content = render_bessd_config_file( upf_hostname=self._upf_hostname, - upf_mode=UPF_MODE, + upf_mode=self._get_upf_mode(), # type: ignore[arg-type] access_interface_name=ACCESS_INTERFACE_NAME, core_interface_name=CORE_INTERFACE_NAME, core_ip_address=core_ip_address.split("/")[0] if core_ip_address else "", @@ -442,6 +560,7 @@ def _configure_bessd_workload(self) -> None: self._write_bessd_config_file(content=content) restart = True self._create_default_route() + self._create_ran_route() if not self._ip_tables_rule_exists(): self._create_ip_tables_rule() plan = self._bessd_container.get_plan() @@ -479,6 +598,17 @@ def _run_bess_configuration(self) -> None: time.sleep(2) raise TimeoutError("Timed out trying to run configuration for bess") + def _configure_bessd_for_dpdk(self) -> None: + """Configures bessd container for DPDK.""" + dpdk = DPDK( + statefulset_name=self.model.app.name, + namespace=self._namespace, + dpdk_access_interface_resource_name=DPDK_ACCESS_INTERFACE_RESOURCE_NAME, + dpdk_core_interface_resource_name=DPDK_CORE_INTERFACE_RESOURCE_NAME, + ) + if not dpdk.is_configured(container_name=self._bessd_container_name): + dpdk.configure(container_name=self._bessd_container_name) + def _is_bessctl_executed(self) -> bool: """Check if BESSD_CONFIG_CHECK_FILE_NAME exists. @@ -508,24 +638,66 @@ def _get_invalid_configs(self) -> list[str]: list: List of strings matching config keys. """ invalid_configs = [] + if not self._upf_mode_config_is_valid(): + invalid_configs.append("upf-mode") if not self._get_dnn_config(): invalid_configs.append("dnn") + if invalid_access_network_configs := self._get_invalid_access_network_configs(): + invalid_configs.extend(invalid_access_network_configs) + if invalid_core_network_configs := self._get_invalid_core_network_configs(): + invalid_configs.extend(invalid_core_network_configs) + if not self._gnb_subnet_config_is_valid(): + invalid_configs.append("gnb-subnet") + if invalid_dpdk_configs := self._get_invalid_dpdk_configs(): + invalid_configs.extend(invalid_dpdk_configs) + return invalid_configs + + def _get_invalid_access_network_configs(self) -> list[str]: + """Returns list of invalid configurations related to the Access network. + + Returns: + list: List of strings matching config keys. 
+ """ + invalid_configs = [] if not self._access_ip_config_is_valid(): invalid_configs.append("access-ip") - if not self._core_ip_config_is_valid(): - invalid_configs.append("core-ip") if not self._access_gateway_ip_config_is_valid(): invalid_configs.append("access-gateway-ip") - if not self._core_gateway_ip_config_is_valid(): - invalid_configs.append("core-gateway-ip") - if not self._gnb_subnet_config_is_valid(): - invalid_configs.append("gnb-subnet") if not self._access_interface_mtu_size_is_valid(): invalid_configs.append("access-interface-mtu-size") + return invalid_configs + + def _get_invalid_core_network_configs(self) -> list[str]: + """Returns list of invalid configurations related to the Core network. + + Returns: + list: List of strings matching config keys. + """ + invalid_configs = [] + if not self._core_ip_config_is_valid(): + invalid_configs.append("core-ip") + if not self._core_gateway_ip_config_is_valid(): + invalid_configs.append("core-gateway-ip") if not self._core_interface_mtu_size_is_valid(): invalid_configs.append("core-interface-mtu-size") return invalid_configs + def _get_invalid_dpdk_configs(self) -> list[str]: + """Returns list of invalid configurations related to DPDK support. + + Returns: + list: List of strings matching config keys. + """ + invalid_configs = [] + if self._get_upf_mode() == "dpdk": + if not self._hugepages_is_enabled(): + invalid_configs.append("enable-hugepages") + if not self._access_interface_mac_address_is_valid(): + invalid_configs.append("access-interface-mac-address") + if not self._core_interface_mac_address_is_valid(): + invalid_configs.append("core-interface-mac-address") + return invalid_configs + def _create_default_route(self) -> None: """Creates ip route towards core network.""" self._exec_command_in_bessd_workload( @@ -533,6 +705,13 @@ def _create_default_route(self) -> None: ) logger.info("Default core network route created") + def _create_ran_route(self) -> None: + """Creates ip route towards gnb-subnet.""" + self._exec_command_in_bessd_workload( + command=f"ip route replace {self._get_gnb_subnet_config()} via {self._get_access_network_gateway_ip_config()}" # noqa: E501 + ) + logger.info("Route to gnb-subnet created") + def _ip_tables_rule_exists(self) -> bool: """Returns whether iptables rule already exists using the `--check` parameter. @@ -666,6 +845,17 @@ def _routectl_environment_variables(self) -> dict: "PYTHONUNBUFFERED": "1", } + def _get_upf_mode(self) -> Optional[str]: + return self.model.config.get("upf-mode") + + def _upf_mode_config_is_valid(self) -> bool: + """Checks whether the `upf-mode` config is valid. + + Returns: + bool: Whether the `upf-mode` config is valid + """ + return self._get_upf_mode() in SUPPORTED_UPF_MODES + def _get_dnn_config(self) -> Optional[str]: return self.model.config.get("dnn") @@ -686,6 +876,25 @@ def _get_core_network_ip_config(self) -> Optional[str]: def _get_core_interface_config(self) -> Optional[str]: return self.model.config.get("core-interface") + def _get_core_interface_mac_address(self) -> Optional[str]: + """Reads the `core-interface-mac-address` charm config. + + Returns: + Optional[str]: The `core-interface-mac-address` charm config + """ + return self.model.config.get("core-interface-mac-address") + + def _core_interface_mac_address_is_valid(self) -> bool: + """Checks whether the `core-interface-mac-address` config is valid. 
+ + Returns: + bool: Whether the `core-interface-mac-address` config is valid + """ + core_iface_mac_address = self._get_core_interface_mac_address() + if not core_iface_mac_address: + return False + return mac_address_is_valid(core_iface_mac_address) + def _access_ip_config_is_valid(self) -> bool: """Checks whether the access-ip config is valid. @@ -703,6 +912,25 @@ def _get_access_network_ip_config(self) -> Optional[str]: def _get_access_interface_config(self) -> Optional[str]: return self.model.config.get("access-interface") + def _get_access_interface_mac_address(self) -> Optional[str]: + """Reads the `access-interface-mac-address` charm config. + + Returns: + Optional[str]: The `access-interface-mac-address` charm config + """ + return self.model.config.get("access-interface-mac-address") + + def _access_interface_mac_address_is_valid(self) -> bool: + """Checks whether the `access-interface-mac-address` config is valid. + + Returns: + bool: Whether the `access-interface-mac-address` config is valid + """ + access_iface_mac_address = self._get_access_interface_mac_address() + if not access_iface_mac_address: + return False + return mac_address_is_valid(access_iface_mac_address) + def _core_gateway_ip_config_is_valid(self) -> bool: """Checks whether the core-gateway-ip config is valid. @@ -1009,5 +1237,34 @@ def ip_is_valid(ip_address: str) -> bool: return False +def ip_belongs_to_subnet(ip_address: str, subnet: str) -> bool: + """Checks whether given IP address belongs to a given subnet. + + Args: + ip_address (str): IP address + subnet (str): Subnet address + + Returns: + bool: True if given IP address belongs to a given subnet + """ + return ipaddress.ip_address(ip_address) in ipaddress.ip_network(subnet, strict=False) + + +def mac_address_is_valid(mac_address: str) -> bool: + """Check whether given MAC address is valid. + + Args: + mac_address (str): MAC address + + Returns: + bool: True if given MAC address is valid + """ + try: + macaddress.MAC(mac_address) + return True + except ValueError: + return False + + if __name__ == "__main__": # pragma: no cover main(UPFOperatorCharm) diff --git a/src/dpdk.py b/src/dpdk.py new file mode 100644 index 00000000..e82356a0 --- /dev/null +++ b/src/dpdk.py @@ -0,0 +1,180 @@ +#!/usr/bin/env python3 +# Copyright 2023 Canonical Ltd. +# See LICENSE file for licensing details. 
+ +"""Module used to update Kubernetes Statefulset to support DPDK.""" + +import logging +from typing import Iterable, Optional + +from lightkube import Client +from lightkube.core.exceptions import ApiError +from lightkube.models.core_v1 import Container +from lightkube.resources.apps_v1 import StatefulSet + +logger = logging.getLogger(__name__) + + +class DPDKError(Exception): + """DPDKError.""" + + def __init__(self, message: str): + self.message = message + super().__init__(self.message) + + +class DPDK: + """Class used to update Kubernetes Statefulset to support DPDK.""" + + def __init__( + self, + statefulset_name: str, + namespace: str, + dpdk_access_interface_resource_name: str, + dpdk_core_interface_resource_name: str, + ): + self.k8s_client = Client() + self.statefulset_name = statefulset_name + self.namespace = namespace + self.dpdk_resource_requirements = { + "requests": { + dpdk_access_interface_resource_name: "1", + dpdk_core_interface_resource_name: "1", + }, + "limits": { + dpdk_access_interface_resource_name: "1", + dpdk_core_interface_resource_name: "1", + }, + } + + def is_configured(self, container_name: str) -> bool: + """Checks whether the container config required for DPDK has been applied or not. + + Args: + container_name (str): Name of the container to update + + Returns: + bool: True if container config required for DPDK is applied, otherwise False + """ + statefulset = self._get_statefulset(self.statefulset_name, self.namespace) + if not statefulset: + raise RuntimeError("StatefulSet not found!") + container = self._get_container( + container_name=container_name, + containers=statefulset.spec.template.spec.containers, # type: ignore[union-attr] + ) + if not container: + raise RuntimeError("Container not found!") + if not container.securityContext.privileged: # type: ignore[union-attr] + return False + if not self._resource_requirements_applied(container, self.dpdk_resource_requirements): + return False + return True + + def configure(self, container_name: str) -> None: + """Applies config required by DPDK to a given container. + + Args: + container_name (str): Name of the container to update + """ + statefulset = self._get_statefulset(self.statefulset_name, self.namespace) + if not statefulset: + raise RuntimeError("StatefulSet not found!") + container = self._get_container( + container_name=container_name, + containers=statefulset.spec.template.spec.containers, # type: ignore[union-attr] + ) + if not container: + raise RuntimeError("Container not found!") + container.securityContext.privileged = True # type: ignore[union-attr] + self._apply_resource_requirements( + container=container, + resource_requirements=self.dpdk_resource_requirements, + ) + + self._replace_statefulset(statefulset=statefulset) + logger.info("Container %s configured for DPDK", container_name) + + def _get_statefulset(self, statefulset_name: str, namespace: str) -> Optional[StatefulSet]: + """Returns StatefulSet object with given name from given namespace. 
+ + Args: + statefulset_name (str): Name of the StatefulSet to get + namespace (str): Namespace to get StatefulSet from + + Returns: + StatefulSet: StatefulSet object + """ + try: + return self.k8s_client.get(res=StatefulSet, name=statefulset_name, namespace=namespace) # type: ignore[return-value] # noqa: E501 + except ApiError as e: + raise DPDKError(f"Could not get statefulset `{statefulset_name}`: {e.status.message}") + + @staticmethod + def _get_container( + containers: Iterable[Container], container_name: str + ) -> Optional[Container]: + """Returns Container object with given name. + + Args: + containers (Iterable[Container]): Containers to search among + container_name (str): Name of the Container to get + + Returns: + Container: Container object + """ + try: + return next(iter(filter(lambda ctr: ctr.name == container_name, containers))) + except StopIteration: + raise DPDKError(f"Container `{container_name}` not found") + + @staticmethod + def _apply_resource_requirements(container: Container, resource_requirements: dict) -> None: + """Applies given resource requests and limits to a given container. + + Args: + container (Container): Container to update + resource_requirements (dict): Dictionary of `requests` and `limits` + """ + for request, value in resource_requirements["requests"].items(): + container.resources.requests.update({request: int(value)}) # type: ignore[union-attr] + for limit, value in resource_requirements["limits"].items(): + container.resources.limits.update({limit: int(value)}) # type: ignore[union-attr] + logger.info( + "Applied ResourceRequirements to the %s container: %s", + container, + resource_requirements, + ) + + @staticmethod + def _resource_requirements_applied(container: Container, resource_requirements: dict) -> bool: + """Checks whether the container ResourceRequirements have been applied or not. + + Args: + container (Container): Container to check + resource_requirements (dict): Dictionary of `requests` and `limits` + + Returns: + bool: True if container ResourceRequirements have been applied, otherwise False + """ + for request, value in resource_requirements["requests"].items(): + if not container.resources.requests.get(request) == value: # type: ignore[union-attr] + return False + for limit, value in resource_requirements["limits"].items(): + if not container.resources.limits.get(limit) == value: # type: ignore[union-attr] + return False + return True + + def _replace_statefulset(self, statefulset: StatefulSet) -> None: + """Replaces StatefulSet. 
+ + Args: + statefulset (StatefulSet): StatefulSet object to replace + """ + try: + self.k8s_client.replace(obj=statefulset) + logger.info("Statefulset %s replaced", statefulset.metadata.name) # type: ignore[union-attr] # noqa: E501 + except ApiError as e: + raise DPDKError( + f"Could not replace statefulset `{statefulset.metadata.name}`: {e.status.message}" # type: ignore[union-attr] # noqa: E501, W505 + ) diff --git a/tests/unit/test_charm.py b/tests/unit/test_charm.py index 92832bb8..ab769aff 100644 --- a/tests/unit/test_charm.py +++ b/tests/unit/test_charm.py @@ -6,6 +6,7 @@ from unittest.mock import Mock, call, patch from charms.kubernetes_charm_libraries.v0.multus import ( # type: ignore[import] + NetworkAnnotation, NetworkAttachmentDefinition, ) from lightkube.models.core_v1 import Node, NodeStatus, ServicePort, ServiceSpec @@ -14,7 +15,16 @@ from ops import testing from ops.model import ActiveStatus, BlockedStatus, MaintenanceStatus, WaitingStatus -from charm import IncompatibleCPUError, UPFOperatorCharm +from charm import ( + ACCESS_INTERFACE_NAME, + ACCESS_NETWORK_ATTACHMENT_DEFINITION_NAME, + CORE_INTERFACE_NAME, + CORE_NETWORK_ATTACHMENT_DEFINITION_NAME, + DPDK_ACCESS_INTERFACE_RESOURCE_NAME, + DPDK_CORE_INTERFACE_RESOURCE_NAME, + IncompatibleCPUError, + UPFOperatorCharm, +) MULTUS_LIBRARY_PATH = "charms.kubernetes_charm_libraries.v0.multus" HUGEPAGES_LIBRARY_PATH = "charms.kubernetes_charm_libraries.v0.hugepages_volumes_patch" @@ -24,14 +34,17 @@ VALID_MTU_SIZE_1 = 65535 # Upper edge value VALID_MTU_SIZE_2 = 1200 # Lower edge value TEST_PFCP_PORT = 1234 -ACCESS_INTERFACE_NAME = "access-net" DEFAULT_ACCESS_IP = "192.168.252.3/24" INVALID_ACCESS_IP = "192.168.252.3/44" VALID_ACCESS_IP = "192.168.252.5/24" ACCESS_GW_IP = "192.168.252.1" GNB_SUBNET = "192.168.251.0/24" -CORE_IP = "192.168.250.3/24" +VALID_CORE_IP = "192.168.250.3/24" CORE_GW_IP = "192.168.250.1" +VALID_ACCESS_MAC = "00-b0-d0-63-c2-26" +INVALID_ACCESS_MAC = "something" +VALID_CORE_MAC = "00-b0-d0-63-c2-36" +INVALID_CORE_MAC = "wrong" def read_file(path: str) -> str: @@ -81,10 +94,114 @@ def test_given_bad_config_when_config_changed_then_status_is_blocked(self): BlockedStatus("The following configurations are not valid: ['dnn']"), ) + def test_given_empty_upf_mode_when_config_changed_then_status_is_blocked(self): + self.harness.update_config(key_values={"upf-mode": ""}) + + self.assertEqual( + self.harness.model.unit.status, + BlockedStatus("The following configurations are not valid: ['upf-mode']"), + ) + + def test_given_unsupported_upf_mode_when_config_changed_then_status_is_blocked(self): + self.harness.update_config(key_values={"upf-mode": "unsupported"}) + + self.assertEqual( + self.harness.model.unit.status, + BlockedStatus("The following configurations are not valid: ['upf-mode']"), + ) + + def test_given_upf_mode_set_to_dpdk_but_other_required_configs_not_set_when_config_changed_then_status_is_blocked( # noqa: E501 + self, + ): + self.harness.update_config(key_values={"upf-mode": "dpdk"}) + + self.assertEqual( + self.harness.model.unit.status, + BlockedStatus( + "The following configurations are not valid: ['enable-hugepages', 'access-interface-mac-address', 'core-interface-mac-address']" # noqa: E501, W505 + ), + ) + + @patch("charm.check_output") + @patch("lightkube.core.client.Client.list") + @patch("lightkube.core.client.GenericSyncClient", new=Mock) + def 
test_given_upf_mode_set_to_dpdk_and_hugepages_enabled_but_mac_addresses_of_access_and_core_interfaces_not_set_when_config_changed_then_status_is_blocked( # noqa: E501 + self, patched_list, patched_check_output + ): + patched_check_output.return_value = b"Flags: avx2 ssse3 fma cx16 rdrand pdpe1gb" + patched_list.side_effect = [ + [Node(status=NodeStatus(allocatable={"hugepages-1Gi": "3Gi"}))], + [], + [], + ] + self.harness.update_config(key_values={"upf-mode": "dpdk", "enable-hugepages": True}) + + self.assertEqual( + self.harness.model.unit.status, + BlockedStatus( + "The following configurations are not valid: ['access-interface-mac-address', 'core-interface-mac-address']" # noqa: E501, W505 + ), + ) + + @patch("charm.check_output") + @patch("lightkube.core.client.Client.list") + @patch("lightkube.core.client.GenericSyncClient", new=Mock) + def test_given_upf_mode_set_to_dpdk_and_hugepages_enabled_but_access_interface_mac_addresses_is_invalid_when_config_changed_then_status_is_blocked( # noqa: E501 + self, patched_list, patched_check_output + ): + patched_check_output.return_value = b"Flags: avx2 ssse3 fma cx16 rdrand pdpe1gb" + patched_list.side_effect = [ + [Node(status=NodeStatus(allocatable={"hugepages-1Gi": "3Gi"}))], + [], + [], + ] + self.harness.update_config( + key_values={ + "upf-mode": "dpdk", + "enable-hugepages": True, + "access-interface-mac-address": INVALID_ACCESS_MAC, + "core-interface-mac-address": VALID_CORE_MAC, + } + ) + + self.assertEqual( + self.harness.model.unit.status, + BlockedStatus( + "The following configurations are not valid: ['access-interface-mac-address']" + ), + ) + + @patch("charm.check_output") + @patch("lightkube.core.client.Client.list") + @patch("lightkube.core.client.GenericSyncClient", new=Mock) + def test_given_upf_mode_set_to_dpdk_and_hugepages_enabled_but_core_interface_mac_addresses_is_invalid_when_config_changed_then_status_is_blocked( # noqa: E501 + self, patched_list, patched_check_output + ): + patched_check_output.return_value = b"Flags: avx2 ssse3 fma cx16 rdrand pdpe1gb" + patched_list.side_effect = [ + [Node(status=NodeStatus(allocatable={"hugepages-1Gi": "3Gi"}))], + [], + [], + ] + self.harness.update_config( + key_values={ + "upf-mode": "dpdk", + "enable-hugepages": True, + "access-interface-mac-address": VALID_ACCESS_MAC, + "core-interface-mac-address": INVALID_CORE_MAC, + } + ) + + self.assertEqual( + self.harness.model.unit.status, + BlockedStatus( + "The following configurations are not valid: ['core-interface-mac-address']" + ), + ) + @patch(f"{MULTUS_LIBRARY_PATH}.KubernetesMultusCharmLib.is_ready") def test_given_bessd_config_file_not_yet_written_when_bessd_pebble_ready_then_config_file_is_written( # noqa: E501 - self, - _, + self, _ ): self.harness.handle_exec("bessd", [], result=0) self.harness.container_pebble_ready(container_name="bessd") @@ -97,8 +214,7 @@ def test_given_bessd_config_file_not_yet_written_when_bessd_pebble_ready_then_co @patch(f"{MULTUS_LIBRARY_PATH}.KubernetesMultusCharmLib.is_ready") def test_given_bessd_config_file_not_yet_written_when_config_storage_attached_then_config_file_is_written( # noqa: E501 - self, - _, + self, _ ): self.harness.handle_exec("bessd", [], result=0) self.harness.set_can_connect("bessd", True) @@ -170,6 +286,7 @@ def test_given_can_connect_to_bessd_when_bessd_pebble_ready_then_ip_route_is_cre ip_route_replace_called = False timeout = 0 environment = {} + replace_gnb_subnet_route_cmd = ["ip", "route", "replace", GNB_SUBNET, "via", ACCESS_GW_IP] def ip_handler(args: 
testing.ExecArgs) -> testing.ExecResult: nonlocal ip_route_replace_called @@ -180,8 +297,19 @@ def ip_handler(args: testing.ExecArgs) -> testing.ExecResult: environment = args.environment return testing.ExecResult(exit_code=0) - ip_cmd = ["ip", "route", "replace", "default", "via", CORE_GW_IP, "metric", "110"] - self.harness.handle_exec("bessd", ip_cmd, handler=ip_handler) + replace_default_route_cmd = [ + "ip", + "route", + "replace", + "default", + "via", + CORE_GW_IP, + "metric", + "110", + ] + + self.harness.handle_exec("bessd", replace_default_route_cmd, handler=ip_handler) + self.harness.handle_exec("bessd", replace_gnb_subnet_route_cmd, result=0) self.harness.handle_exec("bessd", ["iptables-legacy"], result=0) self.harness.handle_exec("bessd", ["/opt/bess/bessctl/bessctl"], result=0) patch_is_ready.return_value = True @@ -192,6 +320,47 @@ def ip_handler(args: testing.ExecArgs) -> testing.ExecResult: self.assertEqual(timeout, 30) self.assertEqual(environment, {}) + @patch(f"{MULTUS_LIBRARY_PATH}.KubernetesMultusCharmLib.is_ready") + def test_given_can_connect_to_bessd_when_bessd_pebble_ready_then_gnb_subnet_route_is_created( + self, patch_is_ready + ): + gnb_subnet_route_replace_called = False + timeout = 0 + environment = {} + replace_default_route_cmd = [ + "ip", + "route", + "replace", + "default", + "via", + CORE_GW_IP, + "metric", + "110", + ] + + def ip_handler(args: testing.ExecArgs) -> testing.ExecResult: + nonlocal gnb_subnet_route_replace_called + nonlocal timeout + nonlocal environment + gnb_subnet_route_replace_called = True + timeout = args.timeout + environment = args.environment + return testing.ExecResult(exit_code=0) + + replace_gnb_subnet_route_cmd = ["ip", "route", "replace", GNB_SUBNET, "via", ACCESS_GW_IP] + + self.harness.handle_exec("bessd", replace_gnb_subnet_route_cmd, handler=ip_handler) + self.harness.handle_exec("bessd", replace_default_route_cmd, result=0) + self.harness.handle_exec("bessd", ["iptables-legacy"], result=0) + self.harness.handle_exec("bessd", ["/opt/bess/bessctl/bessctl"], result=0) + patch_is_ready.return_value = True + + self.harness.container_pebble_ready(container_name="bessd") + + self.assertTrue(gnb_subnet_route_replace_called) + self.assertEqual(timeout, 30) + self.assertEqual(environment, {}) + @patch(f"{MULTUS_LIBRARY_PATH}.KubernetesMultusCharmLib.is_ready") def test_given_iptables_rule_is_not_yet_created_when_bessd_pebble_ready_then_rule_is_created( self, patch_is_ready @@ -678,7 +847,7 @@ def test_given_default_config_when_network_attachment_definitions_from_config_is "access-ip": DEFAULT_ACCESS_IP, "access-gateway-ip": ACCESS_GW_IP, "gnb-subnet": GNB_SUBNET, - "core-ip": CORE_IP, + "core-ip": VALID_CORE_IP, "core-gateway-ip": CORE_GW_IP, } ) @@ -699,17 +868,576 @@ def test_given_default_config_with_interfaces_when_network_attachment_definition "access-ip": DEFAULT_ACCESS_IP, "access-gateway-ip": ACCESS_GW_IP, "gnb-subnet": GNB_SUBNET, - "core-interface": "core-net", - "core-ip": CORE_IP, + "core-interface": CORE_INTERFACE_NAME, + "core-ip": VALID_CORE_IP, "core-gateway-ip": CORE_GW_IP, } ) nads = self.harness.charm._network_attachment_definitions_from_config() for nad in nads: config = json.loads(nad.spec["config"]) - self.assertEqual(config["master"], nad.metadata.name) + self.assertTrue(ACCESS_INTERFACE_NAME or CORE_INTERFACE_NAME in config["master"]) self.assertEqual(config["type"], "macvlan") + @patch("lightkube.core.client.Client.create") + @patch("ops.model.Container.get_service") + 
@patch("lightkube.core.client.Client.list") + @patch("lightkube.core.client.GenericSyncClient", new=Mock) + @patch( + f"{HUGEPAGES_LIBRARY_PATH}.KubernetesHugePagesPatchCharmLib.is_patched", + Mock(return_value=True), + ) + @patch(f"{MULTUS_LIBRARY_PATH}.KubernetesMultusCharmLib.is_ready", Mock(return_value=True)) + @patch("charm.DPDK.is_configured", Mock(return_value=True)) + def test_given_upf_configured_to_run_in_dpdk_mode_when_create_network_attachment_definitions_then_2_nads_are_returned( # noqa: E501 + self, patched_list, patch_get_service, kubernetes_create_object + ): + service_info_mock = Mock() + service_info_mock.is_running.return_value = True + patch_get_service.return_value = service_info_mock + patched_list.side_effect = [ + [Node(status=NodeStatus(allocatable={"hugepages-1Gi": "3Gi"}))], + [], + [], + ] + self.harness.update_config( + key_values={ + "upf-mode": "dpdk", + "enable-hugepages": True, + "access-interface-mac-address": VALID_ACCESS_MAC, + "core-interface-mac-address": VALID_CORE_MAC, + } + ) + + create_nad_calls = kubernetes_create_object.call_args_list + self.assertEqual(len(create_nad_calls), 2) + + @patch("lightkube.core.client.Client.create") + @patch("ops.model.Container.get_service") + @patch("lightkube.core.client.Client.list") + @patch("lightkube.core.client.GenericSyncClient", new=Mock) + @patch( + f"{HUGEPAGES_LIBRARY_PATH}.KubernetesHugePagesPatchCharmLib.is_patched", + Mock(return_value=True), + ) + @patch(f"{MULTUS_LIBRARY_PATH}.KubernetesMultusCharmLib.is_ready", Mock(return_value=True)) + @patch("charm.DPDK.is_configured", Mock(return_value=True)) + def test_given_upf_configured_to_run_in_dpdk_mode_when_create_network_attachment_definitions_then_nad_type_is_vfioveth( # noqa: E501 + self, patched_list, patch_get_service, kubernetes_create_object + ): + service_info_mock = Mock() + service_info_mock.is_running.return_value = True + patch_get_service.return_value = service_info_mock + patched_list.side_effect = [ + [Node(status=NodeStatus(allocatable={"hugepages-1Gi": "3Gi"}))], + [], + [], + ] + self.harness.update_config( + key_values={ + "upf-mode": "dpdk", + "enable-hugepages": True, + "access-interface-mac-address": VALID_ACCESS_MAC, + "core-interface-mac-address": VALID_CORE_MAC, + } + ) + + create_nad_calls = kubernetes_create_object.call_args_list + for create_nad_call in create_nad_calls: + create_nad_call_args = next( + iter(filter(lambda call_item: isinstance(call_item, dict), create_nad_call)) + ) + nad_config = json.loads(create_nad_call_args.get("obj").spec.get("config")) + self.assertEqual(nad_config["type"], "vfioveth") + + @patch("lightkube.core.client.Client.create") + @patch("ops.model.Container.get_service") + @patch("lightkube.core.client.Client.list") + @patch("lightkube.core.client.GenericSyncClient", new=Mock) + @patch( + f"{HUGEPAGES_LIBRARY_PATH}.KubernetesHugePagesPatchCharmLib.is_patched", + Mock(return_value=True), + ) + @patch(f"{MULTUS_LIBRARY_PATH}.KubernetesMultusCharmLib.is_ready", Mock(return_value=True)) + @patch("charm.DPDK.is_configured", Mock(return_value=True)) + def test_given_upf_configured_to_run_in_dpdk_mode_when_create_network_attachment_definitions_then_access_nad_has_valid_dpdk_access_resource_specified_in_annotations( # noqa: E501 + self, patched_list, patch_get_service, kubernetes_create_object + ): + service_info_mock = Mock() + service_info_mock.is_running.return_value = True + patch_get_service.return_value = service_info_mock + patched_list.side_effect = [ + 
[Node(status=NodeStatus(allocatable={"hugepages-1Gi": "3Gi"}))], + [], + [], + ] + self.harness.update_config( + key_values={ + "upf-mode": "dpdk", + "enable-hugepages": True, + "access-interface-mac-address": VALID_ACCESS_MAC, + "core-interface-mac-address": VALID_CORE_MAC, + } + ) + + def _get_create_access_nad_call(mock_call): + return next( + iter( + filter( + lambda call_item: isinstance(call_item, dict) + and call_item.get("obj").metadata.name # noqa: W503 + == ACCESS_NETWORK_ATTACHMENT_DEFINITION_NAME, # noqa: W503 + mock_call, + ) + ), + None, + ) + + create_nad_calls = kubernetes_create_object.mock_calls + create_access_nad_calls = [ + _get_create_access_nad_call(create_nad_call) + for create_nad_call in create_nad_calls + if _get_create_access_nad_call(create_nad_call) + ] + self.assertEqual(len(create_access_nad_calls), 1) + nad_annotations = create_access_nad_calls[0].get("obj").metadata.annotations + self.assertTrue( + DPDK_ACCESS_INTERFACE_RESOURCE_NAME + in nad_annotations["k8s.v1.cni.cncf.io/resourceName"] + ) + + @patch("lightkube.core.client.Client.create") + @patch("ops.model.Container.get_service") + @patch("lightkube.core.client.Client.list") + @patch("lightkube.core.client.GenericSyncClient", new=Mock) + @patch( + f"{HUGEPAGES_LIBRARY_PATH}.KubernetesHugePagesPatchCharmLib.is_patched", + Mock(return_value=True), + ) + @patch(f"{MULTUS_LIBRARY_PATH}.KubernetesMultusCharmLib.is_ready", Mock(return_value=True)) + @patch("charm.DPDK.is_configured", Mock(return_value=True)) + def test_given_upf_configured_to_run_in_dpdk_mode_when_create_network_attachment_definitions_then_core_nad_has_valid_dpdk_core_resource_specified_in_annotations( # noqa: E501 + self, patched_list, patch_get_service, kubernetes_create_object + ): + service_info_mock = Mock() + service_info_mock.is_running.return_value = True + patch_get_service.return_value = service_info_mock + patched_list.side_effect = [ + [Node(status=NodeStatus(allocatable={"hugepages-1Gi": "3Gi"}))], + [], + [], + ] + self.harness.update_config( + key_values={ + "upf-mode": "dpdk", + "enable-hugepages": True, + "access-interface-mac-address": VALID_ACCESS_MAC, + "core-interface-mac-address": VALID_CORE_MAC, + } + ) + + def _get_create_core_nad_call(mock_call): + return next( + iter( + filter( + lambda call_item: isinstance(call_item, dict) + and call_item.get("obj").metadata.name # noqa: W503 + == CORE_NETWORK_ATTACHMENT_DEFINITION_NAME, # noqa: W503 + mock_call, + ) + ), + None, + ) + + create_nad_calls = kubernetes_create_object.mock_calls + create_core_nad_calls = [ + _get_create_core_nad_call(create_nad_call) + for create_nad_call in create_nad_calls + if _get_create_core_nad_call(create_nad_call) + ] + self.assertEqual(len(create_core_nad_calls), 1) + nad_annotations = create_core_nad_calls[0].get("obj").metadata.annotations + self.assertTrue( + DPDK_CORE_INTERFACE_RESOURCE_NAME in nad_annotations["k8s.v1.cni.cncf.io/resourceName"] + ) + + @patch("lightkube.core.client.Client.patch") + @patch("ops.model.Container.get_service") + @patch("lightkube.core.client.GenericSyncClient", new=Mock) + @patch( + f"{HUGEPAGES_LIBRARY_PATH}.KubernetesHugePagesPatchCharmLib.is_patched", + Mock(return_value=True), + ) + @patch(f"{MULTUS_LIBRARY_PATH}.KubernetesMultusCharmLib.is_ready", Mock(return_value=True)) + @patch("charm.DPDK.is_configured", Mock(return_value=True)) + def test_given_upf_charm_configured_to_run_in_default_mode_when_patch_statefulset_then_2_network_annotations_are_created( # noqa: E501 + self, patch_get_service, 
kubernetes_statefulset_patch + ): + service_info_mock = Mock() + service_info_mock.is_running.return_value = True + patch_get_service.return_value = service_info_mock + self.harness.update_config() + patch_statefulset = kubernetes_statefulset_patch.call_args_list[0] + patch_statefulset_call_args = next( + iter( + filter( + lambda call_item: isinstance(call_item, dict) + and "StatefulSet" in str(call_item.get("obj")) # noqa: W503 + and call_item.get("name") == self.harness.charm.app.name, # noqa: W503 + patch_statefulset, + ) + ) + ) + network_annotations = json.loads( + patch_statefulset_call_args.get("obj").spec.template.metadata.annotations.get( + NetworkAnnotation.NETWORK_ANNOTATION_RESOURCE_KEY + ) + ) + self.assertEqual(len(network_annotations), 2) + + @patch("lightkube.core.client.Client.patch") + @patch("ops.model.Container.get_service") + @patch("lightkube.core.client.GenericSyncClient", new=Mock) + @patch( + f"{HUGEPAGES_LIBRARY_PATH}.KubernetesHugePagesPatchCharmLib.is_patched", + Mock(return_value=True), + ) + @patch(f"{MULTUS_LIBRARY_PATH}.KubernetesMultusCharmLib.is_ready", Mock(return_value=True)) + @patch("charm.DPDK.is_configured", Mock(return_value=True)) + def test_given_upf_charm_configured_to_run_in_default_mode_when_generate_network_annotations_is_called_then_access_network_annotation_created( # noqa: E501 + self, patch_get_service, kubernetes_statefulset_patch + ): + service_info_mock = Mock() + service_info_mock.is_running.return_value = True + patch_get_service.return_value = service_info_mock + self.harness.update_config() + patch_statefulset = kubernetes_statefulset_patch.call_args_list[0] + patch_statefulset_call_args = next( + iter( + filter( + lambda call_item: isinstance(call_item, dict) + and "StatefulSet" in str(call_item.get("obj")) # noqa: W503 + and call_item.get("name") == self.harness.charm.app.name, # noqa: W503 + patch_statefulset, + ) + ) + ) + network_annotations = json.loads( + patch_statefulset_call_args.get("obj").spec.template.metadata.annotations.get( + NetworkAnnotation.NETWORK_ANNOTATION_RESOURCE_KEY + ) + ) + access_network_annotation = next( + iter( + filter( + lambda network_annotation: network_annotation.get("name") + == ACCESS_NETWORK_ATTACHMENT_DEFINITION_NAME, # noqa: W503 + network_annotations, + ) + ) + ) + self.assertTrue(access_network_annotation) + self.assertEqual(access_network_annotation.get("interface"), ACCESS_INTERFACE_NAME) + + @patch("lightkube.core.client.Client.patch") + @patch("ops.model.Container.get_service") + @patch("lightkube.core.client.GenericSyncClient", new=Mock) + @patch( + f"{HUGEPAGES_LIBRARY_PATH}.KubernetesHugePagesPatchCharmLib.is_patched", + Mock(return_value=True), + ) + @patch(f"{MULTUS_LIBRARY_PATH}.KubernetesMultusCharmLib.is_ready", Mock(return_value=True)) + @patch("charm.DPDK.is_configured", Mock(return_value=True)) + def test_given_upf_charm_configured_to_run_in_default_mode_when_generate_network_annotations_is_called_then_access_network_annotation_created_without_dpdk_specific_data( # noqa: E501 + self, patch_get_service, kubernetes_statefulset_patch + ): + service_info_mock = Mock() + service_info_mock.is_running.return_value = True + patch_get_service.return_value = service_info_mock + self.harness.update_config() + patch_statefulset = kubernetes_statefulset_patch.call_args_list[0] + patch_statefulset_call_args = next( + iter( + filter( + lambda call_item: isinstance(call_item, dict) + and "StatefulSet" in str(call_item.get("obj")) # noqa: W503 + and call_item.get("name") == 
self.harness.charm.app.name,  # noqa: W503
+                    patch_statefulset,
+                )
+            )
+        )
+        network_annotations = json.loads(
+            patch_statefulset_call_args.get("obj").spec.template.metadata.annotations.get(
+                NetworkAnnotation.NETWORK_ANNOTATION_RESOURCE_KEY
+            )
+        )
+        access_network_annotation = next(
+            iter(
+                filter(
+                    lambda network_annotation: network_annotation.get("name")
+                    == ACCESS_NETWORK_ATTACHMENT_DEFINITION_NAME,  # noqa: W503
+                    network_annotations,
+                )
+            )
+        )
+        self.assertFalse(access_network_annotation.get("mac"))
+        self.assertFalse(access_network_annotation.get("ips"))
+
+    @patch("lightkube.core.client.Client.patch")
+    @patch("ops.model.Container.get_service")
+    @patch("lightkube.core.client.GenericSyncClient", new=Mock)
+    @patch(
+        f"{HUGEPAGES_LIBRARY_PATH}.KubernetesHugePagesPatchCharmLib.is_patched",
+        Mock(return_value=True),
+    )
+    @patch(f"{MULTUS_LIBRARY_PATH}.KubernetesMultusCharmLib.is_ready", Mock(return_value=True))
+    @patch("charm.DPDK.is_configured", Mock(return_value=True))
+    def test_given_upf_charm_configured_to_run_in_default_mode_when_generate_network_annotations_is_called_then_core_network_annotation_created(  # noqa: E501
+        self, patch_get_service, kubernetes_statefulset_patch
+    ):
+        service_info_mock = Mock()
+        service_info_mock.is_running.return_value = True
+        patch_get_service.return_value = service_info_mock
+        self.harness.update_config()
+        patch_statefulset = kubernetes_statefulset_patch.call_args_list[0]
+        patch_statefulset_call_args = next(
+            iter(
+                filter(
+                    lambda call_item: isinstance(call_item, dict)
+                    and "StatefulSet" in str(call_item.get("obj"))  # noqa: W503
+                    and call_item.get("name") == self.harness.charm.app.name,  # noqa: W503
+                    patch_statefulset,
+                )
+            )
+        )
+        network_annotations = json.loads(
+            patch_statefulset_call_args.get("obj").spec.template.metadata.annotations.get(
+                NetworkAnnotation.NETWORK_ANNOTATION_RESOURCE_KEY
+            )
+        )
+        core_network_annotation = next(
+            iter(
+                filter(
+                    lambda network_annotation: network_annotation.get("name")
+                    == CORE_NETWORK_ATTACHMENT_DEFINITION_NAME,  # noqa: W503
+                    network_annotations,
+                )
+            )
+        )
+        self.assertTrue(core_network_annotation)
+        self.assertEqual(core_network_annotation.get("interface"), CORE_INTERFACE_NAME)
+
+    @patch("lightkube.core.client.Client.patch")
+    @patch("ops.model.Container.get_service")
+    @patch("lightkube.core.client.GenericSyncClient", new=Mock)
+    @patch(
+        f"{HUGEPAGES_LIBRARY_PATH}.KubernetesHugePagesPatchCharmLib.is_patched",
+        Mock(return_value=True),
+    )
+    @patch(f"{MULTUS_LIBRARY_PATH}.KubernetesMultusCharmLib.is_ready", Mock(return_value=True))
+    @patch("charm.DPDK.is_configured", Mock(return_value=True))
+    def test_given_upf_charm_configured_to_run_in_default_mode_when_generate_network_annotations_is_called_then_core_network_annotation_created_without_dpdk_specific_data(  # noqa: E501
+        self, patch_get_service, kubernetes_statefulset_patch
+    ):
+        service_info_mock = Mock()
+        service_info_mock.is_running.return_value = True
+        patch_get_service.return_value = service_info_mock
+        self.harness.update_config()
+        patch_statefulset = kubernetes_statefulset_patch.call_args_list[0]
+        patch_statefulset_call_args = next(
+            iter(
+                filter(
+                    lambda call_item: isinstance(call_item, dict)
+                    and "StatefulSet" in str(call_item.get("obj"))  # noqa: W503
+                    and call_item.get("name") == self.harness.charm.app.name,  # noqa: W503
+                    patch_statefulset,
+                )
+            )
+        )
+        network_annotations = json.loads(
+            patch_statefulset_call_args.get("obj").spec.template.metadata.annotations.get(
+                NetworkAnnotation.NETWORK_ANNOTATION_RESOURCE_KEY
+            )
+        )
+        core_network_annotation = next(
+            iter(
+                filter(
+                    lambda network_annotation: network_annotation.get("name")
+                    == CORE_NETWORK_ATTACHMENT_DEFINITION_NAME,  # noqa: W503
+                    network_annotations,
+                )
+            )
+        )
+        self.assertFalse(core_network_annotation.get("mac"))
+        self.assertFalse(core_network_annotation.get("ips"))
+
+    @patch("lightkube.core.client.Client.patch")
+    @patch("ops.model.Container.get_service")
+    @patch("lightkube.core.client.Client.list")
+    @patch("lightkube.core.client.GenericSyncClient", new=Mock)
+    @patch(
+        f"{HUGEPAGES_LIBRARY_PATH}.KubernetesHugePagesPatchCharmLib.is_patched",
+        Mock(return_value=True),
+    )
+    @patch(f"{MULTUS_LIBRARY_PATH}.KubernetesMultusCharmLib.is_ready", Mock(return_value=True))
+    @patch("charm.DPDK.is_configured", Mock(return_value=True))
+    def test_given_upf_charm_configured_to_run_in_dpdk_mode_when_patch_statefulset_then_2_network_annotations_are_created(  # noqa: E501
+        self, patched_list, patch_get_service, kubernetes_statefulset_patch
+    ):
+        service_info_mock = Mock()
+        service_info_mock.is_running.return_value = True
+        patch_get_service.return_value = service_info_mock
+        patched_list.side_effect = [
+            [Node(status=NodeStatus(allocatable={"hugepages-1Gi": "3Gi"}))],
+            [],
+            [],
+        ]
+        self.harness.update_config(
+            key_values={
+                "upf-mode": "dpdk",
+                "enable-hugepages": True,
+                "access-interface-mac-address": VALID_ACCESS_MAC,
+                "core-interface-mac-address": VALID_CORE_MAC,
+            }
+        )
+        patch_statefulset = kubernetes_statefulset_patch.call_args_list[0]
+        patch_statefulset_call_args = next(
+            iter(
+                filter(
+                    lambda call_item: isinstance(call_item, dict)
+                    and "StatefulSet" in str(call_item.get("obj"))  # noqa: W503
+                    and call_item.get("name") == self.harness.charm.app.name,  # noqa: W503
+                    patch_statefulset,
+                )
+            )
+        )
+        network_annotations = json.loads(
+            patch_statefulset_call_args.get("obj").spec.template.metadata.annotations.get(
+                NetworkAnnotation.NETWORK_ANNOTATION_RESOURCE_KEY
+            )
+        )
+        self.assertEqual(len(network_annotations), 2)
+
+    @patch("lightkube.core.client.Client.patch")
+    @patch("ops.model.Container.get_service")
+    @patch("lightkube.core.client.Client.list")
+    @patch("lightkube.core.client.GenericSyncClient", new=Mock)
+    @patch(
+        f"{HUGEPAGES_LIBRARY_PATH}.KubernetesHugePagesPatchCharmLib.is_patched",
+        Mock(return_value=True),
+    )
+    @patch(f"{MULTUS_LIBRARY_PATH}.KubernetesMultusCharmLib.is_ready", Mock(return_value=True))
+    @patch("charm.DPDK.is_configured", Mock(return_value=True))
+    def test_given_upf_charm_configured_to_run_in_dpdk_mode_when_generate_network_annotations_is_called_then_access_network_annotation_created(  # noqa: E501
+        self, patched_list, patch_get_service, kubernetes_statefulset_patch
+    ):
+        service_info_mock = Mock()
+        service_info_mock.is_running.return_value = True
+        patch_get_service.return_value = service_info_mock
+        patched_list.side_effect = [
+            [Node(status=NodeStatus(allocatable={"hugepages-1Gi": "3Gi"}))],
+            [],
+            [],
+        ]
+        self.harness.update_config(
+            key_values={
+                "upf-mode": "dpdk",
+                "enable-hugepages": True,
+                "access-ip": VALID_ACCESS_IP,
+                "access-interface-mac-address": VALID_ACCESS_MAC,
+                "core-interface-mac-address": VALID_CORE_MAC,
+            }
+        )
+        patch_statefulset = kubernetes_statefulset_patch.call_args_list[0]
+        patch_statefulset_call_args = next(
+            iter(
+                filter(
+                    lambda call_item: isinstance(call_item, dict)
+                    and "StatefulSet" in str(call_item.get("obj"))  # noqa: W503
+                    and call_item.get("name") == self.harness.charm.app.name,  # noqa: W503
+                    patch_statefulset,
+                )
+            )
+        )
+        network_annotations = json.loads(
+            patch_statefulset_call_args.get("obj").spec.template.metadata.annotations.get(
+                NetworkAnnotation.NETWORK_ANNOTATION_RESOURCE_KEY
+            )
+        )
+        access_network_annotation = next(
+            iter(
+                filter(
+                    lambda network_annotation: network_annotation.get("name")
+                    == ACCESS_NETWORK_ATTACHMENT_DEFINITION_NAME,  # noqa: W503
+                    network_annotations,
+                )
+            )
+        )
+        self.assertTrue(access_network_annotation)
+        self.assertEqual(access_network_annotation.get("interface"), ACCESS_INTERFACE_NAME)
+        self.assertEqual(access_network_annotation.get("mac"), VALID_ACCESS_MAC)
+        self.assertEqual(access_network_annotation.get("ips"), [VALID_ACCESS_IP])
+
+    @patch("lightkube.core.client.Client.patch")
+    @patch("ops.model.Container.get_service")
+    @patch("lightkube.core.client.Client.list")
+    @patch("lightkube.core.client.GenericSyncClient", new=Mock)
+    @patch(
+        f"{HUGEPAGES_LIBRARY_PATH}.KubernetesHugePagesPatchCharmLib.is_patched",
+        Mock(return_value=True),
+    )
+    @patch(f"{MULTUS_LIBRARY_PATH}.KubernetesMultusCharmLib.is_ready", Mock(return_value=True))
+    @patch("charm.DPDK.is_configured", Mock(return_value=True))
+    def test_given_upf_charm_configured_to_run_in_dpdk_mode_when_generate_network_annotations_is_called_then_core_network_annotation_created(  # noqa: E501
+        self, patched_list, patch_get_service, kubernetes_statefulset_patch
+    ):
+        service_info_mock = Mock()
+        service_info_mock.is_running.return_value = True
+        patch_get_service.return_value = service_info_mock
+        patched_list.side_effect = [
+            [Node(status=NodeStatus(allocatable={"hugepages-1Gi": "3Gi"}))],
+            [],
+            [],
+        ]
+        self.harness.update_config(
+            key_values={
+                "upf-mode": "dpdk",
+                "enable-hugepages": True,
+                "core-ip": VALID_CORE_IP,
+                "access-interface-mac-address": VALID_ACCESS_MAC,
+                "core-interface-mac-address": VALID_CORE_MAC,
+            }
+        )
+        patch_statefulset = kubernetes_statefulset_patch.call_args_list[0]
+        patch_statefulset_call_args = next(
+            iter(
+                filter(
+                    lambda call_item: isinstance(call_item, dict)
+                    and "StatefulSet" in str(call_item.get("obj"))  # noqa: W503
+                    and call_item.get("name") == self.harness.charm.app.name,  # noqa: W503
+                    patch_statefulset,
+                )
+            )
+        )
+        network_annotations = json.loads(
+            patch_statefulset_call_args.get("obj").spec.template.metadata.annotations.get(
+                NetworkAnnotation.NETWORK_ANNOTATION_RESOURCE_KEY
+            )
+        )
+        core_network_annotation = next(
+            iter(
+                filter(
+                    lambda network_annotation: network_annotation.get("name")
+                    == CORE_NETWORK_ATTACHMENT_DEFINITION_NAME,  # noqa: W503
+                    network_annotations,
+                )
+            )
+        )
+        self.assertTrue(core_network_annotation)
+        self.assertEqual(core_network_annotation.get("interface"), CORE_INTERFACE_NAME)
+        self.assertEqual(core_network_annotation.get("mac"), VALID_CORE_MAC)
+        self.assertEqual(core_network_annotation.get("ips"), [VALID_CORE_IP])
+
     @patch("charm.check_output")
     @patch("charm.Client", new=Mock)
     def test_given_cpu_not_supporting_required_instructions_when_install_then_incompatiblecpuerror_is_raised(  # noqa: E501
@@ -770,6 +1498,39 @@ def test_when_remove_then_external_service_is_deleted(self, patch_client):
             namespace=self.namespace,
         )
 
+    @patch("lightkube.core.client.Client.create")
+    @patch("ops.model.Container.get_service")
+    @patch("lightkube.core.client.GenericSyncClient", new=Mock)
+    @patch(
+        f"{HUGEPAGES_LIBRARY_PATH}.KubernetesHugePagesPatchCharmLib.is_patched",
+        Mock(return_value=True),
+    )
+    
@patch(f"{MULTUS_LIBRARY_PATH}.KubernetesMultusCharmLib.is_ready", Mock(return_value=True)) + @patch("charm.DPDK.is_configured", Mock(return_value=True)) + def test_given_default_config_when_create_network_attachment_definitions_then_interface_mtu_not_set_in_the_network_attachment_definitions( # noqa: E501 + self, patch_get_service, kubernetes_create_object + ): + service_info_mock = Mock() + service_info_mock.is_running.return_value = True + patch_get_service.return_value = service_info_mock + self.harness.update_config( + key_values={ + "access-ip": "192.168.252.3/24", + "access-gateway-ip": ACCESS_GW_IP, + "gnb-subnet": GNB_SUBNET, + "core-ip": VALID_CORE_IP, + "core-gateway-ip": CORE_GW_IP, + } + ) + + create_nad_calls = kubernetes_create_object.call_args_list + for create_nad_call in create_nad_calls: + create_nad_call_args = next( + iter(filter(lambda call_item: isinstance(call_item, dict), create_nad_call)) + ) + nad_config = json.loads(create_nad_call_args.get("obj").spec.get("config")) + self.assertNotIn("mtu", nad_config) + @patch("charm.check_output") @patch("charm.Client", new=Mock) @patch(f"{HUGEPAGES_LIBRARY_PATH}.KubernetesHugePagesPatchCharmLib.is_patched") @@ -882,7 +1643,7 @@ def test_given_default_config_when_network_attachment_definitions_from_config_is "access-ip": "192.168.252.3/24", "access-gateway-ip": ACCESS_GW_IP, "gnb-subnet": GNB_SUBNET, - "core-ip": CORE_IP, + "core-ip": VALID_CORE_IP, "core-gateway-ip": CORE_GW_IP, } ) @@ -891,27 +1652,40 @@ def test_given_default_config_when_network_attachment_definitions_from_config_is config = json.loads(nad.spec["config"]) self.assertNotIn("mtu", config) - @patch(f"{HUGEPAGES_LIBRARY_PATH}.KubernetesHugePagesPatchCharmLib.is_patched") - def test_given_default_config_with_interfaces_mtu_sizes_when_network_attachment_definitions_from_config_is_called_then_mtu_sizes_specified_in_nad( # noqa: E501 - self, - patch_hugepages_is_patched, + @patch("lightkube.core.client.Client.create") + @patch("ops.model.Container.get_service") + @patch("lightkube.core.client.GenericSyncClient", new=Mock) + @patch( + f"{HUGEPAGES_LIBRARY_PATH}.KubernetesHugePagesPatchCharmLib.is_patched", + Mock(return_value=True), + ) + @patch(f"{MULTUS_LIBRARY_PATH}.KubernetesMultusCharmLib.is_ready", Mock(return_value=True)) + @patch("charm.DPDK.is_configured", Mock(return_value=True)) + def test_given_default_config_with_interfaces_mtu_sizes_when_create_network_attachment_definitions_then_interface_mtu_set_in_the_network_attachment_definitions( # noqa: E501 + self, patch_get_service, kubernetes_create_object ): - patch_hugepages_is_patched.return_value = True + service_info_mock = Mock() + service_info_mock.is_running.return_value = True + patch_get_service.return_value = service_info_mock self.harness.update_config( key_values={ "access-ip": "192.168.252.3/24", "access-gateway-ip": ACCESS_GW_IP, "access-interface-mtu-size": VALID_MTU_SIZE_1, "gnb-subnet": GNB_SUBNET, - "core-ip": CORE_IP, + "core-ip": VALID_CORE_IP, "core-gateway-ip": CORE_GW_IP, "core-interface-mtu-size": VALID_MTU_SIZE_1, } ) - nads = self.harness.charm._network_attachment_definitions_from_config() - for nad in nads: - config = json.loads(nad.spec["config"]) - self.assertEqual(config["mtu"], 65535) + + create_nad_calls = kubernetes_create_object.call_args_list + for create_nad_call in create_nad_calls: + create_nad_call_args = next( + iter(filter(lambda call_item: isinstance(call_item, dict), create_nad_call)) + ) + nad_config = 
json.loads(create_nad_call_args.get("obj").spec.get("config")) + self.assertEqual(nad_config["mtu"], VALID_MTU_SIZE_1) def test_given_default_config_with_interfaces_too_small_and_too_big_mtu_sizes_when_network_attachment_definitions_from_config_is_called_then_status_is_blocked( # noqa: E501 self, diff --git a/tests/unit/test_dpdk.py b/tests/unit/test_dpdk.py new file mode 100644 index 00000000..083ee804 --- /dev/null +++ b/tests/unit/test_dpdk.py @@ -0,0 +1,305 @@ +# Copyright 2023 Canonical Ltd. +# See LICENSE file for licensing details. + +import unittest +from unittest.mock import MagicMock, Mock, patch + +from lightkube.core.exceptions import ApiError +from lightkube.models.apps_v1 import StatefulSetSpec +from lightkube.models.core_v1 import ( + Container, + PodSpec, + PodTemplateSpec, + ResourceRequirements, + SecurityContext, +) +from lightkube.models.meta_v1 import LabelSelector, ObjectMeta +from lightkube.resources.apps_v1 import StatefulSet + +from dpdk import DPDK, DPDKError + +TEST_CONTAINER_NAME = "bullseye" +TEST_RESOURCE_REQUESTS = {"test_request": 1234} +TEST_RESOURCE_LIMITS = {"test_limit": 4321} +TEST_RESOURCE_REQUIREMENTS = { + "requests": TEST_RESOURCE_REQUESTS, + "limits": TEST_RESOURCE_LIMITS, +} + + +class TestDPDKStatefulSetUpdater(unittest.TestCase): + @patch("lightkube.core.client.GenericSyncClient", new=Mock) + def setUp(self) -> None: + self.dpdk_statefulset_updater = DPDK( + statefulset_name="doesntmatter", + namespace="whatever", + dpdk_access_interface_resource_name="who", + dpdk_core_interface_resource_name="cares", + ) + + @patch("lightkube.core.client.Client.get") + def test_given_lightkube_client_returns_api_error_on_get_when_container_configured_for_dpdk_called_then_dpdk_statefulset_updater_error_is_raised( # noqa: E501 + self, patched_lightkube_client_get + ): + patched_lightkube_client_get.side_effect = ApiError(response=MagicMock()) + + with self.assertRaises(DPDKError): + self.dpdk_statefulset_updater.is_configured("justatest") + + @patch("lightkube.core.client.Client.get") + def test_given_container_not_is_statefulset_when_container_configured_for_dpdk_called_then_dpdk_statefulset_updater_error_is_raised( # noqa: E501 + self, patched_lightkube_client_get + ): + test_statefulset = StatefulSet( + spec=StatefulSetSpec( + selector=LabelSelector(), + serviceName="whatever", + template=PodTemplateSpec(spec=PodSpec(containers=[])), + ) + ) + patched_lightkube_client_get.return_value = test_statefulset + + with self.assertRaises(DPDKError): + self.dpdk_statefulset_updater.is_configured("justatest") + + @patch("lightkube.core.client.Client.get") + def test_given_container_is_not_privileged_when_container_configured_for_dpdk_called_then_false_is_returned( # noqa: E501 + self, patched_lightkube_client_get + ): + test_statefulset = StatefulSet( + spec=StatefulSetSpec( + selector=LabelSelector(), + serviceName="whatever", + template=PodTemplateSpec( + spec=PodSpec( + containers=[ + Container( + name=TEST_CONTAINER_NAME, + securityContext=SecurityContext(privileged=False), + ) + ] + ) + ), + ) + ) + patched_lightkube_client_get.return_value = test_statefulset + + self.assertFalse(self.dpdk_statefulset_updater.is_configured(TEST_CONTAINER_NAME)) + + @patch("lightkube.core.client.Client.get") + def test_given_resource_requirements_not_applied_to_the_container_when_container_configured_for_dpdk_called_then_false_is_returned( # noqa: E501 + self, patched_lightkube_client_get + ): + test_statefulset = StatefulSet( + spec=StatefulSetSpec( + selector=LabelSelector(), + 
serviceName="whatever", + template=PodTemplateSpec( + spec=PodSpec( + containers=[ + Container( + name=TEST_CONTAINER_NAME, + resources=ResourceRequirements(limits={}, requests={}), + securityContext=SecurityContext(privileged=True), + ) + ] + ) + ), + ) + ) + patched_lightkube_client_get.return_value = test_statefulset + + self.assertFalse(self.dpdk_statefulset_updater.is_configured(TEST_CONTAINER_NAME)) + + @patch("lightkube.core.client.Client.get") + def test_given_resource_requests_applied_but_limits_not_applied_to_the_container_when_container_configured_for_dpdk_called_then_false_is_returned( # noqa: E501 + self, patched_lightkube_client_get + ): + self.dpdk_statefulset_updater.dpdk_resource_requirements = TEST_RESOURCE_REQUIREMENTS + test_statefulset = StatefulSet( + spec=StatefulSetSpec( + selector=LabelSelector(), + serviceName="whatever", + template=PodTemplateSpec( + spec=PodSpec( + containers=[ + Container( + name=TEST_CONTAINER_NAME, + resources=ResourceRequirements( + limits={}, + requests=TEST_RESOURCE_REQUESTS, + ), + securityContext=SecurityContext(privileged=True), + ) + ] + ) + ), + ) + ) + patched_lightkube_client_get.return_value = test_statefulset + + self.assertFalse(self.dpdk_statefulset_updater.is_configured(TEST_CONTAINER_NAME)) + + @patch("lightkube.core.client.Client.get") + def test_given_resource_limits_applied_but_requests_not_applied_to_the_container_when_container_configured_for_dpdk_called_then_false_is_returned( # noqa: E501 + self, patched_lightkube_client_get + ): + self.dpdk_statefulset_updater.dpdk_resource_requirements = TEST_RESOURCE_REQUIREMENTS + test_statefulset = StatefulSet( + spec=StatefulSetSpec( + selector=LabelSelector(), + serviceName="whatever", + template=PodTemplateSpec( + spec=PodSpec( + containers=[ + Container( + name=TEST_CONTAINER_NAME, + resources=ResourceRequirements( + limits=TEST_RESOURCE_LIMITS, + requests={}, + ), + securityContext=SecurityContext(privileged=True), + ) + ] + ) + ), + ) + ) + patched_lightkube_client_get.return_value = test_statefulset + + self.assertFalse(self.dpdk_statefulset_updater.is_configured(TEST_CONTAINER_NAME)) + + @patch("lightkube.core.client.Client.get") + def test_given_container_is_privileged_and_has_resource_requirements_applied_when_container_configured_for_dpdk_called_then_true_is_returned( # noqa: E501 + self, patched_lightkube_client_get + ): + self.dpdk_statefulset_updater.dpdk_resource_requirements = TEST_RESOURCE_REQUIREMENTS + test_statefulset = StatefulSet( + spec=StatefulSetSpec( + selector=LabelSelector(), + serviceName="whatever", + template=PodTemplateSpec( + spec=PodSpec( + containers=[ + Container( + name=TEST_CONTAINER_NAME, + resources=ResourceRequirements( + limits=TEST_RESOURCE_LIMITS, + requests=TEST_RESOURCE_REQUESTS, + ), + securityContext=SecurityContext(privileged=True), + ) + ] + ) + ), + ) + ) + patched_lightkube_client_get.return_value = test_statefulset + + self.assertTrue(self.dpdk_statefulset_updater.is_configured(TEST_CONTAINER_NAME)) + + @patch("lightkube.core.client.Client.get") + def test_given_container_exists_and_requires_configuration_when_configure_container_for_dpdk_then_container_is_configured( # noqa: E501 + self, patched_lightkube_client_get + ): + self.dpdk_statefulset_updater.dpdk_resource_requirements = TEST_RESOURCE_REQUIREMENTS + test_statefulset = StatefulSet( + metadata=ObjectMeta(name="whatever"), + spec=StatefulSetSpec( + selector=LabelSelector(), + serviceName="whatever", + template=PodTemplateSpec( + spec=PodSpec( + containers=[ + Container( + 
name=TEST_CONTAINER_NAME, + resources=ResourceRequirements(limits={}, requests={}), + securityContext=SecurityContext(privileged=False), + ) + ] + ) + ), + ), + ) + patched_lightkube_client_get.return_value = test_statefulset + expected_updated_container_spec = Container( + name=TEST_CONTAINER_NAME, + resources=ResourceRequirements( + limits=TEST_RESOURCE_LIMITS, + requests=TEST_RESOURCE_REQUESTS, + ), + securityContext=SecurityContext(privileged=True), + ) + + self.dpdk_statefulset_updater.configure(TEST_CONTAINER_NAME) + + self.assertEqual( + test_statefulset.spec.template.spec.containers[0], + expected_updated_container_spec, + ) + + @patch("lightkube.core.client.Client.get") + @patch("lightkube.core.client.Client.replace") + def test_given_client_when_configure_container_for_dpdk_then_statefulset_is_replaced( # noqa: E501 + self, patched_lightkube_client_replace, patched_lightkube_client_get + ): + self.dpdk_statefulset_updater.dpdk_resource_requirements = TEST_RESOURCE_REQUIREMENTS + test_statefulset = StatefulSet( + metadata=ObjectMeta(name="whatever"), + spec=StatefulSetSpec( + selector=LabelSelector(), + serviceName="whatever", + template=PodTemplateSpec( + spec=PodSpec( + containers=[ + Container( + name=TEST_CONTAINER_NAME, + resources=ResourceRequirements( + limits=TEST_RESOURCE_LIMITS, + requests=TEST_RESOURCE_REQUESTS, + ), + securityContext=SecurityContext(privileged=True), + ) + ] + ) + ), + ), + ) + patched_lightkube_client_get.return_value = test_statefulset + + self.dpdk_statefulset_updater.configure(TEST_CONTAINER_NAME) + + patched_lightkube_client_replace.assert_called_once_with(obj=test_statefulset) + + @patch("lightkube.core.client.Client.get") + @patch("lightkube.core.client.Client.replace") + def test_given_lightkube_client_returns_api_error_on_replace_when_configure_container_for_dpdk_then_dpdk_statefulset_updater_error_is_raised( # noqa: E501 + self, patched_lightkube_client_replace, patched_lightkube_client_get + ): + self.dpdk_statefulset_updater.dpdk_resource_requirements = TEST_RESOURCE_REQUIREMENTS + test_statefulset = StatefulSet( + metadata=ObjectMeta(name="whatever"), + spec=StatefulSetSpec( + selector=LabelSelector(), + serviceName="whatever", + template=PodTemplateSpec( + spec=PodSpec( + containers=[ + Container( + name=TEST_CONTAINER_NAME, + resources=ResourceRequirements( + limits=TEST_RESOURCE_LIMITS, + requests=TEST_RESOURCE_REQUESTS, + ), + securityContext=SecurityContext(privileged=True), + ) + ] + ) + ), + ), + ) + patched_lightkube_client_get.return_value = test_statefulset + patched_lightkube_client_replace.side_effect = ApiError(response=MagicMock()) + + with self.assertRaises(DPDKError): + self.dpdk_statefulset_updater.configure(TEST_CONTAINER_NAME)