From c2d020a49135137747ec0225ac64bfda8bc4853a Mon Sep 17 00:00:00 2001 From: Josh Woods Date: Wed, 9 Jun 2021 11:41:31 -0400 Subject: [PATCH 01/22] (Issue #110) Add setup files to make viya4-ark pip installable via Git --- .../model/viya_deployment_report.py | 23 +++++++++++ setup.py | 40 +++++++++++++++++++ 2 files changed, 63 insertions(+) create mode 100644 setup.py diff --git a/deployment_report/model/viya_deployment_report.py b/deployment_report/model/viya_deployment_report.py index 4741103..119ec36 100644 --- a/deployment_report/model/viya_deployment_report.py +++ b/deployment_report/model/viya_deployment_report.py @@ -62,6 +62,29 @@ def __init__(self) -> None: """ self._report_data = None + def as_dict(self) -> Optional[Dict]: + """ + Returns a dictionary representation of the data gathered by this report. All nested objects are maintained + as-is. To parse to JSON, the KubernetesObjectJSONEncoder is needed. + + :return: A dictionary representation of the data gathered by this report with all nested objects maintained + as-is, or None if data has not been gathered. + """ + return self._report_data + + def as_dict_json_encoded(self) -> Optional[Dict]: + """ + Returns a dictionary representation of the data gathered by this report. All nested objects are encoded, using + the KubernetesObjectJSONEncoder, to native Python objects for JSON compatibility. + + :return: A dictionary representation of the data gathered by this report with all nested objects encoded for + JSON compatibility. 
+ """ + if self._report_data: + return json.loads(json.dumps(self._report_data, cls=KubernetesObjectJSONEncoder)) + + return None + def gather_details(self, kubectl: KubectlInterface, include_pod_log_snips: bool = INCLUDE_POD_LOG_SNIPS_DEFAULT) -> None: """ diff --git a/setup.py b/setup.py new file mode 100644 index 0000000..25e5753 --- /dev/null +++ b/setup.py @@ -0,0 +1,40 @@ +#!/usr/bin/env python + +#################################################################### +# ### setup.py ### +#################################################################### +# ### Author: SAS Institute Inc. ### +#################################################################### +# ### +# Copyright (c) 2021, SAS Institute Inc., Cary, NC, USA. ### +# All Rights Reserved. ### +# SPDX-License-Identifier: Apache-2.0 ### +# ### +#################################################################### + +from setuptools import find_packages, setup + +# get the install requirements from requirements.txt +with open("requirements.txt") as requirements_file: + install_requirements = requirements_file.read().splitlines() + +setup( + name="viya4-ark", + author="SAS Institute Inc.", + + license="Apache-2.0", + licenses_files=["LICENSE"], + + description=("The SAS Viya Administration Resource Kit (SAS Viya ARK) provides tools and utilities to help SAS " + "customers prepare for and gather information about a SAS Viya deployment."), + long_description=open("README.md").read(), + long_description_content_type="text/markdown", + + url="https://github.com/sassoftware/viya4-ark", + + packages=find_packages(exclude=["tests", "*.tests", "*.tests.*", "tests.*"]), + + python_requires=">=3.6", + # these are setup to match the definitions in requirements.txt + install_requires=install_requirements +) From 900a5465a1d495763a642d08f3f69adb96ff97f1 Mon Sep 17 00:00:00 2001 From: Josh Woods Date: Wed, 9 Jun 2021 13:54:20 -0400 Subject: [PATCH 02/22] (Issue #110) Add setup files to make viya4-ark pip installable 
via Git --- setup.cfg | 8 ++++++++ setup.py | 21 ++------------------- 2 files changed, 10 insertions(+), 19 deletions(-) create mode 100644 setup.cfg diff --git a/setup.cfg b/setup.cfg new file mode 100644 index 0000000..1347328 --- /dev/null +++ b/setup.cfg @@ -0,0 +1,8 @@ +[metadata] +name = viya4-ark +author = SAS Institute Inc. +summary = The SAS Viya Administration Resource Kit (SAS Viya ARK) provides tools and utilities to help SAS customers prepare for and gather information about a SAS Viya deployment. +description-file = README.md +description-content-type = text/markdown +home-page = https://github.com/sassoftware/viya4-ark +license = Apache-2.0 diff --git a/setup.py b/setup.py index 25e5753..d3dd4f5 100644 --- a/setup.py +++ b/setup.py @@ -14,27 +14,10 @@ from setuptools import find_packages, setup -# get the install requirements from requirements.txt -with open("requirements.txt") as requirements_file: - install_requirements = requirements_file.read().splitlines() - setup( - name="viya4-ark", - author="SAS Institute Inc.", - - license="Apache-2.0", + setup_requires=["pbr"], + pbr=True, licenses_files=["LICENSE"], - - description=("The SAS Viya Administration Resource Kit (SAS Viya ARK) provides tools and utilities to help SAS " - "customers prepare for and gather information about a SAS Viya deployment."), - long_description=open("README.md").read(), - long_description_content_type="text/markdown", - - url="https://github.com/sassoftware/viya4-ark", - packages=find_packages(exclude=["tests", "*.tests", "*.tests.*", "tests.*"]), - python_requires=">=3.6", - # these are setup to match the definitions in requirements.txt - install_requires=install_requirements ) From d6385d45aac23f450a79780674eea8fd8ec35b3a Mon Sep 17 00:00:00 2001 From: Josh Woods Date: Wed, 9 Jun 2021 14:36:08 -0400 Subject: [PATCH 03/22] (Issue #110) Update copyright on files modified in 2021 --- deployment_report/model/static/viya_deployment_report_keys.py | 2 +- 
deployment_report/model/test/test_viya_deployment_report.py | 2 +- .../model/utils/test/test_viya_deployment_report_utils.py | 2 +- deployment_report/model/utils/viya_deployment_report_utils.py | 2 +- deployment_report/model/viya_deployment_report.py | 2 +- deployment_report/templates/viya_deployment_report.html.j2 | 2 +- download_pod_logs/model.py | 2 +- pre_install_report/library/pre_install_check.py | 2 +- pre_install_report/library/pre_install_check_permissions.py | 2 +- pre_install_report/library/pre_install_utils.py | 4 ++-- pre_install_report/library/utils/viya_constants.py | 2 +- .../templates/report_template_viya_pre_install_check.j2 | 2 +- pre_install_report/test/test_pre_install_report.py | 2 +- viya_ark_library/k8s/sas_k8s_objects.py | 2 +- viya_ark_library/k8s/sas_kubectl.py | 2 +- viya_ark_library/k8s/sas_kubectl_interface.py | 2 +- viya_ark_library/k8s/test_impl/sas_kubectl_test.py | 2 +- viya_ark_library/structured_logging/parser.py | 2 +- 18 files changed, 19 insertions(+), 19 deletions(-) diff --git a/deployment_report/model/static/viya_deployment_report_keys.py b/deployment_report/model/static/viya_deployment_report_keys.py index 6d1c47c..86d2eae 100644 --- a/deployment_report/model/static/viya_deployment_report_keys.py +++ b/deployment_report/model/static/viya_deployment_report_keys.py @@ -4,7 +4,7 @@ # ### Author: SAS Institute Inc. ### #################################################################### # ### -# Copyright (c) 2020, SAS Institute Inc., Cary, NC, USA. ### +# Copyright (c) 2021, SAS Institute Inc., Cary, NC, USA. ### # All Rights Reserved. ### # SPDX-License-Identifier: Apache-2.0 ### # ### diff --git a/deployment_report/model/test/test_viya_deployment_report.py b/deployment_report/model/test/test_viya_deployment_report.py index cca5cd1..ccc4cae 100644 --- a/deployment_report/model/test/test_viya_deployment_report.py +++ b/deployment_report/model/test/test_viya_deployment_report.py @@ -4,7 +4,7 @@ # ### Author: SAS Institute Inc. 
### #################################################################### # ### -# Copyright (c) 2020, SAS Institute Inc., Cary, NC, USA. ### +# Copyright (c) 2021, SAS Institute Inc., Cary, NC, USA. ### # All Rights Reserved. ### # SPDX-License-Identifier: Apache-2.0 ### # ### diff --git a/deployment_report/model/utils/test/test_viya_deployment_report_utils.py b/deployment_report/model/utils/test/test_viya_deployment_report_utils.py index 53b095b..3983452 100644 --- a/deployment_report/model/utils/test/test_viya_deployment_report_utils.py +++ b/deployment_report/model/utils/test/test_viya_deployment_report_utils.py @@ -4,7 +4,7 @@ # ### Author: SAS Institute Inc. ### #################################################################### # ### -# Copyright (c) 2020, SAS Institute Inc., Cary, NC, USA. ### +# Copyright (c) 2021, SAS Institute Inc., Cary, NC, USA. ### # All Rights Reserved. ### # SPDX-License-Identifier: Apache-2.0 ### # ### diff --git a/deployment_report/model/utils/viya_deployment_report_utils.py b/deployment_report/model/utils/viya_deployment_report_utils.py index 3f7d3f0..84ae462 100644 --- a/deployment_report/model/utils/viya_deployment_report_utils.py +++ b/deployment_report/model/utils/viya_deployment_report_utils.py @@ -4,7 +4,7 @@ # ### Author: SAS Institute Inc. ### #################################################################### # ### -# Copyright (c) 2020, SAS Institute Inc., Cary, NC, USA. ### +# Copyright (c) 2021, SAS Institute Inc., Cary, NC, USA. ### # All Rights Reserved. ### # SPDX-License-Identifier: Apache-2.0 ### # ### diff --git a/deployment_report/model/viya_deployment_report.py b/deployment_report/model/viya_deployment_report.py index 119ec36..17ffe90 100644 --- a/deployment_report/model/viya_deployment_report.py +++ b/deployment_report/model/viya_deployment_report.py @@ -4,7 +4,7 @@ # ### Author: SAS Institute Inc. 
### #################################################################### # ### -# Copyright (c) 2020, SAS Institute Inc., Cary, NC, USA. ### +# Copyright (c) 2021, SAS Institute Inc., Cary, NC, USA. ### # All Rights Reserved. ### # SPDX-License-Identifier: Apache-2.0 ### # ### diff --git a/deployment_report/templates/viya_deployment_report.html.j2 b/deployment_report/templates/viya_deployment_report.html.j2 index f79b9d6..074cb73 100644 --- a/deployment_report/templates/viya_deployment_report.html.j2 +++ b/deployment_report/templates/viya_deployment_report.html.j2 @@ -3,7 +3,7 @@ {# ----------------------------------------------------------- #} {# Author: SAS Institute Inc. #} {# ----------------------------------------------------------- #} -{# Copyright (c) 2020, SAS Institute Inc., Cary, NC, USA. #} +{# Copyright (c) 2021, SAS Institute Inc., Cary, NC, USA. #} {# All Rights Reserved. #} {# SPDX-License-Identifier: Apache-2.0 #} {# ----------------------------------------------------------- #} diff --git a/download_pod_logs/model.py b/download_pod_logs/model.py index a51bba9..8b8ff4f 100644 --- a/download_pod_logs/model.py +++ b/download_pod_logs/model.py @@ -4,7 +4,7 @@ # ### Author: SAS Institute Inc. ### #################################################################### # ### -# Copyright (c) 2020, SAS Institute Inc., Cary, NC, USA. ### +# Copyright (c) 2021, SAS Institute Inc., Cary, NC, USA. ### # All Rights Reserved. ### # SPDX-License-Identifier: Apache-2.0 ### # ### diff --git a/pre_install_report/library/pre_install_check.py b/pre_install_report/library/pre_install_check.py index 87ea560..efe163e 100644 --- a/pre_install_report/library/pre_install_check.py +++ b/pre_install_report/library/pre_install_check.py @@ -5,7 +5,7 @@ # ### Author: SAS Institute Inc. ### #################################################################### # ### -# Copyright (c) 2020, SAS Institute Inc., Cary, NC, USA. ### +# Copyright (c) 2021, SAS Institute Inc., Cary, NC, USA. 
### # All Rights Reserved. ### # SPDX-License-Identifier: Apache-2.0 ### # ### diff --git a/pre_install_report/library/pre_install_check_permissions.py b/pre_install_report/library/pre_install_check_permissions.py index e141c3f..4ea5302 100644 --- a/pre_install_report/library/pre_install_check_permissions.py +++ b/pre_install_report/library/pre_install_check_permissions.py @@ -5,7 +5,7 @@ # ### Author: SAS Institute Inc. ### #################################################################### # ### -# Copyright (c) 2020, SAS Institute Inc., Cary, NC, USA. ### +# Copyright (c) 2021, SAS Institute Inc., Cary, NC, USA. ### # All Rights Reserved. ### # SPDX-License-Identifier: Apache-2.0 ### # ### diff --git a/pre_install_report/library/pre_install_utils.py b/pre_install_report/library/pre_install_utils.py index c27718d..6c76f69 100644 --- a/pre_install_report/library/pre_install_utils.py +++ b/pre_install_report/library/pre_install_utils.py @@ -1,11 +1,11 @@ #!/usr/bin/env python3 #################################################################### -# ### pre_install_check permissions.py ### +# ### pre_install_check permissions.py ### #################################################################### # ### Author: SAS Institute Inc. ### #################################################################### # ### -# Copyright (c) 2020, SAS Institute Inc., Cary, NC, USA. ### +# Copyright (c) 2021, SAS Institute Inc., Cary, NC, USA. ### # All Rights Reserved. ### # SPDX-License-Identifier: Apache-2.0 ### # ### diff --git a/pre_install_report/library/utils/viya_constants.py b/pre_install_report/library/utils/viya_constants.py index bf38f33..c1fb103 100644 --- a/pre_install_report/library/utils/viya_constants.py +++ b/pre_install_report/library/utils/viya_constants.py @@ -4,7 +4,7 @@ # ### Author: SAS Institute Inc. ### #################################################################### # ### -# Copyright (c) 2020, SAS Institute Inc., Cary, NC, USA. 
### +# Copyright (c) 2021, SAS Institute Inc., Cary, NC, USA. ### # All Rights Reserved. ### # SPDX-License-Identifier: Apache-2.0 ### # ### diff --git a/pre_install_report/templates/report_template_viya_pre_install_check.j2 b/pre_install_report/templates/report_template_viya_pre_install_check.j2 index 7d469a0..83a2092 100644 --- a/pre_install_report/templates/report_template_viya_pre_install_check.j2 +++ b/pre_install_report/templates/report_template_viya_pre_install_check.j2 @@ -3,7 +3,7 @@ {# ----------------------------------------------------------- #} {# Author: SAS Institute Inc. #} {# ----------------------------------------------------------- #} -{# Copyright (c) 2020, SAS Institute Inc., Cary, NC, USA. #} +{# Copyright (c) 2021, SAS Institute Inc., Cary, NC, USA. #} {# All Rights Reserved. #} {# SPDX-License-Identifier: Apache-2.0 #} {# ----------------------------------------------------------- #} diff --git a/pre_install_report/test/test_pre_install_report.py b/pre_install_report/test/test_pre_install_report.py index d31055c..6a85888 100644 --- a/pre_install_report/test/test_pre_install_report.py +++ b/pre_install_report/test/test_pre_install_report.py @@ -5,7 +5,7 @@ # ### Author: SAS Institute Inc. ### #################################################################### # ### -# Copyright (c) 2020, SAS Institute Inc., Cary, NC, USA. ### +# Copyright (c) 2021, SAS Institute Inc., Cary, NC, USA. ### # All Rights Reserved. ### # SPDX-License-Identifier: Apache-2.0 ### # ### diff --git a/viya_ark_library/k8s/sas_k8s_objects.py b/viya_ark_library/k8s/sas_k8s_objects.py index f377964..3ef221e 100644 --- a/viya_ark_library/k8s/sas_k8s_objects.py +++ b/viya_ark_library/k8s/sas_k8s_objects.py @@ -4,7 +4,7 @@ # ### Author: SAS Institute Inc. ### #################################################################### # ### -# Copyright (c) 2020, SAS Institute Inc., Cary, NC, USA. ### +# Copyright (c) 2021, SAS Institute Inc., Cary, NC, USA. 
### # All Rights Reserved. ### # SPDX-License-Identifier: Apache-2.0 ### # ### diff --git a/viya_ark_library/k8s/sas_kubectl.py b/viya_ark_library/k8s/sas_kubectl.py index 1a8300f..a9e0293 100644 --- a/viya_ark_library/k8s/sas_kubectl.py +++ b/viya_ark_library/k8s/sas_kubectl.py @@ -4,7 +4,7 @@ # ### Author: SAS Institute Inc. ### #################################################################### # ### -# Copyright (c) 2020, SAS Institute Inc., Cary, NC, USA. ### +# Copyright (c) 2021, SAS Institute Inc., Cary, NC, USA. ### # All Rights Reserved. ### # SPDX-License-Identifier: Apache-2.0 ### # ### diff --git a/viya_ark_library/k8s/sas_kubectl_interface.py b/viya_ark_library/k8s/sas_kubectl_interface.py index d2e3970..0c2da1c 100644 --- a/viya_ark_library/k8s/sas_kubectl_interface.py +++ b/viya_ark_library/k8s/sas_kubectl_interface.py @@ -4,7 +4,7 @@ # ### Author: SAS Institute Inc. ### #################################################################### # ### -# Copyright (c) 2020, SAS Institute Inc., Cary, NC, USA. ### +# Copyright (c) 2021, SAS Institute Inc., Cary, NC, USA. ### # All Rights Reserved. ### # SPDX-License-Identifier: Apache-2.0 ### # ### diff --git a/viya_ark_library/k8s/test_impl/sas_kubectl_test.py b/viya_ark_library/k8s/test_impl/sas_kubectl_test.py index 9aee73f..c991bec 100644 --- a/viya_ark_library/k8s/test_impl/sas_kubectl_test.py +++ b/viya_ark_library/k8s/test_impl/sas_kubectl_test.py @@ -4,7 +4,7 @@ # ### Author: SAS Institute Inc. ### #################################################################### # ### -# Copyright (c) 2020, SAS Institute Inc., Cary, NC, USA. ### +# Copyright (c) 2021, SAS Institute Inc., Cary, NC, USA. ### # All Rights Reserved. 
### # SPDX-License-Identifier: Apache-2.0 ### # ### diff --git a/viya_ark_library/structured_logging/parser.py b/viya_ark_library/structured_logging/parser.py index 24c4fbb..661401e 100644 --- a/viya_ark_library/structured_logging/parser.py +++ b/viya_ark_library/structured_logging/parser.py @@ -4,7 +4,7 @@ # ### Author: SAS Institute Inc. ### #################################################################### # ### -# Copyright (c) 2020, SAS Institute Inc., Cary, NC, USA. ### +# Copyright (c) 2021, SAS Institute Inc., Cary, NC, USA. ### # All Rights Reserved. ### # SPDX-License-Identifier: Apache-2.0 ### # ### From 823db699a86da8c5457a9bda9d5b13c671bf852a Mon Sep 17 00:00:00 2001 From: Latha Sivakumar Date: Mon, 28 Jun 2021 11:38:42 -0400 Subject: [PATCH 04/22] (#issue_118) update hello-ingress to networking.k8s.io/v1 --- .../library/pre_install_check.py | 4 +- .../library/pre_install_check_permissions.py | 73 +++++++++++++++++-- .../library/pre_install_utils.py | 23 ++++-- .../library/utils/hello-ingress-k8s-v118.yaml | 27 +++++++ .../library/utils/hello-ingress.yaml | 17 +++-- .../test/test_pre_install_report.py | 49 +++++++++++++ requirements.txt | 5 +- 7 files changed, 174 insertions(+), 24 deletions(-) create mode 100644 pre_install_report/library/utils/hello-ingress-k8s-v118.yaml diff --git a/pre_install_report/library/pre_install_check.py b/pre_install_report/library/pre_install_check.py index efe163e..e25048e 100644 --- a/pre_install_report/library/pre_install_check.py +++ b/pre_install_report/library/pre_install_check.py @@ -400,9 +400,11 @@ def _check_permissions(self, permissions_check: PreCheckPermissions): permissions_check: instance of PreCheckPermissions class """ namespace = self._kubectl.get_namespace() + permissions_check.get_server_gitVersion() + permissions_check.set_ingress_manifest_file() permissions_check.get_sc_resources() - permissions_check.manage_pvc(viya_constants.KUBECTL_APPLY, False) + 
permissions_check.manage_pvc(viya_constants.KUBECTL_APPLY, False) permissions_check.check_sample_application() permissions_check.check_sample_ingress() permissions_check.check_deploy_crd() diff --git a/pre_install_report/library/pre_install_check_permissions.py b/pre_install_report/library/pre_install_check_permissions.py index 4ea5302..052f300 100644 --- a/pre_install_report/library/pre_install_check_permissions.py +++ b/pre_install_report/library/pre_install_check_permissions.py @@ -11,12 +11,15 @@ # ### #################################################################### import os -from typing import List +from subprocess import CalledProcessError +from typing import List, Dict import requests +import sys import pprint +import semantic_version from requests.packages.urllib3.exceptions import InsecureRequestWarning -from pre_install_report.library.utils import viya_constants +from pre_install_report.library.utils import viya_constants, viya_messages from pre_install_report.library.pre_install_utils import PreCheckUtils from viya_ark_library.logging import ViyaARKLogger from viya_ark_library.k8s.sas_k8s_objects import KubernetesResource @@ -42,6 +45,8 @@ PROVISIONER_AZURE_DISK = "kubernetes.io/azure-disk" PROVISIONER_AWS_EBS = "kubernetes.io/aws-ebs" +INGRESS_REL = '<1.19' + class PreCheckPermissions(object): """ @@ -72,10 +77,11 @@ def __init__(self, params): self.cluster_admin_permission_aggregate[viya_constants.PERM_PERMISSIONS] = viya_constants.ADEQUATE_PERMS self.ingress_data = {} self.ingress_data[viya_constants.INGRESS_CONTROLLER] = self.ingress_controller - self.ingress_file = "hello-ingress.yaml" + self._ingress_file = "hello-ingress.yaml" self._storage_class_sc: List[KubernetesResource] = None self._sample_deployment = 0 self._sample_output = "" + self._k8s_gitVersion = None def _set_results_cluster_admin(self, resource_key, rc): """ @@ -394,23 +400,54 @@ def check_sample_service(self): """ Deploy Kubernetes Service for hello-world appliction in 
specified namespace and set the permissions status in the namespace_admin_permission_data dict object - - """ rc = self.utils.deploy_manifest_file(viya_constants.KUBECTL_APPLY, 'helloworld-svc.yaml') self._set_results_namespace_admin(viya_constants.PERM_SERVICE, rc) + def get_server_gitVersion(self): + """ + Retrieve the Kubernetes servervrsion and validate the gitVersion + """ + try: + versions: Dict = self.utils.get_k8s_version() + serverversion = versions.get('serverVersion') + gitVersion = serverversion.get('gitVersion') + self.logger.info("gitversion {} ".format(str(gitVersion))) + + if gitVersion.startswith("v"): + gitVersion = gitVersion[1:] + self.set_k8s_gitVersion(gitVersion) + except CalledProcessError as cpe: + self.logger.exception('kubectl version command failed. Return code = {}'.format(str(cpe.returncode))) + sys.exit(viya_messages.RUNTIME_ERROR_RC_) + + def set_ingress_manifest_file(self): + """ + Retrieve the server Kubernetes gitVersion using and compare it using + https://pypi.org/project/semantic_version/ 2.8.5 initial version + """ + try: + curr_version = semantic_version.Version(str(self.get_k8s_gitVersion())) + + if(curr_version in semantic_version.SimpleSpec(INGRESS_REL)): + self._ingress_file = "hello-ingress-k8s-v118.yaml" + self.logger.debug("hello-ingress file deployed {} major {} minor {}" + .format(str(self._ingress_file), str(curr_version.major), + str(curr_version.minor))) + except ValueError as cpe: + self.logger.exception(viya_messages.EXCEPTION_MESSAGE.format(str(cpe))) + sys.exit(viya_messages.RUNTIME_ERROR_RC_) + def check_sample_ingress(self): """ Deploy Kubernetes Ingress for hello-world appliction in the specified namespace and set the permissions status in the namespace_admin_permission_data dict object. 
If nginx is ingress controller check Ingress deployment (default) - """ rc = self.utils.deploy_manifest_file(viya_constants.KUBECTL_APPLY, - self.ingress_file) + self._ingress_file) self._set_results_namespace_admin(viya_constants.PERM_INGRESS, rc) def check_sample_response(self): @@ -508,7 +545,7 @@ def check_delete_sample_ingress(self): """ rc = self.utils.deploy_manifest_file(viya_constants.KUBECTL_DELETE, - self.ingress_file) + self._ingress_file) self._set_results_namespace_admin(viya_constants.PERM_DELETE + viya_constants.PERM_INGRESS, rc) def check_create_custom_resource(self): @@ -665,3 +702,23 @@ def get_cluster_admin_permission_aggregate(self): return: dict object with cluster admin aggregate permissions data """ return self.cluster_admin_permission_aggregate + + def set_k8s_gitVersion(self, version: str): + """ + Set the current Kubernetes Server Version + """ + self._k8s_gitVersion = version + + def get_k8s_gitVersion(self): + """ + Get the current Kubernetes Server Version + return: string object + """ + return self._k8s_gitVersion + + def get_ingress_file_name(self): + """ + Get the ingress manifest to be deployed + return: string object + """ + return self._ingress_file diff --git a/pre_install_report/library/pre_install_utils.py b/pre_install_report/library/pre_install_utils.py index 6c76f69..471fc07 100644 --- a/pre_install_report/library/pre_install_utils.py +++ b/pre_install_report/library/pre_install_utils.py @@ -14,7 +14,7 @@ from subprocess import CalledProcessError import os import pprint -from typing import List +from typing import List, Dict from pre_install_report.library.utils import viya_constants from viya_ark_library.k8s.sas_kubectl_interface import KubectlInterface, KubernetesApiResources @@ -47,15 +47,13 @@ def deploy_manifest_file(self, action, file_name): """ rc = 0 data = '' - error_msg = '' file_path = self._get_filepath(file_name) try: data = self._kubectl.manage_resource(action, file_path, False) except CalledProcessError as 
cpe: rc = cpe.returncode - error_msg = str(cpe.output) - self.logger.error("deploy_manifest_file rc {} action {} filepath {} error_msg {}".format(str(rc), action, - file_path, error_msg)) + self.logger.error("deploy_manifest_file rc {} action {} filepath {} error_msg {} error_out {}" + .format(str(rc), str(action), str(file_path), str(cpe.stderr), str(cpe.stdout))) return 1 self.logger.info("deploy_manifest_file rc {} action {} filepath {} data{}".format(str(rc), action, @@ -112,7 +110,6 @@ def get_rbac_group_cmd(self): def can_i(self, test_cmd): """ Run the specified can-i command in designated namespace - cmd: kubectl can-icommand to be executed return: True if action is permitted. If not, return false """ @@ -171,6 +168,20 @@ def get_resource(self, resource_kind, resource_name): pprint.pformat(k8s_resource.as_dict()))) return k8s_resource + def get_k8s_version(self): + """ + Retrieve the kubectl version details + return: Dict Object or raise cpe + """ + + try: + versions: Dict = self._kubectl.version() + return versions + except CalledProcessError as cpe: + self.logger.info('kubectl version failed ' + " version " + 'return code = ' + + cpe.returncode) + raise cpe + def _get_filepath(self, file_name): """ Assemble and return path for specied file in project library diff --git a/pre_install_report/library/utils/hello-ingress-k8s-v118.yaml b/pre_install_report/library/utils/hello-ingress-k8s-v118.yaml new file mode 100644 index 0000000..48b9014 --- /dev/null +++ b/pre_install_report/library/utils/hello-ingress-k8s-v118.yaml @@ -0,0 +1,27 @@ +#################################################################### +#### hello-ingress.yml #### +#################################################################### +#################################################################### +#### Author: SAS Institute Inc. 
#### +#### #### +#################################################################### +#################################################################### +# +# Copyright (c) 2019-2021, SAS Institute Inc., Cary, NC, USA. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 +# +--- +apiVersion: extensions/v1beta1 +kind: Ingress +metadata: + name: hello-world + annotations: + kubernetes.io/ingress.class: "nginx" +spec: + rules: + - http: + paths: + - backend: + serviceName: hello-world + servicePort: 8080 + path: /hello-world diff --git a/pre_install_report/library/utils/hello-ingress.yaml b/pre_install_report/library/utils/hello-ingress.yaml index 1cf4111..a4bbaa2 100644 --- a/pre_install_report/library/utils/hello-ingress.yaml +++ b/pre_install_report/library/utils/hello-ingress.yaml @@ -7,21 +7,24 @@ #################################################################### #################################################################### # -# Copyright (c) 2019-2020, SAS Institute Inc., Cary, NC, USA. All Rights Reserved. +# Copyright (c) 2019-2021, SAS Institute Inc., Cary, NC, USA. All Rights Reserved. 
# SPDX-License-Identifier: Apache-2.0 # --- -apiVersion: extensions/v1beta1 +apiVersion: networking.k8s.io/v1 kind: Ingress metadata: name: hello-world annotations: - kubernetes.io/ingress.class: "nginx" + nginx.ingress.kubernetes.io/rewrite-target: / spec: rules: - http: paths: - - backend: - serviceName: hello-world - servicePort: 8080 - path: /hello-world + - path: /hello-world + pathType: Prefix + backend: + service: + name: hello-world + port: + number: 8080 \ No newline at end of file diff --git a/pre_install_report/test/test_pre_install_report.py b/pre_install_report/test/test_pre_install_report.py index 6a85888..eab6d15 100644 --- a/pre_install_report/test/test_pre_install_report.py +++ b/pre_install_report/test/test_pre_install_report.py @@ -16,6 +16,7 @@ import pprint import json import logging +import semantic_version from pint import UnitRegistry from pre_install_report.library.utils import viya_constants @@ -587,6 +588,54 @@ def test_kubconfig_file(): os.environ['KUBECONFIG'] = str(old_kubeconfig) +def test_get_k8s_version(): + """ + Retrieve thhe server Kubernetes mjor and minor version using + https://pypi.org/project/packaging/ 20.9 initial version + Used by python setup tools + """ + # versions: Dict = self.utils.get_k8s_version() + version_string = "1.18.9-eks-d1db3c" + version_string2 = "1.19.0" + version_string3 = '1.19.a' + + params = {} + params[viya_constants.INGRESS_CONTROLLER] = 'nginx' + params[viya_constants.INGRESS_HOST] = '10.240.9.8' + params[viya_constants.INGRESS_PORT] = '80' + params['logger'] = sas_logger + + # initialize the PreCheckPermissions object + perms = PreCheckPermissions(params) + perms.set_k8s_gitVersion(version_string) + perms.set_ingress_manifest_file() + # check for correct ingress manifest + assert(str(perms.get_ingress_file_name() in "hello-ingress-k8s-v118.yaml")) + + perms.set_k8s_gitVersion(version_string2) + perms.set_ingress_manifest_file() + # check for correct ingress manifest + 
assert(str(perms.get_ingress_file_name() in "hello-ingress.yaml")) + + # check curren version less than 1.20 + curr_version = semantic_version.Version(str(version_string2)) + assert (curr_version in semantic_version.SimpleSpec('<1.20')) + + perms.set_k8s_gitVersion(version_string2) + perms.set_ingress_manifest_file() + # check for correct ingress manifest + assert(str(perms.get_ingress_file_name() not in "hello-ingress_invalid.yaml")) + + # initialize the PreCheckPermissions object + perms.set_k8s_gitVersion(version_string3) + # check for system exit rc 7 + try: + perms.set_ingress_manifest_file() + except SystemExit as exc: + assert exc.code == viya_messages.RUNTIME_ERROR_RC_ + pass + + def test_check_permissions(): # namespace = 'default' params = {} diff --git a/requirements.txt b/requirements.txt index 29870c4..9a2050a 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,5 +1,6 @@ jinja2>=2.11.3 -Pint==0.11 -requests==2.22.0 +Pint>=0.11 +requests>=2.22.0 pyyaml>=5.4 ldap3==2.9 +semantic_version>=2.8.5 \ No newline at end of file From 4cd9b34184dda8b47034a13e2d89aa8e4ef73d5b Mon Sep 17 00:00:00 2001 From: Latha Sivakumar Date: Mon, 28 Jun 2021 14:10:52 -0400 Subject: [PATCH 05/22] (#issue_118) update hello-ingress to networking.k8s.io/v1 --- pre_install_report/library/utils/hello-ingress-k8s-v118.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pre_install_report/library/utils/hello-ingress-k8s-v118.yaml b/pre_install_report/library/utils/hello-ingress-k8s-v118.yaml index 48b9014..be4df26 100644 --- a/pre_install_report/library/utils/hello-ingress-k8s-v118.yaml +++ b/pre_install_report/library/utils/hello-ingress-k8s-v118.yaml @@ -7,7 +7,7 @@ #################################################################### #################################################################### # -# Copyright (c) 2019-2021, SAS Institute Inc., Cary, NC, USA. All Rights Reserved. +# Copyright (c) 2021, SAS Institute Inc., Cary, NC, USA. 
All Rights Reserved. # SPDX-License-Identifier: Apache-2.0 # --- From fff9e93a98359646c4b797977c3317975c39b153 Mon Sep 17 00:00:00 2001 From: Amy Ho <39809594+cuddlehub@users.noreply.github.com> Date: Mon, 28 Jun 2021 15:38:55 -0400 Subject: [PATCH 06/22] Issue 115: add database storage information to deployment report (#117) * (#115) add database storage information to deployment report * (#115) add database storage information to deployment report * (#115) add database storage information to deployment report * (#115) add database storage information to deployment report * (#115) add database storage information to deployment report * (#115) add database storage information to deployment report * (#115) add database storage information to deployment report * (#115) add database storage information to deployment report --- .../static/viya_deployment_report_keys.py | 1 + .../model/test/test_viya_deployment_report.py | 35 ++------------ .../test/test_viya_deployment_report_utils.py | 38 +++++++++++++++ .../utils/viya_deployment_report_utils.py | 48 +++++++++++++++++++ .../model/viya_deployment_report.py | 31 ++++-------- .../templates/viya_deployment_report.html.j2 | 20 +++++++- .../api_resources_both_ingress.json | 18 ++++++- .../api_resources_istio_only.json | 18 ++++++- .../api_resources_nginx_only.json | 18 ++++++- .../api_resources_no_ingress.json | 18 ++++++- .../response_data/resources_configmaps.json | 38 ++++++++++++++- .../k8s/test_impl/sas_kubectl_test.py | 1 + 12 files changed, 223 insertions(+), 61 deletions(-) diff --git a/deployment_report/model/static/viya_deployment_report_keys.py b/deployment_report/model/static/viya_deployment_report_keys.py index 86d2eae..d354b4e 100644 --- a/deployment_report/model/static/viya_deployment_report_keys.py +++ b/deployment_report/model/static/viya_deployment_report_keys.py @@ -44,6 +44,7 @@ class Kubernetes(object): API_RESOURCES_DICT = "apiResources" API_VERSIONS_LIST = "apiVersions" CADENCE_INFO = "cadenceInfo" + 
DB_INFO = "dbInfo" DISCOVERED_KINDS_DICT = "discoveredKinds" INGRESS_CTRL = "ingressController" NAMESPACE = "namespace" diff --git a/deployment_report/model/test/test_viya_deployment_report.py b/deployment_report/model/test/test_viya_deployment_report.py index ccc4cae..78a4ba3 100644 --- a/deployment_report/model/test/test_viya_deployment_report.py +++ b/deployment_report/model/test/test_viya_deployment_report.py @@ -86,10 +86,11 @@ def test_get_kubernetes_details(report: ViyaDeploymentReport) -> None: kube_details: Dict = report.get_kubernetes_details() # check for all expected entries - assert len(kube_details) == 8 + assert len(kube_details) == 9 assert ReportKeys.Kubernetes.API_RESOURCES_DICT in kube_details assert ReportKeys.Kubernetes.API_VERSIONS_LIST in kube_details assert ReportKeys.Kubernetes.CADENCE_INFO in kube_details + assert ReportKeys.Kubernetes.DB_INFO in kube_details assert ReportKeys.Kubernetes.DISCOVERED_KINDS_DICT in kube_details assert ReportKeys.Kubernetes.INGRESS_CTRL in kube_details assert ReportKeys.Kubernetes.NAMESPACE in kube_details @@ -116,8 +117,9 @@ def test_get_api_resources(report: ViyaDeploymentReport) -> None: api_resources: Dict = report.get_api_resources() # check for expected attributes - assert len(api_resources) == 12 + assert len(api_resources) == 13 assert KubernetesResource.Kinds.CAS_DEPLOYMENT in api_resources + assert KubernetesResource.Kinds.CONFIGMAP in api_resources assert KubernetesResource.Kinds.CRON_JOB in api_resources assert KubernetesResource.Kinds.DEPLOYMENT in api_resources assert KubernetesResource.Kinds.INGRESS in api_resources @@ -603,32 +605,3 @@ def test_write_report_unpopulated() -> None: # make sure None is returned assert data_file is None assert html_file is None - - -def test_get_cadence_version(report: ViyaDeploymentReport) -> None: - """ - This test verifies that the provided cadence data is returned when values is passed to get_cadence_version(). 
- - :param report: The populated ViyaDeploymentReport returned by the report() fixture. - """ - # check for expected attributes - - cadence_data = KubectlTest.get_resources(KubectlTest(), "ConfigMaps") - cadence_info: Text = None - - for c in cadence_data: - cadence_info = report.get_cadence_version(c) - if cadence_info: - break - - assert cadence_info == KubectlTest.Values.CADENCEINFO - - -def test_get_cadence_version_none() -> None: - """ - This test verifies that a None value is returned for the cadence when the report is unpopulated. - """ - - # make sure None is returned - assert ViyaDeploymentReport().get_sas_component_resources(KubectlTest.Values.CADENCEINFO, - KubernetesResource.Kinds.CONFIGMAP) is None diff --git a/deployment_report/model/utils/test/test_viya_deployment_report_utils.py b/deployment_report/model/utils/test/test_viya_deployment_report_utils.py index 3983452..03aa56c 100644 --- a/deployment_report/model/utils/test/test_viya_deployment_report_utils.py +++ b/deployment_report/model/utils/test/test_viya_deployment_report_utils.py @@ -1150,3 +1150,41 @@ def test_aggregate_component_resources_sas_scheduled_backup_job(gathered_resourc assert len(component[ITEMS_KEY][kind]) == len(name_list) for name in name_list: assert name in component[ITEMS_KEY][kind] + + +def test_get_cadence_version(gathered_resources: Dict) -> None: + """ + This test verifies that the provided cadence data is returned when values is passed to get_cadence_version(). + + :param report: The populated ViyaDeploymentReport returned by the report() fixture. 
+ """ + # check for expected attributes + + cadence_data = KubectlTest.get_resources(KubectlTest(), "ConfigMaps") + cadence_info: Text = None + + for c in cadence_data: + cadence_info = ViyaDeploymentReportUtils.get_cadence_version(c) + if cadence_info: + break + + assert cadence_info == KubectlTest.Values.CADENCEINFO + + +def test_get_db_info(gathered_resources: Dict) -> None: + """ + This test verifies that the provided db data is returned when values is passed to get_db_info(). + + :param report: The populated ViyaDeploymentReport returned by the report() fixture. + """ + # check for expected attributes + + db_data = KubectlTest.get_resources(KubectlTest(), "ConfigMaps") + db_dict: Dict = dict() + + for c in db_data: + db_dict = ViyaDeploymentReportUtils.get_db_info(c) + if db_dict: + break + + assert db_dict["Type"] == KubectlTest.Values.DBINFO diff --git a/deployment_report/model/utils/viya_deployment_report_utils.py b/deployment_report/model/utils/viya_deployment_report_utils.py index 84ae462..7dcd992 100644 --- a/deployment_report/model/utils/viya_deployment_report_utils.py +++ b/deployment_report/model/utils/viya_deployment_report_utils.py @@ -512,3 +512,51 @@ def aggregate_component_resources(resource_details: Dict, gathered_resources: Di if resource.get_annotation(KubernetesResource.Keys.ANNOTATION_COMPONENT_NAME) is not None: component[NAME_KEY] = \ resource.get_annotation(KubernetesResource.Keys.ANNOTATION_COMPONENT_NAME) + + @staticmethod + def get_cadence_version(resource: KubernetesResource) -> Optional[Text]: + """ + Returns the cadence version of the targeted SAS deployment. + + :param resource: The key of the value to return. + :return: A string representing the cadence version of the targeted SAS deployment. 
+ """ + cadence_info: Optional[Text] = None + try: + if 'sas-deployment-metadata' in resource.get_name(): + cadence_data: Optional[Dict] = resource.get_data() + cadence_info = ( + f"{cadence_data['SAS_CADENCE_DISPLAY_NAME']} " + f"{cadence_data['SAS_CADENCE_VERSION']} " + f"({cadence_data['SAS_CADENCE_RELEASE']})" + ) + return cadence_info + except KeyError: + return None + + @staticmethod + def get_db_info(resource: KubernetesResource) -> Optional[Dict]: + """ + Returns the db information of the targeted SAS deployment. + + :param resource: The key of the value to return. + :return: A dict representing the db information of the targeted SAS deployment. + """ + db_dict: Optional[Dict] = dict() + try: + if 'sas-postgres-config' in resource.get_name(): + db_data: Optional[Dict] = resource.get_data() + if db_data['EXTERNAL_DATABASE'] == "false": + return {"Type": "Internal"} + + db_dict = { + "Type": "External", + "Host": db_data['DATABASE_HOST'], + "Port": db_data['DATABASE_PORT'], + "Name": db_data['DATABASE_NAME'], + "User": db_data['SPRING_DATASOURCE_USERNAME'] + } + + return db_dict + except KeyError: + return None diff --git a/deployment_report/model/viya_deployment_report.py b/deployment_report/model/viya_deployment_report.py index 17ffe90..1f2f76e 100644 --- a/deployment_report/model/viya_deployment_report.py +++ b/deployment_report/model/viya_deployment_report.py @@ -134,13 +134,17 @@ def gather_details(self, kubectl: KubectlInterface, # start by gathering details about ConfigMap # cadence_info: Optional[Text] = None + db_dict: Optional[Dict] = dict() try: ViyaDeploymentReportUtils.gather_resource_details(kubectl, gathered_resources, api_resources, k8s_kinds.CONFIGMAP) for item in gathered_resources[k8s_kinds.CONFIGMAP]['items']: resource_definition = gathered_resources[k8s_kinds.CONFIGMAP]['items'][item]['resourceDefinition'] - cadence_info = self.get_cadence_version(resource_definition) - if cadence_info: + if not cadence_info: + cadence_info = 
ViyaDeploymentReportUtils.get_cadence_version(resource_definition) + if not db_dict: + db_dict = ViyaDeploymentReportUtils.get_db_info(resource_definition) + if db_dict and cadence_info: break except CalledProcessError: @@ -296,6 +300,8 @@ def gather_details(self, kubectl: KubectlInterface, k8s_details_dict[Keys.Kubernetes.DISCOVERED_KINDS_DICT]: Dict = dict() # create a key to hold the cadence version information: str|None # k8s_details_dict[Keys.Kubernetes.CADENCE_INFO]: Optional[Text] = cadence_info + # create a key to hold the viya db information: dict # + k8s_details_dict[Keys.Kubernetes.DB_INFO]: Dict = db_dict # add the availability and count of all discovered resources # for kind_name, kind_details in gathered_resources.items(): @@ -579,24 +585,3 @@ def write_report(self, output_directory: Text = OUTPUT_DIRECTORY_DEFAULT, include_definitions=include_resource_definitions) return os.path.abspath(data_file_path), html_file_path - - @staticmethod - def get_cadence_version(resource: KubernetesResource) -> Optional[Text]: - """ - Returns the cadence version of the targeted SAS deployment. - - :param resource: The key of the value to return. - :return: A string representing the cadence version of the targeted SAS deployment. 
- """ - cadence_info: Optional[Text] = None - try: - if 'sas-deployment-metadata' in resource.get_name(): - cadence_data: Optional[Dict] = resource.get_data() - cadence_info = ( - f"{cadence_data['SAS_CADENCE_DISPLAY_NAME']} " - f"{cadence_data['SAS_CADENCE_VERSION']} " - f"({cadence_data['SAS_CADENCE_RELEASE']})" - ) - return cadence_info - except KeyError: - return None diff --git a/deployment_report/templates/viya_deployment_report.html.j2 b/deployment_report/templates/viya_deployment_report.html.j2 index 074cb73..d6c74e4 100644 --- a/deployment_report/templates/viya_deployment_report.html.j2 +++ b/deployment_report/templates/viya_deployment_report.html.j2 @@ -94,7 +94,7 @@ Cadence Version - {{ report_data.kubernetes.cadenceInfo | default("could not be determined- cadinfo") }}{{ report_data.kubernetes.cadenceInfo | default("could not be determined- cadinfo") }} {# Cluster Overview: Overview Table #} @@ -129,6 +129,24 @@ {# Cluster Overview: Kubernetes Versions Accordion #} + {# Cluster Overview: Storage Accordion #} + {% set kind_name = "Storage" %} +
+

Storage

+
+ Database + + {% for key, value in report_data.kubernetes.dbInfo.items() %} + + + + + {% endfor %} +
{{key}}{{value}}
+
+
+ {# Cluster Overview: Storage Accordion #} + {# Cluster Overview: Nodes Accordion #} {% if report_data.kubernetes.nodes.count > 0 %} diff --git a/viya_ark_library/k8s/test_impl/response_data/api_resources_both_ingress.json b/viya_ark_library/k8s/test_impl/response_data/api_resources_both_ingress.json index 7a51c6c..abd5fd9 100644 --- a/viya_ark_library/k8s/test_impl/response_data/api_resources_both_ingress.json +++ b/viya_ark_library/k8s/test_impl/response_data/api_resources_both_ingress.json @@ -193,5 +193,21 @@ "update", "watch" ] + }, + "ConfigMap": { + "name": "configmaps", + "shortname": "cm", + "apiGroup": "", + "namespaced": true, + "verbs": [ + "create", + "delete", + "deletecollection", + "get", + "list", + "patch", + "update", + "watch" + ] } -} \ No newline at end of file +} diff --git a/viya_ark_library/k8s/test_impl/response_data/api_resources_istio_only.json b/viya_ark_library/k8s/test_impl/response_data/api_resources_istio_only.json index ffc819e..b995b3e 100644 --- a/viya_ark_library/k8s/test_impl/response_data/api_resources_istio_only.json +++ b/viya_ark_library/k8s/test_impl/response_data/api_resources_istio_only.json @@ -177,5 +177,21 @@ "update", "watch" ] + }, + "ConfigMap": { + "name": "configmaps", + "shortname": "cm", + "apiGroup": "", + "namespaced": true, + "verbs": [ + "create", + "delete", + "deletecollection", + "get", + "list", + "patch", + "update", + "watch" + ] } -} \ No newline at end of file +} diff --git a/viya_ark_library/k8s/test_impl/response_data/api_resources_nginx_only.json b/viya_ark_library/k8s/test_impl/response_data/api_resources_nginx_only.json index 622ca81..7dcf74a 100644 --- a/viya_ark_library/k8s/test_impl/response_data/api_resources_nginx_only.json +++ b/viya_ark_library/k8s/test_impl/response_data/api_resources_nginx_only.json @@ -177,5 +177,21 @@ "update", "watch" ] + }, + "ConfigMap": { + "name": "configmaps", + "shortname": "cm", + "apiGroup": "", + "namespaced": true, + "verbs": [ + "create", + "delete", + 
"deletecollection", + "get", + "list", + "patch", + "update", + "watch" + ] } -} \ No newline at end of file +} diff --git a/viya_ark_library/k8s/test_impl/response_data/api_resources_no_ingress.json b/viya_ark_library/k8s/test_impl/response_data/api_resources_no_ingress.json index 931403a..ad3d72c 100644 --- a/viya_ark_library/k8s/test_impl/response_data/api_resources_no_ingress.json +++ b/viya_ark_library/k8s/test_impl/response_data/api_resources_no_ingress.json @@ -161,5 +161,21 @@ "update", "watch" ] + }, + "ConfigMap": { + "name": "configmaps", + "shortname": "cm", + "apiGroup": "", + "namespaced": true, + "verbs": [ + "create", + "delete", + "deletecollection", + "get", + "list", + "patch", + "update", + "watch" + ] } -} \ No newline at end of file +} diff --git a/viya_ark_library/k8s/test_impl/response_data/resources_configmaps.json b/viya_ark_library/k8s/test_impl/response_data/resources_configmaps.json index 0d13b34..a2a5742 100644 --- a/viya_ark_library/k8s/test_impl/response_data/resources_configmaps.json +++ b/viya_ark_library/k8s/test_impl/response_data/resources_configmaps.json @@ -47,10 +47,44 @@ "sas.com/deployment": "sas-viya" }, "name": "sas-logon-app-parameters-g4hg56gm5b", - "namespace": "d24140", + "namespace": "test", "resourceVersion": "53636348", - "selfLink": "/api/v1/namespaces/d24140/configmaps/sas-logon-app-parameters-g4hg56gm5b", + "selfLink": "/api/v1/namespaces/test/configmaps/sas-logon-app-parameters-g4hg56gm5b", "uid": "2a38df05-ef72-439b-8155-faa984796d26" } + }, + { + "apiVersion": "v1", + "data": { + "CONSUL_HTTP_ADDR": "https://sas-consul-server:8500", + "DATABASE_HOST": "sas-crunchy-data-postgres", + "DATABASE_NAME": "SharedServices", + "DATABASE_PORT": "5432", + "DATABASE_SSL_ENABLED": "true", + "EXTERNAL_DATABASE": "false", + "NSQ_MAX_TRIES": "60", + "POSTGRES_OPERATOR_SERVICE_SCHEME": "https", + "REQUESTS_CA_BUNDLE": "/security/trustedcerts.pem", + "SAS_DATABASE_DATABASESERVERNAME": "sas-crunchy-data-postgres", + 
"SAS_DATABASE_SCHEMA": "${application.schema}", + "SPRING_DATASOURCE_DRIVER-CLASS-NAME": "org.postgresql.Driver", + "SSL_CERT_FILE": "/security/trustedcerts.pem" + }, + "kind": "ConfigMap", + "metadata": { + "annotations": { + "kubectl.kubernetes.io/last-applied-configuration": "" + }, + "creationTimestamp": "2021-05-18T20:02:42Z", + "labels": { + "sas.com/admin": "cluster-local", + "sas.com/deployment": "sas-viya" + }, + "name": "sas-postgres-config-5dt8fm92c7", + "namespace": "test", + "resourceVersion": "7736", + "selfLink": "/api/v1/namespaces/test/configmaps/sas-postgres-config-5dt8fm92c7", + "uid": "9924c601-602f-404a-b113-7b6671f0e88f" + } } ] diff --git a/viya_ark_library/k8s/test_impl/sas_kubectl_test.py b/viya_ark_library/k8s/test_impl/sas_kubectl_test.py index c991bec..d3c467a 100644 --- a/viya_ark_library/k8s/test_impl/sas_kubectl_test.py +++ b/viya_ark_library/k8s/test_impl/sas_kubectl_test.py @@ -77,6 +77,7 @@ class Values(object): """ NAMESPACE: Text = "test" CADENCEINFO: Text = "Fast R/TR 2020 (20201214.1607958443388)" + DBINFO: Text = "Internal" # Component: prometheus COMPONENT_PROMETHEUS_DEPLOYMENT_NAME: Text = "pushgateway-test-prometheus-pushgateway" From 30a0af886d16949698655e0509fdd7eb02c05336 Mon Sep 17 00:00:00 2001 From: Latha Sivakumar Date: Mon, 28 Jun 2021 15:57:00 -0400 Subject: [PATCH 07/22] (#issue_118) update hello-ingress to networking.k8s.io/v1 --- .../library/pre_install_check_permissions.py | 10 ++++++++-- pre_install_report/test/test_pre_install_report.py | 9 +++++++++ 2 files changed, 17 insertions(+), 2 deletions(-) diff --git a/pre_install_report/library/pre_install_check_permissions.py b/pre_install_report/library/pre_install_check_permissions.py index 052f300..1911f2d 100644 --- a/pre_install_report/library/pre_install_check_permissions.py +++ b/pre_install_report/library/pre_install_check_permissions.py @@ -45,7 +45,8 @@ PROVISIONER_AZURE_DISK = "kubernetes.io/azure-disk" PROVISIONER_AWS_EBS = "kubernetes.io/aws-ebs" 
-INGRESS_REL = '<1.19' +INGRESS_REL_EQUAL = '==1.18' +INGRESS_REL_LESS = '<1.18' class PreCheckPermissions(object): @@ -431,11 +432,16 @@ def set_ingress_manifest_file(self): try: curr_version = semantic_version.Version(str(self.get_k8s_gitVersion())) - if(curr_version in semantic_version.SimpleSpec(INGRESS_REL)): + if(curr_version in semantic_version.SimpleSpec(INGRESS_REL_EQUAL)): self._ingress_file = "hello-ingress-k8s-v118.yaml" self.logger.debug("hello-ingress file deployed {} major {} minor {}" .format(str(self._ingress_file), str(curr_version.major), str(curr_version.minor))) + if(curr_version in semantic_version.SimpleSpec(INGRESS_REL_LESS)): + self._ingress_file = "hello-ingress-k8s-v118.yaml" + self.logger.error("This release of Kubernetes is not supported. major {} minor {}" + .format(str(curr_version.major), + str(curr_version.minor))) except ValueError as cpe: self.logger.exception(viya_messages.EXCEPTION_MESSAGE.format(str(cpe))) sys.exit(viya_messages.RUNTIME_ERROR_RC_) diff --git a/pre_install_report/test/test_pre_install_report.py b/pre_install_report/test/test_pre_install_report.py index eab6d15..60fde5c 100644 --- a/pre_install_report/test/test_pre_install_report.py +++ b/pre_install_report/test/test_pre_install_report.py @@ -598,6 +598,7 @@ def test_get_k8s_version(): version_string = "1.18.9-eks-d1db3c" version_string2 = "1.19.0" version_string3 = '1.19.a' + version_string4 = '1.17.1' params = {} params[viya_constants.INGRESS_CONTROLLER] = 'nginx' @@ -612,6 +613,13 @@ def test_get_k8s_version(): # check for correct ingress manifest assert(str(perms.get_ingress_file_name() in "hello-ingress-k8s-v118.yaml")) + # initialize the PreCheckPermissions object + perms = PreCheckPermissions(params) + perms.set_k8s_gitVersion(version_string4) + perms.set_ingress_manifest_file() + # check for correct ingress manifest + assert(str(perms.get_ingress_file_name() in "hello-ingress-k8s-v118.yaml")) + perms.set_k8s_gitVersion(version_string2) 
perms.set_ingress_manifest_file() # check for correct ingress manifest @@ -620,6 +628,7 @@ def test_get_k8s_version(): # check curren version less than 1.20 curr_version = semantic_version.Version(str(version_string2)) assert (curr_version in semantic_version.SimpleSpec('<1.20')) + assert (curr_version in semantic_version.SimpleSpec('==1.19')) perms.set_k8s_gitVersion(version_string2) perms.set_ingress_manifest_file() From 32cc68bdb62458c8367c4d1cfdc4d2791cfe661c Mon Sep 17 00:00:00 2001 From: Latha Sivakumar Date: Thu, 8 Jul 2021 11:09:15 -0400 Subject: [PATCH 08/22] (#issue_118) update hello-ingress to networking.k8s.io/v1 --- .../library/pre_install_check_permissions.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/pre_install_report/library/pre_install_check_permissions.py b/pre_install_report/library/pre_install_check_permissions.py index 1911f2d..e6655df 100644 --- a/pre_install_report/library/pre_install_check_permissions.py +++ b/pre_install_report/library/pre_install_check_permissions.py @@ -45,8 +45,8 @@ PROVISIONER_AZURE_DISK = "kubernetes.io/azure-disk" PROVISIONER_AWS_EBS = "kubernetes.io/aws-ebs" -INGRESS_REL_EQUAL = '==1.18' -INGRESS_REL_LESS = '<1.18' +INGRESS_MIN_SUPPORTED_REL_EQ = '==1.18' +INGRESS_UNSUPPORTED_REL_LT = '<1.18' class PreCheckPermissions(object): @@ -432,12 +432,12 @@ def set_ingress_manifest_file(self): try: curr_version = semantic_version.Version(str(self.get_k8s_gitVersion())) - if(curr_version in semantic_version.SimpleSpec(INGRESS_REL_EQUAL)): + if(curr_version in semantic_version.SimpleSpec(INGRESS_MIN_SUPPORTED_REL_EQ)): self._ingress_file = "hello-ingress-k8s-v118.yaml" self.logger.debug("hello-ingress file deployed {} major {} minor {}" .format(str(self._ingress_file), str(curr_version.major), str(curr_version.minor))) - if(curr_version in semantic_version.SimpleSpec(INGRESS_REL_LESS)): + if(curr_version in semantic_version.SimpleSpec(INGRESS_UNSUPPORTED_REL_LT)): self._ingress_file = 
"hello-ingress-k8s-v118.yaml" self.logger.error("This release of Kubernetes is not supported. major {} minor {}" .format(str(curr_version.major), From a8e5f836bf230c5c5638ea4bc0b25391bf670081 Mon Sep 17 00:00:00 2001 From: Latha Sivakumar Date: Thu, 8 Jul 2021 12:06:09 -0400 Subject: [PATCH 09/22] (#issue_118) update hello-ingress to networking.k8s.io/v1 --- pre_install_report/library/pre_install_check_permissions.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pre_install_report/library/pre_install_check_permissions.py b/pre_install_report/library/pre_install_check_permissions.py index e6655df..61fa66e 100644 --- a/pre_install_report/library/pre_install_check_permissions.py +++ b/pre_install_report/library/pre_install_check_permissions.py @@ -45,7 +45,7 @@ PROVISIONER_AZURE_DISK = "kubernetes.io/azure-disk" PROVISIONER_AWS_EBS = "kubernetes.io/aws-ebs" -INGRESS_MIN_SUPPORTED_REL_EQ = '==1.18' +INGRESS_V1BETA1_REL_EQ = '==1.18' INGRESS_UNSUPPORTED_REL_LT = '<1.18' @@ -432,7 +432,7 @@ def set_ingress_manifest_file(self): try: curr_version = semantic_version.Version(str(self.get_k8s_gitVersion())) - if(curr_version in semantic_version.SimpleSpec(INGRESS_MIN_SUPPORTED_REL_EQ)): + if(curr_version in semantic_version.SimpleSpec(INGRESS_V1BETA1_REL_EQ)): self._ingress_file = "hello-ingress-k8s-v118.yaml" self.logger.debug("hello-ingress file deployed {} major {} minor {}" .format(str(self._ingress_file), str(curr_version.major), From 467d746379082285dd8e7f0a6b2f2295c9c172b4 Mon Sep 17 00:00:00 2001 From: Latha Sivakumar Date: Thu, 8 Jul 2021 14:25:03 -0400 Subject: [PATCH 10/22] (#issue_118) update hello-ingress to networking.k8s.io/v1 --- .../library/pre_install_check_permissions.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/pre_install_report/library/pre_install_check_permissions.py b/pre_install_report/library/pre_install_check_permissions.py index 61fa66e..122f4a2 100644 --- 
a/pre_install_report/library/pre_install_check_permissions.py +++ b/pre_install_report/library/pre_install_check_permissions.py @@ -413,13 +413,13 @@ def get_server_gitVersion(self): """ try: versions: Dict = self.utils.get_k8s_version() - serverversion = versions.get('serverVersion') - gitVersion = serverversion.get('gitVersion') - self.logger.info("gitversion {} ".format(str(gitVersion))) + server_version = versions.get('serverVersion') + git_version = server_version.get('gitVersion') + self.logger.info("gitversion {} ".format(str(git_version))) - if gitVersion.startswith("v"): - gitVersion = gitVersion[1:] - self.set_k8s_gitVersion(gitVersion) + if git_version.startswith("v"): + git_version = git_version[1:] + self.set_k8s_gitVersion(git_version) except CalledProcessError as cpe: self.logger.exception('kubectl version command failed. Return code = {}'.format(str(cpe.returncode))) sys.exit(viya_messages.RUNTIME_ERROR_RC_) From 6265e17a9cb9ffc1b0a125bdc12fcf99cf2fb949 Mon Sep 17 00:00:00 2001 From: Latha Sivakumar Date: Thu, 8 Jul 2021 20:04:44 -0400 Subject: [PATCH 11/22] (#issue_118) update hello-ingress to networking.k8s.io/v1 --- .../library/pre_install_check.py | 2 +- .../library/pre_install_check_permissions.py | 20 +++++++++---------- .../test/test_pre_install_report.py | 10 +++++----- 3 files changed, 16 insertions(+), 16 deletions(-) diff --git a/pre_install_report/library/pre_install_check.py b/pre_install_report/library/pre_install_check.py index e25048e..685ef53 100644 --- a/pre_install_report/library/pre_install_check.py +++ b/pre_install_report/library/pre_install_check.py @@ -400,7 +400,7 @@ def _check_permissions(self, permissions_check: PreCheckPermissions): permissions_check: instance of PreCheckPermissions class """ namespace = self._kubectl.get_namespace() - permissions_check.get_server_gitVersion() + permissions_check.get_server_git_version() permissions_check.set_ingress_manifest_file() permissions_check.get_sc_resources() diff --git 
a/pre_install_report/library/pre_install_check_permissions.py b/pre_install_report/library/pre_install_check_permissions.py index 122f4a2..b45a11a 100644 --- a/pre_install_report/library/pre_install_check_permissions.py +++ b/pre_install_report/library/pre_install_check_permissions.py @@ -82,7 +82,7 @@ def __init__(self, params): self._storage_class_sc: List[KubernetesResource] = None self._sample_deployment = 0 self._sample_output = "" - self._k8s_gitVersion = None + self._k8s_git_version = None def _set_results_cluster_admin(self, resource_key, rc): """ @@ -407,19 +407,19 @@ def check_sample_service(self): 'helloworld-svc.yaml') self._set_results_namespace_admin(viya_constants.PERM_SERVICE, rc) - def get_server_gitVersion(self): + def get_server_git_version(self): """ - Retrieve the Kubernetes servervrsion and validate the gitVersion + Retrieve the Kubernetes server version and validate the git version """ try: versions: Dict = self.utils.get_k8s_version() server_version = versions.get('serverVersion') git_version = server_version.get('gitVersion') - self.logger.info("gitversion {} ".format(str(git_version))) + self.logger.info("git_version {} ".format(str(git_version))) if git_version.startswith("v"): git_version = git_version[1:] - self.set_k8s_gitVersion(git_version) + self.set_k8s_git_version(git_version) except CalledProcessError as cpe: self.logger.exception('kubectl version command failed. 
Return code = {}'.format(str(cpe.returncode))) sys.exit(viya_messages.RUNTIME_ERROR_RC_) @@ -430,7 +430,7 @@ def set_ingress_manifest_file(self): https://pypi.org/project/semantic_version/ 2.8.5 initial version """ try: - curr_version = semantic_version.Version(str(self.get_k8s_gitVersion())) + curr_version = semantic_version.Version(str(self.get_k8s_git_version())) if(curr_version in semantic_version.SimpleSpec(INGRESS_V1BETA1_REL_EQ)): self._ingress_file = "hello-ingress-k8s-v118.yaml" @@ -709,18 +709,18 @@ def get_cluster_admin_permission_aggregate(self): """ return self.cluster_admin_permission_aggregate - def set_k8s_gitVersion(self, version: str): + def set_k8s_git_version(self, version: str): """ Set the current Kubernetes Server Version """ - self._k8s_gitVersion = version + self._k8s_git_version = version - def get_k8s_gitVersion(self): + def get_k8s_git_version(self): """ Get the current Kubernetes Server Version return: string object """ - return self._k8s_gitVersion + return self._k8s_git_version def get_ingress_file_name(self): """ diff --git a/pre_install_report/test/test_pre_install_report.py b/pre_install_report/test/test_pre_install_report.py index 60fde5c..257fd5a 100644 --- a/pre_install_report/test/test_pre_install_report.py +++ b/pre_install_report/test/test_pre_install_report.py @@ -608,19 +608,19 @@ def test_get_k8s_version(): # initialize the PreCheckPermissions object perms = PreCheckPermissions(params) - perms.set_k8s_gitVersion(version_string) + perms.set_k8s_git_version(version_string) perms.set_ingress_manifest_file() # check for correct ingress manifest assert(str(perms.get_ingress_file_name() in "hello-ingress-k8s-v118.yaml")) # initialize the PreCheckPermissions object perms = PreCheckPermissions(params) - perms.set_k8s_gitVersion(version_string4) + perms.set_k8s_git_version(version_string4) perms.set_ingress_manifest_file() # check for correct ingress manifest assert(str(perms.get_ingress_file_name() in 
"hello-ingress-k8s-v118.yaml")) - perms.set_k8s_gitVersion(version_string2) + perms.set_k8s_git_version(version_string2) perms.set_ingress_manifest_file() # check for correct ingress manifest assert(str(perms.get_ingress_file_name() in "hello-ingress.yaml")) @@ -630,13 +630,13 @@ def test_get_k8s_version(): assert (curr_version in semantic_version.SimpleSpec('<1.20')) assert (curr_version in semantic_version.SimpleSpec('==1.19')) - perms.set_k8s_gitVersion(version_string2) + perms.set_k8s_git_version(version_string2) perms.set_ingress_manifest_file() # check for correct ingress manifest assert(str(perms.get_ingress_file_name() not in "hello-ingress_invalid.yaml")) # initialize the PreCheckPermissions object - perms.set_k8s_gitVersion(version_string3) + perms.set_k8s_git_version(version_string3) # check for system exit rc 7 try: perms.set_ingress_manifest_file() From a78cc78c461fcc029f355b3fa4ab7ad92d4ab89e Mon Sep 17 00:00:00 2001 From: Josh Woods Date: Wed, 21 Jul 2021 15:29:59 -0400 Subject: [PATCH 12/22] (Issue #120) Deployment Report: Update supported Ingress definitions --- ...ya_deployment_report_ingress_controller.py | 19 - .../model/test/test_viya_deployment_report.py | 62 +- .../model/utils/component_util.py | 82 ++ deployment_report/model/utils/config_util.py | 79 ++ deployment_report/model/utils/ingress_util.py | 98 ++ deployment_report/model/utils/metrics_util.py | 73 + .../model/utils/relationship_util.py | 383 ++++++ .../model/utils/resource_util.py | 133 ++ .../model/utils/test/conftest.py | 186 +++ .../model/utils/test/test_component_util.py | 722 ++++++++++ .../model/utils/test/test_config_util.py | 56 + .../model/utils/test/test_ingress_utils.py | 250 ++++ .../model/utils/test/test_metrics_util.py | 110 ++ .../utils/test/test_relationship_util.py | 355 +++++ .../model/utils/test/test_resource_util.py | 438 ++++++ .../test/test_viya_deployment_report_utils.py | 1190 ----------------- .../utils/viya_deployment_report_utils.py | 562 -------- 
.../model/viya_deployment_report.py | 278 ++-- deployment_report/templates/httpproxy.html.j2 | 29 + deployment_report/templates/route.html.j2 | 23 + viya_ark_library/k8s/sas_k8s_ingress.py | 46 + viya_ark_library/k8s/sas_k8s_objects.py | 6 + .../k8s/test/test_sas_k8s_ingress.py | 45 + .../api_resources_ingress_all.json | 245 ++++ ...son => api_resources_ingress_contour.json} | 48 +- ....json => api_resources_ingress_istio.json} | 32 +- ....json => api_resources_ingress_nginx.json} | 32 +- ...s.json => api_resources_ingress_none.json} | 32 +- .../api_resources_ingress_openshift.json | 197 +++ .../response_data/resources_httpproxy.json | 82 ++ .../response_data/resources_ingresses.json | 2 +- .../response_data/resources_routes.json | 46 + .../k8s/test_impl/sas_kubectl_test.py | 306 ++++- 33 files changed, 4167 insertions(+), 2080 deletions(-) delete mode 100644 deployment_report/model/static/viya_deployment_report_ingress_controller.py create mode 100644 deployment_report/model/utils/component_util.py create mode 100644 deployment_report/model/utils/config_util.py create mode 100644 deployment_report/model/utils/ingress_util.py create mode 100644 deployment_report/model/utils/metrics_util.py create mode 100644 deployment_report/model/utils/relationship_util.py create mode 100644 deployment_report/model/utils/resource_util.py create mode 100644 deployment_report/model/utils/test/conftest.py create mode 100644 deployment_report/model/utils/test/test_component_util.py create mode 100644 deployment_report/model/utils/test/test_config_util.py create mode 100644 deployment_report/model/utils/test/test_ingress_utils.py create mode 100644 deployment_report/model/utils/test/test_metrics_util.py create mode 100644 deployment_report/model/utils/test/test_relationship_util.py create mode 100644 deployment_report/model/utils/test/test_resource_util.py delete mode 100644 deployment_report/model/utils/test/test_viya_deployment_report_utils.py delete mode 100644 
deployment_report/model/utils/viya_deployment_report_utils.py create mode 100644 deployment_report/templates/httpproxy.html.j2 create mode 100644 deployment_report/templates/route.html.j2 create mode 100644 viya_ark_library/k8s/sas_k8s_ingress.py create mode 100644 viya_ark_library/k8s/test/test_sas_k8s_ingress.py create mode 100644 viya_ark_library/k8s/test_impl/response_data/api_resources_ingress_all.json rename viya_ark_library/k8s/test_impl/response_data/{api_resources_both_ingress.json => api_resources_ingress_contour.json} (89%) rename viya_ark_library/k8s/test_impl/response_data/{api_resources_istio_only.json => api_resources_ingress_istio.json} (100%) rename viya_ark_library/k8s/test_impl/response_data/{api_resources_nginx_only.json => api_resources_ingress_nginx.json} (100%) rename viya_ark_library/k8s/test_impl/response_data/{api_resources_no_ingress.json => api_resources_ingress_none.json} (100%) create mode 100644 viya_ark_library/k8s/test_impl/response_data/api_resources_ingress_openshift.json create mode 100644 viya_ark_library/k8s/test_impl/response_data/resources_httpproxy.json create mode 100644 viya_ark_library/k8s/test_impl/response_data/resources_routes.json diff --git a/deployment_report/model/static/viya_deployment_report_ingress_controller.py b/deployment_report/model/static/viya_deployment_report_ingress_controller.py deleted file mode 100644 index 3fde003..0000000 --- a/deployment_report/model/static/viya_deployment_report_ingress_controller.py +++ /dev/null @@ -1,19 +0,0 @@ -#################################################################### -# ### viya_deployment_report_ingress_controller.py ### -#################################################################### -# ### Author: SAS Institute Inc. ### -#################################################################### -# ### -# Copyright (c) 2020, SAS Institute Inc., Cary, NC, USA. ### -# All Rights Reserved. 
### -# SPDX-License-Identifier: Apache-2.0 ### -# ### -#################################################################### - - -class ViyaDeploymentReportIngressController(object): - """ - Class defining static references to the supported ingress controller values. - """ - KUBE_NGINX = "kube-nginx" - ISTIO = "istio" diff --git a/deployment_report/model/test/test_viya_deployment_report.py b/deployment_report/model/test/test_viya_deployment_report.py index 78a4ba3..5b27dc4 100644 --- a/deployment_report/model/test/test_viya_deployment_report.py +++ b/deployment_report/model/test/test_viya_deployment_report.py @@ -18,10 +18,9 @@ from deployment_report.model.viya_deployment_report import ViyaDeploymentReport from deployment_report.model.static.viya_deployment_report_keys import ITEMS_KEY from deployment_report.model.static.viya_deployment_report_keys import ViyaDeploymentReportKeys as ReportKeys -from deployment_report.model.static.viya_deployment_report_ingress_controller import \ - ViyaDeploymentReportIngressController as ExpectedIngressController from viya_ark_library.k8s.sas_k8s_errors import KubectlRequestForbiddenError +from viya_ark_library.k8s.sas_k8s_ingress import SupportedIngress from viya_ark_library.k8s.sas_k8s_objects import KubernetesResource from viya_ark_library.k8s.test_impl.sas_kubectl_test import KubectlTest @@ -106,26 +105,30 @@ def test_get_kubernetes_details_unpopulated() -> None: assert ViyaDeploymentReport().get_kubernetes_details() is None -def test_get_api_resources(report: ViyaDeploymentReport) -> None: +def test_get_api_resources() -> None: """ This test verifies that all the expected api-resources values returned by the KubectlTest implementation are present in the "kubernetes.apiResources" dictionary in the completed report. - - :param report: The populated ViyaDeploymentReport returned by the report() fixture. 
""" + report: ViyaDeploymentReport = ViyaDeploymentReport() + report.gather_details(kubectl=KubectlTest(ingress_simulator=KubectlTest.IngressSimulator.ALL_NGINX_USED)) + # get the API resources information api_resources: Dict = report.get_api_resources() # check for expected attributes - assert len(api_resources) == 13 + assert len(api_resources) == 16 assert KubernetesResource.Kinds.CAS_DEPLOYMENT in api_resources assert KubernetesResource.Kinds.CONFIGMAP in api_resources + assert KubernetesResource.Kinds.CONTOUR_HTTPPROXY in api_resources assert KubernetesResource.Kinds.CRON_JOB in api_resources assert KubernetesResource.Kinds.DEPLOYMENT in api_resources assert KubernetesResource.Kinds.INGRESS in api_resources + assert KubernetesResource.Kinds.ISTIO_VIRTUAL_SERVICE assert KubernetesResource.Kinds.JOB in api_resources assert KubernetesResource.Kinds.NODE in api_resources assert KubernetesResource.Kinds.NODE_METRICS in api_resources + assert KubernetesResource.Kinds.OPENSHIFT_ROUTE in api_resources assert KubernetesResource.Kinds.POD in api_resources assert KubernetesResource.Kinds.POD_METRICS in api_resources assert KubernetesResource.Kinds.REPLICA_SET in api_resources @@ -183,11 +186,13 @@ def test_get_discovered_resources(report: ViyaDeploymentReport) -> None: discovered_resources: Dict = report.get_discovered_resources() # check for expected attributes - assert len(discovered_resources) == 11 + assert len(discovered_resources) == 13 assert KubernetesResource.Kinds.CAS_DEPLOYMENT in discovered_resources + assert KubernetesResource.Kinds.CONTOUR_HTTPPROXY in discovered_resources assert KubernetesResource.Kinds.CRON_JOB in discovered_resources assert KubernetesResource.Kinds.DEPLOYMENT in discovered_resources assert KubernetesResource.Kinds.INGRESS in discovered_resources + assert KubernetesResource.Kinds.ISTIO_VIRTUAL_SERVICE in discovered_resources assert KubernetesResource.Kinds.JOB in discovered_resources assert KubernetesResource.Kinds.NODE in 
discovered_resources assert KubernetesResource.Kinds.POD in discovered_resources @@ -214,7 +219,7 @@ def test_get_discovered_resources_unpopulated() -> None: assert ViyaDeploymentReport().get_discovered_resources() is None -def test_get_ingress_controller_nginx_only(report: ViyaDeploymentReport) -> None: +def test_get_ingress_controller(report: ViyaDeploymentReport) -> None: """ This test verifies that NGINX is returned as the ingress controller when VirtualService objects are not available in the api-resources. @@ -222,46 +227,7 @@ def test_get_ingress_controller_nginx_only(report: ViyaDeploymentReport) -> None :param report: The populated ViyaDeploymentReport returned by the report() fixture. """ # check for expected attributes - assert report.get_ingress_controller() == ExpectedIngressController.KUBE_NGINX - - -def test_get_ingress_controller_istio_only() -> None: - """ - This test verifies that ISTIO is returned as the ingress controller when Ingress objects are not available in the - api-resources. - """ - # run the report - report: ViyaDeploymentReport = ViyaDeploymentReport() - report.gather_details(kubectl=KubectlTest(KubectlTest.IngressSimulator.ISTIO_ONLY)) - - # check for expected attributes - assert report.get_ingress_controller() == ExpectedIngressController.ISTIO - - -def test_get_ingress_controller_both_nginx_used() -> None: - """ - This test verifies that NGINX is returned as the ingress controller when both Ingress and VirtualService resources - are available but Ingress is defined for components. 
- """ - # run the report - report: ViyaDeploymentReport = ViyaDeploymentReport() - report.gather_details(kubectl=KubectlTest(KubectlTest.IngressSimulator.BOTH_RESOURCES_NGINX_USED)) - - # check for expected attributes - assert report.get_ingress_controller() is ExpectedIngressController.KUBE_NGINX - - -def test_get_ingress_controller_both_istio_used() -> None: - """ - This test verifies that ISTIO is returned as the ingress controller when both Ingress and VirtualService resources - are available but VirtualService is defined for components. - """ - # run the report - report: ViyaDeploymentReport = ViyaDeploymentReport() - report.gather_details(kubectl=KubectlTest(KubectlTest.IngressSimulator.BOTH_RESOURCES_ISTIO_USED)) - - # check for expected attributes - assert report.get_ingress_controller() is ExpectedIngressController.ISTIO + assert report.get_ingress_controller() == SupportedIngress.Controllers.NGINX def test_get_ingress_controller_none() -> None: diff --git a/deployment_report/model/utils/component_util.py b/deployment_report/model/utils/component_util.py new file mode 100644 index 0000000..aca623a --- /dev/null +++ b/deployment_report/model/utils/component_util.py @@ -0,0 +1,82 @@ +#################################################################### +# ### component_util.py ### +#################################################################### +# ### Author: SAS Institute Inc. ### +#################################################################### +# ### +# Copyright (c) 2021, SAS Institute Inc., Cary, NC, USA. ### +# All Rights Reserved. 
### +# SPDX-License-Identifier: Apache-2.0 ### +# ### +#################################################################### +from typing import Dict, List, Optional, Text + +from deployment_report.model.static.viya_deployment_report_keys import \ + ITEMS_KEY, \ + NAME_KEY, \ + ViyaDeploymentReportKeys as ReportKeys + +from viya_ark_library.k8s.sas_k8s_objects import KubernetesResource + + +def aggregate_resources(resource_details: Dict, gathered_resources: Dict, component: Dict): + """ + Aggregates the various resources that comprise a SAS component deployed into the Kubernetes cluster. + + This method is called recursively for each relationship extension to aggregate all related resources. + + :param resource_details: The details of the resource to aggregate into a component. + :param gathered_resources: The complete dictionary of resources gathered in the Kubernetes cluster. + :param component: The dictionary where the resources for the current component will be compiled. + """ + # set up the component dict + if NAME_KEY not in component: + component[NAME_KEY]: Text = "" + + if ITEMS_KEY not in component: + component[ITEMS_KEY]: Dict = dict() + + # get the relationships extension list for this resource + resource_ext: Dict = resource_details[ReportKeys.ResourceDetails.EXT_DICT] + resource_relationships: List = resource_ext[ReportKeys.ResourceDetails.Ext.RELATIONSHIPS_LIST] + + # get the resource definition + resource: KubernetesResource = resource_details[ReportKeys.ResourceDetails.RESOURCE_DEFINITION] + + # if a SAS component name is defined, use it since this is the most canonical value + if resource.get_annotation(KubernetesResource.Keys.ANNOTATION_COMPONENT_NAME) is not None: + component[NAME_KEY] = resource.get_annotation(KubernetesResource.Keys.ANNOTATION_COMPONENT_NAME) + + # if a resource of this kind hasn't been added for the component, create the kind key + if resource.get_kind() not in component[ITEMS_KEY]: + component[ITEMS_KEY][resource.get_kind()]: 
Dict = dict() + + # add the resource details to its kind dictionary, keyed by its name + component[ITEMS_KEY][resource.get_kind()][resource.get_name()]: Dict = resource_details + + # aggregate any resources defined in the relationships extension # + for relationship in resource_relationships: + # get the name and kind of the related resource # + rel_kind: Text = relationship[ReportKeys.ResourceDetails.Ext.Relationship.KIND] + rel_name: Text = relationship[ReportKeys.ResourceDetails.Ext.Relationship.NAME] + + # get the details for the related resource + try: + related_resource_details: Optional[Dict] = gathered_resources[rel_kind][ITEMS_KEY][rel_name] + except KeyError: + # ignore any failures that may be raised if the resource is transient and not defined + # note that the related resource wasn't found + related_resource_details: Optional[Dict] = None + + # aggregate the related resource + if related_resource_details is not None: + aggregate_resources(related_resource_details, gathered_resources, component) + + # if this is the last resource in the chain and the component doesn't have a name determined from an annotation, + # set a name based on the available values + if not component[NAME_KEY]: + component[NAME_KEY]: Text = resource.get_name() + + # if a SAS component name is defined, use it instead + if resource.get_annotation(KubernetesResource.Keys.ANNOTATION_COMPONENT_NAME) is not None: + component[NAME_KEY] = resource.get_annotation(KubernetesResource.Keys.ANNOTATION_COMPONENT_NAME) diff --git a/deployment_report/model/utils/config_util.py b/deployment_report/model/utils/config_util.py new file mode 100644 index 0000000..716ed70 --- /dev/null +++ b/deployment_report/model/utils/config_util.py @@ -0,0 +1,79 @@ +#################################################################### +# ### config_util.py ### +#################################################################### +# ### Author: SAS Institute Inc. 
### +#################################################################### +# ### +# Copyright (c) 2021, SAS Institute Inc., Cary, NC, USA. ### +# All Rights Reserved. ### +# SPDX-License-Identifier: Apache-2.0 ### +# ### +#################################################################### +from typing import Dict, Optional, Text + +from viya_ark_library.k8s.sas_k8s_objects import KubernetesResource + + +def get_cadence_version(config_map: KubernetesResource) -> Optional[Text]: + """ + Returns the cadence version of the targeted SAS deployment. + + :param config_map: The ConfigMap resource to evaluate. + :return: A string representing the cadence version of the targeted SAS deployment. + """ + # initialize the return value + cadence_info: Optional[Text] = None + + try: + # look for ConfigMap with expected name + if 'sas-deployment-metadata' in config_map.get_name(): + # get the ConfigMap data + cadence_data: Optional[Dict] = config_map.get_data() + + # build the cadence string + cadence_info = ( + f"{cadence_data['SAS_CADENCE_DISPLAY_NAME']} " + f"{cadence_data['SAS_CADENCE_VERSION']} " + f"({cadence_data['SAS_CADENCE_RELEASE']})" + ) + + return cadence_info + except KeyError: + # if an expected key wasn't defined, return None + return None + + +def get_db_info(config_map: KubernetesResource) -> Optional[Dict]: + """ + Returns the db information of the targeted SAS deployment. + + :param config_map: The ConfigMap resource to evaluate. + :return: A dict representing the db information of the targeted SAS deployment. 
+ """ + # initialize the return value + db_dict: Optional[Dict] = dict() + + try: + # make sure the ConfigMap has the expected name + if 'sas-postgres-config' in config_map.get_name(): + # get the ConfigMap data + db_data: Optional[Dict] = config_map.get_data() + + # check whether the db configuration is external + if db_data['EXTERNAL_DATABASE'] == "false": + # return internal config information (all other details will be defined in the component in report) + return {"Type": "Internal"} + + # if external, create the dict of all relevant info + db_dict = { + "Type": "External", + "Host": db_data['DATABASE_HOST'], + "Port": db_data['DATABASE_PORT'], + "Name": db_data['DATABASE_NAME'], + "User": db_data['SPRING_DATASOURCE_USERNAME'] + } + + return db_dict + except KeyError: + # if an expected key wasn't defined, return None + return None diff --git a/deployment_report/model/utils/ingress_util.py b/deployment_report/model/utils/ingress_util.py new file mode 100644 index 0000000..8fcf2de --- /dev/null +++ b/deployment_report/model/utils/ingress_util.py @@ -0,0 +1,98 @@ +#################################################################### +# ### ingress_util.py ### +#################################################################### +# ### Author: SAS Institute Inc. ### +#################################################################### +# ### +# Copyright (c) 2021, SAS Institute Inc., Cary, NC, USA. ### +# All Rights Reserved. 
### +# SPDX-License-Identifier: Apache-2.0 ### +# ### +#################################################################### +from typing import Dict, List, Optional, Text + +from deployment_report.model.static.viya_deployment_report_keys import \ + ITEMS_KEY, \ + ViyaDeploymentReportKeys as ReportKeys + +from viya_ark_library.k8s.sas_k8s_ingress import SupportedIngress +from viya_ark_library.k8s.sas_k8s_objects import KubernetesResource + + +def determine_ingress_controller(gathered_resources: Dict) -> Optional[Text]: + """ + Determines the ingress controller being used in the Kubernetes cluster. + + :param gathered_resources: The complete dictionary of gathered resources from the Kubernetes cluster. + :return: The ingress controller used in the target cluster or None if the controller cannot be determined. + """ + for ingress_controller, kind in SupportedIngress.get_ingress_controller_to_kind_map().items(): + # check if this kind is in the dictionary of gathered resources + if kind in gathered_resources: + for resource_details in gathered_resources[kind][ITEMS_KEY].values(): + # get the resource definition + resource: KubernetesResource = resource_details[ReportKeys.ResourceDetails.RESOURCE_DEFINITION] + + # check if the resource was created by SAS + if resource.is_sas_resource(): + return ingress_controller + + # if a controller couldn't be determined, return None + return None + + +def ignorable_for_controller_if_unavailable(ingress_controller: Text, kind: Text) -> bool: + """ + Determines whether the given kind is ignorable if unavailable given the ingress controller. + + Example: Unavailable HTTPProxy, Route, and VirtualService kinds can be ignored if ingress is controlled by NGINX. + + :param ingress_controller: The ingress controller used by the deployment. + :param kind: The kind of the unavailable resource. 
+ """ + #################### + # Contour + #################### + if ingress_controller == SupportedIngress.Controllers.CONTOUR and ( + kind == KubernetesResource.Kinds.INGRESS or + kind == KubernetesResource.Kinds.ISTIO_VIRTUAL_SERVICE or + kind == KubernetesResource.Kinds.OPENSHIFT_ROUTE + ): + # ignore Ingress, Route, and VirtualService if controller is Contour + return True + + #################### + # Istio + #################### + elif ingress_controller == SupportedIngress.Controllers.ISTIO and ( + kind == KubernetesResource.Kinds.CONTOUR_HTTPPROXY or + kind == KubernetesResource.Kinds.INGRESS or + kind == KubernetesResource.Kinds.OPENSHIFT_ROUTE + ): + # ignore HTTPProxy, Ingress, and Route if controller is Istio + return True + + #################### + # NGINX + #################### + elif ingress_controller == SupportedIngress.Controllers.NGINX and ( + kind == KubernetesResource.Kinds.CONTOUR_HTTPPROXY or + kind == KubernetesResource.Kinds.ISTIO_VIRTUAL_SERVICE or + kind == KubernetesResource.Kinds.OPENSHIFT_ROUTE + ): + # ignore HTTPProxy, Route, and VirtualService if controller is NGINX + return True + + #################### + # OpenShift + #################### + elif ingress_controller == SupportedIngress.Controllers.OPENSHIFT and ( + kind == KubernetesResource.Kinds.CONTOUR_HTTPPROXY or + kind == KubernetesResource.Kinds.INGRESS or + kind == KubernetesResource.Kinds.ISTIO_VIRTUAL_SERVICE + ): + # ignore HTTPProxy, Ingress, and VirtualService if controller is OpenShift + return True + + # not ignorable + return False diff --git a/deployment_report/model/utils/metrics_util.py b/deployment_report/model/utils/metrics_util.py new file mode 100644 index 0000000..9bf9d63 --- /dev/null +++ b/deployment_report/model/utils/metrics_util.py @@ -0,0 +1,73 @@ +#################################################################### +# ### metrics_util.py ### +#################################################################### +# ### Author: SAS Institute Inc. 
### +#################################################################### +# ### +# Copyright (c) 2021, SAS Institute Inc., Cary, NC, USA. ### +# All Rights Reserved. ### +# SPDX-License-Identifier: Apache-2.0 ### +# ### +#################################################################### +from subprocess import CalledProcessError +from typing import Dict + +from deployment_report.model.static.viya_deployment_report_keys import \ + ITEMS_KEY, \ + ViyaDeploymentReportKeys as ReportKeys + +from viya_ark_library.k8s.sas_kubectl_interface import KubectlInterface + + +def get_pod_metrics(kubectl: KubectlInterface, pods: Dict) -> None: + """ + Retrieves Pod metrics from the Kubernetes API and defines the ext.metrics dictionary for all gathered Pods. + + :param kubectl: The KubectlInterface object for issuing requests to the Kubernetes cluster for Pod metrics. + :param pods: The Pod resources gathered in the Kubernetes cluster. + """ + # get Pod metrics if Pods are defined + if pods[ReportKeys.KindDetails.COUNT] > 0: + try: + # get Pod metrics + pod_metrics: Dict = kubectl.top_pods().as_dict() + + # iterate over the returned metrics and add them to the Pod extensions + for pod_name, metrics in pod_metrics.items(): + try: + pod_ext: Dict = pods[ITEMS_KEY][pod_name][ReportKeys.ResourceDetails.EXT_DICT] + pod_ext[ReportKeys.ResourceDetails.Ext.METRICS_DICT]: Dict = metrics + except KeyError: + # if the Pod isn't defined, move on without error + pass + + except CalledProcessError: + # if Pod metrics aren't available, move on without error + pass + + +def get_node_metrics(kubectl: KubectlInterface, nodes: Dict) -> None: + """ + Retrieves Node metrics from the Kubernetes API and defines the metrics extension for all gathered Nodes. + + :param kubectl: The KubectlInterface object for issuing requests to the Kubernetes cluster for Node metrics. + :param nodes: The Node resources gathered in the Kubernetes cluster. 
+ """ + # get Node metrics if Nodes are defined + if nodes[ReportKeys.KindDetails.COUNT] > 0: + try: + # get Node metrics + node_metrics: Dict = kubectl.top_nodes().as_dict() + + # iterate over the returned metrics and add them to the Node extensions + for node_name, metrics in node_metrics.items(): + try: + node_ext: Dict = nodes[ITEMS_KEY][node_name][ReportKeys.ResourceDetails.EXT_DICT] + node_ext[ReportKeys.ResourceDetails.Ext.METRICS_DICT]: Dict = metrics + except KeyError: + # if the Node isn't defined, move on without error + pass + + except CalledProcessError: + # if Node metrics aren't available, move on without error + pass diff --git a/deployment_report/model/utils/relationship_util.py b/deployment_report/model/utils/relationship_util.py new file mode 100644 index 0000000..bedb710 --- /dev/null +++ b/deployment_report/model/utils/relationship_util.py @@ -0,0 +1,383 @@ +#################################################################### +# ### relationship_util.py ### +#################################################################### +# ### Author: SAS Institute Inc. ### +#################################################################### +# ### +# Copyright (c) 2021, SAS Institute Inc., Cary, NC, USA. ### +# All Rights Reserved. ### +# SPDX-License-Identifier: Apache-2.0 ### +# ### +#################################################################### +from typing import Dict, List, Optional, Text + +from deployment_report.model.static.viya_deployment_report_keys import \ + ITEMS_KEY, \ + ViyaDeploymentReportKeys as ReportKeys + +from viya_ark_library.k8s.sas_k8s_ingress import SupportedIngress +from viya_ark_library.k8s.sas_k8s_objects import KubernetesResource + + +def create_relationship_dict(kind: Text, name: Text) -> Dict: + """ + Creates a relationship dict object for a resource's ext.relationship list. + + :param kind: The kind value of the related resource. + :param name: The name value of the related resource. 
+ :return: The dictionary defining the related resource. + """ + relationship: Dict = dict() + relationship[ReportKeys.ResourceDetails.Ext.Relationship.KIND]: Text = kind + relationship[ReportKeys.ResourceDetails.Ext.Relationship.NAME]: Text = name + return relationship + + +def define_node_to_pod_relationships(nodes: Dict, pods: Dict) -> None: + """ + Defines the ext.relationship from a Node to a Pod running within that Node. + + :param nodes: The Node resources gathered in the Kubernetes cluster. + :param pods: The Pod resources gathered in the Kubernetes cluster. + """ + # create association between Node and Pod, if both are defined + if pods[ReportKeys.KindDetails.COUNT] > 0 and nodes[ReportKeys.KindDetails.COUNT] > 0: + # loop over all Pods to get their Node definition + for pod_details in pods[ITEMS_KEY].values(): + # get the definition of the current Pod + pod: KubernetesResource = pod_details[ReportKeys.ResourceDetails.RESOURCE_DEFINITION] + + # get the Pod's Node definition + node_name: Text = pod.get_spec_value(KubernetesResource.Keys.NODE_NAME) + + try: + # create the Pod relationship and add it to the Node's relationships extension list + relationship: Dict = create_relationship_dict(pod.get_kind(), pod.get_name()) + + # add the relationship to the Node's relationships extension list + node_ext: Dict = nodes[ITEMS_KEY][node_name][ReportKeys.ResourceDetails.EXT_DICT] + node_relationships: List = node_ext[ReportKeys.ResourceDetails.Ext.RELATIONSHIPS_LIST] + node_relationships.append(relationship) + except KeyError: + # if the Node isn't defined, move on without error + pass + + +def define_pod_to_service_relationships(pods: Dict, services: Dict) -> None: + """ + Defines the upstream ext.relationship from a Pod to a Service that exposes the + Pod. + + :param pods: The Pod resources gathered in the Kubernetes cluster. + :param services: The Service resources gathered in the Kubernetes cluster. 
+ """ + # create the association between Pod and Service, if both are defined + if services[ReportKeys.KindDetails.COUNT] > 0 and pods[ReportKeys.KindDetails.COUNT] > 0: + + # iterate over all Services to process the defined selectors + for service_details in services[ITEMS_KEY].values(): + # get the definition for this Service + service: KubernetesResource = service_details[ReportKeys.ResourceDetails.RESOURCE_DEFINITION] + + # get the selectors + selectors: Dict = service.get_spec_value(KubernetesResource.Keys.SELECTOR) + + # if the Service doesn't define any selectors, continue to the next Service + if selectors is None: + continue + + # loop through all Pods and find any with matching labels + for pod_details in pods[ITEMS_KEY].values(): + # get the definition for this Pod + pod: KubernetesResource = pod_details[ReportKeys.ResourceDetails.RESOURCE_DEFINITION] + + # loop through the labels defined by the Service selector and make sure all exist on the Pod + for selector_label, selector_value in selectors.items(): + # check if the Pod has the same label/value + if pod.get_label(selector_label) != selector_value: + # if the label doesn't exist or isn't the same value, break the loop + break + else: + # if the loop didn't break, define the relationship + service_relationship: Dict = create_relationship_dict(service.get_kind(), service.get_name()) + + # and add it to the pods relationship list + pod_ext: Dict = pod_details[ReportKeys.ResourceDetails.EXT_DICT] + pod_relationships: List = pod_ext[ReportKeys.ResourceDetails.Ext.RELATIONSHIPS_LIST] + pod_relationships.append(service_relationship) + + +def define_service_to_ingress_relationships(ingress_controller: Text, gathered_resources: Dict) -> None: + """ + Defines the upstream ext.relationship from a Service to the ingress kind supported by + the ingress controller. + + :param ingress_controller: The ingress controller used by the deployment. 
+ :param gathered_resources: The dictionary of resources gathered from the k8s deployment. + """ + services: Dict = gathered_resources[KubernetesResource.Kinds.SERVICE] + + # if no services were gathered, there's nothing to do + if services[ReportKeys.KindDetails.COUNT] > 0: + # get the dictionary of supported controller to kind mappings + ingress_kind_map: Dict[Text, Text] = SupportedIngress.get_ingress_controller_to_kind_map() + + # get the resource kind mapped to the ingress controller + resource_kind: Text = ingress_kind_map.get(ingress_controller, None) + + # if a kind wasn't returned, there's nothing to do + if resource_kind: + # get the dictionary of resources + resources: Optional[Dict] = gathered_resources.get(resource_kind) + + if resources: + # if none of this resource type was gathered, there's nothing to do + if resources[ReportKeys.KindDetails.COUNT] > 0: + #################### + # Contour + #################### + if resource_kind == KubernetesResource.Kinds.CONTOUR_HTTPPROXY: + _define_service_to_contour_httpproxy_relationships(services, resources) + + #################### + # Istio + #################### + elif resource_kind == KubernetesResource.Kinds.ISTIO_VIRTUAL_SERVICE: + _define_service_to_istio_virtual_service_relationships(services, resources) + + #################### + # NGINX + #################### + elif resource_kind == KubernetesResource.Kinds.INGRESS: + _define_service_to_nginx_ingress_relationships(services, resources) + + #################### + # OpenShift + #################### + elif resource_kind == KubernetesResource.Kinds.OPENSHIFT_ROUTE: + _define_service_to_openshift_route_relationships(services, resources) + + +def _define_service_to_contour_httpproxy_relationships(services: Dict, httpproxies: Dict) -> None: + """ + Internal method that defines the upstream ext.relationship from a Service to the Contour HTTPProxy that controls + its in-bound HTTP traffic. 
+ + :param services: The Service resources gathered in the Kubernetes cluster. + :param httpproxies: The HTTPProxy resources gathered in the Kubernetes cluster. + """ + # iterate over all HTTPProxy objects and find for which Services they define paths + for proxy_details in httpproxies[ITEMS_KEY].values(): + # get the definition for the current HTTPProxy + proxy: KubernetesResource = proxy_details[ReportKeys.ResourceDetails.RESOURCE_DEFINITION] + + # get the routes for this HTTPProxy + routes: List = proxy.get_spec_value(KubernetesResource.Keys.ROUTES) + + if routes: + # iterate over all routes to process all associated services + for route in routes: + # get the services associated with this route + route_services: List = route.get(KubernetesResource.Keys.SERVICES, list()) + + # iterate over the list of services + for route_service in route_services: + # get the service name + service_name: Text = route_service.get(KubernetesResource.Keys.NAME) + + try: + # get the Service associated with this path + service: Dict = services[ITEMS_KEY][service_name] + + # get the current list of service relationships + service_ext: Dict = service[ReportKeys.ResourceDetails.EXT_DICT] + service_relationships: List = service_ext[ReportKeys.ResourceDetails.Ext.RELATIONSHIPS_LIST] + + # make sure this relationship isn't already defined + for rel in service_relationships: + if rel[ReportKeys.ResourceDetails.Ext.Relationship.KIND] == proxy.get_kind() and \ + rel[ReportKeys.ResourceDetails.Ext.Relationship.NAME] == proxy.get_name(): + # this relationship already exists, break + break + else: + # the relationship wasn't found, add it + # create the relationship to the HTTPProxy + proxy_relationship: Dict = create_relationship_dict(proxy.get_kind(), proxy.get_name()) + + # and add it to the Service's relationships + service_relationships.append(proxy_relationship) + except KeyError: + # if the Service isn't defined, move on without error + pass + + +def 
_define_service_to_istio_virtual_service_relationships(services: Dict, virtual_services: Dict) -> None: + """ + Internal method that defines the upstream ext.relationship from a Service to the Istio VirtualService that + controls its traffic. + + :param services: The Service resources gathered in the Kubernetes cluster. + :param virtual_services: The VirtualService resources gathered in the Kubernetes cluster. + """ + # iterate over all VirtualService objects and find which Services they define routes for + for virtual_service_details in virtual_services[ITEMS_KEY].values(): + # get the definition of the current VirtualService + virtual_service: KubernetesResource = virtual_service_details[ReportKeys.ResourceDetails.RESOURCE_DEFINITION] + + # get the http definitions for this VirtualService + http_definitions: List = virtual_service.get_spec_value(KubernetesResource.Keys.HTTP) + + if http_definitions: + # iterate over all http definitions to process their route definitions + for http_definition in http_definitions: + # get the routes defined + routes: List = http_definition.get(KubernetesResource.Keys.ROUTE) + + if routes: + # iterate over all routes to process their destination hosts + for route in routes: + # get the name of the Service associated with this route + service_name: Text = route[KubernetesResource.Keys.DESTINATION][KubernetesResource.Keys.HOST] + + try: + # get the Service associated with this route + service: Dict = services[ITEMS_KEY][service_name] + + # create the VirtualService relationship + virtual_service_relationship: Dict = create_relationship_dict(virtual_service.get_kind(), + virtual_service.get_name()) + + # and add it to the Service's relationships + service_ext: Dict = service[ReportKeys.ResourceDetails.EXT_DICT] + service_relationships = service_ext[ReportKeys.ResourceDetails.Ext.RELATIONSHIPS_LIST] + service_relationships.append(virtual_service_relationship) + except KeyError: + # if the Service isn't defined, move on without error + 
pass + else: + # get the tcp definitions for this VirtualService + tcp_definitions: List = virtual_service.get_spec_value(KubernetesResource.Keys.TCP) + + if tcp_definitions: + # iterate over all tcp definitions to process their route definitions + for tcp_definition in tcp_definitions: + # get the routes defined + routes: List = tcp_definition.get(KubernetesResource.Keys.ROUTE) + + if routes: + # iterate over all routes to process their destination hosts + for route in routes: + # get the name of the Service associated with this route # + service_destination: Dict = route[KubernetesResource.Keys.DESTINATION] + service_name: Text = service_destination[KubernetesResource.Keys.HOST] + + # remove any additional address information if given a full address + if "." in service_name: + service_name = service_name[:service_name.find(".")] + + try: + # get the Service associated with this route + service: Dict = services[ITEMS_KEY][service_name] + + # create the VirtualService relationship + virtual_service_relationship: Dict = \ + create_relationship_dict(virtual_service.get_kind(), virtual_service.get_name()) + + # and add it to the Service's relationships + service_ext: Dict = service[ReportKeys.ResourceDetails.EXT_DICT] + svc_relationships: List = service_ext[ReportKeys.ResourceDetails.Ext.RELATIONSHIPS_LIST] + svc_relationships.append(virtual_service_relationship) + except KeyError: + # if the Service isn't defined, move on without error + pass + + +def _define_service_to_nginx_ingress_relationships(services: Dict, ingresses: Dict) -> None: + """ + Internal method that defines the upstream ext.relationship from a Service to an Ingress that controls + its in-bound HTTP traffic. + + :param services: The Service resources gathered in the Kubernetes cluster. + :param ingresses: The Ingress resources gathered in the Kubernetes cluster. 
+ """ + # iterate over all Ingress objects and find for which Services they define paths + for ingress_details in ingresses[ITEMS_KEY].values(): + # get the definition for the current Ingress + ingress: KubernetesResource = ingress_details[ReportKeys.ResourceDetails.RESOURCE_DEFINITION] + + # get the rules for this Ingress + rules: List = ingress.get_spec_value(KubernetesResource.Keys.RULES) + + # iterate over all rules to process all http paths defined + for rule in rules: + # get the http paths for this rule + http_paths: List = rule[KubernetesResource.Keys.HTTP][KubernetesResource.Keys.PATHS] + + # iterate over all http paths to process each backend defined + for http_path in http_paths: + # init the service name variable + service_name: Text + + # check the api version of this Ingress + if ingress.get_api_version().startswith("networking.k8s.io"): + # get the Service name defined in this api version + path_service: Dict = http_path[KubernetesResource.Keys.BACKEND][KubernetesResource.Keys.SERVICE] + service_name = path_service[KubernetesResource.Keys.NAME] + + # use the old definition schema + else: + # get the Service name for api version + service_name = http_path[KubernetesResource.Keys.BACKEND][KubernetesResource.Keys.SERVICE_NAME] + + try: + # get the Service associated with this path + service: Dict = services[ITEMS_KEY][service_name] + + # create the relationship to the Ingress + ingress_relationship: Dict = create_relationship_dict(ingress.get_kind(), ingress.get_name()) + + # and add it to the Service's relationships + service_ext: Dict = service[ReportKeys.ResourceDetails.EXT_DICT] + service_relationships: List = service_ext[ReportKeys.ResourceDetails.Ext.RELATIONSHIPS_LIST] + service_relationships.append(ingress_relationship) + except KeyError: + # if the Service isn't defined, move on without error + pass + + +def _define_service_to_openshift_route_relationships(services: Dict, routes: Dict) -> None: + """ + Internal method that defines the upstream 
ext.relationship from a Service to the OpenShift Route that controls + its in-bound HTTP traffic. + + :param services: The Service resources gathered in the Kubernetes cluster. + :param routes: The Route resources gathered in the Kubernetes cluster. + """ + # iterate over all Route objects and find for which Services they define paths + for route_details in routes[ITEMS_KEY].values(): + # get the definition for the current Route + route: KubernetesResource = route_details[ReportKeys.ResourceDetails.RESOURCE_DEFINITION] + + # get the routes for this HTTPProxy + to_service_dict: Dict = route.get_spec_value(KubernetesResource.Keys.TO) + + if to_service_dict: + service_name: Text = to_service_dict.get(KubernetesResource.Keys.NAME, None) + + if service_name: + try: + # get the Service associated with this path + service: Dict = services[ITEMS_KEY][service_name] + + # get the current list of service relationships + service_ext: Dict = service[ReportKeys.ResourceDetails.EXT_DICT] + service_relationships: List = service_ext[ReportKeys.ResourceDetails.Ext.RELATIONSHIPS_LIST] + + # create the relationship to the Route + route_relationship: Dict = create_relationship_dict(route.get_kind(), route.get_name()) + + # and add it to the Service's relationships + service_relationships.append(route_relationship) + except KeyError: + # if the Service isn't defined, move on without error + pass diff --git a/deployment_report/model/utils/resource_util.py b/deployment_report/model/utils/resource_util.py new file mode 100644 index 0000000..d5284a5 --- /dev/null +++ b/deployment_report/model/utils/resource_util.py @@ -0,0 +1,133 @@ +#################################################################### +# ### resource_util.py ### +#################################################################### +# ### Author: SAS Institute Inc. ### +#################################################################### +# ### +# Copyright (c) 2021, SAS Institute Inc., Cary, NC, USA. 
### +# All Rights Reserved. ### +# SPDX-License-Identifier: Apache-2.0 ### +# ### +#################################################################### +from subprocess import CalledProcessError +from typing import Dict, List, Optional, Text + +from deployment_report.model.static.viya_deployment_report_keys import \ + ITEMS_KEY, \ + ViyaDeploymentReportKeys as Keys +from deployment_report.model.utils import relationship_util + +from viya_ark_library.k8s.sas_k8s_objects import KubernetesApiResources, KubernetesResource +from viya_ark_library.k8s.sas_kubectl_interface import KubectlInterface + + +# last applied config annotation +_LAST_APPLIED_CONFIG_ANNOTATION_ = "kubectl.kubernetes.io/last-applied-configuration" + + +def gather_details(kubectl: KubectlInterface, gathered_resources: Dict, + api_resources: KubernetesApiResources, resource_kind: Text) -> None: + """ + Gathers details about resources in the target Kubernetes cluster. + + The method is called recursively and will gather details about any resources described in the current resource's + "ownerReferences", if defined. If all discovered resource kinds are listable, a complete ownership chain will be + gathered. + + :param kubectl: The KubectlInterface object for issuing requests to the target Kubernetes cluster. + :param gathered_resources: The dictionary where gathered resources will be stored. + :param api_resources: The KubernetesApiResources object defining the API resources available in the target + Kubernetes cluster. + :param resource_kind: The 'kind' value of the resources to gather. 
+ """ + # if an attempt has been made to gather this kind, return without moving forward + if resource_kind in gathered_resources: + return + + # get the resource name associated with the kind + resource_name: Text = api_resources.get_name(resource_kind) + + # create a list to hold the resources gathered for this kind + resources: List[KubernetesResource] = list() + + # note whether the resource is available for listing + resource_available: bool = True + + if resource_name: + try: + # get the name associated with the resource kind + resources: Optional[List[KubernetesResource]] = kubectl.get_resources(api_resources.get_name(resource_kind)) + except CalledProcessError as e: + if resource_kind == KubernetesResource.Kinds.POD: + # if a CalledProcessError is raised for pods, surface the error + # if the resource kind is not "Pod", move forward without raising an error since + # pods can still be reported + raise e + else: + # note that this resource was not available + resource_available = False + + # save the resources by kind + gathered_resources[resource_kind]: Dict = dict() + + # create a key to note whether this resource kind was available for listing: bool + gathered_resources[resource_kind][Keys.KindDetails.AVAILABLE]: bool = resource_available + + # create a key to define the number of resources of this kind returned by k8s: int + gathered_resources[resource_kind][Keys.KindDetails.COUNT]: int = len(resources) + + # create a key to hold the resources returned by k8s: dict + gathered_resources[resource_kind][ITEMS_KEY]: bool = dict() + + # store a unique list of kinds in any 'ownerReferences' definitions + owner_kinds: List = list() + + # loop over the resources returned + for resource in resources: + # remove the 'managedFields' key, if it exists, to reduce file size + metadata: Optional[Dict] = resource.get_metadata() + if metadata: + metadata.pop(KubernetesResource.Keys.MANAGED_FIELDS, None) + + # remove the 'kubectl.kubernetes.io/last-applied-configuration' 
annotation, if it exists, to reduce file size + annotations: Optional[Dict] = resource.get_annotations() + if annotations: + annotations.pop(_LAST_APPLIED_CONFIG_ANNOTATION_, None) + + # add the resource to its kind dictionary + # create a key set to the name of the resource, under which all resource details will be stored: dict + resource_details = gathered_resources[resource_kind][ITEMS_KEY][resource.get_name()] = dict() + + # create a key to hold extra details about the resource not provided in the resource definition: dict + resource_details[Keys.ResourceDetails.EXT_DICT]: Dict = dict() + + # create a key to hold the ext.relationships list, which defines the name and kind of resources + # related to this resource: list + resource_ext: Dict = resource_details[Keys.ResourceDetails.EXT_DICT] + resource_relationships = resource_ext[Keys.ResourceDetails.Ext.RELATIONSHIPS_LIST] = list() + + # create a key under which the resource definition will be stored: KubernetesResource + resource_details[Keys.ResourceDetails.RESOURCE_DEFINITION]: KubernetesResource = resource + + # see if this resource defines any 'ownerReferences' + owner_references: List = resource.get_metadata_value(KubernetesResource.Keys.OWNER_REFERENCES) + + # if the resource does define 'ownerReferences', process them + if owner_references is not None: + # iterate over the references + for owner_reference in owner_references: + # if the owner reference kind isn't in the owner_kinds list, add it + if owner_reference[KubernetesResource.Keys.KIND] not in owner_kinds: + owner_kinds.append(owner_reference[KubernetesResource.Keys.KIND]) + + # create a relationship for the owning object + owner_kind = owner_reference[KubernetesResource.Keys.KIND] + owner_name = owner_reference[KubernetesResource.Keys.NAME] + relationship: Dict = relationship_util.create_relationship_dict(owner_kind, owner_name) + + # and add it to the relationship list + resource_relationships.append(relationship) + + # if more kinds have been 
discovered, gather them as well + for owner_kind in owner_kinds: + gather_details(kubectl, gathered_resources, api_resources, owner_kind) diff --git a/deployment_report/model/utils/test/conftest.py b/deployment_report/model/utils/test/conftest.py new file mode 100644 index 0000000..fad2f64 --- /dev/null +++ b/deployment_report/model/utils/test/conftest.py @@ -0,0 +1,186 @@ +#################################################################### +# ### conftest.py ### +#################################################################### +# ### Author: SAS Institute Inc. ### +#################################################################### +# ### +# Copyright (c) 2021, SAS Institute Inc., Cary, NC, USA. ### +# All Rights Reserved. ### +# SPDX-License-Identifier: Apache-2.0 ### +# ### +#################################################################### +import pytest + +from typing import Dict + +from deployment_report.model.utils import resource_util + +from viya_ark_library.k8s.sas_k8s_objects import KubernetesApiResources +from viya_ark_library.k8s.test_impl.sas_kubectl_test import KubectlTest + + +#################################################################### +# Unit Test Fixtures ### +#################################################################### +@pytest.fixture(scope="module") +def gathered_resources_no_ingress() -> Dict: + """ + This fixture creates a KubectlTest object, collects the available api-resources and calls + ViyaDeploymentReport.gather_resource_details() to gather all resources to create complete components. + Relationships not defined by gather_resource_details() are not defined in the resulting dictionary to + allow for testing of the methods that define them. + + This method is run once before tests in this file are executed. All tests use the same instance. + + :return: A dictionary of all gathered resources with no supported ingress kinds defined, simulating + an environment where no ingress controller is present. 
+ """ + return _gathered_resources(KubectlTest.IngressSimulator.NONE) + + +@pytest.fixture(scope="module") +def gathered_resources_all_ingress_defined_contour_used() -> Dict: + """ + This fixture creates a KubectlTest object, collects the available api-resources and calls + ViyaDeploymentReport.gather_resource_details() to gather all resources to create complete components. + Relationships not defined by gather_resource_details() are not defined in the resulting dictionary to + allow for testing of the methods that define them. + + This method is run once before tests in this file are executed. All tests use the same instance. + + :return: A dictionary of all gathered resources including all supported ingress kinds, with Contour being + the ingress controller used. + """ + return _gathered_resources(KubectlTest.IngressSimulator.ALL_CONTOUR_USED) + + +@pytest.fixture(scope="module") +def gathered_resources_all_ingress_defined_istio_used() -> Dict: + """ + This fixture creates a KubectlTest object, collects the available api-resources and calls + ViyaDeploymentReport.gather_resource_details() to gather all resources to create complete components. + Relationships not defined by gather_resource_details() are not defined in the resulting dictionary to + allow for testing of the methods that define them. + + This method is run once before tests in this file are executed. All tests use the same instance. + + :return: A dictionary of all gathered resources including all supported ingress kinds, with Istio being + the ingress controller used. + """ + return _gathered_resources(KubectlTest.IngressSimulator.ALL_ISTIO_USED) + + +@pytest.fixture(scope="module") +def gathered_resources_all_ingress_defined_nginx_used() -> Dict: + """ + This fixture creates a KubectlTest object, collects the available api-resources and calls + ViyaDeploymentReport.gather_resource_details() to gather all resources to create complete components. 
+ Relationships not defined by gather_resource_details() are not defined in the resulting dictionary to + allow for testing of the methods that define them. + + This method is run once before tests in this file are executed. All tests use the same instance. + + :return: A dictionary of all gathered resources including all supported ingress kinds, with NGINX being + the ingress controller used. + """ + return _gathered_resources(KubectlTest.IngressSimulator.ALL_NGINX_USED) + + +@pytest.fixture(scope="module") +def gathered_resources_all_ingress_defined_openshift_used() -> Dict: + """ + This fixture creates a KubectlTest object, collects the available api-resources and calls + ViyaDeploymentReport.gather_resource_details() to gather all resources to create complete components. + Relationships not defined by gather_resource_details() are not defined in the resulting dictionary to + allow for testing of the methods that define them. + + This method is run once before tests in this file are executed. All tests use the same instance. + + :return: A dictionary of all gathered resources including all supported ingress kinds, with OpenShift being + the ingress controller used. + """ + return _gathered_resources(KubectlTest.IngressSimulator.ALL_OPENSHIFT_USED) + + +@pytest.fixture(scope="module") +def gathered_resources_only_contour() -> Dict: + """ + This fixture creates a KubectlTest object, collects the available api-resources and calls + ViyaDeploymentReport.gather_resource_details() to gather all resources to create complete components. + Relationships not defined by gather_resource_details() are not defined in the resulting dictionary to + allow for testing of the methods that define them. + + This method is run once before tests in this file are executed. All tests use the same instance. + + :return: A dictionary of all gathered resources with only Contour ingress kinds defined. 
+ """ + return _gathered_resources(KubectlTest.IngressSimulator.ONLY_CONTOUR) + + +@pytest.fixture(scope="module") +def gathered_resources_only_istio() -> Dict: + """ + This fixture creates a KubectlTest object, collects the available api-resources and calls + ViyaDeploymentReport.gather_resource_details() to gather all resources to create complete components. + Relationships not defined by gather_resource_details() are not defined in the resulting dictionary to + allow for testing of the methods that define them. + + This method is run once before tests in this file are executed. All tests use the same instance. + + :return: A dictionary of all gathered resources with only Istio ingress kinds defined. + """ + return _gathered_resources(KubectlTest.IngressSimulator.ONLY_ISTIO) + + +@pytest.fixture(scope="module") +def gathered_resources_only_nginx() -> Dict: + """ + This fixture creates a KubectlTest object, collects the available api-resources and calls + ViyaDeploymentReport.gather_resource_details() to gather all resources to create complete components. + Relationships not defined by gather_resource_details() are not defined in the resulting dictionary to + allow for testing of the methods that define them. + + This method is run once before tests in this file are executed. All tests use the same instance. + + :return: A dictionary of all gathered resources with only NGINX ingress kinds defined. + """ + return _gathered_resources(KubectlTest.IngressSimulator.ONLY_NGINX) + + +@pytest.fixture(scope="module") +def gathered_resources_only_openshift() -> Dict: + """ + This fixture creates a KubectlTest object, collects the available api-resources and calls + ViyaDeploymentReport.gather_resource_details() to gather all resources to create complete components. + Relationships not defined by gather_resource_details() are not defined in the resulting dictionary to + allow for testing of the methods that define them. 
+ + This method is run once before tests in this file are executed. All tests use the same instance. + + :return: A dictionary of all gathered resources with only OpenShift ingress kinds defined. + """ + return _gathered_resources(KubectlTest.IngressSimulator.ONLY_OPENSHIFT) + + +def _gathered_resources(ingress_simulator: KubectlTest.IngressSimulator) -> Dict: + """ + Internal helper method for creating a gathered_resources dict based on + a provided ingress simulation. + + :param ingress_simulator: The ingress simulation to use. + """ + # set up kubectl and get API resources + kubectl: KubectlTest = KubectlTest(ingress_simulator=ingress_simulator) + api_resources: KubernetesApiResources = kubectl.api_resources() + + # set up dict to hold gathered resources + gathered_resources: Dict = dict() + + for resource_kind in api_resources.as_dict().keys(): + resource_util.gather_details( + kubectl=kubectl, + gathered_resources=gathered_resources, + api_resources=api_resources, + resource_kind=resource_kind) + + return gathered_resources diff --git a/deployment_report/model/utils/test/test_component_util.py b/deployment_report/model/utils/test/test_component_util.py new file mode 100644 index 0000000..cd908c6 --- /dev/null +++ b/deployment_report/model/utils/test/test_component_util.py @@ -0,0 +1,722 @@ +#################################################################### +# ### test_component_util.py ### +#################################################################### +# ### Author: SAS Institute Inc. ### +#################################################################### +# ### +# Copyright (c) 2021, SAS Institute Inc., Cary, NC, USA. ### +# All Rights Reserved. 
### +# SPDX-License-Identifier: Apache-2.0 ### +# ### +#################################################################### +import copy +import pytest + +from typing import Dict + +from deployment_report.model.static.viya_deployment_report_keys import \ + ITEMS_KEY, \ + NAME_KEY +from deployment_report.model.utils import \ + component_util, \ + relationship_util + +from viya_ark_library.k8s.sas_k8s_ingress import SupportedIngress +from viya_ark_library.k8s.sas_k8s_objects import KubernetesResource +from viya_ark_library.k8s.test_impl.sas_kubectl_test import KubectlTest + +# create aliases to KubectlTest classes +TestVals: KubectlTest.Values = KubectlTest.Values() + + +#################################################################### +# Unit Tests ### +#################################################################### +@pytest.mark.usefixtures("gathered_resources_only_nginx") +def test_aggregate_resources_prometheus(gathered_resources_only_nginx: Dict) -> None: + """ + This test verifies that all resources which comprise the prometheus component are correctly aggregated to create + the component definition. 
+ + :param gathered_resources_only_nginx: test fixture + """ + # copy the gathered_resources dict so it won't be altered for other tests + gathered_resources_copy: Dict = copy.deepcopy(gathered_resources_only_nginx) + + # define pod to service relationships to allow for aggregation + relationship_util.define_pod_to_service_relationships( + pods=gathered_resources_copy[KubernetesResource.Kinds.POD], + services=gathered_resources_copy[KubernetesResource.Kinds.SERVICE] + ) + + # create a dictionary to hold the component + component: Dict = dict() + + # get the prometheus pod + pod: Dict = gathered_resources_copy[KubernetesResource.Kinds.POD][ITEMS_KEY][TestVals.COMPONENT_PROMETHEUS_POD_NAME] + + # aggregate the resources in the prometheus component + component_util.aggregate_resources( + resource_details=pod, + gathered_resources=gathered_resources_copy, + component=component + ) + + # make sure the component name is correct + assert component[NAME_KEY] == TestVals.COMPONENT_PROMETHEUS_NAME + + # make sure the correct number of resources were aggregated + assert len(component[ITEMS_KEY]) == TestVals.COMPONENT_PROMETHEUS_RESOURCE_COUNT + + # make sure the all resources are accounted for + for kind, name_list in TestVals.COMPONENT_PROMETHEUS_RESOURCES_DICT.items(): + assert kind in component[ITEMS_KEY] + assert len(component[ITEMS_KEY][kind]) == len(name_list) + for name in name_list: + assert name in component[ITEMS_KEY][kind] + + +@pytest.mark.usefixtures("gathered_resources_no_ingress") +def test_aggregate_resources_sas_annotations_no_ingress(gathered_resources_no_ingress: Dict) -> None: + """ + This test verifies that all resources which comprise the sas-annotations component are correctly aggregated to + create the component definition when no ingress is found. 
+ + :param gathered_resources_no_ingress: test fixture + """ + # copy the gathered_resources dict so it won't be altered for other tests + gathered_resources_copy: Dict = copy.deepcopy(gathered_resources_no_ingress) + + # define the pod to service relationships to allow for aggregation + relationship_util.define_pod_to_service_relationships( + pods=gathered_resources_copy[KubernetesResource.Kinds.POD], + services=gathered_resources_copy[KubernetesResource.Kinds.SERVICE] + ) + + # define the service to ingress relationships to allow for aggregation + for controller in SupportedIngress.get_ingress_controller_to_kind_map().keys(): + relationship_util.define_service_to_ingress_relationships( + ingress_controller=controller, + gathered_resources=gathered_resources_copy + ) + + # create a dictionary to hold the component + component: Dict = dict() + + # get the sas-annotations pod + pod: Dict = gathered_resources_copy[KubernetesResource.Kinds.POD][ITEMS_KEY][ + TestVals.COMPONENT_SAS_ANNOTATIONS_POD_NAME] + + # aggregate the sas-annotations resources + component_util.aggregate_resources( + resource_details=pod, + gathered_resources=gathered_resources_copy, + component=component + ) + + # make sure the component name is correct + assert component[NAME_KEY] == TestVals.COMPONENT_SAS_ANNOTATIONS_NAME + + # make sure the correct number of resource types were aggregated + assert len(component[ITEMS_KEY]) == TestVals.COMPONENT_SAS_ANNOTATIONS_RESOURCE_COUNT_NO_INGRESS + + # make sure the all resources are accounted for + for kind, name_list in TestVals.COMPONENT_SAS_ANNOTATIONS_RESOURCE_DICT_NO_INGRESS.items(): + assert kind in component[ITEMS_KEY] + assert len(component[ITEMS_KEY][kind]) == len(name_list) + for name in name_list: + assert name in component[ITEMS_KEY][kind] + + +@pytest.mark.usefixtures("gathered_resources_all_ingress_defined_contour_used") +def test_aggregate_resources_sas_annotations_all_ingress_defined_contour_used( + 
gathered_resources_all_ingress_defined_contour_used: Dict) -> None: + """ + This test verifies that all resources which comprise the sas-annotations component are correctly aggregated to + create the component definition when all ingress kinds are present but Contour is providing ingress control. + + :param gathered_resources_all_ingress_defined_contour_used: test fixture + """ + # copy the gathered_resources dict so it won't be altered for other tests + gathered_resources_copy: Dict = copy.deepcopy(gathered_resources_all_ingress_defined_contour_used) + + # define the pod to service relationships to allow for aggregation + relationship_util.define_pod_to_service_relationships( + pods=gathered_resources_copy[KubernetesResource.Kinds.POD], + services=gathered_resources_copy[KubernetesResource.Kinds.SERVICE] + ) + + # define the service to ingress relationships to allow for aggregation + for controller in SupportedIngress.get_ingress_controller_to_kind_map().keys(): + relationship_util.define_service_to_ingress_relationships( + ingress_controller=controller, + gathered_resources=gathered_resources_copy + ) + + # create a dictionary to hold the component + component: Dict = dict() + + # get the sas-annotations pod + pod: Dict = gathered_resources_copy[KubernetesResource.Kinds.POD][ITEMS_KEY][ + TestVals.COMPONENT_SAS_ANNOTATIONS_POD_NAME] + + # aggregate the sas-annotations resources + component_util.aggregate_resources( + resource_details=pod, + gathered_resources=gathered_resources_copy, + component=component + ) + + # make sure the component name is correct + assert component[NAME_KEY] == TestVals.COMPONENT_SAS_ANNOTATIONS_NAME + + # make sure the correct number of resource types were aggregated + assert len(component[ITEMS_KEY]) == TestVals.COMPONENT_SAS_ANNOTATIONS_RESOURCE_COUNT_CONTOUR + + # make sure the all resources are accounted for + for kind, name_list in TestVals.COMPONENT_SAS_ANNOTATIONS_RESOURCE_DICT_CONTOUR.items(): + assert kind in 
component[ITEMS_KEY] + assert len(component[ITEMS_KEY][kind]) == len(name_list) + for name in name_list: + assert name in component[ITEMS_KEY][kind] + + +@pytest.mark.usefixtures("gathered_resources_all_ingress_defined_istio_used") +def test_aggregate_resources_sas_annotations_all_istio_used(gathered_resources_all_ingress_defined_istio_used: Dict) \ + -> None: + """ + This test verifies that all resources which comprise the sas-annotations component are correctly aggregated to + create the component definition when all ingress kinds are defined but Istio is providing ingress control. + + :param gathered_resources_all_ingress_defined_istio_used: test fixture + """ + # copy the gathered_resources dict so it won't be altered for other tests + gathered_resources_copy: Dict = copy.deepcopy(gathered_resources_all_ingress_defined_istio_used) + + # define the pod to service relationships to allow for aggregation + relationship_util.define_pod_to_service_relationships( + pods=gathered_resources_copy[KubernetesResource.Kinds.POD], + services=gathered_resources_copy[KubernetesResource.Kinds.SERVICE] + ) + + # define the service to ingress relationships to allow for aggregation + for controller in SupportedIngress.get_ingress_controller_to_kind_map().keys(): + relationship_util.define_service_to_ingress_relationships( + ingress_controller=controller, + gathered_resources=gathered_resources_copy + ) + + # create a dictionary to hold the component + component: Dict = dict() + + # get the sas-annotations pod + pod: Dict = gathered_resources_copy[KubernetesResource.Kinds.POD][ITEMS_KEY][ + TestVals.COMPONENT_SAS_ANNOTATIONS_POD_NAME] + + # aggregate the sas-annotations resources + component_util.aggregate_resources( + resource_details=pod, + gathered_resources=gathered_resources_copy, + component=component + ) + + # make sure the component name is correct + assert component[NAME_KEY] == TestVals.COMPONENT_SAS_ANNOTATIONS_NAME + + # make sure the correct number of resource types 
were aggregated + assert len(component[ITEMS_KEY]) == TestVals.COMPONENT_SAS_ANNOTATIONS_RESOURCE_COUNT_ISTIO + + # make sure the all resources are accounted for + for kind, name_list in TestVals.COMPONENT_SAS_ANNOTATIONS_RESOURCE_DICT_ISTIO.items(): + assert kind in component[ITEMS_KEY] + assert len(component[ITEMS_KEY][kind]) == len(name_list) + for name in name_list: + assert name in component[ITEMS_KEY][kind] + + +@pytest.mark.usefixtures("gathered_resources_all_ingress_defined_nginx_used") +def test_aggregate_resources_sas_annotations_all_nginx_used(gathered_resources_all_ingress_defined_nginx_used: Dict) \ + -> None: + """ + This test verifies that all resources which comprise the sas-annotations component are correctly aggregated to + create the component definition when all ingress kinds are defined but NGINX is providing ingress control. + + :param gathered_resources_all_ingress_defined_nginx_used: test fixture + """ + # copy the gathered_resources dict so it won't be altered for other tests + gathered_resources_copy: Dict = copy.deepcopy(gathered_resources_all_ingress_defined_nginx_used) + + # define the pod to service relationships to allow for aggregation + relationship_util.define_pod_to_service_relationships( + pods=gathered_resources_copy[KubernetesResource.Kinds.POD], + services=gathered_resources_copy[KubernetesResource.Kinds.SERVICE] + ) + + # define the service to ingress relationships to allow for aggregation + for controller in SupportedIngress.get_ingress_controller_to_kind_map().keys(): + relationship_util.define_service_to_ingress_relationships( + ingress_controller=controller, + gathered_resources=gathered_resources_copy + ) + + # create a dictionary to hold the component + component: Dict = dict() + + # get the sas-annotations pod + pod: Dict = gathered_resources_copy[KubernetesResource.Kinds.POD][ITEMS_KEY][ + TestVals.COMPONENT_SAS_ANNOTATIONS_POD_NAME] + + # aggregate the sas-annotations resources + component_util.aggregate_resources( + 
resource_details=pod, + gathered_resources=gathered_resources_copy, + component=component + ) + + # make sure the component name is correct + assert component[NAME_KEY] == TestVals.COMPONENT_SAS_ANNOTATIONS_NAME + + # make sure the correct number of resource types were aggregated + assert len(component[ITEMS_KEY]) == TestVals.COMPONENT_SAS_ANNOTATIONS_RESOURCE_COUNT_NGINX + + # make sure the all resources are accounted for + for kind, name_list in TestVals.COMPONENT_SAS_ANNOTATIONS_RESOURCE_DICT_NGINX.items(): + assert kind in component[ITEMS_KEY] + assert len(component[ITEMS_KEY][kind]) == len(name_list) + for name in name_list: + assert name in component[ITEMS_KEY][kind] + + +@pytest.mark.usefixtures("gathered_resources_all_ingress_defined_openshift_used") +def test_aggregate_resources_sas_annotations_all_openshift_used( + gathered_resources_all_ingress_defined_openshift_used: Dict) -> None: + """ + This test verifies that all resources which comprise the sas-annotations component are correctly aggregated to + create the component definition when all ingress kinds are defined but OpenShift is providing ingress control. 
+ + :param gathered_resources_all_ingress_defined_openshift_used: test fixture + """ + # copy the gathered_resources dict so it won't be altered for other tests + gathered_resources_copy: Dict = copy.deepcopy(gathered_resources_all_ingress_defined_openshift_used) + + # define the pod to service relationships to allow for aggregation + relationship_util.define_pod_to_service_relationships( + pods=gathered_resources_copy[KubernetesResource.Kinds.POD], + services=gathered_resources_copy[KubernetesResource.Kinds.SERVICE] + ) + + # define the service to ingress relationships to allow for aggregation + for controller in SupportedIngress.get_ingress_controller_to_kind_map().keys(): + relationship_util.define_service_to_ingress_relationships( + ingress_controller=controller, + gathered_resources=gathered_resources_copy + ) + + # create a dictionary to hold the component + component: Dict = dict() + + # get the sas-annotations pod + pod: Dict = gathered_resources_copy[KubernetesResource.Kinds.POD][ITEMS_KEY][ + TestVals.COMPONENT_SAS_ANNOTATIONS_POD_NAME] + + # aggregate the sas-annotations resources + component_util.aggregate_resources( + resource_details=pod, + gathered_resources=gathered_resources_copy, + component=component + ) + + # make sure the component name is correct + assert component[NAME_KEY] == TestVals.COMPONENT_SAS_ANNOTATIONS_NAME + + # make sure the correct number of resource types were aggregated + assert len(component[ITEMS_KEY]) == TestVals.COMPONENT_SAS_ANNOTATIONS_RESOURCE_COUNT_OPENSHIFT + + # make sure the all resources are accounted for + for kind, name_list in TestVals.COMPONENT_SAS_ANNOTATIONS_RESOURCE_DICT_OPENSHIFT.items(): + assert kind in component[ITEMS_KEY] + assert len(component[ITEMS_KEY][kind]) == len(name_list) + for name in name_list: + assert name in component[ITEMS_KEY][kind] + + +@pytest.mark.usefixtures("gathered_resources_only_contour") +def test_aggregate_resources_sas_annotations_only_contour(gathered_resources_only_contour: 
Dict) -> None: + """ + This test verifies that all resources which comprise the sas-annotations component are correctly aggregated to + create the component definition when only Contour kinds are found. + + :param gathered_resources_only_contour: test fixture + """ + # copy the gathered_resources dict so it won't be altered for other tests + gathered_resources_copy: Dict = copy.deepcopy(gathered_resources_only_contour) + + # define the pod to service relationships to allow for aggregation + relationship_util.define_pod_to_service_relationships( + pods=gathered_resources_copy[KubernetesResource.Kinds.POD], + services=gathered_resources_copy[KubernetesResource.Kinds.SERVICE] + ) + + # define the service to ingress relationships to allow for aggregation + for controller in SupportedIngress.get_ingress_controller_to_kind_map().keys(): + relationship_util.define_service_to_ingress_relationships( + ingress_controller=controller, + gathered_resources=gathered_resources_copy + ) + + # create a dictionary to hold the component + component: Dict = dict() + + # get the sas-annotations pod + pod: Dict = gathered_resources_copy[KubernetesResource.Kinds.POD][ITEMS_KEY][ + TestVals.COMPONENT_SAS_ANNOTATIONS_POD_NAME] + + # aggregate the sas-annotations resources + component_util.aggregate_resources( + resource_details=pod, + gathered_resources=gathered_resources_copy, + component=component + ) + + # make sure the component name is correct + assert component[NAME_KEY] == TestVals.COMPONENT_SAS_ANNOTATIONS_NAME + + # make sure the correct number of resource types were aggregated + assert len(component[ITEMS_KEY]) == TestVals.COMPONENT_SAS_ANNOTATIONS_RESOURCE_COUNT_CONTOUR + + # make sure the all resources are accounted for + for kind, name_list in TestVals.COMPONENT_SAS_ANNOTATIONS_RESOURCE_DICT_CONTOUR.items(): + assert kind in component[ITEMS_KEY] + assert len(component[ITEMS_KEY][kind]) == len(name_list) + for name in name_list: + assert name in component[ITEMS_KEY][kind] + + 
+@pytest.mark.usefixtures("gathered_resources_only_istio") +def test_aggregate_resources_sas_annotations_only_istio(gathered_resources_only_istio: Dict) -> None: + """ + This test verifies that all resources which comprise the sas-annotations component are correctly aggregated to + create the component definition when only Istio kinds are found. + + :param gathered_resources_only_istio: test fixture + """ + # copy the gathered_resources dict so it won't be altered for other tests + gathered_resources_copy: Dict = copy.deepcopy(gathered_resources_only_istio) + + # define the pod to service relationships to allow for aggregation + relationship_util.define_pod_to_service_relationships( + pods=gathered_resources_copy[KubernetesResource.Kinds.POD], + services=gathered_resources_copy[KubernetesResource.Kinds.SERVICE] + ) + + # define the service to ingress relationships to allow for aggregation + for controller in SupportedIngress.get_ingress_controller_to_kind_map().keys(): + relationship_util.define_service_to_ingress_relationships( + ingress_controller=controller, + gathered_resources=gathered_resources_copy + ) + + # create a dictionary to hold the component + component: Dict = dict() + + # get the sas-annotations pod + pod: Dict = gathered_resources_copy[KubernetesResource.Kinds.POD][ITEMS_KEY][ + TestVals.COMPONENT_SAS_ANNOTATIONS_POD_NAME] + + # aggregate the sas-annotations resources + component_util.aggregate_resources( + resource_details=pod, + gathered_resources=gathered_resources_copy, + component=component + ) + + # make sure the component name is correct + assert component[NAME_KEY] == TestVals.COMPONENT_SAS_ANNOTATIONS_NAME + + # make sure the correct number of resource types were aggregated + assert len(component[ITEMS_KEY]) == TestVals.COMPONENT_SAS_ANNOTATIONS_RESOURCE_COUNT_ISTIO + + # make sure the all resources are accounted for + for kind, name_list in TestVals.COMPONENT_SAS_ANNOTATIONS_RESOURCE_DICT_ISTIO.items(): + assert kind in 
component[ITEMS_KEY] + assert len(component[ITEMS_KEY][kind]) == len(name_list) + for name in name_list: + assert name in component[ITEMS_KEY][kind] + + +@pytest.mark.usefixtures("gathered_resources_only_nginx") +def test_aggregate_resources_sas_annotations_only_nginx(gathered_resources_only_nginx: Dict) -> None: + """ + This test verifies that all resources which comprise the sas-annotations component are correctly aggregated to + create the component definition when only NGINX kinds are found. + + :param gathered_resources_only_nginx: test fixture + """ + # copy the gathered_resources dict so it won't be altered for other tests + gathered_resources_copy: Dict = copy.deepcopy(gathered_resources_only_nginx) + + # define the pod to service relationships to allow for aggregation + relationship_util.define_pod_to_service_relationships( + pods=gathered_resources_copy[KubernetesResource.Kinds.POD], + services=gathered_resources_copy[KubernetesResource.Kinds.SERVICE] + ) + + # define the service to ingress relationships to allow for aggregation + for controller in SupportedIngress.get_ingress_controller_to_kind_map().keys(): + relationship_util.define_service_to_ingress_relationships( + ingress_controller=controller, + gathered_resources=gathered_resources_copy + ) + + # create a dictionary to hold the component + component: Dict = dict() + + # get the sas-annotations pod + pod: Dict = gathered_resources_copy[KubernetesResource.Kinds.POD][ITEMS_KEY][ + TestVals.COMPONENT_SAS_ANNOTATIONS_POD_NAME] + + # aggregate the sas-annotations resources + component_util.aggregate_resources( + resource_details=pod, + gathered_resources=gathered_resources_copy, + component=component + ) + + # make sure the component name is correct + assert component[NAME_KEY] == TestVals.COMPONENT_SAS_ANNOTATIONS_NAME + + # make sure the correct number of resource types were aggregated + assert len(component[ITEMS_KEY]) == TestVals.COMPONENT_SAS_ANNOTATIONS_RESOURCE_COUNT_NGINX + + # make sure the 
all resources are accounted for + for kind, name_list in TestVals.COMPONENT_SAS_ANNOTATIONS_RESOURCE_DICT_NGINX.items(): + assert kind in component[ITEMS_KEY] + assert len(component[ITEMS_KEY][kind]) == len(name_list) + for name in name_list: + assert name in component[ITEMS_KEY][kind] + + +@pytest.mark.usefixtures("gathered_resources_only_openshift") +def test_aggregate_resources_sas_annotations_only_openshift(gathered_resources_only_openshift: Dict) -> None: + """ + This test verifies that all resources which comprise the sas-annotations component are correctly aggregated to + create the component definition when only OpenShift kinds are found. + + :param gathered_resources_only_openshift: test fixture + """ + # copy the gathered_resources dict so it won't be altered for other tests + gathered_resources_copy: Dict = copy.deepcopy(gathered_resources_only_openshift) + + # define the pod to service relationships to allow for aggregation + relationship_util.define_pod_to_service_relationships( + pods=gathered_resources_copy[KubernetesResource.Kinds.POD], + services=gathered_resources_copy[KubernetesResource.Kinds.SERVICE] + ) + + # define the service to ingress relationships to allow for aggregation + for controller in SupportedIngress.get_ingress_controller_to_kind_map().keys(): + relationship_util.define_service_to_ingress_relationships( + ingress_controller=controller, + gathered_resources=gathered_resources_copy + ) + + # create a dictionary to hold the component + component: Dict = dict() + + # get the sas-annotations pod + pod: Dict = gathered_resources_copy[KubernetesResource.Kinds.POD][ITEMS_KEY][ + TestVals.COMPONENT_SAS_ANNOTATIONS_POD_NAME] + + # aggregate the sas-annotations resources + component_util.aggregate_resources( + resource_details=pod, + gathered_resources=gathered_resources_copy, + component=component + ) + + # make sure the component name is correct + assert component[NAME_KEY] == TestVals.COMPONENT_SAS_ANNOTATIONS_NAME + + # make sure the 
correct number of resource types were aggregated + assert len(component[ITEMS_KEY]) == TestVals.COMPONENT_SAS_ANNOTATIONS_RESOURCE_COUNT_OPENSHIFT + + # make sure the all resources are accounted for + for kind, name_list in TestVals.COMPONENT_SAS_ANNOTATIONS_RESOURCE_DICT_OPENSHIFT.items(): + assert kind in component[ITEMS_KEY] + assert len(component[ITEMS_KEY][kind]) == len(name_list) + for name in name_list: + assert name in component[ITEMS_KEY][kind] + + +@pytest.mark.usefixtures("gathered_resources_only_nginx") +def test_aggregate_resources_sas_cache_server(gathered_resources_only_nginx: Dict) -> None: + """ + This test verifies that all resources which comprise the sas-cacheserver component are correctly aggregated to + create the component definition. + + :param gathered_resources_only_nginx: test fixture + """ + # copy the gathered_resources dict so it won't be altered for other tests + gathered_resources_copy: Dict = copy.deepcopy(gathered_resources_only_nginx) + + # define the pod to service relationships to allow for aggregation + relationship_util.define_pod_to_service_relationships( + pods=gathered_resources_copy[KubernetesResource.Kinds.POD], + services=gathered_resources_copy[KubernetesResource.Kinds.SERVICE] + ) + + # create a dictionary to hold the component + component: Dict = dict() + + # get the sas-cacheserver pod + pod: Dict = gathered_resources_copy[KubernetesResource.Kinds.POD][ITEMS_KEY][ + TestVals.COMPONENT_SAS_CACHE_SERVER_POD_NAME] + + # aggregate the resources + component_util.aggregate_resources( + resource_details=pod, + gathered_resources=gathered_resources_copy, + component=component + ) + + # make sure the component name is correct + assert component[NAME_KEY] == TestVals.COMPONENT_SAS_CACHE_SERVER_NAME + + # make sure the right number of resource types were aggregated + assert len(component[ITEMS_KEY]) == TestVals.COMPONENT_SAS_CACHE_SERVER_RESOURCE_COUNT + + # make sure the all resources are accounted for + for kind, name_list in 
TestVals.COMPONENT_SAS_CACHE_SERVER_RESOURCE_DICT.items(): + assert kind in component[ITEMS_KEY] + assert len(component[ITEMS_KEY][kind]) == len(name_list) + for name in name_list: + assert name in component[ITEMS_KEY][kind] + + +@pytest.mark.usefixtures("gathered_resources_only_nginx") +def test_aggregate_resources_sas_cas_operator(gathered_resources_only_nginx: Dict) -> None: + """ + This test verifies that all resources which comprise the sas-cas-operator component are correctly aggregated to + create the component definition. + + :param gathered_resources_only_nginx: test fixture + """ + # copy the gathered_resources dict so it won't be altered for other tests + gathered_resources_copy: Dict = copy.deepcopy(gathered_resources_only_nginx) + + # define the pod to service relationships to allow for aggregation + relationship_util.define_pod_to_service_relationships( + pods=gathered_resources_copy[KubernetesResource.Kinds.POD], + services=gathered_resources_copy[KubernetesResource.Kinds.SERVICE] + ) + + # create a dictionary to hold the component + component: Dict = dict() + + # get the sas-cas-operator pod + pod: Dict = gathered_resources_copy[KubernetesResource.Kinds.POD][ITEMS_KEY][ + TestVals.COMPONENT_SAS_CAS_OPERATOR_POD_NAME] + + # aggregate the resources + component_util.aggregate_resources( + resource_details=pod, + gathered_resources=gathered_resources_copy, + component=component + ) + + # make sure the component name is correct + assert component[NAME_KEY] == TestVals.COMPONENT_SAS_CAS_OPERATOR_NAME + + # make sure the correct number of resource types were aggregated + assert len(component[ITEMS_KEY]) == TestVals.COMPONENT_SAS_CAS_OPERATOR_RESOURCE_COUNT + + # make sure the all resources are accounted for + for kind, name_list in TestVals.COMPONENT_SAS_CAS_OPERATOR_RESOURCE_DICT.items(): + assert kind in component[ITEMS_KEY] + assert len(component[ITEMS_KEY][kind]) == len(name_list) + for name in name_list: + assert name in component[ITEMS_KEY][kind] + + 
+@pytest.mark.usefixtures("gathered_resources_only_nginx") +def test_aggregate_resources_sas_cas_server(gathered_resources_only_nginx: Dict) -> None: + """ + This test verifies that all resources which comprise the sas-cas-server component are correctly aggregated to + create the component definition. + + :param gathered_resources_only_nginx: test fixture + """ + # copy the gathered_resources dict so it won't be altered for other tests + gathered_resources_copy: Dict = copy.deepcopy(gathered_resources_only_nginx) + + # define the pod to service relationships to allow for aggregation + relationship_util.define_pod_to_service_relationships( + pods=gathered_resources_copy[KubernetesResource.Kinds.POD], + services=gathered_resources_copy[KubernetesResource.Kinds.SERVICE] + ) + + # create a dictionary to hold the component + component: Dict = dict() + + # get the sas-cas-server pod + pod: Dict = gathered_resources_copy[KubernetesResource.Kinds.POD][ITEMS_KEY][ + TestVals.COMPONENT_SAS_CAS_SERVER_POD_NAME] + + # aggregate the resources + component_util.aggregate_resources( + resource_details=pod, + gathered_resources=gathered_resources_copy, + component=component + ) + + # make sure the component name is correct + assert component[NAME_KEY] == TestVals.COMPONENT_SAS_CAS_SERVER_NAME + + # make sure the correct number of resource types were aggregated + assert len(component[ITEMS_KEY]) == TestVals.COMPONENT_SAS_CAS_SERVER_RESOURCE_COUNT + + # make sure the all resources are accounted for + for kind, name_list in TestVals.COMPONENT_SAS_CAS_SERVER_RESOURCE_DICT.items(): + assert kind in component[ITEMS_KEY] + assert len(component[ITEMS_KEY][kind]) == len(name_list) + for name in name_list: + assert name in component[ITEMS_KEY][kind] + + +@pytest.mark.usefixtures("gathered_resources_only_nginx") +def test_aggregate_resources_sas_scheduled_backup_job(gathered_resources_only_nginx: Dict) -> None: + """ + This test verifies that all resources which comprise the 
sas-scheduled-backup-job component are correctly aggregated + to create the component definition. + + :param gathered_resources_only_nginx: test fixture + """ + # copy the gathered_resources dict so it won't be altered for other tests + gathered_resources_copy: Dict = copy.deepcopy(gathered_resources_only_nginx) + + # create a dictionary to hold the component + component: Dict = dict() + + # get the sas-scheduled-backup-job pod + pod: Dict = gathered_resources_copy[KubernetesResource.Kinds.POD][ITEMS_KEY][ + TestVals.COMPONENT_SAS_SCHEDULED_BACKUP_JOB_POD_NAME] + + # aggregate the resources + component_util.aggregate_resources( + resource_details=pod, + gathered_resources=gathered_resources_copy, + component=component + ) + + # make sure the component name is correct + assert component[NAME_KEY] == TestVals.COMPONENT_SAS_SCHEDULED_BACKUP_JOB_NAME + + # make sure the correct number of resource types were aggregated + assert len(component[ITEMS_KEY]) == TestVals.COMPONENT_SAS_SCHEDULED_BACKUP_JOB_RESOURCE_COUNT + + # make sure the all resources are accounted for + for kind, name_list in TestVals.COMPONENT_SAS_SCHEDULED_BACKUP_JOB_RESOURCE_DICT.items(): + assert kind in component[ITEMS_KEY] + assert len(component[ITEMS_KEY][kind]) == len(name_list) + for name in name_list: + assert name in component[ITEMS_KEY][kind] diff --git a/deployment_report/model/utils/test/test_config_util.py b/deployment_report/model/utils/test/test_config_util.py new file mode 100644 index 0000000..213391e --- /dev/null +++ b/deployment_report/model/utils/test/test_config_util.py @@ -0,0 +1,56 @@ +#################################################################### +# ### test_config_util.py ### +#################################################################### +# ### Author: SAS Institute Inc. ### +#################################################################### +# ### +# Copyright (c) 2021, SAS Institute Inc., Cary, NC, USA. ### +# All Rights Reserved. 
### +# SPDX-License-Identifier: Apache-2.0 ### +# ### +#################################################################### +from typing import Dict, Optional, Text + +from deployment_report.model.utils import config_util + +from viya_ark_library.k8s.test_impl.sas_kubectl_test import KubectlTest + + +#################################################################### +# Unit Tests ### +#################################################################### +def test_get_cadence_version() -> None: + """ + This test verifies that the expected cadence data is returned when valid data is passed to get_cadence_version(). + """ + # check for expected attributes + kubectl: KubectlTest = KubectlTest() + + cadence_data = kubectl.get_resources("ConfigMaps") + cadence_info: Optional[Text] = None + + for c in cadence_data: + cadence_info = config_util.get_cadence_version(c) + if cadence_info: + break + + assert cadence_info == KubectlTest.Values.CADENCEINFO + + +def test_get_db_info() -> None: + """ + This test verifies that the expected db data is returned when valid data is passed to get_db_info(). + """ + # check for expected attributes + # gather the db info from the test config maps + kubectl: KubectlTest = KubectlTest() + + db_data = kubectl.get_resources("ConfigMaps") + db_dict: Dict = dict() + + for c in db_data: + db_dict = config_util.get_db_info(c) + if db_dict: + break + + assert db_dict["Type"] == KubectlTest.Values.DBINFO diff --git a/deployment_report/model/utils/test/test_ingress_utils.py b/deployment_report/model/utils/test/test_ingress_utils.py new file mode 100644 index 0000000..44a37fa --- /dev/null +++ b/deployment_report/model/utils/test/test_ingress_utils.py @@ -0,0 +1,250 @@ +#################################################################### +# ### test_ingress_utils.py ### +#################################################################### +# ### Author: SAS Institute Inc. 
### +#################################################################### +# ### +# Copyright (c) 2021, SAS Institute Inc., Cary, NC, USA. ### +# All Rights Reserved. ### +# SPDX-License-Identifier: Apache-2.0 ### +# ### +#################################################################### +import pytest + +from typing import Dict, Optional, Text + +from deployment_report.model.utils import ingress_util + +from viya_ark_library.k8s.sas_k8s_ingress import SupportedIngress +from viya_ark_library.k8s.sas_k8s_objects import KubernetesResource + + +#################################################################### +# Unit Tests ### +#################################################################### +@pytest.mark.usefixtures("gathered_resources_no_ingress") +def test_determine_ingress_controller_no_ingress(gathered_resources_no_ingress: Dict) -> None: + """ + This test verifies that the ingress controller is correctly determined when no ingress kinds are available. + + :param gathered_resources_no_ingress: test fixture + """ + controller: Optional[Text] = ingress_util.determine_ingress_controller(gathered_resources_no_ingress) + + assert controller is None + + +@pytest.mark.usefixtures("gathered_resources_all_ingress_defined_contour_used") +def test_determine_ingress_controller_all_ingress_defined_contour_used( + gathered_resources_all_ingress_defined_contour_used: Dict) -> None: + """ + This test verifies that the ingress controller is correctly determined when all ingress kinds are available in the + cluster but Contour is controlling ingress. 
+ + :param gathered_resources_all_ingress_defined_contour_used: test fixture + """ + controller: Optional[Text] = \ + ingress_util.determine_ingress_controller(gathered_resources_all_ingress_defined_contour_used) + + assert controller == SupportedIngress.Controllers.CONTOUR + + +@pytest.mark.usefixtures("gathered_resources_all_ingress_defined_istio_used") +def test_determine_ingress_controller_all_ingress_defined_istio_used( + gathered_resources_all_ingress_defined_istio_used: Dict) -> None: + """ + This test verifies that the ingress controller is correctly determined when all ingress kinds are available in the + cluster but Istio is controlling ingress. + + :param gathered_resources_all_ingress_defined_istio_used: test fixture + """ + controller: Optional[Text] = \ + ingress_util.determine_ingress_controller(gathered_resources_all_ingress_defined_istio_used) + + assert controller == SupportedIngress.Controllers.ISTIO + + +@pytest.mark.usefixtures("gathered_resources_all_ingress_defined_nginx_used") +def test_determine_ingress_controller_all_ingress_defined_nginx_used( + gathered_resources_all_ingress_defined_nginx_used: Dict) -> None: + """ + This test verifies that the ingress controller is correctly determined when all ingress kinds are available in the + cluster but NGINX is controlling ingress. + + :param gathered_resources_all_ingress_defined_nginx_used: test fixture + """ + controller: Optional[Text] = \ + ingress_util.determine_ingress_controller(gathered_resources_all_ingress_defined_nginx_used) + + assert controller == SupportedIngress.Controllers.NGINX + + +@pytest.mark.usefixtures("gathered_resources_all_ingress_defined_openshift_used") +def test_determine_ingress_controller_all_ingress_defined_openshift_used( + gathered_resources_all_ingress_defined_openshift_used: Dict) -> None: + """ + This test verifies that the ingress controller is correctly determined when all ingress kinds are available in the + cluster but OpenShift is controlling ingress. 
+ + :param gathered_resources_all_ingress_defined_openshift_used: test fixture + """ + controller: Optional[Text] = \ + ingress_util.determine_ingress_controller(gathered_resources_all_ingress_defined_openshift_used) + + assert controller == SupportedIngress.Controllers.OPENSHIFT + + +@pytest.mark.usefixtures("gathered_resources_only_contour") +def test_determine_ingress_controller_only_contour(gathered_resources_only_contour: Dict) -> None: + """ + This test verifies that the ingress controller is correctly determined when only Contour ingress kinds are + available. + + :param gathered_resources_only_contour: test fixture + """ + controller: Optional[Text] = \ + ingress_util.determine_ingress_controller(gathered_resources_only_contour) + + assert controller == SupportedIngress.Controllers.CONTOUR + + +@pytest.mark.usefixtures("gathered_resources_only_istio") +def test_determine_ingress_controller_only_istio(gathered_resources_only_istio: Dict) -> None: + """ + This test verifies that the ingress controller is correctly determined when only Istio ingress kinds are + available. + + :param gathered_resources_only_istio: test fixture + """ + controller: Optional[Text] = \ + ingress_util.determine_ingress_controller(gathered_resources_only_istio) + + assert controller == SupportedIngress.Controllers.ISTIO + + +@pytest.mark.usefixtures("gathered_resources_only_nginx") +def test_determine_ingress_controller_only_nginx(gathered_resources_only_nginx: Dict) -> None: + """ + This test verifies that the ingress controller is correctly determined when only NGINX ingress kinds are + available. 
+ + :param gathered_resources_only_nginx: test fixture + """ + controller: Optional[Text] = \ + ingress_util.determine_ingress_controller(gathered_resources_only_nginx) + + assert controller == SupportedIngress.Controllers.NGINX + + +@pytest.mark.usefixtures("gathered_resources_only_openshift") +def test_determine_ingress_controller_only_openshift(gathered_resources_only_openshift: Dict) -> None: + """ + This test verifies that the ingress controller is correctly determined when only OpenShift ingress kinds are + available. + + :param gathered_resources_only_openshift: test fixture + """ + controller: Optional[Text] = \ + ingress_util.determine_ingress_controller(gathered_resources_only_openshift) + + assert controller == SupportedIngress.Controllers.OPENSHIFT + + +def test_ignorable_for_controller_if_unavailable_contour() -> None: + """ + This test verifies that the correct value is returned if the kind can be ignored when ingress is controlled by + Contour. + """ + # ignorable + # Ingress + assert ingress_util.ignorable_for_controller_if_unavailable(SupportedIngress.Controllers.CONTOUR, + KubernetesResource.Kinds.INGRESS) + # Route + assert ingress_util.ignorable_for_controller_if_unavailable(SupportedIngress.Controllers.CONTOUR, + KubernetesResource.Kinds.OPENSHIFT_ROUTE) + # VirtualService + assert ingress_util.ignorable_for_controller_if_unavailable(SupportedIngress.Controllers.CONTOUR, + KubernetesResource.Kinds.ISTIO_VIRTUAL_SERVICE) + + # not ignorable + # Pod + assert not ingress_util.ignorable_for_controller_if_unavailable(SupportedIngress.Controllers.CONTOUR, + KubernetesResource.Kinds.POD) + # HTTPProxy + assert not ingress_util.ignorable_for_controller_if_unavailable(SupportedIngress.Controllers.CONTOUR, + KubernetesResource.Kinds.CONTOUR_HTTPPROXY) + + +def test_ignorable_for_controller_if_unavailable_istio() -> None: + """ + This test verifies that the correct value is returned if the kind can be ignored when ingress is controlled by + Istio. 
+ """ + # ignorable + # HTTPProxy + assert ingress_util.ignorable_for_controller_if_unavailable(SupportedIngress.Controllers.ISTIO, + KubernetesResource.Kinds.CONTOUR_HTTPPROXY) + # Ingress + assert ingress_util.ignorable_for_controller_if_unavailable(SupportedIngress.Controllers.ISTIO, + KubernetesResource.Kinds.INGRESS) + # Route + assert ingress_util.ignorable_for_controller_if_unavailable(SupportedIngress.Controllers.ISTIO, + KubernetesResource.Kinds.OPENSHIFT_ROUTE) + + # not ignorable + # Pod + assert not ingress_util.ignorable_for_controller_if_unavailable(SupportedIngress.Controllers.ISTIO, + KubernetesResource.Kinds.POD) + # VirtualService + assert not ingress_util.ignorable_for_controller_if_unavailable(SupportedIngress.Controllers.ISTIO, + KubernetesResource.Kinds.ISTIO_VIRTUAL_SERVICE) + + +def test_ignorable_for_controller_if_unavailable_nginx() -> None: + """ + This test verifies that the correct value is returned if the kind can be ignored when ingress is controlled by + NGINX. 
+ """ + # ignorable + # HTTPProxy + assert ingress_util.ignorable_for_controller_if_unavailable(SupportedIngress.Controllers.NGINX, + KubernetesResource.Kinds.CONTOUR_HTTPPROXY) + # Route + assert ingress_util.ignorable_for_controller_if_unavailable(SupportedIngress.Controllers.NGINX, + KubernetesResource.Kinds.OPENSHIFT_ROUTE) + # VirtualService + assert ingress_util.ignorable_for_controller_if_unavailable(SupportedIngress.Controllers.NGINX, + KubernetesResource.Kinds.ISTIO_VIRTUAL_SERVICE) + + # not ignorable + # Pod + assert not ingress_util.ignorable_for_controller_if_unavailable(SupportedIngress.Controllers.NGINX, + KubernetesResource.Kinds.POD) + # Ingress + assert not ingress_util.ignorable_for_controller_if_unavailable(SupportedIngress.Controllers.NGINX, + KubernetesResource.Kinds.INGRESS) + + +def test_ignorable_for_controller_if_unavailable_openshift() -> None: + """ + This test verifies that the correct value is returned if the kind can be ignored when ingress is controlled by + OpenShift. 
+ """ + # ignorable + # HTTPProxy + assert ingress_util.ignorable_for_controller_if_unavailable(SupportedIngress.Controllers.OPENSHIFT, + KubernetesResource.Kinds.CONTOUR_HTTPPROXY) + # Ingress + assert ingress_util.ignorable_for_controller_if_unavailable(SupportedIngress.Controllers.OPENSHIFT, + KubernetesResource.Kinds.INGRESS) + # VirtualService + assert ingress_util.ignorable_for_controller_if_unavailable(SupportedIngress.Controllers.OPENSHIFT, + KubernetesResource.Kinds.ISTIO_VIRTUAL_SERVICE) + + # not ignorable + # Pod + assert not ingress_util.ignorable_for_controller_if_unavailable(SupportedIngress.Controllers.OPENSHIFT, + KubernetesResource.Kinds.POD) + # Route + assert not ingress_util.ignorable_for_controller_if_unavailable(SupportedIngress.Controllers.OPENSHIFT, + KubernetesResource.Kinds.OPENSHIFT_ROUTE) diff --git a/deployment_report/model/utils/test/test_metrics_util.py b/deployment_report/model/utils/test/test_metrics_util.py new file mode 100644 index 0000000..c9ac354 --- /dev/null +++ b/deployment_report/model/utils/test/test_metrics_util.py @@ -0,0 +1,110 @@ +#################################################################### +# ### test_metrics_utils.py ### +#################################################################### +# ### Author: SAS Institute Inc. ### +#################################################################### +# ### +# Copyright (c) 2021, SAS Institute Inc., Cary, NC, USA. ### +# All Rights Reserved. 
### +# SPDX-License-Identifier: Apache-2.0 ### +# ### +#################################################################### +import copy +import pytest + +from typing import Dict + +from deployment_report.model.static.viya_deployment_report_keys import \ + ITEMS_KEY, \ + ViyaDeploymentReportKeys as ReportKeys +from deployment_report.model.utils import metrics_util + +from viya_ark_library.k8s.sas_k8s_objects import KubernetesResource +from viya_ark_library.k8s.test_impl.sas_kubectl_test import KubectlTest + + +#################################################################### +# Unit Tests ### +#################################################################### +@pytest.mark.usefixtures("gathered_resources_only_nginx") +def test_get_pod_metrics(gathered_resources_only_nginx: Dict) -> None: + """ + This test verifies that pod metrics are correctly defined per pod when metrics are available. + + :param gathered_resources_only_nginx: test fixture + """ + # copy the gathered_resources dict so it won't be altered for other tests + gathered_resources_copy: Dict = copy.deepcopy(gathered_resources_only_nginx) + + # get the list of all pods + pods: Dict = gathered_resources_copy[KubernetesResource.Kinds.POD] + + # add the metrics + metrics_util.get_pod_metrics(kubectl=KubectlTest(), pods=pods) + + # verify that the metrics were added + for pod in pods[ITEMS_KEY].values(): + assert ReportKeys.ResourceDetails.Ext.METRICS_DICT in pod[ReportKeys.ResourceDetails.EXT_DICT] + + +@pytest.mark.usefixtures("gathered_resources_only_nginx") +def test_get_pod_metrics_unavailable(gathered_resources_only_nginx: Dict) -> None: + """ + This test verifies that no errors are raised when pod metrics are not available. 
+ + :param gathered_resources_only_nginx: test fixture + """ + # copy the gathered_resources dict so it won't be altered for other tests + gathered_resources_copy: Dict = copy.deepcopy(gathered_resources_only_nginx) + + # get the list of all pods + pods: Dict = gathered_resources_copy[KubernetesResource.Kinds.POD] + + # try to add the metrics + metrics_util.get_pod_metrics(kubectl=KubectlTest(include_metrics=False), pods=pods) + + # make sure the metrics dictionary was not added + for pod in pods[ITEMS_KEY].values(): + assert ReportKeys.ResourceDetails.Ext.METRICS_DICT not in pod[ReportKeys.ResourceDetails.EXT_DICT] + + +@pytest.mark.usefixtures("gathered_resources_only_nginx") +def test_get_node_metrics(gathered_resources_only_nginx: Dict) -> None: + """ + This test verifies that node metrics are correctly defined per node when metrics are available. + + :param gathered_resources_only_nginx: test fixture + """ + # copy the gathered_resources dict so it won't be altered for other tests + gathered_resources_copy: Dict = copy.deepcopy(gathered_resources_only_nginx) + + # get the list of all nodes + nodes: Dict = gathered_resources_copy[KubernetesResource.Kinds.NODE] + + # add the metrics + metrics_util.get_node_metrics(kubectl=KubectlTest(), nodes=nodes) + + # make sure the metrics were added + for node in nodes[ITEMS_KEY].values(): + assert ReportKeys.ResourceDetails.Ext.METRICS_DICT in node[ReportKeys.ResourceDetails.EXT_DICT] + + +@pytest.mark.usefixtures("gathered_resources_only_nginx") +def test_get_node_metrics_unavailable(gathered_resources_only_nginx: Dict) -> None: + """ + This test verifies that no errors are raised when node metrics are not available. 
+ + :param gathered_resources_only_nginx: test fixture + """ + # copy the gathered_resources dict so it won't be altered for other tests + gathered_resources_copy: Dict = copy.deepcopy(gathered_resources_only_nginx) + + # get the list of all nodes + nodes: Dict = gathered_resources_copy[KubernetesResource.Kinds.NODE] + + # try to add the metrics + metrics_util.get_node_metrics(kubectl=KubectlTest(include_metrics=False), nodes=nodes) + + # make sure the metrics dictionary was not added + for node in nodes[ITEMS_KEY].values(): + assert ReportKeys.ResourceDetails.Ext.METRICS_DICT not in node[ReportKeys.ResourceDetails.EXT_DICT] \ No newline at end of file diff --git a/deployment_report/model/utils/test/test_relationship_util.py b/deployment_report/model/utils/test/test_relationship_util.py new file mode 100644 index 0000000..06846c8 --- /dev/null +++ b/deployment_report/model/utils/test/test_relationship_util.py @@ -0,0 +1,355 @@ +#################################################################### +# ### test_relationship_util.py ### +#################################################################### +# ### Author: SAS Institute Inc. ### +#################################################################### +# ### +# Copyright (c) 2021, SAS Institute Inc., Cary, NC, USA. ### +# All Rights Reserved. 
### +# SPDX-License-Identifier: Apache-2.0 ### +# ### +#################################################################### +import copy +import pytest + +from typing import Dict, List + +from deployment_report.model.static.viya_deployment_report_keys import \ + ITEMS_KEY, \ + ViyaDeploymentReportKeys as ReportKeys +from deployment_report.model.utils import relationship_util + +from viya_ark_library.k8s.sas_k8s_ingress import SupportedIngress +from viya_ark_library.k8s.sas_k8s_objects import KubernetesResource +from viya_ark_library.k8s.test_impl.sas_kubectl_test import KubectlTest + +# create an alias to the test values +TestVals: KubectlTest.Values = KubectlTest.Values() + + +#################################################################### +# Unit Tests ### +#################################################################### +def test_create_relationship_dict() -> None: + """ + This test verifies that a relationship dictionary is correctly created. + """ + # create dictionary + rel: Dict = relationship_util.create_relationship_dict(KubernetesResource.Kinds.SERVICE, "foo") + + # make sure attributes are correct + assert isinstance(rel, dict) + assert rel[ReportKeys.ResourceDetails.Ext.Relationship.KIND] == KubernetesResource.Kinds.SERVICE + assert rel[ReportKeys.ResourceDetails.Ext.Relationship.NAME] == "foo" + + +@pytest.mark.usefixtures("gathered_resources_only_nginx") +def test_define_node_to_pod_relationships(gathered_resources_only_nginx: Dict) -> None: + """ + This test verifies that the relationship between Nodes and Pods is correctly defined. 
+ + :param gathered_resources_only_nginx: test fixture + """ + # copy the gathered_resources dict so it won't be altered for other tests + gathered_resources_copy: Dict = copy.deepcopy(gathered_resources_only_nginx) + + # call utils to define Node to Pod relationship + relationship_util.define_node_to_pod_relationships( + gathered_resources_copy[KubernetesResource.Kinds.NODE], + gathered_resources_copy[KubernetesResource.Kinds.POD] + ) + + # get the node + node: Dict = gathered_resources_copy[KubernetesResource.Kinds.NODE][ITEMS_KEY][TestVals.RESOURCE_NODE_1_NAME] + + # make sure the relationships were defined and all exist + assert ReportKeys.ResourceDetails.Ext.RELATIONSHIPS_LIST in node[ReportKeys.ResourceDetails.EXT_DICT] + assert len(node[ReportKeys.ResourceDetails.EXT_DICT][ReportKeys.ResourceDetails.Ext.RELATIONSHIPS_LIST]) == 6 + + # get the relationship + rel: Dict = node[ReportKeys.ResourceDetails.EXT_DICT][ReportKeys.ResourceDetails.Ext.RELATIONSHIPS_LIST][0] + + # make sure the relationship attributes are correct + assert rel[ReportKeys.ResourceDetails.Ext.Relationship.KIND] == KubernetesResource.Kinds.POD + assert rel[ReportKeys.ResourceDetails.Ext.Relationship.NAME] == TestVals.COMPONENT_PROMETHEUS_POD_NAME + + # get the relationship + rel: Dict = node[ReportKeys.ResourceDetails.EXT_DICT][ReportKeys.ResourceDetails.Ext.RELATIONSHIPS_LIST][1] + + # make sure the relationship attributes are correct + assert rel[ReportKeys.ResourceDetails.Ext.Relationship.KIND] == KubernetesResource.Kinds.POD + assert rel[ReportKeys.ResourceDetails.Ext.Relationship.NAME] == TestVals.COMPONENT_SAS_ANNOTATIONS_POD_NAME + + # get the relationship + rel: Dict = node[ReportKeys.ResourceDetails.EXT_DICT][ReportKeys.ResourceDetails.Ext.RELATIONSHIPS_LIST][2] + + # make sure the relationship attributes are correct + assert rel[ReportKeys.ResourceDetails.Ext.Relationship.KIND] == KubernetesResource.Kinds.POD + assert rel[ReportKeys.ResourceDetails.Ext.Relationship.NAME] == 
TestVals.COMPONENT_SAS_CACHE_SERVER_POD_NAME + + # get the relationship + rel: Dict = node[ReportKeys.ResourceDetails.EXT_DICT][ReportKeys.ResourceDetails.Ext.RELATIONSHIPS_LIST][3] + + # make sure the relationship attributes are correct + assert rel[ReportKeys.ResourceDetails.Ext.Relationship.KIND] == KubernetesResource.Kinds.POD + assert rel[ReportKeys.ResourceDetails.Ext.Relationship.NAME] == TestVals.COMPONENT_SAS_CAS_OPERATOR_POD_NAME + + # get the relationship + rel: Dict = node[ReportKeys.ResourceDetails.EXT_DICT][ReportKeys.ResourceDetails.Ext.RELATIONSHIPS_LIST][4] + + # make sure the relationship attributes are correct + assert rel[ReportKeys.ResourceDetails.Ext.Relationship.KIND] == KubernetesResource.Kinds.POD + assert rel[ReportKeys.ResourceDetails.Ext.Relationship.NAME] == TestVals.COMPONENT_SAS_CAS_SERVER_POD_NAME + + # get the relationship + rel: Dict = node[ReportKeys.ResourceDetails.EXT_DICT][ReportKeys.ResourceDetails.Ext.RELATIONSHIPS_LIST][5] + + # make sure the relationship attributes are correct + assert rel[ReportKeys.ResourceDetails.Ext.Relationship.KIND] == KubernetesResource.Kinds.POD + assert rel[ReportKeys.ResourceDetails.Ext.Relationship.NAME] == TestVals.COMPONENT_SAS_SCHEDULED_BACKUP_JOB_POD_NAME + + +@pytest.mark.usefixtures("gathered_resources_only_nginx") +def test_define_pod_to_service_relationships(gathered_resources_only_nginx: Dict) -> None: + """ + This test verifies that the relationship between and pod and a service is correctly defined. 
+ + :param gathered_resources_only_nginx: test fixture + """ + # copy the gathered_resources dict so it won't be altered for other tests + gathered_resources_copy: Dict = copy.deepcopy(gathered_resources_only_nginx) + + # reset the relationship lists for pods to make the service relationships easier to verify + for pod in gathered_resources_copy[KubernetesResource.Kinds.POD][ITEMS_KEY].values(): + pod[ReportKeys.ResourceDetails.EXT_DICT][ReportKeys.ResourceDetails.Ext.RELATIONSHIPS_LIST]: List = list() + + # call utils to define Pod to Service relationship + relationship_util.define_pod_to_service_relationships( + gathered_resources_copy[KubernetesResource.Kinds.POD], + gathered_resources_copy[KubernetesResource.Kinds.SERVICE] + ) + + # prometheus pod + + # get the Pod resource + pod: Dict = gathered_resources_copy[KubernetesResource.Kinds.POD][ITEMS_KEY][TestVals.COMPONENT_PROMETHEUS_POD_NAME] + + # make sure the relationship was defined and exists + assert ReportKeys.ResourceDetails.Ext.RELATIONSHIPS_LIST in pod[ReportKeys.ResourceDetails.EXT_DICT] + assert len(pod[ReportKeys.ResourceDetails.EXT_DICT][ReportKeys.ResourceDetails.Ext.RELATIONSHIPS_LIST]) == 1 + + # get the relationship + rel: Dict = pod[ReportKeys.ResourceDetails.EXT_DICT][ReportKeys.ResourceDetails.Ext.RELATIONSHIPS_LIST][0] + + # make sure the relationship attributes are correct + assert rel[ReportKeys.ResourceDetails.Ext.Relationship.KIND] == KubernetesResource.Kinds.SERVICE + assert rel[ReportKeys.ResourceDetails.Ext.Relationship.NAME] == TestVals.COMPONENT_PROMETHEUS_SERVICE_NAME + + # sas-annotations pod + + # get the Pod resource + pod: Dict = gathered_resources_copy[KubernetesResource.Kinds.POD][ITEMS_KEY][ + TestVals.COMPONENT_SAS_ANNOTATIONS_POD_NAME] + + # make sure the relationship was defined and exists + assert ReportKeys.ResourceDetails.Ext.RELATIONSHIPS_LIST in pod[ReportKeys.ResourceDetails.EXT_DICT] + assert 
len(pod[ReportKeys.ResourceDetails.EXT_DICT][ReportKeys.ResourceDetails.Ext.RELATIONSHIPS_LIST]) == 1 + + # get the relationship + rel: Dict = pod[ReportKeys.ResourceDetails.EXT_DICT][ReportKeys.ResourceDetails.Ext.RELATIONSHIPS_LIST][0] + + # make sure the relationship attributes are correct + assert rel[ReportKeys.ResourceDetails.Ext.Relationship.KIND] == KubernetesResource.Kinds.SERVICE + assert rel[ReportKeys.ResourceDetails.Ext.Relationship.NAME] == TestVals.COMPONENT_SAS_ANNOTATIONS_SERVICE_NAME + + # sas-cacheserver pod + + # get the Pod resource + pod: Dict = gathered_resources_copy[KubernetesResource.Kinds.POD][ITEMS_KEY][ + TestVals.COMPONENT_SAS_CACHE_SERVER_POD_NAME] + + # make sure the relationship was defined and exists + assert ReportKeys.ResourceDetails.Ext.RELATIONSHIPS_LIST in pod[ReportKeys.ResourceDetails.EXT_DICT] + assert len(pod[ReportKeys.ResourceDetails.EXT_DICT][ReportKeys.ResourceDetails.Ext.RELATIONSHIPS_LIST]) == 1 + + # get the relationship + rel: Dict = pod[ReportKeys.ResourceDetails.EXT_DICT][ReportKeys.ResourceDetails.Ext.RELATIONSHIPS_LIST][0] + + # make sure the relationship attributes are correct + assert rel[ReportKeys.ResourceDetails.Ext.Relationship.KIND] == KubernetesResource.Kinds.SERVICE + assert rel[ReportKeys.ResourceDetails.Ext.Relationship.NAME] == TestVals.COMPONENT_SAS_CACHE_SERVER_SERVICE_NAME + + # sas-cas-operator pod + + # get the Pod resource + pod: Dict = gathered_resources_copy[KubernetesResource.Kinds.POD][ITEMS_KEY][ + TestVals.COMPONENT_SAS_CAS_OPERATOR_POD_NAME] + + # make sure the relationship was defined and exists + assert ReportKeys.ResourceDetails.Ext.RELATIONSHIPS_LIST in pod[ReportKeys.ResourceDetails.EXT_DICT] + assert len(pod[ReportKeys.ResourceDetails.EXT_DICT][ReportKeys.ResourceDetails.Ext.RELATIONSHIPS_LIST]) == 1 + + # get the relationship + rel: Dict = pod[ReportKeys.ResourceDetails.EXT_DICT][ReportKeys.ResourceDetails.Ext.RELATIONSHIPS_LIST][0] + + # make sure the relationship attributes are 
correct + assert rel[ReportKeys.ResourceDetails.Ext.Relationship.KIND] == KubernetesResource.Kinds.SERVICE + assert rel[ReportKeys.ResourceDetails.Ext.Relationship.NAME] == TestVals.COMPONENT_SAS_CAS_OPERATOR_SERVICE_NAME + + # sas-cas-server pod + + # get the Pod resource + pod: Dict = gathered_resources_copy[KubernetesResource.Kinds.POD][ITEMS_KEY][ + TestVals.COMPONENT_SAS_CAS_SERVER_POD_NAME] + + # make sure the relationship was defined and exists + assert ReportKeys.ResourceDetails.Ext.RELATIONSHIPS_LIST in pod[ReportKeys.ResourceDetails.EXT_DICT] + assert len(pod[ReportKeys.ResourceDetails.EXT_DICT][ReportKeys.ResourceDetails.Ext.RELATIONSHIPS_LIST]) == 2 + + # get the relationship + rel: Dict = pod[ReportKeys.ResourceDetails.EXT_DICT][ReportKeys.ResourceDetails.Ext.RELATIONSHIPS_LIST][0] + + # make sure the relationship attributes are correct + assert rel[ReportKeys.ResourceDetails.Ext.Relationship.KIND] == KubernetesResource.Kinds.SERVICE + assert rel[ReportKeys.ResourceDetails.Ext.Relationship.NAME] == TestVals.COMPONENT_SAS_CAS_SERVER_SERVICE_NAME + + # get the relationship + rel: Dict = pod[ReportKeys.ResourceDetails.EXT_DICT][ReportKeys.ResourceDetails.Ext.RELATIONSHIPS_LIST][1] + + # make sure the relationship attributes are correct + assert rel[ReportKeys.ResourceDetails.Ext.Relationship.KIND] == KubernetesResource.Kinds.SERVICE + assert rel[ReportKeys.ResourceDetails.Ext.Relationship.NAME] == TestVals.COMPONENT_SAS_CAS_SERVER_EXTNP_SERVICE_NAME + + +@pytest.mark.usefixtures("gathered_resources_all_ingress_defined_contour_used") +def test_define_service_to_ingress_relationships_contour(gathered_resources_all_ingress_defined_contour_used: Dict) \ + -> None: + """ + This test verifies that the service to httpproxy relationships are correctly defined. 
+ + :param gathered_resources_all_ingress_defined_contour_used: test fixture + """ + # copy the gathered_resources dict so it won't be altered for other tests + gathered_resources_copy: Dict = copy.deepcopy(gathered_resources_all_ingress_defined_contour_used) + + # call utils to define Service to HTTPProxy relationship + relationship_util.define_service_to_ingress_relationships(SupportedIngress.Controllers.CONTOUR, + gathered_resources_copy) + + # get the Service resource + service: Dict = gathered_resources_copy[KubernetesResource.Kinds.SERVICE][ITEMS_KEY][ + TestVals.COMPONENT_SAS_ANNOTATIONS_SERVICE_NAME] + + # make sure the relationship was defined and exists + assert ReportKeys.ResourceDetails.Ext.RELATIONSHIPS_LIST in service[ReportKeys.ResourceDetails.EXT_DICT] + assert len(service[ReportKeys.ResourceDetails.EXT_DICT][ReportKeys.ResourceDetails.Ext.RELATIONSHIPS_LIST]) == 1 + + # get the relationship + rel: Dict = service[ReportKeys.ResourceDetails.EXT_DICT][ReportKeys.ResourceDetails.Ext.RELATIONSHIPS_LIST][0] + + # make sure the relationship attributes are correct + assert rel[ReportKeys.ResourceDetails.Ext.Relationship.KIND] == KubernetesResource.Kinds.CONTOUR_HTTPPROXY + assert rel[ReportKeys.ResourceDetails.Ext.Relationship.NAME] == \ + TestVals.COMPONENT_SAS_ANNOTATIONS_CONTOUR_HTTPPROXY_NAME + + +@pytest.mark.usefixtures("gathered_resources_all_ingress_defined_istio_used") +def test_define_service_to_ingress_relationships_istio(gathered_resources_all_ingress_defined_istio_used: Dict) \ + -> None: + """ + This test verifies that the service to virtual service relationships are correctly defined. 
+ + :param gathered_resources_all_ingress_defined_istio_used: test fixture + """ + # copy the gathered_resources dict so it won't be altered for other tests + gathered_resources_copy: Dict = copy.deepcopy(gathered_resources_all_ingress_defined_istio_used) + + # call utils to define Service to VirtualService relationship + relationship_util.define_service_to_ingress_relationships(SupportedIngress.Controllers.ISTIO, + gathered_resources_copy) + + # get the Service resource + service: Dict = gathered_resources_copy[KubernetesResource.Kinds.SERVICE][ITEMS_KEY][ + TestVals.COMPONENT_SAS_ANNOTATIONS_SERVICE_NAME] + + # make sure the relationship was defined and exists + assert ReportKeys.ResourceDetails.Ext.RELATIONSHIPS_LIST in service[ReportKeys.ResourceDetails.EXT_DICT] + assert len(service[ReportKeys.ResourceDetails.EXT_DICT][ReportKeys.ResourceDetails.Ext.RELATIONSHIPS_LIST]) == 1 + + # get the relationship + rel: Dict = service[ReportKeys.ResourceDetails.EXT_DICT][ReportKeys.ResourceDetails.Ext.RELATIONSHIPS_LIST][0] + + # make sure the relationship attributes are correct + assert rel[ReportKeys.ResourceDetails.Ext.Relationship.KIND] == KubernetesResource.Kinds.ISTIO_VIRTUAL_SERVICE + assert rel[ReportKeys.ResourceDetails.Ext.Relationship.NAME] == \ + TestVals.COMPONENT_SAS_ANNOTATIONS_VIRTUAL_SERVICE_NAME + + +@pytest.mark.usefixtures("gathered_resources_all_ingress_defined_nginx_used") +def test_define_service_to_ingress_relationships_nginx(gathered_resources_all_ingress_defined_nginx_used: Dict) -> None: + """ + This test verifies that the relationships between Services and Ingress objects are correctly defined. 
+ + :param gathered_resources_all_ingress_defined_nginx_used: test fixture + """ + # copy the gathered_resources dict so it won't be altered for other tests + gathered_resources_copy: Dict = copy.deepcopy(gathered_resources_all_ingress_defined_nginx_used) + + # call utils to define Service to Ingress relationships for nginx + relationship_util.define_service_to_ingress_relationships(SupportedIngress.Controllers.NGINX, + gathered_resources_copy) + + # get the Service details + service: Dict = gathered_resources_copy[KubernetesResource.Kinds.SERVICE][ITEMS_KEY][ + TestVals.COMPONENT_SAS_ANNOTATIONS_SERVICE_NAME] + + # makes sure relationship is defined and exists + assert ReportKeys.ResourceDetails.Ext.RELATIONSHIPS_LIST in service[ReportKeys.ResourceDetails.EXT_DICT] + assert len(service[ReportKeys.ResourceDetails.EXT_DICT][ReportKeys.ResourceDetails.Ext.RELATIONSHIPS_LIST]) == 2 + + # get the first relationship + rel: Dict = service[ReportKeys.ResourceDetails.EXT_DICT][ReportKeys.ResourceDetails.Ext.RELATIONSHIPS_LIST][0] + + # make sure the relationship attributes are correct + assert rel[ReportKeys.ResourceDetails.Ext.Relationship.KIND] == KubernetesResource.Kinds.INGRESS + assert rel[ReportKeys.ResourceDetails.Ext.Relationship.NAME] == \ + TestVals.COMPONENT_SAS_ANNOTATIONS_INGRESS_NAME_DEPRECATED_DEFINITION + + # get the second relationship + rel: Dict = service[ReportKeys.ResourceDetails.EXT_DICT][ReportKeys.ResourceDetails.Ext.RELATIONSHIPS_LIST][1] + + # make sure the relationship attributes are correct + assert rel[ReportKeys.ResourceDetails.Ext.Relationship.KIND] == KubernetesResource.Kinds.INGRESS + assert rel[ReportKeys.ResourceDetails.Ext.Relationship.NAME] == TestVals.COMPONENT_SAS_ANNOTATIONS_INGRESS_NAME + + +@pytest.mark.usefixtures("gathered_resources_all_ingress_defined_openshift_used") +def test_define_service_to_ingress_relationships_openshift( + gathered_resources_all_ingress_defined_openshift_used: Dict) -> None: + """ + This test verifies 
that the service to route relationships are correctly defined. + + :param gathered_resources_all_ingress_defined_openshift_used: test fixture + """ + # copy the gathered_resources dict so it won't be altered for other tests + gathered_resources_copy: Dict = copy.deepcopy(gathered_resources_all_ingress_defined_openshift_used) + + # call utils to define Service to Route relationship + relationship_util.define_service_to_ingress_relationships(SupportedIngress.Controllers.OPENSHIFT, + gathered_resources_copy) + + # get the Service resource + service: Dict = gathered_resources_copy[KubernetesResource.Kinds.SERVICE][ITEMS_KEY][ + TestVals.COMPONENT_SAS_ANNOTATIONS_SERVICE_NAME] + + # make sure the relationship was defined and exists + assert ReportKeys.ResourceDetails.Ext.RELATIONSHIPS_LIST in service[ReportKeys.ResourceDetails.EXT_DICT] + assert len(service[ReportKeys.ResourceDetails.EXT_DICT][ReportKeys.ResourceDetails.Ext.RELATIONSHIPS_LIST]) == 1 + + # get the relationship + rel: Dict = service[ReportKeys.ResourceDetails.EXT_DICT][ReportKeys.ResourceDetails.Ext.RELATIONSHIPS_LIST][0] + + # make sure the relationship attributes are correct + assert rel[ReportKeys.ResourceDetails.Ext.Relationship.KIND] == KubernetesResource.Kinds.OPENSHIFT_ROUTE + assert rel[ReportKeys.ResourceDetails.Ext.Relationship.NAME] == \ + TestVals.COMPONENT_SAS_ANNOTATIONS_OPENSHIFT_ROUTE_NAME diff --git a/deployment_report/model/utils/test/test_resource_util.py b/deployment_report/model/utils/test/test_resource_util.py new file mode 100644 index 0000000..ccbefbd --- /dev/null +++ b/deployment_report/model/utils/test/test_resource_util.py @@ -0,0 +1,438 @@ +#################################################################### +# ### test_resource_util.py ### +#################################################################### +# ### Author: SAS Institute Inc. 
### +#################################################################### +# ### +# Copyright (c) 2021, SAS Institute Inc., Cary, NC, USA. ### +# All Rights Reserved. ### +# SPDX-License-Identifier: Apache-2.0 ### +# ### +#################################################################### +import pytest + +from typing import Dict + +from deployment_report.model.static.viya_deployment_report_keys import \ + ITEMS_KEY, \ + ViyaDeploymentReportKeys as ReportKeys + +from viya_ark_library.k8s.sas_k8s_objects import KubernetesResource +from viya_ark_library.k8s.test_impl.sas_kubectl_test import KubectlTest + +# alias to KubectlTest.Values to shorten name +TestVals: KubectlTest.Values = KubectlTest.Values() + + +#################################################################### +# Unit Tests ### +#################################################################### +@pytest.mark.usefixtures("gathered_resources_all_ingress_defined_nginx_used") +def test_gather_details_all_resources(gathered_resources_all_ingress_defined_nginx_used: Dict) -> None: + """ + This test verifies that all resources gathered during the execution of the gathered_resources fixture are present. + The gathered_resources fixture includes Pods, which are the smallest unit, so Pods as well as all owning object, + nodes, and networking resources should be gathered. 
+ + :param gathered_resources_all_ingress_defined_nginx_used: test fixture + """ + # make sure the correct number of resources categories are defined + assert len(gathered_resources_all_ingress_defined_nginx_used) == TestVals.RESOURCE_LIST_ALL_COUNT + + for kind in TestVals.RESOURCE_LIST_ALL: + assert kind in gathered_resources_all_ingress_defined_nginx_used + assert gathered_resources_all_ingress_defined_nginx_used[kind][ReportKeys.KindDetails.AVAILABLE] is True + + +@pytest.mark.usefixtures("gathered_resources_only_contour") +def test_gather_details_only_contour(gathered_resources_only_contour: Dict) -> None: + """ + This test verifies that all resources gathered during the execution of the gathered_resources fixture are present. + The gathered_resources fixture includes Pods, which are the smallest unit, so Pods as well as all owning object, + nodes, and networking resources should be gathered. + + :param gathered_resources_only_contour: test fixture + """ + # make sure the correct number of resources categories are defined + assert len(gathered_resources_only_contour) == TestVals.RESOURCE_LIST_CONTOUR_COUNT + + for kind in TestVals.RESOURCE_LIST_CONTOUR: + assert kind in gathered_resources_only_contour + assert gathered_resources_only_contour[kind][ReportKeys.KindDetails.AVAILABLE] is True + + +@pytest.mark.usefixtures("gathered_resources_only_istio") +def test_gather_details_only_istio(gathered_resources_only_istio: Dict) -> None: + """ + This test verifies that all resources gathered during the execution of the gathered_resources fixture are present. + The gathered_resources fixture includes Pods, which are the smallest unit, so Pods as well as all owning object, + nodes, and networking resources should be gathered. 
+ + :param gathered_resources_only_istio: test fixture + """ + # make sure the correct number of resources categories are defined + assert len(gathered_resources_only_istio) == TestVals.RESOURCE_LIST_ISTIO_COUNT + + for kind in TestVals.RESOURCE_LIST_ISTIO: + assert kind in gathered_resources_only_istio + assert gathered_resources_only_istio[kind][ReportKeys.KindDetails.AVAILABLE] is True + + +@pytest.mark.usefixtures("gathered_resources_only_nginx") +def test_gather_details_only_nginx(gathered_resources_only_nginx: Dict) -> None: + """ + This test verifies that all resources gathered during the execution of the gathered_resources fixture are present. + The gathered_resources fixture includes Pods, which are the smallest unit, so Pods as well as all owning object, + nodes, and networking resources should be gathered. + + :param gathered_resources_only_nginx: test fixture + """ + # make sure the correct number of resources categories are defined + assert len(gathered_resources_only_nginx) == TestVals.RESOURCE_LIST_NGINX_COUNT + + for kind in TestVals.RESOURCE_LIST_NGINX: + assert kind in gathered_resources_only_nginx + assert gathered_resources_only_nginx[kind][ReportKeys.KindDetails.AVAILABLE] is True + + +@pytest.mark.usefixtures("gathered_resources_only_openshift") +def test_gather_details_only_openshift(gathered_resources_only_openshift) -> None: + """ + This test verifies that all resources gathered during the execution of the gathered_resources fixture are present. + The gathered_resources fixture includes Pods, which are the smallest unit, so Pods as well as all owning object, + nodes, and networking resources should be gathered. 
+ + :param gathered_resources_only_openshift: test fixture + """ + # make sure the correct number of resources categories are defined + assert len(gathered_resources_only_openshift) == TestVals.RESOURCE_LIST_OPENSHIFT_COUNT + + for kind in TestVals.RESOURCE_LIST_OPENSHIFT: + assert kind in gathered_resources_only_openshift + assert gathered_resources_only_openshift[kind][ReportKeys.KindDetails.AVAILABLE] is True + + +@pytest.mark.usefixtures("gathered_resources_only_nginx") +def test_gather_details_cas_deployments(gathered_resources_only_nginx: Dict) -> None: + """ + This test verifies that expected details about CASDeployment resources are in the gathered details. + + :param gathered_resources_only_nginx: test fixture + """ + # make sure the CASDeployment count is correct + assert gathered_resources_only_nginx[KubernetesResource.Kinds.CAS_DEPLOYMENT][ReportKeys.KindDetails.COUNT] == \ + TestVals.RESOURCE_CAS_DEPLOYMENT_COUNT + + for name in TestVals.RESOURCE_CAS_DEPLOYMENT_LIST: + # make sure the expected CASDeployment is available by name + assert name in gathered_resources_only_nginx[KubernetesResource.Kinds.CAS_DEPLOYMENT][ITEMS_KEY] + + +@pytest.mark.usefixtures("gathered_resources_only_nginx") +def test_gather_details_cron_jobs(gathered_resources_only_nginx: Dict) -> None: + """ + This test verifies that expected details about CronJob resources are in the gathered details. 
+ + :param gathered_resources_only_nginx: test fixture + """ + # make sure the CronJob count is correct + assert gathered_resources_only_nginx[KubernetesResource.Kinds.CRON_JOB][ReportKeys.KindDetails.COUNT] == \ + TestVals.RESOURCE_CRON_JOB_COUNT + + for name in TestVals.RESOURCE_CRON_JOB_LIST: + # make sure the expected CronJob is available by name + assert name in gathered_resources_only_nginx[KubernetesResource.Kinds.CRON_JOB][ITEMS_KEY] + + +@pytest.mark.usefixtures("gathered_resources_only_nginx") +def test_gather_details_deployments(gathered_resources_only_nginx: Dict) -> None: + """ + This test verifies that expected details about Deployment resources are in the gathered details. + + :param gathered_resources_only_nginx: test fixture + """ + # make sure the Deployment resource count is correct + assert gathered_resources_only_nginx[KubernetesResource.Kinds.DEPLOYMENT][ReportKeys.KindDetails.COUNT] == \ + TestVals.RESOURCE_DEPLOYMENT_COUNT + + for name in TestVals.RESOURCE_DEPLOYMENT_LIST: + # make sure the expected Deployment is available by name + assert name in gathered_resources_only_nginx[KubernetesResource.Kinds.DEPLOYMENT][ITEMS_KEY] + + +@pytest.mark.usefixtures("gathered_resources_all_ingress_defined_contour_used") +def test_gather_details_httpproxy_all_contour_used(gathered_resources_all_ingress_defined_contour_used: Dict) -> None: + """ + This test verifies that expected details about HTTPProxy resources are in the gathered details. 
+ + :param gathered_resources_all_ingress_defined_contour_used: test fixture + """ + # make sure the HTTPProxy resource count is correct + assert gathered_resources_all_ingress_defined_contour_used[KubernetesResource.Kinds.CONTOUR_HTTPPROXY][ + ReportKeys.KindDetails.COUNT] == TestVals.RESOURCE_HTTPPROXY_COUNT + + for name in TestVals.RESOURCE_HTTPPROXY_LIST: + # make sure the expected HTTPProxy is available by name + # relationships to Service objects have not yet been made + assert name in gathered_resources_all_ingress_defined_contour_used[KubernetesResource.Kinds.CONTOUR_HTTPPROXY][ + ITEMS_KEY] + + +@pytest.mark.usefixtures("gathered_resources_only_contour") +def test_gather_details_httpproxy_only_contour(gathered_resources_only_contour: Dict) -> None: + """ + This test verifies that expected details about HTTPProxy resources are in the gathered details. + + :param gathered_resources_only_contour: test fixture + """ + # make sure the HTTPProxy resource count is correct + assert gathered_resources_only_contour[KubernetesResource.Kinds.CONTOUR_HTTPPROXY][ + ReportKeys.KindDetails.COUNT] == TestVals.RESOURCE_HTTPPROXY_COUNT + + for name in TestVals.RESOURCE_HTTPPROXY_LIST: + # make sure the expected HTTPProxy is available by name + # relationships to Service objects have not yet been made + assert name in gathered_resources_only_contour[KubernetesResource.Kinds.CONTOUR_HTTPPROXY][ITEMS_KEY] + + +@pytest.mark.usefixtures("gathered_resources_no_ingress") +def test_gather_details_httpproxy_no_ingress(gathered_resources_no_ingress: Dict) -> None: + """ + This test verifies that no details are gathered about HTTPProxy resources if no ingress kinds are found. 
+ + :param gathered_resources_no_ingress: test fixture + """ + assert KubernetesResource.Kinds.CONTOUR_HTTPPROXY not in gathered_resources_no_ingress + + +@pytest.mark.usefixtures("gathered_resources_all_ingress_defined_nginx_used") +def test_gather_details_ingresses_all_nginx_used(gathered_resources_all_ingress_defined_nginx_used: Dict) \ + -> None: + """ + This test verifies that expected details about Ingress resources are in the gathered details. + + :param gathered_resources_all_ingress_defined_nginx_used: test fixture. + """ + # make sure the Ingress resource count is correct + assert gathered_resources_all_ingress_defined_nginx_used[KubernetesResource.Kinds.INGRESS][ + ReportKeys.KindDetails.COUNT] == TestVals.RESOURCE_INGRESS_COUNT + + for name in TestVals.RESOURCE_INGRESS_LIST: + # make sure the expected Deployment is available by name + # relationships have not yet been defined between Ingress and Service objects + assert name in gathered_resources_all_ingress_defined_nginx_used[KubernetesResource.Kinds.INGRESS][ITEMS_KEY] + + +@pytest.mark.usefixtures("gathered_resources_only_nginx") +def test_gather_details_ingresses_only_nginx(gathered_resources_only_nginx: Dict) -> None: + """ + This test verifies that expected details about Ingress resources are in the gathered details. 
+ + :param gathered_resources_only_nginx: test fixture + """ + # make sure the Ingress resource count is correct + assert gathered_resources_only_nginx[KubernetesResource.Kinds.INGRESS][ReportKeys.KindDetails.COUNT] == \ + TestVals.RESOURCE_INGRESS_COUNT + + for name in TestVals.RESOURCE_INGRESS_LIST: + # make sure the expected Deployment is available by name + # relationships have not yet been defined between Ingress and Service objects + assert name in gathered_resources_only_nginx[KubernetesResource.Kinds.INGRESS][ITEMS_KEY] + + +@pytest.mark.usefixtures("gathered_resources_no_ingress") +def test_gather_details_ingresses_no_ingress(gathered_resources_no_ingress: Dict) -> None: + """ + This test verifies that no details are gathered about Ingress resources if no ingress kinds are found. + + :param gathered_resources_no_ingress: test fixture + """ + assert KubernetesResource.Kinds.INGRESS not in gathered_resources_no_ingress + + +@pytest.mark.usefixtures("gathered_resources_only_nginx") +def test_gather_details_jobs(gathered_resources_only_nginx: Dict) -> None: + """ + This test verifies that expected details about Job resources are in the gathered details. 
+ + :param gathered_resources_only_nginx: test fixture + """ + # make sure the Job resource count is correct + assert gathered_resources_only_nginx[KubernetesResource.Kinds.JOB][ReportKeys.KindDetails.COUNT] == \ + TestVals.RESOURCE_JOB_COUNT + + for name in TestVals.RESOURCE_JOB_LIST: + # make sure the expected Job is available by name + assert name in gathered_resources_only_nginx[KubernetesResource.Kinds.JOB][ITEMS_KEY] + + # the Job object is owned by the CronJob object, make sure it has an ext.relationships definition with 1 + # relationship + assert len(gathered_resources_only_nginx[KubernetesResource.Kinds.JOB][ITEMS_KEY][name][ + ReportKeys.ResourceDetails.EXT_DICT][ReportKeys.ResourceDetails.Ext.RELATIONSHIPS_LIST]) == 1 + + +@pytest.mark.usefixtures("gathered_resources_only_nginx") +def test_gather_details_pods(gathered_resources_only_nginx: Dict) -> None: + """ + This test verifies that expected details about Pod resources are in the gathered details. + + :param gathered_resources_only_nginx: test fixture + """ + # make sure the Pod resource count is correct + assert gathered_resources_only_nginx[KubernetesResource.Kinds.POD][ReportKeys.KindDetails.COUNT] == \ + TestVals.RESOURCE_POD_COUNT + + for name in TestVals.RESOURCE_POD_LIST: + # make sure the expected Pod is available by name + assert name in gathered_resources_only_nginx[KubernetesResource.Kinds.POD][ITEMS_KEY] + + # the Pod object is owned by controller objects, make sure it has an ext.relationships definition with 1 + # relationship + assert len(gathered_resources_only_nginx[KubernetesResource.Kinds.POD][ITEMS_KEY][name][ + ReportKeys.ResourceDetails.EXT_DICT][ReportKeys.ResourceDetails.Ext.RELATIONSHIPS_LIST]) == 1 + + +@pytest.mark.usefixtures("gathered_resources_only_nginx") +def test_gather_details_replica_sets(gathered_resources_only_nginx: Dict) -> None: + """ + This test verifies that expected details about ReplicaSet resources are in the gathered details. 
+ + :param gathered_resources_only_nginx: test fixture + """ + # make sure the ReplicaSet resource count is correct + assert gathered_resources_only_nginx[KubernetesResource.Kinds.REPLICA_SET][ReportKeys.KindDetails.COUNT] == \ + TestVals.RESOURCE_REPLICA_SET_COUNT + + for name in TestVals.RESOURCE_REPLICA_SET_LIST: + # make sure the expected ReplicaSet is available by name + assert name in gathered_resources_only_nginx[KubernetesResource.Kinds.REPLICA_SET][ITEMS_KEY] + + # the ReplicaSet object is owned Deployment objects, make sure it has an ext.relationships definition with 1 + # relationship + assert len(gathered_resources_only_nginx[KubernetesResource.Kinds.REPLICA_SET][ITEMS_KEY][name][ + ReportKeys.ResourceDetails.EXT_DICT][ReportKeys.ResourceDetails.Ext.RELATIONSHIPS_LIST]) == 1 + + +@pytest.mark.usefixtures("gathered_resources_all_ingress_defined_openshift_used") +def test_gather_details_route_all_openshift_used(gathered_resources_all_ingress_defined_openshift_used: Dict) -> None: + """ + This test verifies that expected details about Route resources are in the gathered details. + + :param gathered_resources_all_ingress_defined_openshift_used: test fixture + """ + # make sure the Route resource count is correct + assert gathered_resources_all_ingress_defined_openshift_used[KubernetesResource.Kinds.OPENSHIFT_ROUTE][ + ReportKeys.KindDetails.COUNT] == TestVals.RESOURCE_ROUTE_COUNT + + for name in TestVals.RESOURCE_ROUTE_LIST: + # make sure the expected Route is available by name + # relationships to Service objects have not yet been made + assert name in gathered_resources_all_ingress_defined_openshift_used[KubernetesResource.Kinds.OPENSHIFT_ROUTE][ + ITEMS_KEY] + + +@pytest.mark.usefixtures("gathered_resources_only_openshift") +def test_gather_details_route_only_openshift(gathered_resources_only_openshift: Dict) -> None: + """ + This test verifies that expected details about Route resources are in the gathered details. 
+
+    :param gathered_resources_only_openshift: test fixture
+    """
+    # make sure the Route resource count is correct
+    assert gathered_resources_only_openshift[KubernetesResource.Kinds.OPENSHIFT_ROUTE][
+        ReportKeys.KindDetails.COUNT] == TestVals.RESOURCE_ROUTE_COUNT
+
+    for name in TestVals.RESOURCE_ROUTE_LIST:
+        # make sure the expected Route is available by name
+        # relationships to Service objects have not yet been made
+        assert name in gathered_resources_only_openshift[KubernetesResource.Kinds.OPENSHIFT_ROUTE][ITEMS_KEY]
+
+
+@pytest.mark.usefixtures("gathered_resources_no_ingress")
+def test_gather_details_route_no_ingress(gathered_resources_no_ingress: Dict) -> None:
+    """
+    This test verifies that no details are gathered about Route resources if no ingress kinds are found.
+
+    :param gathered_resources_no_ingress: test fixture
+    """
+    assert KubernetesResource.Kinds.OPENSHIFT_ROUTE not in gathered_resources_no_ingress
+
+
+@pytest.mark.usefixtures("gathered_resources_only_nginx")
+def test_gather_details_services(gathered_resources_only_nginx: Dict) -> None:
+    """
+    This test verifies that expected details about Service resources are in the gathered details.
+
+    :param gathered_resources_only_nginx: test fixture
+    """
+    # make sure the Service resource count is correct
+    assert gathered_resources_only_nginx[KubernetesResource.Kinds.SERVICE][ReportKeys.KindDetails.COUNT] == \
+        TestVals.RESOURCE_SERVICE_COUNT
+
+    for name in TestVals.RESOURCE_SERVICE_LIST:
+        # make sure the expected Service is available by name
+        # relationships to pods and networking objects have not yet been made
+        assert name in gathered_resources_only_nginx[KubernetesResource.Kinds.SERVICE][ITEMS_KEY]
+
+
+@pytest.mark.usefixtures("gathered_resources_only_nginx")
+def test_gather_details_stateful_sets(gathered_resources_only_nginx: Dict) -> None:
+    """
+    This test verifies that expected details about StatefulSet resources are in the gathered details.
+ + :param gathered_resources_only_nginx: test fixture + """ + # make sure the StatefulSet resource count is correct + assert gathered_resources_only_nginx[KubernetesResource.Kinds.STATEFUL_SET][ReportKeys.KindDetails.COUNT] == \ + TestVals.RESOURCE_STATEFUL_SET_COUNT + + # make sure the expected StatefulSet is available by name + assert TestVals.COMPONENT_SAS_CACHE_SERVER_STATEFUL_SET_NAME in \ + gathered_resources_only_nginx[KubernetesResource.Kinds.STATEFUL_SET][ITEMS_KEY] + + +@pytest.mark.usefixtures("gathered_resources_all_ingress_defined_istio_used") +def test_gather_details_virtual_services_all_istio_used(gathered_resources_all_ingress_defined_istio_used: Dict) \ + -> None: + """ + This test verifies that expected details about VirtualService resources are in the gathered details. + + :param gathered_resources_all_ingress_defined_istio_used: test fixture + """ + # make sure the VirtualService resource count is correct + assert gathered_resources_all_ingress_defined_istio_used[KubernetesResource.Kinds.ISTIO_VIRTUAL_SERVICE][ + ReportKeys.KindDetails.COUNT] == TestVals.RESOURCE_VIRTUAL_SERVICE_COUNT + + for name in TestVals.RESOURCE_VIRTUAL_SERVICE_LIST: + # make sure the expected VirtualService is available by name + # relationships to Service objects have not yet been made + assert name in \ + gathered_resources_all_ingress_defined_istio_used[KubernetesResource.Kinds.ISTIO_VIRTUAL_SERVICE][ + ITEMS_KEY] + + +@pytest.mark.usefixtures("gathered_resources_only_istio") +def test_gather_details_virtual_services_only_istio(gathered_resources_only_istio: Dict) -> None: + """ + This test verifies that expected details about VirtualService resources are in the gathered details. 
+ + :param gathered_resources_only_istio: test fixture + """ + # make sure the VirtualService resource count is correct + assert gathered_resources_only_istio[KubernetesResource.Kinds.ISTIO_VIRTUAL_SERVICE][ + ReportKeys.KindDetails.COUNT] == TestVals.RESOURCE_VIRTUAL_SERVICE_COUNT + + for name in TestVals.RESOURCE_VIRTUAL_SERVICE_LIST: + # make sure the expected VirtualService is available by name + # relationships to Service objects have not yet been made + assert name in gathered_resources_only_istio[KubernetesResource.Kinds.ISTIO_VIRTUAL_SERVICE][ITEMS_KEY] + + +@pytest.mark.usefixtures("gathered_resources_no_ingress") +def test_gather_details_virtual_service_no_ingress(gathered_resources_no_ingress: Dict) -> None: + """ + This test verifies that no details are gathered about VirtualService resources if no ingress kinds are found. + + :param gathered_resources_no_ingress: test fixture + """ + assert KubernetesResource.Kinds.ISTIO_VIRTUAL_SERVICE not in gathered_resources_no_ingress diff --git a/deployment_report/model/utils/test/test_viya_deployment_report_utils.py b/deployment_report/model/utils/test/test_viya_deployment_report_utils.py deleted file mode 100644 index 03aa56c..0000000 --- a/deployment_report/model/utils/test/test_viya_deployment_report_utils.py +++ /dev/null @@ -1,1190 +0,0 @@ -#################################################################### -# ### test_viya_deployment_report_utils.py ### -#################################################################### -# ### Author: SAS Institute Inc. ### -#################################################################### -# ### -# Copyright (c) 2021, SAS Institute Inc., Cary, NC, USA. ### -# All Rights Reserved. 
### -# SPDX-License-Identifier: Apache-2.0 ### -# ### -#################################################################### -import copy -import pytest - -from typing import Dict, List, Text - -from deployment_report.model.static.viya_deployment_report_ingress_controller import \ - ViyaDeploymentReportIngressController as ExpectedIngressController -from deployment_report.model.static.viya_deployment_report_keys import ViyaDeploymentReportKeys as ReportKeys -from deployment_report.model.static.viya_deployment_report_keys import ITEMS_KEY, NAME_KEY -from deployment_report.model.utils.viya_deployment_report_utils import ViyaDeploymentReportUtils - -from viya_ark_library.k8s.sas_k8s_objects import KubernetesApiResources, KubernetesResource -from viya_ark_library.k8s.test_impl.sas_kubectl_test import KubectlTest - -# alias to KubectlTest.Values to shorten name -TestVals: KubectlTest.Values = KubectlTest.Values() - - -#################################################################### -# Unit Test Fixtures ### -#################################################################### -@pytest.fixture(scope="module") -def gathered_resources() -> Dict: - """ - This fixture creates a KubectlTest object, collects the available api-resources and calls - ViyaDeploymentReport.gather_resource_details() to gather all resources to create complete components. - Relationships not defined by gather_resource_details() are not defined in the resulting dictionary to - allow for testing of the methods that define them. - - This method is once at the beginning before tests in this file are executed. All tests use the same instance. - - :return: A dictionary of all Resources gathered starting with the Pods defined. 
- """ - # set up kubectl and get API resources - kubectl: KubectlTest = KubectlTest(ingress_simulator=KubectlTest.IngressSimulator.BOTH) - api_resources: KubernetesApiResources = kubectl.api_resources() - - # set up dict to hold gathered resources - gathered_resources: Dict = dict() - - # create a list of resource kinds to gather - # nodes and networking kinds do not typically have owning objects, so these need to be called individually - kinds_list: List = [ - KubernetesResource.Kinds.CONFIGMAP, - KubernetesResource.Kinds.INGRESS, - KubernetesResource.Kinds.ISTIO_VIRTUAL_SERVICE, - KubernetesResource.Kinds.NODE, - KubernetesResource.Kinds.POD, - KubernetesResource.Kinds.SERVICE] - - for resource_kind in kinds_list: - ViyaDeploymentReportUtils.gather_resource_details( - kubectl=kubectl, - gathered_resources=gathered_resources, - api_resources=api_resources, - resource_kind=resource_kind) - - return gathered_resources - - -#################################################################### -# Unit Tests ### -#################################################################### -def test_gather_resource_details(gathered_resources: Dict) -> None: - """ - This test verifies that all resources gathered during the execution of the gathered_resources fixture are present. - The gathered_resources fixture includes Pods, which are the smallest unit, so Pods as well as all owning object, - nodes, and networking resources should be gathered. - """ - # make sure the correct number of resources categories are defined - assert len(gathered_resources) == TestVals.RESOURCE_KINDS_COUNT - - for kind in TestVals.RESOURCE_KINDS_LIST: - assert kind in gathered_resources - assert gathered_resources[kind][ReportKeys.KindDetails.AVAILABLE] is True - - -def test_gather_resource_details_cas_deployments(gathered_resources: Dict) -> None: - """ - This test verifies that expected details about CASDeployment resources are in the gathered details. 
- - :param gathered_resources: The dictionary of all gathered resources provided by the gathered_resources fixture. - """ - # make sure the CASDeployment count is correct - assert gathered_resources[KubernetesResource.Kinds.CAS_DEPLOYMENT][ReportKeys.KindDetails.COUNT] == \ - TestVals.RESOURCE_CAS_DEPLOYMENT_COUNT - - for name in TestVals.RESOURCE_CAS_DEPLOYMENT_LIST: - # make sure the expected CASDeployment is available by name - assert name in gathered_resources[KubernetesResource.Kinds.CAS_DEPLOYMENT][ITEMS_KEY] - - -def test_gather_resource_details_cron_jobs(gathered_resources: Dict) -> None: - """ - This test verifies that expected details about CronJob resources are in the gathered details. - - :param gathered_resources: The dictionary of all gathered resources provided by the gathered_resources fixture. - """ - # make sure the CronJob count is correct - assert gathered_resources[KubernetesResource.Kinds.CRON_JOB][ReportKeys.KindDetails.COUNT] == \ - TestVals.RESOURCE_CRON_JOB_COUNT - - for name in TestVals.RESOURCE_CRON_JOB_LIST: - # make sure the expected CronJob is available by name - assert name in gathered_resources[KubernetesResource.Kinds.CRON_JOB][ITEMS_KEY] - - -def test_gather_resource_details_deployments(gathered_resources: Dict) -> None: - """ - This test verifies that expected details about Deployment resources are in the gathered details. - - :param gathered_resources: The dictionary of all gathered resources provided by the gathered_resources fixture. 
- """ - # make sure the Deployment resource count is correct - assert gathered_resources[KubernetesResource.Kinds.DEPLOYMENT][ReportKeys.KindDetails.COUNT] == \ - TestVals.RESOURCE_DEPLOYMENT_COUNT - - for name in TestVals.RESOURCE_DEPLOYMENT_LIST: - # make sure the expected Deployment is available by name - assert name in gathered_resources[KubernetesResource.Kinds.DEPLOYMENT][ITEMS_KEY] - - -def test_gather_resource_details_ingresses(gathered_resources: Dict) -> None: - """ - This test verifies that expected details about Ingress resources are in the gathered details. - - :param gathered_resources: The dictionary of all gathered resources provided by the gathered_resources fixture. - """ - # make sure the Ingress resource count is correct - assert gathered_resources[KubernetesResource.Kinds.INGRESS][ReportKeys.KindDetails.COUNT] == \ - TestVals.RESOURCE_INGRESS_COUNT - - for name in TestVals.RESOURCE_INGRESS_LIST: - # make sure the expected Deployment is available by name - # relationships have not yet been defined between Ingress and Service objects - assert name in gathered_resources[KubernetesResource.Kinds.INGRESS][ITEMS_KEY] - - -def test_gather_resource_details_jobs(gathered_resources: Dict) -> None: - """ - This test verifies that expected details about Job resources are in the gathered details. - - :param gathered_resources: The dictionary of all gathered resources provided by the gathered_resources fixture. 
- """ - # make sure the Job resource count is correct - assert gathered_resources[KubernetesResource.Kinds.JOB][ReportKeys.KindDetails.COUNT] == \ - TestVals.RESOURCE_JOB_COUNT - - for name in TestVals.RESOURCE_JOB_LIST: - # make sure the expected Job is available by name - assert name in gathered_resources[KubernetesResource.Kinds.JOB][ITEMS_KEY] - - # the Job object is owned by the CronJob object, make sure it has an ext.relationships definition with 1 - # relationship - assert len(gathered_resources[KubernetesResource.Kinds.JOB][ITEMS_KEY][name][ - ReportKeys.ResourceDetails.EXT_DICT][ReportKeys.ResourceDetails.Ext.RELATIONSHIPS_LIST]) == 1 - - -def test_gather_resource_details_pods(gathered_resources: Dict) -> None: - """ - This test verifies that expected details about Pod resources are in the gathered details. - - :param gathered_resources: The dictionary of all gathered resources provided by the gathered_resources fixture. - """ - # make sure the Pod resource count is correct - assert gathered_resources[KubernetesResource.Kinds.POD][ReportKeys.KindDetails.COUNT] == \ - TestVals.RESOURCE_POD_COUNT - - for name in TestVals.RESOURCE_POD_LIST: - # make sure the expected Pod is available by name - assert name in gathered_resources[KubernetesResource.Kinds.POD][ITEMS_KEY] - - # the Pod object is owned by controller objects, make sure it has an ext.relationships definition with 1 - # relationship - assert len(gathered_resources[KubernetesResource.Kinds.POD][ITEMS_KEY][name][ - ReportKeys.ResourceDetails.EXT_DICT][ReportKeys.ResourceDetails.Ext.RELATIONSHIPS_LIST]) == 1 - - -def test_gather_resource_details_replica_sets(gathered_resources: Dict) -> None: - """ - This test verifies that expected details about ReplicaSet resources are in the gathered details. - - :param gathered_resources: The dictionary of all gathered resources provided by the gathered_resources fixture. 
- """ - # make sure the ReplicaSet resource count is correct - assert gathered_resources[KubernetesResource.Kinds.REPLICA_SET][ReportKeys.KindDetails.COUNT] == \ - TestVals.RESOURCE_REPLICA_SET_COUNT - - for name in TestVals.RESOURCE_REPLICA_SET_LIST: - # make sure the expected ReplicaSet is available by name - assert name in gathered_resources[KubernetesResource.Kinds.REPLICA_SET][ITEMS_KEY] - - # the ReplicaSet object is owned Deployment objects, make sure it has an ext.relationships definition with 1 - # relationship - assert len(gathered_resources[KubernetesResource.Kinds.REPLICA_SET][ITEMS_KEY][name][ - ReportKeys.ResourceDetails.EXT_DICT][ReportKeys.ResourceDetails.Ext.RELATIONSHIPS_LIST]) == 1 - - -def test_gather_resource_details_services(gathered_resources: Dict) -> None: - """ - This test verifies that expected details about Service resources are in the gathered details. - - :param gathered_resources: The dictionary of all gathered resources provided by the gathered_resources fixture. - """ - # make sure the Service resource count is correct - assert gathered_resources[KubernetesResource.Kinds.SERVICE][ReportKeys.KindDetails.COUNT] == \ - TestVals.RESOURCE_SERVICE_COUNT - - for name in TestVals.RESOURCE_SERVICE_LIST: - # make sure the expected Service is available by name - # relationships to pods and networking objects have not yet been made - assert name in gathered_resources[KubernetesResource.Kinds.SERVICE][ITEMS_KEY] - - -def test_gather_resource_details_stateful_sets(gathered_resources: Dict) -> None: - """ - This test verifies that expected details about StatefulSet resources are in the gathered details. - - :param gathered_resources: The dictionary of all gathered resources provided by the gathered_resources fixture. 
- """ - # make sure the StatefulSet resource count is correct - assert gathered_resources[KubernetesResource.Kinds.STATEFUL_SET][ReportKeys.KindDetails.COUNT] == \ - TestVals.RESOURCE_STATEFUL_SET_COUNT - - # make sure the expected StatefulSet is available by name - assert TestVals.COMPONENT_SAS_CACHE_SERVER_STATEFUL_SET_NAME in \ - gathered_resources[KubernetesResource.Kinds.STATEFUL_SET][ITEMS_KEY] - - -def test_gather_resource_details_virtual_services(gathered_resources: Dict) -> None: - """ - This test verifies that expected details about VirtualService resources are in the gathered details. - - :param gathered_resources: The dictionary of all gathered resources provided by the gathered_resources fixture. - """ - # make sure the VirtualService resource count is correct - assert gathered_resources[KubernetesResource.Kinds.ISTIO_VIRTUAL_SERVICE][ReportKeys.KindDetails.COUNT] == \ - TestVals.RESOURCE_VIRTUAL_SERVICE_COUNT - - for name in TestVals.RESOURCE_VIRTUAL_SERVICE_LIST: - # make sure the expected VirtualService is available by name - # relationships to Service objects have not yet been made - assert name in gathered_resources[KubernetesResource.Kinds.ISTIO_VIRTUAL_SERVICE][ITEMS_KEY] - - -def test_gather_resource_details_from_services() -> None: - """ - This test verifies that all Services are gathered. The CAS Services have owning objects which will also cause - Deployments and CASDeployments to be discovered. This test is only concerned with Services and is meant to verify - that gather_resource_details() doesn't raise an error if Pods are not available. 
- """ - # set up kubectl and get API resources - kubectl: KubectlTest = KubectlTest() - api_resources: KubernetesApiResources = kubectl.api_resources() - - # set up dict to hold gathered resources - gathered_resources: Dict = dict() - - ViyaDeploymentReportUtils.gather_resource_details( - kubectl=kubectl, - gathered_resources=gathered_resources, - api_resources=api_resources, - resource_kind=KubernetesResource.Kinds.SERVICE) - - # make sure the correct number of resource categories were returned - assert len(gathered_resources) == 3 - - # make sure the correct number of services were returned - assert gathered_resources[KubernetesResource.Kinds.SERVICE][ReportKeys.KindDetails.COUNT] == \ - TestVals.RESOURCE_SERVICE_COUNT - - # make sure the expected Services are available by name - for service_name in TestVals.RESOURCE_SERVICE_LIST: - assert service_name in gathered_resources[KubernetesResource.Kinds.SERVICE][ITEMS_KEY] - - -def test_gather_resource_details_from_ingresses() -> None: - """ - This test verifies that all Ingresses are gathered. The ingress object doesn't have an owner, so only Ingresses - will be gathered. This test is mean to make sure gather_resource_details() doesn't raise an error if other - resources aren't available. 
- """ - # set up kubectl and get API resources - kubectl: KubectlTest = KubectlTest() - api_resources: KubernetesApiResources = kubectl.api_resources() - - # set up dict to hold gathered resources - gathered_resources: Dict = dict() - - ViyaDeploymentReportUtils.gather_resource_details( - kubectl=kubectl, - gathered_resources=gathered_resources, - api_resources=api_resources, - resource_kind=KubernetesResource.Kinds.INGRESS) - - # make sure the correct number of resource categories were returned - assert len(gathered_resources) == 1 - - # make sure the correct number of Ingress objects were returned - assert gathered_resources[KubernetesResource.Kinds.INGRESS][ReportKeys.KindDetails.COUNT] == \ - TestVals.RESOURCE_INGRESS_COUNT - - # make sure the expected Ingress is available - for name in TestVals.RESOURCE_INGRESS_LIST: - assert name in gathered_resources[KubernetesResource.Kinds.INGRESS][ITEMS_KEY] - - -def test_gather_resource_details_from_virtual_services() -> None: - """ - This test verifies that all VirtualServices are gathered. The VirtualService object doesn't have an owner, - so only VirtualServices will be gathered. 
- """ - # set up kubectl and get API resources - kubectl: KubectlTest = KubectlTest(ingress_simulator=KubectlTest.IngressSimulator.ISTIO_ONLY) - api_resources: KubernetesApiResources = kubectl.api_resources() - - # set up dict to hold gathered resources - gathered_resources: Dict = dict() - - ViyaDeploymentReportUtils.gather_resource_details( - kubectl=kubectl, - gathered_resources=gathered_resources, - api_resources=api_resources, - resource_kind=KubernetesResource.Kinds.ISTIO_VIRTUAL_SERVICE) - - # make sure the correct number of resource categories were returned - assert len(gathered_resources) == 1 - - # make sure the correct number of VirtualService objects were returned - assert gathered_resources[KubernetesResource.Kinds.ISTIO_VIRTUAL_SERVICE][ReportKeys.KindDetails.COUNT] == \ - TestVals.RESOURCE_VIRTUAL_SERVICE_COUNT - - # make sure the expected VirtualService is available - for name in TestVals.RESOURCE_VIRTUAL_SERVICE_LIST: - assert name in gathered_resources[KubernetesResource.Kinds.ISTIO_VIRTUAL_SERVICE][ITEMS_KEY] - - -def test_define_service_to_ingress_relationships(gathered_resources: Dict) -> None: - """ - This test verifies that the relationships between Services and Ingress objects are correctly defined. - - :param gathered_resources: The dictionary of all gathered resources provided by the gathered_resources fixture. 
- """ - # copy the gathered_resources dict so it won't be altered for other tests - gathered_resources_copy: Dict = copy.deepcopy(gathered_resources) - - # call utils to define Service to Ingress relationships - ViyaDeploymentReportUtils.define_service_to_ingress_relationships( - gathered_resources_copy[KubernetesResource.Kinds.SERVICE], - gathered_resources_copy[KubernetesResource.Kinds.INGRESS] - ) - - # get the Service details - service: Dict = gathered_resources_copy[KubernetesResource.Kinds.SERVICE][ITEMS_KEY][ - TestVals.COMPONENT_SAS_ANNOTATIONS_SERVICE_NAME] - - # makes sure relationship is defined and exists - assert ReportKeys.ResourceDetails.Ext.RELATIONSHIPS_LIST in service[ReportKeys.ResourceDetails.EXT_DICT] - assert len(service[ReportKeys.ResourceDetails.EXT_DICT][ReportKeys.ResourceDetails.Ext.RELATIONSHIPS_LIST]) == 2 - - # get the first relationship - rel: Dict = service[ReportKeys.ResourceDetails.EXT_DICT][ReportKeys.ResourceDetails.Ext.RELATIONSHIPS_LIST][0] - - # make sure the relationship attributes are correct - assert rel[ReportKeys.ResourceDetails.Ext.Relationship.KIND] == KubernetesResource.Kinds.INGRESS - assert rel[ReportKeys.ResourceDetails.Ext.Relationship.NAME] == \ - TestVals.COMPONENT_SAS_ANNOTATIONS_INGRESS_NAME_DEPRECATED_DEFINITION - - # get the second relationship - rel: Dict = service[ReportKeys.ResourceDetails.EXT_DICT][ReportKeys.ResourceDetails.Ext.RELATIONSHIPS_LIST][1] - - # make sure the relationship attributes are correct - assert rel[ReportKeys.ResourceDetails.Ext.Relationship.KIND] == KubernetesResource.Kinds.INGRESS - assert rel[ReportKeys.ResourceDetails.Ext.Relationship.NAME] == TestVals.COMPONENT_SAS_ANNOTATIONS_INGRESS_NAME - - -def test_define_service_to_virtual_service_relationships(gathered_resources: Dict) -> None: - """ - This test verifies that the service to virtual service relationships are correctly defined. 
- - :param gathered_resources: The dictionary of all gathered resources provided by the gathered_resources fixture. - """ - # copy the gathered_resources dict so it won't be altered for other tests - gathered_resources_copy: Dict = copy.deepcopy(gathered_resources) - - # call utils to define Service to VirtualService relationship - ViyaDeploymentReportUtils.define_service_to_virtual_service_relationships( - gathered_resources_copy[KubernetesResource.Kinds.SERVICE], - gathered_resources_copy[KubernetesResource.Kinds.ISTIO_VIRTUAL_SERVICE] - ) - - # get the Service resource - service: Dict = gathered_resources_copy[KubernetesResource.Kinds.SERVICE][ITEMS_KEY][ - TestVals.COMPONENT_SAS_ANNOTATIONS_SERVICE_NAME] - - # make sure the relationship was defined and exists - assert ReportKeys.ResourceDetails.Ext.RELATIONSHIPS_LIST in service[ReportKeys.ResourceDetails.EXT_DICT] - assert len(service[ReportKeys.ResourceDetails.EXT_DICT][ReportKeys.ResourceDetails.Ext.RELATIONSHIPS_LIST]) == 1 - - # get the relationship - rel: Dict = service[ReportKeys.ResourceDetails.EXT_DICT][ReportKeys.ResourceDetails.Ext.RELATIONSHIPS_LIST][0] - - # make sure the relationship attributes are correct - assert rel[ReportKeys.ResourceDetails.Ext.Relationship.KIND] == KubernetesResource.Kinds.ISTIO_VIRTUAL_SERVICE - assert rel[ReportKeys.ResourceDetails.Ext.Relationship.NAME] == \ - TestVals.COMPONENT_SAS_ANNOTATIONS_VIRTUAL_SERVICE_NAME - - -def test_define_pod_to_service_relationships(gathered_resources: Dict) -> None: - """ - This test verifies that the relationship between and pod and a service is correctly defined. - - :param gathered_resources: The dictionary of all gathered resources provided by the gathered_resources fixture. 
- """ - # copy the gathered_resources dict so it won't be altered for other tests - gathered_resources_copy: Dict = copy.deepcopy(gathered_resources) - - # reset the relationship lists for pods to make the service relationships easier to verify - for pod in gathered_resources_copy[KubernetesResource.Kinds.POD][ITEMS_KEY].values(): - pod[ReportKeys.ResourceDetails.EXT_DICT][ReportKeys.ResourceDetails.Ext.RELATIONSHIPS_LIST]: List = list() - - # call utils to define Pod to Service relationship - ViyaDeploymentReportUtils.define_pod_to_service_relationships( - gathered_resources_copy[KubernetesResource.Kinds.POD], - gathered_resources_copy[KubernetesResource.Kinds.SERVICE] - ) - - # prometheus pod - - # get the Pod resource - pod: Dict = gathered_resources_copy[KubernetesResource.Kinds.POD][ITEMS_KEY][TestVals.COMPONENT_PROMETHEUS_POD_NAME] - - # make sure the relationship was defined and exists - assert ReportKeys.ResourceDetails.Ext.RELATIONSHIPS_LIST in pod[ReportKeys.ResourceDetails.EXT_DICT] - assert len(pod[ReportKeys.ResourceDetails.EXT_DICT][ReportKeys.ResourceDetails.Ext.RELATIONSHIPS_LIST]) == 1 - - # get the relationship - rel: Dict = pod[ReportKeys.ResourceDetails.EXT_DICT][ReportKeys.ResourceDetails.Ext.RELATIONSHIPS_LIST][0] - - # make sure the relationship attributes are correct - assert rel[ReportKeys.ResourceDetails.Ext.Relationship.KIND] == KubernetesResource.Kinds.SERVICE - assert rel[ReportKeys.ResourceDetails.Ext.Relationship.NAME] == TestVals.COMPONENT_PROMETHEUS_SERVICE_NAME - - # sas-annotations pod - - # get the Pod resource - pod: Dict = gathered_resources_copy[KubernetesResource.Kinds.POD][ITEMS_KEY][ - TestVals.COMPONENT_SAS_ANNOTATIONS_POD_NAME] - - # make sure the relationship was defined and exists - assert ReportKeys.ResourceDetails.Ext.RELATIONSHIPS_LIST in pod[ReportKeys.ResourceDetails.EXT_DICT] - assert len(pod[ReportKeys.ResourceDetails.EXT_DICT][ReportKeys.ResourceDetails.Ext.RELATIONSHIPS_LIST]) == 1 - - # get the relationship - 
rel: Dict = pod[ReportKeys.ResourceDetails.EXT_DICT][ReportKeys.ResourceDetails.Ext.RELATIONSHIPS_LIST][0] - - # make sure the relationship attributes are correct - assert rel[ReportKeys.ResourceDetails.Ext.Relationship.KIND] == KubernetesResource.Kinds.SERVICE - assert rel[ReportKeys.ResourceDetails.Ext.Relationship.NAME] == TestVals.COMPONENT_SAS_ANNOTATIONS_SERVICE_NAME - - # sas-cacheserver pod - - # get the Pod resource - pod: Dict = gathered_resources_copy[KubernetesResource.Kinds.POD][ITEMS_KEY][ - TestVals.COMPONENT_SAS_CACHE_SERVER_POD_NAME] - - # make sure the relationship was defined and exists - assert ReportKeys.ResourceDetails.Ext.RELATIONSHIPS_LIST in pod[ReportKeys.ResourceDetails.EXT_DICT] - assert len(pod[ReportKeys.ResourceDetails.EXT_DICT][ReportKeys.ResourceDetails.Ext.RELATIONSHIPS_LIST]) == 1 - - # get the relationship - rel: Dict = pod[ReportKeys.ResourceDetails.EXT_DICT][ReportKeys.ResourceDetails.Ext.RELATIONSHIPS_LIST][0] - - # make sure the relationship attributes are correct - assert rel[ReportKeys.ResourceDetails.Ext.Relationship.KIND] == KubernetesResource.Kinds.SERVICE - assert rel[ReportKeys.ResourceDetails.Ext.Relationship.NAME] == TestVals.COMPONENT_SAS_CACHE_SERVER_SERVICE_NAME - - # sas-cas-operator pod - - # get the Pod resource - pod: Dict = gathered_resources_copy[KubernetesResource.Kinds.POD][ITEMS_KEY][ - TestVals.COMPONENT_SAS_CAS_OPERATOR_POD_NAME] - - # make sure the relationship was defined and exists - assert ReportKeys.ResourceDetails.Ext.RELATIONSHIPS_LIST in pod[ReportKeys.ResourceDetails.EXT_DICT] - assert len(pod[ReportKeys.ResourceDetails.EXT_DICT][ReportKeys.ResourceDetails.Ext.RELATIONSHIPS_LIST]) == 1 - - # get the relationship - rel: Dict = pod[ReportKeys.ResourceDetails.EXT_DICT][ReportKeys.ResourceDetails.Ext.RELATIONSHIPS_LIST][0] - - # make sure the relationship attributes are correct - assert rel[ReportKeys.ResourceDetails.Ext.Relationship.KIND] == KubernetesResource.Kinds.SERVICE - assert 
rel[ReportKeys.ResourceDetails.Ext.Relationship.NAME] == TestVals.COMPONENT_SAS_CAS_OPERATOR_SERVICE_NAME - - # sas-cas-server pod - - # get the Pod resource - pod: Dict = gathered_resources_copy[KubernetesResource.Kinds.POD][ITEMS_KEY][ - TestVals.COMPONENT_SAS_CAS_SERVER_POD_NAME] - - # make sure the relationship was defined and exists - assert ReportKeys.ResourceDetails.Ext.RELATIONSHIPS_LIST in pod[ReportKeys.ResourceDetails.EXT_DICT] - assert len(pod[ReportKeys.ResourceDetails.EXT_DICT][ReportKeys.ResourceDetails.Ext.RELATIONSHIPS_LIST]) == 2 - - # get the relationship - rel: Dict = pod[ReportKeys.ResourceDetails.EXT_DICT][ReportKeys.ResourceDetails.Ext.RELATIONSHIPS_LIST][0] - - # make sure the relationship attributes are correct - assert rel[ReportKeys.ResourceDetails.Ext.Relationship.KIND] == KubernetesResource.Kinds.SERVICE - assert rel[ReportKeys.ResourceDetails.Ext.Relationship.NAME] == TestVals.COMPONENT_SAS_CAS_SERVER_SERVICE_NAME - - # get the relationship - rel: Dict = pod[ReportKeys.ResourceDetails.EXT_DICT][ReportKeys.ResourceDetails.Ext.RELATIONSHIPS_LIST][1] - - # make sure the relationship attributes are correct - assert rel[ReportKeys.ResourceDetails.Ext.Relationship.KIND] == KubernetesResource.Kinds.SERVICE - assert rel[ReportKeys.ResourceDetails.Ext.Relationship.NAME] == TestVals.COMPONENT_SAS_CAS_SERVER_EXTNP_SERVICE_NAME - - -def test_define_node_to_pod_relationships(gathered_resources: Dict) -> None: - """ - This test verifies that the relationship between Nodes and Pods is correctly defined. - - :param gathered_resources: The dictionary of all gathered resources provided by the gathered_resources fixture. 
- """ - # copy the gathered_resources dict so it won't be altered for other tests - gathered_resources_copy: Dict = copy.deepcopy(gathered_resources) - - # call utils to define Node to Pod relationship - ViyaDeploymentReportUtils.define_node_to_pod_relationships( - gathered_resources_copy[KubernetesResource.Kinds.NODE], - gathered_resources_copy[KubernetesResource.Kinds.POD] - ) - - # get the node - node: Dict = gathered_resources_copy[KubernetesResource.Kinds.NODE][ITEMS_KEY][TestVals.RESOURCE_NODE_1_NAME] - - # make sure the relationships were defined and all exist - assert ReportKeys.ResourceDetails.Ext.RELATIONSHIPS_LIST in node[ReportKeys.ResourceDetails.EXT_DICT] - assert len(node[ReportKeys.ResourceDetails.EXT_DICT][ReportKeys.ResourceDetails.Ext.RELATIONSHIPS_LIST]) == 6 - - # get the relationship - rel: Dict = node[ReportKeys.ResourceDetails.EXT_DICT][ReportKeys.ResourceDetails.Ext.RELATIONSHIPS_LIST][0] - - # make sure the relationship attributes are correct - assert rel[ReportKeys.ResourceDetails.Ext.Relationship.KIND] == KubernetesResource.Kinds.POD - assert rel[ReportKeys.ResourceDetails.Ext.Relationship.NAME] == TestVals.COMPONENT_PROMETHEUS_POD_NAME - - # get the relationship - rel: Dict = node[ReportKeys.ResourceDetails.EXT_DICT][ReportKeys.ResourceDetails.Ext.RELATIONSHIPS_LIST][1] - - # make sure the relationship attributes are correct - assert rel[ReportKeys.ResourceDetails.Ext.Relationship.KIND] == KubernetesResource.Kinds.POD - assert rel[ReportKeys.ResourceDetails.Ext.Relationship.NAME] == TestVals.COMPONENT_SAS_ANNOTATIONS_POD_NAME - - # get the relationship - rel: Dict = node[ReportKeys.ResourceDetails.EXT_DICT][ReportKeys.ResourceDetails.Ext.RELATIONSHIPS_LIST][2] - - # make sure the relationship attributes are correct - assert rel[ReportKeys.ResourceDetails.Ext.Relationship.KIND] == KubernetesResource.Kinds.POD - assert rel[ReportKeys.ResourceDetails.Ext.Relationship.NAME] == TestVals.COMPONENT_SAS_CACHE_SERVER_POD_NAME - - # get the 
relationship - rel: Dict = node[ReportKeys.ResourceDetails.EXT_DICT][ReportKeys.ResourceDetails.Ext.RELATIONSHIPS_LIST][3] - - # make sure the relationship attributes are correct - assert rel[ReportKeys.ResourceDetails.Ext.Relationship.KIND] == KubernetesResource.Kinds.POD - assert rel[ReportKeys.ResourceDetails.Ext.Relationship.NAME] == TestVals.COMPONENT_SAS_CAS_OPERATOR_POD_NAME - - # get the relationship - rel: Dict = node[ReportKeys.ResourceDetails.EXT_DICT][ReportKeys.ResourceDetails.Ext.RELATIONSHIPS_LIST][4] - - # make sure the relationship attributes are correct - assert rel[ReportKeys.ResourceDetails.Ext.Relationship.KIND] == KubernetesResource.Kinds.POD - assert rel[ReportKeys.ResourceDetails.Ext.Relationship.NAME] == TestVals.COMPONENT_SAS_CAS_SERVER_POD_NAME - - # get the relationship - rel: Dict = node[ReportKeys.ResourceDetails.EXT_DICT][ReportKeys.ResourceDetails.Ext.RELATIONSHIPS_LIST][5] - - # make sure the relationship attributes are correct - assert rel[ReportKeys.ResourceDetails.Ext.Relationship.KIND] == KubernetesResource.Kinds.POD - assert rel[ReportKeys.ResourceDetails.Ext.Relationship.NAME] == TestVals.COMPONENT_SAS_SCHEDULED_BACKUP_JOB_POD_NAME - - -def test_create_relationship_dict() -> None: - """ - This test verifies that a relationship dictionary is correctly created. - """ - # create dictionary - rel: Dict = ViyaDeploymentReportUtils._create_relationship_dict(KubernetesResource.Kinds.SERVICE, "foo") - - # make sure attributes are correct - assert isinstance(rel, dict) - assert rel[ReportKeys.ResourceDetails.Ext.Relationship.KIND] == KubernetesResource.Kinds.SERVICE - assert rel[ReportKeys.ResourceDetails.Ext.Relationship.NAME] == "foo" - - -def test_get_pod_metrics(gathered_resources: Dict) -> None: - """ - This test verifies that pod metrics are correctly defined per pod when metrics are available. - - :param gathered_resources: The dictionary of all gathered resources provided by the gathered_resources fixture. 
- """ - # copy the gathered_resources dict so it won't be altered for other tests - gathered_resources_copy: Dict = copy.deepcopy(gathered_resources) - - # get the list of all pods - pods: Dict = gathered_resources_copy[KubernetesResource.Kinds.POD] - - # add the metrics - ViyaDeploymentReportUtils.get_pod_metrics(kubectl=KubectlTest(), pods=pods) - - # verify that the metrics were added - for pod in pods[ITEMS_KEY].values(): - assert ReportKeys.ResourceDetails.Ext.METRICS_DICT in pod[ReportKeys.ResourceDetails.EXT_DICT] - - -def test_get_pod_metrics_unavailable(gathered_resources: Dict) -> None: - """ - This test verifies that no errors are raised when pod metrics are not available. - - :param gathered_resources: The dictionary of all gathered resources provided by the gathered_resources fixture. - """ - # copy the gathered_resources dict so it won't be altered for other tests - gathered_resources_copy: Dict = copy.deepcopy(gathered_resources) - - # get the list of all pods - pods: Dict = gathered_resources_copy[KubernetesResource.Kinds.POD] - - # try to add the metrics - ViyaDeploymentReportUtils.get_pod_metrics(kubectl=KubectlTest(include_metrics=False), pods=pods) - - # make sure the metrics dictionary was not added - for pod in pods[ITEMS_KEY].values(): - assert ReportKeys.ResourceDetails.Ext.METRICS_DICT not in pod[ReportKeys.ResourceDetails.EXT_DICT] - - -def test_get_node_metrics(gathered_resources: Dict) -> None: - """ - This test verifies that node metrics are correctly defined per node when metrics are available. - - :param gathered_resources: The dictionary of all gathered resources provided by the gathered_resources fixture. 
- """ - # copy the gathered_resources dict so it won't be altered for other tests - gathered_resources_copy: Dict = copy.deepcopy(gathered_resources) - - # get the list of all nodes - nodes: Dict = gathered_resources_copy[KubernetesResource.Kinds.NODE] - - # add the metrics - ViyaDeploymentReportUtils.get_node_metrics(kubectl=KubectlTest(), nodes=nodes) - - # make sure the metrics were added - for node in nodes[ITEMS_KEY].values(): - assert ReportKeys.ResourceDetails.Ext.METRICS_DICT in node[ReportKeys.ResourceDetails.EXT_DICT] - - -def test_get_node_metrics_unavailable(gathered_resources: Dict) -> None: - """ - This test verifies that no errors are raised when pod metrics are not available. - - :param gathered_resources: The dictionary of all gathered resources provided by the gathered_resources fixture. - """ - # copy the gathered_resources dict so it won't be altered for other tests - gathered_resources_copy: Dict = copy.deepcopy(gathered_resources) - - # get the list of all nodes - nodes: Dict = gathered_resources_copy[KubernetesResource.Kinds.NODE] - - # try to add the metrics - ViyaDeploymentReportUtils.get_node_metrics(kubectl=KubectlTest(include_metrics=False), nodes=nodes) - - # make sure the metrics dictionary was not added - for node in nodes[ITEMS_KEY].values(): - assert ReportKeys.ResourceDetails.Ext.METRICS_DICT not in node[ReportKeys.ResourceDetails.EXT_DICT] - - -def test_determine_ingress_controller_nginx_only() -> None: - """ - This test verifies that the ingress controller is correctly determined when only Ingress objects are available. 
- """ - # create a KubectlTest instance configured to simulate only NGINX artifacts - kubectl: KubectlTest = KubectlTest(ingress_simulator=KubectlTest.IngressSimulator.NGINX_ONLY) - - # get the list of api-resources with only NGINX kinds - api_resources: KubernetesApiResources = kubectl.api_resources() - - # create a dictionary to hold the gathered resources - gathered_resources: Dict = dict() - - # gather Ingress objects - ViyaDeploymentReportUtils.gather_resource_details( - kubectl=kubectl, - gathered_resources=gathered_resources, - api_resources=api_resources, - resource_kind=KubernetesResource.Kinds.INGRESS - ) - - # gather VirtualService objects (this is how the ViyaDeploymentReport behaves) - ViyaDeploymentReportUtils.gather_resource_details( - kubectl=kubectl, - gathered_resources=gathered_resources, - api_resources=api_resources, - resource_kind=KubernetesResource.Kinds.ISTIO_VIRTUAL_SERVICE - ) - - # call the utility method to determine the ingress controller - ingress_controller: Text = ViyaDeploymentReportUtils.determine_ingress_controller(gathered_resources) - - # assert that NGINX controller was determined - assert ingress_controller == ExpectedIngressController.KUBE_NGINX - - -def test_determine_ingress_controller_istio_only() -> None: - """ - This test verifies that the ingress controller is correctly determined when only VirtualService objects are - available. 
- """ - # create a KubectlTest object configured to simulate only ISTIO artifacts - kubectl: KubectlTest = KubectlTest(ingress_simulator=KubectlTest.IngressSimulator.ISTIO_ONLY) - - # get the api-resources without Ingress objects - api_resources: KubernetesApiResources = kubectl.api_resources() - - # create dictionary to hold gathered resources - gathered_resources: Dict = dict() - - # gather Ingress objects (this is how ViyaDeploymentReport behaves) - ViyaDeploymentReportUtils.gather_resource_details( - kubectl=kubectl, - gathered_resources=gathered_resources, - api_resources=api_resources, - resource_kind=KubernetesResource.Kinds.INGRESS - ) - - # gather VirtualService objects - ViyaDeploymentReportUtils.gather_resource_details( - kubectl=kubectl, - gathered_resources=gathered_resources, - api_resources=api_resources, - resource_kind=KubernetesResource.Kinds.ISTIO_VIRTUAL_SERVICE - ) - - # call the utility method to determine the ingress controller - ingress_controller: Text = ViyaDeploymentReportUtils.determine_ingress_controller(gathered_resources) - - # assert that ISTIO controller was determined - assert ingress_controller == ExpectedIngressController.ISTIO - - -def test_determine_ingress_controller_both_nginx_used() -> None: - """ - This test verifies that the ingress controller is correctly determined when both Ingress and VirtualService - resources are defined, but only Ingress objects are available. 
- """ - # create a KubectlTest object configured to simulate having both resources but only Ingress objects created - kubectl: KubectlTest = KubectlTest(ingress_simulator=KubectlTest.IngressSimulator.BOTH_RESOURCES_NGINX_USED) - - # get the api-resources with VirtualService and Ingress available - api_resources: KubernetesApiResources = kubectl.api_resources() - - # create a dictionary to hold the gathered resources - gathered_resources: Dict = dict() - - # gather Ingress objects - ViyaDeploymentReportUtils.gather_resource_details( - kubectl=kubectl, - gathered_resources=gathered_resources, - api_resources=api_resources, - resource_kind=KubernetesResource.Kinds.INGRESS - ) - - # gather VirtualService objects - ViyaDeploymentReportUtils.gather_resource_details( - kubectl=kubectl, - gathered_resources=gathered_resources, - api_resources=api_resources, - resource_kind=KubernetesResource.Kinds.ISTIO_VIRTUAL_SERVICE - ) - - # call the utility method to determine the ingress controller - ingress_controller: Text = ViyaDeploymentReportUtils.determine_ingress_controller(gathered_resources) - - # assert that NGINX controller was determined - assert ingress_controller == ExpectedIngressController.KUBE_NGINX - - -def test_determine_ingress_controller_both_istio_used() -> None: - """ - This test verifies that the ingress controller is correctly determined when both Ingress and VirtualService - resources are defined, but only VirtualService objects are available. 
- """ - # create a KubectlTest object configured to simulate having both resources but only VirtualService objects created - kubectl: KubectlTest = KubectlTest(ingress_simulator=KubectlTest.IngressSimulator.BOTH_RESOURCES_ISTIO_USED) - - # get the api-resources with both Ingress and VirtualService defined - api_resources: KubernetesApiResources = kubectl.api_resources() - - # create a dictionary to hold gathered resources - gathered_resources: Dict = dict() - - # gather Ingress objects - ViyaDeploymentReportUtils.gather_resource_details( - kubectl=kubectl, - gathered_resources=gathered_resources, - api_resources=api_resources, - resource_kind=KubernetesResource.Kinds.INGRESS - ) - - # gather VirtualService objects - ViyaDeploymentReportUtils.gather_resource_details( - kubectl=kubectl, - gathered_resources=gathered_resources, - api_resources=api_resources, - resource_kind=KubernetesResource.Kinds.ISTIO_VIRTUAL_SERVICE - ) - - # call the utility method to determine the ingress controller - ingress_controller: Text = ViyaDeploymentReportUtils.determine_ingress_controller(gathered_resources) - - # assert that ISTIO controller was determined - assert ingress_controller == ExpectedIngressController.ISTIO - - -def test_determine_ingress_controller_both(gathered_resources: Dict) -> None: - """ - This test verifies that the ingress controller is correctly determined when both Ingress and VirtualService - resources are defined, and both objects are available. This code path should determine the controller to be NGINIX - as that is the first kind checked for. 
- """ - # call the utility method to determine the ingress controller - ingress_controller: Text = ViyaDeploymentReportUtils.determine_ingress_controller(gathered_resources) - - # assert that NGINX controller was not determined - assert ingress_controller is ExpectedIngressController.KUBE_NGINX - - -def test_determine_ingress_controller_none() -> None: - """ - This test verifies that a None value is returned for the ingress controller when neither Ingress or VirtualService - objects are available. - """ - # create a KubectlTest object configured to have neither Ingress or VirtualService - kubectl: KubectlTest = KubectlTest(ingress_simulator=KubectlTest.IngressSimulator.NONE) - - # get the api-resources without Ingress or VirtualService - api_resources: KubernetesApiResources = kubectl.api_resources() - - # create a dictionary to hold gathered resources - gathered_resources: Dict = dict() - - # gather Ingress objects - ViyaDeploymentReportUtils.gather_resource_details( - kubectl=kubectl, - gathered_resources=gathered_resources, - api_resources=api_resources, - resource_kind=KubernetesResource.Kinds.INGRESS - ) - - # gather VirtualService objects - ViyaDeploymentReportUtils.gather_resource_details( - kubectl=kubectl, - gathered_resources=gathered_resources, - api_resources=api_resources, - resource_kind=KubernetesResource.Kinds.ISTIO_VIRTUAL_SERVICE - ) - - # call the utility method to determine the ingress controller - ingress_controller: Text = ViyaDeploymentReportUtils.determine_ingress_controller(gathered_resources) - - # assert that a controller was not determined - assert ingress_controller is None - - -def test_aggregate_component_resources_prometheus(gathered_resources: Dict) -> None: - """ - This test verifies that all resources which comprise the prometheus component are correctly aggregated to create - the component definition. - - :param gathered_resources: The dictionary of all gathered resources provided by the gathered_resources fixture. 
- """ - # copy the gathered_resources dict so it won't be altered for other tests - gathered_resources_copy: Dict = copy.deepcopy(gathered_resources) - - # define pod to service relationships to allow for aggregation - ViyaDeploymentReportUtils.define_pod_to_service_relationships( - pods=gathered_resources_copy[KubernetesResource.Kinds.POD], - services=gathered_resources_copy[KubernetesResource.Kinds.SERVICE] - ) - - # create a dictionary to hold the component - component: Dict = dict() - - # get the prometheus pod - pod: Dict = gathered_resources_copy[KubernetesResource.Kinds.POD][ITEMS_KEY][TestVals.COMPONENT_PROMETHEUS_POD_NAME] - - # aggregate the resources in the prometheus component - ViyaDeploymentReportUtils.aggregate_component_resources( - resource_details=pod, - gathered_resources=gathered_resources_copy, - component=component - ) - - # make sure the component name is correct - assert component[NAME_KEY] == TestVals.COMPONENT_PROMETHEUS_NAME - - # make sure the correct number of resources were aggregated - assert len(component[ITEMS_KEY]) == TestVals.COMPONENT_PROMETHEUS_RESOURCE_COUNT - - # make sure the all resources are accounted for - for kind, name_list in TestVals.COMPONENT_PROMETHEUS_RESOURCES_DICT.items(): - assert kind in component[ITEMS_KEY] - assert len(component[ITEMS_KEY][kind]) == len(name_list) - for name in name_list: - assert name in component[ITEMS_KEY][kind] - - -def test_aggregate_component_resources_sas_annotations(gathered_resources: Dict) -> None: - """ - This test verifies that all resources which comprise the sas-annotations component are correctly aggregated to - create the component definition. - - :param gathered_resources: The dictionary of all gathered resources provided by the gathered_resources fixture. 
- """ - # copy the gathered_resources dict so it won't be altered for other tests - gathered_resources_copy: Dict = copy.deepcopy(gathered_resources) - - # define the pod to service relationships to allow for aggregation - ViyaDeploymentReportUtils.define_pod_to_service_relationships( - pods=gathered_resources_copy[KubernetesResource.Kinds.POD], - services=gathered_resources_copy[KubernetesResource.Kinds.SERVICE] - ) - - # define the service to ingress relationships to allow for aggregation - ViyaDeploymentReportUtils.define_service_to_ingress_relationships( - services=gathered_resources_copy[KubernetesResource.Kinds.SERVICE], - ingresses=gathered_resources_copy[KubernetesResource.Kinds.INGRESS] - ) - - # define the service to virtual service relationships to allow for aggregation - ViyaDeploymentReportUtils.define_service_to_virtual_service_relationships( - services=gathered_resources_copy[KubernetesResource.Kinds.SERVICE], - virtual_services=gathered_resources_copy[KubernetesResource.Kinds.ISTIO_VIRTUAL_SERVICE] - ) - - # create a dictionary to hold the component - component: Dict = dict() - - # get the sas-annotations pod - pod: Dict = gathered_resources_copy[KubernetesResource.Kinds.POD][ITEMS_KEY][ - TestVals.COMPONENT_SAS_ANNOTATIONS_POD_NAME] - - # aggregate the sas-annotations resources - ViyaDeploymentReportUtils.aggregate_component_resources( - resource_details=pod, - gathered_resources=gathered_resources_copy, - component=component - ) - - # make sure the component name is correct - assert component[NAME_KEY] == TestVals.COMPONENT_SAS_ANNOTATIONS_NAME - - # make sure the correct number of resource types were aggregated - assert len(component[ITEMS_KEY]) == TestVals.COMPONENT_SAS_ANNOTATIONS_RESOURCE_COUNT - - # make sure the all resources are accounted for - for kind, name_list in TestVals.COMPONENT_SAS_ANNOTATIONS_RESOURCE_DICT.items(): - assert kind in component[ITEMS_KEY] - assert len(component[ITEMS_KEY][kind]) == len(name_list) - for name in 
name_list: - assert name in component[ITEMS_KEY][kind] - - -def test_aggregate_component_resources_sas_cache_server(gathered_resources: Dict) -> None: - """ - This test verifies that all resources which comprise the sas-cacheserver component are correctly aggregated to - create the component definition. - - :param gathered_resources: The dictionary of all gathered resources provided by the gathered_resources fixture. - """ - # copy the gathered_resources dict so it won't be altered for other tests - gathered_resources_copy: Dict = copy.deepcopy(gathered_resources) - - # define the pod to service relationships to allow for aggregation - ViyaDeploymentReportUtils.define_pod_to_service_relationships( - pods=gathered_resources_copy[KubernetesResource.Kinds.POD], - services=gathered_resources_copy[KubernetesResource.Kinds.SERVICE] - ) - - # create a dictionary to hold the component - component: Dict = dict() - - # get the sas-cacheserver pod - pod: Dict = gathered_resources_copy[KubernetesResource.Kinds.POD][ITEMS_KEY][ - TestVals.COMPONENT_SAS_CACHE_SERVER_POD_NAME] - - # aggregate the resources - ViyaDeploymentReportUtils.aggregate_component_resources( - resource_details=pod, - gathered_resources=gathered_resources_copy, - component=component - ) - - # make sure the component name is correct - assert component[NAME_KEY] == TestVals.COMPONENT_SAS_CACHE_SERVER_NAME - - # make sure the right number of resource types were aggregated - assert len(component[ITEMS_KEY]) == TestVals.COMPONENT_SAS_CACHE_SERVER_RESOURCE_COUNT - - # make sure the all resources are accounted for - for kind, name_list in TestVals.COMPONENT_SAS_CACHE_SERVER_RESOURCE_DICT.items(): - assert kind in component[ITEMS_KEY] - assert len(component[ITEMS_KEY][kind]) == len(name_list) - for name in name_list: - assert name in component[ITEMS_KEY][kind] - - -def test_aggregate_component_resources_sas_cas_operator(gathered_resources: Dict) -> None: - """ - This test verifies that all resources which comprise 
the sas-cas-operator component are correctly aggregated to - create the component definition. - - :param gathered_resources: The dictionary of all gathered resources provided by the gathered_resources fixture. - """ - # copy the gathered_resources dict so it won't be altered for other tests - gathered_resources_copy: Dict = copy.deepcopy(gathered_resources) - - # define the pod to service relationships to allow for aggregation - ViyaDeploymentReportUtils.define_pod_to_service_relationships( - pods=gathered_resources_copy[KubernetesResource.Kinds.POD], - services=gathered_resources_copy[KubernetesResource.Kinds.SERVICE] - ) - - # create a dictionary to hold the component - component: Dict = dict() - - # get the sas-cas-operator pod - pod: Dict = gathered_resources_copy[KubernetesResource.Kinds.POD][ITEMS_KEY][ - TestVals.COMPONENT_SAS_CAS_OPERATOR_POD_NAME] - - # aggregate the resources - ViyaDeploymentReportUtils.aggregate_component_resources( - resource_details=pod, - gathered_resources=gathered_resources_copy, - component=component - ) - - # make sure the component name is correct - assert component[NAME_KEY] == TestVals.COMPONENT_SAS_CAS_OPERATOR_NAME - - # make sure the correct number of resource types were aggregated - assert len(component[ITEMS_KEY]) == TestVals.COMPONENT_SAS_CAS_OPERATOR_RESOURCE_COUNT - - # make sure the all resources are accounted for - for kind, name_list in TestVals.COMPONENT_SAS_CAS_OPERATOR_RESOURCE_DICT.items(): - assert kind in component[ITEMS_KEY] - assert len(component[ITEMS_KEY][kind]) == len(name_list) - for name in name_list: - assert name in component[ITEMS_KEY][kind] - - -def test_aggregate_component_resources_sas_cas_server(gathered_resources: Dict) -> None: - """ - This test verifies that all resources which comprise the sas-cas-server component are correctly aggregated to - create the component definition. - - :param gathered_resources: The dictionary of all gathered resources provided by the gathered_resources fixture. 
- """ - # copy the gathered_resources dict so it won't be altered for other tests - gathered_resources_copy: Dict = copy.deepcopy(gathered_resources) - - # define the pod to service relationships to allow for aggregation - ViyaDeploymentReportUtils.define_pod_to_service_relationships( - pods=gathered_resources_copy[KubernetesResource.Kinds.POD], - services=gathered_resources_copy[KubernetesResource.Kinds.SERVICE] - ) - - # create a dictionary to hold the component - component: Dict = dict() - - # get the sas-cas-server pod - pod: Dict = gathered_resources_copy[KubernetesResource.Kinds.POD][ITEMS_KEY][ - TestVals.COMPONENT_SAS_CAS_SERVER_POD_NAME] - - # aggregate the resources - ViyaDeploymentReportUtils.aggregate_component_resources( - resource_details=pod, - gathered_resources=gathered_resources_copy, - component=component - ) - - # make sure the component name is correct - assert component[NAME_KEY] == TestVals.COMPONENT_SAS_CAS_SERVER_NAME - - # make sure the correct number of resource types were aggregated - assert len(component[ITEMS_KEY]) == TestVals.COMPONENT_SAS_CAS_SERVER_RESOURCE_COUNT - - # make sure the all resources are accounted for - for kind, name_list in TestVals.COMPONENT_SAS_CAS_SERVER_RESOURCE_DICT.items(): - assert kind in component[ITEMS_KEY] - assert len(component[ITEMS_KEY][kind]) == len(name_list) - for name in name_list: - assert name in component[ITEMS_KEY][kind] - - -def test_aggregate_component_resources_sas_scheduled_backup_job(gathered_resources: Dict) -> None: - """ - This test verifies that all resources which comprise the sas-scheduled-backup-job component are correctly aggregated - to create the component definition. - - :param gathered_resources: The dictionary of all gathered resources provided by the gathered_resources fixture. 
- """ - # copy the gathered_resources dict so it won't be altered for other tests - gathered_resources_copy: Dict = copy.deepcopy(gathered_resources) - - # create a dictionary to hold the component - component: Dict = dict() - - # get the sas-scheduled-backup-job pod - pod: Dict = gathered_resources_copy[KubernetesResource.Kinds.POD][ITEMS_KEY][ - TestVals.COMPONENT_SAS_SCHEDULED_BACKUP_JOB_POD_NAME] - - # aggregate the resources - ViyaDeploymentReportUtils.aggregate_component_resources( - resource_details=pod, - gathered_resources=gathered_resources_copy, - component=component - ) - - # make sure the component name is correct - assert component[NAME_KEY] == TestVals.COMPONENT_SAS_SCHEDULED_BACKUP_JOB_NAME - - # make sure the correct number of resource types were aggregated - assert len(component[ITEMS_KEY]) == TestVals.COMPONENT_SAS_SCHEDULED_BACKUP_JOB_RESOURCE_COUNT - - # make sure the all resources are accounted for - for kind, name_list in TestVals.COMPONENT_SAS_SCHEDULED_BACKUP_JOB_RESOURCE_DICT.items(): - assert kind in component[ITEMS_KEY] - assert len(component[ITEMS_KEY][kind]) == len(name_list) - for name in name_list: - assert name in component[ITEMS_KEY][kind] - - -def test_get_cadence_version(gathered_resources: Dict) -> None: - """ - This test verifies that the provided cadence data is returned when values is passed to get_cadence_version(). - - :param report: The populated ViyaDeploymentReport returned by the report() fixture. - """ - # check for expected attributes - - cadence_data = KubectlTest.get_resources(KubectlTest(), "ConfigMaps") - cadence_info: Text = None - - for c in cadence_data: - cadence_info = ViyaDeploymentReportUtils.get_cadence_version(c) - if cadence_info: - break - - assert cadence_info == KubectlTest.Values.CADENCEINFO - - -def test_get_db_info(gathered_resources: Dict) -> None: - """ - This test verifies that the provided db data is returned when values is passed to get_db_info(). 
- - :param report: The populated ViyaDeploymentReport returned by the report() fixture. - """ - # check for expected attributes - - db_data = KubectlTest.get_resources(KubectlTest(), "ConfigMaps") - db_dict: Dict = dict() - - for c in db_data: - db_dict = ViyaDeploymentReportUtils.get_db_info(c) - if db_dict: - break - - assert db_dict["Type"] == KubectlTest.Values.DBINFO diff --git a/deployment_report/model/utils/viya_deployment_report_utils.py b/deployment_report/model/utils/viya_deployment_report_utils.py deleted file mode 100644 index 7dcd992..0000000 --- a/deployment_report/model/utils/viya_deployment_report_utils.py +++ /dev/null @@ -1,562 +0,0 @@ -#################################################################### -# ### viya_deployment_report_utils.py ### -#################################################################### -# ### Author: SAS Institute Inc. ### -#################################################################### -# ### -# Copyright (c) 2021, SAS Institute Inc., Cary, NC, USA. ### -# All Rights Reserved. ### -# SPDX-License-Identifier: Apache-2.0 ### -# ### -#################################################################### -from subprocess import CalledProcessError -from typing import Dict, List, Optional, Text - -from deployment_report.model.static.viya_deployment_report_keys import ITEMS_KEY, NAME_KEY -from deployment_report.model.static.viya_deployment_report_ingress_controller \ - import ViyaDeploymentReportIngressController -from deployment_report.model.static.viya_deployment_report_keys import ViyaDeploymentReportKeys as Keys - -from viya_ark_library.k8s.sas_k8s_objects import KubernetesApiResources, KubernetesResource -from viya_ark_library.k8s.sas_kubectl_interface import KubectlInterface - - -class ViyaDeploymentReportUtils(object): - """ - Helper class for the ViyaDeploymentReport model which defines statically invoked methods for running repeatable or - atomic workflow tasks. 
- """ - - @staticmethod - def gather_resource_details(kubectl: KubectlInterface, gathered_resources: Dict, - api_resources: KubernetesApiResources, resource_kind: Text) -> None: - """ - Static method for gathering details about resources in the target Kubernetes cluster. - - The method is called recursively and will gather details about any resources described in current resource's - "ownerReferences", if defined. If all discovered resource kinds are listable, a complete ownership chain will be - gathered. - - :param kubectl: The KubectlInterface object for issuing requests to the target Kubernetes cluster. - :param gathered_resources: The dictionary where gathered resources will be stored. - :param api_resources: The Kubectle.ApiResources object defining the API resources of the target Kubernetes - cluster. - :param resource_kind: The 'kind' value of the resources to gather. - """ - # if an attempt has been made to gather this kind, return without moving forward # - if resource_kind in gathered_resources: - return - - # get the requested resources from the k8s API # - resource_name: Text = api_resources.get_name(resource_kind) - resources: List[KubernetesResource] = list() - resource_available: bool = True - if resource_name is not None: - try: - resources: Optional[List[KubernetesResource]] = kubectl.get_resources( - api_resources.get_name(resource_kind)) - except CalledProcessError as e: - if resource_kind == KubernetesResource.Kinds.POD: - # if a CalledProcessError is raised for pods, surface the error # - # if the resource kind is not "Pod", move forward without raising an error since # - # pods can still be reported # - raise e - else: - # note that this resource was not available - resource_available = False - - # save the resources by kind # - gathered_resources[resource_kind]: Dict = dict() - - # create a key to note whether this resource kind was available for listing: bool # - gathered_resources[resource_kind][Keys.KindDetails.AVAILABLE]: bool = 
resource_available - - # create a key to define the number of resources of this kind returned by k8s: int # - gathered_resources[resource_kind][Keys.KindDetails.COUNT]: int = len(resources) - - # create a key to hold the resources returned by k8s: dict # - gathered_resources[resource_kind][ITEMS_KEY]: bool = dict() - - # store a unique list of kinds in any 'ownerReferences' definitions # - owner_kinds: List = list() - - # loop over the resources returned # - for resource in resources: - # remove the 'managedFields' key, if it exists, to reduce file size - resource.get_metadata().pop(KubernetesResource.Keys.MANAGED_FIELDS, None) - - # add the resource to its kind dictionary # - # create a key set to the name of the resource, under which all resource details will be stored: dict # - resource_details = gathered_resources[resource_kind][ITEMS_KEY][resource.get_name()] = dict() - - # create a key to hold extra details about the resource not provided in the resource definition: dict # - resource_details[Keys.ResourceDetails.EXT_DICT]: Dict = dict() - - # create a key to hold the ext.relationships list, which defines the name and kind of resources # - # related to this resource: list # - resource_relationship_list = resource_details[Keys.ResourceDetails.EXT_DICT][ - Keys.ResourceDetails.Ext.RELATIONSHIPS_LIST] = list() - - # create a key under which the resource definition will be stored: KubernetesResource # - resource_details[Keys.ResourceDetails.RESOURCE_DEFINITION]: KubernetesResource = resource - - # see if this resource defines any 'ownerReferences' # - owner_references: List = resource.get_metadata_value(KubernetesResource.Keys.OWNER_REFERENCES) - - # if the resource does define 'ownerReferences', process them # - if owner_references is not None: - # iterate over the references # - for owner_reference in owner_references: - # if the owner reference kind isn't in the owner_kinds list, add it # - if owner_reference[KubernetesResource.Keys.KIND] not in owner_kinds: - 
owner_kinds.append(owner_reference[KubernetesResource.Keys.KIND]) - - # add the owning object to the resource's relationships extension list # - relationship: Dict = ViyaDeploymentReportUtils._create_relationship_dict( - owner_reference[KubernetesResource.Keys.KIND], - owner_reference[KubernetesResource.Keys.NAME]) - - resource_relationship_list.append(relationship) - - # if more kinds have been discovered, gather them as well # - for owner_kind in owner_kinds: - ViyaDeploymentReportUtils.gather_resource_details(kubectl, gathered_resources, api_resources, owner_kind) - - @staticmethod - def define_service_to_ingress_relationships(services: Dict, ingresses: Dict) -> None: - """ - Static method that defines the upstream ext.relationship from a Service to the Ingress that controls - its in-bound HTTP traffic. - - :param services: The Service resources gathered in the Kubernetes cluster. - :param ingresses: The Ingress resources gathered in the Kubernetes cluster. - """ - # the relationship can only be determined if both resources are defined # - if services[Keys.KindDetails.COUNT] > 0 and ingresses[Keys.KindDetails.COUNT] > 0: - - # iterate over all Ingress objects and find for which Services they define paths # - for ingress_details in ingresses[ITEMS_KEY].values(): - # get the definition for the current Ingress # - ingress: KubernetesResource = ingress_details[Keys.ResourceDetails.RESOURCE_DEFINITION] - - # get the rules for this Ingress # - rules: List = ingress.get_spec_value(KubernetesResource.Keys.RULES) - - # iterate over all rules to process all http paths defined # - for rule in rules: - # get the http paths for this rule # - http_paths: List = rule[KubernetesResource.Keys.HTTP][KubernetesResource.Keys.PATHS] - - # iterate over all http paths to process each backend defined # - for http_path in http_paths: - - # init the service name var - service_name: Text - - # check if this is the current Ingress definition schema - if 
ingress.get_api_version().startswith("networking.k8s.io"): - # get the Service name for this path # - service_name = \ - http_path[KubernetesResource.Keys.BACKEND][KubernetesResource.Keys.SERVICE][ - KubernetesResource.Keys.NAME] - - # otherwise, use the old definition schema - else: - # get the Service name for this path - service_name = \ - http_path[KubernetesResource.Keys.BACKEND][KubernetesResource.Keys.SERVICE_NAME] - - try: - # get the Service associated with this path # - service: Dict = services[ITEMS_KEY][service_name] - - # create the relationship to the Ingress and add it to the Service's relationships # - ingress_relationship: Dict = ViyaDeploymentReportUtils._create_relationship_dict( - ingress.get_kind(), ingress.get_name()) - - service[Keys.ResourceDetails.EXT_DICT][ - Keys.ResourceDetails.Ext.RELATIONSHIPS_LIST].append(ingress_relationship) - except KeyError: - # if the Service isn't defined, move on without error # - pass - - @staticmethod - def define_service_to_virtual_service_relationships(services: Dict, virtual_services: Dict) -> None: - """ - Static method that defines the upstream ext.relationship from a Service to the VirtualService that - controls its traffic. - - :param services: The Service resources gathered in the Kubernetes cluster. - :param virtual_services: The VirtualService resources gathered in the Kubernetes cluster. 
- """ - # the relationship can only be determined if both resources are defined # - if services[Keys.KindDetails.COUNT] > 0 and virtual_services[Keys.KindDetails.COUNT] > 0: - - # iterate over all VirtualService objects and find which Services they define routes for # - for virtual_service_details in virtual_services[ITEMS_KEY].values(): - # get the definition of the current VirtualService # - virtual_service: KubernetesResource = virtual_service_details[Keys.ResourceDetails.RESOURCE_DEFINITION] - - # get the http definitions for this VirtualService # - http_definitions: List = virtual_service.get_spec_value(KubernetesResource.Keys.HTTP) - - if http_definitions is not None: - # iterate over all http definitions to process their route definitions # - for http_definition in http_definitions: - # get the routes defined # - routes: List = http_definition.get(KubernetesResource.Keys.ROUTE) - - if routes is not None: - # iterate over all routes to process their destination hosts # - for route in routes: - # get the name of the Service associated with this route # - service_name: Text = route[KubernetesResource.Keys.DESTINATION][ - KubernetesResource.Keys.HOST] - - try: - # get the Service associated with this route # - service: Dict = services[ITEMS_KEY][service_name] - - # create the VirtualService relationship and add it to the Service's relationships # - virtual_service_relationship: Dict = \ - ViyaDeploymentReportUtils._create_relationship_dict(virtual_service.get_kind(), - virtual_service.get_name()) - - service[Keys.ResourceDetails.EXT_DICT][ - Keys.ResourceDetails.Ext.RELATIONSHIPS_LIST].append( - virtual_service_relationship) - except KeyError: - # if the Service isn't defined, move on without error # - pass - else: - tcp_definitions: List = virtual_service.get_spec_value(KubernetesResource.Keys.TCP) - - if tcp_definitions is not None: - # iterate over all tcp definitions to process their route definitions # - for tcp_definition in tcp_definitions: - # get the 
routes defined # - routes: List = tcp_definition.get(KubernetesResource.Keys.ROUTE) - - if routes is not None: - # iterate over all routes to process their destination hosts # - for route in routes: - # get the name of the Service associated with this route # - service_name: Text = route[KubernetesResource.Keys.DESTINATION][ - KubernetesResource.Keys.HOST] - - # remove any additional address information if given a full address - if "." in service_name: - service_name = service_name[:service_name.find(".")] - - try: - # get the Service associated with this route # - service: Dict = services[ITEMS_KEY][service_name] - - # create the VirtualService relationship and add it to the Service's # - # relationships # - virtual_service_relationship: Dict = \ - ViyaDeploymentReportUtils._create_relationship_dict( - virtual_service.get_kind(), - virtual_service.get_name()) - - service[Keys.ResourceDetails.EXT_DICT][ - Keys.ResourceDetails.Ext.RELATIONSHIPS_LIST].append( - virtual_service_relationship) - except KeyError: - # if the Service isn't defined, move on without error # - pass - - @staticmethod - def define_pod_to_service_relationships(pods: Dict, services: Dict) -> None: - """ - Static method that defines the upstream ext.relationship from a Pod to the Service that exposes the - Pod. - - :param pods: The Pod resources gathered in the Kubernetes cluster. - :param services: The Service resources gathered in the Kubernetes cluster. 
- """ - # create the association between Pod and Service, if both are defined # - if services[Keys.KindDetails.COUNT] > 0 and pods[Keys.KindDetails.COUNT] > 0: - - # iterate over all Services to process the defined selectors # - for service_details in services[ITEMS_KEY].values(): - # get the definition for this Service # - service: KubernetesResource = service_details[Keys.ResourceDetails.RESOURCE_DEFINITION] - - # get the selectors # - selectors: Dict = service.get_spec_value(KubernetesResource.Keys.SELECTOR) - - # if the Service doesn't define any selectors, continue to the next Service # - if selectors is None: - continue - - # loop through all Pods and find any with matching labels # - for pod_details in pods[ITEMS_KEY].values(): - # get the definition for this Pod # - pod: KubernetesResource = pod_details[Keys.ResourceDetails.RESOURCE_DEFINITION] - - # loop through the labels defined by the Service selector and make sure all exist on the Pod # - for selector_label, selector_value in selectors.items(): - # check if the Pod has the same label/value # - if pod.get_label(selector_label) != selector_value: - # if the label doesn't exist or isn't the same value, break the loop # - break - else: - # if the loop didn't break, add this Service to the Pod's relationships list # - service_relationship: Dict = ViyaDeploymentReportUtils._create_relationship_dict( - service.get_kind(), service.get_name()) - - pod_details[Keys.ResourceDetails.EXT_DICT][ - Keys.ResourceDetails.Ext.RELATIONSHIPS_LIST].append(service_relationship) - - @staticmethod - def define_node_to_pod_relationships(nodes: Dict, pods: Dict) -> None: - """ - Static method that defines the ext.relationship from a Node to the Pod running within the Node. - - :param nodes: The Node resources gathered in the Kubernetes cluster. - :param pods: The Pod resources gathered in the Kubernetes cluster. 
- """ - # create association between Node and Pod, if both are defined # - if pods[Keys.KindDetails.COUNT] > 0 and nodes[Keys.KindDetails.COUNT] > 0: - node_pods: Dict = dict() - - # loop over all Pods to get their Node definition # - for pod_details in pods[ITEMS_KEY].values(): - # get the definition of the current Pod # - pod: KubernetesResource = pod_details[Keys.ResourceDetails.RESOURCE_DEFINITION] - - # get the Pod's Node definition # - node_name: Text = pod.get_spec_value(KubernetesResource.Keys.NODE_NAME) - - try: - # create the Pod relationship and add it to the Node's relationships extension list # - relationship: Dict = ViyaDeploymentReportUtils._create_relationship_dict( - pod.get_kind(), pod.get_name()) - - nodes[ITEMS_KEY][node_name][Keys.ResourceDetails.EXT_DICT][ - Keys.ResourceDetails.Ext.RELATIONSHIPS_LIST].append(relationship) - - if node_name not in node_pods: - node_pods[node_name]: None = list() - - node_pods[node_name].append(pod.get_name()) - except KeyError: - # if the Node isn't defined, move on without error # - pass - - @staticmethod - def _create_relationship_dict(kind: Text, name: Text) -> Dict: - """ - _Internal static method for creating a relationship dict object for a resource's ext.relationship list. - - :param kind: The kind value of the related resource. - :param name: The name value of the related resource. - :return: The dictionary defining the related resource. - """ - relationship: Dict = dict() - relationship[Keys.ResourceDetails.Ext.Relationship.KIND]: Text = kind - relationship[Keys.ResourceDetails.Ext.Relationship.NAME]: Text = name - return relationship - - @staticmethod - def get_pod_metrics(kubectl: KubectlInterface, pods: Dict) -> None: - """ - Static method for retrieving Pod metrics from the Kubernetes API and defining the ext.metrics - dictionary for all gathered Pods. - - :param kubectl: The KubectlInterface object for issuing requests to the Kubernetes cluster for Pod metrics. 
- :param pods: The Pod resources gathered in the Kubernetes cluster. - """ - # get Pod metrics if Pods are defined # - if pods[Keys.KindDetails.COUNT] > 0: - try: - # get Pod metrics # - pod_metrics: Dict = kubectl.top_pods().as_dict() - - # iterate over the returned metrics and add them to the Pod extensions # - for pod_name, metrics in pod_metrics.items(): - try: - pods[ITEMS_KEY][pod_name][Keys.ResourceDetails.EXT_DICT][ - Keys.ResourceDetails.Ext.METRICS_DICT]: Dict = metrics - except KeyError: - # if the Pod isn't defined, move on without error # - pass - - except CalledProcessError: - # if Pod metrics aren't available, move on without error # - pass - - @staticmethod - def get_node_metrics(kubectl: KubectlInterface, nodes: Dict) -> None: - """ - Static method for retrieving Node metrics from the Kubernetes API and defining the metrics extension - for all gathered Nodes. - - :param kubectl: The KubectlInterface object for issuing requests to the Kubernetes cluster for Node metrics. - :param nodes: The Node resources gathered in the Kubernetes cluster. - """ - # get Node metrics if Nodes are defined # - if nodes[Keys.KindDetails.COUNT] > 0: - try: - # get Node metrics # - node_metrics: Dict = kubectl.top_nodes().as_dict() - - # iterate over the returned metrics and add them to the Node extensions # - for node_name, metrics in node_metrics.items(): - try: - nodes[ITEMS_KEY][node_name][Keys.ResourceDetails.EXT_DICT][ - Keys.ResourceDetails.Ext.METRICS_DICT]: Dict = metrics - except KeyError: - # if the Node isn't defined, move on without error # - pass - - except CalledProcessError: - # if Node metrics aren't available, move on without error # - pass - - @staticmethod - def determine_ingress_controller(gathered_resources: Dict) -> Optional[Text]: - """ - Static method for determining the ingress controller being used in the Kubernetes cluster. - - :param gathered_resources: The complete dictionary of gathered resources from the Kubernetes cluster. 
- :return: The ingress controller used in the target cluster or None if the controller cannot be determined. - """ - # check if a SAS Ingress object is defined # - if KubernetesResource.Kinds.INGRESS in gathered_resources: - for ingress_details in gathered_resources[KubernetesResource.Kinds.INGRESS][ITEMS_KEY].values(): - # get the Ingress definition # - ingress: KubernetesResource = ingress_details[Keys.ResourceDetails.RESOURCE_DEFINITION] - # if the Ingress is a SAS resource, return nginx as the controller # - if ingress.is_sas_resource(): - return ViyaDeploymentReportIngressController.KUBE_NGINX - - if KubernetesResource.Kinds.ISTIO_VIRTUAL_SERVICE in gathered_resources: - # check if a SAS VirtualService object is defined # - for virtual_service_details in gathered_resources[KubernetesResource.Kinds.ISTIO_VIRTUAL_SERVICE][ - ITEMS_KEY].values(): - - # get the VirtualService definition # - virtual_service: KubernetesResource = virtual_service_details[Keys.ResourceDetails.RESOURCE_DEFINITION] - # if the VirtualService is a SAS resource, return istio as the controller # - if virtual_service.is_sas_resource(): - return ViyaDeploymentReportIngressController.ISTIO - - # if a controller couldn't be determined, return None # - return None - - @staticmethod - def aggregate_component_resources(resource_details: Dict, gathered_resources: Dict, component: Dict): - """ - Static method that aggregates the various resources that comprise a component deployed into the - Kubernetes cluster. - - This method is called recursively for each relationship extension to aggregate all related resources. - - :param resource_details: The details of the resource to aggregate into a component. - :param gathered_resources: The complete dictionary of resources gathered in the Kubernetes cluster. - :param component: The dictionary where the resources for the current component will be compiled. 
- """ - # set up the component dict - if NAME_KEY not in component: - component[NAME_KEY]: Text = "" - - if ITEMS_KEY not in component: - component[ITEMS_KEY]: Dict = dict() - - # get the relationships extension list for this resource # - resource_relationships: List = resource_details[Keys.ResourceDetails.EXT_DICT][ - Keys.ResourceDetails.Ext.RELATIONSHIPS_LIST] - - # get the resource definition # - resource: KubernetesResource = resource_details[Keys.ResourceDetails.RESOURCE_DEFINITION] - - # if a SAS component name is defined, use it since this is the most canonical value - if resource.get_annotation(KubernetesResource.Keys.ANNOTATION_COMPONENT_NAME) is not None: - component[NAME_KEY] = \ - resource.get_annotation(KubernetesResource.Keys.ANNOTATION_COMPONENT_NAME) - - # if a resource of this kind hasn't been added for the component, create the kind key # - if resource.get_kind() not in component[ITEMS_KEY]: - component[ITEMS_KEY][resource.get_kind()]: Dict = dict() - - # add the resource details to its kind dictionary, keyed by its name # - component[ITEMS_KEY][resource.get_kind()][resource.get_name()]: Dict = resource_details - - # aggregate any resources defined in the relationships extension # - for relationship in resource_relationships: - # get the name and kind of the related resource # - rel_kind: Text = relationship[Keys.ResourceDetails.Ext.Relationship.KIND] - rel_name: Text = relationship[Keys.ResourceDetails.Ext.Relationship.NAME] - - # get the details for the related resource # - try: - related_resource_details: Optional[Dict] = gathered_resources[rel_kind][ITEMS_KEY][rel_name] - except KeyError: - # ignore any failures that may be raised if the resource is transient and not defined - # note that the related resource wasn't found - related_resource_details: Optional[Dict] = None - - # aggregate the related resource # - if related_resource_details is not None: - ViyaDeploymentReportUtils.aggregate_component_resources(related_resource_details, 
gathered_resources, - component) - - # if this is the last resource and the component doesn't have a name determined from an annotation, - # set a name based on the available values - if not component[NAME_KEY]: - component[NAME_KEY]: Text = resource.get_name() - - # if a SAS component name is defined, use it instead - if resource.get_annotation(KubernetesResource.Keys.ANNOTATION_COMPONENT_NAME) is not None: - component[NAME_KEY] = \ - resource.get_annotation(KubernetesResource.Keys.ANNOTATION_COMPONENT_NAME) - - @staticmethod - def get_cadence_version(resource: KubernetesResource) -> Optional[Text]: - """ - Returns the cadence version of the targeted SAS deployment. - - :param resource: The key of the value to return. - :return: A string representing the cadence version of the targeted SAS deployment. - """ - cadence_info: Optional[Text] = None - try: - if 'sas-deployment-metadata' in resource.get_name(): - cadence_data: Optional[Dict] = resource.get_data() - cadence_info = ( - f"{cadence_data['SAS_CADENCE_DISPLAY_NAME']} " - f"{cadence_data['SAS_CADENCE_VERSION']} " - f"({cadence_data['SAS_CADENCE_RELEASE']})" - ) - return cadence_info - except KeyError: - return None - - @staticmethod - def get_db_info(resource: KubernetesResource) -> Optional[Dict]: - """ - Returns the db information of the targeted SAS deployment. - - :param resource: The key of the value to return. - :return: A dict representing the db information of the targeted SAS deployment. 
- """ - db_dict: Optional[Dict] = dict() - try: - if 'sas-postgres-config' in resource.get_name(): - db_data: Optional[Dict] = resource.get_data() - if db_data['EXTERNAL_DATABASE'] == "false": - return {"Type": "Internal"} - - db_dict = { - "Type": "External", - "Host": db_data['DATABASE_HOST'], - "Port": db_data['DATABASE_PORT'], - "Name": db_data['DATABASE_NAME'], - "User": db_data['SPRING_DATASOURCE_USERNAME'] - } - - return db_dict - except KeyError: - return None diff --git a/deployment_report/model/viya_deployment_report.py b/deployment_report/model/viya_deployment_report.py index 1f2f76e..dd3ce80 100644 --- a/deployment_report/model/viya_deployment_report.py +++ b/deployment_report/model/viya_deployment_report.py @@ -18,25 +18,32 @@ from deployment_report.model.static.viya_deployment_report_keys import ITEMS_KEY, NAME_KEY from deployment_report.model.static.viya_deployment_report_keys import ViyaDeploymentReportKeys as Keys -from deployment_report.model.static.viya_deployment_report_ingress_controller import \ - ViyaDeploymentReportIngressController as IngressController -from deployment_report.model.utils.viya_deployment_report_utils import ViyaDeploymentReportUtils +from deployment_report.model.utils import \ + component_util, \ + config_util, \ + ingress_util, \ + metrics_util, \ + relationship_util, \ + resource_util from viya_ark_library.jinja2.sas_jinja2 import Jinja2TemplateRenderer from viya_ark_library.k8s.sas_k8s_errors import KubectlRequestForbiddenError +from viya_ark_library.k8s.sas_k8s_ingress import SupportedIngress from viya_ark_library.k8s.sas_k8s_objects import \ - KubernetesApiResources, KubernetesObjectJSONEncoder, KubernetesResource + KubernetesApiResources, \ + KubernetesObjectJSONEncoder, \ + KubernetesResource from viya_ark_library.k8s.sas_kubectl_interface import KubectlInterface -# templates for string-formatted timestamp values # +# templates for string-formatted timestamp values _READABLE_TIMESTAMP_TMPL_ = "%A, %B %d, %Y %I:%M%p" 
_FILE_TIMESTAMP_TMPL_ = "%Y-%m-%dT%H_%M_%S" -# templates for output file names # +# templates for output file names _REPORT_DATA_FILE_NAME_TMPL_ = "viya_deployment_report_data_{}.json" _REPORT_FILE_NAME_TMPL_ = "viya_deployment_report_{}.html" -# SAS custom API resource group id # +# SAS custom API resource group id _SAS_API_GROUP_ID_ = "sas.com" @@ -116,154 +123,162 @@ def gather_details(self, kubectl: KubectlInterface, # Gather details about Kubernetes environment where SAS is deployed # ####################################################################### - # mark when these details were gathered # + # mark when these details were gathered gathered: Text = datetime.datetime.now().strftime(_READABLE_TIMESTAMP_TMPL_) - # gather Kubernetes API resources # + # gather Kubernetes API resources api_resources: KubernetesApiResources = kubectl.api_resources() api_resources_dict: Dict = api_resources.as_dict() - # make a list of any Custom Resource Definitions that are provided by SAS # + # make a list of any Custom Resource Definitions that are provided by SAS sas_custom_resources: Dict = dict() for kind, details in api_resources_dict.items(): if _SAS_API_GROUP_ID_ in api_resources.get_api_group(kind): sas_custom_resources[kind] = details - # create dictionary to store gathered resources # + # create dictionary to store gathered resources gathered_resources: Dict = dict() - # start by gathering details about ConfigMap # + # initialize variables for config values cadence_info: Optional[Text] = None db_dict: Optional[Dict] = dict() + try: - ViyaDeploymentReportUtils.gather_resource_details(kubectl, gathered_resources, api_resources, - k8s_kinds.CONFIGMAP) - for item in gathered_resources[k8s_kinds.CONFIGMAP]['items']: - resource_definition = gathered_resources[k8s_kinds.CONFIGMAP]['items'][item]['resourceDefinition'] + # gather details about ConfigMaps + resource_util.gather_details(kubectl, gathered_resources, api_resources, k8s_kinds.CONFIGMAP) + + # iterate over all 
the ConfigMaps + for config_map_details in gathered_resources[k8s_kinds.CONFIGMAP][ITEMS_KEY].values(): + + # get the ConfigMap definition + config_map = config_map_details[Keys.ResourceDetails.RESOURCE_DEFINITION] + if not cadence_info: - cadence_info = ViyaDeploymentReportUtils.get_cadence_version(resource_definition) + # set the cadence_info if it hasn't already been set + cadence_info = config_util.get_cadence_version(config_map) if not db_dict: - db_dict = ViyaDeploymentReportUtils.get_db_info(resource_definition) + # set the db_dict if it hasn't already been set + db_dict = config_util.get_db_info(config_map) if db_dict and cadence_info: + # if both values have been defined, break the loop and move on break - except CalledProcessError: + # if ConfigMaps cannot be gathered, move on to try other resources pass + # initialize the dict that will hold all gathered resources gathered_resources = dict() - # start by gathering details about Nodes, if available # - # this information can be reported even if Pods are not listable # + try: - ViyaDeploymentReportUtils.gather_resource_details(kubectl, gathered_resources, api_resources, - k8s_kinds.NODE) + # start by gathering details about Nodes, if available + # this information can be reported even if Pods are not listable + resource_util.gather_details(kubectl, gathered_resources, api_resources, k8s_kinds.NODE) except CalledProcessError: - # the user may not be able to see non-namespaced resources like nodes, move on without raising an error # + # the user may not be able to see non-namespaced resources like nodes, move on without raising an error pass - # gather details about Pods in the target Kubernetes cluster # - # Pods are the smallest unit in Kubernetes and define 'ownerReferences', which can be used to gather upstream # - # relationships # - # services, ingresses, and virtual services will be gathered separately even if Pods and their owners are # - # found; networking resources are not defined in 
'ownerReferences' # - # if Pods cannot be listed, the report will report the details already gathered, and display a messages saying # - # that components could not be reported because pods are not listable # try: - ViyaDeploymentReportUtils.gather_resource_details(kubectl, gathered_resources, api_resources, k8s_kinds.POD) + # gather details about Pods in the target Kubernetes cluster + # Pods are the smallest unit in Kubernetes and define 'ownerReferences', which can be used to gather + # upstream relationships + # + # services, ingresses, and virtual services will be gathered separately even if Pods and their owners are + # found - networking resources are not defined in 'ownerReferences' + # + # if Pods cannot be listed, the report will report the details already gathered, and display a messages + # saying that components could not be reported because pods are not listable + resource_util.gather_details(kubectl, gathered_resources, api_resources, k8s_kinds.POD) except CalledProcessError: - # if a CalledProcessError is raised when gathering pods, then surface an error up to stop the program # - # without the ability to list pods, aggregating component resources won't occur # + # if a CalledProcessError is raised when gathering pods, then surface an error up to stop the program + # without the ability to list pods, aggregating component resources won't occur raise KubectlRequestForbiddenError(f"Listing pods is forbidden in namespace [{kubectl.get_namespace()}]. " "Make sure KUBECONFIG is correctly set and that the correct namespace " "is being targeted. 
A namespace can be given on the command line using " "the \"--namespace=\" option.") - # define a list to hold any unavailable resources # + # define a list to hold any unavailable resources unavailable_resources: List = list() - ingress_ctlr: Optional[Text] = None + + # define a variable to hold the ingress controller that will be determined for this cluster + ingress_controller: Optional[Text] = None ####################################################################### # Start - Additional resource gathering # ####################################################################### - - # make sure pods were gathered # - # if none were found, there is no need to gather additional details # + # make sure pods were gathered + # if none were found, there is no need to gather additional details if gathered_resources[k8s_kinds.POD][Keys.KindDetails.COUNT] > 0: - try: - # since Pods were listable, gather details about networking kinds # - for networking_kind in [k8s_kinds.SERVICE, k8s_kinds.INGRESS, k8s_kinds.ISTIO_VIRTUAL_SERVICE]: - ViyaDeploymentReportUtils.gather_resource_details(kubectl, gathered_resources, api_resources, - networking_kind) + # since Pods were listable, gather details about networking kinds - start with services + resource_util.gather_details(kubectl, gathered_resources, api_resources, k8s_kinds.SERVICE) + + # look for all kinds used by supported ingress controllers + for ingress_kind in SupportedIngress.get_ingress_controller_to_kind_map().values(): + resource_util.gather_details(kubectl, gathered_resources, api_resources, ingress_kind) - # make sure an attempt is made to gather any SAS CDRs that were discovered # - # some may have already been gathered if they are upstream owners of any Pods # + # make sure an attempt is made to gather any SAS CDRs that were discovered + # some may have already been gathered if they are upstream owners of any Pods for sas_custom_kind in sas_custom_resources.keys(): - 
ViyaDeploymentReportUtils.gather_resource_details(kubectl, gathered_resources, api_resources, - sas_custom_kind) + resource_util.gather_details(kubectl, gathered_resources, api_resources, sas_custom_kind) except CalledProcessError: - # if any of the networking or SAS CRDs can't be listed, move since some amount of component resources # - # have already been gathered # + # if any of the networking or SAS CRDs can't be listed, move since some amount of component resources + # have already been gathered pass - # determine the ingress controller # - ingress_ctlr = ViyaDeploymentReportUtils.determine_ingress_controller(gathered_resources) + # determine the ingress controller + ingress_controller = ingress_util.determine_ingress_controller(gathered_resources) - # determine if any discovered resource kinds were unavailable # - # if at least one is unavailable, a message will be displayed saying that components may not be complete # - # because all resources were not listable # + # determine if any discovered resource kinds were unavailable + # if at least one is unavailable, a message will be displayed saying that components may not be complete + # because all resources were not listable for kind, kind_details in gathered_resources.items(): - # check if the kind is unavailable # + # check if the kind is unavailable if not kind_details[Keys.KindDetails.AVAILABLE]: - # ignore the unavailable Ingress kind if Istio is used or VirtualService kind if Nginx is used - if (ingress_ctlr == IngressController.ISTIO and kind != k8s_kinds.INGRESS) or \ - (ingress_ctlr == IngressController.KUBE_NGINX and kind != k8s_kinds.ISTIO_VIRTUAL_SERVICE): - + # ignore any ingress kinds that are not related to the ingress controller + if not ingress_util.ignorable_for_controller_if_unavailable(ingress_controller, kind): + # add the kind to the unavailable resources unavailable_resources.append(kind) ####################################################################### # Define relationships 
between resources # ####################################################################### - # define the relationship between Service and Ingress # - ViyaDeploymentReportUtils.define_service_to_ingress_relationships(gathered_resources[k8s_kinds.SERVICE], - gathered_resources[k8s_kinds.INGRESS]) - - # define the relationship between Service and VirtualService # - ViyaDeploymentReportUtils.define_service_to_virtual_service_relationships( - gathered_resources[k8s_kinds.SERVICE], gathered_resources[k8s_kinds.ISTIO_VIRTUAL_SERVICE]) + # define the relationship between Service and ingress controller kind + relationship_util.define_service_to_ingress_relationships(ingress_controller, gathered_resources) - # define the relationship between Pod and Service # - ViyaDeploymentReportUtils.define_pod_to_service_relationships(gathered_resources[k8s_kinds.POD], - gathered_resources[k8s_kinds.SERVICE]) + # define the relationship between Pod and Service + relationship_util.define_pod_to_service_relationships(gathered_resources[k8s_kinds.POD], + gathered_resources[k8s_kinds.SERVICE]) - # define the relationship between Node and Pod # - ViyaDeploymentReportUtils.define_node_to_pod_relationships(gathered_resources[k8s_kinds.NODE], - gathered_resources[k8s_kinds.POD]) + # define the relationship between Node and Pod + relationship_util.define_node_to_pod_relationships(gathered_resources[k8s_kinds.NODE], + gathered_resources[k8s_kinds.POD]) ####################################################################### # Get metrics # ####################################################################### - # get Pod metrics # - ViyaDeploymentReportUtils.get_pod_metrics(kubectl, gathered_resources[k8s_kinds.POD]) + # get Pod metrics + metrics_util.get_pod_metrics(kubectl, gathered_resources[k8s_kinds.POD]) - # get Node metrics # - ViyaDeploymentReportUtils.get_node_metrics(kubectl, gathered_resources[k8s_kinds.NODE]) + # get Node metrics + metrics_util.get_node_metrics(kubectl, 
gathered_resources[k8s_kinds.NODE]) ####################################################################### # Gather Pod logs, if requested # ####################################################################### - # check if logs were requested # + # check if logs were requested if include_pod_log_snips: - # loop over all Pods to get their log snips # + # loop over all Pods to get their log snips for pod_name, pod_details in gathered_resources[k8s_kinds.POD][ITEMS_KEY].items(): try: - # define the log snip extension for this Pod + # get the log snip log_snip: List = kubectl.logs(pod_name) - pod_details[Keys.ResourceDetails.EXT_DICT][Keys.ResourceDetails.Ext.LOG_SNIP_LIST]: List \ - = log_snip + # add it to the pod's extension dict + pod_ext: Dict = pod_details[Keys.ResourceDetails.EXT_DICT] + pod_ext[Keys.ResourceDetails.Ext.LOG_SNIP_LIST]: List = log_snip except CalledProcessError: # if the logs can't be retrieved, move on without error # pass @@ -276,90 +291,107 @@ def gather_details(self, kubectl: KubectlInterface, # Create the report data dictionary # ####################################################################### - # add the gathered time # + # add the gathered time self._report_data[Keys.GATHERED]: Text = gathered - # add any unavailable resources # + # add any unavailable resources self._report_data[Keys.UNAVAILABLE_RESOURCES_LIST]: List = unavailable_resources - # add details about the Kubernetes cluster under the 'kubernetes' key # + ################################## + # 'kubernetes' key + ################################## k8s_details_dict = self._report_data[Keys.KUBERNETES_DICT] = dict() - # create a key to hold the API resources available in the cluster: dict # + + # create a key to hold the API resources available in the cluster: dict k8s_details_dict[Keys.Kubernetes.API_RESOURCES_DICT]: Dict = api_resources_dict - # create a key to hold the API versions in the cluster: list # + + # create a key to hold the API versions in the cluster: list 
k8s_details_dict[Keys.Kubernetes.API_VERSIONS_LIST]: List = kubectl.api_versions() - # create a key to mark the determined ingress controller for the cluster: str|None # - k8s_details_dict[Keys.Kubernetes.INGRESS_CTRL]: Optional[Text] = ingress_ctlr - # create a key to mark the namespace evaluated for this report: str|None # + + # create a key to mark the determined ingress controller for the cluster: str|None + k8s_details_dict[Keys.Kubernetes.INGRESS_CTRL]: Optional[Text] = ingress_controller + + # create a key to mark the namespace evaluated for this report: str|None k8s_details_dict[Keys.Kubernetes.NAMESPACE] = kubectl.get_namespace() - # create a key to hold the details about node in the cluster: dict # + + # create a key to hold the details about node in the cluster: dict k8s_details_dict[Keys.Kubernetes.NODES_DICT]: Dict = gathered_resources[k8s_kinds.NODE] - # create a key to hold the client/server versions for the cluster: dict # + + # create a key to hold the client/server versions for the cluster: dict k8s_details_dict[Keys.Kubernetes.VERSIONS_DICT]: Dict = kubectl.version() - # create a key to hold the meta information about resources discovered in the cluster: dict # + + # create a key to hold the meta information about resources discovered in the cluster: dict k8s_details_dict[Keys.Kubernetes.DISCOVERED_KINDS_DICT]: Dict = dict() - # create a key to hold the cadence version information: str|None # + + # create a key to hold the cadence version information: str|None k8s_details_dict[Keys.Kubernetes.CADENCE_INFO]: Optional[Text] = cadence_info - # create a key to hold the viya db information: dict # + + # create a key to hold the viya db information: dict k8s_details_dict[Keys.Kubernetes.DB_INFO]: Dict = db_dict - # add the availability and count of all discovered resources # + # add the availability and count of all discovered resources for kind_name, kind_details in gathered_resources.items(): - # create an entry for the kind # - 
k8s_details_dict[Keys.Kubernetes.DISCOVERED_KINDS_DICT][kind_name]: Dict = dict() - # create a key to mark if the resource kind was available: bool # - k8s_details_dict[Keys.Kubernetes.DISCOVERED_KINDS_DICT][kind_name][Keys.KindDetails.AVAILABLE]: bool = \ - kind_details[Keys.KindDetails.AVAILABLE] - # create a key to note the total count of the resource kind: int # - k8s_details_dict[Keys.Kubernetes.DISCOVERED_KINDS_DICT][kind_name][Keys.KindDetails.COUNT]: int = \ - kind_details[Keys.KindDetails.COUNT] - # create a key to note whether this kind is a SAS custom resource definition: bool # - k8s_details_dict[Keys.Kubernetes.DISCOVERED_KINDS_DICT][kind_name][Keys.KindDetails.SAS_CRD]: bool = \ - kind_name in sas_custom_resources - - # if Pods are defined, associate the resources that comprise a component # + # create an entry for the kind + kind_dict = k8s_details_dict[Keys.Kubernetes.DISCOVERED_KINDS_DICT][kind_name] = dict() + + # create a key to mark if the resource kind was available: bool + kind_dict[Keys.KindDetails.AVAILABLE]: bool = kind_details[Keys.KindDetails.AVAILABLE] + + # create a key to note the total count of the resource kind: int + kind_dict[Keys.KindDetails.COUNT]: int = kind_details[Keys.KindDetails.COUNT] + + # create a key to note whether this kind is a SAS custom resource definition: bool + kind_dict[Keys.KindDetails.SAS_CRD]: bool = kind_name in sas_custom_resources + + # if Pods are defined, aggregate the resources that comprise a component if gathered_resources[k8s_kinds.POD][Keys.KindDetails.COUNT] > 0: - # create the sas and misc entries in report_data # + # create the sas and misc entries in report_data sas_dict = self._report_data[Keys.SAS_COMPONENTS_DICT] = dict() misc_dict = self._report_data[Keys.OTHER_COMPONENTS_DICT] = dict() - # create components by building them up from the Pod via relationship extensions # + # create components by building them up from the Pod via relationship extensions for pod_details in 
gathered_resources[k8s_kinds.POD][ITEMS_KEY].values(): - # define a dictionary to hold the aggregated component # + # define a dictionary to hold the aggregated component component: Dict = dict() - # aggregate all the resources related to this Pod into a component # - ViyaDeploymentReportUtils.aggregate_component_resources(pod_details, gathered_resources, component) + # aggregate all the resources related to this Pod into a component + component_util.aggregate_resources(pod_details, gathered_resources, component) - # determine if this component belongs to SAS and it's component name value # + # note whether this component belongs to SAS is_sas_component: bool = False + + # note the component name value component_name: Text = component[NAME_KEY] - # iterate over all resource kinds in the component # + # iterate over all resource kinds in the component for kind_details in component[ITEMS_KEY].values(): - # iterate over all resources of this kind # + # iterate over all resources of this kind for resource_details in kind_details.values(): - # see if this resource is a SAS resource, if so this component will be treated as a SAS # - # component # - is_sas_component = is_sas_component or \ - resource_details[Keys.ResourceDetails.RESOURCE_DEFINITION].is_sas_resource() + # see if this resource is a SAS resource, if so this component will be treated as a SAS + # component + is_sas_component = \ + (is_sas_component or + resource_details[Keys.ResourceDetails.RESOURCE_DEFINITION].is_sas_resource()) # add the component to its appropriate dictionary if is_sas_component: if component_name not in sas_dict: - # if this component is being added for the first time, create its key # + # if this component is being added for the first time, create its key sas_dict[component_name]: Dict = component[ITEMS_KEY] else: - # otherwise, merge its kinds # + # otherwise, merge its kinds for kind_name, kind_details in component[ITEMS_KEY].items(): + # create the kind dictionary if one is not 
already defined if kind_name not in sas_dict[component_name]: sas_dict[component_name][kind_name]: Dict = kind_details else: + # otherwise add the resources into the kind dict by name for resource_name, resource_details in kind_details.items(): sas_dict[component_name][kind_name][resource_name]: Dict = resource_details else: + # add this to the misc dict, it could not be treated as a SAS component misc_dict[component_name]: Dict = component[ITEMS_KEY] def get_kubernetes_details(self) -> Optional[Dict]: diff --git a/deployment_report/templates/httpproxy.html.j2 b/deployment_report/templates/httpproxy.html.j2 new file mode 100644 index 0000000..17b536c --- /dev/null +++ b/deployment_report/templates/httpproxy.html.j2 @@ -0,0 +1,29 @@ +{# ----------------------------------------------------------- #} +{# httpproxy.html.j2 #} +{# ----------------------------------------------------------- #} +{# Author: SAS Institute Inc. #} +{# ----------------------------------------------------------- #} +{# Copyright (c) 2021, SAS Institute Inc., Cary, NC, USA. #} +{# All Rights Reserved. #} +{# SPDX-License-Identifier: Apache-2.0 #} +{# ----------------------------------------------------------- #} + +{# HTTPProxy (Contour) Template #} +{% extends "resource.html.j2" %} + +{% block additional_resource_details %} +{% if resource_details.resourceDefinition.spec.routes|default([]) | length > 0 %} +Routes + +{% for route in resource_details.resourceDefinition.spec.routes|default([]) %} +{% for condition in route.conditions|default([]) %} +{% if condition.prefix|default("") != "" %} + + + +{% endif %} +{% endfor %} +{% endfor %} +
{{ condition.prefix }}
+{% endif %} +{% endblock %} \ No newline at end of file diff --git a/deployment_report/templates/route.html.j2 b/deployment_report/templates/route.html.j2 new file mode 100644 index 0000000..dc3d7be --- /dev/null +++ b/deployment_report/templates/route.html.j2 @@ -0,0 +1,23 @@ +{# ----------------------------------------------------------- #} +{# route.html.j2 #} +{# ----------------------------------------------------------- #} +{# Author: SAS Institute Inc. #} +{# ----------------------------------------------------------- #} +{# Copyright (c) 2021, SAS Institute Inc., Cary, NC, USA. #} +{# All Rights Reserved. #} +{# SPDX-License-Identifier: Apache-2.0 #} +{# ----------------------------------------------------------- #} + +{# Route (OpenShift) Template #} +{% extends "resource.html.j2" %} + +{% block additional_resource_details %} +{% if resource_details.resourceDefinition.spec.path|default("") != "" %} +Path + + + + +
{{ resource_details.resourceDefinition.spec.path }}
+{% endif %} +{% endblock %} \ No newline at end of file diff --git a/viya_ark_library/k8s/sas_k8s_ingress.py b/viya_ark_library/k8s/sas_k8s_ingress.py new file mode 100644 index 0000000..029dbe0 --- /dev/null +++ b/viya_ark_library/k8s/sas_k8s_ingress.py @@ -0,0 +1,46 @@ +#################################################################### +# ### sas_k8s_ingress.py ### +#################################################################### +# ### Author: SAS Institute Inc. ### +#################################################################### +# ### +# Copyright (c) 2021, SAS Institute Inc., Cary, NC, USA. ### +# All Rights Reserved. ### +# SPDX-License-Identifier: Apache-2.0 ### +# ### +#################################################################### +from typing import Dict, Text + +from viya_ark_library.k8s.sas_k8s_objects import KubernetesResource + + +class SupportedIngress(object): + """ + Values and utility methods to help in determining the ingress controller used by a k8s deployment. + """ + + class Controllers(object): + """ + Displayable name values for supported ingress controllers. + """ + CONTOUR = "Contour" + ISTIO = "Istio" + NGINX = "NGINX" + OPENSHIFT = "OpenShift" + + @staticmethod + def get_ingress_controller_to_kind_map() -> Dict[Text, Text]: + """ + Returns a dictionary mapping an ingress controller type to the k8s resource kind that it uses. + This can be used when evaluating a deployment to see which controller is used based on the presence + of resources defined in the cluster. 
+ """ + return { + SupportedIngress.Controllers.CONTOUR: KubernetesResource.Kinds.CONTOUR_HTTPPROXY, + SupportedIngress.Controllers.ISTIO: KubernetesResource.Kinds.ISTIO_VIRTUAL_SERVICE, + SupportedIngress.Controllers.OPENSHIFT: KubernetesResource.Kinds.OPENSHIFT_ROUTE, + # NGINX is placed last in the map intentionally + # Ingress kinds could be present in deployments using one of the above controllers + # If iterating over the dict, NGINX should be evaluated last to avoid false-positives + SupportedIngress.Controllers.NGINX: KubernetesResource.Kinds.INGRESS + } diff --git a/viya_ark_library/k8s/sas_k8s_objects.py b/viya_ark_library/k8s/sas_k8s_objects.py index 3ef221e..6c7a7c5 100644 --- a/viya_ark_library/k8s/sas_k8s_objects.py +++ b/viya_ark_library/k8s/sas_k8s_objects.py @@ -341,6 +341,7 @@ class Keys(object): CLUSTER_IP = "clusterIP" COLLISION_COUNT = "collisionCount" COMPLETION_TIME = "completionTime" + CONDITIONS = "conditions" CONTAINER_RUNTIME_VERSION = "containerRuntimeVersion" CONTAINER_STATUSES = "containerStatuses" CONTAINERS = "containers" @@ -407,12 +408,14 @@ class Keys(object): RESOURCE_VERSION = "resourceVersion" RESTART_COUNT = "restartCount" ROUTE = "route" + ROUTES = "routes" RULES = "rules" SELECTOR = "selector" SELF_LINK = "selfLink" SERVICE = "service" SERVICE_PORT = "servicePort" SERVICE_NAME = "serviceName" + SERVICES = "services" SPEC = "spec" START_TIME = "startTime" STARTED = "started" @@ -421,6 +424,7 @@ class Keys(object): SUCCEEDED = "succeeded" TARGET_PORT = "targetPort" TCP = "tcp" + TO = "to" TEMPLATE = "template" UID = "uid" URI = "uri" @@ -437,6 +441,7 @@ class Kinds(object): """ CAS_DEPLOYMENT = "CASDeployment" CONFIGMAP = "ConfigMap" + CONTOUR_HTTPPROXY = "HTTPProxy" CRON_JOB = "CronJob" CRUNCHY_PG_BACKUP = "Pgbackup" CRUNCHY_PG_CLUSTER = "Pgcluster" @@ -450,6 +455,7 @@ class Kinds(object): JOB = "Job" NODE = "Node" NODE_METRICS = "NodeMetrics" + OPENSHIFT_ROUTE = "Route" POD = "Pod" POD_METRICS = "PodMetrics" REPLICA_SET = 
"ReplicaSet" diff --git a/viya_ark_library/k8s/test/test_sas_k8s_ingress.py b/viya_ark_library/k8s/test/test_sas_k8s_ingress.py new file mode 100644 index 0000000..ec0fa57 --- /dev/null +++ b/viya_ark_library/k8s/test/test_sas_k8s_ingress.py @@ -0,0 +1,45 @@ +#################################################################### +# ### test_sas_k8s_ingress.py ### +#################################################################### +# ### Author: SAS Institute Inc. ### +#################################################################### +# ### +# Copyright (c) 2021, SAS Institute Inc., Cary, NC, USA. ### +# All Rights Reserved. ### +# SPDX-License-Identifier: Apache-2.0 ### +# ### +#################################################################### +from typing import Dict, Text + +from viya_ark_library.k8s.sas_k8s_ingress import SupportedIngress +from viya_ark_library.k8s.sas_k8s_objects import KubernetesResource + + +def test_get_ingress_controller_to_kind_map() -> None: + """ + Verifies the current supported ingress controllers are in the map and + that their kinds are correctly mapped. 
+ """ + supported_ingress_map: Dict[Text, Text] = SupportedIngress.get_ingress_controller_to_kind_map() + + # assert 4 supported ingress controllers + assert len(supported_ingress_map) == 4 + + # Contour + assert SupportedIngress.Controllers.CONTOUR in supported_ingress_map + assert supported_ingress_map[SupportedIngress.Controllers.CONTOUR] == KubernetesResource.Kinds.CONTOUR_HTTPPROXY + + # Istio + assert SupportedIngress.Controllers.ISTIO in supported_ingress_map + assert supported_ingress_map[SupportedIngress.Controllers.ISTIO] == KubernetesResource.Kinds.ISTIO_VIRTUAL_SERVICE + + # NGINX + assert SupportedIngress.Controllers.NGINX in supported_ingress_map + assert supported_ingress_map[SupportedIngress.Controllers.NGINX] == KubernetesResource.Kinds.INGRESS + + # OpenShift + assert SupportedIngress.Controllers.OPENSHIFT in supported_ingress_map + assert supported_ingress_map[SupportedIngress.Controllers.OPENSHIFT] == KubernetesResource.Kinds.OPENSHIFT_ROUTE + + # Verify NGINX is the last key in the dict + assert list(supported_ingress_map.keys())[-1] == SupportedIngress.Controllers.NGINX diff --git a/viya_ark_library/k8s/test_impl/response_data/api_resources_ingress_all.json b/viya_ark_library/k8s/test_impl/response_data/api_resources_ingress_all.json new file mode 100644 index 0000000..5183633 --- /dev/null +++ b/viya_ark_library/k8s/test_impl/response_data/api_resources_ingress_all.json @@ -0,0 +1,245 @@ +{ + "CASDeployment": { + "apiGroup": "viya.sas.com", + "name": "casdeployments", + "namespaced": true, + "shortname": "cas", + "verbs": [ + "delete", + "deletecollection", + "get", + "list", + "patch", + "create", + "update", + "watch" + ] + }, + "ConfigMap": { + "name": "configmaps", + "shortname": "cm", + "apiGroup": "", + "namespaced": true, + "verbs": [ + "create", + "delete", + "deletecollection", + "get", + "list", + "patch", + "update", + "watch" + ] + }, + "CronJob": { + "apiGroup": "batch", + "name": "cronjobs", + "namespaced": true, + "shortname": 
"cj", + "verbs": [ + "create", + "delete", + "deletecollection", + "get", + "list", + "patch", + "update", + "watch" + ] + }, + "Deployment": { + "apiGroup": "apps", + "name": "deployments", + "namespaced": true, + "shortname": "deploy", + "verbs": [ + "create", + "delete", + "deletecollection", + "get", + "list", + "patch", + "update", + "watch" + ] + }, + "Job": { + "apiGroup": "batch", + "name": "jobs", + "namespaced": true, + "shortname": "", + "verbs": [ + "create", + "delete", + "deletecollection", + "get", + "list", + "patch", + "update", + "watch" + ] + }, + "Node": { + "apiGroup": "", + "name": "nodes", + "namespaced": true, + "shortname": "no", + "verbs": [ + "create", + "delete", + "deletecollection", + "get", + "list", + "patch", + "update", + "watch" + ] + }, + "NodeMetrics": { + "apiGroup": "metrics.k8s.io", + "name": "nodes", + "namespaced": true, + "shortname": "", + "verbs": [ + "get", + "list" + ] + }, + "Pod": { + "apiGroup": "", + "name": "pods", + "namespaced": true, + "shortname": "po", + "verbs": [ + "create", + "delete", + "deletecollection", + "get", + "list", + "patch", + "update", + "watch" + ] + }, + "PodMetrics": { + "apiGroup": "metrics.k8s.io", + "name": "pods", + "namespaced": true, + "shortname": "", + "verbs": [ + "get", + "list" + ] + }, + "ReplicaSet": { + "apiGroup": "apps", + "name": "replicasets", + "namespaced": true, + "shortname": "rs", + "verbs": [ + "create", + "delete", + "deletecollection", + "get", + "list", + "patch", + "update", + "watch" + ] + }, + "Service": { + "apiGroup": "", + "name": "services", + "namespaced": true, + "shortname": "svc", + "verbs": [ + "create", + "delete", + "get", + "list", + "patch", + "update", + "watch" + ] + }, + "StatefulSet": { + "apiGroup": "apps", + "name": "statefulsets", + "namespaced": true, + "shortname": "sts", + "verbs": [ + "create", + "delete", + "deletecollection", + "get", + "list", + "patch", + "update", + "watch" + ] + }, + "HTTPProxy": { + "apiGroup": 
"projectcontour.io/v1", + "name": "httpproxy", + "namespaced": true, + "shortname": "", + "verbs": [ + "delete", + "deletecollection", + "get", + "list", + "patch", + "create", + "update", + "watch" + ] + }, + "Ingress": { + "apiGroup": "networking.k8s.io", + "name": "ingresses", + "namespaced": true, + "shortname": "ing", + "verbs": [ + "create", + "delete", + "deletecollection", + "get", + "list", + "patch", + "update", + "watch" + ] + }, + "Route": { + "apiGroup": "route.openshift.io/v1", + "name": "routes", + "namespaced": true, + "shortname": "", + "verbs": [ + "delete", + "deletecollection", + "get", + "list", + "patch", + "create", + "update", + "watch" + ] + }, + "VirtualService": { + "apiGroup": "networking.istio.io", + "name": "virtualservices", + "namespaced": true, + "shortname": "vs", + "verbs": [ + "delete", + "deletecollection", + "get", + "list", + "patch", + "create", + "update", + "watch" + ] + } +} diff --git a/viya_ark_library/k8s/test_impl/response_data/api_resources_both_ingress.json b/viya_ark_library/k8s/test_impl/response_data/api_resources_ingress_contour.json similarity index 89% rename from viya_ark_library/k8s/test_impl/response_data/api_resources_both_ingress.json rename to viya_ark_library/k8s/test_impl/response_data/api_resources_ingress_contour.json index abd5fd9..7e93c38 100644 --- a/viya_ark_library/k8s/test_impl/response_data/api_resources_both_ingress.json +++ b/viya_ark_library/k8s/test_impl/response_data/api_resources_ingress_contour.json @@ -15,11 +15,11 @@ "watch" ] }, - "CronJob": { - "apiGroup": "batch", - "name": "cronjobs", + "ConfigMap": { + "name": "configmaps", + "shortname": "cm", + "apiGroup": "", "namespaced": true, - "shortname": "cj", "verbs": [ "create", "delete", @@ -31,11 +31,11 @@ "watch" ] }, - "Deployment": { - "apiGroup": "apps", - "name": "deployments", + "CronJob": { + "apiGroup": "batch", + "name": "cronjobs", "namespaced": true, - "shortname": "deploy", + "shortname": "cj", "verbs": [ "create", 
"delete", @@ -47,11 +47,11 @@ "watch" ] }, - "Ingress": { - "apiGroup": "networking.k8s.io", - "name": "ingresses", + "Deployment": { + "apiGroup": "apps", + "name": "deployments", "namespaced": true, - "shortname": "ing", + "shortname": "deploy", "verbs": [ "create", "delete", @@ -178,11 +178,11 @@ "watch" ] }, - "VirtualService": { - "apiGroup": "networking.istio.io", - "name": "virtualservices", + "HTTPProxy": { + "apiGroup": "projectcontour.io/v1", + "name": "httpproxy", "namespaced": true, - "shortname": "vs", + "shortname": "", "verbs": [ "delete", "deletecollection", @@ -193,21 +193,5 @@ "update", "watch" ] - }, - "ConfigMap": { - "name": "configmaps", - "shortname": "cm", - "apiGroup": "", - "namespaced": true, - "verbs": [ - "create", - "delete", - "deletecollection", - "get", - "list", - "patch", - "update", - "watch" - ] } } diff --git a/viya_ark_library/k8s/test_impl/response_data/api_resources_istio_only.json b/viya_ark_library/k8s/test_impl/response_data/api_resources_ingress_istio.json similarity index 100% rename from viya_ark_library/k8s/test_impl/response_data/api_resources_istio_only.json rename to viya_ark_library/k8s/test_impl/response_data/api_resources_ingress_istio.json index b995b3e..a8055a2 100644 --- a/viya_ark_library/k8s/test_impl/response_data/api_resources_istio_only.json +++ b/viya_ark_library/k8s/test_impl/response_data/api_resources_ingress_istio.json @@ -15,6 +15,22 @@ "watch" ] }, + "ConfigMap": { + "name": "configmaps", + "shortname": "cm", + "apiGroup": "", + "namespaced": true, + "verbs": [ + "create", + "delete", + "deletecollection", + "get", + "list", + "patch", + "update", + "watch" + ] + }, "CronJob": { "apiGroup": "batch", "name": "cronjobs", @@ -177,21 +193,5 @@ "update", "watch" ] - }, - "ConfigMap": { - "name": "configmaps", - "shortname": "cm", - "apiGroup": "", - "namespaced": true, - "verbs": [ - "create", - "delete", - "deletecollection", - "get", - "list", - "patch", - "update", - "watch" - ] } } diff --git 
a/viya_ark_library/k8s/test_impl/response_data/api_resources_nginx_only.json b/viya_ark_library/k8s/test_impl/response_data/api_resources_ingress_nginx.json similarity index 100% rename from viya_ark_library/k8s/test_impl/response_data/api_resources_nginx_only.json rename to viya_ark_library/k8s/test_impl/response_data/api_resources_ingress_nginx.json index 7dcf74a..12f015b 100644 --- a/viya_ark_library/k8s/test_impl/response_data/api_resources_nginx_only.json +++ b/viya_ark_library/k8s/test_impl/response_data/api_resources_ingress_nginx.json @@ -15,11 +15,11 @@ "watch" ] }, - "CronJob": { - "apiGroup": "batch", - "name": "cronjobs", + "ConfigMap": { + "name": "configmaps", + "shortname": "cm", + "apiGroup": "", "namespaced": true, - "shortname": "cj", "verbs": [ "create", "delete", @@ -31,11 +31,11 @@ "watch" ] }, - "Deployment": { - "apiGroup": "apps", - "name": "deployments", + "CronJob": { + "apiGroup": "batch", + "name": "cronjobs", "namespaced": true, - "shortname": "deploy", + "shortname": "cj", "verbs": [ "create", "delete", @@ -47,11 +47,11 @@ "watch" ] }, - "Ingress": { - "apiGroup": "networking.k8s.io", - "name": "ingresses", + "Deployment": { + "apiGroup": "apps", + "name": "deployments", "namespaced": true, - "shortname": "ing", + "shortname": "deploy", "verbs": [ "create", "delete", @@ -178,11 +178,11 @@ "watch" ] }, - "ConfigMap": { - "name": "configmaps", - "shortname": "cm", - "apiGroup": "", + "Ingress": { + "apiGroup": "networking.k8s.io", + "name": "ingresses", "namespaced": true, + "shortname": "ing", "verbs": [ "create", "delete", diff --git a/viya_ark_library/k8s/test_impl/response_data/api_resources_no_ingress.json b/viya_ark_library/k8s/test_impl/response_data/api_resources_ingress_none.json similarity index 100% rename from viya_ark_library/k8s/test_impl/response_data/api_resources_no_ingress.json rename to viya_ark_library/k8s/test_impl/response_data/api_resources_ingress_none.json index ad3d72c..49d237a 100644 --- 
a/viya_ark_library/k8s/test_impl/response_data/api_resources_no_ingress.json +++ b/viya_ark_library/k8s/test_impl/response_data/api_resources_ingress_none.json @@ -15,6 +15,22 @@ "watch" ] }, + "ConfigMap": { + "name": "configmaps", + "shortname": "cm", + "apiGroup": "", + "namespaced": true, + "verbs": [ + "create", + "delete", + "deletecollection", + "get", + "list", + "patch", + "update", + "watch" + ] + }, "CronJob": { "apiGroup": "batch", "name": "cronjobs", @@ -161,21 +177,5 @@ "update", "watch" ] - }, - "ConfigMap": { - "name": "configmaps", - "shortname": "cm", - "apiGroup": "", - "namespaced": true, - "verbs": [ - "create", - "delete", - "deletecollection", - "get", - "list", - "patch", - "update", - "watch" - ] } } diff --git a/viya_ark_library/k8s/test_impl/response_data/api_resources_ingress_openshift.json b/viya_ark_library/k8s/test_impl/response_data/api_resources_ingress_openshift.json new file mode 100644 index 0000000..9dc0fad --- /dev/null +++ b/viya_ark_library/k8s/test_impl/response_data/api_resources_ingress_openshift.json @@ -0,0 +1,197 @@ +{ + "CASDeployment": { + "apiGroup": "viya.sas.com", + "name": "casdeployments", + "namespaced": true, + "shortname": "cas", + "verbs": [ + "delete", + "deletecollection", + "get", + "list", + "patch", + "create", + "update", + "watch" + ] + }, + "ConfigMap": { + "name": "configmaps", + "shortname": "cm", + "apiGroup": "", + "namespaced": true, + "verbs": [ + "create", + "delete", + "deletecollection", + "get", + "list", + "patch", + "update", + "watch" + ] + }, + "CronJob": { + "apiGroup": "batch", + "name": "cronjobs", + "namespaced": true, + "shortname": "cj", + "verbs": [ + "create", + "delete", + "deletecollection", + "get", + "list", + "patch", + "update", + "watch" + ] + }, + "Deployment": { + "apiGroup": "apps", + "name": "deployments", + "namespaced": true, + "shortname": "deploy", + "verbs": [ + "create", + "delete", + "deletecollection", + "get", + "list", + "patch", + "update", + "watch" + ] + 
}, + "Job": { + "apiGroup": "batch", + "name": "jobs", + "namespaced": true, + "shortname": "", + "verbs": [ + "create", + "delete", + "deletecollection", + "get", + "list", + "patch", + "update", + "watch" + ] + }, + "Node": { + "apiGroup": "", + "name": "nodes", + "namespaced": true, + "shortname": "no", + "verbs": [ + "create", + "delete", + "deletecollection", + "get", + "list", + "patch", + "update", + "watch" + ] + }, + "NodeMetrics": { + "apiGroup": "metrics.k8s.io", + "name": "nodes", + "namespaced": true, + "shortname": "", + "verbs": [ + "get", + "list" + ] + }, + "Pod": { + "apiGroup": "", + "name": "pods", + "namespaced": true, + "shortname": "po", + "verbs": [ + "create", + "delete", + "deletecollection", + "get", + "list", + "patch", + "update", + "watch" + ] + }, + "PodMetrics": { + "apiGroup": "metrics.k8s.io", + "name": "pods", + "namespaced": true, + "shortname": "", + "verbs": [ + "get", + "list" + ] + }, + "ReplicaSet": { + "apiGroup": "apps", + "name": "replicasets", + "namespaced": true, + "shortname": "rs", + "verbs": [ + "create", + "delete", + "deletecollection", + "get", + "list", + "patch", + "update", + "watch" + ] + }, + "Service": { + "apiGroup": "", + "name": "services", + "namespaced": true, + "shortname": "svc", + "verbs": [ + "create", + "delete", + "get", + "list", + "patch", + "update", + "watch" + ] + }, + "StatefulSet": { + "apiGroup": "apps", + "name": "statefulsets", + "namespaced": true, + "shortname": "sts", + "verbs": [ + "create", + "delete", + "deletecollection", + "get", + "list", + "patch", + "update", + "watch" + ] + }, + "Route": { + "apiGroup": "route.openshift.io/v1", + "name": "routes", + "namespaced": true, + "shortname": "", + "verbs": [ + "delete", + "deletecollection", + "get", + "list", + "patch", + "create", + "update", + "watch" + ] + } +} diff --git a/viya_ark_library/k8s/test_impl/response_data/resources_httpproxy.json b/viya_ark_library/k8s/test_impl/response_data/resources_httpproxy.json new file mode 
100644 index 0000000..1d7bfba --- /dev/null +++ b/viya_ark_library/k8s/test_impl/response_data/resources_httpproxy.json @@ -0,0 +1,82 @@ +[ + { + "apiVersion": "projectcontour.io/v1", + "kind": "HTTPProxy", + "metadata": { + "annotations": { + "cadence.sas.com/display-name": "Fast R/TR", + "cadence.sas.com/name": "fast", + "cadence.sas.com/release": "20200508.1588944122646", + "cadence.sas.com/version": "2020", + "kubectl.kubernetes.io/last-applied-configuration": "", + "nginx.ingress.kubernetes.io/proxy-body-size": "2048m", + "nginx.ingress.kubernetes.io/proxy-read-timeout": "300", + "sas.com/component-name": "sas-annotations", + "sas.com/component-version": "2.2.25-20200506.1588775452057", + "sas.com/kustomize-base": "base", + "sas.com/version": "2.2.25" + }, + "creationTimestamp": "2020-05-08T13:36:33Z", + "generation": 1, + "labels": { + "app.kubernetes.io/name": "sas-annotations", + "sas.com/admin": "namespace", + "sas.com/deployment": "sas-viya" + }, + "name": "sas-annotations", + "namespace": "test", + "resourceVersion": "3426839", + "selfLink": "/apis/projectcontour.io/v1/namespaces/test/httpproxy/sas-annotations", + "uid": "5ef2e87c-6826-43cf-88c3-24afec767e9d" + }, + "spec": { + "routes": [ + { + "conditions": [ + { + "prefix": "/annotations" + } + ], + "loadBalancerPolicy": { + "strategy": "Cookie" + }, + "pathRewritePolicy": { + "replacePrefix": [ + { + "replacement": "/annotations/" + } + ] + }, + "services": [ + { + "name": "sas-annotations", + "port": 80 + } + ], + "timeoutPolicy": { + "response": "300s" + } + }, + { + "conditions": [ + { + "prefix": "/annotations/" + } + ], + "loadBalancerPolicy": { + "strategy": "Cookie" + }, + "services": [ + { + "name": "sas-annotations", + "port": 80 + } + ], + "timeoutPolicy": { + "response": "300s" + } + } + ] + } + } +] \ No newline at end of file diff --git a/viya_ark_library/k8s/test_impl/response_data/resources_ingresses.json b/viya_ark_library/k8s/test_impl/response_data/resources_ingresses.json index 
290c77b..261a4f9 100644 --- a/viya_ark_library/k8s/test_impl/response_data/resources_ingresses.json +++ b/viya_ark_library/k8s/test_impl/response_data/resources_ingresses.json @@ -84,7 +84,7 @@ "name": "sas-annotations", "namespace": "test", "resourceVersion": "3426839", - "selfLink": "/apis/extensions/v1beta1/namespaces/test/ingresses/sas-annotations", + "selfLink": "/apis/networking.k8s.io/v1/namespaces/test/ingresses/sas-annotations", "uid": "5ab2defa-c50c-47db-8415-d9cb8b0de782" }, "spec": { diff --git a/viya_ark_library/k8s/test_impl/response_data/resources_routes.json b/viya_ark_library/k8s/test_impl/response_data/resources_routes.json new file mode 100644 index 0000000..ff935ab --- /dev/null +++ b/viya_ark_library/k8s/test_impl/response_data/resources_routes.json @@ -0,0 +1,46 @@ +[ + { + "apiVersion": "route.openshift.io/v1", + "kind": "Route", + "metadata": { + "annotations": { + "cadence.sas.com/display-name": "Fast R/TR", + "cadence.sas.com/name": "fast", + "cadence.sas.com/release": "20200508.1588944122646", + "cadence.sas.com/version": "2020", + "kubectl.kubernetes.io/last-applied-configuration": "", + "haproxy.router.openshift.io/timeout": "300s", + "router.openshift.io/cookie-same-site": "Lax", + "router.openshift.io/cookie_name": "sas-ocp-route", + "sas.com/component-name": "sas-annotations", + "sas.com/component-version": "2.2.25-20200506.1588775452057", + "sas.com/kustomize-base": "base", + "sas.com/version": "2.2.25" + }, + "creationTimestamp": "2020-05-08T13:36:33Z", + "generation": 1, + "labels": { + "app.kubernetes.io/name": "sas-annotations", + "sas.com/admin": "namespace", + "sas.com/deployment": "sas-viya" + }, + "name": "sas-annotations", + "namespace": "test", + "resourceVersion": "3426839", + "selfLink": "/apis/route.openshift.io/v1/namespaces/test/routes/sas-annotations", + "uid": "4fe02d32-df6a-46fb-8b8c-4b74c0e05474" + }, + "spec": { + "host": "k8s-master-node.test.sas.com", + "path": "/annotations", + "port": { + "targetPort": "http" 
+ }, + "to": { + "kind": "Service", + "name": "sas-annotations" + }, + "wildcardPolicy": "None" + } + } +] \ No newline at end of file diff --git a/viya_ark_library/k8s/test_impl/sas_kubectl_test.py b/viya_ark_library/k8s/test_impl/sas_kubectl_test.py index d3c467a..ce9cce2 100644 --- a/viya_ark_library/k8s/test_impl/sas_kubectl_test.py +++ b/viya_ark_library/k8s/test_impl/sas_kubectl_test.py @@ -19,10 +19,12 @@ from viya_ark_library.k8s.sas_k8s_objects import KubernetesApiResources, KubernetesMetrics, KubernetesResource from viya_ark_library.k8s.sas_kubectl_interface import KubectlInterface -_API_RESOURCES_BOTH_INGRESS_DATA_ = "api_resources_both_ingress.json" -_API_RESOURCES_ISTIO_ONLY_DATA_ = "api_resources_istio_only.json" -_API_RESOURCES_NGINX_ONLY_DATA_ = "api_resources_nginx_only.json" -_API_RESOURCES_NO_INGRESS_DATA_ = "api_resources_no_ingress.json" +_API_RESOURCES_All_INGRESSES_DATA_ = "api_resources_ingress_all.json" +_API_RESOURCES_CONTOUR_ONLY_DATA_ = "api_resources_ingress_contour.json" +_API_RESOURCES_ISTIO_ONLY_DATA_ = "api_resources_ingress_istio.json" +_API_RESOURCES_NGINX_ONLY_DATA_ = "api_resources_ingress_nginx.json" +_API_RESOURCES_NO_INGRESS_DATA_ = "api_resources_ingress_none.json" +_API_RESOURCES_OPENSHIFT_ONLY_DATA_ = "api_resources_ingress_openshift.json" _API_VERSIONS_DATA_ = "api_versions.json" _CONFIG_VIEW_DATA_ = "config_view.json" _TOP_NODES_DATA_ = "top_nodes.json" @@ -47,26 +49,32 @@ class IngressSimulator(Enum): Enumerated class representing the various ingress controller simulations that can be specified for the KubectlTest implementation. 
""" - # Neither VirtualService nor Ingress will be included in the resources returned by api_resources() # - NONE = 1 + # No ingress kinds will be included in the resources returned by api_resources() + NONE = 0 - # VirtualService will be omitted from the resources returned by api_resources() # - NGINX_ONLY = 2 + # All resource kinds will be returned by api_resources() but only Contour HTTPProxy objects will be found + ALL_CONTOUR_USED = 1 - # Ingress will be omitted from the resources returned by api_resources() # - ISTIO_ONLY = 3 + # All resource kinds will be returned by api_resources() but only Istio VirtualService objects will be found + ALL_ISTIO_USED = 2 - # Both VirtualService and Ingress will be included in the resources returned by api_resources() but only # - # Ingress objects will be defined # - BOTH_RESOURCES_NGINX_USED = 4 + # All resource kinds will be returned by api_resources() but only Ingress objects will be found + ALL_NGINX_USED = 3 - # Both VirtualService and Ingress will be included in the resources returned by api_resources() but only # - # VirtualService objects will be defined # - BOTH_RESOURCES_ISTIO_USED = 5 + # All resource kinds will be returned by api_resources() but only OpenShift Route objects will be found + ALL_OPENSHIFT_USED = 4 - # Both VirtualService and Ingress will be included in the resources returned by api_resources() and both # - # will be defined and available # - BOTH = 6 + # Only the Contour HTTPProxy kind will be included in the resources returned by api_resources() + ONLY_CONTOUR = 5 + + # Only the Istio VirtualService kind will be included in the resources returned by api_resources() + ONLY_ISTIO = 6 + + # Only the Ingress kind will be included in the resources returned by api_resources() + ONLY_NGINX = 7 + + # Only the OpenShift Route kind will be included in the resources returned by api_resources() + ONLY_OPENSHIFT = 8 ################################################################ # ### CLASS: 
KubectlTest.Values @@ -94,28 +102,71 @@ class Values(object): } COMPONENT_PROMETHEUS_RESOURCE_COUNT: int = len(COMPONENT_PROMETHEUS_RESOURCES_DICT) - # Component: sas-annotations + # Component: sas-annotations - all COMPONENT_SAS_ANNOTATIONS_DEPLOYMENT_NAME: Text = "sas-annotations" + COMPONENT_SAS_ANNOTATIONS_CONTOUR_HTTPPROXY_NAME: Text = "sas-annotations" COMPONENT_SAS_ANNOTATIONS_INGRESS_NAME_DEPRECATED_DEFINITION: Text = "sas-annotations-deprecated-definition" COMPONENT_SAS_ANNOTATIONS_INGRESS_NAME: Text = "sas-annotations" + COMPONENT_SAS_ANNOTATIONS_OPENSHIFT_ROUTE_NAME: Text = "sas-annotations" COMPONENT_SAS_ANNOTATIONS_POD_NAME: Text = "sas-annotations-58db55fd65-l2jrw" COMPONENT_SAS_ANNOTATIONS_REPLICA_SET_NAME: Text = "sas-annotations-58db55fd65" COMPONENT_SAS_ANNOTATIONS_SERVICE_NAME: Text = "sas-annotations" COMPONENT_SAS_ANNOTATIONS_VIRTUAL_SERVICE_NAME: Text = "sas-annotations" COMPONENT_SAS_ANNOTATIONS_NAME: Text = "sas-annotations" - COMPONENT_SAS_ANNOTATIONS_RESOURCE_DICT: Dict[Text, List[Text]] = { + + # Component: sas-annotations - No Ingress + COMPONENT_SAS_ANNOTATIONS_RESOURCE_DICT_NO_INGRESS: Dict[Text, List[Text]] = { + KubernetesResource.Kinds.DEPLOYMENT: [COMPONENT_SAS_ANNOTATIONS_DEPLOYMENT_NAME], + KubernetesResource.Kinds.POD: [COMPONENT_SAS_ANNOTATIONS_POD_NAME], + KubernetesResource.Kinds.REPLICA_SET: [COMPONENT_SAS_ANNOTATIONS_REPLICA_SET_NAME], + KubernetesResource.Kinds.SERVICE: [COMPONENT_SAS_ANNOTATIONS_SERVICE_NAME] + } + COMPONENT_SAS_ANNOTATIONS_RESOURCE_COUNT_NO_INGRESS: int = \ + len(COMPONENT_SAS_ANNOTATIONS_RESOURCE_DICT_NO_INGRESS) + + # Component: sas-annotations - Contour + COMPONENT_SAS_ANNOTATIONS_RESOURCE_DICT_CONTOUR: Dict[Text, List[Text]] = { KubernetesResource.Kinds.DEPLOYMENT: [COMPONENT_SAS_ANNOTATIONS_DEPLOYMENT_NAME], + KubernetesResource.Kinds.POD: [COMPONENT_SAS_ANNOTATIONS_POD_NAME], + KubernetesResource.Kinds.REPLICA_SET: [COMPONENT_SAS_ANNOTATIONS_REPLICA_SET_NAME], + KubernetesResource.Kinds.SERVICE: 
[COMPONENT_SAS_ANNOTATIONS_SERVICE_NAME], + KubernetesResource.Kinds.CONTOUR_HTTPPROXY: [COMPONENT_SAS_ANNOTATIONS_CONTOUR_HTTPPROXY_NAME], + } + COMPONENT_SAS_ANNOTATIONS_RESOURCE_COUNT_CONTOUR: int = len(COMPONENT_SAS_ANNOTATIONS_RESOURCE_DICT_CONTOUR) + + # Component: sas-annotations - Istio + COMPONENT_SAS_ANNOTATIONS_RESOURCE_DICT_ISTIO: Dict[Text, List[Text]] = { + KubernetesResource.Kinds.DEPLOYMENT: [COMPONENT_SAS_ANNOTATIONS_DEPLOYMENT_NAME], + KubernetesResource.Kinds.POD: [COMPONENT_SAS_ANNOTATIONS_POD_NAME], + KubernetesResource.Kinds.REPLICA_SET: [COMPONENT_SAS_ANNOTATIONS_REPLICA_SET_NAME], + KubernetesResource.Kinds.SERVICE: [COMPONENT_SAS_ANNOTATIONS_SERVICE_NAME], + KubernetesResource.Kinds.ISTIO_VIRTUAL_SERVICE: [COMPONENT_SAS_ANNOTATIONS_VIRTUAL_SERVICE_NAME], + } + COMPONENT_SAS_ANNOTATIONS_RESOURCE_COUNT_ISTIO: int = len(COMPONENT_SAS_ANNOTATIONS_RESOURCE_DICT_ISTIO) + + # Component: sas-annotations - NGINX + COMPONENT_SAS_ANNOTATIONS_RESOURCE_DICT_NGINX: Dict[Text, List[Text]] = { + KubernetesResource.Kinds.DEPLOYMENT: [COMPONENT_SAS_ANNOTATIONS_DEPLOYMENT_NAME], + KubernetesResource.Kinds.POD: [COMPONENT_SAS_ANNOTATIONS_POD_NAME], + KubernetesResource.Kinds.REPLICA_SET: [COMPONENT_SAS_ANNOTATIONS_REPLICA_SET_NAME], + KubernetesResource.Kinds.SERVICE: [COMPONENT_SAS_ANNOTATIONS_SERVICE_NAME], KubernetesResource.Kinds.INGRESS: [ COMPONENT_SAS_ANNOTATIONS_INGRESS_NAME, COMPONENT_SAS_ANNOTATIONS_INGRESS_NAME_DEPRECATED_DEFINITION ], + } + COMPONENT_SAS_ANNOTATIONS_RESOURCE_COUNT_NGINX: int = len(COMPONENT_SAS_ANNOTATIONS_RESOURCE_DICT_NGINX) + + # Component: sas-annotations - OpenShift + COMPONENT_SAS_ANNOTATIONS_RESOURCE_DICT_OPENSHIFT: Dict[Text, List[Text]] = { + KubernetesResource.Kinds.DEPLOYMENT: [COMPONENT_SAS_ANNOTATIONS_DEPLOYMENT_NAME], KubernetesResource.Kinds.POD: [COMPONENT_SAS_ANNOTATIONS_POD_NAME], KubernetesResource.Kinds.REPLICA_SET: [COMPONENT_SAS_ANNOTATIONS_REPLICA_SET_NAME], KubernetesResource.Kinds.SERVICE: 
[COMPONENT_SAS_ANNOTATIONS_SERVICE_NAME], - KubernetesResource.Kinds.ISTIO_VIRTUAL_SERVICE: [COMPONENT_SAS_ANNOTATIONS_VIRTUAL_SERVICE_NAME] + KubernetesResource.Kinds.OPENSHIFT_ROUTE: [COMPONENT_SAS_ANNOTATIONS_OPENSHIFT_ROUTE_NAME], } - COMPONENT_SAS_ANNOTATIONS_RESOURCE_COUNT: int = len(COMPONENT_SAS_ANNOTATIONS_RESOURCE_DICT) + COMPONENT_SAS_ANNOTATIONS_RESOURCE_COUNT_OPENSHIFT: int = len(COMPONENT_SAS_ANNOTATIONS_RESOURCE_DICT_OPENSHIFT) # Component: sas-cacheserver COMPONENT_SAS_CACHE_SERVER_POD_NAME: Text = "sas-cacheserver-0" @@ -185,8 +236,65 @@ class Values(object): ] COMPONENT_COUNT: int = len(COMPONENT_NAMES_LIST) - # Resource: all kinds - RESOURCE_KINDS_LIST: List[Text] = [ + # Resource: All + RESOURCE_LIST_ALL: List[Text] = [ + KubernetesResource.Kinds.CAS_DEPLOYMENT, + KubernetesResource.Kinds.CONFIGMAP, + KubernetesResource.Kinds.CONTOUR_HTTPPROXY, + KubernetesResource.Kinds.CRON_JOB, + KubernetesResource.Kinds.DEPLOYMENT, + KubernetesResource.Kinds.INGRESS, + KubernetesResource.Kinds.ISTIO_VIRTUAL_SERVICE, + KubernetesResource.Kinds.JOB, + KubernetesResource.Kinds.NODE, + KubernetesResource.Kinds.NODE_METRICS, + KubernetesResource.Kinds.OPENSHIFT_ROUTE, + KubernetesResource.Kinds.POD, + KubernetesResource.Kinds.POD_METRICS, + KubernetesResource.Kinds.REPLICA_SET, + KubernetesResource.Kinds.SERVICE, + KubernetesResource.Kinds.STATEFUL_SET + ] + RESOURCE_LIST_ALL_COUNT: int = len(RESOURCE_LIST_ALL) + + # Resource: Contour Ingress + RESOURCE_LIST_CONTOUR: List[Text] = [ + KubernetesResource.Kinds.CAS_DEPLOYMENT, + KubernetesResource.Kinds.CONFIGMAP, + KubernetesResource.Kinds.CONTOUR_HTTPPROXY, + KubernetesResource.Kinds.CRON_JOB, + KubernetesResource.Kinds.DEPLOYMENT, + KubernetesResource.Kinds.JOB, + KubernetesResource.Kinds.NODE, + KubernetesResource.Kinds.NODE_METRICS, + KubernetesResource.Kinds.POD, + KubernetesResource.Kinds.POD_METRICS, + KubernetesResource.Kinds.REPLICA_SET, + KubernetesResource.Kinds.SERVICE, + 
KubernetesResource.Kinds.STATEFUL_SET + ] + RESOURCE_LIST_CONTOUR_COUNT: int = len(RESOURCE_LIST_CONTOUR) + + # Resource: Istio Ingress + RESOURCE_LIST_ISTIO: List[Text] = [ + KubernetesResource.Kinds.CAS_DEPLOYMENT, + KubernetesResource.Kinds.CONFIGMAP, + KubernetesResource.Kinds.CRON_JOB, + KubernetesResource.Kinds.DEPLOYMENT, + KubernetesResource.Kinds.ISTIO_VIRTUAL_SERVICE, + KubernetesResource.Kinds.JOB, + KubernetesResource.Kinds.NODE, + KubernetesResource.Kinds.NODE_METRICS, + KubernetesResource.Kinds.POD, + KubernetesResource.Kinds.POD_METRICS, + KubernetesResource.Kinds.REPLICA_SET, + KubernetesResource.Kinds.SERVICE, + KubernetesResource.Kinds.STATEFUL_SET + ] + RESOURCE_LIST_ISTIO_COUNT: int = len(RESOURCE_LIST_ISTIO) + + # Resource: NGINX Ingress + RESOURCE_LIST_NGINX: List[Text] = [ KubernetesResource.Kinds.CAS_DEPLOYMENT, KubernetesResource.Kinds.CONFIGMAP, KubernetesResource.Kinds.CRON_JOB, @@ -194,13 +302,32 @@ class Values(object): KubernetesResource.Kinds.INGRESS, KubernetesResource.Kinds.JOB, KubernetesResource.Kinds.NODE, + KubernetesResource.Kinds.NODE_METRICS, KubernetesResource.Kinds.POD, + KubernetesResource.Kinds.POD_METRICS, KubernetesResource.Kinds.REPLICA_SET, KubernetesResource.Kinds.SERVICE, - KubernetesResource.Kinds.STATEFUL_SET, - KubernetesResource.Kinds.ISTIO_VIRTUAL_SERVICE + KubernetesResource.Kinds.STATEFUL_SET ] - RESOURCE_KINDS_COUNT: int = len(RESOURCE_KINDS_LIST) + RESOURCE_LIST_NGINX_COUNT: int = len(RESOURCE_LIST_NGINX) + + # Resource: OpenShift Ingress + RESOURCE_LIST_OPENSHIFT: List[Text] = [ + KubernetesResource.Kinds.CAS_DEPLOYMENT, + KubernetesResource.Kinds.CONFIGMAP, + KubernetesResource.Kinds.CRON_JOB, + KubernetesResource.Kinds.DEPLOYMENT, + KubernetesResource.Kinds.JOB, + KubernetesResource.Kinds.NODE, + KubernetesResource.Kinds.NODE_METRICS, + KubernetesResource.Kinds.OPENSHIFT_ROUTE, + KubernetesResource.Kinds.POD, + KubernetesResource.Kinds.POD_METRICS, + KubernetesResource.Kinds.REPLICA_SET, + 
KubernetesResource.Kinds.SERVICE, + KubernetesResource.Kinds.STATEFUL_SET + ] + RESOURCE_LIST_OPENSHIFT_COUNT: int = len(RESOURCE_LIST_OPENSHIFT) # Resource: CASDeployment RESOURCE_CAS_DEPLOYMENT_LIST: List[Text] = [ @@ -208,6 +335,12 @@ class Values(object): ] RESOURCE_CAS_DEPLOYMENT_COUNT: int = len(RESOURCE_CAS_DEPLOYMENT_LIST) + # Resource: Contour HTTPProxy + RESOURCE_HTTPPROXY_LIST: List[Text] = [ + COMPONENT_SAS_ANNOTATIONS_CONTOUR_HTTPPROXY_NAME + ] + RESOURCE_HTTPPROXY_COUNT: int = len(RESOURCE_HTTPPROXY_LIST) + # Resource: CronJob RESOURCE_CRON_JOB_LIST: List[Text] = [ COMPONENT_SAS_SCHEDULED_BACKUP_JOB_CRON_JOB_NAME @@ -228,6 +361,12 @@ class Values(object): ] RESOURCE_INGRESS_COUNT: int = len(RESOURCE_INGRESS_LIST) + # Resource: Istio VirtualService + RESOURCE_VIRTUAL_SERVICE_LIST: List[Text] = [ + COMPONENT_SAS_ANNOTATIONS_VIRTUAL_SERVICE_NAME + ] + RESOURCE_VIRTUAL_SERVICE_COUNT: int = len(RESOURCE_VIRTUAL_SERVICE_LIST) + # Resource: Job RESOURCE_JOB_LIST: List[Text] = [ COMPONENT_SAS_SCHEDULED_BACKUP_JOB_JOB_NAME @@ -241,6 +380,12 @@ class Values(object): ] RESOURCE_NODE_COUNT: int = len(RESOURCE_NODE_LIST) + # Resource: OpenShift Route + RESOURCE_ROUTE_LIST: List[Text] = [ + COMPONENT_SAS_ANNOTATIONS_OPENSHIFT_ROUTE_NAME + ] + RESOURCE_ROUTE_COUNT: int = len(RESOURCE_ROUTE_LIST) + # Resource: Pod RESOURCE_POD_LIST: List[Text] = [ COMPONENT_PROMETHEUS_POD_NAME, @@ -277,18 +422,12 @@ class Values(object): ] RESOURCE_STATEFUL_SET_COUNT: int = len(RESOURCE_STATEFUL_SET_LIST) - # Resource: VirtualService - RESOURCE_VIRTUAL_SERVICE_LIST: List[Text] = [ - COMPONENT_SAS_ANNOTATIONS_VIRTUAL_SERVICE_NAME - ] - RESOURCE_VIRTUAL_SERVICE_COUNT: int = len(RESOURCE_VIRTUAL_SERVICE_LIST) - ################################################################ # ### KubectlTest functions ################################################################ def __init__(self, - ingress_simulator: IngressSimulator = IngressSimulator.NGINX_ONLY, + ingress_simulator: 
IngressSimulator = IngressSimulator.ONLY_NGINX, include_metrics: bool = True, include_non_namespaced_resources: bool = True, namespace: Text = Values.NAMESPACE, @@ -317,15 +456,32 @@ def do(self, command: Text, ignore_errors: bool = False, success_rcs: Optional[L return "Not functional in testing implementation" def api_resources(self, ignore_errors: bool = False) -> KubernetesApiResources: + api_resources_data: Dict = dict() + # check for ingress simulation to determine which API resources should be returned + # None if self.ingress_simulator == self.IngressSimulator.NONE: - api_resources_data: Dict = KubectlTest._load_response_data(_API_RESOURCES_NO_INGRESS_DATA_) - elif self.ingress_simulator == self.IngressSimulator.NGINX_ONLY: - api_resources_data: Dict = KubectlTest._load_response_data(_API_RESOURCES_NGINX_ONLY_DATA_) - elif self.ingress_simulator == self.IngressSimulator.ISTIO_ONLY: - api_resources_data: Dict = KubectlTest._load_response_data(_API_RESOURCES_ISTIO_ONLY_DATA_) - else: - api_resources_data: Dict = KubectlTest._load_response_data(_API_RESOURCES_BOTH_INGRESS_DATA_) + api_resources_data = KubectlTest._load_response_data(_API_RESOURCES_NO_INGRESS_DATA_) + + # All included + elif 0 < self.ingress_simulator.value <= 4: + api_resources_data = KubectlTest._load_response_data(_API_RESOURCES_All_INGRESSES_DATA_) + + # Contour + elif self.ingress_simulator == self.IngressSimulator.ONLY_CONTOUR: + api_resources_data = KubectlTest._load_response_data(_API_RESOURCES_CONTOUR_ONLY_DATA_) + + # Istio + elif self.ingress_simulator == self.IngressSimulator.ONLY_ISTIO: + api_resources_data = KubectlTest._load_response_data(_API_RESOURCES_ISTIO_ONLY_DATA_) + + # NGINX + elif self.ingress_simulator == self.IngressSimulator.ONLY_NGINX: + api_resources_data = KubectlTest._load_response_data(_API_RESOURCES_NGINX_ONLY_DATA_) + + # OpenShift + elif self.ingress_simulator == self.IngressSimulator.ONLY_OPENSHIFT: + api_resources_data = 
KubectlTest._load_response_data(_API_RESOURCES_OPENSHIFT_ONLY_DATA_) return KubernetesApiResources(api_resources_data) @@ -358,13 +514,29 @@ def get_resources(self, k8s_api_resource: Text, raw: bool = False) -> Union[Dict if self.simulate_empty_deployment: return list() - # handle any ingress simulation - # in the scenarios below, the resources would be defined but there wouldn't be any existing objects - # so empty lists would be returned - if (self.ingress_simulator == self.IngressSimulator.BOTH_RESOURCES_NGINX_USED and k8s_api_resource.lower() == - "virtualservices") or \ - (self.ingress_simulator == self.IngressSimulator.BOTH_RESOURCES_ISTIO_USED and k8s_api_resource.lower() == - "ingresses"): + # handle any ingress simulation - this logic covers scenarios where no resources would be returned + # Contour + if k8s_api_resource.lower() == "httpproxy" and \ + self.ingress_simulator != self.IngressSimulator.ALL_CONTOUR_USED and \ + self.ingress_simulator != self.IngressSimulator.ONLY_CONTOUR: + return list() + + # Istio + elif k8s_api_resource.lower() == "virtualservices" and \ + self.ingress_simulator != self.IngressSimulator.ALL_ISTIO_USED and \ + self.ingress_simulator != self.IngressSimulator.ONLY_ISTIO: + return list() + + # NGINX + elif k8s_api_resource.lower() == "ingresses" and \ + self.ingress_simulator != self.IngressSimulator.ALL_NGINX_USED and \ + self.ingress_simulator != self.IngressSimulator.ONLY_NGINX: + return list() + + # OpenShift + elif k8s_api_resource.lower() == "routes" and \ + self.ingress_simulator != self.IngressSimulator.ALL_OPENSHIFT_USED and \ + self.ingress_simulator != self.IngressSimulator.ONLY_OPENSHIFT: return list() # handle non-namespaced resources @@ -398,14 +570,34 @@ def get_resource(self, k8s_api_resource: Text, resource_name: Text, raw: bool = raise CalledProcessError(1, f"kubectl get -n {self.namespace} {k8s_api_resource.lower()} {resource_name} " "-o json") - # handle any ingress simulation - # in the scenarios below, the 
kinds would be defined but no objects of that kind would be created, so trying - # to get one by name would raise a CalledProcessError - if (self.ingress_simulator == self.IngressSimulator.BOTH_RESOURCES_NGINX_USED and k8s_api_resource.lower() == - "virtualservices") or \ - (self.ingress_simulator == self.IngressSimulator.BOTH_RESOURCES_ISTIO_USED and k8s_api_resource.lower() == - "ingresses"): - raise CalledProcessError(1, f"kubectl get {k8s_api_resource.lower()} {resource_name} -o json") + # handle any ingress simulation - this logic covers scenarios where no resources would be returned + # Contour + if k8s_api_resource.lower() == "httpproxy" and \ + self.ingress_simulator != self.IngressSimulator.ALL_CONTOUR_USED and \ + self.ingress_simulator != self.IngressSimulator.ONLY_CONTOUR: + raise CalledProcessError(1, f"kubectl get -n {self.namespace} {k8s_api_resource.lower()} {resource_name} " + "-o json") + + # Istio + elif k8s_api_resource.lower() == "virtualservices" and \ + self.ingress_simulator != self.IngressSimulator.ALL_ISTIO_USED and \ + self.ingress_simulator != self.IngressSimulator.ONLY_ISTIO: + raise CalledProcessError(1, f"kubectl get -n {self.namespace} {k8s_api_resource.lower()} {resource_name} " + "-o json") + + # NGINX + elif k8s_api_resource.lower() == "ingresses" and \ + self.ingress_simulator != self.IngressSimulator.ALL_NGINX_USED and \ + self.ingress_simulator != self.IngressSimulator.ONLY_NGINX: + raise CalledProcessError(1, f"kubectl get -n {self.namespace} {k8s_api_resource.lower()} {resource_name} " + "-o json") + + # OpenShift + elif k8s_api_resource.lower() == "routes" and \ + self.ingress_simulator != self.IngressSimulator.ALL_OPENSHIFT_USED and \ + self.ingress_simulator != self.IngressSimulator.ONLY_OPENSHIFT: + raise CalledProcessError(1, f"kubectl get -n {self.namespace} {k8s_api_resource.lower()} {resource_name} " + "-o json") # handle non-namespaced resources if not self.include_non_namespaced_resources and 
(k8s_api_resource.lower() == "nodes" or From 87a9771f87e543dec6b61c162a59d1dbff0b9340 Mon Sep 17 00:00:00 2001 From: Josh Woods Date: Wed, 21 Jul 2021 15:44:54 -0400 Subject: [PATCH 13/22] (Issue #120) Deployment Report: Update supported Ingress definitions - fixing linting issues --- deployment_report/model/utils/ingress_util.py | 2 +- deployment_report/model/utils/test/test_metrics_util.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/deployment_report/model/utils/ingress_util.py b/deployment_report/model/utils/ingress_util.py index 8fcf2de..1725995 100644 --- a/deployment_report/model/utils/ingress_util.py +++ b/deployment_report/model/utils/ingress_util.py @@ -9,7 +9,7 @@ # SPDX-License-Identifier: Apache-2.0 ### # ### #################################################################### -from typing import Dict, List, Optional, Text +from typing import Dict, Optional, Text from deployment_report.model.static.viya_deployment_report_keys import \ ITEMS_KEY, \ diff --git a/deployment_report/model/utils/test/test_metrics_util.py b/deployment_report/model/utils/test/test_metrics_util.py index c9ac354..becb5a9 100644 --- a/deployment_report/model/utils/test/test_metrics_util.py +++ b/deployment_report/model/utils/test/test_metrics_util.py @@ -107,4 +107,4 @@ def test_get_node_metrics_unavailable(gathered_resources_only_nginx: Dict) -> No # make sure the metrics dictionary was not added for node in nodes[ITEMS_KEY].values(): - assert ReportKeys.ResourceDetails.Ext.METRICS_DICT not in node[ReportKeys.ResourceDetails.EXT_DICT] \ No newline at end of file + assert ReportKeys.ResourceDetails.Ext.METRICS_DICT not in node[ReportKeys.ResourceDetails.EXT_DICT] From 36f84fa0ccf9aa9f1e6cc8ffced31fa626737201 Mon Sep 17 00:00:00 2001 From: Josh Woods Date: Wed, 21 Jul 2021 15:49:13 -0400 Subject: [PATCH 14/22] (Issue #120) Deployment Report: Update supported Ingress definitions - updating conftest.py --- deployment_report/model/utils/test/conftest.py | 4 
++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/deployment_report/model/utils/test/conftest.py b/deployment_report/model/utils/test/conftest.py index fad2f64..5c5591c 100644 --- a/deployment_report/model/utils/test/conftest.py +++ b/deployment_report/model/utils/test/conftest.py @@ -13,7 +13,7 @@ from typing import Dict -from deployment_report.model.utils import resource_util +from ..resource_util import gather_details from viya_ark_library.k8s.sas_k8s_objects import KubernetesApiResources from viya_ark_library.k8s.test_impl.sas_kubectl_test import KubectlTest @@ -177,7 +177,7 @@ def _gathered_resources(ingress_simulator: KubectlTest.IngressSimulator) -> Dict gathered_resources: Dict = dict() for resource_kind in api_resources.as_dict().keys(): - resource_util.gather_details( + gather_details( kubectl=kubectl, gathered_resources=gathered_resources, api_resources=api_resources, From fc4596ebc33b56c47c6caf5604953434b0e7d6f0 Mon Sep 17 00:00:00 2001 From: Josh Woods Date: Wed, 21 Jul 2021 15:52:50 -0400 Subject: [PATCH 15/22] (Issue #120) Deployment Report: Update supported Ingress definitions - adding init file to test folder --- deployment_report/model/utils/test/__init__.py | 0 deployment_report/model/utils/test/conftest.py | 4 ++-- 2 files changed, 2 insertions(+), 2 deletions(-) create mode 100644 deployment_report/model/utils/test/__init__.py diff --git a/deployment_report/model/utils/test/__init__.py b/deployment_report/model/utils/test/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/deployment_report/model/utils/test/conftest.py b/deployment_report/model/utils/test/conftest.py index 5c5591c..5646d9d 100644 --- a/deployment_report/model/utils/test/conftest.py +++ b/deployment_report/model/utils/test/conftest.py @@ -13,7 +13,7 @@ from typing import Dict -from ..resource_util import gather_details +from deployment_report.model.utils import relationship_util from viya_ark_library.k8s.sas_k8s_objects import 
KubernetesApiResources from viya_ark_library.k8s.test_impl.sas_kubectl_test import KubectlTest @@ -177,7 +177,7 @@ def _gathered_resources(ingress_simulator: KubectlTest.IngressSimulator) -> Dict gathered_resources: Dict = dict() for resource_kind in api_resources.as_dict().keys(): - gather_details( + relationship_util.gather_details( kubectl=kubectl, gathered_resources=gathered_resources, api_resources=api_resources, From 3efa17861e89af42de98e2937deae11ae578b50d Mon Sep 17 00:00:00 2001 From: Josh Woods Date: Wed, 21 Jul 2021 15:54:34 -0400 Subject: [PATCH 16/22] (Issue #120) Deployment Report: Update supported Ingress definitions - fixing typo --- deployment_report/model/utils/test/conftest.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/deployment_report/model/utils/test/conftest.py b/deployment_report/model/utils/test/conftest.py index 5646d9d..fad2f64 100644 --- a/deployment_report/model/utils/test/conftest.py +++ b/deployment_report/model/utils/test/conftest.py @@ -13,7 +13,7 @@ from typing import Dict -from deployment_report.model.utils import relationship_util +from deployment_report.model.utils import resource_util from viya_ark_library.k8s.sas_k8s_objects import KubernetesApiResources from viya_ark_library.k8s.test_impl.sas_kubectl_test import KubectlTest @@ -177,7 +177,7 @@ def _gathered_resources(ingress_simulator: KubectlTest.IngressSimulator) -> Dict gathered_resources: Dict = dict() for resource_kind in api_resources.as_dict().keys(): - relationship_util.gather_details( + resource_util.gather_details( kubectl=kubectl, gathered_resources=gathered_resources, api_resources=api_resources, From f3f05476125ec9c8d9ae6d2328c0d8bc8a8985ab Mon Sep 17 00:00:00 2001 From: Latha Sivakumar Date: Tue, 27 Jul 2021 13:30:26 -0400 Subject: [PATCH 17/22] (#issue_124) Correct syntax error in example commands for Known Issues --- pre_install_report/README.md | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git 
a/pre_install_report/README.md b/pre_install_report/README.md index 64a0328..d9b028f 100644 --- a/pre_install_report/README.md +++ b/pre_install_report/README.md @@ -84,19 +84,19 @@ You can modify the /viya4-ark/pre_install_report/viya_check_l The following issue may impact the performance and expected results of this tool. - All Nodes in a cluster must be in the READY state before running the tool. - - If all the Nodes are not in the READY state, the tool takes longer to run. Wait for it to complete. - - Also, the tool may not be able to clean up the pods and replicaset created in the specified namespace as shown in the example output below. If that happens, the pods and replicaset must be manually deleted. - They will look similar to the resources shown below: +- If all the Nodes are not in the READY state, the tool takes longer to run. Wait for it to complete. + Also, the tool may not be able to clean up the pods and replicaset created in the specified namespace as shown in the example output below. If that happens, the pods and replicaset must be manually deleted. 
+ They will look similar to the resources shown below: ``` - NAME READY STATUS RESTARTS AGE - pod/hello-world-6665cf748b-5x2jq 0/1 Pending 0 115m - pod/hello-world-6665cf748b-tkq79 0/1 Pending 0 115m + NAME READY STATUS RESTARTS AGE + pod/hello-world-6665cf748b-5x2jq 0/1 Pending 0 115m + pod/hello-world-6665cf748b-tkq79 0/1 Pending 0 115m - NAME DESIRED CURRENT READY AGE - replicaset.apps/hello-world-6665cf748b 2 2 0 115m + NAME DESIRED CURRENT READY AGE + replicaset.apps/hello-world-6665cf748b 2 2 0 115m Suggested commands to delete resources before running the tool again: kubectl -n delete replicaset.apps/hello-world-6665cf748b - kubectl -n delete pos/hello-world-6665cf748b-5x2jq + kubectl -n delete pod/hello-world-6665cf748b-5x2jq kubectl -n delete pod/hello-world-6665cf748b-tkq79 ``` From 4fed0e2777a74d4fb8f8524eaef25b235621e23a Mon Sep 17 00:00:00 2001 From: Latha Sivakumar Date: Tue, 27 Jul 2021 15:17:06 -0400 Subject: [PATCH 18/22] (#issue_126) Error in the instructions to collect the INGRESS HOST --- pre_install_report/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pre_install_report/README.md b/pre_install_report/README.md index d9b028f..9c0eeac 100644 --- a/pre_install_report/README.md +++ b/pre_install_report/README.md @@ -56,7 +56,7 @@ ingress-nginx-controller LoadBalancer 10.0.00.000 55.147.22.101 Use the following commands to determine the parameter values: ``` -$ export INGRESS_HOST=externalIP=$(kubectl -n get service -o jsonpath='{.status.loadBalancer.ingress[*].ip}') +$ export INGRESS_HOST=$(kubectl -n get service -o jsonpath='{.status.loadBalancer.ingress[*].ip}') $ export INGRESS_HTTP_PORT=$(kubectl -n get service -o jsonpath='{.spec.ports[?(@.name=="http")].port}') $ export INGRESS_HTTPS_PORT=$(kubectl -n get service -o jsonpath='{.spec.ports[?(@.name=="https")].port}') ``` From 2e34140cc85c8cb76454085daf39a546499513b1 Mon Sep 17 00:00:00 2001 From: Latha Sivakumar Date: Tue, 27 Jul 2021 16:57:57 -0400 Subject: 
[PATCH 19/22] (#issue 128) Usage example fails as documented; needs to be python3 --- pre_install_report/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pre_install_report/README.md b/pre_install_report/README.md index 9c0eeac..484a382 100644 --- a/pre_install_report/README.md +++ b/pre_install_report/README.md @@ -29,7 +29,7 @@ After obtaining the latest version of this tool, cd to `/viya The following command provides usage details: ``` -python viya-ark.py pre-install-report -h +python3 viya-ark.py pre-install-report -h ``` **Note:** The tool currently expects an NGINX Ingress controller. Other Ingress controllers are not evaluated. From eeaba567bb2d7bd951d92352823037f484b5c5c0 Mon Sep 17 00:00:00 2001 From: Josh Woods Date: Tue, 3 Aug 2021 10:40:26 -0400 Subject: [PATCH 20/22] (Issue #130) Deployment Report: Recursion error when generating report --- deployment_report/model/utils/component_util.py | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/deployment_report/model/utils/component_util.py b/deployment_report/model/utils/component_util.py index aca623a..a90361d 100644 --- a/deployment_report/model/utils/component_util.py +++ b/deployment_report/model/utils/component_util.py @@ -70,7 +70,14 @@ def aggregate_resources(resource_details: Dict, gathered_resources: Dict, compon # aggregate the related resource if related_resource_details is not None: - aggregate_resources(related_resource_details, gathered_resources, component) + try: + aggregate_resources(related_resource_details, gathered_resources, component) + except RecursionError: + # TODO: refactor this error handling; recursion errors are being raised by ambiguous results + # from "kubectl get pgclusters" at 2021.1.4 where the "crunchydata.com/v1" version of Pgcluster + # shadows the "webinfdsvr.sas.com/v1" version of Pgcluster + # continue to next related item if a recursion error is hit + continue # if this is the last resource in the chain and the 
component doesn't have a name determined from an annotation, # set a name based on the available values From 531388933cbf9583e272e3983784111fabf40cbc Mon Sep 17 00:00:00 2001 From: Josh Woods Date: Tue, 3 Aug 2021 10:42:32 -0400 Subject: [PATCH 21/22] (Issue #130) Deployment Report: Recursion error when generating report --- deployment_report/model/utils/component_util.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/deployment_report/model/utils/component_util.py b/deployment_report/model/utils/component_util.py index a90361d..0685fad 100644 --- a/deployment_report/model/utils/component_util.py +++ b/deployment_report/model/utils/component_util.py @@ -73,10 +73,8 @@ def aggregate_resources(resource_details: Dict, gathered_resources: Dict, compon try: aggregate_resources(related_resource_details, gathered_resources, component) except RecursionError: - # TODO: refactor this error handling; recursion errors are being raised by ambiguous results - # from "kubectl get pgclusters" at 2021.1.4 where the "crunchydata.com/v1" version of Pgcluster - # shadows the "webinfdsvr.sas.com/v1" version of Pgcluster - # continue to next related item if a recursion error is hit + # TODO: refactor this error handling; update kubectl get calls to use kind name and group + # for now, move on if a recursion error is hit continue # if this is the last resource in the chain and the component doesn't have a name determined from an annotation, From 120de1582d5171193d9044af67659b3f996b6c0d Mon Sep 17 00:00:00 2001 From: Amy Ho <39809594+cuddlehub@users.noreply.github.com> Date: Thu, 5 Aug 2021 15:23:14 -0400 Subject: [PATCH 22/22] (VIYAARKCD-221) Remove the storage section for now (#132) Temparily remove the storage section which will be available in the next release. 
--- .../templates/viya_deployment_report.html.j2 | 18 ------------------ 1 file changed, 18 deletions(-) diff --git a/deployment_report/templates/viya_deployment_report.html.j2 b/deployment_report/templates/viya_deployment_report.html.j2 index d6c74e4..63f5c82 100644 --- a/deployment_report/templates/viya_deployment_report.html.j2 +++ b/deployment_report/templates/viya_deployment_report.html.j2 @@ -129,24 +129,6 @@ {# Cluster Overview: Kubernetes Versions Accordion #} - {# Cluster Overview: Storage Accordion #} - {% set kind_name = "Storage" %} -
-

Storage

-
- Database - - {% for key, value in report_data.kubernetes.dbInfo.items() %} - - - - - {% endfor %} -
{{key}}{{value}}
-
-
- {# Cluster Overview: Storage Accordion #} - {# Cluster Overview: Nodes Accordion #} {% if report_data.kubernetes.nodes.count > 0 %}