From 390a5e102cecf7c6e392d0f2cb293540fbe8188c Mon Sep 17 00:00:00 2001 From: Jamie Wiebe Date: Wed, 24 Nov 2021 12:03:50 +0100 Subject: [PATCH 1/4] switch to official kind --- .semaphore/semaphore.yml | 1 + tests/conftest.py | 6 +- tests/fiaas_deploy_daemon/conftest.py | 8 +- .../fiaas_deploy_daemon/test_bootstrap_e2e.py | 59 +++---- tests/fiaas_deploy_daemon/test_e2e.py | 158 ++++++------------ tests/fiaas_deploy_daemon/utils.py | 121 ++++++-------- 6 files changed, 135 insertions(+), 218 deletions(-) diff --git a/.semaphore/semaphore.yml b/.semaphore/semaphore.yml index a0f7c8e3..1904d512 100644 --- a/.semaphore/semaphore.yml +++ b/.semaphore/semaphore.yml @@ -34,6 +34,7 @@ blocks: - docker run --rm --privileged multiarch/qemu-user-static:4.2.0-7 --reset -p yes - export PATH="$PATH:$HOME/.local/bin" - pip install --user .[ci] + - curl -Lo $HOME/.local/bin/kind https://kind.sigs.k8s.io/dl/v0.11.1/kind-linux-amd64 && chmod +x $HOME/.local/bin/kind - tox -e codestyle,test # Build docker image - docker login --username "${DOCKER_USERNAME}" --password-stdin <<< "${DOCKER_PASSWORD}" diff --git a/tests/conftest.py b/tests/conftest.py index b402e4ee..6ddc3ca6 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -164,16 +164,12 @@ def dockerize(test_request, cert_path, service_type, k8s_version, port, apiserve "-i", "--rm", "-e", "NAMESPACE", "--name", container_name, + "--network=kind", "--publish", "{port}:{port}".format(port=port), "--mount", "type=bind,src={},dst={},ro".format(cert_path, cert_path), # make `kubernetes` resolve to the apiserver's IP to make it possible to validate its TLS cert "--add-host", "kubernetes:{}".format(apiserver_ip), ] - if not _is_macos(): - # Linux needs host networking to make the fiaas-deploy-daemon port available on localhost when running it - # in a container. To do the same thing on Docker for mac it is enough to use --publish, and enabling host - # networking will make it impossible to connect to the port. - args += ["--network", "host"] return args + ["fiaas/fiaas-deploy-daemon:latest"] if request.config.getoption(DOCKER_FOR_E2E_OPTION): diff --git a/tests/fiaas_deploy_daemon/conftest.py b/tests/fiaas_deploy_daemon/conftest.py index b46068bb..c42197ce 100644 --- a/tests/fiaas_deploy_daemon/conftest.py +++ b/tests/fiaas_deploy_daemon/conftest.py @@ -191,11 +191,11 @@ def _mock_namespace_file_open(name, *args, **kwargs): @pytest.fixture(scope="session", params=( "v1.15.12", - "v1.16.13", - "v1.18.6", - "v1.19.4", + "v1.16.15", + "v1.18.19", + "v1.19.11", "v1.20.7", - pytest.param("v1.21.1", marks=pytest.mark.e2e_latest) + pytest.param("v1.21.2", marks=pytest.mark.e2e_latest) )) def k8s_version(request): yield request.param diff --git a/tests/fiaas_deploy_daemon/test_bootstrap_e2e.py b/tests/fiaas_deploy_daemon/test_bootstrap_e2e.py index 60a62775..b55a3570 100644 --- a/tests/fiaas_deploy_daemon/test_bootstrap_e2e.py +++ b/tests/fiaas_deploy_daemon/test_bootstrap_e2e.py @@ -14,13 +14,11 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
-import contextlib import os import os.path import subprocess import sys import uuid -from datetime import datetime import pytest from k8s import config @@ -87,7 +85,7 @@ class TestBootstrapE2E(object): @pytest.fixture(scope="module") def kubernetes(self, k8s_version): try: - name = "_".join(("bootstrap", k8s_version, str(uuid.uuid4()))) + name = str(uuid.uuid4()) kind = KindWrapper(k8s_version, name) try: yield kind.start() @@ -97,18 +95,6 @@ def kubernetes(self, k8s_version): msg = "Unable to run kind: %s" pytest.fail(msg % str(e)) - @pytest.fixture - def kind_logger(self, kubernetes): - @contextlib.contextmanager - def wrapped(): - start_time = datetime.now() - try: - yield - finally: - kubernetes["log_dumper"](since=start_time, until=datetime.now()) - - return wrapped - @pytest.fixture(autouse=True) def k8s_client(self, kubernetes): Client.clear_session() @@ -148,38 +134,37 @@ def custom_resource_definition_test_case(self, fiaas_path, namespace, labels, ex spec = FiaasApplicationSpec(application=name, image=IMAGE, config=fiaas_yml) return name, FiaasApplication(metadata=metadata, spec=spec), expected - def test_bootstrap_crd(self, request, kubernetes, k8s_version, use_docker_for_e2e, kind_logger): - with kind_logger(): - skip_if_crd_not_supported(k8s_version) + def test_bootstrap_crd(self, request, kubernetes, k8s_version, use_docker_for_e2e): + skip_if_crd_not_supported(k8s_version) - CrdWatcher.create_custom_resource_definitions() - wait_until(crd_available(kubernetes, timeout=TIMEOUT), "CRD available", RuntimeError, patience=PATIENCE) + CrdWatcher.create_custom_resource_definitions() + wait_until(crd_available(kubernetes, timeout=TIMEOUT), "CRD available", RuntimeError, patience=PATIENCE) - def prepare_test_case(test_case): - name, fiaas_application, expected = self.custom_resource_definition_test_case(*test_case) + def prepare_test_case(test_case): + name, fiaas_application, expected = self.custom_resource_definition_test_case(*test_case) - ensure_resources_not_exists(name, expected, fiaas_application.metadata.namespace) + ensure_resources_not_exists(name, expected, fiaas_application.metadata.namespace) - fiaas_application.save() + fiaas_application.save() - return name, fiaas_application.metadata.namespace, fiaas_application.metadata.uid, expected + return name, fiaas_application.metadata.namespace, fiaas_application.metadata.uid, expected - expectations = [prepare_test_case(test_case) for test_case in TEST_CASES] + expectations = [prepare_test_case(test_case) for test_case in TEST_CASES] - exit_code = self.run_bootstrap(request, kubernetes, k8s_version, use_docker_for_e2e) - assert exit_code == 0 + exit_code = self.run_bootstrap(request, kubernetes, k8s_version, use_docker_for_e2e) + assert exit_code == 0 - def success(): - all(deploy_successful(name, namespace, app_uid, expected) for name, namespace, app_uid, expected in expectations) + def success(): + all(deploy_successful(name, namespace, app_uid, expected) for name, namespace, app_uid, expected in expectations) - wait_until(success, "CRD bootstrapping was successful", patience=PATIENCE) + wait_until(success, "CRD bootstrapping was successful", patience=PATIENCE) - for name, namespace, app_uid, expected in expectations: - for kind in expected.keys(): - try: - kind.delete(name, namespace=namespace) - except NotFound: - pass # already missing + for name, namespace, app_uid, expected in expectations: + for kind in expected.keys(): + try: + kind.delete(name, namespace=namespace) + except NotFound: + pass # already missing def 
ensure_resources_not_exists(name, expected, namespace): diff --git a/tests/fiaas_deploy_daemon/test_e2e.py b/tests/fiaas_deploy_daemon/test_e2e.py index 1fe84014..46ee0159 100644 --- a/tests/fiaas_deploy_daemon/test_e2e.py +++ b/tests/fiaas_deploy_daemon/test_e2e.py @@ -17,13 +17,11 @@ from __future__ import absolute_import, print_function -import contextlib import os import subprocess import sys import time import uuid -from datetime import datetime import pytest import requests @@ -66,7 +64,7 @@ def service_type(self, request): @pytest.fixture(scope="module") def kubernetes(self, service_type, k8s_version): try: - name = "_".join((service_type, k8s_version, str(uuid.uuid4()))) + name = str(uuid.uuid4()) kind = KindWrapper(k8s_version, name) try: yield kind.start() @@ -76,47 +74,8 @@ def kubernetes(self, service_type, k8s_version): msg = "Unable to run kind: %s" pytest.fail(msg % str(e)) - @pytest.fixture(scope="module") - def kubernetes_per_app_service_account(self, k8s_version): - service_type = "ClusterIP" - try: - name = "_".join((service_type, k8s_version, str(uuid.uuid4()))) - kind = KindWrapper(k8s_version, name) - try: - yield kind.start() - finally: - kind.delete() - except Exception as e: - msg = "Unable to run kind: %s" - pytest.fail(msg % str(e)) - - @pytest.fixture - def kind_logger(self, kubernetes): - return self.get_wrapped(kubernetes) - - @pytest.fixture - def kind_logger_per_app_service_account(self, kubernetes_per_app_service_account): - return self.get_wrapped(kubernetes_per_app_service_account) - - def get_wrapped(self, kubernetes): - @contextlib.contextmanager - def wrapped(): - start_time = datetime.now() - try: - yield - finally: - kubernetes["log_dumper"](since=start_time, until=datetime.now()) - return wrapped - @pytest.fixture() def k8s_client(self, kubernetes): - self.prepare_k8s_client(kubernetes) - - @pytest.fixture() - def k8s_client_service_account(self, kubernetes_per_app_service_account): - self.prepare_k8s_client(kubernetes_per_app_service_account) - - def prepare_k8s_client(self, kubernetes): Client.clear_session() config.api_server = kubernetes["host-to-container-server"] config.debug = True @@ -137,15 +96,15 @@ def fdd(self, request, kubernetes, service_type, k8s_version, use_docker_for_e2e self._end_popen(daemon) @pytest.fixture(scope="module") - def fdd_service_account(self, request, kubernetes_per_app_service_account, k8s_version, use_docker_for_e2e): - args, port, ready = self.prepare_fdd(request, kubernetes_per_app_service_account, k8s_version, + def fdd_service_account(self, request, kubernetes, k8s_version, use_docker_for_e2e): + args, port, ready = self.prepare_fdd(request, kubernetes, k8s_version, use_docker_for_e2e, "ClusterIP", service_account=True) try: daemon = subprocess.Popen(args, stdout=sys.stderr, env=merge_dicts(os.environ, {"NAMESPACE": "default"})) time.sleep(1) if daemon.poll() is not None: pytest.fail("fiaas-deploy-daemon has crashed after startup, inspect logs") - self.wait_until_fdd_ready(k8s_version, kubernetes_per_app_service_account, ready) + self.wait_until_fdd_ready(k8s_version, kubernetes, ready) yield "http://localhost:{}/fiaas".format(port) finally: self._end_popen(daemon) @@ -366,6 +325,7 @@ def _resource_labels(self, param): return fiaas_path, expected, additional_labels def _ensure_clean(self, name, expected): + pass kinds = self._select_kinds(expected) for kind in kinds: try: @@ -444,20 +404,13 @@ def cleanup_complete(): wait_until(cleanup_complete, patience=PATIENCE) @pytest.mark.usefixtures("fdd", "k8s_client") - 
def test_custom_resource_definition_deploy_without_service_account(self, - custom_resource_definition, - service_type, - kind_logger): - with kind_logger(): - self.run_crd_deploy(custom_resource_definition, service_type) - - @pytest.mark.usefixtures("fdd_service_account", "k8s_client_service_account") - def test_custom_resource_definition_deploy_with_service_account(self, - custom_resource_definition_service_account, - kind_logger_per_app_service_account): + def test_custom_resource_definition_deploy_without_service_account(self, custom_resource_definition, service_type): + self.run_crd_deploy(custom_resource_definition, service_type) + + @pytest.mark.usefixtures("fdd_service_account", "k8s_client") + def test_custom_resource_definition_deploy_with_service_account(self, custom_resource_definition_service_account): service_type = "ClusterIP" - with kind_logger_per_app_service_account(): - self.run_crd_deploy(custom_resource_definition_service_account, service_type, service_account=True) + self.run_crd_deploy(custom_resource_definition_service_account, service_type, service_account=True) @pytest.mark.usefixtures("fdd", "k8s_client") @pytest.mark.parametrize("input, expected", [ @@ -474,65 +427,64 @@ def test_custom_resource_definition_deploy_with_service_account(self, "v3-data-examples-tls-issuer-override-1": "e2e_expected/tls_issuer_override2.yml" }) ]) - def test_multiple_ingresses(self, request, kind_logger, input, expected): - with kind_logger(): - fiaas_path = "v3/data/examples/%s.yml" % input - fiaas_yml = read_yml(request.fspath.dirpath().join("specs").join(fiaas_path).strpath) + def test_multiple_ingresses(self, request, input, expected): + fiaas_path = "v3/data/examples/%s.yml" % input + fiaas_yml = read_yml(request.fspath.dirpath().join("specs").join(fiaas_path).strpath) - name = sanitize_resource_name(fiaas_path) + name = sanitize_resource_name(fiaas_path) - expected = {k: read_yml(request.fspath.dirpath().join(v).strpath) for (k, v) in expected.items()} - metadata = ObjectMeta(name=name, namespace="default", labels={"fiaas/deployment_id": DEPLOYMENT_ID1}) - spec = FiaasApplicationSpec(application=name, image=IMAGE1, config=fiaas_yml) - fiaas_application = FiaasApplication(metadata=metadata, spec=spec) + expected = {k: read_yml(request.fspath.dirpath().join(v).strpath) for (k, v) in expected.items()} + metadata = ObjectMeta(name=name, namespace="default", labels={"fiaas/deployment_id": DEPLOYMENT_ID1}) + spec = FiaasApplicationSpec(application=name, image=IMAGE1, config=fiaas_yml) + fiaas_application = FiaasApplication(metadata=metadata, spec=spec) - fiaas_application.save() - app_uid = fiaas_application.metadata.uid + fiaas_application.save() + app_uid = fiaas_application.metadata.uid - # Check that deployment status is RUNNING - def _assert_status(): - status = FiaasApplicationStatus.get(create_name(name, DEPLOYMENT_ID1)) - assert status.result == u"RUNNING" - assert len(status.logs) > 0 - assert any("Saving result RUNNING for default/{}".format(name) in line for line in status.logs) + # Check that deployment status is RUNNING + def _assert_status(): + status = FiaasApplicationStatus.get(create_name(name, DEPLOYMENT_ID1)) + assert status.result == u"RUNNING" + assert len(status.logs) > 0 + assert any("Saving result RUNNING for default/{}".format(name) in line for line in status.logs) - wait_until(_assert_status, patience=PATIENCE) + wait_until(_assert_status, patience=PATIENCE) - def _check_two_ingresses(): - assert Ingress.get(name) - assert Ingress.get("{}-1".format(name)) + 
def _check_two_ingresses(): + assert Ingress.get(name) + assert Ingress.get("{}-1".format(name)) - for ingress_name, expected_dict in expected.items(): - actual = Ingress.get(ingress_name) - assert_k8s_resource_matches(actual, expected_dict, IMAGE1, None, DEPLOYMENT_ID1, None, app_uid) + for ingress_name, expected_dict in expected.items(): + actual = Ingress.get(ingress_name) + assert_k8s_resource_matches(actual, expected_dict, IMAGE1, None, DEPLOYMENT_ID1, None, app_uid) - wait_until(_check_two_ingresses, patience=PATIENCE) + wait_until(_check_two_ingresses, patience=PATIENCE) - # Remove 2nd ingress to make sure cleanup works - fiaas_application.spec.config["ingress"].pop() - if not fiaas_application.spec.config["ingress"]: - # if the test contains only one ingress, - # deleting the list will force the creation of the default ingress - del fiaas_application.spec.config["ingress"] - fiaas_application.metadata.labels["fiaas/deployment_id"] = DEPLOYMENT_ID2 - fiaas_application.save() + # Remove 2nd ingress to make sure cleanup works + fiaas_application.spec.config["ingress"].pop() + if not fiaas_application.spec.config["ingress"]: + # if the test contains only one ingress, + # deleting the list will force the creation of the default ingress + del fiaas_application.spec.config["ingress"] + fiaas_application.metadata.labels["fiaas/deployment_id"] = DEPLOYMENT_ID2 + fiaas_application.save() - def _check_one_ingress(): - assert Ingress.get(name) - with pytest.raises(NotFound): - Ingress.get("{}-1".format(name)) + def _check_one_ingress(): + assert Ingress.get(name) + with pytest.raises(NotFound): + Ingress.get("{}-1".format(name)) - wait_until(_check_one_ingress, patience=PATIENCE) + wait_until(_check_one_ingress, patience=PATIENCE) - # Cleanup - FiaasApplication.delete(name) + # Cleanup + FiaasApplication.delete(name) - def cleanup_complete(): - for name, _ in expected.items(): - with pytest.raises(NotFound): - Ingress.get(name) + def cleanup_complete(): + for name, _ in expected.items(): + with pytest.raises(NotFound): + Ingress.get(name) - wait_until(cleanup_complete, patience=PATIENCE) + wait_until(cleanup_complete, patience=PATIENCE) def _deploy_success(name, service_type, image, expected, deployment_id, strongbox_groups=None, app_uid=None): diff --git a/tests/fiaas_deploy_daemon/utils.py b/tests/fiaas_deploy_daemon/utils.py index 5c3350c4..31c826e1 100644 --- a/tests/fiaas_deploy_daemon/utils.py +++ b/tests/fiaas_deploy_daemon/utils.py @@ -19,16 +19,16 @@ import os import re import socket +import subprocess import sys import tempfile import time import traceback from copy import deepcopy -from collections import defaultdict from datetime import datetime from urlparse import urljoin -import docker +import json import pytest import requests import yaml @@ -65,8 +65,7 @@ def wait_until(action, description=None, exception_class=AssertionError, patienc if cause: message.append("\nThe last exception was:\n") message.extend(cause) - header = "Gave up waiting for {} after {} seconds at {}".format(description, patience, - datetime.now().isoformat(" ")) + header = "Gave up waiting for {} after {} seconds at {}".format(description, patience, datetime.now().isoformat(" ")) message.insert(0, header) raise exception_class("".join(message)) @@ -80,11 +79,14 @@ def crd_available(kubernetes, timeout=5): session.cert = (kubernetes["client-cert"], kubernetes["client-key"]) def _crd_available(): - plog("Checking if CRDs are available") for url in (app_url, status_url): - plog("Checking %s" % url) + 
plog("Check if CRDs are available at %s" % url) resp = session.get(url, timeout=timeout) - resp.raise_for_status() + try: + resp.raise_for_status() + except Exception as e: + plog(e) + raise plog("!!!!! %s is available !!!!" % url) return _crd_available @@ -257,95 +259,71 @@ def get_unbound_port(): class KindWrapper(object): - DOCKER_IMAGES = defaultdict(lambda: "bsycorp/kind") - # old bsycorp/kind versions isn't being updated, and the latest version has an expired cert - # See https://github.com/fiaas/fiaas-deploy-daemon/pull/45 - DOCKER_IMAGES["v1.15.12"] = "fiaas/kind" - # Created the docker image in fiaas/kind because the tests are not passing with v1.16.15 in semaphore-ci - DOCKER_IMAGES["v1.16.13"] = "fiaas/kind" - def __init__(self, k8s_version, name): self.k8s_version = k8s_version self.name = name # on Docker for mac, directories under $TMPDIR can't be mounted by default. use /tmp, which works tmp_dir = '/tmp' if _is_macos() else None self._workdir = tempfile.mkdtemp(prefix="kind-{}-".format(name), dir=tmp_dir) - self._client = docker.from_env() - self._container = None + self._kubeconfig = os.path.join(self._workdir, "kubeconfig") def start(self): + plog("creating kubeconfig at " + self._kubeconfig) + image_name = "kindest/node:" + self.k8s_version + args = ["kind", "create", "cluster", "--name="+self.name, "--kubeconfig="+self._kubeconfig, "--image="+image_name, "--wait=40s"] + output = None try: - self._start() - in_container_server_ip, api_port, config_port = self._get_ports() - wait_until(self._endpoint_ready(config_port, "config"), "config available") - resp = requests.get("http://localhost:{}/config".format(config_port)) - resp.raise_for_status() - config = yaml.safe_load(resp.content) + output, code = self._run_cmd(args) + if code != 0: + raise Exception("kind returned status code {}".format(code)) + + with open(self._kubeconfig, 'r') as f: + config = yaml.safe_load(f.read()) api_cert = self._save_to_file("api_cert", config["clusters"][-1]["cluster"]["certificate-authority-data"]) client_cert = self._save_to_file("client_cert", config["users"][-1]["user"]["client-certificate-data"]) client_key = self._save_to_file("client_key", config["users"][-1]["user"]["client-key-data"]) + apiserver_url = config["clusters"][-1]["cluster"]["server"] + + container_name = self.name + "-control-plane" + inspect_output, code = self._run_cmd(["docker", "inspect", container_name]) + if code != 0: + output = inspect_output + raise Exception("docker inspect returned status code {}".format(code)) + inspect = json.loads(inspect_output) + in_container_server_ip = inspect[0]["NetworkSettings"]["Networks"]["kind"]["IPAddress"] + result = { # the apiserver's IP. 
We need to map this to `kubernetes` in the fdd container to be able to validate # the TLS cert of the apiserver "container-to-container-server-ip": in_container_server_ip, # apiserver endpoint when running fdd as a container - "container-to-container-server": "https://kubernetes:8443", + "container-to-container-server": "https://{}:6443".format(container_name), # apiserver endpoint for k8s client in tests, or when running fdd locally - "host-to-container-server": "https://localhost:{}".format(api_port), + "host-to-container-server": apiserver_url, "client-cert": client_cert, "client-key": client_key, "api-cert": api_cert, - "log_dumper": self.dump_logs, } - plog("Waiting for container {} with name {} to become ready".format(self._container.id, self.name)) - wait_until(self._endpoint_ready(config_port, "kubernetes-ready"), "kubernetes ready", patience=180) + plog("started kind cluster at {}".format(apiserver_url)) return result - except Exception: - self.dump_logs() + except Exception as e: + if output: + self.dump_output(output) self.delete() - raise + raise e - def dump_logs(self, since=None, until=None): - if self._container: - logs = self._container.logs(since=since) - plog("vvvvvvvvvvvvvvvv Output from kind container vvvvvvvvvvvvvvvv") - if logs: - plog(logs, end="") - plog("^^^^^^^^^^^^^^^^ Output from kind container ^^^^^^^^^^^^^^^^") + def dump_output(self, output): + plog("vvvvvvvvvvvvvvvv Output from kind vvvvvvvvvvvvvvvv") + plog(output) + plog("^^^^^^^^^^^^^^^^ Output from kind ^^^^^^^^^^^^^^^^") def delete(self): - if self._container: - try: - self._container.stop() - except docker.errors.NotFound: - pass # container has already stopped - - def _endpoint_ready(self, port, endpoint): - url = "http://localhost:{}/{}".format(port, endpoint) - - def ready(): - resp = requests.get(url) - resp.raise_for_status() - - return ready - - def _start(self): - image_name = self.DOCKER_IMAGES[self.k8s_version] - - self._container = self._client.containers.run("{}:{}".format(image_name, self.k8s_version), - detach=True, stdout=True, stderr=True, - remove=True, auto_remove=True, privileged=True, - name=self.name, hostname=self.name, - ports={"10080/tcp": None, "8443/tcp": None}) - plog("Launched container {} with name {}".format(self._container.id, self.name)) - - def _get_ports(self): - self._container.reload() - ip = self._container.attrs["NetworkSettings"]["IPAddress"] - ports = self._container.attrs["NetworkSettings"]["Ports"] - config_port = ports["10080/tcp"][-1]["HostPort"] - api_port = ports["8443/tcp"][-1]["HostPort"] - return ip, api_port, config_port + output, code = self._run_cmd(["kind", "delete", "cluster", "--name", self.name]) + if code != 0: + self.dump_output(output) + raise "deleting kind cluster: kind returned status code {}".format(code) + else: + plog("cluster deleted") def _save_to_file(self, name, data): raw_data = base64.b64decode(data) @@ -354,6 +332,11 @@ def _save_to_file(self, name, data): fobj.write(raw_data) return path + def _run_cmd(self, args): + cmd = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) + output = cmd.communicate()[0].strip() + return output, cmd.returncode + def _is_macos(): return os.uname()[0] == 'Darwin' From e1eaebfdfd658b9e3a6594e380b4b7381339cf08 Mon Sep 17 00:00:00 2001 From: Jamie Wiebe Date: Thu, 25 Nov 2021 11:44:01 +0100 Subject: [PATCH 2/4] fix tests --- tests/conftest.py | 8 +++-- .../fiaas_deploy_daemon/test_bootstrap_e2e.py | 5 ++- tests/fiaas_deploy_daemon/test_e2e.py | 34 +++++++++++++++---- 
tests/fiaas_deploy_daemon/utils.py | 7 +++- 4 files changed, 41 insertions(+), 13 deletions(-) diff --git a/tests/conftest.py b/tests/conftest.py index 6ddc3ca6..ab4ae863 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -18,7 +18,7 @@ import os import re import subprocess -import uuid +import uuid as uuidlib import pytest from xdist.scheduler import LoadScopeScheduling @@ -28,6 +28,10 @@ pytest_plugins = ['helpers_namespace'] +def uuid(): + return str(uuidlib.uuid4())[:8] + + @pytest.fixture(autouse=True) def prometheus_registry(): from prometheus_client.core import REGISTRY @@ -157,7 +161,7 @@ def pytest_addoption(parser): @pytest.fixture(scope="session") def use_docker_for_e2e(request): def dockerize(test_request, cert_path, service_type, k8s_version, port, apiserver_ip): - container_name = "fdd_{}_{}_{}".format(service_type, k8s_version, str(uuid.uuid4())) + container_name = "fdd_{}_{}_{}".format(service_type, k8s_version, uuid()) test_request.addfinalizer(lambda: subprocess.call(["docker", "stop", container_name])) args = [ "docker", "run", diff --git a/tests/fiaas_deploy_daemon/test_bootstrap_e2e.py b/tests/fiaas_deploy_daemon/test_bootstrap_e2e.py index b55a3570..675bb319 100644 --- a/tests/fiaas_deploy_daemon/test_bootstrap_e2e.py +++ b/tests/fiaas_deploy_daemon/test_bootstrap_e2e.py @@ -18,7 +18,6 @@ import os.path import subprocess import sys -import uuid import pytest from k8s import config @@ -34,7 +33,7 @@ from fiaas_deploy_daemon.tools import merge_dicts from utils import wait_until, crd_available, crd_supported, \ skip_if_crd_not_supported, read_yml, sanitize_resource_name, assert_k8s_resource_matches, get_unbound_port, \ - KindWrapper + KindWrapper, uuid PATIENCE = 30 TIMEOUT = 5 @@ -85,7 +84,7 @@ class TestBootstrapE2E(object): @pytest.fixture(scope="module") def kubernetes(self, k8s_version): try: - name = str(uuid.uuid4()) + name = 'kind-bootstrap-{}-{}'.format(k8s_version, uuid()) kind = KindWrapper(k8s_version, name) try: yield kind.start() diff --git a/tests/fiaas_deploy_daemon/test_e2e.py b/tests/fiaas_deploy_daemon/test_e2e.py index 46ee0159..cac869d3 100644 --- a/tests/fiaas_deploy_daemon/test_e2e.py +++ b/tests/fiaas_deploy_daemon/test_e2e.py @@ -21,7 +21,6 @@ import subprocess import sys import time -import uuid import pytest import requests @@ -35,7 +34,7 @@ from k8s.models.service_account import ServiceAccount from utils import wait_until, crd_available, crd_supported, \ skip_if_crd_not_supported, read_yml, sanitize_resource_name, assert_k8s_resource_matches, get_unbound_port, \ - KindWrapper + KindWrapper, uuid from fiaas_deploy_daemon.crd.status import create_name from fiaas_deploy_daemon.crd.types import FiaasApplication, FiaasApplicationStatus, FiaasApplicationSpec, \ @@ -64,7 +63,20 @@ def service_type(self, request): @pytest.fixture(scope="module") def kubernetes(self, service_type, k8s_version): try: - name = str(uuid.uuid4()) + name = 'kind-{}-{}-{}'.format(k8s_version, service_type.lower(), uuid()) + kind = KindWrapper(k8s_version, name) + try: + yield kind.start() + finally: + kind.delete() + except Exception as e: + msg = "Unable to run kind: %s" + pytest.fail(msg % str(e)) + + @pytest.fixture(scope="module") + def kubernetes_service_account(self, k8s_version): + try: + name = 'kind-{}-{}-{}'.format(k8s_version, "serviceaccount", uuid()) kind = KindWrapper(k8s_version, name) try: yield kind.start() @@ -82,6 +94,14 @@ def k8s_client(self, kubernetes): config.verify_ssl = False config.cert = (kubernetes["client-cert"], kubernetes["client-key"]) 
+ @pytest.fixture() + def k8s_client_service_account(self, kubernetes_service_account): + Client.clear_session() + config.api_server = kubernetes_service_account["host-to-container-server"] + config.debug = True + config.verify_ssl = False + config.cert = (kubernetes_service_account["client-cert"], kubernetes_service_account["client-key"]) + @pytest.fixture(scope="module") def fdd(self, request, kubernetes, service_type, k8s_version, use_docker_for_e2e): args, port, ready = self.prepare_fdd(request, kubernetes, k8s_version, use_docker_for_e2e, service_type) @@ -96,15 +116,15 @@ def fdd(self, request, kubernetes, service_type, k8s_version, use_docker_for_e2e self._end_popen(daemon) @pytest.fixture(scope="module") - def fdd_service_account(self, request, kubernetes, k8s_version, use_docker_for_e2e): - args, port, ready = self.prepare_fdd(request, kubernetes, k8s_version, + def fdd_service_account(self, request, kubernetes_service_account, k8s_version, use_docker_for_e2e): + args, port, ready = self.prepare_fdd(request, kubernetes_service_account, k8s_version, use_docker_for_e2e, "ClusterIP", service_account=True) try: daemon = subprocess.Popen(args, stdout=sys.stderr, env=merge_dicts(os.environ, {"NAMESPACE": "default"})) time.sleep(1) if daemon.poll() is not None: pytest.fail("fiaas-deploy-daemon has crashed after startup, inspect logs") - self.wait_until_fdd_ready(k8s_version, kubernetes, ready) + self.wait_until_fdd_ready(k8s_version, kubernetes_service_account, ready) yield "http://localhost:{}/fiaas".format(port) finally: self._end_popen(daemon) @@ -407,7 +427,7 @@ def cleanup_complete(): def test_custom_resource_definition_deploy_without_service_account(self, custom_resource_definition, service_type): self.run_crd_deploy(custom_resource_definition, service_type) - @pytest.mark.usefixtures("fdd_service_account", "k8s_client") + @pytest.mark.usefixtures("fdd_service_account", "k8s_client_service_account") def test_custom_resource_definition_deploy_with_service_account(self, custom_resource_definition_service_account): service_type = "ClusterIP" self.run_crd_deploy(custom_resource_definition_service_account, service_type, service_account=True) diff --git a/tests/fiaas_deploy_daemon/utils.py b/tests/fiaas_deploy_daemon/utils.py index 31c826e1..da71c323 100644 --- a/tests/fiaas_deploy_daemon/utils.py +++ b/tests/fiaas_deploy_daemon/utils.py @@ -24,6 +24,7 @@ import tempfile import time import traceback +import uuid as uuidlib from copy import deepcopy from datetime import datetime from urlparse import urljoin @@ -41,6 +42,10 @@ from fiaas_deploy_daemon.crd.types import FiaasApplication, FiaasApplicationStatus +def uuid(): + return str(uuidlib.uuid4())[:8] + + def plog(message, **kwargs): """Primitive logging""" print("%s: %s" % (time.asctime(), message), file=sys.stderr, **kwargs) # noqa: T001 @@ -80,7 +85,7 @@ def crd_available(kubernetes, timeout=5): def _crd_available(): for url in (app_url, status_url): - plog("Check if CRDs are available at %s" % url) + plog("Checking if CRDs are available at %s" % url) resp = session.get(url, timeout=timeout) try: resp.raise_for_status() From 48e582cbf86f29d81ad1fb1e71cb19d8d7312c3b Mon Sep 17 00:00:00 2001 From: Jamie Wiebe Date: Tue, 30 Nov 2021 11:01:32 +0100 Subject: [PATCH 3/4] various fixes --- setup.py | 1 - tests/fiaas_deploy_daemon/test_e2e.py | 1 - tests/fiaas_deploy_daemon/utils.py | 104 ++++++++++++-------------- 3 files changed, 46 insertions(+), 60 deletions(-) diff --git a/setup.py b/setup.py index bd90258b..fefb83da 100755 --- 
a/setup.py +++ b/setup.py @@ -68,7 +68,6 @@ def read(filename): 'pytest == 3.10.1', 'requests-file == 1.4.3', 'callee == 0.3', - 'docker == 4.0.2' ] DEV_TOOLS = [ diff --git a/tests/fiaas_deploy_daemon/test_e2e.py b/tests/fiaas_deploy_daemon/test_e2e.py index cac869d3..23601ef6 100644 --- a/tests/fiaas_deploy_daemon/test_e2e.py +++ b/tests/fiaas_deploy_daemon/test_e2e.py @@ -345,7 +345,6 @@ def _resource_labels(self, param): return fiaas_path, expected, additional_labels def _ensure_clean(self, name, expected): - pass kinds = self._select_kinds(expected) for kind in kinds: try: diff --git a/tests/fiaas_deploy_daemon/utils.py b/tests/fiaas_deploy_daemon/utils.py index da71c323..aad595f2 100644 --- a/tests/fiaas_deploy_daemon/utils.py +++ b/tests/fiaas_deploy_daemon/utils.py @@ -87,11 +87,7 @@ def _crd_available(): for url in (app_url, status_url): plog("Checking if CRDs are available at %s" % url) resp = session.get(url, timeout=timeout) - try: - resp.raise_for_status() - except Exception as e: - plog(e) - raise + resp.raise_for_status() plog("!!!!! %s is available !!!!" % url) return _crd_available @@ -276,59 +272,46 @@ def start(self): plog("creating kubeconfig at " + self._kubeconfig) image_name = "kindest/node:" + self.k8s_version args = ["kind", "create", "cluster", "--name="+self.name, "--kubeconfig="+self._kubeconfig, "--image="+image_name, "--wait=40s"] - output = None - try: - output, code = self._run_cmd(args) - if code != 0: - raise Exception("kind returned status code {}".format(code)) - - with open(self._kubeconfig, 'r') as f: - config = yaml.safe_load(f.read()) - api_cert = self._save_to_file("api_cert", config["clusters"][-1]["cluster"]["certificate-authority-data"]) - client_cert = self._save_to_file("client_cert", config["users"][-1]["user"]["client-certificate-data"]) - client_key = self._save_to_file("client_key", config["users"][-1]["user"]["client-key-data"]) - apiserver_url = config["clusters"][-1]["cluster"]["server"] - - container_name = self.name + "-control-plane" - inspect_output, code = self._run_cmd(["docker", "inspect", container_name]) - if code != 0: - output = inspect_output - raise Exception("docker inspect returned status code {}".format(code)) - inspect = json.loads(inspect_output) - in_container_server_ip = inspect[0]["NetworkSettings"]["Networks"]["kind"]["IPAddress"] - - result = { - # the apiserver's IP. We need to map this to `kubernetes` in the fdd container to be able to validate - # the TLS cert of the apiserver - "container-to-container-server-ip": in_container_server_ip, - # apiserver endpoint when running fdd as a container - "container-to-container-server": "https://{}:6443".format(container_name), - # apiserver endpoint for k8s client in tests, or when running fdd locally - "host-to-container-server": apiserver_url, - "client-cert": client_cert, - "client-key": client_key, - "api-cert": api_cert, - } - plog("started kind cluster at {}".format(apiserver_url)) - return result - except Exception as e: - if output: - self.dump_output(output) - self.delete() - raise e - def dump_output(self, output): - plog("vvvvvvvvvvvvvvvv Output from kind vvvvvvvvvvvvvvvv") - plog(output) - plog("^^^^^^^^^^^^^^^^ Output from kind ^^^^^^^^^^^^^^^^") + self._run_cmd(args) + + api_cert, client_cert, client_key, apiserver_url = self._kubeconfig_connection_params() + + container_name = self.name + "-control-plane" + in_container_server_ip = self._in_container_server_ip(container_name) + + result = { + # the apiserver's IP. 
We need to map this to `kubernetes` in the fdd container to be able to validate + # the TLS cert of the apiserver + "container-to-container-server-ip": in_container_server_ip, + # apiserver endpoint when running fdd as a container + "container-to-container-server": "https://{}:6443".format(container_name), + # apiserver endpoint for k8s client in tests, or when running fdd locally + "host-to-container-server": apiserver_url, + "client-cert": client_cert, + "client-key": client_key, + "api-cert": api_cert, + } + plog("started kind cluster at {}".format(apiserver_url)) + return result + + def _kubeconfig_connection_params(self): + with open(self._kubeconfig, 'r') as f: + config = yaml.safe_load(f.read()) + api_cert = self._save_to_file("api_cert", config["clusters"][-1]["cluster"]["certificate-authority-data"]) + client_cert = self._save_to_file("client_cert", config["users"][-1]["user"]["client-certificate-data"]) + client_key = self._save_to_file("client_key", config["users"][-1]["user"]["client-key-data"]) + apiserver_url = config["clusters"][-1]["cluster"]["server"] + return api_cert, client_cert, client_key, apiserver_url + + def _in_container_server_ip(self, container_name): + output = self._run_cmd(["docker", "inspect", container_name]) + inspect = json.loads(output) + return inspect[0]["NetworkSettings"]["Networks"]["kind"]["IPAddress"] def delete(self): - output, code = self._run_cmd(["kind", "delete", "cluster", "--name", self.name]) - if code != 0: - self.dump_output(output) - raise "deleting kind cluster: kind returned status code {}".format(code) - else: - plog("cluster deleted") + self._run_cmd(["kind", "delete", "cluster", "--name", self.name]) + plog("cluster {} deleted".format(self.name)) def _save_to_file(self, name, data): raw_data = base64.b64decode(data) @@ -338,9 +321,14 @@ def _save_to_file(self, name, data): return path def _run_cmd(self, args): - cmd = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) - output = cmd.communicate()[0].strip() - return output, cmd.returncode + try: + output = subprocess.check_output(args, stderr=subprocess.STDOUT) + except subprocess.CalledProcessError as e: + plog("vvvvvvvvvvvvvvvv Output from {} vvvvvvvvvvvvvvvv".format(args[0])) + plog(e.output) + plog("^^^^^^^^^^^^^^^^ Output from {} ^^^^^^^^^^^^^^^^".format(args[0])) + raise e + return output.strip() def _is_macos(): From dc68ddaa305b654623345d9beadb4b354c577091 Mon Sep 17 00:00:00 2001 From: Jamie Wiebe Date: Tue, 30 Nov 2021 14:11:53 +0100 Subject: [PATCH 4/4] add retry to crd creation --- fiaas_deploy_daemon/crd/watcher.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/fiaas_deploy_daemon/crd/watcher.py b/fiaas_deploy_daemon/crd/watcher.py index f1f87e6d..6eaf6aa9 100644 --- a/fiaas_deploy_daemon/crd/watcher.py +++ b/fiaas_deploy_daemon/crd/watcher.py @@ -31,6 +31,7 @@ from ..base_thread import DaemonThread from ..deployer import DeployerEvent from ..log_extras import set_extras +from ..retry import retry_on_upsert_conflict from ..specs.factory import InvalidConfiguration LOG = logging.getLogger(__name__) @@ -68,6 +69,7 @@ def create_custom_resource_definitions(cls): cls._create("ApplicationStatus", "application-statuses", ("status", "appstatus", "fs"), "fiaas.schibsted.io") @staticmethod + @retry_on_upsert_conflict def _create(kind, plural, short_names, group): name = "%s.%s" % (plural, group) metadata = ObjectMeta(name=name)
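
Patch 4 decorates CrdWatcher._create with retry_on_upsert_conflict so that creating the CustomResourceDefinitions tolerates transient conflict responses from the apiserver (for example when a bootstrap run and a running daemon upsert the same definition at roughly the same time). The real decorator lives in fiaas_deploy_daemon/retry.py and is not part of this series; the sketch below is only a minimal illustration of the general retry-on-conflict shape, and UpsertConflict is a hypothetical placeholder name for whatever conflict error the k8s client actually raises.

    import functools
    import time


    class UpsertConflict(Exception):
        """Placeholder (assumed name) for the 409 Conflict error raised by the k8s client on save()."""


    def retry_on_upsert_conflict(wrapped, max_tries=5, initial_delay=0.5):
        """Retry the wrapped callable when an upsert hits a conflict, backing off between attempts."""
        @functools.wraps(wrapped)
        def wrapper(*args, **kwargs):
            delay = initial_delay
            for attempt in range(1, max_tries + 1):
                try:
                    return wrapped(*args, **kwargs)
                except UpsertConflict:
                    if attempt == max_tries:
                        raise  # give up after the last attempt
                    time.sleep(delay)
                    delay *= 2  # simple exponential backoff
        return wrapper

Used bare, as in the patch (`@staticmethod` above `@retry_on_upsert_conflict` above `def _create(...)`), the decorator wraps _create before staticmethod does, so each CRD creation call is retried independently; the actual backoff policy and exception type in fiaas_deploy_daemon/retry.py may differ from this sketch.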