From 7ab056d1dcf849793a244bb6a674226b8cf88be2 Mon Sep 17 00:00:00 2001
From: Andres Martinez Gotor
Date: Tue, 25 Jun 2019 17:36:55 +0200
Subject: [PATCH] Cleanup scripts (#1052)

---
 .circleci/config.yml      |   2 -
 Makefile                  |  14 +-
 kafka-zookeeper.jsonnet   | 263 --------------------------------------
 script/cluster-up-dind.sh |  25 ----
 script/integration-tests  |  12 +-
 script/make.sh            |  46 -------
 6 files changed, 2 insertions(+), 360 deletions(-)
 delete mode 100644 kafka-zookeeper.jsonnet
 delete mode 100755 script/cluster-up-dind.sh
 delete mode 100755 script/make.sh

diff --git a/.circleci/config.yml b/.circleci/config.yml
index 6f9fd8ece..21d4a7b93 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -177,11 +177,9 @@ jobs:
       - run: echo "export ESCAPED_GKE_CLUSTER=$(echo ${GKE_CLUSTER}-ci-${CIRCLE_BRANCH:-$CIRCLE_TAG} | sed 's/[^a-z0-9-]//g')" >> $BASH_ENV
       - run: ./script/start-gke-env.sh $ESCAPED_GKE_CLUSTER $ZONE $GKE_VERSION $GKE_ADMIN > /dev/null
       - run: ./script/pull-or-build-image.sh function-controller
-      - run: ./script/pull-or-build-image.sh kafka-controller-image
       - run: ./script/integration-tests gke_${GKE_PROJECT}_${ZONE}_${ESCAPED_GKE_CLUSTER} deployment
       - run: ./script/integration-tests gke_${GKE_PROJECT}_${ZONE}_${ESCAPED_GKE_CLUSTER} basic
       - run: ./script/integration-tests gke_${GKE_PROJECT}_${ZONE}_${ESCAPED_GKE_CLUSTER} cronjob
-      - run: ./script/integration-tests gke_${GKE_PROJECT}_${ZONE}_${ESCAPED_GKE_CLUSTER} kafka
   push_latest_images:
     <<: *defaults
     docker:
diff --git a/Makefile b/Makefile
index 726cf1d6c..ca778566b 100644
--- a/Makefile
+++ b/Makefile
@@ -22,9 +22,6 @@ KUBELESS_ENVS := \
 
 default: binary
 
-all:
-	CGO_ENABLED=0 ./script/make.sh
-
 binary:
 	CGO_ENABLED=0 ./script/binary
 
@@ -36,7 +33,7 @@ binary-cross:
 	$(KUBECFG) show -U https://raw.githubusercontent.com/kubeless/runtimes/master -o yaml $< > $@.tmp
 	mv $@.tmp $@
 
-all-yaml: kubeless.yaml kubeless-non-rbac.yaml kubeless-openshift.yaml kafka-zookeeper.yaml
+all-yaml: kubeless.yaml kubeless-non-rbac.yaml kubeless-openshift.yaml
 
 kubeless.yaml: kubeless.jsonnet kubeless-non-rbac.jsonnet
 
@@ -44,12 +41,6 @@ kubeless-non-rbac.yaml: kubeless-non-rbac.jsonnet
 
 kubeless-openshift.yaml: kubeless-openshift.jsonnet
 
-kafka-zookeeper.yaml: kafka-zookeeper.jsonnet
-
-nats.yaml: nats.jsonnet
-
-kinesis.yaml: kinesis.jsonnet
-
 docker/function-controller: controller-build
 	cp $(BUNDLES)/kubeless_$(OS)-$(ARCH)/kubeless-function-controller $@
 
@@ -84,9 +75,6 @@ integration-tests:
 	./script/integration-tests minikube deployment
 	./script/integration-tests minikube basic
 
-minikube-rbac-test:
-	./script/integration-test-rbac minikube
-
 fmt:
 	$(GOFMT) -s -w $(GO_FILES)
 
diff --git a/kafka-zookeeper.jsonnet b/kafka-zookeeper.jsonnet
deleted file mode 100644
index c24780bfb..000000000
--- a/kafka-zookeeper.jsonnet
+++ /dev/null
@@ -1,263 +0,0 @@
-local k = import "ksonnet.beta.1/k.libsonnet";
-
-local statefulset = k.apps.v1beta1.statefulSet;
-local container = k.core.v1.container;
-local service = k.core.v1.service;
-local deployment = k.apps.v1beta1.deployment;
-local serviceAccount = k.core.v1.serviceAccount;
-local objectMeta = k.core.v1.objectMeta;
-
-local namespace = "kubeless";
-local controller_account_name = "controller-acct";
-
-local crd = [
-  {
-    apiVersion: "apiextensions.k8s.io/v1beta1",
-    kind: "CustomResourceDefinition",
-    metadata: objectMeta.name("kafkatriggers.kubeless.io"),
-    spec: {group: "kubeless.io", version: "v1beta1", scope: "Namespaced", names: {plural: "kafkatriggers", singular: "kafkatrigger", kind: "KafkaTrigger"}},
-  },
-];
-
-local controllerContainer =
-  container.default("kafka-trigger-controller", "bitnami/kafka-trigger-controller:v1.0.0-alpha.9") +
-  container.imagePullPolicy("IfNotPresent");
-
-local kubelessLabel = {kubeless: "kafka-trigger-controller"};
-
-local controllerAccount =
-  serviceAccount.default(controller_account_name, namespace);
-
-local controllerDeployment =
-  deployment.default("kafka-trigger-controller", controllerContainer, namespace) +
-  {metadata+:{labels: kubelessLabel}} +
-  {spec+: {selector: {matchLabels: kubelessLabel}}} +
-  {spec+: {template+: {spec+: {serviceAccountName: controllerAccount.metadata.name}}}} +
-  {spec+: {template+: {metadata: {labels: kubelessLabel}}}};
-
-local kafkaEnv = [
-  {
-    name: "KAFKA_ADVERTISED_HOST_NAME",
-    value: "broker.kubeless"
-  },
-  {
-    name: "KAFKA_ADVERTISED_PORT",
-    value: "9092"
-  },
-  {
-    name: "KAFKA_PORT",
-    value: "9092"
-  },
-  {
-    name: "KAFKA_DELETE_TOPIC_ENABLE",
-    value: "true"
-  },
-  {
-    name: "KAFKA_ZOOKEEPER_CONNECT",
-    value: "zookeeper.kubeless:2181"
-  },
-  {
-    name: "ALLOW_PLAINTEXT_LISTENER",
-    value: "yes"
-  }
-];
-
-local zookeeperEnv = [
-  {
-    name: "ZOO_SERVERS",
-    value: "server.1=zoo-0.zoo:2888:3888:participant"
-  },
-  {
-    name: "ALLOW_ANONYMOUS_LOGIN",
-    value: "yes"
-  }
-];
-
-local zookeeperPorts = [
-  {
-    containerPort: 2181,
-    name: "client"
-  },
-  {
-    containerPort: 2888,
-    name: "peer"
-  },
-  {
-    containerPort: 3888,
-    name: "leader-election"
-  }
-];
-
-local kafkaContainer =
-  container.default("broker", "bitnami/kafka:1.1.0-r0") +
-  container.imagePullPolicy("IfNotPresent") +
-  container.env(kafkaEnv) +
-  container.ports({containerPort: 9092}) +
-  container.livenessProbe({tcpSocket: {port: 9092}, initialDelaySeconds: 30}) +
-  container.volumeMounts([
-    {
-      name: "datadir",
-      mountPath: "/bitnami/kafka/data"
-    }
-  ]);
-
-local kafkaInitContainer =
-  container.default("volume-permissions", "busybox") +
-  container.imagePullPolicy("IfNotPresent") +
-  container.command(["sh", "-c", "chmod -R g+rwX /bitnami"]) +
-  container.volumeMounts([
-    {
-      name: "datadir",
-      mountPath: "/bitnami/kafka/data"
-    }
-  ]);
-
-local zookeeperContainer =
-  container.default("zookeeper", "bitnami/zookeeper:3.4.10-r12") +
-  container.imagePullPolicy("IfNotPresent") +
-  container.env(zookeeperEnv) +
-  container.ports(zookeeperPorts) +
-  container.volumeMounts([
-    {
-      name: "zookeeper",
-      mountPath: "/bitnami/zookeeper"
-    }
-  ]);
-
-local zookeeperInitContainer =
-  container.default("volume-permissions", "busybox") +
-  container.imagePullPolicy("IfNotPresent") +
-  container.command(["sh", "-c", "chmod -R g+rwX /bitnami"]) +
-  container.volumeMounts([
-    {
-      name: "zookeeper",
-      mountPath: "/bitnami/zookeeper"
-    }
-  ]);
-
-local kafkaLabel = {kubeless: "kafka"};
-local zookeeperLabel = {kubeless: "zookeeper"};
-
-local kafkaVolumeCT = [
-  {
-    "metadata": {
-      "name": "datadir"
-    },
-    "spec": {
-      "accessModes": [
-        "ReadWriteOnce"
-      ],
-      "resources": {
-        "requests": {
-          "storage": "1Gi"
-        }
-      }
-    }
-  }
-];
-
-local zooVolumeCT = [
-  {
-    "metadata": {
-      "name": "zookeeper"
-    },
-    "spec": {
-      "accessModes": [
-        "ReadWriteOnce"
-      ],
-      "resources": {
-        "requests": {
-          "storage": "1Gi"
-        }
-      }
-    }
-  }
-];
-
-local kafkaSts =
-  statefulset.default("kafka", namespace) +
-  statefulset.spec({serviceName: "broker"}) +
-  {spec+: {template: {metadata: {labels: kafkaLabel}}}} +
-  {spec+: {volumeClaimTemplates: kafkaVolumeCT}} +
-  {spec+: {template+: {spec: {containers: [kafkaContainer], initContainers: [kafkaInitContainer]}}}};
-
-local zookeeperSts =
-  statefulset.default("zoo", namespace) +
-  statefulset.spec({serviceName: "zoo"}) +
-  {spec+: {template: {metadata: {labels: zookeeperLabel}}}} +
-  {spec+: {volumeClaimTemplates: zooVolumeCT}} +
-  {spec+: {template+: {spec: {containers: [zookeeperContainer], initContainers: [zookeeperInitContainer]}}}};
-
-local kafkaSvc =
-  service.default("kafka", namespace) +
-  service.spec(k.core.v1.serviceSpec.default()) +
-  service.mixin.spec.ports({port: 9092}) +
-  service.mixin.spec.selector({kubeless: "kafka"});
-
-local kafkaHeadlessSvc =
-  service.default("broker", namespace) +
-  service.spec(k.core.v1.serviceSpec.default()) +
-  service.mixin.spec.ports({port: 9092}) +
-  service.mixin.spec.selector({kubeless: "kafka"}) +
-  {spec+: {clusterIP: "None"}};
-
-local zookeeperSvc =
-  service.default("zookeeper", namespace) +
-  service.spec(k.core.v1.serviceSpec.default()) +
-  service.mixin.spec.ports({port: 2181, name: "client"}) +
-  service.mixin.spec.selector({kubeless: "zookeeper"});
-
-local zookeeperHeadlessSvc =
-  service.default("zoo", namespace) +
-  service.spec(k.core.v1.serviceSpec.default()) +
-  service.mixin.spec.ports([{port: 9092, name: "peer"},{port: 3888, name: "leader-election"}]) +
-  service.mixin.spec.selector({kubeless: "zookeeper"}) +
-  {spec+: {clusterIP: "None"}};
-
-local controller_roles = [
-  {
-    apiGroups: [""],
-    resources: ["services", "configmaps"],
-    verbs: ["get", "list"],
-  },
-  {
-    apiGroups: ["kubeless.io"],
-    resources: ["functions", "kafkatriggers"],
-    verbs: ["get", "list", "watch", "update", "delete"],
-  },
-];
-
-local clusterRole(name, rules) = {
-  apiVersion: "rbac.authorization.k8s.io/v1beta1",
-  kind: "ClusterRole",
-  metadata: objectMeta.name(name),
-  rules: rules,
-};
-
-local clusterRoleBinding(name, role, subjects) = {
-  apiVersion: "rbac.authorization.k8s.io/v1beta1",
-  kind: "ClusterRoleBinding",
-  metadata: objectMeta.name(name),
-  subjects: [{kind: s.kind, namespace: s.metadata.namespace, name: s.metadata.name} for s in subjects],
-  roleRef: {kind: role.kind, apiGroup: "rbac.authorization.k8s.io", name: role.metadata.name},
-};
-
-local controllerClusterRole = clusterRole(
-  "kafka-controller-deployer", controller_roles);
-
-local controllerClusterRoleBinding = clusterRoleBinding(
-  "kafka-controller-deployer", controllerClusterRole, [controllerAccount]
-);
-
-{
-  kafkaSts: k.util.prune(kafkaSts),
-  zookeeperSts: k.util.prune(zookeeperSts),
-  kafkaSvc: k.util.prune(kafkaSvc),
-  kafkaHeadlessSvc: k.util.prune(kafkaHeadlessSvc),
-  zookeeperSvc: k.util.prune(zookeeperSvc),
-  zookeeperHeadlessSvc: k.util.prune(zookeeperHeadlessSvc),
-  controller: k.util.prune(controllerDeployment),
-  crd: k.util.prune(crd),
-  controllerClusterRole: k.util.prune(controllerClusterRole),
-  controllerClusterRoleBinding: k.util.prune(controllerClusterRoleBinding),
-}
diff --git a/script/cluster-up-dind.sh b/script/cluster-up-dind.sh
deleted file mode 100755
index fe2ff4017..000000000
--- a/script/cluster-up-dind.sh
+++ /dev/null
@@ -1,25 +0,0 @@
-#!/usr/bin/env bash
-
-# Copyright (c) 2016-2017 Bitnami
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Bring up kubeadm-dind-cluster (docker-in-docker k8s cluster)
-DIND_CLUSTER_SH=dind-cluster-v1.7.sh
-DIND_URL=https://cdn.rawgit.com/Mirantis/kubeadm-dind-cluster/master/fixed/${DIND_CLUSTER_SH}
-
-rm -f ${DIND_CLUSTER_SH}
-wget ${DIND_URL}
-chmod +x ${DIND_CLUSTER_SH}
-./${DIND_CLUSTER_SH} up
-# vim: sw=4 ts=4 et si
diff --git a/script/integration-tests b/script/integration-tests
index 7929a175b..ffb41b40b 100755
--- a/script/integration-tests
+++ b/script/integration-tests
@@ -67,15 +67,6 @@ deployment)
 basic)
     bats tests/integration-tests.bats
     ;;
-kafka)
-    bats tests/integration-tests-kafka.bats
-    ;;
-nats)
-    bats tests/integration-tests-nats.bats
-    ;;
-kinesis)
-    bats tests/integration-tests-kinesis.bats
-    ;;
 http)
     bats tests/integration-tests-http.bats
     ;;
@@ -89,8 +80,7 @@ prebuilt_functions)
     bats tests/deployment-tests.bats && \
     bats tests/integration-tests.bats && \
     bats tests/integration-tests-http.bats && \
-    bats tests/integration-tests-cronjob.bats && \
-    bats tests/integration-tests-kafka.bats
+    bats tests/integration-tests-cronjob.bats
     ;;
 esac
 exit_code=$?
diff --git a/script/make.sh b/script/make.sh
deleted file mode 100755
index 974f73ff0..000000000
--- a/script/make.sh
+++ /dev/null
@@ -1,46 +0,0 @@
-#!/usr/bin/env bash
-
-# Copyright (c) 2016-2017 Bitnami
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-set -e
-
-export KUBELESS_PKG='github.com/kubeless/kubeless'
-
-# List of bundles to create when no argument is passed
-DEFAULT_BUNDLES=(
-    validate-test
-    validate-gofmt
-    validate-git-marks
-    validate-lint
-    validate-vet
-    binary
-)
-bundle() {
-    local bundle="$1"; shift
-    echo "---> Making bundle: $(basename "$bundle") (in $DEST)"
-    source "script/$bundle" "$@"
-}
-
-if [ $# -lt 1 ]; then
-    bundles=(${DEFAULT_BUNDLES[@]})
-else
-    bundles=($@)
-fi
-for bundle in ${bundles[@]}; do
-    export DEST=.
-    ABS_DEST="$(cd "$DEST" && pwd -P)"
-    bundle "$bundle"
-    echo
-done