From 44ed0366794f7add15cfdb08058571d8650d9e77 Mon Sep 17 00:00:00 2001
From: zzm
Date: Sun, 22 Oct 2023 22:10:31 +0800
Subject: [PATCH] offline/scaleup/scaledown ci test

---
 ...e_controller_ci_build_deploy_to_aliyun.yml |  36 +++
 ...le_controller_ci_build_scale_to_aliyun.yml | 220 ++++++++++++++++++
 ...ent_v1alpha1_moduledeployment_offline.yaml |  28 +++
 ...t_v1alpha1_moduledeployment_scaledown.yaml |  28 +++
 ...ent_v1alpha1_moduledeployment_scaleup.yaml |  28 +++
 5 files changed, 340 insertions(+)
 create mode 100644 .github/workflows/module_controller_ci_build_scale_to_aliyun.yml
 create mode 100644 module-controller/config/samples/module-deployment_v1alpha1_moduledeployment_offline.yaml
 create mode 100644 module-controller/config/samples/module-deployment_v1alpha1_moduledeployment_scaledown.yaml
 create mode 100644 module-controller/config/samples/module-deployment_v1alpha1_moduledeployment_scaleup.yaml

diff --git a/.github/workflows/module_controller_ci_build_deploy_to_aliyun.yml b/.github/workflows/module_controller_ci_build_deploy_to_aliyun.yml
index 277ff94b6..708004dc5 100644
--- a/.github/workflows/module_controller_ci_build_deploy_to_aliyun.yml
+++ b/.github/workflows/module_controller_ci_build_deploy_to_aliyun.yml
@@ -144,4 +144,40 @@ jobs:
           kubectl exec -it $podname -- sh -c 'ls -al logs/stock-mng'
           kubectl exec -it $podname -- sh -c 'ls -al logs/stock-mng/sofa-ark'
           kubectl exec -it $podname -- sh -c 'grep "dynamic-provider:1.0.0 started" logs/stock-mng/sofa-ark/*.log'
+
+
+      - name: Offline moduledeployment
+        run: |
+          kubectl apply -f config/samples/module-deployment_v1alpha1_moduledeployment_offline.yaml
+
+      - name: get moduledeployment
+        run: |
+          kubectl get moduledeployment
+
+      - name: get modulereplicaset
+        run: |
+          kubectl get modulereplicaset
+
+      - run: sleep 15
+
+      - name: get module
+        run: |
+          kubectl get module -oyaml
+
+      - name: check module does not exist
+        run: |
+          moduleCount=$(kubectl get module --no-headers | wc -l)
+          if [[ $moduleCount -ge 1 ]]; then
+            echo "ERROR: module offline failed"
+            exit 2
+          fi
+
+      - name: check module
+        run: |
+          podname=$(kubectl get pod -l app=dynamic-stock -o name)
+          kubectl exec -it $podname -- sh -c 'ls -al'
+          kubectl exec -it $podname -- sh -c 'ls -al logs'
+          kubectl exec -it $podname -- sh -c 'ls -al logs/stock-mng'
+          kubectl exec -it $podname -- sh -c 'ls -al logs/stock-mng/sofa-ark'
+          kubectl exec -it $podname -- sh -c 'grep "dynamic-provider:1.0.0 started" logs/stock-mng/sofa-ark/*.log'
\ No newline at end of file
diff --git a/.github/workflows/module_controller_ci_build_scale_to_aliyun.yml b/.github/workflows/module_controller_ci_build_scale_to_aliyun.yml
new file mode 100644
index 000000000..bfd3c65b3
--- /dev/null
+++ b/.github/workflows/module_controller_ci_build_scale_to_aliyun.yml
@@ -0,0 +1,220 @@
+name: Module Controller Integration Test
+run-name: ${{ github.actor }} pushed module-controller code
+
+on:
+  push:
+    branches:
+      - master
+    paths:
+      - 'module-controller/**'
+
+  pull_request:
+    branches:
+      - master
+    paths:
+      - 'module-controller/**'
+
+  # enable manually running the workflow
+  workflow_dispatch:
+
+env:
+  CGO_ENABLED: 0
+  GOOS: linux
+  WORK_DIR: module-controller
+  TAG: ci-test-master-latest
+  DOCKERHUB_REGISTRY: serverless-registry.cn-shanghai.cr.aliyuncs.com
+  MODULE_CONTROLLER_IMAGE_PATH: opensource/test/module-controller
+  INTEGRATION_TESTS_IMAGE_PATH: opensource/test/module-controller-integration-tests
+  POD_NAMESPACE: default
+
+defaults:
+  run:
+    working-directory: module-controller
+
+jobs:
+  unit-test:
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v3
+
+      - name: Docker login
+        uses: docker/login-action@v2.2.0
+        with:
+          registry: ${{ env.DOCKERHUB_REGISTRY }}
+          username: ${{ secrets.DOCKERHUB_USERNAME }}
+          password: ${{ secrets.DOCKERHUB_PASSWORD }}
+          logout: false
+
+      - name: Set up Docker buildx
+        uses: docker/setup-buildx-action@v2
+
+      - name: Cache Docker layers
+        uses: actions/cache@v2
+        with:
+          path: /tmp/.buildx-cache
+          key: ${{ runner.os }}-buildx-${{ hashFiles('${{ env.WORK_DIR }}/*Dockerfile') }}
+
+      - name: Build and push module-controller Docker images
+        uses: docker/build-push-action@v4.1.1
+        with:
+          context: ${{ env.WORK_DIR }}
+          cache-from: type=local,src=/tmp/.buildx-cache
+          cache-to: type=local,dest=/tmp/.buildx-cache
+          file: ${{ env.WORK_DIR }}/Dockerfile
+          platforms: linux/amd64
+          push: true
+          tags: ${{ env.DOCKERHUB_REGISTRY }}/${{ env.MODULE_CONTROLLER_IMAGE_PATH }}:${{ env.TAG }}
+
+      - run: sleep 30
+
+      - name: Set up Minikube
+        run: |
+          curl -LO https://storage.googleapis.com/minikube/releases/latest/minikube-linux-amd64
+          sudo install minikube-linux-amd64 /usr/local/bin/minikube
+
+      - name: Start Minikube
+        run: minikube start
+
+      - name: Prepare development env
+        run: |
+          kubectl apply -f config/crd/bases/serverless.alipay.com_moduledeployments.yaml
+          kubectl apply -f config/crd/bases/serverless.alipay.com_modulereplicasets.yaml
+          kubectl apply -f config/crd/bases/serverless.alipay.com_modules.yaml
+          kubectl apply -f config/crd/bases/serverless.alipay.com_moduletemplates.yaml
+          kubectl apply -f config/rbac/role.yaml
+          kubectl apply -f config/rbac/role_binding.yaml
+          kubectl apply -f config/rbac/service_account.yaml
+          kubectl apply -f config/samples/dynamic-stock-deployment.yaml
+          kubectl apply -f config/samples/module-deployment-controller.yaml
+          kubectl apply -f config/samples/dynamic-stock-service.yaml
+
+      - run: sleep 60
+
+      - name: minikube logs
+        run: minikube logs
+
+      - name: get pod
+        run: |
+          kubectl get pod
+
+      - name: describe pod
+        run: |
+          kubectl describe pod
+
+      - name: wait for base deployment available
+        run: |
+          kubectl wait --for=condition=available deployment/dynamic-stock-deployment --timeout=300s
+
+      - name: wait for module-controller deployment available
+        run: |
+          kubectl wait --for=condition=available deployment/module-controller --timeout=300s
+
+      - name: Apply moduledeployment
+        run: |
+          kubectl apply -f config/samples/module-deployment_v1alpha1_moduledeployment_provider.yaml
+
+      - name: get moduledeployment
+        run: |
+          kubectl get moduledeployment
+
+      - name: get modulereplicaset
+        run: |
+          kubectl get modulereplicaset
+
+      - run: sleep 15
+
+      - name: get module
+        run: |
+          kubectl get module -oyaml
+
+      - name: check module exists
+        run: |
+          moduleCount=$(kubectl get module --no-headers | wc -l)
+          if [[ $moduleCount -lt 1 ]]; then
+            echo "ERROR: module does not exist"
+            exit 1
+          fi
+
+      - name: check module
+        run: |
+          podname=$(kubectl get pod -l app=dynamic-stock -o name)
+          kubectl exec -it $podname -- sh -c 'ls -al'
+          kubectl exec -it $podname -- sh -c 'ls -al logs'
+          kubectl exec -it $podname -- sh -c 'ls -al logs/stock-mng'
+          kubectl exec -it $podname -- sh -c 'ls -al logs/stock-mng/sofa-ark'
+          kubectl exec -it $podname -- sh -c 'grep "dynamic-provider:1.0.0 started" logs/stock-mng/sofa-ark/*.log'
+
+
+
+      - name: Scale up moduledeployment
+        run: |
+          kubectl apply -f config/samples/module-deployment_v1alpha1_moduledeployment_scaleup.yaml
+
+      - name: get moduledeployment
+        run: |
+          kubectl get moduledeployment
+
+      - name: get modulereplicaset
+        run: |
+          kubectl get modulereplicaset
+
+      - run: sleep 15
+
+      - name: get module
+        run: |
+          kubectl get module -oyaml
+
+      - name: check module scaled up
+        run: |
+          moduleCount=$(kubectl get module --no-headers | wc -l)
+          if [[ $moduleCount -lt 3 ]]; then
+            echo "ERROR: module scale-up failed"
+            exit 3
+          fi
+
+      - name: check module
+        run: |
+          podname=$(kubectl get pod -l app=dynamic-stock -o name)
+          kubectl exec -it $podname -- sh -c 'ls -al'
+          kubectl exec -it $podname -- sh -c 'ls -al logs'
+          kubectl exec -it $podname -- sh -c 'ls -al logs/stock-mng'
+          kubectl exec -it $podname -- sh -c 'ls -al logs/stock-mng/sofa-ark'
+          kubectl exec -it $podname -- sh -c 'grep "dynamic-provider:1.0.0 started" logs/stock-mng/sofa-ark/*.log'
+
+
+      - name: Scale down moduledeployment
+        run: |
+          kubectl apply -f config/samples/module-deployment_v1alpha1_moduledeployment_scaledown.yaml
+
+      - name: get moduledeployment
+        run: |
+          kubectl get moduledeployment
+
+      - name: get modulereplicaset
+        run: |
+          kubectl get modulereplicaset
+
+      - run: sleep 15
+
+      - name: get module
+        run: |
+          kubectl get module -oyaml
+
+      - name: check module scaled down
+        run: |
+          moduleCount=$(kubectl get module --no-headers | wc -l)
+          if [[ $moduleCount -gt 2 ]]; then
+            echo "ERROR: module scale-down failed"
+            exit 3
+          fi
+
+      - name: check module
+        run: |
+          podname=$(kubectl get pod -l app=dynamic-stock -o name)
+          kubectl exec -it $podname -- sh -c 'ls -al'
+          kubectl exec -it $podname -- sh -c 'ls -al logs'
+          kubectl exec -it $podname -- sh -c 'ls -al logs/stock-mng'
+          kubectl exec -it $podname -- sh -c 'ls -al logs/stock-mng/sofa-ark'
+          kubectl exec -it $podname -- sh -c 'grep "dynamic-provider:1.0.0 started" logs/stock-mng/sofa-ark/*.log'
diff --git a/module-controller/config/samples/module-deployment_v1alpha1_moduledeployment_offline.yaml b/module-controller/config/samples/module-deployment_v1alpha1_moduledeployment_offline.yaml
new file mode 100644
index 000000000..21a0fe6d9
--- /dev/null
+++ b/module-controller/config/samples/module-deployment_v1alpha1_moduledeployment_offline.yaml
@@ -0,0 +1,28 @@
+apiVersion: serverless.alipay.com/v1alpha1
+kind: ModuleDeployment
+metadata:
+  labels:
+    app.kubernetes.io/name: moduledeployment
+    app.kubernetes.io/instance: moduledeployment-sample
+    app.kubernetes.io/part-of: module-controller
+    app.kubernetes.io/managed-by: kustomize
+    app.kubernetes.io/created-by: module-controller
+  name: moduledeployment-sample-provider
+spec:
+  baseDeploymentName: dynamic-stock-deployment
+  template:
+    spec:
+      module:
+        name: dynamic-provider
+        version: '1.0.0'
+        url: http://serverless-opensource.oss-cn-shanghai.aliyuncs.com/module-packages/stable/dynamic-provider-1.0.0-ark-biz.jar
+  replicas: 0
+  operationStrategy:
+    needConfirm: false
+    grayTimeBetweenBatchSeconds: 120
+    useBeta: false
+    batchCount: 1
+  schedulingStrategy:
+    upgradePolicy: uninstall_then_install
+    schedulingPolicy: scatter
+
diff --git a/module-controller/config/samples/module-deployment_v1alpha1_moduledeployment_scaledown.yaml b/module-controller/config/samples/module-deployment_v1alpha1_moduledeployment_scaledown.yaml
new file mode 100644
index 000000000..71f957020
--- /dev/null
+++ b/module-controller/config/samples/module-deployment_v1alpha1_moduledeployment_scaledown.yaml
@@ -0,0 +1,28 @@
+apiVersion: serverless.alipay.com/v1alpha1
+kind: ModuleDeployment
+metadata:
+  labels:
+    app.kubernetes.io/name: moduledeployment
+    app.kubernetes.io/instance: moduledeployment-sample
+    app.kubernetes.io/part-of: module-controller
+    app.kubernetes.io/managed-by: kustomize
+    app.kubernetes.io/created-by: module-controller
+  name: moduledeployment-sample-provider
+spec:
+  baseDeploymentName: dynamic-stock-deployment
+  template:
+    spec:
+      module:
+        name: dynamic-provider
+        version: '1.0.0'
+        url: http://serverless-opensource.oss-cn-shanghai.aliyuncs.com/module-packages/stable/dynamic-provider-1.0.0-ark-biz.jar
+  replicas: 2
+  operationStrategy:
+    needConfirm: false
+    grayTimeBetweenBatchSeconds: 120
+    useBeta: false
+    batchCount: 1
+  schedulingStrategy:
+    upgradePolicy: scaledown_then_scaleup
+    schedulingPolicy: scatter
+
diff --git a/module-controller/config/samples/module-deployment_v1alpha1_moduledeployment_scaleup.yaml b/module-controller/config/samples/module-deployment_v1alpha1_moduledeployment_scaleup.yaml
new file mode 100644
index 000000000..8244ac4ac
--- /dev/null
+++ b/module-controller/config/samples/module-deployment_v1alpha1_moduledeployment_scaleup.yaml
@@ -0,0 +1,28 @@
+apiVersion: serverless.alipay.com/v1alpha1
+kind: ModuleDeployment
+metadata:
+  labels:
+    app.kubernetes.io/name: moduledeployment
+    app.kubernetes.io/instance: moduledeployment-sample
+    app.kubernetes.io/part-of: module-controller
+    app.kubernetes.io/managed-by: kustomize
+    app.kubernetes.io/created-by: module-controller
+  name: moduledeployment-sample-provider
+spec:
+  baseDeploymentName: dynamic-stock-deployment
+  template:
+    spec:
+      module:
+        name: dynamic-provider
+        version: '1.0.0'
+        url: http://serverless-opensource.oss-cn-shanghai.aliyuncs.com/module-packages/stable/dynamic-provider-1.0.0-ark-biz.jar
+  replicas: 3
+  operationStrategy:
+    needConfirm: false
+    grayTimeBetweenBatchSeconds: 120
+    useBeta: false
+    batchCount: 1
+  schedulingStrategy:
+    upgradePolicy: scaleup_then_scaledown
+    schedulingPolicy: scatter
+
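
Note on the verification pattern: the offline/scaleup/scaledown checks above count the lines of "kubectl get module" output, so --no-headers is used to keep the header row out of the count. For re-running the same verification by hand against the Minikube cluster the workflow sets up, a minimal standalone sketch could look like the script below; the script name check_module_count.sh and its expected-count argument are illustrative only and are not part of this patch.

#!/usr/bin/env bash
# check_module_count.sh - illustrative helper, not part of this patch.
# Verifies that the number of Module resources matches an expected count,
# mirroring the count-based checks used in the workflow steps above.
set -euo pipefail

expected="${1:?usage: $0 <expected-module-count>}"

# --no-headers drops the header row so only Module resources are counted;
# when no Module exists, kubectl prints nothing to stdout and the count is 0.
actual=$(kubectl get module --no-headers 2>/dev/null | wc -l)

if [[ "$actual" -ne "$expected" ]]; then
  echo "ERROR: expected $expected module(s), found $actual"
  exit 1
fi
echo "OK: $actual module(s) present"

Invoked as "./check_module_count.sh 3" after the scale-up sample, "./check_module_count.sh 2" after the scale-down sample, or "./check_module_count.sh 0" after the offline sample, it fails fast if the ModuleReplicaSet has not converged to the expected number of modules.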