diff --git a/.allure-overview/overviews.json b/.allure-overview/overviews.json index a63207ce60..4c61b96bec 100644 --- a/.allure-overview/overviews.json +++ b/.allure-overview/overviews.json @@ -1,7 +1,7 @@ { "sanity": { "OVERVIEW_TITLE": "'OpenWifi sanity results'", - "OVERVIEW_TESTBEDS": ["edgecore_oap101-6e","edgecore_eap101","cig_wf188n","cig_wf196","edgecore_eap102","edgecore_eap104","cig_wf186w","hfcl_ion4xe","yuncore_fap655","yuncore_ax820","edgecore_eap111"] + "OVERVIEW_TESTBEDS": ["edgecore_oap101-6e","edgecore_eap101","cig_wf188n","cig_wf196","edgecore_eap102","edgecore_eap104","cig_wf186w","hfcl_ion4xe","yuncore_fap655","yuncore_ax820","edgecore_eap111","hfcl_ion4xi","udaya_a6-id2"] }, "interop": { "OVERVIEW_TITLE": "'OpenWifi interop results'", @@ -15,6 +15,6 @@ "performance": { "OVERVIEW_TITLE": "'OpenWifi performance results'", - "OVERVIEW_TESTBEDS": ["edgecore_oap101-6e","edgecore_eap101","cig_wf188n","cig_wf196","edgecore_eap102","edgecore_eap104","cig_wf186w","hfcl_ion4xe","yuncore_fap655","yuncore_ax820","edgecore_eap111"] + "OVERVIEW_TESTBEDS": ["edgecore_oap101-6e","edgecore_eap101","cig_wf188n","cig_wf196","edgecore_eap102","edgecore_eap104","cig_wf186w","hfcl_ion4xe","yuncore_fap655","yuncore_ax820","edgecore_eap111","hfcl_ion4xi","udaya_a6-id2"] } } diff --git a/.github/actions/run-tests/action.yml b/.github/actions/run-tests/action.yml index de8bb935b9..0e6cf5b4a8 100644 --- a/.github/actions/run-tests/action.yml +++ b/.github/actions/run-tests/action.yml @@ -130,7 +130,9 @@ runs: done echo "tests completed" echo "downloading allure results..." - kubectl cp $podname:/tmp/allure-results allure-results >/dev/null 2>&1 + echo "list files in the pod /tmp/allure-results directory" + kubectl exec $podname -- ls -la /tmp/allure-results + kubectl cp --v=10 --retries=3 $podname:/tmp/allure-results allure-results echo "waiting for pod to exit" kubectl logs -f $podname >/dev/null 2>&1 diff --git a/.github/workflows/cgw-dev-deployment.yaml b/.github/workflows/cgw-dev-deployment.yaml new file mode 100644 index 0000000000..7f70324fd1 --- /dev/null +++ b/.github/workflows/cgw-dev-deployment.yaml @@ -0,0 +1,135 @@ +name: Update CGW01 OpenLAN Cloud Gateway on tip-wlan-main + +defaults: + run: + shell: bash + +env: + AWS_EKS_NAME: tip-wlan-main + AWS_DEFAULT_OUTPUT: json + AWS_DEFAULT_REGION: ap-south-1 + AWS_ACCOUNT_ID: ${{ secrets.AWS_ACCOUNT_ID }} + AWS_ACCESS_KEY_ID: ${{ secrets.AWS_CLIENT_ID }} + AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_CLIENT_KEY }} + +# # https://stackoverflow.com/questions/59977364/github-actions-how-use-strategy-matrix-with-script +# # Required object fields per environment: +# # - namespace - namespace suffix that will be used for the Kubernetes environment (i.e. 
if you pass 'test', kubernetes namespace will be named 'openlan-test') +# # - cgw_version - OpenLAN Cloud Gateway version to deploy (will be used for Docker image tag and git branch for Helm chart if git deployment is required) +# # - just_component - if true then deploy only cgw chart + testbeds: '[ + { + "namespace": "cgw01", + "chart_version": "main", + "cgw_version": "main", + "just_component": "false" + } + ]' + +on: + workflow_dispatch: + inputs: + just_component: + default: 'false' + description: 'Just deploy component, not all the other services' + required: true + id: + description: 'run identifier' + required: false + +jobs: + id: + name: Workflow ID Provider + runs-on: ubuntu-latest + steps: + - name: ${{ github.event.inputs.id }} + run: echo run identifier ${{ inputs.id }} + + generate-matrix: + name: Generate matrix for build + runs-on: ubuntu-latest + needs: + - id + outputs: + matrix: ${{ steps.set-matrix.outputs.matrix }} + steps: + - name: generate-matrix + id: set-matrix + run: | + cat >> $GITHUB_OUTPUT << EOF + matrix={"include":${{ env.testbeds }}} + EOF + + deploy: + name: Update OpenLAN Cloud SDK instances + runs-on: ubuntu-latest + needs: + - id + - generate-matrix + strategy: + matrix: ${{ fromJson( needs.generate-matrix.outputs.matrix ) }} + fail-fast: false + steps: + - name: Checkout repo with Helm values + uses: actions/checkout@v4 + with: + repository: Telecominfraproject/wlan-cloud-ucentral-deploy + path: wlan-cloud-ucentral-deploy + ref: ${{ matrix.chart_version }} + + - name: Fetch kubeconfig + run: | + aws eks update-kubeconfig --name ${{ env.AWS_EKS_NAME }} + + - name: Install kubectl, helmfile and plugins + run: | + curl -s -LO "https://dl.k8s.io/release/v1.27.14/bin/linux/amd64/kubectl" + sudo install -o root -g root -m 0755 kubectl /usr/local/bin/kubectl + # Download the binary + curl -s -LO "https://github.com/getsops/sops/releases/download/v3.8.1/sops-v3.8.1.linux.amd64" + sudo install -o root -g root -m 0755 sops-v3.8.1.linux.amd64 /usr/local/bin/sops + curl -s -LO "https://github.com/helmfile/helmfile/releases/download/v0.165.0/helmfile_0.165.0_linux_amd64.tar.gz" + tar xvzf helmfile_0.165.0_linux_amd64.tar.gz helmfile + sudo install -o root -g root -m 0755 helmfile /usr/local/bin/helmfile + helm plugin install https://github.com/aslafy-z/helm-git --version 0.16.0 + helm plugin install https://github.com/databus23/helm-diff + helm plugin install https://github.com/jkroepke/helm-secrets + + - name: Deploy OpenLAN Cloud Gateway and services + if: ${{ github.event.inputs.just_component == 'false' }} + working-directory: wlan-cloud-ucentral-deploy/cgw + run: | + # service components can't be reinstalled easily + helm ls -n ${{ matrix.namespace }} + if ! 
helm ls -n ${{ matrix.namespace }} | grep "^kafka" >/dev/null ; then + helmfile --environment ${{ matrix.namespace }} apply + else + helmfile --environment ${{ matrix.namespace }} -l app=cgw apply + fi + + - name: Deploy OpenLAN Cloud Gateway only + if: ${{ github.event.inputs.just_component == 'true' }} + working-directory: wlan-cloud-ucentral-deploy/cgw + run: | + helmfile --environment ${{ matrix.namespace }} -l app=cgw apply + + - name: Show resource state on deployment failure + if: failure() + run: | + echo "Pods:" + kubectl get pods --namespace openlan-${{ matrix.namespace }} + echo "Pod Descriptions:" + kubectl describe pods --namespace openlan-${{ matrix.namespace }} + echo "Services:" + kubectl get services --namespace openlan-${{ matrix.namespace }} + echo "Service Descriptions:" + kubectl describe services --namespace openlan-${{ matrix.namespace }} + echo "PVCs:" + kubectl get persistentvolumeclaims --namespace openlan-${{ matrix.namespace }} + echo "PVC Descriptions:" + kubectl describe persistentvolumeclaims --namespace openlan-${{ matrix.namespace }} + +# - name: Rollback Cloud SDK +# if: failure() +# run: | +# helm rollback tip-openlan --namespace openlan-${{ matrix.namespace }} --wait --timeout 20m diff --git a/.github/workflows/performance.yml b/.github/workflows/performance.yml index 1c78661c16..a66073f2e9 100644 --- a/.github/workflows/performance.yml +++ b/.github/workflows/performance.yml @@ -22,7 +22,7 @@ on: description: "revision of the Open Wifi Helm chart" ap_models: required: true - default: "cig_wf188n,cig_wf196,hfcl_ion4xe,yuncore_fap655,yuncore_ax820,edgecore_oap101-6e,edgecore_eap102,edgecore_eap101,edgecore_eap104,cig_wf186w,edgecore_eap111" + default: "cig_wf188n,cig_wf196,hfcl_ion4xe,yuncore_fap655,yuncore_ax820,edgecore_oap101-6e,edgecore_eap102,edgecore_eap101,edgecore_eap104,cig_wf186w,edgecore_eap111,hfcl_ion4xi" description: "the AP models to test" ap_version: required: true @@ -302,7 +302,7 @@ jobs: test-hfcl-ion4xe: - needs: ["vars", "build"] + needs: ["vars", "build", "test-edgecore-eap101"] runs-on: [ self-hosted, small ] timeout-minutes: 1440 if: "!cancelled() && contains(fromJSON(needs.vars.outputs.ap_models), 'hfcl_ion4xe')" @@ -478,8 +478,185 @@ jobs: run: kubectl -n openwifi-${{ needs.vars.outputs.existing_controller }} logs deployment/owsec + test-hfcl-ion4xi: + needs: [ "vars", "build", "test-hfcl-ion4xe"] + runs-on: [ self-hosted, small ] + timeout-minutes: 1440 + if: "!cancelled() && contains(fromJSON(needs.vars.outputs.ap_models), 'hfcl_ion4xi')" + env: + AP_MODEL: hfcl_ion4xi + steps: + - name: Set AP model output + id: ap_model + run: | + echo "model=${AP_MODEL}" >> $GITHUB_OUTPUT + + - uses: actions/checkout@v3 + - name: Set up Python + uses: actions/setup-python@v4 + with: + python-version: "3.8" + + + # TODO WIFI-7839 delete when issue is resolved on AWS CLI side + - name: install kubectl + run: | + curl -s -LO "https://dl.k8s.io/release/v1.27.6/bin/linux/amd64/kubectl" + sudo install -o root -g root -m 0755 kubectl /usr/local/bin/kubectl + + - name: install aws CLI tool + run: | + curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "awscliv2.zip" + unzip awscliv2.zip + sudo ./aws/install + + - name: get EKS access credentials + run: aws eks update-kubeconfig --name ${{ env.AWS_EKS_NAME }} + + - name: prepare namespace name + id: namespace + run: | + NAMESPACE="performance-${{ github.run_id }}-$(echo ${{ steps.ap_model.outputs.model }} | tr '[:upper:]' '[:lower:]' | tr '_' '-')" + echo "name=${NAMESPACE}" >> 
$GITHUB_OUTPUT + + - name: prepare configuration + run: | + cat << EOF > lab_info.json + ${{ secrets.LAB_INFO_JSON }} + EOF + + + - name: run tests dataplane_tests + uses: ./.github/actions/run-tests + if: "!cancelled() && contains(fromJSON(needs.vars.outputs.marker_expressions), 'dataplane_tests')" + with: + namespace: ${{ steps.namespace.outputs.name }}-dtt + testbed: basic-3b + marker_expression: "performance and dataplane_tests" + configuration_file: "./lab_info.json" + testing_docker_image: tip-tip-wlan-cloud-docker-repo.jfrog.io/cloud-sdk-nightly:${{ github.run_id }} + additional_args: '-o firmware="${{ needs.vars.outputs.ap_version }}"' + allure_results_artifact_name: "allure-results-${{ steps.ap_model.outputs.model }}-dataplane_tests" + dns_records_to_resolve: "sec-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build gw-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build fms-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build" + + # necessary because if conditionals in composite actions are currently not respected + - name: get tests logs + if: always() + continue-on-error: true + run: | + podname=$(kubectl get pods -n ${{ steps.namespace.outputs.name }}-dtt --no-headers -o custom-columns=":metadata.name" -l job-name=testing | sed "s/pod\///") + kubectl logs --timestamps -n ${{ steps.namespace.outputs.name }}-dtt $podname || true + + - name: delete namespace + if: always() + continue-on-error: true + run: kubectl delete ns --ignore-not-found=true --wait ${{ steps.namespace.outputs.name }}-dtt + + - name: run tests peak_throughput_tests + uses: ./.github/actions/run-tests + if: "!cancelled() && contains(fromJSON(needs.vars.outputs.marker_expressions), 'peak_throughput_tests')" + with: + namespace: ${{ steps.namespace.outputs.name }}-ssdbt + testbed: basic-3b + marker_expression: "performance and peak_throughput_tests" + configuration_file: "./lab_info.json" + testing_docker_image: tip-tip-wlan-cloud-docker-repo.jfrog.io/cloud-sdk-nightly:${{ github.run_id }} + additional_args: '-o firmware="${{ needs.vars.outputs.ap_version }}"' + allure_results_artifact_name: "allure-results-${{ steps.ap_model.outputs.model }}-peak_throughput_tests" + dns_records_to_resolve: "sec-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build gw-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build fms-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build" + + # necessary because if conditionals in composite actions are currently not respected + - name: get tests logs + if: always() + continue-on-error: true + run: | + podname=$(kubectl get pods -n ${{ steps.namespace.outputs.name }}-ssdbt --no-headers -o custom-columns=":metadata.name" -l job-name=testing | sed "s/pod\///") + kubectl logs --timestamps -n ${{ steps.namespace.outputs.name }}-ssdbt $podname || true + + - name: delete namespace + if: always() + continue-on-error: true + run: kubectl delete ns --ignore-not-found=true --wait ${{ steps.namespace.outputs.name }}-ssdbt + + - name: run tests client_scale_tests + uses: ./.github/actions/run-tests + if: "!cancelled() && contains(fromJSON(needs.vars.outputs.marker_expressions), 'client_scale_tests')" + with: + namespace: ${{ steps.namespace.outputs.name }}-wct + testbed: basic-3b + marker_expression: "performance and client_scale_tests" + configuration_file: "./lab_info.json" + testing_docker_image: tip-tip-wlan-cloud-docker-repo.jfrog.io/cloud-sdk-nightly:${{ github.run_id }} + additional_args: '-o 
firmware="${{ needs.vars.outputs.ap_version }}"' + allure_results_artifact_name: "allure-results-${{ steps.ap_model.outputs.model }}-client_scale_tests" + dns_records_to_resolve: "sec-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build gw-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build fms-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build" + + # necessary because if conditionals in composite actions are currently not respected + - name: get tests logs + if: always() + continue-on-error: true + run: | + podname=$(kubectl get pods -n ${{ steps.namespace.outputs.name }}-wct --no-headers -o custom-columns=":metadata.name" -l job-name=testing | sed "s/pod\///") + kubectl logs --timestamps -n ${{ steps.namespace.outputs.name }}-wct $podname || true + + - name: delete namespace + if: always() + continue-on-error: true + run: kubectl delete ns --ignore-not-found=true --wait ${{ steps.namespace.outputs.name }}-wct + + - name: run tests dual_band_tests + uses: ./.github/actions/run-tests + if: "!cancelled() && contains(fromJSON(needs.vars.outputs.marker_expressions), 'dual_band_tests')" + with: + namespace: ${{ steps.namespace.outputs.name }}-wct + testbed: basic-3b + marker_expression: "performance and dual_band_tests" + configuration_file: "./lab_info.json" + testing_docker_image: tip-tip-wlan-cloud-docker-repo.jfrog.io/cloud-sdk-nightly:${{ github.run_id }} + additional_args: '-o firmware="${{ needs.vars.outputs.ap_version }}"' + allure_results_artifact_name: "allure-results-${{ steps.ap_model.outputs.model }}-dual_band_tests" + dns_records_to_resolve: "sec-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build gw-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build fms-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build" + + # necessary because if conditionals in composite actions are currently not respected + - name: get tests logs + if: always() + continue-on-error: true + run: | + podname=$(kubectl get pods -n ${{ steps.namespace.outputs.name }}-wct --no-headers -o custom-columns=":metadata.name" -l job-name=testing | sed "s/pod\///") + kubectl logs --timestamps -n ${{ steps.namespace.outputs.name }}-wct $podname || true + + - name: delete namespace + if: always() + continue-on-error: true + run: kubectl delete ns --ignore-not-found=true --wait ${{ steps.namespace.outputs.name }}-wct + + - name: show gw logs + if: failure() + run: kubectl -n openwifi-${{ needs.vars.outputs.existing_controller }} logs deployment/owgw + + - name: show fms logs + if: failure() + run: kubectl -n openwifi-${{ needs.vars.outputs.existing_controller }} logs deployment/owfms + + - name: show prov logs + if: failure() + run: kubectl -n openwifi-${{ needs.vars.outputs.existing_controller }} logs deployment/owprov + + - name: show analytics logs + if: failure() + run: kubectl -n openwifi-${{ needs.vars.outputs.existing_controller }} logs deployment/owanalytics + + - name: show subscription (userportal) logs + if: failure() + run: kubectl -n openwifi-${{ needs.vars.outputs.existing_controller }} logs deployment/owsub + + - name: show sec logs + if: failure() + run: kubectl -n openwifi-${{ needs.vars.outputs.existing_controller }} logs deployment/owsec + + test-edgecore-eap101: - needs: ["vars", "build", "test-hfcl-ion4xe"] + needs: ["vars", "build"] runs-on: [ self-hosted, small ] timeout-minutes: 1440 if: "!cancelled() && contains(fromJSON(needs.vars.outputs.ap_models), 'edgecore_eap101')" @@ -2078,7 +2255,7 @@ 
jobs: report: if: "!cancelled()" runs-on: ubuntu-latest - needs: [vars, test-cig-wf188n, test-edgecore-oap101-6e, test-cig-wf196, test-edgecore-eap102, test-hfcl-ion4xe, test-edgecore-eap101, test-yuncore-fap655, test-edgecore-eap104, test-yuncore-ax820, test-cig-wf186w, test-edgecore-eap111] + needs: [vars, test-cig-wf188n, test-edgecore-oap101-6e, test-cig-wf196, test-edgecore-eap102, test-hfcl-ion4xe, test-edgecore-eap101, test-yuncore-fap655, test-edgecore-eap104, test-yuncore-ax820, test-cig-wf186w, test-edgecore-eap111, test-hfcl-ion4xi] strategy: fail-fast: false matrix: @@ -2153,7 +2330,7 @@ jobs: # Cleanup cleanup: - needs: [test-cig-wf188n, test-edgecore-oap101-6e, test-cig-wf196, test-edgecore-eap102, test-hfcl-ion4xe, test-edgecore-eap101, test-yuncore-fap655, test-edgecore-eap104, test-yuncore-ax820, test-cig-wf186w, test-edgecore-eap111] + needs: [test-cig-wf188n, test-edgecore-oap101-6e, test-cig-wf196, test-edgecore-eap102, test-hfcl-ion4xe, test-edgecore-eap101, test-yuncore-fap655, test-edgecore-eap104, test-yuncore-ax820, test-cig-wf186w, test-edgecore-eap111, test-hfcl-ion4xi] runs-on: ubuntu-latest if: always() steps: diff --git a/.github/workflows/quali.yml b/.github/workflows/quali.yml index 23c9cfff9f..ec54d8745b 100644 --- a/.github/workflows/quali.yml +++ b/.github/workflows/quali.yml @@ -22,7 +22,7 @@ on: description: "revision of the Open Wifi Helm chart" ap_models: required: true - default: "cig_wf188n,cig_wf196,hfcl_ion4xe,yuncore_fap655,yuncore_ax820,edgecore_oap101-6e,edgecore_eap102,edgecore_eap101,edgecore_eap104,cig_wf186w,edgecore_eap111" + default: "cig_wf188n,cig_wf196,hfcl_ion4xe,yuncore_fap655,yuncore_ax820,edgecore_oap101-6e,edgecore_eap102,edgecore_eap101,edgecore_eap104,cig_wf186w,edgecore_eap111,hfcl_ion4xi" description: "the AP models to test" ap_version: required: true @@ -752,6 +752,95 @@ jobs: if: failure() run: kubectl -n openwifi-qa01 logs deployment/owsec + test-hfcl-ion4xi: + needs: [ "vars", "build" ] + runs-on: ubuntu-latest + if: "!cancelled() && contains(fromJSON(needs.vars.outputs.ap_models), 'hfcl_ion4xi')" + env: + AP_MODEL: hfcl_ion4xi + steps: + - name: Set AP model output + id: ap_model + run: | + echo "model=${AP_MODEL}" >> $GITHUB_OUTPUT + + - uses: actions/checkout@v3 + - name: Set up Python + uses: actions/setup-python@v4 + with: + python-version: "3.8" + + # TODO WIFI-7839 delete when issue is resolved on AWS CLI side + - name: install kubectl + run: | + curl -s -LO "https://dl.k8s.io/release/v1.27.6/bin/linux/amd64/kubectl" + sudo install -o root -g root -m 0755 kubectl /usr/local/bin/kubectl + + - name: get EKS access credentials + run: aws eks update-kubeconfig --name ${{ env.AWS_EKS_NAME }} + + - name: prepare namespace name + id: namespace + run: | + NAMESPACE="testing-${{ github.run_id }}-$(echo ${{ steps.ap_model.outputs.model }} | tr '[:upper:]' '[:lower:]' | tr '_' '-')" + echo "name=${NAMESPACE}" >> $GITHUB_OUTPUT + + - name: prepare configuration + run: | + cat << EOF > lab_info.json + ${{ secrets.LAB_INFO_JSON }} + EOF + + + - name: run tests + uses: ./.github/actions/run-tests + with: + namespace: ${{ steps.namespace.outputs.name }} + testbed: basic-3b + marker_expression: "${{ needs.vars.outputs.marker_expression }}" + configuration_file: "./lab_info.json" + testing_docker_image: tip-tip-wlan-cloud-docker-repo.jfrog.io/cloud-sdk-nightly:${{ github.run_id }} + additional_args: '-o firmware="${{ needs.vars.outputs.ap_version }}"' + allure_results_artifact_name: "allure-results-${{ steps.ap_model.outputs.model 
}}" + dns_records_to_resolve: "sec-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build gw-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build fms-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build" + + # necessary because if conditionals in composite actions are currently not respected + - name: get tests logs + if: always() + continue-on-error: true + run: | + podname=$(kubectl get pods -n ${{ steps.namespace.outputs.name }} --no-headers -o custom-columns=":metadata.name" -l job-name=testing | sed "s/pod\///") + kubectl logs --timestamps -n ${{ steps.namespace.outputs.name }} $podname || true + + - name: delete namespace + if: always() + continue-on-error: true + run: kubectl delete ns --ignore-not-found=true --wait ${{ steps.namespace.outputs.name }} + + - name: show gw logs + if: failure() + run: kubectl -n openwifi-qa01 logs deployment/owgw + + - name: show fms logs + if: failure() + run: kubectl -n openwifi-qa01 logs deployment/owfms + + - name: show prov logs + if: failure() + run: kubectl -n openwifi-qa01 logs deployment/owprov + + - name: show analytics logs + if: failure() + run: kubectl -n openwifi-qa01 logs deployment/owanalytics + + - name: show subscription (userportal) logs + if: failure() + run: kubectl -n openwifi-qa01 logs deployment/owsub + + - name: show sec logs + if: failure() + run: kubectl -n openwifi-qa01 logs deployment/owsec + test-edgecore-eap101: needs: ["vars", "build"] @@ -1116,7 +1205,7 @@ jobs: report: if: "!cancelled()" runs-on: ubuntu-latest - needs: [vars, test-cig-wf188n, test-cig-wf196, test-yuncore-fap655, test-yuncore-ax820, test-edgecore-eap104, test-edgecore-oap101-6e, test-hfcl-ion4xe, test-edgecore-eap101, test-edgecore-eap102, test-cig-wf186w, test-edgecore-eap111] + needs: [vars, test-cig-wf188n, test-cig-wf196, test-yuncore-fap655, test-yuncore-ax820, test-edgecore-eap104, test-edgecore-oap101-6e, test-hfcl-ion4xe, test-edgecore-eap101, test-edgecore-eap102, test-cig-wf186w, test-edgecore-eap111, test-hfcl-ion4xi] strategy: fail-fast: false matrix: @@ -1167,7 +1256,7 @@ jobs: # Cleanup cleanup: - needs: [test-cig-wf188n, test-cig-wf196, test-yuncore-fap655, test-yuncore-ax820, test-edgecore-eap104, test-edgecore-oap101-6e, test-hfcl-ion4xe, test-edgecore-eap101, test-edgecore-eap102, test-cig-wf186w, test-edgecore-eap111] + needs: [test-cig-wf188n, test-cig-wf196, test-yuncore-fap655, test-yuncore-ax820, test-edgecore-eap104, test-edgecore-oap101-6e, test-hfcl-ion4xe, test-edgecore-eap101, test-edgecore-eap102, test-cig-wf186w, test-edgecore-eap111, test-hfcl-ion4xi] runs-on: ubuntu-latest if: always() steps: diff --git a/.github/workflows/regression.yml b/.github/workflows/regression.yml index be1048e738..ba66fe6101 100644 --- a/.github/workflows/regression.yml +++ b/.github/workflows/regression.yml @@ -22,7 +22,7 @@ on: description: "revision of the Open Wifi Helm chart" ap_models: required: true - default: "cig_wf188n,cig_wf196,hfcl_ion4xe,yuncore_fap655,yuncore_ax820,edgecore_oap101-6e,edgecore_eap102,edgecore_eap101,edgecore_eap104,cig_wf186w,edgecore_eap111" + default: "cig_wf188n,cig_wf196,hfcl_ion4xe,yuncore_fap655,yuncore_ax820,edgecore_oap101-6e,edgecore_eap102,edgecore_eap101,edgecore_eap104,cig_wf186w,edgecore_eap111,hfcl_ion4xi" description: "the AP models to test" ap_version: required: true @@ -447,6 +447,328 @@ jobs: if: failure() run: kubectl -n openwifi-qa01 logs deployment/owsec + test-hfcl-ion4xi: + needs: [ "vars", "build", "test-hfcl-ion4xe" ] + runs-on: [ self-hosted, 
small ] + timeout-minutes: 1440 + if: "!cancelled() && contains(fromJSON(needs.vars.outputs.ap_models), 'hfcl_ion4xi')" + env: + AP_MODEL: hfcl_ion4xi + steps: + - name: Set AP model output + id: ap_model + run: | + echo "model=${AP_MODEL}" >> $GITHUB_OUTPUT + + - uses: actions/checkout@v3 + - name: Set up Python + uses: actions/setup-python@v4 + with: + python-version: "3.8" + + + # TODO WIFI-7839 delete when issue is resolved on AWS CLI side + - name: install kubectl + run: | + curl -s -LO "https://dl.k8s.io/release/v1.27.6/bin/linux/amd64/kubectl" + sudo install -o root -g root -m 0755 kubectl /usr/local/bin/kubectl + + - name: install aws CLI tool + run: | + curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "awscliv2.zip" + unzip awscliv2.zip + sudo ./aws/install + + - name: get EKS access credentials + run: aws eks update-kubeconfig --name ${{ env.AWS_EKS_NAME }} + + - name: prepare namespace name + id: namespace + run: | + NAMESPACE="regression-${{ github.run_id }}-$(echo ${{ steps.ap_model.outputs.model }} | tr '[:upper:]' '[:lower:]' | tr '_' '-')" + echo "name=${NAMESPACE}" >> $GITHUB_OUTPUT + + - name: prepare configuration + run: | + cat << EOF > lab_info.json + ${{ secrets.LAB_INFO_JSON }} + EOF + + + - name: run dfs tests + uses: ./.github/actions/run-tests + if: "!cancelled() && contains(fromJSON(needs.vars.outputs.marker_expressions), 'dfs_tests')" + with: + namespace: ${{ steps.namespace.outputs.name }}-dfs + testbed: basic-3b + marker_expression: "ow_regression_lf and dfs_tests" + configuration_file: "./lab_info.json" + testing_docker_image: tip-tip-wlan-cloud-docker-repo.jfrog.io/cloud-sdk-nightly:${{ github.run_id }} + allure_results_artifact_name: allure-results-${{ steps.ap_model.outputs.model }}-dfs_tests + dns_records_to_resolve: "sec-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build gw-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build fms-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build" + + # necessary because if conditionals in composite actions are currently not respected + - name: get tests logs + if: always() + continue-on-error: true + run: | + podname=$(kubectl get pods -n ${{ steps.namespace.outputs.name }}-dfs --no-headers -o custom-columns=":metadata.name" -l job-name=testing | sed "s/pod\///") + kubectl logs --timestamps -n ${{ steps.namespace.outputs.name }}-dfs $podname || true + + - name: delete namespace + if: always() + continue-on-error: true + run: kubectl delete ns --ignore-not-found=true --wait ${{ steps.namespace.outputs.name }}-dfs + + - name: run multipsk tests + uses: ./.github/actions/run-tests + if: "!cancelled() && contains(fromJSON(needs.vars.outputs.marker_expressions), 'multi_psk_tests')" + with: + namespace: ${{ steps.namespace.outputs.name }}-multipsk + testbed: basic-3b + marker_expression: "ow_regression_lf and multi_psk_tests" + configuration_file: "./lab_info.json" + testing_docker_image: tip-tip-wlan-cloud-docker-repo.jfrog.io/cloud-sdk-nightly:${{ github.run_id }} + allure_results_artifact_name: allure-results-${{ steps.ap_model.outputs.model }}-multi_psk_tests + dns_records_to_resolve: "sec-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build gw-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build fms-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build" + + # necessary because if conditionals in composite actions are currently not respected + - name: get tests logs + if: always() + continue-on-error: 
true + run: | + podname=$(kubectl get pods -n ${{ steps.namespace.outputs.name }}-multipsk --no-headers -o custom-columns=":metadata.name" -l job-name=testing | sed "s/pod\///") + kubectl logs --timestamps -n ${{ steps.namespace.outputs.name }}-multipsk $podname || true + + - name: delete namespace + if: always() + continue-on-error: true + run: kubectl delete ns --ignore-not-found=true --wait ${{ steps.namespace.outputs.name }}-multipsk + + - name: run rate_limiting tests + uses: ./.github/actions/run-tests + if: "!cancelled() && contains(fromJSON(needs.vars.outputs.marker_expressions), 'rate_limiting_tests')" + with: + namespace: ${{ steps.namespace.outputs.name }}-rate-limiting + testbed: basic-3b + marker_expression: "ow_regression_lf and rate_limiting_tests" + configuration_file: "./lab_info.json" + testing_docker_image: tip-tip-wlan-cloud-docker-repo.jfrog.io/cloud-sdk-nightly:${{ github.run_id }} + allure_results_artifact_name: allure-results-${{ steps.ap_model.outputs.model }}-rate_limiting_tests + dns_records_to_resolve: "sec-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build gw-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build fms-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build" + + # necessary because if conditionals in composite actions are currently not respected + - name: get tests logs + if: always() + continue-on-error: true + run: | + podname=$(kubectl get pods -n ${{ steps.namespace.outputs.name }}-rate-limiting --no-headers -o custom-columns=":metadata.name" -l job-name=testing | sed "s/pod\///") + kubectl logs --timestamps -n ${{ steps.namespace.outputs.name }}-rate-limiting $podname || true + + - name: delete namespace + if: always() + continue-on-error: true + run: kubectl delete ns --ignore-not-found=true --wait ${{ steps.namespace.outputs.name }}-rate-limiting + + - name: run rate_limiting_with_radius tests + uses: ./.github/actions/run-tests + if: "!cancelled() && contains(fromJSON(needs.vars.outputs.marker_expressions), 'rate_limiting_with_radius_tests')" + with: + namespace: ${{ steps.namespace.outputs.name }}-rate-limiting-radius + testbed: basic-3b + marker_expression: "ow_regression_lf and rate_limiting_with_radius_tests" + configuration_file: "./lab_info.json" + testing_docker_image: tip-tip-wlan-cloud-docker-repo.jfrog.io/cloud-sdk-nightly:${{ github.run_id }} + allure_results_artifact_name: allure-results-${{ steps.ap_model.outputs.model }}-rate_limiting_with_radius_tests + dns_records_to_resolve: "sec-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build gw-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build fms-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build" + + # necessary because if conditionals in composite actions are currently not respected + - name: get tests logs + if: always() + continue-on-error: true + run: | + podname=$(kubectl get pods -n ${{ steps.namespace.outputs.name }}-rate-limiting-radius --no-headers -o custom-columns=":metadata.name" -l job-name=testing | sed "s/pod\///") + kubectl logs --timestamps -n ${{ steps.namespace.outputs.name }}-rate-limiting-radius $podname || true + + - name: delete namespace + if: always() + continue-on-error: true + run: kubectl delete ns --ignore-not-found=true --wait ${{ steps.namespace.outputs.name }}-rate-limiting-radius + + - name: run dynamic_vlan tests + uses: ./.github/actions/run-tests + if: "!cancelled() && contains(fromJSON(needs.vars.outputs.marker_expressions), 
'dynamic_vlan_tests')" + with: + namespace: ${{ steps.namespace.outputs.name }}-dynamic-vlan + testbed: basic-3b + marker_expression: "ow_regression_lf and dynamic_vlan_tests" + configuration_file: "./lab_info.json" + testing_docker_image: tip-tip-wlan-cloud-docker-repo.jfrog.io/cloud-sdk-nightly:${{ github.run_id }} + allure_results_artifact_name: allure-results-${{ steps.ap_model.outputs.model }}-dynamic_vlan_tests + dns_records_to_resolve: "sec-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build gw-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build fms-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build" + + # necessary because if conditionals in composite actions are currently not respected + - name: get tests logs + if: always() + continue-on-error: true + run: | + podname=$(kubectl get pods -n ${{ steps.namespace.outputs.name }}-dynamic-vlan --no-headers -o custom-columns=":metadata.name" -l job-name=testing | sed "s/pod\///") + kubectl logs --timestamps -n ${{ steps.namespace.outputs.name }}-dynamic-vlan $podname || true + + - name: delete namespace + if: always() + continue-on-error: true + run: kubectl delete ns --ignore-not-found=true --wait ${{ steps.namespace.outputs.name }}-dynamic-vlan + + - name: run multi_vlan tests + uses: ./.github/actions/run-tests + if: "!cancelled() && contains(fromJSON(needs.vars.outputs.marker_expressions), 'multi_vlan_tests')" + with: + namespace: ${{ steps.namespace.outputs.name }}-multi-vlan + testbed: basic-3b + marker_expression: "ow_regression_lf and multi_vlan_tests" + configuration_file: "./lab_info.json" + testing_docker_image: tip-tip-wlan-cloud-docker-repo.jfrog.io/cloud-sdk-nightly:${{ github.run_id }} + allure_results_artifact_name: allure-results-${{ steps.ap_model.outputs.model }}-multi_vlan_tests + dns_records_to_resolve: "sec-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build gw-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build fms-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build" + + # necessary because if conditionals in composite actions are currently not respected + - name: get tests logs + if: always() + continue-on-error: true + run: | + podname=$(kubectl get pods -n ${{ steps.namespace.outputs.name }}-multi-vlan --no-headers -o custom-columns=":metadata.name" -l job-name=testing | sed "s/pod\///") + kubectl logs --timestamps -n ${{ steps.namespace.outputs.name }}-multi-vlan $podname || true + + - name: delete namespace + if: always() + continue-on-error: true + run: kubectl delete ns --ignore-not-found=true --wait ${{ steps.namespace.outputs.name }}-multi-vlan + + - name: run strict forwarding tests + uses: ./.github/actions/run-tests + if: "!cancelled() && contains(fromJSON(needs.vars.outputs.marker_expressions), 'strict_forwarding_tests')" + with: + namespace: ${{ steps.namespace.outputs.name }}-strict-forwarding + testbed: basic-3b + marker_expression: "ow_regression_lf and strict_forwarding_tests" + configuration_file: "./lab_info.json" + testing_docker_image: tip-tip-wlan-cloud-docker-repo.jfrog.io/cloud-sdk-nightly:${{ github.run_id }} + allure_results_artifact_name: allure-results-${{ steps.ap_model.outputs.model }}-strict_forwarding_tests + dns_records_to_resolve: "sec-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build gw-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build fms-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build" + + # necessary because 
if conditionals in composite actions are currently not respected + - name: get tests logs + if: always() + continue-on-error: true + run: | + podname=$(kubectl get pods -n ${{ steps.namespace.outputs.name }}-strict-forwarding --no-headers -o custom-columns=":metadata.name" -l job-name=testing | sed "s/pod\///") + kubectl logs --timestamps -n ${{ steps.namespace.outputs.name }}-strict-forwarding $podname || true + + - name: delete namespace + if: always() + continue-on-error: true + run: kubectl delete ns --ignore-not-found=true --wait ${{ steps.namespace.outputs.name }}-strict-forwarding + + - name: run captive portal tests + uses: ./.github/actions/run-tests + if: "!cancelled() && contains(fromJSON(needs.vars.outputs.marker_expressions), 'advanced_captive_portal_tests')" + with: + namespace: ${{ steps.namespace.outputs.name }}-captive-portal + testbed: basic-3b + marker_expression: "ow_regression_lf and advanced_captive_portal_tests" + configuration_file: "./lab_info.json" + testing_docker_image: tip-tip-wlan-cloud-docker-repo.jfrog.io/cloud-sdk-nightly:${{ github.run_id }} + allure_results_artifact_name: allure-results-${{ steps.ap_model.outputs.model }}-advanced_captive_portal_tests + dns_records_to_resolve: "sec-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build gw-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build fms-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build" + + # necessary because if conditionals in composite actions are currently not respected + - name: get tests logs + if: always() + continue-on-error: true + run: | + podname=$(kubectl get pods -n ${{ steps.namespace.outputs.name }}-captive-portal --no-headers -o custom-columns=":metadata.name" -l job-name=testing | sed "s/pod\///") + kubectl logs --timestamps -n ${{ steps.namespace.outputs.name }}-captive-portal $podname || true + + - name: delete namespace + if: always() + continue-on-error: true + run: kubectl delete ns --ignore-not-found=true --wait ${{ steps.namespace.outputs.name }}-captive-portal + + - name: run firmware upgrade & downgrade tests + uses: ./.github/actions/run-tests + if: "!cancelled() && contains(fromJSON(needs.vars.outputs.marker_expressions), 'firmware_upgrade_downgrade')" + with: + namespace: ${{ steps.namespace.outputs.name }}-fw-upgrade-downgrade + testbed: basic-3b + marker_expression: "ow_regression_lf and firmware_upgrade_downgrade" + configuration_file: "./lab_info.json" + testing_docker_image: tip-tip-wlan-cloud-docker-repo.jfrog.io/cloud-sdk-nightly:${{ github.run_id }} + allure_results_artifact_name: allure-results-${{ steps.ap_model.outputs.model }}-firmware_upgrade_downgrade + dns_records_to_resolve: "sec-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build gw-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build fms-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build" + + # necessary because if conditionals in composite actions are currently not respected + - name: get tests logs + if: always() + continue-on-error: true + run: | + podname=$(kubectl get pods -n ${{ steps.namespace.outputs.name }}-fw-upgrade-downgrade --no-headers -o custom-columns=":metadata.name" -l job-name=testing | sed "s/pod\///") + kubectl logs --timestamps -n ${{ steps.namespace.outputs.name }}-fw-upgrade-downgrade $podname || true + + - name: delete namespace + if: always() + continue-on-error: true + run: kubectl delete ns --ignore-not-found=true --wait ${{ steps.namespace.outputs.name 
}}-fw-upgrade-downgrade + + - name: run ap support bundle tests + uses: ./.github/actions/run-tests + if: "!cancelled() && contains(fromJSON(needs.vars.outputs.marker_expressions), 'asb_tests')" + with: + namespace: ${{ steps.namespace.outputs.name }}-asb + testbed: basic-3b + marker_expression: "ow_regression_lf and asb_tests" + configuration_file: "./lab_info.json" + testing_docker_image: tip-tip-wlan-cloud-docker-repo.jfrog.io/cloud-sdk-nightly:${{ github.run_id }} + allure_results_artifact_name: allure-results-${{ steps.ap_model.outputs.model }}-asb_tests + dns_records_to_resolve: "sec-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build gw-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build fms-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build" + + # necessary because if conditionals in composite actions are currently not respected + - name: get tests logs + if: always() + continue-on-error: true + run: | + podname=$(kubectl get pods -n ${{ steps.namespace.outputs.name }}-asb --no-headers -o custom-columns=":metadata.name" -l job-name=testing | sed "s/pod\///") + kubectl logs --timestamps -n ${{ steps.namespace.outputs.name }}-asb $podname || true + + - name: delete namespace + if: always() + continue-on-error: true + run: kubectl delete ns --ignore-not-found=true --wait ${{ steps.namespace.outputs.name }}-asb + + - name: show gw logs + if: failure() + run: kubectl -n openwifi-qa01 logs deployment/owgw + + - name: show fms logs + if: failure() + run: kubectl -n openwifi-qa01 logs deployment/owfms + + - name: show prov logs + if: failure() + run: kubectl -n openwifi-qa01 logs deployment/owprov + + - name: show analytics logs + if: failure() + run: kubectl -n openwifi-qa01 logs deployment/owanalytics + + - name: show subscription (userportal) logs + if: failure() + run: kubectl -n openwifi-qa01 logs deployment/owsub + + - name: show sec logs + if: failure() + run: kubectl -n openwifi-qa01 logs deployment/owsec + test-edgecore-eap101: needs: ["vars", "build"] @@ -3677,7 +3999,7 @@ jobs: report: if: "!cancelled()" runs-on: ubuntu-latest - needs: [vars, test-hfcl-ion4xe, test-edgecore-eap101, test-yuncore-fap655, test-cig-wf188n, test-edgecore-eap102, test-edgecore-eap104, test-edgecore-oap101-6e, test-yuncore-ax820, test-cig-wf186w, test-cig-wf196, test-edgecore-eap111] + needs: [vars, test-hfcl-ion4xe, test-edgecore-eap101, test-yuncore-fap655, test-cig-wf188n, test-edgecore-eap102, test-edgecore-eap104, test-edgecore-oap101-6e, test-yuncore-ax820, test-cig-wf186w, test-cig-wf196, test-edgecore-eap111, test-hfcl-ion4xi] strategy: fail-fast: false matrix: @@ -3730,7 +4052,7 @@ jobs: # Cleanup cleanup: - needs: [test-hfcl-ion4xe, test-edgecore-eap101, test-yuncore-fap655, test-cig-wf188n, test-edgecore-eap102, test-edgecore-eap104, test-edgecore-oap101-6e, test-yuncore-ax820, test-cig-wf186w, test-cig-wf196, test-edgecore-eap111] + needs: [test-hfcl-ion4xe, test-edgecore-eap101, test-yuncore-fap655, test-cig-wf188n, test-edgecore-eap102, test-edgecore-eap104, test-edgecore-oap101-6e, test-yuncore-ax820, test-cig-wf186w, test-cig-wf196, test-edgecore-eap111, test-hfcl-ion4xi] runs-on: ubuntu-latest if: always() steps: diff --git a/.github/workflows/ucentralgw-qa-deployment.yaml b/.github/workflows/ucentralgw-qa-deployment.yaml index 2b76c1d4c1..4d887c24b4 100644 --- a/.github/workflows/ucentralgw-qa-deployment.yaml +++ b/.github/workflows/ucentralgw-qa-deployment.yaml @@ -34,7 +34,7 @@ env: { "namespace": "qa01", 
"deploy_method": "git", - "chart_version": "v3.1.0-RC1", + "chart_version": "v3.1.0-RC2", "owgw_version": "master", "owsec_version": "main", "owfms_version": "main", diff --git a/libs/tip_2x/ap_lib.py b/libs/tip_2x/ap_lib.py index ab4b9a49f9..c4ec2fc534 100644 --- a/libs/tip_2x/ap_lib.py +++ b/libs/tip_2x/ap_lib.py @@ -579,7 +579,7 @@ def add_restrictions(self, restrictions_file, developer_mode): expected_attachment_type=allure.attachment_type.TEXT, restrictions=True) self.factory_reset(print_log=False) - time.sleep(120) + time.sleep(300) return output diff --git a/libs/tip_2x/controller.py b/libs/tip_2x/controller.py index a6b9d38cf7..e30e101929 100644 --- a/libs/tip_2x/controller.py +++ b/libs/tip_2x/controller.py @@ -918,8 +918,10 @@ def check_restrictions(self, serial_number): def asb_script(self, serial_number, payload): uri = self.build_uri("device/" + serial_number + "/script") + logging.info("uri:- " + str(uri)) payload = json.dumps(payload) resp = requests.post(uri, data=payload, headers=self.make_headers(), verify=False, timeout=120) + logging.info("resp:- " + str(resp)) resp = resp.json() resp = resp['UUID'] return resp