test-prow-e2e.sh (forked from red-hat-storage/odf-console)
#!/usr/bin/env bash
set -eExuo pipefail
if [ $# -eq 0 ]
then
echo "odf-console image not provided"
echo "exiting..."
exit 1
fi
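# Dump cluster state, operator and console logs, and GUI test screenshots into
# ARTIFACT_DIR so the CI job can archive them.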
function generateLogsAndCopyArtifacts {
  oc cluster-info dump > "${ARTIFACT_DIR}"/cluster_info.json
  oc get secrets -A -o wide > "${ARTIFACT_DIR}"/secrets.yaml
  oc get secrets -A -o yaml >> "${ARTIFACT_DIR}"/secrets.yaml
  oc get catalogsource -A -o wide > "${ARTIFACT_DIR}"/catalogsource.yaml
  oc get catalogsource -A -o yaml >> "${ARTIFACT_DIR}"/catalogsource.yaml
  oc get subscriptions -n openshift-storage -o wide > "${ARTIFACT_DIR}"/subscription_details.yaml
  oc get subscriptions -n openshift-storage -o yaml >> "${ARTIFACT_DIR}"/subscription_details.yaml
  oc get csvs -n openshift-storage -o wide > "${ARTIFACT_DIR}"/csvs.yaml
  oc get csvs -n openshift-storage -o yaml >> "${ARTIFACT_DIR}"/csvs.yaml
  oc get deployments -n openshift-storage -o wide > "${ARTIFACT_DIR}"/deployment_details.yaml
  oc get deployments -n openshift-storage -o yaml >> "${ARTIFACT_DIR}"/deployment_details.yaml
  oc get installplan -n openshift-storage -o wide > "${ARTIFACT_DIR}"/installplan.yaml
  oc get installplan -n openshift-storage -o yaml >> "${ARTIFACT_DIR}"/installplan.yaml
  oc get nodes -o wide > "${ARTIFACT_DIR}"/node.yaml
  oc get nodes -o yaml >> "${ARTIFACT_DIR}"/node.yaml
  oc get pods -n openshift-storage -o wide >> "${ARTIFACT_DIR}"/pod_details_openshift-storage.yaml
  oc get pods -n openshift-storage -o yaml >> "${ARTIFACT_DIR}"/pod_details_openshift-storage.yaml
  oc logs --previous=false deploy/odf-operator-controller-manager manager -n openshift-storage > "${ARTIFACT_DIR}"/odf.logs
  for pod in $(oc get pods -n "${NS}" --no-headers -o custom-columns=":metadata.name" | grep "odf-console"); do
    echo "$pod"
    oc logs --previous=false "$pod" -n "${NS}" > "${ARTIFACT_DIR}"/"${pod}".logs
  done
  oc get serviceaccounts -n openshift-storage -o wide > "${ARTIFACT_DIR}"/serviceaccount.yaml
  oc get serviceaccounts -n openshift-storage -o yaml >> "${ARTIFACT_DIR}"/serviceaccount.yaml
  oc get console.v1.operator.openshift.io cluster -o yaml >> "${ARTIFACT_DIR}"/cluster.yaml
  if [ -d "$ARTIFACT_DIR" ] && [ -d "$SCREENSHOTS_DIR" ]; then
    if [[ -z "$(ls -A -- "$SCREENSHOTS_DIR")" ]]; then
      echo "No artifacts were copied."
    else
      echo "Copying artifacts from $(pwd)..."
      cp -r "$SCREENSHOTS_DIR" "${ARTIFACT_DIR}/gui-test-screenshots"
    fi
  fi
}
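# Collect artifacts both on normal exit and on error.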
trap generateLogsAndCopyArtifacts EXIT
trap generateLogsAndCopyArtifacts ERR
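# CI settings: the pull-secret file provided by the CI job, the namespaces to
# operate on, and where artifacts and screenshots are written.
# ARTIFACT_DIR defaults to /tmp/artifacts when the environment does not set it.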
PULL_SECRET_PATH="/var/run/operator-secret/dockerconfig"
NAMESPACE="openshift-marketplace"
SECRET_NAME="ocs-secret"
NS="openshift-storage"
ARTIFACT_DIR=${ARTIFACT_DIR:=/tmp/artifacts}
SCREENSHOTS_DIR=gui-test-screenshots
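# Create the pull secret (type kubernetes.io/dockerconfigjson) in the namespace passed as $1.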
function createSecret {
  oc create secret generic ${SECRET_NAME} --from-file=.dockerconfigjson=${PULL_SECRET_PATH} --type=kubernetes.io/dockerconfigjson -n "$1"
}
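# Link the pull secret to every service account in ${NS} so their pods can pull the CI-built images.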
function linkSecrets {
  for serviceAccount in $(oc get serviceaccounts -n ${NS} --no-headers -o custom-columns=":metadata.name" | sed 's/"//g'); do
    echo "Linking ${SECRET_NAME} to ${serviceAccount}"
    oc secrets link "${serviceAccount}" ${SECRET_NAME} -n ${NS} --for=pull
  done
}
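# Delete all pods in the given namespace so they are recreated with the newly linked pull secret.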
function deleteAllPods {
  oc delete pods --all -n "$1"
}
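# Disable the default redhat-operators catalog so that the CI CatalogSource applied below is used instead.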
oc patch operatorhub.config.openshift.io/cluster -p='{"spec":{"sources":[{"disabled":true,"name":"redhat-operators"}]}}' --type=merge
echo "Creating secret for CI builds in ${NAMESPACE}"
createSecret ${NAMESPACE}
oc apply -f openshift-ci/odf-catalog-source.yaml
echo "Waiting for CatalogSource to be Ready"
# We have to sleep here for at least 1 minute to ensure the CatalogSource reaches a stable READY state.
sleep 60
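# The link/restart cycle below is repeated several times with sleeps in between,
# presumably so that service accounts created later during the operator install
# also get the pull secret linked before their pods are restarted.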
echo "Creating secret for linking pods"
createSecret ${NS}
echo "Adding secret to all service accounts in ${NS} namespace"
linkSecrets
echo "Restarting pods for secret update"
deleteAllPods ${NS}
sleep 30
echo "Adding secret to all service accounts in ${NS} namespace"
linkSecrets
echo "Restarting pods for secret update"
deleteAllPods ${NS}
sleep 120
echo "Adding secret to all service accounts in ${NS} namespace"
linkSecrets
echo "Restarting pods for secret update"
deleteAllPods ${NS}
echo "Adding secret to all service accounts in ${NS} namespace"
linkSecrets
echo "Restarting pods for secret update"
deleteAllPods ${NS}
sleep 120
# Enable console plugin for ODF-Console
export CONSOLE_CONFIG_NAME="cluster"
export ODF_PLUGIN_NAME="odf-console"
echo "Enabling Console Plugin for ODF Operator"
oc patch console.v1.operator.openshift.io ${CONSOLE_CONFIG_NAME} --type=json -p="[{'op': 'add', 'path': '/spec/plugins', 'value':[${ODF_PLUGIN_NAME}]}]"
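# Replace the odf-console image in the ODF CSV with the image passed as the first argument.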
ODF_CONSOLE_IMAGE="$1"
# Declare and assign separately to avoid masking the command's return value [SC2155].
ODF_CSV_NAME="$(oc get csv -n openshift-storage -o=jsonpath='{.items[?(@.spec.displayName=="OpenShift Data Foundation")].metadata.name}')"
export ODF_CSV_NAME
oc patch csv "${ODF_CSV_NAME}" -n openshift-storage --type='json' -p \
"[{'op': 'replace', 'path': '/spec/install/spec/deployments/1/spec/template/spec/containers/0/image', 'value': \"${ODF_CONSOLE_IMAGE}\"}]"
# Wait for the installation to complete.
# This is also the default case if the CSV is initially in the "Installing" phase.
timeout 15m bash <<-'EOF'
echo "waiting for ${ODF_CSV_NAME} clusterserviceversion to succeed"
until [ "$(oc -n openshift-storage get csv -o=jsonpath="{.items[?(@.metadata.name==\"${ODF_CSV_NAME}\")].status.phase}")" == "Succeeded" ]; do
sleep 1
done
EOF
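# The Cypress GUI tests read the kubeadmin password and console URL from these
# BRIDGE_* variables to log in to the web console.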
INSTALLER_DIR=${INSTALLER_DIR:=${ARTIFACT_DIR}/installer}
BRIDGE_KUBEADMIN_PASSWORD="$(cat "${KUBEADMIN_PASSWORD_FILE:-${INSTALLER_DIR}/auth/kubeadmin-password}")"
export BRIDGE_KUBEADMIN_PASSWORD
BRIDGE_BASE_ADDRESS="$(oc get consoles.config.openshift.io cluster -o jsonpath='{.status.consoleURL}')"
export BRIDGE_BASE_ADDRESS
# Disable color codes in Cypress since they do not render well in CI test logs.
# https://docs.cypress.io/guides/guides/continuous-integration.html#Colors
export NO_COLOR=1
# Install dependencies.
yarn install
# Run tests.
yarn run test-cypress-headless
# Generate Cypress report.
yarn run cypress-postreport