diff --git a/changelogs/unreleased/250-kro-cat b/changelogs/unreleased/250-kro-cat new file mode 100644 index 00000000..7836c3c6 --- /dev/null +++ b/changelogs/unreleased/250-kro-cat @@ -0,0 +1,8 @@ +Drop the STDERR stream contents from output to avoid JSON mangling. + +The vgs command may print non-critical warnings to STDERR. Warnings may not +necessarily result in a failure return code, which allows the program to +continue with unmarshalling the JSON-formatted output. Combining this stream with +STDOUT will cause the next step at decodeVgsJSON() to fail due to garbage mixed +in the JSON. + diff --git a/ci/ci-test.sh b/ci/ci-test.sh index 0d3438a1..7504f873 100755 --- a/ci/ci-test.sh +++ b/ci/ci-test.sh @@ -16,29 +16,91 @@ set -e +LVM_OPERATOR="$(realpath deploy/lvm-operator.yaml)" +SNAP_CLASS="$(realpath deploy/sample/lvmsnapclass.yaml)" + +export LVM_NAMESPACE="openebs" +export TEST_DIR="tests" +export NAMESPACE="kube-system" + +# allow override +if [ -z "${KUBECONFIG}" ] +then + export KUBECONFIG="${HOME}/.kube/config" +fi + +# systemid for the testing environment. The kubernetes host machine will serve as the foreign lvm system. +LVM_SYSTEMID="openebs-ci-test-system" +LVM_CONFIG="global{system_id_source=lvmlocal}local{system_id=${LVM_SYSTEMID}}" + +# Clean up generated resources for successive tests. 
+cleanup_loopdev() { + sudo losetup -l | grep '(deleted)' | awk '{print $1}' \ + | while IFS= read -r disk + do + sudo losetup -d "${disk}" + done +} + +cleanup_lvmvg() { + if [ -f /tmp/openebs_ci_disk.img ] + then + sudo vgremove lvmvg -y || true + rm /tmp/openebs_ci_disk.img + fi + cleanup_loopdev +} + +cleanup_foreign_lvmvg() { + if [ -f /tmp/openebs_ci_foreign_disk.img ] + then + sudo vgremove foreign_lvmvg --config="${LVM_CONFIG}" -y || true + rm /tmp/openebs_ci_foreign_disk.img + fi + cleanup_loopdev +} + +cleanup() { + set +e + + echo "Cleaning up test resources" + + cleanup_lvmvg + cleanup_foreign_lvmvg + + kubectl delete pvc -n openebs lvmpv-pvc + kubectl delete -f "${SNAP_CLASS}" + kubectl delete -f "${LVM_OPERATOR}" + + # always return true + return 0 +} +# trap "cleanup 2>/dev/null" EXIT +[ -n "${CLEANUP_ONLY}" ] && cleanup 2>/dev/null && exit 0 +[ -n "${RESET}" ] && cleanup 2>/dev/null + # setup the lvm volume group to create the volume -truncate -s 1024G /tmp/disk.img -disk=`sudo losetup -f /tmp/disk.img --show` -sudo pvcreate "$disk" -sudo vgcreate lvmvg "$disk" +cleanup_lvmvg +truncate -s 1024G /tmp/openebs_ci_disk.img +disk="$(sudo losetup -f /tmp/openebs_ci_disk.img --show)" +sudo pvcreate "${disk}" +sudo vgcreate lvmvg "${disk}" + +# setup a foreign lvm to test +cleanup_foreign_lvmvg +truncate -s 1024G /tmp/openebs_ci_foreign_disk.img +foreign_disk="$(sudo losetup -f /tmp/openebs_ci_foreign_disk.img --show)" +sudo pvcreate "${foreign_disk}" +sudo vgcreate foreign_lvmvg "${foreign_disk}" --systemid="${LVM_SYSTEMID}" # install snapshot and thin volume module for lvm sudo modprobe dm-snapshot sudo modprobe dm_thin_pool - -LVM_OPERATOR=deploy/lvm-operator.yaml -SNAP_CLASS=deploy/sample/lvmsnapclass.yaml - -export LVM_NAMESPACE="openebs" -export TEST_DIR="tests" -export NAMESPACE="kube-system" -export KUBECONFIG=$HOME/.kube/config - # Prepare env for running BDD tests # Minikube is already running -kubectl apply -f $LVM_OPERATOR -kubectl apply -f 
$SNAP_CLASS +kubectl apply -f "${LVM_OPERATOR}" +kubectl apply -f "${SNAP_CLASS}" dumpAgentLogs() { NR=$1 @@ -99,9 +161,7 @@ set +e echo "running ginkgo test case" -ginkgo -v - -if [ $? -ne 0 ]; then +if ! ginkgo -v ; then sudo pvscan --cache @@ -135,3 +195,6 @@ exit 1 fi printf "\n\n######### All test cases passed #########\n\n" + +# last statement formatted to always return true +[ -z "${CLEANUP}" ] || cleanup 2>/dev/null diff --git a/pkg/lvm/lvm_util.go b/pkg/lvm/lvm_util.go index 0bb48770..109f06ad 100644 --- a/pkg/lvm/lvm_util.go +++ b/pkg/lvm/lvm_util.go @@ -17,6 +17,7 @@ limitations under the License. package lvm import ( + "bytes" "encoding/json" "fmt" "os" @@ -550,6 +551,7 @@ func decodeVgsJSON(raw []byte) ([]apis.VolumeGroup, error) { }{} var err error if err = json.Unmarshal(raw, output); err != nil { + klog.Errorf("json: failed to unmarshal:\n%s", raw) return nil, err } @@ -646,6 +648,27 @@ func ReloadLVMMetadataCache() error { return nil } +// RunCommandSplit is a wrapper function to run a command and receive its +// STDERR and STDOUT streams in separate []byte vars. +func RunCommandSplit(command string, args ...string) ([]byte, []byte, error) { + var cmdStdout bytes.Buffer + var cmdStderr bytes.Buffer + + cmd := exec.Command(command, args...) + cmd.Stdout = &cmdStdout + cmd.Stderr = &cmdStderr + err := cmd.Run() + + output := cmdStdout.Bytes() + error_output := cmdStderr.Bytes() + + if len(error_output) > 0 { + klog.Warningf("lvm: said into stderr: %s", error_output) + } + + return output, error_output, err +} + // ListLVMVolumeGroup invokes `vgs` to list all the available volume // groups in the node. // @@ -662,12 +685,12 @@ func ListLVMVolumeGroup(reloadCache bool) ([]apis.VolumeGroup, error) { "--reportformat", "json", "--units", "b", } - cmd := exec.Command(VGList, args...) - output, err := cmd.CombinedOutput() + output, _, err := RunCommandSplit(VGList, args...) 
if err != nil { klog.Errorf("lvm: list volume group cmd %v: %v", args, err) return nil, err } + return decodeVgsJSON(output) }