Skip to content

Commit

Permalink
Merge pull request #320 from openebs/volgroup_ci_test
Browse files Browse the repository at this point in the history
test(ci): adding volgroup based tests
  • Loading branch information
abhilashshetty04 authored Jul 2, 2024
2 parents f9b57f6 + f5c581f commit eff0f6e
Show file tree
Hide file tree
Showing 5 changed files with 330 additions and 87 deletions.
18 changes: 2 additions & 16 deletions ci/ci-test.sh
Original file line number Diff line number Diff line change
Expand Up @@ -42,15 +42,6 @@ cleanup_loopdev() {
done
}

cleanup_lvmvg() {
if [ -f /tmp/openebs_ci_disk.img ]
then
sudo vgremove lvmvg -y || true
rm /tmp/openebs_ci_disk.img
fi
cleanup_loopdev
}

cleanup_foreign_lvmvg() {
if [ -f /tmp/openebs_ci_foreign_disk.img ]
then
Expand All @@ -65,7 +56,6 @@ cleanup() {

echo "Cleaning up test resources"

cleanup_lvmvg
cleanup_foreign_lvmvg

kubectl delete pvc -n openebs lvmpv-pvc
Expand All @@ -79,12 +69,6 @@ cleanup() {
[ -n "${CLEANUP_ONLY}" ] && cleanup 2>/dev/null && exit 0
[ -n "${RESET}" ] && cleanup 2>/dev/null

# setup the lvm volume group to create the volume
cleanup_lvmvg
truncate -s 100G /tmp/openebs_ci_disk.img
disk="$(sudo losetup -f /tmp/openebs_ci_disk.img --show)"
sudo pvcreate "${disk}"
sudo vgcreate lvmvg "${disk}"

# setup a foreign lvm to test
cleanup_foreign_lvmvg
Expand Down Expand Up @@ -171,6 +155,8 @@ sudo pvscan --cache

sudo lvdisplay

sudo vgdisplay

echo "******************** LVM Controller logs***************************** "
dumpControllerLogs 1000

Expand Down
69 changes: 57 additions & 12 deletions tests/lvm_utils.go
Original file line number Diff line number Diff line change
Expand Up @@ -20,17 +20,20 @@ import (
"fmt"
"strconv"
"strings"
"time"

"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
)

// This creates loopdevice using the size passed as arg,
// Uses the new loop device to create PV. returns loopdevice name to the caller.
func createPV(size int) string {
fmt.Printf("Creating device\n")
ginkgo.By("Creating Pv")

back_file_args := []string{
"mktemp", "-t",
"mktemp",
"-t",
"openebs_lvm_localpv_disk_XXXXX",
"--dry-run",
}
Expand All @@ -39,14 +42,16 @@ func createPV(size int) string {
file_str := strings.TrimSpace(string(file[:]))
size_str := strconv.Itoa(size) + "G"
device_args := []string{
"truncate", "-s",
"truncate",
"-s",
size_str, file_str,
}
_, _, err := execAtLocal("sudo", nil, device_args...)
gomega.Expect(err).ShouldNot(gomega.HaveOccurred(), "create device failed")

args_loop := []string{
"losetup", "-f",
"losetup",
"-f",
file_str, "--show",
}
stdout_loop, _, err := execAtLocal("sudo", nil, args_loop...)
Expand All @@ -62,9 +67,29 @@ func createPV(size int) string {
return stdout_loop_str
}

// vgEmpty reports whether the named volume group currently contains zero
// logical volumes. It runs `vgs <name> --options lv_count --noheadings`
// once and parses the count from stdout.
func vgEmpty(name string) bool {
	args_lvs := []string{
		"vgs",
		name,
		"--options",
		"lv_count",
		"--noheadings",
	}
	// NOTE(review): command and parse errors are deliberately ignored —
	// if `vgs` fails (e.g. the vg is already gone) the output parses to 0
	// and the vg is reported as empty, matching the original behavior.
	lvs, _, _ := execAtLocal("sudo", nil, args_lvs...)
	lv_cnt, _ := strconv.Atoi(strings.TrimSpace(string(lvs)))
	fmt.Printf("lvs cnt is %d\n", lv_cnt)
	// Idiomatic boolean return instead of if/else (staticcheck S1008).
	return lv_cnt == 0
}

// Does pvremove on specified device. Deletes loop device and the file backing loop device.
func removePV(device string) {
fmt.Printf("remove pv\n")
ginkgo.By("Removing pv")
args_pv := []string{
"pvremove",
device,
Expand All @@ -84,7 +109,8 @@ func removePV(device string) {
dev_str := strings.TrimSpace(string(dev))

args_loop := []string{
"losetup", "-d",
"losetup",
"-d",
device,
}
_, _, err_loop := execAtLocal("sudo", nil, args_loop...)
Expand All @@ -101,7 +127,7 @@ func removePV(device string) {

// Creates vg on the specified device, Device passed should be a pv.
func createVg(name string, device string) {
fmt.Printf("Creating vg\n")
ginkgo.By("Creating vg")
args_vg := []string{
"vgcreate", name,
device,
Expand All @@ -112,7 +138,7 @@ func createVg(name string, device string) {

// Takes vg name and pv device, extends vg using the supplied pv.
func extendVg(name string, device string) {
fmt.Printf("extending vg\n")
ginkgo.By("Extending vg")
args_vg := []string{
"vgextend", name,
device,
Expand All @@ -121,14 +147,31 @@ func extendVg(name string, device string) {
gomega.Expect(err_vg).To(gomega.BeNil(), "vg extend failed")
}

// Does vhremove on specified vg with force flag,
// lv will be forcedeleted if vg is not empty.
// Does vgremove on the specified vg, adding the -f flag if the vg isn't empty after a few retries.
func removeVg(name string) {
fmt.Printf("Removing vg\n")
ginkgo.By("Removing vg")
retries := 3
current_retry := 0
args_vg := []string{
"vgremove",
name,
"-f",
}
for {
if current_retry < retries {
vg_empty := vgEmpty(name)
if vg_empty {
fmt.Printf("No lv in vg before vg remove\n")
break
} else {
fmt.Printf("lv in vg during retry %d\n", current_retry)
}
} else {
fmt.Printf("vg still not empty after 6 seconds, moving on with force delete\n")
args_vg = append(args_vg, "-f")
break
}
current_retry += 1
time.Sleep(2 * time.Second)
}
_, _, err_vg := execAtLocal("sudo", nil, args_vg...)
gomega.Expect(err_vg).To(gomega.BeNil(), "vg remove failed")
Expand All @@ -137,6 +180,7 @@ func removeVg(name string) {
// enable the monitoring on thinpool created for test, on local node which
// is part of single node cluster.
func enableThinpoolMonitoring() {
ginkgo.By("Enable thinpool monitoring")
lv := VOLGROUP + "/" + pvcObj.Spec.VolumeName

args := []string{
Expand All @@ -163,6 +207,7 @@ func enableThinpoolMonitoring() {

// verify that the thinpool has extended in capacity to an expected size.
func VerifyThinpoolExtend() {
ginkgo.By("Verify thinpool extend")
expect_size, _ := strconv.ParseInt(expanded_capacity, 10, 64)
lv := VOLGROUP + "/" + pvcObj.Spec.VolumeName

Expand Down
Loading

0 comments on commit eff0f6e

Please sign in to comment.