refactored test just for machine pools
Fiona-Waters committed Sep 27, 2023
1 parent e3c8b67 commit 58dc44b
Showing 1 changed file with 22 additions and 57 deletions.
@@ -5,15 +5,17 @@ import (
 	"time"

 	. "github.com/onsi/gomega"
-	. "github.com/project-codeflare/codeflare-operator/test/support"
 	mcadv1beta1 "github.com/project-codeflare/multi-cluster-app-dispatcher/pkg/apis/controller/v1beta1"

 	batchv1 "k8s.io/api/batch/v1"
 	corev1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+	. "github.com/project-codeflare/codeflare-operator/test/support"
 )

-func TestInstascale(t *testing.T) {
+func TestInstascaleMachinePool(t *testing.T) {
+
 	test := With(t)
 	test.T().Parallel()
@@ -38,11 +40,12 @@ func TestInstascale(t *testing.T) {
 		},
 		Immutable: Ptr(true),
 	}
+
 	config, err := test.Client().Core().CoreV1().ConfigMaps(namespace.Name).Create(test.Ctx(), config, metav1.CreateOptions{})
 	test.Expect(err).NotTo(HaveOccurred())
 	test.T().Logf("Created ConfigMap %s/%s successfully", config.Namespace, config.Name)

-	// create OCM connection
+	//create OCM connection
 	instascaleOCMSecret, err := test.Client().Core().CoreV1().Secrets("default").Get(test.Ctx(), "instascale-ocm-secret", metav1.GetOptions{})
 	if err != nil {
 		test.T().Errorf("unable to retrieve instascale-ocm-secret - Error : %v", err)
@@ -57,27 +60,10 @@ func TestInstascale(t *testing.T) {
 	}
 	defer connection.Close()

-	// check existing cluster resources
-	machinePoolsExist, err := MachinePoolsExist(connection)
-	test.Expect(err).NotTo(HaveOccurred())
-	nodePoolsExist, err := NodePoolsExist(connection)
+	// check existing cluster machine pool resources
+	foundMachinePool, err := CheckMachinePools(connection, TestName)
 	test.Expect(err).NotTo(HaveOccurred())
-
-	if machinePoolsExist {
-		// look for machine pool with aw name - expect not to find it
-		foundMachinePool, err := CheckMachinePools(connection, TestName)
-		test.Expect(err).NotTo(HaveOccurred())
-		test.Expect(foundMachinePool).To(BeFalse())
-	} else if nodePoolsExist {
-		// look for node pool with aw name - expect not to find it
-		foundNodePool, err := CheckNodePools(connection, TestName)
-		test.Expect(err).NotTo(HaveOccurred())
-		test.Expect(foundNodePool).To(BeFalse())
-	} else {
-		foundMachineSet, err := CheckMachineSets(TestName)
-		test.Expect(err).NotTo(HaveOccurred())
-		test.Expect(foundMachineSet).To(BeFalse())
-	}
+	test.Expect(foundMachinePool).To(BeFalse())

 	// Batch Job
 	job := &batchv1.Job{
@@ -159,10 +145,12 @@ func TestInstascale(t *testing.T) {
 				Requests: corev1.ResourceList{
 					corev1.ResourceCPU:    resource.MustParse("250m"),
 					corev1.ResourceMemory: resource.MustParse("512Mi"),
+					"nvidia.com/gpu":      resource.MustParse("1"),
 				},
 				Limits: corev1.ResourceList{
 					corev1.ResourceCPU:    resource.MustParse("500m"),
 					corev1.ResourceMemory: resource.MustParse("1G"),
+					"nvidia.com/gpu":      resource.MustParse("1"),
 				},
 			},
 			{
@@ -194,23 +182,11 @@ func TestInstascale(t *testing.T) {

 	// time.Sleep is used twice throughout the test, each for 30 seconds. Can look into using sync package waitGroup instead if that makes more sense
 	// wait for required resources to scale up before checking them again
-	time.Sleep(TestTimeoutThirtySeconds)
-
-	if machinePoolsExist {
-		// look for machine pool with aw name - expect to find it
-		foundMachinePool, err := CheckMachinePools(connection, TestName)
-		test.Expect(err).NotTo(HaveOccurred())
-		test.Expect(foundMachinePool).To(BeTrue())
-	} else if nodePoolsExist {
-		// look for node pool with aw name - expect to find it
-		foundNodePool, err := CheckNodePools(connection, TestName)
-		test.Expect(err).NotTo(HaveOccurred())
-		test.Expect(foundNodePool).To(BeTrue())
-	} else {
-		foundMachineSet, err := CheckMachineSets(TestName)
-		test.Expect(err).NotTo(HaveOccurred())
-		test.Expect(foundMachineSet).To(BeTrue())
-	}
+	time.Sleep(TestTimeoutMedium)
+
+	foundMachinePool, err = CheckMachinePools(connection, TestName)
+	test.Expect(err).NotTo(HaveOccurred())
+	test.Expect(foundMachinePool).To(BeTrue())

 	// Assert that the job has completed
 	test.T().Logf("Waiting for Job %s/%s to complete", job.Namespace, job.Name)
@@ -228,21 +204,10 @@ func TestInstascale(t *testing.T) {
 		Should(WithTransform(AppWrapperState, Equal(mcadv1beta1.AppWrapperStateCompleted)))

 	// allow time for the resources to scale down before checking them again
-	time.Sleep(TestTimeoutThirtySeconds)
-
-	if machinePoolsExist {
-		// look for machine pool with aw name - expect to find it
-		foundMachinePool, err := CheckMachinePools(connection, TestName)
-		test.Expect(err).NotTo(HaveOccurred())
-		test.Expect(foundMachinePool).To(BeFalse())
-	} else if nodePoolsExist {
-		// look for node pool with aw name - expect to find it
-		foundNodePool, err := CheckNodePools(connection, TestName)
-		test.Expect(err).NotTo(HaveOccurred())
-		test.Expect(foundNodePool).To(BeFalse())
-	} else {
-		foundMachineSet, err := CheckMachineSets(TestName)
-		test.Expect(err).NotTo(HaveOccurred())
-		test.Expect(foundMachineSet).To(BeFalse())
-	}
+	time.Sleep(TestTimeoutMedium)
+
+	foundMachinePool, err = CheckMachinePools(connection, TestName)
+	test.Expect(err).NotTo(HaveOccurred())
+	test.Expect(foundMachinePool).To(BeFalse())
+
 }
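
The in-code comment kept above flags the two fixed time.Sleep calls as a stopgap. A minimal sketch of one alternative is to poll with Gomega's Eventually until the machine pool reaches the expected state instead of sleeping for a fixed interval. The helper name and the timeout and interval values below are illustrative assumptions, not part of this commit; CheckMachinePools, TestName, and the OCM connection come from the test support package used above.

import (
	"testing"
	"time"

	"github.com/onsi/gomega"
	ocmsdk "github.com/openshift-online/ocm-sdk-go"
)

// waitForMachinePool polls CheckMachinePools until it reports the expected
// state or the timeout elapses, replacing a fixed sleep-then-check pair.
// Sketch only: the 5m/10s polling values are illustrative, not from the commit.
func waitForMachinePool(t *testing.T, connection *ocmsdk.Connection, expected bool) {
	t.Helper()
	g := gomega.NewWithT(t)
	g.Eventually(func() (bool, error) {
		// Gomega retries until the first return value matches the matcher;
		// a non-nil error marks the attempt as failed and triggers a retry.
		return CheckMachinePools(connection, TestName)
	}, 5*time.Minute, 10*time.Second).Should(gomega.Equal(expected))
}

Each sleep-then-check pair in the test could then collapse into a single call: waitForMachinePool(test.T(), connection, true) after the AppWrapper is created, and waitForMachinePool(test.T(), connection, false) once it completes.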
