From 1c5f3e80597e300dc25b7274e975071e73b670a0 Mon Sep 17 00:00:00 2001
From: Antonin Stefanutti
Date: Wed, 15 Nov 2023 00:54:20 +0100
Subject: [PATCH] [RayJob]: Add RayJob with RayCluster spec e2e test (#1636)

This PR adds an e2e test as a follow-up to #1539, leveraging #1575.
It also factorises the configuration shared across the RayJob e2e tests.
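
For reviewers, a minimal sketch of how the new helpers compose in a test
body (hypothetical snippet, not part of this patch; `test`, `namespace`
and the error handling follow the existing e2e test setup):

    jobs := newConfigMap(namespace.Name, "jobs", files(test, "counter.py"))
    jobs, err := test.Client().Core().CoreV1().ConfigMaps(namespace.Name).Create(test.Ctx(), jobs, metav1.CreateOptions{})
    test.Expect(err).NotTo(HaveOccurred())

    rayJob := &rayv1.RayJob{
        ObjectMeta: metav1.ObjectMeta{Name: "counter", Namespace: namespace.Name},
        Spec: rayv1.RayJobSpec{
            // The shared defaults live in rayClusterSpec(); mountConfigMap is an
            // option that mounts the scripts ConfigMap into the head Pod.
            RayClusterSpec:       newRayClusterSpec(mountConfigMap(jobs, "/home/ray/jobs")),
            Entrypoint:           "python /home/ray/jobs/counter.py",
            SubmitterPodTemplate: jobSubmitterPodTemplate(),
        },
    }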
---
 .../test/e2e/rayjob_cluster_selector_test.go | 166 +-------------
 ray-operator/test/e2e/rayjob_test.go         | 131 +++++++++++
 ray-operator/test/e2e/support.go             | 209 +++++++++++++++++-
 ray-operator/test/support/batch.go           |  29 +++
 ray-operator/test/support/ray.go             |  10 +
 ray-operator/test/support/support.go         |   8 +
 6 files changed, 387 insertions(+), 166 deletions(-)
 create mode 100644 ray-operator/test/e2e/rayjob_test.go
 create mode 100644 ray-operator/test/support/batch.go

diff --git a/ray-operator/test/e2e/rayjob_cluster_selector_test.go b/ray-operator/test/e2e/rayjob_cluster_selector_test.go
index 2ccb9b57c7..11be76c287 100644
--- a/ray-operator/test/e2e/rayjob_cluster_selector_test.go
+++ b/ray-operator/test/e2e/rayjob_cluster_selector_test.go
@@ -5,8 +5,6 @@ import (
 
   . "github.com/onsi/gomega"
 
-  corev1 "k8s.io/api/core/v1"
-  "k8s.io/apimachinery/pkg/api/resource"
   metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 
   rayv1 "github.com/ray-project/kuberay/ray-operator/apis/ray/v1"
@@ -21,21 +19,7 @@ func TestRayJobWithClusterSelector(t *testing.T) {
   namespace := test.NewTestNamespace()
 
   // Job scripts
-  jobs := &corev1.ConfigMap{
-    TypeMeta: metav1.TypeMeta{
-      APIVersion: corev1.SchemeGroupVersion.String(),
-      Kind:       "ConfigMap",
-    },
-    ObjectMeta: metav1.ObjectMeta{
-      Name:      "jobs",
-      Namespace: namespace.Name,
-    },
-    BinaryData: map[string][]byte{
-      "counter.py": ReadFile(test, "counter.py"),
-      "fail.py":    ReadFile(test, "fail.py"),
-    },
-    Immutable: Ptr(true),
-  }
+  jobs := newConfigMap(namespace.Name, "jobs", files(test, "counter.py", "fail.py"))
   jobs, err := test.Client().Core().CoreV1().ConfigMaps(namespace.Name).Create(test.Ctx(), jobs, metav1.CreateOptions{})
   test.Expect(err).NotTo(HaveOccurred())
   test.T().Logf("Created ConfigMap %s/%s successfully", jobs.Namespace, jobs.Name)
@@ -50,109 +34,7 @@ func TestRayJobWithClusterSelector(t *testing.T) {
       Name:      "raycluster",
       Namespace: namespace.Name,
     },
-    Spec: rayv1.RayClusterSpec{
-      RayVersion: GetRayVersion(),
-      HeadGroupSpec: rayv1.HeadGroupSpec{
-        RayStartParams: map[string]string{
-          "dashboard-host": "0.0.0.0",
-        },
-        Template: corev1.PodTemplateSpec{
-          Spec: corev1.PodSpec{
-            Containers: []corev1.Container{
-              {
-                Name:  "ray-head",
-                Image: GetRayImage(),
-                Ports: []corev1.ContainerPort{
-                  {
-                    ContainerPort: 6379,
-                    Name:          "gcs",
-                  },
-                  {
-                    ContainerPort: 8265,
-                    Name:          "dashboard",
-                  },
-                  {
-                    ContainerPort: 10001,
-                    Name:          "client",
-                  },
-                },
-                Lifecycle: &corev1.Lifecycle{
-                  PreStop: &corev1.LifecycleHandler{
-                    Exec: &corev1.ExecAction{
-                      Command: []string{"/bin/sh", "-c", "ray stop"},
-                    },
-                  },
-                },
-                Resources: corev1.ResourceRequirements{
-                  Requests: corev1.ResourceList{
-                    corev1.ResourceCPU:    resource.MustParse("300m"),
-                    corev1.ResourceMemory: resource.MustParse("1G"),
-                  },
-                  Limits: corev1.ResourceList{
-                    corev1.ResourceCPU:    resource.MustParse("500m"),
-                    corev1.ResourceMemory: resource.MustParse("2G"),
-                  },
-                },
-                VolumeMounts: []corev1.VolumeMount{
-                  {
-                    Name:      "jobs",
-                    MountPath: "/home/ray/jobs",
-                  },
-                },
-              },
-            },
-            Volumes: []corev1.Volume{
-              {
-                Name: "jobs",
-                VolumeSource: corev1.VolumeSource{
-                  ConfigMap: &corev1.ConfigMapVolumeSource{
-                    LocalObjectReference: corev1.LocalObjectReference{
-                      Name: jobs.Name,
-                    },
-                  },
-                },
-              },
-            },
-          },
-        },
-      },
-      WorkerGroupSpecs: []rayv1.WorkerGroupSpec{
-        {
-          Replicas:       Ptr(int32(1)),
-          MinReplicas:    Ptr(int32(1)),
-          MaxReplicas:    Ptr(int32(1)),
-          GroupName:      "small-group",
-          RayStartParams: map[string]string{},
-          Template: corev1.PodTemplateSpec{
-            Spec: corev1.PodSpec{
-              Containers: []corev1.Container{
-                {
-                  Name:  "ray-worker",
-                  Image: GetRayImage(),
-                  Lifecycle: &corev1.Lifecycle{
-                    PreStop: &corev1.LifecycleHandler{
-                      Exec: &corev1.ExecAction{
-                        Command: []string{"/bin/sh", "-c", "ray stop"},
-                      },
-                    },
-                  },
-                  Resources: corev1.ResourceRequirements{
-                    Requests: corev1.ResourceList{
-                      corev1.ResourceCPU:    resource.MustParse("300m"),
-                      corev1.ResourceMemory: resource.MustParse("1G"),
-                    },
-                    Limits: corev1.ResourceList{
-                      corev1.ResourceCPU:    resource.MustParse("500m"),
-                      corev1.ResourceMemory: resource.MustParse("1G"),
-                    },
-                  },
-                },
-              },
-            },
-          },
-        },
-      },
-    },
+    Spec: *newRayClusterSpec(mountConfigMap(jobs, "/home/ray/jobs")),
   }
   rayCluster, err = test.Client().Ray().RayV1().RayClusters(namespace.Name).Create(test.Ctx(), rayCluster, metav1.CreateOptions{})
   test.Expect(err).NotTo(HaveOccurred())
@@ -185,27 +67,7 @@ env_vars:
   counter_name: test_counter
 `,
       ShutdownAfterJobFinishes: false,
-      SubmitterPodTemplate: &corev1.PodTemplateSpec{
-        Spec: corev1.PodSpec{
-          Containers: []corev1.Container{
-            {
-              Name:  "ray-job-submitter",
-              Image: GetRayImage(),
-              Resources: corev1.ResourceRequirements{
-                Requests: corev1.ResourceList{
-                  corev1.ResourceCPU:    resource.MustParse("200m"),
-                  corev1.ResourceMemory: resource.MustParse("200Mi"),
-                },
-                Limits: corev1.ResourceList{
-                  corev1.ResourceCPU:    resource.MustParse("500m"),
-                  corev1.ResourceMemory: resource.MustParse("500Mi"),
-                },
-              },
-            },
-          },
-          RestartPolicy: corev1.RestartPolicyNever,
-        },
-      },
+      SubmitterPodTemplate: jobSubmitterPodTemplate(),
     },
   }
   rayJob, err = test.Client().Ray().RayV1().RayJobs(namespace.Name).Create(test.Ctx(), rayJob, metav1.CreateOptions{})
@@ -240,27 +102,7 @@ env_vars:
       },
       Entrypoint:               "python /home/ray/jobs/fail.py",
       ShutdownAfterJobFinishes: false,
-      SubmitterPodTemplate: &corev1.PodTemplateSpec{
-        Spec: corev1.PodSpec{
-          Containers: []corev1.Container{
-            {
-              Name:  "ray-job-submitter",
-              Image: GetRayImage(),
-              Resources: corev1.ResourceRequirements{
-                Requests: corev1.ResourceList{
-                  corev1.ResourceCPU:    resource.MustParse("200m"),
-                  corev1.ResourceMemory: resource.MustParse("200Mi"),
-                },
-                Limits: corev1.ResourceList{
-                  corev1.ResourceCPU:    resource.MustParse("500m"),
-                  corev1.ResourceMemory: resource.MustParse("500Mi"),
-                },
-              },
-            },
-          },
-          RestartPolicy: corev1.RestartPolicyNever,
-        },
-      },
+      SubmitterPodTemplate: jobSubmitterPodTemplate(),
     },
   }
   rayJob, err = test.Client().Ray().RayV1().RayJobs(namespace.Name).Create(test.Ctx(), rayJob, metav1.CreateOptions{})
diff --git a/ray-operator/test/e2e/rayjob_test.go b/ray-operator/test/e2e/rayjob_test.go
new file mode 100644
index 0000000000..c039132899
--- /dev/null
+++ b/ray-operator/test/e2e/rayjob_test.go
@@ -0,0 +1,131 @@
+package e2e
+
+import (
+  "testing"
+  "time"
+
+  . "github.com/onsi/gomega"
+
+  k8serrors "k8s.io/apimachinery/pkg/api/errors"
+  metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+  rayv1 "github.com/ray-project/kuberay/ray-operator/apis/ray/v1"
+  . "github.com/ray-project/kuberay/ray-operator/test/support"
"github.com/ray-project/kuberay/ray-operator/test/support" +) + +func TestRayJob(t *testing.T) { + test := With(t) + + // Create a namespace + namespace := test.NewTestNamespace() + + // Job scripts + jobs := newConfigMap(namespace.Name, "jobs", files(test, "counter.py", "fail.py")) + jobs, err := test.Client().Core().CoreV1().ConfigMaps(namespace.Name).Create(test.Ctx(), jobs, metav1.CreateOptions{}) + test.Expect(err).NotTo(HaveOccurred()) + test.T().Logf("Created ConfigMap %s/%s successfully", jobs.Namespace, jobs.Name) + + test.T().Run("Successful RayJob", func(t *testing.T) { + // RayJob + rayJob := &rayv1.RayJob{ + TypeMeta: metav1.TypeMeta{ + APIVersion: rayv1.GroupVersion.String(), + Kind: "RayJob", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "counter", + Namespace: namespace.Name, + }, + Spec: rayv1.RayJobSpec{ + RayClusterSpec: newRayClusterSpec(mountConfigMap(jobs, "/home/ray/jobs")), + Entrypoint: "python /home/ray/jobs/counter.py", + RuntimeEnvYAML: ` +env_vars: + counter_name: test_counter +`, + ShutdownAfterJobFinishes: true, + SubmitterPodTemplate: jobSubmitterPodTemplate(), + }, + } + rayJob, err = test.Client().Ray().RayV1().RayJobs(namespace.Name).Create(test.Ctx(), rayJob, metav1.CreateOptions{}) + test.Expect(err).NotTo(HaveOccurred()) + test.T().Logf("Created RayJob %s/%s successfully", rayJob.Namespace, rayJob.Name) + + test.T().Logf("Waiting for RayJob %s/%s to complete", rayJob.Namespace, rayJob.Name) + test.Eventually(RayJob(test, rayJob.Namespace, rayJob.Name), TestTimeoutMedium). + Should(WithTransform(RayJobStatus, Satisfy(rayv1.IsJobTerminal))) + + // Assert the RayJob has completed successfully + test.Expect(GetRayJob(test, rayJob.Namespace, rayJob.Name)). + To(WithTransform(RayJobStatus, Equal(rayv1.JobStatusSucceeded))) + + // And the RayJob deployment status is updated accordingly + test.Eventually(RayJob(test, rayJob.Namespace, rayJob.Name)). 
+      Should(WithTransform(RayJobDeploymentStatus, Equal(rayv1.JobDeploymentStatusComplete)))
+
+    // Refresh the RayJob status
+    rayJob = GetRayJob(test, rayJob.Namespace, rayJob.Name)
+
+    // Assert the RayCluster has been torn down
+    _, err = test.Client().Ray().RayV1().RayClusters(namespace.Name).Get(test.Ctx(), rayJob.Status.RayClusterName, metav1.GetOptions{})
+    test.Expect(err).To(MatchError(k8serrors.NewNotFound(rayv1.Resource("rayclusters"), rayJob.Status.RayClusterName)))
+
+    // Delete the RayJob
+    err = test.Client().Ray().RayV1().RayJobs(namespace.Name).Delete(test.Ctx(), rayJob.Name, metav1.DeleteOptions{})
+    test.Expect(err).NotTo(HaveOccurred())
+    test.T().Logf("Deleted RayJob %s/%s successfully", rayJob.Namespace, rayJob.Name)
+
+    // Assert the submitter Job has been cascade deleted
+    test.Eventually(Jobs(test, namespace.Name)).Should(BeEmpty())
+  })
+
+  test.T().Run("Failing RayJob without cluster shutdown after finished", func(t *testing.T) {
+    // RayJob
+    rayJob := &rayv1.RayJob{
+      TypeMeta: metav1.TypeMeta{
+        APIVersion: rayv1.GroupVersion.String(),
+        Kind:       "RayJob",
+      },
+      ObjectMeta: metav1.ObjectMeta{
+        Name:      "fail",
+        Namespace: namespace.Name,
+      },
+      Spec: rayv1.RayJobSpec{
+        RayClusterSpec:           newRayClusterSpec(mountConfigMap(jobs, "/home/ray/jobs")),
+        Entrypoint:               "python /home/ray/jobs/fail.py",
+        ShutdownAfterJobFinishes: false,
+        SubmitterPodTemplate:     jobSubmitterPodTemplate(),
+      },
+    }
+    rayJob, err = test.Client().Ray().RayV1().RayJobs(namespace.Name).Create(test.Ctx(), rayJob, metav1.CreateOptions{})
+    test.Expect(err).NotTo(HaveOccurred())
+    test.T().Logf("Created RayJob %s/%s successfully", rayJob.Namespace, rayJob.Name)
+
+    test.T().Logf("Waiting for RayJob %s/%s to complete", rayJob.Namespace, rayJob.Name)
+    test.Eventually(RayJob(test, rayJob.Namespace, rayJob.Name), TestTimeoutMedium).
+      Should(WithTransform(RayJobStatus, Satisfy(rayv1.IsJobTerminal)))
+
+    // Assert the Ray job has failed
+    test.Expect(GetRayJob(test, rayJob.Namespace, rayJob.Name)).
+      To(WithTransform(RayJobStatus, Equal(rayv1.JobStatusFailed)))
+
+    // And the RayJob deployment status is updated accordingly
+    test.Consistently(RayJob(test, rayJob.Namespace, rayJob.Name), 10*time.Second).
+      Should(WithTransform(RayJobDeploymentStatus, Equal(rayv1.JobDeploymentStatusRunning)))
+
+    // Refresh the RayJob status
+    rayJob = GetRayJob(test, rayJob.Namespace, rayJob.Name)
+
+    // Delete the RayJob
+    err = test.Client().Ray().RayV1().RayJobs(namespace.Name).Delete(test.Ctx(), rayJob.Name, metav1.DeleteOptions{})
+    test.Expect(err).NotTo(HaveOccurred())
+    test.T().Logf("Deleted RayJob %s/%s successfully", rayJob.Namespace, rayJob.Name)
+
+    // Assert the RayCluster has been cascade deleted
+    test.Eventually(NotFound(RayClusterOrError(test, namespace.Name, rayJob.Status.RayClusterName))).
+      Should(BeTrue())
+
+    // Assert the submitter Job has been cascade deleted
+    test.Eventually(Jobs(test, namespace.Name)).Should(BeEmpty())
+  })
+}
diff --git a/ray-operator/test/e2e/support.go b/ray-operator/test/e2e/support.go
index 87e555d8b3..0a4c5c69ac 100644
--- a/ray-operator/test/e2e/support.go
+++ b/ray-operator/test/e2e/support.go
@@ -4,15 +4,216 @@ import (
   "embed"
 
   "github.com/onsi/gomega"
-  "github.com/ray-project/kuberay/ray-operator/test/support"
+
+  corev1 "k8s.io/api/core/v1"
+  "k8s.io/apimachinery/pkg/api/resource"
+  metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+  rayv1 "github.com/ray-project/kuberay/ray-operator/apis/ray/v1"
+  . "github.com/ray-project/kuberay/ray-operator/test/support"
"github.com/ray-project/kuberay/ray-operator/test/support" ) //go:embed *.py -var files embed.FS +var _files embed.FS -func ReadFile(t support.Test, fileName string) []byte { +func ReadFile(t Test, fileName string) []byte { t.T().Helper() - file, err := files.ReadFile(fileName) + file, err := _files.ReadFile(fileName) t.Expect(err).NotTo(gomega.HaveOccurred()) return file } + +type option[T any] func(t *T) *T + +func apply[T any](t *T, options ...option[T]) *T { + for _, opt := range options { + t = opt(t) + } + return t +} + +func options[T any](options ...option[T]) option[T] { + return func(t *T) *T { + for _, opt := range options { + t = opt(t) + } + return t + } +} + +func newConfigMap(namespace, name string, options ...option[corev1.ConfigMap]) *corev1.ConfigMap { + cm := &corev1.ConfigMap{ + TypeMeta: metav1.TypeMeta{ + APIVersion: corev1.SchemeGroupVersion.String(), + Kind: "ConfigMap", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + BinaryData: map[string][]byte{}, + Immutable: Ptr(true), + } + + return configMapWith(cm, options...) +} + +func configMapWith(configMap *corev1.ConfigMap, options ...option[corev1.ConfigMap]) *corev1.ConfigMap { + return apply(configMap, options...) +} + +func file(t Test, fileName string) option[corev1.ConfigMap] { + return func(cm *corev1.ConfigMap) *corev1.ConfigMap { + cm.BinaryData[fileName] = ReadFile(t, fileName) + return cm + } +} + +func files(t Test, fileNames ...string) option[corev1.ConfigMap] { + var files []option[corev1.ConfigMap] + for _, fileName := range fileNames { + files = append(files, file(t, fileName)) + } + return options(files...) +} + +func newRayClusterSpec(options ...option[rayv1.RayClusterSpec]) *rayv1.RayClusterSpec { + return rayClusterSpecWith(rayClusterSpec(), options...) +} + +func rayClusterSpecWith(spec *rayv1.RayClusterSpec, options ...option[rayv1.RayClusterSpec]) *rayv1.RayClusterSpec { + return apply(spec, options...) 
+}
+
+func mountConfigMap(configMap *corev1.ConfigMap, mountPath string) option[rayv1.RayClusterSpec] {
+  return func(spec *rayv1.RayClusterSpec) *rayv1.RayClusterSpec {
+    mounts := spec.HeadGroupSpec.Template.Spec.Containers[0].VolumeMounts
+    spec.HeadGroupSpec.Template.Spec.Containers[0].VolumeMounts = append(mounts, corev1.VolumeMount{
+      Name:      configMap.Name,
+      MountPath: mountPath,
+    })
+    spec.HeadGroupSpec.Template.Spec.Volumes = append(spec.HeadGroupSpec.Template.Spec.Volumes, corev1.Volume{
+      Name: configMap.Name,
+      VolumeSource: corev1.VolumeSource{
+        ConfigMap: &corev1.ConfigMapVolumeSource{
+          LocalObjectReference: corev1.LocalObjectReference{
+            Name: configMap.Name,
+          },
+        },
+      },
+    })
+    return spec
+  }
+}
+
+func rayClusterSpec() *rayv1.RayClusterSpec {
+  return &rayv1.RayClusterSpec{
+    RayVersion: GetRayVersion(),
+    HeadGroupSpec: rayv1.HeadGroupSpec{
+      RayStartParams: map[string]string{
+        "dashboard-host": "0.0.0.0",
+      },
+      Template: corev1.PodTemplateSpec{
+        Spec: corev1.PodSpec{
+          Containers: []corev1.Container{
+            {
+              Name:  "ray-head",
+              Image: GetRayImage(),
+              Ports: []corev1.ContainerPort{
+                {
+                  ContainerPort: 6379,
+                  Name:          "gcs",
+                },
+                {
+                  ContainerPort: 8265,
+                  Name:          "dashboard",
+                },
+                {
+                  ContainerPort: 10001,
+                  Name:          "client",
+                },
+              },
+              Lifecycle: &corev1.Lifecycle{
+                PreStop: &corev1.LifecycleHandler{
+                  Exec: &corev1.ExecAction{
+                    Command: []string{"/bin/sh", "-c", "ray stop"},
+                  },
+                },
+              },
+              Resources: corev1.ResourceRequirements{
+                Requests: corev1.ResourceList{
+                  corev1.ResourceCPU:    resource.MustParse("300m"),
+                  corev1.ResourceMemory: resource.MustParse("1G"),
+                },
+                Limits: corev1.ResourceList{
+                  corev1.ResourceCPU:    resource.MustParse("500m"),
+                  corev1.ResourceMemory: resource.MustParse("2G"),
+                },
+              },
+            },
+          },
+        },
+      },
+    },
+    WorkerGroupSpecs: []rayv1.WorkerGroupSpec{
+      {
+        Replicas:       Ptr(int32(1)),
+        MinReplicas:    Ptr(int32(1)),
+        MaxReplicas:    Ptr(int32(1)),
+        GroupName:      "small-group",
+        RayStartParams: map[string]string{},
+        Template: corev1.PodTemplateSpec{
+          Spec: corev1.PodSpec{
+            Containers: []corev1.Container{
+              {
+                Name:  "ray-worker",
+                Image: GetRayImage(),
+                Lifecycle: &corev1.Lifecycle{
+                  PreStop: &corev1.LifecycleHandler{
+                    Exec: &corev1.ExecAction{
+                      Command: []string{"/bin/sh", "-c", "ray stop"},
+                    },
+                  },
+                },
+                Resources: corev1.ResourceRequirements{
+                  Requests: corev1.ResourceList{
+                    corev1.ResourceCPU:    resource.MustParse("300m"),
+                    corev1.ResourceMemory: resource.MustParse("1G"),
+                  },
+                  Limits: corev1.ResourceList{
+                    corev1.ResourceCPU:    resource.MustParse("500m"),
+                    corev1.ResourceMemory: resource.MustParse("1G"),
+                  },
+                },
+              },
+            },
+          },
+        },
+      },
+    },
+  }
+}
+
+func jobSubmitterPodTemplate() *corev1.PodTemplateSpec {
+  return &corev1.PodTemplateSpec{
+    Spec: corev1.PodSpec{
+      Containers: []corev1.Container{
+        {
+          Name:  "ray-job-submitter",
+          Image: GetRayImage(),
+          Resources: corev1.ResourceRequirements{
+            Requests: corev1.ResourceList{
+              corev1.ResourceCPU:    resource.MustParse("200m"),
+              corev1.ResourceMemory: resource.MustParse("200Mi"),
+            },
+            Limits: corev1.ResourceList{
+              corev1.ResourceCPU:    resource.MustParse("500m"),
+              corev1.ResourceMemory: resource.MustParse("500Mi"),
+            },
+          },
+        },
+      },
+      RestartPolicy: corev1.RestartPolicyNever,
+    },
+  }
+}
diff --git a/ray-operator/test/support/batch.go b/ray-operator/test/support/batch.go
new file mode 100644
index 0000000000..cd86d0a965
--- /dev/null
+++ b/ray-operator/test/support/batch.go
@@ -0,0 +1,29 @@
+package support
+
+import (
+  "github.com/onsi/gomega"
+
+  batchv1 "k8s.io/api/batch/v1"
"k8s.io/api/batch/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func Jobs(t Test, namespace string) func(g gomega.Gomega) []batchv1.Job { + return func(g gomega.Gomega) []batchv1.Job { + jobs, err := t.Client().Core().BatchV1().Jobs(namespace).List(t.Ctx(), metav1.ListOptions{}) + g.Expect(err).NotTo(gomega.HaveOccurred()) + return jobs.Items + } +} + +func Job(t Test, namespace, name string) func(g gomega.Gomega) *batchv1.Job { + return func(g gomega.Gomega) *batchv1.Job { + job, err := t.Client().Core().BatchV1().Jobs(namespace).Get(t.Ctx(), name, metav1.GetOptions{}) + g.Expect(err).NotTo(gomega.HaveOccurred()) + return job + } +} + +func GetJob(t Test, namespace, name string) *batchv1.Job { + t.T().Helper() + return Job(t, namespace, name)(t) +} diff --git a/ray-operator/test/support/ray.go b/ray-operator/test/support/ray.go index f9cab10768..d868512702 100644 --- a/ray-operator/test/support/ray.go +++ b/ray-operator/test/support/ray.go @@ -24,6 +24,10 @@ func RayJobStatus(job *rayv1.RayJob) rayv1.JobStatus { return job.Status.JobStatus } +func RayJobDeploymentStatus(job *rayv1.RayJob) rayv1.JobDeploymentStatus { + return job.Status.JobDeploymentStatus +} + func GetRayJobId(t Test, namespace, name string) string { t.T().Helper() job := RayJob(t, namespace, name)(t) @@ -38,6 +42,12 @@ func RayCluster(t Test, namespace, name string) func(g gomega.Gomega) *rayv1.Ray } } +func RayClusterOrError(t Test, namespace, name string) func(g gomega.Gomega) (*rayv1.RayCluster, error) { + return func(g gomega.Gomega) (*rayv1.RayCluster, error) { + return t.Client().Ray().RayV1().RayClusters(namespace).Get(t.Ctx(), name, metav1.GetOptions{}) + } +} + func GetRayCluster(t Test, namespace, name string) *rayv1.RayCluster { t.T().Helper() return RayCluster(t, namespace, name)(t) diff --git a/ray-operator/test/support/support.go b/ray-operator/test/support/support.go index fb89752b11..8aa720b3d3 100644 --- a/ray-operator/test/support/support.go +++ b/ray-operator/test/support/support.go @@ -7,6 +7,7 @@ import ( "github.com/onsi/gomega" "github.com/onsi/gomega/format" + k8serrors "k8s.io/apimachinery/pkg/api/errors" ) var ( @@ -46,3 +47,10 @@ func init() { // Disable object truncation on test results format.MaxLength = 0 } + +func NotFound[T any](fn func(g gomega.Gomega) (T, error)) func(g gomega.Gomega) bool { + return func(g gomega.Gomega) bool { + _, err := fn(g) + return k8serrors.IsNotFound(err) + } +}