diff --git a/pkg/internal/big_int.go b/pkg/internal/big_int.go
index 376f178521e..a475e7b8cb0 100644
--- a/pkg/internal/big_int.go
+++ b/pkg/internal/big_int.go
@@ -17,6 +17,10 @@ func (b BigInt) Equal(n BigInt) bool {
     return b.Cmp(n) == 0
 }
 
+func (b BigInt) EqualInt64(n int64) bool {
+    return b.Int.Cmp(big.NewInt(n)) == 0
+}
+
 func (b BigInt) Cmp(n BigInt) int {
     return b.Int.Cmp(&n.Int)
 }
@@ -29,6 +33,10 @@ func (b BigInt) Sub(n BigInt) BigInt {
     return BigInt{*big.NewInt(0).Sub(&b.Int, &n.Int)}
 }
 
+func (b BigInt) String() string {
+    return b.Int.String()
+}
+
 func (b BigInt) MarshalJSON() ([]byte, error) {
     return []byte(b.String()), nil
 }
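The two helpers added here round out the `BigInt` API: `EqualInt64` lets callers compare against a plain literal without constructing a second `BigInt`, and an explicit `String` method is what `MarshalJSON` (and `%s` formatting) resolves to. A minimal sketch of the resulting behavior, assuming `BigInt` simply embeds `big.Int` as the `b.Int` receiver expressions above suggest:

```go
package main

import (
	"fmt"
	"math/big"
)

// Assumed shape of pkg/internal.BigInt: an embedded big.Int.
type BigInt struct{ big.Int }

func (b BigInt) EqualInt64(n int64) bool { return b.Int.Cmp(big.NewInt(n)) == 0 }
func (b BigInt) String() string          { return b.Int.String() }

func main() {
	var used BigInt                 // zero value is 0
	fmt.Println(used.EqualInt64(0)) // true: e.g. "no IPs in use yet"
	fmt.Println(used)               // "0", the same text MarshalJSON emits
}
```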
diff --git a/test/e2e/framework/deployment.go b/test/e2e/framework/deployment.go
index 977067ea3e3..d277a6d9389 100644
--- a/test/e2e/framework/deployment.go
+++ b/test/e2e/framework/deployment.go
@@ -3,6 +3,7 @@ package framework
 import (
     "context"
     "encoding/json"
+    "errors"
     "fmt"
     "strings"
     "time"
@@ -13,12 +14,16 @@ import (
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
     "k8s.io/apimachinery/pkg/runtime"
+    "k8s.io/apimachinery/pkg/types"
+    "k8s.io/apimachinery/pkg/util/wait"
     v1apps "k8s.io/client-go/kubernetes/typed/apps/v1"
     "k8s.io/kubectl/pkg/polymorphichelpers"
     "k8s.io/kubernetes/test/e2e/framework"
     "k8s.io/kubernetes/test/e2e/framework/deployment"
 
     "github.com/onsi/gomega"
+
+    "github.com/kubeovn/kube-ovn/pkg/util"
 )
 
 type DeploymentClient struct {
@@ -63,6 +68,62 @@ func (c *DeploymentClient) CreateSync(deploy *appsv1.Deployment) *appsv1.Deployment {
     return c.Get(d.Name).DeepCopy()
 }
 
+func (c *DeploymentClient) RolloutStatus(name string) *appsv1.Deployment {
+    var deploy *appsv1.Deployment
+    WaitUntil(2*time.Second, timeout, func(_ context.Context) (bool, error) {
+        var err error
+        deploy = c.Get(name)
+        unstructured := &unstructured.Unstructured{}
+        if unstructured.Object, err = runtime.DefaultUnstructuredConverter.ToUnstructured(deploy); err != nil {
+            return false, err
+        }
+
+        dsv := &polymorphichelpers.DeploymentStatusViewer{}
+        msg, done, err := dsv.Status(unstructured, 0)
+        if err != nil {
+            return false, err
+        }
+        if done {
+            return true, nil
+        }
+
+        Logf(strings.TrimSpace(msg))
+        return false, nil
+    }, "")
+
+    return deploy
+}
+
+func (c *DeploymentClient) Patch(original, modified *appsv1.Deployment) *appsv1.Deployment {
+    patch, err := util.GenerateMergePatchPayload(original, modified)
+    ExpectNoError(err)
+
+    var patchedDeploy *appsv1.Deployment
+    err = wait.PollUntilContextTimeout(context.Background(), 2*time.Second, timeout, true, func(ctx context.Context) (bool, error) {
+        deploy, err := c.DeploymentInterface.Patch(ctx, original.Name, types.StrategicMergePatchType, patch, metav1.PatchOptions{}, "")
+        if err != nil {
+            return handleWaitingAPIError(err, false, "patch deployment %s/%s", original.Namespace, original.Name)
+        }
+        patchedDeploy = deploy
+        return true, nil
+    })
+    if err == nil {
+        return patchedDeploy.DeepCopy()
+    }
+
+    if errors.Is(err, context.DeadlineExceeded) {
+        Failf("timed out while retrying to patch deployment %s/%s", original.Namespace, original.Name)
+    }
+    Failf("error occurred while retrying to patch deployment %s/%s: %v", original.Namespace, original.Name, err)
+
+    return nil
+}
+
+func (c *DeploymentClient) PatchSync(original, modified *appsv1.Deployment) *appsv1.Deployment {
+    deploy := c.Patch(original, modified)
+    return c.RolloutStatus(deploy.Name)
+}
+
 // Restart restarts the deployment as kubectl does
 func (c *DeploymentClient) Restart(deploy *appsv1.Deployment) *appsv1.Deployment {
     buf, err := polymorphichelpers.ObjectRestarterFn(deploy)
@@ -85,29 +146,7 @@ func (c *DeploymentClient) Restart(deploy *appsv1.Deployment) *appsv1.Deployment {
 // RestartSync restarts the deployment and wait it to be ready
 func (c *DeploymentClient) RestartSync(deploy *appsv1.Deployment) *appsv1.Deployment {
     _ = c.Restart(deploy)
-
-    WaitUntil(2*time.Second, timeout, func(_ context.Context) (bool, error) {
-        var err error
-        deploy = c.Get(deploy.Name)
-        unstructured := &unstructured.Unstructured{}
-        if unstructured.Object, err = runtime.DefaultUnstructuredConverter.ToUnstructured(deploy); err != nil {
-            return false, err
-        }
-
-        dsv := &polymorphichelpers.DeploymentStatusViewer{}
-        msg, done, err := dsv.Status(unstructured, 0)
-        if err != nil {
-            return false, err
-        }
-        if done {
-            return true, nil
-        }
-
-        Logf(strings.TrimSpace(msg))
-        return false, nil
-    }, "")
-
-    return deploy
+    return c.RolloutStatus(deploy.Name)
 }
 
 // Delete deletes a deployment if the deployment exists
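`RolloutStatus` extracts the kubectl status-viewer loop that `RestartSync` previously inlined (removed in the hunk above), so `PatchSync` can reuse it: `Patch` builds a strategic merge patch from the original/modified pair and retries transient API errors, and `PatchSync` then blocks until the resulting rollout completes. A sketch of a call site in the shape the specs in this PR use (the annotation key is a placeholder, not a kube-ovn annotation):

```go
original := deployClient.Get(deployName)
modified := original.DeepCopy()
// Any pod-template change triggers a new rollout.
modified.Spec.Template.Annotations = map[string]string{"e2e.example.com/touched": "true"}

// Patch submits the merge patch with retries; PatchSync additionally waits
// via RolloutStatus until the new ReplicaSet is fully rolled out.
deploy := deployClient.PatchSync(original, modified)
framework.Logf("deployment %s rolled out", deploy.Name)
```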
diff --git a/test/e2e/framework/ippool.go b/test/e2e/framework/ippool.go
new file mode 100644
index 00000000000..3ef92bcdf08
--- /dev/null
+++ b/test/e2e/framework/ippool.go
@@ -0,0 +1,256 @@
+package framework
+
+import (
+    "context"
+    "errors"
+    "fmt"
+    "math/big"
+    "time"
+
+    corev1 "k8s.io/api/core/v1"
+    apierrors "k8s.io/apimachinery/pkg/api/errors"
+    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+    "k8s.io/apimachinery/pkg/types"
+    "k8s.io/apimachinery/pkg/util/wait"
+    "k8s.io/kubernetes/test/e2e/framework"
+
+    "github.com/onsi/gomega"
+
+    apiv1 "github.com/kubeovn/kube-ovn/pkg/apis/kubeovn/v1"
+    v1 "github.com/kubeovn/kube-ovn/pkg/client/clientset/versioned/typed/kubeovn/v1"
+    "github.com/kubeovn/kube-ovn/pkg/util"
+)
+
+// IPPoolClient is a struct for ippool client.
+type IPPoolClient struct {
+    f *Framework
+    v1.IPPoolInterface
+}
+
+func (f *Framework) IPPoolClient() *IPPoolClient {
+    return &IPPoolClient{
+        f:               f,
+        IPPoolInterface: f.KubeOVNClientSet.KubeovnV1().IPPools(),
+    }
+}
+
+func (c *IPPoolClient) Get(name string) *apiv1.IPPool {
+    ippool, err := c.IPPoolInterface.Get(context.TODO(), name, metav1.GetOptions{})
+    ExpectNoError(err)
+    return ippool
+}
+
+// Create creates a new ippool according to the framework specifications
+func (c *IPPoolClient) Create(ippool *apiv1.IPPool) *apiv1.IPPool {
+    s, err := c.IPPoolInterface.Create(context.TODO(), ippool, metav1.CreateOptions{})
+    ExpectNoError(err, "Error creating ippool")
+    return s.DeepCopy()
+}
+
+// CreateSync creates a new ippool according to the framework specifications, and waits for it to be ready.
+func (c *IPPoolClient) CreateSync(ippool *apiv1.IPPool) *apiv1.IPPool {
+    s := c.Create(ippool)
+    ExpectTrue(c.WaitToBeReady(s.Name, timeout))
+    // Get the newest ippool after it becomes ready
+    return c.Get(s.Name).DeepCopy()
+}
+
+// Update updates the ippool
+func (c *IPPoolClient) Update(ippool *apiv1.IPPool, options metav1.UpdateOptions, timeout time.Duration) *apiv1.IPPool {
+    var updatedIPPool *apiv1.IPPool
+    err := wait.PollUntilContextTimeout(context.Background(), 2*time.Second, timeout, true, func(ctx context.Context) (bool, error) {
+        s, err := c.IPPoolInterface.Update(ctx, ippool, options)
+        if err != nil {
+            return handleWaitingAPIError(err, false, "update ippool %q", ippool.Name)
+        }
+        updatedIPPool = s
+        return true, nil
+    })
+    if err == nil {
+        return updatedIPPool.DeepCopy()
+    }
+
+    if errors.Is(err, context.DeadlineExceeded) {
+        Failf("timed out while retrying to update ippool %s", ippool.Name)
+    }
+    Failf("error occurred while retrying to update ippool %s: %v", ippool.Name, err)
+
+    return nil
+}
+
+// UpdateSync updates the ippool and waits for the ippool to be ready for `timeout`.
+// If the ippool doesn't become ready before the timeout, it will fail the test.
+func (c *IPPoolClient) UpdateSync(ippool *apiv1.IPPool, options metav1.UpdateOptions, timeout time.Duration) *apiv1.IPPool {
+    s := c.Update(ippool, options, timeout)
+    ExpectTrue(c.WaitToBeUpdated(s, timeout))
+    ExpectTrue(c.WaitToBeReady(s.Name, timeout))
+    // Get the newest ippool after it becomes ready
+    return c.Get(s.Name).DeepCopy()
+}
+
+// Patch patches the ippool
+func (c *IPPoolClient) Patch(original, modified *apiv1.IPPool, timeout time.Duration) *apiv1.IPPool {
+    patch, err := util.GenerateMergePatchPayload(original, modified)
+    ExpectNoError(err)
+
+    var patchedIPPool *apiv1.IPPool
+    err = wait.PollUntilContextTimeout(context.Background(), 2*time.Second, timeout, true, func(ctx context.Context) (bool, error) {
+        s, err := c.IPPoolInterface.Patch(ctx, original.Name, types.MergePatchType, patch, metav1.PatchOptions{}, "")
+        if err != nil {
+            return handleWaitingAPIError(err, false, "patch ippool %q", original.Name)
+        }
+        patchedIPPool = s
+        return true, nil
+    })
+    if err == nil {
+        return patchedIPPool.DeepCopy()
+    }
+
+    if errors.Is(err, context.DeadlineExceeded) {
+        Failf("timed out while retrying to patch ippool %s", original.Name)
+    }
+    Failf("error occurred while retrying to patch ippool %s: %v", original.Name, err)
+
+    return nil
+}
+
+// PatchSync patches the ippool and waits for the ippool to be ready for `timeout`.
+// If the ippool doesn't become ready before the timeout, it will fail the test.
+func (c *IPPoolClient) PatchSync(original, modified *apiv1.IPPool) *apiv1.IPPool {
+    s := c.Patch(original, modified, timeout)
+    ExpectTrue(c.WaitToBeUpdated(s, timeout))
+    ExpectTrue(c.WaitToBeReady(s.Name, timeout))
+    // Get the newest ippool after it becomes ready
+    return c.Get(s.Name).DeepCopy()
+}
+
+// Delete deletes an ippool if the ippool exists
+func (c *IPPoolClient) Delete(name string) {
+    err := c.IPPoolInterface.Delete(context.TODO(), name, metav1.DeleteOptions{})
+    if err != nil && !apierrors.IsNotFound(err) {
+        Failf("Failed to delete ippool %q: %v", name, err)
+    }
+}
+
+// DeleteSync deletes the ippool and waits for the ippool to disappear for `timeout`.
+// If the ippool doesn't disappear before the timeout, it will fail the test.
+func (c *IPPoolClient) DeleteSync(name string) {
+    c.Delete(name)
+    gomega.Expect(c.WaitToDisappear(name, 2*time.Second, timeout)).To(gomega.Succeed(), "wait for ippool %q to disappear", name)
+}
+
+func isIPPoolConditionSetAsExpected(ippool *apiv1.IPPool, conditionType apiv1.ConditionType, wantTrue, silent bool) bool {
+    for _, cond := range ippool.Status.Conditions {
+        if cond.Type == conditionType {
+            if (wantTrue && (cond.Status == corev1.ConditionTrue)) || (!wantTrue && (cond.Status != corev1.ConditionTrue)) {
+                return true
+            }
+            if !silent {
+                Logf("Condition %s of ippool %s is %v instead of %t. Reason: %v, message: %v",
+                    conditionType, ippool.Name, cond.Status == corev1.ConditionTrue, wantTrue, cond.Reason, cond.Message)
+            }
+            return false
+        }
+    }
+    if !silent {
+        Logf("Couldn't find condition %v on ippool %v", conditionType, ippool.Name)
+    }
+    return false
+}
+
+// IsIPPoolConditionSetAsExpected returns whether the condition of the given type on the ippool
+// matches wantTrue, logging the mismatch details when it does not.
+func IsIPPoolConditionSetAsExpected(ippool *apiv1.IPPool, conditionType apiv1.ConditionType, wantTrue bool) bool {
+    return isIPPoolConditionSetAsExpected(ippool, conditionType, wantTrue, false)
+}
+
+// WaitConditionToBe returns whether ippool "name's" condition state matches wantTrue
+// within timeout. If wantTrue is true, it will ensure the ippool condition status is
+// ConditionTrue; if it's false, it ensures the ippool condition is in any state other
+// than ConditionTrue (e.g. not true or unknown).
+func (c *IPPoolClient) WaitConditionToBe(name string, conditionType apiv1.ConditionType, wantTrue bool, timeout time.Duration) bool {
+    Logf("Waiting up to %v for ippool %s condition %s to be %t", timeout, name, conditionType, wantTrue)
+    for start := time.Now(); time.Since(start) < timeout; time.Sleep(poll) {
+        ippool := c.Get(name)
+        if IsIPPoolConditionSetAsExpected(ippool, conditionType, wantTrue) {
+            Logf("IPPool %s reached the desired condition status (%t)", name, wantTrue)
+            return true
+        }
+        Logf("IPPool %s has not yet reached the desired condition status (%t)", name, wantTrue)
+    }
+    Logf("IPPool %s didn't reach desired %s condition status (%t) within %v", name, conditionType, wantTrue, timeout)
+    return false
+}
+
+// WaitToBeReady returns whether the ippool is ready within timeout.
+func (c *IPPoolClient) WaitToBeReady(name string, timeout time.Duration) bool {
+    return c.WaitConditionToBe(name, apiv1.Ready, true, timeout)
+}
+
+// WaitToBeUpdated returns whether the ippool is updated within timeout.
+func (c *IPPoolClient) WaitToBeUpdated(ippool *apiv1.IPPool, timeout time.Duration) bool {
+    Logf("Waiting up to %v for ippool %s to be updated", timeout, ippool.Name)
+    rv, _ := big.NewInt(0).SetString(ippool.ResourceVersion, 10)
+    for start := time.Now(); time.Since(start) < timeout; time.Sleep(poll) {
+        s := c.Get(ippool.Name)
+        if current, _ := big.NewInt(0).SetString(s.ResourceVersion, 10); current.Cmp(rv) > 0 {
+            Logf("IPPool %s updated", ippool.Name)
+            return true
+        }
+        Logf("IPPool %s still not updated", ippool.Name)
+    }
+    Logf("IPPool %s was not updated within %v", ippool.Name, timeout)
+    return false
+}
+
+// WaitUntil waits the given timeout duration for the specified condition to be met.
+func (c *IPPoolClient) WaitUntil(name string, cond func(s *apiv1.IPPool) (bool, error), condDesc string, interval, timeout time.Duration) *apiv1.IPPool {
+    var ippool *apiv1.IPPool
+    err := wait.PollUntilContextTimeout(context.Background(), interval, timeout, true, func(_ context.Context) (bool, error) {
+        Logf("Waiting for ippool %s to meet condition %q", name, condDesc)
+        ippool = c.Get(name).DeepCopy()
+        met, err := cond(ippool)
+        if err != nil {
+            return false, fmt.Errorf("failed to check condition for ippool %s: %v", name, err)
+        }
+        return met, nil
+    })
+    if err == nil {
+        return ippool
+    }
+
+    if errors.Is(err, context.DeadlineExceeded) {
+        Failf("timed out while waiting for ippool %s to meet condition %q", name, condDesc)
+    }
+    Failf("error occurred while waiting for ippool %s to meet condition %q: %v", name, condDesc, err)
+
+    return nil
+}
+
+// WaitToDisappear waits the given timeout duration for the specified ippool to disappear.
+func (c *IPPoolClient) WaitToDisappear(name string, interval, timeout time.Duration) error {
+    err := framework.Gomega().Eventually(context.Background(), framework.HandleRetry(func(ctx context.Context) (*apiv1.IPPool, error) {
+        ippool, err := c.IPPoolInterface.Get(ctx, name, metav1.GetOptions{})
+        if apierrors.IsNotFound(err) {
+            return nil, nil
+        }
+        return ippool, err
+    })).WithTimeout(timeout).Should(gomega.BeNil())
+    if err != nil {
+        return fmt.Errorf("expected ippool %s to not be found: %w", name, err)
+    }
+    return nil
+}
+
+func MakeIPPool(name, subnet string, ips, namespaces []string) *apiv1.IPPool {
+    return &apiv1.IPPool{
+        ObjectMeta: metav1.ObjectMeta{
+            Name: name,
+        },
+        Spec: apiv1.IPPoolSpec{
+            Subnet:     subnet,
+            IPs:        ips,
+            Namespaces: namespaces,
+        },
+    }
+}
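Together these helpers give specs a fully synchronous ippool lifecycle. A sketch of the intended usage (names and addresses are placeholders; the `a..b` range syntax is the same one the ipam test below feeds to `MakeIPPool`):

```go
ippoolClient := f.IPPoolClient()
pool := framework.MakeIPPool("pool-example", "subnet-example",
	[]string{"10.16.0.10", "10.16.0.20..10.16.0.30"}, nil)
pool = ippoolClient.CreateSync(pool) // fails the spec unless the pool becomes Ready

modified := pool.DeepCopy()
modified.Spec.Namespaces = []string{"ns-example"}
pool = ippoolClient.PatchSync(pool, modified) // merge patch + Ready wait

ippoolClient.DeleteSync(pool.Name) // returns only once the pool is gone
```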
diff --git a/test/e2e/framework/namespace.go b/test/e2e/framework/namespace.go
new file mode 100644
index 00000000000..54886813e1c
--- /dev/null
+++ b/test/e2e/framework/namespace.go
@@ -0,0 +1,101 @@
+package framework
+
+import (
+    "context"
+    "errors"
+    "fmt"
+    "time"
+
+    corev1 "k8s.io/api/core/v1"
+    apierrors "k8s.io/apimachinery/pkg/api/errors"
+    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+    "k8s.io/apimachinery/pkg/types"
+    "k8s.io/apimachinery/pkg/util/wait"
+    v1core "k8s.io/client-go/kubernetes/typed/core/v1"
+    "k8s.io/kubernetes/test/e2e/framework"
+
+    "github.com/onsi/gomega"
+
+    "github.com/kubeovn/kube-ovn/pkg/util"
+)
+
+// NamespaceClient is a struct for namespace client.
+type NamespaceClient struct {
+    f *Framework
+    v1core.NamespaceInterface
+}
+
+func (f *Framework) NamespaceClient() *NamespaceClient {
+    return &NamespaceClient{
+        f:                  f,
+        NamespaceInterface: f.ClientSet.CoreV1().Namespaces(),
+    }
+}
+
+func (c *NamespaceClient) Get(name string) *corev1.Namespace {
+    ns, err := c.NamespaceInterface.Get(context.TODO(), name, metav1.GetOptions{})
+    ExpectNoError(err)
+    return ns
+}
+
+// Create creates a new namespace according to the framework specifications
+func (c *NamespaceClient) Create(ns *corev1.Namespace) *corev1.Namespace {
+    created, err := c.NamespaceInterface.Create(context.TODO(), ns, metav1.CreateOptions{})
+    ExpectNoError(err, "Error creating namespace")
+    return created.DeepCopy()
+}
+
+func (c *NamespaceClient) Patch(original, modified *corev1.Namespace) *corev1.Namespace {
+    patch, err := util.GenerateMergePatchPayload(original, modified)
+    ExpectNoError(err)
+
+    var patchedNS *corev1.Namespace
+    err = wait.PollUntilContextTimeout(context.Background(), 2*time.Second, timeout, true, func(ctx context.Context) (bool, error) {
+        ns, err := c.NamespaceInterface.Patch(ctx, original.Name, types.StrategicMergePatchType, patch, metav1.PatchOptions{}, "")
+        if err != nil {
+            return handleWaitingAPIError(err, false, "patch namespace %s", original.Name)
+        }
+        patchedNS = ns
+        return true, nil
+    })
+    if err == nil {
+        return patchedNS.DeepCopy()
+    }
+
+    if errors.Is(err, context.DeadlineExceeded) {
+        Failf("timed out while retrying to patch namespace %s", original.Name)
+    }
+    Failf("error occurred while retrying to patch namespace %s: %v", original.Name, err)
+
+    return nil
+}
+
+// Delete deletes a namespace if the namespace exists
+func (c *NamespaceClient) Delete(name string) {
+    err := c.NamespaceInterface.Delete(context.TODO(), name, metav1.DeleteOptions{})
+    if err != nil && !apierrors.IsNotFound(err) {
+        Failf("Failed to delete namespace %q: %v", name, err)
+    }
+}
+
+// DeleteSync deletes the namespace and waits for the namespace to disappear for `timeout`.
+// If the namespace doesn't disappear before the timeout, it will fail the test.
+func (c *NamespaceClient) DeleteSync(name string) {
+    c.Delete(name)
+    gomega.Expect(c.WaitToDisappear(name, 2*time.Second, timeout)).To(gomega.Succeed(), "wait for namespace %q to disappear", name)
+}
+
+// WaitToDisappear waits the given timeout duration for the specified namespace to disappear.
+func (c *NamespaceClient) WaitToDisappear(name string, interval, timeout time.Duration) error {
+    err := framework.Gomega().Eventually(context.Background(), framework.HandleRetry(func(ctx context.Context) (*corev1.Namespace, error) {
+        ns, err := c.NamespaceInterface.Get(ctx, name, metav1.GetOptions{})
+        if apierrors.IsNotFound(err) {
+            return nil, nil
+        }
+        return ns, err
+    })).WithTimeout(timeout).Should(gomega.BeNil())
+    if err != nil {
+        return fmt.Errorf("expected namespace %s to not be found: %w", name, err)
+    }
+    return nil
+}
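A sketch of the namespace client in use: binding a namespace to an ippool by annotation, which is what the ipam test below does. The nil-map guard matters because `Patch` diffs whatever the caller mutated against the original:

```go
ns := nsClient.Get(namespaceName)
patched := ns.DeepCopy()
if patched.Annotations == nil {
	patched.Annotations = map[string]string{}
}
patched.Annotations[util.IpPoolAnnotation] = ippoolName
_ = nsClient.Patch(ns, patched)
```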
diff --git a/test/e2e/framework/pod.go b/test/e2e/framework/pod.go
index 3bfee1c274c..bd74ea98ae2 100644
--- a/test/e2e/framework/pod.go
+++ b/test/e2e/framework/pod.go
@@ -43,7 +43,7 @@ func (c *PodClient) DeleteSync(name string) {
     c.PodClient.DeleteSync(context.Background(), name, metav1.DeleteOptions{}, timeout)
 }
 
-func (c *PodClient) PatchPod(original, modified *corev1.Pod) *corev1.Pod {
+func (c *PodClient) Patch(original, modified *corev1.Pod) *corev1.Pod {
     patch, err := util.GenerateMergePatchPayload(original, modified)
     ExpectNoError(err)
 
diff --git a/test/e2e/framework/util.go b/test/e2e/framework/util.go
index 90c0e771c6f..75bf75343d9 100644
--- a/test/e2e/framework/util.go
+++ b/test/e2e/framework/util.go
@@ -10,6 +10,7 @@ import (
     "time"
 
     "github.com/onsi/ginkgo/v2"
+    "github.com/scylladb/go-set/strset"
 
     "github.com/kubeovn/kube-ovn/pkg/util"
 )
@@ -108,14 +109,12 @@ func RandomIPPool(cidr, sep string, count int) string {
         max := big.NewInt(0).Exp(big.NewInt(2), big.NewInt(int64(size-prefix)), nil)
         max.Sub(max, big.NewInt(3))
 
-        ips := make([]string, 0, count)
-        for len(ips) != count {
+        ips := strset.NewWithSize(count)
+        for ips.Size() != count {
             n := big.NewInt(0).Rand(rnd, max)
-            if ip := util.BigInt2Ip(n.Add(n, base)); !util.ContainsString(ips, ip) {
-                ips = append(ips, ip)
-            }
+            ips.Add(util.BigInt2Ip(n.Add(n, base)))
         }
-        return ips
+        return ips.List()
     }
 
     cidrV4, cidrV6 := util.SplitStringIP(cidr)
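The `RandomIPPool` change swaps a slice plus a linear `util.ContainsString` scan for a `strset` set, so deduplicating random picks becomes O(1) per candidate. A self-contained sketch of the pattern; one behavioral note is that `List()` returns members in unspecified order, whereas the old slice preserved insertion order:

```go
package main

import (
	"fmt"
	"math/rand"

	"github.com/scylladb/go-set/strset"
)

func main() {
	rnd := rand.New(rand.NewSource(1))
	ips := strset.NewWithSize(3)
	for ips.Size() != 3 {
		// Duplicate picks are absorbed by the set; no ContainsString scan.
		ips.Add(fmt.Sprintf("10.16.0.%d", rnd.Intn(8)))
	}
	fmt.Println(ips.List()) // three unique addresses, order unspecified
}
```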
"sts-" + framework.RandomSuffix() @@ -54,6 +62,9 @@ var _ = framework.Describe("[group:ipam]", func() { ginkgo.By("Deleting statefulset " + stsName) stsClient.DeleteSync(stsName) + ginkgo.By("Deleting ippool " + ippoolName) + ippoolClient.DeleteSync(ippoolName) + ginkgo.By("Deleting subnet " + subnetName) subnetClient.DeleteSync(subnetName) }) @@ -371,4 +382,152 @@ var _ = framework.Describe("[group:ipam]", func() { framework.ExpectConsistOf(podIPs, strings.Split(pod.Annotations[util.IpAddressAnnotation], ",")) } }) + + framework.ConformanceIt("should support IPPool feature", func() { + f.SkipVersionPriorTo(1, 12, "Support for IPPool feature was introduced in v1.12") + + ipsCount := 3 + randomIPs := strings.Split(framework.RandomIPPool(cidr, ";", ipsCount), ";") + ips := make([]string, 0, ipsCount*2) + for _, s := range randomIPs { + ips = append(ips, strings.Split(s, ",")...) + } + ipv4, ipv6 := util.SplitIpsByProtocol(ips) + if len(ipv4) != 0 { + framework.ExpectHaveLen(ipv4, ipsCount) + } + if len(ipv6) != 0 { + framework.ExpectHaveLen(ipv6, ipsCount) + } + + sort.Slice(ipv4, func(i, j int) bool { + ip1, _ := ipam.NewIP(ipv4[i]) + ip2, _ := ipam.NewIP(ipv4[j]) + return ip1.LessThan(ip2) + }) + sort.Slice(ipv6, func(i, j int) bool { + ip1, _ := ipam.NewIP(ipv6[i]) + ip2, _ := ipam.NewIP(ipv6[j]) + return ip1.LessThan(ip2) + }) + + var err error + ips = make([]string, 0, ipsCount*2) + ipv4Range, ipv6Range := ipam.NewEmptyIPRangeList(), ipam.NewEmptyIPRangeList() + if len(ipv4) != 0 { + tmp := []string{ipv4[0], fmt.Sprintf("%s..%s", ipv4[1], ipv4[2])} + ipv4Range, err = ipam.NewIPRangeListFrom(tmp...) + framework.ExpectNoError(err) + ips = append(ips, tmp...) + } + if len(ipv6) != 0 { + tmp := []string{ipv6[0], fmt.Sprintf("%s..%s", ipv6[1], ipv6[2])} + ipv6Range, err = ipam.NewIPRangeListFrom(tmp...) + framework.ExpectNoError(err) + ips = append(ips, tmp...) 
+        ginkgo.By(fmt.Sprintf("Creating ippool %s with ips %v", ippoolName, ips))
+        ippool := framework.MakeIPPool(ippoolName, subnetName, ips, nil)
+        ippool = ippoolClient.CreateSync(ippool)
+
+        ginkgo.By("Validating ippool status")
+        framework.ExpectTrue(ippool.Status.V4UsingIPs.EqualInt64(0))
+        framework.ExpectTrue(ippool.Status.V6UsingIPs.EqualInt64(0))
+        framework.ExpectEmpty(ippool.Status.V4UsingIPRange)
+        framework.ExpectEmpty(ippool.Status.V6UsingIPRange)
+        framework.ExpectTrue(ippool.Status.V4AvailableIPs.Equal(ipv4Range.Count()))
+        framework.ExpectTrue(ippool.Status.V6AvailableIPs.Equal(ipv6Range.Count()))
+        framework.ExpectEqual(ippool.Status.V4AvailableIPRange, ipv4Range.String())
+        framework.ExpectEqual(ippool.Status.V6AvailableIPRange, ipv6Range.String())
+
+        ginkgo.By("Creating deployment " + deployName + " within ippool " + ippoolName)
+        replicas := 3
+        labels := map[string]string{"app": deployName}
+        annotations := map[string]string{util.IpPoolAnnotation: ippoolName}
+        deploy := framework.MakeDeployment(deployName, int32(replicas), labels, annotations, "pause", framework.PauseImage, "")
+        deploy = deployClient.CreateSync(deploy)
+
+        checkFn := func() {
+            ginkgo.By("Getting pods for deployment " + deployName)
+            pods, err := deployClient.GetPods(deploy)
+            framework.ExpectNoError(err, "failed to get pods for deployment "+deployName)
+            framework.ExpectHaveLen(pods.Items, replicas)
+
+            v4Using, v6Using := ipam.NewEmptyIPRangeList(), ipam.NewEmptyIPRangeList()
+            for _, pod := range pods.Items {
+                for _, podIP := range pod.Status.PodIPs {
+                    ip, err := ipam.NewIP(podIP.IP)
+                    framework.ExpectNoError(err)
+                    if strings.ContainsRune(podIP.IP, ':') {
+                        framework.ExpectTrue(ipv6Range.Contains(ip), "Pod IP %s should be contained by %v", ip.String(), ipv6Range.String())
+                        v6Using.Add(ip)
+                    } else {
+                        framework.ExpectTrue(ipv4Range.Contains(ip), "Pod IP %s should be contained by %v", ip.String(), ipv4Range.String())
+                        v4Using.Add(ip)
+                    }
+                }
+            }
+
+            ginkgo.By("Validating ippool status")
+            framework.WaitUntil(2*time.Second, 30*time.Second, func(_ context.Context) (bool, error) {
+                ippool = ippoolClient.Get(ippoolName)
+                v4Available, v6Available := ipv4Range.Separate(v4Using), ipv6Range.Separate(v6Using)
+                if !ippool.Status.V4UsingIPs.Equal(v4Using.Count()) {
+                    framework.Logf(".status.v4UsingIPs mismatch: expect %s, actual %s", v4Using.Count(), ippool.Status.V4UsingIPs)
+                    return false, nil
+                }
+                if !ippool.Status.V6UsingIPs.Equal(v6Using.Count()) {
+                    framework.Logf(".status.v6UsingIPs mismatch: expect %s, actual %s", v6Using.Count(), ippool.Status.V6UsingIPs)
+                    return false, nil
+                }
+                if ippool.Status.V4UsingIPRange != v4Using.String() {
+                    framework.Logf(".status.v4UsingIPRange mismatch: expect %s, actual %s", v4Using, ippool.Status.V4UsingIPRange)
+                    return false, nil
+                }
+                if ippool.Status.V6UsingIPRange != v6Using.String() {
+                    framework.Logf(".status.v6UsingIPRange mismatch: expect %s, actual %s", v6Using, ippool.Status.V6UsingIPRange)
+                    return false, nil
+                }
+                if !ippool.Status.V4AvailableIPs.Equal(v4Available.Count()) {
+                    framework.Logf(".status.v4AvailableIPs mismatch: expect %s, actual %s", v4Available.Count(), ippool.Status.V4AvailableIPs)
+                    return false, nil
+                }
+                if !ippool.Status.V6AvailableIPs.Equal(v6Available.Count()) {
+                    framework.Logf(".status.v6AvailableIPs mismatch: expect %s, actual %s", v6Available.Count(), ippool.Status.V6AvailableIPs)
+                    return false, nil
+                }
+                if ippool.Status.V4AvailableIPRange != v4Available.String() {
+                    framework.Logf(".status.v4AvailableIPRange mismatch: expect %s, actual %s", v4Available, ippool.Status.V4AvailableIPRange)
+                    return false, nil
+                }
+                if ippool.Status.V6AvailableIPRange != v6Available.String() {
+                    framework.Logf(".status.v6AvailableIPRange mismatch: expect %s, actual %s", v6Available, ippool.Status.V6AvailableIPRange)
+                    return false, nil
+                }
+                return true, nil
+            }, "")
+        }
+        checkFn()
+
+        ginkgo.By("Restarting deployment " + deployName)
+        deploy = deployClient.RestartSync(deploy)
+        checkFn()
+
+        ginkgo.By("Getting namespace " + namespaceName)
+        ns := nsClient.Get(namespaceName)
+        framework.ExpectNotNil(ns.Annotations)
+
+        ginkgo.By("Patching namespace " + namespaceName)
+        patchedNS := ns.DeepCopy()
+        patchedNS.Annotations[util.IpPoolAnnotation] = ippoolName
+        _ = nsClient.Patch(ns, patchedNS)
+
+        ginkgo.By("Patching deployment " + deployName)
+        patchedDeploy := deploy.DeepCopy()
+        patchedDeploy.Spec.Template.Annotations = nil
+        deploy = deployClient.PatchSync(deploy, patchedDeploy)
+        checkFn()
+    })
 })
diff --git a/test/e2e/kube-ovn/qos/qos.go b/test/e2e/kube-ovn/qos/qos.go
index b7fec1376a7..2ee9a6a7bdb 100644
--- a/test/e2e/kube-ovn/qos/qos.go
+++ b/test/e2e/kube-ovn/qos/qos.go
@@ -148,7 +148,7 @@ var _ = framework.Describe("[group:qos]", func() {
         }
         modifiedPod.Annotations[util.NetemQosLimitAnnotation] = strconv.Itoa(limit)
         modifiedPod.Annotations[util.NetemQosLossAnnotation] = strconv.Itoa(loss)
-        pod = podClient.PatchPod(pod, modifiedPod)
+        pod = podClient.Patch(pod, modifiedPod)
 
         ginkgo.By("Validating pod annotations")
         framework.ExpectHaveKeyWithValue(pod.Annotations, util.AllocatedAnnotation, "true")
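The assertions in the ipam test lean on a few `ipam` package helpers (`NewIPRangeListFrom`, `Count`, `Contains`, `Separate`). A sketch of their semantics as the test relies on them — inferred from the usage above, not from the package's documentation:

```go
package main

import (
	"fmt"

	"github.com/kubeovn/kube-ovn/pkg/ipam"
)

func main() {
	// One lone address plus one inclusive "a..b" range, the same shape the
	// test builds from its three random IPs per family.
	r, err := ipam.NewIPRangeListFrom("10.16.0.10", "10.16.0.20..10.16.0.22")
	if err != nil {
		panic(err)
	}
	fmt.Println(r.Count()) // 4 addresses in total

	ip, _ := ipam.NewIP("10.16.0.21")
	fmt.Println(r.Contains(ip)) // true

	// Separate subtracts the in-use list, which is what the test compares
	// against .status.v4AvailableIPs / .status.v4AvailableIPRange.
	using, _ := ipam.NewIPRangeListFrom("10.16.0.21")
	fmt.Println(r.Separate(using).Count()) // 3
}
```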