From 3a286924c589d171dca184d633a0f612714c80c5 Mon Sep 17 00:00:00 2001
From: Icarus9913
Date: Tue, 19 Dec 2023 17:01:58 +0800
Subject: [PATCH 1/4] supplement IPPool CR e2e

Signed-off-by: Icarus9913
---
 test/doc/ippoolcr.md               |  36 +++---
 test/e2e/ippoolcr/ippoolcr_test.go | 195 +++++++++++++++++++++++++++++
 2 files changed, 213 insertions(+), 18 deletions(-)

diff --git a/test/doc/ippoolcr.md b/test/doc/ippoolcr.md
index 585e281afa..d929edb6c2 100644
--- a/test/doc/ippoolcr.md
+++ b/test/doc/ippoolcr.md
@@ -1,20 +1,20 @@
 # E2E Cases for IPPool CR
 
-| Case ID | Title | Priority | Smoke | Status | Other |
-| ------- | ------------------------------------------------------------ | -------- | ----- | ------ | ----- |
-| D00001 | An IPPool fails to add an IP that already exists in an other IPPool | p2 | | done | |
-| D00002 | Add a route with `routes` and `gateway` fields in the ippool spec, which only takes effect on the new pod and does not on the old pods | p2 | smoke | done | |
-| D00003 | Failed to add wrong IPPool gateway and route to an IPPool CR | p2 | | done | |
-| D00004 | Failed to delete an IPPool whose IP is not de-allocated at all | p2 | | done | |
-| D00005 | A "true" value of IPPool/Spec/disabled should forbid IP allocation, but still allow ip de-allocation | p2 | | done | |
-| D00006 | Successfully create and delete IPPools in batch | p2 | | done | |
-| D00007 | Add, delete, modify, and query ippools that are created manually | p1 | | done | |
-| D00008 | Manually ippool inherits subnet attributes (including routes, vlanId, etc.) | p3 | | | |
-| D00009 | multusName matches, IP can be assigned | p2 | | done | |
-| D00010 | multusName mismatch, unable to assign IP | p3 | | done | |
-| D00011 | The node where the pod is located matches the nodeName, and the IP can be assigned | p2 | | done | |
-| D00012 | The node where the pod resides does not match the nodeName, and the IP cannot be assigned | p3 | | done | |
-| D00013 | nodeName has higher priority than nodeAffinity | p3 | | | |
-| D00014 | The namespace where the pod is located matches the namespaceName, and the IP can be assigned | p2 | | | |
-| D00015 | The namespace where the pod resides does not match the namespaceName, and the IP cannot be assigned | p2 | | | |
-| D00016 | namespaceName has higher priority than namespaceAffinity | p3 | | | |
+| Case ID | Title | Priority | Smoke | Status | Other |
+| ------- | -------------------------------------------------------------------------------------------------- | -------- | ----- | ------ | ----- |
+| D00001 | An IPPool fails to add an IP that already exists in another IPPool | p2 | | done | |
+| D00002 | Add a route with `routes` and `gateway` fields in the ippool spec, which only takes effect on the new pod and does not on the old pods | p2 | smoke | done | |
+| D00003 | Failed to add wrong IPPool gateway and route to an IPPool CR | p2 | | done | |
+| D00004 | Failed to delete an IPPool whose IP is not de-allocated at all | p2 | | done | |
+| D00005 | A "true" value of IPPool/Spec/disabled should forbid IP allocation, but still allow ip de-allocation | p2 | | done | |
+| D00006 | Successfully create and delete IPPools in batch | p2 | | done | |
+| D00007 | Add, delete, modify, and query ippools that are created manually | p1 | | done | |
+| D00008 | Manually ippool inherits subnet attributes (including gateway, routes, etc.) | p3 | | | |
+| D00009 | multusName matches, IP can be assigned | p2 | | done | |
+| D00010 | multusName mismatch, unable to assign IP | p3 | | done | |
+| D00011 | The node where the pod is located matches the nodeName, and the IP can be assigned | p2 | | done | |
+| D00012 | The node where the pod resides does not match the nodeName, and the IP cannot be assigned | p3 | | done | |
+| D00013 | nodeName has higher priority than nodeAffinity | p3 | | | |
+| D00014 | The namespace where the pod is located matches the namespaceName, and the IP can be assigned | p2 | | | |
+| D00015 | The namespace where the pod resides does not match the namespaceName, and the IP cannot be assigned | p2 | | | |
+| D00016 | namespaceName has higher priority than namespaceAffinity | p3 | | | |
diff --git a/test/e2e/ippoolcr/ippoolcr_test.go b/test/e2e/ippoolcr/ippoolcr_test.go
index 05fa1beb04..41bde80cb7 100644
--- a/test/e2e/ippoolcr/ippoolcr_test.go
+++ b/test/e2e/ippoolcr/ippoolcr_test.go
@@ -7,6 +7,7 @@ import (
 	"encoding/json"
 	"fmt"
 	"strings"
+	"sync"
 	"time"
 
 	. "github.com/onsi/ginkgo/v2"
@@ -789,4 +790,198 @@ var _ = Describe("test ippool CR", Label("ippoolCR"), func() {
 			}
 		})
 	})
+
+	It("Manually ippool inherits subnet attributes", Label("D00008"), func() {
+		if !frame.Info.SpiderSubnetEnabled {
+			Skip("SpiderSubnet feature is disabled, just skip this case")
+		}
+
+		fn := func(crName string, ipVersion types.IPVersion, subnet, ips, gateway string, route spiderpoolv2beta1.Route) {
+			demoSpiderSubnet := &spiderpoolv2beta1.SpiderSubnet{
+				ObjectMeta: metav1.ObjectMeta{
+					Name: fmt.Sprintf("%s-%d-subnet", crName, ipVersion),
+				},
+				Spec: spiderpoolv2beta1.SubnetSpec{
+					IPVersion: pointer.Int64(ipVersion),
+					Subnet:    subnet,
+					IPs:       []string{ips},
+					Gateway:   pointer.String(gateway),
+					Routes:    []spiderpoolv2beta1.Route{route},
+				},
+			}
+			GinkgoWriter.Printf("Generate SpiderSubnet %s, try to create it\n", demoSpiderSubnet.String())
+			err := frame.CreateResource(demoSpiderSubnet)
+			if nil != err {
+				if strings.Contains(err.Error(), "overlaps") {
+					Skip(fmt.Sprintf("the SpiderSubnet %v overlaps: %v", demoSpiderSubnet.String(), err.Error()))
+				}
+				Fail(fmt.Sprintf("failed to create SpiderSubnet, error: %s", err))
+			}
+
+			demoSpiderIPPool := &spiderpoolv2beta1.SpiderIPPool{
+				ObjectMeta: metav1.ObjectMeta{
+					Name: fmt.Sprintf("%s-%d-ippool", crName, ipVersion),
+				},
+				Spec: spiderpoolv2beta1.IPPoolSpec{
+					IPVersion: pointer.Int64(ipVersion),
+					Subnet:    subnet,
+					IPs:       []string{ips},
+				},
+			}
+			GinkgoWriter.Printf("Generate SpiderIPPool %s, try to create it\n", demoSpiderIPPool.String())
+			err = frame.CreateResource(demoSpiderIPPool)
+			Expect(err).NotTo(HaveOccurred())
+
+			GinkgoWriter.Println("check whether the IPPool inherits the Subnet properties")
+			Expect(demoSpiderIPPool.Spec.Gateway).To(Equal(demoSpiderSubnet.Spec.Gateway))
+			Expect(demoSpiderIPPool.Spec.Routes).To(Equal(demoSpiderSubnet.Spec.Routes))
+
+			GinkgoWriter.Println("clean up Subnet")
+			err = frame.DeleteResource(demoSpiderSubnet)
+			Expect(err).NotTo(HaveOccurred())
+		}
+
+		crName := "demo"
+		wg := sync.WaitGroup{}
+		if frame.Info.IpV4Enabled {
+			wg.Add(1)
+			go func() {
+				defer wg.Done()
+				// Skip/Fail/Expect panic inside goroutines, so recover them for Ginkgo.
+				defer GinkgoRecover()
+
+				subnet := "172.16.0.0/16"
+				ips := "172.16.0.2"
+				gateway := "172.16.0.1"
+				route := spiderpoolv2beta1.Route{
+					Dst: "172.17.0.0/16",
+					Gw:  "172.16.41.1",
+				}
+				fn(crName, constant.IPv4, subnet, ips, gateway, route)
+			}()
+		}
+		if frame.Info.IpV6Enabled {
+			wg.Add(1)
+			go func() {
+				defer wg.Done()
+				// Skip/Fail/Expect panic inside goroutines, so recover them for Ginkgo.
+				defer GinkgoRecover()
+
+				subnet := "fd00:172:16::/64"
+				ips := "fd00:172:16::2"
+				gateway := "fd00:172:16::1"
+				route := spiderpoolv2beta1.Route{
+					Dst: "fd00:172:17::/64",
+					Gw:  "fd00:172:16::100",
+				}
+				fn(crName, constant.IPv6, subnet, ips, gateway, route)
+			}()
+		}
+		wg.Wait()
+
+	})
+
+	Context("Test IPPool namespace Affinity", Label("namespaceName"), func() {
+		It("The namespace where the pod is located matches the namespaceName, and the IP can be assigned", Label("D00014"), func() {
+			Eventually(func() error {
+				if frame.Info.IpV4Enabled {
+					v4Pool, err := common.GetIppoolByName(frame, v4PoolObj.Name)
+					if nil != err {
+						return err
+					}
+					v4Pool.Spec.NamespaceName = []string{nsName}
+					err = frame.UpdateResource(v4Pool)
+					if nil != err {
+						return err
+					}
+					GinkgoWriter.Printf("update IPPool %s with NamespaceName %s successfully", v4Pool.Name, nsName)
+				}
+				if frame.Info.IpV6Enabled {
+					v6Pool, err := common.GetIppoolByName(frame, v6PoolObj.Name)
+					if nil != err {
+						return err
+					}
+					v6Pool.Spec.NamespaceName = []string{nsName}
+					err = frame.UpdateResource(v6Pool)
+					if nil != err {
+						return err
+					}
+					GinkgoWriter.Printf("update IPPool %s with NamespaceName %s successfully", v6Pool.Name, nsName)
+				}
+				return nil
+			}).WithTimeout(time.Minute * 3).WithPolling(time.Second * 3).Should(BeNil())
+
+			podName := "pod" + tools.RandomName()
+			podYaml := common.GenerateExamplePodYaml(podName, nsName)
+			annoPodIPPoolValue := types.AnnoPodIPPoolValue{}
+			if frame.Info.IpV4Enabled {
+				annoPodIPPoolValue.IPv4Pools = []string{v4PoolObj.Name}
+			}
+			if frame.Info.IpV6Enabled {
+				annoPodIPPoolValue.IPv6Pools = []string{v6PoolObj.Name}
+			}
+			annoPodIPPoolValueMarshal, err := json.Marshal(annoPodIPPoolValue)
+			Expect(err).NotTo(HaveOccurred())
+			podYaml.SetAnnotations(map[string]string{
+				constant.AnnoPodIPPool: string(annoPodIPPoolValueMarshal),
+			})
+			GinkgoWriter.Printf("try to create Pod with namespaceName '%s' IPPool: %s \n", nsName, podYaml.String())
+			Expect(frame.CreatePod(podYaml)).To(Succeed())
+			ctx, cancel := context.WithTimeout(context.Background(), time.Minute*1)
+			defer cancel()
+			GinkgoWriter.Printf("wait for one minute that pod %v/%v should be ready. \n", nsName, podName)
+			_, err = frame.WaitPodStarted(podName, nsName, ctx)
+			Expect(err).NotTo(HaveOccurred())
+		})
+
+		It("The namespace where the pod resides does not match the namespaceName, and the IP cannot be assigned", Label("D00015"), func() {
+			systemNS := "kube-system"
+
+			Eventually(func() error {
+				if frame.Info.IpV4Enabled {
+					v4Pool, err := common.GetIppoolByName(frame, v4PoolObj.Name)
+					if nil != err {
+						return err
+					}
+					v4Pool.Spec.NamespaceName = []string{systemNS}
+					err = frame.UpdateResource(v4Pool)
+					if nil != err {
+						return err
+					}
+					GinkgoWriter.Printf("update IPPool %s with NamespaceName %s successfully", v4Pool.Name, systemNS)
+				}
+				if frame.Info.IpV6Enabled {
+					v6Pool, err := common.GetIppoolByName(frame, v6PoolObj.Name)
+					if nil != err {
+						return err
+					}
+					v6Pool.Spec.NamespaceName = []string{systemNS}
+					err = frame.UpdateResource(v6Pool)
+					if nil != err {
+						return err
+					}
+					GinkgoWriter.Printf("update IPPool %s with NamespaceName %s successfully", v6Pool.Name, systemNS)
+				}
+				return nil
+			}).WithTimeout(time.Minute * 3).WithPolling(time.Second * 3).Should(BeNil())
+
+			podName := "pod" + tools.RandomName()
+			podYaml := common.GenerateExamplePodYaml(podName, nsName)
+			annoPodIPPoolValue := types.AnnoPodIPPoolValue{}
+			if frame.Info.IpV4Enabled {
+				annoPodIPPoolValue.IPv4Pools = []string{v4PoolObj.Name}
+			}
+			if frame.Info.IpV6Enabled {
+				annoPodIPPoolValue.IPv6Pools = []string{v6PoolObj.Name}
+			}
+			annoPodIPPoolValueMarshal, err := json.Marshal(annoPodIPPoolValue)
+			Expect(err).NotTo(HaveOccurred())
+			podYaml.SetAnnotations(map[string]string{
+				constant.AnnoPodIPPool: string(annoPodIPPoolValueMarshal),
+			})
+			GinkgoWriter.Printf("try to create Pod with namespaceName '%s' IPPool: %s \n", systemNS, podYaml.String())
+			Expect(frame.CreatePod(podYaml)).To(Succeed())
+			ctx, cancel := context.WithTimeout(context.Background(), time.Minute*1)
+			defer cancel()
+			GinkgoWriter.Printf("wait for one minute that pod %v/%v would not be ready. \n", nsName, podName)
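+			// The pools now only allow the kube-system namespace while the Pod runs in nsName,
+			// so IP allocation is expected to fail and waiting for the Pod to start must return an error.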
+			_, err = frame.WaitPodStarted(podName, nsName, ctx)
+			Expect(err).To(HaveOccurred())
+		})
+	})
 })

From 9d01b9aef53651afb7f9bbb06b2e90fe58afb280 Mon Sep 17 00:00:00 2001
From: Icarus9913
Date: Wed, 20 Dec 2023 14:32:45 +0800
Subject: [PATCH 2/4] add assign IP e2e

Signed-off-by: Icarus9913
---
 test/doc/assignip.md               | 24 ++++-----
 test/e2e/assignip/assignip_test.go | 81 ++++++++++++++++++++++++++++--
 2 files changed, 90 insertions(+), 15 deletions(-)

diff --git a/test/doc/assignip.md b/test/doc/assignip.md
index ec0cded041..c3d4abbfbd 100644
--- a/test/doc/assignip.md
+++ b/test/doc/assignip.md
@@ -1,14 +1,14 @@
 # E2E Cases for IP Assignment
 
-| Case ID | Title | Priority | Smoke | Status | Other |
-|---------|---------------------------------------------------------------------------------------------------------|----------|-------|--------|-------------|
-| E00001 | Assign IP to a pod for ipv4, ipv6 and dual-stack case | p1 | true | done | |
-| E00002 | Assign IP to deployment/pod for ipv4, ipv6 and dual-stack case | p1 | true | done | |
-| E00003 | Assign IP to statefulSet/pod for ipv4, ipv6 and dual-stack case | p1 | true | done | |
-| E00004 | Assign IP to daemonSet/pod for ipv4, ipv6 and dual-stack case | p1 | true | done | |
-| E00005 | Assign IP to job/pod for ipv4, ipv6 and dual-stack case | p1 | true | done | |
-| E00006 | Assign IP to replicaset/pod for ipv4, ipv6 and dual-stack case | p1 | true | done | |
-| E00007 | Successfully run a pod with long yaml for ipv4, ipv6 and dual-stack case | p2 | | done | |
-| E00008 | Failed to run a pod when IP resource of an IPPool is exhausted | p3 | | done | |
-| E00009 | The cluster is dual stack, but the spiderpool only allocates ipv4 or ipv6, the pod should run correctly | p3 | | | |
-| E00010 | The cluster is single stack, but the spiderpool allocates ipv4 and ipv6, the pod should run correctly | p3 | | | |
+| Case ID | Title | Priority | Smoke | Status | Other |
+|---------|---------------------------------------------------------------------------------------------------------|----------|-------|--------|-------------|
+| E00001 | Assign IP to a pod for ipv4, ipv6 and dual-stack case | p1 | true | done | |
+| E00002 | Assign IP to deployment/pod for ipv4, ipv6 and dual-stack case | p1 | true | done | |
+| E00003 | Assign IP to statefulSet/pod for ipv4, ipv6 and dual-stack case | p1 | true | done | |
+| E00004 | Assign IP to daemonSet/pod for ipv4, ipv6 and dual-stack case | p1 | true | done | |
+| E00005 | Assign IP to job/pod for ipv4, ipv6 and dual-stack case | p1 | true | done | |
+| E00006 | Assign IP to replicaset/pod for ipv4, ipv6 and dual-stack case | p1 | true | done | |
+| E00007 | Successfully run a pod with long yaml for ipv4, ipv6 and dual-stack case | p2 | | done | |
+| E00008 | Failed to run a pod when IP resource of an IPPool is exhausted | p3 | | done | |
+| E00009 | The cluster is dual stack, but the spiderpool can allocate ipv4 or ipv6 only with IPPools annotation | p2 | | done | |
+| E00010 | The cluster is dual stack, but the spiderpool can allocate ipv4 or ipv6 only with Subnet annotation | p2 | | done | |
diff --git a/test/e2e/assignip/assignip_test.go b/test/e2e/assignip/assignip_test.go
index 750237b718..c00f85ebc5 100644
--- a/test/e2e/assignip/assignip_test.go
+++ b/test/e2e/assignip/assignip_test.go
@@ -4,18 +4,21 @@ package assignip_test
 
 import (
 	"context"
+	"encoding/json"
 	"strings"
 
-	spiderpoolv2beta1 "github.com/spidernet-io/spiderpool/pkg/k8s/apis/spiderpool.spidernet.io/v2beta1"
-
 	. "github.com/onsi/ginkgo/v2"
 	. "github.com/onsi/gomega"
 	"github.com/spidernet-io/e2eframework/tools"
+
+	"github.com/spidernet-io/spiderpool/pkg/constant"
+	spiderpoolv2beta1 "github.com/spidernet-io/spiderpool/pkg/k8s/apis/spiderpool.spidernet.io/v2beta1"
+	"github.com/spidernet-io/spiderpool/pkg/types"
 	"github.com/spidernet-io/spiderpool/test/e2e/common"
+	corev1 "k8s.io/api/core/v1"
 )
 
 var _ = Describe("test pod", Label("assignip"), func() {
-
 	Context("fail to run a pod when IP resource of an ippool is exhausted or its IP been set excludeIPs", func() {
 		var deployName, v4PoolName, v6PoolName, namespace string
 		var v4PoolNameList, v6PoolNameList []string
@@ -157,5 +160,77 @@ var _ = Describe("test pod", Label("assignip"), func() {
 			Expect(frame.DeleteDeployment(deployName, namespace)).To(Succeed())
 			GinkgoWriter.Printf("Succeeded to delete deployment %v/%v \n", namespace, deployName)
 		})
+
+		It("The cluster is dual stack, but the spiderpool can allocate ipv4 or ipv6 only with IPPools annotation", Label("E00009"), func() {
+			if !(frame.Info.IpV4Enabled && frame.Info.IpV6Enabled) {
+				Skip("Single stack just skip this e2e case")
+			}
+
+			deployment := common.GenerateExampleDeploymentYaml(deployName, namespace, 1)
+			annotations := map[string]string{
+				constant.AnnoPodIPPool: common.GeneratePodIPPoolAnnotations(frame, "", []string{v4PoolObj.Name}, nil),
+			}
+			deployment.Spec.Template.Annotations = annotations
+			Expect(deployment).NotTo(BeNil(), "failed to generate Deployment yaml")
+
+			GinkgoWriter.Printf("Try to create deploy %v/%v \n", namespace, deployName)
+			Expect(frame.CreateDeployment(deployment)).To(Succeed())
+
+			// Checking the pod run status should all be running.
+			var podList *corev1.PodList
+			var err error
+			Eventually(func() bool {
+				podList, err = frame.GetPodListByLabel(deployment.Spec.Template.Labels)
+				if nil != err || len(podList.Items) == 0 {
+					return false
+				}
+				return frame.CheckPodListRunning(podList)
+			}, 2*common.PodStartTimeout, common.ForcedWaitingTime).Should(BeTrue())
+
+			Expect(podList.Items).To(HaveLen(1))
+			Expect(podList.Items[0].Status.PodIPs).To(HaveLen(1))
+		})
+
+		It("The cluster is dual stack, but the spiderpool can allocate ipv4 or ipv6 only with Subnet annotation", Label("E00010"), func() {
+			if !frame.Info.SpiderSubnetEnabled {
+				Skip("The SpiderSubnet feature is disabled, skip this e2e case")
+			}
+			if !(frame.Info.IpV4Enabled && frame.Info.IpV6Enabled) {
+				Skip("Single stack just skip this e2e case")
+			}
+
+			deployment := common.GenerateExampleDeploymentYaml(deployName, namespace, 1)
+
+			// Request only the IPv4 subnet through the SpiderSubnet annotation.
+			subnetAnno := types.AnnoSubnetItem{
+				IPv4: []string{v4SubnetName},
+				IPv6: nil,
+			}
+			subnetAnnoMarshal, err := json.Marshal(subnetAnno)
+			Expect(err).NotTo(HaveOccurred())
+
+			annotations := map[string]string{
+				constant.AnnoSpiderSubnet:             string(subnetAnnoMarshal),
+				constant.AnnoSpiderSubnetPoolIPNumber: "1",
+			}
+			deployment.Spec.Template.Annotations = annotations
+			Expect(deployment).NotTo(BeNil(), "failed to generate Deployment yaml")
+
+			GinkgoWriter.Printf("Try to create deploy %v/%v \n", namespace, deployName)
+			Expect(frame.CreateDeployment(deployment)).To(Succeed())
+
+			// Checking the pod run status should all be running.
+			var podList *corev1.PodList
+			Eventually(func() bool {
+				podList, err = frame.GetPodListByLabel(deployment.Spec.Template.Labels)
+				if nil != err || len(podList.Items) == 0 {
+					return false
+				}
+				return frame.CheckPodListRunning(podList)
+			}, 2*common.PodStartTimeout, common.ForcedWaitingTime).Should(BeTrue())
+
+			Expect(podList.Items).To(HaveLen(1))
+			Expect(podList.Items[0].Status.PodIPs).To(HaveLen(1))
+		})
 	})
 })

From 16548f15551e482e8856fba5bea39fb906f034f8 Mon Sep 17 00:00:00 2001
From: Icarus9913
Date: Thu, 21 Dec 2023 14:41:23 +0800
Subject: [PATCH 3/4] fix disable kubevirt static IP feature lead spiderpool-agent crash

Signed-off-by: Icarus9913
---
 cmd/spiderpool-agent/cmd/daemon.go      | 14 ++++++--------
 cmd/spiderpool-controller/cmd/daemon.go | 14 ++++++--------
 2 files changed, 12 insertions(+), 16 deletions(-)

diff --git a/cmd/spiderpool-agent/cmd/daemon.go b/cmd/spiderpool-agent/cmd/daemon.go
index fba68af09b..a31a112f14 100644
--- a/cmd/spiderpool-agent/cmd/daemon.go
+++ b/cmd/spiderpool-agent/cmd/daemon.go
@@ -342,14 +342,12 @@ func initAgentServiceManagers(ctx context.Context) {
 	}
 	agentContext.StsManager = statefulSetManager
 
-	if agentContext.Cfg.EnableKubevirtStaticIP {
-		logger.Debug("Begin to initialize Kubevirt manager")
-		kubevirtManager := kubevirtmanager.NewKubevirtManager(
-			agentContext.CRDManager.GetClient(),
-			agentContext.CRDManager.GetAPIReader(),
-		)
-		agentContext.KubevirtManager = kubevirtManager
-	}
+	logger.Debug("Begin to initialize Kubevirt manager")
+	kubevirtManager := kubevirtmanager.NewKubevirtManager(
+		agentContext.CRDManager.GetClient(),
+		agentContext.CRDManager.GetAPIReader(),
+	)
+	agentContext.KubevirtManager = kubevirtManager
 
 	logger.Debug("Begin to initialize Endpoint manager")
 	endpointManager, err := workloadendpointmanager.NewWorkloadEndpointManager(
diff --git a/cmd/spiderpool-controller/cmd/daemon.go b/cmd/spiderpool-controller/cmd/daemon.go
index 64e8b635bc..90d4bef2dc 100644
--- a/cmd/spiderpool-controller/cmd/daemon.go
+++ b/cmd/spiderpool-controller/cmd/daemon.go
@@ -262,14 +262,12 @@ func initControllerServiceManagers(ctx context.Context) {
 	}
 	controllerContext.StsManager = statefulSetManager
 
-	if controllerContext.Cfg.EnableKubevirtStaticIP {
-		logger.Debug("Begin to initialize Kubevirt manager")
-		kubevirtManager := kubevirtmanager.NewKubevirtManager(
-			controllerContext.CRDManager.GetClient(),
-			controllerContext.CRDManager.GetAPIReader(),
-		)
-		controllerContext.KubevirtManager = kubevirtManager
-	}
+	logger.Debug("Begin to initialize Kubevirt manager")
+	kubevirtManager := kubevirtmanager.NewKubevirtManager(
+		controllerContext.CRDManager.GetClient(),
+		controllerContext.CRDManager.GetAPIReader(),
+	)
+	controllerContext.KubevirtManager = kubevirtManager
 
 	logger.Debug("Begin to initialize Endpoint manager")
 	endpointManager, err := workloadendpointmanager.NewWorkloadEndpointManager(

From f196cd283999fcedc4842362f38ca6f9e3b525b2 Mon Sep 17 00:00:00 2001
From: Icarus9913
Date: Thu, 21 Dec 2023 17:25:04 +0800
Subject: [PATCH 4/4] supplement troubleshoot documents

Signed-off-by: Icarus9913
---
 docs/usage/faq-zh_CN.md | 11 ++++++++++-
 docs/usage/faq.md       | 11 ++++++++++-
 2 files changed, 20 insertions(+), 2 deletions(-)

diff --git a/docs/usage/faq-zh_CN.md b/docs/usage/faq-zh_CN.md
index 7581c9b638..b5d009d1aa 100644
--- a/docs/usage/faq-zh_CN.md
+++ b/docs/usage/faq-zh_CN.md
@@ -21,4 +21,13 @@ Spiderpool 项目由多个子插件项目组成，包括有：`spiderpool`, `coo
 ### SpiderSubnet功能使用不正常
 
 - 如果遇到报错 `Internal error occurred: failed calling webhook "spidersubnet.spiderpool.spidernet.io": the server could not find the requested resource`,请检查 configmap `spiderpool-conf` 确保 SpiderSubnet 功能已启动。
-- 若遇到报错 `failed to get IPPool candidates from Subnet: no matching auto-created IPPool candidate with matchLables`,请检查 `spiderpool-controller` 的日志。
+- 若遇到报错 `failed to get IPPool candidates from Subnet: no matching auto-created IPPool candidate with matchLables`,请检查 `spiderpool-controller` 的日志。目前 Spiderpool 的 controller 组件要求使用 SpiderSubnet 功能的集群最低版本为 `v1.21`, 如遇到以下日志报错即表明当前集群版本过低:
+
+  ```text
+  W1220 05:44:16.129916 1 reflector.go:535] k8s.io/client-go/informers/factory.go:150: failed to list *v1.CronJob: the server could not find the requested resource
+  E1220 05:44:16.129978 1 reflector.go:147] k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.CronJob: failed to list *v1.CronJob: the server could not find the requested resource
+  ```
+
+### Spiderpool IPAM 是否依赖 spiderpool-controller 组件?
+
+spiderpool-controller 组件针对 SpiderSubnet、SpiderIPPool 等资源的 `Spec` 字段实现了 Webhook 功能。而 spiderpool-agent 组件是 IPAM 功能实现的核心部分，在分配 IP 的时候会对 SpiderIPPool 资源的 `Status` 字段进行修改，该字段属于 [subresource](https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/#subresources)，不会被 spiderpool-controller 所注册的 Webhook 拦截到，所以 IPAM 不会依赖 spiderpool-controller 组件。
diff --git a/docs/usage/faq.md b/docs/usage/faq.md
index 1e5de3a930..d61a0c9bbe 100644
--- a/docs/usage/faq.md
+++ b/docs/usage/faq.md
@@ -21,4 +21,13 @@ If you change the configmap `spiderpool-conf` configurations, you need to restar
 ### Why SpiderSubnet feature not works well?
 
 - For error like `Internal error occurred: failed calling webhook "spidersubnet.spiderpool.spidernet.io": the server could not find the requested resource`, you need to update configmap `spiderpool-conf` to enable SpiderSubnet feature and restart `spiderpool-agent` and `spiderpool-controller` components.
-- For error like `failed to get IPPool candidates from Subnet: no matching auto-created IPPool candidate with matchLables`, you should check `spiderpool-controller` logs.
+- For error like `failed to get IPPool candidates from Subnet: no matching auto-created IPPool candidate with matchLables`, you should check `spiderpool-controller` logs. When the SpiderSubnet feature is enabled, the spiderpool-controller component requires a Kubernetes cluster version of at least `v1.21`. The following error logs indicate that your cluster version is too low:
+
+  ```text
+  W1220 05:44:16.129916 1 reflector.go:535] k8s.io/client-go/informers/factory.go:150: failed to list *v1.CronJob: the server could not find the requested resource
+  E1220 05:44:16.129978 1 reflector.go:147] k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.CronJob: failed to list *v1.CronJob: the server could not find the requested resource
+  ```
+
+### Does Spiderpool IPAM rely on the spiderpool-controller component?
+
+spiderpool-controller implements webhooks for the `Spec` field of the SpiderSubnet and SpiderIPPool resources, while spiderpool-agent is the core of the IPAM implementation: when allocating IP addresses it only modifies the `Status` field of the SpiderIPPool resource. That field is a [subresource](https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/#subresources), so the request is not intercepted by the webhook registered by spiderpool-controller. Therefore, IPAM does not rely on the spiderpool-controller component.