Merge branch 'spidernet-io:main' into main
ty-dc committed Dec 21, 2023
2 parents 090c2e1 + 0d5994b commit aa6d040
Showing 8 changed files with 335 additions and 51 deletions.
14 changes: 6 additions & 8 deletions cmd/spiderpool-agent/cmd/daemon.go
```diff
@@ -342,14 +342,12 @@ func initAgentServiceManagers(ctx context.Context) {
 	}
 	agentContext.StsManager = statefulSetManager
 
-	if agentContext.Cfg.EnableKubevirtStaticIP {
-		logger.Debug("Begin to initialize Kubevirt manager")
-		kubevirtManager := kubevirtmanager.NewKubevirtManager(
-			agentContext.CRDManager.GetClient(),
-			agentContext.CRDManager.GetAPIReader(),
-		)
-		agentContext.KubevirtManager = kubevirtManager
-	}
+	logger.Debug("Begin to initialize Kubevirt manager")
+	kubevirtManager := kubevirtmanager.NewKubevirtManager(
+		agentContext.CRDManager.GetClient(),
+		agentContext.CRDManager.GetAPIReader(),
+	)
+	agentContext.KubevirtManager = kubevirtManager
 
 	logger.Debug("Begin to initialize Endpoint manager")
 	endpointManager, err := workloadendpointmanager.NewWorkloadEndpointManager(
```
14 changes: 6 additions & 8 deletions cmd/spiderpool-controller/cmd/daemon.go
```diff
@@ -262,14 +262,12 @@ func initControllerServiceManagers(ctx context.Context) {
 	}
 	controllerContext.StsManager = statefulSetManager
 
-	if controllerContext.Cfg.EnableKubevirtStaticIP {
-		logger.Debug("Begin to initialize Kubevirt manager")
-		kubevirtManager := kubevirtmanager.NewKubevirtManager(
-			controllerContext.CRDManager.GetClient(),
-			controllerContext.CRDManager.GetAPIReader(),
-		)
-		controllerContext.KubevirtManager = kubevirtManager
-	}
+	logger.Debug("Begin to initialize Kubevirt manager")
+	kubevirtManager := kubevirtmanager.NewKubevirtManager(
+		controllerContext.CRDManager.GetClient(),
+		controllerContext.CRDManager.GetAPIReader(),
+	)
+	controllerContext.KubevirtManager = kubevirtManager
 
 	logger.Debug("Begin to initialize Endpoint manager")
 	endpointManager, err := workloadendpointmanager.NewWorkloadEndpointManager(
```
11 changes: 10 additions & 1 deletion docs/usage/faq-zh_CN.md
@@ -21,4 +21,13 @@ The Spiderpool project consists of multiple sub-plugin projects, including: `spiderpool`, `coo
### The SpiderSubnet feature does not work properly

- If you hit the error `Internal error occurred: failed calling webhook "spidersubnet.spiderpool.spidernet.io": the server could not find the requested resource`, check the configmap `spiderpool-conf` and make sure the SpiderSubnet feature is enabled.
- If you hit the error `failed to get IPPool candidates from Subnet: no matching auto-created IPPool candidate with matchLables`, check the `spiderpool-controller` logs.
- If you hit the error `failed to get IPPool candidates from Subnet: no matching auto-created IPPool candidate with matchLables`, check the `spiderpool-controller` logs. The Spiderpool controller component currently requires Kubernetes `v1.21` or later for clusters that use the SpiderSubnet feature; the following error logs indicate that the cluster version is too low:

```text
W1220 05:44:16.129916 1 reflector.go:535] k8s.io/client-go/informers/factory.go:150: failed to list *v1.CronJob: the server could not find the requested resource
E1220 05:44:16.129978 1 reflector.go:147] k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.CronJob: failed to list *v1.CronJob: the server could not find the requested resource
```

### Does Spiderpool IPAM depend on the spiderpool-controller component?

The spiderpool-controller component implements webhooks for the `Spec` field of resources such as SpiderSubnet and SpiderIPPool. The spiderpool-agent component is the core of the IPAM implementation: when allocating IPs it modifies the `Status` field of the SpiderIPPool resource. That field is a [subresource](https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/#subresources) and is not intercepted by the webhooks registered by spiderpool-controller, so IPAM does not depend on the spiderpool-controller component.
11 changes: 10 additions & 1 deletion docs/usage/faq.md
@@ -21,4 +21,13 @@ If you change the configmap `spiderpool-conf` configurations, you need to restar
### Why does the SpiderSubnet feature not work well?

- For an error like `Internal error occurred: failed calling webhook "spidersubnet.spiderpool.spidernet.io": the server could not find the requested resource`, update the configmap `spiderpool-conf` to enable the SpiderSubnet feature, then restart the `spiderpool-agent` and `spiderpool-controller` components.
- For an error like `failed to get IPPool candidates from Subnet: no matching auto-created IPPool candidate with matchLables`, check the `spiderpool-controller` logs.
- For an error like `failed to get IPPool candidates from Subnet: no matching auto-created IPPool candidate with matchLables`, check the `spiderpool-controller` logs. When the SpiderSubnet feature is used, the spiderpool-controller component requires a Kubernetes cluster of version `v1.21` or later; the following error logs indicate that your cluster version is too low:

```text
W1220 05:44:16.129916 1 reflector.go:535] k8s.io/client-go/informers/factory.go:150: failed to list *v1.CronJob: the server could not find the requested resource
E1220 05:44:16.129978 1 reflector.go:147] k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.CronJob: failed to list *v1.CronJob: the server could not find the requested resource
```
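
The quickest check is the cluster version itself (`kubectl version`), but you can also probe the API the controller's informer needs. Below is a minimal client-go sketch, not part of Spiderpool, that asks the discovery API whether the `batch/v1` CronJob kind is served; a cluster older than `v1.21` does not serve it. The kubeconfig path and the standalone `main` wrapper are illustrative assumptions:

```go
package main

import (
	"fmt"

	"k8s.io/client-go/discovery"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Load the local kubeconfig (the path is an assumption for this sketch).
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}

	dc, err := discovery.NewDiscoveryClientForConfig(cfg)
	if err != nil {
		panic(err)
	}

	// batch/v1 CronJob is only served by Kubernetes v1.21 and later, which is
	// the resource the failing informer in the logs above tries to list.
	resources, err := dc.ServerResourcesForGroupVersion("batch/v1")
	if err != nil {
		fmt.Println("batch/v1 is not served; the cluster is likely older than v1.21:", err)
		return
	}
	for _, r := range resources.APIResources {
		if r.Kind == "CronJob" {
			fmt.Println("batch/v1 CronJob is available; the version requirement is met")
			return
		}
	}
	fmt.Println("batch/v1 CronJob is not served; the cluster is likely older than v1.21")
}
```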

### Does Spiderpool IPAM rely on the spiderpool-controller component?

The spiderpool-controller component implements webhooks for the `Spec` field of the SpiderSubnet and SpiderIPPool resources. The spiderpool-agent component is the core of the IPAM implementation: when it allocates IP addresses, it updates the `Status` field of the SpiderIPPool resource. That field is a [subresource](https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/#subresources), so the request is not intercepted by the spiderpool-controller webhook. Therefore, IPAM does not rely on the spiderpool-controller component.
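
For context, the sketch below shows the kind of status write involved, using controller-runtime. The generated `AddToScheme` helper and the `AllocatedIPCount` status field are assumed from the `v2beta1` API package, and the pool name is hypothetical; because `Status().Update` targets the `/status` subresource endpoint, a webhook registered only for the main SpiderIPPool resource never sees the request:

```go
package main

import (
	"context"

	"k8s.io/apimachinery/pkg/runtime"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"

	spiderpoolv2beta1 "github.com/spidernet-io/spiderpool/pkg/k8s/apis/spiderpool.spidernet.io/v2beta1"
)

func main() {
	scheme := runtime.NewScheme()
	// AddToScheme is assumed to be exposed by the generated v2beta1 API package.
	if err := spiderpoolv2beta1.AddToScheme(scheme); err != nil {
		panic(err)
	}

	cli, err := client.New(ctrl.GetConfigOrDie(), client.Options{Scheme: scheme})
	if err != nil {
		panic(err)
	}

	ctx := context.Background()
	var pool spiderpoolv2beta1.SpiderIPPool
	// "example-v4-ippool" is a hypothetical pool name; SpiderIPPool is cluster-scoped.
	if err := cli.Get(ctx, client.ObjectKey{Name: "example-v4-ippool"}, &pool); err != nil {
		panic(err)
	}

	// This write goes to the /status subresource endpoint, so a validating webhook
	// registered only for the main SpiderIPPool resource does not intercept it.
	allocated := int64(1)
	pool.Status.AllocatedIPCount = &allocated // field name assumed from the v2beta1 API
	if err := cli.Status().Update(ctx, &pool); err != nil {
		panic(err)
	}
}
```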
24 changes: 12 additions & 12 deletions test/doc/assignip.md
@@ -1,14 +1,14 @@
# E2E Cases for IP Assignment

| Case ID | Title | Priority | Smoke | Status | Other |
|---------|---------------------------------------------------------------------------------------------------------|----------|-------|--------|-------------|
| E00001 | Assign IP to a pod for ipv4, ipv6 and dual-stack case | p1 | true | done | |
| E00002 | Assign IP to deployment/pod for ipv4, ipv6 and dual-stack case | p1 | true | done | |
| E00003 | Assign IP to statefulSet/pod for ipv4, ipv6 and dual-stack case | p1 | true | done | |
| E00004 | Assign IP to daemonSet/pod for ipv4, ipv6 and dual-stack case | p1 | true | done | |
| E00005 | Assign IP to job/pod for ipv4, ipv6 and dual-stack case | p1 | true | done | |
| E00006 | Assign IP to replicaset/pod for ipv4, ipv6 and dual-stack case | p1 | true | done | |
| E00007 | Successfully run a pod with long yaml for ipv4, ipv6 and dual-stack case | p2 | | done | |
| E00008 | Failed to run a pod when IP resource of an IPPool is exhausted | p3 | | done | |
| E00009 | The cluster is dual stack, but the spiderpool only allocates ipv4 or ipv6, the pod should run correctly | p3 | | | |
| E00010 | The cluster is single stack, but the spiderpool allocates ipv4 and ipv6, the pod should run correctly | p3 | | | |
| Case ID | Title | Priority | Smoke | Status | Other |
|---------|-------------------------------------------------------------------------------------------------------|----------|-------|--------|-------------|
| E00001 | Assign IP to a pod for ipv4, ipv6 and dual-stack case | p1 | true | done | |
| E00002 | Assign IP to deployment/pod for ipv4, ipv6 and dual-stack case | p1 | true | done | |
| E00003 | Assign IP to statefulSet/pod for ipv4, ipv6 and dual-stack case | p1 | true | done | |
| E00004 | Assign IP to daemonSet/pod for ipv4, ipv6 and dual-stack case | p1 | true | done | |
| E00005 | Assign IP to job/pod for ipv4, ipv6 and dual-stack case | p1 | true | done | |
| E00006 | Assign IP to replicaset/pod for ipv4, ipv6 and dual-stack case | p1 | true | done | |
| E00007 | Successfully run a pod with long yaml for ipv4, ipv6 and dual-stack case | p2 | | done | |
| E00008 | Failed to run a pod when IP resource of an IPPool is exhausted | p3 | | done | |
| E00009  | The cluster is dual stack, but the spiderpool can allocate ipv4 or ipv6 only with IPPools annotation   | p2       |       | done   |             |
| E00010  | The cluster is dual stack, but the spiderpool can allocate ipv4 or ipv6 only with Subnet annotation    | p2       |       | done   |             |
36 changes: 18 additions & 18 deletions test/doc/ippoolcr.md
@@ -1,20 +1,20 @@
# E2E Cases for IPPool CR

| Case ID | Title | Priority | Smoke | Status | Other |
| ------- | ------------------------------------------------------------ | -------- | ----- | ------ | ----- |
| D00001 | An IPPool fails to add an IP that already exists in an other IPPool | p2 | | done | |
| D00002 | Add a route with `routes` and `gateway` fields in the ippool spec, which only takes effect on the new pod and does not on the old pods | p2 | smoke | done | |
| D00003 | Failed to add wrong IPPool gateway and route to an IPPool CR | p2 | | done | |
| D00004 | Failed to delete an IPPool whose IP is not de-allocated at all | p2 | | done | |
| D00005 | A "true" value of IPPool/Spec/disabled should forbid IP allocation, but still allow ip de-allocation | p2 | | done | |
| D00006 | Successfully create and delete IPPools in batch | p2 | | done | |
| D00007 | Add, delete, modify, and query ippools that are created manually | p1 | | done | |
| D00008 | Manually ippool inherits subnet attributes (including routes, vlanId, etc.) | p3 | | | |
| D00009 | multusName matches, IP can be assigned | p2 | | done | |
| D00010 | multusName mismatch, unable to assign IP | p3 | | done | |
| D00011 | The node where the pod is located matches the nodeName, and the IP can be assigned | p2 | | done | |
| D00012 | The node where the pod resides does not match the nodeName, and the IP cannot be assigned | p3 | | done | |
| D00013 | nodeName has higher priority than nodeAffinity | p3 | | | |
| D00014 | The namespace where the pod is located matches the namespaceName, and the IP can be assigned | p2 | | | |
| D00015 | The namespace where the pod resides does not match the namespaceName, and the IP cannot be assigned | p2 | | | |
| D00016 | namespaceName has higher priority than namespaceAffinity | p3 | | | |
| Case ID | Title | Priority | Smoke | Status | Other |
| ------- |----------------------------------------------------------------------------------------------------------------------------------------|----------|-------|--------| ----- |
| D00001  | An IPPool fails to add an IP that already exists in another IPPool                                                                      | p2       |       | done   | |
| D00002 | Add a route with `routes` and `gateway` fields in the ippool spec, which only takes effect on the new pod and does not on the old pods | p2 | smoke | done | |
| D00003 | Failed to add wrong IPPool gateway and route to an IPPool CR | p2 | | done | |
| D00004 | Failed to delete an IPPool whose IP is not de-allocated at all | p2 | | done | |
| D00005 | A "true" value of IPPool/Spec/disabled should forbid IP allocation, but still allow ip de-allocation | p2 | | done | |
| D00006 | Successfully create and delete IPPools in batch | p2 | | done | |
| D00007 | Add, delete, modify, and query ippools that are created manually | p1 | | done | |
| D00008  | Manually created ippool inherits subnet attributes (including gateway, routes, etc.)                                                    | p3       |       |        | |
| D00009 | multusName matches, IP can be assigned | p2 | | done | |
| D00010 | multusName mismatch, unable to assign IP | p3 | | done | |
| D00011 | The node where the pod is located matches the nodeName, and the IP can be assigned | p2 | | done | |
| D00012 | The node where the pod resides does not match the nodeName, and the IP cannot be assigned | p3 | | done | |
| D00013 | nodeName has higher priority than nodeAffinity | p3 | | | |
| D00014 | The namespace where the pod is located matches the namespaceName, and the IP can be assigned | p2 | | | |
| D00015 | The namespace where the pod resides does not match the namespaceName, and the IP cannot be assigned | p2 | | | |
| D00016 | namespaceName has higher priority than namespaceAffinity | p3 | | | |
81 changes: 78 additions & 3 deletions test/e2e/assignip/assignip_test.go
@@ -4,18 +4,21 @@ package assignip_test

import (
"context"
"encoding/json"
"strings"

spiderpoolv2beta1 "github.com/spidernet-io/spiderpool/pkg/k8s/apis/spiderpool.spidernet.io/v2beta1"

. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
"github.com/spidernet-io/e2eframework/tools"

"github.com/spidernet-io/spiderpool/pkg/constant"
spiderpoolv2beta1 "github.com/spidernet-io/spiderpool/pkg/k8s/apis/spiderpool.spidernet.io/v2beta1"
"github.com/spidernet-io/spiderpool/pkg/types"
"github.com/spidernet-io/spiderpool/test/e2e/common"
corev1 "k8s.io/api/core/v1"
)

var _ = Describe("test pod", Label("assignip"), func() {

Context("fail to run a pod when IP resource of an ippool is exhausted or its IP been set excludeIPs", func() {
var deployName, v4PoolName, v6PoolName, namespace string
var v4PoolNameList, v6PoolNameList []string
@@ -157,5 +160,77 @@ var _ = Describe("test pod", Label("assignip"), func() {
Expect(frame.DeleteDeployment(deployName, namespace)).To(Succeed())
GinkgoWriter.Printf("Succeeded to delete deployment %v/%v \n", namespace, deployName)
})

It("The cluster is dual stack, but the spiderpool can allocates ipv4 or ipv6 only with IPPools annotation", Label("E00009"), func() {
if !(frame.Info.IpV4Enabled && frame.Info.IpV6Enabled) {
Skip("Single stack just skip this e2e case")
}

deployment := common.GenerateExampleDeploymentYaml(deployName, namespace, 1)
annotations := map[string]string{
constant.AnnoPodIPPool: common.GeneratePodIPPoolAnnotations(frame, "", []string{v4PoolObj.Name}, nil),
}
deployment.Spec.Template.Annotations = annotations
Expect(deployment).NotTo(BeNil(), "failed to generate Deployment yaml")

GinkgoWriter.Printf("Try to create deploy %v/%v \n", namespace, deployName)
Expect(frame.CreateDeployment(deployment)).To(Succeed())

// Check that all pods of the deployment are running.
var podList *corev1.PodList
var err error
Eventually(func() bool {
podList, err = frame.GetPodListByLabel(deployment.Spec.Template.Labels)
if nil != err || len(podList.Items) == 0 {
return false
}
return frame.CheckPodListRunning(podList)
}, 2*common.PodStartTimeout, common.ForcedWaitingTime).Should(BeTrue())

Expect(podList.Items).To(HaveLen(1))
Expect(podList.Items[0].Status.PodIPs).To(HaveLen(1))
})

It("The cluster is dual stack, but the spiderpool can allocates ipv4 or ipv6 only with Subnet annotation", Label("E00010"), func() {
if !frame.Info.SpiderSubnetEnabled {
Skip("The SpiderSubnet feature is disabled, skip this e2e case")
}
if !(frame.Info.IpV4Enabled && frame.Info.IpV6Enabled) {
Skip("Single stack just skip this e2e case")
}

deployment := common.GenerateExampleDeploymentYaml(deployName, namespace, 1)

// Create a deployment whose IPPool is auto-created from the given SpiderSubnet via annotation.
subnetAnno := types.AnnoSubnetItem{
IPv4: []string{v4SubnetName},
IPv6: nil,
}
subnetAnnoMarshal, err := json.Marshal(subnetAnno)
Expect(err).NotTo(HaveOccurred())

annotations := map[string]string{
constant.AnnoSpiderSubnet: string(subnetAnnoMarshal),
constant.AnnoSpiderSubnetPoolIPNumber: "1",
}
deployment.Spec.Template.Annotations = annotations
Expect(deployment).NotTo(BeNil(), "failed to generate Deployment yaml")

GinkgoWriter.Printf("Try to create deploy %v/%v \n", namespace, deployName)
Expect(frame.CreateDeployment(deployment)).To(Succeed())

// Check that all pods of the deployment are running.
var podList *corev1.PodList
Eventually(func() bool {
podList, err = frame.GetPodListByLabel(deployment.Spec.Template.Labels)
if nil != err || len(podList.Items) == 0 {
return false
}
return frame.CheckPodListRunning(podList)
}, 2*common.PodStartTimeout, common.ForcedWaitingTime).Should(BeTrue())

Expect(podList.Items).To(HaveLen(1))
Expect(podList.Items[0].Status.PodIPs).To(HaveLen(1))
})
})
})
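
Both new cases steer a dual-stack cluster toward single-stack allocation purely through Pod template annotations. A hand-written equivalent of what the tests generate might look like the sketch below; the literal annotation keys are assumptions about what `constant.AnnoPodIPPool`, `constant.AnnoSpiderSubnet`, and `constant.AnnoSpiderSubnetPoolIPNumber` resolve to, and the pool and subnet names are hypothetical:

```go
package main

import "fmt"

func main() {
	// Illustrative only: the annotation keys and resource names below are
	// assumptions, not values confirmed by this commit.

	// E00009: pin the Pod to a specific IPv4 IPPool so only an IPv4 address is allocated.
	ippoolCase := map[string]string{
		"ipam.spidernet.io/ippool": `{"ipv4": ["example-v4-ippool"]}`,
	}

	// E00010: let SpiderSubnet auto-create a one-IP IPv4 pool from the named subnet.
	subnetCase := map[string]string{
		"ipam.spidernet.io/subnet":           `{"ipv4": ["example-v4-subnet"]}`,
		"ipam.spidernet.io/ippool-ip-number": "1",
	}

	fmt.Println(ippoolCase, subnetCase)
}
```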