Skip to content

Commit

Permalink
fix dual
Browse files Browse the repository at this point in the history
  • Loading branch information
bobz965 committed Jul 18, 2023
1 parent e039c6e commit 3179884
Show file tree
Hide file tree
Showing 6 changed files with 109 additions and 83 deletions.
2 changes: 2 additions & 0 deletions Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -54,6 +54,7 @@ SUBMARINER_ROUTE_AGENT = quay.io/submariner/submariner-route-agent:$(SUBMARINER_
SUBMARINER_NETTEST = quay.io/submariner/nettest:$(SUBMARINER_VERSION)

VPC_NAT_GW_IMG = $(REGISTRY)/vpc-nat-gateway:$(VERSION)
KUBEOVN_BASE_IMG= $(REGISTRY)/kube-ovn-base:$(VERSION)

E2E_NETWORK = bridge
ifneq ($(VLAN_ID),)
Expand Down Expand Up @@ -428,6 +429,7 @@ kind-upgrade-chart: kind-load-image

.PHONY: kind-install
kind-install: kind-load-image
$(call kind_load_image,kube-ovn,$(KUBEOVN_BASE_IMG))
kubectl config use-context kind-kube-ovn
@$(MAKE) kind-untaint-control-plane
sed 's/VERSION=.*/VERSION=$(VERSION)/' dist/images/install.sh | bash
Expand Down
2 changes: 1 addition & 1 deletion pkg/controller/ippool.go
Original file line number Diff line number Diff line change
Expand Up @@ -218,7 +218,7 @@ func (c *Controller) handleDeleteIPPool(ippool *kubeovnv1.IPPool) error {
if len(ns.Annotations) == 0 {
continue
}
if ns.Annotations[util.IpPoolAnnotation] == ippool.Name {
if ns.Annotations[util.IpPoolNameAnnotation] == ippool.Name {
c.enqueueAddNamespace(ns)
}
}
Expand Down
22 changes: 17 additions & 5 deletions pkg/controller/pod.go
Original file line number Diff line number Diff line change
Expand Up @@ -1467,6 +1467,7 @@ func (c *Controller) acquireAddress(pod *v1.Pod, podNet *kubeovnNet) (string, st
*macStr = ""
}

ipPoolNameStr := pod.Annotations[fmt.Sprintf(util.IpPoolNameAnnotationTemplate, podNet.ProviderName)]
ippoolStr := pod.Annotations[fmt.Sprintf(util.IpPoolAnnotationTemplate, podNet.ProviderName)]
if ippoolStr == "" {
ns, err := c.namespacesLister.Get(pod.Namespace)
Expand Down Expand Up @@ -1529,21 +1530,32 @@ func (c *Controller) acquireAddress(pod *v1.Pod, podNet *kubeovnNet) (string, st

// IPPool allocate
if ippoolStr != "" {
var ipPool []string
var ipPool, ipPoolNames []string
if strings.ContainsRune(ippoolStr, ';') {
ipPool = strings.Split(ippoolStr, ";")
} else {
ipPool = strings.Split(ippoolStr, ",")
if podNet.Subnet.Spec.Protocol == kubeovnv1.ProtocolDual && isStsPod {
klog.Infof("sts pod in dual stack subnet use ip pool: %s", ippoolStr)
ipPool = strings.Split(ippoolStr, ";")
} else {
ipPool = strings.Split(ippoolStr, ",")
}
}
if ipPoolNameStr != "" {
ipPoolNames = strings.Split(ipPoolNameStr, ",")
}
for i, ip := range ipPool {
ipPool[i] = strings.TrimSpace(ip)
}

if len(ipPool) == 1 && net.ParseIP(ipPool[0]) == nil {
for i, name := range ipPoolNames {
ipPoolNames[i] = strings.TrimSpace(name)
}
if len(ipPoolNames) == 1 && ipPoolNames[0] != "" {
klog.Infof("random allocate ip by ip pool %s from subnet %s", ipPoolNames[0], podNet.Subnet.Name)
var skippedAddrs []string
for {
portName := ovs.PodNameToPortName(podName, pod.Namespace, podNet.ProviderName)
ipv4, ipv6, mac, err := c.ipam.GetRandomAddress(key, portName, macStr, podNet.Subnet.Name, ipPool[0], skippedAddrs, !podNet.AllowLiveMigration)
ipv4, ipv6, mac, err := c.ipam.GetRandomAddress(key, portName, macStr, podNet.Subnet.Name, ipPoolNames[0], skippedAddrs, !podNet.AllowLiveMigration)
if err != nil {
return "", "", "", podNet.Subnet, err
}
Expand Down
1 change: 1 addition & 0 deletions pkg/ipam/ipam.go
Original file line number Diff line number Diff line change
Expand Up @@ -59,6 +59,7 @@ func (ipam *IPAM) GetRandomAddress(podName, nicName string, mac *string, subnetN
}

func (ipam *IPAM) GetStaticAddress(podName, nicName, ip string, mac *string, subnetName string, checkConflict bool) (string, string, string, error) {
klog.Infof("allocating by static ip %s from subnet %s", ip, subnetName)
ipam.mutex.RLock()
defer ipam.mutex.RUnlock()

Expand Down
2 changes: 2 additions & 0 deletions pkg/util/const.go
Original file line number Diff line number Diff line change
Expand Up @@ -13,6 +13,7 @@ const (
CidrAnnotation = "ovn.kubernetes.io/cidr"
GatewayAnnotation = "ovn.kubernetes.io/gateway"
IpPoolAnnotation = "ovn.kubernetes.io/ip_pool"
IpPoolNameAnnotation = "ovn.kubernetes.io/ip_pool_name"
BgpAnnotation = "ovn.kubernetes.io/bgp"
SnatAnnotation = "ovn.kubernetes.io/snat"
EipAnnotation = "ovn.kubernetes.io/eip"
Expand Down Expand Up @@ -73,6 +74,7 @@ const (
CidrAnnotationTemplate = "%s.kubernetes.io/cidr"
GatewayAnnotationTemplate = "%s.kubernetes.io/gateway"
IpPoolAnnotationTemplate = "%s.kubernetes.io/ip_pool"
IpPoolNameAnnotationTemplate = "%s.kubernetes.io/ip_pool_name"
LogicalSwitchAnnotationTemplate = "%s.kubernetes.io/logical_switch"
LogicalRouterAnnotationTemplate = "%s.kubernetes.io/logical_router"
VlanIdAnnotationTemplate = "%s.kubernetes.io/vlan_id"
Expand Down
163 changes: 86 additions & 77 deletions test/e2e/kube-ovn/switch_lb_rule/switch_lb_rule.go
Original file line number Diff line number Diff line change
Expand Up @@ -34,6 +34,51 @@ func generateSubnetName(name string) string {
return "subnet-" + name
}

// netcatSvc probes the given service's cluster IP (or, when isSlr is true,
// the switch-lb-rule VIP slrVip) on the given port from the client pod using
// netcat, handling IPv4-only, IPv6-only and dual-stack clusters.
// It returns the (IPv4-preferred) cluster IP that was probed for a plain
// service; for an SLR probe (isSlr == true) the returned vip is empty,
// since the caller already supplied the VIP.
func netcatSvc(f *framework.Framework, clientPodName, slrVip string, port int32, svc *corev1.Service, isSlr bool) string {
	var vip string
	switch {
	case f.IsIPv6():
		ip := slrVip
		if !isSlr {
			ip = svc.Spec.ClusterIPs[0]
			vip = ip
		}
		cmd := fmt.Sprintf("nc -6nvz %s %d", ip, port)
		ginkgo.By("Waiting for client pod " + clientPodName + " " + cmd + " to be ok")
		netcat(f, clientPodName, cmd)
	case f.IsIPv4():
		ip := slrVip
		if !isSlr {
			ip = svc.Spec.ClusterIPs[0]
			vip = ip
		}
		cmd := fmt.Sprintf("nc -nvz %s %d", ip, port)
		ginkgo.By("Waiting for client pod " + clientPodName + " " + cmd + " to be ok")
		netcat(f, clientPodName, cmd)
	default:
		// Dual-stack cluster: probe both address families for a plain service.
		if !isSlr {
			v4IP := svc.Spec.ClusterIPs[0]
			vip = v4IP
			v4cmd := fmt.Sprintf("nc -nvz %s %d", v4IP, port)
			ginkgo.By("Waiting for client pod " + clientPodName + " " + v4cmd + " to be ok")
			netcat(f, clientPodName, v4cmd)
			v6IP := svc.Spec.ClusterIPs[1]
			v6cmd := fmt.Sprintf("nc -6nvz %s %d", v6IP, port)
			ginkgo.By("Waiting for client pod " + clientPodName + " " + v6cmd + " to be ok")
			netcat(f, clientPodName, v6cmd)
		} else {
			// TODO: SLR does not support dual-stack yet; probe the IPv4 VIP only.
			v4cmd := fmt.Sprintf("nc -nvz %s %d", slrVip, port)
			ginkgo.By("Waiting for client pod " + clientPodName + " " + v4cmd + " to be ok")
			netcat(f, clientPodName, v4cmd)
		}
	}
	return vip
}
func netcat(f *framework.Framework, clientPodName, cmd string) {
framework.Logf("testing %s", cmd)
stdOutput, errOutput, err := framework.ExecShellInPod(context.Background(), f, clientPodName, cmd)
Expand Down Expand Up @@ -149,23 +194,46 @@ var _ = framework.Describe("[group:slr]", func() {
var (
clientPod *corev1.Pod
err error
annotations map[string]string
stsSvc, selSvc, epSvc *corev1.Service
selSlrEps, epSlrEps *corev1.Endpoints
)
replicas := 1
labels := map[string]string{"app": label}
ginkgo.By("Creating statefulset " + stsName + " with subnet " + subnetName)
sts := framework.MakeStatefulSet(stsName, stsSvcName, int32(replicas), labels, podImg)
sts.Spec.Template.Annotations = map[string]string{util.LogicalSwitchAnnotation: subnetName}
command := []string{"sh", "-c", "cd /tmp && python3 -m http.server 80"}
pool := framework.RandomIPs(overlaySubnetV4Cidr, ";", replicas)
ginkgo.By("Creating sts " + stsName + " with ip pool " + pool)
sts.Spec.Template.Annotations = map[string]string{
util.LogicalSwitchAnnotation: subnetName,
util.IpPoolAnnotation: pool,
}
if f.IsIPv4() {
sts.Spec.Template.Spec.Containers[0].Command = []string{"sh", "-c", fmt.Sprintf("cd /tmp && python3 -m http.server %d", backendPort)}
}
if f.IsIPv6() {
ginkgo.By("Waiting for tcp ipv6 sts " + stsSvcName + " to be ready")
command = []string{"sh", "-c", "cd /tmp && python3 -m http.server --bind :: 80"}
sts.Spec.Template.Spec.Containers[0].Command = []string{"sh", "-c", fmt.Sprintf("cd /tmp && python3 -m http.server --bind :: %d", backendPort)}
}
if f.IsDual() {
ipSplits := strings.Split(pool, ",")
sts.Spec.Template.Spec.Containers[0].Command = []string{"sh", "-c", fmt.Sprintf("cd /tmp && python3 -m http.server --bind %s %d", ipSplits[0], backendPort)}
// add ipv6 container
sts.Spec.Template.Spec.Containers = append(sts.Spec.Template.Spec.Containers, sts.Spec.Template.Spec.Containers[0])
sts.Spec.Template.Spec.Containers[1].Command = []string{"sh", "-c", fmt.Sprintf("cd /tmp && python3 -m http.server --bind %s %d", ipSplits[1], backendPort)}
sts.Spec.Template.Spec.Containers[1].Name = "webv6"
sts.Spec.Template.Spec.Containers[0].Env = []corev1.EnvVar{
{
Name: "IPV4_ADDR",
Value: ipSplits[0],
},
}
sts.Spec.Template.Spec.Containers[1].Env = []corev1.EnvVar{
{
Name: "IPV6_ADDR",
Value: ipSplits[1],
},
}
}
sts.Spec.Template.Spec.Containers[0].Command = command
_ = stsClient.CreateSync(sts)

ginkgo.By("Creating service " + stsSvcName)
ports := []corev1.ServicePort{{
Name: "netcat",
Expand All @@ -174,7 +242,7 @@ var _ = framework.Describe("[group:slr]", func() {
TargetPort: intstr.FromInt(80),
}}
selector := map[string]string{"app": label}
annotations = map[string]string{
annotations := map[string]string{
util.LogicalSwitchAnnotation: subnetName,
}
stsSvc = framework.MakeService(stsSvcName, corev1.ServiceTypeClusterIP, annotations, selector, ports, corev1.ServiceAffinityNone)
Expand All @@ -199,32 +267,10 @@ var _ = framework.Describe("[group:slr]", func() {
clientPod, err = podClient.Get(context.TODO(), clientPodName, metav1.GetOptions{})
framework.ExpectNil(err)
framework.ExpectNotNil(clientPod)
var stsV4IP, stsV6IP, v4cmd, v6cmd string
if f.IsIPv6() {
stsV6IP = stsSvc.Spec.ClusterIPs[0]
vip = stsV6IP
v6cmd = fmt.Sprintf("nc -6nvz %s %d", stsV6IP, frontPort)
ginkgo.By("Waiting for client pod " + clientPodName + " nc " + v6cmd + " to be ok")
netcat(f, clientPodName, v6cmd)
} else if f.IsIPv4() {
stsV4IP = stsSvc.Spec.ClusterIPs[0]
vip = stsV4IP
v4cmd = fmt.Sprintf("nc -nvz %s %d", stsV4IP, frontPort)
ginkgo.By("Waiting for client pod " + clientPodName + " nc " + v4cmd + " to be ok")
netcat(f, clientPodName, v4cmd)
} else {
stsV4IP := stsSvc.Spec.ClusterIPs[0]
vip = stsV4IP
stsV6IP = stsSvc.Spec.ClusterIPs[1]
v4cmd = fmt.Sprintf("nc -nvz %s %d", stsV4IP, frontPort)
v6cmd = fmt.Sprintf("nc -6nvz %s %d", stsV6IP, frontPort)
ginkgo.By("Waiting for client pod " + clientPodName + " nc " + v4cmd + " to be ok")
netcat(f, clientPodName, v4cmd)
ginkgo.By("Waiting for client pod " + clientPodName + " nc " + v6cmd + " to be ok")
netcat(f, clientPodName, v6cmd)
}
ginkgo.By("Netcating sts service " + stsSvc.Name)
vip = netcatSvc(f, clientPodName, "", frontPort, stsSvc, false)

ginkgo.By("2. Creating switch-lb-rule with selector")
ginkgo.By("2. Creating switch-lb-rule with selector with lb front vip " + vip)
ginkgo.By("Creating selector SwitchLBRule " + epSlrName)
var (
selRule *kubeovnv1.SwitchLBRule
Expand All @@ -237,7 +283,7 @@ var _ = framework.Describe("[group:slr]", func() {
{
Name: "netcat",
Port: selSlrFrontPort,
TargetPort: 80,
TargetPort: backendPort,
Protocol: "TCP",
},
}
Expand Down Expand Up @@ -310,28 +356,10 @@ var _ = framework.Describe("[group:slr]", func() {
}
}

ginkgo.By("Waiting for selector switch lb " + selSlrName + " to be available")
if f.IsIPv6() {
stsV6IP = stsSvc.Spec.ClusterIPs[0]
v6cmd = fmt.Sprintf("nc -6nvz %s %d", stsV6IP, selSlrFrontPort)
ginkgo.By("Waiting for client pod " + clientPodName + " nc " + v6cmd + " to be ok")
netcat(f, clientPodName, v6cmd)
} else if f.IsIPv4() {
stsV4IP = stsSvc.Spec.ClusterIPs[0]
v4cmd = fmt.Sprintf("nc -nvz %s %d", stsV4IP, selSlrFrontPort)
ginkgo.By("Waiting for client pod " + clientPodName + " nc " + v4cmd + " to be ok")
netcat(f, clientPodName, v4cmd)
} else {
stsV4IP := stsSvc.Spec.ClusterIPs[0]
stsV6IP = stsSvc.Spec.ClusterIPs[1]
v4cmd = fmt.Sprintf("nc -nvz %s %d", stsV4IP, selSlrFrontPort)
v6cmd = fmt.Sprintf("nc -6nvz %s %d", stsV6IP, selSlrFrontPort)
ginkgo.By("Waiting for client pod " + clientPodName + " nc " + v4cmd + " to be ok")
netcat(f, clientPodName, v4cmd)
ginkgo.By("Waiting for client pod " + clientPodName + " nc " + v6cmd + " to be ok")
netcat(f, clientPodName, v6cmd)
}
ginkgo.By("3. Creating switch-lb-rule with endpoints")
ginkgo.By("Netcating selector switch lb service " + selSvc.Name)
netcatSvc(f, clientPodName, vip, selSlrFrontPort, selSvc, true)

ginkgo.By("3. Creating switch-lb-rule with endpoints with lb front vip " + vip)
ginkgo.By("Creating endpoint SwitchLBRule " + epSlrName)
sessionAffinity = corev1.ServiceAffinityClientIP
epPorts = []kubeovnv1.SlrPort{
Expand Down Expand Up @@ -410,26 +438,7 @@ var _ = framework.Describe("[group:slr]", func() {
framework.ExpectEqual(protocols[port.TargetPort], port.Protocol)
}
}
ginkgo.By("Waiting for endpoint switch lb " + selSlrName + " to be available")
if f.IsIPv6() {
stsV6IP = stsSvc.Spec.ClusterIPs[0]
v6cmd = fmt.Sprintf("nc -6nvz %s %d", stsV6IP, epSlrFrontPort)
ginkgo.By("Waiting for client pod " + clientPodName + " nc " + v6cmd + " to be ok")
netcat(f, clientPodName, v6cmd)
} else if f.IsIPv4() {
stsV4IP = stsSvc.Spec.ClusterIPs[0]
v4cmd = fmt.Sprintf("nc -nvz %s %d", stsV4IP, epSlrFrontPort)
ginkgo.By("Waiting for client pod " + clientPodName + " nc " + v4cmd + " to be ok")
netcat(f, clientPodName, v4cmd)
} else {
stsV4IP := stsSvc.Spec.ClusterIPs[0]
stsV6IP = stsSvc.Spec.ClusterIPs[1]
v4cmd = fmt.Sprintf("nc -nvz %s %d", stsV4IP, epSlrFrontPort)
v6cmd = fmt.Sprintf("nc -6nvz %s %d", stsV6IP, epSlrFrontPort)
ginkgo.By("Waiting for client pod " + clientPodName + " nc " + v4cmd + " to be ok")
netcat(f, clientPodName, v4cmd)
ginkgo.By("Waiting for client pod " + clientPodName + " nc " + v6cmd + " to be ok")
netcat(f, clientPodName, v6cmd)
}
ginkgo.By("Netcating endpoint switch lb service " + epSvc.Name)
netcatSvc(f, clientPodName, vip, epSlrFrontPort, epSvc, true)
})
})

0 comments on commit 3179884

Please sign in to comment.