e2e: do not import pkg/daemon #3055

Merged: 3 commits, Jul 24, 2023
Changes from all commits
7 changes: 2 additions & 5 deletions .github/workflows/scheduled-e2e.yaml
@@ -1360,16 +1360,13 @@ jobs:
sudo cp -r /root/.kube/ ~/.kube/
sudo chown -R $(id -un). ~/.kube/

- name: Install Kube-OVN
- name: Install Kube-OVN with VPC NAT gateway enabled
working-directory: test/e2e/kube-ovn/branches/${{ matrix.branch }}
run: |
version=$(grep -E '^VERSION="v([0-9]+\.){2}[0-9]+"$' dist/images/install.sh | head -n1 | awk -F= '{print $2}' | tr -d '"')
docker pull kubeovn/kube-ovn:$version
docker pull kubeovn/vpc-nat-gateway:$version
VERSION=$version make kind-install

- name: Install vpc-nat-gw
run: make kind-install-vpc-nat-gw
VERSION=$version make kind-install-vpc-nat-gw

- name: Run E2E
run: make iptables-vpc-nat-gw-conformance-e2e
30 changes: 15 additions & 15 deletions pkg/daemon/gateway_linux.go
@@ -39,26 +39,26 @@ const (
)

const (
NAT = "nat"
MANGLE = "mangle"
Prerouting = "PREROUTING"
Postrouting = "POSTROUTING"
Output = "OUTPUT"
OvnPrerouting = "OVN-PREROUTING"
OvnPostrouting = "OVN-POSTROUTING"
OvnOutput = "OVN-OUTPUT"
OvnMasquerade = "OVN-MASQUERADE"
OvnNatOutGoingPolicy = "OVN-NAT-POLICY"
OvnNatOutGoingPolicySubnet = "OVN-NAT-PSUBNET-"
NAT = util.NAT
MANGLE = util.Mangle
Prerouting = util.Prerouting
Postrouting = util.Postrouting
Output = util.Output
OvnPrerouting = util.OvnPrerouting
OvnPostrouting = util.OvnPostrouting
OvnOutput = util.OvnOutput
OvnMasquerade = util.OvnMasquerade
OvnNatOutGoingPolicy = util.OvnNatOutGoingPolicy
OvnNatOutGoingPolicySubnet = util.OvnNatOutGoingPolicySubnet
)

const (
OnOutGoingNatMark = "0x90001/0x90001"
OnOutGoingForwardMark = "0x90002/0x90002"
TProxyOutputMark = 0x90003
TProxyOutputMask = 0x90003
TProxyPreroutingMark = 0x90004
TProxyPreroutingMask = 0x90004
TProxyOutputMark = util.TProxyOutputMark
TProxyOutputMask = util.TProxyOutputMask
TProxyPreroutingMark = util.TProxyPreroutingMark
TProxyPreroutingMask = util.TProxyPreroutingMask
)

type policyRouteMeta struct {
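The iptables table, chain, and TProxy mark constants above are now plain aliases of the pkg/util definitions, so code outside the daemon (the e2e framework in particular) can name them without importing pkg/daemon. A minimal sketch of the post-change usage, assuming only the util identifiers introduced in this PR:

package main

import (
	"fmt"

	"github.com/kubeovn/kube-ovn/pkg/util"
)

func main() {
	// Before this PR the e2e tests referenced daemon.MANGLE, daemon.OvnOutput, etc.;
	// the same values are now exported from pkg/util.
	fmt.Println(util.Mangle, util.OvnPrerouting, util.OvnPostrouting, util.OvnOutput)
}
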
17 changes: 17 additions & 0 deletions pkg/util/const.go
@@ -255,6 +255,23 @@ const (
NatPolicyRuleActionForward = "forward"
NatPolicyRuleIDLength = 12

NAT = "nat"
Mangle = "mangle"
Prerouting = "PREROUTING"
Postrouting = "POSTROUTING"
Output = "OUTPUT"
OvnPrerouting = "OVN-PREROUTING"
OvnPostrouting = "OVN-POSTROUTING"
OvnOutput = "OVN-OUTPUT"
OvnMasquerade = "OVN-MASQUERADE"
OvnNatOutGoingPolicy = "OVN-NAT-POLICY"
OvnNatOutGoingPolicySubnet = "OVN-NAT-PSUBNET-"

TProxyListenPort = 8102
TProxyRouteTable = 10001

TProxyOutputMark = 0x90003
TProxyOutputMask = 0x90003
TProxyPreroutingMark = 0x90004
TProxyPreroutingMask = 0x90004
)
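For reference, the TProxy mark/mask pairs added here render as 0x90003/0x90003 (OUTPUT) and 0x90004/0x90004 (PREROUTING) when formatted with %#x/%#x, which is how the e2e assertions below build the --set-xmark and --tproxy-mark arguments. A small sketch using only these constants:

package main

import (
	"fmt"

	"github.com/kubeovn/kube-ovn/pkg/util"
)

func main() {
	// Matches the --set-xmark / --tproxy-mark arguments asserted in vpc_pod_probe.go below.
	outputMark := fmt.Sprintf("%#x/%#x", util.TProxyOutputMark, util.TProxyOutputMask)             // "0x90003/0x90003"
	preroutingMark := fmt.Sprintf("%#x/%#x", util.TProxyPreroutingMark, util.TProxyPreroutingMask) // "0x90004/0x90004"
	fmt.Println(outputMark, preroutingMark)
}
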
1 change: 0 additions & 1 deletion test/e2e/framework/image.go
@@ -4,5 +4,4 @@ const (
PauseImage = "kubeovn/pause:3.2"
BusyBoxImage = "busybox:stable"
AgnhostImage = "kubeovn/agnhost:2.43"
NginxImage = "nginx:latest"
)
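NginxImage is dropped because the probe tests below now start an agnhost pod running netexec on a randomized port instead of nginx on port 80. A sketch of that pattern, reusing only helpers visible in this diff (framework.MakePod, framework.AgnhostImage, util.LogicalSwitchAnnotation) and assuming MakePod returns a *corev1.Pod as its usage below suggests:

package sketch

import (
	"math/rand"
	"strconv"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/intstr"

	"github.com/kubeovn/kube-ovn/pkg/util"
	"github.com/kubeovn/kube-ovn/test/e2e/framework"
)

// makeNetexecProbePod mirrors the pattern used in vpc_pod_probe.go: an agnhost pod
// serving HTTP on a random port, with a readiness probe pointed at that port.
func makeNetexecProbePod(namespaceName, podName, subnetName string) *corev1.Pod {
	port := 8000 + rand.Intn(1000)
	args := []string{"netexec", "--http-port", strconv.Itoa(port)}
	annotations := map[string]string{util.LogicalSwitchAnnotation: subnetName}
	pod := framework.MakePod(namespaceName, podName, nil, annotations, framework.AgnhostImage, nil, args)
	pod.Spec.Containers[0].ReadinessProbe = &corev1.Probe{
		ProbeHandler: corev1.ProbeHandler{
			HTTPGet: &corev1.HTTPGetAction{Port: intstr.FromInt(port)},
		},
	}
	return pod
}
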
1 change: 1 addition & 0 deletions test/e2e/kube-ovn/ipam/ipam.go
@@ -253,6 +253,7 @@ var _ = framework.Describe("[group:ipam]", func() {
}

for replicas := 1; replicas <= 3; replicas++ {
stsName = "sts-" + framework.RandomSuffix()
ippool := framework.RandomIPs(cidr, ippoolSep, replicas)
labels := map[string]string{"app": stsName}

126 changes: 62 additions & 64 deletions test/e2e/kube-ovn/pod/vpc_pod_probe.go
@@ -2,15 +2,17 @@ package pod

import (
"fmt"
"time"
"math/rand"
"net"
"strconv"
"strings"

"github.com/onsi/ginkgo/v2"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/intstr"
clientset "k8s.io/client-go/kubernetes"

apiv1 "github.com/kubeovn/kube-ovn/pkg/apis/kubeovn/v1"
"github.com/kubeovn/kube-ovn/pkg/daemon"
"github.com/kubeovn/kube-ovn/pkg/util"
"github.com/kubeovn/kube-ovn/test/e2e/framework"
"github.com/kubeovn/kube-ovn/test/e2e/framework/iptables"
@@ -21,6 +23,7 @@ var _ = framework.SerialDescribe("[group:pod]", func() {

var cs clientset.Interface
var podClient *framework.PodClient
var eventClient *framework.EventClient
var subnetClient *framework.SubnetClient
var vpcClient *framework.VpcClient
var namespaceName, subnetName, podName, vpcName string
@@ -31,6 +34,7 @@ var _ = framework.SerialDescribe("[group:pod]", func() {
ginkgo.BeforeEach(func() {
cs = f.ClientSet
podClient = f.PodClient()
eventClient = f.EventClient()
subnetClient = f.SubnetClient()
namespaceName = f.Namespace.Name
subnetName = "subnet-" + framework.RandomSuffix()
@@ -64,7 +68,7 @@ var _ = framework.SerialDescribe("[group:pod]", func() {
}
})

framework.ConformanceIt("should support http and tcp liveness probe and readiness probe in custom vpc pod ", func() {
framework.ConformanceIt("should support http and tcp readiness probe in custom vpc pod", func() {
f.SkipVersionPriorTo(1, 12, "This feature was introduced in v1.12")
daemonSetClient := f.DaemonSetClientNS(framework.KubeOvnNamespace)
originDs := daemonSetClient.Get("kube-ovn-cni")
@@ -94,108 +98,102 @@ var _ = framework.SerialDescribe("[group:pod]", func() {
subnet := framework.MakeSubnet(custVPCSubnetName, "", cidr, "", vpcName, "", nil, nil, nil)
_ = subnetClient.CreateSync(subnet)

ginkgo.By("Creating pod with HTTP liveness and readiness probe that port is accessible " + podName)
pod := framework.MakePod(namespaceName, podName, nil, map[string]string{util.LogicalSwitchAnnotation: custVPCSubnetName}, framework.NginxImage, nil, nil)

ginkgo.By("Creating pod with HTTP readiness probe that port is accessible " + podName)
port := 8000 + rand.Intn(1000)
portStr := strconv.Itoa(port)
args := []string{"netexec", "--http-port", portStr}
pod := framework.MakePod(namespaceName, podName, nil, map[string]string{util.LogicalSwitchAnnotation: custVPCSubnetName}, framework.AgnhostImage, nil, args)
pod.Spec.Containers[0].ReadinessProbe = &corev1.Probe{
ProbeHandler: corev1.ProbeHandler{
HTTPGet: &corev1.HTTPGetAction{
Port: intstr.FromInt(80),
Port: intstr.FromInt(port),
},
},
}
pod.Spec.Containers[0].LivenessProbe = &corev1.Probe{
ProbeHandler: corev1.ProbeHandler{
HTTPGet: &corev1.HTTPGetAction{
Port: intstr.FromInt(80),
},
},
}

pod = podClient.CreateSync(pod)
framework.ExpectEqual(pod.Status.ContainerStatuses[0].Ready, true)
checkTProxyRules(f, pod, 80, true)
checkTProxyRules(f, pod, port, true)

ginkgo.By("Deleting pod " + podName)
podClient.DeleteSync(podName)

ginkgo.By("Creating pod with HTTP liveness and readiness probe that port is not accessible " + podName)
pod = framework.MakePod(namespaceName, podName, nil, map[string]string{util.LogicalSwitchAnnotation: custVPCSubnetName}, framework.NginxImage, nil, nil)
ginkgo.By("Creating pod with HTTP readiness probe that port is not accessible " + podName)
pod = framework.MakePod(namespaceName, podName, nil, map[string]string{util.LogicalSwitchAnnotation: custVPCSubnetName}, framework.AgnhostImage, nil, args)
pod.Spec.Containers[0].ReadinessProbe = &corev1.Probe{
ProbeHandler: corev1.ProbeHandler{
HTTPGet: &corev1.HTTPGetAction{
Port: intstr.FromInt(81),
Port: intstr.FromInt(port + 1),
},
},
}
pod.Spec.Containers[0].LivenessProbe = &corev1.Probe{
ProbeHandler: corev1.ProbeHandler{
HTTPGet: &corev1.HTTPGetAction{
Port: intstr.FromInt(81),
},
},
_ = podClient.Create(pod)

ginkgo.By("Waiting for pod readiness probe failure")
events := eventClient.WaitToHaveEvent("Pod", podName, "Warning", "Unhealthy", "kubelet", "")
var found bool
for _, event := range events {
if strings.Contains(event.Message, "Readiness probe failed") {
found = true
framework.Logf("Found pod event: %s", event.Message)
break
}
}
framework.ExpectTrue(found, "Pod readiness probe is expected to fail")

_ = podClient.Create(pod)
time.Sleep(5 * time.Second)
pod = podClient.GetPod(podName)
checkTProxyRules(f, pod, port+1, true)

framework.ExpectEqual(pod.Status.ContainerStatuses[0].Ready, false)
checkTProxyRules(f, pod, 81, true)
ginkgo.By("Deleting pod " + podName)
podClient.DeleteSync(podName)

ginkgo.By("Creating pod with TCP probe liveness and readiness probe that port is accessible " + podName)
pod = framework.MakePod(namespaceName, podName, nil, map[string]string{util.LogicalSwitchAnnotation: custVPCSubnetName}, framework.NginxImage, nil, nil)
ginkgo.By("Creating pod with TCP readiness probe that port is accessible " + podName)
pod = framework.MakePod(namespaceName, podName, nil, map[string]string{util.LogicalSwitchAnnotation: custVPCSubnetName}, framework.AgnhostImage, nil, args)
pod.Spec.Containers[0].ReadinessProbe = &corev1.Probe{
ProbeHandler: corev1.ProbeHandler{
TCPSocket: &corev1.TCPSocketAction{
Port: intstr.FromInt(80),
},
},
}
pod.Spec.Containers[0].LivenessProbe = &corev1.Probe{
ProbeHandler: corev1.ProbeHandler{
TCPSocket: &corev1.TCPSocketAction{
Port: intstr.FromInt(80),
Port: intstr.FromInt(port),
},
},
}

pod = podClient.CreateSync(pod)
framework.ExpectEqual(pod.Status.ContainerStatuses[0].Ready, true)
checkTProxyRules(f, pod, port, true)

checkTProxyRules(f, pod, 80, true)
ginkgo.By("Deleting pod " + podName)
podClient.DeleteSync(podName)

ginkgo.By("Creating pod with TCP probe liveness and readiness probe that port is not accessible " + podName)
pod = framework.MakePod(namespaceName, podName, nil, map[string]string{util.LogicalSwitchAnnotation: custVPCSubnetName}, framework.NginxImage, nil, nil)
ginkgo.By("Creating pod with TCP readiness probe that port is not accessible " + podName)
pod = framework.MakePod(namespaceName, podName, nil, map[string]string{util.LogicalSwitchAnnotation: custVPCSubnetName}, framework.AgnhostImage, nil, args)
pod.Spec.Containers[0].ReadinessProbe = &corev1.Probe{
ProbeHandler: corev1.ProbeHandler{
TCPSocket: &corev1.TCPSocketAction{
Port: intstr.FromInt(81),
Port: intstr.FromInt(port - 1),
},
},
}
pod.Spec.Containers[0].LivenessProbe = &corev1.Probe{
ProbeHandler: corev1.ProbeHandler{
TCPSocket: &corev1.TCPSocketAction{
Port: intstr.FromInt(81),
},
},
}

_ = podClient.Create(pod)
time.Sleep(5 * time.Second)
podClient.WaitForRunning(podName)

ginkgo.By("Waiting for pod readiness probe failure")
events = eventClient.WaitToHaveEvent("Pod", podName, "Warning", "Unhealthy", "kubelet", "")
found = false
for _, event := range events {
if strings.Contains(event.Message, "Readiness probe failed") {
found = true
framework.Logf("Found pod event: %s", event.Message)
break
}
}
framework.ExpectTrue(found, "Pod readiness probe is expected to fail")

pod = podClient.GetPod(podName)
framework.ExpectEqual(pod.Status.ContainerStatuses[0].Ready, false)
checkTProxyRules(f, pod, 81, false)
checkTProxyRules(f, pod, port-1, false)
})
})

func checkTProxyRules(f *framework.Framework, pod *corev1.Pod, probePort int, exist bool) {

nodeName := pod.Spec.NodeName
tProxyOutputMarkMask := fmt.Sprintf("%#x/%#x", daemon.TProxyOutputMark, daemon.TProxyOutputMask)
tProxyPreRoutingMarkMask := fmt.Sprintf("%#x/%#x", daemon.TProxyPreroutingMark, daemon.TProxyPreroutingMask)
tProxyOutputMarkMask := fmt.Sprintf("%#x/%#x", util.TProxyOutputMark, util.TProxyOutputMask)
tProxyPreRoutingMarkMask := fmt.Sprintf("%#x/%#x", util.TProxyPreroutingMark, util.TProxyPreroutingMask)

isZeroIP := false
if len(pod.Status.PodIPs) == 2 {
@@ -207,20 +205,20 @@ func checkTProxyRules(f *framework.Framework, pod *corev1.Pod, probePort int, ex
expectedRules := []string{
fmt.Sprintf(`-A OVN-OUTPUT -d %s/32 -p tcp -m tcp --dport %d -j MARK --set-xmark %s`, podIP.IP, probePort, tProxyOutputMarkMask),
}
iptables.CheckIptablesRulesOnNode(f, nodeName, daemon.MANGLE, daemon.OvnOutput, apiv1.ProtocolIPv4, expectedRules, exist)
iptables.CheckIptablesRulesOnNode(f, nodeName, util.Mangle, util.OvnOutput, apiv1.ProtocolIPv4, expectedRules, exist)
hostIP := pod.Status.HostIP
if isZeroIP {
hostIP = "0.0.0.0"
hostIP = net.IPv4zero.String()
}
expectedRules = []string{
fmt.Sprintf(`-A OVN-PREROUTING -d %s/32 -p tcp -m tcp --dport %d -j TPROXY --on-port %d --on-ip %s --tproxy-mark %s`, podIP.IP, probePort, util.TProxyListenPort, hostIP, tProxyPreRoutingMarkMask),
}
iptables.CheckIptablesRulesOnNode(f, nodeName, daemon.MANGLE, daemon.OvnPrerouting, apiv1.ProtocolIPv4, expectedRules, exist)
iptables.CheckIptablesRulesOnNode(f, nodeName, util.Mangle, util.OvnPrerouting, apiv1.ProtocolIPv4, expectedRules, exist)
} else if util.CheckProtocol(podIP.IP) == apiv1.ProtocolIPv6 {
expectedRules := []string{
fmt.Sprintf(`-A OVN-OUTPUT -d %s/128 -p tcp -m tcp --dport %d -j MARK --set-xmark %s`, podIP.IP, probePort, tProxyOutputMarkMask),
}
iptables.CheckIptablesRulesOnNode(f, nodeName, daemon.MANGLE, daemon.OvnOutput, apiv1.ProtocolIPv6, expectedRules, exist)
iptables.CheckIptablesRulesOnNode(f, nodeName, util.Mangle, util.OvnOutput, apiv1.ProtocolIPv6, expectedRules, exist)

hostIP := pod.Status.HostIP
if isZeroIP {
@@ -229,7 +227,7 @@ func checkTProxyRules(f *framework.Framework, pod *corev1.Pod, probePort int, ex
expectedRules = []string{
fmt.Sprintf(`-A OVN-PREROUTING -d %s/128 -p tcp -m tcp --dport %d -j TPROXY --on-port %d --on-ip %s --tproxy-mark %s`, podIP.IP, probePort, util.TProxyListenPort, hostIP, tProxyPreRoutingMarkMask),
}
iptables.CheckIptablesRulesOnNode(f, nodeName, daemon.MANGLE, daemon.OvnPrerouting, apiv1.ProtocolIPv6, expectedRules, exist)
iptables.CheckIptablesRulesOnNode(f, nodeName, util.Mangle, util.OvnPrerouting, apiv1.ProtocolIPv6, expectedRules, exist)
}
}
}
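To make the assertion above concrete: for a hypothetical IPv4 pod at 10.0.1.5 on a node with host IP 172.18.0.2 and a probe port of 8443 (made-up values, not from this PR), the expected rules would render as in this sketch, built from the same format strings and util constants used by checkTProxyRules:

package main

import (
	"fmt"

	"github.com/kubeovn/kube-ovn/pkg/util"
)

func main() {
	// Hypothetical addresses and port, purely for illustration; the test derives
	// them from the pod status and the randomized netexec port.
	podIP, hostIP, probePort := "10.0.1.5", "172.18.0.2", 8443
	outputMark := fmt.Sprintf("%#x/%#x", util.TProxyOutputMark, util.TProxyOutputMask)
	preroutingMark := fmt.Sprintf("%#x/%#x", util.TProxyPreroutingMark, util.TProxyPreroutingMask)
	fmt.Printf("-A OVN-OUTPUT -d %s/32 -p tcp -m tcp --dport %d -j MARK --set-xmark %s\n",
		podIP, probePort, outputMark)
	fmt.Printf("-A OVN-PREROUTING -d %s/32 -p tcp -m tcp --dport %d -j TPROXY --on-port %d --on-ip %s --tproxy-mark %s\n",
		podIP, probePort, util.TProxyListenPort, hostIP, preroutingMark)
}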