fix lsp not updated correctly when logical switch is changed (#4015)

Signed-off-by: zhangzujian <[email protected]>
zhangzujian authored May 14, 2024
1 parent a57d041 commit b8d77e2
Showing 5 changed files with 199 additions and 46 deletions.
25 changes: 15 additions & 10 deletions pkg/ovs/ovn-nb-logical_switch_port.go
@@ -89,30 +89,35 @@ func buildLogicalSwitchPort(lspName, lsName, ip, mac, podName, namespace string,
 }
 
 func (c *OVNNbClient) CreateLogicalSwitchPort(lsName, lspName, ip, mac, podName, namespace string, portSecurity bool, securityGroups, vips string, enableDHCP bool, dhcpOptions *DHCPOptionsUUIDs, vpc string) error {
-	exist, err := c.LogicalSwitchPortExists(lspName)
+	existingLsp, err := c.GetLogicalSwitchPort(lspName, true)
 	if err != nil {
 		klog.Error(err)
 		return err
 	}
 
-	// update if exists
-	if exist {
-		lsp := buildLogicalSwitchPort(lspName, lsName, ip, mac, podName, namespace, portSecurity, securityGroups, vips, enableDHCP, dhcpOptions, vpc)
-		if err := c.UpdateLogicalSwitchPort(lsp, &lsp.Addresses, &lsp.Dhcpv4Options, &lsp.Dhcpv6Options, &lsp.PortSecurity, &lsp.ExternalIDs); err != nil {
-			klog.Error(err)
-			return fmt.Errorf("failed to update logical switch port %s: %v", lspName, err)
-		}
-		return nil
-	}
-
-	lsp := buildLogicalSwitchPort(lspName, lsName, ip, mac, podName, namespace, portSecurity, securityGroups, vips, enableDHCP, dhcpOptions, vpc)
-	ops, err := c.CreateLogicalSwitchPortOp(lsp, lsName)
+	var ops []ovsdb.Operation
+	lsp := buildLogicalSwitchPort(lspName, lsName, ip, mac, podName, namespace, portSecurity, securityGroups, vips, enableDHCP, dhcpOptions, vpc)
+	if existingLsp != nil {
+		// update in place if the port already belongs to the target logical switch
+		if existingLsp.ExternalIDs[logicalSwitchKey] == lsName {
+			if err := c.UpdateLogicalSwitchPort(lsp, &lsp.Addresses, &lsp.Dhcpv4Options, &lsp.Dhcpv6Options, &lsp.PortSecurity, &lsp.ExternalIDs); err != nil {
+				klog.Error(err)
+				return fmt.Errorf("failed to update logical switch port %s: %v", lspName, err)
+			}
+			return nil
+		}
+		// otherwise detach the port from the logical switch it is currently on
+		if ops, err = c.LogicalSwitchUpdatePortOp(existingLsp.ExternalIDs[logicalSwitchKey], existingLsp.UUID, ovsdb.MutateOperationDelete); err != nil {
+			klog.Error(err)
+			return err
+		}
+	}
+
+	createLspOps, err := c.CreateLogicalSwitchPortOp(lsp, lsName)
 	if err != nil {
 		klog.Error(err)
 		return fmt.Errorf("generate operations for creating logical switch port %s: %v", lspName, err)
 	}
 
-	if err = c.Transact("lsp-add", ops); err != nil {
+	if err = c.Transact("lsp-add", append(ops, createLspOps...)); err != nil {
 		return fmt.Errorf("create logical switch port %s: %v", lspName, err)
 	}
 
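The substance of the fix is in the hunk above. Previously an existing LSP was always updated in place, even when its logicalSwitchKey external ID said it belonged to a different logical switch, so a changed switch never took effect. Now a port found on another switch is detached from it (an OVSDB mutate-delete against the old switch) and re-created on the target switch, with both operations committed in the single "lsp-add" transaction. Below is a minimal stand-alone sketch of that detach-and-attach-in-one-transaction idea; the Switch type and the op helpers are hypothetical in-memory stand-ins, not the real ovsdb.Operation machinery:

```go
package main

import "fmt"

// Switch is a toy stand-in for an OVN logical switch row; the real code
// mutates the switch's ports column through ovsdb operations instead.
type Switch struct {
	Name  string
	Ports map[string]bool
}

// op is one step of a transaction.
type op func()

// detachOp drops the port from the switch it currently belongs to,
// mirroring the mutate-delete on the old switch.
func detachOp(sw *Switch, port string) op {
	return func() { delete(sw.Ports, port) }
}

// createOp attaches the port to the target switch, mirroring the
// create half of the transaction.
func createOp(sw *Switch, port string) op {
	return func() { sw.Ports[port] = true }
}

// transact applies all operations as one unit, like the single Transact call.
func transact(ops []op) {
	for _, o := range ops {
		o()
	}
}

func main() {
	ls1 := &Switch{Name: "ls1", Ports: map[string]bool{"lsp1": true}}
	ls2 := &Switch{Name: "ls2", Ports: map[string]bool{}}

	// lsp1 moves from ls1 to ls2: detach and create travel together,
	// so there is no intermediate state with the port on both switches.
	transact([]op{detachOp(ls1, "lsp1"), createOp(ls2, "lsp1")})

	fmt.Println(ls1.Ports, ls2.Ports) // map[] map[lsp1:true]
}
```

Bundling the delete mutation with the create operations is what keeps the northbound database consistent if the client dies between the two steps.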
@@ -22,7 +22,6 @@ var _ = framework.Describe("[group:pod]", func() {
 	var podClient *framework.PodClient
 	var subnetClient *framework.SubnetClient
 	var namespaceName, subnetName, podName string
-	var subnet *apiv1.Subnet
 	var cidr, image string
 
 	ginkgo.BeforeEach(func() {
@@ -36,10 +35,6 @@ var _ = framework.Describe("[group:pod]", func() {
 		if image == "" {
 			image = framework.GetKubeOvnImage(cs)
 		}
-
-		ginkgo.By("Creating subnet " + subnetName)
-		subnet = framework.MakeSubnet(subnetName, "", cidr, "", "", "", nil, nil, []string{namespaceName})
-		subnet = subnetClient.CreateSync(subnet)
 	})
 	ginkgo.AfterEach(func() {
 		ginkgo.By("Deleting pod " + podName)
@@ -70,6 +65,10 @@ var _ = framework.Describe("[group:pod]", func() {
 		buff, err := json.Marshal(routes)
 		framework.ExpectNoError(err)
 
+		ginkgo.By("Creating subnet " + subnetName)
+		subnet := framework.MakeSubnet(subnetName, "", cidr, "", "", "", nil, nil, []string{namespaceName})
+		subnet = subnetClient.CreateSync(subnet)
+
 		ginkgo.By("Creating pod " + podName)
 		annotations := map[string]string{
 			util.RoutesAnnotation: string(buff),
@@ -78,18 +77,21 @@
 		pod := framework.MakePod(namespaceName, podName, nil, annotations, image, cmd, nil)
 		pod = podClient.CreateSync(pod)
 
+		ginkgo.By("Validating pod annotations")
 		framework.ExpectHaveKeyWithValue(pod.Annotations, util.AllocatedAnnotation, "true")
 		framework.ExpectHaveKeyWithValue(pod.Annotations, util.CidrAnnotation, subnet.Spec.CIDRBlock)
 		framework.ExpectHaveKeyWithValue(pod.Annotations, util.GatewayAnnotation, subnet.Spec.Gateway)
 		framework.ExpectHaveKeyWithValue(pod.Annotations, util.LogicalSwitchAnnotation, subnet.Name)
 		framework.ExpectMAC(pod.Annotations[util.MacAddressAnnotation])
 		framework.ExpectHaveKeyWithValue(pod.Annotations, util.RoutedAnnotation, "true")
 
+		ginkgo.By("Getting pod routes")
 		podRoutes, err := iproute.RouteShow("", "eth0", func(cmd ...string) ([]byte, []byte, error) {
 			return framework.KubectlExec(pod.Namespace, pod.Name, cmd...)
 		})
 		framework.ExpectNoError(err)
 
+		ginkgo.By("Validating pod routes")
 		actualRoutes := make([]request.Route, len(podRoutes))
 		for _, r := range podRoutes {
 			if r.Gateway != "" || r.Dst != "" {
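For context on the route check above: iproute.RouteShow is kube-ovn's wrapper that runs ip route inside the pod (here via kubectl exec) and parses the output into request.Route values. Assuming iproute2's JSON mode (ip -j), which emits objects with dst and gateway keys, a rough stand-alone equivalent looks like this; the Route struct is trimmed to the two fields the test compares:

```go
package main

import (
	"encoding/json"
	"fmt"
	"os/exec"
)

// Route keeps only the fields the e2e test asserts on; `ip -j` emits
// more (dev, protocol, scope, ...) which json.Unmarshal simply ignores.
type Route struct {
	Dst     string `json:"dst"`
	Gateway string `json:"gateway"`
}

func main() {
	// JSON output of the routing table for eth0, as seen inside the pod.
	out, err := exec.Command("ip", "-j", "route", "show", "dev", "eth0").Output()
	if err != nil {
		panic(err)
	}

	var routes []Route
	if err := json.Unmarshal(out, &routes); err != nil {
		panic(err)
	}
	for _, r := range routes {
		if r.Gateway != "" || r.Dst != "" { // same filter as the test
			fmt.Printf("dst=%q gateway=%q\n", r.Dst, r.Gateway)
		}
	}
}
```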
109 changes: 109 additions & 0 deletions test/e2e/kube-ovn/pod/pod_recreation.go
@@ -0,0 +1,109 @@
package pod

import (
	"cmp"
	"context"
	"strings"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"github.com/onsi/ginkgo/v2"

	"github.com/kubeovn/kube-ovn/pkg/ovs"
	"github.com/kubeovn/kube-ovn/pkg/util"
	"github.com/kubeovn/kube-ovn/test/e2e/framework"
)

var _ = framework.SerialDescribe("[group:pod]", func() {
	f := framework.NewDefaultFramework("pod")

	var podClient *framework.PodClient
	var namespaceName, podName string

	ginkgo.BeforeEach(func() {
		podClient = f.PodClient()
		namespaceName = f.Namespace.Name
		podName = "pod-" + framework.RandomSuffix()
	})
	ginkgo.AfterEach(func() {
		ginkgo.By("Deleting pod " + podName)
		podClient.DeleteSync(podName)
	})

	framework.ConformanceIt("should handle pod creation while kube-ovn-controller is down", func() {
		ginkgo.By("Creating pod " + podName)
		pod := framework.MakePod(namespaceName, podName, nil, nil, framework.PauseImage, nil, nil)
		pod = podClient.CreateSync(pod)

		ginkgo.By("Validating pod annotations")
		framework.ExpectHaveKeyWithValue(pod.Annotations, util.AllocatedAnnotation, "true")
		framework.ExpectMAC(pod.Annotations[util.MacAddressAnnotation])
		framework.ExpectHaveKeyWithValue(pod.Annotations, util.RoutedAnnotation, "true")
		mac := pod.Annotations[util.MacAddressAnnotation]

		portName := ovs.PodNameToPortName(podName, pod.Namespace, util.OvnProvider)
		ginkgo.By("Getting ips " + portName)
		ipClient := f.IPClient()
		ip := ipClient.Get(portName)

		ginkgo.By("Validating ips " + ip.Name)
		framework.ExpectEqual(ip.Spec.MacAddress, mac)
		framework.ExpectEqual(ip.Spec.IPAddress, pod.Annotations[util.IPAddressAnnotation])

		ginkgo.By("Getting deployment kube-ovn-controller")
		deployClient := f.DeploymentClientNS(framework.KubeOvnNamespace)
		deploy := deployClient.Get("kube-ovn-controller")
		framework.ExpectNotNil(deploy.Spec.Replicas)

		ginkgo.By("Getting kube-ovn-controller pods")
		kubePodClient := f.PodClientNS(framework.KubeOvnNamespace)
		pods, err := kubePodClient.List(context.Background(), metav1.ListOptions{LabelSelector: metav1.FormatLabelSelector(deploy.Spec.Selector)})
		framework.ExpectNoError(err, "failed to list kube-ovn-controller pods")
		framework.ExpectNotNil(pods)
		podNames := make([]string, 0, len(pods.Items))
		for _, pod := range pods.Items {
			podNames = append(podNames, pod.Name)
		}
		framework.Logf("Got kube-ovn-controller pods: %s", strings.Join(podNames, ", "))

		ginkgo.By("Stopping kube-ovn-controller by setting its replicas to zero")
		deployClient.SetScale(deploy.Name, 0)

		ginkgo.By("Waiting for kube-ovn-controller pods to disappear")
		for _, pod := range podNames {
			ginkgo.By("Waiting for pod " + pod + " to disappear")
			kubePodClient.WaitForNotFound(pod)
		}

		ginkgo.By("Deleting pod " + podName)
		podClient.DeleteSync(podName)

		ginkgo.By("Recreating pod " + podName)
		pod = framework.MakePod(namespaceName, podName, nil, nil, framework.PauseImage, nil, nil)
		_ = podClient.Create(pod)

		ginkgo.By("Starting kube-ovn-controller by restoring its replicas")
		deployClient.SetScale(deploy.Name, cmp.Or(*deploy.Spec.Replicas, 1))

		ginkgo.By("Waiting for kube-ovn-controller to be ready")
		_ = deployClient.RolloutStatus(deploy.Name)

		ginkgo.By("Waiting for pod " + podName + " to be running")
		podClient.WaitForRunning(podName)

		ginkgo.By("Validating pod annotations")
		pod = podClient.GetPod(podName)
		framework.ExpectHaveKeyWithValue(pod.Annotations, util.AllocatedAnnotation, "true")
		framework.ExpectMAC(pod.Annotations[util.MacAddressAnnotation])
		framework.ExpectHaveKeyWithValue(pod.Annotations, util.RoutedAnnotation, "true")
		framework.ExpectNotEqual(pod.Annotations[util.MacAddressAnnotation], mac)

		ginkgo.By("Getting ips " + portName)
		ip = ipClient.Get(portName)

		ginkgo.By("Validating ips " + ip.Name)
		framework.ExpectEqual(ip.Spec.MacAddress, pod.Annotations[util.MacAddressAnnotation])
		framework.ExpectEqual(ip.Spec.IPAddress, pod.Annotations[util.IPAddressAnnotation])
	})
})
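One Go detail worth flagging in the scale restore above: cmp.Or, in the standard library since Go 1.22, returns the first of its arguments that is not the zero value, so a deployment whose recorded replica count is somehow 0 is still scaled back to 1:

```go
package main

import (
	"cmp"
	"fmt"
)

func main() {
	replicas := int32(0)
	fmt.Println(cmp.Or(replicas, 1)) // 1: zero value falls through to the default

	replicas = 3
	fmt.Println(cmp.Or(replicas, 1)) // 3: first non-zero argument wins
}
```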
42 changes: 16 additions & 26 deletions test/e2e/kube-ovn/underlay/underlay.go
@@ -50,11 +50,13 @@ func makeProviderNetwork(providerNetworkName string, exchangeLinkName bool, link
 }
 
 func waitSubnetStatusUpdate(subnetName string, subnetClient *framework.SubnetClient, expectedUsingIPs float64) {
-	ginkgo.By("Waiting for status of subnet " + subnetName + " to be updated")
+	ginkgo.By("Waiting for using ips count of subnet " + subnetName + " to be " + fmt.Sprintf("%.0f", expectedUsingIPs))
 	framework.WaitUntil(2*time.Second, 30*time.Second, func(_ context.Context) (bool, error) {
 		subnet := subnetClient.Get(subnetName)
 		if (subnet.Status.V4AvailableIPs != 0 && subnet.Status.V4UsingIPs != expectedUsingIPs) ||
 			(subnet.Status.V6AvailableIPs != 0 && subnet.Status.V6UsingIPs != expectedUsingIPs) {
+			framework.Logf("current subnet status: v4AvailableIPs = %.0f, v4UsingIPs = %.0f, v6AvailableIPs = %.0f, v6UsingIPs = %.0f",
+				subnet.Status.V4AvailableIPs, subnet.Status.V4UsingIPs, subnet.Status.V6AvailableIPs, subnet.Status.V6UsingIPs)
 			return false, nil
 		}
 		return true, nil
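The helper polls until the using-IPs counters converge and, with this change, logs the intermediate counters on every miss so a timeout is diagnosable. framework.WaitUntil is kube-ovn's own wrapper; the same poll-with-timeout shape can be sketched with apimachinery's wait package, where fakeGetUsingIPs stands in for the subnetClient.Get call:

```go
package main

import (
	"context"
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

// fakeGetUsingIPs stands in for reading subnet.Status.V4UsingIPs;
// it converges to the expected count on the third poll.
func fakeGetUsingIPs(attempt int) float64 {
	if attempt < 3 {
		return 0
	}
	return 2
}

func main() {
	const expected = 2.0
	attempt := 0

	// Poll every 2s, give up after 30s — the same cadence as the test helper.
	err := wait.PollUntilContextTimeout(context.Background(), 2*time.Second, 30*time.Second, true,
		func(_ context.Context) (bool, error) {
			attempt++
			if usingIPs := fakeGetUsingIPs(attempt); usingIPs != expected {
				fmt.Printf("current usingIPs = %.0f, retrying\n", usingIPs)
				return false, nil // not converged yet, keep polling
			}
			return true, nil
		})
	fmt.Println("converged, err =", err)
}
```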
@@ -291,39 +293,27 @@ var _ = framework.SerialDescribe("[group:underlay]", func() {
ginkgo.By("Deleting pod " + podName)
podClient.DeleteSync(podName)

if u2oPodNameUnderlay != "" {
ginkgo.By("Deleting underlay pod " + u2oPodNameUnderlay)
podClient.DeleteSync(u2oPodNameUnderlay)
}
ginkgo.By("Deleting pod " + u2oPodNameUnderlay)
podClient.DeleteSync(u2oPodNameUnderlay)

if u2oPodNameOverlay != "" {
ginkgo.By("Deleting overlay pod default subnet " + u2oPodNameOverlay)
podClient.DeleteSync(u2oPodNameOverlay)
}
ginkgo.By("Deleting pod " + u2oPodNameOverlay)
podClient.DeleteSync(u2oPodNameOverlay)

if u2oPodOverlayCustomVPC != "" {
ginkgo.By("Deleting overlay pod custom vpc " + u2oPodOverlayCustomVPC)
podClient.DeleteSync(u2oPodOverlayCustomVPC)
}
ginkgo.By("Deleting pod " + u2oPodOverlayCustomVPC)
podClient.DeleteSync(u2oPodOverlayCustomVPC)

if u2oOverlaySubnetNameCustomVPC != "" {
ginkgo.By("Deleting subnet in custom vpc" + u2oOverlaySubnetNameCustomVPC)
subnetClient.DeleteSync(u2oOverlaySubnetNameCustomVPC)
}
ginkgo.By("Deleting subnet " + u2oOverlaySubnetNameCustomVPC)
subnetClient.DeleteSync(u2oOverlaySubnetNameCustomVPC)

if u2oOverlaySubnetName != "" {
ginkgo.By("Deleting subnet " + u2oOverlaySubnetName)
subnetClient.DeleteSync(u2oOverlaySubnetName)
}

if vpcName != "" {
ginkgo.By("Deleting custom vpc " + vpcName)
vpcClient.DeleteSync(vpcName)
}
ginkgo.By("Deleting subnet " + u2oOverlaySubnetName)
subnetClient.DeleteSync(u2oOverlaySubnetName)

ginkgo.By("Deleting subnet " + subnetName)
subnetClient.DeleteSync(subnetName)

ginkgo.By("Deleting vpc " + vpcName)
vpcClient.DeleteSync(vpcName)

ginkgo.By("Deleting vlan " + vlanName)
vlanClient.Delete(vlanName, metav1.DeleteOptions{})

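The dropped `if name != ""` guards work because the framework's DeleteSync helpers treat a missing object as success, so cleanup can run unconditionally even for resources a test never created; note also that the VPC is now deleted only after the subnets that reference it. That ignore-NotFound convention looks roughly like this in plain client-go terms — deleteIgnoreNotFound is a hypothetical helper, not the framework's actual code:

```go
package main

import (
	"context"
	"fmt"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/kubernetes/fake"
)

// deleteIgnoreNotFound deletes a pod but treats "already gone" as success,
// which is what makes unconditional AfterEach cleanup safe.
func deleteIgnoreNotFound(ctx context.Context, cs kubernetes.Interface, namespace, name string) error {
	err := cs.CoreV1().Pods(namespace).Delete(ctx, name, metav1.DeleteOptions{})
	if apierrors.IsNotFound(err) {
		return nil
	}
	return err
}

func main() {
	cs := fake.NewSimpleClientset() // empty fake cluster
	// Deleting a pod that never existed still succeeds:
	fmt.Println(deleteIgnoreNotFound(context.Background(), cs, "default", "no-such-pod")) // <nil>
}
```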
57 changes: 52 additions & 5 deletions test/e2e/kubevirt/e2e_test.go
@@ -39,14 +39,17 @@ func TestE2E(t *testing.T) {
 var _ = framework.Describe("[group:kubevirt]", func() {
 	f := framework.NewDefaultFramework("kubevirt")
 
-	var vmName, namespaceName string
+	var vmName, subnetName, namespaceName string
+	var subnetClient *framework.SubnetClient
 	var podClient *framework.PodClient
 	var vmClient *framework.VMClient
 	ginkgo.BeforeEach(func() {
 		f.SkipVersionPriorTo(1, 12, "This feature was introduced in v1.12.")
 
 		namespaceName = f.Namespace.Name
 		vmName = "vm-" + framework.RandomSuffix()
+		subnetName = "subnet-" + framework.RandomSuffix()
+		subnetClient = f.SubnetClient()
 		podClient = f.PodClientNS(namespaceName)
 		vmClient = f.VMClientNS(namespaceName)

@@ -57,6 +60,9 @@ var _ = framework.Describe("[group:kubevirt]", func() {
 	ginkgo.AfterEach(func() {
 		ginkgo.By("Deleting vm " + vmName)
 		vmClient.DeleteSync(vmName)
+
+		ginkgo.By("Deleting subnet " + subnetName)
+		subnetClient.DeleteSync(subnetName)
 	})
 
 	framework.ConformanceIt("should be able to keep pod ips after vm pod is deleted", func() {
@@ -77,7 +83,6 @@

ginkgo.By("Deleting pod " + pod.Name)
podClient.DeleteSync(pod.Name)
framework.ExpectNoError(err)

ginkgo.By("Waiting for vm " + vmName + " to be ready")
err = vmClient.WaitToBeReady(vmName, 2*time.Minute)
@@ -97,7 +102,7 @@
 		framework.ExpectHaveKeyWithValue(pod.Annotations, util.VMAnnotation, vmName)
 
 		ginkgo.By("Checking whether pod ips are changed")
-		framework.ExpectConsistOf(ips, pod.Status.PodIPs)
+		framework.ExpectEqual(ips, pod.Status.PodIPs)
 	})
 
 	framework.ConformanceIt("should be able to keep pod ips after the vm is restarted", func() {
@@ -118,11 +123,53 @@

ginkgo.By("Stopping vm " + vmName)
vmClient.StopSync(vmName)
framework.ExpectNoError(err)

ginkgo.By("Starting vm " + vmName)
vmClient.StartSync(vmName)

ginkgo.By("Getting pod of vm " + vmName)
podList, err = podClient.List(context.TODO(), metav1.ListOptions{
LabelSelector: labelSelector,
})
framework.ExpectNoError(err)
framework.ExpectHaveLen(podList.Items, 1)

ginkgo.By("Validating new pod annotations")
pod = &podList.Items[0]
framework.ExpectHaveKeyWithValue(pod.Annotations, util.AllocatedAnnotation, "true")
framework.ExpectHaveKeyWithValue(pod.Annotations, util.RoutedAnnotation, "true")
framework.ExpectHaveKeyWithValue(pod.Annotations, util.VMAnnotation, vmName)

ginkgo.By("Checking whether pod ips are changed")
framework.ExpectEqual(ips, pod.Status.PodIPs)
})

framework.ConformanceIt("should be able to handle subnet change", func() {
ginkgo.By("Creating subnet " + subnetName)
cidr := framework.RandomCIDR(f.ClusterIPFamily)
subnet := framework.MakeSubnet(subnetName, "", cidr, "", "", "", nil, nil, []string{namespaceName})
_ = subnetClient.CreateSync(subnet)

ginkgo.By("Getting pod of vm " + vmName)
labelSelector := fmt.Sprintf("%s=%s", v1.VirtualMachineNameLabel, vmName)
podList, err := podClient.List(context.TODO(), metav1.ListOptions{
LabelSelector: labelSelector,
})
framework.ExpectNoError(err)
framework.ExpectHaveLen(podList.Items, 1)

ginkgo.By("Validating pod annotations")
pod := &podList.Items[0]
framework.ExpectHaveKeyWithValue(pod.Annotations, util.AllocatedAnnotation, "true")
framework.ExpectHaveKeyWithValue(pod.Annotations, util.RoutedAnnotation, "true")
framework.ExpectHaveKeyWithValue(pod.Annotations, util.VMAnnotation, vmName)
ips := pod.Status.PodIPs

ginkgo.By("Stopping vm " + vmName)
vmClient.StopSync(vmName)

ginkgo.By("Starting vm " + vmName)
vmClient.StartSync(vmName)

ginkgo.By("Getting pod of vm " + vmName)
podList, err = podClient.List(context.TODO(), metav1.ListOptions{
@@ -138,6 +185,6 @@
 		framework.ExpectHaveKeyWithValue(pod.Annotations, util.VMAnnotation, vmName)
 
 		ginkgo.By("Checking whether pod ips are changed")
-		framework.ExpectConsistOf(ips, pod.Status.PodIPs)
+		framework.ExpectNotEqual(ips, pod.Status.PodIPs)
 	})
 })
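The assertion swaps in this file carry the intent of the tests: ExpectConsistOf compared the IP lists as unordered collections, while the restart test now requires exact equality (ExpectEqual) and the new subnet-change test requires inequality (ExpectNotEqual), since a VM moved to a different subnet must come back with different addresses. Assuming those helpers boil down to deep equality versus element matching, the distinction in plain Go:

```go
package main

import (
	"fmt"
	"reflect"
	"sort"
)

// consistsOf is an illustrative order-insensitive comparison, the rough
// semantics of ExpectConsistOf; reflect.DeepEqual is order-sensitive,
// the rough semantics of ExpectEqual.
func consistsOf(a, b []string) bool {
	x := append([]string(nil), a...)
	y := append([]string(nil), b...)
	sort.Strings(x)
	sort.Strings(y)
	return reflect.DeepEqual(x, y)
}

func main() {
	before := []string{"10.16.0.10", "fd00:10:16::a"}
	after := []string{"fd00:10:16::a", "10.16.0.10"} // same IPs, different order

	fmt.Println(consistsOf(before, after))        // true: same set of IPs
	fmt.Println(reflect.DeepEqual(before, after)) // false: order differs
}
```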
