From 0625cd114f2bf54f03ff3c7f4bd5f9b7925c5004 Mon Sep 17 00:00:00 2001
From: Federico Paolinelli
Date: Fri, 14 Jul 2023 17:55:00 +0300
Subject: [PATCH] E2E: implement tests for receiving prefixes

Here we implement e2e tests to ensure frr-k8s is able to receive
routes from outside.
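The knob under test is the neighbors' ToReceive section. As a minimal
sketch in terms of the API the tests drive (the peer address and ASN
below are made-up values; the field paths are the ones the tests set):

	n := frrk8sv1beta1.Neighbor{ASN: 64512, Address: "172.18.0.5"}
	// Accept every route the peer advertises:
	n.ToReceive.Allowed.Mode = frrk8sv1beta1.AllowAll
	// Or accept only an explicit set of prefixes:
	n.ToReceive.Allowed.Prefixes = []string{"192.168.2.0/24"}

The table below covers both modes for IPv4, IPv6, dual stack and VRF
setups, receiving from all the peers as well as from a subset of them.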
Signed-off-by: Federico Paolinelli
---
 e2etests/pkg/routes/routes.go |  40 +++++
 e2etests/tests/receiving.go   | 298 ++++++++++++++++++++++++++++++++++
 e2etests/tests/validate.go    |  28 ++++
 3 files changed, 366 insertions(+)
 create mode 100644 e2etests/tests/receiving.go

diff --git a/e2etests/pkg/routes/routes.go b/e2etests/pkg/routes/routes.go
index b6c1e0ef..1b72329d 100644
--- a/e2etests/pkg/routes/routes.go
+++ b/e2etests/pkg/routes/routes.go
@@ -11,9 +11,24 @@ import (
 	"go.universe.tf/e2etest/pkg/frr"
 	frrcontainer "go.universe.tf/e2etest/pkg/frr/container"
 	"go.universe.tf/e2etest/pkg/ipfamily"
+	"go.universe.tf/metallb/e2etest/pkg/executor"
 	v1 "k8s.io/api/core/v1"
+	"k8s.io/kubernetes/test/e2e/framework"
 )
 
+// PodHasPrefixFromContainer tells if the given frr-k8s pod has received a route for
+// the given prefix from the given container.
+func PodHasPrefixFromContainer(pod *v1.Pod, frr frrcontainer.FRR, prefix string) bool {
+	_, cidr, _ := net.ParseCIDR(prefix)
+	ipFamily := ipfamily.ForCIDR(cidr)
+	nextHop := frr.Ipv4
+	if ipFamily == ipfamily.IPv6 {
+		nextHop = frr.Ipv6
+	}
+	vrf := frr.RouterConfig.VRF
+	return hasPrefix(pod, ipFamily, cidr, nextHop, vrf)
+}
+
 // CheckNeighborHasPrefix tells if the given frr container has a route toward the given prefix
 // via the set of node passed to this function.
 func CheckNeighborHasPrefix(neighbor frrcontainer.FRR, prefix string, nodes []v1.Node) (bool, error) {
@@ -67,3 +82,28 @@ func routeForCIDR(cidr *net.IPNet, routesV4 map[string]frr.Route, routesV6 map[s
 	}
 	return frr.Route{}, RouteNotFoundError(fmt.Sprintf("route %s not found", cidr))
 }
+
+func hasPrefix(pod *v1.Pod, pairingFamily ipfamily.Family, prefix *net.IPNet, nextHop, vrf string) bool {
+	found := false
+	podExec := executor.ForPod(pod.Namespace, pod.Name, "frr")
+	routes, routesV6, err := frr.RoutesForVRF(vrf, podExec)
+	framework.ExpectNoError(err)
+
+	if pairingFamily == ipfamily.IPv6 {
+		routes = routesV6
+	}
+
+out:
+	for _, route := range routes {
+		if !cidrsAreEqual(route.Destination, prefix) {
+			continue
+		}
+		for _, nh := range route.NextHops {
+			if nh.String() == nextHop {
+				found = true
+				break out
+			}
+		}
+	}
+	return found
+}
diff --git a/e2etests/tests/receiving.go b/e2etests/tests/receiving.go
new file mode 100644
index 00000000..368e7a64
--- /dev/null
+++ b/e2etests/tests/receiving.go
@@ -0,0 +1,298 @@
+// SPDX-License-Identifier:Apache-2.0
+
+package tests
+
+import (
+	"github.com/onsi/ginkgo/v2"
+	"go.universe.tf/e2etest/pkg/frr/container"
+
+	frrk8sv1beta1 "github.com/metallb/frrk8s/api/v1beta1"
+	"github.com/metallb/frrk8stests/pkg/config"
+	"github.com/metallb/frrk8stests/pkg/dump"
+	"github.com/metallb/frrk8stests/pkg/infra"
+	"github.com/metallb/frrk8stests/pkg/k8s"
+	frrconfig "go.universe.tf/e2etest/pkg/frr/config"
+	"go.universe.tf/e2etest/pkg/ipfamily"
+	v1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	clientset "k8s.io/client-go/kubernetes"
+	"k8s.io/kubernetes/test/e2e/framework"
+	admissionapi "k8s.io/pod-security-admission/api"
+)
+
+var _ = ginkgo.Describe("Receiving routes", func() {
+	var cs clientset.Interface
+	var f *framework.Framework
+
+	defer ginkgo.GinkgoRecover()
+	clientconfig, err := framework.LoadConfig()
+	framework.ExpectNoError(err)
+	updater, err := config.NewUpdater(clientconfig)
+	framework.ExpectNoError(err)
+	reporter := dump.NewK8sReporter(framework.TestContext.KubeConfig, k8s.FRRK8sNamespace)
+
+	f = framework.NewDefaultFramework("bgpfrr")
+	f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged
+
+	ginkgo.AfterEach(func() {
+		if ginkgo.CurrentSpecReport().Failed() {
+			testName := ginkgo.CurrentSpecReport().LeafNodeText
+			dump.K8sInfo(testName, reporter)
+			dump.BGPInfo(testName, infra.FRRContainers, f.ClientSet, f)
+		}
+	})
+
+	ginkgo.BeforeEach(func() {
+		ginkgo.By("Clearing any previous configuration")
+
+		for _, c := range infra.FRRContainers {
+			err := c.UpdateBGPConfigFile(frrconfig.Empty)
+			framework.ExpectNoError(err)
+		}
+		err := updater.Clean()
+		framework.ExpectNoError(err)
+
+		cs = f.ClientSet
+	})
+
+	ginkgo.Context("Receiving IPs", func() {
+		type params struct {
+			vrf           string
+			ipFamily      ipfamily.Family
+			myAsn         uint32
+			toAdvertiseV4 []string
+			toAdvertiseV6 []string
+			modifyPeers   func([]config.Peer, []config.Peer)
+			validate      func([]config.Peer, []config.Peer, []*v1.Pod)
+		}
+
+		ginkgo.DescribeTable("Works with external frrs", func(p params) {
+			frrs := config.ContainersForVRF(infra.FRRContainers, p.vrf)
+			peersV4, peersV6 := config.PeersForContainers(frrs, p.ipFamily)
+			p.modifyPeers(peersV4, peersV6)
+			neighbors := config.NeighborsFromPeers(peersV4, peersV6)
+
+			config := frrk8sv1beta1.FRRConfiguration{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:      "test",
+					Namespace: "default",
+				},
+				Spec: frrk8sv1beta1.FRRConfigurationSpec{
+					BGP: frrk8sv1beta1.BGPConfig{
+						Routers: []frrk8sv1beta1.Router{
+							{
+								ASN:       p.myAsn,
+								VRF:       p.vrf,
+								Neighbors: neighbors,
+							},
+						},
+					},
+				},
+			}
+
+			ginkgo.By("pairing with nodes")
+			for _, c := range frrs {
+				err := container.PairWithNodes(cs, c, p.ipFamily, func(frr *container.FRR) {
+					frr.NeighborConfig.ToAdvertiseV4 = p.toAdvertiseV4
+					frr.NeighborConfig.ToAdvertiseV6 = p.toAdvertiseV6
+				})
+				framework.ExpectNoError(err)
+			}
+			err := updater.Update(config)
+			framework.ExpectNoError(err)
+
+			nodes, err := k8s.Nodes(cs)
+			framework.ExpectNoError(err)
+
+			for _, c := range frrs {
+				ValidateFRRPeeredWithNodes(nodes, c, p.ipFamily)
+			}
+
+			pods, err := k8s.FRRK8sPods(cs)
+			framework.ExpectNoError(err)
+
+			ginkgo.By("validating")
+			p.validate(peersV4, peersV6, pods)
+		},
+			ginkgo.Entry("IPV4 - receive ips from all", params{
+				ipFamily:      ipfamily.IPv4,
+				vrf:           "",
+				myAsn:         infra.FRRK8sASN,
+				toAdvertiseV4: []string{"192.168.2.0/24", "192.169.2.0/24"},
+				modifyPeers: func(ppV4 []config.Peer, ppV6 []config.Peer) {
+					for i := range ppV4 {
+						ppV4[i].Neigh.ToReceive.Allowed.Mode = frrk8sv1beta1.AllowAll
+					}
+				},
+				validate: func(ppV4 []config.Peer, ppV6 []config.Peer, pods []*v1.Pod) {
+					for _, p := range ppV4 {
+						ValidateNodesHaveRoutes(pods, p.FRR, "192.168.2.0/24", "192.169.2.0/24")
+					}
+				},
+			}),
+			ginkgo.Entry("IPV4 - receive ips from some, all mode", params{
+				ipFamily:      ipfamily.IPv4,
+				vrf:           "",
+				myAsn:         infra.FRRK8sASN,
+				toAdvertiseV4: []string{"192.168.2.0/24", "192.169.2.0/24"},
+				modifyPeers: func(ppV4 []config.Peer, ppV6 []config.Peer) {
+					ppV4[0].Neigh.ToReceive.Allowed.Mode = frrk8sv1beta1.AllowAll
+				},
+				validate: func(ppV4 []config.Peer, ppV6 []config.Peer, pods []*v1.Pod) {
+					ValidateNodesHaveRoutes(pods, ppV4[0].FRR, "192.168.2.0/24", "192.169.2.0/24")
+					for _, p := range ppV4[1:] {
+						ValidateNodesDoNotHaveRoutes(pods, p.FRR, "192.168.2.0/24", "192.169.2.0/24")
+					}
+				},
+			}),
+			ginkgo.Entry("IPV4 - receive ips from some, explicit mode", params{
+				ipFamily:      ipfamily.IPv4,
+				vrf:           "",
+				myAsn:         infra.FRRK8sASN,
+				toAdvertiseV4: []string{"192.168.2.0/24", "192.169.2.0/24", "192.170.2.0/24"},
+				modifyPeers: func(ppV4 []config.Peer, ppV6 []config.Peer) {
+					ppV4[0].Neigh.ToReceive.Allowed.Prefixes = []string{"192.168.2.0/24", "192.169.2.0/24"}
+				},
+				validate: func(ppV4 []config.Peer, ppV6 []config.Peer, pods []*v1.Pod) {
+					ValidateNodesHaveRoutes(pods, ppV4[0].FRR, "192.168.2.0/24", "192.169.2.0/24")
+					ValidateNodesDoNotHaveRoutes(pods, ppV4[0].FRR, "192.170.2.0/24")
+					for _, p := range ppV4[1:] {
+						ValidateNodesDoNotHaveRoutes(pods, p.FRR, "192.168.2.0/24", "192.169.2.0/24", "192.170.2.0/24")
+					}
+				},
+			}),
+			ginkgo.Entry("IPV6 - receive ips from all", params{
+				ipFamily:      ipfamily.IPv6,
+				vrf:           "",
+				myAsn:         infra.FRRK8sASN,
+				toAdvertiseV6: []string{"fc00:f853:ccd:e799::/64", "fc00:f853:ccd:e800::/64"},
+				modifyPeers: func(ppV4 []config.Peer, ppV6 []config.Peer) {
+					for i := range ppV4 {
+						ppV4[i].Neigh.ToReceive.Allowed.Mode = frrk8sv1beta1.AllowAll
+					}
+					for i := range ppV6 {
+						ppV6[i].Neigh.ToReceive.Allowed.Mode = frrk8sv1beta1.AllowAll
+					}
+				},
+				validate: func(ppV4 []config.Peer, ppV6 []config.Peer, pods []*v1.Pod) {
+					for _, p := range ppV6 {
+						ValidateNodesHaveRoutes(pods, p.FRR, "fc00:f853:ccd:e799::/64", "fc00:f853:ccd:e800::/64")
+					}
+				},
+			}),
+			ginkgo.Entry("IPV6 - receive ips from some, explicit mode", params{
+				ipFamily:      ipfamily.IPv6,
+				vrf:           "",
+				myAsn:         infra.FRRK8sASN,
+				toAdvertiseV6: []string{"fc00:f853:ccd:e799::/64", "fc00:f853:ccd:e800::/64"},
+				modifyPeers: func(ppV4 []config.Peer, ppV6 []config.Peer) {
+					ppV6[0].Neigh.ToReceive.Allowed.Prefixes = []string{"fc00:f853:ccd:e799::/64"}
+				},
+				validate: func(ppV4 []config.Peer, ppV6 []config.Peer, pods []*v1.Pod) {
+					ValidateNodesHaveRoutes(pods, ppV6[0].FRR, "fc00:f853:ccd:e799::/64")
+					ValidateNodesDoNotHaveRoutes(pods, ppV6[0].FRR, "fc00:f853:ccd:e800::/64")
+					for _, p := range ppV6[1:] {
+						ValidateNodesDoNotHaveRoutes(pods, p.FRR, "fc00:f853:ccd:e799::/64", "fc00:f853:ccd:e800::/64")
+					}
+				},
+			}),
+			ginkgo.Entry("IPV4 - VRF - receive ips from some, all mode", params{
+				ipFamily:      ipfamily.IPv4,
+				vrf:           infra.VRFName,
+				myAsn:         infra.FRRK8sASNVRF,
+				toAdvertiseV4: []string{"192.168.2.0/24", "192.169.2.0/24"},
+				modifyPeers: func(ppV4 []config.Peer, ppV6 []config.Peer) {
+					ppV4[0].Neigh.ToReceive.Allowed.Mode = frrk8sv1beta1.AllowAll
+				},
+				validate: func(ppV4 []config.Peer, ppV6 []config.Peer, pods []*v1.Pod) {
+					ValidateNodesHaveRoutes(pods, ppV4[0].FRR, "192.168.2.0/24", "192.169.2.0/24")
+					for _, p := range ppV4[1:] {
+						ValidateNodesDoNotHaveRoutes(pods, p.FRR, "192.168.2.0/24", "192.169.2.0/24")
+					}
+				},
+			}),
+			ginkgo.Entry("IPV4 - VRF - receive ips from some, explicit mode", params{
+				ipFamily:      ipfamily.IPv4,
+				vrf:           infra.VRFName,
+				myAsn:         infra.FRRK8sASNVRF,
+				toAdvertiseV4: []string{"192.168.2.0/24", "192.169.2.0/24", "192.170.2.0/24"},
+				modifyPeers: func(ppV4 []config.Peer, ppV6 []config.Peer) {
+					ppV4[0].Neigh.ToReceive.Allowed.Prefixes = []string{"192.168.2.0/24", "192.169.2.0/24"}
+				},
+				validate: func(ppV4 []config.Peer, ppV6 []config.Peer, pods []*v1.Pod) {
+					ValidateNodesHaveRoutes(pods, ppV4[0].FRR, "192.168.2.0/24", "192.169.2.0/24")
+					ValidateNodesDoNotHaveRoutes(pods, ppV4[0].FRR, "192.170.2.0/24")
+					for _, p := range ppV4[1:] {
+						ValidateNodesDoNotHaveRoutes(pods, p.FRR, "192.168.2.0/24", "192.169.2.0/24", "192.170.2.0/24")
+					}
+				},
+			}),
+			ginkgo.Entry("IPV6 - VRF - receive ips from all", params{
+				ipFamily:      ipfamily.IPv6,
+				vrf:           infra.VRFName,
+				myAsn:         infra.FRRK8sASNVRF,
+				toAdvertiseV6: []string{"fc00:f853:ccd:e799::/64", "fc00:f853:ccd:e800::/64"},
+				modifyPeers: func(ppV4 []config.Peer, ppV6 []config.Peer) {
+					for i := range ppV4 {
+						ppV4[i].Neigh.ToReceive.Allowed.Mode = frrk8sv1beta1.AllowAll
+					}
+					for i := range ppV6 {
+						ppV6[i].Neigh.ToReceive.Allowed.Mode = frrk8sv1beta1.AllowAll
+					}
+				},
+				validate: func(ppV4 []config.Peer, ppV6 []config.Peer, pods []*v1.Pod) {
+					for _, p := range ppV6 {
+						ValidateNodesHaveRoutes(pods, p.FRR, "fc00:f853:ccd:e799::/64", "fc00:f853:ccd:e800::/64")
+					}
+				},
+			}),
+			ginkgo.Entry("DUALSTACK - receive ips from all", params{
+				ipFamily:      ipfamily.DualStack,
+				vrf:           "",
+				myAsn:         infra.FRRK8sASN,
+				toAdvertiseV4: []string{"192.168.2.0/24", "192.169.2.0/24"},
+				toAdvertiseV6: []string{"fc00:f853:ccd:e799::/64", "fc00:f853:ccd:e800::/64"},
+				modifyPeers: func(ppV4 []config.Peer, ppV6 []config.Peer) {
+					for i := range ppV4 {
+						ppV4[i].Neigh.ToReceive.Allowed.Mode = frrk8sv1beta1.AllowAll
+					}
+					for i := range ppV6 {
+						ppV6[i].Neigh.ToReceive.Allowed.Mode = frrk8sv1beta1.AllowAll
+					}
+				},
+
+				validate: func(ppV4 []config.Peer, ppV6 []config.Peer, pods []*v1.Pod) {
+					for _, p := range ppV4 {
+						ValidateNodesHaveRoutes(pods, p.FRR, "192.168.2.0/24", "192.169.2.0/24")
+					}
+					for _, p := range ppV6 {
+						ValidateNodesHaveRoutes(pods, p.FRR, "fc00:f853:ccd:e799::/64", "fc00:f853:ccd:e800::/64")
+					}
+				},
+			}),
+			ginkgo.Entry("DUALSTACK - receive ips from some, explicit mode", params{
+				ipFamily:      ipfamily.DualStack,
+				vrf:           "",
+				myAsn:         infra.FRRK8sASN,
+				toAdvertiseV4: []string{"192.168.2.0/24", "192.169.2.0/24"},
+				toAdvertiseV6: []string{"fc00:f853:ccd:e799::/64", "fc00:f853:ccd:e800::/64"},
+				modifyPeers: func(ppV4 []config.Peer, ppV6 []config.Peer) {
+					ppV4[0].Neigh.ToReceive.Allowed.Prefixes = []string{"192.169.2.0/24"}
+					ppV6[0].Neigh.ToReceive.Allowed.Prefixes = []string{"fc00:f853:ccd:e799::/64"}
+				},
+				validate: func(ppV4 []config.Peer, ppV6 []config.Peer, pods []*v1.Pod) {
+					ValidateNodesHaveRoutes(pods, ppV4[0].FRR, "192.169.2.0/24")
+					ValidateNodesDoNotHaveRoutes(pods, ppV4[0].FRR, "192.168.2.0/24")
+					for _, p := range ppV4[1:] {
+						ValidateNodesDoNotHaveRoutes(pods, p.FRR, "192.168.2.0/24", "192.169.2.0/24")
+					}
+					ValidateNodesHaveRoutes(pods, ppV6[0].FRR, "fc00:f853:ccd:e799::/64")
+					ValidateNodesDoNotHaveRoutes(pods, ppV6[0].FRR, "fc00:f853:ccd:e800::/64")
+					for _, p := range ppV6[1:] {
+						ValidateNodesDoNotHaveRoutes(pods, p.FRR, "fc00:f853:ccd:e799::/64", "fc00:f853:ccd:e800::/64")
+					}
+				},
+			}),
+		)
+	})
+})
diff --git a/e2etests/tests/validate.go b/e2etests/tests/validate.go
index f41a9b1f..77eb17ff 100644
--- a/e2etests/tests/validate.go
+++ b/e2etests/tests/validate.go
@@ -69,3 +69,31 @@ func ValidateNeighborNoPrefixes(neigh frrcontainer.FRR, nodes []v1.Node, prefixe
 		return nil
 	}, 5*time.Second, time.Second).ShouldNot(HaveOccurred())
 }
+
+func ValidateNodesHaveRoutes(pods []*v1.Pod, neigh frrcontainer.FRR, prefixes ...string) {
+	ginkgo.By(fmt.Sprintf("Checking routes %v from %s", prefixes, neigh.Name))
+	Eventually(func() error {
+		for _, prefix := range prefixes {
+			for _, pod := range pods {
+				if !routes.PodHasPrefixFromContainer(pod, neigh, prefix) {
+					return fmt.Errorf("pod %s does not have prefix %s from %s", pod.Name, prefix, neigh.Name)
+				}
+			}
+		}
+		return nil
+	}, time.Minute, time.Second).ShouldNot(HaveOccurred())
+}
+
+func ValidateNodesDoNotHaveRoutes(pods []*v1.Pod, neigh frrcontainer.FRR, prefixes ...string) {
+	ginkgo.By(fmt.Sprintf("Checking routes %v not injected from %s", prefixes, neigh.Name))
+	Consistently(func() error {
+		for _, prefix := range prefixes {
+			for _, pod := range pods {
+				if routes.PodHasPrefixFromContainer(pod, neigh, prefix) {
+					return fmt.Errorf("pod %s has prefix %s from %s", pod.Name, prefix, neigh.Name)
+				}
+			}
+		}
+		return nil
+	}, 5*time.Second, time.Second).ShouldNot(HaveOccurred())
+}