Merge pull request #462 from nik-netlox/main
K8s HA CICD case added
UltraInstinct14 authored Dec 12, 2023
2 parents aadb73d + 34b5b8e commit 270345a
Showing 25 changed files with 1,295 additions and 20 deletions.
11 changes: 11 additions & 0 deletions cicd/k8s-calico-ipvs3-ha/README
@@ -0,0 +1,11 @@
## Test Case Description

This scenario will demonstrate LoxiLB in HA mode (clustering). The setup will have 2 LoxiLB nodes and a K8s cluster (1 master node & 2 worker nodes) with Calico CNI in IPVS mode. LoxiLB will run as an external service LB, and workloads will be spawned on all the cluster nodes.

The client will be connected to LoxiLB over an L3 network. The client and LoxiLB will do eBGP peering, while the cluster nodes and LoxiLB will do iBGP. LoxiLB will advertise the service CIDR or virtual IP to the client and the cluster nodes.
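
Once the VMs are up, the peering can be verified from the LoxiLB side; a minimal sketch, assuming goBGP (which LoxiLB embeds) and its standard CLI:

```bash
# On llb1/llb2: sessions to the client and the cluster nodes should be Established
gobgp neighbor
# The advertised service CIDR should show up in the global RIB
gobgp global rib
```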

The service CIDR will also be a virtual IP range, different from the K8s cluster network.
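
For reference, the service CIDR is what kube-loxilb hands out as external IPs and what LoxiLB announces over BGP; a sketch of the knobs involved (flag names taken from kube-loxilb's documented arguments, values purely illustrative):

```bash
# Fragment of the kube-loxilb container args (illustrative, not from this PR):
#   --loxiURL=http://192.168.80.252:11111,http://192.168.80.253:11111
#   --externalCIDR=20.20.20.1/32   # virtual service CIDR, outside the cluster network
#   --setBGP=64511                 # ASN used by the LoxiLB BGP speakers
```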

In scenarios where LoxiLB runs outside the cluster in HA mode, it is advised to create LB services in fullnat mode for ease of connectivity.
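
As an illustration, a fullnat service is requested through kube-loxilb annotations; a minimal sketch (name, selector and port are illustrative assumptions, the actual manifests live under cicd/k8s-calico-ipvs3-ha/yaml/):

```bash
kubectl apply -f - <<'EOF'
apiVersion: v1
kind: Service
metadata:
  name: tcp-lb-fullnat           # illustrative name
  annotations:
    loxilb.io/lbmode: "fullnat"  # ask kube-loxilb for fullnat mode
spec:
  loadBalancerClass: loxilb.io/loxilb
  selector:
    what: tcp-fullnat-test       # illustrative selector
  ports:
    - port: 56002                # illustrative port
      targetPort: 80
  type: LoadBalancer
EOF
```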

Please follow this link for a detailed explanation of this scenario: https://www.loxilb.io/post/k8s-deploying-hitless-and-ha-load-balancing
95 changes: 95 additions & 0 deletions cicd/k8s-calico-ipvs3-ha/Vagrantfile
@@ -0,0 +1,95 @@
# -*- mode: ruby -*-
# vi: set ft=ruby :

require "yaml"
settings = YAML.load_file "yaml/settings.yaml"

workers = settings["nodes"]["workers"]["count"]
loxilbs = (ENV['LOXILBS'] || "2").to_i

Vagrant.configure("2") do |config|

  if Vagrant.has_plugin?("vagrant-vbguest")
    config.vbguest.auto_update = false
  end

  config.vm.define "host" do |host|
    host.vm.hostname = 'host1'
    host.vm.box = settings["software"]["cluster"]["box"]
    host.vm.network :private_network, ip: "192.168.80.9", :netmask => "255.255.255.0"
    host.vm.network :private_network, ip: "192.168.90.9", :netmask => "255.255.255.0"
    host.vm.provision :shell, :path => "node_scripts/host.sh"
    host.vm.provider :virtualbox do |vbox|
      vbox.customize ["modifyvm", :id, "--memory", 2048]
      vbox.customize ["modifyvm", :id, "--cpus", 1]
    end
  end

  (1..loxilbs).each do |node_number|
    config.vm.define "llb#{node_number}" do |loxilb|
      loxilb.vm.box = settings["software"]["loxilb"]["box"]["name"]
      loxilb.vm.box_version = settings["software"]["loxilb"]["box"]["version"]
      loxilb.vm.hostname = "llb#{node_number}"
      ip = node_number + 251
      loxilb.vm.network :private_network, ip: "192.168.80.#{ip}", :netmask => "255.255.255.0"
      loxilb.vm.network :private_network, ip: "192.168.90.#{ip}", :netmask => "255.255.255.0"
      loxilb.vm.provision :shell, :path => "node_scripts/loxilb#{node_number}.sh"
      loxilb.vm.provider :virtualbox do |vbox|
        vbox.customize ["modifyvm", :id, "--memory", 4096]
        vbox.customize ["modifyvm", :id, "--cpus", 2]
        vbox.customize ["modifyvm", :id, "--nicpromisc2", "allow-all"]
      end
    end
  end

  config.vm.define "master" do |master|
    master.vm.box = settings["software"]["cluster"]["box"]
    master.vm.hostname = 'master'
    master.vm.network :private_network, ip: settings["network"]["control_ip"], :netmask => "255.255.255.0"
    master.vm.provision "shell",
      env: {
        "DNS_SERVERS" => settings["network"]["dns_servers"].join(" "),
        "ENVIRONMENT" => settings["environment"],
        "KUBERNETES_VERSION" => settings["software"]["kubernetes"],
        "OS" => settings["software"]["os"]
      },
      path: "node_scripts/common.sh"
    master.vm.provision "shell",
      env: {
        "CALICO_VERSION" => settings["software"]["calico"],
        "CONTROL_IP" => settings["network"]["control_ip"],
        "POD_CIDR" => settings["network"]["pod_cidr"],
        "SERVICE_CIDR" => settings["network"]["service_cidr"]
      },
      path: "node_scripts/master.sh"

    master.vm.provider :virtualbox do |vbox|
      vbox.customize ["modifyvm", :id, "--memory", 4096]
      vbox.customize ["modifyvm", :id, "--cpus", 2]
      vbox.customize ["modifyvm", :id, "--nicpromisc2", "allow-all"]
    end
  end

  (1..workers).each do |node_number|
    config.vm.define "worker#{node_number}" do |worker|
      worker.vm.box = settings["software"]["cluster"]["box"]
      worker.vm.hostname = "worker#{node_number}"
      ip = node_number + 200
      worker.vm.network :private_network, ip: "192.168.80.#{ip}", :netmask => "255.255.255.0"
      worker.vm.provision "shell",
        env: {
          "DNS_SERVERS" => settings["network"]["dns_servers"].join(" "),
          "ENVIRONMENT" => settings["environment"],
          "KUBERNETES_VERSION" => settings["software"]["kubernetes"],
          "OS" => settings["software"]["os"]
        },
        path: "node_scripts/common.sh"
      worker.vm.provision "shell", path: "node_scripts/worker.sh"

      worker.vm.provider :virtualbox do |vbox|
        vbox.customize ["modifyvm", :id, "--memory", 4096]
        vbox.customize ["modifyvm", :id, "--cpus", 2]
        vbox.customize ["modifyvm", :id, "--nicpromisc2", "allow-all"]
      end
    end
  end
end
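
Note that the number of LoxiLB VMs comes from the LOXILBS environment variable (default 2), so the topology can be brought up with:

```bash
# Two LoxiLB nodes (the default); the host, master and workers come from yaml/settings.yaml
LOXILBS=2 vagrant up
```
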
239 changes: 239 additions & 0 deletions cicd/k8s-calico-ipvs3-ha/bird_config/bird.conf
@@ -0,0 +1,239 @@
# This is a basic configuration file, which contains boilerplate options and
# some basic examples. It allows the BIRD daemon to start but will not cause
# anything else to happen.
#
# Please refer to the BIRD User's Guide documentation, which is also available
# online at http://bird.network.cz/ in HTML format, for more information on
# configuring BIRD and adding routing protocols.

# Configure logging
#log syslog all;
log "/var/log/bird.log" { debug, trace, info, remote, warning, error, auth, fatal, bug };

# Set router ID. It is a unique identification of your router, usually one of
# IPv4 addresses of the router. It is recommended to configure it explicitly.
router id 192.168.90.9;

# Turn on global debugging of all protocols (all messages or just selected classes)
# debug protocols all;
# debug protocols { events, states };

# Turn on internal watchdog
# watchdog warning 5 s;
# watchdog timeout 30 s;

# You can define your own constants
# define my_asn = 65000;
# define my_addr = 198.51.100.1;

# Tables master4 and master6 are defined by default
# ipv4 table master4;
# ipv6 table master6;

# Define more tables, e.g. for policy routing or as MRIB
# ipv4 table mrib4;
# ipv6 table mrib6;

# The Device protocol is not a real routing protocol. It does not generate any
# routes and it only serves as a module for getting information about network
# interfaces from the kernel. It is necessary in almost any configuration.
protocol device {
}

# The direct protocol is not a real routing protocol. It automatically generates
# direct routes to all network interfaces. Can exist in as many instances as you
# wish if you want to populate multiple routing tables with direct routes.
protocol direct {
    #disabled;              # Disable by default
    ipv4;                   # Connect to default IPv4 table
    #ipv6;                  # ... and to default IPv6 table
}

# The Kernel protocol is not a real routing protocol. Instead of communicating
# with other routers in the network, it performs synchronization of BIRD
# routing tables with the OS kernel. One instance per table.
protocol kernel {
    ipv4 {                  # Connect protocol to IPv4 table by channel
#       table master4;      # Default IPv4 table is master4
#       import all;         # Import to table, default is import all
        export all;         # Export to protocol, default is export none
    };
#   learn;                  # Learn alien routes from the kernel
#   kernel table 10;        # Kernel table to synchronize with (default: main)
    merge paths on;
}

# Another instance for IPv6, skipping default options
protocol kernel {
    ipv6 { export all; };
}

# Static routes (Again, there can be multiple instances, for different address
# families and to disable/enable various groups of static routes on the fly).
protocol static {
    ipv4;                   # Again, IPv4 channel with default options

#   route 0.0.0.0/0 via 198.51.100.10;
#   route 192.0.2.0/24 blackhole;
#   route 10.0.0.0/8 unreachable;
#   route 10.2.0.0/24 via "eth0";
#   # Static routes can be defined with optional attributes
#   route 10.1.1.0/24 via 198.51.100.3 { rip_metric = 3; };
#   route 10.1.2.0/24 via 198.51.100.3 { ospf_metric1 = 100; };
#   route 10.1.3.0/24 via 198.51.100.4 { ospf_metric2 = 100; };
}

# Pipe protocol connects two routing tables. Beware of loops.
# protocol pipe {
# table master4; # No ipv4/ipv6 channel definition like in other protocols
# peer table mrib4;
# import all; # Direction peer table -> table
# export all; # Direction table -> peer table
# }

# RIP example, both RIP and RIPng are supported
# protocol rip {
# ipv4 {
# # Export direct, static routes and ones from RIP itself
# import all;
# export where source ~ [ RTS_DEVICE, RTS_STATIC, RTS_RIP ];
# };
# interface "eth*" {
# update time 10; # Default period is 30
# timeout time 60; # Default timeout is 180
# authentication cryptographic; # No authentication by default
# password "hello" { algorithm hmac sha256; }; # Default is MD5
# };
# }

# OSPF example, both OSPFv2 and OSPFv3 are supported
# protocol ospf v3 {
# ipv6 {
# import all;
# export where source = RTS_STATIC;
# };
# area 0 {
# interface "eth*" {
# type broadcast; # Detected by default
# cost 10; # Interface metric
# hello 5; # Default hello period 10 is too long
# };
# interface "tun*" {
# type ptp; # PtP mode, avoids DR selection
# cost 100; # Interface metric
# hello 5; # Default hello period 10 is too long
# };
# interface "dummy0" {
# stub; # Stub interface, just propagate it
# };
# };
#}

# Define simple filter as an example for BGP import filter
# See https://gitlab.labs.nic.cz/labs/bird/wikis/BGP_filtering for more examples
# filter rt_import
# {
# if bgp_path.first != 64496 then accept;
# if bgp_path.len > 64 then accept;
# if bgp_next_hop != from then accept;
# reject;
# }

# BGP example, explicit name 'uplink1' is used instead of default 'bgp1'
# protocol bgp uplink1 {
# description "My BGP uplink";
# local 198.51.100.1 as 65000;
# neighbor 198.51.100.10 as 64496;
# hold time 90; # Default is 240
# password "secret"; # Password used for MD5 authentication
#
# ipv4 { # regular IPv4 unicast (1/1)
# import filter rt_import;
# export where source ~ [ RTS_STATIC, RTS_BGP ];
# };
#
# ipv6 { # regular IPv6 unicast (2/1)
# import filter rt_import;
# export filter { # The same as 'where' expression above
# if source ~ [ RTS_STATIC, RTS_BGP ]
# then accept;
# else reject;
# };
# };
#
# ipv4 multicast { # IPv4 multicast topology (1/2)
# table mrib4; # explicit IPv4 table
# import filter rt_import;
# export all;
# };
#
# ipv6 multicast { # IPv6 multicast topology (2/2)
# table mrib6; # explicit IPv6 table
# import filter rt_import;
# export all;
# };
#}

# Template example. Using templates to define IBGP route reflector clients.
# template bgp rr_clients {
# local 10.0.0.1 as 65000;
# neighbor as 65000;
# rr client;
# rr cluster id 1.0.0.1;
#
# ipv4 {
# import all;
# export where source = RTS_BGP;
# };
#
# ipv6 {
# import all;
# export where source = RTS_BGP;
# };
# }
#
# protocol bgp client1 from rr_clients {
# neighbor 10.0.1.1;
# }
#
# protocol bgp client2 from rr_clients {
# neighbor 10.0.2.1;
# }
#
# protocol bgp client3 from rr_clients {
# neighbor 10.0.3.1;
# }
#

# Advertise the client-side test network to the LoxiLB peers below
protocol static my_routes {
    ipv4;
    route 30.30.30.0/24 via 192.168.90.9;
}

# Export only routes originated by the my_routes protocol
filter export_my_routes {
    if proto = "my_routes" then {
        accept;
    }
    reject;
}

# eBGP session towards LoxiLB node llb1
protocol bgp llb1 {
    local as 64512;
    neighbor 192.168.90.252 as 64511;

    ipv4 {
        import all;
        export filter export_my_routes;
    };
}

# eBGP session towards LoxiLB node llb2
protocol bgp llb2 {
    local as 64512;
    neighbor 192.168.90.253 as 64511;

    ipv4 {
        import all;
        export filter export_my_routes;
    };
}
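
# Once both sessions are Established, the exchange can be checked from the
# client with birdc (verification commands, not part of this config):
#   birdc show protocols
#   birdc show route protocol llb1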

39 changes: 39 additions & 0 deletions cicd/k8s-calico-ipvs3-ha/config.sh
@@ -0,0 +1,39 @@
#!/bin/bash
# Destroy any stale VirtualBox VMs left over from previous runs
VMs=$(vagrant global-status | grep -i virtualbox)
while IFS= read -r line; do
    read -r -a vm <<< "$line"
    cd "${vm[4]}" > /dev/null 2>&1
    echo "Destroying ${vm[1]}"
    vagrant destroy -f "${vm[1]}"
    cd - > /dev/null 2>&1
done <<< "$VMs"

vagrant up

# Wait (up to 60 tries, 10s apart) for every pod in the cluster to be Running
for ((i = 1; i <= 60; i++)); do
    fin=1
    pods=$(vagrant ssh master -c 'kubectl get pods -A' 2> /dev/null | grep -v "NAMESPACE")

    while IFS= read -r line; do
        read -r -a pod <<< "$line"
        if [[ ${pod[3]} != *"Running"* ]]; then
            echo "${pod[1]} is not UP yet"
            fin=0
        fi
    done <<< "$pods"

    if [ "$fin" == 1 ]; then
        break
    fi
    echo "Will try after 10s"
    sleep 10
done

# Accept gratuitous/unsolicited ARP on the host-only interface, so the host
# learns the VIP's new location after a LoxiLB failover (assumed intent)
sudo sysctl net.ipv4.conf.vboxnet1.arp_accept=1

# Create the test LB services (fullnat and default modes, TCP and SCTP)
vagrant ssh master -c 'kubectl apply -f /vagrant/yaml/tcp_fullnat.yml' 2> /dev/null
vagrant ssh master -c 'kubectl apply -f /vagrant/yaml/tcp_default.yml' 2> /dev/null
vagrant ssh master -c 'kubectl apply -f /vagrant/yaml/sctp_fullnat.yml' 2> /dev/null
vagrant ssh master -c 'kubectl apply -f /vagrant/yaml/sctp_default.yml' 2> /dev/null
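
After the manifests are applied, a quick sanity check is that LoxiLB assigned external IPs from the service CIDR (a verification sketch, not part of this PR):

```bash
# The EXTERNAL-IP column should show addresses from the advertised service CIDR
vagrant ssh master -c 'kubectl get svc -A' 2> /dev/null
```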