diff --git a/cicd/k8s-calico-ipvs3-ha/README b/cicd/k8s-calico-ipvs3-ha/README new file mode 100644 index 000000000..a82f92c4a --- /dev/null +++ b/cicd/k8s-calico-ipvs3-ha/README @@ -0,0 +1,11 @@ +## Test Case Description + +This scenario will demonstrate LoxiLB in HA mode(clustering). The setup will have 2 LoxiLB nodes, K8s(1 Master Nodes & 2 Worker Nodes) cluster with Calico CNI in ipvs mode. LoxiLB will be running as external Service LB. Workloads will be spawned in all the cluster nodes. + +Client will be connected to the LoxiLB with L3 network. Client and LoxiLB will do eBGP peering where Cluster nodes and LoxiLB will do iBGP. LoxiLB will advertise the Service CIDR or VirtualIP to the client and cluster nodes. + +Service CIDR will also be a Virtual IP, different from the K8s cluster network. + +In scenarios where LoxiLB runs outside of the cluster in HA mode, it is advised to create LB services in fullnat mode for ease of connectivity. + +Please follow the link for detailed explanation about this scenario: https://www.loxilb.io/post/k8s-deploying-hitless-and-ha-load-balancing diff --git a/cicd/k8s-calico-ipvs3-ha/Vagrantfile b/cicd/k8s-calico-ipvs3-ha/Vagrantfile new file mode 100644 index 000000000..f2a5f620a --- /dev/null +++ b/cicd/k8s-calico-ipvs3-ha/Vagrantfile @@ -0,0 +1,95 @@ +# -*- mode: ruby -*- +# vi: set ft=ruby : + +require "yaml" +settings = YAML.load_file "yaml/settings.yaml" + +workers = settings["nodes"]["workers"]["count"] +loxilbs = (ENV['LOXILBS'] || "2").to_i + +Vagrant.configure("2") do |config| + + if Vagrant.has_plugin?("vagrant-vbguest") + config.vbguest.auto_update = false + end + config.vm.define "host" do |host| + host.vm.hostname = 'host1' + host.vm.box = settings["software"]["cluster"]["box"] + host.vm.network :private_network, ip: "192.168.80.9", :netmask => "255.255.255.0" + host.vm.network :private_network, ip: "192.168.90.9", :netmask => "255.255.255.0" + host.vm.provision :shell, :path => "node_scripts/host.sh" + 
host.vm.provider :virtualbox do |vbox| + vbox.customize ["modifyvm", :id, "--memory", 2048] + vbox.customize ["modifyvm", :id, "--cpus", 1] + end + end + + (1..loxilbs).each do |node_number| + config.vm.define "llb#{node_number}" do |loxilb| + loxilb.vm.box = settings["software"]["loxilb"]["box"]["name"] + loxilb.vm.box_version = settings["software"]["loxilb"]["box"]["version"] + loxilb.vm.hostname = "llb#{node_number}" + ip = node_number + 251 + loxilb.vm.network :private_network, ip: "192.168.80.#{ip}", :netmask => "255.255.255.0" + loxilb.vm.network :private_network, ip: "192.168.90.#{ip}", :netmask => "255.255.255.0" + loxilb.vm.provision :shell, :path => "node_scripts/loxilb#{node_number}.sh" + loxilb.vm.provider :virtualbox do |vbox| + vbox.customize ["modifyvm", :id, "--memory", 4096] + vbox.customize ["modifyvm", :id, "--cpus", 2] + vbox.customize ["modifyvm", :id, "--nicpromisc2", "allow-all"] + end + end + end + + config.vm.define "master" do |master| + master.vm.box = settings["software"]["cluster"]["box"] + master.vm.hostname = 'master' + master.vm.network :private_network, ip: settings["network"]["control_ip"], :netmask => "255.255.255.0" + master.vm.provision "shell", + env: { + "DNS_SERVERS" => settings["network"]["dns_servers"].join(" "), + "ENVIRONMENT" => settings["environment"], + "KUBERNETES_VERSION" => settings["software"]["kubernetes"], + "OS" => settings["software"]["os"] + }, + path: "node_scripts/common.sh" + master.vm.provision "shell", + env: { + "CALICO_VERSION" => settings["software"]["calico"], + "CONTROL_IP" => settings["network"]["control_ip"], + "POD_CIDR" => settings["network"]["pod_cidr"], + "SERVICE_CIDR" => settings["network"]["service_cidr"] + }, + path: "node_scripts/master.sh" + + master.vm.provider :virtualbox do |vbox| + vbox.customize ["modifyvm", :id, "--memory", 4096] + vbox.customize ["modifyvm", :id, "--cpus", 2] + vbox.customize ["modifyvm", :id, "--nicpromisc2", "allow-all"] + end + end + + (1..workers).each do 
|node_number| + config.vm.define "worker#{node_number}" do |worker| + worker.vm.box = settings["software"]["cluster"]["box"] + worker.vm.hostname = "worker#{node_number}" + ip = node_number + 200 + worker.vm.network :private_network, ip: "192.168.80.#{ip}", :netmask => "255.255.255.0" + worker.vm.provision "shell", + env: { + "DNS_SERVERS" => settings["network"]["dns_servers"].join(" "), + "ENVIRONMENT" => settings["environment"], + "KUBERNETES_VERSION" => settings["software"]["kubernetes"], + "OS" => settings["software"]["os"] + }, + path: "node_scripts/common.sh" + worker.vm.provision "shell", path: "node_scripts/worker.sh" + + worker.vm.provider :virtualbox do |vbox| + vbox.customize ["modifyvm", :id, "--memory", 4096] + vbox.customize ["modifyvm", :id, "--cpus", 2] + vbox.customize ["modifyvm", :id, "--nicpromisc2", "allow-all"] + end + end + end +end diff --git a/cicd/k8s-calico-ipvs3-ha/bird_config/bird.conf b/cicd/k8s-calico-ipvs3-ha/bird_config/bird.conf new file mode 100644 index 000000000..96b0bb578 --- /dev/null +++ b/cicd/k8s-calico-ipvs3-ha/bird_config/bird.conf @@ -0,0 +1,239 @@ +# This is a basic configuration file, which contains boilerplate options and +# some basic examples. It allows the BIRD daemon to start but will not cause +# anything else to happen. +# +# Please refer to the BIRD User's Guide documentation, which is also available +# online at http://bird.network.cz/ in HTML format, for more information on +# configuring BIRD and adding routing protocols. + +# Configure logging +#log syslog all; +log "/var/log/bird.log" { debug, trace, info, remote, warning, error, auth, fatal, bug }; + +# Set router ID. It is a unique identification of your router, usually one of +# IPv4 addresses of the router. It is recommended to configure it explicitly. 
+router id 192.168.90.9; + +# Turn on global debugging of all protocols (all messages or just selected classes) +# debug protocols all; +# debug protocols { events, states }; + +# Turn on internal watchdog +# watchdog warning 5 s; +# watchdog timeout 30 s; + +# You can define your own constants +# define my_asn = 65000; +# define my_addr = 198.51.100.1; + +# Tables master4 and master6 are defined by default +# ipv4 table master4; +# ipv6 table master6; + +# Define more tables, e.g. for policy routing or as MRIB +# ipv4 table mrib4; +# ipv6 table mrib6; + +# The Device protocol is not a real routing protocol. It does not generate any +# routes and it only serves as a module for getting information about network +# interfaces from the kernel. It is necessary in almost any configuration. +protocol device { +} + +# The direct protocol is not a real routing protocol. It automatically generates +# direct routes to all network interfaces. Can exist in as many instances as you +# wish if you want to populate multiple routing tables with direct routes. +protocol direct { + #disabled; # Disable by default + ipv4; # Connect to default IPv4 table + #ipv6; # ... and to default IPv6 table +} + +# The Kernel protocol is not a real routing protocol. Instead of communicating +# with other routers in the network, it performs synchronization of BIRD +# routing tables with the OS kernel. One instance per table. +protocol kernel { + ipv4 { # Connect protocol to IPv4 table by channel +# table master4; # Default IPv4 table is master4 +# import all; # Import to table, default is import all + export all; # Export to protocol. 
default is export none + }; +# learn; # Learn alien routes from the kernel +# kernel table 10; # Kernel table to synchronize with (default: main) + merge paths on; +} + +# Another instance for IPv6, skipping default options +protocol kernel { + ipv6 { export all; }; +} + +# Static routes (Again, there can be multiple instances, for different address +# families and to disable/enable various groups of static routes on the fly). +protocol static { + ipv4; # Again, IPv4 channel with default options + +# route 0.0.0.0/0 via 198.51.100.10; +# route 192.0.2.0/24 blackhole; +# route 10.0.0.0/8 unreachable; +# route 10.2.0.0/24 via "eth0"; +# # Static routes can be defined with optional attributes +# route 10.1.1.0/24 via 198.51.100.3 { rip_metric = 3; }; +# route 10.1.2.0/24 via 198.51.100.3 { ospf_metric1 = 100; }; +# route 10.1.3.0/24 via 198.51.100.4 { ospf_metric2 = 100; }; +} + +# Pipe protocol connects two routing tables. Beware of loops. +# protocol pipe { +# table master4; # No ipv4/ipv6 channel definition like in other protocols +# peer table mrib4; +# import all; # Direction peer table -> table +# export all; # Direction table -> peer table +# } + +# RIP example, both RIP and RIPng are supported +# protocol rip { +# ipv4 { +# # Export direct, static routes and ones from RIP itself +# import all; +# export where source ~ [ RTS_DEVICE, RTS_STATIC, RTS_RIP ]; +# }; +# interface "eth*" { +# update time 10; # Default period is 30 +# timeout time 60; # Default timeout is 180 +# authentication cryptographic; # No authentication by default +# password "hello" { algorithm hmac sha256; }; # Default is MD5 +# }; +# } + +# OSPF example, both OSPFv2 and OSPFv3 are supported +# protocol ospf v3 { +# ipv6 { +# import all; +# export where source = RTS_STATIC; +# }; +# area 0 { +# interface "eth*" { +# type broadcast; # Detected by default +# cost 10; # Interface metric +# hello 5; # Default hello perid 10 is too long +# }; +# interface "tun*" { +# type ptp; # PtP mode, avoids 
DR selection +# cost 100; # Interface metric +# hello 5; # Default hello perid 10 is too long +# }; +# interface "dummy0" { +# stub; # Stub interface, just propagate it +# }; +# }; +#} + +# Define simple filter as an example for BGP import filter +# See https://gitlab.labs.nic.cz/labs/bird/wikis/BGP_filtering for more examples +# filter rt_import +# { +# if bgp_path.first != 64496 then accept; +# if bgp_path.len > 64 then accept; +# if bgp_next_hop != from then accept; +# reject; +# } + +# BGP example, explicit name 'uplink1' is used instead of default 'bgp1' +# protocol bgp uplink1 { +# description "My BGP uplink"; +# local 198.51.100.1 as 65000; +# neighbor 198.51.100.10 as 64496; +# hold time 90; # Default is 240 +# password "secret"; # Password used for MD5 authentication +# +# ipv4 { # regular IPv4 unicast (1/1) +# import filter rt_import; +# export where source ~ [ RTS_STATIC, RTS_BGP ]; +# }; +# +# ipv6 { # regular IPv6 unicast (2/1) +# import filter rt_import; +# export filter { # The same as 'where' expression above +# if source ~ [ RTS_STATIC, RTS_BGP ] +# then accept; +# else reject; +# }; +# }; +# +# ipv4 multicast { # IPv4 multicast topology (1/2) +# table mrib4; # explicit IPv4 table +# import filter rt_import; +# export all; +# }; +# +# ipv6 multicast { # IPv6 multicast topology (2/2) +# table mrib6; # explicit IPv6 table +# import filter rt_import; +# export all; +# }; +#} + +# Template example. Using templates to define IBGP route reflector clients. 
+# template bgp rr_clients { +# local 10.0.0.1 as 65000; +# neighbor as 65000; +# rr client; +# rr cluster id 1.0.0.1; +# +# ipv4 { +# import all; +# export where source = RTS_BGP; +# }; +# +# ipv6 { +# import all; +# export where source = RTS_BGP; +# }; +# } +# +# protocol bgp client1 from rr_clients { +# neighbor 10.0.1.1; +# } +# +# protocol bgp client2 from rr_clients { +# neighbor 10.0.2.1; +# } +# +# protocol bgp client3 from rr_clients { +# neighbor 10.0.3.1; +# } +# + +protocol static my_routes { + ipv4; + route 30.30.30.0/24 via 192.168.90.9; +} + +filter export_my_routes { + if proto = "my_routes" then { + accept; + } + reject; +} + +protocol bgp llb1 { + local as 64512; + neighbor 192.168.90.252 as 64511; + + ipv4 { + import all; + export filter export_my_routes; + }; +} + +protocol bgp llb2 { + local as 64512; + neighbor 192.168.90.253 as 64511; + + ipv4 { + import all; + export filter export_my_routes; + }; +} + diff --git a/cicd/k8s-calico-ipvs3-ha/config.sh b/cicd/k8s-calico-ipvs3-ha/config.sh new file mode 100755 index 000000000..f58e470e7 --- /dev/null +++ b/cicd/k8s-calico-ipvs3-ha/config.sh @@ -0,0 +1,39 @@ +#!/bin/bash +VMs=$(vagrant global-status | grep -i virtualbox) +while IFS= read -a VMs; do + read -a vm <<< "$VMs" + cd ${vm[4]} 2>&1>/dev/null + echo "Destroying ${vm[1]}" + vagrant destroy -f ${vm[1]} + cd - 2>&1>/dev/null +done <<< "$VMs" + +vagrant up + +for((i=1; i<=60; i++)) +do + fin=1 + pods=$(vagrant ssh master -c 'kubectl get pods -A' 2> /dev/null | grep -v "NAMESPACE") + + while IFS= read -a pods; do + read -a pod <<< "$pods" + if [[ ${pod[3]} != *"Running"* ]]; then + echo "${pod[1]} is not UP yet" + fin=0 + fi + done <<< "$pods" + if [ $fin == 1 ]; + then + break; + fi + echo "Will try after 10s" + sleep 10 +done + +sudo sysctl net.ipv4.conf.vboxnet1.arp_accept=1 + +#Create fullnat Service +vagrant ssh master -c 'kubectl apply -f /vagrant/yaml/tcp_fullnat.yml' 2> /dev/null +vagrant ssh master -c 'kubectl apply -f 
/vagrant/yaml/tcp_default.yml' 2> /dev/null +vagrant ssh master -c 'kubectl apply -f /vagrant/yaml/sctp_fullnat.yml' 2> /dev/null +vagrant ssh master -c 'kubectl apply -f /vagrant/yaml/sctp_default.yml' 2> /dev/null diff --git a/cicd/k8s-calico-ipvs3-ha/host_validation.sh b/cicd/k8s-calico-ipvs3-ha/host_validation.sh new file mode 100755 index 000000000..814bb14b4 --- /dev/null +++ b/cicd/k8s-calico-ipvs3-ha/host_validation.sh @@ -0,0 +1,43 @@ +#!/bin/bash +extIP=$(cat /vagrant/extIP) + +code=0 +echo Service IP: $extIP + +numECMP=$(birdc show route | grep $extIP -A 3 | grep via | wc -l) + +birdc show route | grep $extIP -A 3 + +if [ $numECMP == "2" ]; then + echo "Host route [OK]" +else + echo "Host route [NOK]" +fi +echo -e "\n*********************************************" +echo "Testing Service" +echo "*********************************************" + +# iperf client accessing fullnat service +stdbuf -oL nohup iperf -c 20.20.20.1 -p 56002 -t 100 -i 1 -b 100M &> iperff.out & + +# iperf client accessing default service +stdbuf -oL nohup iperf -c 20.20.20.1 -p 56003 -t 100 -i 1 -b 100M -B 30.30.30.1 &> iperfd.out & + +mkfifo sd1.pipe +mkfifo sd2.pipe + +sleep infinity > sd1.pipe & +sleep infinity > sd2.pipe & + +sleep 1 + +stdbuf -oL nohup sctp_darn -H 192.168.90.9 -h 20.20.20.1 -p 56004 -s -I < sd1.pipe &> sdf.out & +stdbuf -oL nohup sctp_darn -H 30.30.30.1 -h 20.20.20.1 -p 56005 -s -I < sd2.pipe &> sdd.out & + +sleep 2 +for((i=0;i<30;i++)) +do +echo "snd=100" >> sd1.pipe +echo "snd=100" >> sd2.pipe +sleep 1 +done diff --git a/cicd/k8s-calico-ipvs3-ha/host_validation2.sh b/cicd/k8s-calico-ipvs3-ha/host_validation2.sh new file mode 100755 index 000000000..11e870001 --- /dev/null +++ b/cicd/k8s-calico-ipvs3-ha/host_validation2.sh @@ -0,0 +1,57 @@ +#!/bin/bash +for((i=0;i<50;i++)) +do +echo "snd=100" >> sd1.pipe +echo "snd=100" >> sd2.pipe +sleep 1 +done + + +echo "stats" >> sd1.pipe +echo "stats" >> sd2.pipe + +echo "shutdown" >> sd1.pipe +echo "shutdown" >> sd2.pipe 
+ +pkill iperf +pkill sctp_darn + +iperff_res=$(tail -n 1 iperff.out | xargs | cut -d ' ' -f 7) +iperfd_res=$(tail -n 1 iperfd.out | xargs | cut -d ' ' -f 7) + +sdf_res1=$(grep -i "packets sent" sdf.out | xargs | cut -d ' ' -f 3) +sdf_res2=$(grep -i "packets rec" sdf.out | xargs | cut -d ' ' -f 3) + +sdd_res1=$(grep -i "packets sent" sdd.out | xargs | cut -d ' ' -f 3) +sdd_res2=$(grep -i "packets rec" sdd.out | xargs | cut -d ' ' -f 3) + +if [[ $iperff_res != 0 ]]; then + echo -e "K8s-calico-ipvs3-ha TCP\t\t(fullnat)\t[OK]" +else + echo -e "K8s-calico-ipvs3-ha TCP\t\t(fullnat)\t[FAILED]" + code=1 +fi + +if [[ $iperfd_res != 0 ]]; then + echo -e "K8s-calico-ipvs3-ha TCP\t\t(default)\t[OK]" +else + echo -e "K8s-calico-ipvs3-ha TCP\t\t(default)\t[FAILED]" + code=1 +fi + +if [[ $sdf_res1 != 0 && $sdf_res2 != 0 && $sdf_res1 == $sdf_res2 ]]; then + echo -e "K8s-calico-ipvs3-ha SCTP\t(fullnat)\t[OK]" +else + echo -e "K8s-calico-ipvs3-ha SCTP\t(fullnat)\t[FAILED]" + code=1 +fi + +if [[ $sdd_res1 != 0 && $sdd_res2 != 0 && $sdd_res1 == $sdd_res2 ]]; then + echo -e "K8s-calico-ipvs3-ha SCTP\t(default)\t[OK]" +else + echo -e "K8s-calico-ipvs3-ha SCTP\t(default)\t[FAILED]" + code=1 +fi + +rm *.out *.pipe +exit $code diff --git a/cicd/k8s-calico-ipvs3-ha/node_scripts/common.sh b/cicd/k8s-calico-ipvs3-ha/node_scripts/common.sh new file mode 100644 index 000000000..b8634194f --- /dev/null +++ b/cicd/k8s-calico-ipvs3-ha/node_scripts/common.sh @@ -0,0 +1,93 @@ +#!/bin/bash +# +# Common setup for all servers (Control Plane and Nodes) + +set -euxo pipefail + +# Variable Declaration + +# DNS Setting +if [ ! 
-d /etc/systemd/resolved.conf.d ]; then + sudo mkdir /etc/systemd/resolved.conf.d/ +fi +cat </dev/null; echo "@reboot /sbin/swapoff -a") | crontab - || true +sudo apt-get update -y +# Install CRI-O Runtime + +VERSION="$(echo ${KUBERNETES_VERSION} | grep -oE '[0-9]+\.[0-9]+')" + +# Create the .conf file to load the modules at bootup +cat <> /etc/default/crio << EOF +${ENVIRONMENT} +EOF +sudo systemctl daemon-reload +sudo systemctl enable crio --now + +echo "CRI runtime installed successfully" + +sudo apt-get update +sudo apt-get install -y apt-transport-https ca-certificates curl +curl -fsSL https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo gpg --dearmor -o /etc/apt/keyrings/kubernetes-archive-keyring.gpg + +echo "deb [signed-by=/etc/apt/keyrings/kubernetes-archive-keyring.gpg] https://apt.kubernetes.io/ kubernetes-xenial main" | sudo tee /etc/apt/sources.list.d/kubernetes.list +sudo apt-get update -y +sudo apt-get install -y kubelet="$KUBERNETES_VERSION" kubectl="$KUBERNETES_VERSION" kubeadm="$KUBERNETES_VERSION" +sudo apt-get update -y +sudo apt-get install -y jq +sudo apt-get install -y ipvsadm + +local_ip="$(ip --json a s | jq -r '.[] | if .ifname == "eth1" then .addr_info[] | if .family == "inet" then .local else empty end else empty end')" +cat > /etc/default/kubelet << EOF +KUBELET_EXTRA_ARGS=--node-ip=$local_ip +${ENVIRONMENT} +EOF diff --git a/cicd/k8s-calico-ipvs3-ha/node_scripts/host.sh b/cicd/k8s-calico-ipvs3-ha/node_scripts/host.sh new file mode 100755 index 000000000..d7821be5a --- /dev/null +++ b/cicd/k8s-calico-ipvs3-ha/node_scripts/host.sh @@ -0,0 +1,14 @@ +# Install Bird to work with k3s +sudo apt-get -y install bird2 lksctp-tools iperf + +sudo ip addr add 30.30.30.1/32 dev lo + +sleep 5 + +sudo cp -f /vagrant/bird_config/bird.conf /etc/bird/bird.conf +if [ ! 
-f /var/log/bird.log ]; then + sudo touch /var/log/bird.log +fi +sudo chown bird:bird /var/log/bird.log +sudo service bird restart +echo "Host is up" diff --git a/cicd/k8s-calico-ipvs3-ha/node_scripts/loxilb1.sh b/cicd/k8s-calico-ipvs3-ha/node_scripts/loxilb1.sh new file mode 100644 index 000000000..cc999d846 --- /dev/null +++ b/cicd/k8s-calico-ipvs3-ha/node_scripts/loxilb1.sh @@ -0,0 +1,10 @@ +export LOXILB_IP=$(ip a |grep global | grep -v '10.0.2.15' | grep -v '192.168.80' | awk '{print $2}' | cut -f1 -d '/') + +apt-get update +apt-get install -y software-properties-common +curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add - +add-apt-repository -y "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" +apt-get update +apt-get install -y docker-ce +docker run -u root --cap-add SYS_ADMIN --restart unless-stopped --privileged -dit -v /dev/log:/dev/log --net=host --name loxilb ghcr.io/loxilb-io/loxilb:latest -b --cluster=192.168.80.253 --self=0 +#docker exec -dt loxilb /root/loxilb-io/loxilb/loxilb -b --cluster=192.168.80.253 --self=0 diff --git a/cicd/k8s-calico-ipvs3-ha/node_scripts/loxilb2.sh b/cicd/k8s-calico-ipvs3-ha/node_scripts/loxilb2.sh new file mode 100644 index 000000000..522ff30a9 --- /dev/null +++ b/cicd/k8s-calico-ipvs3-ha/node_scripts/loxilb2.sh @@ -0,0 +1,11 @@ +export LOXILB_IP=$(ip a |grep global | grep -v '10.0.2.15' | grep -v '192.168.80' | awk '{print $2}' | cut -f1 -d '/') + +apt-get update +apt-get install -y software-properties-common +curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add - +add-apt-repository -y "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" +apt-get update +apt-get install -y docker-ce +docker run -u root --cap-add SYS_ADMIN --restart unless-stopped --privileged -dit -v /dev/log:/dev/log --net=host --name loxilb ghcr.io/loxilb-io/loxilb:latest -b --cluster=192.168.80.252 --self=1 +#docker exec -dt loxilb 
/root/loxilb-io/loxilb/loxilb -b --cluster=192.168.80.252 --self=1 + diff --git a/cicd/k8s-calico-ipvs3-ha/node_scripts/master.sh b/cicd/k8s-calico-ipvs3-ha/node_scripts/master.sh new file mode 100644 index 000000000..8a4ee7477 --- /dev/null +++ b/cicd/k8s-calico-ipvs3-ha/node_scripts/master.sh @@ -0,0 +1,57 @@ +#!/bin/bash +# +# Setup for Control Plane (Master) servers + +set -euxo pipefail + +NODENAME=$(hostname -s) + +sudo kubeadm config images pull + +echo "Preflight Check Passed: Downloaded All Required Images" + +#sudo kubeadm init --apiserver-advertise-address=$CONTROL_IP --apiserver-cert-extra-sans=$CONTROL_IP --pod-network-cidr=$POD_CIDR --service-cidr=$SERVICE_CIDR --node-name "$NODENAME" --ignore-preflight-errors Swap +sudo kubeadm init --ignore-preflight-errors Swap --config /vagrant/yaml/kubeadm-config.yaml + +mkdir -p "$HOME"/.kube +sudo cp -i /etc/kubernetes/admin.conf "$HOME"/.kube/config +sudo chown "$(id -u)":"$(id -g)" "$HOME"/.kube/config + +# Save Configs to shared /Vagrant location + +# For Vagrant re-runs, check if there is existing configs in the location and delete it for saving new configuration. 
+ +config_path="/vagrant/configs" + +if [ -d $config_path ]; then + rm -f $config_path/* +else + mkdir -p $config_path +fi + +cp -i /etc/kubernetes/admin.conf $config_path/config +touch $config_path/join.sh +chmod +x $config_path/join.sh + +kubeadm token create --print-join-command > $config_path/join.sh + +# Install Calico Network Plugin + +curl https://raw.githubusercontent.com/projectcalico/calico/v${CALICO_VERSION}/manifests/calico.yaml -O + +kubectl apply -f calico.yaml + +sudo -i -u vagrant bash << EOF +whoami +mkdir -p /home/vagrant/.kube +sudo cp -i $config_path/config /home/vagrant/.kube/ +sudo chown 1000:1000 /home/vagrant/.kube/config +EOF + +# Install Metrics Server + +kubectl apply -f https://raw.githubusercontent.com/techiescamp/kubeadm-scripts/main/manifests/metrics-server.yaml + +# Install loxilb +kubectl apply -f /vagrant/yaml/kube-loxilb.yml +kubectl apply -f /vagrant/yaml/loxilb-peer.yml diff --git a/cicd/k8s-calico-ipvs3-ha/node_scripts/worker.sh b/cicd/k8s-calico-ipvs3-ha/node_scripts/worker.sh new file mode 100644 index 000000000..a5754170b --- /dev/null +++ b/cicd/k8s-calico-ipvs3-ha/node_scripts/worker.sh @@ -0,0 +1,18 @@ +#!/bin/bash +# +# Setup for Node servers + +set -euxo pipefail + +config_path="/vagrant/configs" + +/bin/bash $config_path/join.sh -v + +sudo -i -u vagrant bash << EOF +whoami +mkdir -p /home/vagrant/.kube +sudo cp -i $config_path/config /home/vagrant/.kube/ +sudo chown 1000:1000 /home/vagrant/.kube/config +NODENAME=$(hostname -s) +kubectl label node $(hostname -s) node-role.kubernetes.io/worker=worker +EOF diff --git a/cicd/k8s-calico-ipvs3-ha/rmconfig.sh b/cicd/k8s-calico-ipvs3-ha/rmconfig.sh new file mode 100755 index 000000000..4c990e5e0 --- /dev/null +++ b/cicd/k8s-calico-ipvs3-ha/rmconfig.sh @@ -0,0 +1,7 @@ +#!/bin/bash +vagrant destroy -f worker2 +vagrant destroy -f worker1 +vagrant destroy -f master +vagrant destroy -f llb1 +vagrant destroy -f llb2 +vagrant destroy -f host diff --git 
a/cicd/k8s-calico-ipvs3-ha/validation.sh b/cicd/k8s-calico-ipvs3-ha/validation.sh new file mode 100755 index 000000000..0ddc714fe --- /dev/null +++ b/cicd/k8s-calico-ipvs3-ha/validation.sh @@ -0,0 +1,118 @@ +#!/bin/bash +source ../common.sh +echo k8s-calico-ipvs3-ha + +if [ "$1" ]; then + KUBECONFIG="$1" +fi + +# Set space as the delimiter +IFS=' ' + +for((i=0; i<120; i++)) +do + extLB=$(vagrant ssh master -c 'kubectl get svc' 2> /dev/null | grep "tcp-lb-default") + read -a strarr <<< "$extLB" + len=${#strarr[*]} + if [[ $((len)) -lt 6 ]]; then + echo "Can't find tcp-lb service" + sleep 1 + continue + fi + if [[ ${strarr[3]} != *"none"* ]]; then + extIP="$(cut -d'-' -f2 <<<${strarr[3]})" + break + fi + echo "No external LB allocated" + sleep 1 +done + +## Any routing updates ?? +#sleep 30 + +echo Service IP : $extIP +echo $extIP > extIP +echo -e "\nEnd Points List" +echo "******************************************************************************" +vagrant ssh master -c 'kubectl get endpoints -A' 2> /dev/null +echo "******************************************************************************" +echo -e "\nSVC List" +echo "******************************************************************************" +vagrant ssh master -c 'kubectl get svc' 2> /dev/null +echo "******************************************************************************" +echo -e "\nPod List" +echo "******************************************************************************" +vagrant ssh master -c 'kubectl get pods -A' 2> /dev/null +echo "******************************************************************************" +echo -e "\nLB List" +echo -e "\n---- LLB1 ----" +echo "******************************************************************************" +vagrant ssh llb1 -c 'sudo docker exec -it loxilb loxicmd get lb -o wide' 2> /dev/null +echo "******************************************************************************" +echo -e "\n---- LLB2 ----" +vagrant ssh llb2 -c 'sudo docker exec -it 
loxilb loxicmd get lb -o wide' 2> /dev/null +echo "******************************************************************************" +echo -e "\nEP List" +echo -e "\n---- LLB1 ----" +echo "******************************************************************************" +vagrant ssh llb1 -c 'sudo docker exec -it loxilb loxicmd get ep -o wide' 2> /dev/null +echo "******************************************************************************" +echo -e "\n---- LLB2 ----" +vagrant ssh llb2 -c 'sudo docker exec -it loxilb loxicmd get ep -o wide' 2> /dev/null +echo "******************************************************************************" +echo -e "\nTEST RESULTS" +echo "******************************************************************************" + +master="llb1" +backup="llb2" + +state=$(curl -sX 'GET' 'http://192.168.80.252:11111/netlox/v1/config/cistate/all' -H 'accept: application/json') + +if [[ $state == *"BACKUP"* ]]; then + master="llb2" + backup="llb1" +fi + +echo -e "\n MASTER\t: $master" +echo -e " BACKUP\t: $backup\n" + +vagrant ssh host -c 'sudo /vagrant/host_validation.sh' 2> /dev/null + +count=1 +sync=0 +while [[ $count -le 5 ]] ; do +echo -e "\nStatus at MASTER:$master\n" +vagrant ssh $master -c "sudo docker exec -it loxilb loxicmd get ct | grep est" 2> /dev/null + +echo -e "\nStatus at BACKUP:$backup\n" +vagrant ssh $backup -c "sudo docker exec -it loxilb loxicmd get ct | grep est" 2> /dev/null + +nres1=$(curl -sX 'GET' 'http://192.168.80.252:11111/netlox/v1/config/conntrack/all' -H 'accept: application/json' | grep -ow "\"conntrackState\":\"est\"" | wc -l) +nres2=$(curl -sX 'GET' 'http://192.168.80.253:11111/netlox/v1/config/conntrack/all' -H 'accept: application/json' | grep -ow "\"conntrackState\":\"est\"" | wc -l) + +if [[ $nres1 == $nres2 ]]; then + echo -e "\nConnections sync successful!!!\n" + sync=1 + break; +fi +echo -e "\nConnections sync pending.. 
Let's wait a little more..\n" +count=$(( $count + 1 )) +sleep 2 +done + +if [[ $sync == 0 ]]; then + echo -e "\nConnection Sync failed\n" + vagrant ssh host -c 'sudo pkill iperf; sudo pkill sctp_darn; sudo rm -rf *.pipe *.out' + exit 1 +fi + +echo "Restarting MASTER:$master.." +vagrant ssh $master -c 'sudo docker restart loxilb' 2> /dev/null + +sleep 10 + +vagrant ssh host -c 'sudo /vagrant/host_validation2.sh' 2> /dev/null + +sudo rm extIP + +exit $code diff --git a/cicd/k8s-calico-ipvs3-ha/yaml/kube-loxilb.yml b/cicd/k8s-calico-ipvs3-ha/yaml/kube-loxilb.yml new file mode 100644 index 000000000..d64338f4c --- /dev/null +++ b/cicd/k8s-calico-ipvs3-ha/yaml/kube-loxilb.yml @@ -0,0 +1,135 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: kube-loxilb + namespace: kube-system +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: kube-loxilb +rules: + - apiGroups: + - "" + resources: + - nodes + verbs: + - get + - watch + - list + - patch + - apiGroups: + - "" + resources: + - pods + verbs: + - get + - watch + - list + - patch + - apiGroups: + - "" + resources: + - endpoints + - services + - services/status + verbs: + - get + - watch + - list + - patch + - update + - apiGroups: + - discovery.k8s.io + resources: + - endpointslices + verbs: + - get + - watch + - list + - apiGroups: + - authentication.k8s.io + resources: + - tokenreviews + verbs: + - create + - apiGroups: + - authorization.k8s.io + resources: + - subjectaccessreviews + verbs: + - create +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: kube-loxilb +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: kube-loxilb +subjects: + - kind: ServiceAccount + name: kube-loxilb + namespace: kube-system +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: kube-loxilb + namespace: kube-system + labels: + app: loxilb +spec: + replicas: 1 + selector: + matchLabels: + app: loxilb + template: + metadata: + 
labels: + app: loxilb + spec: + hostNetwork: true + tolerations: + - effect: NoSchedule + operator: Exists + # Mark the pod as a critical add-on for rescheduling. + - key: CriticalAddonsOnly + operator: Exists + - effect: NoExecute + operator: Exists + priorityClassName: system-node-critical + serviceAccountName: kube-loxilb + terminationGracePeriodSeconds: 0 + containers: + - name: kube-loxilb + image: ghcr.io/loxilb-io/kube-loxilb:debug + imagePullPolicy: Always + command: + - /bin/kube-loxilb + args: + - --loxiURL=http://192.168.80.252:11111,http://192.168.80.253:11111 + - --externalCIDR=20.20.20.1/32 + #- --externalSecondaryCIDRs=124.124.124.1/24,125.125.125.1/24 + #- --monitor + - --setBGP=64511 + - --extBGPPeers=192.168.90.9:64512 + - --setRoles=0.0.0.0 + - --listenBGPPort=1791 #Mandatory to mention if running with Calico CNI + #- --monitor + #- --setBGP + - --setLBMode=2 + #- --config=/opt/loxilb/agent/kube-loxilb.conf + resources: + requests: + cpu: "100m" + memory: "50Mi" + limits: + cpu: "100m" + memory: "50Mi" + securityContext: + privileged: true + capabilities: + add: ["NET_ADMIN", "NET_RAW"] diff --git a/cicd/k8s-calico-ipvs3-ha/yaml/kubeadm-config.yaml b/cicd/k8s-calico-ipvs3-ha/yaml/kubeadm-config.yaml new file mode 100644 index 000000000..31afe601c --- /dev/null +++ b/cicd/k8s-calico-ipvs3-ha/yaml/kubeadm-config.yaml @@ -0,0 +1,69 @@ +apiVersion: kubeadm.k8s.io/v1beta3 +bootstrapTokens: +- groups: + - system:bootstrappers:kubeadm:default-node-token + ttl: 24h0m0s + usages: + - signing + - authentication +kind: InitConfiguration +localAPIEndpoint: + advertiseAddress: 192.168.80.250 + bindPort: 6443 +nodeRegistration: + imagePullPolicy: IfNotPresent + name: master + taints: null +--- +apiVersion: kubeadm.k8s.io/v1beta3 +certificatesDir: /etc/kubernetes/pki +kind: ClusterConfiguration +apiServer: + timeoutForControlPlane: 4m0s + certSANs: + - 192.168.80.250 +controlPlaneEndpoint: 192.168.80.250:6443 +clusterName: kubernetes +controllerManager: {} 
+dns: {} +etcd: + local: + dataDir: /var/lib/etcd +imageRepository: registry.k8s.io +kubernetesVersion: v1.27.5 +networking: + dnsDomain: cluster.local + podSubnet: 172.16.1.0/16 + serviceSubnet: 172.17.1.0/18 +scheduler: {} +--- +apiVersion: kubeproxy.config.k8s.io/v1alpha1 +bindAddress: 0.0.0.0 +clientConnection: + acceptContentTypes: "" + burst: 10 + contentType: application/vnd.kubernetes.protobuf + kubeconfig: /var/lib/kube-proxy/kubeconfig.conf + qps: 5 +clusterCIDR: "" +configSyncPeriod: 15m0s +#featureGates: "SupportIPVSProxyMode=true" +mode: ipvs +enableProfiling: false +healthzBindAddress: 0.0.0.0:10256 +hostnameOverride: "" +iptables: + masqueradeAll: false + masqueradeBit: 14 + minSyncPeriod: 0s + syncPeriod: 30s +ipvs: + excludeCIDRs: null + minSyncPeriod: 0s + scheduler: "" + syncPeriod: 30s +kind: KubeProxyConfiguration +metricsBindAddress: 127.0.0.1:10249 +nodePortAddresses: null +oomScoreAdj: -999 +portRange: "" diff --git a/cicd/k8s-calico-ipvs3-ha/yaml/loxilb-peer.yml b/cicd/k8s-calico-ipvs3-ha/yaml/loxilb-peer.yml new file mode 100644 index 000000000..023f04f21 --- /dev/null +++ b/cicd/k8s-calico-ipvs3-ha/yaml/loxilb-peer.yml @@ -0,0 +1,75 @@ +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: loxilb-peer + namespace: kube-system +spec: + selector: + matchLabels: + app: loxilb-peer-app + template: + metadata: + name: loxilb-peer + labels: + app: loxilb-peer-app + spec: + hostNetwork: true + dnsPolicy: ClusterFirstWithHostNet + nodeSelector: + kubernetes.io/os: linux + tolerations: + # Make sure loxilb gets scheduled on all nodes. + - effect: NoSchedule + operator: Exists + # Mark the pod as a critical add-on for rescheduling. 
+ - key: CriticalAddonsOnly + operator: Exists + - effect: NoExecute + operator: Exists +# affinity: +# nodeAffinity: +# requiredDuringSchedulingIgnoredDuringExecution: +# nodeSelectorTerms: +# - matchExpressions: +# - key: "node-role.kubernetes.io/master" +# operator: DoesNotExist +# - key: "node-role.kubernetes.io/control-plane" +# operator: DoesNotExist + containers: + - name: loxilb-peer-app + image: "ghcr.io/loxilb-io/loxilb:latest" + command: [ "/root/loxilb-io/loxilb/loxilb", "--peer" ] + ports: + - containerPort: 11111 + - containerPort: 1791 + - containerPort: 50051 + securityContext: + privileged: true + capabilities: + add: + - SYS_ADMIN +--- +apiVersion: v1 +kind: Service +metadata: + name: loxilb-peer-service + namespace: kube-system +spec: + clusterIP: None + selector: + app: loxilb-peer-app + ports: + - name: loxilb-peer-app + port: 11111 + targetPort: 11111 + protocol: TCP + - name: loxilb-peer-bgp + port: 1791 + targetPort: 1791 + protocol: TCP + - name: loxilb-peer-gobgp + port: 50051 + targetPort: 50051 + protocol: TCP + + diff --git a/cicd/k8s-calico-ipvs3-ha/yaml/sctp_default.yml b/cicd/k8s-calico-ipvs3-ha/yaml/sctp_default.yml new file mode 100644 index 000000000..44de88b52 --- /dev/null +++ b/cicd/k8s-calico-ipvs3-ha/yaml/sctp_default.yml @@ -0,0 +1,33 @@ +apiVersion: v1 +kind: Service +metadata: + name: sctp-lb-default + annotations: + loxilb.io/lbmode: "default" + loxilb.io/liveness: "yes" +spec: + externalTrafficPolicy: Local + loadBalancerClass: loxilb.io/loxilb + selector: + what: sctp-default-test + ports: + - port: 56005 + protocol: SCTP + targetPort: 9999 + type: LoadBalancer +--- +apiVersion: v1 +kind: Pod +metadata: + name: sctp-default-test + labels: + what: sctp-default-test +spec: + containers: + - name: sctp-default-test + image: loxilbio/sctp-darn:latest + imagePullPolicy: Always + #command: ["/bin/sh", "-ec", "while :; do echo '.'; sleep 6 ; done"] + command: ["sctp_darn","-H", "0.0.0.0","-P", "9999", "-l"] + ports: + - 
containerPort: 9999 diff --git a/cicd/k8s-calico-ipvs3-ha/yaml/sctp_fullnat.yml b/cicd/k8s-calico-ipvs3-ha/yaml/sctp_fullnat.yml new file mode 100644 index 000000000..87beb9d08 --- /dev/null +++ b/cicd/k8s-calico-ipvs3-ha/yaml/sctp_fullnat.yml @@ -0,0 +1,33 @@ +apiVersion: v1 +kind: Service +metadata: + name: sctp-lb-fullnat + annotations: + loxilb.io/lbmode: "fullnat" + loxilb.io/liveness: "yes" +spec: + externalTrafficPolicy: Local + loadBalancerClass: loxilb.io/loxilb + selector: + what: sctp-fullnat-test + ports: + - port: 56004 + protocol: SCTP + targetPort: 9999 + type: LoadBalancer +--- +apiVersion: v1 +kind: Pod +metadata: + name: sctp-fullnat-test + labels: + what: sctp-fullnat-test +spec: + containers: + - name: sctp-fullnat-test + image: loxilbio/sctp-darn:latest + imagePullPolicy: Always + #command: ["/bin/sh", "-ec", "while :; do echo '.'; sleep 6 ; done"] + command: ["sctp_darn","-H", "0.0.0.0","-P", "9999", "-l"] + ports: + - containerPort: 9999 diff --git a/cicd/k8s-calico-ipvs3-ha/yaml/settings.yaml b/cicd/k8s-calico-ipvs3-ha/yaml/settings.yaml new file mode 100644 index 000000000..e5b02a60b --- /dev/null +++ b/cicd/k8s-calico-ipvs3-ha/yaml/settings.yaml @@ -0,0 +1,44 @@ +--- +# cluster_name is used to group the nodes in a folder within VirtualBox: +cluster_name: Kubernetes Cluster +# Uncomment to set environment variables for services such as crio and kubelet. +# For example, configure the cluster to pull images via a proxy. +# environment: | +# HTTP_PROXY=http://my-proxy:8000 +# HTTPS_PROXY=http://my-proxy:8000 +# NO_PROXY=127.0.0.1,localhost,master-node,node01,node02,node03 +# All IPs/CIDRs should be private and allowed in /etc/vbox/networks.conf. +network: + iloxilb_ip: 192.168.80.253 + oloxilb_ip: 192.168.90.253 + # Worker IPs are simply incremented from the control IP. 
+ control_ip: 192.168.80.250 + dns_servers: + - 8.8.8.8 + - 1.1.1.1 + pod_cidr: 172.16.1.0/16 + service_cidr: 172.17.1.0/18 +nodes: + control: + cpu: 2 + memory: 4096 + workers: + count: 2 + cpu: 1 + memory: 2048 +# Mount additional shared folders from the host into each virtual machine. +# Note that the project directory is automatically mounted at /vagrant. +# shared_folders: +# - host_path: ../images +# vm_path: /vagrant/images +software: + loxilb: + box: + name: sysnet4admin/Ubuntu-k8s + version: 0.7.1 + cluster: + box: bento/ubuntu-22.04 + calico: 3.26.0 + # To skip the dashboard installation, set its version to an empty value or comment it out: + kubernetes: 1.27.1-00 + os: xUbuntu_22.04 diff --git a/cicd/k8s-calico-ipvs3-ha/yaml/tcp_default.yml b/cicd/k8s-calico-ipvs3-ha/yaml/tcp_default.yml new file mode 100644 index 000000000..c3def4e05 --- /dev/null +++ b/cicd/k8s-calico-ipvs3-ha/yaml/tcp_default.yml @@ -0,0 +1,32 @@ +apiVersion: v1 +kind: Service +metadata: + name: tcp-lb-default + annotations: + loxilb.io/liveness: "yes" + loxilb.io/lbmode: "default" +spec: + externalTrafficPolicy: Local + loadBalancerClass: loxilb.io/loxilb + selector: + what: tcp-default-test + ports: + - port: 56003 + targetPort: 5001 + type: LoadBalancer +--- +apiVersion: v1 +kind: Pod +metadata: + name: tcp-default-test + labels: + what: tcp-default-test +spec: + containers: + - name: tcp-default-test + image: eyes852/ubuntu-iperf-test:0.5 + command: + - iperf + - "-s" + ports: + - containerPort: 5001 diff --git a/cicd/k8s-calico-ipvs3-ha/yaml/tcp_fullnat.yml b/cicd/k8s-calico-ipvs3-ha/yaml/tcp_fullnat.yml new file mode 100644 index 000000000..d47eb27c3 --- /dev/null +++ b/cicd/k8s-calico-ipvs3-ha/yaml/tcp_fullnat.yml @@ -0,0 +1,32 @@ +apiVersion: v1 +kind: Service +metadata: + name: tcp-lb-fullnat + annotations: + loxilb.io/liveness: "yes" + loxilb.io/lbmode: "fullnat" +spec: + externalTrafficPolicy: Local + loadBalancerClass: loxilb.io/loxilb + selector: + what: tcp-fullnat-test 
+ ports: + - port: 56002 + targetPort: 5001 + type: LoadBalancer +--- +apiVersion: v1 +kind: Pod +metadata: + name: tcp-fullnat-test + labels: + what: tcp-fullnat-test +spec: + containers: + - name: tcp-fullnat-test + image: eyes852/ubuntu-iperf-test:0.5 + command: + - iperf + - "-s" + ports: + - containerPort: 5001 diff --git a/cicd/k8s-calico-ipvs3-ha/yaml/udp_fullnat.yml b/cicd/k8s-calico-ipvs3-ha/yaml/udp_fullnat.yml new file mode 100644 index 000000000..833187e73 --- /dev/null +++ b/cicd/k8s-calico-ipvs3-ha/yaml/udp_fullnat.yml @@ -0,0 +1,30 @@ +apiVersion: v1 +kind: Service +metadata: + name: udp-lb-onearm + annotations: + loxilb.io/liveness: "yes" + loxilb.io/lbmode: "onearm" +spec: + loadBalancerClass: loxilb.io/loxilb + externalTrafficPolicy: Local + selector: + what: udp-onearm-test + ports: + - port: 56003 + protocol: UDP + targetPort: 33333 + type: LoadBalancer +--- +apiVersion: v1 +kind: Pod +metadata: + name: udp-onearm-test + labels: + what: udp-onearm-test +spec: + containers: + - name: udp-onearm-test + image: ghcr.io/loxilb-io/udp-echo:latest + ports: + - containerPort: 33333 diff --git a/cicd/k8s-calico-ipvs3/configs/config b/cicd/k8s-calico-ipvs3/configs/config deleted file mode 100644 index 2b8856cc8..000000000 --- a/cicd/k8s-calico-ipvs3/configs/config +++ /dev/null @@ -1,19 +0,0 @@ -apiVersion: v1 -clusters: -- cluster: - certificate-authority-data: 
LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUMvakNDQWVhZ0F3SUJBZ0lCQURBTkJna3Foa2lHOXcwQkFRc0ZBREFWTVJNd0VRWURWUVFERXdwcmRXSmwKY201bGRHVnpNQjRYRFRJek1Ea3lNVEEzTVRJeU9Gb1hEVE16TURreE9EQTNNVEl5T0Zvd0ZURVRNQkVHQTFVRQpBeE1LYTNWaVpYSnVaWFJsY3pDQ0FTSXdEUVlKS29aSWh2Y05BUUVCQlFBRGdnRVBBRENDQVFvQ2dnRUJBTC85CmRuVTRaRHZIdFFHR21yQi9DYnZJa0JrdUNRVFp3OXN4OEJLWWhTL0JQV2Jic3RldE56cnB4dkU1T2pQdU5iVDEKTXhadXZBbEhYOWRIWHZDMzVuSzRySkRYSVc2LytKT1c0TmtVeGEzUFBYUTBNOE9ZWmwxbmxiRk8yWnVWeCswdwp5SkU1WmRMdlV3aGtnb0xTM1N3bTBCVVVOOG1nTGV0NTRSVW44ZzhabVpCbVpRSzFGSE5zZ0s3UE1LQ2FGRjBGCmYrb0R0QWd0Z1pHTkxGRHpGNmdHaERBMmlKTkkwcFJRb0FUMUtVUzJ0MUdLNHNYYnpHdy9TV2ZyTkh6ZjZuVHUKR2lZUU1vWElzMVhqanlja1ZGTll1cnFrVEZWa0RVcklCclhGRTNvUDhLRldmOE1ITTJWUDdLMGVVM0hUK2VZcQp0ZzVwa0RCS0VlbXJtcmRpOHdjQ0F3RUFBYU5aTUZjd0RnWURWUjBQQVFIL0JBUURBZ0trTUE4R0ExVWRFd0VCCi93UUZNQU1CQWY4d0hRWURWUjBPQkJZRUZBcmluMmhYZk1CTFdrdHc5YjNrTkw5NnpwQjBNQlVHQTFVZEVRUU8KTUF5Q0NtdDFZbVZ5Ym1WMFpYTXdEUVlKS29aSWh2Y05BUUVMQlFBRGdnRUJBRGZFVHFHRHZJQTJNaHl4Vys0aAo5MmxNNVRVVUw0MndrV1VXQVdxUzVrRUxlTDZXS0lsTVo2TFlPdjVaZlhmbnZ2Tytxc2NpUDY3dXlpZzBXZjFWCjBDa2RGWXJINWhuZFVXRjFmMU94WThQclJoRStJWkIzMlNtR0RRVkc1M1pDUzQ2QWw0eVdSVFNZa3lmcVpKRk0KT2pZeUJFcGFYN2hQRTcyVzkwMzUrdUM5Q3NhbEs1T2dQaGZrT2wzMmRBL01KVFBXUHMvZ3hDeWtsR1IvVnFKOQpzNTNlYUpNaUdoSnlKV1ZXS1hQV1ZsTkNnTGZxREZidEV0cC9EcVlVTDRuWDYzK3BHOUk0bER0aU0xdlBmM2FMCjBiMzVJbCtpb1lqNUZtUktwdWJ5SWIrM0JKa2lrSVFyclExcUZhSFQzYm5xc0ROWTB6a3VlOHl6VWE0NzkxKysKVU9RPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg== - server: https://192.168.80.250:6443 - name: kubernetes -contexts: -- context: - cluster: kubernetes - user: kubernetes-admin - name: kubernetes-admin@kubernetes -current-context: kubernetes-admin@kubernetes -kind: Config -preferences: {} -users: -- name: kubernetes-admin - user: - client-certificate-data: 
LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURJVENDQWdtZ0F3SUJBZ0lJTERZZTJxYkdkbFV3RFFZSktvWklodmNOQVFFTEJRQXdGVEVUTUJFR0ExVUUKQXhNS2EzVmlaWEp1WlhSbGN6QWVGdzB5TXpBNU1qRXdOekV5TWpoYUZ3MHlOREE1TWpBd056RXlNekphTURReApGekFWQmdOVkJBb1REbk41YzNSbGJUcHRZWE4wWlhKek1Sa3dGd1lEVlFRREV4QnJkV0psY201bGRHVnpMV0ZrCmJXbHVNSUlCSWpBTkJna3Foa2lHOXcwQkFRRUZBQU9DQVE4QU1JSUJDZ0tDQVFFQTJMNXZBRHJwVXlXZnRsOUoKS29DRXlxdEJzWFF4enc0encvbHRPYWVFTkd0d0JQeEV6ZGZsczFWSHdGelhwTGx3UE9raGg2eEhiUkVYYmtVWgpUblplOXdBTTB1Wk96d21leVI2MDN0Z2x2Tzhuekdzb2JoYVplajVGeUVuaWlweHJ3cTE4SUFmOHRaRGM5QlZPCko4NnVkVUdiR08zTlZOa1psREs0ZVljcDFEU1F0QUpiNnBBS09CeGM5U3hWcGlBSWh4dDhrVW05RFp0WDdSNW4KUUs0azdSNTZXeWdDcmh6emFFc0lBb0tmVkdrbEYrY3ZjUVVJT1ZNckZ2N1k1c21JTUpzWU1SWUJEdjBvOGJiSwpPWUQxL2NDaXB3QnhmS2dNWWtKVXd4eWdDNnlOSWh4OURFUXU3YnoxNXRpK2ZSeXRUOHVFc1F0Q3VhUkNpQXBUClVhWDJBd0lEQVFBQm8xWXdWREFPQmdOVkhROEJBZjhFQkFNQ0JhQXdFd1lEVlIwbEJBd3dDZ1lJS3dZQkJRVUgKQXdJd0RBWURWUjBUQVFIL0JBSXdBREFmQmdOVkhTTUVHREFXZ0JRSzRwOW9WM3pBUzFwTGNQVzk1RFMvZXM2UQpkREFOQmdrcWhraUc5dzBCQVFzRkFBT0NBUUVBTTIvRnpSdGF0bVBRM0g1QU9yazQvd0g4SnVBY0xwbjVLa0t1CnVtSndHOE1XWG9EaGY1OG9xajU2a2Z5SHJtNXBuOTcyRTVGSUYvU3c3MGJQRG1oVU9QaEtlZm9aRTlHTTd3dGcKeUcwa0dUVGJBMlZYVG8yUXdnT1VLcEMyQng2ZHpHR1NVL28zRXNqUzFXajFWclFuMzRVMnNDcG5VVEZDZDY0MQpoRHIwU0JqaSt6QkpUZDIzMS9vZG4yYlhWQVh1Z0g3K25xcXRlVk1WUmhSUHJIZjF6T0pwMHVjUWkyaDcvS2l6CmE0aUxRbkVGK2NqaVd1dkQzZy82UUhoL3BITzhZRjdYUEVnczRFMW8vM3J1WWFRL3k5ODgyWXVMVXNLeDY0elcKZlJITUxWUHdwenZLYS9PYzRMVlAzYWZDS3dpMkNjNGY5bmFQVlh5N2hBNTE3OGxsY3c9PQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg== - client-key-data: 
LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFb3dJQkFBS0NBUUVBMkw1dkFEcnBVeVdmdGw5SktvQ0V5cXRCc1hReHp3NHp3L2x0T2FlRU5HdHdCUHhFCnpkZmxzMVZId0Z6WHBMbHdQT2toaDZ4SGJSRVhia1VaVG5aZTl3QU0wdVpPendtZXlSNjAzdGdsdk84bnpHc28KYmhhWmVqNUZ5RW5paXB4cndxMThJQWY4dFpEYzlCVk9KODZ1ZFVHYkdPM05WTmtabERLNGVZY3AxRFNRdEFKYgo2cEFLT0J4YzlTeFZwaUFJaHh0OGtVbTlEWnRYN1I1blFLNGs3UjU2V3lnQ3JoenphRXNJQW9LZlZHa2xGK2N2CmNRVUlPVk1yRnY3WTVzbUlNSnNZTVJZQkR2MG84YmJLT1lEMS9jQ2lwd0J4ZktnTVlrSlV3eHlnQzZ5TkloeDkKREVRdTdiejE1dGkrZlJ5dFQ4dUVzUXRDdWFSQ2lBcFRVYVgyQXdJREFRQUJBb0lCQUI5aHkwNmNPUVpKNzBzSApsdDlwaGxDVjVVZ2RTOHp3QUh6Q2s1Y0NjempueVd0ZlgxTHlqSmFmcFdqcnlHbFlWRER0akdaSmdubi9QSVpCCnlQMDhWRm10RHk1R0hHa0x1b3JpUUV2M3ZkY1NSWS9xZ0FzWWlKWlRoK1VORlpqK052Z1VDR0hXNWpjZngyNHIKMjRvVitkcU9hamQ4enZzWENBWVdKZkdxcVZySkVXR3RTTUd5V25YQlRNUTFhcy93K2FQK3VTSHRuNkpPR1Z3Twp1MGdXZndYL2c4REQvVWRLYUNUN2QxMDkrakxUamJWMUUvbG5tMy8wVUIyTkpCc2dUYllwT0pUZy9lcW0vN2NICnZCWVJyMHVsK2JPUURTVEpsKzVXc1lXc2JDVVBPSi84NVBDT1JjQkx6V2QvNlpsdWNMaHJhN3FvTUxXSml4dWYKRHorYUdjRUNnWUVBNVRrK2ZGL0hlcW1QSWEyMlBrR0YvTEdJNjlQWENvZGo3Q1UvZlYrcy9YeUJFUklxd1RJTgpXaUd0cFB1YkNGN25HY0FSZ1AxenltdXZGNHQ4N3RnV3JYbEcyRURtVE9ab0d1d1ZjL2tITG9uMU5iUy9TS01XCk9NUEpWdXV2R1hIUytlZE53UU1YeDZGU24ycVZSajNFU3UxTUwyYTVzRjY4NXI3ZXpjazZCT0VDZ1lFQThnLysKS1l1Q1J2dEJSZEJtVmRhNkc2dm9ZWUtJaU96WnJlMlZtb05DaFMxZW80WTgxOXNXakNkYmFpOGx3M080ZVFMRwo0cklyUTgzcWIwYWZ5ZklYSEhvNWNLSEZZdlpuZ1VZWTBuQ095UjJ5MXJBY0VtaDBsdVJiZXhENWhjNDdoQmJOCjJKNzYzTTFONGl3bjBHQ1U4bnMwdTduWnNpU3JnS2svOHF1bGMyTUNnWUJ6QVZxQmFraWFHVkpQc21rYUtTUEIKV1kxNXpJbzZwUjBVRU1nZTdnN01DMWh5S1N3OFRCRjQ1Q2JML05TV3lZNm5VNzgwZVhWTDNPVTNLZXZmcFprVApKaUxPUXhtSkFaeS96UzBpUjVJNzNrbWh6WEtmVGZ2TE5TZmlMVk5ucjcwTFZ1cWk2UnRZTkc5UXZsZndxdksrCktQcXRXWnZXaW1HNlg0OUhyaXNtNFFLQmdRRHJTOEo2N2VLSVR3bm03Q25kdk9tUnBtd3dCT3ZsZFo0dlpHZnQKRnpTZE1qbnhhTFpSK09TVS9sanRBY0dLbVQwelhCQ3FJaGpPUXlaa1hsbEt1Mm5kdzYxOEg3UTRlMTZubWRUZApFbzZwVHBZZC9IaDlLRTVVc3N0OStFVWszUkNiV0hObDR6dmxVQ2dwOTFRSVNTbmYvd1dHaWFUa2RxTERZVUlKCkNQSytFd0tCZ0ViNWRBZ05NUVBZM21tMmY4OUxqdi9rN04ycmd4
Z0ljM04yejJkQXZQUERvcmpzSmpWSm81MTkKY0MvTXJ6cVNuNFo0cG5pSFB4UFRBeGl3TjI1MEplY2daWmFzS0d1WVd3T3A5cmdwMDJmYXhLQ0dCWkdySFRMUgptOE9nQy9rUlg2NUZSVEFmVGF2aGZicURiL2VleUdOdnlhZjRBbi9TLyt0dTBLN09ib0hTCi0tLS0tRU5EIFJTQSBQUklWQVRFIEtFWS0tLS0tCg== diff --git a/cicd/k8s-calico-ipvs3/configs/join.sh b/cicd/k8s-calico-ipvs3/configs/join.sh deleted file mode 100755 index 811ef7888..000000000 --- a/cicd/k8s-calico-ipvs3/configs/join.sh +++ /dev/null @@ -1 +0,0 @@ -kubeadm join 192.168.80.250:6443 --token wcb2oj.gbbi7xe3tz3cldic --discovery-token-ca-cert-hash sha256:2477462be5ae294a594f5c948251bfc8b48f969cbed904f131bf46ee1fa4e13d