From 59148016b94ef64d7be857d82ea68b7bcef71d3b Mon Sep 17 00:00:00 2001 From: Trekkie Coder Date: Fri, 20 Sep 2024 08:24:28 +0900 Subject: [PATCH] gh-87 updated manifests and added scenario of k8s flannel incluster --- .../k8s-flannel-incluster-multus.yml | 36 +++ cicd/docker-k0s-lb/kube-loxilb.yml | 1 + cicd/docker-k3s-calico/kube-loxilb.yml | 1 + cicd/docker-k3s-cilium/kube-loxilb.yml | 1 + cicd/eks/kube-loxilb.yaml | 1 + cicd/k0s-incluster/kube-loxilb.yml | 1 + cicd/k0s-weave/kube-loxilb.yml | 1 + cicd/k3s-base-sanity/kube-loxilb.yml | 1 + cicd/k3s-calico-dual-stack/kube-loxilb.yml | 1 + cicd/k3s-calico-incluster/kube-loxilb.yml | 1 + .../kube-loxilb.yml | 1 + cicd/k3s-calico/kube-loxilb.yml | 1 + cicd/k3s-cilium-cluster/kube-loxilb.yml | 1 + cicd/k3s-cilium/kube-loxilb.yml | 1 + cicd/k3s-ext-ep/kube-loxilb.yml | 1 + .../kube-loxilb.yml | 1 + cicd/k3s-flannel-cluster/kube-loxilb.yml | 1 + cicd/k3s-flannel-incluster-l2/kube-loxilb.yml | 1 + cicd/k3s-flannel-incluster/kube-loxilb.yml | 1 + .../kube-loxilb.yml | 1 + cicd/k3s-flannel-multus/kube-loxilb.yml | 1 + cicd/k3s-flannel/kube-loxilb.yml | 1 + cicd/k3s-incluster/kube-loxilb.yml | 1 + .../kube-loxilb.yml | 1 + .../kube-loxilb.yml | 1 + cicd/k3s-rabbitmq-incluster/kube-loxilb.yml | 1 + .../manifests/kube-loxilb.yml | 1 + cicd/k3s-sctpmh-2/kube-loxilb.yml | 1 + cicd/k3s-sctpmh-seagull/kube-loxilb.yml | 1 + cicd/k3s-sctpmh/kube-loxilb.yml | 1 + .../yaml/kube-loxilb.yaml | 1 + cicd/k8s-calico-ipvs/yaml/kube-loxilb.yml | 1 + .../yaml/kube-loxilb.yml | 1 + cicd/k8s-calico-ipvs2/yaml/kube-loxilb.yml | 1 + cicd/k8s-calico-ipvs3-ha/yaml/kube-loxilb.yml | 1 + cicd/k8s-calico-ipvs3/yaml/kube-loxilb.yml | 1 + cicd/k8s-calico-ubuntu22/yaml/kube-loxilb.yml | 1 + cicd/k8s-calico/yaml/kube-loxilb.yml | 1 + cicd/k8s-flannel-incluster-multus/Vagrantfile | 81 ++++++ cicd/k8s-flannel-incluster-multus/config.sh | 67 +++++ .../multus/multus-daemonset.yml | 236 ++++++++++++++++++ .../multus/multus-macvlan.yml | 27 ++ .../multus/multus-pod-02.yml | 22 ++ .../multus/multus-pod.yml | 14 ++ .../multus/multus-service.yml | 16 ++ .../multus/multus-vlan.yml | 21 ++ .../node_scripts/common.sh | 93 +++++++ .../node_scripts/host.sh | 8 + .../node_scripts/loxilb.sh | 9 + .../node_scripts/master.sh | 69 +++++ .../node_scripts/worker.sh | 34 +++ cicd/k8s-flannel-incluster-multus/rmconfig.sh | 5 + .../validation.sh | 30 +++ .../yaml/kube-flannel.yml | 210 ++++++++++++++++ .../yaml/kube-loxilb.yaml | 186 ++++++++++++++ .../yaml/kubeadm-config.yaml | 70 ++++++ .../yaml/loxilb-localvip.yaml | 110 ++++++++ .../yaml/loxilb.yaml | 71 ++++++ .../yaml/sctp_fullnat.yml | 44 ++++ .../yaml/sctp_onearm.yml | 41 +++ .../yaml/settings.yaml | 45 ++++ .../yaml/tcp_fullnat.yml | 29 +++ .../yaml/tcp_onearm.yml | 30 +++ .../yaml/udp_fullnat.yml | 30 +++ .../yaml/udp_onearm.yml | 30 +++ cicd/k8s-nat64/kube-loxilb.yaml | 1 + cicd/microk8s-incluster/kube-loxilb.yml | 1 + 67 files changed, 1703 insertions(+) create mode 100644 .github/workflows/k8s-flannel-incluster-multus.yml create mode 100644 cicd/k8s-flannel-incluster-multus/Vagrantfile create mode 100755 cicd/k8s-flannel-incluster-multus/config.sh create mode 100644 cicd/k8s-flannel-incluster-multus/multus/multus-daemonset.yml create mode 100644 cicd/k8s-flannel-incluster-multus/multus/multus-macvlan.yml create mode 100644 cicd/k8s-flannel-incluster-multus/multus/multus-pod-02.yml create mode 100644 cicd/k8s-flannel-incluster-multus/multus/multus-pod.yml create mode 100644 cicd/k8s-flannel-incluster-multus/multus/multus-service.yml create 
mode 100644 cicd/k8s-flannel-incluster-multus/multus/multus-vlan.yml create mode 100755 cicd/k8s-flannel-incluster-multus/node_scripts/common.sh create mode 100755 cicd/k8s-flannel-incluster-multus/node_scripts/host.sh create mode 100755 cicd/k8s-flannel-incluster-multus/node_scripts/loxilb.sh create mode 100755 cicd/k8s-flannel-incluster-multus/node_scripts/master.sh create mode 100755 cicd/k8s-flannel-incluster-multus/node_scripts/worker.sh create mode 100755 cicd/k8s-flannel-incluster-multus/rmconfig.sh create mode 100755 cicd/k8s-flannel-incluster-multus/validation.sh create mode 100644 cicd/k8s-flannel-incluster-multus/yaml/kube-flannel.yml create mode 100644 cicd/k8s-flannel-incluster-multus/yaml/kube-loxilb.yaml create mode 100644 cicd/k8s-flannel-incluster-multus/yaml/kubeadm-config.yaml create mode 100644 cicd/k8s-flannel-incluster-multus/yaml/loxilb-localvip.yaml create mode 100644 cicd/k8s-flannel-incluster-multus/yaml/loxilb.yaml create mode 100644 cicd/k8s-flannel-incluster-multus/yaml/sctp_fullnat.yml create mode 100644 cicd/k8s-flannel-incluster-multus/yaml/sctp_onearm.yml create mode 100644 cicd/k8s-flannel-incluster-multus/yaml/settings.yaml create mode 100644 cicd/k8s-flannel-incluster-multus/yaml/tcp_fullnat.yml create mode 100644 cicd/k8s-flannel-incluster-multus/yaml/tcp_onearm.yml create mode 100644 cicd/k8s-flannel-incluster-multus/yaml/udp_fullnat.yml create mode 100644 cicd/k8s-flannel-incluster-multus/yaml/udp_onearm.yml diff --git a/.github/workflows/k8s-flannel-incluster-multus.yml b/.github/workflows/k8s-flannel-incluster-multus.yml new file mode 100644 index 000000000..154e800c0 --- /dev/null +++ b/.github/workflows/k8s-flannel-incluster-multus.yml @@ -0,0 +1,36 @@ +name: K8s-Flannel-Incluster-Multus-Sanity-CI +on: + # schedule: + # Runs "At 11:00 UTC every day-of-week" + #- cron: '0 11 * * *' + workflow_dispatch: + inputs: + testName: + description: 'Test Run-Name' + required: true + default: 'k8s-flannel-incluster-multus' +jobs: + test-runner: + name: k8s-flannel-incluster-multus-sanity + runs-on: [self-hosted, large] + if: github.repository == 'loxilb-io/loxilb' + && github.event.inputs.tagName == '' + steps: + - name: Checkout + uses: actions/checkout@v2 + with: + submodules: recursive + + - name: Run the test + run: | + cd cicd/k8s-flannel-incluster-multus + ./config.sh + ./validation.sh + cd - + + - name: Clean test-bed + if: success() || failure() + run: | + cd cicd/k8s-flannel-incluster-multus || true + ./rmconfig.sh + cd - diff --git a/cicd/docker-k0s-lb/kube-loxilb.yml b/cicd/docker-k0s-lb/kube-loxilb.yml index ae9d4e678..a225785cc 100644 --- a/cicd/docker-k0s-lb/kube-loxilb.yml +++ b/cicd/docker-k0s-lb/kube-loxilb.yml @@ -33,6 +33,7 @@ rules: resources: - endpoints - services + - namespaces - services/status verbs: - get diff --git a/cicd/docker-k3s-calico/kube-loxilb.yml b/cicd/docker-k3s-calico/kube-loxilb.yml index e49c3aec7..4fddc08f4 100644 --- a/cicd/docker-k3s-calico/kube-loxilb.yml +++ b/cicd/docker-k3s-calico/kube-loxilb.yml @@ -33,6 +33,7 @@ rules: resources: - endpoints - services + - namespaces - services/status verbs: - get diff --git a/cicd/docker-k3s-cilium/kube-loxilb.yml b/cicd/docker-k3s-cilium/kube-loxilb.yml index e49c3aec7..4fddc08f4 100644 --- a/cicd/docker-k3s-cilium/kube-loxilb.yml +++ b/cicd/docker-k3s-cilium/kube-loxilb.yml @@ -33,6 +33,7 @@ rules: resources: - endpoints - services + - namespaces - services/status verbs: - get diff --git a/cicd/eks/kube-loxilb.yaml b/cicd/eks/kube-loxilb.yaml index 3c60aed6c..92c5a674e 
100644 --- a/cicd/eks/kube-loxilb.yaml +++ b/cicd/eks/kube-loxilb.yaml @@ -33,6 +33,7 @@ rules: resources: - endpoints - services + - namespaces - services/status verbs: - get diff --git a/cicd/k0s-incluster/kube-loxilb.yml b/cicd/k0s-incluster/kube-loxilb.yml index ef86b0f16..99732f870 100644 --- a/cicd/k0s-incluster/kube-loxilb.yml +++ b/cicd/k0s-incluster/kube-loxilb.yml @@ -33,6 +33,7 @@ rules: resources: - endpoints - services + - namespaces - services/status verbs: - get diff --git a/cicd/k0s-weave/kube-loxilb.yml b/cicd/k0s-weave/kube-loxilb.yml index 4a0b9744f..e01934215 100644 --- a/cicd/k0s-weave/kube-loxilb.yml +++ b/cicd/k0s-weave/kube-loxilb.yml @@ -33,6 +33,7 @@ rules: resources: - endpoints - services + - namespaces - services/status verbs: - get diff --git a/cicd/k3s-base-sanity/kube-loxilb.yml b/cicd/k3s-base-sanity/kube-loxilb.yml index 83b695f7b..d74551814 100644 --- a/cicd/k3s-base-sanity/kube-loxilb.yml +++ b/cicd/k3s-base-sanity/kube-loxilb.yml @@ -33,6 +33,7 @@ rules: resources: - endpoints - services + - namespaces - services/status verbs: - get diff --git a/cicd/k3s-calico-dual-stack/kube-loxilb.yml b/cicd/k3s-calico-dual-stack/kube-loxilb.yml index b9fe885c9..6fdc7a51d 100644 --- a/cicd/k3s-calico-dual-stack/kube-loxilb.yml +++ b/cicd/k3s-calico-dual-stack/kube-loxilb.yml @@ -33,6 +33,7 @@ rules: resources: - endpoints - services + - namespaces - services/status verbs: - get diff --git a/cicd/k3s-calico-incluster/kube-loxilb.yml b/cicd/k3s-calico-incluster/kube-loxilb.yml index ed875f3b1..2c9d2dd36 100644 --- a/cicd/k3s-calico-incluster/kube-loxilb.yml +++ b/cicd/k3s-calico-incluster/kube-loxilb.yml @@ -33,6 +33,7 @@ rules: resources: - endpoints - services + - namespaces - services/status verbs: - get diff --git a/cicd/k3s-calico-single-node-incluster/kube-loxilb.yml b/cicd/k3s-calico-single-node-incluster/kube-loxilb.yml index bec0145d5..efb2e4580 100644 --- a/cicd/k3s-calico-single-node-incluster/kube-loxilb.yml +++ b/cicd/k3s-calico-single-node-incluster/kube-loxilb.yml @@ -33,6 +33,7 @@ rules: resources: - endpoints - services + - namespaces - services/status verbs: - get diff --git a/cicd/k3s-calico/kube-loxilb.yml b/cicd/k3s-calico/kube-loxilb.yml index 4a0b9744f..e01934215 100644 --- a/cicd/k3s-calico/kube-loxilb.yml +++ b/cicd/k3s-calico/kube-loxilb.yml @@ -33,6 +33,7 @@ rules: resources: - endpoints - services + - namespaces - services/status verbs: - get diff --git a/cicd/k3s-cilium-cluster/kube-loxilb.yml b/cicd/k3s-cilium-cluster/kube-loxilb.yml index 8dd66ed3e..00879295c 100644 --- a/cicd/k3s-cilium-cluster/kube-loxilb.yml +++ b/cicd/k3s-cilium-cluster/kube-loxilb.yml @@ -33,6 +33,7 @@ rules: resources: - endpoints - services + - namespaces - services/status verbs: - get diff --git a/cicd/k3s-cilium/kube-loxilb.yml b/cicd/k3s-cilium/kube-loxilb.yml index 4a0b9744f..e01934215 100644 --- a/cicd/k3s-cilium/kube-loxilb.yml +++ b/cicd/k3s-cilium/kube-loxilb.yml @@ -33,6 +33,7 @@ rules: resources: - endpoints - services + - namespaces - services/status verbs: - get diff --git a/cicd/k3s-ext-ep/kube-loxilb.yml b/cicd/k3s-ext-ep/kube-loxilb.yml index 85670b3a6..08278d69c 100644 --- a/cicd/k3s-ext-ep/kube-loxilb.yml +++ b/cicd/k3s-ext-ep/kube-loxilb.yml @@ -33,6 +33,7 @@ rules: resources: - endpoints - services + - namespaces - services/status verbs: - get diff --git a/cicd/k3s-flannel-cluster-ipvs-compat/kube-loxilb.yml b/cicd/k3s-flannel-cluster-ipvs-compat/kube-loxilb.yml index 2e36bf345..f5bda7eea 100644 --- 
a/cicd/k3s-flannel-cluster-ipvs-compat/kube-loxilb.yml +++ b/cicd/k3s-flannel-cluster-ipvs-compat/kube-loxilb.yml @@ -33,6 +33,7 @@ rules: resources: - endpoints - services + - namespaces - services/status verbs: - get diff --git a/cicd/k3s-flannel-cluster/kube-loxilb.yml b/cicd/k3s-flannel-cluster/kube-loxilb.yml index 8dd66ed3e..00879295c 100644 --- a/cicd/k3s-flannel-cluster/kube-loxilb.yml +++ b/cicd/k3s-flannel-cluster/kube-loxilb.yml @@ -33,6 +33,7 @@ rules: resources: - endpoints - services + - namespaces - services/status verbs: - get diff --git a/cicd/k3s-flannel-incluster-l2/kube-loxilb.yml b/cicd/k3s-flannel-incluster-l2/kube-loxilb.yml index 415d2c8a5..b7dc4df2c 100644 --- a/cicd/k3s-flannel-incluster-l2/kube-loxilb.yml +++ b/cicd/k3s-flannel-incluster-l2/kube-loxilb.yml @@ -33,6 +33,7 @@ rules: resources: - endpoints - services + - namespaces - services/status verbs: - get diff --git a/cicd/k3s-flannel-incluster/kube-loxilb.yml b/cicd/k3s-flannel-incluster/kube-loxilb.yml index b18da9f0b..8f57b2e55 100644 --- a/cicd/k3s-flannel-incluster/kube-loxilb.yml +++ b/cicd/k3s-flannel-incluster/kube-loxilb.yml @@ -33,6 +33,7 @@ rules: resources: - endpoints - services + - namespaces - services/status verbs: - get diff --git a/cicd/k3s-flannel-loxilb-ingress/kube-loxilb.yml b/cicd/k3s-flannel-loxilb-ingress/kube-loxilb.yml index 1b2f760f8..e9dd3f9f5 100644 --- a/cicd/k3s-flannel-loxilb-ingress/kube-loxilb.yml +++ b/cicd/k3s-flannel-loxilb-ingress/kube-loxilb.yml @@ -33,6 +33,7 @@ rules: resources: - endpoints - services + - namespaces - services/status verbs: - get diff --git a/cicd/k3s-flannel-multus/kube-loxilb.yml b/cicd/k3s-flannel-multus/kube-loxilb.yml index b1bf14968..51a9b489e 100644 --- a/cicd/k3s-flannel-multus/kube-loxilb.yml +++ b/cicd/k3s-flannel-multus/kube-loxilb.yml @@ -33,6 +33,7 @@ rules: resources: - endpoints - services + - namespaces - services/status verbs: - get diff --git a/cicd/k3s-flannel/kube-loxilb.yml b/cicd/k3s-flannel/kube-loxilb.yml index 83b695f7b..d74551814 100644 --- a/cicd/k3s-flannel/kube-loxilb.yml +++ b/cicd/k3s-flannel/kube-loxilb.yml @@ -33,6 +33,7 @@ rules: resources: - endpoints - services + - namespaces - services/status verbs: - get diff --git a/cicd/k3s-incluster/kube-loxilb.yml b/cicd/k3s-incluster/kube-loxilb.yml index ef86b0f16..99732f870 100644 --- a/cicd/k3s-incluster/kube-loxilb.yml +++ b/cicd/k3s-incluster/kube-loxilb.yml @@ -33,6 +33,7 @@ rules: resources: - endpoints - services + - namespaces - services/status verbs: - get diff --git a/cicd/k3s-multi-master-service-proxy-calico/kube-loxilb.yml b/cicd/k3s-multi-master-service-proxy-calico/kube-loxilb.yml index f57f3c44b..401225522 100644 --- a/cicd/k3s-multi-master-service-proxy-calico/kube-loxilb.yml +++ b/cicd/k3s-multi-master-service-proxy-calico/kube-loxilb.yml @@ -33,6 +33,7 @@ rules: resources: - endpoints - services + - namespaces - services/status verbs: - get diff --git a/cicd/k3s-multi-master-service-proxy/kube-loxilb.yml b/cicd/k3s-multi-master-service-proxy/kube-loxilb.yml index f57f3c44b..401225522 100644 --- a/cicd/k3s-multi-master-service-proxy/kube-loxilb.yml +++ b/cicd/k3s-multi-master-service-proxy/kube-loxilb.yml @@ -33,6 +33,7 @@ rules: resources: - endpoints - services + - namespaces - services/status verbs: - get diff --git a/cicd/k3s-rabbitmq-incluster/kube-loxilb.yml b/cicd/k3s-rabbitmq-incluster/kube-loxilb.yml index 414e820b3..2149b4bc7 100644 --- a/cicd/k3s-rabbitmq-incluster/kube-loxilb.yml +++ b/cicd/k3s-rabbitmq-incluster/kube-loxilb.yml @@ -33,6 +33,7 
@@ rules: resources: - endpoints - services + - namespaces - services/status verbs: - get diff --git a/cicd/k3s-rabbitmq-incluster/manifests/kube-loxilb.yml b/cicd/k3s-rabbitmq-incluster/manifests/kube-loxilb.yml index 2e36bf345..f5bda7eea 100644 --- a/cicd/k3s-rabbitmq-incluster/manifests/kube-loxilb.yml +++ b/cicd/k3s-rabbitmq-incluster/manifests/kube-loxilb.yml @@ -33,6 +33,7 @@ rules: resources: - endpoints - services + - namespaces - services/status verbs: - get diff --git a/cicd/k3s-sctpmh-2/kube-loxilb.yml b/cicd/k3s-sctpmh-2/kube-loxilb.yml index f727d1a2a..421ca81ad 100644 --- a/cicd/k3s-sctpmh-2/kube-loxilb.yml +++ b/cicd/k3s-sctpmh-2/kube-loxilb.yml @@ -33,6 +33,7 @@ rules: resources: - endpoints - services + - namespaces - services/status verbs: - get diff --git a/cicd/k3s-sctpmh-seagull/kube-loxilb.yml b/cicd/k3s-sctpmh-seagull/kube-loxilb.yml index f06f18e0b..137b6dcbe 100644 --- a/cicd/k3s-sctpmh-seagull/kube-loxilb.yml +++ b/cicd/k3s-sctpmh-seagull/kube-loxilb.yml @@ -33,6 +33,7 @@ rules: resources: - endpoints - services + - namespaces - services/status verbs: - get diff --git a/cicd/k3s-sctpmh/kube-loxilb.yml b/cicd/k3s-sctpmh/kube-loxilb.yml index 2d407e76f..77a5d683c 100644 --- a/cicd/k3s-sctpmh/kube-loxilb.yml +++ b/cicd/k3s-sctpmh/kube-loxilb.yml @@ -33,6 +33,7 @@ rules: resources: - endpoints - services + - namespaces - services/status verbs: - get diff --git a/cicd/k8s-calico-incluster/yaml/kube-loxilb.yaml b/cicd/k8s-calico-incluster/yaml/kube-loxilb.yaml index b70b66ffb..1ca024293 100644 --- a/cicd/k8s-calico-incluster/yaml/kube-loxilb.yaml +++ b/cicd/k8s-calico-incluster/yaml/kube-loxilb.yaml @@ -33,6 +33,7 @@ rules: resources: - endpoints - services + - namespaces - services/status verbs: - get diff --git a/cicd/k8s-calico-ipvs/yaml/kube-loxilb.yml b/cicd/k8s-calico-ipvs/yaml/kube-loxilb.yml index f9da5df2f..72e1d1ce7 100644 --- a/cicd/k8s-calico-ipvs/yaml/kube-loxilb.yml +++ b/cicd/k8s-calico-ipvs/yaml/kube-loxilb.yml @@ -33,6 +33,7 @@ rules: resources: - endpoints - services + - namespaces - services/status verbs: - get diff --git a/cicd/k8s-calico-ipvs2-ha-ka-sync/yaml/kube-loxilb.yml b/cicd/k8s-calico-ipvs2-ha-ka-sync/yaml/kube-loxilb.yml index c14902abb..4285bde5c 100644 --- a/cicd/k8s-calico-ipvs2-ha-ka-sync/yaml/kube-loxilb.yml +++ b/cicd/k8s-calico-ipvs2-ha-ka-sync/yaml/kube-loxilb.yml @@ -33,6 +33,7 @@ rules: resources: - endpoints - services + - namespaces - services/status verbs: - get diff --git a/cicd/k8s-calico-ipvs2/yaml/kube-loxilb.yml b/cicd/k8s-calico-ipvs2/yaml/kube-loxilb.yml index 4155acc91..ba0c400a0 100644 --- a/cicd/k8s-calico-ipvs2/yaml/kube-loxilb.yml +++ b/cicd/k8s-calico-ipvs2/yaml/kube-loxilb.yml @@ -33,6 +33,7 @@ rules: resources: - endpoints - services + - namespaces - services/status verbs: - get diff --git a/cicd/k8s-calico-ipvs3-ha/yaml/kube-loxilb.yml b/cicd/k8s-calico-ipvs3-ha/yaml/kube-loxilb.yml index 54bdcb89f..8f7623ab0 100644 --- a/cicd/k8s-calico-ipvs3-ha/yaml/kube-loxilb.yml +++ b/cicd/k8s-calico-ipvs3-ha/yaml/kube-loxilb.yml @@ -33,6 +33,7 @@ rules: resources: - endpoints - services + - namespaces - services/status verbs: - get diff --git a/cicd/k8s-calico-ipvs3/yaml/kube-loxilb.yml b/cicd/k8s-calico-ipvs3/yaml/kube-loxilb.yml index 21933422e..bf600de94 100644 --- a/cicd/k8s-calico-ipvs3/yaml/kube-loxilb.yml +++ b/cicd/k8s-calico-ipvs3/yaml/kube-loxilb.yml @@ -33,6 +33,7 @@ rules: resources: - endpoints - services + - namespaces - services/status verbs: - get diff --git a/cicd/k8s-calico-ubuntu22/yaml/kube-loxilb.yml 
b/cicd/k8s-calico-ubuntu22/yaml/kube-loxilb.yml index 8dd66ed3e..00879295c 100644 --- a/cicd/k8s-calico-ubuntu22/yaml/kube-loxilb.yml +++ b/cicd/k8s-calico-ubuntu22/yaml/kube-loxilb.yml @@ -33,6 +33,7 @@ rules: resources: - endpoints - services + - namespaces - services/status verbs: - get diff --git a/cicd/k8s-calico/yaml/kube-loxilb.yml b/cicd/k8s-calico/yaml/kube-loxilb.yml index 8dd66ed3e..00879295c 100644 --- a/cicd/k8s-calico/yaml/kube-loxilb.yml +++ b/cicd/k8s-calico/yaml/kube-loxilb.yml @@ -33,6 +33,7 @@ rules: resources: - endpoints - services + - namespaces - services/status verbs: - get diff --git a/cicd/k8s-flannel-incluster-multus/Vagrantfile b/cicd/k8s-flannel-incluster-multus/Vagrantfile new file mode 100644 index 000000000..1c0c4d065 --- /dev/null +++ b/cicd/k8s-flannel-incluster-multus/Vagrantfile @@ -0,0 +1,81 @@ +# -*- mode: ruby -*- +# vi: set ft=ruby : + +require "yaml" +settings = YAML.load_file "yaml/settings.yaml" + +workers = settings["nodes"]["workers"]["count"] +loxilbs = (ENV['LOXILBS'] || "2").to_i + +Vagrant.configure("2") do |config| + + if Vagrant.has_plugin?("vagrant-vbguest") + config.vbguest.auto_update = false + end + config.vm.define "host" do |host| + host.vm.hostname = 'host1' + host.vm.box = settings["software"]["cluster"]["box"] + host.vm.network :private_network, ip: "192.168.80.9", :netmask => "255.255.255.0" + host.vm.network :private_network, ip: "124.124.124.9", :netmask => "255.255.255.0" + host.vm.provision :shell, :path => "node_scripts/host.sh" + host.vm.provider :virtualbox do |vbox| + vbox.customize ["modifyvm", :id, "--memory", 2048] + vbox.customize ["modifyvm", :id, "--cpus", 2] + vbox.customize ["modifyvm", :id, "--nicpromisc2", "allow-all"] + end + end + + config.vm.define "master" do |master| + master.vm.box = settings["software"]["cluster"]["box"] + master.vm.hostname = 'master' + master.vm.network :private_network, ip: settings["network"]["control_ip"], :netmask => "255.255.255.0" + master.vm.network :private_network, ip: "124.124.124.10", :netmask => "255.255.255.0" + master.vm.provision "shell", + env: { + "DNS_SERVERS" => settings["network"]["dns_servers"].join(" "), + "ENVIRONMENT" => settings["environment"], + "KUBERNETES_VERSION" => settings["software"]["kubernetes"], + "OS" => settings["software"]["os"] + }, + path: "node_scripts/common.sh" + master.vm.provision "shell", + env: { + "CALICO_VERSION" => settings["software"]["calico"], + "CONTROL_IP" => settings["network"]["control_ip"], + "POD_CIDR" => settings["network"]["pod_cidr"], + "SERVICE_CIDR" => settings["network"]["service_cidr"] + }, + path: "node_scripts/master.sh" + + master.vm.provider :virtualbox do |vbox| + vbox.customize ["modifyvm", :id, "--memory", 4096] + vbox.customize ["modifyvm", :id, "--cpus", 2] + vbox.customize ["modifyvm", :id, "--nicpromisc2", "allow-all"] + end + end + + (1..workers).each do |node_number| + config.vm.define "worker#{node_number}" do |worker| + worker.vm.box = settings["software"]["cluster"]["box"] + worker.vm.hostname = "worker#{node_number}" + ip = node_number + 200 + worker.vm.network :private_network, ip: "192.168.80.#{ip}", :netmask => "255.255.255.0" + worker.vm.network :private_network, ip: "124.124.124.#{ip}", :netmask => "255.255.255.0" + worker.vm.provision "shell", + env: { + "DNS_SERVERS" => settings["network"]["dns_servers"].join(" "), + "ENVIRONMENT" => settings["environment"], + "KUBERNETES_VERSION" => settings["software"]["kubernetes"], + "OS" => settings["software"]["os"] + }, + path: "node_scripts/common.sh" + 
worker.vm.provision "shell", path: "node_scripts/worker.sh" + + worker.vm.provider :virtualbox do |vbox| + vbox.customize ["modifyvm", :id, "--memory", 4096] + vbox.customize ["modifyvm", :id, "--cpus", 2] + vbox.customize ["modifyvm", :id, "--nicpromisc2", "allow-all"] + end + end + end +end diff --git a/cicd/k8s-flannel-incluster-multus/config.sh b/cicd/k8s-flannel-incluster-multus/config.sh new file mode 100755 index 000000000..5970158c4 --- /dev/null +++ b/cicd/k8s-flannel-incluster-multus/config.sh @@ -0,0 +1,67 @@ +#!/bin/bash +VMs=$(vagrant global-status | grep -i virtualbox) +while IFS= read -a VMs; do + read -a vm <<< "$VMs" + cd ${vm[4]} 2>&1>/dev/null + echo "Destroying ${vm[1]}" + vagrant destroy -f ${vm[1]} + cd - 2>&1>/dev/null +done <<< "$VMs" + +vagrant up + +for((i=1; i<=60; i++)) +do + fin=1 + pods=$(vagrant ssh master -c 'kubectl get pods -A' 2> /dev/null | grep -v "NAMESPACE") + + while IFS= read -a pods; do + read -a pod <<< "$pods" + if [[ ${pod[3]} != *"Running"* ]]; then + echo "${pod[1]} is not UP yet" + fin=0 + fi + done <<< "$pods" + if [ $fin == 1 ]; + then + break; + fi + echo "Will try after 10s" + sleep 10 +done + +sudo sysctl net.ipv4.conf.vboxnet1.arp_accept=1 + +#Create fullnat Service +#vagrant ssh master -c 'kubectl apply -f /vagrant/yaml/tcp_onearm.yml' 2> /dev/null +#vagrant ssh master -c 'kubectl apply -f /vagrant/yaml/udp_onearm.yml' 2> /dev/null +#vagrant ssh master -c 'kubectl apply -f /vagrant/yaml/sctp_onearm.yml' 2> /dev/null +#vagrant ssh master -c 'kubectl apply -f /vagrant/yaml/tcp_fullnat.yml' 2> /dev/null +#vagrant ssh master -c 'kubectl apply -f /vagrant/yaml/udp_fullnat.yml' 2> /dev/null +#vagrant ssh master -c 'kubectl apply -f /vagrant/yaml/sctp_fullnat.yml' 2> /dev/null + +for((i=1; i<=60; i++)) +do + fin=1 + pods=$(vagrant ssh master -c 'kubectl get pods -A' 2> /dev/null | grep -v "NAMESPACE") + + while IFS= read -a pods; do + read -a pod <<< "$pods" + if [[ ${pod[3]} != *"Running"* ]]; then + echo "${pod[1]} is not UP yet" + fin=0 + fi + done <<< "$pods" + if [ $fin == 1 ]; + then + echo "Cluster is ready" + break; + fi + echo "Will try after 10s" + sleep 10 +done + +if [[ $fin == 0 ]]; then + echo "Cluster is not ready" + exit 1 +fi diff --git a/cicd/k8s-flannel-incluster-multus/multus/multus-daemonset.yml b/cicd/k8s-flannel-incluster-multus/multus/multus-daemonset.yml new file mode 100644 index 000000000..40fa51932 --- /dev/null +++ b/cicd/k8s-flannel-incluster-multus/multus/multus-daemonset.yml @@ -0,0 +1,236 @@ +# Note: +# This deployment file is designed for 'quickstart' of multus, easy installation to test it, +# hence this deployment yaml does not care about following things intentionally. +# - various configuration options +# - minor deployment scenario +# - upgrade/update/uninstall scenario +# Multus team understand users deployment scenarios are diverse, hence we do not cover +# comprehensive deployment scenario. We expect that it is covered by each platform deployment. 
+--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: network-attachment-definitions.k8s.cni.cncf.io +spec: + group: k8s.cni.cncf.io + scope: Namespaced + names: + plural: network-attachment-definitions + singular: network-attachment-definition + kind: NetworkAttachmentDefinition + shortNames: + - net-attach-def + versions: + - name: v1 + served: true + storage: true + schema: + openAPIV3Schema: + description: 'NetworkAttachmentDefinition is a CRD schema specified by the Network Plumbing + Working Group to express the intent for attaching pods to one or more logical or physical + networks. More information available at: https://github.com/k8snetworkplumbingwg/multi-net-spec' + type: object + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this represen + tation of an object. Servers should convert recognized schemas to the + latest internal value, and may reject unrecognized values. More info: + https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: 'NetworkAttachmentDefinition spec defines the desired state of a network attachment' + type: object + properties: + config: + description: 'NetworkAttachmentDefinition config is a JSON-formatted CNI configuration' + type: string +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: multus +rules: + - apiGroups: ["k8s.cni.cncf.io"] + resources: + - '*' + verbs: + - '*' + - apiGroups: + - "" + resources: + - pods + - pods/status + verbs: + - get + - update + - apiGroups: + - "" + - events.k8s.io + resources: + - events + verbs: + - create + - patch + - update +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: multus +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: multus +subjects: +- kind: ServiceAccount + name: multus + namespace: kube-system +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: multus + namespace: kube-system +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: multus-cni-config + namespace: kube-system + labels: + tier: node + app: multus +data: + # NOTE: If you'd prefer to manually apply a configuration file, you may create one here. + # In the case you'd like to customize the Multus installation, you should change the arguments to the Multus pod + # change the "args" line below from + # - "--multus-conf-file=auto" + # to: + # "--multus-conf-file=/tmp/multus-conf/70-multus.conf" + # Additionally -- you should ensure that the name "70-multus.conf" is the alphabetically first name in the + # /etc/cni/net.d/ directory on each node, otherwise, it will not be used by the Kubelet. 
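If the manual configuration path described in the note above is used instead of "--multus-conf-file=auto", the chosen file name has to sort first in /etc/cni/net.d on every node, because the kubelet only honours the alphabetically first CNI config. A minimal sketch of that check follows; the file names are examples, not part of the patch.

# inspect the CNI config directory on the node
ls /etc/cni/net.d/
# e.g. 10-flannel.conflist  70-multus.conf  multus.d
# here 70-multus.conf sorts after the flannel config and would be ignored,
# so rename or copy it to something that sorts first:
sudo mv /etc/cni/net.d/70-multus.conf /etc/cni/net.d/00-multus.conf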
+ cni-conf.json: | + { + "name": "multus-cni-network", + "type": "multus", + "capabilities": { + "portMappings": true + }, + "delegates": [ + { + "cniVersion": "0.3.1", + "name": "default-cni-network", + "plugins": [ + { + "type": "flannel", + "name": "flannel.1", + "delegate": { + "isDefaultGateway": true, + "hairpinMode": true + } + }, + { + "type": "portmap", + "capabilities": { + "portMappings": true + } + } + ] + } + ], + "kubeconfig": "/etc/cni/net.d/multus.d/multus.kubeconfig" + } +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: kube-multus-ds + namespace: kube-system + labels: + tier: node + app: multus + name: multus +spec: + selector: + matchLabels: + name: multus + updateStrategy: + type: RollingUpdate + template: + metadata: + labels: + tier: node + app: multus + name: multus + spec: + hostNetwork: true + tolerations: + - operator: Exists + effect: NoSchedule + - operator: Exists + effect: NoExecute + serviceAccountName: multus + containers: + - name: kube-multus + image: ghcr.io/k8snetworkplumbingwg/multus-cni:snapshot + command: ["/thin_entrypoint"] + args: + - "--multus-conf-file=auto" + - "--multus-autoconfig-dir=/host/etc/cni/net.d" + - "--cni-conf-dir=/host/etc/cni/net.d" + resources: + requests: + cpu: "100m" + memory: "50Mi" + limits: + cpu: "100m" + memory: "50Mi" + securityContext: + privileged: true + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - name: cni + mountPath: /host/etc/cni/net.d + - name: cnibin + mountPath: /host/opt/cni/bin + - name: multus-cfg + mountPath: /tmp/multus-conf + initContainers: + - name: install-multus-binary + image: ghcr.io/k8snetworkplumbingwg/multus-cni:snapshot + command: ["/install_multus"] + args: + - "--type" + - "thin" + resources: + requests: + cpu: "10m" + memory: "15Mi" + securityContext: + privileged: true + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - name: cnibin + mountPath: /host/opt/cni/bin + mountPropagation: Bidirectional + terminationGracePeriodSeconds: 10 + volumes: + - name: cni + hostPath: + path: /etc/cni/net.d + - name: cnibin + hostPath: + path: /opt/cni/bin + - name: multus-cfg + configMap: + name: multus-cni-config + items: + - key: cni-conf.json + path: 70-multus.conf diff --git a/cicd/k8s-flannel-incluster-multus/multus/multus-macvlan.yml b/cicd/k8s-flannel-incluster-multus/multus/multus-macvlan.yml new file mode 100644 index 000000000..4b14d1789 --- /dev/null +++ b/cicd/k8s-flannel-incluster-multus/multus/multus-macvlan.yml @@ -0,0 +1,27 @@ +apiVersion: "k8s.cni.cncf.io/v1" +kind: NetworkAttachmentDefinition +metadata: + name: macvlan1 +spec: + config: '{ + "cniVersion": "0.3.1", + "type": "macvlan", + "master": "eth1", + "mode": "bridge", + "ipam": { + "type": "host-local", + "ranges": [ + [ { + "subnet": "4.0.6.0/24", + "rangeStart": "4.0.6.3", + "rangeEnd": "4.0.6.100", + "routes": [ + { + "dst": "0.0.0.0/0" + } + ], + "gateway": "4.0.6.149" + } ] + ] + } + }' diff --git a/cicd/k8s-flannel-incluster-multus/multus/multus-pod-02.yml b/cicd/k8s-flannel-incluster-multus/multus/multus-pod-02.yml new file mode 100644 index 000000000..aa0754a77 --- /dev/null +++ b/cicd/k8s-flannel-incluster-multus/multus/multus-pod-02.yml @@ -0,0 +1,22 @@ +apiVersion: v1 +kind: Pod +metadata: + name: pod-02 + labels: + app: pod-02 + #annotations: + # k8s.v1.cni.cncf.io/networks: vlan5 +spec: + containers: + - name: nginx + image: ghcr.io/nicolaka/netshoot:latest + command: + - sleep + - "infinity" + ports: + - containerPort: 80 + securityContext: + privileged: true + 
capabilities: + add: + - SYS_ADMIN diff --git a/cicd/k8s-flannel-incluster-multus/multus/multus-pod.yml b/cicd/k8s-flannel-incluster-multus/multus/multus-pod.yml new file mode 100644 index 000000000..32dfd15c7 --- /dev/null +++ b/cicd/k8s-flannel-incluster-multus/multus/multus-pod.yml @@ -0,0 +1,14 @@ +apiVersion: v1 +kind: Pod +metadata: + name: pod-01 + labels: + app: pod-01 + annotations: + k8s.v1.cni.cncf.io/networks: vlan5 +spec: + containers: + - name: nginx + image: ghcr.io/loxilb-io/nginx:stable + ports: + - containerPort: 80 diff --git a/cicd/k8s-flannel-incluster-multus/multus/multus-service.yml b/cicd/k8s-flannel-incluster-multus/multus/multus-service.yml new file mode 100644 index 000000000..17038e336 --- /dev/null +++ b/cicd/k8s-flannel-incluster-multus/multus/multus-service.yml @@ -0,0 +1,16 @@ +apiVersion: v1 +kind: Service +metadata: + name: multus-service + annotations: + loxilb.io/multus-nets: vlan5 + loxilb.io/lbmode: "onearm" +spec: + externalTrafficPolicy: Local + loadBalancerClass: loxilb.io/loxilb + selector: + app: pod-01 + ports: + - port: 55002 + targetPort: 80 + type: LoadBalancer diff --git a/cicd/k8s-flannel-incluster-multus/multus/multus-vlan.yml b/cicd/k8s-flannel-incluster-multus/multus/multus-vlan.yml new file mode 100644 index 000000000..ccedf60ee --- /dev/null +++ b/cicd/k8s-flannel-incluster-multus/multus/multus-vlan.yml @@ -0,0 +1,21 @@ +apiVersion: "k8s.cni.cncf.io/v1" +kind: NetworkAttachmentDefinition +metadata: + name: vlan5 +spec: + config: '{ + "name": "vlan5-net", + "cniVersion": "0.3.1", + "type": "vlan", + "master": "eth2", + "mtu": 1450, + "vlanId": 5, + "linkInContainer": false, + "ipam": { + "type": "whereabouts", + "range": "123.123.123.192/28" + }, + "dns": { + "nameservers": [ "8.8.8.8" ] + } + }' diff --git a/cicd/k8s-flannel-incluster-multus/node_scripts/common.sh b/cicd/k8s-flannel-incluster-multus/node_scripts/common.sh new file mode 100755 index 000000000..c01ad688f --- /dev/null +++ b/cicd/k8s-flannel-incluster-multus/node_scripts/common.sh @@ -0,0 +1,93 @@ +#!/bin/bash +# +# Common setup for all servers (Control Plane and Nodes) + +set -euxo pipefail + +# Variable Declaration + +# DNS Setting +if [ ! 
-d /etc/systemd/resolved.conf.d ]; then + sudo mkdir /etc/systemd/resolved.conf.d/ +fi +cat </dev/null; echo "@reboot /sbin/swapoff -a") | crontab - || true +sudo apt-get update -y +# Install CRI-O Runtime + +VERSION="$(echo ${KUBERNETES_VERSION} | grep -oE '[0-9]+\.[0-9]+')" +CRIO_VERSION=1.27 +# Create the .conf file to load the modules at bootup +cat <> /etc/default/crio << EOF +${ENVIRONMENT} +EOF +sudo systemctl daemon-reload +sudo systemctl enable crio --now + +echo "CRI runtime installed successfully" + +sudo apt-get update +sudo apt-get install -y apt-transport-https ca-certificates curl gpg +curl -fsSL https://pkgs.k8s.io/core:/stable:/v$VERSION/deb/Release.key | sudo gpg --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg +echo 'deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://pkgs.k8s.io/core:/stable:/v'$VERSION'/deb/ /' | sudo tee /etc/apt/sources.list.d/kubernetes.list + +sudo apt-get update -y +sudo apt-get install -y kubelet kubectl kubeadm +sudo apt-get update -y +sudo apt-get install -y jq +sudo apt-get install -y ipvsadm + +local_ip="$(ip --json a s | jq -r '.[] | if .ifname == "eth1" then .addr_info[] | if .family == "inet" then .local else empty end else empty end')" +cat > /etc/default/kubelet << EOF +KUBELET_EXTRA_ARGS=--node-ip=$local_ip +${ENVIRONMENT} +EOF diff --git a/cicd/k8s-flannel-incluster-multus/node_scripts/host.sh b/cicd/k8s-flannel-incluster-multus/node_scripts/host.sh new file mode 100755 index 000000000..9eb9c8efb --- /dev/null +++ b/cicd/k8s-flannel-incluster-multus/node_scripts/host.sh @@ -0,0 +1,8 @@ +# Setup the bastion host +sudo apt-get update +sudo apt-get -y install socat lksctp-tools +sudo ip link add link eth2 name eth2.5 type vlan id 5 +sudo ip addr add 123.123.123.206/24 dev eth2.5 +sudo ip link set eth2.5 up + +echo "Host is up" diff --git a/cicd/k8s-flannel-incluster-multus/node_scripts/loxilb.sh b/cicd/k8s-flannel-incluster-multus/node_scripts/loxilb.sh new file mode 100755 index 000000000..6df67208f --- /dev/null +++ b/cicd/k8s-flannel-incluster-multus/node_scripts/loxilb.sh @@ -0,0 +1,9 @@ +export LOXILB_IP=$(ip a |grep global | grep -v '10.0.2.15' | grep -v '192.168.80' | awk '{print $2}' | cut -f1 -d '/') + +apt-get update +apt-get install -y software-properties-common +curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add - +add-apt-repository -y "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" +apt-get update +apt-get install -y docker-ce +docker run -u root --cap-add SYS_ADMIN --restart unless-stopped --privileged -dit -v /dev/log:/dev/log --net=host --name loxilb ghcr.io/loxilb-io/loxilb:latest diff --git a/cicd/k8s-flannel-incluster-multus/node_scripts/master.sh b/cicd/k8s-flannel-incluster-multus/node_scripts/master.sh new file mode 100755 index 000000000..43d431e53 --- /dev/null +++ b/cicd/k8s-flannel-incluster-multus/node_scripts/master.sh @@ -0,0 +1,69 @@ +#!/bin/bash +# +# Setup for Control Plane (Master) servers + +set -euxo pipefail + +NODENAME=$(hostname -s) + +sudo sed -i 's#10.85.0.0/16#10.244.0.0/24#g' /etc/cni/net.d/100-crio-bridge.conflist + +sudo kubeadm config images pull + +echo "Preflight Check Passed: Downloaded All Required Images" + +#sudo kubeadm init --apiserver-advertise-address=$CONTROL_IP --apiserver-cert-extra-sans=$CONTROL_IP --pod-network-cidr=$POD_CIDR --service-cidr=$SERVICE_CIDR --node-name "$NODENAME" --ignore-preflight-errors Swap +sudo kubeadm init --ignore-preflight-errors Swap --config 
/vagrant/yaml/kubeadm-config.yaml + +mkdir -p "$HOME"/.kube +sudo cp -i /etc/kubernetes/admin.conf "$HOME"/.kube/config +sudo chown "$(id -u)":"$(id -g)" "$HOME"/.kube/config + +# Save Configs to shared /Vagrant location + +# For Vagrant re-runs, check if there is existing configs in the location and delete it for saving new configuration. + +config_path="/vagrant/configs" + +if [ -d $config_path ]; then + rm -f $config_path/* +else + mkdir -p $config_path +fi + +cp -i /etc/kubernetes/admin.conf $config_path/config +touch $config_path/join.sh +chmod +x $config_path/join.sh + +kubeadm token create --print-join-command > $config_path/join.sh + +sudo -i -u vagrant bash << EOF +whoami +mkdir -p /home/vagrant/.kube +sudo cp -i $config_path/config /home/vagrant/.kube/ +sudo chown 1000:1000 /home/vagrant/.kube/config +EOF + +# Install Flannel Network Plugin +kubectl apply -f /vagrant/yaml/kube-flannel.yml + +# Install loxilb checksum module +#curl -sfL https://github.com/loxilb-io/loxilb-ebpf/raw/main/kprobe/install.sh | sh - + +# Install whereabouts +git clone https://github.com/k8snetworkplumbingwg/whereabouts && cd whereabouts +kubectl apply \ + -f doc/crds/daemonset-install.yaml \ + -f doc/crds/whereabouts.cni.cncf.io_ippools.yaml \ + -f doc/crds/whereabouts.cni.cncf.io_overlappingrangeipreservations.yaml && cd - + +# Install multus +kubectl apply -f /vagrant/multus/multus-daemonset.yml + +# Wait for pods to be ready +kubectl wait pod --all --for=condition=Ready --namespace=kube-system --timeout=240s >> /dev/null 2>&1 || true +kubectl wait pod --all --for=condition=Ready --namespace=default --timeout=240s >> /dev/null 2>&1 || true +kubectl wait pod --all --for=condition=Ready --namespace=kube-flannel --timeout=240s >> /dev/null 2>&1 || true +kubectl apply -f /vagrant/multus/multus-vlan.yml +sleep 60 +kubectl apply -f /vagrant/yaml/loxilb.yaml diff --git a/cicd/k8s-flannel-incluster-multus/node_scripts/worker.sh b/cicd/k8s-flannel-incluster-multus/node_scripts/worker.sh new file mode 100755 index 000000000..0fd5eaee9 --- /dev/null +++ b/cicd/k8s-flannel-incluster-multus/node_scripts/worker.sh @@ -0,0 +1,34 @@ +#!/bin/bash +# +# Setup for Node servers + +set -euxo pipefail + +if [[ $(hostname -s) == "worker1" ]]; then + sudo sed -i 's#10.85.0.0/16#10.244.1.0/24#g' /etc/cni/net.d/100-crio-bridge.conflist +else + sudo sed -i 's#10.85.0.0/16#10.244.2.0/24#g' /etc/cni/net.d/100-crio-bridge.conflist +fi + +config_path="/vagrant/configs" + +/bin/bash $config_path/join.sh -v + +sudo -i -u vagrant bash << EOF +whoami +mkdir -p /home/vagrant/.kube +sudo cp -i $config_path/config /home/vagrant/.kube/ +sudo chown 1000:1000 /home/vagrant/.kube/config +NODENAME=$(hostname -s) +kubectl label node $(hostname -s) node-role.kubernetes.io/worker=worker +kubectl wait pod --all --for=condition=Ready --namespace=kube-system --timeout=240s >> /dev/null 2>&1 || true +kubectl wait pod --all --for=condition=Ready --namespace=default --timeout=240s >> /dev/null 2>&1 || true +kubectl wait pod --all --for=condition=Ready --namespace=kube-flannel --timeout=240s >> /dev/null 2>&1 || true +kubectl apply -f /vagrant/yaml/kube-loxilb.yaml +kubectl apply -f /vagrant/multus/multus-pod.yml +sleep 60 +kubectl apply -f /vagrant/multus/multus-service.yml + +EOF + +#curl -sfL https://github.com/loxilb-io/loxilb-ebpf/raw/main/kprobe/install.sh | sh - diff --git a/cicd/k8s-flannel-incluster-multus/rmconfig.sh b/cicd/k8s-flannel-incluster-multus/rmconfig.sh new file mode 100755 index 000000000..1eb0df750 --- /dev/null +++ 
b/cicd/k8s-flannel-incluster-multus/rmconfig.sh @@ -0,0 +1,5 @@ +#!/bin/bash +vagrant destroy -f worker2 +vagrant destroy -f worker1 +vagrant destroy -f master +vagrant destroy -f host diff --git a/cicd/k8s-flannel-incluster-multus/validation.sh b/cicd/k8s-flannel-incluster-multus/validation.sh new file mode 100755 index 000000000..0db8906ee --- /dev/null +++ b/cicd/k8s-flannel-incluster-multus/validation.sh @@ -0,0 +1,30 @@ +#!/bin/bash +source ../common.sh +echo k8s-flannel-incluster + +if [ "$1" ]; then + KUBECONFIG="$1" +fi + +echo -e "\nEnd Points List" +echo "******************************************************************************" +vagrant ssh master -c 'kubectl get endpoints -A' 2> /dev/null +echo "******************************************************************************" +echo -e "\nSVC List" +echo "******************************************************************************" +vagrant ssh master -c 'kubectl get svc' 2> /dev/null +echo "******************************************************************************" +echo -e "\nPod List" +echo "******************************************************************************" +vagrant ssh master -c 'kubectl get pods -A' 2> /dev/null + +out=$(vagrant ssh host -c "curl -s --connect-timeout 10 http://123.123.123.205:55002" 2> /dev/null) +#echo $out +if [[ ${out} == *"nginx"* ]]; then + echo -e "k8s-flannel-incluster TCP\t[OK]" +else + echo -e "k8s-flannel-incluster TCP\t[FAILED]" + code=1 +fi + +exit $code diff --git a/cicd/k8s-flannel-incluster-multus/yaml/kube-flannel.yml b/cicd/k8s-flannel-incluster-multus/yaml/kube-flannel.yml new file mode 100644 index 000000000..aaf3d7404 --- /dev/null +++ b/cicd/k8s-flannel-incluster-multus/yaml/kube-flannel.yml @@ -0,0 +1,210 @@ +--- +kind: Namespace +apiVersion: v1 +metadata: + name: kube-flannel + labels: + k8s-app: flannel + pod-security.kubernetes.io/enforce: privileged +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + labels: + k8s-app: flannel + name: flannel +rules: +- apiGroups: + - "" + resources: + - pods + verbs: + - get +- apiGroups: + - "" + resources: + - nodes + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - nodes/status + verbs: + - patch +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + labels: + k8s-app: flannel + name: flannel +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: flannel +subjects: +- kind: ServiceAccount + name: flannel + namespace: kube-flannel +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + k8s-app: flannel + name: flannel + namespace: kube-flannel +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: kube-flannel-cfg + namespace: kube-flannel + labels: + tier: node + k8s-app: flannel + app: flannel +data: + cni-conf.json: | + { + "name": "cbr0", + "cniVersion": "0.3.1", + "plugins": [ + { + "type": "flannel", + "delegate": { + "hairpinMode": true, + "isDefaultGateway": true + } + }, + { + "type": "portmap", + "capabilities": { + "portMappings": true + } + } + ] + } + net-conf.json: | + { + "Network": "10.244.0.0/16", + "EnableNFTables": false, + "Backend": { + "Type": "vxlan" + } + } +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: kube-flannel-ds + namespace: kube-flannel + labels: + tier: node + app: flannel + k8s-app: flannel +spec: + selector: + matchLabels: + app: flannel + template: + metadata: + labels: + tier: node + app: flannel + spec: + affinity: + nodeAffinity: + 
requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/os + operator: In + values: + - linux + hostNetwork: true + priorityClassName: system-node-critical + tolerations: + - operator: Exists + effect: NoSchedule + serviceAccountName: flannel + initContainers: + - name: install-cni-plugin + image: docker.io/flannel/flannel-cni-plugin:v1.5.1-flannel2 + command: + - cp + args: + - -f + - /flannel + - /opt/cni/bin/flannel + volumeMounts: + - name: cni-plugin + mountPath: /opt/cni/bin + - name: install-cni + image: docker.io/flannel/flannel:v0.25.6 + command: + - cp + args: + - -f + - /etc/kube-flannel/cni-conf.json + - /etc/cni/net.d/10-flannel.conflist + volumeMounts: + - name: cni + mountPath: /etc/cni/net.d + - name: flannel-cfg + mountPath: /etc/kube-flannel/ + containers: + - name: kube-flannel + image: docker.io/flannel/flannel:v0.25.6 + command: + - /opt/bin/flanneld + args: + - --ip-masq + - --kube-subnet-mgr + - --iface=eth1 + resources: + requests: + cpu: "100m" + memory: "50Mi" + securityContext: + privileged: false + capabilities: + add: ["NET_ADMIN", "NET_RAW"] + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: EVENT_QUEUE_DEPTH + value: "5000" + volumeMounts: + - name: run + mountPath: /run/flannel + - name: flannel-cfg + mountPath: /etc/kube-flannel/ + - name: xtables-lock + mountPath: /run/xtables.lock + volumes: + - name: run + hostPath: + path: /run/flannel + - name: cni-plugin + hostPath: + path: /opt/cni/bin + - name: cni + hostPath: + path: /etc/cni/net.d + - name: flannel-cfg + configMap: + name: kube-flannel-cfg + - name: xtables-lock + hostPath: + path: /run/xtables.lock + type: FileOrCreate diff --git a/cicd/k8s-flannel-incluster-multus/yaml/kube-loxilb.yaml b/cicd/k8s-flannel-incluster-multus/yaml/kube-loxilb.yaml new file mode 100644 index 000000000..6b5a9f50b --- /dev/null +++ b/cicd/k8s-flannel-incluster-multus/yaml/kube-loxilb.yaml @@ -0,0 +1,186 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: kube-loxilb + namespace: kube-system +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: kube-loxilb +rules: + - apiGroups: + - "" + resources: + - nodes + verbs: + - get + - watch + - list + - patch + - apiGroups: + - "" + resources: + - pods + verbs: + - get + - watch + - list + - patch + - apiGroups: + - "" + resources: + - endpoints + - services + - namespaces + - services/status + verbs: + - get + - watch + - list + - patch + - update + - apiGroups: + - gateway.networking.k8s.io + resources: + - gatewayclasses + - gatewayclasses/status + - gateways + - gateways/status + - tcproutes + - udproutes + verbs: ["get", "watch", "list", "patch", "update"] + - apiGroups: + - discovery.k8s.io + resources: + - endpointslices + verbs: + - get + - watch + - list + - apiGroups: + - authentication.k8s.io + resources: + - tokenreviews + verbs: + - create + - apiGroups: + - authorization.k8s.io + resources: + - subjectaccessreviews + verbs: + - create + - apiGroups: + - bgppeer.loxilb.io + resources: + - bgppeerservices + verbs: + - get + - watch + - list + - create + - update + - delete + - apiGroups: + - bgppolicydefinedsets.loxilb.io + resources: + - bgppolicydefinedsetsservices + verbs: + - get + - watch + - list + - create + - update + - delete + - apiGroups: + - bgppolicydefinition.loxilb.io + resources: + - bgppolicydefinitionservices + verbs: + - get + - 
watch + - list + - create + - update + - delete + - apiGroups: + - bgppolicyapply.loxilb.io + resources: + - bgppolicyapplyservices + verbs: + - get + - watch + - list + - create + - update + - delete + +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: kube-loxilb +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: kube-loxilb +subjects: + - kind: ServiceAccount + name: kube-loxilb + namespace: kube-system +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: kube-loxilb + namespace: kube-system + labels: + app: kube-loxilb-app +spec: + replicas: 1 + selector: + matchLabels: + app: kube-loxilb-app + template: + metadata: + labels: + app: kube-loxilb-app + spec: + #hostNetwork: true + dnsPolicy: ClusterFirstWithHostNet + tolerations: + # Mark the pod as a critical add-on for rescheduling. + - key: CriticalAddonsOnly + operator: Exists + priorityClassName: system-node-critical + serviceAccountName: kube-loxilb + terminationGracePeriodSeconds: 0 + containers: + - name: kube-loxilb + image: ghcr.io/loxilb-io/kube-loxilb:latest + imagePullPolicy: Always + command: + - /bin/kube-loxilb + args: + #- --loxiURL=http://192.168.80.10:11111 + - --cidrPools=defaultPool=123.123.123.205/32 + #- --setBGP=64512 + #- --listenBGPPort=1791 + - --setRoles=0.0.0.0 + #- --zone=az1 + #- --monitor + #- --extBGPPeers=50.50.50.1:65101,51.51.51.1:65102 + #- --setLBMode=1 + #- --config=/opt/loxilb/agent/kube-loxilb.conf + resources: + requests: + cpu: "100m" + memory: "50Mi" + limits: + cpu: "100m" + memory: "50Mi" + securityContext: + privileged: true + capabilities: + add: ["NET_ADMIN", "NET_RAW"] diff --git a/cicd/k8s-flannel-incluster-multus/yaml/kubeadm-config.yaml b/cicd/k8s-flannel-incluster-multus/yaml/kubeadm-config.yaml new file mode 100644 index 000000000..e8de10b86 --- /dev/null +++ b/cicd/k8s-flannel-incluster-multus/yaml/kubeadm-config.yaml @@ -0,0 +1,70 @@ +apiVersion: kubeadm.k8s.io/v1beta3 +bootstrapTokens: +- groups: + - system:bootstrappers:kubeadm:default-node-token + ttl: 24h0m0s + usages: + - signing + - authentication +kind: InitConfiguration +localAPIEndpoint: + advertiseAddress: 192.168.80.250 + bindPort: 6443 +nodeRegistration: + imagePullPolicy: IfNotPresent + name: master + taints: null + kubeletExtraArgs: + node-ip: 192.168.80.250 +--- +apiVersion: kubeadm.k8s.io/v1beta3 +certificatesDir: /etc/kubernetes/pki +kind: ClusterConfiguration +apiServer: + timeoutForControlPlane: 4m0s + certSANs: + - 192.168.80.250 +controlPlaneEndpoint: 192.168.80.250:6443 +clusterName: kubernetes +controllerManager: {} +dns: {} +etcd: + local: + dataDir: /var/lib/etcd +imageRepository: registry.k8s.io +kubernetesVersion: v1.29.2 +networking: + dnsDomain: cluster.local + podSubnet: 10.244.0.0/16 + serviceSubnet: 10.245.0.0/18 +scheduler: {} +--- +apiVersion: kubeproxy.config.k8s.io/v1alpha1 +bindAddress: 0.0.0.0 +clientConnection: + acceptContentTypes: "" + burst: 10 + contentType: application/vnd.kubernetes.protobuf + kubeconfig: /var/lib/kube-proxy/kubeconfig.conf + qps: 5 +clusterCIDR: "" +configSyncPeriod: 15m0s +#featureGates: "SupportIPVSProxyMode=true" +enableProfiling: false +healthzBindAddress: 0.0.0.0:10256 +hostnameOverride: "" +iptables: + masqueradeAll: false + masqueradeBit: 14 + minSyncPeriod: 0s + syncPeriod: 30s +ipvs: + excludeCIDRs: null + minSyncPeriod: 0s + scheduler: "" + syncPeriod: 30s +kind: KubeProxyConfiguration +metricsBindAddress: 127.0.0.1:10249 +nodePortAddresses: null +oomScoreAdj: -999 +portRange: "" diff 
--git a/cicd/k8s-flannel-incluster-multus/yaml/loxilb-localvip.yaml b/cicd/k8s-flannel-incluster-multus/yaml/loxilb-localvip.yaml new file mode 100644 index 000000000..3bcfce436 --- /dev/null +++ b/cicd/k8s-flannel-incluster-multus/yaml/loxilb-localvip.yaml @@ -0,0 +1,110 @@ +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: loxilb-lb + namespace: kube-system +spec: + selector: + matchLabels: + app: loxilb-app + template: + metadata: + name: loxilb-lb + labels: + app: loxilb-app + spec: + hostNetwork: true + hostPID: true + dnsPolicy: ClusterFirstWithHostNet + tolerations: + #- key: "node-role.kubernetes.io/master" + #operator: Exists + - key: "node-role.kubernetes.io/control-plane" + operator: Exists + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + # - key: "node-role.kubernetes.io/master" + # operator: Exists + - key: "node-role.kubernetes.io/control-plane" + operator: Exists + initContainers: + - name: mkllb-cgroup + command: + - sh + - -ec + - | + ls /usr/local/sbin/mkllb_cgroup && chmod 777 /usr/local/sbin/mkllb_cgroup; + cp -f /usr/local/sbin/mkllb_cgroup /hbin/mkllb_cgroup; + nsenter --cgroup=/hproc/1/ns/cgroup --mount=/hproc/1/ns/mnt /bin/mkllb_cgroup; + echo done; + rm /hbin/mkllb_cgroup; + image: "ghcr.io/loxilb-io/loxilb:latest" + imagePullPolicy: Always + volumeMounts: + - name: hproc + mountPath: /hproc + - name: hbin + mountPath: /hbin + terminationMessagePolicy: FallbackToLogsOnError + securityContext: + privileged: true + capabilities: + add: + - SYS_ADMIN + containers: + - name: loxilb-app + image: "ghcr.io/loxilb-io/loxilb:latest" + imagePullPolicy: IfNotPresent + command: [ "/root/loxilb-io/loxilb/loxilb", "--egr-hooks", "--blacklist=cni[0-9a-z]|veth.|flannel.|cali.|tunl.|vxlan[.]calico", "--localsockpolicy" ] + ports: + - containerPort: 11111 + - containerPort: 179 + - containerPort: 50051 + volumeMounts: + - name: llb-cgroup + mountPath: /opt/loxilb/cgroup + securityContext: + privileged: true + runAsUser: 0 + capabilities: + add: + - SYS_ADMIN + volumes: + - name: hproc + hostPath: + path: /proc + type: Directory + - name: hbin + hostPath: + path: /bin + type: Directory + - name: llb-cgroup + hostPath: + path: /opt/loxilb/cgroup + type: DirectoryOrCreate +--- +apiVersion: v1 +kind: Service +metadata: + name: loxilb-lb-service + namespace: kube-system +spec: + clusterIP: None + selector: + app: loxilb-app + ports: + - name: loxilb-app + port: 11111 + targetPort: 11111 + protocol: TCP + - name: loxilb-app-bgp + port: 179 + targetPort: 179 + protocol: TCP + - name: loxilb-app-gobgp + port: 50051 + targetPort: 50051 + protocol: TCP diff --git a/cicd/k8s-flannel-incluster-multus/yaml/loxilb.yaml b/cicd/k8s-flannel-incluster-multus/yaml/loxilb.yaml new file mode 100644 index 000000000..e386d728d --- /dev/null +++ b/cicd/k8s-flannel-incluster-multus/yaml/loxilb.yaml @@ -0,0 +1,71 @@ +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: loxilb-lb + #namespace: kube-system +spec: + selector: + matchLabels: + app: loxilb-app + template: + metadata: + name: loxilb-lb + labels: + app: loxilb-app + annotations: + k8s.v1.cni.cncf.io/networks: vlan5 + spec: + #hostNetwork: true + dnsPolicy: ClusterFirstWithHostNet + tolerations: + #- key: "node-role.kubernetes.io/master" + #operator: Exists + - key: "node-role.kubernetes.io/control-plane" + operator: Exists + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + #- key: 
"node-role.kubernetes.io/master" + # operator: Exists + - key: "node-role.kubernetes.io/control-plane" + operator: Exists + containers: + - name: loxilb-app + image: "ghcr.io/loxilb-io/loxilb:latest" + imagePullPolicy: Always + #command: [ "/root/loxilb-io/loxilb/loxilb", "--egr-hooks", "--blacklist=cni[0-9a-z]|veth.|flannel.|cali.|tunl.|vxlan[.]calico" ] + command: [ "/root/loxilb-io/loxilb/loxilb" ] + ports: + - containerPort: 11111 + - containerPort: 179 + - containerPort: 50051 + securityContext: + privileged: true + capabilities: + add: + - SYS_ADMIN +--- +apiVersion: v1 +kind: Service +metadata: + name: loxilb-lb-service + #namespace: kube-system +spec: + clusterIP: None + selector: + app: loxilb-app + ports: + - name: loxilb-app + port: 11111 + targetPort: 11111 + protocol: TCP + - name: loxilb-app-bgp + port: 179 + targetPort: 179 + protocol: TCP + - name: loxilb-app-gobgp + port: 50051 + targetPort: 50051 + protocol: TCP diff --git a/cicd/k8s-flannel-incluster-multus/yaml/sctp_fullnat.yml b/cicd/k8s-flannel-incluster-multus/yaml/sctp_fullnat.yml new file mode 100644 index 000000000..199d2a406 --- /dev/null +++ b/cicd/k8s-flannel-incluster-multus/yaml/sctp_fullnat.yml @@ -0,0 +1,44 @@ +apiVersion: v1 +kind: Service +metadata: + name: sctp-lb-fullnat + annotations: + loxilb.io/liveness: "yes" + loxilb.io/lbmode: "fullnat" +spec: + loadBalancerClass: loxilb.io/loxilb + externalTrafficPolicy: Local + selector: + what: sctp-fullnat-test + ports: + - port: 57004 + protocol: SCTP + targetPort: 9999 + type: LoadBalancer +--- +apiVersion: v1 +kind: Pod +metadata: + name: sctp-fullnat-test + labels: + what: sctp-fullnat-test +spec: + containers: + - name: sctp-fullnat-test + #image: loxilbio/sctp-darn:latest + image: ghcr.io/loxilb-io/alpine-socat:latest + imagePullPolicy: Always + #command: ["sctp_darn","-H", "0.0.0.0","-P", "9999", "-l"] + command: [ "sh", "-c"] + args: + - while true; do + socat -v -T2 sctp-l:9999,reuseaddr,fork system:"echo 'server1'; cat"; + sleep 20; + done; + ports: + - containerPort: 9999 + env: + - name: MY_POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP diff --git a/cicd/k8s-flannel-incluster-multus/yaml/sctp_onearm.yml b/cicd/k8s-flannel-incluster-multus/yaml/sctp_onearm.yml new file mode 100644 index 000000000..b4b736962 --- /dev/null +++ b/cicd/k8s-flannel-incluster-multus/yaml/sctp_onearm.yml @@ -0,0 +1,41 @@ +apiVersion: v1 +kind: Service +metadata: + name: sctp-lb-onearm + annotations: + loxilb.io/liveness: "yes" + loxilb.io/lbmode: "onearm" +spec: + loadBalancerClass: loxilb.io/loxilb + externalTrafficPolicy: Local + selector: + what: sctp-onearm-test + ports: + - port: 56004 + protocol: SCTP + targetPort: 9999 + type: LoadBalancer +--- +apiVersion: v1 +kind: Pod +metadata: + name: sctp-onearm-test + labels: + what: sctp-onearm-test +spec: + containers: + - name: sctp-onearm-test + image: ghcr.io/loxilb-io/alpine-socat:latest + command: [ "sh", "-c"] + args: + - while true; do + socat -v -T2 sctp-l:9999,reuseaddr,fork system:"echo 'server1'; cat"; + sleep 20; + done; + ports: + - containerPort: 9999 + env: + - name: MY_POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP diff --git a/cicd/k8s-flannel-incluster-multus/yaml/settings.yaml b/cicd/k8s-flannel-incluster-multus/yaml/settings.yaml new file mode 100644 index 000000000..9f57a1998 --- /dev/null +++ b/cicd/k8s-flannel-incluster-multus/yaml/settings.yaml @@ -0,0 +1,45 @@ +--- +# cluster_name is used to group the nodes in a folder within VirtualBox: +cluster_name: Kubernetes Cluster +# 
Uncomment to set environment variables for services such as crio and kubelet. +# For example, configure the cluster to pull images via a proxy. +# environment: | +# HTTP_PROXY=http://my-proxy:8000 +# HTTPS_PROXY=http://my-proxy:8000 +# NO_PROXY=127.0.0.1,localhost,master-node,node01,node02,node03 +# All IPs/CIDRs should be private and allowed in /etc/vbox/networks.conf. +network: + iloxilb_ip: 192.168.80.253 + oloxilb_ip: 192.168.90.253 + # Worker IPs are simply incremented from the control IP. + control_ip: 192.168.80.250 + dns_servers: + - 8.8.8.8 + - 1.1.1.1 + pod_cidr: 10.244.0.0/16 + service_cidr: 10.245.0.0/18 +nodes: + control: + cpu: 2 + memory: 4096 + workers: + count: 2 + cpu: 1 + memory: 2048 +# Mount additional shared folders from the host into each virtual machine. +# Note that the project directory is automatically mounted at /vagrant. +# shared_folders: +# - host_path: ../images +# vm_path: /vagrant/images +software: + loxilb: + box: + name: sysnet4admin/Ubuntu-k8s + version: 0.7.1 + cluster: + box: bento/ubuntu-22.04 + version: 202401.31.0 + calico: 3.26.0 + # To skip the dashboard installation, set its version to an empty value or comment it out: + kubernetes: 1.29.2 + os: xUbuntu_22.04 diff --git a/cicd/k8s-flannel-incluster-multus/yaml/tcp_fullnat.yml b/cicd/k8s-flannel-incluster-multus/yaml/tcp_fullnat.yml new file mode 100644 index 000000000..3303ac35e --- /dev/null +++ b/cicd/k8s-flannel-incluster-multus/yaml/tcp_fullnat.yml @@ -0,0 +1,29 @@ +apiVersion: v1 +kind: Service +metadata: + name: tcp-lb-fullnat + annotations: + loxilb.io/liveness: "yes" + loxilb.io/lbmode: "fullnat" +spec: + externalTrafficPolicy: Local + loadBalancerClass: loxilb.io/loxilb + selector: + what: tcp-fullnat-test + ports: + - port: 57002 + targetPort: 80 + type: LoadBalancer +--- +apiVersion: v1 +kind: Pod +metadata: + name: tcp-fullnat-test + labels: + what: tcp-fullnat-test +spec: + containers: + - name: tcp-fullnat-test + image: ghcr.io/loxilb-io/nginx:stable + ports: + - containerPort: 80 diff --git a/cicd/k8s-flannel-incluster-multus/yaml/tcp_onearm.yml b/cicd/k8s-flannel-incluster-multus/yaml/tcp_onearm.yml new file mode 100644 index 000000000..87d317015 --- /dev/null +++ b/cicd/k8s-flannel-incluster-multus/yaml/tcp_onearm.yml @@ -0,0 +1,30 @@ +apiVersion: v1 +kind: Service +metadata: + name: tcp-lb-onearm + annotations: + loxilb.io/liveness: "yes" + loxilb.io/lbmode: "onearm" + loxilb.io/zoneselect: "az1" +spec: + externalTrafficPolicy: Local + loadBalancerClass: loxilb.io/loxilb + selector: + what: tcp-onearm-test + ports: + - port: 56002 + targetPort: 80 + type: LoadBalancer +--- +apiVersion: v1 +kind: Pod +metadata: + name: tcp-onearm-test + labels: + what: tcp-onearm-test +spec: + containers: + - name: tcp-onearm-test + image: ghcr.io/loxilb-io/nginx:stable + ports: + - containerPort: 80 diff --git a/cicd/k8s-flannel-incluster-multus/yaml/udp_fullnat.yml b/cicd/k8s-flannel-incluster-multus/yaml/udp_fullnat.yml new file mode 100644 index 000000000..67b729019 --- /dev/null +++ b/cicd/k8s-flannel-incluster-multus/yaml/udp_fullnat.yml @@ -0,0 +1,30 @@ +apiVersion: v1 +kind: Service +metadata: + name: udp-lb-fullnat + annotations: + loxilb.io/liveness: "yes" + loxilb.io/lbmode: "fullnat" +spec: + loadBalancerClass: loxilb.io/loxilb + externalTrafficPolicy: Local + selector: + what: udp-fullnat-test + ports: + - port: 57003 + protocol: UDP + targetPort: 33333 + type: LoadBalancer +--- +apiVersion: v1 +kind: Pod +metadata: + name: udp-fullnat-test + labels: + what: udp-fullnat-test +spec: 
+ containers: + - name: udp-fullnat-test + image: ghcr.io/loxilb-io/udp-echo:latest + ports: + - containerPort: 33333 diff --git a/cicd/k8s-flannel-incluster-multus/yaml/udp_onearm.yml b/cicd/k8s-flannel-incluster-multus/yaml/udp_onearm.yml new file mode 100644 index 000000000..833187e73 --- /dev/null +++ b/cicd/k8s-flannel-incluster-multus/yaml/udp_onearm.yml @@ -0,0 +1,30 @@ +apiVersion: v1 +kind: Service +metadata: + name: udp-lb-onearm + annotations: + loxilb.io/liveness: "yes" + loxilb.io/lbmode: "onearm" +spec: + loadBalancerClass: loxilb.io/loxilb + externalTrafficPolicy: Local + selector: + what: udp-onearm-test + ports: + - port: 56003 + protocol: UDP + targetPort: 33333 + type: LoadBalancer +--- +apiVersion: v1 +kind: Pod +metadata: + name: udp-onearm-test + labels: + what: udp-onearm-test +spec: + containers: + - name: udp-onearm-test + image: ghcr.io/loxilb-io/udp-echo:latest + ports: + - containerPort: 33333 diff --git a/cicd/k8s-nat64/kube-loxilb.yaml b/cicd/k8s-nat64/kube-loxilb.yaml index 6a1beee6d..9c1bb0e0a 100644 --- a/cicd/k8s-nat64/kube-loxilb.yaml +++ b/cicd/k8s-nat64/kube-loxilb.yaml @@ -33,6 +33,7 @@ rules: resources: - endpoints - services + - namespaces - services/status verbs: - get diff --git a/cicd/microk8s-incluster/kube-loxilb.yml b/cicd/microk8s-incluster/kube-loxilb.yml index ef86b0f16..99732f870 100644 --- a/cicd/microk8s-incluster/kube-loxilb.yml +++ b/cicd/microk8s-incluster/kube-loxilb.yml @@ -33,6 +33,7 @@ rules: resources: - endpoints - services + - namespaces - services/status verbs: - get
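The one-line RBAC change repeated across all of the kube-loxilb manifests above (the extra "namespaces" resource) and the new multus scenario can both be spot-checked after deployment. The commands below are a sketch rather than part of the patch; they assume the kube-loxilb ServiceAccount lives in kube-system, as in these manifests, and that the host VM from the Vagrantfile is up, mirroring what validation.sh already does.

# verify the updated ClusterRole lets kube-loxilb read namespaces
kubectl auth can-i list namespaces \
    --as=system:serviceaccount:kube-system:kube-loxilb
# expected output: yes

# exercise the multus-backed LoadBalancer VIP the same way validation.sh does
vagrant ssh host -c "curl -s --connect-timeout 10 http://123.123.123.205:55002"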