
Commit 361f9f5

Merge branch 'main' of github.com:jLemmings/ansible-role-rke2
Joshua Hemmings committed May 13, 2024
2 parents ea46fc8 + 768771c
Showing 10 changed files with 56 additions and 27 deletions.
README.md (8 additions, 1 deletion)

@@ -90,7 +90,7 @@ rke2_kubevip_ipvs_lb_enable: false
 # Enable layer 4 load balancing for control plane using IPVS kernel module
 # Must use kube-vip version 0.4.0 or later

-rke2_kubevip_service_election_enable: false
+rke2_kubevip_service_election_enable: true
 # By default, ARP mode provides an HA implementation of a VIP (your service IP address), which receives traffic on the kube-vip leader.
 # To circumvent this, kube-vip has implemented a new function, "leader election per service":
 # instead of one node becoming the leader for all services, an election is held across all kube-vip instances, and the winner of that election becomes the holder of that service. Ultimately,
@@ -316,6 +316,13 @@ rke2_debug: false
 # (Optional) Customize default kubelet arguments
 # rke2_kubelet_arg:
 #   - "--system-reserved=cpu=100m,memory=100Mi"
+
+# (Optional) Customize default kube-proxy arguments
+# rke2_kube_proxy_arg:
+#   - "proxy-mode=ipvs"
+
+# The value for the node-name configuration item
+rke2_node_name: "{{ inventory_hostname }}"
 ```
 ## Inventory file example
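
The two new variables surface directly in group or host vars. A minimal sketch, assuming the variable names from this diff; the IPVS value is purely illustrative, and `inventory_hostname_short` is just one possible override:

```yaml
# group_vars/all.yml -- illustrative values, not role defaults.
# Each entry becomes one kube-proxy flag (without the leading "--").
rke2_kube_proxy_arg:
  - "proxy-mode=ipvs"

# Written to node-name in /etc/rancher/rke2/config.yaml; defaults to
# inventory_hostname, so override it only when the Kubernetes node name
# must differ from the Ansible inventory name.
rke2_node_name: "{{ inventory_hostname_short }}"
```
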
defaults/main.yml (8 additions, 1 deletion)

@@ -51,7 +51,7 @@ rke2_kubevip_ipvs_lb_enable: false
 # Enable layer 4 load balancing for control plane using IPVS kernel module
 # Must use kube-vip version 0.4.0 or later

-rke2_kubevip_service_election_enable: false
+rke2_kubevip_service_election_enable: true
 # By default, ARP mode provides an HA implementation of a VIP (your service IP address), which receives traffic on the kube-vip leader.
 # To circumvent this, kube-vip has implemented a new function, "leader election per service":
 # instead of one node becoming the leader for all services, an election is held across all kube-vip instances, and the winner of that election becomes the holder of that service. Ultimately,
@@ -277,3 +277,10 @@ rke2_debug: false
 # (Optional) Customize default kubelet arguments
 # rke2_kubelet_arg:
 #   - "--system-reserved=cpu=100m,memory=100Mi"
+
+# (Optional) Customize default kube-proxy arguments
+# rke2_kube_proxy_arg:
+#   - "proxy-mode=ipvs"
+
+# The value for the node-name configuration item
+rke2_node_name: "{{ inventory_hostname }}"
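
Because this commit flips the default to `true`, clusters that rely on a single ARP leader for all services now need an explicit override. A one-line sketch:

```yaml
# group_vars/all.yml -- restore the pre-commit single-leader behaviour
rke2_kubevip_service_election_enable: false
```
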
tasks/change_config.yml (1 addition, 1 deletion)

@@ -1,4 +1,4 @@
-- name: Restart RKE2 service on {{ inventory_hostname }}
+- name: Restart RKE2 service on {{ rke2_node_name }}
   ansible.builtin.service:
     name: "{{ rke2_service_name }}"
     state: restarted
tasks/first_server.yml (5 additions, 3 deletions)

@@ -104,7 +104,7 @@

 - name: Wait for the first server to be ready - no CNI
   ansible.builtin.shell: |
-    {{ rke2_data_path }}/bin/kubectl --kubeconfig /etc/rancher/rke2/rke2.yaml get node "{{ inventory_hostname }}" -o jsonpath='{range .status.conditions[*]}{.message}{"\n"}{end}'
+    {{ rke2_data_path }}/bin/kubectl --kubeconfig /etc/rancher/rke2/rke2.yaml get node "{{ rke2_node_name }}" -o jsonpath='{range .status.conditions[*]}{.message}{"\n"}{end}'
   args:
     executable: /bin/bash
   changed_when: false
@@ -121,7 +121,7 @@
 - name: Wait for the first server to be ready - with CNI
   ansible.builtin.shell: |
     set -o pipefail
    {{ rke2_data_path }}/bin/kubectl --kubeconfig /etc/rancher/rke2/rke2.yaml get nodes | grep "{{ inventory_hostname }}"
-    {{ rke2_data_path }}/bin/kubectl --kubeconfig /etc/rancher/rke2/rke2.yaml get nodes | grep "{{ inventory_hostname }}"
+    {{ rke2_data_path }}/bin/kubectl --kubeconfig /etc/rancher/rke2/rke2.yaml get nodes | grep "{{ rke2_node_name }}"
   args:
     executable: /bin/bash
   changed_when: false
@@ -145,7 +145,9 @@
 - name: Set an Active Server variable
   ansible.builtin.set_fact:
     active_server: "{{ inventory_hostname }}"
-  run_once: true
+  delegate_to: "{{ item }}"
+  delegate_facts: true
+  loop: "{{ groups[rke2_cluster_group_name] }}"

 - name: Get all nodes
   ansible.builtin.shell: |
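
The `set_fact` change replaces `run_once` with an explicit `delegate_to`/`delegate_facts` loop, so `active_server` is written into every host's own facts instead of relying on run_once fact broadcasting, which can be unreliable with `serial` batches or the `free` strategy. A standalone sketch of the same pattern, with a hypothetical `cluster` group name and a `run_once` variant added for determinism:

```yaml
# delegate_facts_sketch.yml -- hypothetical playbook; "cluster" is a placeholder group
- hosts: cluster
  gather_facts: false
  tasks:
    - name: Store the coordinating host's name as a fact on every host
      ansible.builtin.set_fact:
        active_server: "{{ inventory_hostname }}"
      delegate_to: "{{ item }}"   # evaluate the task in each target host's context
      delegate_facts: true        # assign the fact to the delegated host, not the looping one
      loop: "{{ groups['cluster'] }}"
      run_once: true              # variant: a single host performs the loop for everyone
```
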
tasks/remaining_nodes.yml (6 additions, 6 deletions)

@@ -73,9 +73,9 @@
   retries: 100
   delay: 15
   loop: "{{ groups[rke2_cluster_group_name] }}"
-  delegate_to: "{{ active_server | default(groups[rke2_servers_group_name].0) }}"
-  run_once: true
-  when: rke2_cni == 'none'
+  when:
+    - rke2_cni == 'none'
+    - inventory_hostname == active_server or inventory_hostname == groups[rke2_servers_group_name].0

 - name: Wait for remaining nodes to be ready - with CNI
   ansible.builtin.shell: |
@@ -89,6 +89,6 @@
     "groups[rke2_cluster_group_name] | length == all_ready_nodes.stdout | int"
   retries: 100
   delay: 15
-  delegate_to: "{{ active_server | default(groups[rke2_servers_group_name].0) }}"
-  run_once: true
-  when: rke2_cni != 'none'
+  when:
+    - rke2_cni != 'none'
+    - inventory_hostname == active_server or inventory_hostname == groups[rke2_servers_group_name].0
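
Dropping `delegate_to` + `run_once` in favor of a `when` guard means the wait task now runs natively on the active server (falling back to the first server) and is simply skipped everywhere else. A reduced sketch of the guard with placeholder names; the `default` filter variant shown here expresses a similar fallback more compactly, and `/var/lib/rancher/rke2` is assumed as RKE2's default data path:

```yaml
# when_guard_sketch.yml -- hypothetical task; "servers" is a placeholder group
- name: Wait until no node reports NotReady
  ansible.builtin.shell: |
    /var/lib/rancher/rke2/bin/kubectl --kubeconfig /etc/rancher/rke2/rke2.yaml get nodes
  register: nodes
  until: "'NotReady' not in nodes.stdout"   # retry while any node is NotReady
  retries: 100
  delay: 15
  changed_when: false
  when: inventory_hostname == (active_server | default(groups['servers'].0))
```
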
tasks/rolling_restart.yml (6 additions, 6 deletions)

@@ -1,12 +1,12 @@
 ---

-- name: Cordon and drain the node {{ inventory_hostname }}
+- name: Cordon and drain the node {{ rke2_node_name }}
   ansible.builtin.shell: |
     set -o pipefail
     {{ rke2_data_path }}/bin/kubectl --kubeconfig /etc/rancher/rke2/rke2.yaml \
-      cordon "{{ inventory_hostname }}" && \
+      cordon "{{ rke2_node_name }}" && \
     {{ rke2_data_path }}/bin/kubectl --kubeconfig /etc/rancher/rke2/rke2.yaml \
-      drain "{{ inventory_hostname }}" --ignore-daemonsets --delete-emptydir-data
+      drain "{{ rke2_node_name }}" --ignore-daemonsets --delete-emptydir-data
   args:
     executable: /bin/bash
   register: drain
@@ -19,7 +19,7 @@
   run_once: true
   when: rke2_drain_node_during_upgrade

-- name: Restart RKE2 service on {{ inventory_hostname }}
+- name: Restart RKE2 service on {{ rke2_node_name }}
   ansible.builtin.service:
     name: "{{ rke2_service_name }}"
     state: restarted
@@ -40,11 +40,11 @@
   delegate_to: "{{ active_server | default(groups[rke2_servers_group_name].0) }}"
   run_once: true

-- name: Uncordon the node {{ inventory_hostname }}
+- name: Uncordon the node {{ rke2_node_name }}
   ansible.builtin.shell: |
     set -o pipefail
     {{ rke2_data_path }}/bin/kubectl --kubeconfig /etc/rancher/rke2/rke2.yaml \
-      uncordon "{{ inventory_hostname }}"
+      uncordon "{{ rke2_node_name }}"
   args:
     executable: /bin/bash
   changed_when: false
tasks/standalone.yml (2 additions, 2 deletions)

@@ -38,7 +38,7 @@

 - name: Wait for the first server to be ready - no CNI
   ansible.builtin.shell: |
-    {{ rke2_data_path }}/bin/kubectl --kubeconfig /etc/rancher/rke2/rke2.yaml get node "{{ inventory_hostname }}" -o jsonpath='{range .status.conditions[*]}{.message}{"\n"}{end}'
+    {{ rke2_data_path }}/bin/kubectl --kubeconfig /etc/rancher/rke2/rke2.yaml get node "{{ rke2_node_name }}" -o jsonpath='{range .status.conditions[*]}{.message}{"\n"}{end}'
   args:
     executable: /bin/bash
   changed_when: false
@@ -55,7 +55,7 @@
 - name: Wait for the first server to be ready - with CNI
   ansible.builtin.shell: |
     set -o pipefail
-    {{ rke2_data_path }}/bin/kubectl --kubeconfig /etc/rancher/rke2/rke2.yaml get nodes | grep "{{ inventory_hostname }}"
+    {{ rke2_data_path }}/bin/kubectl --kubeconfig /etc/rancher/rke2/rke2.yaml get nodes | grep "{{ rke2_node_name }}"
   args:
     executable: /bin/bash
   changed_when: false
tasks/summary.yml (1 addition, 4 deletions)

@@ -5,10 +5,9 @@
     src: /etc/rancher/rke2/rke2.yaml
     dest: "{{ rke2_download_kubeconf_path }}/{{ rke2_download_kubeconf_file_name }}"
     flat: yes
-  delegate_to: "{{ groups[rke2_servers_group_name].0 }}"
-  run_once: true
   when:
     - rke2_download_kubeconf | bool
+    - inventory_hostname == groups[rke2_servers_group_name].0

 - name: Replace loopback IP with master server IP
   ansible.builtin.replace:
@@ -31,11 +30,9 @@
   args:
     executable: /bin/bash
   changed_when: false
-  run_once: true
   retries: 5
   register: nodes_summary

 - name: K8s nodes state
   ansible.builtin.debug:
     var: nodes_summary.stdout_lines
-  run_once: true
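
Two details worth noting in the fetch task: `flat: yes` makes Ansible write the file to `dest` exactly as given, and the new `when` guard replaces delegation so only the first server performs the download. A minimal sketch with illustrative paths and a placeholder group name:

```yaml
# fetch_sketch.yml -- hypothetical task; "servers" is a placeholder group.
# Without flat: yes the file would land under
# /tmp/kubeconf/<inventory_hostname>/etc/rancher/rke2/rke2.yaml instead.
- name: Fetch the kubeconfig from the first server only
  ansible.builtin.fetch:
    src: /etc/rancher/rke2/rke2.yaml
    dest: /tmp/kubeconf/rke2.yaml
    flat: yes
  when: inventory_hostname == groups['servers'].0
```
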
templates/config.yaml.j2 (7 additions, 1 deletion)

@@ -39,7 +39,7 @@ node-label:
 {% endfor %}
 {% endif %}
 snapshotter: {{ rke2_snapshooter }}
-node-name: {{ inventory_hostname }}
+node-name: {{ rke2_node_name }}
 {% if ( disable_kube_proxy | bool ) %}
 disable-kube-proxy: true
 {% endif %}
@@ -77,6 +77,12 @@ kubelet-arg:
   - {{ argument }}
 {% endfor %}
 {% endif %}
+{% if ( rke2_kube_proxy_arg is defined ) %}
+kube-proxy-arg:
+{% for argument in rke2_kube_proxy_arg %}
+  - {{ argument }}
+{% endfor %}
+{% endif %}
 {% if (rke2_disable_cloud_controller | bool ) %}
 disable-cloud-controller: true
 cloud-provider-name: "{{ rke2_cloud_provider_name }}"
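
With `rke2_kube_proxy_arg` defined as in the README example, the new template block renders a config fragment along these lines (an illustrative rendering: the surrounding keys depend on the other role variables, and the node name shown is hypothetical):

```yaml
# /etc/rancher/rke2/config.yaml (fragment)
snapshotter: overlayfs
node-name: node-01
kube-proxy-arg:
  - proxy-mode=ipvs
```
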
templates/kube-vip/kube-vip.yml.j2 (12 additions, 2 deletions)

@@ -49,8 +49,6 @@ spec:
         - name: svc_enable
           value: "{{ rke2_kubevip_svc_enable }}"
-        - name: svc_election
-          value: "true"
         - name: enableServicesElection
           value: "{{ rke2_kubevip_service_election_enable }}"
         - name: vip_leaderelection
           value: "true"
@@ -83,11 +81,23 @@
             add:
             - NET_ADMIN
             - NET_RAW
+        volumeMounts:
+        - mountPath: /etc/kubernetes/admin.conf
+          name: kubeconfig
+      hostAliases:
+      - hostnames:
+        - kubernetes
+        ip: 127.0.0.1
       hostNetwork: true
       serviceAccountName: kube-vip
       tolerations:
       - effect: NoSchedule
         operator: Exists
       - effect: NoExecute
         operator: Exists
+      volumes:
+      - hostPath:
+          path: /etc/rancher/rke2/rke2.yaml
+          type: File
+        name: kubeconfig
   updateStrategy: {}
