Skip to content

Commit

Permalink
chore(libvirt): convert libvirt helm chart from submodule to repo (#807)
Browse files Browse the repository at this point in the history
This change removes the need to carry the openstack-helm-infra chart
for the purposes of providing a libvirt deployment. The base helm
file has been updated and simplified, reducing the values we carry
to only what we need.

Related Issue: #809
Signed-off-by: Kevin Carter <[email protected]>
  • Loading branch information
cloudnull authored Feb 24, 2025
1 parent 3d5089a commit 4fee828
Show file tree
Hide file tree
Showing 3 changed files with 27 additions and 288 deletions.
290 changes: 7 additions & 283 deletions base-helm-configs/libvirt/libvirt-helm-overrides.yaml
Original file line number Diff line number Diff line change
@@ -1,296 +1,20 @@
release_group: null
labels:
agent:
libvirt:
node_selector_key: openstack-compute-node
node_selector_value: enabled
---
images:
tags:
libvirt: docker.io/openstackhelm/libvirt:2024.1-ubuntu_jammy # We want to use jammy
libvirt_exporter: vexxhost/libvirtd-exporter:latest
ceph_config_helper: 'docker.io/openstackhelm/ceph-config-helper:ubuntu_focal_18.2.0-1-20231013'
dep_check: quay.io/airshipit/kubernetes-entrypoint:v1.0.0
image_repo_sync: docker.io/library/docker:17.07.0
kubectl: docker.io/bitnami/kubectl:latest
pull_policy: "IfNotPresent"
local_registry:
active: false
exclude:
- dep_check
- image_repo_sync
libvirt: docker.io/openstackhelm/libvirt:2024.1-ubuntu_jammy
ceph_config_helper: docker.io/openstackhelm/ceph-config-helper:ubuntu_jammy_18.2.2-1-20240312
dep_check: quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_jammy
network:
# provide what type of network wiring will be used
# possible options: ovn, openvswitch, linuxbridge, sriov
backend:
- ovn
endpoints:
cluster_domain_suffix: cluster.local
local_image_registry:
name: docker-registry
namespace: docker-registry
hosts:
default: localhost
internal: docker-registry
node: localhost
host_fqdn_override:
default: null
port:
registry:
node: 5000
oci_image_registry:
name: oci-image-registry
namespace: oci-image-registry
auth:
enabled: false
libvirt:
username: libvirt
password: password
hosts:
default: localhost
host_fqdn_override:
default: null
port:
registry:
default: null
libvirt_exporter:
port:
metrics:
default: 9474
network_policy:
libvirt:
ingress:
- {}
egress:
- {}
ceph_client:
configmap: ceph-etc
user_secret_name: pvc-ceph-client-key
conf:
ceph:
enabled: false # Set to true when we have ceph support for OpenStack.
admin_keyring: null
cinder:
user: "cinder"
keyring: null
secret_uuid: 457eb676-33da-42ec-9a8c-9293d545c337
# Cinder Ceph backend that is not configured by the k8s cluster
external_ceph:
enabled: false
user: null
secret_uuid: null
user_secret_name: null
libvirt:
listen_tcp: "1"
listen_tls: "0"
auth_tcp: "none"
ca_file: "/etc/pki/CA/cacert.pem"
cert_file: "/etc/pki/libvirt/servercert.pem"
key_file: "/etc/pki/libvirt/private/serverkey.pem"
auth_unix_rw: "none"
listen_addr: 0.0.0.0
log_level: "3"
log_outputs: "1:file:/var/log/libvirt/libvirtd.log"
qemu:
vnc_tls: "0"
vnc_tls_x509_verify: "0"
stdio_handler: "file"
user: "nova"
group: "kvm"
kubernetes:
cgroup: "kubepods.slice"
vencrypt:
# Issuer to use for the vencrypt certs.
issuer:
kind: ClusterIssuer
name: ca-clusterissuer
# Script is included here (vs in bin/) to allow overriding, in the case that
# communication happens over an IP other than the pod IP for some reason.
cert_init_sh: |
#!/bin/bash
set -x
HOSTNAME_FQDN=$(hostname --fqdn)
# Script to create certs for each libvirt pod based on pod IP (by default).
cat <<EOF | kubectl apply -f -
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: ${POD_NAME}-${TYPE}
namespace: ${POD_NAMESPACE}
ownerReferences:
- apiVersion: v1
kind: Pod
name: ${POD_NAME}
uid: ${POD_UID}
spec:
secretName: ${POD_NAME}-${TYPE}
commonName: ${POD_IP}
usages:
- client auth
- server auth
dnsNames:
- ${HOSTNAME}
- ${HOSTNAME_FQDN}
ipAddresses:
- ${POD_IP}
issuerRef:
kind: ${ISSUER_KIND}
name: ${ISSUER_NAME}
EOF
kubectl -n ${POD_NAMESPACE} wait --for=condition=Ready --timeout=300s \
certificate/${POD_NAME}-${TYPE}
# NOTE(mnaser): cert-manager does not clean-up the secrets when the certificate
# is deleted, so we should add an owner reference to the secret
# to ensure that it is cleaned up when the pod is deleted.
kubectl -n ${POD_NAMESPACE} patch secret ${POD_NAME}-${TYPE} \
--type=json -p='[{"op": "add", "path": "/metadata/ownerReferences", "value": [{"apiVersion": "v1", "kind": "Pod", "name": "'${POD_NAME}'", "uid": "'${POD_UID}'"}]}]'
kubectl -n ${POD_NAMESPACE} get secret ${POD_NAME}-${TYPE} -o jsonpath='{.data.tls\.crt}' | base64 -d > /tmp/${TYPE}.crt
kubectl -n ${POD_NAMESPACE} get secret ${POD_NAME}-${TYPE} -o jsonpath='{.data.tls\.key}' | base64 -d > /tmp/${TYPE}.key
kubectl -n ${POD_NAMESPACE} get secret ${POD_NAME}-${TYPE} -o jsonpath='{.data.ca\.crt}' | base64 -d > /tmp/${TYPE}-ca.crt
pod:
probes:
libvirt:
libvirt:
liveness:
enabled: true
params:
initialDelaySeconds: 30
periodSeconds: 60
timeoutSeconds: 5
readiness:
enabled: true
params:
initialDelaySeconds: 15
periodSeconds: 60
timeoutSeconds: 5
security_context:
libvirt:
pod:
runAsUser: 0
container:
ceph_admin_keyring_placement:
readOnlyRootFilesystem: false
ceph_keyring_placement:
readOnlyRootFilesystem: false
libvirt:
privileged: true
readOnlyRootFilesystem: false
libvirt_exporter:
privileged: true
sidecars:
libvirt_exporter: false
affinity:
anti:
type:
default: preferredDuringSchedulingIgnoredDuringExecution
topologyKey:
default: kubernetes.io/hostname
weight:
default: 10
tolerations:
libvirt:
enabled: false
tolerations:
- key: node-role.kubernetes.io/master
operator: Exists
effect: NoSchedule
- key: node-role.kubernetes.io/control-plane
operator: Exists
effect: NoSchedule
dns_policy: "ClusterFirstWithHostNet"
mounts:
libvirt:
init_container: null
libvirt:
lifecycle:
upgrades:
daemonsets:
pod_replacement_strategy: RollingUpdate
libvirt:
enabled: true
min_ready_seconds: 0
max_unavailable: 20%
resources:
enabled: false
libvirt:
requests:
memory: "64Mi"
cpu: "100m"
limits:
memory: "4096Mi"
jobs:
image_repo_sync:
requests:
memory: "64Mi"
cpu: "100m"
limits:
memory: "4096Mi"
libvirt_exporter:
requests:
memory: "64Mi"
cpu: "100m"
limits:
memory: "256Mi"
libvirt:
listen_addr: "0.0.0.0"
dependencies:
dynamic:
common:
local_image_registry:
jobs:
- libvirt-image-repo-sync
services:
- endpoint: node
service: local_image_registry
targeted:
ovn:
libvirt:
pod: [] # In a hybrid deployment, we don't want to run ovn-controller on the same node as libvirt
# - requireSameNode: true
# labels:
# application: ovn
# component: ovn-controller
openvswitch:
libvirt:
pod:
- requireSameNode: true
labels:
application: neutron
component: neutron-ovs-agent
linuxbridge:
libvirt:
pod:
- requireSameNode: true
labels:
application: neutron
component: neutron-lb-agent
sriov:
libvirt:
pod:
- requireSameNode: true
labels:
application: neutron
component: neutron-sriov-agent
static:
libvirt:
services: null
image_repo_sync:
services:
- endpoint: internal
service: local_image_registry
manifests:
configmap_bin: true
configmap_etc: true
daemonset_libvirt: true
job_image_repo_sync: true
network_policy: false
role_cert_manager: false
secret_registry: true
secrets:
oci_image_registry:
libvirt: libvirt-oci-image-registry-key
tls:
server: libvirt-tls-server
client: libvirt-tls-client
pod: [] # In a hybrid deployment, we don't want to run ovn-controller on the same node as libvirt
9 changes: 4 additions & 5 deletions bin/install-libvirt.sh
Original file line number Diff line number Diff line change
Expand Up @@ -5,9 +5,7 @@ GLOBAL_OVERRIDES_DIR="/etc/genestack/helm-configs/global_overrides"
SERVICE_CONFIG_DIR="/etc/genestack/helm-configs/libvirt"
BASE_OVERRIDES="/opt/genestack/base-helm-configs/libvirt/libvirt-helm-overrides.yaml"

pushd /opt/genestack/submodules/openstack-helm-infra || exit 1

HELM_CMD="helm upgrade --install libvirt ./libvirt \
HELM_CMD="helm upgrade --install libvirt openstack-helm-infra/libvirt --version 2024.2.92+628a320c \
--namespace=openstack \
--timeout 120m"

Expand All @@ -26,8 +24,9 @@ done

HELM_CMD+=" $@"

helm repo add openstack-helm-infra https://tarballs.opendev.org/openstack/openstack-helm-infra
helm repo update

echo "Executing Helm command:"
echo "${HELM_CMD}"
eval "${HELM_CMD}"

popd || exit 1
16 changes: 16 additions & 0 deletions releasenotes/notes/libvirt-chart-2f3d090799aff3e0.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,16 @@
---
deprecations:
- |
The libvirt chart will now use the online OSH helm repository. This change
allows the libvirt chart to be updated more frequently and to be used
directly with the OpenStack-Helm project. Upgrading to
this chart may require changes to the deployment configuration. Simple
updates can be made by running the following command:
.. code-block:: shell
helm -n openstack uninstall libvirt
/opt/genestack/bin/install-libvirt.sh
This operation should have no operational impact on running VMs but should be
performed during a maintenance window.

0 comments on commit 4fee828

Please sign in to comment.