From cebc733f143abd2048ee43922ad40ccc2ae131df Mon Sep 17 00:00:00 2001
From: djerfy
Date: Wed, 10 Jan 2024 11:54:57 +0100
Subject: [PATCH 01/34] feat: first refactoring

Signed-off-by: djerfy
---
 Dockerfile                                  |  14 +-
 src/config.yaml                             |  68 +++++
 src/modules/common/functions.py             |  45 +--
 src/modules/kubernetes/__init__.py          |   0
 src/modules/kubernetes/base/__init__.py     |   6 +
 src/modules/kubernetes/base/cronjobs.py     | 121 ++++++++
 src/modules/kubernetes/base/daemonsets.py   |  89 ++++++
 src/modules/kubernetes/base/deployments.py  |  87 ++++++
 src/modules/kubernetes/base/nodes.py        |  98 ++++++
 src/modules/kubernetes/base/statefulsets.py |  87 ++++++
 src/modules/kubernetes/base/volumes.py      | 104 +++++++
 src/modules/kubernetes/get.py               | 321 --------------------
 src/modules/zabbix/__init__.py              |   0
 src/modules/zabbix/discovery.py             | 107 -------
 src/modules/zabbix/item.py                  | 101 ------
 src/requirements.txt                        |   1 +
 src/zabbix-kubernetes-discovery.py          | 158 +++-------
 17 files changed, 719 insertions(+), 688 deletions(-)
 create mode 100644 src/config.yaml
 delete mode 100644 src/modules/kubernetes/__init__.py
 create mode 100644 src/modules/kubernetes/base/__init__.py
 create mode 100644 src/modules/kubernetes/base/cronjobs.py
 create mode 100644 src/modules/kubernetes/base/daemonsets.py
 create mode 100644 src/modules/kubernetes/base/deployments.py
 create mode 100644 src/modules/kubernetes/base/nodes.py
 create mode 100644 src/modules/kubernetes/base/statefulsets.py
 create mode 100644 src/modules/kubernetes/base/volumes.py
 delete mode 100644 src/modules/kubernetes/get.py
 delete mode 100644 src/modules/zabbix/discovery.py
 delete mode 100644 src/modules/zabbix/item.py

diff --git a/Dockerfile b/Dockerfile
index 5e485b5..60c3465 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -6,9 +6,6 @@ LABEL description="Zabbix Kubernetes Discovery" \
 
 WORKDIR /app
 
-ENV ZABBIX_ENDPOINT=""
-ENV KUBERNETES_NAME=""
-
 ARG CONTAINER_USER="zabbix"
 ARG CONTAINER_GROUP="zabbix"
 
@@ -17,18 +14,9 @@ RUN apt-get update && \
     apt-get install -y --no-install-recommends curl iputils-ping python3 python3-pip && \
     rm -rf /var/lib/apt/lists && \
     mkdir -p /app /root/.kube && \
-    touch /app/crontab && \
     groupadd -g 2000 ${CONTAINER_GROUP} && \
     useradd -u 2000 -d /app -s /bin/bash -M -g ${CONTAINER_GROUP} ${CONTAINER_USER}
 
-ARG SUPERCRONIC_VER="0.2.28"
-ARG SUPERCRONIC_SHA="fe1a81a8a5809deebebbd7a209a3b97e542e2bcd"
-
-RUN curl -fsSLO "https://github.com/aptible/supercronic/releases/download/v${SUPERCRONIC_VER}/supercronic-linux-amd64" && \
-    echo "${SUPERCRONIC_SHA} supercronic-linux-amd64" | sha1sum -c - && \
-    chmod +x supercronic-linux-amd64 && \
-    mv supercronic-linux-amd64 /usr/local/bin/supercronic
-
 COPY ./src/ /app/
 
 RUN chown ${CONTAINER_USER}:${CONTAINER_GROUP} -R /app && \
@@ -37,4 +25,4 @@ RUN chown ${CONTAINER_USER}:${CONTAINER_GROUP} -R /app && \
 
 USER ${CONTAINER_USER}:${CONTAINER_GROUP}
 
-CMD ["/usr/local/bin/supercronic", "-split-logs", "-json", "/app/crontab"]
+CMD ["/usr/bin/python3", "/app/zabbix-kubernetes-discovery.py"]
diff --git a/src/config.yaml b/src/config.yaml
new file mode 100644
index 0000000..de34527
--- /dev/null
+++ b/src/config.yaml
@@ -0,0 +1,68 @@
+zabbix:
+  endpoint: localhost
+  timeout: 10
+  schedule:
+    discovery: 60
+    items: 2
+
+kubernetes:
+  name: cluster-name
+
+monitoring:
+  # core
+  nodes:
+    enabled: True
+    labels:
+      include: []
+      exclude: []
+  daemonsets:
+    enabled: True
+    labels:
+      include: []
+      exclude: []
+  volumes:
+    enabled: True
+    labels:
+      include: []
+      exclude: []
+  deployments:
+    enabled: True
+    labels:
+      include: []
+      exclude: []
+  statefulsets:
+    enabled: True
+    labels:
+      include: []
+      exclude: []
+  cronjobs:
+    enabled: True
+    labels:
+      include: []
+      exclude: []
+  ingresses:
+    enabled: True
+    labels:
+      include: []
+      exclude: []
+  # plugins
+  openebs:
+    enabled: False
+    labels:
+      include: []
+      exclude: []
+  velero:
+    enabled: True
+    labels:
+      include: []
+      exclude: []
+  trivy:
+    enabled: True
+    labels:
+      include: []
+      exclude: []
+  certs:
+    enabled: True
+    labels:
+      include: []
+      exclude: []
\ No newline at end of file
diff --git a/src/modules/common/functions.py b/src/modules/common/functions.py
index 5759502..3cb4adf 100644
--- a/src/modules/common/functions.py
+++ b/src/modules/common/functions.py
@@ -1,48 +1,21 @@
 import re
 import json
 
-def ifObjectMatch(object_list=None, object_name=None):
+def matchLabels(match_labels=None, object_labels=None):
     """
-    description: check if the object is in list
+    description: check if the object matches labels
     return: bool
     """
-    if object_list is None or object_list == "" or object_list == "*":
-        return False
-
-    if object_name is None or object_name == "" or object_name == "*":
-        return False
-
-    if type(object_list) == str:
-        object_list = object_list.split(",")
-
-    if type(object_list) != list:
-        return False
-
-    reg_list = map(re.compile, object_list)
-
-    if any(reg.match(object_name) for reg in reg_list):
-        return True
-
-    return False
-
-def ifLabelMatch(match_label=None, object_labels=None):
-    """
-    description: check if the object match a label
-    return: bool
-    """
-    if match_label is None or match_label == "" or match_label == "*":
-        return False
-
-    if object_labels is None or object_labels == "" or object_labels == "*":
-        return False
-
+    for i in [match_labels, object_labels]:
+        if i is None or i == [] or i == "" or i == "*":
+            return False
+
     object_labels = str(object_labels).replace("{", "").replace("}", "").replace("'", "").replace(" ", "").split(",")
 
     for label in object_labels:
-        k, v = label.split(":")[0], label.split(":")[1]
-
+        key, value = label.split(":")[0], label.split(":")[1]
         for separator in ["=", ":"]:
-            if match_label.split(separator)[0] == k and match_label.split(separator)[1] == v:
+            if match_labels.split(separator)[0] == key and match_labels.split(separator)[1] == value:
                 return True
-
+
     return False
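[Note: the following usage sketch is illustrative and not part of the patch.
It shows how the new matchLabels() helper is expected to behave, assuming the
labels come in as the metadata.labels dict of a Kubernetes object; the names
and values below are invented.]

    from modules.common.functions import matchLabels

    labels = {"app": "nginx", "env": "prod"}   # e.g. deployment.metadata.labels
    matchLabels("app=nginx", labels)           # True  ("=" separator)
    matchLabels("env:prod", labels)            # True  (":" separator)
    matchLabels("app=apache", labels)          # False (value mismatch)
    matchLabels(None, labels)                  # False (guard clause)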
diff --git a/src/modules/kubernetes/__init__.py b/src/modules/kubernetes/__init__.py
deleted file mode 100644
index e69de29..0000000
diff --git a/src/modules/kubernetes/base/__init__.py b/src/modules/kubernetes/base/__init__.py
new file mode 100644
index 0000000..1bcce39
--- /dev/null
+++ b/src/modules/kubernetes/base/__init__.py
@@ -0,0 +1,6 @@
+from modules.kubernetes.base.nodes import baseNodes
+from modules.kubernetes.base.cronjobs import baseCronjobs
+from modules.kubernetes.base.volumes import baseVolumes
+from modules.kubernetes.base.statefulsets import baseStatefulsets
+from modules.kubernetes.base.deployments import baseDeployments
+from modules.kubernetes.base.daemonsets import baseDaemonsets
diff --git a/src/modules/kubernetes/base/cronjobs.py b/src/modules/kubernetes/base/cronjobs.py
new file mode 100644
index 0000000..31e496e
--- /dev/null
+++ b/src/modules/kubernetes/base/cronjobs.py
@@ -0,0 +1,121 @@
+from kubernetes import client
+from datetime import datetime
+from pyzabbix import ZabbixMetric
+from modules.common.functions import *
+import json, urllib3
+
+urllib3.disable_warnings()
+
+def kubernetesGetCronjobs(config=None):
+    """
+    description: get cronjobs data
+    return: list
+    """
+    kubernetes = client.BatchV1Api()
+
+    cronjobs = []
+
+    for cronjob in kubernetes.list_cron_job_for_all_namespaces().items:
+        related_jobs, job_latest = [], {}
+
+        for job in kubernetes.list_job_for_all_namespaces().items:
+            if not job:
+                continue
+
+            if not job.metadata.owner_references:
+                continue
+
+            if not "CronJob" in job.metadata.owner_references[0].kind:
+                continue
+
+            if job.metadata.owner_references[0].name != cronjob.metadata.name:
+                continue
+
+            if job.status.active is not None:
+                continue
+
+            related_jobs.append(job)
+
+        for related_job in related_jobs:
+            if not bool(job_latest):
+                job_latest = related_job
+                continue
+
+            related_job_dt = datetime.timestamp(related_job.status.conditions[0].last_probe_time)
+            job_latest_dt = datetime.timestamp(job_latest.status.conditions[0].last_probe_time)
+
+            if related_job_dt > job_latest_dt:
+                job_latest = related_job
+
+        if type(job_latest) is dict:
+            continue
+
+        if job_latest.status.conditions[0].type == "Complete":
+            cronjob_status = "0"
+        else:
+            cronjob_status = "1"
+
+        json = {
+            "name": cronjob.metadata.name,
+            "namespace": cronjob.metadata.namespace,
+            "status": cronjob_status,
+            "last_job": {
+                "name": job_latest.metadata.name,
+                "reason": job_latest.status.conditions[0].reason,
+                "message": job_latest.status.conditions[0].message,
+                "status": job_latest.status.conditions[0].type
+            }
+        }
+
+        if matchLabels(config['labels']['exclude'], cronjob.metadata.labels):
+            continue
+
+        if config['labels']['include'] != []:
+            if not matchLabels(config['labels']['include'], cronjob.metadata.labels):
+                continue
+
+        cronjobs.append(json)
+
+    return cronjobs
+
+def zabbixDiscoveryCronjobs(clustername, cronjobs=[]):
+    """
+    description: create a discovery for cronjob, per namespace
+    return: class ZabbixMetric
+    """
+    discovery = {"data":[]}
+
+    for cronjob in cronjobs:
+        output = {
+            "{#KUBERNETES_CRONJOB_NAMESPACE}": cronjob['namespace'],
+            "{#KUBERNETES_CRONJOB_NAME}": cronjob['name']}
+        discovery['data'].append(output)
+
+    sender = [ZabbixMetric(clustername, "kubernetes.cronjob.discovery", json.dumps(discovery))]
+
+    return sender
+
+def zabbixItemCronjobs(clustername, cronjobs=[]):
+    """
+    description: create an item for cronjob, per namespace
+    return: class ZabbixMetric
+    """
+    sender = []
+
+    for cronjob in cronjobs:
+        sender.append(ZabbixMetric(clustername, f"kubernetes.cronjob.status[{cronjob['namespace']},{cronjob['name']}]", cronjob['status']),)
+        sender.append(ZabbixMetric(clustername, f"kubernetes.cronjob.reason[{cronjob['namespace']},{cronjob['name']}]", cronjob['last_job']['reason']),)
+        sender.append(ZabbixMetric(clustername, f"kubernetes.cronjob.message[{cronjob['namespace']},{cronjob['name']}]", cronjob['last_job']['message']),)
+
+    return sender
+
+def baseCronjobs(mode=None, config=None):
+    """
+    description: monitoring cronjobs
+    return: class ZabbixMetric
+    """
+    if mode == "discovery":
+        return zabbixDiscoveryCronjobs(config['kubernetes']['name'], kubernetesGetCronjobs(config['monitoring']['cronjobs']))
+    if mode == "item":
+        return zabbixItemCronjobs(config['kubernetes']['name'], kubernetesGetCronjobs(config['monitoring']['cronjobs']))
+    return []
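[Note: illustrative only, not part of the patch. The discovery functions in
this series emit Zabbix low-level discovery (LLD) payloads; with an invented
cluster and cronjob name, the single metric built by zabbixDiscoveryCronjobs()
would look like this:]

    import json
    from pyzabbix import ZabbixMetric

    discovery = {"data": [
        {"{#KUBERNETES_CRONJOB_NAMESPACE}": "default",
         "{#KUBERNETES_CRONJOB_NAME}": "backup-nightly"},
    ]}
    ZabbixMetric("my-cluster", "kubernetes.cronjob.discovery", json.dumps(discovery))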
diff --git a/src/modules/kubernetes/base/daemonsets.py b/src/modules/kubernetes/base/daemonsets.py
new file mode 100644
index 0000000..e424e96
--- /dev/null
+++ b/src/modules/kubernetes/base/daemonsets.py
@@ -0,0 +1,89 @@
+from kubernetes import client
+from pyzabbix import ZabbixMetric
+from modules.common.functions import *
+import json, urllib3
+
+urllib3.disable_warnings()
+
+def kubernetesGetDaemonsets(config=None):
+    """
+    description: get daemonsets data
+    return: list
+    """
+    kubernetes = client.AppsV1Api()
+
+    daemonsets = []
+
+    for daemonset in kubernetes.list_daemon_set_for_all_namespaces().items:
+
+        json = {
+            "name": daemonset.metadata.name,
+            "namespace": daemonset.metadata.namespace,
+            "replicas": {
+                "desired": daemonset.status.desired_number_scheduled,
+                "current": daemonset.status.current_number_scheduled,
+                "available": daemonset.status.number_available,
+                "ready": daemonset.status.number_ready
+            }
+        }
+
+        for i in ["desired", "current", "available", "ready"]:
+            if json['replicas'][i] is None:
+                json['replicas'][i] = 0
+
+        if matchLabels(config['labels']['exclude'], daemonset.metadata.labels):
+            continue
+
+        if config['labels']['include'] != []:
+            if not matchLabels(config['labels']['include'], daemonset.metadata.labels):
+                continue
+
+        if any(d['name'] == json['name'] and d['namespace'] == json['namespace'] for d in daemonsets):
+            continue
+
+        daemonsets.append(json)
+
+    return daemonsets
+
+def zabbixDiscoveryDaemonsets(clustername, daemonsets=[]):
+    """
+    description: create a discovery for daemonset, per namespace
+    return: class ZabbixMetric
+    """
+    discovery = {"data":[]}
+
+    for daemonset in daemonsets:
+        output = {
+            "{#KUBERNETES_DAEMONSET_NAMESPACE}": daemonset['namespace'],
+            "{#KUBERNETES_DAEMONSET_NAME}": daemonset['name']}
+        discovery['data'].append(output)
+
+    sender = [ZabbixMetric(clustername, "kubernetes.daemonset.discovery", json.dumps(discovery))]
+
+    return sender
+
+def zabbixItemDaemonsets(clustername, daemonsets=[]):
+    """
+    description: create an item for daemonset, per namespace
+    return: class ZabbixMetric
+    """
+    sender = []
+
+    for daemonset in daemonsets:
+        sender.append(ZabbixMetric(clustername, f"kubernetes.daemonset.desiredReplicas[{daemonset['namespace']},{daemonset['name']}]", daemonset['replicas']['desired']),)
+        sender.append(ZabbixMetric(clustername, f"kubernetes.daemonset.currentReplicas[{daemonset['namespace']},{daemonset['name']}]", daemonset['replicas']['current']),)
+        sender.append(ZabbixMetric(clustername, f"kubernetes.daemonset.availableReplicas[{daemonset['namespace']},{daemonset['name']}]", daemonset['replicas']['available']),)
+        sender.append(ZabbixMetric(clustername, f"kubernetes.daemonset.readyReplicas[{daemonset['namespace']},{daemonset['name']}]", daemonset['replicas']['ready']),)
+
+    return sender
+
+def baseDaemonsets(mode=None, config=None):
+    """
+    description: monitoring daemonsets
+    return: class ZabbixMetric
+    """
+    if mode == "discovery":
+        return zabbixDiscoveryDaemonsets(config['kubernetes']['name'], kubernetesGetDaemonsets(config['monitoring']['daemonsets']))
+    if mode == "item":
+        return zabbixItemDaemonsets(config['kubernetes']['name'], kubernetesGetDaemonsets(config['monitoring']['daemonsets']))
+    return []
\ No newline at end of file
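[Note: illustrative only, not part of the patch. The replica counters returned
by the Kubernetes API can be None on freshly created objects; the loop above
coerces them to 0 so Zabbix always receives numeric values. The same effect,
shown with invented data:]

    status = {"desired": 2, "current": 2, "available": None, "ready": None}
    normalized = {k: 0 if v is None else v for k, v in status.items()}
    # -> {'desired': 2, 'current': 2, 'available': 0, 'ready': 0}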
diff --git a/src/modules/kubernetes/base/deployments.py b/src/modules/kubernetes/base/deployments.py
new file mode 100644
index 0000000..8fdacb2
--- /dev/null
+++ b/src/modules/kubernetes/base/deployments.py
@@ -0,0 +1,87 @@
+from kubernetes import client
+from pyzabbix import ZabbixMetric
+from modules.common.functions import *
+import json, urllib3
+
+urllib3.disable_warnings()
+
+def kubernetesGetDeployments(config=None):
+    """
+    description: get deployments data
+    return: list
+    """
+    kubernetes = client.AppsV1Api()
+
+    deployments = []
+
+    for deployment in kubernetes.list_deployment_for_all_namespaces().items:
+
+        json = {
+            "name": deployment.metadata.name,
+            "namespace": deployment.metadata.namespace,
+            "replicas": {
+                "desired": deployment.status.replicas,
+                "ready": deployment.status.ready_replicas,
+                "available": deployment.status.available_replicas
+            }
+        }
+
+        if matchLabels(config['labels']['exclude'], deployment.metadata.labels):
+            continue
+
+        if config['labels']['include'] != []:
+            if not matchLabels(config['labels']['include'], deployment.metadata.labels):
+                continue
+
+        for i in ["desired", "ready", "available"]:
+            if json['replicas'][i] is None:
+                json['replicas'][i] = 0
+
+        if any(d['name'] == json['name'] and d['namespace'] == json['namespace'] for d in deployments):
+            continue
+
+        deployments.append(json)
+
+    return deployments
+
+def zabbixDiscoveryDeployments(clustername, deployments=[]):
+    """
+    description: create a discovery for deployment, per namespace
+    return: class ZabbixMetric
+    """
+    discovery = {"data":[]}
+
+    for deployment in deployments:
+        output = {
+            "{#KUBERNETES_DEPLOYMENT_NAMESPACE}": deployment['namespace'],
+            "{#KUBERNETES_DEPLOYMENT_NAME}": deployment['name']}
+        discovery['data'].append(output)
+
+    sender = [ZabbixMetric(clustername, "kubernetes.deployment.discovery", json.dumps(discovery))]
+
+    return sender
+
+def zabbixItemDeployments(clustername, deployments=[]):
+    """
+    description: create an item for deployment, per namespace
+    return: class ZabbixMetric
+    """
+    sender = []
+
+    for deployment in deployments:
+        sender.append(ZabbixMetric(clustername, f"kubernetes.deployment.availableReplicas[{deployment['namespace']},{deployment['name']}]", deployment['replicas']['available']),)
+        sender.append(ZabbixMetric(clustername, f"kubernetes.deployment.readyReplicas[{deployment['namespace']},{deployment['name']}]", deployment['replicas']['ready']),)
+        sender.append(ZabbixMetric(clustername, f"kubernetes.deployment.desiredReplicas[{deployment['namespace']},{deployment['name']}]", deployment['replicas']['desired']),)
+
+    return sender
+
+def baseDeployments(mode=None, config=None):
+    """
+    description: monitoring deployments
+    return: class ZabbixMetric
+    """
+    if mode == "discovery":
+        return zabbixDiscoveryDeployments(config['kubernetes']['name'], kubernetesGetDeployments(config['monitoring']['deployments']))
+    if mode == "item":
+        return zabbixItemDeployments(config['kubernetes']['name'], kubernetesGetDeployments(config['monitoring']['deployments']))
+    return []
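[Note: illustrative only, not part of the patch. With an invented deployment
"myapp" in namespace "default" running 3/3 replicas, the item function above
produces trapper values keyed like:]

    kubernetes.deployment.desiredReplicas[default,myapp]   -> 3
    kubernetes.deployment.readyReplicas[default,myapp]     -> 3
    kubernetes.deployment.availableReplicas[default,myapp] -> 3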
diff --git a/src/modules/kubernetes/base/nodes.py b/src/modules/kubernetes/base/nodes.py
new file mode 100644
index 0000000..6c4aee4
--- /dev/null
+++ b/src/modules/kubernetes/base/nodes.py
@@ -0,0 +1,98 @@
+from kubernetes import client
+from pyzabbix import ZabbixMetric
+from modules.common.functions import *
+import json, urllib3
+
+urllib3.disable_warnings()
+
+def kubernetesGetNodes(config=None):
+    """
+    description: get nodes data
+    return: list
+    """
+    kubernetes = client.CoreV1Api()
+
+    nodes = []
+
+    for node in kubernetes.list_node().items:
+        if node.spec.taints is not None:
+            if "node.kubernetes.io/not-ready" in str(node.spec.taints):
+                continue
+
+        node_healthz = kubernetes.connect_get_node_proxy_with_path(name=node.metadata.name, path="healthz")
+        node_status = kubernetes.read_node_status(name=node.metadata.name)
+        node_pods = kubernetes.list_pod_for_all_namespaces(field_selector="spec.nodeName={}".format(node.metadata.name))
+
+        json = {
+            "name": node.metadata.name,
+            "uid": node.metadata.uid,
+            "status": node_healthz,
+            "capacity": node_status.status.capacity,
+            "allocatable": node_status.status.allocatable,
+            "current": {
+                "pods": str(len(node_pods.items)),
+                "pods_used": str(round(len(node_pods.items) * 100 / int(node_status.status.allocatable['pods']), 1)),
+                "pods_free": str(round(100 - (len(node_pods.items) * 100 / int(node_status.status.allocatable['pods'])), 1))
+            }
+        }
+
+        if matchLabels(config['labels']['exclude'], node.metadata.labels):
+            continue
+
+        if config['labels']['include'] != []:
+            if not matchLabels(config['labels']['include'], node.metadata.labels):
+                continue
+
+        if any(n['name'] == json['name'] for n in nodes):
+            continue
+
+        nodes.append(json)
+
+    return nodes
+
+def zabbixDiscoveryNodes(clustername, nodes=[]):
+    """
+    description: create a discovery for node
+    return: class ZabbixMetric
+    """
+    discovery = {"data":[]}
+
+    for node in nodes:
+        output = {"{#KUBERNETES_NODE_NAME}": node['name']}
+        discovery['data'].append(output)
+
+    sender = [ZabbixMetric(clustername, "kubernetes.node.discovery", json.dumps(discovery))]
+
+    return sender
+
+def zabbixItemNodes(clustername, nodes=[]):
+    """
+    description: create an item for node
+    return: class ZabbixMetric
+    """
+    sender = []
+
+    for node in nodes:
+        sender.append(ZabbixMetric(clustername, f"kubernetes.node.healthz[{node['name']}]", node['status']),)
+        sender.append(ZabbixMetric(clustername, f"kubernetes.node.capacity.cpu[{node['name']}]", node['capacity']['cpu']),)
+        sender.append(ZabbixMetric(clustername, f"kubernetes.node.capacity.memory[{node['name']}]", node['capacity']['memory']),)
+        sender.append(ZabbixMetric(clustername, f"kubernetes.node.capacity.pods[{node['name']}]", node['capacity']['pods']),)
+        sender.append(ZabbixMetric(clustername, f"kubernetes.node.allocatable.cpu[{node['name']}]", node['allocatable']['cpu']),)
+        sender.append(ZabbixMetric(clustername, f"kubernetes.node.allocatable.memory[{node['name']}]", node['allocatable']['memory']),)
+        sender.append(ZabbixMetric(clustername, f"kubernetes.node.allocatable.pods[{node['name']}]", node['allocatable']['pods']),)
+        sender.append(ZabbixMetric(clustername, f"kubernetes.node.current.pods[{node['name']}]", node['current']['pods']),)
+        sender.append(ZabbixMetric(clustername, f"kubernetes.node.current.podsUsed[{node['name']}]", node['current']['pods_used']),)
+        sender.append(ZabbixMetric(clustername, f"kubernetes.node.current.podsFree[{node['name']}]", node['current']['pods_free']),)
+
+    return sender
+
+def baseNodes(mode=None, config=None):
+    """
+    description: monitoring nodes
+    return: class ZabbixMetric
+    """
+    if mode == "discovery":
+        return zabbixDiscoveryNodes(config['kubernetes']['name'], kubernetesGetNodes(config['monitoring']['nodes']))
+    if mode == "item":
+        return zabbixItemNodes(config['kubernetes']['name'], kubernetesGetNodes(config['monitoring']['nodes']))
+    return []
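[Note: illustrative only, not part of the patch. A worked example of the
pod-usage arithmetic above, with invented numbers:]

    allocatable_pods = 110   # int(node_status.status.allocatable['pods'])
    running_pods = 42        # len(node_pods.items)
    pods_used = round(running_pods * 100 / allocatable_pods, 1)        # 38.2 (%)
    pods_free = round(100 - running_pods * 100 / allocatable_pods, 1)  # 61.8 (%)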
diff --git a/src/modules/kubernetes/base/statefulsets.py b/src/modules/kubernetes/base/statefulsets.py
new file mode 100644
index 0000000..fcf01d3
--- /dev/null
+++ b/src/modules/kubernetes/base/statefulsets.py
@@ -0,0 +1,87 @@
+from kubernetes import client
+from pyzabbix import ZabbixMetric
+from modules.common.functions import *
+import json, urllib3
+
+urllib3.disable_warnings()
+
+def kubernetesGetStatefulsets(config=None):
+    """
+    description: get statefulsets data
+    return: list
+    """
+    kubernetes = client.AppsV1Api()
+
+    statefulsets = []
+
+    for statefulset in kubernetes.list_stateful_set_for_all_namespaces().items:
+
+        json = {
+            "name": statefulset.metadata.name,
+            "namespace": statefulset.metadata.namespace,
+            "replicas": {
+                "available": statefulset.status.current_replicas,
+                "ready": statefulset.status.ready_replicas,
+                "desired": statefulset.status.replicas
+            }
+        }
+
+        if matchLabels(config['labels']['exclude'], statefulset.metadata.labels):
+            continue
+
+        if config['labels']['include'] != []:
+            if not matchLabels(config['labels']['include'], statefulset.metadata.labels):
+                continue
+
+        for i in ["desired", "ready", "available"]:
+            if json['replicas'][i] is None:
+                json['replicas'][i] = 0
+
+        if any(s['name'] == json['name'] and s['namespace'] == json['namespace'] for s in statefulsets):
+            continue
+
+        statefulsets.append(json)
+
+    return statefulsets
+
+def zabbixDiscoveryStatefulsets(clustername, statefulsets=[]):
+    """
+    description: create a discovery for statefulset, per namespace
+    return: class ZabbixMetric
+    """
+    discovery = {"data":[]}
+
+    for statefulset in statefulsets:
+        output = {
+            "{#KUBERNETES_STATEFULSET_NAMESPACE}": statefulset['namespace'],
+            "{#KUBERNETES_STATEFULSET_NAME}": statefulset['name']}
+        discovery['data'].append(output)
+
+    sender = [ZabbixMetric(clustername, "kubernetes.statefulset.discovery", json.dumps(discovery))]
+
+    return sender
+
+def zabbixItemStatefulsets(clustername, statefulsets=[]):
+    """
+    description: create an item for statefulset, per namespace
+    return: class ZabbixMetric
+    """
+    sender = []
+
+    for statefulset in statefulsets:
+        sender.append(ZabbixMetric(clustername, f"kubernetes.statefulset.availableReplicas[{statefulset['namespace']},{statefulset['name']}]", statefulset['replicas']['available']),)
+        sender.append(ZabbixMetric(clustername, f"kubernetes.statefulset.readyReplicas[{statefulset['namespace']},{statefulset['name']}]", statefulset['replicas']['ready']),)
+        sender.append(ZabbixMetric(clustername, f"kubernetes.statefulset.desiredReplicas[{statefulset['namespace']},{statefulset['name']}]", statefulset['replicas']['desired']),)
+
+    return sender
+
+def baseStatefulsets(mode=None, config=None):
+    """
+    description: monitoring statefulsets
+    return: class ZabbixMetric
+    """
+    if mode == "discovery":
+        return zabbixDiscoveryStatefulsets(config['kubernetes']['name'], kubernetesGetStatefulsets(config['monitoring']['statefulsets']))
+    if mode == "item":
+        return zabbixItemStatefulsets(config['kubernetes']['name'], kubernetesGetStatefulsets(config['monitoring']['statefulsets']))
+    return []
diff --git a/src/modules/kubernetes/base/volumes.py b/src/modules/kubernetes/base/volumes.py
new file mode 100644
index 0000000..861ee6a
--- /dev/null
+++ b/src/modules/kubernetes/base/volumes.py
@@ -0,0 +1,104 @@
+from kubernetes import client
+from pyzabbix import ZabbixMetric
+from modules.common.functions import *
+import json, urllib3, re, logging
+
+urllib3.disable_warnings()
+
+def kubernetesGetVolumes(config=None):
+    """
+    description: get volumes data
+    return: list
+    """
+    kubernetes = client.CoreV1Api()
+
+    volumes = []
+
+    for node in kubernetes.list_node().items:
+        if node.spec.taints is not None:
+            if "node.kubernetes.io/not-ready" in str(node.spec.taints):
+                continue
+
+        node_info = kubernetes.connect_get_node_proxy_with_path(name=node.metadata.name, path="stats/summary").replace("'", "\"")
+        node_json = json.loads(node_info)
+
+        for pod in node_json['pods']:
+            if not "volume" in pod:
+                continue
+
+            for volume in pod['volume']:
+
+                if not "pvcRef" in volume:
+                    continue
+
+                if volume['pvcRef']['name'].startswith(pod['podRef']['name']) and re.match(r"(.*)-[a-z0-9]{8,10}-[a-z0-9]{5}$", pod['podRef']['name']):
+                    continue
+
+                volume['namespace'] = volume['pvcRef']['namespace']
+                volume['name'] = volume['pvcRef']['name']
+
+                if volume.get("metadata"):
+                    if volume['metadata'].get("labels"):
+                        if matchLabels(config['labels']['exclude'], volume['metadata']['labels']):
+                            continue
+                        if config['labels']['include'] != []:
+                            if not matchLabels(config['labels']['include'], volume['metadata']['labels']):
+                                continue
+
+                for i in ["time", "pvcRef"]:
+                    del volume[i]
+
+                if any(v['name'] == volume['name'] and v['namespace'] == volume['namespace'] for v in volumes):
+                    continue
+
+                if "-token-" in volume['name']:
+                    continue
+                logging.info(volume)
+                volumes.append(volume)
+
+    return volumes
+
+def zabbixDiscoveryVolumes(clustername, volumes=[]):
+    """
+    description: create a discovery for persistent volume claim, per namespace
+    return: class ZabbixMetric
+    """
+    discovery = {"data":[]}
+
+    for volume in volumes:
+        output = {
+            "{#KUBERNETES_PVC_NAMESPACE}": volume['namespace'],
+            "{#KUBERNETES_PVC_NAME}": volume['name']}
+        discovery['data'].append(output)
+
+    sender = [ZabbixMetric(clustername, "kubernetes.pvc.discovery", json.dumps(discovery))]
+
+    return sender
+
+def zabbixItemVolumes(clustername, volumes=[]):
+    """
+    description: create an item for persistent volume claim, per namespace
+    return: class ZabbixMetric
+    """
+    sender = []
+
+    for volume in volumes:
+        sender.append(ZabbixMetric(clustername, f"kubernetes.volumeclaim.availableBytes[{volume['namespace']},{volume['name']}]", volume['availableBytes']),)
+        sender.append(ZabbixMetric(clustername, f"kubernetes.volumeclaim.capacityBytes[{volume['namespace']},{volume['name']}]", volume['capacityBytes']),)
+        sender.append(ZabbixMetric(clustername, f"kubernetes.volumeclaim.usedBytes[{volume['namespace']},{volume['name']}]", volume['usedBytes']),)
+        sender.append(ZabbixMetric(clustername, f"kubernetes.volumeclaim.inodesFree[{volume['namespace']},{volume['name']}]", volume['inodesFree']),)
+        sender.append(ZabbixMetric(clustername, f"kubernetes.volumeclaim.inodes[{volume['namespace']},{volume['name']}]", volume['inodes']),)
+        sender.append(ZabbixMetric(clustername, f"kubernetes.volumeclaim.inodesUsed[{volume['namespace']},{volume['name']}]", volume['inodesUsed']),)
+
+    return sender
+
+def baseVolumes(mode=None, config=None):
+    """
+    description: monitoring volumes
+    return: class ZabbixMetric
+    """
+    if mode == "discovery":
+        return zabbixDiscoveryVolumes(config['kubernetes']['name'], kubernetesGetVolumes(config['monitoring']['volumes']))
+    if mode == "item":
+        return zabbixItemVolumes(config['kubernetes']['name'], kubernetesGetVolumes(config['monitoring']['volumes']))
+    return []
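[Note: illustrative only, not part of the patch. kubernetesGetVolumes() parses
the kubelet "stats/summary" endpoint; a single pod volume entry has roughly
this shape (abridged, values invented), which is why the code keys on "pvcRef"
and deletes "time" and "pvcRef" before sending the remaining counters:]

    {
        "name": "data",
        "time": "2024-01-10T10:00:00Z",
        "availableBytes": 52613349376,
        "capacityBytes": 52710469632,
        "usedBytes": 97120256,
        "inodes": 3276800,
        "inodesFree": 3276789,
        "inodesUsed": 11,
        "pvcRef": {"name": "data-myapp-0", "namespace": "default"}
    }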
diff --git a/src/modules/kubernetes/get.py b/src/modules/kubernetes/get.py
deleted file mode 100644
index 3e07eec..0000000
--- a/src/modules/kubernetes/get.py
+++ /dev/null
@@ -1,321 +0,0 @@
-from kubernetes import client
-from datetime import datetime
-from modules.common.functions import *
-import json, urllib3, re
-
-urllib3.disable_warnings()
-
-def getNode(name=None, exclude_name=None, match_label=None):
-    """
-    description: get all or specific node
-    return: list
-    """
-    kubernetes = client.CoreV1Api()
-
-    nodes = []
-
-    for node in kubernetes.list_node().items:
-        node_healthz = kubernetes.connect_get_node_proxy_with_path(name=node.metadata.name, path="healthz")
-        node_status = kubernetes.read_node_status(name=node.metadata.name)
-        node_pods = kubernetes.list_pod_for_all_namespaces(field_selector="spec.nodeName={}".format(node.metadata.name))
-
-        json = {
-            "name": node.metadata.name,
-            "uid": node.metadata.uid,
-            "status": node_healthz,
-            "capacity": node_status.status.capacity,
-            "allocatable": node_status.status.allocatable,
-            "current": {
-                "pods": str(len(node_pods.items)),
-                "pods_used": str(round(len(node_pods.items) * 100 / int(node_status.status.allocatable['pods']), 1)),
-                "pods_free": str(round(100 - (len(node_pods.items) * 100 / int(node_status.status.allocatable['pods'])), 1))
-            }
-        }
-
-        if ifObjectMatch(exclude_name, json['name']):
-            continue
-
-        if match_label is not None and not ifLabelMatch(match_label, node.metadata.labels):
-            continue
-
-        if name == json['name']:
-            return [json]
-
-        if any(n['name'] == json['name'] for n in nodes):
-            continue
-
-        nodes.append(json)
-
-    return nodes
-
-
-def getDaemonset(name=None, exclude_name=None, exclude_namespace=None, match_label=None):
-    """
-    description: get all or specific daemonset
-    return: list
-    """
-    kubernetes = client.AppsV1Api()
-
-    daemonsets = []
-
-    for daemonset in kubernetes.list_daemon_set_for_all_namespaces().items:
-
-        json = {
-            "name": daemonset.metadata.name,
-            "namespace": daemonset.metadata.namespace,
-            "replicas": {
-                "desired": daemonset.status.desired_number_scheduled,
-                "current": daemonset.status.current_number_scheduled,
-                "available": daemonset.status.number_available,
-                "ready": daemonset.status.number_ready
-            }
-        }
-
-        for i in ["desired", "current", "available", "ready"]:
-            if json['replicas'][i] is None:
-                json['replicas'][i] = 0
-
-        if ifObjectMatch(exclude_name, json['name']):
-            continue
-
-        if ifObjectMatch(exclude_namespace, json['namespace']):
-            continue
-
-        if match_label is not None and not ifLabelMatch(match_label, daemonset.metadata.labels):
-            continue
-
-        if name == json['name']:
-            return [json]
-
-        if any(d['name'] == json['name'] and d['namespace'] == json['namespace'] for d in daemonsets):
-            continue
-
-        daemonsets.append(json)
-
-    return daemonsets
-
-
-def getVolume(name=None, exclude_name=None, exclude_namespace=None, match_label=None):
-    """
-    description: get all or specific persistent volume claim
-    return: list
-    """
-    kubernetes = client.CoreV1Api()
-
-    volumes = []
-
-    for node in kubernetes.list_node().items:
-        node_info = kubernetes.connect_get_node_proxy_with_path(name=node.metadata.name, path="stats/summary").replace("'", "\"")
-        node_json = json.loads(node_info)
-
-        for pod in node_json['pods']:
-
-            if not "volume" in pod:
-                continue
-
-            for volume in pod['volume']:
-
-                if not "pvcRef" in volume:
-                    continue
-
-                if volume['pvcRef']['name'].startswith(pod['podRef']['name']) and re.match(r"(.*)-[a-z0-9]{8,10}-[a-z0-9]{5}$", pod['podRef']['name']):
-                    continue
-
-                volume['namespace'] = volume['pvcRef']['namespace']
-                volume['name'] = volume['pvcRef']['name']
-
-                if ifObjectMatch(exclude_name, volume['name']):
-                    continue
-
-                if ifObjectMatch(exclude_namespace, volume['namespace']):
-                    continue
-
-                if match_label is not None and not ifLabelMatch(match_label, volume.metadata.labels):
-                    continue
-
-                for i in ["time", "pvcRef"]:
-                    del volume[i]
-
-                if name == volume['name']:
-                    return [volume]
-
-                if any(v['name'] == volume['name'] and v['namespace'] == volume['namespace'] for v in volumes):
-                    continue
-
-                if "-token-" in volume['name']:
-                    continue
-
-                volumes.append(volume)
-
-    return volumes
-
-
-def getDeployment(name=None, exclude_name=None, exclude_namespace=None, match_label=None):
-    """
-    description: get all or specific deployment
-    return: list
-    """
-    kubernetes = client.AppsV1Api()
-
-    deployments = []
-
-    for deployment in kubernetes.list_deployment_for_all_namespaces().items:
-
-        json = {
-            "name": deployment.metadata.name,
-            "namespace": deployment.metadata.namespace,
-            "replicas": {
-                "desired": deployment.status.replicas,
-                "ready": deployment.status.ready_replicas,
-                "available": deployment.status.available_replicas
-            }
-        }
-
-        if ifObjectMatch(exclude_name, json['name']):
-            continue
-
-        if ifObjectMatch(exclude_namespace, json['namespace']):
-            continue
-
-        if match_label is not None and not ifLabelMatch(match_label, deployment.metadata.labels):
-            continue
-
-        for i in ["desired", "ready", "available"]:
-            if json['replicas'][i] is None:
-                json['replicas'][i] = 0
-
-        if name == json['name']:
-            return [json]
-
-        if any(d['name'] == json['name'] and d['namespace'] == json['namespace'] for d in deployments):
-            continue
-
-        deployments.append(json)
-
-    return deployments
-
-
-def getStatefulset(name=None, exclude_name=None, exclude_namespace=None, match_label=None):
-    """
-    description: get all or specific statefulset
-    return: list
-    """
-    kubernetes = client.AppsV1Api()
-
-    statefulsets = []
-
-    for statefulset in kubernetes.list_stateful_set_for_all_namespaces().items:
-
-        json = {
-            "name": statefulset.metadata.name,
-            "namespace": statefulset.metadata.namespace,
-            "replicas": {
-                "available": statefulset.status.current_replicas,
-                "ready": statefulset.status.ready_replicas,
-                "desired": statefulset.status.replicas
-            }
-        }
-
-        if ifObjectMatch(exclude_name, json['name']):
-            continue
-
-        if ifObjectMatch(exclude_namespace, json['namespace']):
-            continue
-
-        if match_label is not None and not ifLabelMatch(match_label, statefulset.metadata.labels):
-            continue
-
-        for i in ["desired", "ready", "available"]:
-            if json['replicas'][i] is None:
-                json['replicas'][i] = 0
-
-        if name == json['name']:
-            return [json]
-
-        if any(s['name'] == json['name'] and s['namespace'] == json['namespace'] for s in statefulsets):
-            continue
-
-        statefulsets.append(json)
-
-    return statefulsets
-
-
-def getCronjob(name=None, exclude_name=None, exclude_namespace=None, match_label=None):
-    """
-    description: get all or specific cronjob
-    return: list
-    """
-    kubernetes = client.BatchV1Api()
-
-    cronjobs = []
-
-    for cronjob in kubernetes.list_cron_job_for_all_namespaces().items:
-
-        related_jobs, job_latest = [], {}
-
-        for job in kubernetes.list_job_for_all_namespaces().items:
-
-            if not job:
-                continue
-
-            if not job.metadata.owner_references:
-                continue
-
-            if not "CronJob" in job.metadata.owner_references[0].kind:
-                continue
-
-            if job.metadata.owner_references[0].name != cronjob.metadata.name:
-                continue
-
-            if job.status.active is not None:
-                continue
-
-            related_jobs.append(job)
-
-        for related_job in related_jobs:
-
-            if not bool(job_latest):
-                job_latest = related_job
-                continue
-
-            related_job_dt = datetime.timestamp(related_job.status.conditions[0].last_probe_time)
-            job_latest_dt = datetime.timestamp(job_latest.status.conditions[0].last_probe_time)
-
-            if related_job_dt > job_latest_dt:
-                job_latest = related_job
-
-        if type(job_latest) is dict:
-            continue
-
-        if job_latest.status.conditions[0].type == "Complete":
-            cronjob_status = "0"
-        else:
-            cronjob_status = "1"
-
-        json = {
-            "name": cronjob.metadata.name,
-            "namespace": cronjob.metadata.namespace,
-            "status": cronjob_status,
-            "last_job": {
-                "name": job_latest.metadata.name,
-                "reason": job_latest.status.conditions[0].reason,
-                "message": job_latest.status.conditions[0].message,
-                "status": job_latest.status.conditions[0].type
-            }
-        }
-
-        if ifObjectMatch(exclude_name, json['name']):
-            continue
-
-        if ifObjectMatch(exclude_namespace, json['namespace']):
-            continue
-
-        if match_label is not None and not ifLabelMatch(match_label, cronjob.metadata.labels):
-            continue
-
-        if name == json['name']:
-            return [json]
-
-        cronjobs.append(json)
-
-    return cronjobs
diff --git a/src/modules/zabbix/__init__.py b/src/modules/zabbix/__init__.py
deleted file mode 100644
index e69de29..0000000
diff --git a/src/modules/zabbix/discovery.py b/src/modules/zabbix/discovery.py
deleted file mode 100644
index 7f5cc3e..0000000
--- a/src/modules/zabbix/discovery.py
+++ /dev/null
@@ -1,107 +0,0 @@
-from pyzabbix import ZabbixMetric
-import json
-
-def zabbixDiscoveryNode(clustername, nodes=[]):
-    """
-    description: create a discovery for node
-    return: class ZabbixMetric
-    """
-    discovery = {"data":[]}
-
-    for node in nodes:
-        output = {"{#KUBERNETES_NODE_NAME}": node['name']}
-        discovery['data'].append(output)
-
-    sender = [ZabbixMetric(clustername, "kubernetes.node.discovery", json.dumps(discovery))]
-
-    return sender
-
-
-def zabbixDiscoveryDaemonset(clustername, daemonsets=[]):
-    """
-    description: create a discovery for daemonset, per namespace
-    return: class ZabbixMetric
-    """
-    discovery = {"data":[]}
-
-    for daemonset in daemonsets:
-        output = {
-            "{#KUBERNETES_DAEMONSET_NAMESPACE}": daemonset['namespace'],
-            "{#KUBERNETES_DAEMONSET_NAME}": daemonset['name']}
-        discovery['data'].append(output)
-
-    sender = [ZabbixMetric(clustername, "kubernetes.daemonset.discovery", json.dumps(discovery))]
-
-    return sender
-
-
-def zabbixDiscoveryVolume(clustername, volumes=[]):
-    """
-    description: create a discovery for persistent volume claim, per namespace
-    return: class ZabbixMetric
-    """
-    discovery = {"data":[]}
-
-    for volume in volumes:
-        output = {
-            "{#KUBERNETES_PVC_NAMESPACE}": volume['namespace'],
-            "{#KUBERNETES_PVC_NAME}": volume['name']}
-        discovery['data'].append(output)
-
-    sender = [ZabbixMetric(clustername, "kubernetes.pvc.discovery", json.dumps(discovery))]
-
-    return sender
-
-
-def zabbixDiscoveryDeployment(clustername, deployments=[]):
-    """
-    description: create a discovery for deployment, per namespace
-    return: class ZabbixMetric
-    """
-    discovery = {"data":[]}
-
-    for deployment in deployments:
-        output = {
-            "{#KUBERNETES_DEPLOYMENT_NAMESPACE}": deployment['namespace'],
-            "{#KUBERNETES_DEPLOYMENT_NAME}": deployment['name']}
-        discovery['data'].append(output)
-
-    sender = [ZabbixMetric(clustername, "kubernetes.deployment.discovery", json.dumps(discovery))]
-
-    return sender
-
-
-def zabbixDiscoveryStatefulset(clustername, statefulsets=[]):
-    """
-    description: create a discovery for statefulset, per namespace
-    return: class ZabbixMetric
-    """
-    discovery = {"data":[]}
-
-    for statefulset in statefulsets:
-        output = {
-            "{#KUBERNETES_STATEFULSET_NAMESPACE}": statefulset['namespace'],
-            "{#KUBERNETES_STATEFULSET_NAME}": statefulset['name']}
-        discovery['data'].append(output)
-
-    sender = [ZabbixMetric(clustername, "kubernetes.statefulset.discovery", json.dumps(discovery))]
-
-    return sender
-
-
-def zabbixDiscoveryCronjob(clustername, cronjobs=[]):
-    """
-    description: create a discovery for cronjob, per namespace
-    return: class ZabbixMetric
-    """
-    discovery = {"data":[]}
-
-    for cronjob in cronjobs:
-        output = {
-            "{#KUBERNETES_CRONJOB_NAMESPACE}": cronjob['namespace'],
-            "{#KUBERNETES_CRONJOB_NAME}": cronjob['name']}
-        discovery['data'].append(output)
-
-    sender = [ZabbixMetric(clustername, "kubernetes.cronjob.discovery", json.dumps(discovery))]
-
-    return sender
diff --git a/src/modules/zabbix/item.py b/src/modules/zabbix/item.py
deleted file mode 100644
index f12278c..0000000
--- a/src/modules/zabbix/item.py
+++ /dev/null
@@ -1,101 +0,0 @@
-from pyzabbix import ZabbixMetric
-
-def zabbixItemNode(clustername, nodes=[]):
-    """
-    description: create a item for node
-    return: class ZabbixMetric
-    """
-    sender = []
-
-    for node in nodes:
-        sender.append(ZabbixMetric(clustername, f"kubernetes.node.healthz[{node['name']}]", node['status']),)
-        sender.append(ZabbixMetric(clustername, f"kubernetes.node.capacity.cpu[{node['name']}]", node['capacity']['cpu']),)
-        sender.append(ZabbixMetric(clustername, f"kubernetes.node.capacity.memory[{node['name']}]", node['capacity']['memory']),)
-        sender.append(ZabbixMetric(clustername, f"kubernetes.node.capacity.pods[{node['name']}]", node['capacity']['pods']),)
-        sender.append(ZabbixMetric(clustername, f"kubernetes.node.allocatable.cpu[{node['name']}]", node['allocatable']['cpu']),)
-        sender.append(ZabbixMetric(clustername, f"kubernetes.node.allocatable.memory[{node['name']}]", node['allocatable']['memory']),)
-        sender.append(ZabbixMetric(clustername, f"kubernetes.node.allocatable.pods[{node['name']}]", node['allocatable']['pods']),)
-        sender.append(ZabbixMetric(clustername, f"kubernetes.node.current.pods[{node['name']}]", node['current']['pods']),)
-        sender.append(ZabbixMetric(clustername, f"kubernetes.node.current.podsUsed[{node['name']}]", node['current']['pods_used']),)
-        sender.append(ZabbixMetric(clustername, f"kubernetes.node.current.podsFree[{node['name']}]", node['current']['pods_free']),)
-
-    return sender
-
-
-def zabbixItemDaemonset(clustername, daemonsets=[]):
-    """
-    description: create a item for daemonset, per namespace
-    return: class ZabbixMetric
-    """
-    sender = []
-
-    for daemonset in daemonsets:
-        sender.append(ZabbixMetric(clustername, f"kubernetes.daemonset.desiredReplicas[{daemonset['namespace']},{daemonset['name']}]", daemonset['replicas']['desired']),)
-        sender.append(ZabbixMetric(clustername, f"kubernetes.daemonset.currentReplicas[{daemonset['namespace']},{daemonset['name']}]", daemonset['replicas']['current']),)
-        sender.append(ZabbixMetric(clustername, f"kubernetes.daemonset.availableReplicas[{daemonset['namespace']},{daemonset['name']}]", daemonset['replicas']['available']),)
-        sender.append(ZabbixMetric(clustername, f"kubernetes.daemonset.readyReplicas[{daemonset['namespace']},{daemonset['name']}]", daemonset['replicas']['ready']),)
-
-    return sender
-
-
-def zabbixItemVolume(clustername, volumes=[]):
-    """
-    description: create a item for persistent volume claim, per namespace
-    return: class ZabbixMetric
-    """
-    sender = []
-
-    for volume in volumes:
-        sender.append(ZabbixMetric(clustername, f"kubernetes.volumeclaim.availableBytes[{volume['namespace']},{volume['name']}]", volume['availableBytes']),)
-        sender.append(ZabbixMetric(clustername, f"kubernetes.volumeclaim.capacityBytes[{volume['namespace']},{volume['name']}]", volume['capacityBytes']),)
-        sender.append(ZabbixMetric(clustername, f"kubernetes.volumeclaim.usedBytes[{volume['namespace']},{volume['name']}]", volume['usedBytes']),)
-        sender.append(ZabbixMetric(clustername, f"kubernetes.volumeclaim.inodesFree[{volume['namespace']},{volume['name']}]", volume['inodesFree']),)
-        sender.append(ZabbixMetric(clustername, f"kubernetes.volumeclaim.inodes[{volume['namespace']},{volume['name']}]", volume['inodes']),)
-        sender.append(ZabbixMetric(clustername, f"kubernetes.volumeclaim.inodesUsed[{volume['namespace']},{volume['name']}]", volume['inodesUsed']),)
-
-    return sender
-
-
-def zabbixItemDeployment(clustername, deployments=[]):
-    """
-    description: create a item for deployment, per namespace
-    return: class ZabbixResponse
-    """
-    sender = []
-
-    for deployment in deployments:
-        sender.append(ZabbixMetric(clustername, f"kubernetes.deployment.availableReplicas[{deployment['namespace']},{deployment['name']}]", deployment['replicas']['available']),)
-        sender.append(ZabbixMetric(clustername, f"kubernetes.deployment.readyReplicas[{deployment['namespace']},{deployment['name']}]", deployment['replicas']['ready']),)
-        sender.append(ZabbixMetric(clustername, f"kubernetes.deployment.desiredReplicas[{deployment['namespace']},{deployment['name']}]", deployment['replicas']['desired']),)
-
-    return sender
-
-
-def zabbixItemStatefulset(clustername, statefulsets=[]):
-    """
-    description: create a item for statefulset, per namespace
-    return: class ZabbixResponse
-    """
-    sender = []
-
-    for statefulset in statefulsets:
-        sender.append(ZabbixMetric(clustername, f"kubernetes.statefulset.availableReplicas[{statefulset['namespace']},{statefulset['name']}]", statefulset['replicas']['available']),)
-        sender.append(ZabbixMetric(clustername, f"kubernetes.statefulset.readyReplicas[{statefulset['namespace']},{statefulset['name']}]", statefulset['replicas']['ready']),)
-        sender.append(ZabbixMetric(clustername, f"kubernetes.statefulset.desiredReplicas[{statefulset['namespace']},{statefulset['name']}]", statefulset['replicas']['desired']),)
-
-    return sender
-
-
-def zabbixItemCronjob(clustername, cronjobs=[]):
-    """
-    description: create a item for cronjob, per namespace
-    return: class ZabbixResponse
-    """
-    sender = []
-
-    for cronjob in cronjobs:
-        sender.append(ZabbixMetric(clustername, f"kubernetes.cronjob.status[{cronjob['namespace']},{cronjob['name']}]", cronjob['status']),)
-        sender.append(ZabbixMetric(clustername, f"kubernetes.cronjob.reason[{cronjob['namespace']},{cronjob['name']}]", cronjob['last_job']['reason']),)
-        sender.append(ZabbixMetric(clustername, f"kubernetes.cronjob.message[{cronjob['namespace']},{cronjob['name']}]", cronjob['last_job']['message']),)
-
-    return sender
diff --git a/src/requirements.txt b/src/requirements.txt
index 5165dff..489d6a2 100644
--- a/src/requirements.txt
+++ b/src/requirements.txt
@@ -2,3 +2,4 @@ requests==2.31.0
 kubernetes==27.2.0
 py-zabbix==1.1.7
 urllib3==1.26.15
+schedule==1.2.1
dest="monitoring_mode", action="store", required=True, help="Mode of monitoring", choices=["volume","deployment","daemonset","node","statefulset","cronjob"]) -parser.add_argument("--monitoring-type", dest="monitoring_type", action="store", required=True, help="Type of monitoring", choices=["discovery", "item", "json"]) -parser.add_argument("--object-name", dest="object_name", action="store", required=False, help="Name of object in Kubernetes", default=None) -parser.add_argument("--match-label", dest="match_label", action="store", required=False, help="Match label of object in Kubernetes", default=None) -parser.add_argument("--exclude-name", dest="exclude_name", action="store", required=False, help="Exclude object name in Kubernetes", default=None) -parser.add_argument("--exclude-namespace", dest="exclude_namespace", action="store", required=False, help="Exclude namespace in Kubernetes", default=None) -parser.add_argument("--no-wait", dest="no_wait", action="store_true", required=False, help="Disable startup wait time", default=False) -parser.add_argument("--verbose", dest="verbose", action="store_true", required=False, help="Verbose output", default=False) -parser.add_argument("--debug", dest="debug", action="store_true", required=False, help="Debug output for Zabbix", default=False) +parser.add_argument("--config-file", "-c", dest="config_file", action="store", required=False, help="Configuration file (default: config.yaml)", default="config.yaml") +parser.add_argument("--debug", "-d", dest="debug", action="store_true", required=False, help="Enable debug output (default: false)", default=False) args = parser.parse_args() -if args.debug: - logger = logging.getLogger("pyzabbix") - logger.setLevel(logging.DEBUG) - handler = logging.StreamHandler(sys.stdout) - logger.addHandler(handler) +logging.basicConfig( + format="[%(asctime)s] (%(levelname)s) %(name)s.%(funcName)s():%(lineno)d - %(message)s", + datefmt="%d/%m/%Y %H:%M:%S", + level=logging.INFO) if os.path.exists("/var/run/secrets/kubernetes.io/serviceaccount/token") and not os.getenv('KUBECONFIG'): - config.load_incluster_config() - if args.verbose: print("Kubernetes credentials from ServiceAccount") + kube_config.load_incluster_config() + logging.debug("Loading Kubernetes credentials from ServiceAccount") else: try: - config.load_kube_config() - if args.verbose: print("Kubernetes credentials from KUBECONFIG") + kube_config.load_kube_config() + logging.debug("Loading Kubernetes credentials from KUBECONFIG variable") except: - print("Unable to find kubernetes cluster configuration") + logging.error("Unable to load Kubernetes credentials") sys.exit(1) -zabbix = ZabbixSender(args.zabbix_endpoint) -if args.zabbix_timeout: zabbix.timeout = int(args.zabbix_timeout) -if args.verbose: - print(f"Zabbix endpoint: {args.zabbix_endpoint}") - print(f"Zabbix timeout: {args.zabbix_timeout}") - print(f"Kubernetes name: {args.kubernetes_name}") +with open(args.config_file, "r") as f: + config = yaml.load(f, Loader=yaml.FullLoader) + logging.debug(f"Configuration file {args.config_file} loaded successfully") -if __name__ == "__main__": - - # Random sleep between 0 and 15 seconds - if args.no_wait == False: - timewait = randint(0,15) - if args.verbose: print(f"Starting in {timewait} second(s)...") - sleep(timewait) - - # Node - if args.monitoring_mode == "node": - if args.monitoring_type == "json": - print("JSON output (node): {}".format( - getNode(args.object_name, args.exclude_name, args.match_label))) - if args.monitoring_type == "discovery": - 
print("Zabbix discovery (node): {}".format( - zabbix.send(zabbixDiscoveryNode(args.kubernetes_name, getNode(args.object_name, args.exclude_name, args.match_label))))) - if args.monitoring_type == "item": - print("Zabbix item (node): {}".format( - zabbix.send(zabbixItemNode(args.kubernetes_name, getNode(args.object_name, args.exclude_name, args.match_label))))) +zabbix = ZabbixSender(config['zabbix']['endpoint']) +zabbix.timeout = int(config['zabbix']['timeout']) +logging.debug(f"-> Zabbix endpoint: {config['zabbix']['endpoint']}") +logging.debug(f"-> Zabbix timeout: {config['zabbix']['timeout']}") +logging.debug(f"-> Cluster name: {config['kubernetes']['name']}") - # Daemonset - if args.monitoring_mode == "daemonset": - if args.monitoring_type == "json": - print("JSON output (daemonset): {}".format( - getDaemonset(args.object_name, args.exclude_name, args.exclude_namespace, args.match_label))) - if args.monitoring_type == "discovery": - print("Zabbix discovery (daemonset): {}".format( - zabbix.send(zabbixDiscoveryDaemonset(args.kubernetes_name, getDaemonset(args.object_name, args.exclude_name, args.exclude_namespace, args.match_label))))) - if args.monitoring_type == "item": - print("Zabbix item (daemonset): {}".format( - zabbix.send(zabbixItemDaemonset(args.kubernetes_name, getDaemonset(args.object_name, args.exclude_name, args.exclude_namespace, args.match_label))))) - - # Volumes - if args.monitoring_mode == "volume": - if args.monitoring_type == "json": - print("JSON output (volume): {}".format( - getVolume(args.object_name, args.exclude_name, args.exclude_namespace, args.match_label))) - if args.monitoring_type == "discovery": - print("Zabbix discovery (volume): {}".format( - zabbix.send(zabbixDiscoveryVolume(args.kubernetes_name, getVolume(args.object_name, args.exclude_name, args.exclude_namespace, args.match_label))))) - if args.monitoring_type == "item": - print("Zabbix item (volume): {}".format( - zabbix.send(zabbixItemVolume(args.kubernetes_name, getVolume(args.object_name, args.exclude_name, args.exclude_namespace, args.match_label))))) - - # Deployment - if args.monitoring_mode == "deployment": - if args.monitoring_type == "json": - print("JSON output (deployment): {}".format( - getDeployment(args.object_name, args.exclude_name, args.exclude_namespace, args.match_label))) - if args.monitoring_type == "discovery": - print("Zabbix discovery (deployment): {}".format( - zabbix.send(zabbixDiscoveryDeployment(args.kubernetes_name, getDeployment(args.object_name, args.exclude_name, args.exclude_namespace, args.match_label))))) - if args.monitoring_type == "item": - print("Zabbix item (deployment): {}".format( - zabbix.send(zabbixItemDeployment(args.kubernetes_name, getDeployment(args.object_name, args.exclude_name, args.exclude_namespace, args.match_label))))) +def mainSender(data): + try: + logging.debug(data) + zabbix.send(data) + except Exception as e: + logging.debug(e) - # Statefulset - if args.monitoring_mode == "statefulset": - if args.monitoring_type == "json": - print("JSON output (statefulset): {}".format( - getStatefulset(args.object_name, args.exclude_name, args.exclude_namespace, args.match_label))) - if args.monitoring_type == "discovery": - print("Zabbix discovery (statefulset): {}".format( - zabbix.send(zabbixDiscoveryStatefulset(args.kubernetes_name, getStatefulset(args.object_name, args.exclude_name, args.exclude_namespace, args.match_label))))) - if args.monitoring_type == "item": - print("Zabbix item (statefulset): {}".format( - 
+if __name__ == "__main__":
+    # discovery
+    schedule.every(config['zabbix']['schedule']['discovery']).seconds.do(lambda: mainSender(baseCronjobs(mode="discovery", config=config))) # cronjobs
+    schedule.every(config['zabbix']['schedule']['discovery']).seconds.do(lambda: mainSender(baseDaemonsets(mode="discovery", config=config))) # daemonsets
+    schedule.every(config['zabbix']['schedule']['discovery']).seconds.do(lambda: mainSender(baseDeployments(mode="discovery", config=config))) # deployments
+    schedule.every(config['zabbix']['schedule']['discovery']).seconds.do(lambda: mainSender(baseNodes(mode="discovery", config=config))) # nodes
+    schedule.every(config['zabbix']['schedule']['discovery']).seconds.do(lambda: mainSender(baseStatefulsets(mode="discovery", config=config))) # statefulsets
+    schedule.every(config['zabbix']['schedule']['discovery']).seconds.do(lambda: mainSender(baseVolumes(mode="discovery", config=config))) # volumes
 
-    # Cronjob
-    if args.monitoring_mode == "cronjob":
-        if args.monitoring_type == "json":
-            print("JSON output (cronjob): {}".format(
-                getCronjob(args.object_name, args.exclude_name, args.exclude_namespace, args.match_label)))
-        if args.monitoring_type == "discovery":
-            print("Zabbix discovery (cronjob): {}".format(
-                zabbix.send(zabbixDiscoveryCronjob(args.kubernetes_name, getCronjob(args.object_name, args.exclude_name, args.exclude_namespace, args.match_label)))))
-        if args.monitoring_type == "item":
-            print("Zabbix item (cronjob): {}".format(
-                zabbix.send(zabbixItemCronjob(args.kubernetes_name, getCronjob(args.object_name, args.exclude_name, args.exclude_namespace, args.match_label)))))
+    # items
+    schedule.every(config['zabbix']['schedule']['items']).seconds.do(lambda: mainSender(baseCronjobs(mode="item", config=config))) # cronjobs
+    schedule.every(config['zabbix']['schedule']['items']).seconds.do(lambda: mainSender(baseDaemonsets(mode="item", config=config))) # daemonsets
+    schedule.every(config['zabbix']['schedule']['items']).seconds.do(lambda: mainSender(baseDeployments(mode="item", config=config))) # deployments
+    schedule.every(config['zabbix']['schedule']['items']).seconds.do(lambda: mainSender(baseNodes(mode="item", config=config))) # nodes
+    schedule.every(config['zabbix']['schedule']['items']).seconds.do(lambda: mainSender(baseStatefulsets(mode="item", config=config))) # statefulsets
+    schedule.every(config['zabbix']['schedule']['items']).seconds.do(lambda: mainSender(baseVolumes(mode="item", config=config))) # volumes
 
+    # tasks
+    while True:
+        schedule.run_pending()
+        sleep(1)
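[Note: illustrative only, not part of the patch. The scheduling pattern this
patch adopts, in miniature; in the real script the intervals come from
config['zabbix']['schedule'] and the jobs wrap the base*() functions:]

    import schedule
    from time import sleep

    schedule.every(60).seconds.do(lambda: print("discovery tick"))
    schedule.every(2).seconds.do(lambda: print("item tick"))

    while True:
        schedule.run_pending()   # run any job whose interval has elapsed
        sleep(1)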
From 1a456a0bfc8c1df83ae0938fa7a65e755f5f85a5 Mon Sep 17 00:00:00 2001
From: djerfy
Date: Wed, 10 Jan 2024 12:05:22 +0100
Subject: [PATCH 02/34] feat: improve logging

Signed-off-by: djerfy
---
 src/modules/kubernetes/base/volumes.py | 2 +-
 src/zabbix-kubernetes-discovery.py     | 9 +++++----
 2 files changed, 6 insertions(+), 5 deletions(-)

diff --git a/src/modules/kubernetes/base/volumes.py b/src/modules/kubernetes/base/volumes.py
index 861ee6a..4cdceef 100644
--- a/src/modules/kubernetes/base/volumes.py
+++ b/src/modules/kubernetes/base/volumes.py
@@ -53,7 +53,7 @@ def kubernetesGetVolumes(config=None):
 
                 if "-token-" in volume['name']:
                     continue
-                logging.info(volume)
+
                 volumes.append(volume)
 
     return volumes
diff --git a/src/zabbix-kubernetes-discovery.py b/src/zabbix-kubernetes-discovery.py
index 359a77a..0ce1a47 100644
--- a/src/zabbix-kubernetes-discovery.py
+++ b/src/zabbix-kubernetes-discovery.py
@@ -8,14 +8,15 @@ from modules.kubernetes.base import *
 
 parser = argparse.ArgumentParser()
-parser.add_argument("--config-file", "-c", dest="config_file", action="store", required=False, help="Configuration file (default: config.yaml)", default="config.yaml")
-parser.add_argument("--debug", "-d", dest="debug", action="store_true", required=False, help="Enable debug output (default: false)", default=False)
+parser.add_argument("--config-file", dest="config_file", action="store", required=False, help="Configuration file (default: config.yaml)", default="config.yaml")
+parser.add_argument("--log-level", dest="log_level", action="store", required=False, help="Logging output log-level (default: INFO)", default="INFO", choices=["INFO", "WARNING", "ERROR", "DEBUG"])
 args = parser.parse_args()
 
 logging.basicConfig(
-    format="[%(asctime)s] (%(levelname)s) %(name)s.%(funcName)s():%(lineno)d - %(message)s",
     datefmt="%d/%m/%Y %H:%M:%S",
-    level=logging.INFO)
+    format="[%(asctime)s] (%(levelname)s) %(name)s.%(funcName)s():%(lineno)d - %(message)s",
+    level=getattr(logging, args.log_level)
+)
 
 if os.path.exists("/var/run/secrets/kubernetes.io/serviceaccount/token") and not os.getenv('KUBECONFIG'):
     kube_config.load_incluster_config()
schedule.every(config['zabbix']['schedule']['items']).seconds.do(lambda: mainSender(baseDaemonsets(mode="item", config=config))) # daemonsets - schedule.every(config['zabbix']['schedule']['items']).seconds.do(lambda: mainSender(baseDeployments(mode="item", config=config))) # deployments - schedule.every(config['zabbix']['schedule']['items']).seconds.do(lambda: mainSender(baseNodes(mode="item", config=config))) # nodes - schedule.every(config['zabbix']['schedule']['items']).seconds.do(lambda: mainSender(baseStatefulsets(mode="item", config=config))) # statefulsets - schedule.every(config['zabbix']['schedule']['items']).seconds.do(lambda: mainSender(baseVolumes(mode="item", config=config))) # volumes + # cronjobs + if config['monitoring']['cronjobs']['enabled']: + schedule.every(config['zabbix']['schedule']['discovery']).seconds.do(lambda: mainSender(baseCronjobs(mode="discovery", config=config))) # cronjobs + schedule.every(config['zabbix']['schedule']['items']).seconds.do(lambda: mainSender(baseCronjobs(mode="item", config=config))) # cronjobs + + # daemonsets + if config['monitoring']['daemonsets']['enabled']: + schedule.every(config['zabbix']['schedule']['discovery']).seconds.do(lambda: mainSender(baseDaemonsets(mode="discovery", config=config))) # daemonsets + schedule.every(config['zabbix']['schedule']['items']).seconds.do(lambda: mainSender(baseDaemonsets(mode="item", config=config))) # daemonsets + + # deployments + if config['monitoring']['deployments']['enabled']: + schedule.every(config['zabbix']['schedule']['discovery']).seconds.do(lambda: mainSender(baseDeployments(mode="discovery", config=config))) # deployments + schedule.every(config['zabbix']['schedule']['items']).seconds.do(lambda: mainSender(baseDeployments(mode="item", config=config))) # deployments + + # nodes + if config['monitoring']['nodes']['enabled']: + schedule.every(config['zabbix']['schedule']['discovery']).seconds.do(lambda: mainSender(baseNodes(mode="discovery", config=config))) # nodes + schedule.every(config['zabbix']['schedule']['items']).seconds.do(lambda: mainSender(baseNodes(mode="item", config=config))) # nodes + + # statefulsets + if config['monitoring']['statefulsets']['enabled']: + schedule.every(config['zabbix']['schedule']['discovery']).seconds.do(lambda: mainSender(baseStatefulsets(mode="discovery", config=config))) # statefulsets + schedule.every(config['zabbix']['schedule']['items']).seconds.do(lambda: mainSender(baseStatefulsets(mode="item", config=config))) # statefulsets + + # volumes + if config['monitoring']['volumes']['enabled']: + schedule.every(config['zabbix']['schedule']['discovery']).seconds.do(lambda: mainSender(baseVolumes(mode="discovery", config=config))) # volumes + schedule.every(config['zabbix']['schedule']['items']).seconds.do(lambda: mainSender(baseVolumes(mode="item", config=config))) # volumes # tasks while True: schedule.run_pending() sleep(1) + \ No newline at end of file From ceda038e4171396d9292893509b22b359edf71ef Mon Sep 17 00:00:00 2001 From: djerfy Date: Wed, 10 Jan 2024 12:16:27 +0100 Subject: [PATCH 04/34] misc: remove useless comments Signed-off-by: djerfy --- src/zabbix-kubernetes-discovery.py | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/src/zabbix-kubernetes-discovery.py b/src/zabbix-kubernetes-discovery.py index ef7336d..1d4b5e8 100644 --- a/src/zabbix-kubernetes-discovery.py +++ b/src/zabbix-kubernetes-discovery.py @@ -51,33 +51,33 @@ def mainSender(data): # cronjobs if 
config['monitoring']['cronjobs']['enabled']: - schedule.every(config['zabbix']['schedule']['discovery']).seconds.do(lambda: mainSender(baseCronjobs(mode="discovery", config=config))) # cronjobs - schedule.every(config['zabbix']['schedule']['items']).seconds.do(lambda: mainSender(baseCronjobs(mode="item", config=config))) # cronjobs + schedule.every(config['zabbix']['schedule']['discovery']).seconds.do(lambda: mainSender(baseCronjobs(mode="discovery", config=config))) + schedule.every(config['zabbix']['schedule']['items']).seconds.do(lambda: mainSender(baseCronjobs(mode="item", config=config))) # daemonsets if config['monitoring']['daemonsets']['enabled']: - schedule.every(config['zabbix']['schedule']['discovery']).seconds.do(lambda: mainSender(baseDaemonsets(mode="discovery", config=config))) # daemonsets - schedule.every(config['zabbix']['schedule']['items']).seconds.do(lambda: mainSender(baseDaemonsets(mode="item", config=config))) # daemonsets + schedule.every(config['zabbix']['schedule']['discovery']).seconds.do(lambda: mainSender(baseDaemonsets(mode="discovery", config=config))) + schedule.every(config['zabbix']['schedule']['items']).seconds.do(lambda: mainSender(baseDaemonsets(mode="item", config=config))) # deployments if config['monitoring']['deployments']['enabled']: - schedule.every(config['zabbix']['schedule']['discovery']).seconds.do(lambda: mainSender(baseDeployments(mode="discovery", config=config))) # deployments - schedule.every(config['zabbix']['schedule']['items']).seconds.do(lambda: mainSender(baseDeployments(mode="item", config=config))) # deployments + schedule.every(config['zabbix']['schedule']['discovery']).seconds.do(lambda: mainSender(baseDeployments(mode="discovery", config=config))) + schedule.every(config['zabbix']['schedule']['items']).seconds.do(lambda: mainSender(baseDeployments(mode="item", config=config))) # nodes if config['monitoring']['nodes']['enabled']: - schedule.every(config['zabbix']['schedule']['discovery']).seconds.do(lambda: mainSender(baseNodes(mode="discovery", config=config))) # nodes - schedule.every(config['zabbix']['schedule']['items']).seconds.do(lambda: mainSender(baseNodes(mode="item", config=config))) # nodes + schedule.every(config['zabbix']['schedule']['discovery']).seconds.do(lambda: mainSender(baseNodes(mode="discovery", config=config))) + schedule.every(config['zabbix']['schedule']['items']).seconds.do(lambda: mainSender(baseNodes(mode="item", config=config))) # statefulsets if config['monitoring']['statefulsets']['enabled']: - schedule.every(config['zabbix']['schedule']['discovery']).seconds.do(lambda: mainSender(baseStatefulsets(mode="discovery", config=config))) # statefulsets - schedule.every(config['zabbix']['schedule']['items']).seconds.do(lambda: mainSender(baseStatefulsets(mode="item", config=config))) # statefulsets + schedule.every(config['zabbix']['schedule']['discovery']).seconds.do(lambda: mainSender(baseStatefulsets(mode="discovery", config=config))) + schedule.every(config['zabbix']['schedule']['items']).seconds.do(lambda: mainSender(baseStatefulsets(mode="item", config=config))) # volumes if config['monitoring']['volumes']['enabled']: - schedule.every(config['zabbix']['schedule']['discovery']).seconds.do(lambda: mainSender(baseVolumes(mode="discovery", config=config))) # volumes - schedule.every(config['zabbix']['schedule']['items']).seconds.do(lambda: mainSender(baseVolumes(mode="item", config=config))) # volumes + schedule.every(config['zabbix']['schedule']['discovery']).seconds.do(lambda: 
mainSender(baseVolumes(mode="discovery", config=config))) + schedule.every(config['zabbix']['schedule']['items']).seconds.do(lambda: mainSender(baseVolumes(mode="item", config=config))) # tasks while True: From 7c72a5c85fd3e57017d5aba01da30b0d6e1d6cd2 Mon Sep 17 00:00:00 2001 From: djerfy Date: Wed, 10 Jan 2024 13:04:14 +0100 Subject: [PATCH 05/34] fix: match include/exclude labels from list Signed-off-by: djerfy --- src/modules/common/functions.py | 11 ++++++----- src/modules/kubernetes/base/nodes.py | 4 ---- src/modules/kubernetes/base/volumes.py | 4 ---- 3 files changed, 6 insertions(+), 13 deletions(-) diff --git a/src/modules/common/functions.py b/src/modules/common/functions.py index 3cb4adf..aa64e59 100644 --- a/src/modules/common/functions.py +++ b/src/modules/common/functions.py @@ -12,10 +12,11 @@ def matchLabels(match_labels=None, object_labels=None): object_labels = str(object_labels).replace("{", "").replace("}", "").replace("'", "").replace(" ", "").split(",") - for label in object_labels: - key, value = label.split(":")[0], label.split(":")[1] - for separator in ["=", ":"]: - if match_labels.split(separator)[0] == key and match_labels.split(separator)[1] == value: - return True + for object_label in object_labels: + key, value = object_label.split(":")[0], object_label.split(":")[1] + for match_label in match_labels: + for separator in ["=", ":"]: + if match_label.split(separator)[0] == key and match_label.split(separator)[1] == value: + return True return False diff --git a/src/modules/kubernetes/base/nodes.py b/src/modules/kubernetes/base/nodes.py index 6c4aee4..9fffb40 100644 --- a/src/modules/kubernetes/base/nodes.py +++ b/src/modules/kubernetes/base/nodes.py @@ -15,10 +15,6 @@ def kubernetesGetNodes(config=None): nodes = [] for node in kubernetes.list_node().items: - if node.spec.taints is not None: - if "node.kubernetes.io/not-ready" in str(node.spec.taints): - continue - node_healthz = kubernetes.connect_get_node_proxy_with_path(name=node.metadata.name, path="healthz") node_status = kubernetes.read_node_status(name=node.metadata.name) node_pods = kubernetes.list_pod_for_all_namespaces(field_selector="spec.nodeName={}".format(node.metadata.name)) diff --git a/src/modules/kubernetes/base/volumes.py b/src/modules/kubernetes/base/volumes.py index 4cdceef..777c5dd 100644 --- a/src/modules/kubernetes/base/volumes.py +++ b/src/modules/kubernetes/base/volumes.py @@ -15,10 +15,6 @@ def kubernetesGetVolumes(config=None): volumes = [] for node in kubernetes.list_node().items: - if node.spec.taints is not None: - if "node.kubernetes.io/not-ready" in str(node.spec.taints): - continue - node_info = kubernetes.connect_get_node_proxy_with_path(name=node.metadata.name, path="stats/summary").replace("'", "\"") node_json = json.loads(node_info) From 3895da8a91d0e2d6703159506ef7b6169d096c3f Mon Sep 17 00:00:00 2001 From: djerfy Date: Wed, 10 Jan 2024 14:06:55 +0100 Subject: [PATCH 06/34] misc: remove empty line Signed-off-by: djerfy --- src/modules/common/functions.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/modules/common/functions.py b/src/modules/common/functions.py index aa64e59..fe20eae 100644 --- a/src/modules/common/functions.py +++ b/src/modules/common/functions.py @@ -9,7 +9,7 @@ def matchLabels(match_labels=None, object_labels=None): for i in [match_labels, object_labels]: if i is None or i == [] or i == "" or i == "*": return False - + object_labels = str(object_labels).replace("{", "").replace("}", "").replace("'", "").replace(" ", 
"").split(",") for object_label in object_labels: @@ -18,5 +18,5 @@ def matchLabels(match_labels=None, object_labels=None): for separator in ["=", ":"]: if match_label.split(separator)[0] == key and match_label.split(separator)[1] == value: return True - + return False From eeb7e248922fc05c258f8c79732ef3c78b908a9d Mon Sep 17 00:00:00 2001 From: djerfy Date: Wed, 10 Jan 2024 14:07:09 +0100 Subject: [PATCH 07/34] feat: use threading for parallels execution Signed-off-by: djerfy --- src/zabbix-kubernetes-discovery.py | 37 ++++++++++++++++++------------ 1 file changed, 22 insertions(+), 15 deletions(-) diff --git a/src/zabbix-kubernetes-discovery.py b/src/zabbix-kubernetes-discovery.py index 1d4b5e8..90b190d 100644 --- a/src/zabbix-kubernetes-discovery.py +++ b/src/zabbix-kubernetes-discovery.py @@ -1,7 +1,7 @@ #!/usr/bin/env python3 -import argparse, sys, os -import logging, schedule, yaml +import argparse, sys, os, yaml +import logging, schedule, threading from time import sleep from kubernetes import config as kube_config from pyzabbix import ZabbixSender @@ -39,45 +39,52 @@ logging.debug(f"-> Zabbix timeout: {config['zabbix']['timeout']}") logging.debug(f"-> Cluster name: {config['kubernetes']['name']}") -def mainSender(data): +def mainSend(data): try: logging.debug(data) zabbix.send(data) except Exception as e: logging.debug(e) +def mainThread(func): + try: + func_thread = threading.Thread(target=func) + func_thread.start() + except Exception as e: + logging.error(e) + if __name__ == "__main__": logging.info("Application zabbix-kubernetes-discovery started") # cronjobs if config['monitoring']['cronjobs']['enabled']: - schedule.every(config['zabbix']['schedule']['discovery']).seconds.do(lambda: mainSender(baseCronjobs(mode="discovery", config=config))) - schedule.every(config['zabbix']['schedule']['items']).seconds.do(lambda: mainSender(baseCronjobs(mode="item", config=config))) + schedule.every(config['zabbix']['schedule']['discovery']).seconds.do(mainThread, lambda: mainSend(baseCronjobs(mode="discovery", config=config))) + schedule.every(config['zabbix']['schedule']['items']).seconds.do(mainThread, lambda: mainSend(baseCronjobs(mode="item", config=config))) # daemonsets if config['monitoring']['daemonsets']['enabled']: - schedule.every(config['zabbix']['schedule']['discovery']).seconds.do(lambda: mainSender(baseDaemonsets(mode="discovery", config=config))) - schedule.every(config['zabbix']['schedule']['items']).seconds.do(lambda: mainSender(baseDaemonsets(mode="item", config=config))) + schedule.every(config['zabbix']['schedule']['discovery']).seconds.do(mainThread, lambda: mainSend(baseDaemonsets(mode="discovery", config=config))) + schedule.every(config['zabbix']['schedule']['items']).seconds.do(mainThread, lambda: mainSend(baseDaemonsets(mode="item", config=config))) # deployments if config['monitoring']['deployments']['enabled']: - schedule.every(config['zabbix']['schedule']['discovery']).seconds.do(lambda: mainSender(baseDeployments(mode="discovery", config=config))) - schedule.every(config['zabbix']['schedule']['items']).seconds.do(lambda: mainSender(baseDeployments(mode="item", config=config))) + schedule.every(config['zabbix']['schedule']['discovery']).seconds.do(mainThread, lambda: mainSend(baseDeployments(mode="discovery", config=config))) + schedule.every(config['zabbix']['schedule']['items']).seconds.do(mainThread, lambda: mainSend(baseDeployments(mode="item", config=config))) # nodes if config['monitoring']['nodes']['enabled']: - 
schedule.every(config['zabbix']['schedule']['discovery']).seconds.do(lambda: mainSender(baseNodes(mode="discovery", config=config))) - schedule.every(config['zabbix']['schedule']['items']).seconds.do(lambda: mainSender(baseNodes(mode="item", config=config))) + schedule.every(config['zabbix']['schedule']['discovery']).seconds.do(mainThread, lambda: mainSend(baseNodes(mode="discovery", config=config))) + schedule.every(config['zabbix']['schedule']['items']).seconds.do(mainThread, lambda: mainSend(baseNodes(mode="item", config=config))) # statefulsets if config['monitoring']['statefulsets']['enabled']: - schedule.every(config['zabbix']['schedule']['discovery']).seconds.do(lambda: mainSender(baseStatefulsets(mode="discovery", config=config))) - schedule.every(config['zabbix']['schedule']['items']).seconds.do(lambda: mainSender(baseStatefulsets(mode="item", config=config))) + schedule.every(config['zabbix']['schedule']['discovery']).seconds.do(mainThread, lambda: mainSend(baseStatefulsets(mode="discovery", config=config))) + schedule.every(config['zabbix']['schedule']['items']).seconds.do(mainThread, lambda: mainSend(baseStatefulsets(mode="item", config=config))) # volumes if config['monitoring']['volumes']['enabled']: - schedule.every(config['zabbix']['schedule']['discovery']).seconds.do(lambda: mainSender(baseVolumes(mode="discovery", config=config))) - schedule.every(config['zabbix']['schedule']['items']).seconds.do(lambda: mainSender(baseVolumes(mode="item", config=config))) + schedule.every(config['zabbix']['schedule']['discovery']).seconds.do(mainThread, lambda: mainSend(baseVolumes(mode="discovery", config=config))) + schedule.every(config['zabbix']['schedule']['items']).seconds.do(mainThread, lambda: mainSend(baseVolumes(mode="item", config=config))) # tasks while True: From 32fb86d9115d11a0aaf5ef96e5493411343920dc Mon Sep 17 00:00:00 2001 From: djerfy Date: Wed, 10 Jan 2024 14:58:36 +0100 Subject: [PATCH 08/34] feat: upgrade dependencies Signed-off-by: djerfy --- src/requirements.txt | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/requirements.txt b/src/requirements.txt index 489d6a2..12e2309 100644 --- a/src/requirements.txt +++ b/src/requirements.txt @@ -1,5 +1,5 @@ requests==2.31.0 -kubernetes==27.2.0 +kubernetes==29.0.0 py-zabbix==1.1.7 -urllib3==1.26.15 +urllib3==2.1.0 schedule==1.2.1 From f353a0e4353405b9bc94f34c7d026756268a8972 Mon Sep 17 00:00:00 2001 From: djerfy Date: Wed, 10 Jan 2024 14:58:56 +0100 Subject: [PATCH 09/34] feat: set final values of schedule Signed-off-by: djerfy --- src/config.yaml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/config.yaml b/src/config.yaml index 3550709..90d40c7 100644 --- a/src/config.yaml +++ b/src/config.yaml @@ -2,8 +2,8 @@ zabbix: endpoint: localhost timeout: 10 schedule: - discovery: 60 - items: 2 + discovery: 3600 + items: 120 kubernetes: name: localhost @@ -45,7 +45,7 @@ monitoring: labels: include: [] exclude: [] - # plugins + # optional openebs: enabled: False labels: @@ -65,4 +65,4 @@ monitoring: enabled: True labels: include: [] - exclude: [] \ No newline at end of file + exclude: [] From ffb7ec6fd96bb6b0321c3286fedfc7686833966b Mon Sep 17 00:00:00 2001 From: djerfy Date: Wed, 10 Jan 2024 19:54:12 +0100 Subject: [PATCH 10/34] feat: prepare modules integration Signed-off-by: djerfy --- src/config.yaml | 12 ++- src/modules/common/functions.py | 11 +++ src/modules/kubernetes/openebs/__init__.py | 1 + .../kubernetes/openebs/cstorpoolclusters.py | 88 +++++++++++++++++++ 
src/modules/kubernetes/trivy/__init__.py | 1 + .../kubernetes/trivy/vulnerabilityreports.py | 19 ++++ src/zabbix-kubernetes-discovery.py | 18 ++-- 7 files changed, 140 insertions(+), 10 deletions(-) create mode 100644 src/modules/kubernetes/openebs/__init__.py create mode 100644 src/modules/kubernetes/openebs/cstorpoolclusters.py create mode 100644 src/modules/kubernetes/trivy/__init__.py create mode 100644 src/modules/kubernetes/trivy/vulnerabilityreports.py diff --git a/src/config.yaml b/src/config.yaml index 90d40c7..3cc1110 100644 --- a/src/config.yaml +++ b/src/config.yaml @@ -45,24 +45,28 @@ monitoring: labels: include: [] exclude: [] - # optional + # openebs openebs: enabled: False + engine: cstor labels: include: [] exclude: [] + # velero velero: - enabled: True + enabled: False labels: include: [] exclude: [] + # trivy trivy: - enabled: True + enabled: False labels: include: [] exclude: [] + # certificates certs: - enabled: True + enabled: False labels: include: [] exclude: [] diff --git a/src/modules/common/functions.py b/src/modules/common/functions.py index fe20eae..6080e5e 100644 --- a/src/modules/common/functions.py +++ b/src/modules/common/functions.py @@ -20,3 +20,14 @@ def matchLabels(match_labels=None, object_labels=None): return True return False + +def rawObjects(data=[]): + """ + description: get objects from raw api, convert items and return only objects + return: list + """ + for key, value in data.items(): + if key == "items": + return value + + return [] diff --git a/src/modules/kubernetes/openebs/__init__.py b/src/modules/kubernetes/openebs/__init__.py new file mode 100644 index 0000000..f4a76e9 --- /dev/null +++ b/src/modules/kubernetes/openebs/__init__.py @@ -0,0 +1 @@ +from modules.kubernetes.openebs.cstorpoolclusters import openebsGetCstorpoolclusters diff --git a/src/modules/kubernetes/openebs/cstorpoolclusters.py b/src/modules/kubernetes/openebs/cstorpoolclusters.py new file mode 100644 index 0000000..dcf8a07 --- /dev/null +++ b/src/modules/kubernetes/openebs/cstorpoolclusters.py @@ -0,0 +1,88 @@ +from kubernetes import client +from pyzabbix import ZabbixMetric +from modules.common.functions import * +import json, urllib3 + +urllib3.disable_warnings() + +def openebsGetCstorpoolclusters(config=None): + """ + description: get cstorpoolclusters data + return: list + """ + kubernetes = client.CustomObjectsApi() + + cstorpoolclusters = [] + + for cstorpoolcluster in rawObjects(kubernetes.list_cluster_custom_object(group="cstor.openebs.io", version="v1", plural="cstorpoolclusters")): + json = { + "name": cstorpoolcluster.metadata.name, + "namespace": cstorpoolcluster.metadata.namespace, + "instances": { + "desired": cstorpoolcluster.status.desiredInstances, + "healthy": cstorpoolcluster.status.healthyInstances, + "provisioned": cstorpoolcluster.status.provisionedInstances + }, + "version": { + "desired": cstorpoolcluster.status.versionDetails.desired, + "current": cstorpoolcluster.status.versionDetails.status.current + } + } + + if matchLabels(config['labels']['exclude'], cstorpoolcluster.metadata.labels): + continue + + if config['labels']['include'] != []: + if not matchLabels(config['labels']['exclude'], cstorpoolcluster.metadata.labels): + continue + + if any(c['name'] == json['name'] and c['namespace'] == json['namespace'] for c in cstorpoolclusters): + continue + + cstorpoolclusters.append(json) + + return cstorpoolclusters + +def ZabbixDiscoveryCstorpoolclusters(clustername, cstorpoolclusters=[]): + """ + description: create a discovery for 
cstorpoolclusters, per namespace + return: class ZabbixMetric + """ + discovery = {"data":[]} + + for cstorpoolcluster in cstorpoolclusters: + output = { + "{#KUBERNETES_OPENEBS_CSTORPOOLCLUSTER_NAMESPACE}": cstorpoolcluster['namespace'], + "{#KUBERNETES_OPENEBS_CSTORPOOLCLUSTER_NAME}": cstorpoolcluster['name']} + discovery['data'].append(output) + + sender = [ZabbixMetric(clustername, "kubernetes.openebs.cstorpoolclusters.discovery", json.dumps(discovery))] + + return sender + +def ZabbixItemCstorpoolclusters(clustername, cstorpoolclusters=[]): + """ + description: create a item for cstorpoolclusters, per namespace + return: class ZabbixMetric + """ + sender = [] + + for cstorpoolcluster in cstorpoolclusters: + sender.append(ZabbixMetric(clustername, f"kubernetes.openebs.cstorpoolclusters.desiredInstances[{cstorpoolcluster['namespace']},{cstorpoolcluster['name']}]", cstorpoolcluster['instances']['desired']),) + sender.append(ZabbixMetric(clustername, f"kubernetes.openebs.cstorpoolclusters.healthyInstances[{cstorpoolcluster['namespace']},{cstorpoolcluster['name']}]", cstorpoolcluster['instances']['healthy']),) + sender.append(ZabbixMetric(clustername, f"kubernetes.openebs.cstorpoolclusters.provisionedInstances[{cstorpoolcluster['namespace']},{cstorpoolcluster['name']}]", cstorpoolcluster['instances']['provisioned']),) + sender.append(ZabbixMetric(clustername, f"kubernetes.openebs.cstorpoolclusters.desiredVersion[{cstorpoolcluster['namespace']},{cstorpoolcluster['name']}]", cstorpoolcluster['version']['desired']),) + sender.append(ZabbixMetric(clustername, f"kubernetes.openebs.cstorpoolclusters.currentVersion[{cstorpoolcluster['namespace']},{cstorpoolcluster['name']}]", cstorpoolcluster['version']['current']),) + + return sender + +def baseOpenebsCstorpoolclusters(mode=None, config=None): + """ + description: monitoring openebs cstorpoolclusters + return: class ZabbixMetric + """ + if mode == "discovery": + return ZabbixDiscoveryCstorpoolclusters(config['kubernetes']['name'], openebsGetCstorpoolclusters(config['monitoring']['openebs'])) + if mode == "item": + return ZabbixItemCstorpoolclusters(config['kubernetes']['name'], openebsGetCstorpoolclusters(config['monitoring']['openebs'])) + return [] diff --git a/src/modules/kubernetes/trivy/__init__.py b/src/modules/kubernetes/trivy/__init__.py new file mode 100644 index 0000000..9ffd180 --- /dev/null +++ b/src/modules/kubernetes/trivy/__init__.py @@ -0,0 +1 @@ +from modules.kubernetes.trivy.vulnerabilityreports import trivyGetVulnerabilityreports diff --git a/src/modules/kubernetes/trivy/vulnerabilityreports.py b/src/modules/kubernetes/trivy/vulnerabilityreports.py new file mode 100644 index 0000000..1fad38f --- /dev/null +++ b/src/modules/kubernetes/trivy/vulnerabilityreports.py @@ -0,0 +1,19 @@ +from kubernetes import client +from pyzabbix import ZabbixMetric +from modules.common.functions import * +import json, urllib3 + +urllib3.disable_warnings() + +def trivyGetVulnerabilityreports(config=None): + """ + description: get vulnerabilityreports data + return: list + """ + kubernetes = client.CustomObjectsApi() + + reports = [] + + for vuln in rawObjects(kubernetes.list_cluster_custom_object(group="aquasecurity.github.io", version="v1alpha1", plural="vulnerabilityreports")): + print(vuln['metadata']['name']) + print(vuln['report']['summary']) diff --git a/src/zabbix-kubernetes-discovery.py b/src/zabbix-kubernetes-discovery.py index 90b190d..863f6eb 100644 --- a/src/zabbix-kubernetes-discovery.py +++ b/src/zabbix-kubernetes-discovery.py @@ -6,6 
+6,7 @@ from kubernetes import config as kube_config from pyzabbix import ZabbixSender from modules.kubernetes.base import * +from modules.kubernetes.openebs import * parser = argparse.ArgumentParser() parser.add_argument("--config-file", dest="config_file", action="store", required=False, help="Configuration file (default: config.yaml)", default="config.yaml") @@ -56,36 +57,41 @@ def mainThread(func): if __name__ == "__main__": logging.info("Application zabbix-kubernetes-discovery started") - # cronjobs + # cronjobs (base) if config['monitoring']['cronjobs']['enabled']: schedule.every(config['zabbix']['schedule']['discovery']).seconds.do(mainThread, lambda: mainSend(baseCronjobs(mode="discovery", config=config))) schedule.every(config['zabbix']['schedule']['items']).seconds.do(mainThread, lambda: mainSend(baseCronjobs(mode="item", config=config))) - # daemonsets + # daemonsets (base) if config['monitoring']['daemonsets']['enabled']: schedule.every(config['zabbix']['schedule']['discovery']).seconds.do(mainThread, lambda: mainSend(baseDaemonsets(mode="discovery", config=config))) schedule.every(config['zabbix']['schedule']['items']).seconds.do(mainThread, lambda: mainSend(baseDaemonsets(mode="item", config=config))) - # deployments + # deployments (base) if config['monitoring']['deployments']['enabled']: schedule.every(config['zabbix']['schedule']['discovery']).seconds.do(mainThread, lambda: mainSend(baseDeployments(mode="discovery", config=config))) schedule.every(config['zabbix']['schedule']['items']).seconds.do(mainThread, lambda: mainSend(baseDeployments(mode="item", config=config))) - # nodes + # nodes (base) if config['monitoring']['nodes']['enabled']: schedule.every(config['zabbix']['schedule']['discovery']).seconds.do(mainThread, lambda: mainSend(baseNodes(mode="discovery", config=config))) schedule.every(config['zabbix']['schedule']['items']).seconds.do(mainThread, lambda: mainSend(baseNodes(mode="item", config=config))) - # statefulsets + # statefulsets (base) if config['monitoring']['statefulsets']['enabled']: schedule.every(config['zabbix']['schedule']['discovery']).seconds.do(mainThread, lambda: mainSend(baseStatefulsets(mode="discovery", config=config))) schedule.every(config['zabbix']['schedule']['items']).seconds.do(mainThread, lambda: mainSend(baseStatefulsets(mode="item", config=config))) - # volumes + # volumes (base) if config['monitoring']['volumes']['enabled']: schedule.every(config['zabbix']['schedule']['discovery']).seconds.do(mainThread, lambda: mainSend(baseVolumes(mode="discovery", config=config))) schedule.every(config['zabbix']['schedule']['items']).seconds.do(mainThread, lambda: mainSend(baseVolumes(mode="item", config=config))) + # cstorpoolclusters (openebs) + if config['monitoring']['openebs']['enabled']: + schedule.every(config['zabbix']['schedule']['discovery']).seconds.do(mainThread, lambda: mainSend(baseOpenebsCstorpoolclusters(mode="discovery", config=config))) + schedule.every(config['zabbix']['schedule']['items']).seconds.do(mainThread, lambda: mainSend(baseOpenebsCstorpoolclusters(mode="item", config=config))) + # tasks while True: schedule.run_pending() From eb398efbda04fb4053c676425d7d339d6d3035ea Mon Sep 17 00:00:00 2001 From: djerfy Date: Wed, 10 Jan 2024 20:00:34 +0100 Subject: [PATCH 11/34] fix: openebs syntax Signed-off-by: djerfy --- src/modules/kubernetes/openebs/__init__.py | 2 +- .../kubernetes/openebs/cstorpoolclusters.py | 14 +++++++------- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git 
a/src/modules/kubernetes/openebs/__init__.py b/src/modules/kubernetes/openebs/__init__.py
index f4a76e9..3e24476 100644
--- a/src/modules/kubernetes/openebs/__init__.py
+++ b/src/modules/kubernetes/openebs/__init__.py
@@ -1 +1 @@
-from modules.kubernetes.openebs.cstorpoolclusters import openebsGetCstorpoolclusters
+from modules.kubernetes.openebs.cstorpoolclusters import baseOpenebsCstorpoolclusters
diff --git a/src/modules/kubernetes/openebs/cstorpoolclusters.py b/src/modules/kubernetes/openebs/cstorpoolclusters.py
index dcf8a07..1a519fa 100644
--- a/src/modules/kubernetes/openebs/cstorpoolclusters.py
+++ b/src/modules/kubernetes/openebs/cstorpoolclusters.py
@@ -16,16 +16,16 @@ def openebsGetCstorpoolclusters(config=None):
 
     for cstorpoolcluster in rawObjects(kubernetes.list_cluster_custom_object(group="cstor.openebs.io", version="v1", plural="cstorpoolclusters")):
         json = {
-            "name": cstorpoolcluster.metadata.name,
-            "namespace": cstorpoolcluster.metadata.namespace,
+            "name": cstorpoolcluster['metadata']['name'],
+            "namespace": cstorpoolcluster['metadata']['namespace'],
             "instances": {
-                "desired": cstorpoolcluster.status.desiredInstances,
-                "healthy": cstorpoolcluster.status.healthyInstances,
-                "provisioned": cstorpoolcluster.status.provisionedInstances
+                "desired": cstorpoolcluster['status']['desiredInstances'],
+                "healthy": cstorpoolcluster['status']['healthyInstances'],
+                "provisioned": cstorpoolcluster['status']['provisionedInstances']
             },
             "version": {
-                "desired": cstorpoolcluster.status.versionDetails.desired,
-                "current": cstorpoolcluster.status.versionDetails.status.current
+                "desired": cstorpoolcluster['status']['versionDetails']['desired'],
+                "current": cstorpoolcluster['status']['versionDetails']['status.current']
             }
         }

From c7cfcc0891be336af4161971f1543f8f9512848e Mon Sep 17 00:00:00 2001
From: djerfy
Date: Wed, 10 Jan 2024 20:06:28 +0100
Subject: [PATCH 12/34] fix: openebs syntax

Signed-off-by: djerfy
---
 src/modules/kubernetes/openebs/cstorpoolclusters.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/src/modules/kubernetes/openebs/cstorpoolclusters.py b/src/modules/kubernetes/openebs/cstorpoolclusters.py
index 1a519fa..4ea50a6 100644
--- a/src/modules/kubernetes/openebs/cstorpoolclusters.py
+++ b/src/modules/kubernetes/openebs/cstorpoolclusters.py
@@ -24,8 +24,8 @@ def openebsGetCstorpoolclusters(config=None):
                 "provisioned": cstorpoolcluster['status']['provisionedInstances']
             },
             "version": {
-                "desired": cstorpoolcluster['status']['versionDetails']['desired'],
-                "current": cstorpoolcluster['status']['versionDetails']['status.current']
+                "desired": cstorpoolcluster['versionDetails']['desired'],
+                "current": cstorpoolcluster['versionDetails']['status']['current']
             }
         }

From 709c349406cacb46eadf89f300755f359c9a89e0 Mon Sep 17 00:00:00 2001
From: djerfy
Date: Wed, 10 Jan 2024 20:07:40 +0100
Subject: [PATCH 13/34] fix: syntax if no labels found

Signed-off-by: djerfy
---
 src/modules/kubernetes/openebs/cstorpoolclusters.py | 13 +++++++------
 1 file changed, 7 insertions(+), 6 deletions(-)

diff --git a/src/modules/kubernetes/openebs/cstorpoolclusters.py b/src/modules/kubernetes/openebs/cstorpoolclusters.py
index 4ea50a6..4afaaaa 100644
--- a/src/modules/kubernetes/openebs/cstorpoolclusters.py
+++ b/src/modules/kubernetes/openebs/cstorpoolclusters.py
@@ -29,12 +29,13 @@ def openebsGetCstorpoolclusters(config=None):
             }
         }
 
-        if matchLabels(config['labels']['exclude'], cstorpoolcluster.metadata.labels):
-            continue
-
-        if config['labels']['include'] != []:
-            if not
matchLabels(config['labels']['exclude'], cstorpoolcluster.metadata.labels): - continue + if cstorpoolcluster.get("metadata"): + if cstorpoolcluster['metadata'].get("labels"): + if matchLabels(config['labels']['exclude'], cstorpoolcluster['metadata']['labels']): + continue + if config['labels']['include'] != []: + if not matchLabels(config['labels']['exclude'], cstorpoolcluster['metadata']['labels']): + continue if any(c['name'] == json['name'] and c['namespace'] == json['namespace'] for c in cstorpoolclusters): continue From 038ae869064dfd21bc0146d94cff014cdd739f96 Mon Sep 17 00:00:00 2001 From: djerfy Date: Wed, 10 Jan 2024 20:10:41 +0100 Subject: [PATCH 14/34] misc: return empty list if engine isn't cstor Signed-off-by: djerfy --- src/modules/kubernetes/openebs/cstorpoolclusters.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/modules/kubernetes/openebs/cstorpoolclusters.py b/src/modules/kubernetes/openebs/cstorpoolclusters.py index 4afaaaa..daf31ca 100644 --- a/src/modules/kubernetes/openebs/cstorpoolclusters.py +++ b/src/modules/kubernetes/openebs/cstorpoolclusters.py @@ -14,6 +14,9 @@ def openebsGetCstorpoolclusters(config=None): cstorpoolclusters = [] + if config['engine'] != "cstor": + return cstorpoolclusters + for cstorpoolcluster in rawObjects(kubernetes.list_cluster_custom_object(group="cstor.openebs.io", version="v1", plural="cstorpoolclusters")): json = { "name": cstorpoolcluster['metadata']['name'], From 9939798f9563ba5f6543b417115619373f55bad3 Mon Sep 17 00:00:00 2001 From: djerfy Date: Wed, 10 Jan 2024 20:23:04 +0100 Subject: [PATCH 15/34] feat(openebs): cstorpoolinstances integration Signed-off-by: djerfy --- src/modules/kubernetes/openebs/__init__.py | 1 + .../kubernetes/openebs/cstorpoolclusters.py | 77 ++++++++-------- .../kubernetes/openebs/cstorpoolinstances.py | 92 +++++++++++++++++++ src/zabbix-kubernetes-discovery.py | 19 ++-- 4 files changed, 142 insertions(+), 47 deletions(-) create mode 100644 src/modules/kubernetes/openebs/cstorpoolinstances.py diff --git a/src/modules/kubernetes/openebs/__init__.py b/src/modules/kubernetes/openebs/__init__.py index 3e24476..f9bd2c8 100644 --- a/src/modules/kubernetes/openebs/__init__.py +++ b/src/modules/kubernetes/openebs/__init__.py @@ -1 +1,2 @@ from modules.kubernetes.openebs.cstorpoolclusters import baseOpenebsCstorpoolclusters +from modules.kubernetes.openebs.cstorpoolinstances import baseOpenebsCstorpoolinstances diff --git a/src/modules/kubernetes/openebs/cstorpoolclusters.py b/src/modules/kubernetes/openebs/cstorpoolclusters.py index daf31ca..3131cfc 100644 --- a/src/modules/kubernetes/openebs/cstorpoolclusters.py +++ b/src/modules/kubernetes/openebs/cstorpoolclusters.py @@ -5,88 +5,85 @@ urllib3.disable_warnings() -def openebsGetCstorpoolclusters(config=None): +def openebsGetCstorpoolinstances(config=None): """ - description: get cstorpoolclusters data + description: get cstorpoolinstances data return: list """ kubernetes = client.CustomObjectsApi() - cstorpoolclusters = [] + cstorpoolinstances = [] if config['engine'] != "cstor": - return cstorpoolclusters + return cstorpoolinstances - for cstorpoolcluster in rawObjects(kubernetes.list_cluster_custom_object(group="cstor.openebs.io", version="v1", plural="cstorpoolclusters")): + for cstorpoolinstance in rawObjects(kubernetes.list_cluster_custom_object(group="cstor.openebs.io", version="v1", plural="cstorpoolinstances")): json = { - "name": cstorpoolcluster['metadata']['name'], - "namespace": cstorpoolcluster['metadata']['namespace'], - "instances": { - 
"desired": cstorpoolcluster['status']['desiredInstances'], - "healthy": cstorpoolcluster['status']['healthyInstances'], - "provisioned": cstorpoolcluster['status']['provisionedInstances'] - }, + "name": cstorpoolinstance['metadata']['name'], + "namespace": cstorpoolinstance['metadata']['namespace'], + "status": cstorpoolinstance['status'], "version": { - "desired": cstorpoolcluster['versionDetails']['desired'], - "current": cstorpoolcluster['versionDetails']['status']['current'] + "desired": cstorpoolinstance['versionDetails']['desired'], + "current": cstorpoolinstance['versionDetails']['status']['current'] } } - if cstorpoolcluster.get("metadata"): - if cstorpoolcluster['metadata'].get("labels"): - if matchLabels(config['labels']['exclude'], cstorpoolcluster['metadata']['labels']): + if cstorpoolinstance.get("metadata"): + if cstorpoolinstance['metadata'].get("labels"): + if matchLabels(config['labels']['exclude'], cstorpoolinstance['metadata']['labels']): continue if config['labels']['include'] != []: - if not matchLabels(config['labels']['exclude'], cstorpoolcluster['metadata']['labels']): + if not matchLabels(config['labels']['exclude'], cstorpoolinstance['metadata']['labels']): continue - if any(c['name'] == json['name'] and c['namespace'] == json['namespace'] for c in cstorpoolclusters): + if any(c['name'] == json['name'] and c['namespace'] == json['namespace'] for c in cstorpoolinstance): continue - cstorpoolclusters.append(json) + cstorpoolinstances.append(json) - return cstorpoolclusters + return cstorpoolinstances -def ZabbixDiscoveryCstorpoolclusters(clustername, cstorpoolclusters=[]): +def ZabbixDiscoveryCstorpoolinstances(clustername, cstorpoolinstances=[]): """ - description: create a discovery for cstorpoolclusters, per namespace + description: create a discovery for cstorpoolinstances, per namespace return: class ZabbixMetric """ discovery = {"data":[]} - for cstorpoolcluster in cstorpoolclusters: + for cstorpoolinstance in cstorpoolinstances: output = { - "{#KUBERNETES_OPENEBS_CSTORPOOLCLUSTER_NAMESPACE}": cstorpoolcluster['namespace'], - "{#KUBERNETES_OPENEBS_CSTORPOOLCLUSTER_NAME}": cstorpoolcluster['name']} + "{#KUBERNETES_OPENEBS_CSTORPOOLINSTANCE_NAMESPACE}": cstorpoolinstance['namespace'], + "{#KUBERNETES_OPENEBS_CSTORPOOLINSTANCE_NAME}": cstorpoolinstance['name']} discovery['data'].append(output) - sender = [ZabbixMetric(clustername, "kubernetes.openebs.cstorpoolclusters.discovery", json.dumps(discovery))] + sender = [ZabbixMetric(clustername, "kubernetes.openebs.cstorpoolinstances.discovery", json.dumps(discovery))] return sender -def ZabbixItemCstorpoolclusters(clustername, cstorpoolclusters=[]): +def ZabbixItemCstorpoolinstances(clustername, cstorpoolinstances=[]): """ - description: create a item for cstorpoolclusters, per namespace + description: create a item for cstorpoolinstances, per namespace return: class ZabbixMetric """ sender = [] - for cstorpoolcluster in cstorpoolclusters: - sender.append(ZabbixMetric(clustername, f"kubernetes.openebs.cstorpoolclusters.desiredInstances[{cstorpoolcluster['namespace']},{cstorpoolcluster['name']}]", cstorpoolcluster['instances']['desired']),) - sender.append(ZabbixMetric(clustername, f"kubernetes.openebs.cstorpoolclusters.healthyInstances[{cstorpoolcluster['namespace']},{cstorpoolcluster['name']}]", cstorpoolcluster['instances']['healthy']),) - sender.append(ZabbixMetric(clustername, f"kubernetes.openebs.cstorpoolclusters.provisionedInstances[{cstorpoolcluster['namespace']},{cstorpoolcluster['name']}]", 
cstorpoolcluster['instances']['provisioned']),) - sender.append(ZabbixMetric(clustername, f"kubernetes.openebs.cstorpoolclusters.desiredVersion[{cstorpoolcluster['namespace']},{cstorpoolcluster['name']}]", cstorpoolcluster['version']['desired']),) - sender.append(ZabbixMetric(clustername, f"kubernetes.openebs.cstorpoolclusters.currentVersion[{cstorpoolcluster['namespace']},{cstorpoolcluster['name']}]", cstorpoolcluster['version']['current']),) - + for cstorpoolinstance in cstorpoolinstances: + sender.append(ZabbixMetric(clustername, f"kubernetes.openebs.cstorpoolinstances.readonly[{cstorpoolinstance['namespace']},{cstorpoolinstance['name']}]", cstorpoolinstance['status']['readOnly']),) + sender.append(ZabbixMetric(clustername, f"kubernetes.openebs.cstorpoolinstances.provisionedReplicas[{cstorpoolinstance['namespace']},{cstorpoolinstance['name']}]", cstorpoolinstance['status']['provisionedReplicas']),) + sender.append(ZabbixMetric(clustername, f"kubernetes.openebs.cstorpoolinstances.healthyReplicas[{cstorpoolinstance['namespace']},{cstorpoolinstance['name']}]", cstorpoolinstance['status']['healthyReplicas']),) + sender.append(ZabbixMetric(clustername, f"kubernetes.openebs.cstorpoolinstances.status[{cstorpoolinstance['namespace']},{cstorpoolinstance['name']}]", cstorpoolinstance['status']['phase']),) + sender.append(ZabbixMetric(clustername, f"kubernetes.openebs.cstorpoolinstances.capacity.total[{cstorpoolinstance['namespace']},{cstorpoolinstance['name']}]", cstorpoolinstance['status']['capacity']['total']),) + sender.append(ZabbixMetric(clustername, f"kubernetes.openebs.cstorpoolinstances.capacity.free[{cstorpoolinstance['namespace']},{cstorpoolinstance['name']}]", cstorpoolinstance['status']['capacity']['free']),) + sender.append(ZabbixMetric(clustername, f"kubernetes.openebs.cstorpoolinstances.capacity.used[{cstorpoolinstance['namespace']},{cstorpoolinstance['name']}]", cstorpoolinstance['status']['capacity']['used']),) return sender -def baseOpenebsCstorpoolclusters(mode=None, config=None): +def baseOpenebsCstorpoolinstances(mode=None, config=None): """ - description: monitoring openebs cstorpoolclusters + description: monitoring openebs cstorpoolinstances return: class ZabbixMetric """ if mode == "discovery": - return ZabbixDiscoveryCstorpoolclusters(config['kubernetes']['name'], openebsGetCstorpoolclusters(config['monitoring']['openebs'])) + return ZabbixDiscoveryCstorpoolinstances(config['kubernetes']['name'], openebsGetCstorpoolinstances(config['monitoring']['openebs'])) if mode == "item": - return ZabbixItemCstorpoolclusters(config['kubernetes']['name'], openebsGetCstorpoolclusters(config['monitoring']['openebs'])) + return ZabbixItemCstorpoolinstances(config['kubernetes']['name'], openebsGetCstorpoolinstances(config['monitoring']['openebs'])) return [] diff --git a/src/modules/kubernetes/openebs/cstorpoolinstances.py b/src/modules/kubernetes/openebs/cstorpoolinstances.py new file mode 100644 index 0000000..daf31ca --- /dev/null +++ b/src/modules/kubernetes/openebs/cstorpoolinstances.py @@ -0,0 +1,92 @@ +from kubernetes import client +from pyzabbix import ZabbixMetric +from modules.common.functions import * +import json, urllib3 + +urllib3.disable_warnings() + +def openebsGetCstorpoolclusters(config=None): + """ + description: get cstorpoolclusters data + return: list + """ + kubernetes = client.CustomObjectsApi() + + cstorpoolclusters = [] + + if config['engine'] != "cstor": + return cstorpoolclusters + + for cstorpoolcluster in 
rawObjects(kubernetes.list_cluster_custom_object(group="cstor.openebs.io", version="v1", plural="cstorpoolclusters")): + json = { + "name": cstorpoolcluster['metadata']['name'], + "namespace": cstorpoolcluster['metadata']['namespace'], + "instances": { + "desired": cstorpoolcluster['status']['desiredInstances'], + "healthy": cstorpoolcluster['status']['healthyInstances'], + "provisioned": cstorpoolcluster['status']['provisionedInstances'] + }, + "version": { + "desired": cstorpoolcluster['versionDetails']['desired'], + "current": cstorpoolcluster['versionDetails']['status']['current'] + } + } + + if cstorpoolcluster.get("metadata"): + if cstorpoolcluster['metadata'].get("labels"): + if matchLabels(config['labels']['exclude'], cstorpoolcluster['metadata']['labels']): + continue + if config['labels']['include'] != []: + if not matchLabels(config['labels']['exclude'], cstorpoolcluster['metadata']['labels']): + continue + + if any(c['name'] == json['name'] and c['namespace'] == json['namespace'] for c in cstorpoolclusters): + continue + + cstorpoolclusters.append(json) + + return cstorpoolclusters + +def ZabbixDiscoveryCstorpoolclusters(clustername, cstorpoolclusters=[]): + """ + description: create a discovery for cstorpoolclusters, per namespace + return: class ZabbixMetric + """ + discovery = {"data":[]} + + for cstorpoolcluster in cstorpoolclusters: + output = { + "{#KUBERNETES_OPENEBS_CSTORPOOLCLUSTER_NAMESPACE}": cstorpoolcluster['namespace'], + "{#KUBERNETES_OPENEBS_CSTORPOOLCLUSTER_NAME}": cstorpoolcluster['name']} + discovery['data'].append(output) + + sender = [ZabbixMetric(clustername, "kubernetes.openebs.cstorpoolclusters.discovery", json.dumps(discovery))] + + return sender + +def ZabbixItemCstorpoolclusters(clustername, cstorpoolclusters=[]): + """ + description: create a item for cstorpoolclusters, per namespace + return: class ZabbixMetric + """ + sender = [] + + for cstorpoolcluster in cstorpoolclusters: + sender.append(ZabbixMetric(clustername, f"kubernetes.openebs.cstorpoolclusters.desiredInstances[{cstorpoolcluster['namespace']},{cstorpoolcluster['name']}]", cstorpoolcluster['instances']['desired']),) + sender.append(ZabbixMetric(clustername, f"kubernetes.openebs.cstorpoolclusters.healthyInstances[{cstorpoolcluster['namespace']},{cstorpoolcluster['name']}]", cstorpoolcluster['instances']['healthy']),) + sender.append(ZabbixMetric(clustername, f"kubernetes.openebs.cstorpoolclusters.provisionedInstances[{cstorpoolcluster['namespace']},{cstorpoolcluster['name']}]", cstorpoolcluster['instances']['provisioned']),) + sender.append(ZabbixMetric(clustername, f"kubernetes.openebs.cstorpoolclusters.desiredVersion[{cstorpoolcluster['namespace']},{cstorpoolcluster['name']}]", cstorpoolcluster['version']['desired']),) + sender.append(ZabbixMetric(clustername, f"kubernetes.openebs.cstorpoolclusters.currentVersion[{cstorpoolcluster['namespace']},{cstorpoolcluster['name']}]", cstorpoolcluster['version']['current']),) + + return sender + +def baseOpenebsCstorpoolclusters(mode=None, config=None): + """ + description: monitoring openebs cstorpoolclusters + return: class ZabbixMetric + """ + if mode == "discovery": + return ZabbixDiscoveryCstorpoolclusters(config['kubernetes']['name'], openebsGetCstorpoolclusters(config['monitoring']['openebs'])) + if mode == "item": + return ZabbixItemCstorpoolclusters(config['kubernetes']['name'], openebsGetCstorpoolclusters(config['monitoring']['openebs'])) + return [] diff --git a/src/zabbix-kubernetes-discovery.py b/src/zabbix-kubernetes-discovery.py 
index 863f6eb..81e8dcd 100644 --- a/src/zabbix-kubernetes-discovery.py +++ b/src/zabbix-kubernetes-discovery.py @@ -57,40 +57,45 @@ def mainThread(func): if __name__ == "__main__": logging.info("Application zabbix-kubernetes-discovery started") - # cronjobs (base) + # cronjobs if config['monitoring']['cronjobs']['enabled']: schedule.every(config['zabbix']['schedule']['discovery']).seconds.do(mainThread, lambda: mainSend(baseCronjobs(mode="discovery", config=config))) schedule.every(config['zabbix']['schedule']['items']).seconds.do(mainThread, lambda: mainSend(baseCronjobs(mode="item", config=config))) - # daemonsets (base) + # daemonsets if config['monitoring']['daemonsets']['enabled']: schedule.every(config['zabbix']['schedule']['discovery']).seconds.do(mainThread, lambda: mainSend(baseDaemonsets(mode="discovery", config=config))) schedule.every(config['zabbix']['schedule']['items']).seconds.do(mainThread, lambda: mainSend(baseDaemonsets(mode="item", config=config))) - # deployments (base) + # deployments if config['monitoring']['deployments']['enabled']: schedule.every(config['zabbix']['schedule']['discovery']).seconds.do(mainThread, lambda: mainSend(baseDeployments(mode="discovery", config=config))) schedule.every(config['zabbix']['schedule']['items']).seconds.do(mainThread, lambda: mainSend(baseDeployments(mode="item", config=config))) - # nodes (base) + # nodes if config['monitoring']['nodes']['enabled']: schedule.every(config['zabbix']['schedule']['discovery']).seconds.do(mainThread, lambda: mainSend(baseNodes(mode="discovery", config=config))) schedule.every(config['zabbix']['schedule']['items']).seconds.do(mainThread, lambda: mainSend(baseNodes(mode="item", config=config))) - # statefulsets (base) + # statefulsets if config['monitoring']['statefulsets']['enabled']: schedule.every(config['zabbix']['schedule']['discovery']).seconds.do(mainThread, lambda: mainSend(baseStatefulsets(mode="discovery", config=config))) schedule.every(config['zabbix']['schedule']['items']).seconds.do(mainThread, lambda: mainSend(baseStatefulsets(mode="item", config=config))) - # volumes (base) + # volumes if config['monitoring']['volumes']['enabled']: schedule.every(config['zabbix']['schedule']['discovery']).seconds.do(mainThread, lambda: mainSend(baseVolumes(mode="discovery", config=config))) schedule.every(config['zabbix']['schedule']['items']).seconds.do(mainThread, lambda: mainSend(baseVolumes(mode="item", config=config))) - # cstorpoolclusters (openebs) + # openebs if config['monitoring']['openebs']['enabled']: + # cstorpoolclusters schedule.every(config['zabbix']['schedule']['discovery']).seconds.do(mainThread, lambda: mainSend(baseOpenebsCstorpoolclusters(mode="discovery", config=config))) schedule.every(config['zabbix']['schedule']['items']).seconds.do(mainThread, lambda: mainSend(baseOpenebsCstorpoolclusters(mode="item", config=config))) + # cstorpoolinstances + schedule.every(config['zabbix']['schedule']['discovery']).seconds.do(mainThread, lambda: mainSend(baseOpenebsCstorpoolinstances(mode="discovery", config=config))) + schedule.every(config['zabbix']['schedule']['items']).seconds.do(mainThread, lambda: mainSend(baseOpenebsCstorpoolinstances(mode="item", config=config))) + # tasks while True: From e50380d4d827ce40c1f71c27c26e2fb071dc7dc7 Mon Sep 17 00:00:00 2001 From: djerfy Date: Wed, 10 Jan 2024 20:26:41 +0100 Subject: [PATCH 16/34] feat(openebs): fix names of files Signed-off-by: djerfy --- .../kubernetes/openebs/cstorpoolclusters.py | 77 ++++++++++--------- 
.../kubernetes/openebs/cstorpoolinstances.py      | 77 +++++++++----------
 2 files changed, 77 insertions(+), 77 deletions(-)

diff --git a/src/modules/kubernetes/openebs/cstorpoolclusters.py b/src/modules/kubernetes/openebs/cstorpoolclusters.py
index 3131cfc..daf31ca 100644
--- a/src/modules/kubernetes/openebs/cstorpoolclusters.py
+++ b/src/modules/kubernetes/openebs/cstorpoolclusters.py
@@ -5,85 +5,88 @@
 
 urllib3.disable_warnings()
 
-def openebsGetCstorpoolinstances(config=None):
+def openebsGetCstorpoolclusters(config=None):
     """
-    description: get cstorpoolinstances data
+    description: get cstorpoolclusters data
     return: list
     """
     kubernetes = client.CustomObjectsApi()
 
-    cstorpoolinstances = []
+    cstorpoolclusters = []
 
     if config['engine'] != "cstor":
-        return cstorpoolinstances
+        return cstorpoolclusters
 
-    for cstorpoolinstance in rawObjects(kubernetes.list_cluster_custom_object(group="cstor.openebs.io", version="v1", plural="cstorpoolinstances")):
+    for cstorpoolcluster in rawObjects(kubernetes.list_cluster_custom_object(group="cstor.openebs.io", version="v1", plural="cstorpoolclusters")):
         json = {
-            "name": cstorpoolinstance['metadata']['name'],
-            "namespace": cstorpoolinstance['metadata']['namespace'],
-            "status": cstorpoolinstance['status'],
+            "name": cstorpoolcluster['metadata']['name'],
+            "namespace": cstorpoolcluster['metadata']['namespace'],
+            "instances": {
+                "desired": cstorpoolcluster['status']['desiredInstances'],
+                "healthy": cstorpoolcluster['status']['healthyInstances'],
+                "provisioned": cstorpoolcluster['status']['provisionedInstances']
+            },
             "version": {
-                "desired": cstorpoolinstance['versionDetails']['desired'],
-                "current": cstorpoolinstance['versionDetails']['status']['current']
+                "desired": cstorpoolcluster['versionDetails']['desired'],
+                "current": cstorpoolcluster['versionDetails']['status']['current']
             }
         }
 
-        if cstorpoolinstance.get("metadata"):
-            if cstorpoolinstance['metadata'].get("labels"):
-                if matchLabels(config['labels']['exclude'], cstorpoolinstance['metadata']['labels']):
+        if cstorpoolcluster.get("metadata"):
+            if cstorpoolcluster['metadata'].get("labels"):
+                if matchLabels(config['labels']['exclude'], cstorpoolcluster['metadata']['labels']):
                     continue
                 if config['labels']['include'] != []:
-                    if not matchLabels(config['labels']['exclude'], cstorpoolinstance['metadata']['labels']):
+                    if not matchLabels(config['labels']['include'], cstorpoolcluster['metadata']['labels']):
                         continue
 
-        if any(c['name'] == json['name'] and c['namespace'] == json['namespace'] for c in cstorpoolinstance):
+        if any(c['name'] == json['name'] and c['namespace'] == json['namespace'] for c in cstorpoolclusters):
             continue
 
-        cstorpoolinstances.append(json)
+        cstorpoolclusters.append(json)
 
-    return cstorpoolinstances
+    return cstorpoolclusters
 
-def ZabbixDiscoveryCstorpoolinstances(clustername, cstorpoolinstances=[]):
+def ZabbixDiscoveryCstorpoolclusters(clustername, cstorpoolclusters=[]):
     """
-    description: create a discovery for cstorpoolinstances, per namespace
+    description: create a discovery for cstorpoolclusters, per namespace
     return: class ZabbixMetric
     """
     discovery = {"data":[]}
 
-    for cstorpoolinstance in cstorpoolinstances:
+    for cstorpoolcluster in cstorpoolclusters:
         output = {
-            "{#KUBERNETES_OPENEBS_CSTORPOOLINSTANCE_NAMESPACE}": cstorpoolinstance['namespace'],
-            "{#KUBERNETES_OPENEBS_CSTORPOOLINSTANCE_NAME}": cstorpoolinstance['name']}
+            "{#KUBERNETES_OPENEBS_CSTORPOOLCLUSTER_NAMESPACE}": cstorpoolcluster['namespace'],
+            "{#KUBERNETES_OPENEBS_CSTORPOOLCLUSTER_NAME}":
cstorpoolcluster['name']} discovery['data'].append(output) - sender = [ZabbixMetric(clustername, "kubernetes.openebs.cstorpoolinstances.discovery", json.dumps(discovery))] + sender = [ZabbixMetric(clustername, "kubernetes.openebs.cstorpoolclusters.discovery", json.dumps(discovery))] return sender -def ZabbixItemCstorpoolinstances(clustername, cstorpoolinstances=[]): +def ZabbixItemCstorpoolclusters(clustername, cstorpoolclusters=[]): """ - description: create a item for cstorpoolinstances, per namespace + description: create a item for cstorpoolclusters, per namespace return: class ZabbixMetric """ sender = [] - for cstorpoolinstance in cstorpoolinstances: - sender.append(ZabbixMetric(clustername, f"kubernetes.openebs.cstorpoolinstances.readonly[{cstorpoolinstance['namespace']},{cstorpoolinstance['name']}]", cstorpoolinstance['status']['readOnly']),) - sender.append(ZabbixMetric(clustername, f"kubernetes.openebs.cstorpoolinstances.provisionedReplicas[{cstorpoolinstance['namespace']},{cstorpoolinstance['name']}]", cstorpoolinstance['status']['provisionedReplicas']),) - sender.append(ZabbixMetric(clustername, f"kubernetes.openebs.cstorpoolinstances.healthyReplicas[{cstorpoolinstance['namespace']},{cstorpoolinstance['name']}]", cstorpoolinstance['status']['healthyReplicas']),) - sender.append(ZabbixMetric(clustername, f"kubernetes.openebs.cstorpoolinstances.status[{cstorpoolinstance['namespace']},{cstorpoolinstance['name']}]", cstorpoolinstance['status']['phase']),) - sender.append(ZabbixMetric(clustername, f"kubernetes.openebs.cstorpoolinstances.capacity.total[{cstorpoolinstance['namespace']},{cstorpoolinstance['name']}]", cstorpoolinstance['status']['capacity']['total']),) - sender.append(ZabbixMetric(clustername, f"kubernetes.openebs.cstorpoolinstances.capacity.free[{cstorpoolinstance['namespace']},{cstorpoolinstance['name']}]", cstorpoolinstance['status']['capacity']['free']),) - sender.append(ZabbixMetric(clustername, f"kubernetes.openebs.cstorpoolinstances.capacity.used[{cstorpoolinstance['namespace']},{cstorpoolinstance['name']}]", cstorpoolinstance['status']['capacity']['used']),) + for cstorpoolcluster in cstorpoolclusters: + sender.append(ZabbixMetric(clustername, f"kubernetes.openebs.cstorpoolclusters.desiredInstances[{cstorpoolcluster['namespace']},{cstorpoolcluster['name']}]", cstorpoolcluster['instances']['desired']),) + sender.append(ZabbixMetric(clustername, f"kubernetes.openebs.cstorpoolclusters.healthyInstances[{cstorpoolcluster['namespace']},{cstorpoolcluster['name']}]", cstorpoolcluster['instances']['healthy']),) + sender.append(ZabbixMetric(clustername, f"kubernetes.openebs.cstorpoolclusters.provisionedInstances[{cstorpoolcluster['namespace']},{cstorpoolcluster['name']}]", cstorpoolcluster['instances']['provisioned']),) + sender.append(ZabbixMetric(clustername, f"kubernetes.openebs.cstorpoolclusters.desiredVersion[{cstorpoolcluster['namespace']},{cstorpoolcluster['name']}]", cstorpoolcluster['version']['desired']),) + sender.append(ZabbixMetric(clustername, f"kubernetes.openebs.cstorpoolclusters.currentVersion[{cstorpoolcluster['namespace']},{cstorpoolcluster['name']}]", cstorpoolcluster['version']['current']),) + return sender -def baseOpenebsCstorpoolinstances(mode=None, config=None): +def baseOpenebsCstorpoolclusters(mode=None, config=None): """ - description: monitoring openebs cstorpoolinstances + description: monitoring openebs cstorpoolclusters return: class ZabbixMetric """ if mode == "discovery": - return 
ZabbixDiscoveryCstorpoolinstances(config['kubernetes']['name'], openebsGetCstorpoolinstances(config['monitoring']['openebs']))
+        return ZabbixDiscoveryCstorpoolclusters(config['kubernetes']['name'], openebsGetCstorpoolclusters(config['monitoring']['openebs']))
     if mode == "item":
-        return ZabbixItemCstorpoolinstances(config['kubernetes']['name'], openebsGetCstorpoolinstances(config['monitoring']['openebs']))
+        return ZabbixItemCstorpoolclusters(config['kubernetes']['name'], openebsGetCstorpoolclusters(config['monitoring']['openebs']))
     return []
diff --git a/src/modules/kubernetes/openebs/cstorpoolinstances.py b/src/modules/kubernetes/openebs/cstorpoolinstances.py
index daf31ca..3131cfc 100644
--- a/src/modules/kubernetes/openebs/cstorpoolinstances.py
+++ b/src/modules/kubernetes/openebs/cstorpoolinstances.py
@@ -5,88 +5,85 @@
 
 urllib3.disable_warnings()
 
-def openebsGetCstorpoolclusters(config=None):
+def openebsGetCstorpoolinstances(config=None):
     """
-    description: get cstorpoolclusters data
+    description: get cstorpoolinstances data
     return: list
     """
     kubernetes = client.CustomObjectsApi()
 
-    cstorpoolclusters = []
+    cstorpoolinstances = []
 
     if config['engine'] != "cstor":
-        return cstorpoolclusters
+        return cstorpoolinstances
 
-    for cstorpoolcluster in rawObjects(kubernetes.list_cluster_custom_object(group="cstor.openebs.io", version="v1", plural="cstorpoolclusters")):
+    for cstorpoolinstance in rawObjects(kubernetes.list_cluster_custom_object(group="cstor.openebs.io", version="v1", plural="cstorpoolinstances")):
         json = {
-            "name": cstorpoolcluster['metadata']['name'],
-            "namespace": cstorpoolcluster['metadata']['namespace'],
-            "instances": {
-                "desired": cstorpoolcluster['status']['desiredInstances'],
-                "healthy": cstorpoolcluster['status']['healthyInstances'],
-                "provisioned": cstorpoolcluster['status']['provisionedInstances']
-            },
+            "name": cstorpoolinstance['metadata']['name'],
+            "namespace": cstorpoolinstance['metadata']['namespace'],
+            "status": cstorpoolinstance['status'],
             "version": {
-                "desired": cstorpoolcluster['versionDetails']['desired'],
-                "current": cstorpoolcluster['versionDetails']['status']['current']
+                "desired": cstorpoolinstance['versionDetails']['desired'],
+                "current": cstorpoolinstance['versionDetails']['status']['current']
             }
         }
 
-        if cstorpoolcluster.get("metadata"):
-            if cstorpoolcluster['metadata'].get("labels"):
-                if matchLabels(config['labels']['exclude'], cstorpoolcluster['metadata']['labels']):
+        if cstorpoolinstance.get("metadata"):
+            if cstorpoolinstance['metadata'].get("labels"):
+                if matchLabels(config['labels']['exclude'], cstorpoolinstance['metadata']['labels']):
                     continue
                 if config['labels']['include'] != []:
-                    if not matchLabels(config['labels']['exclude'], cstorpoolcluster['metadata']['labels']):
+                    if not matchLabels(config['labels']['include'], cstorpoolinstance['metadata']['labels']):
                         continue
 
-        if any(c['name'] == json['name'] and c['namespace'] == json['namespace'] for c in cstorpoolclusters):
+        if any(c['name'] == json['name'] and c['namespace'] == json['namespace'] for c in cstorpoolinstance):
            continue
 
-        cstorpoolclusters.append(json)
+        cstorpoolinstances.append(json)
 
-    return cstorpoolclusters
+    return cstorpoolinstances
 
-def ZabbixDiscoveryCstorpoolclusters(clustername, cstorpoolclusters=[]):
+def ZabbixDiscoveryCstorpoolinstances(clustername, cstorpoolinstances=[]):
     """
-    description: create a discovery for cstorpoolclusters, per namespace
+    description: create a discovery for cstorpoolinstances, per namespace
     return: class ZabbixMetric
     """
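+    # note (assumption, not in the original commit): this follows the standard
+    # Zabbix low-level discovery protocol, where each entry appended to
+    # discovery['data'] becomes one discovered entity, and the
+    # {#KUBERNETES_OPENEBS_CSTORPOOLINSTANCE_*} macros below are the keys the
+    # template's item prototypes are expected to reference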
discovery = {"data":[]} - for cstorpoolcluster in cstorpoolclusters: + for cstorpoolinstance in cstorpoolinstances: output = { - "{#KUBERNETES_OPENEBS_CSTORPOOLCLUSTER_NAMESPACE}": cstorpoolcluster['namespace'], - "{#KUBERNETES_OPENEBS_CSTORPOOLCLUSTER_NAME}": cstorpoolcluster['name']} + "{#KUBERNETES_OPENEBS_CSTORPOOLINSTANCE_NAMESPACE}": cstorpoolinstance['namespace'], + "{#KUBERNETES_OPENEBS_CSTORPOOLINSTANCE_NAME}": cstorpoolinstance['name']} discovery['data'].append(output) - sender = [ZabbixMetric(clustername, "kubernetes.openebs.cstorpoolclusters.discovery", json.dumps(discovery))] + sender = [ZabbixMetric(clustername, "kubernetes.openebs.cstorpoolinstances.discovery", json.dumps(discovery))] return sender -def ZabbixItemCstorpoolclusters(clustername, cstorpoolclusters=[]): +def ZabbixItemCstorpoolinstances(clustername, cstorpoolinstances=[]): """ - description: create a item for cstorpoolclusters, per namespace + description: create a item for cstorpoolinstances, per namespace return: class ZabbixMetric """ sender = [] - for cstorpoolcluster in cstorpoolclusters: - sender.append(ZabbixMetric(clustername, f"kubernetes.openebs.cstorpoolclusters.desiredInstances[{cstorpoolcluster['namespace']},{cstorpoolcluster['name']}]", cstorpoolcluster['instances']['desired']),) - sender.append(ZabbixMetric(clustername, f"kubernetes.openebs.cstorpoolclusters.healthyInstances[{cstorpoolcluster['namespace']},{cstorpoolcluster['name']}]", cstorpoolcluster['instances']['healthy']),) - sender.append(ZabbixMetric(clustername, f"kubernetes.openebs.cstorpoolclusters.provisionedInstances[{cstorpoolcluster['namespace']},{cstorpoolcluster['name']}]", cstorpoolcluster['instances']['provisioned']),) - sender.append(ZabbixMetric(clustername, f"kubernetes.openebs.cstorpoolclusters.desiredVersion[{cstorpoolcluster['namespace']},{cstorpoolcluster['name']}]", cstorpoolcluster['version']['desired']),) - sender.append(ZabbixMetric(clustername, f"kubernetes.openebs.cstorpoolclusters.currentVersion[{cstorpoolcluster['namespace']},{cstorpoolcluster['name']}]", cstorpoolcluster['version']['current']),) - + for cstorpoolinstance in cstorpoolinstances: + sender.append(ZabbixMetric(clustername, f"kubernetes.openebs.cstorpoolinstances.readonly[{cstorpoolinstance['namespace']},{cstorpoolinstance['name']}]", cstorpoolinstance['status']['readOnly']),) + sender.append(ZabbixMetric(clustername, f"kubernetes.openebs.cstorpoolinstances.provisionedReplicas[{cstorpoolinstance['namespace']},{cstorpoolinstance['name']}]", cstorpoolinstance['status']['provisionedReplicas']),) + sender.append(ZabbixMetric(clustername, f"kubernetes.openebs.cstorpoolinstances.healthyReplicas[{cstorpoolinstance['namespace']},{cstorpoolinstance['name']}]", cstorpoolinstance['status']['healthyReplicas']),) + sender.append(ZabbixMetric(clustername, f"kubernetes.openebs.cstorpoolinstances.status[{cstorpoolinstance['namespace']},{cstorpoolinstance['name']}]", cstorpoolinstance['status']['phase']),) + sender.append(ZabbixMetric(clustername, f"kubernetes.openebs.cstorpoolinstances.capacity.total[{cstorpoolinstance['namespace']},{cstorpoolinstance['name']}]", cstorpoolinstance['status']['capacity']['total']),) + sender.append(ZabbixMetric(clustername, f"kubernetes.openebs.cstorpoolinstances.capacity.free[{cstorpoolinstance['namespace']},{cstorpoolinstance['name']}]", cstorpoolinstance['status']['capacity']['free']),) + sender.append(ZabbixMetric(clustername, 
f"kubernetes.openebs.cstorpoolinstances.capacity.used[{cstorpoolinstance['namespace']},{cstorpoolinstance['name']}]", cstorpoolinstance['status']['capacity']['used']),) return sender -def baseOpenebsCstorpoolclusters(mode=None, config=None): +def baseOpenebsCstorpoolinstances(mode=None, config=None): """ - description: monitoring openebs cstorpoolclusters + description: monitoring openebs cstorpoolinstances return: class ZabbixMetric """ if mode == "discovery": - return ZabbixDiscoveryCstorpoolclusters(config['kubernetes']['name'], openebsGetCstorpoolclusters(config['monitoring']['openebs'])) + return ZabbixDiscoveryCstorpoolinstances(config['kubernetes']['name'], openebsGetCstorpoolinstances(config['monitoring']['openebs'])) if mode == "item": - return ZabbixItemCstorpoolclusters(config['kubernetes']['name'], openebsGetCstorpoolclusters(config['monitoring']['openebs'])) + return ZabbixItemCstorpoolinstances(config['kubernetes']['name'], openebsGetCstorpoolinstances(config['monitoring']['openebs'])) return [] From f635595b322fd7616b038e54df782367a9acf0a1 Mon Sep 17 00:00:00 2001 From: djerfy Date: Wed, 10 Jan 2024 20:29:16 +0100 Subject: [PATCH 17/34] feat(openebs): fix syntax Signed-off-by: djerfy --- src/modules/kubernetes/openebs/cstorpoolinstances.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/modules/kubernetes/openebs/cstorpoolinstances.py b/src/modules/kubernetes/openebs/cstorpoolinstances.py index 3131cfc..b44123e 100644 --- a/src/modules/kubernetes/openebs/cstorpoolinstances.py +++ b/src/modules/kubernetes/openebs/cstorpoolinstances.py @@ -36,7 +36,7 @@ def openebsGetCstorpoolinstances(config=None): if not matchLabels(config['labels']['exclude'], cstorpoolinstance['metadata']['labels']): continue - if any(c['name'] == json['name'] and c['namespace'] == json['namespace'] for c in cstorpoolinstance): + if any(c['name'] == json['name'] and c['namespace'] == json['namespace'] for c in cstorpoolinstances): continue cstorpoolinstances.append(json) From 972a77ece3c004a0b9c0654279c1e6ad6bfc3562 Mon Sep 17 00:00:00 2001 From: djerfy Date: Thu, 11 Jan 2024 11:31:41 +0100 Subject: [PATCH 18/34] feat: review threading and fix memory leak Signed-off-by: djerfy --- src/zabbix-kubernetes-discovery.py | 48 +++++++++++++++++------------- 1 file changed, 27 insertions(+), 21 deletions(-) diff --git a/src/zabbix-kubernetes-discovery.py b/src/zabbix-kubernetes-discovery.py index 81e8dcd..3440745 100644 --- a/src/zabbix-kubernetes-discovery.py +++ b/src/zabbix-kubernetes-discovery.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 -import argparse, sys, os, yaml +import argparse, sys, os, yaml, queue import logging, schedule, threading from time import sleep from kubernetes import config as kube_config @@ -40,62 +40,68 @@ logging.debug(f"-> Zabbix timeout: {config['zabbix']['timeout']}") logging.debug(f"-> Cluster name: {config['kubernetes']['name']}") -def mainSend(data): +def executeSender(data): try: logging.debug(data) zabbix.send(data) except Exception as e: logging.debug(e) -def mainThread(func): +def executeJobs(): try: - func_thread = threading.Thread(target=func) - func_thread.start() + jobs = jobs_queue.get() + jobs() + jobs_queue.task_done() except Exception as e: logging.error(e) if __name__ == "__main__": logging.info("Application zabbix-kubernetes-discovery started") + jobs_queue = queue.Queue() + # cronjobs if config['monitoring']['cronjobs']['enabled']: - schedule.every(config['zabbix']['schedule']['discovery']).seconds.do(mainThread, lambda: 
mainSend(baseCronjobs(mode="discovery", config=config))) - schedule.every(config['zabbix']['schedule']['items']).seconds.do(mainThread, lambda: mainSend(baseCronjobs(mode="item", config=config))) + schedule.every(config['zabbix']['schedule']['discovery']).seconds.do(jobs_queue.put, executeSender(baseCronjobs(mode="discovery", config=config))) + schedule.every(config['zabbix']['schedule']['items']).seconds.do(jobs_queue.put, executeSender(baseCronjobs(mode="item", config=config))) # daemonsets if config['monitoring']['daemonsets']['enabled']: - schedule.every(config['zabbix']['schedule']['discovery']).seconds.do(mainThread, lambda: mainSend(baseDaemonsets(mode="discovery", config=config))) - schedule.every(config['zabbix']['schedule']['items']).seconds.do(mainThread, lambda: mainSend(baseDaemonsets(mode="item", config=config))) + schedule.every(config['zabbix']['schedule']['discovery']).seconds.do(jobs_queue.put, executeSender(baseDaemonsets(mode="discovery", config=config))) + schedule.every(config['zabbix']['schedule']['items']).seconds.do(jobs_queue.put, executeSender(baseDaemonsets(mode="item", config=config))) # deployments if config['monitoring']['deployments']['enabled']: - schedule.every(config['zabbix']['schedule']['discovery']).seconds.do(mainThread, lambda: mainSend(baseDeployments(mode="discovery", config=config))) - schedule.every(config['zabbix']['schedule']['items']).seconds.do(mainThread, lambda: mainSend(baseDeployments(mode="item", config=config))) + schedule.every(config['zabbix']['schedule']['discovery']).seconds.do(jobs_queue.put, executeSender(baseDeployments(mode="discovery", config=config))) + schedule.every(config['zabbix']['schedule']['items']).seconds.do(jobs_queue.put, executeSender(baseDeployments(mode="item", config=config))) # nodes if config['monitoring']['nodes']['enabled']: - schedule.every(config['zabbix']['schedule']['discovery']).seconds.do(mainThread, lambda: mainSend(baseNodes(mode="discovery", config=config))) - schedule.every(config['zabbix']['schedule']['items']).seconds.do(mainThread, lambda: mainSend(baseNodes(mode="item", config=config))) + schedule.every(config['zabbix']['schedule']['discovery']).seconds.do(jobs_queue.put, executeSender(baseNodes(mode="discovery", config=config))) + schedule.every(config['zabbix']['schedule']['items']).seconds.do(jobs_queue.put, executeSender(baseNodes(mode="item", config=config))) # statefulsets if config['monitoring']['statefulsets']['enabled']: - schedule.every(config['zabbix']['schedule']['discovery']).seconds.do(mainThread, lambda: mainSend(baseStatefulsets(mode="discovery", config=config))) - schedule.every(config['zabbix']['schedule']['items']).seconds.do(mainThread, lambda: mainSend(baseStatefulsets(mode="item", config=config))) + schedule.every(config['zabbix']['schedule']['discovery']).seconds.do(jobs_queue.put, executeSender(baseStatefulsets(mode="discovery", config=config))) + schedule.every(config['zabbix']['schedule']['items']).seconds.do(jobs_queue.put, executeSender(baseStatefulsets(mode="item", config=config))) # volumes if config['monitoring']['volumes']['enabled']: - schedule.every(config['zabbix']['schedule']['discovery']).seconds.do(mainThread, lambda: mainSend(baseVolumes(mode="discovery", config=config))) - schedule.every(config['zabbix']['schedule']['items']).seconds.do(mainThread, lambda: mainSend(baseVolumes(mode="item", config=config))) + schedule.every(config['zabbix']['schedule']['discovery']).seconds.do(jobs_queue.put, executeSender(baseVolumes(mode="discovery", config=config))) + 
schedule.every(config['zabbix']['schedule']['items']).seconds.do(jobs_queue.put, executeSender(baseVolumes(mode="item", config=config))) # openebs if config['monitoring']['openebs']['enabled']: # cstorpoolclusters - schedule.every(config['zabbix']['schedule']['discovery']).seconds.do(mainThread, lambda: mainSend(baseOpenebsCstorpoolclusters(mode="discovery", config=config))) - schedule.every(config['zabbix']['schedule']['items']).seconds.do(mainThread, lambda: mainSend(baseOpenebsCstorpoolclusters(mode="item", config=config))) + schedule.every(config['zabbix']['schedule']['discovery']).seconds.do(jobs_queue.put, executeSender(baseOpenebsCstorpoolclusters(mode="discovery", config=config))) + schedule.every(config['zabbix']['schedule']['items']).seconds.do(jobs_queue.put, executeSender(baseOpenebsCstorpoolclusters(mode="item", config=config))) # cstorpoolinstances - schedule.every(config['zabbix']['schedule']['discovery']).seconds.do(mainThread, lambda: mainSend(baseOpenebsCstorpoolinstances(mode="discovery", config=config))) - schedule.every(config['zabbix']['schedule']['items']).seconds.do(mainThread, lambda: mainSend(baseOpenebsCstorpoolinstances(mode="item", config=config))) + schedule.every(config['zabbix']['schedule']['discovery']).seconds.do(jobs_queue.put, executeSender(baseOpenebsCstorpoolinstances(mode="discovery", config=config))) + schedule.every(config['zabbix']['schedule']['items']).seconds.do(jobs_queue.put, executeSender(baseOpenebsCstorpoolinstances(mode="item", config=config))) + # thread + thread = threading.Thread(target=executeJobs) + thread.start() # tasks while True: From 9af31bcb2bf9106a54ae830fd1d93c10e8bf17b2 Mon Sep 17 00:00:00 2001 From: djerfy Date: Thu, 11 Jan 2024 12:09:30 +0100 Subject: [PATCH 19/34] fix: job queue execution Signed-off-by: djerfy --- src/zabbix-kubernetes-discovery.py | 43 +++++++++++++++--------------- 1 file changed, 22 insertions(+), 21 deletions(-) diff --git a/src/zabbix-kubernetes-discovery.py b/src/zabbix-kubernetes-discovery.py index 3440745..9b4e8c9 100644 --- a/src/zabbix-kubernetes-discovery.py +++ b/src/zabbix-kubernetes-discovery.py @@ -48,12 +48,13 @@ def executeSender(data): logging.debug(e) def executeJobs(): - try: + while True: jobs = jobs_queue.get() - jobs() - jobs_queue.task_done() - except Exception as e: - logging.error(e) + if jobs is not None: + jobs() + jobs_queue.task_done() + else: + logging.info("No job in queue") if __name__ == "__main__": logging.info("Application zabbix-kubernetes-discovery started") @@ -62,42 +63,42 @@ def executeJobs(): # cronjobs if config['monitoring']['cronjobs']['enabled']: - schedule.every(config['zabbix']['schedule']['discovery']).seconds.do(jobs_queue.put, executeSender(baseCronjobs(mode="discovery", config=config))) - schedule.every(config['zabbix']['schedule']['items']).seconds.do(jobs_queue.put, executeSender(baseCronjobs(mode="item", config=config))) + schedule.every(config['zabbix']['schedule']['discovery']).seconds.do(jobs_queue.put, lambda: executeSender(baseCronjobs(mode="discovery", config=config))) + schedule.every(config['zabbix']['schedule']['items']).seconds.do(jobs_queue.put, lambda: executeSender(baseCronjobs(mode="item", config=config))) # daemonsets if config['monitoring']['daemonsets']['enabled']: - schedule.every(config['zabbix']['schedule']['discovery']).seconds.do(jobs_queue.put, executeSender(baseDaemonsets(mode="discovery", config=config))) - schedule.every(config['zabbix']['schedule']['items']).seconds.do(jobs_queue.put, executeSender(baseDaemonsets(mode="item", 
config=config))) + schedule.every(config['zabbix']['schedule']['discovery']).seconds.do(jobs_queue.put, lambda: executeSender(baseDaemonsets(mode="discovery", config=config))) + schedule.every(config['zabbix']['schedule']['items']).seconds.do(jobs_queue.put, lambda: executeSender(baseDaemonsets(mode="item", config=config))) # deployments if config['monitoring']['deployments']['enabled']: - schedule.every(config['zabbix']['schedule']['discovery']).seconds.do(jobs_queue.put, executeSender(baseDeployments(mode="discovery", config=config))) - schedule.every(config['zabbix']['schedule']['items']).seconds.do(jobs_queue.put, executeSender(baseDeployments(mode="item", config=config))) + schedule.every(config['zabbix']['schedule']['discovery']).seconds.do(jobs_queue.put, lambda: executeSender(baseDeployments(mode="discovery", config=config))) + schedule.every(config['zabbix']['schedule']['items']).seconds.do(jobs_queue.put, lambda: executeSender(baseDeployments(mode="item", config=config))) # nodes if config['monitoring']['nodes']['enabled']: - schedule.every(config['zabbix']['schedule']['discovery']).seconds.do(jobs_queue.put, executeSender(baseNodes(mode="discovery", config=config))) - schedule.every(config['zabbix']['schedule']['items']).seconds.do(jobs_queue.put, executeSender(baseNodes(mode="item", config=config))) + schedule.every(config['zabbix']['schedule']['discovery']).seconds.do(jobs_queue.put, lambda: executeSender(baseNodes(mode="discovery", config=config))) + schedule.every(config['zabbix']['schedule']['items']).seconds.do(jobs_queue.put, lambda: executeSender(baseNodes(mode="item", config=config))) # statefulsets if config['monitoring']['statefulsets']['enabled']: - schedule.every(config['zabbix']['schedule']['discovery']).seconds.do(jobs_queue.put, executeSender(baseStatefulsets(mode="discovery", config=config))) - schedule.every(config['zabbix']['schedule']['items']).seconds.do(jobs_queue.put, executeSender(baseStatefulsets(mode="item", config=config))) + schedule.every(config['zabbix']['schedule']['discovery']).seconds.do(jobs_queue.put, lambda: executeSender(baseStatefulsets(mode="discovery", config=config))) + schedule.every(config['zabbix']['schedule']['items']).seconds.do(jobs_queue.put, lambda: executeSender(baseStatefulsets(mode="item", config=config))) # volumes if config['monitoring']['volumes']['enabled']: - schedule.every(config['zabbix']['schedule']['discovery']).seconds.do(jobs_queue.put, executeSender(baseVolumes(mode="discovery", config=config))) - schedule.every(config['zabbix']['schedule']['items']).seconds.do(jobs_queue.put, executeSender(baseVolumes(mode="item", config=config))) + schedule.every(config['zabbix']['schedule']['discovery']).seconds.do(jobs_queue.put, lambda: executeSender(baseVolumes(mode="discovery", config=config))) + schedule.every(config['zabbix']['schedule']['items']).seconds.do(jobs_queue.put, lambda: executeSender(baseVolumes(mode="item", config=config))) # openebs if config['monitoring']['openebs']['enabled']: # cstorpoolclusters - schedule.every(config['zabbix']['schedule']['discovery']).seconds.do(jobs_queue.put, executeSender(baseOpenebsCstorpoolclusters(mode="discovery", config=config))) - schedule.every(config['zabbix']['schedule']['items']).seconds.do(jobs_queue.put, executeSender(baseOpenebsCstorpoolclusters(mode="item", config=config))) + schedule.every(config['zabbix']['schedule']['discovery']).seconds.do(jobs_queue.put, lambda: executeSender(baseOpenebsCstorpoolclusters(mode="discovery", config=config))) + 
schedule.every(config['zabbix']['schedule']['items']).seconds.do(jobs_queue.put, lambda: executeSender(baseOpenebsCstorpoolclusters(mode="item", config=config))) # cstorpoolinstances - schedule.every(config['zabbix']['schedule']['discovery']).seconds.do(jobs_queue.put, executeSender(baseOpenebsCstorpoolinstances(mode="discovery", config=config))) - schedule.every(config['zabbix']['schedule']['items']).seconds.do(jobs_queue.put, executeSender(baseOpenebsCstorpoolinstances(mode="item", config=config))) + schedule.every(config['zabbix']['schedule']['discovery']).seconds.do(jobs_queue.put, lambda: executeSender(baseOpenebsCstorpoolinstances(mode="discovery", config=config))) + schedule.every(config['zabbix']['schedule']['items']).seconds.do(jobs_queue.put, lambda: executeSender(baseOpenebsCstorpoolinstances(mode="item", config=config))) # thread thread = threading.Thread(target=executeJobs) From 4755b8b2f9bfcde883416003e1bc20d90cd4e20e Mon Sep 17 00:00:00 2001 From: djerfy Date: Thu, 11 Jan 2024 13:35:58 +0100 Subject: [PATCH 20/34] feat: improve logging and add logger Signed-off-by: djerfy --- src/config.yaml | 4 ++++ src/modules/kubernetes/base/cronjobs.py | 4 +++- src/modules/kubernetes/base/daemonsets.py | 4 +++- src/modules/kubernetes/base/deployments.py | 4 +++- src/modules/kubernetes/base/nodes.py | 4 +++- src/modules/kubernetes/base/statefulsets.py | 4 +++- src/modules/kubernetes/base/volumes.py | 2 ++ src/modules/kubernetes/openebs/cstorpoolclusters.py | 4 +++- .../kubernetes/openebs/cstorpoolinstances.py | 4 +++- .../kubernetes/trivy/vulnerabilityreports.py | 3 ++- src/requirements.txt | 1 + src/zabbix-kubernetes-discovery.py | 13 +++++++++---- 12 files changed, 39 insertions(+), 12 deletions(-) diff --git a/src/config.yaml b/src/config.yaml index 3cc1110..9bf9ebd 100644 --- a/src/config.yaml +++ b/src/config.yaml @@ -70,3 +70,7 @@ monitoring: labels: include: [] exclude: [] + +metrics: + output: DEBUG + memory: True diff --git a/src/modules/kubernetes/base/cronjobs.py b/src/modules/kubernetes/base/cronjobs.py index 31e496e..6a501db 100644 --- a/src/modules/kubernetes/base/cronjobs.py +++ b/src/modules/kubernetes/base/cronjobs.py @@ -2,9 +2,10 @@ from datetime import datetime from pyzabbix import ZabbixMetric from modules.common.functions import * -import json, urllib3 +import json, urllib3, logging urllib3.disable_warnings() +logging = logging.getLogger("kubernetes.base.cronjobs") def kubernetesGetCronjobs(config=None): """ @@ -114,6 +115,7 @@ def baseCronjobs(mode=None, config=None): description: monitoring cronjobs return: class ZabbixMetric """ + logging.info(f"Function baseCronjobs() executed: {mode}") if mode == "discovery": return zabbixDiscoveryCronjobs(config['kubernetes']['name'], kubernetesGetCronjobs(config['monitoring']['cronjobs'])) if mode == "item": diff --git a/src/modules/kubernetes/base/daemonsets.py b/src/modules/kubernetes/base/daemonsets.py index e424e96..fc28ea9 100644 --- a/src/modules/kubernetes/base/daemonsets.py +++ b/src/modules/kubernetes/base/daemonsets.py @@ -1,9 +1,10 @@ from kubernetes import client from pyzabbix import ZabbixMetric from modules.common.functions import * -import json, urllib3 +import json, urllib3, logging urllib3.disable_warnings() +logging = logging.getLogger("kubernetes.base.daemonsets") def kubernetesGetDaemonsets(config=None): """ @@ -82,6 +83,7 @@ def baseDaemonsets(mode=None, config=None): description: monitoring daemonsets return: class ZabbixMetric """ + logging.info(f"Function baseDaemonsets() executed: {mode}") if mode == 
"discovery": return zabbixDiscoveryDaemonsets(config['kubernetes']['name'], kubernetesGetDaemonsets(config['monitoring']['daemonsets'])) if mode == "item": diff --git a/src/modules/kubernetes/base/deployments.py b/src/modules/kubernetes/base/deployments.py index 8fdacb2..9f3f127 100644 --- a/src/modules/kubernetes/base/deployments.py +++ b/src/modules/kubernetes/base/deployments.py @@ -1,9 +1,10 @@ from kubernetes import client from pyzabbix import ZabbixMetric from modules.common.functions import * -import json, urllib3 +import json, urllib3, logging urllib3.disable_warnings() +logging = logging.getLogger("kubernetes.base.deployments") def kubernetesGetDeployments(config=None): """ @@ -80,6 +81,7 @@ def baseDeployments(mode=None, config=None): description: monitoring deployments return: class ZabbixMetric """ + logging.info(f"Function baseDeployments() executed: {mode}") if mode == "discovery": return zabbixDiscoveryDeployments(config['kubernetes']['name'], kubernetesGetDeployments(config['monitoring']['deployments'])) if mode == "item": diff --git a/src/modules/kubernetes/base/nodes.py b/src/modules/kubernetes/base/nodes.py index 9fffb40..2ccd119 100644 --- a/src/modules/kubernetes/base/nodes.py +++ b/src/modules/kubernetes/base/nodes.py @@ -1,9 +1,10 @@ from kubernetes import client from pyzabbix import ZabbixMetric from modules.common.functions import * -import json, urllib3 +import json, urllib3, logging urllib3.disable_warnings() +logging = logging.getLogger("kubernetes.base.nodes") def kubernetesGetNodes(config=None): """ @@ -87,6 +88,7 @@ def baseNodes(mode=None, config=None): description: monitoring nodes return: class ZabbixMetric """ + logging.info(f"Function baseNodes() executed: {mode}") if mode == "discovery": return zabbixDiscoveryNodes(config['kubernetes']['name'], kubernetesGetNodes(config['monitoring']['nodes'])) if mode == "item": diff --git a/src/modules/kubernetes/base/statefulsets.py b/src/modules/kubernetes/base/statefulsets.py index fcf01d3..a9fa742 100644 --- a/src/modules/kubernetes/base/statefulsets.py +++ b/src/modules/kubernetes/base/statefulsets.py @@ -1,9 +1,10 @@ from kubernetes import client from pyzabbix import ZabbixMetric from modules.common.functions import * -import json, urllib3 +import json, urllib3, logging urllib3.disable_warnings() +logging = logging.getLogger("kubernetes.base.statefulsets") def kubernetesGetStatefulsets(config=None): """ @@ -80,6 +81,7 @@ def baseStatefulsets(mode=None, config=None): description: monitoring statefulsets return: class ZabbixMetric """ + logging.info(f"Function baseStatefulsets() executed: {mode}") if mode == "discovery": return zabbixDiscoveryStatefulsets(config['kubernetes']['name'], kubernetesGetStatefulsets(config['monitoring']['statefulsets'])) if mode == "item": diff --git a/src/modules/kubernetes/base/volumes.py b/src/modules/kubernetes/base/volumes.py index 777c5dd..943dfd5 100644 --- a/src/modules/kubernetes/base/volumes.py +++ b/src/modules/kubernetes/base/volumes.py @@ -4,6 +4,7 @@ import json, urllib3, re, logging urllib3.disable_warnings() +logging = logging.getLogger("kubernetes.base.volumes") def kubernetesGetVolumes(config=None): """ @@ -93,6 +94,7 @@ def baseVolumes(mode=None, config=None): description: monitoring volumes return: class ZabbixMetric """ + logging.info(f"Function baseVolumes() executed: {mode}") if mode == "discovery": return zabbixDiscoveryVolumes(config['kubernetes']['name'], kubernetesGetVolumes(config['monitoring']['volumes'])) if mode == "item": diff --git 
a/src/modules/kubernetes/openebs/cstorpoolclusters.py b/src/modules/kubernetes/openebs/cstorpoolclusters.py index daf31ca..f9ef130 100644 --- a/src/modules/kubernetes/openebs/cstorpoolclusters.py +++ b/src/modules/kubernetes/openebs/cstorpoolclusters.py @@ -1,9 +1,10 @@ from kubernetes import client from pyzabbix import ZabbixMetric from modules.common.functions import * -import json, urllib3 +import json, urllib3, logging urllib3.disable_warnings() +logging = logging.getLogger("kubernetes.openebs.cstorpoolclusters") def openebsGetCstorpoolclusters(config=None): """ @@ -85,6 +86,7 @@ def baseOpenebsCstorpoolclusters(mode=None, config=None): description: monitoring openebs cstorpoolclusters return: class ZabbixMetric """ + logging.info(f"Function baseOpenebsCstorpoolclusters() executed: {mode}") if mode == "discovery": return ZabbixDiscoveryCstorpoolclusters(config['kubernetes']['name'], openebsGetCstorpoolclusters(config['monitoring']['openebs'])) if mode == "item": diff --git a/src/modules/kubernetes/openebs/cstorpoolinstances.py b/src/modules/kubernetes/openebs/cstorpoolinstances.py index b44123e..ce514d4 100644 --- a/src/modules/kubernetes/openebs/cstorpoolinstances.py +++ b/src/modules/kubernetes/openebs/cstorpoolinstances.py @@ -1,9 +1,10 @@ from kubernetes import client from pyzabbix import ZabbixMetric from modules.common.functions import * -import json, urllib3 +import json, urllib3, logging urllib3.disable_warnings() +logging = logging.getLogger("kubernetes.openebs.cstorpoolinstances") def openebsGetCstorpoolinstances(config=None): """ @@ -82,6 +83,7 @@ def baseOpenebsCstorpoolinstances(mode=None, config=None): description: monitoring openebs cstorpoolinstances return: class ZabbixMetric """ + logging.info(f"Function baseOpenebsCstorpoolinstances() executed: {mode}") if mode == "discovery": return ZabbixDiscoveryCstorpoolinstances(config['kubernetes']['name'], openebsGetCstorpoolinstances(config['monitoring']['openebs'])) if mode == "item": diff --git a/src/modules/kubernetes/trivy/vulnerabilityreports.py b/src/modules/kubernetes/trivy/vulnerabilityreports.py index 1fad38f..9de84a8 100644 --- a/src/modules/kubernetes/trivy/vulnerabilityreports.py +++ b/src/modules/kubernetes/trivy/vulnerabilityreports.py @@ -1,9 +1,10 @@ from kubernetes import client from pyzabbix import ZabbixMetric from modules.common.functions import * -import json, urllib3 +import json, urllib3, logging urllib3.disable_warnings() +logging = logging.getLogger("kubernetes.trivy.vulnerabilityreports") def trivyGetVulnerabilityreports(config=None): """ diff --git a/src/requirements.txt b/src/requirements.txt index 12e2309..b54e6cd 100644 --- a/src/requirements.txt +++ b/src/requirements.txt @@ -3,3 +3,4 @@ kubernetes==29.0.0 py-zabbix==1.1.7 urllib3==2.1.0 schedule==1.2.1 +psutil==5.9.7 diff --git a/src/zabbix-kubernetes-discovery.py b/src/zabbix-kubernetes-discovery.py index 9b4e8c9..e00c424 100644 --- a/src/zabbix-kubernetes-discovery.py +++ b/src/zabbix-kubernetes-discovery.py @@ -1,7 +1,7 @@ #!/usr/bin/env python3 import argparse, sys, os, yaml, queue -import logging, schedule, threading +import logging, schedule, threading, psutil from time import sleep from kubernetes import config as kube_config from pyzabbix import ZabbixSender @@ -16,8 +16,8 @@ logging.basicConfig( datefmt="%d/%m/%Y %H:%M:%S", format="[%(asctime)s] (%(levelname)s) %(name)s.%(funcName)s():%(lineno)d - %(message)s", - level=getattr(logging, args.log_level) -) + level=getattr(logging, args.log_level)) +logging = logging.getLogger("main") if 
os.path.exists("/var/run/secrets/kubernetes.io/serviceaccount/token") and not os.getenv('KUBECONFIG'):
     kube_config.load_incluster_config()
 
@@ -48,13 +48,17 @@ def executeSender(data):
         logging.debug(e)
 
 def executeJobs():
+    p = psutil.Process(os.getpid())
+    logging.debug(f"Program memory used (rss): {p.memory_info().rss / 1024 / 1024} MiB")
+
     while True:
+        logging.debug(f"{jobs_queue.qsize()} job(s) in queue")
         jobs = jobs_queue.get()
         if jobs is not None:
             jobs()
             jobs_queue.task_done()
         else:
-            logging.info("No job in queue")
+            logging.debug("0 job in queue")
 
 if __name__ == "__main__":
     logging.info("Application zabbix-kubernetes-discovery started")
@@ -107,5 +111,5 @@ def executeJobs():
 
     # tasks
     while True:
         schedule.run_pending()
         sleep(1)
\ No newline at end of file

From 47a42637748e8e25ff88424fa9808c7099c95723 Mon Sep 17 00:00:00 2001
From: djerfy
Date: Thu, 11 Jan 2024 13:36:30 +0100
Subject: [PATCH 21/34] feat: cleanup config

Signed-off-by: djerfy
---
 src/config.yaml | 4 ----
 1 file changed, 4 deletions(-)

diff --git a/src/config.yaml b/src/config.yaml
index 9bf9ebd..3cc1110 100644
--- a/src/config.yaml
+++ b/src/config.yaml
@@ -70,7 +70,3 @@ monitoring:
     labels:
       include: []
       exclude: []
-
-metrics:
-  output: DEBUG
-  memory: True

From 3831a541e4b75d35e7514bcdf4923f9238cf9acb Mon Sep 17 00:00:00 2001
From: djerfy
Date: Thu, 11 Jan 2024 13:46:22 +0100
Subject: [PATCH 22/34] feat: improve memory overview

Signed-off-by: djerfy
---
 src/zabbix-kubernetes-discovery.py | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)

diff --git a/src/zabbix-kubernetes-discovery.py b/src/zabbix-kubernetes-discovery.py
index e00c424..41de35b 100644
--- a/src/zabbix-kubernetes-discovery.py
+++ b/src/zabbix-kubernetes-discovery.py
@@ -48,10 +48,8 @@ def executeSender(data):
         logging.debug(e)
 
 def executeJobs():
-    p = psutil.Process(os.getpid())
-    logging.debug(f"Program memory used (rss): {p.memory_info().rss / 1024 / 1024} MiB")
-
     while True:
+        logging.debug(f"Program memory used (rss): {round(psutil.Process(os.getpid()).memory_info().rss / 1024 / 1024)} MiB")
         logging.debug(f"{jobs_queue.qsize()} job(s) in queue")
         jobs = jobs_queue.get()
         if jobs is not None:
             jobs()
             jobs_queue.task_done()

From a6e444785175d59a5e817a9b14b5734fc396e524 Mon Sep 17 00:00:00 2001
From: djerfy
Date: Thu, 11 Jan 2024 15:49:41 +0100
Subject: [PATCH 23/34] feat: improve loglevel config

Signed-off-by: djerfy
---
 src/config.yaml                    |  9 ++++++---
 src/zabbix-kubernetes-discovery.py | 10 ++++------
 2 files changed, 10 insertions(+), 9 deletions(-)

diff --git a/src/config.yaml b/src/config.yaml
index 3cc1110..a660e49 100644
--- a/src/config.yaml
+++ b/src/config.yaml
@@ -1,3 +1,9 @@
+output:
+  level: INFO
+
+kubernetes:
+  name: localhost
+
 zabbix:
   endpoint: localhost
   timeout: 10
@@ -5,9 +11,6 @@ zabbix:
     discovery: 3600
     items: 120
 
-kubernetes:
-  name: localhost
-
 monitoring:
diff --git a/src/zabbix-kubernetes-discovery.py b/src/zabbix-kubernetes-discovery.py
index 41de35b..99a2acc 100644
--- a/src/zabbix-kubernetes-discovery.py
+++ b/src/zabbix-kubernetes-discovery.py
@@ -10,13 +10,15 @@
 parser = argparse.ArgumentParser()
 parser.add_argument("--config-file", dest="config_file", action="store", required=False, help="Configuration file (default: config.yaml)", default="config.yaml")
-parser.add_argument("--log-level", dest="log_level", action="store", required=False, help="Logging output log-level (default: INFO)", default="INFO", choices=["INFO", "WARNING", "ERROR", "DEBUG"])
 args = parser.parse_args()
 
+with open(args.config_file, "r") as f:
+    config = 
yaml.load(f, Loader=yaml.FullLoader) + logging.basicConfig( datefmt="%d/%m/%Y %H:%M:%S", format="[%(asctime)s] (%(levelname)s) %(name)s.%(funcName)s():%(lineno)d - %(message)s", - level=getattr(logging, args.log_level)) + level=getattr(logging, config['output']['level'])) logging = logging.getLogger("main") if os.path.exists("/var/run/secrets/kubernetes.io/serviceaccount/token") and not os.getenv('KUBECONFIG'): @@ -30,10 +32,6 @@ logging.error("Unable to load Kubernetes credentials") sys.exit(1) -with open(args.config_file, "r") as f: - config = yaml.load(f, Loader=yaml.FullLoader) - logging.debug(f"Configuration file {args.config_file} loaded successfully") - zabbix = ZabbixSender(config['zabbix']['endpoint']) zabbix.timeout = int(config['zabbix']['timeout']) logging.debug(f"-> Zabbix endpoint: {config['zabbix']['endpoint']}") From 1c78b65deac4d082bea07f0c9ba7607e8b11fe8e Mon Sep 17 00:00:00 2001 From: djerfy Date: Thu, 11 Jan 2024 18:28:24 +0100 Subject: [PATCH 24/34] feat: replace py-zabbix by zappix and global review Signed-off-by: djerfy --- src/config.yaml | 4 +- src/modules/kubernetes/base/__init__.py | 12 ++-- src/modules/kubernetes/base/cronjobs.py | 54 ++++++-------- src/modules/kubernetes/base/daemonsets.py | 56 ++++++--------- src/modules/kubernetes/base/deployments.py | 54 ++++++-------- src/modules/kubernetes/base/nodes.py | 72 ++++++++----------- src/modules/kubernetes/base/statefulsets.py | 54 ++++++-------- src/modules/kubernetes/base/volumes.py | 53 +++++--------- src/modules/kubernetes/openebs/__init__.py | 4 +- .../kubernetes/openebs/cstorpoolclusters.py | 49 +++++-------- .../kubernetes/openebs/cstorpoolinstances.py | 58 ++++++--------- .../kubernetes/trivy/vulnerabilityreports.py | 3 +- src/requirements.txt | 2 +- src/zabbix-kubernetes-discovery.py | 42 +++++------ 14 files changed, 202 insertions(+), 315 deletions(-) diff --git a/src/config.yaml b/src/config.yaml index a660e49..8d710c5 100644 --- a/src/config.yaml +++ b/src/config.yaml @@ -1,5 +1,5 @@ output: - level: INFO + level: DEBUG kubernetes: name: localhost @@ -9,7 +9,7 @@ zabbix: timeout: 10 schedule: discovery: 3600 - items: 120 + items: 3 monitoring: # base diff --git a/src/modules/kubernetes/base/__init__.py b/src/modules/kubernetes/base/__init__.py index 1bcce39..460bf46 100644 --- a/src/modules/kubernetes/base/__init__.py +++ b/src/modules/kubernetes/base/__init__.py @@ -1,6 +1,6 @@ -from modules.kubernetes.base.nodes import baseNodes -from modules.kubernetes.base.cronjobs import baseCronjobs -from modules.kubernetes.base.volumes import baseVolumes -from modules.kubernetes.base.statefulsets import baseStatefulsets -from modules.kubernetes.base.deployments import baseDeployments -from modules.kubernetes.base.daemonsets import baseDaemonsets +from modules.kubernetes.base.cronjobs import zabbixDiscoveryCronjobs, zabbixItemsCronjobs +from modules.kubernetes.base.daemonsets import zabbixDiscoveryDaemonsets, zabbixItemsDaemonsets +from modules.kubernetes.base.deployments import zabbixDiscoveryDeployments, zabbixItemsDeployments +from modules.kubernetes.base.nodes import zabbixDiscoveryNodes, zabbixItemsNodes +from modules.kubernetes.base.statefulsets import zabbixDiscoveryStatefulsets, zabbixItemsStatefulsets +from modules.kubernetes.base.volumes import zabbixDiscoveryVolumes, zabbixItemsVolumes diff --git a/src/modules/kubernetes/base/cronjobs.py b/src/modules/kubernetes/base/cronjobs.py index 6a501db..1c2e26b 100644 --- a/src/modules/kubernetes/base/cronjobs.py +++ b/src/modules/kubernetes/base/cronjobs.py 
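[Editor's aside, not part of the patch: this commit reworks the per-resource contract. Each module now exposes a zabbixDiscovery*() function that returns a single (host, key, LLD-JSON) entry and a zabbixItems*() function that returns one entry per metric; a zappix-based sender is assumed to forward those entries, since the sender side is not shown in this excerpt. A minimal sketch of the convention, with hypothetical names and values:

import json

def zabbixDiscoveryExample(config):
    # one discovery entry for the whole resource type: (host, key, payload)
    discovery = {"data": [{"{#EXAMPLE_NAME}": "demo"}]}
    return (config['kubernetes']['name'], "kubernetes.example.discovery", json.dumps(discovery))

def zabbixItemsExample(config):
    # one (host, key, value) entry per metric, appended as a single tuple
    items = []
    items.append((config['kubernetes']['name'], "kubernetes.example.status[demo]", 1))
    return items

End of aside.]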
@@ -1,13 +1,12 @@ from kubernetes import client from datetime import datetime -from pyzabbix import ZabbixMetric from modules.common.functions import * import json, urllib3, logging urllib3.disable_warnings() logging = logging.getLogger("kubernetes.base.cronjobs") -def kubernetesGetCronjobs(config=None): +def kubernetesGetCronjobs(config): """ description: get cronjobs data return: list @@ -68,56 +67,43 @@ def kubernetesGetCronjobs(config=None): } } - if matchLabels(config['labels']['exclude'], cronjob.metadata.labels): - continue - - if config['labels']['include'] != []: - if not matchLabels(config['labels']['include'], cronjob.metadata.labels): - continue + if cronjob.get("metadata"): + if cronjob['metadata'].get("labels"): + if matchLabels(config['monitoring']['cronjobs']['labels']['exclude'], cronjob.metadata.labels): + continue + if config['labels']['include'] != []: + if not matchLabels(config['monitoring']['cronjobs']['labels']['include'], cronjob.metadata.labels): + continue cronjobs.append(json) return cronjobs -def zabbixDiscoveryCronjobs(clustername, cronjobs=[]): +def zabbixDiscoveryCronjobs(config): """ description: create a discovery for cronjob, per namespace - return: class ZabbixMetric + return: dict """ discovery = {"data":[]} - for cronjob in cronjobs: + for cronjob in kubernetesGetCronjobs(config): output = { "{#KUBERNETES_CRONJOB_NAMESPACE}": cronjob['namespace'], "{#KUBERNETES_CRONJOB_NAME}": cronjob['name']} discovery['data'].append(output) - sender = [ZabbixMetric(clustername, "kubernetes.cronjob.discovery", json.dumps(discovery))] - - return sender + return [config['kubernetes']['name'], "kubernetes.cronjobs.discovery", json.dumps(discovery)] -def zabbixItemCronjobs(clustername, cronjobs=[]): +def zabbixItemsCronjobs(config): """ description: create a item for cronjob, per namespace - return: class ZabbixResponse + return: list """ - sender = [] - - for cronjob in cronjobs: - sender.append(ZabbixMetric(clustername, f"kubernetes.cronjob.status[{cronjob['namespace']},{cronjob['name']}]", cronjob['status']),) - sender.append(ZabbixMetric(clustername, f"kubernetes.cronjob.reason[{cronjob['namespace']},{cronjob['name']}]", cronjob['last_job']['reason']),) - sender.append(ZabbixMetric(clustername, f"kubernetes.cronjob.message[{cronjob['namespace']},{cronjob['name']}]", cronjob['last_job']['message']),) + items = [] - return sender + for cronjob in kubernetesGetCronjobs(config): + items.append(config['kubernetes']['name'], f"kubernetes.cronjob.status[{cronjob['namespace']},{cronjob['name']}]", cronjob['status']) + items.append(config['kubernetes']['name'], f"kubernetes.cronjob.reason[{cronjob['namespace']},{cronjob['name']}]", cronjob['last_job']['reason']) + items.append(config['kubernetes']['name'], f"kubernetes.cronjob.message[{cronjob['namespace']},{cronjob['name']}]", cronjob['last_job']['message']) -def baseCronjobs(mode=None, config=None): - """ - description: monitoring cronjobs - return: class ZabbixMetric - """ - logging.info(f"Function baseCronjobs() executed: {mode}") - if mode == "discovery": - return zabbixDiscoveryCronjobs(config['kubernetes']['name'], kubernetesGetCronjobs(config['monitoring']['cronjobs'])) - if mode == "item": - return zabbixItemCronjobs(config['kubernetes']['name'], kubernetesGetCronjobs(config['monitoring']['cronjobs'])) - return [] + return items diff --git a/src/modules/kubernetes/base/daemonsets.py b/src/modules/kubernetes/base/daemonsets.py index fc28ea9..e019e85 100644 --- a/src/modules/kubernetes/base/daemonsets.py +++ 
b/src/modules/kubernetes/base/daemonsets.py @@ -1,12 +1,11 @@ from kubernetes import client -from pyzabbix import ZabbixMetric from modules.common.functions import * import json, urllib3, logging urllib3.disable_warnings() logging = logging.getLogger("kubernetes.base.daemonsets") -def kubernetesGetDaemonsets(config=None): +def kubernetesGetDaemonsets(config): """ description: get daemonsets data return: list @@ -32,12 +31,13 @@ def kubernetesGetDaemonsets(config=None): if json['replicas'][i] is None: json['replicas'][i] = 0 - if matchLabels(config['labels']['exclude'], daemonset.metadata.labels): - continue - - if config['labels']['include'] != []: - if not matchLabels(config['labels']['include'], daemonset.metadata.labels): - continue + if daemonset.get("metadata"): + if daemonset['metadata'].get("labels"): + if matchLabels(config['monitoring']['daemonsets']['labels']['exclude'], daemonset.metadata.labels): + continue + if config['labels']['include'] != []: + if not matchLabels(config['monitoring']['daemonsets']['labels']['include'], daemonset.metadata.labels): + continue if any(d['name'] == json['name'] and d['namespace'] == json['namespace'] for d in daemonsets): continue @@ -46,46 +46,32 @@ def kubernetesGetDaemonsets(config=None): return daemonsets -def zabbixDiscoveryDaemonsets(clustername, daemonsets=[]): +def zabbixDiscoveryDaemonsets(config): """ description: create a discovery for daemonset, per namespace - return: class ZabbixMetric + return: dict """ discovery = {"data":[]} - for daemonset in daemonsets: + for daemonset in kubernetesGetDaemonsets(config): output = { "{#KUBERNETES_DAEMONSET_NAMESPACE}": daemonset['namespace'], "{#KUBERNETES_DAEMONSET_NAME}": daemonset['name']} discovery['data'].append(output) - sender = [ZabbixMetric(clustername, "kubernetes.daemonset.discovery", json.dumps(discovery))] + return [config['kubernetes']['name'], "kubernetes.daemonsets.discovery", json.dumps(discovery)] - return sender - -def zabbixItemDaemonsets(clustername, daemonsets=[]): +def zabbixItemsDaemonsets(config): """ description: create a item for daemonset, per namespace - return: class ZabbixMetric + return: list """ - sender = [] + items = [] - for daemonset in daemonsets: - sender.append(ZabbixMetric(clustername, f"kubernetes.daemonset.desiredReplicas[{daemonset['namespace']},{daemonset['name']}]", daemonset['replicas']['desired']),) - sender.append(ZabbixMetric(clustername, f"kubernetes.daemonset.currentReplicas[{daemonset['namespace']},{daemonset['name']}]", daemonset['replicas']['current']),) - sender.append(ZabbixMetric(clustername, f"kubernetes.daemonset.availableReplicas[{daemonset['namespace']},{daemonset['name']}]", daemonset['replicas']['available']),) - sender.append(ZabbixMetric(clustername, f"kubernetes.daemonset.readyReplicas[{daemonset['namespace']},{daemonset['name']}]", daemonset['replicas']['ready']),) + for daemonset in kubernetesGetDaemonsets(config): + items.append(config['kubernetes']['name'], f"kubernetes.daemonset.desiredReplicas[{daemonset['namespace']},{daemonset['name']}]", daemonset['replicas']['desired']) + items.append(config['kubernetes']['name'], f"kubernetes.daemonset.currentReplicas[{daemonset['namespace']},{daemonset['name']}]", daemonset['replicas']['current']) + items.append(config['kubernetes']['name'], f"kubernetes.daemonset.availableReplicas[{daemonset['namespace']},{daemonset['name']}]", daemonset['replicas']['available']) + items.append(config['kubernetes']['name'], 
f"kubernetes.daemonset.readyReplicas[{daemonset['namespace']},{daemonset['name']}]", daemonset['replicas']['ready']) - return sender - -def baseDaemonsets(mode=None, config=None): - """ - description: monitoring daemonsets - return: class ZabbixMetric - """ - logging.info(f"Function baseDaemonsets() executed: {mode}") - if mode == "discovery": - return zabbixDiscoveryDaemonsets(config['kubernetes']['name'], kubernetesGetDaemonsets(config['monitoring']['daemonsets'])) - if mode == "item": - return zabbixItemDaemonsets(config['kubernetes']['name'], kubernetesGetDaemonsets(config['monitoring']['daemonsets'])) - return [] \ No newline at end of file + return items diff --git a/src/modules/kubernetes/base/deployments.py b/src/modules/kubernetes/base/deployments.py index 9f3f127..7ce4188 100644 --- a/src/modules/kubernetes/base/deployments.py +++ b/src/modules/kubernetes/base/deployments.py @@ -1,12 +1,11 @@ from kubernetes import client -from pyzabbix import ZabbixMetric from modules.common.functions import * import json, urllib3, logging urllib3.disable_warnings() logging = logging.getLogger("kubernetes.base.deployments") -def kubernetesGetDeployments(config=None): +def kubernetesGetDeployments(config): """ description: get deployments data return: list @@ -27,12 +26,13 @@ def kubernetesGetDeployments(config=None): } } - if matchLabels(config['labels']['exclude'], deployment.metadata.labels): - continue - - if config['labels']['include'] != []: - if not matchLabels(config['labels']['include'], deployment.metadata.labels): - continue + if deployment.get("metadata"): + if deployment['metadata'].get("labels"): + if matchLabels(config['monitoring']['deployments']['labels']['exclude'], deployment.metadata.labels): + continue + if config['labels']['include'] != []: + if not matchLabels(config['monitoring']['deployments']['labels']['include'], deployment.metadata.labels): + continue for i in ["desired", "ready", "available"]: if json['replicas'][i] is None: @@ -45,45 +45,31 @@ def kubernetesGetDeployments(config=None): return deployments -def zabbixDiscoveryDeployments(clustername, deployments=[]): +def zabbixDiscoveryDeployments(config): """ description: create a discovery for deployment, per namespace - return: class ZabbixMetric + return: dict """ discovery = {"data":[]} - for deployment in deployments: + for deployment in kubernetesGetDeployments(config): output = { "{#KUBERNETES_DEPLOYMENT_NAMESPACE}": deployment['namespace'], "{#KUBERNETES_DEPLOYMENT_NAME}": deployment['name']} discovery['data'].append(output) - sender = [ZabbixMetric(clustername, "kubernetes.deployment.discovery", json.dumps(discovery))] + return [config['kubernetes']['name'], "kubernetes.deployments.discovery", json.dumps(discovery)] - return sender - -def zabbixItemDeployments(clustername, deployments=[]): +def zabbixItemsDeployments(config): """ description: create a item for deployment, per namespace - return: class ZabbixResponse + return: list """ - sender = [] + items = [] - for deployment in deployments: - sender.append(ZabbixMetric(clustername, f"kubernetes.deployment.availableReplicas[{deployment['namespace']},{deployment['name']}]", deployment['replicas']['available']),) - sender.append(ZabbixMetric(clustername, f"kubernetes.deployment.readyReplicas[{deployment['namespace']},{deployment['name']}]", deployment['replicas']['ready']),) - sender.append(ZabbixMetric(clustername, f"kubernetes.deployment.desiredReplicas[{deployment['namespace']},{deployment['name']}]", deployment['replicas']['desired']),) + for deployment 
in kubernetesGetDeployments(config): + items.append(config['kubernetes']['name'], f"kubernetes.deployment.availableReplicas[{deployment['namespace']},{deployment['name']}]", deployment['replicas']['available']) + items.append(config['kubernetes']['name'], f"kubernetes.deployment.readyReplicas[{deployment['namespace']},{deployment['name']}]", deployment['replicas']['ready']) + items.append(config['kubernetes']['name'], f"kubernetes.deployment.desiredReplicas[{deployment['namespace']},{deployment['name']}]", deployment['replicas']['desired']) - return sender - -def baseDeployments(mode=None, config=None): - """ - description: monitoring deployments - return: class ZabbixMetric - """ - logging.info(f"Function baseDeployments() executed: {mode}") - if mode == "discovery": - return zabbixDiscoveryDeployments(config['kubernetes']['name'], kubernetesGetDeployments(config['monitoring']['deployments'])) - if mode == "item": - return zabbixItemDeployments(config['kubernetes']['name'], kubernetesGetDeployments(config['monitoring']['deployments'])) - return [] + return items diff --git a/src/modules/kubernetes/base/nodes.py b/src/modules/kubernetes/base/nodes.py index 2ccd119..454b48d 100644 --- a/src/modules/kubernetes/base/nodes.py +++ b/src/modules/kubernetes/base/nodes.py @@ -1,12 +1,11 @@ from kubernetes import client -from pyzabbix import ZabbixMetric from modules.common.functions import * import json, urllib3, logging urllib3.disable_warnings() logging = logging.getLogger("kubernetes.base.nodes") -def kubernetesGetNodes(config=None): +def kubernetesGetNodes(config): """ description: get nodes data return: list @@ -33,12 +32,13 @@ def kubernetesGetNodes(config=None): } } - if matchLabels(config['labels']['exclude'], node.metadata.labels): - continue - - if config['labels']['include'] != []: - if not matchLabels(config['labels']['include'], node.metadata.labels): - continue + if node.get("metadata"): + if node['metadata'].get("labels"): + if matchLabels(config['monitoring']['nodes']['labels']['exclude'], node.metadata.labels): + continue + if config['labels']['include'] != []: + if not matchLabels(config['monitoring']['nodes']['labels']['include'], node.metadata.labels): + continue if any(n['name'] == json['name'] for n in nodes): continue @@ -47,50 +47,36 @@ def kubernetesGetNodes(config=None): return nodes -def zabbixDiscoveryNodes(clustername, nodes=[]): +def zabbixDiscoveryNodes(config): """ description: create a discovery for node - return: class ZabbixMetric + return: dict """ discovery = {"data":[]} - for node in nodes: + for node in kubernetesGetNodes(config): output = {"{#KUBERNETES_NODE_NAME}": node['name']} discovery['data'].append(output) - sender = [ZabbixMetric(clustername, "kubernetes.node.discovery", json.dumps(discovery))] - - return sender + return [config['kubernetes']['name'], "kubernetes.nodes.discovery", json.dumps(discovery)] -def zabbixItemNodes(clustername, nodes=[]): +def zabbixItemsNodes(config): """ description: create a item for node - return: class ZabbixMetric - """ - sender = [] - - for node in nodes: - sender.append(ZabbixMetric(clustername, f"kubernetes.node.healthz[{node['name']}]", node['status']),) - sender.append(ZabbixMetric(clustername, f"kubernetes.node.capacity.cpu[{node['name']}]", node['capacity']['cpu']),) - sender.append(ZabbixMetric(clustername, f"kubernetes.node.capacity.memory[{node['name']}]", node['capacity']['memory']),) - sender.append(ZabbixMetric(clustername, f"kubernetes.node.capacity.pods[{node['name']}]", node['capacity']['pods']),) - 
sender.append(ZabbixMetric(clustername, f"kubernetes.node.allocatable.cpu[{node['name']}]", node['allocatable']['cpu']),) - sender.append(ZabbixMetric(clustername, f"kubernetes.node.allocatable.memory[{node['name']}]", node['allocatable']['memory']),) - sender.append(ZabbixMetric(clustername, f"kubernetes.node.allocatable.pods[{node['name']}]", node['allocatable']['pods']),) - sender.append(ZabbixMetric(clustername, f"kubernetes.node.current.pods[{node['name']}]", node['current']['pods']),) - sender.append(ZabbixMetric(clustername, f"kubernetes.node.current.podsUsed[{node['name']}]", node['current']['pods_used']),) - sender.append(ZabbixMetric(clustername, f"kubernetes.node.current.podsFree[{node['name']}]", node['current']['pods_free']),) - - return sender - -def baseNodes(mode=None, config=None): - """ - description: monitoring nodes - return: class ZabbixMetric + return: list """ - logging.info(f"Function baseNodes() executed: {mode}") - if mode == "discovery": - return zabbixDiscoveryNodes(config['kubernetes']['name'], kubernetesGetNodes(config['monitoring']['nodes'])) - if mode == "item": - return zabbixItemNodes(config['kubernetes']['name'], kubernetesGetNodes(config['monitoring']['nodes'])) - return [] + items = [] + + for node in kubernetesGetNodes(config): + items.append(config['kubernetes']['name'], f"kubernetes.node.healthz[{node['name']}]", node['status']) + items.append(config['kubernetes']['name'], f"kubernetes.node.capacity.cpu[{node['name']}]", node['capacity']['cpu']) + items.append(config['kubernetes']['name'], f"kubernetes.node.capacity.memory[{node['name']}]", node['capacity']['memory']) + items.append(config['kubernetes']['name'], f"kubernetes.node.capacity.pods[{node['name']}]", node['capacity']['pods']) + items.append(config['kubernetes']['name'], f"kubernetes.node.allocatable.cpu[{node['name']}]", node['allocatable']['cpu']) + items.append(config['kubernetes']['name'], f"kubernetes.node.allocatable.memory[{node['name']}]", node['allocatable']['memory']) + items.append(config['kubernetes']['name'], f"kubernetes.node.allocatable.pods[{node['name']}]", node['allocatable']['pods']) + items.append(config['kubernetes']['name'], f"kubernetes.node.current.pods[{node['name']}]", node['current']['pods']) + items.append(config['kubernetes']['name'], f"kubernetes.node.current.podsUsed[{node['name']}]", node['current']['pods_used']) + items.append(config['kubernetes']['name'], f"kubernetes.node.current.podsFree[{node['name']}]", node['current']['pods_free']) + + return items diff --git a/src/modules/kubernetes/base/statefulsets.py b/src/modules/kubernetes/base/statefulsets.py index a9fa742..fcfd7d4 100644 --- a/src/modules/kubernetes/base/statefulsets.py +++ b/src/modules/kubernetes/base/statefulsets.py @@ -1,12 +1,11 @@ from kubernetes import client -from pyzabbix import ZabbixMetric from modules.common.functions import * import json, urllib3, logging urllib3.disable_warnings() logging = logging.getLogger("kubernetes.base.statefulsets") -def kubernetesGetStatefulsets(config=None): +def kubernetesGetStatefulsets(config): """ description: get statefulsets data return: list @@ -27,12 +26,13 @@ def kubernetesGetStatefulsets(config=None): } } - if matchLabels(config['labels']['exclude'], statefulset.metadata.labels): - continue - - if config['labels']['include'] != []: - if not matchLabels(config['labels']['include'], statefulset.metadata.labels): - continue + if statefulset.get("metadata"): + if statefulset['metadata'].get("labels"): + if 
matchLabels(config['monitoring']['statefulsets']['labels']['exclude'], statefulset.metadata.labels): + continue + if config['labels']['include'] != []: + if not matchLabels(config['monitoring']['statefulsets']['labels']['include'], statefulset.metadata.labels): + continue for i in ["desired", "ready", "available"]: if json['replicas'][i] is None: @@ -45,45 +45,31 @@ def kubernetesGetStatefulsets(config=None): return statefulsets -def zabbixDiscoveryStatefulsets(clustername, statefulsets=[]): +def zabbixDiscoveryStatefulsets(config): """ description: create a discovery for statefulset, per namespace - return: class ZabbixMetric + return: dict """ discovery = {"data":[]} - for statefulset in statefulsets: + for statefulset in kubernetesGetStatefulsets(config): output = { "{#KUBERNETES_STATEFULSET_NAMESPACE}": statefulset['namespace'], "{#KUBERNETES_STATEFULSET_NAME}": statefulset['name']} discovery['data'].append(output) - sender = [ZabbixMetric(clustername, "kubernetes.statefulset.discovery", json.dumps(discovery))] + return [config['kubernetes']['name'], "kubernetes.statefulsets.discovery", json.dumps(discovery)] - return sender - -def zabbixItemStatefulsets(clustername, statefulsets=[]): +def zabbixItemsStatefulsets(config): """ description: create a item for statefulset, per namespace - return: class ZabbixResponse + return: list """ - sender = [] + items = [] - for statefulset in statefulsets: - sender.append(ZabbixMetric(clustername, f"kubernetes.statefulset.availableReplicas[{statefulset['namespace']},{statefulset['name']}]", statefulset['replicas']['available']),) - sender.append(ZabbixMetric(clustername, f"kubernetes.statefulset.readyReplicas[{statefulset['namespace']},{statefulset['name']}]", statefulset['replicas']['ready']),) - sender.append(ZabbixMetric(clustername, f"kubernetes.statefulset.desiredReplicas[{statefulset['namespace']},{statefulset['name']}]", statefulset['replicas']['desired']),) + for statefulset in kubernetesGetStatefulsets(config): + items.append(config['kubernetes']['name'], f"kubernetes.statefulset.availableReplicas[{statefulset['namespace']},{statefulset['name']}]", statefulset['replicas']['available']) + items.append(config['kubernetes']['name'], f"kubernetes.statefulset.readyReplicas[{statefulset['namespace']},{statefulset['name']}]", statefulset['replicas']['ready']) + items.append(config['kubernetes']['name'], f"kubernetes.statefulset.desiredReplicas[{statefulset['namespace']},{statefulset['name']}]", statefulset['replicas']['desired']) - return sender - -def baseStatefulsets(mode=None, config=None): - """ - description: monitoring statefulsets - return: class ZabbixMetric - """ - logging.info(f"Function baseStatefulsets() executed: {mode}") - if mode == "discovery": - return zabbixDiscoveryStatefulsets(config['kubernetes']['name'], kubernetesGetStatefulsets(config['monitoring']['statefulsets'])) - if mode == "item": - return zabbixItemStatefulsets(config['kubernetes']['name'], kubernetesGetStatefulsets(config['monitoring']['statefulsets'])) - return [] + return items diff --git a/src/modules/kubernetes/base/volumes.py b/src/modules/kubernetes/base/volumes.py index 943dfd5..1d9fbcc 100644 --- a/src/modules/kubernetes/base/volumes.py +++ b/src/modules/kubernetes/base/volumes.py @@ -1,12 +1,11 @@ from kubernetes import client -from pyzabbix import ZabbixMetric from modules.common.functions import * import json, urllib3, re, logging urllib3.disable_warnings() logging = logging.getLogger("kubernetes.base.volumes") -def kubernetesGetVolumes(config=None): +def 
kubernetesGetVolumes(config): """ description: get volumes data return: list @@ -35,11 +34,11 @@ def kubernetesGetVolumes(config=None): volume['name'] = volume['pvcRef']['name'] if volume.get("metadata"): - if volume.metadata.get("labels"): - if matchLabels(config['labels']['exclude'], volume.metadata.labels): + if volume['metadata'].get("labels"): + if matchLabels(config['monitoring']['volumes']['labels']['exclude'], volume.metadata.labels): continue if config['labels']['include'] != []: - if not matchLabels(config['labels']['include'], volume.metadata.labels): + if not matchLabels(config['monitoring']['volumes']['labels']['include'], volume.metadata.labels): continue for i in ["time", "pvcRef"]: @@ -55,48 +54,34 @@ def kubernetesGetVolumes(config=None): return volumes -def zabbixDiscoveryVolumes(clustername, volumes=[]): +def zabbixDiscoveryVolumes(config): """ description: create a discovery for persistent volume claim, per namespace - return: class ZabbixMetric + return: dict """ discovery = {"data":[]} - for volume in volumes: + for volume in kubernetesGetVolumes(config): output = { "{#KUBERNETES_PVC_NAMESPACE}": volume['namespace'], "{#KUBERNETES_PVC_NAME}": volume['name']} discovery['data'].append(output) - sender = [ZabbixMetric(clustername, "kubernetes.pvc.discovery", json.dumps(discovery))] + return [config['kubernetes']['name'], "kubernetes.volumes.discovery", json.dumps(discovery)] - return sender - -def zabbixItemVolumes(clustername, volumes=[]): +def zabbixItemsVolumes(config): """ description: create a item for persistent volume claim, per namespace - return: class ZabbixMetric + return: list """ - sender = [] - - for volume in volumes: - sender.append(ZabbixMetric(clustername, f"kubernetes.volumeclaim.availableBytes[{volume['namespace']},{volume['name']}]", volume['availableBytes']),) - sender.append(ZabbixMetric(clustername, f"kubernetes.volumeclaim.capacityBytes[{volume['namespace']},{volume['name']}]", volume['capacityBytes']),) - sender.append(ZabbixMetric(clustername, f"kubernetes.volumeclaim.usedBytes[{volume['namespace']},{volume['name']}]", volume['usedBytes']),) - sender.append(ZabbixMetric(clustername, f"kubernetes.volumeclaim.inodesFree[{volume['namespace']},{volume['name']}]", volume['inodesFree']),) - sender.append(ZabbixMetric(clustername, f"kubernetes.volumeclaim.inodes[{volume['namespace']},{volume['name']}]", volume['inodes']),) - sender.append(ZabbixMetric(clustername, f"kubernetes.volumeclaim.inodesUsed[{volume['namespace']},{volume['name']}]", volume['inodesUsed']),) + items = [] - return sender + for volume in kubernetesGetVolumes(config): + items.append(config['kubernetes']['name'], f"kubernetes.volumeclaim.availableBytes[{volume['namespace']},{volume['name']}]", volume['availableBytes']) + items.append(config['kubernetes']['name'], f"kubernetes.volumeclaim.capacityBytes[{volume['namespace']},{volume['name']}]", volume['capacityBytes']) + items.append(config['kubernetes']['name'], f"kubernetes.volumeclaim.usedBytes[{volume['namespace']},{volume['name']}]", volume['usedBytes']) + items.append(config['kubernetes']['name'], f"kubernetes.volumeclaim.inodesFree[{volume['namespace']},{volume['name']}]", volume['inodesFree']) + items.append(config['kubernetes']['name'], f"kubernetes.volumeclaim.inodes[{volume['namespace']},{volume['name']}]", volume['inodes']) + items.append(config['kubernetes']['name'], f"kubernetes.volumeclaim.inodesUsed[{volume['namespace']},{volume['name']}]", volume['inodesUsed']) -def baseVolumes(mode=None, config=None): - """ - 
description: monitoring volumes - return: class ZabbixMetric - """ - logging.info(f"Function baseVolumes() executed: {mode}") - if mode == "discovery": - return zabbixDiscoveryVolumes(config['kubernetes']['name'], kubernetesGetVolumes(config['monitoring']['volumes'])) - if mode == "item": - return zabbixItemVolumes(config['kubernetes']['name'], kubernetesGetVolumes(config['monitoring']['volumes'])) - return [] + return items diff --git a/src/modules/kubernetes/openebs/__init__.py b/src/modules/kubernetes/openebs/__init__.py index f9bd2c8..b33ddec 100644 --- a/src/modules/kubernetes/openebs/__init__.py +++ b/src/modules/kubernetes/openebs/__init__.py @@ -1,2 +1,2 @@ -from modules.kubernetes.openebs.cstorpoolclusters import baseOpenebsCstorpoolclusters -from modules.kubernetes.openebs.cstorpoolinstances import baseOpenebsCstorpoolinstances +from modules.kubernetes.openebs.cstorpoolclusters import zabbixDiscoveryCstorpoolclusters, zabbixItemsCstorpoolclusters +from modules.kubernetes.openebs.cstorpoolinstances import zabbixDiscoveryCstorpoolinstances, zabbixItemsCstorpoolinstances diff --git a/src/modules/kubernetes/openebs/cstorpoolclusters.py b/src/modules/kubernetes/openebs/cstorpoolclusters.py index f9ef130..4a3424b 100644 --- a/src/modules/kubernetes/openebs/cstorpoolclusters.py +++ b/src/modules/kubernetes/openebs/cstorpoolclusters.py @@ -1,12 +1,11 @@ from kubernetes import client -from pyzabbix import ZabbixMetric from modules.common.functions import * import json, urllib3, logging urllib3.disable_warnings() logging = logging.getLogger("kubernetes.openebs.cstorpoolclusters") -def openebsGetCstorpoolclusters(config=None): +def openebsGetCstorpoolclusters(config): """ description: get cstorpoolclusters data return: list @@ -35,10 +34,10 @@ def openebsGetCstorpoolclusters(config=None): if cstorpoolcluster.get("metadata"): if cstorpoolcluster['metadata'].get("labels"): - if matchLabels(config['labels']['exclude'], cstorpoolcluster['metadata']['labels']): + if matchLabels(config['monitoring']['openebs']['labels']['exclude'], cstorpoolcluster['metadata']['labels']): continue if config['labels']['include'] != []: - if not matchLabels(config['labels']['exclude'], cstorpoolcluster['metadata']['labels']): + if not matchLabels(config['monitoring']['openebs']['labels']['exclude'], cstorpoolcluster['metadata']['labels']): continue if any(c['name'] == json['name'] and c['namespace'] == json['namespace'] for c in cstorpoolclusters): @@ -48,47 +47,33 @@ def openebsGetCstorpoolclusters(config=None): return cstorpoolclusters -def ZabbixDiscoveryCstorpoolclusters(clustername, cstorpoolclusters=[]): +def zabbixDiscoveryCstorpoolclusters(config): """ description: create a discovery for cstorpoolclusters, per namespace - return: class ZabbixMetric + return: dict """ discovery = {"data":[]} - for cstorpoolcluster in cstorpoolclusters: + for cstorpoolcluster in openebsGetCstorpoolclusters(config): output = { "{#KUBERNETES_OPENEBS_CSTORPOOLCLUSTER_NAMESPACE}": cstorpoolcluster['namespace'], "{#KUBERNETES_OPENEBS_CSTORPOOLCLUSTER_NAME}": cstorpoolcluster['name']} discovery['data'].append(output) - sender = [ZabbixMetric(clustername, "kubernetes.openebs.cstorpoolclusters.discovery", json.dumps(discovery))] + return [config['kubernetes']['name'], "kubernetes.openebs.cstorpoolclusters.discovery", json.dumps(discovery)] - return sender - -def ZabbixItemCstorpoolclusters(clustername, cstorpoolclusters=[]): +def zabbixItemsCstorpoolclusters(config): """ description: create a item for cstorpoolclusters, per namespace 
- return: class ZabbixMetric + return: list """ - sender = [] - - for cstorpoolcluster in cstorpoolclusters: - sender.append(ZabbixMetric(clustername, f"kubernetes.openebs.cstorpoolclusters.desiredInstances[{cstorpoolcluster['namespace']},{cstorpoolcluster['name']}]", cstorpoolcluster['instances']['desired']),) - sender.append(ZabbixMetric(clustername, f"kubernetes.openebs.cstorpoolclusters.healthyInstances[{cstorpoolcluster['namespace']},{cstorpoolcluster['name']}]", cstorpoolcluster['instances']['healthy']),) - sender.append(ZabbixMetric(clustername, f"kubernetes.openebs.cstorpoolclusters.provisionedInstances[{cstorpoolcluster['namespace']},{cstorpoolcluster['name']}]", cstorpoolcluster['instances']['provisioned']),) - sender.append(ZabbixMetric(clustername, f"kubernetes.openebs.cstorpoolclusters.desiredVersion[{cstorpoolcluster['namespace']},{cstorpoolcluster['name']}]", cstorpoolcluster['version']['desired']),) - sender.append(ZabbixMetric(clustername, f"kubernetes.openebs.cstorpoolclusters.currentVersion[{cstorpoolcluster['namespace']},{cstorpoolcluster['name']}]", cstorpoolcluster['version']['current']),) + items = [] - return sender + for cstorpoolcluster in openebsGetCstorpoolclusters(config): + items.append(config['kubernetes']['name'], f"kubernetes.openebs.cstorpoolclusters.desiredInstances[{cstorpoolcluster['namespace']},{cstorpoolcluster['name']}]", cstorpoolcluster['instances']['desired']) + items.append(config['kubernetes']['name'], f"kubernetes.openebs.cstorpoolclusters.healthyInstances[{cstorpoolcluster['namespace']},{cstorpoolcluster['name']}]", cstorpoolcluster['instances']['healthy']) + items.append(config['kubernetes']['name'], f"kubernetes.openebs.cstorpoolclusters.provisionedInstances[{cstorpoolcluster['namespace']},{cstorpoolcluster['name']}]", cstorpoolcluster['instances']['provisioned']) + items.append(config['kubernetes']['name'], f"kubernetes.openebs.cstorpoolclusters.desiredVersion[{cstorpoolcluster['namespace']},{cstorpoolcluster['name']}]", cstorpoolcluster['version']['desired']) + items.append(config['kubernetes']['name'], f"kubernetes.openebs.cstorpoolclusters.currentVersion[{cstorpoolcluster['namespace']},{cstorpoolcluster['name']}]", cstorpoolcluster['version']['current']) -def baseOpenebsCstorpoolclusters(mode=None, config=None): - """ - description: monitoring openebs cstorpoolclusters - return: class ZabbixMetric - """ - logging.info(f"Function baseOpenebsCstorpoolclusters() executed: {mode}") - if mode == "discovery": - return ZabbixDiscoveryCstorpoolclusters(config['kubernetes']['name'], openebsGetCstorpoolclusters(config['monitoring']['openebs'])) - if mode == "item": - return ZabbixItemCstorpoolclusters(config['kubernetes']['name'], openebsGetCstorpoolclusters(config['monitoring']['openebs'])) - return [] + return items diff --git a/src/modules/kubernetes/openebs/cstorpoolinstances.py b/src/modules/kubernetes/openebs/cstorpoolinstances.py index ce514d4..f6fc018 100644 --- a/src/modules/kubernetes/openebs/cstorpoolinstances.py +++ b/src/modules/kubernetes/openebs/cstorpoolinstances.py @@ -1,12 +1,11 @@ from kubernetes import client -from pyzabbix import ZabbixMetric from modules.common.functions import * -import json, urllib3, logging +import urllib3, logging urllib3.disable_warnings() logging = logging.getLogger("kubernetes.openebs.cstorpoolinstances") -def openebsGetCstorpoolinstances(config=None): +def openebsGetCstorpoolinstances(config): """ description: get cstorpoolinstances data return: list @@ -31,10 +30,10 @@ def 
openebsGetCstorpoolinstances(config=None): if cstorpoolinstance.get("metadata"): if cstorpoolinstance['metadata'].get("labels"): - if matchLabels(config['labels']['exclude'], cstorpoolinstance['metadata']['labels']): + if matchLabels(config['monitoring']['openebs']['labels']['exclude'], cstorpoolinstance['metadata']['labels']): continue if config['labels']['include'] != []: - if not matchLabels(config['labels']['exclude'], cstorpoolinstance['metadata']['labels']): + if not matchLabels(config['monitoring']['openebs']['labels']['exclude'], cstorpoolinstance['metadata']['labels']): continue if any(c['name'] == json['name'] and c['namespace'] == json['namespace'] for c in cstorpoolinstances): @@ -44,48 +43,35 @@ def openebsGetCstorpoolinstances(config=None): return cstorpoolinstances -def ZabbixDiscoveryCstorpoolinstances(clustername, cstorpoolinstances=[]): +def zabbixDiscoveryCstorpoolinstances(config): """ description: create a discovery for cstorpoolinstances, per namespace - return: class ZabbixMetric + return: dict """ discovery = {"data":[]} - for cstorpoolinstance in cstorpoolinstances: + for cstorpoolinstance in openebsGetCstorpoolinstances(config): output = { "{#KUBERNETES_OPENEBS_CSTORPOOLINSTANCE_NAMESPACE}": cstorpoolinstance['namespace'], "{#KUBERNETES_OPENEBS_CSTORPOOLINSTANCE_NAME}": cstorpoolinstance['name']} discovery['data'].append(output) - sender = [ZabbixMetric(clustername, "kubernetes.openebs.cstorpoolinstances.discovery", json.dumps(discovery))] + return [config['kubernetes']['name'], "kubernetes.openebs.cstorpoolinstances.discovery", json.dumps(discovery)] - return sender - -def ZabbixItemCstorpoolinstances(clustername, cstorpoolinstances=[]): +def zabbixItemsCstorpoolinstances(config): """ description: create a item for cstorpoolinstances, per namespace - return: class ZabbixMetric - """ - sender = [] - - for cstorpoolinstance in cstorpoolinstances: - sender.append(ZabbixMetric(clustername, f"kubernetes.openebs.cstorpoolinstances.readonly[{cstorpoolinstance['namespace']},{cstorpoolinstance['name']}]", cstorpoolinstance['status']['readOnly']),) - sender.append(ZabbixMetric(clustername, f"kubernetes.openebs.cstorpoolinstances.provisionedReplicas[{cstorpoolinstance['namespace']},{cstorpoolinstance['name']}]", cstorpoolinstance['status']['provisionedReplicas']),) - sender.append(ZabbixMetric(clustername, f"kubernetes.openebs.cstorpoolinstances.healthyReplicas[{cstorpoolinstance['namespace']},{cstorpoolinstance['name']}]", cstorpoolinstance['status']['healthyReplicas']),) - sender.append(ZabbixMetric(clustername, f"kubernetes.openebs.cstorpoolinstances.status[{cstorpoolinstance['namespace']},{cstorpoolinstance['name']}]", cstorpoolinstance['status']['phase']),) - sender.append(ZabbixMetric(clustername, f"kubernetes.openebs.cstorpoolinstances.capacity.total[{cstorpoolinstance['namespace']},{cstorpoolinstance['name']}]", cstorpoolinstance['status']['capacity']['total']),) - sender.append(ZabbixMetric(clustername, f"kubernetes.openebs.cstorpoolinstances.capacity.free[{cstorpoolinstance['namespace']},{cstorpoolinstance['name']}]", cstorpoolinstance['status']['capacity']['free']),) - sender.append(ZabbixMetric(clustername, f"kubernetes.openebs.cstorpoolinstances.capacity.used[{cstorpoolinstance['namespace']},{cstorpoolinstance['name']}]", cstorpoolinstance['status']['capacity']['used']),) - return sender - -def baseOpenebsCstorpoolinstances(mode=None, config=None): - """ - description: monitoring openebs cstorpoolinstances - return: class ZabbixMetric + return: list """ - 
logging.info(f"Function baseOpenebsCstorpoolinstances() executed: {mode}") - if mode == "discovery": - return ZabbixDiscoveryCstorpoolinstances(config['kubernetes']['name'], openebsGetCstorpoolinstances(config['monitoring']['openebs'])) - if mode == "item": - return ZabbixItemCstorpoolinstances(config['kubernetes']['name'], openebsGetCstorpoolinstances(config['monitoring']['openebs'])) - return [] + items = [] + + for cstorpoolinstance in openebsGetCstorpoolinstances(config): + items.append(config['kubernetes']['name'], f"kubernetes.openebs.cstorpoolinstances.readonly[{cstorpoolinstance['namespace']},{cstorpoolinstance['name']}]", cstorpoolinstance['status']['readOnly']) + items.append(config['kubernetes']['name'], f"kubernetes.openebs.cstorpoolinstances.provisionedReplicas[{cstorpoolinstance['namespace']},{cstorpoolinstance['name']}]", cstorpoolinstance['status']['provisionedReplicas']) + items.append(config['kubernetes']['name'], f"kubernetes.openebs.cstorpoolinstances.healthyReplicas[{cstorpoolinstance['namespace']},{cstorpoolinstance['name']}]", cstorpoolinstance['status']['healthyReplicas']) + items.append(config['kubernetes']['name'], f"kubernetes.openebs.cstorpoolinstances.status[{cstorpoolinstance['namespace']},{cstorpoolinstance['name']}]", cstorpoolinstance['status']['phase']) + items.append(config['kubernetes']['name'], f"kubernetes.openebs.cstorpoolinstances.capacity.total[{cstorpoolinstance['namespace']},{cstorpoolinstance['name']}]", cstorpoolinstance['status']['capacity']['total']) + items.append(config['kubernetes']['name'], f"kubernetes.openebs.cstorpoolinstances.capacity.free[{cstorpoolinstance['namespace']},{cstorpoolinstance['name']}]", cstorpoolinstance['status']['capacity']['free']) + items.append(config['kubernetes']['name'], f"kubernetes.openebs.cstorpoolinstances.capacity.used[{cstorpoolinstance['namespace']},{cstorpoolinstance['name']}]", cstorpoolinstance['status']['capacity']['used']) + + return items diff --git a/src/modules/kubernetes/trivy/vulnerabilityreports.py b/src/modules/kubernetes/trivy/vulnerabilityreports.py index 9de84a8..d3b06d5 100644 --- a/src/modules/kubernetes/trivy/vulnerabilityreports.py +++ b/src/modules/kubernetes/trivy/vulnerabilityreports.py @@ -1,7 +1,6 @@ from kubernetes import client -from pyzabbix import ZabbixMetric from modules.common.functions import * -import json, urllib3, logging +import urllib3, logging urllib3.disable_warnings() logging = logging.getLogger("kubernetes.trivy.vulnerabilityreports") diff --git a/src/requirements.txt b/src/requirements.txt index b54e6cd..1d227b4 100644 --- a/src/requirements.txt +++ b/src/requirements.txt @@ -1,6 +1,6 @@ requests==2.31.0 kubernetes==29.0.0 -py-zabbix==1.1.7 +zappix==1.0.2 urllib3==2.1.0 schedule==1.2.1 psutil==5.9.7 diff --git a/src/zabbix-kubernetes-discovery.py b/src/zabbix-kubernetes-discovery.py index 99a2acc..6a0a9ad 100644 --- a/src/zabbix-kubernetes-discovery.py +++ b/src/zabbix-kubernetes-discovery.py @@ -4,7 +4,7 @@ import logging, schedule, threading, psutil from time import sleep from kubernetes import config as kube_config -from pyzabbix import ZabbixSender +from zappix.sender import Sender as zabbix_sender from modules.kubernetes.base import * from modules.kubernetes.openebs import * @@ -32,7 +32,7 @@ logging.error("Unable to load Kubernetes credentials") sys.exit(1) -zabbix = ZabbixSender(config['zabbix']['endpoint']) +zabbix = zabbix_sender(config['zabbix']['endpoint']) zabbix.timeout = int(config['zabbix']['timeout']) logging.debug(f"-> Zabbix endpoint: 
{config['zabbix']['endpoint']}") logging.debug(f"-> Zabbix timeout: {config['zabbix']['timeout']}") @@ -41,9 +41,9 @@ def executeSender(data): try: logging.debug(data) - zabbix.send(data) + zabbix.send_value(data) except Exception as e: - logging.debug(e) + logging.error(e) def executeJobs(): while True: @@ -60,45 +60,47 @@ def executeJobs(): logging.info("Application zabbix-kubernetes-discovery started") jobs_queue = queue.Queue() + sch_disco = config['zabbix']['schedule']['discovery'] + sch_items = config['zabbix']['schedule']['items'] # cronjobs if config['monitoring']['cronjobs']['enabled']: - schedule.every(config['zabbix']['schedule']['discovery']).seconds.do(jobs_queue.put, lambda: executeSender(baseCronjobs(mode="discovery", config=config))) - schedule.every(config['zabbix']['schedule']['items']).seconds.do(jobs_queue.put, lambda: executeSender(baseCronjobs(mode="item", config=config))) + schedule.every(sch_disco).seconds.do(jobs_queue.put, lambda: executeSender(zabbixDiscoveryCronjobs(config))) + schedule.every(sch_items).seconds.do(jobs_queue.put, lambda: executeSender(zabbixItemsCronjobs(config))) # daemonsets if config['monitoring']['daemonsets']['enabled']: - schedule.every(config['zabbix']['schedule']['discovery']).seconds.do(jobs_queue.put, lambda: executeSender(baseDaemonsets(mode="discovery", config=config))) - schedule.every(config['zabbix']['schedule']['items']).seconds.do(jobs_queue.put, lambda: executeSender(baseDaemonsets(mode="item", config=config))) + schedule.every(sch_disco).seconds.do(jobs_queue.put, lambda: executeSender(zabbixDiscoveryDaemonsets(config))) + schedule.every(sch_items).seconds.do(jobs_queue.put, lambda: executeSender(zabbixItemsDaemonsets(config))) # deployments if config['monitoring']['deployments']['enabled']: - schedule.every(config['zabbix']['schedule']['discovery']).seconds.do(jobs_queue.put, lambda: executeSender(baseDeployments(mode="discovery", config=config))) - schedule.every(config['zabbix']['schedule']['items']).seconds.do(jobs_queue.put, lambda: executeSender(baseDeployments(mode="item", config=config))) + schedule.every(sch_disco).seconds.do(jobs_queue.put, lambda: executeSender(zabbixDiscoveryDeployments(config))) + schedule.every(sch_items).seconds.do(jobs_queue.put, lambda: executeSender(zabbixItemsDeployments(config))) # nodes if config['monitoring']['nodes']['enabled']: - schedule.every(config['zabbix']['schedule']['discovery']).seconds.do(jobs_queue.put, lambda: executeSender(baseNodes(mode="discovery", config=config))) - schedule.every(config['zabbix']['schedule']['items']).seconds.do(jobs_queue.put, lambda: executeSender(baseNodes(mode="item", config=config))) + schedule.every(sch_disco).seconds.do(jobs_queue.put, lambda: executeSender(zabbixDiscoveryNodes(config))) + schedule.every(sch_items).seconds.do(jobs_queue.put, lambda: executeSender(zabbixItemsNodes(config))) # statefulsets if config['monitoring']['statefulsets']['enabled']: - schedule.every(config['zabbix']['schedule']['discovery']).seconds.do(jobs_queue.put, lambda: executeSender(baseStatefulsets(mode="discovery", config=config))) - schedule.every(config['zabbix']['schedule']['items']).seconds.do(jobs_queue.put, lambda: executeSender(baseStatefulsets(mode="item", config=config))) + schedule.every(sch_disco).seconds.do(jobs_queue.put, lambda: executeSender(zabbixDiscoveryStatefulsets(config))) + schedule.every(sch_items).seconds.do(jobs_queue.put, lambda: executeSender(zabbixItemsStatefulsets(config))) # volumes if config['monitoring']['volumes']['enabled']: - 
schedule.every(config['zabbix']['schedule']['discovery']).seconds.do(jobs_queue.put, lambda: executeSender(baseVolumes(mode="discovery", config=config))) - schedule.every(config['zabbix']['schedule']['items']).seconds.do(jobs_queue.put, lambda: executeSender(baseVolumes(mode="item", config=config))) + schedule.every(sch_disco).seconds.do(jobs_queue.put, lambda: executeSender(zabbixDiscoveryVolumes(config))) + schedule.every(sch_items).seconds.do(jobs_queue.put, lambda: executeSender(zabbixItemsVolumes(config))) # openebs if config['monitoring']['openebs']['enabled']: # cstorpoolclusters - schedule.every(config['zabbix']['schedule']['discovery']).seconds.do(jobs_queue.put, lambda: executeSender(baseOpenebsCstorpoolclusters(mode="discovery", config=config))) - schedule.every(config['zabbix']['schedule']['items']).seconds.do(jobs_queue.put, lambda: executeSender(baseOpenebsCstorpoolclusters(mode="item", config=config))) + schedule.every(sch_disco).seconds.do(jobs_queue.put, lambda: executeSender(zabbixDiscoveryCstorpoolclusters(config))) + schedule.every(sch_items).seconds.do(jobs_queue.put, lambda: executeSender(zabbixItemsCstorpoolclusters(config))) # cstorpoolinstances - schedule.every(config['zabbix']['schedule']['discovery']).seconds.do(jobs_queue.put, lambda: executeSender(baseOpenebsCstorpoolinstances(mode="discovery", config=config))) - schedule.every(config['zabbix']['schedule']['items']).seconds.do(jobs_queue.put, lambda: executeSender(baseOpenebsCstorpoolinstances(mode="item", config=config))) + schedule.every(sch_disco).seconds.do(jobs_queue.put, lambda: executeSender(zabbixDiscoveryCstorpoolinstances(config))) + schedule.every(sch_items).seconds.do(jobs_queue.put, lambda: executeSender(zabbixItemsCstorpoolinstances(config))) # thread thread = threading.Thread(target=executeJobs) From 8793871d3fb475212d2dc037df08c2d4c1e3d135 Mon Sep 17 00:00:00 2001 From: djerfy Date: Thu, 11 Jan 2024 18:37:26 +0100 Subject: [PATCH 25/34] fix: labels parsing Signed-off-by: djerfy --- src/modules/kubernetes/base/cronjobs.py | 6 +++--- src/modules/kubernetes/base/daemonsets.py | 6 +++--- src/modules/kubernetes/base/deployments.py | 6 +++--- src/modules/kubernetes/base/nodes.py | 6 +++--- src/modules/kubernetes/base/statefulsets.py | 6 +++--- src/modules/kubernetes/base/volumes.py | 6 +++--- 6 files changed, 18 insertions(+), 18 deletions(-) diff --git a/src/modules/kubernetes/base/cronjobs.py b/src/modules/kubernetes/base/cronjobs.py index 1c2e26b..6fdb7e5 100644 --- a/src/modules/kubernetes/base/cronjobs.py +++ b/src/modules/kubernetes/base/cronjobs.py @@ -67,11 +67,11 @@ def kubernetesGetCronjobs(config): } } - if cronjob.get("metadata"): - if cronjob['metadata'].get("labels"): + if cronjob.metadata: + if cronjob.metadata.labels: if matchLabels(config['monitoring']['cronjobs']['labels']['exclude'], cronjob.metadata.labels): continue - if config['labels']['include'] != []: + if config['monitoring']['cronjobs']['labels']['include'] != []: if not matchLabels(config['monitoring']['cronjobs']['labels']['include'], cronjob.metadata.labels): continue diff --git a/src/modules/kubernetes/base/daemonsets.py b/src/modules/kubernetes/base/daemonsets.py index e019e85..5cc017e 100644 --- a/src/modules/kubernetes/base/daemonsets.py +++ b/src/modules/kubernetes/base/daemonsets.py @@ -31,11 +31,11 @@ def kubernetesGetDaemonsets(config): if json['replicas'][i] is None: json['replicas'][i] = 0 - if daemonset.get("metadata"): - if daemonset['metadata'].get("labels"): + if daemonset.metadata: + if 
daemonset.metadata.labels: if matchLabels(config['monitoring']['daemonsets']['labels']['exclude'], daemonset.metadata.labels): continue - if config['labels']['include'] != []: + if config['monitoring']['daemonsets']['labels']['include'] != []: if not matchLabels(config['monitoring']['daemonsets']['labels']['include'], daemonset.metadata.labels): continue diff --git a/src/modules/kubernetes/base/deployments.py b/src/modules/kubernetes/base/deployments.py index 7ce4188..3b9dfdf 100644 --- a/src/modules/kubernetes/base/deployments.py +++ b/src/modules/kubernetes/base/deployments.py @@ -26,11 +26,11 @@ def kubernetesGetDeployments(config): } } - if deployment.get("metadata"): - if deployment['metadata'].get("labels"): + if deployment.metadata: + if deployment.metadata.labels: if matchLabels(config['monitoring']['deployments']['labels']['exclude'], deployment.metadata.labels): continue - if config['labels']['include'] != []: + if config['monitoring']['deployments']['labels']['include'] != []: if not matchLabels(config['monitoring']['deployments']['labels']['include'], deployment.metadata.labels): continue diff --git a/src/modules/kubernetes/base/nodes.py b/src/modules/kubernetes/base/nodes.py index 454b48d..705cbb4 100644 --- a/src/modules/kubernetes/base/nodes.py +++ b/src/modules/kubernetes/base/nodes.py @@ -32,11 +32,11 @@ def kubernetesGetNodes(config): } } - if node.get("metadata"): - if node['metadata'].get("labels"): + if node.metadata: + if node.metadata.labels: if matchLabels(config['monitoring']['nodes']['labels']['exclude'], node.metadata.labels): continue - if config['labels']['include'] != []: + if config['monitoring']['nodes']['labels']['include'] != []: if not matchLabels(config['monitoring']['nodes']['labels']['include'], node.metadata.labels): continue diff --git a/src/modules/kubernetes/base/statefulsets.py b/src/modules/kubernetes/base/statefulsets.py index fcfd7d4..5fdb0b5 100644 --- a/src/modules/kubernetes/base/statefulsets.py +++ b/src/modules/kubernetes/base/statefulsets.py @@ -26,11 +26,11 @@ def kubernetesGetStatefulsets(config): } } - if statefulset.get("metadata"): - if statefulset['metadata'].get("labels"): + if statefulset.metadata: + if statefulset.metadata.labels: if matchLabels(config['monitoring']['statefulsets']['labels']['exclude'], statefulset.metadata.labels): continue - if config['labels']['include'] != []: + if config['monitoring']['statefulsets']['labels']['include'] != []: if not matchLabels(config['monitoring']['statefulsets']['labels']['include'], statefulset.metadata.labels): continue diff --git a/src/modules/kubernetes/base/volumes.py b/src/modules/kubernetes/base/volumes.py index 1d9fbcc..84e8df8 100644 --- a/src/modules/kubernetes/base/volumes.py +++ b/src/modules/kubernetes/base/volumes.py @@ -33,11 +33,11 @@ def kubernetesGetVolumes(config): volume['namespace'] = volume['pvcRef']['namespace'] volume['name'] = volume['pvcRef']['name'] - if volume.get("metadata"): - if volume['metadata'].get("labels"): + if volume.metadata: + if volume.metadata.labels: if matchLabels(config['monitoring']['volumes']['labels']['exclude'], volume.metadata.labels): continue - if config['labels']['include'] != []: + if config['monitoring']['volumes']['labels']['include'] != []: if not matchLabels(config['monitoring']['volumes']['labels']['include'], volume.metadata.labels): continue From da922b2295876fca007ada7635672f677cd7de4d Mon Sep 17 00:00:00 2001 From: djerfy Date: Thu, 11 Jan 2024 18:50:04 +0100 Subject: [PATCH 26/34] fix: sender list Signed-off-by: djerfy --- 
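The shape this patch converges on: every metric becomes its own [host, key, value] list, so the sender can unpack entries uniformly. A short sketch of the intended structure, with hypothetical cluster, namespace, and metric values:

    items = []
    items.append(["cluster-name", "kubernetes.statefulset.readyReplicas[default,postgres]", 3])
    items.append(["cluster-name", "kubernetes.statefulset.desiredReplicas[default,postgres]", 3])

    # Unpacking works because each entry carries exactly three fields.
    for host, key, value in items:
        print(host, key, value)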
src/modules/kubernetes/base/cronjobs.py | 6 +++--- src/modules/kubernetes/base/daemonsets.py | 8 ++++---- src/modules/kubernetes/base/deployments.py | 6 +++--- src/modules/kubernetes/base/nodes.py | 20 +++++++++---------- src/modules/kubernetes/base/statefulsets.py | 6 +++--- src/modules/kubernetes/base/volumes.py | 12 +++++------ .../kubernetes/openebs/cstorpoolclusters.py | 10 +++++----- .../kubernetes/openebs/cstorpoolinstances.py | 14 ++++++------- 8 files changed, 41 insertions(+), 41 deletions(-) diff --git a/src/modules/kubernetes/base/cronjobs.py b/src/modules/kubernetes/base/cronjobs.py index 6fdb7e5..1412dc2 100644 --- a/src/modules/kubernetes/base/cronjobs.py +++ b/src/modules/kubernetes/base/cronjobs.py @@ -102,8 +102,8 @@ def zabbixItemsCronjobs(config): items = [] for cronjob in kubernetesGetCronjobs(config): - items.append(config['kubernetes']['name'], f"kubernetes.cronjob.status[{cronjob['namespace']},{cronjob['name']}]", cronjob['status']) - items.append(config['kubernetes']['name'], f"kubernetes.cronjob.reason[{cronjob['namespace']},{cronjob['name']}]", cronjob['last_job']['reason']) - items.append(config['kubernetes']['name'], f"kubernetes.cronjob.message[{cronjob['namespace']},{cronjob['name']}]", cronjob['last_job']['message']) + items.append([config['kubernetes']['name'], f"kubernetes.cronjob.status[{cronjob['namespace']},{cronjob['name']}]", cronjob['status']]) + items.append([config['kubernetes']['name'], f"kubernetes.cronjob.reason[{cronjob['namespace']},{cronjob['name']}]", cronjob['last_job']['reason']]) + items.append([config['kubernetes']['name'], f"kubernetes.cronjob.message[{cronjob['namespace']},{cronjob['name']}]", cronjob['last_job']['message']]) return items diff --git a/src/modules/kubernetes/base/daemonsets.py b/src/modules/kubernetes/base/daemonsets.py index 5cc017e..59602b3 100644 --- a/src/modules/kubernetes/base/daemonsets.py +++ b/src/modules/kubernetes/base/daemonsets.py @@ -69,9 +69,9 @@ def zabbixItemsDaemonsets(config): items = [] for daemonset in kubernetesGetDaemonsets(config): - items.append(config['kubernetes']['name'], f"kubernetes.daemonset.desiredReplicas[{daemonset['namespace']},{daemonset['name']}]", daemonset['replicas']['desired']) - items.append(config['kubernetes']['name'], f"kubernetes.daemonset.currentReplicas[{daemonset['namespace']},{daemonset['name']}]", daemonset['replicas']['current']) - items.append(config['kubernetes']['name'], f"kubernetes.daemonset.availableReplicas[{daemonset['namespace']},{daemonset['name']}]", daemonset['replicas']['available']) - items.append(config['kubernetes']['name'], f"kubernetes.daemonset.readyReplicas[{daemonset['namespace']},{daemonset['name']}]", daemonset['replicas']['ready']) + items.append([config['kubernetes']['name'], f"kubernetes.daemonset.desiredReplicas[{daemonset['namespace']},{daemonset['name']}]", daemonset['replicas']['desired']]) + items.append([config['kubernetes']['name'], f"kubernetes.daemonset.currentReplicas[{daemonset['namespace']},{daemonset['name']}]", daemonset['replicas']['current']]) + items.append([config['kubernetes']['name'], f"kubernetes.daemonset.availableReplicas[{daemonset['namespace']},{daemonset['name']}]", daemonset['replicas']['available']]) + items.append([config['kubernetes']['name'], f"kubernetes.daemonset.readyReplicas[{daemonset['namespace']},{daemonset['name']}]", daemonset['replicas']['ready']]) return items diff --git a/src/modules/kubernetes/base/deployments.py b/src/modules/kubernetes/base/deployments.py index 3b9dfdf..38db526 100644 --- 
a/src/modules/kubernetes/base/deployments.py +++ b/src/modules/kubernetes/base/deployments.py @@ -68,8 +68,8 @@ def zabbixItemsDeployments(config): items = [] for deployment in kubernetesGetDeployments(config): - items.append(config['kubernetes']['name'], f"kubernetes.deployment.availableReplicas[{deployment['namespace']},{deployment['name']}]", deployment['replicas']['available']) - items.append(config['kubernetes']['name'], f"kubernetes.deployment.readyReplicas[{deployment['namespace']},{deployment['name']}]", deployment['replicas']['ready']) - items.append(config['kubernetes']['name'], f"kubernetes.deployment.desiredReplicas[{deployment['namespace']},{deployment['name']}]", deployment['replicas']['desired']) + items.append([config['kubernetes']['name'], f"kubernetes.deployment.availableReplicas[{deployment['namespace']},{deployment['name']}]", deployment['replicas']['available']]) + items.append([config['kubernetes']['name'], f"kubernetes.deployment.readyReplicas[{deployment['namespace']},{deployment['name']}]", deployment['replicas']['ready']]) + items.append([config['kubernetes']['name'], f"kubernetes.deployment.desiredReplicas[{deployment['namespace']},{deployment['name']}]", deployment['replicas']['desired']]) return items diff --git a/src/modules/kubernetes/base/nodes.py b/src/modules/kubernetes/base/nodes.py index 705cbb4..ddd1a73 100644 --- a/src/modules/kubernetes/base/nodes.py +++ b/src/modules/kubernetes/base/nodes.py @@ -68,15 +68,15 @@ def zabbixItemsNodes(config): items = [] for node in kubernetesGetNodes(config): - items.append(config['kubernetes']['name'], f"kubernetes.node.healthz[{node['name']}]", node['status']) - items.append(config['kubernetes']['name'], f"kubernetes.node.capacity.cpu[{node['name']}]", node['capacity']['cpu']) - items.append(config['kubernetes']['name'], f"kubernetes.node.capacity.memory[{node['name']}]", node['capacity']['memory']) - items.append(config['kubernetes']['name'], f"kubernetes.node.capacity.pods[{node['name']}]", node['capacity']['pods']) - items.append(config['kubernetes']['name'], f"kubernetes.node.allocatable.cpu[{node['name']}]", node['allocatable']['cpu']) - items.append(config['kubernetes']['name'], f"kubernetes.node.allocatable.memory[{node['name']}]", node['allocatable']['memory']) - items.append(config['kubernetes']['name'], f"kubernetes.node.allocatable.pods[{node['name']}]", node['allocatable']['pods']) - items.append(config['kubernetes']['name'], f"kubernetes.node.current.pods[{node['name']}]", node['current']['pods']) - items.append(config['kubernetes']['name'], f"kubernetes.node.current.podsUsed[{node['name']}]", node['current']['pods_used']) - items.append(config['kubernetes']['name'], f"kubernetes.node.current.podsFree[{node['name']}]", node['current']['pods_free']) + items.append([config['kubernetes']['name'], f"kubernetes.node.healthz[{node['name']}]", node['status']]) + items.append([config['kubernetes']['name'], f"kubernetes.node.capacity.cpu[{node['name']}]", node['capacity']['cpu']]) + items.append([config['kubernetes']['name'], f"kubernetes.node.capacity.memory[{node['name']}]", node['capacity']['memory']]) + items.append([config['kubernetes']['name'], f"kubernetes.node.capacity.pods[{node['name']}]", node['capacity']['pods']]) + items.append([config['kubernetes']['name'], f"kubernetes.node.allocatable.cpu[{node['name']}]", node['allocatable']['cpu']]) + items.append([config['kubernetes']['name'], f"kubernetes.node.allocatable.memory[{node['name']}]", node['allocatable']['memory']]) + 
items.append([config['kubernetes']['name'], f"kubernetes.node.allocatable.pods[{node['name']}]", node['allocatable']['pods']]) + items.append([config['kubernetes']['name'], f"kubernetes.node.current.pods[{node['name']}]", node['current']['pods']]) + items.append([config['kubernetes']['name'], f"kubernetes.node.current.podsUsed[{node['name']}]", node['current']['pods_used']]) + items.append([config['kubernetes']['name'], f"kubernetes.node.current.podsFree[{node['name']}]", node['current']['pods_free']]) return items diff --git a/src/modules/kubernetes/base/statefulsets.py b/src/modules/kubernetes/base/statefulsets.py index 5fdb0b5..3a01fdc 100644 --- a/src/modules/kubernetes/base/statefulsets.py +++ b/src/modules/kubernetes/base/statefulsets.py @@ -68,8 +68,8 @@ def zabbixItemsStatefulsets(config): items = [] for statefulset in kubernetesGetStatefulsets(config): - items.append(config['kubernetes']['name'], f"kubernetes.statefulset.availableReplicas[{statefulset['namespace']},{statefulset['name']}]", statefulset['replicas']['available']) - items.append(config['kubernetes']['name'], f"kubernetes.statefulset.readyReplicas[{statefulset['namespace']},{statefulset['name']}]", statefulset['replicas']['ready']) - items.append(config['kubernetes']['name'], f"kubernetes.statefulset.desiredReplicas[{statefulset['namespace']},{statefulset['name']}]", statefulset['replicas']['desired']) + items.append([config['kubernetes']['name'], f"kubernetes.statefulset.availableReplicas[{statefulset['namespace']},{statefulset['name']}]", statefulset['replicas']['available']]) + items.append([config['kubernetes']['name'], f"kubernetes.statefulset.readyReplicas[{statefulset['namespace']},{statefulset['name']}]", statefulset['replicas']['ready']]) + items.append([config['kubernetes']['name'], f"kubernetes.statefulset.desiredReplicas[{statefulset['namespace']},{statefulset['name']}]", statefulset['replicas']['desired']]) return items diff --git a/src/modules/kubernetes/base/volumes.py b/src/modules/kubernetes/base/volumes.py index 84e8df8..97cdf66 100644 --- a/src/modules/kubernetes/base/volumes.py +++ b/src/modules/kubernetes/base/volumes.py @@ -77,11 +77,11 @@ def zabbixItemsVolumes(config): items = [] for volume in kubernetesGetVolumes(config): - items.append(config['kubernetes']['name'], f"kubernetes.volumeclaim.availableBytes[{volume['namespace']},{volume['name']}]", volume['availableBytes']) - items.append(config['kubernetes']['name'], f"kubernetes.volumeclaim.capacityBytes[{volume['namespace']},{volume['name']}]", volume['capacityBytes']) - items.append(config['kubernetes']['name'], f"kubernetes.volumeclaim.usedBytes[{volume['namespace']},{volume['name']}]", volume['usedBytes']) - items.append(config['kubernetes']['name'], f"kubernetes.volumeclaim.inodesFree[{volume['namespace']},{volume['name']}]", volume['inodesFree']) - items.append(config['kubernetes']['name'], f"kubernetes.volumeclaim.inodes[{volume['namespace']},{volume['name']}]", volume['inodes']) - items.append(config['kubernetes']['name'], f"kubernetes.volumeclaim.inodesUsed[{volume['namespace']},{volume['name']}]", volume['inodesUsed']) + items.append([config['kubernetes']['name'], f"kubernetes.volumeclaim.availableBytes[{volume['namespace']},{volume['name']}]", volume['availableBytes']]) + items.append([config['kubernetes']['name'], f"kubernetes.volumeclaim.capacityBytes[{volume['namespace']},{volume['name']}]", volume['capacityBytes']]) + items.append([config['kubernetes']['name'], 
f"kubernetes.volumeclaim.usedBytes[{volume['namespace']},{volume['name']}]", volume['usedBytes']]) + items.append([config['kubernetes']['name'], f"kubernetes.volumeclaim.inodesFree[{volume['namespace']},{volume['name']}]", volume['inodesFree']]) + items.append([config['kubernetes']['name'], f"kubernetes.volumeclaim.inodes[{volume['namespace']},{volume['name']}]", volume['inodes']]) + items.append([config['kubernetes']['name'], f"kubernetes.volumeclaim.inodesUsed[{volume['namespace']},{volume['name']}]", volume['inodesUsed']]) return items diff --git a/src/modules/kubernetes/openebs/cstorpoolclusters.py b/src/modules/kubernetes/openebs/cstorpoolclusters.py index 4a3424b..4a599a5 100644 --- a/src/modules/kubernetes/openebs/cstorpoolclusters.py +++ b/src/modules/kubernetes/openebs/cstorpoolclusters.py @@ -70,10 +70,10 @@ def zabbixItemsCstorpoolclusters(config): items = [] for cstorpoolcluster in openebsGetCstorpoolclusters(config): - items.append(config['kubernetes']['name'], f"kubernetes.openebs.cstorpoolclusters.desiredInstances[{cstorpoolcluster['namespace']},{cstorpoolcluster['name']}]", cstorpoolcluster['instances']['desired']) - items.append(config['kubernetes']['name'], f"kubernetes.openebs.cstorpoolclusters.healthyInstances[{cstorpoolcluster['namespace']},{cstorpoolcluster['name']}]", cstorpoolcluster['instances']['healthy']) - items.append(config['kubernetes']['name'], f"kubernetes.openebs.cstorpoolclusters.provisionedInstances[{cstorpoolcluster['namespace']},{cstorpoolcluster['name']}]", cstorpoolcluster['instances']['provisioned']) - items.append(config['kubernetes']['name'], f"kubernetes.openebs.cstorpoolclusters.desiredVersion[{cstorpoolcluster['namespace']},{cstorpoolcluster['name']}]", cstorpoolcluster['version']['desired']) - items.append(config['kubernetes']['name'], f"kubernetes.openebs.cstorpoolclusters.currentVersion[{cstorpoolcluster['namespace']},{cstorpoolcluster['name']}]", cstorpoolcluster['version']['current']) + items.append([config['kubernetes']['name'], f"kubernetes.openebs.cstorpoolclusters.desiredInstances[{cstorpoolcluster['namespace']},{cstorpoolcluster['name']}]", cstorpoolcluster['instances']['desired']]) + items.append([config['kubernetes']['name'], f"kubernetes.openebs.cstorpoolclusters.healthyInstances[{cstorpoolcluster['namespace']},{cstorpoolcluster['name']}]", cstorpoolcluster['instances']['healthy']]) + items.append([config['kubernetes']['name'], f"kubernetes.openebs.cstorpoolclusters.provisionedInstances[{cstorpoolcluster['namespace']},{cstorpoolcluster['name']}]", cstorpoolcluster['instances']['provisioned']]) + items.append([config['kubernetes']['name'], f"kubernetes.openebs.cstorpoolclusters.desiredVersion[{cstorpoolcluster['namespace']},{cstorpoolcluster['name']}]", cstorpoolcluster['version']['desired']]) + items.append([config['kubernetes']['name'], f"kubernetes.openebs.cstorpoolclusters.currentVersion[{cstorpoolcluster['namespace']},{cstorpoolcluster['name']}]", cstorpoolcluster['version']['current']]) return items diff --git a/src/modules/kubernetes/openebs/cstorpoolinstances.py b/src/modules/kubernetes/openebs/cstorpoolinstances.py index f6fc018..7560fd4 100644 --- a/src/modules/kubernetes/openebs/cstorpoolinstances.py +++ b/src/modules/kubernetes/openebs/cstorpoolinstances.py @@ -66,12 +66,12 @@ def zabbixItemsCstorpoolinstances(config): items = [] for cstorpoolinstance in openebsGetCstorpoolinstances(config): - items.append(config['kubernetes']['name'], 
f"kubernetes.openebs.cstorpoolinstances.readonly[{cstorpoolinstance['namespace']},{cstorpoolinstance['name']}]", cstorpoolinstance['status']['readOnly']) - items.append(config['kubernetes']['name'], f"kubernetes.openebs.cstorpoolinstances.provisionedReplicas[{cstorpoolinstance['namespace']},{cstorpoolinstance['name']}]", cstorpoolinstance['status']['provisionedReplicas']) - items.append(config['kubernetes']['name'], f"kubernetes.openebs.cstorpoolinstances.healthyReplicas[{cstorpoolinstance['namespace']},{cstorpoolinstance['name']}]", cstorpoolinstance['status']['healthyReplicas']) - items.append(config['kubernetes']['name'], f"kubernetes.openebs.cstorpoolinstances.status[{cstorpoolinstance['namespace']},{cstorpoolinstance['name']}]", cstorpoolinstance['status']['phase']) - items.append(config['kubernetes']['name'], f"kubernetes.openebs.cstorpoolinstances.capacity.total[{cstorpoolinstance['namespace']},{cstorpoolinstance['name']}]", cstorpoolinstance['status']['capacity']['total']) - items.append(config['kubernetes']['name'], f"kubernetes.openebs.cstorpoolinstances.capacity.free[{cstorpoolinstance['namespace']},{cstorpoolinstance['name']}]", cstorpoolinstance['status']['capacity']['free']) - items.append(config['kubernetes']['name'], f"kubernetes.openebs.cstorpoolinstances.capacity.used[{cstorpoolinstance['namespace']},{cstorpoolinstance['name']}]", cstorpoolinstance['status']['capacity']['used']) + items.append([config['kubernetes']['name'], f"kubernetes.openebs.cstorpoolinstances.readonly[{cstorpoolinstance['namespace']},{cstorpoolinstance['name']}]", cstorpoolinstance['status']['readOnly']]) + items.append([config['kubernetes']['name'], f"kubernetes.openebs.cstorpoolinstances.provisionedReplicas[{cstorpoolinstance['namespace']},{cstorpoolinstance['name']}]", cstorpoolinstance['status']['provisionedReplicas']]) + items.append([config['kubernetes']['name'], f"kubernetes.openebs.cstorpoolinstances.healthyReplicas[{cstorpoolinstance['namespace']},{cstorpoolinstance['name']}]", cstorpoolinstance['status']['healthyReplicas']]) + items.append([config['kubernetes']['name'], f"kubernetes.openebs.cstorpoolinstances.status[{cstorpoolinstance['namespace']},{cstorpoolinstance['name']}]", cstorpoolinstance['status']['phase']]) + items.append([config['kubernetes']['name'], f"kubernetes.openebs.cstorpoolinstances.capacity.total[{cstorpoolinstance['namespace']},{cstorpoolinstance['name']}]", cstorpoolinstance['status']['capacity']['total']]) + items.append([config['kubernetes']['name'], f"kubernetes.openebs.cstorpoolinstances.capacity.free[{cstorpoolinstance['namespace']},{cstorpoolinstance['name']}]", cstorpoolinstance['status']['capacity']['free']]) + items.append([config['kubernetes']['name'], f"kubernetes.openebs.cstorpoolinstances.capacity.used[{cstorpoolinstance['namespace']},{cstorpoolinstance['name']}]", cstorpoolinstance['status']['capacity']['used']]) return items From d95983278db3b35f582865e1bb6334d222835d71 Mon Sep 17 00:00:00 2001 From: djerfy Date: Thu, 11 Jan 2024 19:20:58 +0100 Subject: [PATCH 27/34] fix: zabbix sender from list Signed-off-by: djerfy --- src/zabbix-kubernetes-discovery.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/src/zabbix-kubernetes-discovery.py b/src/zabbix-kubernetes-discovery.py index 6a0a9ad..fac1f77 100644 --- a/src/zabbix-kubernetes-discovery.py +++ b/src/zabbix-kubernetes-discovery.py @@ -40,8 +40,12 @@ def executeSender(data): try: - logging.debug(data) - zabbix.send_value(data) + for d in data: + if len(d) != 3: + 
logging.error(f"Invalid zabbix format: {d}") + host, key, value = d[0], d[1], d[2] + logging.info(f"host={host} key={key} value={value}") + zabbix.send_value(host, key, value) except Exception as e: logging.error(e) From f8f9881034f115c9475b39cfdf9fff137a53ae6f Mon Sep 17 00:00:00 2001 From: djerfy Date: Thu, 11 Jan 2024 19:55:27 +0100 Subject: [PATCH 28/34] misc: improve sender configuration Signed-off-by: djerfy --- src/zabbix-kubernetes-discovery.py | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/src/zabbix-kubernetes-discovery.py b/src/zabbix-kubernetes-discovery.py index fac1f77..543ae46 100644 --- a/src/zabbix-kubernetes-discovery.py +++ b/src/zabbix-kubernetes-discovery.py @@ -39,15 +39,16 @@ logging.debug(f"-> Cluster name: {config['kubernetes']['name']}") def executeSender(data): - try: - for d in data: - if len(d) != 3: - logging.error(f"Invalid zabbix format: {d}") - host, key, value = d[0], d[1], d[2] - logging.info(f"host={host} key={key} value={value}") - zabbix.send_value(host, key, value) - except Exception as e: - logging.error(e) + for d in data: + if len(d) != 3: + logging.error(f"Invalid format: {d}") + host, key, value = d[0], d[1], d[2] + logging.info(f"Zabbix server request: host={host} key={key} value={value}") + try: + resp = zabbix.send_value(host, key, value) + logging.info(f"Zabbix server response: {resp}") + except Exception as e: + logging.error(e) def executeJobs(): while True: From 51ccf6cf439c0eff31a9dcc3a253f4e15277a65d Mon Sep 17 00:00:00 2001 From: djerfy Date: Thu, 11 Jan 2024 19:58:59 +0100 Subject: [PATCH 29/34] fix: labels items parser Signed-off-by: djerfy --- src/config.yaml | 12 ++++++------ src/modules/kubernetes/base/cronjobs.py | 4 ++-- src/modules/kubernetes/base/daemonsets.py | 4 ++-- src/modules/kubernetes/base/deployments.py | 4 ++-- src/modules/kubernetes/base/nodes.py | 4 ++-- src/modules/kubernetes/base/statefulsets.py | 4 ++-- src/modules/kubernetes/base/volumes.py | 4 ++-- src/modules/kubernetes/openebs/cstorpoolclusters.py | 2 +- src/modules/kubernetes/openebs/cstorpoolinstances.py | 2 +- 9 files changed, 20 insertions(+), 20 deletions(-) diff --git a/src/config.yaml b/src/config.yaml index 8d710c5..ae7a59a 100644 --- a/src/config.yaml +++ b/src/config.yaml @@ -1,5 +1,5 @@ output: - level: DEBUG + level: INFO kubernetes: name: localhost @@ -19,27 +19,27 @@ monitoring: include: [] exclude: [] daemonsets: - enabled: True + enabled: False labels: include: [] exclude: [] volumes: - enabled: True + enabled: False labels: include: [] exclude: [] deployments: - enabled: True + enabled: False labels: include: [] exclude: [] statefulsets: - enabled: True + enabled: False labels: include: [] exclude: [] cronjobs: - enabled: True + enabled: False labels: include: [] exclude: [] diff --git a/src/modules/kubernetes/base/cronjobs.py b/src/modules/kubernetes/base/cronjobs.py index 1412dc2..e109b57 100644 --- a/src/modules/kubernetes/base/cronjobs.py +++ b/src/modules/kubernetes/base/cronjobs.py @@ -67,8 +67,8 @@ def kubernetesGetCronjobs(config): } } - if cronjob.metadata: - if cronjob.metadata.labels: + if hasattr(cronjob, 'metadata'): + if hasattr(cronjob.metadata, 'labels'): if matchLabels(config['monitoring']['cronjobs']['labels']['exclude'], cronjob.metadata.labels): continue if config['monitoring']['cronjobs']['labels']['include'] != []: diff --git a/src/modules/kubernetes/base/daemonsets.py b/src/modules/kubernetes/base/daemonsets.py index 59602b3..afbdec3 100644 --- 
a/src/modules/kubernetes/base/daemonsets.py +++ b/src/modules/kubernetes/base/daemonsets.py @@ -31,8 +31,8 @@ def kubernetesGetDaemonsets(config): if json['replicas'][i] is None: json['replicas'][i] = 0 - if daemonset.metadata: - if daemonset.metadata.labels: + if hasattr(daemonset, 'metadata'): + if hasattr(daemonset.metadata, 'labels'): if matchLabels(config['monitoring']['daemonsets']['labels']['exclude'], daemonset.metadata.labels): continue if config['monitoring']['daemonsets']['labels']['include'] != []: diff --git a/src/modules/kubernetes/base/deployments.py b/src/modules/kubernetes/base/deployments.py index 38db526..6bf344e 100644 --- a/src/modules/kubernetes/base/deployments.py +++ b/src/modules/kubernetes/base/deployments.py @@ -26,8 +26,8 @@ def kubernetesGetDeployments(config): } } - if deployment.metadata: - if deployment.metadata.labels: + if hasattr(deployment, 'metadata'): + if hasattr(deployment.metadata, 'labels'): if matchLabels(config['monitoring']['deployments']['labels']['exclude'], deployment.metadata.labels): continue if config['monitoring']['deployments']['labels']['include'] != []: diff --git a/src/modules/kubernetes/base/nodes.py b/src/modules/kubernetes/base/nodes.py index ddd1a73..e016a27 100644 --- a/src/modules/kubernetes/base/nodes.py +++ b/src/modules/kubernetes/base/nodes.py @@ -32,8 +32,8 @@ def kubernetesGetNodes(config): } } - if node.metadata: - if node.metadata.labels: + if hasattr(node, 'metadata'): + if hasattr(node.metadata, 'labels'): if matchLabels(config['monitoring']['nodes']['labels']['exclude'], node.metadata.labels): continue if config['monitoring']['nodes']['labels']['include'] != []: diff --git a/src/modules/kubernetes/base/statefulsets.py b/src/modules/kubernetes/base/statefulsets.py index 3a01fdc..a3c5166 100644 --- a/src/modules/kubernetes/base/statefulsets.py +++ b/src/modules/kubernetes/base/statefulsets.py @@ -26,8 +26,8 @@ def kubernetesGetStatefulsets(config): } } - if statefulset.metadata: - if statefulset.metadata.labels: + if hasattr(statefulset, 'metadata'): + if hasattr(statefulset.metadata, 'labels'): if matchLabels(config['monitoring']['statefulsets']['labels']['exclude'], statefulset.metadata.labels): continue if config['monitoring']['statefulsets']['labels']['include'] != []: diff --git a/src/modules/kubernetes/base/volumes.py b/src/modules/kubernetes/base/volumes.py index 97cdf66..0c98f2b 100644 --- a/src/modules/kubernetes/base/volumes.py +++ b/src/modules/kubernetes/base/volumes.py @@ -33,8 +33,8 @@ def kubernetesGetVolumes(config): volume['namespace'] = volume['pvcRef']['namespace'] volume['name'] = volume['pvcRef']['name'] - if volume.metadata: - if volume.metadata.labels: + if hasattr(volume, 'metadata'): + if hasattr(volume.metadata, 'labels'): if matchLabels(config['monitoring']['volumes']['labels']['exclude'], volume.metadata.labels): continue if config['monitoring']['volumes']['labels']['include'] != []: diff --git a/src/modules/kubernetes/openebs/cstorpoolclusters.py b/src/modules/kubernetes/openebs/cstorpoolclusters.py index 4a599a5..442ca7e 100644 --- a/src/modules/kubernetes/openebs/cstorpoolclusters.py +++ b/src/modules/kubernetes/openebs/cstorpoolclusters.py @@ -14,7 +14,7 @@ def openebsGetCstorpoolclusters(config): cstorpoolclusters = [] - if config['engine'] != "cstor": + if config['monitoring']['openebs']['engine'] != "cstor": return cstorpoolclusters for cstorpoolcluster in rawObjects(kubernetes.list_cluster_custom_object(group="cstor.openebs.io", version="v1", plural="cstorpoolclusters")): diff --git 
a/src/modules/kubernetes/openebs/cstorpoolinstances.py
+++ b/src/modules/kubernetes/openebs/cstorpoolinstances.py
@@ -14,7 +14,7 @@ def openebsGetCstorpoolinstances(config):
 
     cstorpoolinstances = []
 
-    if config['engine'] != "cstor":
+    if config['monitoring']['openebs']['engine'] != "cstor":
         return cstorpoolinstances
 
     for cstorpoolinstance in rawObjects(kubernetes.list_cluster_custom_object(group="cstor.openebs.io", version="v1", plural="cstorpoolinstances")):

From 35837b39dd2bc9ed217e3c3876e4d76aece9b260 Mon Sep 17 00:00:00 2001
From: djerfy
Date: Thu, 11 Jan 2024 20:00:53 +0100
Subject: [PATCH 30/34] fix: openebs match labels

Signed-off-by: djerfy
---
 src/modules/kubernetes/openebs/cstorpoolclusters.py  | 4 ++--
 src/modules/kubernetes/openebs/cstorpoolinstances.py | 4 ++--
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/src/modules/kubernetes/openebs/cstorpoolclusters.py b/src/modules/kubernetes/openebs/cstorpoolclusters.py
index 442ca7e..a232a42 100644
--- a/src/modules/kubernetes/openebs/cstorpoolclusters.py
+++ b/src/modules/kubernetes/openebs/cstorpoolclusters.py
@@ -36,7 +36,7 @@
         if cstorpoolcluster['metadata'].get("labels"):
             if matchLabels(config['monitoring']['openebs']['labels']['exclude'], cstorpoolcluster['metadata']['labels']):
                 continue
-        if config['labels']['include'] != []:
-            if not matchLabels(config['monitoring']['openebs']['labels']['exclude'], cstorpoolcluster['metadata']['labels']):
+        if config['monitoring']['openebs']['labels']['include'] != []:
+            if not matchLabels(config['monitoring']['openebs']['labels']['include'], cstorpoolcluster['metadata']['labels']):
                 continue
 
diff --git a/src/modules/kubernetes/openebs/cstorpoolinstances.py b/src/modules/kubernetes/openebs/cstorpoolinstances.py
index d6d1604..4b14ab0 100644
--- a/src/modules/kubernetes/openebs/cstorpoolinstances.py
+++ b/src/modules/kubernetes/openebs/cstorpoolinstances.py
@@ -32,7 +32,7 @@
         if cstorpoolinstance['metadata'].get("labels"):
             if matchLabels(config['monitoring']['openebs']['labels']['exclude'], cstorpoolinstance['metadata']['labels']):
                 continue
-        if config['labels']['include'] != []:
-            if not matchLabels(config['monitoring']['openebs']['labels']['exclude'], cstorpoolinstance['metadata']['labels']):
+        if config['monitoring']['openebs']['labels']['include'] != []:
+            if not matchLabels(config['monitoring']['openebs']['labels']['include'], cstorpoolinstance['metadata']['labels']):
                 continue
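The include/exclude semantics the base and openebs modules converge on here: exclusions are tested first and drop the object; then, if an include list is configured, only matching objects are kept, while an empty include list keeps everything. A compact sketch of that flow, using a simplified stand-in for matchLabels() (the real helper lives in modules/common/functions.py and parses selectors from strings):

    # Simplified stand-in for matchLabels(); selectors are "key=value" strings.
    def labels_match(selector, labels):
        key, _, value = selector.partition("=")
        return labels.get(key) == value

    def keep(labels, include, exclude):
        # Exclude wins first; an empty include list keeps everything else.
        if any(labels_match(s, labels) for s in exclude):
            return False
        if include and not any(labels_match(s, labels) for s in include):
            return False
        return True

    print(keep({"app": "db"}, include=[], exclude=["app=cache"]))  # True
    print(keep({"app": "db"}, include=["app=web"], exclude=[]))    # False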
From f3576da1a573c2fbb1fe96a7bf136e79c1e7d6e0 Mon Sep 17 00:00:00 2001
From: djerfy
Date: Thu, 11 Jan 2024 21:00:16 +0100
Subject: [PATCH 31/34] fix: discovery list syntax

Signed-off-by: djerfy
---
 src/modules/kubernetes/base/cronjobs.py              | 2 +-
 src/modules/kubernetes/base/daemonsets.py            | 2 +-
 src/modules/kubernetes/base/deployments.py           | 2 +-
 src/modules/kubernetes/base/nodes.py                 | 2 +-
 src/modules/kubernetes/base/statefulsets.py          | 2 +-
 src/modules/kubernetes/base/volumes.py               | 2 +-
 src/modules/kubernetes/openebs/cstorpoolclusters.py  | 2 +-
 src/modules/kubernetes/openebs/cstorpoolinstances.py | 2 +-
 8 files changed, 8 insertions(+), 8 deletions(-)

diff --git a/src/modules/kubernetes/base/cronjobs.py b/src/modules/kubernetes/base/cronjobs.py
index e109b57..0b6a83e 100644
--- a/src/modules/kubernetes/base/cronjobs.py
+++ b/src/modules/kubernetes/base/cronjobs.py
@@ -90,7 +90,7 @@ def zabbixDiscoveryCronjobs(config):
         output = {
             "{#KUBERNETES_CRONJOB_NAMESPACE}": cronjob['namespace'],
             "{#KUBERNETES_CRONJOB_NAME}": cronjob['name']}
-        discovery['data'].append(output)
+        discovery['data'].append([output])
 
     return [config['kubernetes']['name'], "kubernetes.cronjobs.discovery", json.dumps(discovery)]
diff --git a/src/modules/kubernetes/base/daemonsets.py b/src/modules/kubernetes/base/daemonsets.py
index afbdec3..102ce38 100644
--- a/src/modules/kubernetes/base/daemonsets.py
+++ b/src/modules/kubernetes/base/daemonsets.py
@@ -57,7 +57,7 @@ def zabbixDiscoveryDaemonsets(config):
         output = {
             "{#KUBERNETES_DAEMONSET_NAMESPACE}": daemonset['namespace'],
             "{#KUBERNETES_DAEMONSET_NAME}": daemonset['name']}
-        discovery['data'].append(output)
+        discovery['data'].append([output])
 
     return [config['kubernetes']['name'], "kubernetes.daemonsets.discovery", json.dumps(discovery)]
diff --git a/src/modules/kubernetes/base/deployments.py b/src/modules/kubernetes/base/deployments.py
index 6bf344e..2b8c4d1 100644
--- a/src/modules/kubernetes/base/deployments.py
+++ b/src/modules/kubernetes/base/deployments.py
@@ -56,7 +56,7 @@ def zabbixDiscoveryDeployments(config):
         output = {
             "{#KUBERNETES_DEPLOYMENT_NAMESPACE}": deployment['namespace'],
             "{#KUBERNETES_DEPLOYMENT_NAME}": deployment['name']}
-        discovery['data'].append(output)
+        discovery['data'].append([output])
 
     return [config['kubernetes']['name'], "kubernetes.deployments.discovery", json.dumps(discovery)]
diff --git a/src/modules/kubernetes/base/nodes.py b/src/modules/kubernetes/base/nodes.py
index e016a27..2babf91 100644
--- a/src/modules/kubernetes/base/nodes.py
+++ b/src/modules/kubernetes/base/nodes.py
@@ -56,7 +56,7 @@ def zabbixDiscoveryNodes(config):
 
     for node in kubernetesGetNodes(config):
         output = {"{#KUBERNETES_NODE_NAME}": node['name']}
-        discovery['data'].append(output)
+        discovery['data'].append([output])
 
     return [config['kubernetes']['name'], "kubernetes.nodes.discovery", json.dumps(discovery)]
diff --git a/src/modules/kubernetes/base/statefulsets.py b/src/modules/kubernetes/base/statefulsets.py
index a3c5166..8dbdc35 100644
--- a/src/modules/kubernetes/base/statefulsets.py
+++ b/src/modules/kubernetes/base/statefulsets.py
@@ -56,7 +56,7 @@ def zabbixDiscoveryStatefulsets(config):
         output = {
             "{#KUBERNETES_STATEFULSET_NAMESPACE}": statefulset['namespace'],
             "{#KUBERNETES_STATEFULSET_NAME}": statefulset['name']}
-        discovery['data'].append(output)
+        discovery['data'].append([output])
 
     return [config['kubernetes']['name'], "kubernetes.statefulsets.discovery", json.dumps(discovery)]
diff --git a/src/modules/kubernetes/base/volumes.py b/src/modules/kubernetes/base/volumes.py
index 0c98f2b..429032d 100644
--- a/src/modules/kubernetes/base/volumes.py
+++ b/src/modules/kubernetes/base/volumes.py
@@ -65,7 +65,7 @@ def zabbixDiscoveryVolumes(config):
         output = {
             "{#KUBERNETES_PVC_NAMESPACE}": volume['namespace'],
             "{#KUBERNETES_PVC_NAME}": volume['name']}
-        discovery['data'].append(output)
+        discovery['data'].append([output])
 
     return [config['kubernetes']['name'], "kubernetes.volumes.discovery", json.dumps(discovery)]
diff --git a/src/modules/kubernetes/openebs/cstorpoolclusters.py b/src/modules/kubernetes/openebs/cstorpoolclusters.py
index a232a42..cc0433d 100644
--- a/src/modules/kubernetes/openebs/cstorpoolclusters.py
+++ b/src/modules/kubernetes/openebs/cstorpoolclusters.py
@@ -58,7 +58,7 @@ def zabbixDiscoveryCstorpoolclusters(config):
         output = {
             "{#KUBERNETES_OPENEBS_CSTORPOOLCLUSTER_NAMESPACE}": cstorpoolcluster['namespace'],
             "{#KUBERNETES_OPENEBS_CSTORPOOLCLUSTER_NAME}": cstorpoolcluster['name']}
-        discovery['data'].append(output)
+        discovery['data'].append([output])
 
     return [config['kubernetes']['name'], "kubernetes.openebs.cstorpoolclusters.discovery", 
json.dumps(discovery)] diff --git a/src/modules/kubernetes/openebs/cstorpoolinstances.py b/src/modules/kubernetes/openebs/cstorpoolinstances.py index 4b14ab0..8a86dd7 100644 --- a/src/modules/kubernetes/openebs/cstorpoolinstances.py +++ b/src/modules/kubernetes/openebs/cstorpoolinstances.py @@ -54,7 +54,7 @@ def zabbixDiscoveryCstorpoolinstances(config): output = { "{#KUBERNETES_OPENEBS_CSTORPOOLINSTANCE_NAMESPACE}": cstorpoolinstance['namespace'], "{#KUBERNETES_OPENEBS_CSTORPOOLINSTANCE_NAME}": cstorpoolinstance['name']} - discovery['data'].append(output) + discovery['data'].append([output]) return [config['kubernetes']['name'], "kubernetes.openebs.cstorpoolinstances.discovery", json.dumps(discovery)] From a3003af97880f07225b6e4a1b91a3205b7c27211 Mon Sep 17 00:00:00 2001 From: djerfy Date: Thu, 11 Jan 2024 21:02:54 +0100 Subject: [PATCH 32/34] fix: discovery list syntax Signed-off-by: djerfy --- src/modules/kubernetes/base/cronjobs.py | 4 ++-- src/modules/kubernetes/base/daemonsets.py | 4 ++-- src/modules/kubernetes/base/deployments.py | 4 ++-- src/modules/kubernetes/base/nodes.py | 4 ++-- src/modules/kubernetes/base/statefulsets.py | 4 ++-- src/modules/kubernetes/base/volumes.py | 4 ++-- src/modules/kubernetes/openebs/cstorpoolclusters.py | 4 ++-- src/modules/kubernetes/openebs/cstorpoolinstances.py | 4 ++-- 8 files changed, 16 insertions(+), 16 deletions(-) diff --git a/src/modules/kubernetes/base/cronjobs.py b/src/modules/kubernetes/base/cronjobs.py index 0b6a83e..79aa51b 100644 --- a/src/modules/kubernetes/base/cronjobs.py +++ b/src/modules/kubernetes/base/cronjobs.py @@ -90,9 +90,9 @@ def zabbixDiscoveryCronjobs(config): output = { "{#KUBERNETES_CRONJOB_NAMESPACE}": cronjob['namespace'], "{#KUBERNETES_CRONJOB_NAME}": cronjob['name']} - discovery['data'].append([output]) + discovery['data'].append(output) - return [config['kubernetes']['name'], "kubernetes.cronjobs.discovery", json.dumps(discovery)] + return [[config['kubernetes']['name'], "kubernetes.cronjobs.discovery", json.dumps(discovery)]] def zabbixItemsCronjobs(config): """ diff --git a/src/modules/kubernetes/base/daemonsets.py b/src/modules/kubernetes/base/daemonsets.py index 102ce38..d4ec556 100644 --- a/src/modules/kubernetes/base/daemonsets.py +++ b/src/modules/kubernetes/base/daemonsets.py @@ -57,9 +57,9 @@ def zabbixDiscoveryDaemonsets(config): output = { "{#KUBERNETES_DAEMONSET_NAMESPACE}": daemonset['namespace'], "{#KUBERNETES_DAEMONSET_NAME}": daemonset['name']} - discovery['data'].append([output]) + discovery['data'].append(output) - return [config['kubernetes']['name'], "kubernetes.daemonsets.discovery", json.dumps(discovery)] + return [[config['kubernetes']['name'], "kubernetes.daemonsets.discovery", json.dumps(discovery)]] def zabbixItemsDaemonsets(config): """ diff --git a/src/modules/kubernetes/base/deployments.py b/src/modules/kubernetes/base/deployments.py index 2b8c4d1..b693c4d 100644 --- a/src/modules/kubernetes/base/deployments.py +++ b/src/modules/kubernetes/base/deployments.py @@ -56,9 +56,9 @@ def zabbixDiscoveryDeployments(config): output = { "{#KUBERNETES_DEPLOYMENT_NAMESPACE}": deployment['namespace'], "{#KUBERNETES_DEPLOYMENT_NAME}": deployment['name']} - discovery['data'].append([output]) + discovery['data'].append(output) - return [config['kubernetes']['name'], "kubernetes.deployments.discovery", json.dumps(discovery)] + return [[config['kubernetes']['name'], "kubernetes.deployments.discovery", json.dumps(discovery)]] def zabbixItemsDeployments(config): """ diff --git 
From a3003af97880f07225b6e4a1b91a3205b7c27211 Mon Sep 17 00:00:00 2001
From: djerfy
Date: Thu, 11 Jan 2024 21:02:54 +0100
Subject: [PATCH 32/34] fix: discovery list syntax

Signed-off-by: djerfy
---
 src/modules/kubernetes/base/cronjobs.py              | 4 ++--
 src/modules/kubernetes/base/daemonsets.py            | 4 ++--
 src/modules/kubernetes/base/deployments.py           | 4 ++--
 src/modules/kubernetes/base/nodes.py                 | 4 ++--
 src/modules/kubernetes/base/statefulsets.py          | 4 ++--
 src/modules/kubernetes/base/volumes.py               | 4 ++--
 src/modules/kubernetes/openebs/cstorpoolclusters.py  | 4 ++--
 src/modules/kubernetes/openebs/cstorpoolinstances.py | 4 ++--
 8 files changed, 16 insertions(+), 16 deletions(-)

diff --git a/src/modules/kubernetes/base/cronjobs.py b/src/modules/kubernetes/base/cronjobs.py
index 0b6a83e..79aa51b 100644
--- a/src/modules/kubernetes/base/cronjobs.py
+++ b/src/modules/kubernetes/base/cronjobs.py
@@ -90,9 +90,9 @@ def zabbixDiscoveryCronjobs(config):
         output = {
             "{#KUBERNETES_CRONJOB_NAMESPACE}": cronjob['namespace'],
             "{#KUBERNETES_CRONJOB_NAME}": cronjob['name']}
-        discovery['data'].append([output])
+        discovery['data'].append(output)

-    return [config['kubernetes']['name'], "kubernetes.cronjobs.discovery", json.dumps(discovery)]
+    return [[config['kubernetes']['name'], "kubernetes.cronjobs.discovery", json.dumps(discovery)]]

 def zabbixItemsCronjobs(config):
     """
diff --git a/src/modules/kubernetes/base/daemonsets.py b/src/modules/kubernetes/base/daemonsets.py
index 102ce38..d4ec556 100644
--- a/src/modules/kubernetes/base/daemonsets.py
+++ b/src/modules/kubernetes/base/daemonsets.py
@@ -57,9 +57,9 @@ def zabbixDiscoveryDaemonsets(config):
         output = {
             "{#KUBERNETES_DAEMONSET_NAMESPACE}": daemonset['namespace'],
             "{#KUBERNETES_DAEMONSET_NAME}": daemonset['name']}
-        discovery['data'].append([output])
+        discovery['data'].append(output)

-    return [config['kubernetes']['name'], "kubernetes.daemonsets.discovery", json.dumps(discovery)]
+    return [[config['kubernetes']['name'], "kubernetes.daemonsets.discovery", json.dumps(discovery)]]

 def zabbixItemsDaemonsets(config):
     """
diff --git a/src/modules/kubernetes/base/deployments.py b/src/modules/kubernetes/base/deployments.py
index 2b8c4d1..b693c4d 100644
--- a/src/modules/kubernetes/base/deployments.py
+++ b/src/modules/kubernetes/base/deployments.py
@@ -56,9 +56,9 @@ def zabbixDiscoveryDeployments(config):
         output = {
             "{#KUBERNETES_DEPLOYMENT_NAMESPACE}": deployment['namespace'],
             "{#KUBERNETES_DEPLOYMENT_NAME}": deployment['name']}
-        discovery['data'].append([output])
+        discovery['data'].append(output)

-    return [config['kubernetes']['name'], "kubernetes.deployments.discovery", json.dumps(discovery)]
+    return [[config['kubernetes']['name'], "kubernetes.deployments.discovery", json.dumps(discovery)]]

 def zabbixItemsDeployments(config):
     """
diff --git a/src/modules/kubernetes/base/nodes.py b/src/modules/kubernetes/base/nodes.py
index 2babf91..56e93d5 100644
--- a/src/modules/kubernetes/base/nodes.py
+++ b/src/modules/kubernetes/base/nodes.py
@@ -56,9 +56,9 @@ def zabbixDiscoveryNodes(config):

     for node in kubernetesGetNodes(config):
         output = {"{#KUBERNETES_NODE_NAME}": node['name']}
-        discovery['data'].append([output])
+        discovery['data'].append(output)

-    return [config['kubernetes']['name'], "kubernetes.nodes.discovery", json.dumps(discovery)]
+    return [[config['kubernetes']['name'], "kubernetes.nodes.discovery", json.dumps(discovery)]]

 def zabbixItemsNodes(config):
     """
diff --git a/src/modules/kubernetes/base/statefulsets.py b/src/modules/kubernetes/base/statefulsets.py
index 8dbdc35..4c9c6ee 100644
--- a/src/modules/kubernetes/base/statefulsets.py
+++ b/src/modules/kubernetes/base/statefulsets.py
@@ -56,9 +56,9 @@ def zabbixDiscoveryStatefulsets(config):
         output = {
             "{#KUBERNETES_STATEFULSET_NAMESPACE}": statefulset['namespace'],
             "{#KUBERNETES_STATEFULSET_NAME}": statefulset['name']}
-        discovery['data'].append([output])
+        discovery['data'].append(output)

-    return [config['kubernetes']['name'], "kubernetes.statefulsets.discovery", json.dumps(discovery)]
+    return [[config['kubernetes']['name'], "kubernetes.statefulsets.discovery", json.dumps(discovery)]]

 def zabbixItemsStatefulsets(config):
     """
diff --git a/src/modules/kubernetes/base/volumes.py b/src/modules/kubernetes/base/volumes.py
index 429032d..db9542b 100644
--- a/src/modules/kubernetes/base/volumes.py
+++ b/src/modules/kubernetes/base/volumes.py
@@ -65,9 +65,9 @@ def zabbixDiscoveryVolumes(config):
         output = {
             "{#KUBERNETES_PVC_NAMESPACE}": volume['namespace'],
             "{#KUBERNETES_PVC_NAME}": volume['name']}
-        discovery['data'].append([output])
+        discovery['data'].append(output)

-    return [config['kubernetes']['name'], "kubernetes.volumes.discovery", json.dumps(discovery)]
+    return [[config['kubernetes']['name'], "kubernetes.volumes.discovery", json.dumps(discovery)]]

 def zabbixItemsVolumes(config):
     """
diff --git a/src/modules/kubernetes/openebs/cstorpoolclusters.py b/src/modules/kubernetes/openebs/cstorpoolclusters.py
index cc0433d..71ca7e8 100644
--- a/src/modules/kubernetes/openebs/cstorpoolclusters.py
+++ b/src/modules/kubernetes/openebs/cstorpoolclusters.py
@@ -58,9 +58,9 @@ def zabbixDiscoveryCstorpoolclusters(config):
         output = {
             "{#KUBERNETES_OPENEBS_CSTORPOOLCLUSTER_NAMESPACE}": cstorpoolcluster['namespace'],
             "{#KUBERNETES_OPENEBS_CSTORPOOLCLUSTER_NAME}": cstorpoolcluster['name']}
-        discovery['data'].append([output])
+        discovery['data'].append(output)

-    return [config['kubernetes']['name'], "kubernetes.openebs.cstorpoolclusters.discovery", json.dumps(discovery)]
+    return [[config['kubernetes']['name'], "kubernetes.openebs.cstorpoolclusters.discovery", json.dumps(discovery)]]

 def zabbixItemsCstorpoolclusters(config):
     """
diff --git a/src/modules/kubernetes/openebs/cstorpoolinstances.py b/src/modules/kubernetes/openebs/cstorpoolinstances.py
index 8a86dd7..e396f8e 100644
--- a/src/modules/kubernetes/openebs/cstorpoolinstances.py
+++ b/src/modules/kubernetes/openebs/cstorpoolinstances.py
@@ -54,9 +54,9 @@ def zabbixDiscoveryCstorpoolinstances(config):
         output = {
             "{#KUBERNETES_OPENEBS_CSTORPOOLINSTANCE_NAMESPACE}": cstorpoolinstance['namespace'],
             "{#KUBERNETES_OPENEBS_CSTORPOOLINSTANCE_NAME}": cstorpoolinstance['name']}
-        discovery['data'].append([output])
+        discovery['data'].append(output)

-    return [config['kubernetes']['name'], "kubernetes.openebs.cstorpoolinstances.discovery", json.dumps(discovery)]
+    return [[config['kubernetes']['name'], "kubernetes.openebs.cstorpoolinstances.discovery", json.dumps(discovery)]]

 def zabbixItemsCstorpoolinstances(config):
     """
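With this patch every zabbixDiscovery* function returns a list of [host, key, value] triples, the same shape the zabbixItems* functions already produce, so a single sender path can consume both kinds of result. A sketch of how such triples might be flushed with pyzabbix, which these modules already import; the sendToZabbix helper and the way the endpoint is looked up are assumptions for illustration, not code from this series:

    from pyzabbix import ZabbixMetric, ZabbixSender

    def sendToZabbix(endpoint, rows):
        # rows: list of [host, key, value] triples, e.g. the return
        # value of zabbixDiscoveryCronjobs(config) after this patch.
        metrics = [ZabbixMetric(host, key, value) for host, key, value in rows]
        return ZabbixSender(zabbix_server=endpoint).send(metrics)

    # usage sketch (assumed config layout):
    # sendToZabbix(config['zabbix']['endpoint'], zabbixDiscoveryCronjobs(config))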
From d7c5e31afa4a4a805475c8b4fcf160e298c34b03 Mon Sep 17 00:00:00 2001
From: djerfy
Date: Fri, 12 Jan 2024 21:01:13 +0100
Subject: [PATCH 33/34] misc: rename zabbix objects

Signed-off-by: djerfy
---
 src/modules/kubernetes/base/cronjobs.py         | 12 +++++-----
 src/modules/kubernetes/base/daemonsets.py       | 14 +++++------
 src/modules/kubernetes/base/deployments.py      | 12 +++++-----
 src/modules/kubernetes/base/nodes.py            | 24 +++++++++----------
 src/modules/kubernetes/base/statefulsets.py     | 12 +++++-----
 src/modules/kubernetes/base/volumes.py          | 18 +++++++-------
 .../kubernetes/openebs/cstorpoolclusters.py     |  4 ++--
 .../kubernetes/openebs/cstorpoolinstances.py    |  4 ++--
 8 files changed, 50 insertions(+), 50 deletions(-)

diff --git a/src/modules/kubernetes/base/cronjobs.py b/src/modules/kubernetes/base/cronjobs.py
index 79aa51b..4f365a9 100644
--- a/src/modules/kubernetes/base/cronjobs.py
+++ b/src/modules/kubernetes/base/cronjobs.py
@@ -88,11 +88,11 @@ def zabbixDiscoveryCronjobs(config):

     for cronjob in kubernetesGetCronjobs(config):
         output = {
-            "{#KUBERNETES_CRONJOB_NAMESPACE}": cronjob['namespace'],
-            "{#KUBERNETES_CRONJOB_NAME}": cronjob['name']}
+            "{#KUBERNETES_BASE_CRONJOBS_NAMESPACE}": cronjob['namespace'],
+            "{#KUBERNETES_BASE_CRONJOBS_NAME}": cronjob['name']}
         discovery['data'].append(output)

-    return [[config['kubernetes']['name'], "kubernetes.cronjobs.discovery", json.dumps(discovery)]]
+    return [[config['kubernetes']['name'], "kubernetes.base.cronjobs.discovery", json.dumps(discovery)]]

 def zabbixItemsCronjobs(config):
     """
@@ -102,8 +102,8 @@ def zabbixItemsCronjobs(config):
     items = []

     for cronjob in kubernetesGetCronjobs(config):
-        items.append([config['kubernetes']['name'], f"kubernetes.cronjob.status[{cronjob['namespace']},{cronjob['name']}]", cronjob['status']])
-        items.append([config['kubernetes']['name'], f"kubernetes.cronjob.reason[{cronjob['namespace']},{cronjob['name']}]", cronjob['last_job']['reason']])
-        items.append([config['kubernetes']['name'], f"kubernetes.cronjob.message[{cronjob['namespace']},{cronjob['name']}]", cronjob['last_job']['message']])
+        items.append([config['kubernetes']['name'], f"kubernetes.base.cronjobs.status[{cronjob['namespace']},{cronjob['name']}]", cronjob['status']])
+        items.append([config['kubernetes']['name'], f"kubernetes.base.cronjobs.reason[{cronjob['namespace']},{cronjob['name']}]", cronjob['last_job']['reason']])
+        items.append([config['kubernetes']['name'], f"kubernetes.base.cronjobs.message[{cronjob['namespace']},{cronjob['name']}]", cronjob['last_job']['message']])

     return items
diff --git a/src/modules/kubernetes/base/daemonsets.py b/src/modules/kubernetes/base/daemonsets.py
index d4ec556..b61f20d 100644
--- a/src/modules/kubernetes/base/daemonsets.py
+++ b/src/modules/kubernetes/base/daemonsets.py
@@ -55,11 +55,11 @@ def zabbixDiscoveryDaemonsets(config):

     for daemonset in kubernetesGetDaemonsets(config):
         output = {
-            "{#KUBERNETES_DAEMONSET_NAMESPACE}": daemonset['namespace'],
-            "{#KUBERNETES_DAEMONSET_NAME}": daemonset['name']}
+            "{#KUBERNETES_BASE_DAEMONSETS_NAMESPACE}": daemonset['namespace'],
+            "{#KUBERNETES_BASE_DAEMONSETS_NAME}": daemonset['name']}
         discovery['data'].append(output)

-    return [[config['kubernetes']['name'], "kubernetes.daemonsets.discovery", json.dumps(discovery)]]
+    return [[config['kubernetes']['name'], "kubernetes.base.daemonsets.discovery", json.dumps(discovery)]]

 def zabbixItemsDaemonsets(config):
     """
@@ -69,9 +69,9 @@ def zabbixItemsDaemonsets(config):
     items = []

     for daemonset in kubernetesGetDaemonsets(config):
-        items.append([config['kubernetes']['name'], f"kubernetes.daemonset.desiredReplicas[{daemonset['namespace']},{daemonset['name']}]", daemonset['replicas']['desired']])
-        items.append([config['kubernetes']['name'], f"kubernetes.daemonset.currentReplicas[{daemonset['namespace']},{daemonset['name']}]", daemonset['replicas']['current']])
-        items.append([config['kubernetes']['name'], f"kubernetes.daemonset.availableReplicas[{daemonset['namespace']},{daemonset['name']}]", daemonset['replicas']['available']])
-        items.append([config['kubernetes']['name'], f"kubernetes.daemonset.readyReplicas[{daemonset['namespace']},{daemonset['name']}]", daemonset['replicas']['ready']])
+        items.append([config['kubernetes']['name'], f"kubernetes.base.daemonsets.desiredReplicas[{daemonset['namespace']},{daemonset['name']}]", daemonset['replicas']['desired']])
+        items.append([config['kubernetes']['name'], f"kubernetes.base.daemonsets.currentReplicas[{daemonset['namespace']},{daemonset['name']}]", daemonset['replicas']['current']])
+        items.append([config['kubernetes']['name'], f"kubernetes.base.daemonsets.availableReplicas[{daemonset['namespace']},{daemonset['name']}]", daemonset['replicas']['available']])
+        items.append([config['kubernetes']['name'], f"kubernetes.base.daemonsets.readyReplicas[{daemonset['namespace']},{daemonset['name']}]", daemonset['replicas']['ready']])

     return items
diff --git a/src/modules/kubernetes/base/deployments.py b/src/modules/kubernetes/base/deployments.py
index b693c4d..8933250 100644
--- a/src/modules/kubernetes/base/deployments.py
+++ b/src/modules/kubernetes/base/deployments.py
@@ -54,11 +54,11 @@ def zabbixDiscoveryDeployments(config):

     for deployment in kubernetesGetDeployments(config):
         output = {
-            "{#KUBERNETES_DEPLOYMENT_NAMESPACE}": deployment['namespace'],
-            "{#KUBERNETES_DEPLOYMENT_NAME}": deployment['name']}
+            "{#KUBERNETES_BASE_DEPLOYMENTS_NAMESPACE}": deployment['namespace'],
+            "{#KUBERNETES_BASE_DEPLOYMENTS_NAME}": deployment['name']}
         discovery['data'].append(output)

-    return [[config['kubernetes']['name'], "kubernetes.deployments.discovery", json.dumps(discovery)]]
+    return [[config['kubernetes']['name'], "kubernetes.base.deployments.discovery", json.dumps(discovery)]]

 def zabbixItemsDeployments(config):
     """
@@ -68,8 +68,8 @@ def zabbixItemsDeployments(config):
     items = []

     for deployment in kubernetesGetDeployments(config):
-        items.append([config['kubernetes']['name'], f"kubernetes.deployment.availableReplicas[{deployment['namespace']},{deployment['name']}]", deployment['replicas']['available']])
-        items.append([config['kubernetes']['name'], f"kubernetes.deployment.readyReplicas[{deployment['namespace']},{deployment['name']}]", deployment['replicas']['ready']])
-        items.append([config['kubernetes']['name'], f"kubernetes.deployment.desiredReplicas[{deployment['namespace']},{deployment['name']}]", deployment['replicas']['desired']])
+        items.append([config['kubernetes']['name'], f"kubernetes.base.deployments.availableReplicas[{deployment['namespace']},{deployment['name']}]", deployment['replicas']['available']])
+        items.append([config['kubernetes']['name'], f"kubernetes.base.deployments.readyReplicas[{deployment['namespace']},{deployment['name']}]", deployment['replicas']['ready']])
+        items.append([config['kubernetes']['name'], f"kubernetes.base.deployments.desiredReplicas[{deployment['namespace']},{deployment['name']}]", deployment['replicas']['desired']])

     return items
diff --git a/src/modules/kubernetes/base/nodes.py b/src/modules/kubernetes/base/nodes.py
index 56e93d5..33a9994 100644
--- a/src/modules/kubernetes/base/nodes.py
+++ b/src/modules/kubernetes/base/nodes.py
@@ -55,10 +55,10 @@ def zabbixDiscoveryNodes(config):
     discovery = {"data":[]}

     for node in kubernetesGetNodes(config):
-        output = {"{#KUBERNETES_NODE_NAME}": node['name']}
+        output = {"{#KUBERNETES_BASE_NODES_NAME}": node['name']}
         discovery['data'].append(output)

-    return [[config['kubernetes']['name'], "kubernetes.nodes.discovery", json.dumps(discovery)]]
+    return [[config['kubernetes']['name'], "kubernetes.base.nodes.discovery", json.dumps(discovery)]]

 def zabbixItemsNodes(config):
     """
@@ -68,15 +68,15 @@ def zabbixItemsNodes(config):
     items = []

     for node in kubernetesGetNodes(config):
-        items.append([config['kubernetes']['name'], f"kubernetes.node.healthz[{node['name']}]", node['status']])
-        items.append([config['kubernetes']['name'], f"kubernetes.node.capacity.cpu[{node['name']}]", node['capacity']['cpu']])
-        items.append([config['kubernetes']['name'], f"kubernetes.node.capacity.memory[{node['name']}]", node['capacity']['memory']])
-        items.append([config['kubernetes']['name'], f"kubernetes.node.capacity.pods[{node['name']}]", node['capacity']['pods']])
-        items.append([config['kubernetes']['name'], f"kubernetes.node.allocatable.cpu[{node['name']}]", node['allocatable']['cpu']])
-        items.append([config['kubernetes']['name'], f"kubernetes.node.allocatable.memory[{node['name']}]", node['allocatable']['memory']])
-        items.append([config['kubernetes']['name'], f"kubernetes.node.allocatable.pods[{node['name']}]", node['allocatable']['pods']])
-        items.append([config['kubernetes']['name'], f"kubernetes.node.current.pods[{node['name']}]", node['current']['pods']])
-        items.append([config['kubernetes']['name'], f"kubernetes.node.current.podsUsed[{node['name']}]", node['current']['pods_used']])
-        items.append([config['kubernetes']['name'], f"kubernetes.node.current.podsFree[{node['name']}]", node['current']['pods_free']])
+        items.append([config['kubernetes']['name'], f"kubernetes.base.nodes.healthz[{node['name']}]", node['status']])
+        items.append([config['kubernetes']['name'], f"kubernetes.base.nodes.capacity.cpu[{node['name']}]", node['capacity']['cpu']])
+        items.append([config['kubernetes']['name'], f"kubernetes.base.nodes.capacity.memory[{node['name']}]", node['capacity']['memory']])
+        items.append([config['kubernetes']['name'], f"kubernetes.base.nodes.capacity.pods[{node['name']}]", node['capacity']['pods']])
+        items.append([config['kubernetes']['name'], f"kubernetes.base.nodes.allocatable.cpu[{node['name']}]", node['allocatable']['cpu']])
+        items.append([config['kubernetes']['name'], f"kubernetes.base.nodes.allocatable.memory[{node['name']}]", node['allocatable']['memory']])
+        items.append([config['kubernetes']['name'], f"kubernetes.base.nodes.allocatable.pods[{node['name']}]", node['allocatable']['pods']])
+        items.append([config['kubernetes']['name'], f"kubernetes.base.nodes.current.pods[{node['name']}]", node['current']['pods']])
+        items.append([config['kubernetes']['name'], f"kubernetes.base.nodes.current.podsUsed[{node['name']}]", node['current']['pods_used']])
+        items.append([config['kubernetes']['name'], f"kubernetes.base.nodes.current.podsFree[{node['name']}]", node['current']['pods_free']])

     return items
diff --git a/src/modules/kubernetes/base/statefulsets.py b/src/modules/kubernetes/base/statefulsets.py
index 4c9c6ee..2f3b86e 100644
--- a/src/modules/kubernetes/base/statefulsets.py
+++ b/src/modules/kubernetes/base/statefulsets.py
@@ -54,11 +54,11 @@ def zabbixDiscoveryStatefulsets(config):

     for statefulset in kubernetesGetStatefulsets(config):
         output = {
-            "{#KUBERNETES_STATEFULSET_NAMESPACE}": statefulset['namespace'],
-            "{#KUBERNETES_STATEFULSET_NAME}": statefulset['name']}
+            "{#KUBERNETES_BASE_STATEFULSETS_NAMESPACE}": statefulset['namespace'],
+            "{#KUBERNETES_BASE_STATEFULSETS_NAME}": statefulset['name']}
         discovery['data'].append(output)

-    return [[config['kubernetes']['name'], "kubernetes.statefulsets.discovery", json.dumps(discovery)]]
+    return [[config['kubernetes']['name'], "kubernetes.base.statefulsets.discovery", json.dumps(discovery)]]

 def zabbixItemsStatefulsets(config):
     """
@@ -68,8 +68,8 @@ def zabbixItemsStatefulsets(config):
     items = []

     for statefulset in kubernetesGetStatefulsets(config):
-        items.append([config['kubernetes']['name'], f"kubernetes.statefulset.availableReplicas[{statefulset['namespace']},{statefulset['name']}]", statefulset['replicas']['available']])
-        items.append([config['kubernetes']['name'], f"kubernetes.statefulset.readyReplicas[{statefulset['namespace']},{statefulset['name']}]", statefulset['replicas']['ready']])
-        items.append([config['kubernetes']['name'], f"kubernetes.statefulset.desiredReplicas[{statefulset['namespace']},{statefulset['name']}]", statefulset['replicas']['desired']])
+        items.append([config['kubernetes']['name'], f"kubernetes.base.statefulsets.availableReplicas[{statefulset['namespace']},{statefulset['name']}]", statefulset['replicas']['available']])
+        items.append([config['kubernetes']['name'], f"kubernetes.base.statefulsets.readyReplicas[{statefulset['namespace']},{statefulset['name']}]", statefulset['replicas']['ready']])
+        items.append([config['kubernetes']['name'], f"kubernetes.base.statefulsets.desiredReplicas[{statefulset['namespace']},{statefulset['name']}]", statefulset['replicas']['desired']])

     return items
diff --git a/src/modules/kubernetes/base/volumes.py b/src/modules/kubernetes/base/volumes.py
index db9542b..b4fbcfa 100644
--- a/src/modules/kubernetes/base/volumes.py
+++ b/src/modules/kubernetes/base/volumes.py
@@ -63,11 +63,11 @@ def zabbixDiscoveryVolumes(config):

     for volume in kubernetesGetVolumes(config):
         output = {
-            "{#KUBERNETES_PVC_NAMESPACE}": volume['namespace'],
-            "{#KUBERNETES_PVC_NAME}": volume['name']}
+            "{#KUBERNETES_BASE_VOLUMES_NAMESPACE}": volume['namespace'],
+            "{#KUBERNETES_BASE_VOLUMES_NAME}": volume['name']}
         discovery['data'].append(output)

-    return [[config['kubernetes']['name'], "kubernetes.volumes.discovery", json.dumps(discovery)]]
+    return [[config['kubernetes']['name'], "kubernetes.base.volumes.discovery", json.dumps(discovery)]]

 def zabbixItemsVolumes(config):
     """
@@ -77,11 +77,11 @@ def zabbixItemsVolumes(config):
     items = []

     for volume in kubernetesGetVolumes(config):
-        items.append([config['kubernetes']['name'], f"kubernetes.volumeclaim.availableBytes[{volume['namespace']},{volume['name']}]", volume['availableBytes']])
-        items.append([config['kubernetes']['name'], f"kubernetes.volumeclaim.capacityBytes[{volume['namespace']},{volume['name']}]", volume['capacityBytes']])
-        items.append([config['kubernetes']['name'], f"kubernetes.volumeclaim.usedBytes[{volume['namespace']},{volume['name']}]", volume['usedBytes']])
-        items.append([config['kubernetes']['name'], f"kubernetes.volumeclaim.inodesFree[{volume['namespace']},{volume['name']}]", volume['inodesFree']])
-        items.append([config['kubernetes']['name'], f"kubernetes.volumeclaim.inodes[{volume['namespace']},{volume['name']}]", volume['inodes']])
-        items.append([config['kubernetes']['name'], f"kubernetes.volumeclaim.inodesUsed[{volume['namespace']},{volume['name']}]", volume['inodesUsed']])
+        items.append([config['kubernetes']['name'], f"kubernetes.base.volumes.availableBytes[{volume['namespace']},{volume['name']}]", volume['availableBytes']])
+        items.append([config['kubernetes']['name'], f"kubernetes.base.volumes.capacityBytes[{volume['namespace']},{volume['name']}]", volume['capacityBytes']])
+        items.append([config['kubernetes']['name'], f"kubernetes.base.volumes.usedBytes[{volume['namespace']},{volume['name']}]", volume['usedBytes']])
+        items.append([config['kubernetes']['name'], f"kubernetes.base.volumes.inodesFree[{volume['namespace']},{volume['name']}]", volume['inodesFree']])
+        items.append([config['kubernetes']['name'], f"kubernetes.base.volumes.inodes[{volume['namespace']},{volume['name']}]", volume['inodes']])
+        items.append([config['kubernetes']['name'], f"kubernetes.base.volumes.inodesUsed[{volume['namespace']},{volume['name']}]", volume['inodesUsed']])

     return items
diff --git a/src/modules/kubernetes/openebs/cstorpoolclusters.py b/src/modules/kubernetes/openebs/cstorpoolclusters.py
index 71ca7e8..db90b07 100644
--- a/src/modules/kubernetes/openebs/cstorpoolclusters.py
+++ b/src/modules/kubernetes/openebs/cstorpoolclusters.py
@@ -56,8 +56,8 @@ def zabbixDiscoveryCstorpoolclusters(config):

     for cstorpoolcluster in openebsGetCstorpoolclusters(config):
         output = {
-            "{#KUBERNETES_OPENEBS_CSTORPOOLCLUSTER_NAMESPACE}": cstorpoolcluster['namespace'],
-            "{#KUBERNETES_OPENEBS_CSTORPOOLCLUSTER_NAME}": cstorpoolcluster['name']}
+            "{#KUBERNETES_OPENEBS_CSTORPOOLCLUSTERS_NAMESPACE}": cstorpoolcluster['namespace'],
+            "{#KUBERNETES_OPENEBS_CSTORPOOLCLUSTERS_NAME}": cstorpoolcluster['name']}
         discovery['data'].append(output)

     return [[config['kubernetes']['name'], "kubernetes.openebs.cstorpoolclusters.discovery", json.dumps(discovery)]]
diff --git a/src/modules/kubernetes/openebs/cstorpoolinstances.py b/src/modules/kubernetes/openebs/cstorpoolinstances.py
index e396f8e..5459d42 100644
--- a/src/modules/kubernetes/openebs/cstorpoolinstances.py
+++ b/src/modules/kubernetes/openebs/cstorpoolinstances.py
@@ -52,8 +52,8 @@ def zabbixDiscoveryCstorpoolinstances(config):

     for cstorpoolinstance in openebsGetCstorpoolinstances(config):
         output = {
-            "{#KUBERNETES_OPENEBS_CSTORPOOLINSTANCE_NAMESPACE}": cstorpoolinstance['namespace'],
-            "{#KUBERNETES_OPENEBS_CSTORPOOLINSTANCE_NAME}": cstorpoolinstance['name']}
+            "{#KUBERNETES_OPENEBS_CSTORPOOLINSTANCES_NAMESPACE}": cstorpoolinstance['namespace'],
+            "{#KUBERNETES_OPENEBS_CSTORPOOLINSTANCES_NAME}": cstorpoolinstance['name']}
         discovery['data'].append(output)

     return [[config['kubernetes']['name'], "kubernetes.openebs.cstorpoolinstances.discovery", json.dumps(discovery)]]
From c3ed0cc535ee2d02dd346a1aa25d6c2e4ce85c6a Mon Sep 17 00:00:00 2001
From: djerfy
Date: Sat, 13 Jan 2024 11:16:06 +0100
Subject: [PATCH 34/34] fix: objects exception if not exists

Signed-off-by: djerfy
---
 src/modules/kubernetes/openebs/cstorpoolclusters.py  | 7 ++++++-
 src/modules/kubernetes/openebs/cstorpoolinstances.py | 7 ++++++-
 src/modules/kubernetes/trivy/vulnerabilityreports.py | 7 ++++++-
 3 files changed, 18 insertions(+), 3 deletions(-)

diff --git a/src/modules/kubernetes/openebs/cstorpoolclusters.py b/src/modules/kubernetes/openebs/cstorpoolclusters.py
index db90b07..773be3e 100644
--- a/src/modules/kubernetes/openebs/cstorpoolclusters.py
+++ b/src/modules/kubernetes/openebs/cstorpoolclusters.py
@@ -17,7 +17,12 @@ def openebsGetCstorpoolclusters(config):
     if config['monitoring']['openebs']['engine'] != "cstor":
         return cstorpoolclusters

-    for cstorpoolcluster in rawObjects(kubernetes.list_cluster_custom_object(group="cstor.openebs.io", version="v1", plural="cstorpoolclusters")):
+    try:
+        objects = kubernetes.list_cluster_custom_object(group="cstor.openebs.io", version="v1", plural="cstorpoolclusters")
+    except Exception:
+        return cstorpoolclusters
+
+    for cstorpoolcluster in rawObjects(objects):
         json = {
             "name": cstorpoolcluster['metadata']['name'],
             "namespace": cstorpoolcluster['metadata']['namespace'],
diff --git a/src/modules/kubernetes/openebs/cstorpoolinstances.py b/src/modules/kubernetes/openebs/cstorpoolinstances.py
index 5459d42..4395668 100644
--- a/src/modules/kubernetes/openebs/cstorpoolinstances.py
+++ b/src/modules/kubernetes/openebs/cstorpoolinstances.py
@@ -16,8 +16,13 @@ def openebsGetCstorpoolinstances(config):

     if config['monitoring']['openebs']['engine'] != "cstor":
         return cstorpoolinstances
+
+    try:
+        objects = kubernetes.list_cluster_custom_object(group="cstor.openebs.io", version="v1", plural="cstorpoolinstances")
+    except Exception:
+        return cstorpoolinstances

-    for cstorpoolinstance in rawObjects(kubernetes.list_cluster_custom_object(group="cstor.openebs.io", version="v1", plural="cstorpoolinstances")):
+    for cstorpoolinstance in rawObjects(objects):
         json = {
             "name": cstorpoolinstance['metadata']['name'],
             "namespace": cstorpoolinstance['metadata']['namespace'],
diff --git a/src/modules/kubernetes/trivy/vulnerabilityreports.py b/src/modules/kubernetes/trivy/vulnerabilityreports.py
index d3b06d5..92feaa3 100644
--- a/src/modules/kubernetes/trivy/vulnerabilityreports.py
+++ b/src/modules/kubernetes/trivy/vulnerabilityreports.py
@@ -14,6 +14,11 @@ def trivyGetVulnerabilityreports(config=None):

     reports = []

-    for vuln in rawObjects(kubernetes.list_cluster_custom_object(group="aquasecurity.github.io", version="v1alpha1", plural="vulnerabilityreports")):
+    try:
+        objects = kubernetes.list_cluster_custom_object(group="aquasecurity.github.io", version="v1alpha1", plural="vulnerabilityreports")
+    except Exception:
+        return reports
+
+    for vuln in rawObjects(objects):
         print(vuln['metadata']['name'])
         print(vuln['report']['summary'])
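The guard above returns an empty list whenever the custom resource definition is absent from the cluster, for example when OpenEBS or Trivy is not installed, instead of letting list_cluster_custom_object raise and abort the whole collection run. The patch catches the broad Exception; a narrower variant that only swallows the API's "not found" error might look like the following sketch (an assumption for illustration, not code from this series):

    from kubernetes.client.rest import ApiException

    try:
        objects = kubernetes.list_cluster_custom_object(
            group="cstor.openebs.io", version="v1", plural="cstorpoolclusters")
    except ApiException as error:
        if error.status == 404:  # CRD not registered in this cluster
            return cstorpoolclusters
        raise  # propagate auth errors, timeouts, and other real failures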