diff --git a/Dockerfile b/Dockerfile index 5e485b5..60c3465 100644 --- a/Dockerfile +++ b/Dockerfile @@ -6,9 +6,6 @@ LABEL description="Zabbix Kubernetes Discovery" \ WORKDIR /app -ENV ZABBIX_ENDPOINT="" -ENV KUBERNETES_NAME="" - ARG CONTAINER_USER="zabbix" ARG CONTAINER_GROUP="zabbix" @@ -17,18 +14,9 @@ RUN apt-get update && \ apt-get install -y --no-install-recommends curl iputils-ping python3 python3-pip && \ rm -rf /var/lib/apt/lists && \ mkdir -p /app /root/.kube && \ - touch /app/crontab && \ groupadd -g 2000 ${CONTAINER_GROUP} && \ useradd -u 2000 -d /app -s /bin/bash -M -g ${CONTAINER_GROUP} ${CONTAINER_USER} -ARG SUPERCRONIC_VER="0.2.28" -ARG SUPERCRONIC_SHA="fe1a81a8a5809deebebbd7a209a3b97e542e2bcd" - -RUN curl -fsSLO "https://github.com/aptible/supercronic/releases/download/v${SUPERCRONIC_VER}/supercronic-linux-amd64" && \ - echo "${SUPERCRONIC_SHA} supercronic-linux-amd64" | sha1sum -c - && \ - chmod +x supercronic-linux-amd64 && \ - mv supercronic-linux-amd64 /usr/local/bin/supercronic - COPY ./src/ /app/ RUN chown ${CONTAINER_USER}:${CONTAINER_GROUP} -R /app && \ @@ -37,4 +25,4 @@ RUN chown ${CONTAINER_USER}:${CONTAINER_GROUP} -R /app && \ USER ${CONTAINER_USER}:${CONTAINER_GROUP} -CMD ["/usr/local/bin/supercronic", "-split-logs", "-json", "/app/crontab"] +CMD ["/usr/bin/python3", "/app/zabbix-kubernetes-discovery.py"] diff --git a/src/config.yaml b/src/config.yaml new file mode 100644 index 0000000..ae7a59a --- /dev/null +++ b/src/config.yaml @@ -0,0 +1,75 @@ +output: + level: INFO + +kubernetes: + name: localhost + +zabbix: + endpoint: localhost + timeout: 10 + schedule: + discovery: 3600 + items: 3 + +monitoring: + # base + nodes: + enabled: True + labels: + include: [] + exclude: [] + daemonsets: + enabled: False + labels: + include: [] + exclude: [] + volumes: + enabled: False + labels: + include: [] + exclude: [] + deployments: + enabled: False + labels: + include: [] + exclude: [] + statefulsets: + enabled: False + labels: + include: [] + exclude: [] + cronjobs: + enabled: False + labels: + include: [] + exclude: [] + ingresses: + enabled: False + labels: + include: [] + exclude: [] + # openebs + openebs: + enabled: False + engine: cstor + labels: + include: [] + exclude: [] + # velero + velero: + enabled: False + labels: + include: [] + exclude: [] + # trivy + trivy: + enabled: False + labels: + include: [] + exclude: [] + # certificates + certs: + enabled: False + labels: + include: [] + exclude: [] diff --git a/src/modules/common/functions.py b/src/modules/common/functions.py index 5759502..6080e5e 100644 --- a/src/modules/common/functions.py +++ b/src/modules/common/functions.py @@ -1,48 +1,33 @@ import re import json -def ifObjectMatch(object_list=None, object_name=None): +def matchLabels(match_labels=None, object_labels=None): """ - description: check if the object is in list + description: check if the object match labels return: bool """ - if object_list is None or object_list == "" or object_list == "*": - return False + for i in [match_labels, object_labels]: + if i is None or i == [] or i == "" or i == "*": + return False - if object_name is None or object_name == "" or object_name == "*": - return False - - if type(object_list) == str: - object_list = object_list.split(",") - - if type(object_list) != list: - return False - - reg_list = map(re.compile, object_list) + object_labels = str(object_labels).replace("{", "").replace("}", "").replace("'", "").replace(" ", "").split(",") - if any(reg.match(object_name) for reg in reg_list): - return True + for 
object_label in object_labels:
+        # each entry of the flattened dict is "key:value"
+        key, value = object_label.split(":", 1)
+
+        for match_label in match_labels:
+            for separator in ["=", ":"]:
+                # skip separators absent from the filter to avoid an IndexError on [1]
+                if separator not in match_label:
+                    continue
+                if match_label.split(separator, 1)[0] == key and match_label.split(separator, 1)[1] == value:
+                    return True
 
     return False
 
-def ifLabelMatch(match_label=None, object_labels=None):
+def rawObjects(data=None):
     """
-    description: check if the object match a label
-    return: bool
+    description: return the "items" list from a raw API response
+    return: list
     """
-    if match_label is None or match_label == "" or match_label == "*":
-        return False
-
-    if object_labels is None or object_labels == "" or object_labels == "*":
-        return False
-
-    object_labels = str(object_labels).replace("{", "").replace("}", "").replace("'", "").replace(" ", "").split(",")
-
-    for label in object_labels:
-        k, v = label.split(":")[0], label.split(":")[1]
-
-        for separator in ["=", ":"]:
-            if match_label.split(separator)[0] == k and match_label.split(separator)[1] == v:
-                return True
-
-    return False
+    if not data:
+        return []
+
+    for key, value in data.items():
+        if key == "items":
+            return value
+
+    return []
diff --git a/src/modules/kubernetes/__init__.py b/src/modules/kubernetes/__init__.py
deleted file mode 100644
index e69de29..0000000
diff --git a/src/modules/kubernetes/base/__init__.py b/src/modules/kubernetes/base/__init__.py
new file mode 100644
index 0000000..460bf46
--- /dev/null
+++ b/src/modules/kubernetes/base/__init__.py
@@ -0,0 +1,6 @@
+from modules.kubernetes.base.cronjobs import zabbixDiscoveryCronjobs, zabbixItemsCronjobs
+from modules.kubernetes.base.daemonsets import zabbixDiscoveryDaemonsets, zabbixItemsDaemonsets
+from modules.kubernetes.base.deployments import zabbixDiscoveryDeployments, zabbixItemsDeployments
+from modules.kubernetes.base.nodes import zabbixDiscoveryNodes, zabbixItemsNodes
+from modules.kubernetes.base.statefulsets import zabbixDiscoveryStatefulsets, zabbixItemsStatefulsets
+from modules.kubernetes.base.volumes import zabbixDiscoveryVolumes, zabbixItemsVolumes
diff --git a/src/modules/kubernetes/base/cronjobs.py b/src/modules/kubernetes/base/cronjobs.py
new file mode 100644
index 0000000..4f365a9
--- /dev/null
+++ b/src/modules/kubernetes/base/cronjobs.py
@@ -0,0 +1,109 @@
+from kubernetes import client
+from datetime import datetime
+from modules.common.functions import *
+import json, urllib3, logging
+
+urllib3.disable_warnings()
+logging = logging.getLogger("kubernetes.base.cronjobs")
+
+def kubernetesGetCronjobs(config):
+    """
+    description: get cronjobs data
+    return: list
+    """
+    kubernetes = client.BatchV1Api()
+
+    cronjobs = []
+
+    for cronjob in kubernetes.list_cron_job_for_all_namespaces().items:
+        related_jobs, job_latest = [], {}
+
+        for job in kubernetes.list_job_for_all_namespaces().items:
+            if not job:
+                continue
+
+            if not job.metadata.owner_references:
+                continue
+
+            if "CronJob" not in job.metadata.owner_references[0].kind:
+                continue
+
+            if job.metadata.owner_references[0].name != cronjob.metadata.name:
+                continue
+
+            if job.status.active is not None:
+                continue
+
+            # jobs without conditions cannot be compared by probe time below
+            if not job.status.conditions:
+                continue
+
+            related_jobs.append(job)
+
+        for related_job in related_jobs:
+            if not bool(job_latest):
+                job_latest = related_job
+                continue
+
+            related_job_dt = datetime.timestamp(related_job.status.conditions[0].last_probe_time)
+            job_latest_dt = datetime.timestamp(job_latest.status.conditions[0].last_probe_time)
+
+            if related_job_dt > job_latest_dt:
+                job_latest = related_job
+
+        if type(job_latest) is dict:
+            continue
+
+        if job_latest.status.conditions[0].type 
== "Complete": + cronjob_status = "0" + else: + cronjob_status = "1" + + json = { + "name": cronjob.metadata.name, + "namespace": cronjob.metadata.namespace, + "status": cronjob_status, + "last_job": { + "name": job_latest.metadata.name, + "reason": job_latest.status.conditions[0].reason, + "message": job_latest.status.conditions[0].message, + "status": job_latest.status.conditions[0].type + } + } + + if hasattr(cronjob, 'metadata'): + if hasattr(cronjob.metadata, 'labels'): + if matchLabels(config['monitoring']['cronjobs']['labels']['exclude'], cronjob.metadata.labels): + continue + if config['monitoring']['cronjobs']['labels']['include'] != []: + if not matchLabels(config['monitoring']['cronjobs']['labels']['include'], cronjob.metadata.labels): + continue + + cronjobs.append(json) + + return cronjobs + +def zabbixDiscoveryCronjobs(config): + """ + description: create a discovery for cronjob, per namespace + return: dict + """ + discovery = {"data":[]} + + for cronjob in kubernetesGetCronjobs(config): + output = { + "{#KUBERNETES_BASE_CRONJOBS_NAMESPACE}": cronjob['namespace'], + "{#KUBERNETES_BASE_CRONJOBS_NAME}": cronjob['name']} + discovery['data'].append(output) + + return [[config['kubernetes']['name'], "kubernetes.base.cronjobs.discovery", json.dumps(discovery)]] + +def zabbixItemsCronjobs(config): + """ + description: create a item for cronjob, per namespace + return: list + """ + items = [] + + for cronjob in kubernetesGetCronjobs(config): + items.append([config['kubernetes']['name'], f"kubernetes.base.cronjobs.status[{cronjob['namespace']},{cronjob['name']}]", cronjob['status']]) + items.append([config['kubernetes']['name'], f"kubernetes.base.cronjobs.reason[{cronjob['namespace']},{cronjob['name']}]", cronjob['last_job']['reason']]) + items.append([config['kubernetes']['name'], f"kubernetes.base.cronjobs.message[{cronjob['namespace']},{cronjob['name']}]", cronjob['last_job']['message']]) + + return items diff --git a/src/modules/kubernetes/base/daemonsets.py b/src/modules/kubernetes/base/daemonsets.py new file mode 100644 index 0000000..b61f20d --- /dev/null +++ b/src/modules/kubernetes/base/daemonsets.py @@ -0,0 +1,77 @@ +from kubernetes import client +from modules.common.functions import * +import json, urllib3, logging + +urllib3.disable_warnings() +logging = logging.getLogger("kubernetes.base.daemonsets") + +def kubernetesGetDaemonsets(config): + """ + description: get daemonsets data + return: list + """ + kubernetes = client.AppsV1Api() + + daemonsets = [] + + for daemonset in kubernetes.list_daemon_set_for_all_namespaces().items: + + json = { + "name": daemonset.metadata.name, + "namespace": daemonset.metadata.namespace, + "replicas": { + "desired": daemonset.status.desired_number_scheduled, + "current": daemonset.status.current_number_scheduled, + "available": daemonset.status.number_available, + "ready": daemonset.status.number_ready + } + } + + for i in ["desired", "current", "available", "ready"]: + if json['replicas'][i] is None: + json['replicas'][i] = 0 + + if hasattr(daemonset, 'metadata'): + if hasattr(daemonset.metadata, 'labels'): + if matchLabels(config['monitoring']['daemonsets']['labels']['exclude'], daemonset.metadata.labels): + continue + if config['monitoring']['daemonsets']['labels']['include'] != []: + if not matchLabels(config['monitoring']['daemonsets']['labels']['include'], daemonset.metadata.labels): + continue + + if any(d['name'] == json['name'] and d['namespace'] == json['namespace'] for d in daemonsets): + continue + + daemonsets.append(json) + + 
return daemonsets + +def zabbixDiscoveryDaemonsets(config): + """ + description: create a discovery for daemonset, per namespace + return: dict + """ + discovery = {"data":[]} + + for daemonset in kubernetesGetDaemonsets(config): + output = { + "{#KUBERNETES_BASE_DAEMONSETS_NAMESPACE}": daemonset['namespace'], + "{#KUBERNETES_BASE_DAEMONSETS_NAME}": daemonset['name']} + discovery['data'].append(output) + + return [[config['kubernetes']['name'], "kubernetes.base.daemonsets.discovery", json.dumps(discovery)]] + +def zabbixItemsDaemonsets(config): + """ + description: create a item for daemonset, per namespace + return: list + """ + items = [] + + for daemonset in kubernetesGetDaemonsets(config): + items.append([config['kubernetes']['name'], f"kubernetes.base.daemonsets.desiredReplicas[{daemonset['namespace']},{daemonset['name']}]", daemonset['replicas']['desired']]) + items.append([config['kubernetes']['name'], f"kubernetes.base.daemonsets.currentReplicas[{daemonset['namespace']},{daemonset['name']}]", daemonset['replicas']['current']]) + items.append([config['kubernetes']['name'], f"kubernetes.base.daemonsets.availableReplicas[{daemonset['namespace']},{daemonset['name']}]", daemonset['replicas']['available']]) + items.append([config['kubernetes']['name'], f"kubernetes.base.daemonsets.readyReplicas[{daemonset['namespace']},{daemonset['name']}]", daemonset['replicas']['ready']]) + + return items diff --git a/src/modules/kubernetes/base/deployments.py b/src/modules/kubernetes/base/deployments.py new file mode 100644 index 0000000..8933250 --- /dev/null +++ b/src/modules/kubernetes/base/deployments.py @@ -0,0 +1,75 @@ +from kubernetes import client +from modules.common.functions import * +import json, urllib3, logging + +urllib3.disable_warnings() +logging = logging.getLogger("kubernetes.base.deployments") + +def kubernetesGetDeployments(config): + """ + description: get deployments data + return: list + """ + kubernetes = client.AppsV1Api() + + deployments = [] + + for deployment in kubernetes.list_deployment_for_all_namespaces().items: + + json = { + "name": deployment.metadata.name, + "namespace": deployment.metadata.namespace, + "replicas": { + "desired": deployment.status.replicas, + "ready": deployment.status.ready_replicas, + "available": deployment.status.available_replicas + } + } + + if hasattr(deployment, 'metadata'): + if hasattr(deployment.metadata, 'labels'): + if matchLabels(config['monitoring']['deployments']['labels']['exclude'], deployment.metadata.labels): + continue + if config['monitoring']['deployments']['labels']['include'] != []: + if not matchLabels(config['monitoring']['deployments']['labels']['include'], deployment.metadata.labels): + continue + + for i in ["desired", "ready", "available"]: + if json['replicas'][i] is None: + json['replicas'][i] = 0 + + if any(d['name'] == json['name'] and d['namespace'] == json['namespace'] for d in deployments): + continue + + deployments.append(json) + + return deployments + +def zabbixDiscoveryDeployments(config): + """ + description: create a discovery for deployment, per namespace + return: dict + """ + discovery = {"data":[]} + + for deployment in kubernetesGetDeployments(config): + output = { + "{#KUBERNETES_BASE_DEPLOYMENTS_NAMESPACE}": deployment['namespace'], + "{#KUBERNETES_BASE_DEPLOYMENTS_NAME}": deployment['name']} + discovery['data'].append(output) + + return [[config['kubernetes']['name'], "kubernetes.base.deployments.discovery", json.dumps(discovery)]] + +def zabbixItemsDeployments(config): + """ + description: 
create a item for deployment, per namespace + return: list + """ + items = [] + + for deployment in kubernetesGetDeployments(config): + items.append([config['kubernetes']['name'], f"kubernetes.base.deployments.availableReplicas[{deployment['namespace']},{deployment['name']}]", deployment['replicas']['available']]) + items.append([config['kubernetes']['name'], f"kubernetes.base.deployments.readyReplicas[{deployment['namespace']},{deployment['name']}]", deployment['replicas']['ready']]) + items.append([config['kubernetes']['name'], f"kubernetes.base.deployments.desiredReplicas[{deployment['namespace']},{deployment['name']}]", deployment['replicas']['desired']]) + + return items diff --git a/src/modules/kubernetes/base/nodes.py b/src/modules/kubernetes/base/nodes.py new file mode 100644 index 0000000..33a9994 --- /dev/null +++ b/src/modules/kubernetes/base/nodes.py @@ -0,0 +1,82 @@ +from kubernetes import client +from modules.common.functions import * +import json, urllib3, logging + +urllib3.disable_warnings() +logging = logging.getLogger("kubernetes.base.nodes") + +def kubernetesGetNodes(config): + """ + description: get nodes data + return: list + """ + kubernetes = client.CoreV1Api() + + nodes = [] + + for node in kubernetes.list_node().items: + node_healthz = kubernetes.connect_get_node_proxy_with_path(name=node.metadata.name, path="healthz") + node_status = kubernetes.read_node_status(name=node.metadata.name) + node_pods = kubernetes.list_pod_for_all_namespaces(field_selector="spec.nodeName={}".format(node.metadata.name)) + + json = { + "name": node.metadata.name, + "uid": node.metadata.uid, + "status": node_healthz, + "capacity": node_status.status.capacity, + "allocatable": node_status.status.allocatable, + "current": { + "pods": str(len(node_pods.items)), + "pods_used": str(round(len(node_pods.items) * 100 / int(node_status.status.allocatable['pods']), 1)), + "pods_free": str(round(100 - (len(node_pods.items) * 100 / int(node_status.status.allocatable['pods'])), 1)) + } + } + + if hasattr(node, 'metadata'): + if hasattr(node.metadata, 'labels'): + if matchLabels(config['monitoring']['nodes']['labels']['exclude'], node.metadata.labels): + continue + if config['monitoring']['nodes']['labels']['include'] != []: + if not matchLabels(config['monitoring']['nodes']['labels']['include'], node.metadata.labels): + continue + + if any(n['name'] == json['name'] for n in nodes): + continue + + nodes.append(json) + + return nodes + +def zabbixDiscoveryNodes(config): + """ + description: create a discovery for node + return: dict + """ + discovery = {"data":[]} + + for node in kubernetesGetNodes(config): + output = {"{#KUBERNETES_BASE_NODES_NAME}": node['name']} + discovery['data'].append(output) + + return [[config['kubernetes']['name'], "kubernetes.base.nodes.discovery", json.dumps(discovery)]] + +def zabbixItemsNodes(config): + """ + description: create a item for node + return: list + """ + items = [] + + for node in kubernetesGetNodes(config): + items.append([config['kubernetes']['name'], f"kubernetes.base.nodes.healthz[{node['name']}]", node['status']]) + items.append([config['kubernetes']['name'], f"kubernetes.base.nodes.capacity.cpu[{node['name']}]", node['capacity']['cpu']]) + items.append([config['kubernetes']['name'], f"kubernetes.base.nodes.capacity.memory[{node['name']}]", node['capacity']['memory']]) + items.append([config['kubernetes']['name'], f"kubernetes.base.nodes.capacity.pods[{node['name']}]", node['capacity']['pods']]) + items.append([config['kubernetes']['name'], 
f"kubernetes.base.nodes.allocatable.cpu[{node['name']}]", node['allocatable']['cpu']]) + items.append([config['kubernetes']['name'], f"kubernetes.base.nodes.allocatable.memory[{node['name']}]", node['allocatable']['memory']]) + items.append([config['kubernetes']['name'], f"kubernetes.base.nodes.allocatable.pods[{node['name']}]", node['allocatable']['pods']]) + items.append([config['kubernetes']['name'], f"kubernetes.base.nodes.current.pods[{node['name']}]", node['current']['pods']]) + items.append([config['kubernetes']['name'], f"kubernetes.base.nodes.current.podsUsed[{node['name']}]", node['current']['pods_used']]) + items.append([config['kubernetes']['name'], f"kubernetes.base.nodes.current.podsFree[{node['name']}]", node['current']['pods_free']]) + + return items diff --git a/src/modules/kubernetes/base/statefulsets.py b/src/modules/kubernetes/base/statefulsets.py new file mode 100644 index 0000000..2f3b86e --- /dev/null +++ b/src/modules/kubernetes/base/statefulsets.py @@ -0,0 +1,75 @@ +from kubernetes import client +from modules.common.functions import * +import json, urllib3, logging + +urllib3.disable_warnings() +logging = logging.getLogger("kubernetes.base.statefulsets") + +def kubernetesGetStatefulsets(config): + """ + description: get statefulsets data + return: list + """ + kubernetes = client.AppsV1Api() + + statefulsets = [] + + for statefulset in kubernetes.list_stateful_set_for_all_namespaces().items: + + json = { + "name": statefulset.metadata.name, + "namespace": statefulset.metadata.namespace, + "replicas": { + "available": statefulset.status.current_replicas, + "ready": statefulset.status.ready_replicas, + "desired": statefulset.status.replicas + } + } + + if hasattr(statefulset, 'metadata'): + if hasattr(statefulset.metadata, 'labels'): + if matchLabels(config['monitoring']['statefulsets']['labels']['exclude'], statefulset.metadata.labels): + continue + if config['monitoring']['statefulsets']['labels']['include'] != []: + if not matchLabels(config['monitoring']['statefulsets']['labels']['include'], statefulset.metadata.labels): + continue + + for i in ["desired", "ready", "available"]: + if json['replicas'][i] is None: + json['replicas'][i] = 0 + + if any(s['name'] == json['name'] and s['namespace'] == json['namespace'] for s in statefulsets): + continue + + statefulsets.append(json) + + return statefulsets + +def zabbixDiscoveryStatefulsets(config): + """ + description: create a discovery for statefulset, per namespace + return: dict + """ + discovery = {"data":[]} + + for statefulset in kubernetesGetStatefulsets(config): + output = { + "{#KUBERNETES_BASE_STATEFULSETS_NAMESPACE}": statefulset['namespace'], + "{#KUBERNETES_BASE_STATEFULSETS_NAME}": statefulset['name']} + discovery['data'].append(output) + + return [[config['kubernetes']['name'], "kubernetes.base.statefulsets.discovery", json.dumps(discovery)]] + +def zabbixItemsStatefulsets(config): + """ + description: create a item for statefulset, per namespace + return: list + """ + items = [] + + for statefulset in kubernetesGetStatefulsets(config): + items.append([config['kubernetes']['name'], f"kubernetes.base.statefulsets.availableReplicas[{statefulset['namespace']},{statefulset['name']}]", statefulset['replicas']['available']]) + items.append([config['kubernetes']['name'], f"kubernetes.base.statefulsets.readyReplicas[{statefulset['namespace']},{statefulset['name']}]", statefulset['replicas']['ready']]) + items.append([config['kubernetes']['name'], 
f"kubernetes.base.statefulsets.desiredReplicas[{statefulset['namespace']},{statefulset['name']}]", statefulset['replicas']['desired']]) + + return items diff --git a/src/modules/kubernetes/base/volumes.py b/src/modules/kubernetes/base/volumes.py new file mode 100644 index 0000000..b4fbcfa --- /dev/null +++ b/src/modules/kubernetes/base/volumes.py @@ -0,0 +1,87 @@ +from kubernetes import client +from modules.common.functions import * +import json, urllib3, re, logging + +urllib3.disable_warnings() +logging = logging.getLogger("kubernetes.base.volumes") + +def kubernetesGetVolumes(config): + """ + description: get volumes data + return: list + """ + kubernetes = client.CoreV1Api() + + volumes = [] + + for node in kubernetes.list_node().items: + node_info = kubernetes.connect_get_node_proxy_with_path(name=node.metadata.name, path="stats/summary").replace("'", "\"") + node_json = json.loads(node_info) + + for pod in node_json['pods']: + if not "volume" in pod: + continue + + for volume in pod['volume']: + + if not "pvcRef" in volume: + continue + + if volume['pvcRef']['name'].startswith(pod['podRef']['name']) and re.match(r"(.*)-[a-z0-9]{8,10}-[a-z0-9]{5}$", pod['podRef']['name']): + continue + + volume['namespace'] = volume['pvcRef']['namespace'] + volume['name'] = volume['pvcRef']['name'] + + if hasattr(volume, 'metadata'): + if hasattr(volume.metadata, 'labels'): + if matchLabels(config['monitoring']['volumes']['labels']['exclude'], volume.metadata.labels): + continue + if config['monitoring']['volumes']['labels']['include'] != []: + if not matchLabels(config['monitoring']['volumes']['labels']['include'], volume.metadata.labels): + continue + + for i in ["time", "pvcRef"]: + del volume[i] + + if any(v['name'] == volume['name'] and v['namespace'] == volume['namespace'] for v in volumes): + continue + + if "-token-" in volume['name']: + continue + + volumes.append(volume) + + return volumes + +def zabbixDiscoveryVolumes(config): + """ + description: create a discovery for persistent volume claim, per namespace + return: dict + """ + discovery = {"data":[]} + + for volume in kubernetesGetVolumes(config): + output = { + "{#KUBERNETES_BASE_VOLUMES_NAMESPACE}": volume['namespace'], + "{#KUBERNETES_BASE_VOLUMES_NAME}": volume['name']} + discovery['data'].append(output) + + return [[config['kubernetes']['name'], "kubernetes.base.volumes.discovery", json.dumps(discovery)]] + +def zabbixItemsVolumes(config): + """ + description: create a item for persistent volume claim, per namespace + return: list + """ + items = [] + + for volume in kubernetesGetVolumes(config): + items.append([config['kubernetes']['name'], f"kubernetes.base.volumes.availableBytes[{volume['namespace']},{volume['name']}]", volume['availableBytes']]) + items.append([config['kubernetes']['name'], f"kubernetes.base.volumes.capacityBytes[{volume['namespace']},{volume['name']}]", volume['capacityBytes']]) + items.append([config['kubernetes']['name'], f"kubernetes.base.volumes.usedBytes[{volume['namespace']},{volume['name']}]", volume['usedBytes']]) + items.append([config['kubernetes']['name'], f"kubernetes.base.volumes.inodesFree[{volume['namespace']},{volume['name']}]", volume['inodesFree']]) + items.append([config['kubernetes']['name'], f"kubernetes.base.volumes.inodes[{volume['namespace']},{volume['name']}]", volume['inodes']]) + items.append([config['kubernetes']['name'], f"kubernetes.base.volumes.inodesUsed[{volume['namespace']},{volume['name']}]", volume['inodesUsed']]) + + return items diff --git a/src/modules/kubernetes/get.py 
b/src/modules/kubernetes/get.py deleted file mode 100644 index 3e07eec..0000000 --- a/src/modules/kubernetes/get.py +++ /dev/null @@ -1,321 +0,0 @@ -from kubernetes import client -from datetime import datetime -from modules.common.functions import * -import json, urllib3, re - -urllib3.disable_warnings() - -def getNode(name=None, exclude_name=None, match_label=None): - """ - description: get all or specific node - return: list - """ - kubernetes = client.CoreV1Api() - - nodes = [] - - for node in kubernetes.list_node().items: - node_healthz = kubernetes.connect_get_node_proxy_with_path(name=node.metadata.name, path="healthz") - node_status = kubernetes.read_node_status(name=node.metadata.name) - node_pods = kubernetes.list_pod_for_all_namespaces(field_selector="spec.nodeName={}".format(node.metadata.name)) - - json = { - "name": node.metadata.name, - "uid": node.metadata.uid, - "status": node_healthz, - "capacity": node_status.status.capacity, - "allocatable": node_status.status.allocatable, - "current": { - "pods": str(len(node_pods.items)), - "pods_used": str(round(len(node_pods.items) * 100 / int(node_status.status.allocatable['pods']), 1)), - "pods_free": str(round(100 - (len(node_pods.items) * 100 / int(node_status.status.allocatable['pods'])), 1)) - } - } - - if ifObjectMatch(exclude_name, json['name']): - continue - - if match_label is not None and not ifLabelMatch(match_label, node.metadata.labels): - continue - - if name == json['name']: - return [json] - - if any(n['name'] == json['name'] for n in nodes): - continue - - nodes.append(json) - - return nodes - - -def getDaemonset(name=None, exclude_name=None, exclude_namespace=None, match_label=None): - """ - description: get all or specific daemonset - return: list - """ - kubernetes = client.AppsV1Api() - - daemonsets = [] - - for daemonset in kubernetes.list_daemon_set_for_all_namespaces().items: - - json = { - "name": daemonset.metadata.name, - "namespace": daemonset.metadata.namespace, - "replicas": { - "desired": daemonset.status.desired_number_scheduled, - "current": daemonset.status.current_number_scheduled, - "available": daemonset.status.number_available, - "ready": daemonset.status.number_ready - } - } - - for i in ["desired", "current", "available", "ready"]: - if json['replicas'][i] is None: - json['replicas'][i] = 0 - - if ifObjectMatch(exclude_name, json['name']): - continue - - if ifObjectMatch(exclude_namespace, json['namespace']): - continue - - if match_label is not None and not ifLabelMatch(match_label, daemonset.metadata.labels): - continue - - if name == json['name']: - return [json] - - if any(d['name'] == json['name'] and d['namespace'] == json['namespace'] for d in daemonsets): - continue - - daemonsets.append(json) - - return daemonsets - - -def getVolume(name=None, exclude_name=None, exclude_namespace=None, match_label=None): - """ - description: get all or specific persistent volume claim - return: list - """ - kubernetes = client.CoreV1Api() - - volumes = [] - - for node in kubernetes.list_node().items: - node_info = kubernetes.connect_get_node_proxy_with_path(name=node.metadata.name, path="stats/summary").replace("'", "\"") - node_json = json.loads(node_info) - - for pod in node_json['pods']: - - if not "volume" in pod: - continue - - for volume in pod['volume']: - - if not "pvcRef" in volume: - continue - - if volume['pvcRef']['name'].startswith(pod['podRef']['name']) and re.match(r"(.*)-[a-z0-9]{8,10}-[a-z0-9]{5}$", pod['podRef']['name']): - continue - - volume['namespace'] = 
volume['pvcRef']['namespace'] - volume['name'] = volume['pvcRef']['name'] - - if ifObjectMatch(exclude_name, volume['name']): - continue - - if ifObjectMatch(exclude_namespace, volume['namespace']): - continue - - if match_label is not None and not ifLabelMatch(match_label, volume.metadata.labels): - continue - - for i in ["time", "pvcRef"]: - del volume[i] - - if name == volume['name']: - return [volume] - - if any(v['name'] == volume['name'] and v['namespace'] == volume['namespace'] for v in volumes): - continue - - if "-token-" in volume['name']: - continue - - volumes.append(volume) - - return volumes - - -def getDeployment(name=None, exclude_name=None, exclude_namespace=None, match_label=None): - """ - description: get all or specific deployment - return: list - """ - kubernetes = client.AppsV1Api() - - deployments = [] - - for deployment in kubernetes.list_deployment_for_all_namespaces().items: - - json = { - "name": deployment.metadata.name, - "namespace": deployment.metadata.namespace, - "replicas": { - "desired": deployment.status.replicas, - "ready": deployment.status.ready_replicas, - "available": deployment.status.available_replicas - } - } - - if ifObjectMatch(exclude_name, json['name']): - continue - - if ifObjectMatch(exclude_namespace, json['namespace']): - continue - - if match_label is not None and not ifLabelMatch(match_label, deployment.metadata.labels): - continue - - for i in ["desired", "ready", "available"]: - if json['replicas'][i] is None: - json['replicas'][i] = 0 - - if name == json['name']: - return [json] - - if any(d['name'] == json['name'] and d['namespace'] == json['namespace'] for d in deployments): - continue - - deployments.append(json) - - return deployments - - -def getStatefulset(name=None, exclude_name=None, exclude_namespace=None, match_label=None): - """ - description: get all or specific statefulset - return: list - """ - kubernetes = client.AppsV1Api() - - statefulsets = [] - - for statefulset in kubernetes.list_stateful_set_for_all_namespaces().items: - - json = { - "name": statefulset.metadata.name, - "namespace": statefulset.metadata.namespace, - "replicas": { - "available": statefulset.status.current_replicas, - "ready": statefulset.status.ready_replicas, - "desired": statefulset.status.replicas - } - } - - if ifObjectMatch(exclude_name, json['name']): - continue - - if ifObjectMatch(exclude_namespace, json['namespace']): - continue - - if match_label is not None and not ifLabelMatch(match_label, statefulset.metadata.labels): - continue - - for i in ["desired", "ready", "available"]: - if json['replicas'][i] is None: - json['replicas'][i] = 0 - - if name == json['name']: - return [json] - - if any(s['name'] == json['name'] and s['namespace'] == json['namespace'] for s in statefulsets): - continue - - statefulsets.append(json) - - return statefulsets - - -def getCronjob(name=None, exclude_name=None, exclude_namespace=None, match_label=None): - """ - description: get all or specific cronjob - return: list - """ - kubernetes = client.BatchV1Api() - - cronjobs = [] - - for cronjob in kubernetes.list_cron_job_for_all_namespaces().items: - - related_jobs, job_latest = [], {} - - for job in kubernetes.list_job_for_all_namespaces().items: - - if not job: - continue - - if not job.metadata.owner_references: - continue - - if not "CronJob" in job.metadata.owner_references[0].kind: - continue - - if job.metadata.owner_references[0].name != cronjob.metadata.name: - continue - - if job.status.active is not None: - continue - - related_jobs.append(job) - - 
for related_job in related_jobs:
-
-            if not bool(job_latest):
-                job_latest = related_job
-                continue
-
-            related_job_dt = datetime.timestamp(related_job.status.conditions[0].last_probe_time)
-            job_latest_dt = datetime.timestamp(job_latest.status.conditions[0].last_probe_time)
-
-            if related_job_dt > job_latest_dt:
-                job_latest = related_job
-
-        if type(job_latest) is dict:
-            continue
-
-        if job_latest.status.conditions[0].type == "Complete":
-            cronjob_status = "0"
-        else:
-            cronjob_status = "1"
-
-        json = {
-            "name": cronjob.metadata.name,
-            "namespace": cronjob.metadata.namespace,
-            "status": cronjob_status,
-            "last_job": {
-                "name": job_latest.metadata.name,
-                "reason": job_latest.status.conditions[0].reason,
-                "message": job_latest.status.conditions[0].message,
-                "status": job_latest.status.conditions[0].type
-            }
-        }
-
-        if ifObjectMatch(exclude_name, json['name']):
-            continue
-
-        if ifObjectMatch(exclude_namespace, json['namespace']):
-            continue
-
-        if match_label is not None and not ifLabelMatch(match_label, cronjob.metadata.labels):
-            continue
-
-        if name == json['name']:
-            return [json]
-
-        cronjobs.append(json)
-
-    return cronjobs
diff --git a/src/modules/kubernetes/openebs/__init__.py b/src/modules/kubernetes/openebs/__init__.py
new file mode 100644
index 0000000..b33ddec
--- /dev/null
+++ b/src/modules/kubernetes/openebs/__init__.py
@@ -0,0 +1,2 @@
+from modules.kubernetes.openebs.cstorpoolclusters import zabbixDiscoveryCstorpoolclusters, zabbixItemsCstorpoolclusters
+from modules.kubernetes.openebs.cstorpoolinstances import zabbixDiscoveryCstorpoolinstances, zabbixItemsCstorpoolinstances
diff --git a/src/modules/kubernetes/openebs/cstorpoolclusters.py b/src/modules/kubernetes/openebs/cstorpoolclusters.py
new file mode 100644
index 0000000..773be3e
--- /dev/null
+++ b/src/modules/kubernetes/openebs/cstorpoolclusters.py
@@ -0,0 +1,84 @@
+from kubernetes import client
+from modules.common.functions import *
+import json, urllib3, logging
+
+urllib3.disable_warnings()
+logging = logging.getLogger("kubernetes.openebs.cstorpoolclusters")
+
+def openebsGetCstorpoolclusters(config):
+    """
+    description: get cstorpoolclusters data
+    return: list
+    """
+    kubernetes = client.CustomObjectsApi()
+
+    cstorpoolclusters = []
+
+    if config['monitoring']['openebs']['engine'] != "cstor":
+        return cstorpoolclusters
+
+    try:
+        objects = kubernetes.list_cluster_custom_object(group="cstor.openebs.io", version="v1", plural="cstorpoolclusters")
+    except Exception:
+        return cstorpoolclusters
+
+    for cstorpoolcluster in rawObjects(objects):
+        json = {
+            "name": cstorpoolcluster['metadata']['name'],
+            "namespace": cstorpoolcluster['metadata']['namespace'],
+            "instances": {
+                "desired": cstorpoolcluster['status']['desiredInstances'],
+                "healthy": cstorpoolcluster['status']['healthyInstances'],
+                "provisioned": cstorpoolcluster['status']['provisionedInstances']
+            },
+            "version": {
+                "desired": cstorpoolcluster['versionDetails']['desired'],
+                "current": cstorpoolcluster['versionDetails']['status']['current']
+            }
+        }
+
+        if cstorpoolcluster.get("metadata"):
+            if cstorpoolcluster['metadata'].get("labels"):
+                if matchLabels(config['monitoring']['openebs']['labels']['exclude'], cstorpoolcluster['metadata']['labels']):
+                    continue
+                if config['monitoring']['openebs']['labels']['include'] != []:
+                    if not matchLabels(config['monitoring']['openebs']['labels']['include'], cstorpoolcluster['metadata']['labels']):
+                        continue
+
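+        # skip duplicates already collected for this name/namespace pair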
+        if any(c['name'] == json['name'] and c['namespace'] == json['namespace'] for c in cstorpoolclusters):
+            continue
+
+        cstorpoolclusters.append(json)
+
+    return cstorpoolclusters
+
+def zabbixDiscoveryCstorpoolclusters(config):
+    """
+    description: create a discovery for cstorpoolclusters, per namespace
+    return: dict
+    """
+    discovery = {"data":[]}
+
+    for cstorpoolcluster in openebsGetCstorpoolclusters(config):
+        output = {
+            "{#KUBERNETES_OPENEBS_CSTORPOOLCLUSTERS_NAMESPACE}": cstorpoolcluster['namespace'],
+            "{#KUBERNETES_OPENEBS_CSTORPOOLCLUSTERS_NAME}": cstorpoolcluster['name']}
+        discovery['data'].append(output)
+
+    return [[config['kubernetes']['name'], "kubernetes.openebs.cstorpoolclusters.discovery", json.dumps(discovery)]]
+
+def zabbixItemsCstorpoolclusters(config):
+    """
+    description: create an item for cstorpoolclusters, per namespace
+    return: list
+    """
+    items = []
+
+    for cstorpoolcluster in openebsGetCstorpoolclusters(config):
+        items.append([config['kubernetes']['name'], f"kubernetes.openebs.cstorpoolclusters.desiredInstances[{cstorpoolcluster['namespace']},{cstorpoolcluster['name']}]", cstorpoolcluster['instances']['desired']])
+        items.append([config['kubernetes']['name'], f"kubernetes.openebs.cstorpoolclusters.healthyInstances[{cstorpoolcluster['namespace']},{cstorpoolcluster['name']}]", cstorpoolcluster['instances']['healthy']])
+        items.append([config['kubernetes']['name'], f"kubernetes.openebs.cstorpoolclusters.provisionedInstances[{cstorpoolcluster['namespace']},{cstorpoolcluster['name']}]", cstorpoolcluster['instances']['provisioned']])
+        items.append([config['kubernetes']['name'], f"kubernetes.openebs.cstorpoolclusters.desiredVersion[{cstorpoolcluster['namespace']},{cstorpoolcluster['name']}]", cstorpoolcluster['version']['desired']])
+        items.append([config['kubernetes']['name'], f"kubernetes.openebs.cstorpoolclusters.currentVersion[{cstorpoolcluster['namespace']},{cstorpoolcluster['name']}]", cstorpoolcluster['version']['current']])
+
+    return items
diff --git a/src/modules/kubernetes/openebs/cstorpoolinstances.py b/src/modules/kubernetes/openebs/cstorpoolinstances.py
new file mode 100644
index 0000000..4395668
--- /dev/null
+++ b/src/modules/kubernetes/openebs/cstorpoolinstances.py
@@ -0,0 +1,82 @@
+from kubernetes import client
+from modules.common.functions import *
+import json, urllib3, logging
+
+urllib3.disable_warnings()
+logging = logging.getLogger("kubernetes.openebs.cstorpoolinstances")
+
+def openebsGetCstorpoolinstances(config):
+    """
+    description: get cstorpoolinstances data
+    return: list
+    """
+    kubernetes = client.CustomObjectsApi()
+
+    cstorpoolinstances = []
+
+    if config['monitoring']['openebs']['engine'] != "cstor":
+        return cstorpoolinstances
+
+    try:
+        objects = kubernetes.list_cluster_custom_object(group="cstor.openebs.io", version="v1", plural="cstorpoolinstances")
+    except Exception:
+        return cstorpoolinstances
+
+    for cstorpoolinstance in rawObjects(objects):
+        json = {
+            "name": cstorpoolinstance['metadata']['name'],
+            "namespace": cstorpoolinstance['metadata']['namespace'],
+            "status": cstorpoolinstance['status'],
+            "version": {
+                "desired": cstorpoolinstance['versionDetails']['desired'],
+                "current": cstorpoolinstance['versionDetails']['status']['current']
+            }
+        }
+
+        if cstorpoolinstance.get("metadata"):
+            if cstorpoolinstance['metadata'].get("labels"):
+                if matchLabels(config['monitoring']['openebs']['labels']['exclude'], cstorpoolinstance['metadata']['labels']):
+                    continue
+                if config['monitoring']['openebs']['labels']['include'] != []:
+                    if not matchLabels(config['monitoring']['openebs']['labels']['include'], 
cstorpoolinstance['metadata']['labels']): + continue + + if any(c['name'] == json['name'] and c['namespace'] == json['namespace'] for c in cstorpoolinstances): + continue + + cstorpoolinstances.append(json) + + return cstorpoolinstances + +def zabbixDiscoveryCstorpoolinstances(config): + """ + description: create a discovery for cstorpoolinstances, per namespace + return: dict + """ + discovery = {"data":[]} + + for cstorpoolinstance in openebsGetCstorpoolinstances(config): + output = { + "{#KUBERNETES_OPENEBS_CSTORPOOLINSTANCES_NAMESPACE}": cstorpoolinstance['namespace'], + "{#KUBERNETES_OPENEBS_CSTORPOOLINSTANCES_NAME}": cstorpoolinstance['name']} + discovery['data'].append(output) + + return [[config['kubernetes']['name'], "kubernetes.openebs.cstorpoolinstances.discovery", json.dumps(discovery)]] + +def zabbixItemsCstorpoolinstances(config): + """ + description: create a item for cstorpoolinstances, per namespace + return: list + """ + items = [] + + for cstorpoolinstance in openebsGetCstorpoolinstances(config): + items.append([config['kubernetes']['name'], f"kubernetes.openebs.cstorpoolinstances.readonly[{cstorpoolinstance['namespace']},{cstorpoolinstance['name']}]", cstorpoolinstance['status']['readOnly']]) + items.append([config['kubernetes']['name'], f"kubernetes.openebs.cstorpoolinstances.provisionedReplicas[{cstorpoolinstance['namespace']},{cstorpoolinstance['name']}]", cstorpoolinstance['status']['provisionedReplicas']]) + items.append([config['kubernetes']['name'], f"kubernetes.openebs.cstorpoolinstances.healthyReplicas[{cstorpoolinstance['namespace']},{cstorpoolinstance['name']}]", cstorpoolinstance['status']['healthyReplicas']]) + items.append([config['kubernetes']['name'], f"kubernetes.openebs.cstorpoolinstances.status[{cstorpoolinstance['namespace']},{cstorpoolinstance['name']}]", cstorpoolinstance['status']['phase']]) + items.append([config['kubernetes']['name'], f"kubernetes.openebs.cstorpoolinstances.capacity.total[{cstorpoolinstance['namespace']},{cstorpoolinstance['name']}]", cstorpoolinstance['status']['capacity']['total']]) + items.append([config['kubernetes']['name'], f"kubernetes.openebs.cstorpoolinstances.capacity.free[{cstorpoolinstance['namespace']},{cstorpoolinstance['name']}]", cstorpoolinstance['status']['capacity']['free']]) + items.append([config['kubernetes']['name'], f"kubernetes.openebs.cstorpoolinstances.capacity.used[{cstorpoolinstance['namespace']},{cstorpoolinstance['name']}]", cstorpoolinstance['status']['capacity']['used']]) + + return items diff --git a/src/modules/kubernetes/trivy/__init__.py b/src/modules/kubernetes/trivy/__init__.py new file mode 100644 index 0000000..9ffd180 --- /dev/null +++ b/src/modules/kubernetes/trivy/__init__.py @@ -0,0 +1 @@ +from modules.kubernetes.trivy.vulnerabilityreports import trivyGetVulnerabilityreports diff --git a/src/modules/kubernetes/trivy/vulnerabilityreports.py b/src/modules/kubernetes/trivy/vulnerabilityreports.py new file mode 100644 index 0000000..92feaa3 --- /dev/null +++ b/src/modules/kubernetes/trivy/vulnerabilityreports.py @@ -0,0 +1,24 @@ +from kubernetes import client +from modules.common.functions import * +import urllib3, logging + +urllib3.disable_warnings() +logging = logging.getLogger("kubernetes.trivy.vulnerabilityreports") + +def trivyGetVulnerabilityreports(config=None): + """ + description: get vulnerabilityreports data + return: list + """ + kubernetes = client.CustomObjectsApi() + + reports = [] + + try: + objects = kubernetes.list_cluster_custom_object(group="aquasecurity.github.io", 
version="v1alpha1", plural="vulnerabilityreports") + except Exception: + return reports + + for vuln in rawObjects(objects): + print(vuln['metadata']['name']) + print(vuln['report']['summary']) diff --git a/src/modules/zabbix/__init__.py b/src/modules/zabbix/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/modules/zabbix/discovery.py b/src/modules/zabbix/discovery.py deleted file mode 100644 index 7f5cc3e..0000000 --- a/src/modules/zabbix/discovery.py +++ /dev/null @@ -1,107 +0,0 @@ -from pyzabbix import ZabbixMetric -import json - -def zabbixDiscoveryNode(clustername, nodes=[]): - """ - description: create a discovery for node - return: class ZabbixMetric - """ - discovery = {"data":[]} - - for node in nodes: - output = {"{#KUBERNETES_NODE_NAME}": node['name']} - discovery['data'].append(output) - - sender = [ZabbixMetric(clustername, "kubernetes.node.discovery", json.dumps(discovery))] - - return sender - - -def zabbixDiscoveryDaemonset(clustername, daemonsets=[]): - """ - description: create a discovery for daemonset, per namespace - return: class ZabbixMetric - """ - discovery = {"data":[]} - - for daemonset in daemonsets: - output = { - "{#KUBERNETES_DAEMONSET_NAMESPACE}": daemonset['namespace'], - "{#KUBERNETES_DAEMONSET_NAME}": daemonset['name']} - discovery['data'].append(output) - - sender = [ZabbixMetric(clustername, "kubernetes.daemonset.discovery", json.dumps(discovery))] - - return sender - - -def zabbixDiscoveryVolume(clustername, volumes=[]): - """ - description: create a discovery for persistent volume claim, per namespace - return: class ZabbixMetric - """ - discovery = {"data":[]} - - for volume in volumes: - output = { - "{#KUBERNETES_PVC_NAMESPACE}": volume['namespace'], - "{#KUBERNETES_PVC_NAME}": volume['name']} - discovery['data'].append(output) - - sender = [ZabbixMetric(clustername, "kubernetes.pvc.discovery", json.dumps(discovery))] - - return sender - - -def zabbixDiscoveryDeployment(clustername, deployments=[]): - """ - description: create a discovery for deployment, per namespace - return: class ZabbixMetric - """ - discovery = {"data":[]} - - for deployment in deployments: - output = { - "{#KUBERNETES_DEPLOYMENT_NAMESPACE}": deployment['namespace'], - "{#KUBERNETES_DEPLOYMENT_NAME}": deployment['name']} - discovery['data'].append(output) - - sender = [ZabbixMetric(clustername, "kubernetes.deployment.discovery", json.dumps(discovery))] - - return sender - - -def zabbixDiscoveryStatefulset(clustername, statefulsets=[]): - """ - description: create a discovery for statefulset, per namespace - return: class ZabbixMetric - """ - discovery = {"data":[]} - - for statefulset in statefulsets: - output = { - "{#KUBERNETES_STATEFULSET_NAMESPACE}": statefulset['namespace'], - "{#KUBERNETES_STATEFULSET_NAME}": statefulset['name']} - discovery['data'].append(output) - - sender = [ZabbixMetric(clustername, "kubernetes.statefulset.discovery", json.dumps(discovery))] - - return sender - - -def zabbixDiscoveryCronjob(clustername, cronjobs=[]): - """ - description: create a discovery for cronjob, per namespace - return: class ZabbixMetric - """ - discovery = {"data":[]} - - for cronjob in cronjobs: - output = { - "{#KUBERNETES_CRONJOB_NAMESPACE}": cronjob['namespace'], - "{#KUBERNETES_CRONJOB_NAME}": cronjob['name']} - discovery['data'].append(output) - - sender = [ZabbixMetric(clustername, "kubernetes.cronjob.discovery", json.dumps(discovery))] - - return sender diff --git a/src/modules/zabbix/item.py b/src/modules/zabbix/item.py deleted file mode 
100644 index f12278c..0000000 --- a/src/modules/zabbix/item.py +++ /dev/null @@ -1,101 +0,0 @@ -from pyzabbix import ZabbixMetric - -def zabbixItemNode(clustername, nodes=[]): - """ - description: create a item for node - return: class ZabbixMetric - """ - sender = [] - - for node in nodes: - sender.append(ZabbixMetric(clustername, f"kubernetes.node.healthz[{node['name']}]", node['status']),) - sender.append(ZabbixMetric(clustername, f"kubernetes.node.capacity.cpu[{node['name']}]", node['capacity']['cpu']),) - sender.append(ZabbixMetric(clustername, f"kubernetes.node.capacity.memory[{node['name']}]", node['capacity']['memory']),) - sender.append(ZabbixMetric(clustername, f"kubernetes.node.capacity.pods[{node['name']}]", node['capacity']['pods']),) - sender.append(ZabbixMetric(clustername, f"kubernetes.node.allocatable.cpu[{node['name']}]", node['allocatable']['cpu']),) - sender.append(ZabbixMetric(clustername, f"kubernetes.node.allocatable.memory[{node['name']}]", node['allocatable']['memory']),) - sender.append(ZabbixMetric(clustername, f"kubernetes.node.allocatable.pods[{node['name']}]", node['allocatable']['pods']),) - sender.append(ZabbixMetric(clustername, f"kubernetes.node.current.pods[{node['name']}]", node['current']['pods']),) - sender.append(ZabbixMetric(clustername, f"kubernetes.node.current.podsUsed[{node['name']}]", node['current']['pods_used']),) - sender.append(ZabbixMetric(clustername, f"kubernetes.node.current.podsFree[{node['name']}]", node['current']['pods_free']),) - - return sender - - -def zabbixItemDaemonset(clustername, daemonsets=[]): - """ - description: create a item for daemonset, per namespace - return: class ZabbixMetric - """ - sender = [] - - for daemonset in daemonsets: - sender.append(ZabbixMetric(clustername, f"kubernetes.daemonset.desiredReplicas[{daemonset['namespace']},{daemonset['name']}]", daemonset['replicas']['desired']),) - sender.append(ZabbixMetric(clustername, f"kubernetes.daemonset.currentReplicas[{daemonset['namespace']},{daemonset['name']}]", daemonset['replicas']['current']),) - sender.append(ZabbixMetric(clustername, f"kubernetes.daemonset.availableReplicas[{daemonset['namespace']},{daemonset['name']}]", daemonset['replicas']['available']),) - sender.append(ZabbixMetric(clustername, f"kubernetes.daemonset.readyReplicas[{daemonset['namespace']},{daemonset['name']}]", daemonset['replicas']['ready']),) - - return sender - - -def zabbixItemVolume(clustername, volumes=[]): - """ - description: create a item for persistent volume claim, per namespace - return: class ZabbixMetric - """ - sender = [] - - for volume in volumes: - sender.append(ZabbixMetric(clustername, f"kubernetes.volumeclaim.availableBytes[{volume['namespace']},{volume['name']}]", volume['availableBytes']),) - sender.append(ZabbixMetric(clustername, f"kubernetes.volumeclaim.capacityBytes[{volume['namespace']},{volume['name']}]", volume['capacityBytes']),) - sender.append(ZabbixMetric(clustername, f"kubernetes.volumeclaim.usedBytes[{volume['namespace']},{volume['name']}]", volume['usedBytes']),) - sender.append(ZabbixMetric(clustername, f"kubernetes.volumeclaim.inodesFree[{volume['namespace']},{volume['name']}]", volume['inodesFree']),) - sender.append(ZabbixMetric(clustername, f"kubernetes.volumeclaim.inodes[{volume['namespace']},{volume['name']}]", volume['inodes']),) - sender.append(ZabbixMetric(clustername, f"kubernetes.volumeclaim.inodesUsed[{volume['namespace']},{volume['name']}]", volume['inodesUsed']),) - - return sender - - -def zabbixItemDeployment(clustername, 
deployments=[]): - """ - description: create a item for deployment, per namespace - return: class ZabbixResponse - """ - sender = [] - - for deployment in deployments: - sender.append(ZabbixMetric(clustername, f"kubernetes.deployment.availableReplicas[{deployment['namespace']},{deployment['name']}]", deployment['replicas']['available']),) - sender.append(ZabbixMetric(clustername, f"kubernetes.deployment.readyReplicas[{deployment['namespace']},{deployment['name']}]", deployment['replicas']['ready']),) - sender.append(ZabbixMetric(clustername, f"kubernetes.deployment.desiredReplicas[{deployment['namespace']},{deployment['name']}]", deployment['replicas']['desired']),) - - return sender - - -def zabbixItemStatefulset(clustername, statefulsets=[]): - """ - description: create a item for statefulset, per namespace - return: class ZabbixResponse - """ - sender = [] - - for statefulset in statefulsets: - sender.append(ZabbixMetric(clustername, f"kubernetes.statefulset.availableReplicas[{statefulset['namespace']},{statefulset['name']}]", statefulset['replicas']['available']),) - sender.append(ZabbixMetric(clustername, f"kubernetes.statefulset.readyReplicas[{statefulset['namespace']},{statefulset['name']}]", statefulset['replicas']['ready']),) - sender.append(ZabbixMetric(clustername, f"kubernetes.statefulset.desiredReplicas[{statefulset['namespace']},{statefulset['name']}]", statefulset['replicas']['desired']),) - - return sender - - -def zabbixItemCronjob(clustername, cronjobs=[]): - """ - description: create a item for cronjob, per namespace - return: class ZabbixResponse - """ - sender = [] - - for cronjob in cronjobs: - sender.append(ZabbixMetric(clustername, f"kubernetes.cronjob.status[{cronjob['namespace']},{cronjob['name']}]", cronjob['status']),) - sender.append(ZabbixMetric(clustername, f"kubernetes.cronjob.reason[{cronjob['namespace']},{cronjob['name']}]", cronjob['last_job']['reason']),) - sender.append(ZabbixMetric(clustername, f"kubernetes.cronjob.message[{cronjob['namespace']},{cronjob['name']}]", cronjob['last_job']['message']),) - - return sender diff --git a/src/requirements.txt b/src/requirements.txt index 5165dff..1d227b4 100644 --- a/src/requirements.txt +++ b/src/requirements.txt @@ -1,4 +1,6 @@ requests==2.31.0 -kubernetes==27.2.0 -py-zabbix==1.1.7 -urllib3==1.26.15 +kubernetes==29.0.0 +zappix==1.0.2 +urllib3==2.1.0 +schedule==1.2.1 +psutil==5.9.7 diff --git a/src/zabbix-kubernetes-discovery.py b/src/zabbix-kubernetes-discovery.py index dfe8011..543ae46 100644 --- a/src/zabbix-kubernetes-discovery.py +++ b/src/zabbix-kubernetes-discovery.py @@ -1,130 +1,119 @@ #!/usr/bin/env python3 -import argparse, sys, os, logging -from random import randint +import argparse, sys, os, yaml, queue +import logging, schedule, threading, psutil from time import sleep -from kubernetes import config -from pyzabbix import ZabbixSender -from modules.kubernetes.get import * -from modules.zabbix.item import * -from modules.zabbix.discovery import * +from kubernetes import config as kube_config +from zappix.sender import Sender as zabbix_sender +from modules.kubernetes.base import * +from modules.kubernetes.openebs import * parser = argparse.ArgumentParser() -parser.add_argument("--zabbix-timeout", dest="zabbix_timeout", action="store", required=False, help="Set Zabbix timeout", default=5) -parser.add_argument("--zabbix-endpoint", dest="zabbix_endpoint", action="store", required=True, help="Set Zabbix endpoint (server)") -parser.add_argument("--kubernetes-name", dest="kubernetes_name", 
action="store", required=True, help="Set Kubernetes cluster name in Zabbix") -parser.add_argument("--monitoring-mode", dest="monitoring_mode", action="store", required=True, help="Mode of monitoring", choices=["volume","deployment","daemonset","node","statefulset","cronjob"]) -parser.add_argument("--monitoring-type", dest="monitoring_type", action="store", required=True, help="Type of monitoring", choices=["discovery", "item", "json"]) -parser.add_argument("--object-name", dest="object_name", action="store", required=False, help="Name of object in Kubernetes", default=None) -parser.add_argument("--match-label", dest="match_label", action="store", required=False, help="Match label of object in Kubernetes", default=None) -parser.add_argument("--exclude-name", dest="exclude_name", action="store", required=False, help="Exclude object name in Kubernetes", default=None) -parser.add_argument("--exclude-namespace", dest="exclude_namespace", action="store", required=False, help="Exclude namespace in Kubernetes", default=None) -parser.add_argument("--no-wait", dest="no_wait", action="store_true", required=False, help="Disable startup wait time", default=False) -parser.add_argument("--verbose", dest="verbose", action="store_true", required=False, help="Verbose output", default=False) -parser.add_argument("--debug", dest="debug", action="store_true", required=False, help="Debug output for Zabbix", default=False) +parser.add_argument("--config-file", dest="config_file", action="store", required=False, help="Configuration file (default: config.yaml)", default="config.yaml") args = parser.parse_args() -if args.debug: - logger = logging.getLogger("pyzabbix") - logger.setLevel(logging.DEBUG) - handler = logging.StreamHandler(sys.stdout) - logger.addHandler(handler) +with open(args.config_file, "r") as f: + config = yaml.load(f, Loader=yaml.FullLoader) + +logging.basicConfig( + datefmt="%d/%m/%Y %H:%M:%S", + format="[%(asctime)s] (%(levelname)s) %(name)s.%(funcName)s():%(lineno)d - %(message)s", + level=getattr(logging, config['output']['level'])) +logging = logging.getLogger("main") if os.path.exists("/var/run/secrets/kubernetes.io/serviceaccount/token") and not os.getenv('KUBECONFIG'): - config.load_incluster_config() - if args.verbose: print("Kubernetes credentials from ServiceAccount") + kube_config.load_incluster_config() + logging.debug("Loading Kubernetes credentials from ServiceAccount") else: try: - config.load_kube_config() - if args.verbose: print("Kubernetes credentials from KUBECONFIG") + kube_config.load_kube_config() + logging.debug("Loading Kubernetes credentials from KUBECONFIG variable") except: - print("Unable to find kubernetes cluster configuration") + logging.error("Unable to load Kubernetes credentials") sys.exit(1) -zabbix = ZabbixSender(args.zabbix_endpoint) -if args.zabbix_timeout: zabbix.timeout = int(args.zabbix_timeout) -if args.verbose: - print(f"Zabbix endpoint: {args.zabbix_endpoint}") - print(f"Zabbix timeout: {args.zabbix_timeout}") - print(f"Kubernetes name: {args.kubernetes_name}") +zabbix = zabbix_sender(config['zabbix']['endpoint']) +zabbix.timeout = int(config['zabbix']['timeout']) +logging.debug(f"-> Zabbix endpoint: {config['zabbix']['endpoint']}") +logging.debug(f"-> Zabbix timeout: {config['zabbix']['timeout']}") +logging.debug(f"-> Cluster name: {config['kubernetes']['name']}") + +def executeSender(data): + for d in data: + if len(d) != 3: + logging.error(f"Invalid format: {d}") + host, key, value = d[0], d[1], d[2] + logging.info(f"Zabbix server request: 
host={host} key={key} value={value}") + try: + resp = zabbix.send_value(host, key, value) + logging.info(f"Zabbix server response: {resp}") + except Exception as e: + logging.error(e) + +def executeJobs(): + while True: + logging.debug(f"Program memory used (rss): {round(psutil.Process(os.getpid()).memory_info().rss / 1024 / 1024)} MiB") + logging.debug(f"{jobs_queue.qsize()} job(s) in queue") + jobs = jobs_queue.get() + if jobs is not None: + jobs() + jobs_queue.task_done() + else: + logging.debug("0 job in queue") if __name__ == "__main__": + logging.info("Application zabbix-kubernetes-discovery started") + + jobs_queue = queue.Queue() + sch_disco = config['zabbix']['schedule']['discovery'] + sch_items = config['zabbix']['schedule']['items'] + + # cronjobs + if config['monitoring']['cronjobs']['enabled']: + schedule.every(sch_disco).seconds.do(jobs_queue.put, lambda: executeSender(zabbixDiscoveryCronjobs(config))) + schedule.every(sch_items).seconds.do(jobs_queue.put, lambda: executeSender(zabbixItemsCronjobs(config))) + + # daemonsets + if config['monitoring']['daemonsets']['enabled']: + schedule.every(sch_disco).seconds.do(jobs_queue.put, lambda: executeSender(zabbixDiscoveryDaemonsets(config))) + schedule.every(sch_items).seconds.do(jobs_queue.put, lambda: executeSender(zabbixItemsDaemonsets(config))) + + # deployments + if config['monitoring']['deployments']['enabled']: + schedule.every(sch_disco).seconds.do(jobs_queue.put, lambda: executeSender(zabbixDiscoveryDeployments(config))) + schedule.every(sch_items).seconds.do(jobs_queue.put, lambda: executeSender(zabbixItemsDeployments(config))) + + # nodes + if config['monitoring']['nodes']['enabled']: + schedule.every(sch_disco).seconds.do(jobs_queue.put, lambda: executeSender(zabbixDiscoveryNodes(config))) + schedule.every(sch_items).seconds.do(jobs_queue.put, lambda: executeSender(zabbixItemsNodes(config))) + + # statefulsets + if config['monitoring']['statefulsets']['enabled']: + schedule.every(sch_disco).seconds.do(jobs_queue.put, lambda: executeSender(zabbixDiscoveryStatefulsets(config))) + schedule.every(sch_items).seconds.do(jobs_queue.put, lambda: executeSender(zabbixItemsStatefulsets(config))) + + # volumes + if config['monitoring']['volumes']['enabled']: + schedule.every(sch_disco).seconds.do(jobs_queue.put, lambda: executeSender(zabbixDiscoveryVolumes(config))) + schedule.every(sch_items).seconds.do(jobs_queue.put, lambda: executeSender(zabbixItemsVolumes(config))) + + # openebs + if config['monitoring']['openebs']['enabled']: + # cstorpoolclusters + schedule.every(sch_disco).seconds.do(jobs_queue.put, lambda: executeSender(zabbixDiscoveryCstorpoolclusters(config))) + schedule.every(sch_items).seconds.do(jobs_queue.put, lambda: executeSender(zabbixItemsCstorpoolclusters(config))) + # cstorpoolinstances + schedule.every(sch_disco).seconds.do(jobs_queue.put, lambda: executeSender(zabbixDiscoveryCstorpoolinstances(config))) + schedule.every(sch_items).seconds.do(jobs_queue.put, lambda: executeSender(zabbixItemsCstorpoolinstances(config))) - # Random sleep between 0 and 15 seconds - if args.no_wait == False: - timewait = randint(0,15) - if args.verbose: print(f"Starting in {timewait} second(s)...") - sleep(timewait) - - # Node - if args.monitoring_mode == "node": - if args.monitoring_type == "json": - print("JSON output (node): {}".format( - getNode(args.object_name, args.exclude_name, args.match_label))) - if args.monitoring_type == "discovery": - print("Zabbix discovery (node): {}".format( - 
+    # cronjobs
+    if config['monitoring']['cronjobs']['enabled']:
+        schedule.every(sch_disco).seconds.do(jobs_queue.put, lambda: executeSender(zabbixDiscoveryCronjobs(config)))
+        schedule.every(sch_items).seconds.do(jobs_queue.put, lambda: executeSender(zabbixItemsCronjobs(config)))
+
+    # daemonsets
+    if config['monitoring']['daemonsets']['enabled']:
+        schedule.every(sch_disco).seconds.do(jobs_queue.put, lambda: executeSender(zabbixDiscoveryDaemonsets(config)))
+        schedule.every(sch_items).seconds.do(jobs_queue.put, lambda: executeSender(zabbixItemsDaemonsets(config)))
+
+    # deployments
+    if config['monitoring']['deployments']['enabled']:
+        schedule.every(sch_disco).seconds.do(jobs_queue.put, lambda: executeSender(zabbixDiscoveryDeployments(config)))
+        schedule.every(sch_items).seconds.do(jobs_queue.put, lambda: executeSender(zabbixItemsDeployments(config)))
+
+    # nodes
+    if config['monitoring']['nodes']['enabled']:
+        schedule.every(sch_disco).seconds.do(jobs_queue.put, lambda: executeSender(zabbixDiscoveryNodes(config)))
+        schedule.every(sch_items).seconds.do(jobs_queue.put, lambda: executeSender(zabbixItemsNodes(config)))
+
+    # statefulsets
+    if config['monitoring']['statefulsets']['enabled']:
+        schedule.every(sch_disco).seconds.do(jobs_queue.put, lambda: executeSender(zabbixDiscoveryStatefulsets(config)))
+        schedule.every(sch_items).seconds.do(jobs_queue.put, lambda: executeSender(zabbixItemsStatefulsets(config)))
+
+    # volumes
+    if config['monitoring']['volumes']['enabled']:
+        schedule.every(sch_disco).seconds.do(jobs_queue.put, lambda: executeSender(zabbixDiscoveryVolumes(config)))
+        schedule.every(sch_items).seconds.do(jobs_queue.put, lambda: executeSender(zabbixItemsVolumes(config)))
+
+    # openebs
+    if config['monitoring']['openebs']['enabled']:
+        # cstorpoolclusters
+        schedule.every(sch_disco).seconds.do(jobs_queue.put, lambda: executeSender(zabbixDiscoveryCstorpoolclusters(config)))
+        schedule.every(sch_items).seconds.do(jobs_queue.put, lambda: executeSender(zabbixItemsCstorpoolclusters(config)))
+        # cstorpoolinstances
+        schedule.every(sch_disco).seconds.do(jobs_queue.put, lambda: executeSender(zabbixDiscoveryCstorpoolinstances(config)))
+        schedule.every(sch_items).seconds.do(jobs_queue.put, lambda: executeSender(zabbixItemsCstorpoolinstances(config)))
 
-    # Random sleep between 0 and 15 seconds
-    if args.no_wait == False:
-        timewait = randint(0,15)
-        if args.verbose: print(f"Starting in {timewait} second(s)...")
-        sleep(timewait)
-
-    # Node
-    if args.monitoring_mode == "node":
-        if args.monitoring_type == "json":
-            print("JSON output (node): {}".format(
-                getNode(args.object_name, args.exclude_name, args.match_label)))
-        if args.monitoring_type == "discovery":
-            print("Zabbix discovery (node): {}".format(
-                zabbix.send(zabbixDiscoveryNode(args.kubernetes_name, getNode(args.object_name, args.exclude_name, args.match_label)))))
-        if args.monitoring_type == "item":
-            print("Zabbix item (node): {}".format(
-                zabbix.send(zabbixItemNode(args.kubernetes_name, getNode(args.object_name, args.exclude_name, args.match_label)))))
-
-    # Daemonset
-    if args.monitoring_mode == "daemonset":
-        if args.monitoring_type == "json":
-            print("JSON output (daemonset): {}".format(
-                getDaemonset(args.object_name, args.exclude_name, args.exclude_namespace, args.match_label)))
-        if args.monitoring_type == "discovery":
-            print("Zabbix discovery (daemonset): {}".format(
-                zabbix.send(zabbixDiscoveryDaemonset(args.kubernetes_name, getDaemonset(args.object_name, args.exclude_name, args.exclude_namespace, args.match_label)))))
-        if args.monitoring_type == "item":
-            print("Zabbix item (daemonset): {}".format(
-                zabbix.send(zabbixItemDaemonset(args.kubernetes_name, getDaemonset(args.object_name, args.exclude_name, args.exclude_namespace, args.match_label)))))
-
-    # Volumes
-    if args.monitoring_mode == "volume":
-        if args.monitoring_type == "json":
-            print("JSON output (volume): {}".format(
-                getVolume(args.object_name, args.exclude_name, args.exclude_namespace, args.match_label)))
-        if args.monitoring_type == "discovery":
-            print("Zabbix discovery (volume): {}".format(
-                zabbix.send(zabbixDiscoveryVolume(args.kubernetes_name, getVolume(args.object_name, args.exclude_name, args.exclude_namespace, args.match_label)))))
-        if args.monitoring_type == "item":
-            print("Zabbix item (volume): {}".format(
-                zabbix.send(zabbixItemVolume(args.kubernetes_name, getVolume(args.object_name, args.exclude_name, args.exclude_namespace, args.match_label)))))
-
-    # Deployment
-    if args.monitoring_mode == "deployment":
-        if args.monitoring_type == "json":
-            print("JSON output (deployment): {}".format(
-                getDeployment(args.object_name, args.exclude_name, args.exclude_namespace, args.match_label)))
-        if args.monitoring_type == "discovery":
-            print("Zabbix discovery (deployment): {}".format(
-                zabbix.send(zabbixDiscoveryDeployment(args.kubernetes_name, getDeployment(args.object_name, args.exclude_name, args.exclude_namespace, args.match_label)))))
-        if args.monitoring_type == "item":
-            print("Zabbix item (deployment): {}".format(
-                zabbix.send(zabbixItemDeployment(args.kubernetes_name, getDeployment(args.object_name, args.exclude_name, args.exclude_namespace, args.match_label)))))
-
-    # Statefulset
-    if args.monitoring_mode == "statefulset":
-        if args.monitoring_type == "json":
-            print("JSON output (statefulset): {}".format(
-                getStatefulset(args.object_name, args.exclude_name, args.exclude_namespace, args.match_label)))
-        if args.monitoring_type == "discovery":
-            print("Zabbix discovery (statefulset): {}".format(
-                zabbix.send(zabbixDiscoveryStatefulset(args.kubernetes_name, getStatefulset(args.object_name, args.exclude_name, args.exclude_namespace, args.match_label)))))
-        if args.monitoring_type == "item":
-            print("Zabbix item (statefulset): {}".format(
-                zabbix.send(zabbixItemStatefulset(args.kubernetes_name, getStatefulset(args.object_name, args.exclude_name, args.exclude_namespace, args.match_label)))))
-
-    # Cronjob
-    if args.monitoring_mode == "cronjob":
-        if args.monitoring_type == "json":
-            print("JSON output (cronjob): {}".format(
-                getCronjob(args.object_name, args.exclude_name, args.exclude_namespace, args.match_label)))
-        if args.monitoring_type == "discovery":
-            print("Zabbix discovery (cronjob): {}".format(
-                zabbix.send(zabbixDiscoveryCronjob(args.kubernetes_name, getCronjob(args.object_name, args.exclude_name, args.exclude_namespace, args.match_label)))))
-        if args.monitoring_type == "item":
-            print("Zabbix item (cronjob): {}".format(
-                zabbix.send(zabbixItemCronjob(args.kubernetes_name, getCronjob(args.object_name, args.exclude_name, args.exclude_namespace, args.match_label)))))
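+    # run the queue consumer in a background thread; the main thread only
+    # drives the scheduler and hands jobs off through jobs_queue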
+    # thread
+    thread = threading.Thread(target=executeJobs)
+    thread.start()
+
+    # tasks
+    while True:
+        schedule.run_pending()
+        sleep(1)