From d1ef410e497482a1372e550078a7a6f4b56d51ca Mon Sep 17 00:00:00 2001 From: WuLeeX Date: Wed, 14 Mar 2018 14:21:32 +0800 Subject: [PATCH] load configurations from Consul cluster, not from local files. --- db.sqlite3 | Bin 3072 -> 0 bytes metrics/common_metrics.py | 130 +++++++++-------------- metrics/consul_metrics.py | 68 ++++++++---- metrics/grafana_metrics.py | 57 ++++------ metrics/monitor.py | 57 +++++----- metrics/monitor_params.py | 59 ----------- metrics/service_metrics.py | 29 ----- metrics/tomcat_metrics.py | 95 +++++++---------- metrics/utils.py | 209 +++++++++++++++++++++++++++++++++---- metrics/utils.pyc | Bin 0 -> 10682 bytes myapp/__init__.pyc | Bin 107 -> 107 bytes myapp/params.py | 6 +- myapp/params.pyc | Bin 309 -> 309 bytes myapp/parse.py | 2 - myapp/parse.pyc | Bin 7945 -> 7945 bytes myapp/urls.py | 1 - myapp/views.py | 4 +- 17 files changed, 376 insertions(+), 341 deletions(-) delete mode 100644 db.sqlite3 delete mode 100644 metrics/monitor_params.py delete mode 100644 metrics/service_metrics.py create mode 100644 metrics/utils.pyc diff --git a/db.sqlite3 b/db.sqlite3 deleted file mode 100644 index 919f2e5f580e255249506160e532d798db468f9d..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 3072 zcmeHIy>7xV5VjN4PMuj#R)<7@kXo@ftumm70BIO9A(yzRtNeiDc5fe%N9dFEf&>zv zsuLB1PdZ!YyYGDZ&L=-j1E~ePRI1<_Zb^$!O6~v%Ar#d{-AZG(wg&Ev{g+VE`*yD| z%CgqPzQ)CfVc?_;OstD`r$ZCXpL0>Ka(v$NTCCnhxi~cKhsgJ0A7bx5@ZliVDS08f zh;%;_dfD#x=@;MROy78!Rr5k-ifdVw_0fT~mzN#+88}R)3}mTACKQC@7{XZ)z$6-u zyeNT3KLKwRkB1><8u?+|1;&?41|M84Ub%8^1_O5oLuXU#?_PxFQs9Lt*Fwue?4X<3 g+?(o?_QoVP3>XH^iGk*CHuHZ@9Wk{S2L6wMA8dYWCIA2c diff --git a/metrics/common_metrics.py b/metrics/common_metrics.py index 962d075..9ab6b64 100644 --- a/metrics/common_metrics.py +++ b/metrics/common_metrics.py @@ -1,16 +1,12 @@ #!/usr/bin/python #-*- coding:utf-8 -*- -import os, sys +import sys import re - -import requests import logging -import json -import monitor_params -from time import time import utils sys.path.append("..") +import myapp.params as params from myapp.parse import ParseUtil from time import time @@ -21,46 +17,29 @@ logging.basicConfig(level=logging.INFO, format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s', datefmt='%a, %d %b %Y %H:%M:%S') -import sys + logger = logging.getLogger(sys.path[0] + 'common_metrics') class CommonMetrics(object): - def __init__(self, name='', **kwargs): - self._name = name - - - def ip_list(self, ip): - ''' - return common components ip list - ''' - ip_list = [] - try: - list = re.split(r'[,\s]\s*', ip.strip()) - except: - logging.error("Can't split ip {0}. Check the ip in monitor_params.py.".format(ip)) - sys.exit(1) - else: - ip_list = list - return ip_list - - def process_instance(self, ip): - ''' - @return list of common instances. - ''' - process_instance = utils.get_instances(ip, monitor_params.process_exporter_port) - return process_instance + def __init__(self, process_exporter_name, service_name): + self._process_name = process_exporter_name + self._service_name = service_name + self._instances = utils.instance_info(service_name, process_exporter_name) + self._service_instance = self._instances['service_instance'] + self._process_instance = self._instances['process_instance'] + self._prom_url = utils.prometheus_url(self._process_name) + self._grafana_url = utils.grafana_floating_url(self._process_name) - def cluster_state(self, ip): + def cluster_state(self): ''' @return cluster state and the numbers of healthy nodes. 
''' - process_instances = self.process_instance(ip) state = 0.0 success_count = 0.0 - for i in range(len(process_instances)): - common_up = self.node_state(process_instances[i]) + for i in range(len(self._process_instance)): + common_up = self.node_state(self._process_instance[i]) if common_up: success_count +=1 else: @@ -75,17 +54,16 @@ def node_state(self, process_instance): @return a float value 1 or 0, indicating the node state up or down. ''' state = {} - url = utils.prometheus_url() param = { - "query": '{0}_process_up{{instance="{1}"}}'.format(self._name, process_instance) + "query": '{0}_process_up{{instance="{1}"}}'.format(self._service_name, process_instance) } - response = ParseUtil.request_metrics(url, param) + response = ParseUtil.request_metrics(self._prom_url, param) for i in range(len(response)): state.setdefault(response[i]['metric']['instance'], response[i]['value'][1]) if state.has_key(process_instance): return float(state[process_instance]) else: - logging.error("No instance in the {0} cluster, node {1} down.".format(self._name, process_instance)) + logging.error("No instance in the {0} cluster, node {1} down.".format(self._service_name, process_instance)) return 0.0 @@ -94,17 +72,16 @@ def cpu_usage(self, process_instance): @return components cpu usage. ''' cpu_usage = {} - url = utils.prometheus_url() param = { - "query": '{0}_cpu_percentage{{instance="{1}"}}'.format(self._name, process_instance) + "query": '{0}_cpu_percentage{{instance="{1}"}}'.format(self._service_name, process_instance) } - response = ParseUtil.request_metrics(url, param) + response = ParseUtil.request_metrics(self._prom_url, param) for i in range(len(response)): cpu_usage.setdefault(response[i]['metric']['instance'], response[i]['value'][1]) if cpu_usage.has_key(process_instance): return float(cpu_usage[process_instance]) else: - logging.error("No instance in the {0} cluster, get {1} cpu usage failed.".format(self._name, process_instance)) + logging.error("No instance in the {0} cluster, get {1} cpu usage failed.".format(self._service_name, process_instance)) return None def uptime(self, process_instance): @@ -112,17 +89,16 @@ def uptime(self, process_instance): @return a float value of create time. ''' uptime = {} - url = utils.prometheus_url() param = { - "query": '{0}_running_time_seconds_total{{instance="{1}"}}'.format(self._name, process_instance) + "query": '{0}_running_time_seconds_total{{instance="{1}"}}'.format(self._service_name, process_instance) } - response = ParseUtil.request_metrics(url, param) + response = ParseUtil.request_metrics(self._prom_url, param) for i in range(len(response)): uptime.setdefault(response[i]['metric']['instance'], response[i]['value'][1]) if uptime.has_key(process_instance): return float(uptime[process_instance]) else: - logging.error("No instance in the {0} cluster, get {1} uptime failed.".format(self._name, process_instance)) + logging.error("No instance in the {0} cluster, get {1} uptime failed.".format(self._service_name, process_instance)) return None def mem_usage(self, process_instance): @@ -130,70 +106,66 @@ def mem_usage(self, process_instance): @return components memory usage. 
        '''
         mem_usage = {}
-        url = utils.prometheus_url()
         param = {
-            "query": 'sum by (instance)({0}_memory_usage_bytes_total{{instance="{1}", mode=~"rss|vms|shared"}})'.format(self._name, process_instance)
+            "query": 'sum by (instance)({0}_memory_usage_bytes_total{{instance="{1}", mode=~"rss|vms|shared"}})'.format(self._service_name, process_instance)
         }
-        response = ParseUtil.request_metrics(url, param)
+        response = ParseUtil.request_metrics(self._prom_url, param)
         for i in range(len(response)):
             mem_usage.setdefault(response[i]['metric']['instance'], response[i]['value'][1])
         if mem_usage.has_key(process_instance):
             return float(mem_usage[process_instance])
         else:
-            logging.error("No instance in the {0} cluster, get {1} memory usage failed.".format(self._name, process_instance))
+            logging.error("No instance in the {0} cluster, get {1} memory usage failed.".format(self._service_name, process_instance))
             return None
 
-    def cluster_list(self, ip):
-        process_instances = self.process_instance(ip)
+    def cluster_list(self):
         uptime = time()
-        for i in range(len(process_instances)):
-            state = self.node_state(process_instances[i])
+        for i in range(len(self._process_instance)):
+            state = self.node_state(self._process_instance[i])
             if state:
-                uptime = self.uptime(process_instances[i])
+                uptime = self.uptime(self._process_instance[i])
                 break
             else:
                 continue
 
         node_info = []
-        for i in range(len(process_instances)):
-            node_info.append(self.node_detail(process_instances[i]))
+        for i in range(len(self._process_instance)):
+            node_info.append(self.node_detail(self._process_instance[i]))
 
         cluster_info = {
-            "{0}_cluster_state".format(self._name) : self.cluster_state(ip)[0],
-            "{0}_total_nodes".format(self._name) : float(len(self.ip_list(ip))),
-            "{0}_healthy_nodes".format(self._name) : self.cluster_state(ip)[1],
-            "{0}_uptime".format(self._name) : time() - uptime,
-            "{0}_nodes_info".format(self._name) : node_info
+            "{0}_cluster_state".format(self._service_name) : self.cluster_state()[0],
+            "{0}_total_nodes".format(self._service_name) : float(len(self._process_instance)),
+            "{0}_healthy_nodes".format(self._service_name) : self.cluster_state()[1],
+            "{0}_uptime".format(self._service_name) : time() - uptime,
+            "{0}_nodes_info".format(self._service_name) : node_info
         }
         return cluster_info
 
     def node_detail(self, process_instance):
-        url_name = re.sub('([a-z0-9])_([a-z0-9])', r'\1-\2', self._name).lower()
+        board_name = re.sub('([a-z0-9])_([a-z0-9])', r'\1-\2', self._service_name).lower()
         if not self.node_state(process_instance):
             node_info = {
-                "{0}_node_state".format(self._name) : 0.0,
-                "{0}_uptime".format(self._name) : 0.0,
-                "{0}_cpu_usage".format(self._name) : 0.0,
-                "{0}_mem_usage".format(self._name) : 0.0,
-                "{0}_url".format(self._name) : None
+                "{0}_node_state".format(self._service_name) : 0.0,
+                "{0}_uptime".format(self._service_name) : 0.0,
+                "{0}_cpu_usage".format(self._service_name) : 0.0,
+                "{0}_mem_usage".format(self._service_name) : 0.0,
+                "{0}_url".format(self._service_name) : None
             }
         else:
             node_info = {
-                "{0}_node_state".format(self._name) : self.node_state(process_instance),
-                "{0}_uptime".format(self._name) : time() - self.uptime(process_instance),
-                "{0}_cpu_usage".format(self._name) : self.cpu_usage(process_instance),
-                "{0}_mem_usage".format(self._name) : self.mem_usage(process_instance),
-                "{0}_url".format(self._name) : 'http://{0}/dashboard/db/{1}-dashboard-for-prometheus?orgId=1&var-instance={2}'.format(utils.grafana_url(), url_name, process_instance)
+                "{0}_node_state".format(self._service_name)
: self.node_state(process_instance), + "{0}_uptime".format(self._service_name) : time() - self.uptime(process_instance), + "{0}_cpu_usage".format(self._service_name) : self.cpu_usage(process_instance), + "{0}_mem_usage".format(self._service_name) : self.mem_usage(process_instance), + "{0}_url".format(self._service_name) : 'http://{0}/dashboard/db/{1}-dashboard-for-prometheus?orgId=1&var-instance={2}&kiosk'.format(self._grafana_url, board_name, process_instance) } return node_info def main(): - prom_name = "prometheus" - ip = monitor_params.prometheus_ip - common = CommonMetrics(prom_name) - from pprint import pprint - pprint(common.cluster_list(ip)) + service_name = "prometheus" + process_name = "process_status_exporter" + common = CommonMetrics(process_name, service_name) if __name__ == '__main__': main() diff --git a/metrics/consul_metrics.py b/metrics/consul_metrics.py index 41b0b04..1f4df38 100644 --- a/metrics/consul_metrics.py +++ b/metrics/consul_metrics.py @@ -1,18 +1,11 @@ #!/usr/bin/python #-*- coding:utf-8 -*- -import os, sys -import re -import time -import requests -import argparse +import sys import logging -import json -import monitor_params import utils -# import father directory, append father directory to the sys.path sys.path.append("..") -from myapp.parse import ParseUtil +import myapp.params as params from common_metrics import CommonMetrics from time import time @@ -23,22 +16,36 @@ logging.basicConfig(level=logging.INFO, format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s', datefmt='%a, %d %b %Y %H:%M:%S') -import sys + logger = logging.getLogger(sys.path[0] + 'consul_metrics') class ConsulMetrics(CommonMetrics): - def __init__(self, name = "consul"): - self._name = name - - def cluster_state(self, ip): + + def __init__(self, process_exporter_name): + CommonMetrics.__init__(self, process_exporter_name, "consul") + + def consul_process_instance(self): + ''' + @return a list of service ip according the service_name given in the param. + ''' + process_instances = [] + ip_list = utils.consul_ip_list() + port = utils.get_process_port(self._process_name) + for i in range(len(ip_list)): + process_instances.append("{0}:{1}".format(ip_list[i], port)) + return process_instances + + + def cluster_state(self): ''' Once 1 leader down, more than half peers left in the cluster, the cluster can elected a new leader. So the cluster can work well. 
''' + process_instances = self.consul_process_instance() + state = 0.0 + members_count = float(len(process_instances)) success_count = 0.0 - members_count = len(self.ip_list(ip)) - process_instances = self.process_instance(ip) for i in range(len(process_instances)): consul_up = self.node_state(process_instances[i]) @@ -53,10 +60,33 @@ def cluster_state(self, ip): logging.info("success count is: {0}, and state is: {1}".format(success_count, state)) return [state,success_count] + def cluster_list(self): + process_instances = self.consul_process_instance() + uptime = time() + for i in range(len(process_instances)): + state = self.node_state(process_instances[i]) + if state: + uptime = self.uptime(process_instances[i]) + break + else: + continue + + node_info = [] + for i in range(len(process_instances)): + node_info.append(self.node_detail(process_instances[i])) + + cluster_info = { + "{0}_cluster_state".format(self._service_name) : self.cluster_state()[0], + "{0}_total_nodes".format(self._service_name) : float(len(process_instances)), + "{0}_healthy_nodes".format(self._service_name) : self.cluster_state()[1], + "{0}_uptime".format(self._service_name) : time() - uptime, + "{0}_nodes_info".format(self._service_name) : node_info + } + return cluster_info + def main(): - consul = ConsulMetrics() - from pprint import pprint - pprint(consul.cluster_list(monitor_params.consul_ip)) + process_name = "process_status_exporter" + consul = ConsulMetrics(process_name) if __name__ == '__main__': main() \ No newline at end of file diff --git a/metrics/grafana_metrics.py b/metrics/grafana_metrics.py index 8efbb62..be1de42 100644 --- a/metrics/grafana_metrics.py +++ b/metrics/grafana_metrics.py @@ -1,21 +1,14 @@ #!/usr/bin/python #-*- coding:utf-8 -*- -import os, sys -import re - -import requests -import argparse +import sys import logging -import json -import monitor_params import utils -sys.path.append('..') -from myapp.parse import ParseUtil +sys.path.append("..") +import myapp.params as params from common_metrics import CommonMetrics from time import time - ''' Scrape consul metrics from Consul Cluster or consul_exporter. 
''' @@ -23,38 +16,32 @@ logging.basicConfig(level=logging.INFO, format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s', datefmt='%a, %d %b %Y %H:%M:%S') -import sys -logger = logging.getLogger(sys.path[0] + 'nginx_metrics') -class GrafanaMetrics(CommonMetrics): +logger = logging.getLogger(sys.path[0] + 'grafana_metrics') - def __init__(self, name = 'grafana_server'): - self._name = name +class GrafanaMetrics(CommonMetrics): - def instance_info(self, ip): - instance_list = utils.get_instances(ip, monitor_params.grafana_port) - return instance_list + def __init__(self, process_exporter_name): + CommonMetrics.__init__(self, process_exporter_name, "grafana_server") - def cluster_list(self, ip): - process_instances = self.process_instance(ip) - instances = self.instance_info(ip) + def cluster_list(self): uptime = time() - for i in range(len(process_instances)): - state = self.node_state(process_instances[i]) + for i in range(len(self._process_instance)): + state = self.node_state(self._process_instance[i]) if state: - uptime = self.uptime(process_instances[i]) + uptime = self.uptime(self._process_instance[i]) break else: continue node_info = [] - for i in range(len(instances)): - node_info.append(self.grafana_node_detail(process_instances[i], instances[i])) + for i in range(len(self._service_instance)): + node_info.append(self.grafana_node_detail(self._process_instance[i], self._service_instance[i])) cluster_info = { - "grafana_cluster_state" : self.cluster_state(ip)[0], - "grafana_total_nodes" : float(len(instances)), - "grafana_healthy_nodes" : self.cluster_state(ip)[1], + "grafana_cluster_state" : self.cluster_state()[0], + "grafana_total_nodes" : float(len(self._service_instance)), + "grafana_healthy_nodes" : self.cluster_state()[1], "grafana_uptime" : time() - uptime, "grafana_nodes_info": node_info } @@ -75,17 +62,13 @@ def grafana_node_detail(self, process_instance, instance): "grafana_uptime" : time() - self.uptime(process_instance), "grafana_cpu_usage" : self.cpu_usage(process_instance), "grafana_mem_usage" : self.mem_usage(process_instance), - "grafana_url" : 'http://{0}/dashboard/db/grafana-dashboard-for-prometheus?orgId=1&var-instance={1}'.format(utils.grafana_url(), instance) + "grafana_url" : 'http://{0}/dashboard/db/grafana-dashboard-for-prometheus?orgId=1&var-instance={1}&kiosk'.format(self._grafana_url, instance) } - return node_info - + return node_info def main(): - grafana = GrafanaMetrics() - - from pprint import pprint - pprint(grafana.cluster_list(monitor_params.grafana_ip)) - + process_name = "process_status_exporter" + grafana = GrafanaMetrics(process_name) if __name__ == '__main__': main() diff --git a/metrics/monitor.py b/metrics/monitor.py index 5f07811..0f7517e 100644 --- a/metrics/monitor.py +++ b/metrics/monitor.py @@ -2,51 +2,52 @@ #!/usr/bin/python #-*- coding:utf-8 -*- -import monitor_params +import sys import utils +sys.path.append("..") +import myapp.params as params + from common_metrics import CommonMetrics from consul_metrics import ConsulMetrics from tomcat_metrics import TomcatMetrics from grafana_metrics import GrafanaMetrics - def monitor_metrics(): - name_list = { - "ambari_agent" : monitor_params.ambari_agent_ip, - "ambari_server" : monitor_params.ambari_server_ip, - "keycloak" : monitor_params.keycloak_ip, - "knox" : monitor_params.knox_ip, - "slapd" : monitor_params.ldap_ip, - "mysqld" : monitor_params.mysql_ip, - "prometheus" : monitor_params.prometheus_ip, - } - result = [] metric_info = {} - - consul = ConsulMetrics() - 
consul_info = consul.cluster_list(monitor_params.consul_ip) - tomcat = TomcatMetrics() - tomcat_info = tomcat.cluster_list(monitor_params.tomcat_ip) - grafana = GrafanaMetrics() - grafana_info = grafana.cluster_list(monitor_params.grafana_ip) - - for name in name_list: - common = CommonMetrics(name) - metrics = common.cluster_list(name_list[name]) - metric_info.setdefault("{0}_info".format(name), metrics) + name_list = [ + "ambari_agent", + "ambari_server", + "nginx", + "keycloak", + "knox", + "slapd", + "mysqld", + "prometheus", + ] + process_name = "process_status_exporter" + + consul = ConsulMetrics(process_name) + consul_info = consul.cluster_list() + tomcat = TomcatMetrics(process_name) + tomcat_info = tomcat.cluster_list() + grafana = GrafanaMetrics(process_name) + grafana_info = grafana.cluster_list() + + for i in range(len(name_list)): + common = CommonMetrics(process_name, name_list[i]) + metrics = common.cluster_list() + metric_info.setdefault("{0}_info".format(name_list[i]), metrics) metric_info.setdefault("consul_info", consul_info) metric_info.setdefault("tomcat_info", tomcat_info) metric_info.setdefault("grafana_info", grafana_info) - result.append(metric_info) - return result - def main(): result = monitor_metrics() if __name__ == '__main__': - main() \ No newline at end of file + main() + \ No newline at end of file diff --git a/metrics/monitor_params.py b/metrics/monitor_params.py deleted file mode 100644 index 056f2a0..0000000 --- a/metrics/monitor_params.py +++ /dev/null @@ -1,59 +0,0 @@ -#!/usr/bin/env python2 -#-*- coding: utf-8 -*- - -# consul_ip can be set to several values, seperated by ",". -# consul -consul_ip = "172.16.1.47, 172.16.1.33, 172.16.1.19" -consul_port = "8500" -consul_exporter_port = "9107" - -# nginx -# nginx_ip:nginx_stab_port running nginx stab -# nginx_ip:nginx_exporter_port running nginx_exporter -# nginx_ip:nginxlog_exporter_port running nginxlog_exporter -nginx_ip = "172.16.1.47" -nginx_stab_port = "7654" -nginx_exporter_port = "9113" -nginxlog_exporter_port = "4040" -# nginx_instance = "10.110.13.67:9113, 10.110.13.67:9113" - -# tomcat -tomcat_ip = "172.16.1.47, 172.16.1.33" -tomcat_master_port = "9011" -tomcat_tenant_port = "9021" - -# prometheus -prometheus_virtual_ip = "172.16.1.43" -prometheus_ip = "172.16.1.47, 172.16.1.33" -prometheus_outside_ip = "10.10.6.206, 10.10.6.211" -prometheus_port = "9500" - -# grafana version should be v4.6.3 or later -grafana_ip = "172.16.1.47, 172.16.1.33" -grafana_outside_ip = "10.10.6.206, 10.10.6.211" -grafana_port = "3000" -# - -# mysql -mysql_ip = "172.16.1.47, 172.16.1.33" -mysql_exporter_port = "9104" - -# process_status_exporter -process_exporter_ip = "172.16.1.47, 172.16.1.33, 172.16.1.19" -process_exporter_port = "9108" - -# keycloak -keycloak_ip = "172.16.1.47" -keycloak_port = "9110" - -# knox -knox_ip = "172.16.1.47" - -# ambari-server -ambari_server_ip = "172.16.1.47" - -# ambari-agent -ambari_agent_ip = "172.16.1.47, 172.16.1.33, 172.16.1.19" - -# ldap -ldap_ip = "172.16.1.47, 172.16.1.33" \ No newline at end of file diff --git a/metrics/service_metrics.py b/metrics/service_metrics.py deleted file mode 100644 index 4787e77..0000000 --- a/metrics/service_metrics.py +++ /dev/null @@ -1,29 +0,0 @@ -#!/usr/bin/python -#-*- coding:utf-8 -*- - -import re, os -import logging -import requests -from pprint import pprint - - -logging.basicConfig(level=logging.INFO, - format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s', - datefmt='%a, %d %b %Y %H:%M:%S') -import sys -logger = 
logging.getLogger(sys.path[0] + 'service_metrics') - -def get_ip(consul_ip, consul_port, service_name): - url = 'http://{0}:{1}/v1/catalog/node/{2}'.format(consul_ip, consul_port, service_name) - response = requests.get(url) - pprint(response.json()) - -def main(): - consul_ip = "10.10.6.206" - consul_port = 8500 - service_name = "prometheus" - get_ip(consul_ip, consul_port, service_name) - -if __name__ == '__main__': - main() - diff --git a/metrics/tomcat_metrics.py b/metrics/tomcat_metrics.py index 611fa43..30ccc05 100644 --- a/metrics/tomcat_metrics.py +++ b/metrics/tomcat_metrics.py @@ -1,16 +1,11 @@ #!/usr/bin/python #-*- coding:utf-8 -*- -import os, sys -import re -import time -import requests -import argparse +import sys import logging -import json -import monitor_params import utils sys.path.append("..") +import myapp.params as params from myapp.parse import ParseUtil from common_metrics import CommonMetrics from time import time @@ -28,34 +23,25 @@ class TomcatMetrics(CommonMetrics): - def __init__(self, name = 'tomcat'): - self.master_name = "{0}_master".format(name) - self.tenant_name = "{0}_tenant".format(name) - - def master_instance(self): - ''' - @return tomcat instance, include tomcat_ip:master_port - ''' - master_instance = utils.get_instances(monitor_params.tomcat_ip, monitor_params.tomcat_master_port) - return master_instance - - def tenant_instance(self): - ''' - @return tomcat instance, include tomcat_ip:tenant_port - ''' - tenant_instance = utils.get_instances(monitor_params.tomcat_ip, monitor_params.tomcat_tenant_port) - return tenant_instance + def __init__(self, process_exporter_name): + CommonMetrics.__init__(self, process_exporter_name, "tomcat") + self.master_name = "tomcat_master" + self.tenant_name = "tomcat_tenant" + self.master_instance_infos = utils.instance_info(self.master_name, self._process_name) + self.tenant_instance_infos = utils.instance_info(self.tenant_name, self._process_name) + self._master_instance = self.master_instance_infos['service_instance'] + self._tenant_instance = self.tenant_instance_infos['service_instance'] + self._process_instance = self.master_instance_infos['process_instance'] def tomcat_node_state(self, role, process_instance): ''' @return a float value 1 or 0, indicating the node state up or down. ''' state = {} - url = utils.prometheus_url() param = { "query": '{0}_process_up{{instance="{1}"}}'.format(role, process_instance) } - response = ParseUtil.request_metrics(url, param) + response = ParseUtil.request_metrics(self._prom_url, param) for i in range(len(response)): state.setdefault(response[i]['metric']['instance'], response[i]['value'][1]) if state.has_key(process_instance): @@ -64,21 +50,18 @@ def tomcat_node_state(self, role, process_instance): logging.error("No instance in the tomcat cluster, tomcat {0} node {1} down.".format(role, process_instance)) return 0.0 - def cluster_state(self, ip): + def cluster_state(self): ''' @return tomcat cluster state, and the numbers of healthy nodes. 
''' - master_instances = self.master_instance() - tenant_instances = self.tenant_instance() - process_instances = self.process_instance(ip) state = 0.0 success_count = 0.0 master_count = 0.0 tenant_count = 0.0 - for i in range(len(process_instances)): - master_up = self.tomcat_node_state(self.master_name, process_instances[i]) - tenant_up = self.tomcat_node_state(self.tenant_name, process_instances[i]) + for i in range(len(self._process_instance)): + master_up = self.tomcat_node_state(self.master_name, self._process_instance[i]) + tenant_up = self.tomcat_node_state(self.tenant_name, self._process_instance[i]) if master_up: master_count += 1 if tenant_up: @@ -94,15 +77,13 @@ def tomcat_cpu_usage(self, role, process_instance): @return a float value 1 or 0, indicating the node state up or down. ''' cpu_usage = {} - url = utils.prometheus_url() param = { "query": '{0}_cpu_percentage{{instance="{1}"}}'.format(role, process_instance) } - response = ParseUtil.request_metrics(url, param) + response = ParseUtil.request_metrics(self._prom_url, param) for i in range(len(response)): cpu_usage.setdefault(response[i]['metric']['instance'], response[i]['value'][1]) if cpu_usage.has_key(process_instance): - print float(cpu_usage[process_instance]) return float(cpu_usage[process_instance]) else: logging.error("No instance in the tomcat cluster, get tomcat {0} node {1} cpu usage failed.".format(role, process_instance)) @@ -113,11 +94,10 @@ def tomcat_uptime(self, role, process_instance): @return a float value 1 or 0, indicating the node state up or down. ''' uptime = {} - url = utils.prometheus_url() param = { "query": '{0}_running_time_seconds_total{{instance="{1}"}}'.format(role, process_instance) } - response = ParseUtil.request_metrics(url, param) + response = ParseUtil.request_metrics(self._prom_url, param) for i in range(len(response)): uptime.setdefault(response[i]['metric']['instance'], response[i]['value'][1]) if uptime.has_key(process_instance): @@ -131,11 +111,10 @@ def tomcat_mem_usage(self, role, process_instance): @return a float value 1 or 0, indicating the node state up or down. 
        '''
         mem_usage = {}
-        url = utils.prometheus_url()
         param = {
             "query": 'sum by (instance)({0}_memory_usage_bytes_total{{instance="{1}", mode=~"rss|vms|shared"}})'.format(role, process_instance)
         }
-        response = ParseUtil.request_metrics(url, param)
+        response = ParseUtil.request_metrics(self._prom_url, param)
         for i in range(len(response)):
             mem_usage.setdefault(response[i]['metric']['instance'], response[i]['value'][1])
         if mem_usage.has_key(process_instance):
@@ -144,33 +123,30 @@ def tomcat_mem_usage(self, role, process_instance):
             logging.error("No instance in the tomcat cluster, get tomcat {0} node {1} memory usage failed.".format(role, process_instance))
             return None
 
-    def cluster_list(self, ip):
-        master_instances = self.master_instance()
-        tenant_instances = self.tenant_instance()
-        process_instances = self.process_instance(ip)
+    def cluster_list(self):
         uptime = time()
-        for i in range(len(process_instances)):
-            master_state = self.tomcat_node_state(self.master_name, process_instances[i])
-            tenant_state = self.tomcat_node_state(self.tenant_name, process_instances[i])
+        for i in range(len(self._process_instance)):
+            master_state = self.tomcat_node_state(self.master_name, self._process_instance[i])
+            tenant_state = self.tomcat_node_state(self.tenant_name, self._process_instance[i])
             if master_state:
-                uptime = self.tomcat_uptime(self.master_name, process_instances[i])
+                uptime = self.tomcat_uptime(self.master_name, self._process_instance[i])
                 break
             elif tenant_state:
-                uptime = self.tomcat_uptime(self.tenant_name, process_instances[i])
+                uptime = self.tomcat_uptime(self.tenant_name, self._process_instance[i])
                 break
             else:
                 continue
 
         master_info = []
         tenant_info = []
-        for i in range(len(master_instances)):
-            master_info.append(self.tomcat_node_detail(self.master_name, process_instances[i], master_instances[i]))
-            tenant_info.append(self.tomcat_node_detail(self.tenant_name, process_instances[i], tenant_instances[i]))
+        for i in range(len(self._master_instance)):
+            master_info.append(self.tomcat_node_detail(self.master_name, self._process_instance[i], self._master_instance[i]))
+            tenant_info.append(self.tomcat_node_detail(self.tenant_name, self._process_instance[i], self._tenant_instance[i]))
 
         cluster_info = {
-            "tomcat_cluster_state" : self.cluster_state(ip)[0],
-            "tomcat_total_nodes" : float(sum([len(master_instances), len(tenant_instances)])),
-            "tomcat_healthy_nodes" : self.cluster_state(ip)[1],
+            "tomcat_cluster_state" : self.cluster_state()[0],
+            "tomcat_total_nodes" : float(sum([len(self._master_instance), len(self._tenant_instance)])),
+            "tomcat_healthy_nodes" : self.cluster_state()[1],
             "tomcat_uptime" : time() - uptime,
             "tomcat_master_info": master_info,
             "tomcat_tenant_info": tenant_info
@@ -184,7 +160,7 @@ def tomcat_node_detail(self, role, process_instance, role_instance):
             "tomcat_{0}_uptime".format(role) : 0.0,
             "tomcat_{0}_cpu_usage".format(role) : 0.0,
             "tomcat_{0}_mem_usage".format(role) : 0.0,
-            "tomcat_{0}_url".format(role) : None
+            "tomcat_{0}_url".format(role) : None
         }
         else:
             node_info = {
@@ -192,14 +168,13 @@ def tomcat_node_detail(self, role, process_instance, role_instance):
             "tomcat_{0}_uptime".format(role) : time() - self.tomcat_uptime(role, process_instance),
             "tomcat_{0}_cpu_usage".format(role) : self.tomcat_cpu_usage(role, process_instance),
             "tomcat_{0}_mem_usage".format(role) : self.tomcat_mem_usage(role, process_instance),
-            "tomcat_{0}_url".format(role) : 'http://{0}/dashboard/db/tomcat-dashboard-for-prometheus?orgId=1&var-instance={1}'.format(utils.grafana_url(),
role_instance) + "tomcat_{0}_url".format(role) : 'http://{0}/dashboard/db/tomcat-dashboard-for-prometheus?orgId=1&var-instance={1}&kiosk'.format(self._grafana_url, role_instance) } return node_info def main(): - tomcat = TomcatMetrics() - from pprint import pprint - pprint(tomcat.cluster_list(monitor_params.tomcat_ip)) + process_name = "process_status_exporter" + tomcat = TomcatMetrics(process_name) if __name__ == '__main__': main() diff --git a/metrics/utils.py b/metrics/utils.py index e3ab259..11190ea 100644 --- a/metrics/utils.py +++ b/metrics/utils.py @@ -1,20 +1,26 @@ #!/usr/bin/python #-*- coding:utf-8 -*- -import re, os -import monitor_params +import re, sys +sys.path.append("..") +import myapp.params as params import logging import requests +import json +import base64 logging.basicConfig(level=logging.INFO, format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s', datefmt='%a, %d %b %Y %H:%M:%S') -import sys + logger = logging.getLogger(sys.path[0] + 'utils') -def get_instances(ip, port): +def get_ip_list(ip): + ''' + @param ip: a ip string, seperated by comma. + @return a list of ip + ''' ip_list = [] - instances = [] if r',' in ip.strip(): try: list = re.split(r'[,\s]\s*', ip.strip()) @@ -25,21 +31,138 @@ def get_instances(ip, port): ip_list = list else: ip_list.append(ip.strip()) - + return ip_list + +def consul_ip_list(): + ''' + @return a list of consul + ''' + ip_list = get_ip_list(params.consul_ip) + return ip_list + +def get_consul_instance(): + ''' + @return a list of consul instances, including ip:port + ''' + ip_list = consul_ip_list() + port = params.consul_port + success = 0 + + for i in range(len(ip_list)): + url = 'http://{0}:{1}/v1/status/leader'.format(ip_list[i], port) + try: + response = requests.get(url, timeout=5) + response.raise_for_status() + except requests.exceptions.ConnectionError: + logging.warning("GET {0} failed! Connection Error.".format(url)) + continue + except requests.RequestException as e: + logging.warning(e) + continue + else: + logging.info("GET {0} ok! Response code is = {1}".format(url, response.status_code)) + result = response.json() + if len(result) >= 1: + success += 1 + consul_instance = "{0}:{1}".format(ip_list[i], port) + logging.debug("Consul instance is : {0}".format(consul_instance)) + return consul_instance + else: + continue + if not success: + logging.error("No consul agent available, please check it out.") + sys.exit(1) + +def service_info(service_name): + ''' + @return a dict of service info including service_ip and service_port via the service_name given in the param. + ''' + service_ip = [] + consul_instance = get_consul_instance() + logging.debug("Consul instance is: {0}".format(consul_instance)) + url = 'http://{0}/v1/catalog/service/{1}'.format(consul_instance, service_name) + try: + response = requests.get(url, timeout=5) + response.raise_for_status() + except requests.exceptions.ConnectionError: + logging.error("GET {0} failed! Connection Error.".format(url)) + except requests.RequestException as e: + logging.error(e) + else: + logging.info("GET {0} ok! 
Response code is = {1}".format(url, response.status_code)) + result = response.json() + if len(result): + service_port = result[0]['ServicePort'] + for i in range(len(result)): + service_ip.append(result[i]['ServiceAddress']) + else: + logging.warning("No service named {0} in consul cluster.".format(service_name)) + service_port = None + service_info = { + "service_ip": service_ip, + "service_port": service_port + } + return service_info + +def get_process_port(process_exporter_name): + ''' + @return a list of service ip according the service_name given in the param. + ''' + consul_instance = get_consul_instance() + logging.debug("Consul instance is: {0}".format(consul_instance)) + url = 'http://{0}/v1/catalog/service/{1}'.format(consul_instance, process_exporter_name) + try: + response = requests.get(url, timeout=5) + response.raise_for_status() + except requests.exceptions.ConnectionError: + logging.error("GET {0} failed! Connection Error.".format(url)) + except requests.RequestException as e: + logging.error(e) + else: + logging.info("GET {0} ok! Response code is = {1}".format(url, response.status_code)) + result = response.json() + if len(result): + process_port = result[0]['ServicePort'] + logging.info("The port of {0} is : {1}".format(process_exporter_name, process_port)) + else: + logging.warning("No process_exporter named {0} in consul cluster, no port found.".format(process_exporter_name)) + process_port = None + return process_port + +def get_instances(ip_list, port): + ''' + @return instance info, formating on ip:port, via the given ip_list and port. + ''' + instances = [] for i in range(len(ip_list)): - url = ip_list[i] + ":" + port + url = "{0}:{1}".format(ip_list[i], port) instances.append(url) return instances +def instance_info(service_name, process_exporter_name): + services = service_info(service_name) + service_ip = services['service_ip'] + service_port = services['service_port'] + process_port = get_process_port(process_exporter_name) + instances = {} + instances.setdefault("service_instance", get_instances(service_ip, service_port)) + instances.setdefault("process_instance", get_instances(service_ip, process_port)) + return instances -def get_instance_index(process): +def get_instance_index(service_name, process_exporter_name): + ''' + According to service_name(prometheus or grafana_server) to get instance index. + ''' success = 0.0 - prometheus_instances = get_instances(monitor_params.prometheus_ip, monitor_params.prometheus_port) - for index in range(len(prometheus_instances)): + prometheus_instances = instance_info("prometheus", process_exporter_name)['service_instance'] + instance_infos = instance_info(service_name, process_exporter_name) + service_instances = instance_infos['service_instance'] + process_instances = instance_infos['process_instance'] + for index in range(len(service_instances)): prom_url = 'http://{0}/api/v1/query'.format(prometheus_instances[index]) param = { - "query": 'up{{job="{0}"}}'.format(process) + "query": '{0}_process_up{{instance="{1}"}}'.format(service_name, process_instances[index]) } logging.info("start GET %s?%s", prom_url, param) try: @@ -63,9 +186,12 @@ def get_instance_index(process): logging.error("No prometheus or grafana available, please check it out.") sys.exit(1) -def normal_index(): - prom_index = get_instance_index("prometheus") - grafana_index = get_instance_index("grafana") +def normal_index(process_exporter_name): + ''' + @return the index of available prometheus and grafana. 
+ ''' + prom_index = get_instance_index("prometheus", process_exporter_name) + grafana_index = get_instance_index("grafana_server", process_exporter_name) if prom_index == grafana_index: logging.info("prometheus and grafana all normal, index is: {0}".format(grafana_index)) return grafana_index @@ -78,21 +204,60 @@ def normal_index(): logging.info("prometheus1 down, prometheus2 OK. grafana OK, using prometheus index, the index is: {0}".format(prom_index)) return prom_index -def prometheus_url(): - prometheus_instances = get_instances(monitor_params.prometheus_ip, monitor_params.prometheus_port) - index = normal_index() +def prometheus_url(process_exporter_name): + prometheus_instances = instance_info("prometheus", process_exporter_name)['service_instance'] + index = normal_index(process_exporter_name) url = 'http://{0}/api/v1/query'.format(prometheus_instances[index]) return url -def grafana_url(): - grafana_instances = get_instances(monitor_params.grafana_outside_ip, monitor_params.grafana_port) - index = normal_index() +def grafana_url(process_exporter_name): + grafana_instances = instance_info("grafana_server", process_exporter_name)['service_instance'] + index = normal_index(process_exporter_name) + logging.debug("grafana available instance : {0}".format(grafana_instances[index])) return grafana_instances[index] +def grafana_floating_url(process_exporter_name): + instance = grafana_url(process_exporter_name).split(":") + ip = instance[0] + port = instance[1] + logging.debug("grafana ip: {0}, and grafana port: {1}.".format(ip, port)) + map = floating_ip_map() + if ip in map.keys(): + floating_ip = map[ip] + floating_instance = "{0}:{1}".format(floating_ip, port) + logging.info("Get grafana ip {0} in floating ip map, grafana floating instance is : {1}.".format(ip, floating_instance)) + return floating_instance + else: + logging.error("No grafana ip {0} found in floating ip map, please check.".format(ip)) + sys.exit(1) + + +def floating_ip_map(): + ''' + http://consul_instance/v1/kv/floating_ip_map?recurse + @return: a list of floating ip. 
+ ''' + consul_instance = get_consul_instance() + floating_ip_info = {} + url = 'http://{0}/v1/kv/floating_ip_map?recurse'.format(consul_instance) + logging.info("start GET %s", url) + try: + response = requests.get(url) + response.raise_for_status() + except requests.RequestException as e: + logging.error(e) + else: + result = response.json() + for i in range(len(result)): + floating_ip_info.update(json.loads(base64.b64decode(result[i]['Value']).decode())) + logging.debug("The floating ip map info is : {0}.".format(floating_ip_info)) + return floating_ip_info def main(): - print prometheus_url() - print grafana_url() + process_exporter_name = "process_status_exporter" + print prometheus_url(process_exporter_name) + print grafana_url(process_exporter_name) + print grafana_floating_url(process_exporter_name) if __name__ == '__main__': main() \ No newline at end of file diff --git a/metrics/utils.pyc b/metrics/utils.pyc new file mode 100644 index 0000000000000000000000000000000000000000..592ecb0db2a62a31e60a2567e508d0a4447b942b GIT binary patch literal 10682 zcmdT~U2Ggz6~42({@Jx1J9hpyP3R=4+fBQy>n4QOG&F6JrlrJzaoePh!!q6(J8Q3J zH*;qlTk#5^fCS>9JRu%<0fBgc7vKRQ6%{1J14!kum5>lfyzqiRAn{iCzH?`0*G?07 z087~3>pOSu@44rE_dDlq_HQGFXU=?gzNX@z0sKFQkN&i!l!re{HI$l9^Ol;oly9k( zj9SSmFQe{S%8Kjd)JmV4?@OBXtNH#UKcMCZlKh~WA58LjHJ_J!Ry7K0t)S)$%JTcw z%8>GMikV^Mp+|It_JiuK%F64Jcn)4(6}^Hwh*`|(d3zO;2S zulo%tTxy1EZluqnaI)m;T2x>2PilLzv{Y~Sfy;;Mje6kEOqTdBXrAV1@rM)9 zpGUH+RL7!rY$>&(xW?6f6|ShwBPtT@fNG?#?q*acqpk*zE6cK?tXjz_4{0B3VCSep zSnUdSI42LcI_laM+E^->P+RD3u%|CnM$ue}g3_}{3O0VuwcOBMv+J!H+eK>Y2+~|G z+uCpWAy(|!i<@?>xwht3O#RT0+F^jojk=EP<`Npo3(Q`rvK~Zwy}Y1rEa=De$MA68 z4IYbZ-D=b$8RgECor-<_re9mNqnkd5M2TGw?6qc4kD6gsCZsE^%}SkJmhwnMs+?LY z;`U<>vI=%8d(&txFJszL>W5)7bXW}%!%{bOl*50219hn{ZmZ=7UWxCdGxW*rt+%RN zk{SZ6eZ{+Ioz8c$&Yk9r>x z)z@#0TUqM}J_oG5nS5r#s79XW&m>ATw^QXtjVw4zPFiFEyP_=a!pIVd z7;A$WZ5*cwVSiKOxo~5T)e$f@CbgASMVJV!E~hq6P#CbSEipQ=FslR8>!Vp=1nX+> zn#!?GUab_=YC(nH5IY@;!IzziUD8f(J)!U?)kmc6&z05bhy6<_H3EfIUpPGF#%g;n1v|aU|G@)Ve4Q zfv(UEfCE{)!69WJjCdOQx7q*?EpJdE8e^eb*M1e}T-C@aDxlEcsQE1}Uq>|6t~f** zjzB3_dD{&G;w3wBOz#(yrh*$#Q-3;F;j+l@%(ZvFqTvT2GtLrFP7CI0^ zRwkhUfbuMDIkrzw0`pAd0yt#_oP;$3vcL#pa00ZU5KtoEbOmUG#{t3>!RgBaPUsrM z5g>vAvdI9!JEpEdvF`zy`hZMQ6?lVLSi$$zH5Pzg0+ysL#~Myy2@fLzOZ_U^qYR#f zCoE)u97Ui}u`{4HW)=1|eUFYW+;~WJz*5ll7+V2~isYso^OgmOHM9trRghs*-UEV) z4(lLGfWIMiW(y=}Se@zQ)mC23;q6?Br-!~xsSD9biHldS331VWxL&UT1fc6Z@Dvy0 z%(LCV0}+y1RVuT+UU!AHB!#4k_HupQ572?U5%yxJl0L|tz6V1Lgb=;DkVm+yxseMV zGZpte1z2*38UF*cL$3ya=FH+gMF66>;^*f*4_2u4V<-a7#WS+GS}&n%#xiVxx{my? 
zBJdMegyY;bHQ6q!Rp;}D!3 zO4k`-@*yMwIJ3>bFR|6O;W=%GknKFdgo|&ziQbc~QK+7=Trf40>5OA{AfK|xU2rfk06kN#HVpy@Z!-|~ww67Re z#1%1~200H58QcH_B83&P_WQ!h-Fsjq#jW6*y|D5fz)CKK72~i3pW6W|uxB_pU0A_` zft5ai4FRJtM;4UA3IN9d6IczV3ib;GG-N485BRk^(PKz~e4*kTup?$OuR(k}kRow$ z5(6M4)N0L8XbioVMB)FBalrX7)-81BRV+wQ4o;o;WI}hQJHqn-1Ff*hQ@rYLu$CV_ z2oK73fChw2agjiO=w19b&7Vf^Od}~#?;|4R&Iwk<^6zK#V(Li~PIdQ8U3eGiMepzp ztUy14gxuo(hHuQiM&zWi(-#sp@d;#>!6xA7oQ?g#9!KXSSp&Qygr8_3y^aoSopuvP z5Q>UEz+8#_>(`NNGl_(W$(dQk-ZIKUP7lHz)B19HNXF-)@vG%5rIh2dk((BC?Go2u z;9P)R4g~_cgertiI+Tkzq{$4lz_oz+F76v=#eGX>@kJD9o;bv7lEJ-(=y6F|d}M0~ z?7(8P9oU2-eA4c72G7F$z+}kzgy3O^=EN)4FQD`%g?Dl{3%yu3RbSM6NjQ zp#KQ>K)|HE;^9N(V?BQZ4Ie1u*iNDL#-B}8KM{30S-7?_N>bOmqBjv1DuQ!{iHM%| z>xk%2Gba?~Bgl#TlYWvR^)AbQ=lEa8#8UYWC-NuFOQSDuf^$HT#5*}bIth2Q203^V|yg1L&9Q)kwDE?;t3!$kNQ?Lu2vA9$AEO$umfYz zZ!zUmzn|~2mz37aDOtjHBQAN|5+RLH4y%hxk)Wi6nz3O(jxo*VIWd!)|={`1$wdX6BC*=L1V;rUGWekP9+mscN=Zr#EWRe zh8Nu{x_=)J+K1N843lS>h&ywLIXV9rs&Q;4|ABkd^^DNKguQVk(i4}Vnf4&fBx`Z} z?rwFDH0K#sX6RnlD_bqG8dk0{a6{Y@KX->4wTlQfjw$pW#F#!b@>?j?+!db2!}uM= znLGg>=`bSaNtk9L4bvPzje_+kKHGvcJWu#JkB|Nm5~5k^CCSlkE@^m-;5?CIo2Ggp zakPM5fUeXJBB6?FA#S&l)B>9AtVK^oK#gxwM-90-VB4H9U40HS+ICpUH42x{+n6*A zNxGh>CF2;eNgMj*oaM6U@o%?uO@{En4Q;p40I3JzYcQD0;p*DNGG5-+XxhH=O4(j) zN6C=Ww%5EJlW-`CTv$+|x-#nE1V70zH8#Qdl4!V(1hW{&<%PyDl1*KI>dU zCvvJK`rS3ep|9PFhmPHj?1!jIzVPG#Fg67Ay>xKmQCd*Qv*Xd34kMQd)W zoL1?+h~16&=80YNa0cht(VGNW0DE?~Y20V;T~wtcH#L{hg~LVe2*k>jOVR-^qcJH7 ztFHQ++Hetix;?al8}A5cvb#h{9a^FB*lB0%h2!@0EDlchIbLN6m3HaIeIG`OMOaPe z0sa0X$|V-2aHmtiZxKOQ-sFrUmjlq&I7co!b$b4Q2k1Ms6(T??Di6g^p*F=3s4Xai zwRyA~Wrfng44}VheD#Ehnc_wlcQ&m!W-6L%oFpr_iZ6MffjBX+f{jZ&vmjz~Rf8Tc z+&CrSO^}IUMokbx#aR#kBleqUe5hvbk&(flb?d9@DkqAY7ML4mFU5R=T>PbxMJ!dA zN6hMXT#k|5udYv}@-!OsT*VPAI9@TP*)sy|3}(Xnp7<`eQX-W!fk_g99$v76c*{kE7yvu&@^!hiY`6IJAQ>E1 z?P)1o#4Yfd(-O8?JagLfz|D42}4e@vd6&!&v2JOvPQwT%5CltZOHMbt*vEKMZ?6-#S zE3Q%%*jW#%)siTUz$771TD9EhrrcXe0D5f`-$$yL?>vc1YZOp)PxR~!t`#41VF#6} z*Knt}R9}|PUY>pNiYOSIp-&{a3XsO02_n@`~Uy| delta 13 Ucmd1KW@rA)%M}znYa)9J03197CIA2c diff --git a/myapp/params.py b/myapp/params.py index 8ae5474..31a666b 100644 --- a/myapp/params.py +++ b/myapp/params.py @@ -3,8 +3,8 @@ # define consul cluster ip and port, both set to be string. # consul_ip can be set to several values, seperated by ",". -consul_ip = "172.16.1.10, 172.16.1.14, 172.16.1.41" +consul_ip = "172.16.1.18, 172.16.1.50, 172.16.1.53" consul_port = "8500" -rest_api_url = "http://10.10.6.214:19030" -prometheus_url = "http://10.10.6.214:9500" +rest_api_url = "http://10.10.6.209:19030" +prometheus_url = "http://10.10.6.209:9500" diff --git a/myapp/params.pyc b/myapp/params.pyc index 0e3f14732518f86950485eba015492b5d99a6265..c365a845464842a4625e4302dd1eb4ce8ff78d1e 100644 GIT binary patch delta 58 zcmdnWw3UgS`7CJEcM7vVH)j CCJv_n diff --git a/myapp/parse.py b/myapp/parse.py index 41dd5f5..20accc8 100644 --- a/myapp/parse.py +++ b/myapp/parse.py @@ -7,8 +7,6 @@ import logging import time from pprint import pprint -# from util import request_util, dic_to_str, cpu_cores -# from . 
import params import params import base64 diff --git a/myapp/parse.pyc b/myapp/parse.pyc index 5c11f1997d36884157fc1a2df144e442a2869c20..6b3fc74901deda20cf05d84d2f96d28094bd9932 100644 GIT binary patch delta 138 zcmV;50CoR~K8ZdD1M>|Ej3lP92T}zA8k1TDe*qY?+69aO0W-5_2d4u8II|B6#sUFV zv(^nG0s&{U`VSid0d2x s){~bUjsY^0CLQ4cK6eF0SpHc0R{mF0So~e0u~w;u?YtZ0pC0%3;+NC delta 138 zcmV;50CoR~K8ZdD1M>|EQd63-2T}zA9FtlFe*qe^+69aO0X4H{2d4u8I%8*>2x s*prtWjsY~2CLQ4c<&y{=>K6kH0SpHc0R{mF0So~e0u~w;u?YtZ0o&#z6951J diff --git a/myapp/urls.py b/myapp/urls.py index 2ea5c8a..4439244 100644 --- a/myapp/urls.py +++ b/myapp/urls.py @@ -5,7 +5,6 @@ from . import views urlpatterns = [ - url(r'^api/v1/test/$', views.test), url(r'^api/v1/monitor/$', views.module_list), url(r'^api/v1/hosts/$', views.hosts_list), url(r'^api/v1/hosts/(?P((?:(?:25[0-5]|2[0-4]\d|((1\d{2})|([1-9]?\d)))\.){3}(?:25[0-5]|2[0-4]\d|((1\d{2})|([1-9]?[1-9])))(\:\d{1,6})))/$', views.host_detail), diff --git a/myapp/views.py b/myapp/views.py index a37256d..82472fe 100644 --- a/myapp/views.py +++ b/myapp/views.py @@ -19,7 +19,7 @@ import sys sys.path.append('..') -import metrics.monitor +import metrics.monitor as monitor @api_view(['GET']) def hosts_list(request): @@ -49,7 +49,7 @@ def module_list(request): List all module metrics. """ if request.method == 'GET': - return JsonResponse(metrics.monitor.monitor_metrics(), safe=False) + return JsonResponse(monitor.monitor_metrics(), safe=False) @api_view(['GET']) def test(request):
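
The sketches below are not part of the patch; they illustrate the HTTP calls the new metrics/utils.py relies on. Addresses, ports and service names are examples only (borrowed from myapp/params.py and the removed metrics/monitor_params.py), and error handling is trimmed for brevity.

A minimal sketch of the Consul catalog lookup that utils.service_info() builds on: ask any agent for the nodes registered under a service name and read back each node's ServiceAddress and ServicePort.

# Illustrative only: resolve a service's instances from the Consul catalog.
import requests

def lookup_service(consul_instance, service_name):
    # GET /v1/catalog/service/<name> returns one JSON entry per registered node.
    url = 'http://{0}/v1/catalog/service/{1}'.format(consul_instance, service_name)
    response = requests.get(url, timeout=5)
    response.raise_for_status()
    # Each entry carries the address and port the service registered with.
    return [(node['ServiceAddress'], node['ServicePort']) for node in response.json()]

if __name__ == '__main__':
    # 172.16.1.18:8500 is one of the consul_ip values listed in myapp/params.py.
    print(lookup_service('172.16.1.18:8500', 'prometheus'))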
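
The floating-IP mapping comes from the Consul KV store. utils.floating_ip_map() queries /v1/kv/floating_ip_map?recurse, and Consul returns every Value base64-encoded, so it has to be decoded before json.loads(). A rough equivalent, with the same caveats as above:

# Illustrative only: read the internal-to-floating IP map from Consul KV.
import base64
import json
import requests

def read_floating_ip_map(consul_instance):
    url = 'http://{0}/v1/kv/floating_ip_map?recurse'.format(consul_instance)
    response = requests.get(url, timeout=5)
    response.raise_for_status()
    mapping = {}
    for entry in response.json():
        # Value is base64-encoded JSON, e.g. {"172.16.1.47": "10.10.6.206"}.
        mapping.update(json.loads(base64.b64decode(entry['Value']).decode()))
    return mapping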
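
The per-node metrics are Prometheus instant queries. ParseUtil.request_metrics() itself is not shown in this patch, but the URL and parameters built above suggest a plain GET against /api/v1/query; a guess at the equivalent call, using the <service>_process_up naming convention from the code above:

# Illustrative only: instant query against the Prometheus HTTP API.
import requests

def query_node_state(prom_url, service_name, instance):
    # e.g. prometheus_process_up{instance="172.16.1.47:9108"}
    param = {"query": '{0}_process_up{{instance="{1}"}}'.format(service_name, instance)}
    response = requests.get(prom_url, params=param, timeout=5)
    response.raise_for_status()
    # The result is an instant vector; a healthy node reports the sample value "1".
    return response.json()['data']['result']

if __name__ == '__main__':
    # Address and ports are examples (prometheus_port 9500 and
    # process_exporter_port 9108 from the removed monitor_params.py).
    print(query_node_state('http://172.16.1.47:9500/api/v1/query', 'prometheus', '172.16.1.47:9108'))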