config.py (forked from shell909090/influx-proxy)
#!/usr/bin/python
# -*- coding: utf-8 -*-
'''
@date: 2017-01-24
@author: Shell.Xu
@copyright: 2017, Eleme <[email protected]>
@license: MIT
'''
from __future__ import absolute_import, division,\
print_function, unicode_literals
import sys
import getopt
import redis

# BACKENDS: the keys here are referenced by KEYMAPS and NODES, and are also used to name cache files.
# url: influxdb address, or any other http backend that supports the influxdb line protocol
# db: influxdb database name
# zone: backends in the same zone as the querying node are preferred
# interval: default 1000ms; buffered points are flushed after this interval even if fewer than maxrowlimit points are queued
# timeout: default 10000ms; write requests time out after 10 seconds
# timeoutquery: default 600000ms; query requests time out after 600 seconds
# maxrowlimit: default 10000; flush the write buffer once it holds 10000 points
# checkinterval: default 1000ms; probe whether the backend is alive every second
# rewriteinterval: default 10000ms; retry cached failed writes every 10 seconds
# writeonly: default 0; set to 1 to exclude this backend from queries
BACKENDS = {
    'local': {
        'url': 'http://localhost:8086',
        'db': 'test',
        'zone': 'local',
        'interval': 1000,
        'timeout': 10000,
        'timeoutquery': 600000,
        'maxrowlimit': 10000,
        'checkinterval': 1000,
        'rewriteinterval': 10000,
    },
    'local2': {
        'url': 'http://influxdb-test:8086',
        'db': 'test2',
        'interval': 200,
    },
}

# KEYMAPS: measurement -> [backend keys]; every key must exist in BACKENDS.
# Points for a measurement are written to all of its listed backends;
# '_default_' catches measurements without an explicit entry.
# A sanity check for this rule is sketched after the dict below.
KEYMAPS = {
    'cpu': ['local'],
    'temperature': ['local2'],
    '_default_': ['local'],
}
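
# Illustrative sanity check (not in the original upstream file): verify the rule
# stated above, i.e. that every backend key referenced in KEYMAPS is declared in
# BACKENDS. The loop variable names are throwaway names introduced here.
for _measurement, _backend_keys in KEYMAPS.items():
    for _key in _backend_keys:
        assert _key in BACKENDS, \
            'KEYMAPS[%r] references unknown backend %r' % (_measurement, _key)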

# NODES: per-node settings; these override the DEFAULT_NODE config below.
# listenaddr: address the proxy listens on
# db: database name exposed by the proxy; clients must use the same db
# zone: preferred zone when choosing backends for queries
# nexts: comma-separated backend keys that receive a copy of all written data
# interval: statistics collection interval
# idletimeout: keep-alive idle timeout
# writetracing: log every write request; default 0
# querytracing: log every query request; default 0
NODES = {
    'l1': {
        'listenaddr': ':6666',
        'db': 'test',
        'zone': 'local',
        'interval': 10,
        'idletimeout': 10,
        'writetracing': 0,
        'querytracing': 0,
    },
}

# DEFAULT_NODE: fallback settings for any node option not set in NODES.
DEFAULT_NODE = {
    'listenaddr': ':6666'
}
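
# Sketch of the intended fallback (an assumption about how the proxy consumes
# this config, not code from the upstream project): a node's effective config
# is DEFAULT_NODE with its own NODES entry layered on top. The helper name is
# hypothetical and unused by the rest of this script.
def effective_node_config(name):
    merged = dict(DEFAULT_NODE)
    merged.update(NODES.get(name, {}))
    return merged  # e.g. effective_node_config('l1')['db'] == 'test'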

def cleanups(client, patterns):
    # Remove any previously written config keys matching the given patterns.
    for pattern in patterns:
        for key in client.keys(pattern):
            client.delete(key)

def write_configs(client, o, prefix):
    # Dict-valued entries become Redis hashes and list-valued entries become
    # Redis lists, each stored under the given key prefix (e.g. 'b:' + name).
    for k, l in o.items():
        if hasattr(l, 'items'):
            for f, v in l.items():
                client.hset(prefix + k, f, v)
        elif hasattr(l, '__iter__'):
            for i in l:
                client.rpush(prefix + k, i)

def write_config(client, d, name):
    # Store a single flat dict as one Redis hash under the given name.
    for k, v in d.items():
        client.hset(name, k, v)
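
# Read-back sketch (a hypothetical helper, not part of the original script): it
# mirrors write_configs/write_config above, returning hash keys via HGETALL and
# list keys via LRANGE; both are standard redis-py commands.
def dump_config(client, key):
    if client.type(key) in (b'hash', 'hash'):
        return client.hgetall(key)      # e.g. 'b:local', 'n:l1', 'default_node'
    return client.lrange(key, 0, -1)    # e.g. 'm:cpu' -> ['local']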

def main():
    '''config.py [-d db] [-H host] [-p port] [-P password]
    -d: redis db number (default 0)
    -h: show this help and exit
    -H: redis host (default localhost)
    -p: redis port (default 6379)
    -P: redis password (default empty)
    '''
    optlist, args = getopt.getopt(sys.argv[1:], 'd:hH:p:P:')
    optdict = dict(optlist)
    if '-h' in optdict:
        print(main.__doc__)
        return
    client = redis.StrictRedis(
        host=optdict.get('-H', 'localhost'),
        port=int(optdict.get('-p', '6379')),
        db=int(optdict.get('-d', '0')),
        password=optdict.get('-P', '')
    )
    cleanups(client, ['default_node', 'b:*', 'm:*', 'n:*'])
    write_config(client, DEFAULT_NODE, "default_node")
    write_configs(client, BACKENDS, 'b:')
    write_configs(client, NODES, 'n:')
    write_configs(client, KEYMAPS, 'm:')


if __name__ == '__main__':
    main()
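
# Usage sketch (assumed shell session; key names follow cleanups() and main() above):
#   $ python config.py -H localhost -p 6379 -d 0
#   $ redis-cli hgetall default_node   # hash written by write_config(DEFAULT_NODE, ...)
#   $ redis-cli hgetall b:local        # backend hash written from BACKENDS
#   $ redis-cli hgetall n:l1           # node hash written from NODES
#   $ redis-cli lrange m:cpu 0 -1      # measurement routing list written from KEYMAPS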