forked from mosajjal/dnsmonster
-
Notifications
You must be signed in to change notification settings - Fork 0
/
config-sample.ini
227 lines (164 loc) · 6.57 KB
/
config-sample.ini
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
[general]
; Garbage Collection interval for tcp assembly and ip defragmentation
GcTime = 10s
; Duration to calculate interface stats
CaptureStatsDelay = 1s
; Duration to print capture and database stats
PrintStatsDelay = 10s
; Mask source IPs by bits. 32 means all bits of the IP are saved in the DB
MaskSize = 32
; Name of the server used to index the metrics.
ServerName = default
; Size of the tcp assembler
TcpAssemblyChannelSize = 1000
; Size of the tcp result channel
TcpResultChannelSize = 1000
; Number of routines used to handle tcp assembly
TcpHandlerCount = 1
; Size of the result processor channel
ResultChannelSize = 100000
; Set debug Log level, 0:PANIC, 1:ERROR, 2:WARN, 3:INFO, 4:DEBUG
LogLevel = 3
; Size of the channel to send packets to be defragged
DefraggerChannelSize = 500
; Size of the channel where the defragged packets are returned
DefraggerChannelReturnSize = 500
; write cpu profile to file (empty = profiling disabled)
Cpuprofile =
; write memory profile to file (empty = profiling disabled)
Memprofile =
; GOMAXPROCS variable (-1 = use the Go runtime default)
Gomaxprocs = -1
; Limit of packets logged to clickhouse every iteration. Default 0 (disabled)
PacketLimit = 0
; Skip outputting domains matching items in the CSV file path. Can accept a URL (http:// or https://) or path
SkipDomainsFile =
; Hot-Reload skipDomainsFile interval
SkipDomainsRefreshInterval = 1m0s
; skipDomainsFile type. Options: csv and hashtable. Hashtable is ONLY fqdn, csv can support fqdn, prefix and suffix logic but it's much slower
SkipDomainsFileType = csv
; Allow Domains logic input file. Can accept a URL (http:// or https://) or path
AllowDomainsFile =
; Hot-Reload allowDomainsFile file interval
AllowDomainsRefreshInterval = 1m0s
; allowDomainsFile type. Options: csv and hashtable. Hashtable is ONLY fqdn, csv can support fqdn, prefix and suffix logic but it's much slower
AllowDomainsFileType = csv
; Skip TLS verification when making HTTPS connections
SkipTLSVerification = false
; Save full packet query and response in JSON format.
SaveFullQuery = false
[capture]
; Device used to capture
DevName =
; Pcap filename to run
PcapFile =
; dnstap socket path. Example: unix:///tmp/dnstap.sock, tcp://127.0.0.1:8080
DnstapSocket =
; Port selected to filter packets
Port = 53
; Capture Sampling by a:b. e.g. sampleRatio of 1:100 will process 1 percent of the incoming packets
SampleRatio = 1:1
; Set the dnstap socket permission, only applicable when unix:// is used
DnstapPermission = 755
; Number of routines used to handle received packets
PacketHandlerCount = 1
; Size of the packet handler channel
PacketChannelSize = 100000
; Afpacket Buffersize in MB
AfpacketBuffersizeMb = 64
; BPF filter applied to the packet stream. If port is selected, the packets will not be defragged.
Filter = ((ip and (ip[9] == 6 or ip[9] == 17)) or (ip6 and (ip6[6] == 17 or ip6[6] == 6 or ip6[6] == 44)))
; Use AFPacket for live captures. Supported on Linux 3.0+ only
UseAfpacket = false
[output]
; Address of the clickhouse database to save the results
ClickhouseAddress = localhost:9000
; Interval between sending results to ClickHouse
ClickhouseDelay = 1s
; Debug Clickhouse connection
ClickhouseDebug = false
; What should be written to clickhouse. options:
; 0: Disable Output
; 1: Enable Output without any filters
; 2: Enable Output and apply skipdomains logic
; 3: Enable Output and apply allowdomains logic
; 4: Enable Output and apply both skip and allow domains logic
ClickhouseOutputType = 0
; Minimum capacity of the cache array used to send data to clickhouse. Set close to the queries per second received to prevent allocations
ClickhouseBatchSize = 100000
; What should be written to file. options:
; 0: Disable Output
; 1: Enable Output without any filters
; 2: Enable Output and apply skipdomains logic
; 3: Enable Output and apply allowdomains logic
; 4: Enable Output and apply both skip and allow domains logic
FileOutputType = 0
; Path to output file. Used if fileOutputType is not none
FileOutputPath =
; What should be written to stdout. options:
; 0: Disable Output
; 1: Enable Output without any filters
; 2: Enable Output and apply skipdomains logic
; 3: Enable Output and apply allowdomains logic
; 4: Enable Output and apply both skip and allow domains logic
StdoutOutputType = 0
; What should be written to Syslog server. options:
; 0: Disable Output
; 1: Enable Output without any filters
; 2: Enable Output and apply skipdomains logic
; 3: Enable Output and apply allowdomains logic
; 4: Enable Output and apply both skip and allow domains logic
SyslogOutputType = 0
; Syslog endpoint address, example: udp://127.0.0.1:514, tcp://127.0.0.1:514. Used if syslogOutputType is not none
SyslogOutputEndpoint =
; What should be written to kafka. options:
; 0: Disable Output
; 1: Enable Output without any filters
; 2: Enable Output and apply skipdomains logic
; 3: Enable Output and apply allowdomains logic
; 4: Enable Output and apply both skip and allow domains logic
KafkaOutputType = 0
; kafka broker address, example: 127.0.0.1:9092. Used if kafkaOutputType is not none
KafkaOutputBroker =
; Kafka topic for logging
KafkaOutputTopic = dnsmonster
; Minimum capacity of the cache array used to send data to Kafka
KafkaBatchSize = 1000
; Interval between sending results to Kafka if Batch size is not filled
KafkaBatchDelay = 1s
; What should be written to elastic. options:
; 0: Disable Output
; 1: Enable Output without any filters
; 2: Enable Output and apply skipdomains logic
; 3: Enable Output and apply allowdomains logic
; 4: Enable Output and apply both skip and allow domains logic
ElasticOutputType = 0
; elastic endpoint address, example: http://127.0.0.1:9200. Used if elasticOutputType is not none
ElasticOutputEndpoint =
; elastic index
ElasticOutputIndex = default
; Send data to Elastic in batch sizes
ElasticBatchSize = 1000
; Interval between sending results to Elastic if Batch size is not filled
ElasticBatchDelay = 1s
; What should be written to HEC. options:
; 0: Disable Output
; 1: Enable Output without any filters
; 2: Enable Output and apply skipdomains logic
; 3: Enable Output and apply allowdomains logic
; 4: Enable Output and apply both skip and allow domains logic
SplunkOutputType = 0
; splunk endpoint address, example: http://127.0.0.1:8088. Used if splunkOutputType is not none
SplunkOutputEndpoints =
; Splunk HEC Token
SplunkOutputToken = 00000000-0000-0000-0000-000000000000
; Splunk Output Index
SplunkOutputIndex = temp
; Splunk Output Source
SplunkOutputSource = dnsmonster
; Splunk Output Sourcetype
SplunkOutputSourceType = json
; Send data to HEC in batch sizes
SplunkBatchSize = 1000
; Interval between sending results to HEC if Batch size is not filled
SplunkBatchDelay = 1s