# graylog2-server.conf
# If you are running more than one instance of graylog2-server, you have to select one of these
# instances as master. The master will perform some periodic tasks that non-masters won't perform.
is_master = true
# The auto-generated node ID will be stored in this file and read after restarts. It is a good idea
# to use an absolute file path here if you are starting graylog2-server from init scripts or similar.
node_id_file = /etc/graylog2/server/node-id
# You MUST set a secret to secure/pepper the stored user passwords here. Use at least 64 characters.
# Generate one by using for example: pwgen -s 96
password_secret = ufImkyyMjZdJoydfLT2UuvjOZZol0qjIDiHGrLbV82BL7ndLsJa4w2KS4znAp4nN14bon7Dq6u0S2X9LogB3z0C0ezvznv9q
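# (If pwgen is not available, openssl rand -hex 48 also yields a 96-character
# secret, assuming OpenSSL is installed.)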
# the default root user is named 'admin'
root_username = admin
# You MUST specify a hashed password for the root user (which you only need to initially set up the
# system and in case you lose connectivity to your authentication backend).
# This password cannot be changed using the API or via the web interface. If you need to change it,
# modify it in this file.
# Create one by using for example: echo -n yourpassword | shasum -a 256
# and put the resulting hash value into the following line
root_password_sha2 = 8c6976e5b5410415bde908bd4dee15dfb167a9c873fc4bb8a81f6f2ab448a918
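# (On systems without shasum, GNU coreutils provides an equivalent, assuming it is installed:
# echo -n yourpassword | sha256sum | cut -d" " -f1)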
# Set plugin directory here (relative or absolute)
plugin_dir = /usr/share/graylog2-server/plugin
# REST API listen URI. Must be reachable by other graylog2-server nodes if you run a cluster.
rest_listen_uri = http://0.0.0.0:12900/
# REST API transport address. Defaults to the value of rest_listen_uri. Exception: If rest_listen_uri
# is set to a wildcard IP address (0.0.0.0) the first non-loopback IPv4 system address is used.
# This will be promoted in the cluster discovery APIs and other nodes may try to connect on this
# address. (see rest_listen_uri)
rest_transport_uri = http://127.0.0.1:12900/
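# (In a cluster, each node would set this to an address the other nodes can reach,
# e.g. with a hypothetical address: rest_transport_uri = http://192.168.1.10:12900/)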
# Enable CORS headers for the REST API. This is necessary for JS clients accessing the server directly.
# If these are disabled, modern browsers will not be able to retrieve resources from the server.
# This is disabled by default. Uncomment the next line to enable it.
#rest_enable_cors = true
# Enable GZIP support for the REST API. This compresses API responses and therefore helps to reduce
# overall round trip times. This is disabled by default. Uncomment the next line to enable it.
#rest_enable_gzip = true
# Embedded elasticsearch configuration file
# pay attention to the working directory of the server, maybe use an absolute path here
#elasticsearch_config_file = /etc/graylog2-elasticsearch.yml
elasticsearch_max_docs_per_index = 20000000
# How many indices do you want to keep?
# elasticsearch_max_number_of_indices * elasticsearch_max_docs_per_index = total number of messages in your setup
elasticsearch_max_number_of_indices = 20
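# (For reference, with the values above: 20 indices * 20,000,000 docs = up to
# 400,000,000 messages retained before the retention strategy kicks in.)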
# Decide what happens with the oldest indices when the maximum number of indices is reached.
# The following strategies are available:
# - delete # Deletes the index completely (Default)
# - close # Closes the index and hides it from the system. Can be re-opened later.
retention_strategy = close
# How many ElasticSearch shards and replicas should be used per index? Note that this only applies to newly created indices.
elasticsearch_shards = 1
elasticsearch_replicas = 0
elasticsearch_index_prefix = graylog2
# Do you want to allow searches with leading wildcards? This can be extremely resource hungry and should only
# be enabled with care. See also: http://support.torch.sh/help/kb/graylog2-web-interface/the-search-bar-explained
allow_leading_wildcard_searches = false
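# (An illustrative leading wildcard query: source:*.example.org. Such queries are
# not allowed while this is set to false.)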
# Do you want to allow searches to be highlighted? Depending on the size of your messages this can be memory hungry and
# should only be enabled after making sure your elasticsearch cluster has enough memory.
allow_highlighting = false
# settings to be passed to elasticsearch's client (overriding those in the provided elasticsearch_config_file)
# this must be the same as for your elasticsearch cluster:
elasticsearch_cluster_name = graylog2
# you could also leave this out, but setting it makes it easier to identify the graylog2 client instance
#elasticsearch_node_name = graylog2-server
# we don't want the graylog2 server to store any data, or to be a master node
elasticsearch_node_master = false
elasticsearch_node_data = false
# use a different port if you run multiple elasticsearch nodes on one machine
#elasticsearch_transport_tcp_port = 9350
# we don't need to run the embedded HTTP server here
#elasticsearch_http_enabled = false
elasticsearch_discovery_zen_ping_multicast_enabled = false
#elasticsearch_discovery_zen_ping_unicast_hosts = 192.168.1.203:9300
elasticsearch_discovery_zen_ping_unicast_hosts = 127.0.0.1:9300
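# (Multiple hosts should be acceptable as a comma-separated list, e.g. with hypothetical addresses:
# elasticsearch_discovery_zen_ping_unicast_hosts = 192.168.1.203:9300,192.168.1.204:9300)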
# the following settings allow you to change the bind addresses for the elasticsearch client in graylog2
# these settings are empty by default, letting elasticsearch choose automatically,
# override them here or in the 'elasticsearch_config_file' if you need to bind to a special address
# refer to http://www.elasticsearch.org/guide/en/elasticsearch/reference/0.90/modules-network.html for special values here
# elasticsearch_network_host =
# elasticsearch_network_bind_host =
# elasticsearch_network_publish_host =
# Analyzer (tokenizer) to use for the message and full_message fields. The "standard" analyzer is usually a good idea.
# All supported analyzers are: standard, simple, whitespace, stop, keyword, pattern, language, snowball, custom
# ElasticSearch documentation: http://www.elasticsearch.org/guide/reference/index-modules/analysis/
# Note that this setting only takes effect on newly created indices.
elasticsearch_analyzer = standard
# Batch size for the ElasticSearch output. This is the maximum (!) number of messages the ElasticSearch output
# module will get at once and write to ElasticSearch in a batch call. If the configured batch size has not been
# reached within output_flush_interval seconds, everything that is available will be flushed at once. Remember
# that every outputbuffer processor manages its own batch and performs its own batch write calls.
# ("outputbuffer_processors" variable)
output_batch_size = 25
# Flush interval (in seconds) for the ElasticSearch output. This is the maximum amount of time between two
# batches of messages written to ElasticSearch. It only takes effect if fewer than
# output_batch_size * outputbuffer_processors messages arrive within this time period.
output_flush_interval = 1
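# (Worked example: with output_batch_size = 25 above and outputbuffer_processors = 3 below,
# 25 * 3 = 75, so this interval governs flushing whenever fewer than 75 messages arrive within it.)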
# The number of parallel running processors.
# Raise this number if your buffers are filling up.
processbuffer_processors = 5
outputbuffer_processors = 3
# Wait strategy describing how buffer processors wait on a cursor sequence. (default: sleeping)
# Possible types:
#  - yielding
#      Compromise between performance and CPU usage.
#  - sleeping
#      Compromise between performance and CPU usage. Latency spikes can occur after quiet periods.
#  - blocking
#      High throughput, low latency, higher CPU usage.
#  - busy_spinning
#      Avoids syscalls which could introduce latency jitter. Best when threads can be bound to specific CPU cores.
processor_wait_strategy = blocking
# Size of internal ring buffers. Raise this if raising outputbuffer_processors does not help anymore.
# For optimum performance your LogMessage objects in the ring buffer should fit in your CPU L3 cache.
# Start server with --statistics flag to see buffer utilization.
# Must be a power of 2. (512, 1024, 2048, ...)
ring_size = 1024
# EXPERIMENTAL: Dead Letters
# Every failed indexing attempt is logged by default and made visible in the web interface. You can enable
# the experimental dead letters feature to write every message that was not successfully indexed into the
# MongoDB "dead_letters" collection to make sure that you never lose a message. The actual writing of dead
# letters should work fine already, but it is not heavily tested yet and will get more features in future
# releases.
dead_letters_enabled = false
# How many seconds to wait between marking the node as DEAD for possible load balancers and starting the
# actual shutdown process. Set to 0 if you have no status-checking load balancers in front.
lb_recognition_period_seconds = 3
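# (Such load balancers typically poll the REST API; on this Graylog2 version the endpoint
# should be /system/lbstatus, e.g.: curl http://127.0.0.1:12900/system/lbstatus
# returns ALIVE or DEAD. Verify the exact path against your server version.)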
# Every message is matched against the configured streams, and it can happen that a stream contains rules which
# take an unusual amount of time to run, for example if they use regular expressions that perform excessive backtracking.
# This will impact the processing of the entire server. To keep such misbehaving stream rules from impacting other
# streams, Graylog2 limits the execution time for each stream.
# The default values are noted below; the timeout is in milliseconds.
# If the stream matching for one stream takes longer than the timeout value, and this happens more than "max_faults" times,
# that stream is disabled and a notification is shown in the web interface.
# stream_processing_timeout = 2000
# stream_processing_max_faults = 3
# MongoDB Configuration
mongodb_useauth = false
#mongodb_user = grayloguser
#mongodb_password = 123
mongodb_host = 127.0.0.1
#mongodb_replica_set = localhost:27017,localhost:27018,localhost:27019
mongodb_database = graylog2
mongodb_port = 27017
# Raise this according to the maximum connections your MongoDB server can handle if you encounter MongoDB connection problems.
mongodb_max_connections = 100
# Number of threads allowed to be blocked by MongoDB connections multiplier. Default: 5
# If mongodb_max_connections is 100, and mongodb_threads_allowed_to_block_multiplier is 5, then 500 threads can block. More than that and an exception will be thrown.
# http://api.mongodb.org/java/current/com/mongodb/MongoOptions.html#threadsAllowedToBlockForConnectionMultiplier
mongodb_threads_allowed_to_block_multiplier = 5
# Drools Rule File (used to rewrite incoming log messages)
# See: http://support.torch.sh/help/kb/graylog2-server/custom-message-rewritingprocessing
# rules_file = /etc/graylog2.drl
# Email transport
transport_email_enabled = true
transport_email_hostname = smtp.hi-media-techno.com
transport_email_port = 25
transport_email_use_auth = false
transport_email_use_tls = false
transport_email_use_ssl = false
transport_email_auth_username = [email protected]
transport_email_auth_password = secret
transport_email_subject_prefix = [graylog2]
transport_email_from_email = [email protected]
# This defines the fully qualified base URL to your web interface, exactly the same way as it is accessed
# by your users. It is used to include links to streams in stream alert mails.
transport_email_web_interface_url = http://localhost:9000
# HTTP proxy for outgoing HTTP calls
#http_proxy_uri =