# default.yaml (forked from mailgun/kafka-pixy)
# TCP address that gRPC API server should listen on.
grpc_addr: 0.0.0.0:19091
# TCP address that RESTful API server should listen on.
tcp_addr: 0.0.0.0:19092
# Unix domain socket address that RESTful API server should listen on.
# Listening on a unix domain socket is disabled by default.
# unix_addr: "/var/run/kafka-pixy.sock"
# A map of cluster names to respective proxy configurations. The first proxy
# in the map is considered to be `default`. It is used in API calls that do not
# specify a cluster name explicitly. (A commented-out example of a second
# cluster entry is given at the end of this file.)
proxies:

  # Name of a Kafka+ZooKeeper cluster. The only requirement for the name is
  # that it be unique in this config file. The value of this parameter is
  # the configuration of the proxy used to access the cluster.
  default:

    # Unique ID that identifies a Kafka-Pixy instance in both ZooKeeper and
    # Kafka. It is automatically generated by default and it is recommended to
    # leave it like that.
    # client_id: AUTOGENERATED

    # Kafka parameters section.
    kafka:

      # List of seed Kafka peers that Kafka-Pixy should access to resolve the
      # Kafka cluster topology.
      seed_peers:
        - localhost:9092
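
      # For illustration only (not part of the upstream file): a production
      # cluster typically lists several seed brokers; the hosts below are
      # hypothetical.
      #
      # seed_peers:
      #   - kafka-1.example.com:9092
      #   - kafka-2.example.com:9092
      #   - kafka-3.example.com:9092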

      # Version of the Kafka cluster. Supported versions are 0.8.2.2 - 1.0.0
      version: 0.8.2.2

    # ZooKeeper parameters section.
    zoo_keeper:

      # List of seed ZooKeeper peers that Kafka-Pixy should access to resolve
      # the ZooKeeper cluster topology.
      seed_peers:
        - localhost:2181

      # Path to the znode in ZooKeeper under which Kafka keeps its data.
      # chroot: "/"

    # Producer parameters section.
    producer:

      # Size of all buffered channels created by the producer module.
      channel_buffer_size: 4096

      # The maximum permitted size of a message (defaults to 1000000). Should be
      # set equal to or smaller than the broker's `message.max.bytes`.
      max_message_bytes: 1000000

      # The type of compression to use on messages. Allowed values are:
      # none, gzip, snappy, and lz4.
      compression: snappy

      # The best-effort number of bytes needed to trigger a flush.
      flush_bytes: 1048576

      # The best-effort frequency of flushes.
      flush_frequency: 500ms

      # How long to wait for the cluster to settle between retries.
      retry_backoff: 10s

      # The total number of times to retry sending a message before giving up.
      retry_max: 6

      # The level of acknowledgement reliability needed from the broker.
      # Allowed values are:
      # * no_response: the broker doesn't send any response, the TCP ACK
      #   is all you get.
      # * wait_for_local: the broker responds as soon as it commits to the
      #   local disk.
      # * wait_for_all: the broker waits for all in-sync replicas to commit
      #   before responding.
      required_acks: wait_for_all

      # Period of time that Kafka-Pixy should keep trying to submit buffered
      # messages to Kafka. It is recommended to make it large enough to survive
      # a ZooKeeper leader election in your setup.
      shutdown_timeout: 30s
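
      # For illustration only (not part of the upstream file): with the default
      # tcp_addr above, a message can be produced via the RESTful API roughly
      # like this (topic "foo", key "bar" and the payload are hypothetical):
      #
      #   curl -X POST -d 'hello' \
      #     'http://localhost:19092/topics/foo/messages?key=bar'
      #
      # By default the call returns as soon as the message is buffered; the
      # producer settings above (flush_bytes, flush_frequency, required_acks)
      # then govern how and when it is written to Kafka. Adding a `sync` query
      # parameter should make the call wait for actual submission, but verify
      # the exact form against the kafka-pixy README for your version.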

    # Consumer parameters section.
    consumer:

      # Period of time that Kafka-Pixy should wait for an acknowledgement
      # before retrying.
      ack_timeout: 5m

      # Size of all buffered channels created by the consumer module.
      channel_buffer_size: 64

      # The number of bytes of messages to attempt to fetch for each
      # topic-partition in each fetch request. These bytes will be read into
      # memory for each partition, so this helps control the memory used by
      # the consumer. The fetch request size must be at least as large as
      # the maximum message size the server allows or else it is possible
      # for the producer to send messages larger than the consumer can fetch.
      fetch_max_bytes: 1048576

      # The maximum amount of time the server will block before answering
      # the fetch request if there isn't data immediately available.
      fetch_max_wait: 250ms

      # A consume request will wait at most this long for a message from the
      # topic to become available before expiring.
      long_polling_timeout: 3s
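
      # For illustration only (not part of the upstream file): with the default
      # tcp_addr above, a message can be consumed on behalf of a consumer group
      # roughly like this (topic "foo" and group "bar" are hypothetical):
      #
      #   curl 'http://localhost:19092/topics/foo/messages?group=bar'
      #
      # If no message is immediately available, the request long-polls for up
      # to long_polling_timeout before returning an error; acknowledgement
      # handling is governed by ack_timeout and max_retries below. Check the
      # kafka-pixy README for the exact ack parameters of your version.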

      # The maximum number of unacknowledged messages allowed for a particular
      # group-topic-partition at a time. When this number is reached subsequent
      # consume requests will return long polling timeout errors, until some of
      # the pending messages are acknowledged.
      max_pending_messages: 300

      # The maximum number of retries Kafka-Pixy will make to offer an
      # unacknowledged message. Messages that exceed the number of retries are
      # discarded by Kafka-Pixy and acknowledged in Kafka. Zero retries
      # means that messages will be offered just once.
      #
      # If you want Kafka-Pixy to retry indefinitely, then set this
      # parameter to -1.
      max_retries: -1

      # How frequently to commit offsets to Kafka.
      offsets_commit_interval: 500ms

      # If a request fails for any reason, then Kafka-Pixy should wait this
      # long before retrying.
      retry_backoff: 500ms

      # Period of time that Kafka-Pixy should keep a group's subscription to a
      # topic alive in the absence of consume requests from that group.
      subscription_timeout: 15s
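
  # For illustration only (not part of the upstream file): a second cluster can
  # be added to the `proxies` map alongside `default`. The names and addresses
  # below are hypothetical.
  #
  # staging:
  #   kafka:
  #     seed_peers:
  #       - kafka-staging.example.com:9092
  #     version: 1.0.0
  #   zoo_keeper:
  #     seed_peers:
  #       - zk-staging.example.com:2181
  #
  # API calls that should go to a non-default cluster name it explicitly, e.g.
  # via a `/clusters/staging/...` path prefix in the RESTful API; verify the
  # exact form against the kafka-pixy README for your version.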