kafka-doc-2.5.json
[{"Category":"broker","Name":"zookeeper.connect","Description":"\n\u003cp\u003eSpecifies the ZooKeeper connection string in the form \u003ccode\u003ehostname:port\u003c/code\u003e where host and port are the host and port of a ZooKeeper server. To allow connecting through other ZooKeeper nodes when that ZooKeeper machine is down you can also specify multiple hosts in the form \u003ccode\u003ehostname1:port1,hostname2:port2,hostname3:port3\u003c/code\u003e.\u003cbr/\u003eThe server can also have a ZooKeeper chroot path as part of its ZooKeeper connection string which puts its data under some path in the global ZooKeeper namespace. For example to give a chroot path of \u003ccode\u003e/chroot/path\u003c/code\u003e you would give the connection string as \u003ccode\u003ehostname1:port1,hostname2:port2,hostname3:port3/chroot/path\u003c/code\u003e.\u003c/p\u003e\n","Type":"string","Default":"","ValidValues":"","Importance":"high","DynamicUpdateMode":"read-only","ServerDefaultProperty":""},{"Category":"broker","Name":"advertised.host.name","Description":"\n\u003cp\u003eDEPRECATED: only used when \u003ccode\u003eadvertised.listeners\u003c/code\u003e or \u003ccode\u003elisteners\u003c/code\u003e are not set. Use \u003ccode\u003eadvertised.listeners\u003c/code\u003e instead. \u003cbr/\u003eHostname to publish to ZooKeeper for clients to use. In IaaS environments, this may need to be different from the interface to which the broker binds. If this is not set, it will use the value for \u003ccode\u003ehost.name\u003c/code\u003e if configured. Otherwise it will use the value returned from java.net.InetAddress.getCanonicalHostName().\u003c/p\u003e\n","Type":"string","Default":"null","ValidValues":"","Importance":"high","DynamicUpdateMode":"read-only","ServerDefaultProperty":""},{"Category":"broker","Name":"advertised.listeners","Description":"\n\u003cp\u003eListeners to publish to ZooKeeper for clients to use, if different than the \u003ccode\u003elisteners\u003c/code\u003e config property. In IaaS environments, this may need to be different from the interface to which the broker binds. If this is not set, the value for \u003ccode\u003elisteners\u003c/code\u003e will be used. Unlike \u003ccode\u003elisteners\u003c/code\u003e it is not valid to advertise the 0.0.0.0 meta-address.\u003c/p\u003e\n","Type":"string","Default":"null","ValidValues":"","Importance":"high","DynamicUpdateMode":"per-broker","ServerDefaultProperty":""},{"Category":"broker","Name":"advertised.port","Description":"\n\u003cp\u003eDEPRECATED: only used when \u003ccode\u003eadvertised.listeners\u003c/code\u003e or \u003ccode\u003elisteners\u003c/code\u003e are not set. Use \u003ccode\u003eadvertised.listeners\u003c/code\u003e instead. \u003cbr/\u003eThe port to publish to ZooKeeper for clients to use. In IaaS environments, this may need to be different from the port to which the broker binds. If this is not set, it will publish the same port that the broker binds to.\u003c/p\u003e\n","Type":"int","Default":"null","ValidValues":"","Importance":"high","DynamicUpdateMode":"read-only","ServerDefaultProperty":""},{"Category":"broker","Name":"auto.create.topics.enable","Description":"\n\u003cp\u003eEnable auto creation of topic on the server\u003c/p\u003e\n","Type":"boolean","Default":"true","ValidValues":"","Importance":"high","DynamicUpdateMode":"read-only","ServerDefaultProperty":""},{"Category":"broker","Name":"auto.leader.rebalance.enable","Description":"\n\u003cp\u003eEnables auto leader balancing. 
A background thread checks the distribution of partition leaders at regular intervals, configurable by `leader.imbalance.check.interval.seconds`. If the leader imbalance exceeds `leader.imbalance.per.broker.percentage`, leader rebalance to the preferred leader for partitions is triggered.\u003c/p\u003e\n","Type":"boolean","Default":"true","ValidValues":"","Importance":"high","DynamicUpdateMode":"read-only","ServerDefaultProperty":""},{"Category":"broker","Name":"background.threads","Description":"\n\u003cp\u003eThe number of threads to use for various background processing tasks\u003c/p\u003e\n","Type":"int","Default":"10","ValidValues":"[1,...]","Importance":"high","DynamicUpdateMode":"cluster-wide","ServerDefaultProperty":""},{"Category":"broker","Name":"broker.id","Description":"\n\u003cp\u003eThe broker id for this server. If unset, a unique broker id will be generated.To avoid conflicts between zookeeper generated broker id\u0026#39;s and user configured broker id\u0026#39;s, generated broker ids start from reserved.broker.max.id + 1.\u003c/p\u003e\n","Type":"int","Default":"-1","ValidValues":"","Importance":"high","DynamicUpdateMode":"read-only","ServerDefaultProperty":""},{"Category":"broker","Name":"compression.type","Description":"\n\u003cp\u003eSpecify the final compression type for a given topic. This configuration accepts the standard compression codecs (\u0026#39;gzip\u0026#39;, \u0026#39;snappy\u0026#39;, \u0026#39;lz4\u0026#39;, \u0026#39;zstd\u0026#39;). It additionally accepts \u0026#39;uncompressed\u0026#39; which is equivalent to no compression; and \u0026#39;producer\u0026#39; which means retain the original compression codec set by the producer.\u003c/p\u003e\n","Type":"string","Default":"producer","ValidValues":"","Importance":"high","DynamicUpdateMode":"cluster-wide","ServerDefaultProperty":""},{"Category":"broker","Name":"control.plane.listener.name","Description":"\n\u003cp\u003eName of listener used for communication between controller and brokers. Broker will use the control.plane.listener.name to locate the endpoint in listeners list, to listen for connections from the controller. 
For example, if a broker\u0026#39;s config is:\u003cbr/\u003elisteners = INTERNAL://192.1.1.8:9092, EXTERNAL://10.1.1.5:9093, CONTROLLER://192.1.1.8:9094\u003cbr/\u003elistener.security.protocol.map = INTERNAL:PLAINTEXT, EXTERNAL:SSL, CONTROLLER:SSL\u003cbr/\u003econtrol.plane.listener.name = CONTROLLER\u003cbr/\u003eOn startup, the broker will start listening on \u0026#34;192.1.1.8:9094\u0026#34; with security protocol \u0026#34;SSL\u0026#34;.\u003cbr/\u003eOn the controller side, when it discovers a broker\u0026#39;s published endpoints through zookeeper, it will use the control.plane.listener.name to find the endpoint, which it will use to establish a connection to the broker.\u003cbr/\u003eFor example, if the broker\u0026#39;s published endpoints on zookeeper are:\u003cbr/\u003e\u0026#34;endpoints\u0026#34; : [\u0026#34;INTERNAL://broker1.example.com:9092\u0026#34;,\u0026#34;EXTERNAL://broker1.example.com:9093\u0026#34;,\u0026#34;CONTROLLER://broker1.example.com:9094\u0026#34;]\u003cbr/\u003e and the controller\u0026#39;s config is:\u003cbr/\u003elistener.security.protocol.map = INTERNAL:PLAINTEXT, EXTERNAL:SSL, CONTROLLER:SSL\u003cbr/\u003econtrol.plane.listener.name = CONTROLLER\u003cbr/\u003ethen the controller will use \u0026#34;broker1.example.com:9094\u0026#34; with security protocol \u0026#34;SSL\u0026#34; to connect to the broker.\u003cbr/\u003eIf not explicitly configured, the default value will be null and there will be no dedicated endpoints for controller connections.\u003c/p\u003e\n","Type":"string","Default":"null","ValidValues":"","Importance":"high","DynamicUpdateMode":"read-only","ServerDefaultProperty":""},{"Category":"broker","Name":"delete.topic.enable","Description":"\n\u003cp\u003eEnables topic deletion. Deleting a topic through the admin tool will have no effect if this config is turned off\u003c/p\u003e\n","Type":"boolean","Default":"true","ValidValues":"","Importance":"high","DynamicUpdateMode":"read-only","ServerDefaultProperty":""},{"Category":"broker","Name":"host.name","Description":"\n\u003cp\u003eDEPRECATED: only used when \u003ccode\u003elisteners\u003c/code\u003e is not set. Use \u003ccode\u003elisteners\u003c/code\u003e instead. \u003cbr/\u003ehostname of broker. If this is set, it will only bind to this address. If this is not set, it will bind to all interfaces\u003c/p\u003e\n","Type":"string","Default":"\"\"","ValidValues":"","Importance":"high","DynamicUpdateMode":"read-only","ServerDefaultProperty":""},{"Category":"broker","Name":"leader.imbalance.check.interval.seconds","Description":"\n\u003cp\u003eThe frequency with which the partition rebalance check is triggered by the controller\u003c/p\u003e\n","Type":"long","Default":"300","ValidValues":"","Importance":"high","DynamicUpdateMode":"read-only","ServerDefaultProperty":""},{"Category":"broker","Name":"leader.imbalance.per.broker.percentage","Description":"\n\u003cp\u003eThe ratio of leader imbalance allowed per broker. The controller would trigger a leader balance if it goes above this value per broker. The value is specified in percentage.\u003c/p\u003e\n","Type":"int","Default":"10","ValidValues":"","Importance":"high","DynamicUpdateMode":"read-only","ServerDefaultProperty":""},{"Category":"broker","Name":"listeners","Description":"\n\u003cp\u003eListener List - Comma-separated list of URIs we will listen on and the listener names. 
If the listener name is not a security protocol, listener.security.protocol.map must also be set.\u003cbr/\u003e Specify hostname as 0.0.0.0 to bind to all interfaces.\u003cbr/\u003e Leave hostname empty to bind to default interface.\u003cbr/\u003e Examples of legal listener lists:\u003cbr/\u003e PLAINTEXT://myhost:9092,SSL://:9091\u003cbr/\u003e CLIENT://0.0.0.0:9092,REPLICATION://localhost:9093\u003cbr/\u003e\u003c/p\u003e\n","Type":"string","Default":"null","ValidValues":"","Importance":"high","DynamicUpdateMode":"per-broker","ServerDefaultProperty":""},{"Category":"broker","Name":"log.dir","Description":"\n\u003cp\u003eThe directory in which the log data is kept (supplemental for log.dirs property)\u003c/p\u003e\n","Type":"string","Default":"/tmp/kafka-logs","ValidValues":"","Importance":"high","DynamicUpdateMode":"read-only","ServerDefaultProperty":""},{"Category":"broker","Name":"log.dirs","Description":"\n\u003cp\u003eThe directories in which the log data is kept. If not set, the value in log.dir is used\u003c/p\u003e\n","Type":"string","Default":"null","ValidValues":"","Importance":"high","DynamicUpdateMode":"read-only","ServerDefaultProperty":""},{"Category":"broker","Name":"log.flush.interval.messages","Description":"\n\u003cp\u003eThe number of messages accumulated on a log partition before messages are flushed to disk \u003c/p\u003e\n","Type":"long","Default":"9223372036854775807","ValidValues":"[1,...]","Importance":"high","DynamicUpdateMode":"cluster-wide","ServerDefaultProperty":""},{"Category":"broker","Name":"log.flush.interval.ms","Description":"\n\u003cp\u003eThe maximum time in ms that a message in any topic is kept in memory before flushed to disk. If not set, the value in log.flush.scheduler.interval.ms is used\u003c/p\u003e\n","Type":"long","Default":"null","ValidValues":"","Importance":"high","DynamicUpdateMode":"cluster-wide","ServerDefaultProperty":""},{"Category":"broker","Name":"log.flush.offset.checkpoint.interval.ms","Description":"\n\u003cp\u003eThe frequency with which we update the persistent record of the last flush which acts as the log recovery point\u003c/p\u003e\n","Type":"int","Default":"60000","ValidValues":"[0,...]","Importance":"high","DynamicUpdateMode":"read-only","ServerDefaultProperty":""},{"Category":"broker","Name":"log.flush.scheduler.interval.ms","Description":"\n\u003cp\u003eThe frequency in ms that the log flusher checks whether any log needs to be flushed to disk\u003c/p\u003e\n","Type":"long","Default":"9223372036854775807","ValidValues":"","Importance":"high","DynamicUpdateMode":"read-only","ServerDefaultProperty":""},{"Category":"broker","Name":"log.flush.start.offset.checkpoint.interval.ms","Description":"\n\u003cp\u003eThe frequency with which we update the persistent record of log start offset\u003c/p\u003e\n","Type":"int","Default":"60000","ValidValues":"[0,...]","Importance":"high","DynamicUpdateMode":"read-only","ServerDefaultProperty":""},{"Category":"broker","Name":"log.retention.bytes","Description":"\n\u003cp\u003eThe maximum size of the log before deleting it\u003c/p\u003e\n","Type":"long","Default":"-1","ValidValues":"","Importance":"high","DynamicUpdateMode":"cluster-wide","ServerDefaultProperty":""},{"Category":"broker","Name":"log.retention.hours","Description":"\n\u003cp\u003eThe number of hours to keep a log file before deleting it (in hours), tertiary to log.retention.ms 
property\u003c/p\u003e\n","Type":"int","Default":"168","ValidValues":"","Importance":"high","DynamicUpdateMode":"read-only","ServerDefaultProperty":""},{"Category":"broker","Name":"log.retention.minutes","Description":"\n\u003cp\u003eThe number of minutes to keep a log file before deleting it (in minutes), secondary to log.retention.ms property. If not set, the value in log.retention.hours is used\u003c/p\u003e\n","Type":"int","Default":"null","ValidValues":"","Importance":"high","DynamicUpdateMode":"read-only","ServerDefaultProperty":""},{"Category":"broker","Name":"log.retention.ms","Description":"\n\u003cp\u003eThe number of milliseconds to keep a log file before deleting it (in milliseconds). If not set, the value in log.retention.minutes is used. If set to -1, no time limit is applied.\u003c/p\u003e\n","Type":"long","Default":"null","ValidValues":"","Importance":"high","DynamicUpdateMode":"cluster-wide","ServerDefaultProperty":""},{"Category":"broker","Name":"log.roll.hours","Description":"\n\u003cp\u003eThe maximum time before a new log segment is rolled out (in hours), secondary to log.roll.ms property\u003c/p\u003e\n","Type":"int","Default":"168","ValidValues":"[1,...]","Importance":"high","DynamicUpdateMode":"read-only","ServerDefaultProperty":""},{"Category":"broker","Name":"log.roll.jitter.hours","Description":"\n\u003cp\u003eThe maximum jitter to subtract from logRollTimeMillis (in hours), secondary to log.roll.jitter.ms property\u003c/p\u003e\n","Type":"int","Default":"0","ValidValues":"[0,...]","Importance":"high","DynamicUpdateMode":"read-only","ServerDefaultProperty":""},{"Category":"broker","Name":"log.roll.jitter.ms","Description":"\n\u003cp\u003eThe maximum jitter to subtract from logRollTimeMillis (in milliseconds). If not set, the value in log.roll.jitter.hours is used\u003c/p\u003e\n","Type":"long","Default":"null","ValidValues":"","Importance":"high","DynamicUpdateMode":"cluster-wide","ServerDefaultProperty":""},{"Category":"broker","Name":"log.roll.ms","Description":"\n\u003cp\u003eThe maximum time before a new log segment is rolled out (in milliseconds). If not set, the value in log.roll.hours is used\u003c/p\u003e\n","Type":"long","Default":"null","ValidValues":"","Importance":"high","DynamicUpdateMode":"cluster-wide","ServerDefaultProperty":""},{"Category":"broker","Name":"log.segment.bytes","Description":"\n\u003cp\u003eThe maximum size of a single log file\u003c/p\u003e\n","Type":"int","Default":"1073741824","ValidValues":"[14,...]","Importance":"high","DynamicUpdateMode":"cluster-wide","ServerDefaultProperty":""},{"Category":"broker","Name":"log.segment.delete.delay.ms","Description":"\n\u003cp\u003eThe amount of time to wait before deleting a file from the filesystem\u003c/p\u003e\n","Type":"long","Default":"60000","ValidValues":"[0,...]","Importance":"high","DynamicUpdateMode":"cluster-wide","ServerDefaultProperty":""},{"Category":"broker","Name":"message.max.bytes","Description":"\n\u003cp\u003eThe largest record batch size allowed by Kafka (after compression if compression is enabled). If this is increased and there are consumers older than 0.10.2, the consumers\u0026#39; fetch size must also be increased so that they can fetch record batches this large. In the latest message format version, records are always grouped into batches for efficiency. In previous message format versions, uncompressed records are not grouped into batches and this limit only applies to a single record in that case. This can be set per topic with the topic level \u003ccode\u003emax.message.bytes\u003c/code\u003e config.\u003c/p\u003e\n","Type":"int","Default":"1048588","ValidValues":"[0,...]","Importance":"high","DynamicUpdateMode":"cluster-wide","ServerDefaultProperty":""},{"Category":"broker","Name":"min.insync.replicas","Description":"\n\u003cp\u003eWhen a producer sets acks to \u0026#34;all\u0026#34; (or \u0026#34;-1\u0026#34;), min.insync.replicas specifies the minimum number of replicas that must acknowledge a write for the write to be considered successful. If this minimum cannot be met, then the producer will raise an exception (either NotEnoughReplicas or NotEnoughReplicasAfterAppend).\u003cbr/\u003eWhen used together, min.insync.replicas and acks allow you to enforce greater durability guarantees. A typical scenario would be to create a topic with a replication factor of 3, set min.insync.replicas to 2, and produce with acks of \u0026#34;all\u0026#34;. This will ensure that the producer raises an exception if a majority of replicas do not receive a write.\u003c/p\u003e\n","Type":"int","Default":"1","ValidValues":"[1,...]","Importance":"high","DynamicUpdateMode":"cluster-wide","ServerDefaultProperty":""},{"Category":"broker","Name":"num.io.threads","Description":"\n\u003cp\u003eThe number of threads that the server uses for processing requests, which may include disk I/O\u003c/p\u003e\n","Type":"int","Default":"8","ValidValues":"[1,...]","Importance":"high","DynamicUpdateMode":"cluster-wide","ServerDefaultProperty":""},{"Category":"broker","Name":"num.network.threads","Description":"\n\u003cp\u003eThe number of threads that the server uses for receiving requests from the network and sending responses to the network\u003c/p\u003e\n","Type":"int","Default":"3","ValidValues":"[1,...]","Importance":"high","DynamicUpdateMode":"cluster-wide","ServerDefaultProperty":""},{"Category":"broker","Name":"num.recovery.threads.per.data.dir","Description":"\n\u003cp\u003eThe number of threads per data directory to be used for log recovery at startup and flushing at shutdown\u003c/p\u003e\n","Type":"int","Default":"1","ValidValues":"[1,...]","Importance":"high","DynamicUpdateMode":"cluster-wide","ServerDefaultProperty":""},{"Category":"broker","Name":"num.replica.alter.log.dirs.threads","Description":"\n\u003cp\u003eThe number of threads that can move replicas between log directories, which may include disk I/O\u003c/p\u003e\n","Type":"int","Default":"null","ValidValues":"","Importance":"high","DynamicUpdateMode":"read-only","ServerDefaultProperty":""},{"Category":"broker","Name":"num.replica.fetchers","Description":"\n\u003cp\u003eNumber of fetcher threads used to replicate messages from a source broker. 
Increasing this value can increase the degree of I/O parallelism in the follower broker.\u003c/p\u003e\n","Type":"int","Default":"1","ValidValues":"","Importance":"high","DynamicUpdateMode":"cluster-wide","ServerDefaultProperty":""},{"Category":"broker","Name":"offset.metadata.max.bytes","Description":"\n\u003cp\u003eThe maximum size for a metadata entry associated with an offset commit\u003c/p\u003e\n","Type":"int","Default":"4096","ValidValues":"","Importance":"high","DynamicUpdateMode":"read-only","ServerDefaultProperty":""},{"Category":"broker","Name":"offsets.commit.required.acks","Description":"\n\u003cp\u003eThe required acks before the commit can be accepted. In general, the default (-1) should not be overridden\u003c/p\u003e\n","Type":"short","Default":"-1","ValidValues":"","Importance":"high","DynamicUpdateMode":"read-only","ServerDefaultProperty":""},{"Category":"broker","Name":"offsets.commit.timeout.ms","Description":"\n\u003cp\u003eOffset commit will be delayed until all replicas for the offsets topic receive the commit or this timeout is reached. This is similar to the producer request timeout.\u003c/p\u003e\n","Type":"int","Default":"5000","ValidValues":"[1,...]","Importance":"high","DynamicUpdateMode":"read-only","ServerDefaultProperty":""},{"Category":"broker","Name":"offsets.load.buffer.size","Description":"\n\u003cp\u003eBatch size for reading from the offsets segments when loading offsets into the cache (soft-limit, overridden if records are too large).\u003c/p\u003e\n","Type":"int","Default":"5242880","ValidValues":"[1,...]","Importance":"high","DynamicUpdateMode":"read-only","ServerDefaultProperty":""},{"Category":"broker","Name":"offsets.retention.check.interval.ms","Description":"\n\u003cp\u003eFrequency at which to check for stale offsets\u003c/p\u003e\n","Type":"long","Default":"600000","ValidValues":"[1,...]","Importance":"high","DynamicUpdateMode":"read-only","ServerDefaultProperty":""},{"Category":"broker","Name":"offsets.retention.minutes","Description":"\n\u003cp\u003eAfter a consumer group loses all its consumers (i.e. becomes empty) its offsets will be kept for this retention period before getting discarded. For standalone consumers (using manual assignment), offsets will be expired after the time of last commit plus this retention period.\u003c/p\u003e\n","Type":"int","Default":"10080","ValidValues":"[1,...]","Importance":"high","DynamicUpdateMode":"read-only","ServerDefaultProperty":""},{"Category":"broker","Name":"offsets.topic.compression.codec","Description":"\n\u003cp\u003eCompression codec for the offsets topic - compression may be used to achieve \u0026#34;atomic\u0026#34; commits\u003c/p\u003e\n","Type":"int","Default":"0","ValidValues":"","Importance":"high","DynamicUpdateMode":"read-only","ServerDefaultProperty":""},{"Category":"broker","Name":"offsets.topic.num.partitions","Description":"\n\u003cp\u003eThe number of partitions for the offset commit topic (should not change after deployment)\u003c/p\u003e\n","Type":"int","Default":"50","ValidValues":"[1,...]","Importance":"high","DynamicUpdateMode":"read-only","ServerDefaultProperty":""},{"Category":"broker","Name":"offsets.topic.replication.factor","Description":"\n\u003cp\u003eThe replication factor for the offsets topic (set higher to ensure availability). 
Internal topic creation will fail until the cluster size meets this replication factor requirement.\u003c/p\u003e\n","Type":"short","Default":"3","ValidValues":"[1,...]","Importance":"high","DynamicUpdateMode":"read-only","ServerDefaultProperty":""},{"Category":"broker","Name":"offsets.topic.segment.bytes","Description":"\n\u003cp\u003eThe offsets topic segment bytes should be kept relatively small in order to facilitate faster log compaction and cache loads\u003c/p\u003e\n","Type":"int","Default":"104857600","ValidValues":"[1,...]","Importance":"high","DynamicUpdateMode":"read-only","ServerDefaultProperty":""},{"Category":"broker","Name":"port","Description":"\n\u003cp\u003eDEPRECATED: only used when \u003ccode\u003elisteners\u003c/code\u003e is not set. Use \u003ccode\u003elisteners\u003c/code\u003e instead. \u003cbr/\u003ethe port to listen and accept connections on\u003c/p\u003e\n","Type":"int","Default":"9092","ValidValues":"","Importance":"high","DynamicUpdateMode":"read-only","ServerDefaultProperty":""},{"Category":"broker","Name":"queued.max.requests","Description":"\n\u003cp\u003eThe number of queued requests allowed for the data plane before blocking the network threads\u003c/p\u003e\n","Type":"int","Default":"500","ValidValues":"[1,...]","Importance":"high","DynamicUpdateMode":"read-only","ServerDefaultProperty":""},{"Category":"broker","Name":"quota.consumer.default","Description":"\n\u003cp\u003eDEPRECATED: Used only when dynamic default quotas are not configured for \u0026lt;user\u0026gt;, \u0026lt;client-id\u0026gt; or \u0026lt;user, client-id\u0026gt; in Zookeeper. Any consumer distinguished by clientId/consumer group will get throttled if it fetches more bytes than this value per-second\u003c/p\u003e\n","Type":"long","Default":"9223372036854775807","ValidValues":"[1,...]","Importance":"high","DynamicUpdateMode":"read-only","ServerDefaultProperty":""},{"Category":"broker","Name":"quota.producer.default","Description":"\n\u003cp\u003eDEPRECATED: Used only when dynamic default quotas are not configured for \u0026lt;user\u0026gt;, \u0026lt;client-id\u0026gt; or \u0026lt;user, client-id\u0026gt; in Zookeeper. Any producer distinguished by clientId will get throttled if it produces more bytes than this value per-second\u003c/p\u003e\n","Type":"long","Default":"9223372036854775807","ValidValues":"[1,...]","Importance":"high","DynamicUpdateMode":"read-only","ServerDefaultProperty":""},{"Category":"broker","Name":"replica.fetch.min.bytes","Description":"\n\u003cp\u003eMinimum bytes expected for each fetch response. If not enough bytes, wait up to replicaMaxWaitTimeMs\u003c/p\u003e\n","Type":"int","Default":"1","ValidValues":"","Importance":"high","DynamicUpdateMode":"read-only","ServerDefaultProperty":""},{"Category":"broker","Name":"replica.fetch.wait.max.ms","Description":"\n\u003cp\u003eMax wait time for each fetcher request issued by follower replicas. 
This value should always be less than the replica.lag.time.max.ms at all times to prevent frequent shrinking of ISR for low throughput topics\u003c/p\u003e\n","Type":"int","Default":"500","ValidValues":"","Importance":"high","DynamicUpdateMode":"read-only","ServerDefaultProperty":""},{"Category":"broker","Name":"replica.high.watermark.checkpoint.interval.ms","Description":"\n\u003cp\u003eThe frequency with which the high watermark is saved out to disk\u003c/p\u003e\n","Type":"long","Default":"5000","ValidValues":"","Importance":"high","DynamicUpdateMode":"read-only","ServerDefaultProperty":""},{"Category":"broker","Name":"replica.lag.time.max.ms","Description":"\n\u003cp\u003eIf a follower hasn\u0026#39;t sent any fetch requests or hasn\u0026#39;t consumed up to the leaders log end offset for at least this time, the leader will remove the follower from isr\u003c/p\u003e\n","Type":"long","Default":"30000","ValidValues":"","Importance":"high","DynamicUpdateMode":"read-only","ServerDefaultProperty":""},{"Category":"broker","Name":"replica.socket.receive.buffer.bytes","Description":"\n\u003cp\u003eThe socket receive buffer for network requests\u003c/p\u003e\n","Type":"int","Default":"65536","ValidValues":"","Importance":"high","DynamicUpdateMode":"read-only","ServerDefaultProperty":""},{"Category":"broker","Name":"replica.socket.timeout.ms","Description":"\n\u003cp\u003eThe socket timeout for network requests. Its value should be at least replica.fetch.wait.max.ms\u003c/p\u003e\n","Type":"int","Default":"30000","ValidValues":"","Importance":"high","DynamicUpdateMode":"read-only","ServerDefaultProperty":""},{"Category":"broker","Name":"request.timeout.ms","Description":"\n\u003cp\u003eThe configuration controls the maximum amount of time the client will wait for the response of a request. If the response is not received before the timeout elapses the client will resend the request if necessary or fail the request if retries are exhausted.\u003c/p\u003e\n","Type":"int","Default":"30000","ValidValues":"","Importance":"high","DynamicUpdateMode":"read-only","ServerDefaultProperty":""},{"Category":"broker","Name":"socket.receive.buffer.bytes","Description":"\n\u003cp\u003eThe SO_RCVBUF buffer of the socket server sockets. If the value is -1, the OS default will be used.\u003c/p\u003e\n","Type":"int","Default":"102400","ValidValues":"","Importance":"high","DynamicUpdateMode":"read-only","ServerDefaultProperty":""},{"Category":"broker","Name":"socket.request.max.bytes","Description":"\n\u003cp\u003eThe maximum number of bytes in a socket request\u003c/p\u003e\n","Type":"int","Default":"104857600","ValidValues":"[1,...]","Importance":"high","DynamicUpdateMode":"read-only","ServerDefaultProperty":""},{"Category":"broker","Name":"socket.send.buffer.bytes","Description":"\n\u003cp\u003eThe SO_SNDBUF buffer of the socket server sockets. If the value is -1, the OS default will be used.\u003c/p\u003e\n","Type":"int","Default":"102400","ValidValues":"","Importance":"high","DynamicUpdateMode":"read-only","ServerDefaultProperty":""},{"Category":"broker","Name":"transaction.max.timeout.ms","Description":"\n\u003cp\u003eThe maximum allowed timeout for transactions. If a client’s requested transaction time exceed this, then the broker will return an error in InitProducerIdRequest. 
This prevents a client from too large of a timeout, which can stall consumers reading from topics included in the transaction.\u003c/p\u003e\n","Type":"int","Default":"900000","ValidValues":"[1,...]","Importance":"high","DynamicUpdateMode":"read-only","ServerDefaultProperty":""},{"Category":"broker","Name":"transaction.state.log.load.buffer.size","Description":"\n\u003cp\u003eBatch size for reading from the transaction log segments when loading producer ids and transactions into the cache (soft-limit, overridden if records are too large).\u003c/p\u003e\n","Type":"int","Default":"5242880","ValidValues":"[1,...]","Importance":"high","DynamicUpdateMode":"read-only","ServerDefaultProperty":""},{"Category":"broker","Name":"transaction.state.log.min.isr","Description":"\n\u003cp\u003eOverridden min.insync.replicas config for the transaction topic.\u003c/p\u003e\n","Type":"int","Default":"2","ValidValues":"[1,...]","Importance":"high","DynamicUpdateMode":"read-only","ServerDefaultProperty":""},{"Category":"broker","Name":"transaction.state.log.num.partitions","Description":"\n\u003cp\u003eThe number of partitions for the transaction topic (should not change after deployment).\u003c/p\u003e\n","Type":"int","Default":"50","ValidValues":"[1,...]","Importance":"high","DynamicUpdateMode":"read-only","ServerDefaultProperty":""},{"Category":"broker","Name":"transaction.state.log.replication.factor","Description":"\n\u003cp\u003eThe replication factor for the transaction topic (set higher to ensure availability). Internal topic creation will fail until the cluster size meets this replication factor requirement.\u003c/p\u003e\n","Type":"short","Default":"3","ValidValues":"[1,...]","Importance":"high","DynamicUpdateMode":"read-only","ServerDefaultProperty":""},{"Category":"broker","Name":"transaction.state.log.segment.bytes","Description":"\n\u003cp\u003eThe transaction topic segment bytes should be kept relatively small in order to facilitate faster log compaction and cache loads\u003c/p\u003e\n","Type":"int","Default":"104857600","ValidValues":"[1,...]","Importance":"high","DynamicUpdateMode":"read-only","ServerDefaultProperty":""},{"Category":"broker","Name":"transactional.id.expiration.ms","Description":"\n\u003cp\u003eThe time in ms that the transaction coordinator will wait without receiving any transaction status updates for the current transaction before expiring its transactional id. This setting also influences producer id expiration - producer ids are expired once this time has elapsed after the last write with the given producer id. Note that producer ids may expire sooner if the last write from the producer id is deleted due to the topic\u0026#39;s retention settings.\u003c/p\u003e\n","Type":"int","Default":"604800000","ValidValues":"[1,...]","Importance":"high","DynamicUpdateMode":"read-only","ServerDefaultProperty":""},{"Category":"broker","Name":"unclean.leader.election.enable","Description":"\n\u003cp\u003eIndicates whether to enable replicas not in the ISR set to be elected as leader as a last resort, even though doing so may result in data loss\u003c/p\u003e\n","Type":"boolean","Default":"false","ValidValues":"","Importance":"high","DynamicUpdateMode":"cluster-wide","ServerDefaultProperty":""},{"Category":"broker","Name":"zookeeper.connection.timeout.ms","Description":"\n\u003cp\u003eThe max time that the client waits to establish a connection to zookeeper. 
If not set, the value in zookeeper.session.timeout.ms is used\u003c/p\u003e\n","Type":"int","Default":"null","ValidValues":"","Importance":"high","DynamicUpdateMode":"read-only","ServerDefaultProperty":""},{"Category":"broker","Name":"zookeeper.max.in.flight.requests","Description":"\n\u003cp\u003eThe maximum number of unacknowledged requests the client will send to Zookeeper before blocking.\u003c/p\u003e\n","Type":"int","Default":"10","ValidValues":"[1,...]","Importance":"high","DynamicUpdateMode":"read-only","ServerDefaultProperty":""},{"Category":"broker","Name":"zookeeper.session.timeout.ms","Description":"\n\u003cp\u003eZookeeper session timeout\u003c/p\u003e\n","Type":"int","Default":"18000","ValidValues":"","Importance":"high","DynamicUpdateMode":"read-only","ServerDefaultProperty":""},{"Category":"broker","Name":"zookeeper.set.acl","Description":"\n\u003cp\u003eSet client to use secure ACLs\u003c/p\u003e\n","Type":"boolean","Default":"false","ValidValues":"","Importance":"high","DynamicUpdateMode":"read-only","ServerDefaultProperty":""},{"Category":"broker","Name":"broker.id.generation.enable","Description":"\n\u003cp\u003eEnable automatic broker id generation on the server. When enabled the value configured for reserved.broker.max.id should be reviewed.\u003c/p\u003e\n","Type":"boolean","Default":"true","ValidValues":"","Importance":"medium","DynamicUpdateMode":"read-only","ServerDefaultProperty":""},{"Category":"broker","Name":"broker.rack","Description":"\n\u003cp\u003eRack of the broker. This will be used in rack aware replication assignment for fault tolerance. Examples: `RACK1`, `us-east-1d`\u003c/p\u003e\n","Type":"string","Default":"null","ValidValues":"","Importance":"medium","DynamicUpdateMode":"read-only","ServerDefaultProperty":""},{"Category":"broker","Name":"connections.max.idle.ms","Description":"\n\u003cp\u003eIdle connections timeout: the server socket processor threads close the connections that idle more than this\u003c/p\u003e\n","Type":"long","Default":"600000","ValidValues":"","Importance":"medium","DynamicUpdateMode":"read-only","ServerDefaultProperty":""},{"Category":"broker","Name":"connections.max.reauth.ms","Description":"\n\u003cp\u003eWhen explicitly set to a positive number (the default is 0, not a positive number), a session lifetime that will not exceed the configured value will be communicated to v2.2.0 or later clients when they authenticate. The broker will disconnect any such connection that is not re-authenticated within the session lifetime and that is then subsequently used for any purpose other than re-authentication. Configuration names can optionally be prefixed with listener prefix and SASL mechanism name in lower-case. For example, listener.name.sasl_ssl.oauthbearer.connections.max.reauth.ms=3600000\u003c/p\u003e\n","Type":"long","Default":"0","ValidValues":"","Importance":"medium","DynamicUpdateMode":"read-only","ServerDefaultProperty":""},{"Category":"broker","Name":"controlled.shutdown.enable","Description":"\n\u003cp\u003eEnable controlled shutdown of the server\u003c/p\u003e\n","Type":"boolean","Default":"true","ValidValues":"","Importance":"medium","DynamicUpdateMode":"read-only","ServerDefaultProperty":""},{"Category":"broker","Name":"controlled.shutdown.max.retries","Description":"\n\u003cp\u003eControlled shutdown can fail for multiple reasons. 
This determines the number of retries when such failure happens\u003c/p\u003e\n","Type":"int","Default":"3","ValidValues":"","Importance":"medium","DynamicUpdateMode":"read-only","ServerDefaultProperty":""},{"Category":"broker","Name":"controlled.shutdown.retry.backoff.ms","Description":"\n\u003cp\u003eBefore each retry, the system needs time to recover from the state that caused the previous failure (controller failover, replica lag, etc.). This config determines the amount of time to wait before retrying.\u003c/p\u003e\n","Type":"long","Default":"5000","ValidValues":"","Importance":"medium","DynamicUpdateMode":"read-only","ServerDefaultProperty":""},{"Category":"broker","Name":"controller.socket.timeout.ms","Description":"\n\u003cp\u003eThe socket timeout for controller-to-broker channels\u003c/p\u003e\n","Type":"int","Default":"30000","ValidValues":"","Importance":"medium","DynamicUpdateMode":"read-only","ServerDefaultProperty":""},{"Category":"broker","Name":"default.replication.factor","Description":"\n\u003cp\u003eThe default replication factor for automatically created topics\u003c/p\u003e\n","Type":"int","Default":"1","ValidValues":"","Importance":"medium","DynamicUpdateMode":"read-only","ServerDefaultProperty":""},{"Category":"broker","Name":"delegation.token.expiry.time.ms","Description":"\n\u003cp\u003eThe token validity time in milliseconds before the token needs to be renewed. Default value 1 day.\u003c/p\u003e\n","Type":"long","Default":"86400000","ValidValues":"[1,...]","Importance":"medium","DynamicUpdateMode":"read-only","ServerDefaultProperty":""},{"Category":"broker","Name":"delegation.token.master.key","Description":"\n\u003cp\u003eMaster/secret key to generate and verify delegation tokens. The same key must be configured across all the brokers. If the key is not set or set to an empty string, brokers will disable the delegation token support.\u003c/p\u003e\n","Type":"password","Default":"null","ValidValues":"","Importance":"medium","DynamicUpdateMode":"read-only","ServerDefaultProperty":""},{"Category":"broker","Name":"delegation.token.max.lifetime.ms","Description":"\n\u003cp\u003eThe token has a maximum lifetime beyond which it cannot be renewed anymore. Default value 7 days.\u003c/p\u003e\n","Type":"long","Default":"604800000","ValidValues":"[1,...]","Importance":"medium","DynamicUpdateMode":"read-only","ServerDefaultProperty":""},{"Category":"broker","Name":"delete.records.purgatory.purge.interval.requests","Description":"\n\u003cp\u003eThe purge interval (in number of requests) of the delete records request purgatory\u003c/p\u003e\n","Type":"int","Default":"1","ValidValues":"","Importance":"medium","DynamicUpdateMode":"read-only","ServerDefaultProperty":""},{"Category":"broker","Name":"fetch.max.bytes","Description":"\n\u003cp\u003eThe maximum number of bytes we will return for a fetch request. 
Must be at least 1024.\u003c/p\u003e\n","Type":"int","Default":"57671680","ValidValues":"[1024,...]","Importance":"medium","DynamicUpdateMode":"read-only","ServerDefaultProperty":""},{"Category":"broker","Name":"fetch.purgatory.purge.interval.requests","Description":"\n\u003cp\u003eThe purge interval (in number of requests) of the fetch request purgatory\u003c/p\u003e\n","Type":"int","Default":"1000","ValidValues":"","Importance":"medium","DynamicUpdateMode":"read-only","ServerDefaultProperty":""},{"Category":"broker","Name":"group.initial.rebalance.delay.ms","Description":"\n\u003cp\u003eThe amount of time the group coordinator will wait for more consumers to join a new group before performing the first rebalance. A longer delay means potentially fewer rebalances, but increases the time until processing begins.\u003c/p\u003e\n","Type":"int","Default":"3000","ValidValues":"","Importance":"medium","DynamicUpdateMode":"read-only","ServerDefaultProperty":""},{"Category":"broker","Name":"group.max.session.timeout.ms","Description":"\n\u003cp\u003eThe maximum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures.\u003c/p\u003e\n","Type":"int","Default":"1800000","ValidValues":"","Importance":"medium","DynamicUpdateMode":"read-only","ServerDefaultProperty":""},{"Category":"broker","Name":"group.max.size","Description":"\n\u003cp\u003eThe maximum number of consumers that a single consumer group can accommodate.\u003c/p\u003e\n","Type":"int","Default":"2147483647","ValidValues":"[1,...]","Importance":"medium","DynamicUpdateMode":"read-only","ServerDefaultProperty":""},{"Category":"broker","Name":"group.min.session.timeout.ms","Description":"\n\u003cp\u003eThe minimum allowed session timeout for registered consumers. Shorter timeouts result in quicker failure detection at the cost of more frequent consumer heartbeating, which can overwhelm broker resources.\u003c/p\u003e\n","Type":"int","Default":"6000","ValidValues":"","Importance":"medium","DynamicUpdateMode":"read-only","ServerDefaultProperty":""},{"Category":"broker","Name":"inter.broker.listener.name","Description":"\n\u003cp\u003eName of listener used for communication between brokers. If this is unset, the listener name is defined by security.inter.broker.protocol. 
It is an error to set this and security.inter.broker.protocol properties at the same time.\u003c/p\u003e\n","Type":"string","Default":"null","ValidValues":"","Importance":"medium","DynamicUpdateMode":"read-only","ServerDefaultProperty":""},{"Category":"broker","Name":"inter.broker.protocol.version","Description":"\n\u003cp\u003eSpecify which version of the inter-broker protocol will be used.\u003cbr/\u003e This is typically bumped after all brokers were upgraded to a new version.\u003cbr/\u003e Example of some valid values are: 0.8.0, 0.8.1, 0.8.1.1, 0.8.2, 0.8.2.0, 0.8.2.1, 0.9.0.0, 0.9.0.1 Check ApiVersion for the full list.\u003c/p\u003e\n","Type":"string","Default":"2.5-IV0","ValidValues":"[0.8.0, 0.8.1, 0.8.2, 0.9.0, 0.10.0-IV0, 0.10.0-IV1, 0.10.1-IV0, 0.10.1-IV1, 0.10.1-IV2, 0.10.2-IV0, 0.11.0-IV0, 0.11.0-IV1, 0.11.0-IV2, 1.0-IV0, 1.1-IV0, 2.0-IV0, 2.0-IV1, 2.1-IV0, 2.1-IV1, 2.1-IV2, 2.2-IV0, 2.2-IV1, 2.3-IV0, 2.3-IV1, 2.4-IV0, 2.4-IV1, 2.5-IV0]","Importance":"medium","DynamicUpdateMode":"read-only","ServerDefaultProperty":""},{"Category":"broker","Name":"log.cleaner.backoff.ms","Description":"\n\u003cp\u003eThe amount of time to sleep when there are no logs to clean\u003c/p\u003e\n","Type":"long","Default":"15000","ValidValues":"[0,...]","Importance":"medium","DynamicUpdateMode":"cluster-wide","ServerDefaultProperty":""},{"Category":"broker","Name":"log.cleaner.dedupe.buffer.size","Description":"\n\u003cp\u003eThe total memory used for log deduplication across all cleaner threads\u003c/p\u003e\n","Type":"long","Default":"134217728","ValidValues":"","Importance":"medium","DynamicUpdateMode":"cluster-wide","ServerDefaultProperty":""},{"Category":"broker","Name":"log.cleaner.delete.retention.ms","Description":"\n\u003cp\u003eHow long are delete records retained?\u003c/p\u003e\n","Type":"long","Default":"86400000","ValidValues":"","Importance":"medium","DynamicUpdateMode":"cluster-wide","ServerDefaultProperty":""},{"Category":"broker","Name":"log.cleaner.enable","Description":"\n\u003cp\u003eEnable the log cleaner process to run on the server. Should be enabled if using any topics with a cleanup.policy=compact including the internal offsets topic. If disabled those topics will not be compacted and continually grow in size.\u003c/p\u003e\n","Type":"boolean","Default":"true","ValidValues":"","Importance":"medium","DynamicUpdateMode":"read-only","ServerDefaultProperty":""},{"Category":"broker","Name":"log.cleaner.io.buffer.load.factor","Description":"\n\u003cp\u003eLog cleaner dedupe buffer load factor. The percentage full the dedupe buffer can become. 
A higher value will allow more log to be cleaned at once but will lead to more hash collisions\u003c/p\u003e\n","Type":"double","Default":"0.9","ValidValues":"","Importance":"medium","DynamicUpdateMode":"cluster-wide","ServerDefaultProperty":""},{"Category":"broker","Name":"log.cleaner.io.buffer.size","Description":"\n\u003cp\u003eThe total memory used for log cleaner I/O buffers across all cleaner threads\u003c/p\u003e\n","Type":"int","Default":"524288","ValidValues":"[0,...]","Importance":"medium","DynamicUpdateMode":"cluster-wide","ServerDefaultProperty":""},{"Category":"broker","Name":"log.cleaner.io.max.bytes.per.second","Description":"\n\u003cp\u003eThe log cleaner will be throttled so that the sum of its read and write I/O will be less than this value on average\u003c/p\u003e\n","Type":"double","Default":"1.7976931348623157E308","ValidValues":"","Importance":"medium","DynamicUpdateMode":"cluster-wide","ServerDefaultProperty":""},{"Category":"broker","Name":"log.cleaner.max.compaction.lag.ms","Description":"\n\u003cp\u003eThe maximum time a message will remain ineligible for compaction in the log. Only applicable for logs that are being compacted.\u003c/p\u003e\n","Type":"long","Default":"9223372036854775807","ValidValues":"","Importance":"medium","DynamicUpdateMode":"cluster-wide","ServerDefaultProperty":""},{"Category":"broker","Name":"log.cleaner.min.cleanable.ratio","Description":"\n\u003cp\u003eThe minimum ratio of dirty log to total log for a log to be eligible for cleaning. If the log.cleaner.max.compaction.lag.ms or the log.cleaner.min.compaction.lag.ms configurations are also specified, then the log compactor considers the log eligible for compaction as soon as either: (i) the dirty ratio threshold has been met and the log has had dirty (uncompacted) records for at least the log.cleaner.min.compaction.lag.ms duration, or (ii) the log has had dirty (uncompacted) records for at most the log.cleaner.max.compaction.lag.ms period.\u003c/p\u003e\n","Type":"double","Default":"0.5","ValidValues":"","Importance":"medium","DynamicUpdateMode":"cluster-wide","ServerDefaultProperty":""},{"Category":"broker","Name":"log.cleaner.min.compaction.lag.ms","Description":"\n\u003cp\u003eThe minimum time a message will remain uncompacted in the log. Only applicable for logs that are being compacted.\u003c/p\u003e\n","Type":"long","Default":"0","ValidValues":"","Importance":"medium","DynamicUpdateMode":"cluster-wide","ServerDefaultProperty":""},{"Category":"broker","Name":"log.cleaner.threads","Description":"\n\u003cp\u003eThe number of background threads to use for log cleaning\u003c/p\u003e\n","Type":"int","Default":"1","ValidValues":"[0,...]","Importance":"medium","DynamicUpdateMode":"cluster-wide","ServerDefaultProperty":""},{"Category":"broker","Name":"log.cleanup.policy","Description":"\n\u003cp\u003eThe default cleanup policy for segments beyond the retention window. A comma-separated list of valid policies. 
Valid policies are: \u0026#34;delete\u0026#34; and \u0026#34;compact\u0026#34;\u003c/p\u003e\n","Type":"list","Default":"delete","ValidValues":"[compact, delete]","Importance":"medium","DynamicUpdateMode":"cluster-wide","ServerDefaultProperty":""},{"Category":"broker","Name":"log.index.interval.bytes","Description":"\n\u003cp\u003eThe interval with which we add an entry to the offset index\u003c/p\u003e\n","Type":"int","Default":"4096","ValidValues":"[0,...]","Importance":"medium","DynamicUpdateMode":"cluster-wide","ServerDefaultProperty":""},{"Category":"broker","Name":"log.index.size.max.bytes","Description":"\n\u003cp\u003eThe maximum size in bytes of the offset index\u003c/p\u003e\n","Type":"int","Default":"10485760","ValidValues":"[4,...]","Importance":"medium","DynamicUpdateMode":"cluster-wide","ServerDefaultProperty":""},{"Category":"broker","Name":"log.message.format.version","Description":"\n\u003cp\u003eSpecify the message format version the broker will use to append messages to the logs. The value should be a valid ApiVersion. Some examples are: 0.8.2, 0.9.0.0, 0.10.0, check ApiVersion for more details. By setting a particular message format version, the user is certifying that all the existing messages on disk are smaller or equal than the specified version. Setting this value incorrectly will cause consumers with older versions to break as they will receive messages with a format that they don\u0026#39;t understand.\u003c/p\u003e\n","Type":"string","Default":"2.5-IV0","ValidValues":"[0.8.0, 0.8.1, 0.8.2, 0.9.0, 0.10.0-IV0, 0.10.0-IV1, 0.10.1-IV0, 0.10.1-IV1, 0.10.1-IV2, 0.10.2-IV0, 0.11.0-IV0, 0.11.0-IV1, 0.11.0-IV2, 1.0-IV0, 1.1-IV0, 2.0-IV0, 2.0-IV1, 2.1-IV0, 2.1-IV1, 2.1-IV2, 2.2-IV0, 2.2-IV1, 2.3-IV0, 2.3-IV1, 2.4-IV0, 2.4-IV1, 2.5-IV0]","Importance":"medium","DynamicUpdateMode":"read-only","ServerDefaultProperty":""},{"Category":"broker","Name":"log.message.timestamp.difference.max.ms","Description":"\n\u003cp\u003eThe maximum difference allowed between the timestamp when a broker receives a message and the timestamp specified in the message. If log.message.timestamp.type=CreateTime, a message will be rejected if the difference in timestamp exceeds this threshold. This configuration is ignored if log.message.timestamp.type=LogAppendTime.The maximum timestamp difference allowed should be no greater than log.retention.ms to avoid unnecessarily frequent log rolling.\u003c/p\u003e\n","Type":"long","Default":"9223372036854775807","ValidValues":"","Importance":"medium","DynamicUpdateMode":"cluster-wide","ServerDefaultProperty":""},{"Category":"broker","Name":"log.message.timestamp.type","Description":"\n\u003cp\u003eDefine whether the timestamp in the message is message create time or log append time. The value should be either `CreateTime` or `LogAppendTime`\u003c/p\u003e\n","Type":"string","Default":"CreateTime","ValidValues":"[CreateTime, LogAppendTime]","Importance":"medium","DynamicUpdateMode":"cluster-wide","ServerDefaultProperty":""},{"Category":"broker","Name":"log.preallocate","Description":"\n\u003cp\u003eShould pre allocate file when create new segment? 
If you are using Kafka on Windows, you probably need to set it to true.\u003c/p\u003e\n","Type":"boolean","Default":"false","ValidValues":"","Importance":"medium","DynamicUpdateMode":"cluster-wide","ServerDefaultProperty":""},{"Category":"broker","Name":"log.retention.check.interval.ms","Description":"\n\u003cp\u003eThe frequency in milliseconds that the log cleaner checks whether any log is eligible for deletion\u003c/p\u003e\n","Type":"long","Default":"300000","ValidValues":"[1,...]","Importance":"medium","DynamicUpdateMode":"read-only","ServerDefaultProperty":""},{"Category":"broker","Name":"max.connections","Description":"\n\u003cp\u003eThe maximum number of connections we allow in the broker at any time. This limit is applied in addition to any per-ip limits configured using max.connections.per.ip. Listener-level limits may also be configured by prefixing the config name with the listener prefix, for example, \u003ccode\u003elistener.name.internal.max.connections\u003c/code\u003e. Broker-wide limit should be configured based on broker capacity while listener limits should be configured based on application requirements. New connections are blocked if either the listener or broker limit is reached. Connections on the inter-broker listener are permitted even if broker-wide limit is reached. The least recently used connection on another listener will be closed in this case.\u003c/p\u003e\n","Type":"int","Default":"2147483647","ValidValues":"[0,...]","Importance":"medium","DynamicUpdateMode":"cluster-wide","ServerDefaultProperty":""},{"Category":"broker","Name":"max.connections.per.ip","Description":"\n\u003cp\u003eThe maximum number of connections we allow from each ip address. This can be set to 0 if there are overrides configured using max.connections.per.ip.overrides property. New connections from the ip address are dropped if the limit is reached.\u003c/p\u003e\n","Type":"int","Default":"2147483647","ValidValues":"[0,...]","Importance":"medium","DynamicUpdateMode":"cluster-wide","ServerDefaultProperty":""},{"Category":"broker","Name":"max.connections.per.ip.overrides","Description":"\n\u003cp\u003eA comma-separated list of per-ip or hostname overrides to the default maximum number of connections. An example value is \u0026#34;hostName:100,127.0.0.1:200\u0026#34;\u003c/p\u003e\n","Type":"string","Default":"\"\"","ValidValues":"","Importance":"medium","DynamicUpdateMode":"cluster-wide","ServerDefaultProperty":""},{"Category":"broker","Name":"max.incremental.fetch.session.cache.slots","Description":"\n\u003cp\u003eThe maximum number of incremental fetch sessions that we will maintain.\u003c/p\u003e\n","Type":"int","Default":"1000","ValidValues":"[0,...]","Importance":"medium","DynamicUpdateMode":"read-only","ServerDefaultProperty":""},{"Category":"broker","Name":"num.partitions","Description":"\n\u003cp\u003eThe default number of log partitions per topic\u003c/p\u003e\n","Type":"int","Default":"1","ValidValues":"[1,...]","Importance":"medium","DynamicUpdateMode":"read-only","ServerDefaultProperty":""},{"Category":"broker","Name":"password.encoder.old.secret","Description":"\n\u003cp\u003eThe old secret that was used for encoding dynamically configured passwords. This is required only when the secret is updated. 
If specified, all dynamically encoded passwords are decoded using this old secret and re-encoded using password.encoder.secret when broker starts up.\u003c/p\u003e\n","Type":"password","Default":"null","ValidValues":"","Importance":"medium","DynamicUpdateMode":"read-only","ServerDefaultProperty":""},{"Category":"broker","Name":"password.encoder.secret","Description":"\n\u003cp\u003eThe secret used for encoding dynamically configured passwords for this broker.\u003c/p\u003e\n","Type":"password","Default":"null","ValidValues":"","Importance":"medium","DynamicUpdateMode":"read-only","ServerDefaultProperty":""},{"Category":"broker","Name":"principal.builder.class","Description":"\n\u003cp\u003eThe fully qualified name of a class that implements the KafkaPrincipalBuilder interface, which is used to build the KafkaPrincipal object used during authorization. This config also supports the deprecated PrincipalBuilder interface which was previously used for client authentication over SSL. If no principal builder is defined, the default behavior depends on the security protocol in use. For SSL authentication, the principal will be derived using the rules defined by \u003ccode\u003essl.principal.mapping.rules\u003c/code\u003e applied on the distinguished name from the client certificate if one is provided; otherwise, if client authentication is not required, the principal name will be ANONYMOUS. For SASL authentication, the principal will be derived using the rules defined by \u003ccode\u003esasl.kerberos.principal.to.local.rules\u003c/code\u003e if GSSAPI is in use, and the SASL authentication ID for other mechanisms. For PLAINTEXT, the principal will be ANONYMOUS.\u003c/p\u003e\n","Type":"class","Default":"null","ValidValues":"","Importance":"medium","DynamicUpdateMode":"per-broker","ServerDefaultProperty":""},{"Category":"broker","Name":"producer.purgatory.purge.interval.requests","Description":"\n\u003cp\u003eThe purge interval (in number of requests) of the producer request purgatory\u003c/p\u003e\n","Type":"int","Default":"1000","ValidValues":"","Importance":"medium","DynamicUpdateMode":"read-only","ServerDefaultProperty":""},{"Category":"broker","Name":"queued.max.request.bytes","Description":"\n\u003cp\u003eThe number of queued bytes allowed before no more requests are read\u003c/p\u003e\n","Type":"long","Default":"-1","ValidValues":"","Importance":"medium","DynamicUpdateMode":"read-only","ServerDefaultProperty":""},{"Category":"broker","Name":"replica.fetch.backoff.ms","Description":"\n\u003cp\u003eThe amount of time to sleep when fetch partition error occurs.\u003c/p\u003e\n","Type":"int","Default":"1000","ValidValues":"[0,...]","Importance":"medium","DynamicUpdateMode":"read-only","ServerDefaultProperty":""},{"Category":"broker","Name":"replica.fetch.max.bytes","Description":"\n\u003cp\u003eThe number of bytes of messages to attempt to fetch for each partition. This is not an absolute maximum, if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that progress can be made. 
The maximum record batch size accepted by the broker is defined via \u003ccode\u003emessage.max.bytes\u003c/code\u003e (broker config) or \u003ccode\u003emax.message.bytes\u003c/code\u003e (topic config).\u003c/p\u003e\n","Type":"int","Default":"1048576","ValidValues":"[0,...]","Importance":"medium","DynamicUpdateMode":"read-only","ServerDefaultProperty":""},{"Category":"broker","Name":"replica.fetch.response.max.bytes","Description":"\n\u003cp\u003eMaximum bytes expected for the entire fetch response. Records are fetched in batches, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that progress can be made. As such, this is not an absolute maximum. The maximum record batch size accepted by the broker is defined via \u003ccode\u003emessage.max.bytes\u003c/code\u003e (broker config) or \u003ccode\u003emax.message.bytes\u003c/code\u003e (topic config).\u003c/p\u003e\n","Type":"int","Default":"10485760","ValidValues":"[0,...]","Importance":"medium","DynamicUpdateMode":"read-only","ServerDefaultProperty":""},{"Category":"broker","Name":"replica.selector.class","Description":"\n\u003cp\u003eThe fully qualified class name that implements ReplicaSelector. This is used by the broker to find the preferred read replica. By default, we use an implementation that returns the leader.\u003c/p\u003e\n","Type":"string","Default":"null","ValidValues":"","Importance":"medium","DynamicUpdateMode":"read-only","ServerDefaultProperty":""},{"Category":"broker","Name":"reserved.broker.max.id","Description":"\n\u003cp\u003eMax number that can be used for a broker.id\u003c/p\u003e\n","Type":"int","Default":"1000","ValidValues":"[0,...]","Importance":"medium","DynamicUpdateMode":"read-only","ServerDefaultProperty":""},{"Category":"broker","Name":"sasl.client.callback.handler.class","Description":"\n\u003cp\u003eThe fully qualified name of a SASL client callback handler class that implements the AuthenticateCallbackHandler interface.\u003c/p\u003e\n","Type":"class","Default":"null","ValidValues":"","Importance":"medium","DynamicUpdateMode":"read-only","ServerDefaultProperty":""},{"Category":"broker","Name":"sasl.enabled.mechanisms","Description":"\n\u003cp\u003eThe list of SASL mechanisms enabled in the Kafka server. The list may contain any mechanism for which a security provider is available. Only GSSAPI is enabled by default.\u003c/p\u003e\n","Type":"list","Default":"GSSAPI","ValidValues":"","Importance":"medium","DynamicUpdateMode":"per-broker","ServerDefaultProperty":""},{"Category":"broker","Name":"sasl.jaas.config","Description":"\n\u003cp\u003eJAAS login context parameters for SASL connections in the format used by JAAS configuration files. JAAS configuration file format is described \u003ca href=\"http://docs.oracle.com/javase/8/docs/technotes/guides/security/jgss/tutorials/LoginConfigFile.html\"\u003ehere\u003c/a\u003e. The format for the value is: \u0026#39;\u003ccode\u003eloginModuleClass controlFlag (optionName=optionValue)*;\u003c/code\u003e\u0026#39;. For brokers, the config must be prefixed with listener prefix and SASL mechanism name in lower-case. 
For example, listener.name.sasl_ssl.scram-sha-256.sasl.jaas.config=com.example.ScramLoginModule required;\u003c/p\u003e\n","Type":"password","Default":"null","ValidValues":"","Importance":"medium","DynamicUpdateMode":"per-broker","ServerDefaultProperty":""},{"Category":"broker","Name":"sasl.kerberos.kinit.cmd","Description":"\n\u003cp\u003eKerberos kinit command path.\u003c/p\u003e\n","Type":"string","Default":"/usr/bin/kinit","ValidValues":"","Importance":"medium","DynamicUpdateMode":"per-broker","ServerDefaultProperty":""},{"Category":"broker","Name":"sasl.kerberos.min.time.before.relogin","Description":"\n\u003cp\u003eLogin thread sleep time between refresh attempts.\u003c/p\u003e\n","Type":"long","Default":"60000","ValidValues":"","Importance":"medium","DynamicUpdateMode":"per-broker","ServerDefaultProperty":""},{"Category":"broker","Name":"sasl.kerberos.principal.to.local.rules","Description":"\n\u003cp\u003eA list of rules for mapping from principal names to short names (typically operating system usernames). The rules are evaluated in order and the first rule that matches a principal name is used to map it to a short name. Any later rules in the list are ignored. By default, principal names of the form {username}/{hostname}@{REALM} are mapped to {username}. For more details on the format please see \u003ca href=\"#security_authz\"\u003e security authorization and acls\u003c/a\u003e. Note that this configuration is ignored if an extension of KafkaPrincipalBuilder is provided by the \u003ccode\u003eprincipal.builder.class\u003c/code\u003e configuration.\u003c/p\u003e\n","Type":"list","Default":"DEFAULT","ValidValues":"","Importance":"medium","DynamicUpdateMode":"per-broker","ServerDefaultProperty":""},{"Category":"broker","Name":"sasl.kerberos.service.name","Description":"\n\u003cp\u003eThe Kerberos principal name that Kafka runs as. This can be defined either in Kafka\u0026#39;s JAAS config or in Kafka\u0026#39;s config.\u003c/p\u003e\n","Type":"string","Default":"null","ValidValues":"","Importance":"medium","DynamicUpdateMode":"per-broker","ServerDefaultProperty":""},{"Category":"broker","Name":"sasl.kerberos.ticket.renew.jitter","Description":"\n\u003cp\u003ePercentage of random jitter added to the renewal time.\u003c/p\u003e\n","Type":"double","Default":"0.05","ValidValues":"","Importance":"medium","DynamicUpdateMode":"per-broker","ServerDefaultProperty":""},{"Category":"broker","Name":"sasl.kerberos.ticket.renew.window.factor","Description":"\n\u003cp\u003eLogin thread will sleep until the specified window factor of time from last refresh to ticket\u0026#39;s expiry has been reached, at which time it will try to renew the ticket.\u003c/p\u003e\n","Type":"double","Default":"0.8","ValidValues":"","Importance":"medium","DynamicUpdateMode":"per-broker","ServerDefaultProperty":""},{"Category":"broker","Name":"sasl.login.callback.handler.class","Description":"\n\u003cp\u003eThe fully qualified name of a SASL login callback handler class that implements the AuthenticateCallbackHandler interface. For brokers, login callback handler config must be prefixed with listener prefix and SASL mechanism name in lower-case. 
For example, listener.name.sasl_ssl.scram-sha-256.sasl.login.callback.handler.class=com.example.CustomScramLoginCallbackHandler\u003c/p\u003e\n","Type":"class","Default":"null","ValidValues":"","Importance":"medium","DynamicUpdateMode":"read-only","ServerDefaultProperty":""},{"Category":"broker","Name":"sasl.login.class","Description":"\n\u003cp\u003eThe fully qualified name of a class that implements the Login interface. For brokers, login config must be prefixed with listener prefix and SASL mechanism name in lower-case. For example, listener.name.sasl_ssl.scram-sha-256.sasl.login.class=com.example.CustomScramLogin\u003c/p\u003e\n","Type":"class","Default":"null","ValidValues":"","Importance":"medium","DynamicUpdateMode":"read-only","ServerDefaultProperty":""},{"Category":"broker","Name":"sasl.login.refresh.buffer.seconds","Description":"\n\u003cp\u003eThe amount of buffer time before credential expiration to maintain when refreshing a credential, in seconds. If a refresh would otherwise occur closer to expiration than the number of buffer seconds then the refresh will be moved up to maintain as much of the buffer time as possible. Legal values are between 0 and 3600 (1 hour); a default value of 300 (5 minutes) is used if no value is specified. This value and sasl.login.refresh.min.period.seconds are both ignored if their sum exceeds the remaining lifetime of a credential. Currently applies only to OAUTHBEARER.\u003c/p\u003e\n","Type":"short","Default":"300","ValidValues":"","Importance":"medium","DynamicUpdateMode":"per-broker","ServerDefaultProperty":""},{"Category":"broker","Name":"sasl.login.refresh.min.period.seconds","Description":"\n\u003cp\u003eThe desired minimum time for the login refresh thread to wait before refreshing a credential, in seconds. Legal values are between 0 and 900 (15 minutes); a default value of 60 (1 minute) is used if no value is specified. This value and sasl.login.refresh.buffer.seconds are both ignored if their sum exceeds the remaining lifetime of a credential. Currently applies only to OAUTHBEARER.\u003c/p\u003e\n","Type":"short","Default":"60","ValidValues":"","Importance":"medium","DynamicUpdateMode":"per-broker","ServerDefaultProperty":""},{"Category":"broker","Name":"sasl.login.refresh.window.factor","Description":"\n\u003cp\u003eLogin refresh thread will sleep until the specified window factor relative to the credential\u0026#39;s lifetime has been reached, at which time it will try to refresh the credential. Legal values are between 0.5 (50%) and 1.0 (100%) inclusive; a default value of 0.8 (80%) is used if no value is specified. Currently applies only to OAUTHBEARER.\u003c/p\u003e\n","Type":"double","Default":"0.8","ValidValues":"","Importance":"medium","DynamicUpdateMode":"per-broker","ServerDefaultProperty":""},{"Category":"broker","Name":"sasl.login.refresh.window.jitter","Description":"\n\u003cp\u003eThe maximum amount of random jitter relative to the credential\u0026#39;s lifetime that is added to the login refresh thread\u0026#39;s sleep time. Legal values are between 0 and 0.25 (25%) inclusive; a default value of 0.05 (5%) is used if no value is specified. Currently applies only to OAUTHBEARER.\u003c/p\u003e\n","Type":"double","Default":"0.05","ValidValues":"","Importance":"medium","DynamicUpdateMode":"per-broker","ServerDefaultProperty":""},{"Category":"broker","Name":"sasl.mechanism.inter.broker.protocol","Description":"\n\u003cp\u003eSASL mechanism used for inter-broker communication. 
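For example (an illustrative choice; any enabled mechanism may be used), a cluster that uses SCRAM throughout might set \u003ccode\u003esasl.mechanism.inter.broker.protocol=SCRAM-SHA-256\u003c/code\u003e. 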
Default is GSSAPI.\u003c/p\u003e\n","Type":"string","Default":"GSSAPI","ValidValues":"","Importance":"medium","DynamicUpdateMode":"per-broker","ServerDefaultProperty":""},{"Category":"broker","Name":"sasl.server.callback.handler.class","Description":"\n\u003cp\u003eThe fully qualified name of a SASL server callback handler class that implements the AuthenticateCallbackHandler interface. Server callback handlers must be prefixed with listener prefix and SASL mechanism name in lower-case. For example, listener.name.sasl_ssl.plain.sasl.server.callback.handler.class=com.example.CustomPlainCallbackHandler.\u003c/p\u003e\n","Type":"class","Default":"null","ValidValues":"","Importance":"medium","DynamicUpdateMode":"read-only","ServerDefaultProperty":""},{"Category":"broker","Name":"security.inter.broker.protocol","Description":"\n\u003cp\u003eSecurity protocol used to communicate between brokers. Valid values are: PLAINTEXT, SSL, SASL_PLAINTEXT, SASL_SSL. It is an error to set this and inter.broker.listener.name properties at the same time.\u003c/p\u003e\n","Type":"string","Default":"PLAINTEXT","ValidValues":"","Importance":"medium","DynamicUpdateMode":"read-only","ServerDefaultProperty":""},{"Category":"broker","Name":"ssl.cipher.suites","Description":"\n\u003cp\u003eA list of cipher suites. This is a named combination of authentication, encryption, MAC and key exchange algorithm used to negotiate the security settings for a network connection using TLS or SSL network protocol. By default all the available cipher suites are supported.\u003c/p\u003e\n","Type":"list","Default":"\"\"","ValidValues":"","Importance":"medium","DynamicUpdateMode":"per-broker","ServerDefaultProperty":""},{"Category":"broker","Name":"ssl.client.auth","Description":"\n\u003cp\u003eConfigures the Kafka broker to request client authentication. The following settings are common: \u003c/p\u003e\u003cul\u003e \u003cli\u003e\u003ccode\u003essl.client.auth=required\u003c/code\u003e If set to required, client authentication is required. \u003c/li\u003e\u003cli\u003e\u003ccode\u003essl.client.auth=requested\u003c/code\u003e This means client authentication is optional. Unlike required, if this option is set the client can choose not to provide authentication information about itself. \u003c/li\u003e\u003cli\u003e\u003ccode\u003essl.client.auth=none\u003c/code\u003e This means client authentication is not needed.\u003c/li\u003e\u003c/ul\u003e\u003cp\u003e\u003c/p\u003e\n","Type":"string","Default":"none","ValidValues":"[required, requested, none]","Importance":"medium","DynamicUpdateMode":"per-broker","ServerDefaultProperty":""},{"Category":"broker","Name":"ssl.enabled.protocols","Description":"\n\u003cp\u003eThe list of protocols enabled for SSL connections.\u003c/p\u003e\n","Type":"list","Default":"TLSv1.2","ValidValues":"","Importance":"medium","DynamicUpdateMode":"per-broker","ServerDefaultProperty":""},{"Category":"broker","Name":"ssl.key.password","Description":"\n\u003cp\u003eThe password of the private key in the key store file. This is optional for client.\u003c/p\u003e\n","Type":"password","Default":"null","ValidValues":"","Importance":"medium","DynamicUpdateMode":"per-broker","ServerDefaultProperty":""},{"Category":"broker","Name":"ssl.keymanager.algorithm","Description":"\n\u003cp\u003eThe algorithm used by key manager factory for SSL connections. 
Default value is the key manager factory algorithm configured for the Java Virtual Machine.\u003c/p\u003e\n","Type":"string","Default":"SunX509","ValidValues":"","Importance":"medium","DynamicUpdateMode":"per-broker","ServerDefaultProperty":""},{"Category":"broker","Name":"ssl.keystore.location","Description":"\n\u003cp\u003eThe location of the key store file. This is optional for client and can be used for two-way authentication for client.\u003c/p\u003e\n","Type":"string","Default":"null","ValidValues":"","Importance":"medium","DynamicUpdateMode":"per-broker","ServerDefaultProperty":""},{"Category":"broker","Name":"ssl.keystore.password","Description":"\n\u003cp\u003eThe store password for the key store file. This is optional for client and only needed if ssl.keystore.location is configured. \u003c/p\u003e\n","Type":"password","Default":"null","ValidValues":"","Importance":"medium","DynamicUpdateMode":"per-broker","ServerDefaultProperty":""},{"Category":"broker","Name":"ssl.keystore.type","Description":"\n\u003cp\u003eThe file format of the key store file. This is optional for client.\u003c/p\u003e\n","Type":"string","Default":"JKS","ValidValues":"","Importance":"medium","DynamicUpdateMode":"per-broker","ServerDefaultProperty":""},{"Category":"broker","Name":"ssl.protocol","Description":"\n\u003cp\u003eThe SSL protocol used to generate the SSLContext. Default setting is TLSv1.2, which is fine for most cases. Allowed values in recent JVMs are TLSv1.2 and TLSv1.3. TLS, TLSv1.1, SSL, SSLv2 and SSLv3 may be supported in older JVMs, but their usage is discouraged due to known security vulnerabilities.\u003c/p\u003e\n","Type":"string","Default":"TLSv1.2","ValidValues":"","Importance":"medium","DynamicUpdateMode":"per-broker","ServerDefaultProperty":""},{"Category":"broker","Name":"ssl.provider","Description":"\n\u003cp\u003eThe name of the security provider used for SSL connections. Default value is the default security provider of the JVM.\u003c/p\u003e\n","Type":"string","Default":"null","ValidValues":"","Importance":"medium","DynamicUpdateMode":"per-broker","ServerDefaultProperty":""},{"Category":"broker","Name":"ssl.trustmanager.algorithm","Description":"\n\u003cp\u003eThe algorithm used by trust manager factory for SSL connections. Default value is the trust manager factory algorithm configured for the Java Virtual Machine.\u003c/p\u003e\n","Type":"string","Default":"PKIX","ValidValues":"","Importance":"medium","DynamicUpdateMode":"per-broker","ServerDefaultProperty":""},{"Category":"broker","Name":"ssl.truststore.location","Description":"\n\u003cp\u003eThe location of the trust store file. \u003c/p\u003e\n","Type":"string","Default":"null","ValidValues":"","Importance":"medium","DynamicUpdateMode":"per-broker","ServerDefaultProperty":""},{"Category":"broker","Name":"ssl.truststore.password","Description":"\n\u003cp\u003eThe password for the trust store file. 
If a password is not set, access to the truststore is still available, but integrity checking is disabled.\u003c/p\u003e\n","Type":"password","Default":"null","ValidValues":"","Importance":"medium","DynamicUpdateMode":"per-broker","ServerDefaultProperty":""},{"Category":"broker","Name":"ssl.truststore.type","Description":"\n\u003cp\u003eThe file format of the trust store file.\u003c/p\u003e\n","Type":"string","Default":"JKS","ValidValues":"","Importance":"medium","DynamicUpdateMode":"per-broker","ServerDefaultProperty":""},{"Category":"broker","Name":"zookeeper.clientCnxnSocket","Description":"\n\u003cp\u003eTypically set to \u003ccode\u003eorg.apache.zookeeper.ClientCnxnSocketNetty\u003c/code\u003e when using TLS connectivity to ZooKeeper. Overrides any explicit value set via the same-named \u003ccode\u003ezookeeper.clientCnxnSocket\u003c/code\u003e system property.\u003c/p\u003e\n","Type":"string","Default":"null","ValidValues":"","Importance":"medium","DynamicUpdateMode":"read-only","ServerDefaultProperty":""},{"Category":"broker","Name":"zookeeper.ssl.client.enable","Description":"\n\u003cp\u003eSet client to use TLS when connecting to ZooKeeper. An explicit value overrides any value set via the \u003ccode\u003ezookeeper.client.secure\u003c/code\u003e system property (note the different name). Defaults to false if neither is set; when true, \u003ccode\u003ezookeeper.clientCnxnSocket\u003c/code\u003e must be set (typically to \u003ccode\u003eorg.apache.zookeeper.ClientCnxnSocketNetty\u003c/code\u003e); other values to set may include \u003ccode\u003ezookeeper.ssl.cipher.suites\u003c/code\u003e, \u003ccode\u003ezookeeper.ssl.crl.enable\u003c/code\u003e, \u003ccode\u003ezookeeper.ssl.enabled.protocols\u003c/code\u003e, \u003ccode\u003ezookeeper.ssl.endpoint.identification.algorithm\u003c/code\u003e, \u003ccode\u003ezookeeper.ssl.keystore.location\u003c/code\u003e, \u003ccode\u003ezookeeper.ssl.keystore.password\u003c/code\u003e, \u003ccode\u003ezookeeper.ssl.keystore.type\u003c/code\u003e, \u003ccode\u003ezookeeper.ssl.ocsp.enable\u003c/code\u003e, \u003ccode\u003ezookeeper.ssl.protocol\u003c/code\u003e, \u003ccode\u003ezookeeper.ssl.truststore.location\u003c/code\u003e, \u003ccode\u003ezookeeper.ssl.truststore.password\u003c/code\u003e, \u003ccode\u003ezookeeper.ssl.truststore.type\u003c/code\u003e.\u003c/p\u003e\n","Type":"boolean","Default":"false","ValidValues":"","Importance":"medium","DynamicUpdateMode":"read-only","ServerDefaultProperty":""},{"Category":"broker","Name":"zookeeper.ssl.keystore.location","Description":"\n\u003cp\u003eKeystore location when using a client-side certificate with TLS connectivity to ZooKeeper. Overrides any explicit value set via the \u003ccode\u003ezookeeper.ssl.keyStore.location\u003c/code\u003e system property (note the camelCase).\u003c/p\u003e\n","Type":"string","Default":"null","ValidValues":"","Importance":"medium","DynamicUpdateMode":"read-only","ServerDefaultProperty":""},{"Category":"broker","Name":"zookeeper.ssl.keystore.password","Description":"\n\u003cp\u003eKeystore password when using a client-side certificate with TLS connectivity to ZooKeeper. Overrides any explicit value set via the \u003ccode\u003ezookeeper.ssl.keyStore.password\u003c/code\u003e system property (note the camelCase). 
Note that ZooKeeper does not support a key password different from the keystore password, so be sure to set the key password in the keystore to be identical to the keystore password; otherwise the connection attempt to ZooKeeper will fail.\u003c/p\u003e\n","Type":"password","Default":"null","ValidValues":"","Importance":"medium","DynamicUpdateMode":"read-only","ServerDefaultProperty":""},{"Category":"broker","Name":"zookeeper.ssl.keystore.type","Description":"\n\u003cp\u003eKeystore type when using a client-side certificate with TLS connectivity to ZooKeeper. Overrides any explicit value set via the \u003ccode\u003ezookeeper.ssl.keyStore.type\u003c/code\u003e system property (note the camelCase). The default value of \u003ccode\u003enull\u003c/code\u003e means the type will be auto-detected based on the filename extension of the keystore.\u003c/p\u003e\n","Type":"string","Default":"null","ValidValues":"","Importance":"medium","DynamicUpdateMode":"read-only","ServerDefaultProperty":""},{"Category":"broker","Name":"zookeeper.ssl.truststore.location","Description":"\n\u003cp\u003eTruststore location when using TLS connectivity to ZooKeeper. Overrides any explicit value set via the \u003ccode\u003ezookeeper.ssl.trustStore.location\u003c/code\u003e system property (note the camelCase).\u003c/p\u003e\n","Type":"string","Default":"null","ValidValues":"","Importance":"medium","DynamicUpdateMode":"read-only","ServerDefaultProperty":""},{"Category":"broker","Name":"zookeeper.ssl.truststore.password","Description":"\n\u003cp\u003eTruststore password when using TLS connectivity to ZooKeeper. Overrides any explicit value set via the \u003ccode\u003ezookeeper.ssl.trustStore.password\u003c/code\u003e system property (note the camelCase).\u003c/p\u003e\n","Type":"password","Default":"null","ValidValues":"","Importance":"medium","DynamicUpdateMode":"read-only","ServerDefaultProperty":""},{"Category":"broker","Name":"zookeeper.ssl.truststore.type","Description":"\n\u003cp\u003eTruststore type when using TLS connectivity to ZooKeeper. Overrides any explicit value set via the \u003ccode\u003ezookeeper.ssl.trustStore.type\u003c/code\u003e system property (note the camelCase). The default value of \u003ccode\u003enull\u003c/code\u003e means the type will be auto-detected based on the filename extension of the truststore.\u003c/p\u003e\n","Type":"string","Default":"null","ValidValues":"","Importance":"medium","DynamicUpdateMode":"read-only","ServerDefaultProperty":""},{"Category":"broker","Name":"alter.config.policy.class.name","Description":"\n\u003cp\u003eThe alter configs policy class that should be used for validation. 
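For example, \u003ccode\u003ealter.config.policy.class.name=com.example.MyAlterConfigPolicy\u003c/code\u003e (a hypothetical implementation class). 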
The class should implement the \u003ccode\u003eorg.apache.kafka.server.policy.AlterConfigPolicy\u003c/code\u003e interface.\u003c/p\u003e\n","Type":"class","Default":"null","ValidValues":"","Importance":"low","DynamicUpdateMode":"read-only","ServerDefaultProperty":""},{"Category":"broker","Name":"alter.log.dirs.replication.quota.window.num","Description":"\n\u003cp\u003eThe number of samples to retain in memory for alter log dirs replication quotas\u003c/p\u003e\n","Type":"int","Default":"11","ValidValues":"[1,...]","Importance":"low","DynamicUpdateMode":"read-only","ServerDefaultProperty":""},{"Category":"broker","Name":"alter.log.dirs.replication.quota.window.size.seconds","Description":"\n\u003cp\u003eThe time span of each sample for alter log dirs replication quotas\u003c/p\u003e\n","Type":"int","Default":"1","ValidValues":"[1,...]","Importance":"low","DynamicUpdateMode":"read-only","ServerDefaultProperty":""},{"Category":"broker","Name":"authorizer.class.name","Description":"\n\u003cp\u003eThe fully qualified name of a class that implements the org.apache.kafka.server.authorizer.Authorizer interface, which is used by the broker for authorization. This config also supports authorizers that implement the deprecated kafka.security.auth.Authorizer trait which was previously used for authorization.\u003c/p\u003e\n","Type":"string","Default":"\"\"","ValidValues":"","Importance":"low","DynamicUpdateMode":"read-only","ServerDefaultProperty":""},{"Category":"broker","Name":"client.quota.callback.class","Description":"\n\u003cp\u003eThe fully qualified name of a class that implements the ClientQuotaCallback interface, which is used to determine quota limits applied to client requests. By default, \u0026lt;user, client-id\u0026gt;, \u0026lt;user\u0026gt; or \u0026lt;client-id\u0026gt; quotas stored in ZooKeeper are applied. For any given request, the most specific quota that matches the user principal of the session and the client-id of the request is applied.\u003c/p\u003e\n","Type":"class","Default":"null","ValidValues":"","Importance":"low","DynamicUpdateMode":"read-only","ServerDefaultProperty":""},{"Category":"broker","Name":"connection.failed.authentication.delay.ms","Description":"\n\u003cp\u003eConnection close delay on failed authentication: this is the time (in milliseconds) by which connection close will be delayed on authentication failure. This must be configured to be less than connections.max.idle.ms to prevent connection timeout.\u003c/p\u003e\n","Type":"int","Default":"100","ValidValues":"[0,...]","Importance":"low","DynamicUpdateMode":"read-only","ServerDefaultProperty":""},{"Category":"broker","Name":"create.topic.policy.class.name","Description":"\n\u003cp\u003eThe create topic policy class that should be used for validation. 
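For example, \u003ccode\u003ecreate.topic.policy.class.name=com.example.MyCreateTopicPolicy\u003c/code\u003e (a hypothetical implementation class). 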
The class should implement the \u003ccode\u003eorg.apache.kafka.server.policy.CreateTopicPolicy\u003c/code\u003e interface.\u003c/p\u003e\n","Type":"class","Default":"null","ValidValues":"","Importance":"low","DynamicUpdateMode":"read-only","ServerDefaultProperty":""},{"Category":"broker","Name":"delegation.token.expiry.check.interval.ms","Description":"\n\u003cp\u003eScan interval to remove expired delegation tokens.\u003c/p\u003e\n","Type":"long","Default":"3600000","ValidValues":"[1,...]","Importance":"low","DynamicUpdateMode":"read-only","ServerDefaultProperty":""},{"Category":"broker","Name":"kafka.metrics.polling.interval.secs","Description":"\n\u003cp\u003eThe metrics polling interval (in seconds) which can be used in kafka.metrics.reporters implementations.\u003c/p\u003e\n","Type":"int","Default":"10","ValidValues":"[1,...]","Importance":"low","DynamicUpdateMode":"read-only","ServerDefaultProperty":""},{"Category":"broker","Name":"kafka.metrics.reporters","Description":"\n\u003cp\u003eA list of classes to use as Yammer metrics custom reporters. The reporters should implement \u003ccode\u003ekafka.metrics.KafkaMetricsReporter\u003c/code\u003e trait. If a client wants to expose JMX operations on a custom reporter, the custom reporter needs to additionally implement an MBean trait that extends \u003ccode\u003ekafka.metrics.KafkaMetricsReporterMBean\u003c/code\u003e trait so that the registered MBean is compliant with the standard MBean convention.\u003c/p\u003e\n","Type":"list","Default":"\"\"","ValidValues":"","Importance":"low","DynamicUpdateMode":"read-only","ServerDefaultProperty":""},{"Category":"broker","Name":"listener.security.protocol.map","Description":"\n\u003cp\u003eMap between listener names and security protocols. This must be defined for the same security protocol to be usable in more than one port or IP. For example, internal and external traffic can be separated even if SSL is required for both. Concretely, the user could define listeners with names INTERNAL and EXTERNAL and this property as: `INTERNAL:SSL,EXTERNAL:SSL`. As shown, key and value are separated by a colon and map entries are separated by commas. Each listener name should only appear once in the map. Different security (SSL and SASL) settings can be configured for each listener by adding a normalised prefix (the listener name is lowercased) to the config name. For example, to set a different keystore for the INTERNAL listener, a config with name \u003ccode\u003elistener.name.internal.ssl.keystore.location\u003c/code\u003e would be set. If the config for the listener name is not set, the config will fallback to the generic config (i.e. \u003ccode\u003essl.keystore.location\u003c/code\u003e). \u003c/p\u003e\n","Type":"string","Default":"PLAINTEXT:PLAINTEXT,SSL:SSL,SASL_PLAINTEXT:SASL_PLAINTEXT,SASL_SSL:SASL_SSL","ValidValues":"","Importance":"low","DynamicUpdateMode":"per-broker","ServerDefaultProperty":""},{"Category":"broker","Name":"log.message.downconversion.enable","Description":"\n\u003cp\u003eThis configuration controls whether down-conversion of message formats is enabled to satisfy consume requests. When set to \u003ccode\u003efalse\u003c/code\u003e, broker will not perform down-conversion for consumers expecting an older message format. The broker responds with \u003ccode\u003eUNSUPPORTED_VERSION\u003c/code\u003e error for consume requests from such older clients. 
This configuration does not apply to any message format conversion that might be required for replication to followers.\u003c/p\u003e\n","Type":"boolean","Default":"true","ValidValues":"","Importance":"low","DynamicUpdateMode":"cluster-wide","ServerDefaultProperty":""},{"Category":"broker","Name":"metric.reporters","Description":"\n\u003cp\u003eA list of classes to use as metrics reporters. Implementing the \u003ccode\u003eorg.apache.kafka.common.metrics.MetricsReporter\u003c/code\u003e interface allows plugging in classes that will be notified of new metric creation. The JmxReporter is always included to register JMX statistics.\u003c/p\u003e\n","Type":"list","Default":"\"\"","ValidValues":"","Importance":"low","DynamicUpdateMode":"cluster-wide","ServerDefaultProperty":""},{"Category":"broker","Name":"metrics.num.samples","Description":"\n\u003cp\u003eThe number of samples maintained to compute metrics.\u003c/p\u003e\n","Type":"int","Default":"2","ValidValues":"[1,...]","Importance":"low","DynamicUpdateMode":"read-only","ServerDefaultProperty":""},{"Category":"broker","Name":"metrics.recording.level","Description":"\n\u003cp\u003eThe highest recording level for metrics.\u003c/p\u003e\n","Type":"string","Default":"INFO","ValidValues":"","Importance":"low","DynamicUpdateMode":"read-only","ServerDefaultProperty":""},{"Category":"broker","Name":"metrics.sample.window.ms","Description":"\n\u003cp\u003eThe window of time a metrics sample is computed over.\u003c/p\u003e\n","Type":"long","Default":"30000","ValidValues":"[1,...]","Importance":"low","DynamicUpdateMode":"read-only","ServerDefaultProperty":""},{"Category":"broker","Name":"password.encoder.cipher.algorithm","Description":"\n\u003cp\u003eThe Cipher algorithm used for encoding dynamically configured passwords.\u003c/p\u003e\n","Type":"string","Default":"AES/CBC/PKCS5Padding","ValidValues":"","Importance":"low","DynamicUpdateMode":"read-only","ServerDefaultProperty":""},{"Category":"broker","Name":"password.encoder.iterations","Description":"\n\u003cp\u003eThe iteration count used for encoding dynamically configured passwords.\u003c/p\u003e\n","Type":"int","Default":"4096","ValidValues":"[1024,...]","Importance":"low","DynamicUpdateMode":"read-only","ServerDefaultProperty":""},{"Category":"broker","Name":"password.encoder.key.length","Description":"\n\u003cp\u003eThe key length used for encoding dynamically configured passwords.\u003c/p\u003e\n","Type":"int","Default":"128","ValidValues":"[8,...]","Importance":"low","DynamicUpdateMode":"read-only","ServerDefaultProperty":""},{"Category":"broker","Name":"password.encoder.keyfactory.algorithm","Description":"\n\u003cp\u003eThe SecretKeyFactory algorithm used for encoding dynamically configured passwords. 
Default is PBKDF2WithHmacSHA512 if available and PBKDF2WithHmacSHA1 otherwise.\u003c/p\u003e\n","Type":"string","Default":"null","ValidValues":"","Importance":"low","DynamicUpdateMode":"read-only","ServerDefaultProperty":""},{"Category":"broker","Name":"quota.window.num","Description":"\n\u003cp\u003eThe number of samples to retain in memory for client quotas\u003c/p\u003e\n","Type":"int","Default":"11","ValidValues":"[1,...]","Importance":"low","DynamicUpdateMode":"read-only","ServerDefaultProperty":""},{"Category":"broker","Name":"quota.window.size.seconds","Description":"\n\u003cp\u003eThe time span of each sample for client quotas\u003c/p\u003e\n","Type":"int","Default":"1","ValidValues":"[1,...]","Importance":"low","DynamicUpdateMode":"read-only","ServerDefaultProperty":""},{"Category":"broker","Name":"replication.quota.window.num","Description":"\n\u003cp\u003eThe number of samples to retain in memory for replication quotas\u003c/p\u003e\n","Type":"int","Default":"11","ValidValues":"[1,...]","Importance":"low","DynamicUpdateMode":"read-only","ServerDefaultProperty":""},{"Category":"broker","Name":"replication.quota.window.size.seconds","Description":"\n\u003cp\u003eThe time span of each sample for replication quotas\u003c/p\u003e\n","Type":"int","Default":"1","ValidValues":"[1,...]","Importance":"low","DynamicUpdateMode":"read-only","ServerDefaultProperty":""},{"Category":"broker","Name":"security.providers","Description":"\n\u003cp\u003eA list of configurable creator classes each returning a provider implementing security algorithms. These classes should implement the \u003ccode\u003eorg.apache.kafka.common.security.auth.SecurityProviderCreator\u003c/code\u003e interface.\u003c/p\u003e\n","Type":"string","Default":"null","ValidValues":"","Importance":"low","DynamicUpdateMode":"read-only","ServerDefaultProperty":""},{"Category":"broker","Name":"ssl.endpoint.identification.algorithm","Description":"\n\u003cp\u003eThe endpoint identification algorithm to validate server hostname using server certificate. \u003c/p\u003e\n","Type":"string","Default":"https","ValidValues":"","Importance":"low","DynamicUpdateMode":"per-broker","ServerDefaultProperty":""},{"Category":"broker","Name":"ssl.principal.mapping.rules","Description":"\n\u003cp\u003eA list of rules for mapping from distinguished name from the client certificate to short name. The rules are evaluated in order and the first rule that matches a principal name is used to map it to a short name. Any later rules in the list are ignored. By default, distinguished name of the X.500 certificate will be the principal. For more details on the format please see \u003ca href=\"#security_authz\"\u003e security authorization and acls\u003c/a\u003e. Note that this configuration is ignored if an extension of KafkaPrincipalBuilder is provided by the \u003ccode\u003eprincipal.builder.class\u003c/code\u003e configuration.\u003c/p\u003e\n","Type":"string","Default":"DEFAULT","ValidValues":"","Importance":"low","DynamicUpdateMode":"read-only","ServerDefaultProperty":""},{"Category":"broker","Name":"ssl.secure.random.implementation","Description":"\n\u003cp\u003eThe SecureRandom PRNG implementation to use for SSL cryptography operations. 
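For example, \u003ccode\u003eSHA1PRNG\u003c/code\u003e (an implementation name available in typical JVMs; left unset, the JVM default is used). 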
\u003c/p\u003e\n","Type":"string","Default":"null","ValidValues":"","Importance":"low","DynamicUpdateMode":"per-broker","ServerDefaultProperty":""},{"Category":"broker","Name":"transaction.abort.timed.out.transaction.cleanup.interval.ms","Description":"\n\u003cp\u003eThe interval at which to rollback transactions that have timed out\u003c/p\u003e\n","Type":"int","Default":"10000","ValidValues":"[1,...]","Importance":"low","DynamicUpdateMode":"read-only","ServerDefaultProperty":""},{"Category":"broker","Name":"transaction.remove.expired.transaction.cleanup.interval.ms","Description":"\n\u003cp\u003eThe interval at which to remove transactions that have expired due to \u003ccode\u003etransactional.id.expiration.ms\u003c/code\u003e passing\u003c/p\u003e\n","Type":"int","Default":"3600000","ValidValues":"[1,...]","Importance":"low","DynamicUpdateMode":"read-only","ServerDefaultProperty":""},{"Category":"broker","Name":"zookeeper.ssl.cipher.suites","Description":"\n\u003cp\u003eSpecifies the enabled cipher suites to be used in ZooKeeper TLS negotiation (csv). Overrides any explicit value set via the \u003ccode\u003ezookeeper.ssl.ciphersuites\u003c/code\u003e system property (note the single word \u0026#34;ciphersuites\u0026#34;). The default value of \u003ccode\u003enull\u003c/code\u003e means the list of enabled cipher suites is determined by the Java runtime being used.\u003c/p\u003e\n","Type":"list","Default":"null","ValidValues":"","Importance":"low","DynamicUpdateMode":"read-only","ServerDefaultProperty":""},{"Category":"broker","Name":"zookeeper.ssl.crl.enable","Description":"\n\u003cp\u003eSpecifies whether to enable Certificate Revocation List in the ZooKeeper TLS protocols. Overrides any explicit value set via the \u003ccode\u003ezookeeper.ssl.crl\u003c/code\u003e system property (note the shorter name).\u003c/p\u003e\n","Type":"boolean","Default":"false","ValidValues":"","Importance":"low","DynamicUpdateMode":"read-only","ServerDefaultProperty":""},{"Category":"broker","Name":"zookeeper.ssl.enabled.protocols","Description":"\n\u003cp\u003eSpecifies the enabled protocol(s) in ZooKeeper TLS negotiation (csv). Overrides any explicit value set via the \u003ccode\u003ezookeeper.ssl.enabledProtocols\u003c/code\u003e system property (note the camelCase). The default value of \u003ccode\u003enull\u003c/code\u003e means the enabled protocol will be the value of the \u003ccode\u003ezookeeper.ssl.protocol\u003c/code\u003e configuration property.\u003c/p\u003e\n","Type":"list","Default":"null","ValidValues":"","Importance":"low","DynamicUpdateMode":"read-only","ServerDefaultProperty":""},{"Category":"broker","Name":"zookeeper.ssl.endpoint.identification.algorithm","Description":"\n\u003cp\u003eSpecifies whether to enable hostname verification in the ZooKeeper TLS negotiation process, with (case-insensitively) \u0026#34;https\u0026#34; meaning ZooKeeper hostname verification is enabled and an explicit blank value meaning it is disabled (disabling it is only recommended for testing purposes). 
An explicit value overrides any \u0026#34;true\u0026#34; or \u0026#34;false\u0026#34; value set via the \u003ccode\u003ezookeeper.ssl.hostnameVerification\u003c/code\u003e system property (note the different name and values; true implies https and false implies blank).\u003c/p\u003e\n","Type":"string","Default":"HTTPS","ValidValues":"","Importance":"low","DynamicUpdateMode":"read-only","ServerDefaultProperty":""},{"Category":"broker","Name":"zookeeper.ssl.ocsp.enable","Description":"\n\u003cp\u003eSpecifies whether to enable Online Certificate Status Protocol in the ZooKeeper TLS protocols. Overrides any explicit value set via the \u003ccode\u003ezookeeper.ssl.ocsp\u003c/code\u003e system property (note the shorter name).\u003c/p\u003e\n","Type":"boolean","Default":"false","ValidValues":"","Importance":"low","DynamicUpdateMode":"read-only","ServerDefaultProperty":""},{"Category":"broker","Name":"zookeeper.ssl.protocol","Description":"\n\u003cp\u003eSpecifies the protocol to be used in ZooKeeper TLS negotiation. An explicit value overrides any value set via the same-named \u003ccode\u003ezookeeper.ssl.protocol\u003c/code\u003e system property.\u003c/p\u003e\n","Type":"string","Default":"TLSv1.2","ValidValues":"","Importance":"low","DynamicUpdateMode":"read-only","ServerDefaultProperty":""},{"Category":"broker","Name":"zookeeper.sync.time.ms","Description":"\n\u003cp\u003eHow far a ZK follower can be behind a ZK leader\u003c/p\u003e\n","Type":"int","Default":"2000","ValidValues":"","Importance":"low","DynamicUpdateMode":"read-only","ServerDefaultProperty":""},{"Category":"topic","Name":"cleanup.policy","Description":"\n\u003cp\u003eA string that is either \u0026#34;delete\u0026#34; or \u0026#34;compact\u0026#34; or both. This string designates the retention policy to use on old log segments. The default policy (\u0026#34;delete\u0026#34;) will discard old segments when their retention time or size limit has been reached. The \u0026#34;compact\u0026#34; setting will enable \u003ca href=\"#compaction\"\u003elog compaction\u003c/a\u003e on the topic.\u003c/p\u003e\n","Type":"list","Default":"delete","ValidValues":"[compact, delete]","Importance":"medium","DynamicUpdateMode":"","ServerDefaultProperty":"log.cleanup.policy"},{"Category":"topic","Name":"compression.type","Description":"\n\u003cp\u003eSpecify the final compression type for a given topic. This configuration accepts the standard compression codecs (\u0026#39;gzip\u0026#39;, \u0026#39;snappy\u0026#39;, \u0026#39;lz4\u0026#39;, \u0026#39;zstd\u0026#39;). It additionally accepts \u0026#39;uncompressed\u0026#39; which is equivalent to no compression; and \u0026#39;producer\u0026#39; which means retain the original compression codec set by the producer.\u003c/p\u003e\n","Type":"string","Default":"producer","ValidValues":"[uncompressed, zstd, lz4, snappy, gzip, producer]","Importance":"medium","DynamicUpdateMode":"","ServerDefaultProperty":"compression.type"},{"Category":"topic","Name":"delete.retention.ms","Description":"\n\u003cp\u003eThe amount of time to retain delete tombstone markers for \u003ca href=\"#compaction\"\u003elog compacted\u003c/a\u003e topics. 
This setting also gives a bound on the time in which a consumer must complete a read if they begin from offset 0 to ensure that they get a valid snapshot of the final stage (otherwise delete tombstones may be collected before they complete their scan).\u003c/p\u003e\n","Type":"long","Default":"86400000","ValidValues":"[0,...]","Importance":"medium","DynamicUpdateMode":"","ServerDefaultProperty":"log.cleaner.delete.retention.ms"},{"Category":"topic","Name":"file.delete.delay.ms","Description":"\n\u003cp\u003eThe time to wait before deleting a file from the filesystem\u003c/p\u003e\n","Type":"long","Default":"60000","ValidValues":"[0,...]","Importance":"medium","DynamicUpdateMode":"","ServerDefaultProperty":"log.segment.delete.delay.ms"},{"Category":"topic","Name":"flush.messages","Description":"\n\u003cp\u003eThis setting allows specifying an interval at which we will force an fsync of data written to the log. For example if this was set to 1 we would fsync after every message; if it were 5 we would fsync after every five messages. In general we recommend you not set this and use replication for durability and allow the operating system\u0026#39;s background flush capabilities as it is more efficient. This setting can be overridden on a per-topic basis (see \u003ca href=\"#topicconfigs\"\u003ethe per-topic configuration section\u003c/a\u003e).\u003c/p\u003e\n","Type":"long","Default":"9223372036854775807","ValidValues":"[0,...]","Importance":"medium","DynamicUpdateMode":"","ServerDefaultProperty":"log.flush.interval.messages"},{"Category":"topic","Name":"flush.ms","Description":"\n\u003cp\u003eThis setting allows specifying a time interval at which we will force an fsync of data written to the log. For example if this was set to 1000 we would fsync after 1000 ms had passed. In general we recommend you not set this and use replication for durability and allow the operating system\u0026#39;s background flush capabilities as it is more efficient.\u003c/p\u003e\n","Type":"long","Default":"9223372036854775807","ValidValues":"[0,...]","Importance":"medium","DynamicUpdateMode":"","ServerDefaultProperty":"log.flush.interval.ms"},{"Category":"topic","Name":"follower.replication.throttled.replicas","Description":"\n\u003cp\u003eA list of replicas for which log replication should be throttled on the follower side. The list should describe a set of replicas in the form [PartitionId]:[BrokerId],[PartitionId]:[BrokerId]:... or alternatively the wildcard \u0026#39;*\u0026#39; can be used to throttle all replicas for this topic.\u003c/p\u003e\n","Type":"list","Default":"\"\"","ValidValues":"[partitionId]:[brokerId],[partitionId]:[brokerId],...","Importance":"medium","DynamicUpdateMode":"","ServerDefaultProperty":"follower.replication.throttled.replicas"},{"Category":"topic","Name":"index.interval.bytes","Description":"\n\u003cp\u003eThis setting controls how frequently Kafka adds an index entry to its offset index. The default setting ensures that we index a message roughly every 4096 bytes. More indexing allows reads to jump closer to the exact position in the log but makes the index larger. You probably don\u0026#39;t need to change this.\u003c/p\u003e\n","Type":"int","Default":"4096","ValidValues":"[0,...]","Importance":"medium","DynamicUpdateMode":"","ServerDefaultProperty":"log.index.interval.bytes"},{"Category":"topic","Name":"leader.replication.throttled.replicas","Description":"\n\u003cp\u003eA list of replicas for which log replication should be throttled on the leader side. 
The list should describe a set of replicas in the form [PartitionId]:[BrokerId],[PartitionId]:[BrokerId]:... or alternatively the wildcard \u0026#39;*\u0026#39; can be used to throttle all replicas for this topic.\u003c/p\u003e\n","Type":"list","Default":"\"\"","ValidValues":"[partitionId]:[brokerId],[partitionId]:[brokerId],...","Importance":"medium","DynamicUpdateMode":"","ServerDefaultProperty":"leader.replication.throttled.replicas"},{"Category":"topic","Name":"max.compaction.lag.ms","Description":"\n\u003cp\u003eThe maximum time a message will remain ineligible for compaction in the log. Only applicable for logs that are being compacted.\u003c/p\u003e\n","Type":"long","Default":"9223372036854775807","ValidValues":"[1,...]","Importance":"medium","DynamicUpdateMode":"","ServerDefaultProperty":"log.cleaner.max.compaction.lag.ms"},{"Category":"topic","Name":"max.message.bytes","Description":"\n\u003cp\u003eThe largest record batch size allowed by Kafka (after compression if compression is enabled). If this is increased and there are consumers older than 0.10.2, the consumers\u0026#39; fetch size must also be increased so that they can fetch record batches this large. In the latest message format version, records are always grouped into batches for efficiency. In previous message format versions, uncompressed records are not grouped into batches and this limit only applies to a single record in that case.\u003c/p\u003e\n","Type":"int","Default":"1048588","ValidValues":"[0,...]","Importance":"medium","DynamicUpdateMode":"","ServerDefaultProperty":"message.max.bytes"},{"Category":"topic","Name":"message.format.version","Description":"\n\u003cp\u003eSpecify the message format version the broker will use to append messages to the logs. The value should be a valid ApiVersion. Some examples are: 0.8.2, 0.9.0.0, 0.10.0; check ApiVersion for more details. By setting a particular message format version, the user is certifying that all the existing messages on disk are smaller than or equal to the specified version. Setting this value incorrectly will cause consumers with older versions to break as they will receive messages with a format that they don\u0026#39;t understand.\u003c/p\u003e\n","Type":"string","Default":"2.5-IV0","ValidValues":"[0.8.0, 0.8.1, 0.8.2, 0.9.0, 0.10.0-IV0, 0.10.0-IV1, 0.10.1-IV0, 0.10.1-IV1, 0.10.1-IV2, 0.10.2-IV0, 0.11.0-IV0, 0.11.0-IV1, 0.11.0-IV2, 1.0-IV0, 1.1-IV0, 2.0-IV0, 2.0-IV1, 2.1-IV0, 2.1-IV1, 2.1-IV2, 2.2-IV0, 2.2-IV1, 2.3-IV0, 2.3-IV1, 2.4-IV0, 2.4-IV1, 2.5-IV0]","Importance":"medium","DynamicUpdateMode":"","ServerDefaultProperty":"log.message.format.version"},{"Category":"topic","Name":"message.timestamp.difference.max.ms","Description":"\n\u003cp\u003eThe maximum difference allowed between the timestamp when a broker receives a message and the timestamp specified in the message. If message.timestamp.type=CreateTime, a message will be rejected if the difference in timestamp exceeds this threshold. This configuration is ignored if message.timestamp.type=LogAppendTime.\u003c/p\u003e\n","Type":"long","Default":"9223372036854775807","ValidValues":"[0,...]","Importance":"medium","DynamicUpdateMode":"","ServerDefaultProperty":"log.message.timestamp.difference.max.ms"},{"Category":"topic","Name":"message.timestamp.type","Description":"\n\u003cp\u003eDefine whether the timestamp in the message is message create time or log append time. 
The value should be either `CreateTime` or `LogAppendTime`.\u003c/p\u003e\n","Type":"string","Default":"CreateTime","ValidValues":"[CreateTime, LogAppendTime]","Importance":"medium","DynamicUpdateMode":"","ServerDefaultProperty":"log.message.timestamp.type"},{"Category":"topic","Name":"min.cleanable.dirty.ratio","Description":"\n\u003cp\u003eThis configuration controls how frequently the log compactor will attempt to clean the log (assuming \u003ca href=\"#compaction\"\u003elog compaction\u003c/a\u003e is enabled). By default we will avoid cleaning a log where more than 50% of the log has been compacted. This ratio bounds the maximum space wasted in the log by duplicates (at 50% at most 50% of the log could be duplicates). A higher ratio will mean fewer, more efficient cleanings but will mean more wasted space in the log. If the max.compaction.lag.ms or the min.compaction.lag.ms configurations are also specified, then the log compactor considers the log to be eligible for compaction as soon as either: (i) the dirty ratio threshold has been met and the log has had dirty (uncompacted) records for at least the min.compaction.lag.ms duration, or (ii) the log has had dirty (uncompacted) records for at most the max.compaction.lag.ms period.\u003c/p\u003e\n","Type":"double","Default":"0.5","ValidValues":"[0,...,1]","Importance":"medium","DynamicUpdateMode":"","ServerDefaultProperty":"log.cleaner.min.cleanable.ratio"},{"Category":"topic","Name":"min.compaction.lag.ms","Description":"\n\u003cp\u003eThe minimum time a message will remain uncompacted in the log. Only applicable for logs that are being compacted.\u003c/p\u003e\n","Type":"long","Default":"0","ValidValues":"[0,...]","Importance":"medium","DynamicUpdateMode":"","ServerDefaultProperty":"log.cleaner.min.compaction.lag.ms"},{"Category":"topic","Name":"min.insync.replicas","Description":"\n\u003cp\u003eWhen a producer sets acks to \u0026#34;all\u0026#34; (or \u0026#34;-1\u0026#34;), this configuration specifies the minimum number of replicas that must acknowledge a write for the write to be considered successful. If this minimum cannot be met, then the producer will raise an exception (either NotEnoughReplicas or NotEnoughReplicasAfterAppend).\u003cbr/\u003eWhen used together, \u003ccode\u003emin.insync.replicas\u003c/code\u003e and \u003ccode\u003eacks\u003c/code\u003e allow you to enforce greater durability guarantees. A typical scenario would be to create a topic with a replication factor of 3, set \u003ccode\u003emin.insync.replicas\u003c/code\u003e to 2, and produce with \u003ccode\u003eacks\u003c/code\u003e of \u0026#34;all\u0026#34;. This will ensure that the producer raises an exception if a majority of replicas do not receive a write.\u003c/p\u003e\n","Type":"int","Default":"1","ValidValues":"[1,...]","Importance":"medium","DynamicUpdateMode":"","ServerDefaultProperty":"min.insync.replicas"},{"Category":"topic","Name":"preallocate","Description":"\n\u003cp\u003eTrue if we should preallocate the file on disk when creating a new log segment.\u003c/p\u003e\n","Type":"boolean","Default":"false","ValidValues":"","Importance":"medium","DynamicUpdateMode":"","ServerDefaultProperty":"log.preallocate"},{"Category":"topic","Name":"retention.bytes","Description":"\n\u003cp\u003eThis configuration controls the maximum size a partition (which consists of log segments) can grow to before we will discard old log segments to free up space if we are using the \u0026#34;delete\u0026#34; retention policy. 
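As an illustrative calculation, a topic with 10 partitions and \u003ccode\u003eretention.bytes=1073741824\u003c/code\u003e (1 GiB per partition) can retain roughly 10 GiB in total. 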
By default there is no size limit, only a time limit. Since this limit is enforced at the partition level, multiply it by the number of partitions to compute the topic retention in bytes.\u003c/p\u003e\n","Type":"long","Default":"-1","ValidValues":"","Importance":"medium","DynamicUpdateMode":"","ServerDefaultProperty":"log.retention.bytes"},{"Category":"topic","Name":"retention.ms","Description":"\n\u003cp\u003eThis configuration controls the maximum time we will retain a log before we will discard old log segments to free up space if we are using the \u0026#34;delete\u0026#34; retention policy. This represents an SLA on how soon consumers must read their data. If set to -1, no time limit is applied.\u003c/p\u003e\n","Type":"long","Default":"604800000","ValidValues":"[-1,...]","Importance":"medium","DynamicUpdateMode":"","ServerDefaultProperty":"log.retention.ms"},{"Category":"topic","Name":"segment.bytes","Description":"\n\u003cp\u003eThis configuration controls the segment file size for the log. Retention and cleaning are always done a file at a time, so a larger segment size means fewer files but less granular control over retention.\u003c/p\u003e\n","Type":"int","Default":"1073741824","ValidValues":"[14,...]","Importance":"medium","DynamicUpdateMode":"","ServerDefaultProperty":"log.segment.bytes"},{"Category":"topic","Name":"segment.index.bytes","Description":"\n\u003cp\u003eThis configuration controls the size of the index that maps offsets to file positions. We preallocate this index file and shrink it only after log rolls. You generally should not need to change this setting.\u003c/p\u003e\n","Type":"int","Default":"10485760","ValidValues":"[0,...]","Importance":"medium","DynamicUpdateMode":"","ServerDefaultProperty":"log.index.size.max.bytes"},{"Category":"topic","Name":"segment.jitter.ms","Description":"\n\u003cp\u003eThe maximum random jitter subtracted from the scheduled segment roll time to avoid thundering herds of segment rolling\u003c/p\u003e\n","Type":"long","Default":"0","ValidValues":"[0,...]","Importance":"medium","DynamicUpdateMode":"","ServerDefaultProperty":"log.roll.jitter.ms"},{"Category":"topic","Name":"segment.ms","Description":"\n\u003cp\u003eThis configuration controls the period of time after which Kafka will force the log to roll even if the segment file isn\u0026#39;t full to ensure that retention can delete or compact old data.\u003c/p\u003e\n","Type":"long","Default":"604800000","ValidValues":"[1,...]","Importance":"medium","DynamicUpdateMode":"","ServerDefaultProperty":"log.roll.ms"},{"Category":"topic","Name":"unclean.leader.election.enable","Description":"\n\u003cp\u003eIndicates whether to enable replicas not in the ISR set to be elected as leader as a last resort, even though doing so may result in data loss.\u003c/p\u003e\n","Type":"boolean","Default":"false","ValidValues":"","Importance":"medium","DynamicUpdateMode":"","ServerDefaultProperty":"unclean.leader.election.enable"},{"Category":"topic","Name":"message.downconversion.enable","Description":"\n\u003cp\u003eThis configuration controls whether down-conversion of message formats is enabled to satisfy consume requests. When set to \u003ccode\u003efalse\u003c/code\u003e, broker will not perform down-conversion for consumers expecting an older message format. The broker responds with \u003ccode\u003eUNSUPPORTED_VERSION\u003c/code\u003e error for consume requests from such older clients. 
This configuration does not apply to any message format conversion that might be required for replication to followers.\u003c/p\u003e\n","Type":"boolean","Default":"true","ValidValues":"","Importance":"low","DynamicUpdateMode":"","ServerDefaultProperty":"log.message.downconversion.enable"},{"Category":"producer","Name":"key.serializer","Description":"\n\u003cp\u003eSerializer class for key that implements the \u003ccode\u003eorg.apache.kafka.common.serialization.Serializer\u003c/code\u003e interface.\u003c/p\u003e\n","Type":"class","Default":"","ValidValues":"","Importance":"high","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"producer","Name":"value.serializer","Description":"\n\u003cp\u003eSerializer class for value that implements the \u003ccode\u003eorg.apache.kafka.common.serialization.Serializer\u003c/code\u003e interface.\u003c/p\u003e\n","Type":"class","Default":"","ValidValues":"","Importance":"high","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"producer","Name":"acks","Description":"\n\u003cp\u003eThe number of acknowledgments the producer requires the leader to have received before considering a request complete. This controls the durability of records that are sent. The following settings are allowed: \u003c/p\u003e\u003cul\u003e \u003cli\u003e\u003ccode\u003eacks=0\u003c/code\u003e If set to zero then the producer will not wait for any acknowledgment from the server at all. The record will be immediately added to the socket buffer and considered sent. No guarantee can be made that the server has received the record in this case, and the \u003ccode\u003eretries\u003c/code\u003e configuration will not take effect (as the client won\u0026#39;t generally know of any failures). The offset given back for each record will always be set to \u003ccode\u003e-1\u003c/code\u003e. \u003c/li\u003e\u003cli\u003e\u003ccode\u003eacks=1\u003c/code\u003e This will mean the leader will write the record to its local log but will respond without awaiting full acknowledgement from all followers. In this case, should the leader fail immediately after acknowledging the record but before the followers have replicated it, then the record will be lost. \u003c/li\u003e\u003cli\u003e\u003ccode\u003eacks=all\u003c/code\u003e This means the leader will wait for the full set of in-sync replicas to acknowledge the record. This guarantees that the record will not be lost as long as at least one in-sync replica remains alive. This is the strongest available guarantee. This is equivalent to the acks=-1 setting.\u003c/li\u003e\u003c/ul\u003e\u003cp\u003e\u003c/p\u003e\n","Type":"string","Default":"1","ValidValues":"[all, -1, 0, 1]","Importance":"high","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"producer","Name":"bootstrap.servers","Description":"\n\u003cp\u003eA list of host/port pairs to use for establishing the initial connection to the Kafka cluster. The client will make use of all servers irrespective of which servers are specified here for bootstrapping—this list only impacts the initial hosts used to discover the full set of servers. This list should be in the form \u003ccode\u003ehost1:port1,host2:port2,...\u003c/code\u003e. 
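For example, a hypothetical pair of brokers could be listed as \u003ccode\u003ebroker1.example.com:9092,broker2.example.com:9092\u003c/code\u003e. 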
Since these servers are just used for the initial connection to discover the full cluster membership (which may change dynamically), this list need not contain the full set of servers (you may want more than one, though, in case a server is down).\u003c/p\u003e\n","Type":"list","Default":"\"\"","ValidValues":"non-null string","Importance":"high","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"producer","Name":"buffer.memory","Description":"\n\u003cp\u003eThe total bytes of memory the producer can use to buffer records waiting to be sent to the server. If records are sent faster than they can be delivered to the server, the producer will block for \u003ccode\u003emax.block.ms\u003c/code\u003e, after which it will throw an exception.\u003c/p\u003e\u003cp\u003eThis setting should correspond roughly to the total memory the producer will use, but is not a hard bound since not all memory the producer uses is used for buffering. Some additional memory will be used for compression (if compression is enabled) as well as for maintaining in-flight requests.\u003c/p\u003e\n","Type":"long","Default":"33554432","ValidValues":"[0,...]","Importance":"high","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"producer","Name":"compression.type","Description":"\n\u003cp\u003eThe compression type for all data generated by the producer. The default is none (i.e. no compression). Valid values are \u003ccode\u003enone\u003c/code\u003e, \u003ccode\u003egzip\u003c/code\u003e, \u003ccode\u003esnappy\u003c/code\u003e, \u003ccode\u003elz4\u003c/code\u003e, or \u003ccode\u003ezstd\u003c/code\u003e. Compression is of full batches of data, so the efficacy of batching will also impact the compression ratio (more batching means better compression).\u003c/p\u003e\n","Type":"string","Default":"none","ValidValues":"","Importance":"high","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"producer","Name":"retries","Description":"\n\u003cp\u003eSetting a value greater than zero will cause the client to resend any record whose send fails with a potentially transient error. Note that this retry is no different than if the client resent the record upon receiving the error. Allowing retries without setting \u003ccode\u003emax.in.flight.requests.per.connection\u003c/code\u003e to 1 will potentially change the ordering of records because if two batches are sent to a single partition, and the first fails and is retried but the second succeeds, then the records in the second batch may appear first. Note additionally that produce requests will be failed before the number of retries has been exhausted if the timeout configured by \u003ccode\u003edelivery.timeout.ms\u003c/code\u003e expires first before successful acknowledgement. Users should generally prefer to leave this config unset and instead use \u003ccode\u003edelivery.timeout.ms\u003c/code\u003e to control retry behavior.\u003c/p\u003e\n","Type":"int","Default":"2147483647","ValidValues":"[0,...,2147483647]","Importance":"high","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"producer","Name":"ssl.key.password","Description":"\n\u003cp\u003eThe password of the private key in the key store file. This is optional for client.\u003c/p\u003e\n","Type":"password","Default":"null","ValidValues":"","Importance":"high","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"producer","Name":"ssl.keystore.location","Description":"\n\u003cp\u003eThe location of the key store file. 
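For example, \u003ccode\u003essl.keystore.location=/var/private/ssl/client.keystore.jks\u003c/code\u003e (an illustrative path). 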
This is optional for client and can be used for two-way authentication for client.\u003c/p\u003e\n","Type":"string","Default":"null","ValidValues":"","Importance":"high","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"producer","Name":"ssl.keystore.password","Description":"\n\u003cp\u003eThe store password for the key store file. This is optional for client and only needed if ssl.keystore.location is configured. \u003c/p\u003e\n","Type":"password","Default":"null","ValidValues":"","Importance":"high","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"producer","Name":"ssl.truststore.location","Description":"\n\u003cp\u003eThe location of the trust store file. \u003c/p\u003e\n","Type":"string","Default":"null","ValidValues":"","Importance":"high","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"producer","Name":"ssl.truststore.password","Description":"\n\u003cp\u003eThe password for the trust store file. If a password is not set access to the truststore is still available, but integrity checking is disabled.\u003c/p\u003e\n","Type":"password","Default":"null","ValidValues":"","Importance":"high","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"producer","Name":"batch.size","Description":"\n\u003cp\u003eThe producer will attempt to batch records together into fewer requests whenever multiple records are being sent to the same partition. This helps performance on both the client and the server. This configuration controls the default batch size in bytes. \u003c/p\u003e\u003cp\u003eNo attempt will be made to batch records larger than this size. \u003c/p\u003e\u003cp\u003eRequests sent to brokers will contain multiple batches, one for each partition with data available to be sent. \u003c/p\u003e\u003cp\u003eA small batch size will make batching less common and may reduce throughput (a batch size of zero will disable batching entirely). A very large batch size may use memory a bit more wastefully as we will always allocate a buffer of the specified batch size in anticipation of additional records.\u003c/p\u003e\n","Type":"int","Default":"16384","ValidValues":"[0,...]","Importance":"medium","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"producer","Name":"client.dns.lookup","Description":"\n\u003cp\u003eControls how the client uses DNS lookups. If set to \u003ccode\u003euse_all_dns_ips\u003c/code\u003e then, when the lookup returns multiple IP addresses for a hostname, they will all be attempted to connect to before failing the connection. Applies to both bootstrap and advertised servers. If the value is \u003ccode\u003eresolve_canonical_bootstrap_servers_only\u003c/code\u003e each entry will be resolved and expanded into a list of canonical names.\u003c/p\u003e\n","Type":"string","Default":"default","ValidValues":"[default, use_all_dns_ips, resolve_canonical_bootstrap_servers_only]","Importance":"medium","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"producer","Name":"client.id","Description":"\n\u003cp\u003eAn id string to pass to the server when making requests. 
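The ssl.keystore.*/ssl.truststore.* keys above fit together as follows; a sketch with placeholder paths and passwords (supplying a keystore is what enables two-way authentication):

```java
import java.util.Properties;

// Two-way TLS client settings using the ssl.* keys described above.
final class SslClientConfig {
    static Properties sslProps() {
        Properties props = new Properties();
        props.put("security.protocol", "SSL");
        props.put("ssl.truststore.location", "/etc/kafka/client.truststore.jks");
        props.put("ssl.truststore.password", "changeit");
        props.put("ssl.keystore.location", "/etc/kafka/client.keystore.jks"); // enables client authentication
        props.put("ssl.keystore.password", "changeit");
        props.put("ssl.key.password", "changeit");
        return props;
    }
}
```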
The purpose of this is to be able to track the source of requests beyond just ip/port by allowing a logical application name to be included in server-side request logging.\u003c/p\u003e\n","Type":"string","Default":"\"\"","ValidValues":"","Importance":"medium","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"producer","Name":"connections.max.idle.ms","Description":"\n\u003cp\u003eClose idle connections after the number of milliseconds specified by this config.\u003c/p\u003e\n","Type":"long","Default":"540000","ValidValues":"","Importance":"medium","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"producer","Name":"delivery.timeout.ms","Description":"\n\u003cp\u003eAn upper bound on the time to report success or failure after a call to \u003ccode\u003esend()\u003c/code\u003e returns. This limits the total time that a record will be delayed prior to sending, the time to await acknowledgement from the broker (if expected), and the time allowed for retriable send failures. The producer may report failure to send a record earlier than this config if either an unrecoverable error is encountered, the retries have been exhausted, or the record is added to a batch which reached an earlier delivery expiration deadline. The value of this config should be greater than or equal to the sum of \u003ccode\u003erequest.timeout.ms\u003c/code\u003e and \u003ccode\u003elinger.ms\u003c/code\u003e.\u003c/p\u003e\n","Type":"int","Default":"120000","ValidValues":"[0,...]","Importance":"medium","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"producer","Name":"linger.ms","Description":"\n\u003cp\u003eThe producer groups together any records that arrive in between request transmissions into a single batched request. Normally this occurs only under load when records arrive faster than they can be sent out. However in some circumstances the client may want to reduce the number of requests even under moderate load. This setting accomplishes this by adding a small amount of artificial delay—that is, rather than immediately sending out a record the producer will wait for up to the given delay to allow other records to be sent so that the sends can be batched together. This can be thought of as analogous to Nagle\u0026#39;s algorithm in TCP. This setting gives the upper bound on the delay for batching: once we get \u003ccode\u003ebatch.size\u003c/code\u003e worth of records for a partition it will be sent immediately regardless of this setting, however if we have fewer than this many bytes accumulated for this partition we will \u0026#39;linger\u0026#39; for the specified time waiting for more records to show up. This setting defaults to 0 (i.e. no delay). 
Setting \u003ccode\u003elinger.ms=5\u003c/code\u003e, for example, would have the effect of reducing the number of requests sent but would add up to 5ms of latency to records sent in the absence of load.\u003c/p\u003e\n","Type":"long","Default":"0","ValidValues":"[0,...]","Importance":"medium","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"producer","Name":"max.block.ms","Description":"\n\u003cp\u003eThe configuration controls how long \u003ccode\u003eKafkaProducer.send()\u003c/code\u003e and \u003ccode\u003eKafkaProducer.partitionsFor()\u003c/code\u003e will block. These methods can be blocked either because the buffer is full or metadata is unavailable. Blocking in the user-supplied serializers or partitioner will not be counted against this timeout.\u003c/p\u003e\n","Type":"long","Default":"60000","ValidValues":"[0,...]","Importance":"medium","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"producer","Name":"max.request.size","Description":"\n\u003cp\u003eThe maximum size of a request in bytes. This setting will limit the number of record batches the producer will send in a single request to avoid sending huge requests. This is also effectively a cap on the maximum uncompressed record batch size. Note that the server has its own cap on the record batch size (after compression if compression is enabled) which may be different from this.\u003c/p\u003e\n","Type":"int","Default":"1048576","ValidValues":"[0,...]","Importance":"medium","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"producer","Name":"partitioner.class","Description":"\n\u003cp\u003ePartitioner class that implements the \u003ccode\u003eorg.apache.kafka.clients.producer.Partitioner\u003c/code\u003e interface.\u003c/p\u003e\n","Type":"class","Default":"org.apache.kafka.clients.producer.internals.DefaultPartitioner","ValidValues":"","Importance":"medium","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"producer","Name":"receive.buffer.bytes","Description":"\n\u003cp\u003eThe size of the TCP receive buffer (SO_RCVBUF) to use when reading data. If the value is -1, the OS default will be used.\u003c/p\u003e\n","Type":"int","Default":"32768","ValidValues":"[-1,...]","Importance":"medium","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"producer","Name":"request.timeout.ms","Description":"\n\u003cp\u003eThe configuration controls the maximum amount of time the client will wait for the response of a request. If the response is not received before the timeout elapses the client will resend the request if necessary or fail the request if retries are exhausted. This should be larger than \u003ccode\u003ereplica.lag.time.max.ms\u003c/code\u003e (a broker configuration) to reduce the possibility of message duplication due to unnecessary producer retries.\u003c/p\u003e\n","Type":"int","Default":"30000","ValidValues":"[0,...]","Importance":"medium","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"producer","Name":"sasl.client.callback.handler.class","Description":"\n\u003cp\u003eThe fully qualified name of a SASL client callback handler class that implements the AuthenticateCallbackHandler interface.\u003c/p\u003e\n","Type":"class","Default":"null","ValidValues":"","Importance":"medium","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"producer","Name":"sasl.jaas.config","Description":"\n\u003cp\u003eJAAS login context parameters for SASL connections in the format used by JAAS configuration files. 
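A sketch tying together the batching and timeout knobs above, honoring the documented constraint that delivery.timeout.ms should be at least request.timeout.ms plus linger.ms; values are illustrative:

```java
import java.util.Properties;

// Batching and timeout settings, per the entries above.
final class BatchingConfig {
    static Properties batchingProps() {
        Properties props = new Properties();
        props.put("batch.size", "16384");           // bytes per partition batch
        props.put("linger.ms", "5");                // wait up to 5 ms for a fuller batch
        props.put("request.timeout.ms", "30000");
        props.put("delivery.timeout.ms", "120000"); // 120000 >= 30000 + 5
        props.put("max.block.ms", "60000");         // cap send()/partitionsFor() blocking
        return props;
    }
}
```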
JAAS configuration file format is described \u003ca href=\"http://docs.oracle.com/javase/8/docs/technotes/guides/security/jgss/tutorials/LoginConfigFile.html\"\u003ehere\u003c/a\u003e. The format for the value is: \u0026#39;\u003ccode\u003eloginModuleClass controlFlag (optionName=optionValue)*;\u003c/code\u003e\u0026#39;. For brokers, the config must be prefixed with listener prefix and SASL mechanism name in lower-case. For example, listener.name.sasl_ssl.scram-sha-256.sasl.jaas.config=com.example.ScramLoginModule required;\u003c/p\u003e\n","Type":"password","Default":"null","ValidValues":"","Importance":"medium","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"producer","Name":"sasl.kerberos.service.name","Description":"\n\u003cp\u003eThe Kerberos principal name that Kafka runs as. This can be defined either in Kafka\u0026#39;s JAAS config or in Kafka\u0026#39;s config.\u003c/p\u003e\n","Type":"string","Default":"null","ValidValues":"","Importance":"medium","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"producer","Name":"sasl.login.callback.handler.class","Description":"\n\u003cp\u003eThe fully qualified name of a SASL login callback handler class that implements the AuthenticateCallbackHandler interface. For brokers, login callback handler config must be prefixed with listener prefix and SASL mechanism name in lower-case. For example, listener.name.sasl_ssl.scram-sha-256.sasl.login.callback.handler.class=com.example.CustomScramLoginCallbackHandler\u003c/p\u003e\n","Type":"class","Default":"null","ValidValues":"","Importance":"medium","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"producer","Name":"sasl.login.class","Description":"\n\u003cp\u003eThe fully qualified name of a class that implements the Login interface. For brokers, login config must be prefixed with listener prefix and SASL mechanism name in lower-case. For example, listener.name.sasl_ssl.scram-sha-256.sasl.login.class=com.example.CustomScramLogin\u003c/p\u003e\n","Type":"class","Default":"null","ValidValues":"","Importance":"medium","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"producer","Name":"sasl.mechanism","Description":"\n\u003cp\u003eSASL mechanism used for client connections. This may be any mechanism for which a security provider is available. GSSAPI is the default mechanism.\u003c/p\u003e\n","Type":"string","Default":"GSSAPI","ValidValues":"","Importance":"medium","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"producer","Name":"security.protocol","Description":"\n\u003cp\u003eProtocol used to communicate with brokers. Valid values are: PLAINTEXT, SSL, SASL_PLAINTEXT, SASL_SSL.\u003c/p\u003e\n","Type":"string","Default":"PLAINTEXT","ValidValues":"","Importance":"medium","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"producer","Name":"send.buffer.bytes","Description":"\n\u003cp\u003eThe size of the TCP send buffer (SO_SNDBUF) to use when sending data. 
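On the client side, sasl.jaas.config, sasl.mechanism, and security.protocol are set together. A sketch for SASL/SCRAM; the credentials are placeholders:

```java
import java.util.Properties;

// SASL/SCRAM client authentication using the sasl.* keys above.
final class SaslClientConfig {
    static Properties saslProps() {
        Properties props = new Properties();
        props.put("security.protocol", "SASL_SSL");
        props.put("sasl.mechanism", "SCRAM-SHA-256");
        props.put("sasl.jaas.config",
            "org.apache.kafka.common.security.scram.ScramLoginModule required "
                + "username=\"alice\" password=\"alice-secret\";");
        return props;
    }
}
```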
If the value is -1, the OS default will be used.\u003c/p\u003e\n","Type":"int","Default":"131072","ValidValues":"[-1,...]","Importance":"medium","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"producer","Name":"ssl.enabled.protocols","Description":"\n\u003cp\u003eThe list of protocols enabled for SSL connections.\u003c/p\u003e\n","Type":"list","Default":"TLSv1.2","ValidValues":"","Importance":"medium","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"producer","Name":"ssl.keystore.type","Description":"\n\u003cp\u003eThe file format of the key store file. This is optional for client.\u003c/p\u003e\n","Type":"string","Default":"JKS","ValidValues":"","Importance":"medium","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"producer","Name":"ssl.protocol","Description":"\n\u003cp\u003eThe SSL protocol used to generate the SSLContext. Default setting is TLSv1.2, which is fine for most cases. Allowed values in recent JVMs are TLSv1.2 and TLSv1.3. TLS, TLSv1.1, SSL, SSLv2 and SSLv3 may be supported in older JVMs, but their usage is discouraged due to known security vulnerabilities.\u003c/p\u003e\n","Type":"string","Default":"TLSv1.2","ValidValues":"","Importance":"medium","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"producer","Name":"ssl.provider","Description":"\n\u003cp\u003eThe name of the security provider used for SSL connections. Default value is the default security provider of the JVM.\u003c/p\u003e\n","Type":"string","Default":"null","ValidValues":"","Importance":"medium","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"producer","Name":"ssl.truststore.type","Description":"\n\u003cp\u003eThe file format of the trust store file.\u003c/p\u003e\n","Type":"string","Default":"JKS","ValidValues":"","Importance":"medium","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"producer","Name":"enable.idempotence","Description":"\n\u003cp\u003eWhen set to \u0026#39;true\u0026#39;, the producer will ensure that exactly one copy of each message is written in the stream. If \u0026#39;false\u0026#39;, producer retries due to broker failures, etc., may write duplicates of the retried message in the stream. Note that enabling idempotence requires \u003ccode\u003emax.in.flight.requests.per.connection\u003c/code\u003e to be less than or equal to 5, \u003ccode\u003eretries\u003c/code\u003e to be greater than 0 and \u003ccode\u003eacks\u003c/code\u003e must be \u0026#39;all\u0026#39;. If these values are not explicitly set by the user, suitable values will be chosen. If incompatible values are set, a \u003ccode\u003eConfigException\u003c/code\u003e will be thrown.\u003c/p\u003e\n","Type":"boolean","Default":"false","ValidValues":"","Importance":"low","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"producer","Name":"interceptor.classes","Description":"\n\u003cp\u003eA list of classes to use as interceptors. Implementing the \u003ccode\u003eorg.apache.kafka.clients.producer.ProducerInterceptor\u003c/code\u003e interface allows you to intercept (and possibly mutate) the records received by the producer before they are published to the Kafka cluster. 
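Per the enable.idempotence entry above, the producer either picks compatible defaults or rejects conflicting explicit settings. A sketch of the explicit form:

```java
import java.util.Properties;

// Idempotent producer settings; with only enable.idempotence=true the
// client chooses compatible defaults, and the explicit settings below
// must not conflict or a ConfigException is thrown.
final class IdempotentConfig {
    static Properties idempotentProps() {
        Properties props = new Properties();
        props.put("enable.idempotence", "true");
        props.put("acks", "all");                                // required
        props.put("max.in.flight.requests.per.connection", "5"); // must be <= 5
        return props;
    }
}
```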
By default, there are no interceptors.\u003c/p\u003e\n","Type":"list","Default":"\"\"","ValidValues":"non-null string","Importance":"low","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"producer","Name":"max.in.flight.requests.per.connection","Description":"\n\u003cp\u003eThe maximum number of unacknowledged requests the client will send on a single connection before blocking. Note that if this setting is set to be greater than 1 and there are failed sends, there is a risk of message re-ordering due to retries (i.e., if retries are enabled).\u003c/p\u003e\n","Type":"int","Default":"5","ValidValues":"[1,...]","Importance":"low","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"producer","Name":"metadata.max.age.ms","Description":"\n\u003cp\u003eThe period of time in milliseconds after which we force a refresh of metadata even if we haven\u0026#39;t seen any partition leadership changes to proactively discover any new brokers or partitions.\u003c/p\u003e\n","Type":"long","Default":"300000","ValidValues":"[0,...]","Importance":"low","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"producer","Name":"metadata.max.idle.ms","Description":"\n\u003cp\u003eControls how long the producer will cache metadata for a topic that\u0026#39;s idle. If the elapsed time since a topic was last produced to exceeds the metadata idle duration, then the topic\u0026#39;s metadata is forgotten and the next access to it will force a metadata fetch request.\u003c/p\u003e\n","Type":"long","Default":"300000","ValidValues":"[5000,...]","Importance":"low","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"producer","Name":"metric.reporters","Description":"\n\u003cp\u003eA list of classes to use as metrics reporters. Implementing the \u003ccode\u003eorg.apache.kafka.common.metrics.MetricsReporter\u003c/code\u003e interface allows plugging in classes that will be notified of new metric creation. The JmxReporter is always included to register JMX statistics.\u003c/p\u003e\n","Type":"list","Default":"\"\"","ValidValues":"non-null string","Importance":"low","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"producer","Name":"metrics.num.samples","Description":"\n\u003cp\u003eThe number of samples maintained to compute metrics.\u003c/p\u003e\n","Type":"int","Default":"2","ValidValues":"[1,...]","Importance":"low","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"producer","Name":"metrics.recording.level","Description":"\n\u003cp\u003eThe highest recording level for metrics.\u003c/p\u003e\n","Type":"string","Default":"INFO","ValidValues":"[INFO, DEBUG]","Importance":"low","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"producer","Name":"metrics.sample.window.ms","Description":"\n\u003cp\u003eThe window of time a metrics sample is computed over.\u003c/p\u003e\n","Type":"long","Default":"30000","ValidValues":"[0,...]","Importance":"low","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"producer","Name":"reconnect.backoff.max.ms","Description":"\n\u003cp\u003eThe maximum amount of time in milliseconds to wait when reconnecting to a broker that has repeatedly failed to connect. If provided, the backoff per host will increase exponentially for each consecutive connection failure, up to this maximum. 
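A minimal sketch of the ProducerInterceptor contract described above; the class name is hypothetical, and it merely counts records on send:

```java
import java.util.Map;
import org.apache.kafka.clients.producer.ProducerInterceptor;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;

public class CountingInterceptor implements ProducerInterceptor<String, String> {
    private long sent = 0;

    @Override
    public ProducerRecord<String, String> onSend(ProducerRecord<String, String> record) {
        sent++;         // observe (or mutate) the record before it is published
        return record;
    }

    @Override
    public void onAcknowledgement(RecordMetadata metadata, Exception exception) { }

    @Override
    public void close() { }

    @Override
    public void configure(Map<String, ?> configs) { }
}
```

It would be registered via interceptor.classes, e.g. props.put("interceptor.classes", "com.example.CountingInterceptor").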
After calculating the backoff increase, 20% random jitter is added to avoid connection storms.\u003c/p\u003e\n","Type":"long","Default":"1000","ValidValues":"[0,...]","Importance":"low","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"producer","Name":"reconnect.backoff.ms","Description":"\n\u003cp\u003eThe base amount of time to wait before attempting to reconnect to a given host. This avoids repeatedly connecting to a host in a tight loop. This backoff applies to all connection attempts by the client to a broker.\u003c/p\u003e\n","Type":"long","Default":"50","ValidValues":"[0,...]","Importance":"low","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"producer","Name":"retry.backoff.ms","Description":"\n\u003cp\u003eThe amount of time to wait before attempting to retry a failed request to a given topic partition. This avoids repeatedly sending requests in a tight loop under some failure scenarios.\u003c/p\u003e\n","Type":"long","Default":"100","ValidValues":"[0,...]","Importance":"low","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"producer","Name":"sasl.kerberos.kinit.cmd","Description":"\n\u003cp\u003eKerberos kinit command path.\u003c/p\u003e\n","Type":"string","Default":"/usr/bin/kinit","ValidValues":"","Importance":"low","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"producer","Name":"sasl.kerberos.min.time.before.relogin","Description":"\n\u003cp\u003eLogin thread sleep time between refresh attempts.\u003c/p\u003e\n","Type":"long","Default":"60000","ValidValues":"","Importance":"low","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"producer","Name":"sasl.kerberos.ticket.renew.jitter","Description":"\n\u003cp\u003ePercentage of random jitter added to the renewal time.\u003c/p\u003e\n","Type":"double","Default":"0.05","ValidValues":"","Importance":"low","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"producer","Name":"sasl.kerberos.ticket.renew.window.factor","Description":"\n\u003cp\u003eLogin thread will sleep until the specified window factor of time from last refresh to ticket\u0026#39;s expiry has been reached, at which time it will try to renew the ticket.\u003c/p\u003e\n","Type":"double","Default":"0.8","ValidValues":"","Importance":"low","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"producer","Name":"sasl.login.refresh.buffer.seconds","Description":"\n\u003cp\u003eThe amount of buffer time before credential expiration to maintain when refreshing a credential, in seconds. If a refresh would otherwise occur closer to expiration than the number of buffer seconds then the refresh will be moved up to maintain as much of the buffer time as possible. Legal values are between 0 and 3600 (1 hour); a default value of 300 (5 minutes) is used if no value is specified. This value and sasl.login.refresh.min.period.seconds are both ignored if their sum exceeds the remaining lifetime of a credential. Currently applies only to OAUTHBEARER.\u003c/p\u003e\n","Type":"short","Default":"300","ValidValues":"[0,...,3600]","Importance":"low","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"producer","Name":"sasl.login.refresh.min.period.seconds","Description":"\n\u003cp\u003eThe desired minimum time for the login refresh thread to wait before refreshing a credential, in seconds. Legal values are between 0 and 900 (15 minutes); a default value of 60 (1 minute) is used if no value is specified. 
This value and sasl.login.refresh.buffer.seconds are both ignored if their sum exceeds the remaining lifetime of a credential. Currently applies only to OAUTHBEARER.\u003c/p\u003e\n","Type":"short","Default":"60","ValidValues":"[0,...,900]","Importance":"low","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"producer","Name":"sasl.login.refresh.window.factor","Description":"\n\u003cp\u003eLogin refresh thread will sleep until the specified window factor relative to the credential\u0026#39;s lifetime has been reached, at which time it will try to refresh the credential. Legal values are between 0.5 (50%) and 1.0 (100%) inclusive; a default value of 0.8 (80%) is used if no value is specified. Currently applies only to OAUTHBEARER.\u003c/p\u003e\n","Type":"double","Default":"0.8","ValidValues":"[0.5,...,1.0]","Importance":"low","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"producer","Name":"sasl.login.refresh.window.jitter","Description":"\n\u003cp\u003eThe maximum amount of random jitter relative to the credential\u0026#39;s lifetime that is added to the login refresh thread\u0026#39;s sleep time. Legal values are between 0 and 0.25 (25%) inclusive; a default value of 0.05 (5%) is used if no value is specified. Currently applies only to OAUTHBEARER.\u003c/p\u003e\n","Type":"double","Default":"0.05","ValidValues":"[0.0,...,0.25]","Importance":"low","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"producer","Name":"security.providers","Description":"\n\u003cp\u003eA list of configurable creator classes each returning a provider implementing security algorithms. These classes should implement the \u003ccode\u003eorg.apache.kafka.common.security.auth.SecurityProviderCreator\u003c/code\u003e interface.\u003c/p\u003e\n","Type":"string","Default":"null","ValidValues":"","Importance":"low","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"producer","Name":"ssl.cipher.suites","Description":"\n\u003cp\u003eA list of cipher suites. This is a named combination of authentication, encryption, MAC and key exchange algorithm used to negotiate the security settings for a network connection using TLS or SSL network protocol. By default all the available cipher suites are supported.\u003c/p\u003e\n","Type":"list","Default":"null","ValidValues":"","Importance":"low","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"producer","Name":"ssl.endpoint.identification.algorithm","Description":"\n\u003cp\u003eThe endpoint identification algorithm to validate server hostname using server certificate. \u003c/p\u003e\n","Type":"string","Default":"https","ValidValues":"","Importance":"low","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"producer","Name":"ssl.keymanager.algorithm","Description":"\n\u003cp\u003eThe algorithm used by key manager factory for SSL connections. Default value is the key manager factory algorithm configured for the Java Virtual Machine.\u003c/p\u003e\n","Type":"string","Default":"SunX509","ValidValues":"","Importance":"low","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"producer","Name":"ssl.secure.random.implementation","Description":"\n\u003cp\u003eThe SecureRandom PRNG implementation to use for SSL cryptography operations. 
\u003c/p\u003e\n","Type":"string","Default":"null","ValidValues":"","Importance":"low","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"producer","Name":"ssl.trustmanager.algorithm","Description":"\n\u003cp\u003eThe algorithm used by trust manager factory for SSL connections. Default value is the trust manager factory algorithm configured for the Java Virtual Machine.\u003c/p\u003e\n","Type":"string","Default":"PKIX","ValidValues":"","Importance":"low","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"producer","Name":"transaction.timeout.ms","Description":"\n\u003cp\u003eThe maximum amount of time in ms that the transaction coordinator will wait for a transaction status update from the producer before proactively aborting the ongoing transaction.If this value is larger than the transaction.max.timeout.ms setting in the broker, the request will fail with a \u003ccode\u003eInvalidTransactionTimeout\u003c/code\u003e error.\u003c/p\u003e\n","Type":"int","Default":"60000","ValidValues":"","Importance":"low","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"producer","Name":"transactional.id","Description":"\n\u003cp\u003eThe TransactionalId to use for transactional delivery. This enables reliability semantics which span multiple producer sessions since it allows the client to guarantee that transactions using the same TransactionalId have been completed prior to starting any new transactions. If no TransactionalId is provided, then the producer is limited to idempotent delivery. Note that \u003ccode\u003eenable.idempotence\u003c/code\u003e must be enabled if a TransactionalId is configured. The default is \u003ccode\u003enull\u003c/code\u003e, which means transactions cannot be used. Note that, by default, transactions require a cluster of at least three brokers which is the recommended setting for production; for development you can change this, by adjusting broker setting \u003ccode\u003etransaction.state.log.replication.factor\u003c/code\u003e.\u003c/p\u003e\n","Type":"string","Default":"null","ValidValues":"non-empty string","Importance":"low","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"consumer","Name":"key.deserializer","Description":"\n\u003cp\u003eDeserializer class for key that implements the \u003ccode\u003eorg.apache.kafka.common.serialization.Deserializer\u003c/code\u003e interface.\u003c/p\u003e\n","Type":"class","Default":"","ValidValues":"","Importance":"high","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"consumer","Name":"value.deserializer","Description":"\n\u003cp\u003eDeserializer class for value that implements the \u003ccode\u003eorg.apache.kafka.common.serialization.Deserializer\u003c/code\u003e interface.\u003c/p\u003e\n","Type":"class","Default":"","ValidValues":"","Importance":"high","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"consumer","Name":"bootstrap.servers","Description":"\n\u003cp\u003eA list of host/port pairs to use for establishing the initial connection to the Kafka cluster. The client will make use of all servers irrespective of which servers are specified here for bootstrapping—this list only impacts the initial hosts used to discover the full set of servers. This list should be in the form \u003ccode\u003ehost1:port1,host2:port2,...\u003c/code\u003e. 
Since these servers are just used for the initial connection to discover the full cluster membership (which may change dynamically), this list need not contain the full set of servers (you may want more than one, though, in case a server is down).\u003c/p\u003e\n","Type":"list","Default":"\"\"","ValidValues":"non-null string","Importance":"high","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"consumer","Name":"fetch.min.bytes","Description":"\n\u003cp\u003eThe minimum amount of data the server should return for a fetch request. If insufficient data is available the request will wait for that much data to accumulate before answering the request. The default setting of 1 byte means that fetch requests are answered as soon as a single byte of data is available or the fetch request times out waiting for data to arrive. Setting this to something greater than 1 will cause the server to wait for larger amounts of data to accumulate which can improve server throughput a bit at the cost of some additional latency.\u003c/p\u003e\n","Type":"int","Default":"1","ValidValues":"[0,...]","Importance":"high","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"consumer","Name":"group.id","Description":"\n\u003cp\u003eA unique string that identifies the consumer group this consumer belongs to. This property is required if the consumer uses either the group management functionality by using \u003ccode\u003esubscribe(topic)\u003c/code\u003e or the Kafka-based offset management strategy.\u003c/p\u003e\n","Type":"string","Default":"null","ValidValues":"","Importance":"high","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"consumer","Name":"heartbeat.interval.ms","Description":"\n\u003cp\u003eThe expected time between heartbeats to the consumer coordinator when using Kafka\u0026#39;s group management facilities. Heartbeats are used to ensure that the consumer\u0026#39;s session stays active and to facilitate rebalancing when new consumers join or leave the group. The value must be set lower than \u003ccode\u003esession.timeout.ms\u003c/code\u003e, but typically should be set no higher than 1/3 of that value. It can be adjusted even lower to control the expected time for normal rebalances.\u003c/p\u003e\n","Type":"int","Default":"3000","ValidValues":"","Importance":"high","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"consumer","Name":"max.partition.fetch.bytes","Description":"\n\u003cp\u003eThe maximum amount of data per-partition the server will return. Records are fetched in batches by the consumer. If the first record batch in the first non-empty partition of the fetch is larger than this limit, the batch will still be returned to ensure that the consumer can make progress. The maximum record batch size accepted by the broker is defined via \u003ccode\u003emessage.max.bytes\u003c/code\u003e (broker config) or \u003ccode\u003emax.message.bytes\u003c/code\u003e (topic config). See fetch.max.bytes for limiting the consumer request size.\u003c/p\u003e\n","Type":"int","Default":"1048576","ValidValues":"[0,...]","Importance":"high","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"consumer","Name":"session.timeout.ms","Description":"\n\u003cp\u003eThe timeout used to detect client failures when using Kafka\u0026#39;s group management facility. The client sends periodic heartbeats to indicate its liveness to the broker. 
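A sketch of group-liveness tuning following the guidance above (heartbeat.interval.ms no higher than one third of session.timeout.ms); the group name is a placeholder:

```java
import java.util.Properties;

// Consumer group liveness settings, per the entries above.
final class GroupLivenessConfig {
    static Properties livenessProps() {
        Properties props = new Properties();
        props.put("group.id", "billing-consumers"); // placeholder group name
        props.put("session.timeout.ms", "10000");
        props.put("heartbeat.interval.ms", "3000"); // about one third of the session timeout
        props.put("fetch.min.bytes", "1");          // answer fetches as soon as data exists
        return props;
    }
}
```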
If no heartbeats are received by the broker before the expiration of this session timeout, then the broker will remove this client from the group and initiate a rebalance. Note that the value must be in the allowable range as configured in the broker configuration by \u003ccode\u003egroup.min.session.timeout.ms\u003c/code\u003e and \u003ccode\u003egroup.max.session.timeout.ms\u003c/code\u003e.\u003c/p\u003e\n","Type":"int","Default":"10000","ValidValues":"","Importance":"high","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"consumer","Name":"ssl.key.password","Description":"\n\u003cp\u003eThe password of the private key in the key store file. This is optional for client.\u003c/p\u003e\n","Type":"password","Default":"null","ValidValues":"","Importance":"high","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"consumer","Name":"ssl.keystore.location","Description":"\n\u003cp\u003eThe location of the key store file. This is optional for client and can be used for two-way authentication for client.\u003c/p\u003e\n","Type":"string","Default":"null","ValidValues":"","Importance":"high","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"consumer","Name":"ssl.keystore.password","Description":"\n\u003cp\u003eThe store password for the key store file. This is optional for client and only needed if ssl.keystore.location is configured. \u003c/p\u003e\n","Type":"password","Default":"null","ValidValues":"","Importance":"high","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"consumer","Name":"ssl.truststore.location","Description":"\n\u003cp\u003eThe location of the trust store file. \u003c/p\u003e\n","Type":"string","Default":"null","ValidValues":"","Importance":"high","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"consumer","Name":"ssl.truststore.password","Description":"\n\u003cp\u003eThe password for the trust store file. If a password is not set access to the truststore is still available, but integrity checking is disabled.\u003c/p\u003e\n","Type":"password","Default":"null","ValidValues":"","Importance":"high","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"consumer","Name":"allow.auto.create.topics","Description":"\n\u003cp\u003eAllow automatic topic creation on the broker when subscribing to or assigning a topic. A topic being subscribed to will be automatically created only if the broker allows for it using `auto.create.topics.enable` broker configuration. This configuration must be set to `false` when using brokers older than 0.11.0.\u003c/p\u003e\n","Type":"boolean","Default":"true","ValidValues":"","Importance":"medium","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"consumer","Name":"auto.offset.reset","Description":"\n\u003cp\u003eWhat to do when there is no initial offset in Kafka or if the current offset does not exist any more on the server (e.g. 
because that data has been deleted): \u003c/p\u003e\u003cul\u003e\u003cli\u003eearliest: automatically reset the offset to the earliest offset\u003c/li\u003e\u003cli\u003elatest: automatically reset the offset to the latest offset\u003c/li\u003e\u003cli\u003enone: throw exception to the consumer if no previous offset is found for the consumer\u0026#39;s group\u003c/li\u003e\u003cli\u003eanything else: throw exception to the consumer.\u003c/li\u003e\u003c/ul\u003e\u003cp\u003e\u003c/p\u003e\n","Type":"string","Default":"latest","ValidValues":"[latest, earliest, none]","Importance":"medium","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"consumer","Name":"client.dns.lookup","Description":"\n\u003cp\u003eControls how the client uses DNS lookups. If set to \u003ccode\u003euse_all_dns_ips\u003c/code\u003e then, when the lookup returns multiple IP addresses for a hostname, they will all be attempted to connect to before failing the connection. Applies to both bootstrap and advertised servers. If the value is \u003ccode\u003eresolve_canonical_bootstrap_servers_only\u003c/code\u003e each entry will be resolved and expanded into a list of canonical names.\u003c/p\u003e\n","Type":"string","Default":"default","ValidValues":"[default, use_all_dns_ips, resolve_canonical_bootstrap_servers_only]","Importance":"medium","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"consumer","Name":"connections.max.idle.ms","Description":"\n\u003cp\u003eClose idle connections after the number of milliseconds specified by this config.\u003c/p\u003e\n","Type":"long","Default":"540000","ValidValues":"","Importance":"medium","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"consumer","Name":"default.api.timeout.ms","Description":"\n\u003cp\u003eSpecifies the timeout (in milliseconds) for client APIs. This configuration is used as the default timeout for all client operations that do not specify a \u003ccode\u003etimeout\u003c/code\u003e parameter.\u003c/p\u003e\n","Type":"int","Default":"60000","ValidValues":"[0,...]","Importance":"medium","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"consumer","Name":"enable.auto.commit","Description":"\n\u003cp\u003eIf true the consumer\u0026#39;s offset will be periodically committed in the background.\u003c/p\u003e\n","Type":"boolean","Default":"true","ValidValues":"","Importance":"medium","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"consumer","Name":"exclude.internal.topics","Description":"\n\u003cp\u003eWhether internal topics matching a subscribed pattern should be excluded from the subscription. It is always possible to explicitly subscribe to an internal topic.\u003c/p\u003e\n","Type":"boolean","Default":"true","ValidValues":"","Importance":"medium","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"consumer","Name":"fetch.max.bytes","Description":"\n\u003cp\u003eThe maximum amount of data the server should return for a fetch request. Records are fetched in batches by the consumer, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that the consumer can make progress. As such, this is not an absolute maximum. The maximum record batch size accepted by the broker is defined via \u003ccode\u003emessage.max.bytes\u003c/code\u003e (broker config) or \u003ccode\u003emax.message.bytes\u003c/code\u003e (topic config). 
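A minimal sketch of a consumer combining auto.offset.reset=earliest with manual commits instead of enable.auto.commit; the broker address, topic, and group name are placeholders:

```java
import java.time.Duration;
import java.util.Collections;
import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;

public class EarliestConsumer {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "host1:9092");
        props.put("group.id", "demo-group");
        props.put("auto.offset.reset", "earliest"); // start from the beginning when no offset exists
        props.put("enable.auto.commit", "false");
        props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            consumer.subscribe(Collections.singletonList("demo-topic"));
            while (true) {
                ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(500));
                for (ConsumerRecord<String, String> r : records) {
                    System.out.printf("%s=%s%n", r.key(), r.value());
                }
                consumer.commitSync(); // commit only after processing succeeds
            }
        }
    }
}
```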
Note that the consumer performs multiple fetches in parallel.\u003c/p\u003e\n","Type":"int","Default":"52428800","ValidValues":"[0,...]","Importance":"medium","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"consumer","Name":"group.instance.id","Description":"\n\u003cp\u003eA unique identifier of the consumer instance provided by the end user. Only non-empty strings are permitted. If set, the consumer is treated as a static member, which means that only one instance with this ID is allowed in the consumer group at any time. This can be used in combination with a larger session timeout to avoid group rebalances caused by transient unavailability (e.g. process restarts). If not set, the consumer will join the group as a dynamic member, which is the traditional behavior.\u003c/p\u003e\n","Type":"string","Default":"null","ValidValues":"","Importance":"medium","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"consumer","Name":"isolation.level","Description":"\n\u003cp\u003eControls how to read messages written transactionally. If set to \u003ccode\u003eread_committed\u003c/code\u003e, consumer.poll() will only return transactional messages which have been committed. If set to \u003ccode\u003eread_uncommitted\u003c/code\u003e (the default), consumer.poll() will return all messages, even transactional messages which have been aborted. Non-transactional messages will be returned unconditionally in either mode. \u003c/p\u003e\u003cp\u003eMessages will always be returned in offset order. Hence, in \u003ccode\u003eread_committed\u003c/code\u003e mode, consumer.poll() will only return messages up to the last stable offset (LSO), which is the one less than the offset of the first open transaction. In particular any messages appearing after messages belonging to ongoing transactions will be withheld until the relevant transaction has been completed. As a result, \u003ccode\u003eread_committed\u003c/code\u003e consumers will not be able to read up to the high watermark when there are in-flight transactions.\u003c/p\u003e\u003cp\u003e Further, when in \u003ccode\u003eread_committed\u003c/code\u003e mode, the seekToEnd method will return the LSO.\u003c/p\u003e\n","Type":"string","Default":"read_uncommitted","ValidValues":"[read_committed, read_uncommitted]","Importance":"medium","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"consumer","Name":"max.poll.interval.ms","Description":"\n\u003cp\u003eThe maximum delay between invocations of poll() when using consumer group management. This places an upper bound on the amount of time that the consumer can be idle before fetching more records. If poll() is not called before expiration of this timeout, then the consumer is considered failed and the group will rebalance in order to reassign the partitions to another member. For consumers using a non-null \u003ccode\u003egroup.instance.id\u003c/code\u003e which reach this timeout, partitions will not be immediately reassigned. Instead, the consumer will stop sending heartbeats and partitions will be reassigned after expiration of \u003ccode\u003esession.timeout.ms\u003c/code\u003e. 
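A sketch combining the isolation.level and group.instance.id entries above: read only committed transactional messages, with static membership to ride out restarts without a rebalance. The instance id is a placeholder:

```java
import java.util.Properties;

// Transactional-read and static-membership settings, per the entries above.
final class ReadCommittedConfig {
    static Properties readCommittedProps() {
        Properties props = new Properties();
        props.put("isolation.level", "read_committed"); // poll() stops at the LSO
        props.put("group.instance.id", "worker-7");     // one static member id per instance
        props.put("session.timeout.ms", "30000");       // pair with a larger session timeout
        return props;
    }
}
```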
This mirrors the behavior of a static consumer which has shut down.\u003c/p\u003e\n","Type":"int","Default":"300000","ValidValues":"[1,...]","Importance":"medium","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"consumer","Name":"max.poll.records","Description":"\n\u003cp\u003eThe maximum number of records returned in a single call to poll().\u003c/p\u003e\n","Type":"int","Default":"500","ValidValues":"[1,...]","Importance":"medium","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"consumer","Name":"partition.assignment.strategy","Description":"\n\u003cp\u003eA list of class names or class types, ordered by preference, of supported assignors responsible for the partition assignment strategy that the client will use to distribute partition ownership amongst consumer instances when group management is used. Implementing the \u003ccode\u003eorg.apache.kafka.clients.consumer.ConsumerPartitionAssignor\u003c/code\u003e interface allows you to plug in a custom assignment strategy.\u003c/p\u003e\n","Type":"list","Default":"org.apache.kafka.clients.consumer.RangeAssignor","ValidValues":"non-null string","Importance":"medium","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"consumer","Name":"receive.buffer.bytes","Description":"\n\u003cp\u003eThe size of the TCP receive buffer (SO_RCVBUF) to use when reading data. If the value is -1, the OS default will be used.\u003c/p\u003e\n","Type":"int","Default":"65536","ValidValues":"[-1,...]","Importance":"medium","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"consumer","Name":"request.timeout.ms","Description":"\n\u003cp\u003eThe configuration controls the maximum amount of time the client will wait for the response of a request. If the response is not received before the timeout elapses the client will resend the request if necessary or fail the request if retries are exhausted.\u003c/p\u003e\n","Type":"int","Default":"30000","ValidValues":"[0,...]","Importance":"medium","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"consumer","Name":"sasl.client.callback.handler.class","Description":"\n\u003cp\u003eThe fully qualified name of a SASL client callback handler class that implements the AuthenticateCallbackHandler interface.\u003c/p\u003e\n","Type":"class","Default":"null","ValidValues":"","Importance":"medium","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"consumer","Name":"sasl.jaas.config","Description":"\n\u003cp\u003eJAAS login context parameters for SASL connections in the format used by JAAS configuration files. JAAS configuration file format is described \u003ca href=\"http://docs.oracle.com/javase/8/docs/technotes/guides/security/jgss/tutorials/LoginConfigFile.html\"\u003ehere\u003c/a\u003e. The format for the value is: \u0026#39;\u003ccode\u003eloginModuleClass controlFlag (optionName=optionValue)*;\u003c/code\u003e\u0026#39;. For brokers, the config must be prefixed with listener prefix and SASL mechanism name in lower-case. For example, listener.name.sasl_ssl.scram-sha-256.sasl.jaas.config=com.example.ScramLoginModule required;\u003c/p\u003e\n","Type":"password","Default":"null","ValidValues":"","Importance":"medium","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"consumer","Name":"sasl.kerberos.service.name","Description":"\n\u003cp\u003eThe Kerberos principal name that Kafka runs as. 
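If processing a full batch risks overrunning max.poll.interval.ms, the usual lever is max.poll.records. A sketch with illustrative values:

```java
import java.util.Properties;

// Keeping per-batch processing inside max.poll.interval.ms, per the entries above.
final class PollTuning {
    static Properties pollProps() {
        Properties props = new Properties();
        props.put("max.poll.records", "100");        // fewer records handed back per poll()
        props.put("max.poll.interval.ms", "300000"); // poll() must be called again within 5 minutes
        return props;
    }
}
```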
This can be defined either in Kafka\u0026#39;s JAAS config or in Kafka\u0026#39;s config.\u003c/p\u003e\n","Type":"string","Default":"null","ValidValues":"","Importance":"medium","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"consumer","Name":"sasl.login.callback.handler.class","Description":"\n\u003cp\u003eThe fully qualified name of a SASL login callback handler class that implements the AuthenticateCallbackHandler interface. For brokers, login callback handler config must be prefixed with listener prefix and SASL mechanism name in lower-case. For example, listener.name.sasl_ssl.scram-sha-256.sasl.login.callback.handler.class=com.example.CustomScramLoginCallbackHandler\u003c/p\u003e\n","Type":"class","Default":"null","ValidValues":"","Importance":"medium","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"consumer","Name":"sasl.login.class","Description":"\n\u003cp\u003eThe fully qualified name of a class that implements the Login interface. For brokers, login config must be prefixed with listener prefix and SASL mechanism name in lower-case. For example, listener.name.sasl_ssl.scram-sha-256.sasl.login.class=com.example.CustomScramLogin\u003c/p\u003e\n","Type":"class","Default":"null","ValidValues":"","Importance":"medium","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"consumer","Name":"sasl.mechanism","Description":"\n\u003cp\u003eSASL mechanism used for client connections. This may be any mechanism for which a security provider is available. GSSAPI is the default mechanism.\u003c/p\u003e\n","Type":"string","Default":"GSSAPI","ValidValues":"","Importance":"medium","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"consumer","Name":"security.protocol","Description":"\n\u003cp\u003eProtocol used to communicate with brokers. Valid values are: PLAINTEXT, SSL, SASL_PLAINTEXT, SASL_SSL.\u003c/p\u003e\n","Type":"string","Default":"PLAINTEXT","ValidValues":"","Importance":"medium","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"consumer","Name":"send.buffer.bytes","Description":"\n\u003cp\u003eThe size of the TCP send buffer (SO_SNDBUF) to use when sending data. If the value is -1, the OS default will be used.\u003c/p\u003e\n","Type":"int","Default":"131072","ValidValues":"[-1,...]","Importance":"medium","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"consumer","Name":"ssl.enabled.protocols","Description":"\n\u003cp\u003eThe list of protocols enabled for SSL connections.\u003c/p\u003e\n","Type":"list","Default":"TLSv1.2","ValidValues":"","Importance":"medium","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"consumer","Name":"ssl.keystore.type","Description":"\n\u003cp\u003eThe file format of the key store file. This is optional for client.\u003c/p\u003e\n","Type":"string","Default":"JKS","ValidValues":"","Importance":"medium","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"consumer","Name":"ssl.protocol","Description":"\n\u003cp\u003eThe SSL protocol used to generate the SSLContext. Default setting is TLSv1.2, which is fine for most cases. Allowed values in recent JVMs are TLSv1.2 and TLSv1.3. 
TLS, TLSv1.1, SSL, SSLv2 and SSLv3 may be supported in older JVMs, but their usage is discouraged due to known security vulnerabilities.\u003c/p\u003e\n","Type":"string","Default":"TLSv1.2","ValidValues":"","Importance":"medium","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"consumer","Name":"ssl.provider","Description":"\n\u003cp\u003eThe name of the security provider used for SSL connections. Default value is the default security provider of the JVM.\u003c/p\u003e\n","Type":"string","Default":"null","ValidValues":"","Importance":"medium","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"consumer","Name":"ssl.truststore.type","Description":"\n\u003cp\u003eThe file format of the trust store file.\u003c/p\u003e\n","Type":"string","Default":"JKS","ValidValues":"","Importance":"medium","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"consumer","Name":"auto.commit.interval.ms","Description":"\n\u003cp\u003eThe frequency in milliseconds that the consumer offsets are auto-committed to Kafka if \u003ccode\u003eenable.auto.commit\u003c/code\u003e is set to \u003ccode\u003etrue\u003c/code\u003e.\u003c/p\u003e\n","Type":"int","Default":"5000","ValidValues":"[0,...]","Importance":"low","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"consumer","Name":"check.crcs","Description":"\n\u003cp\u003eAutomatically check the CRC32 of the records consumed. This ensures that no on-the-wire or on-disk corruption of the messages has occurred. This check adds some overhead, so it may be disabled in cases seeking extreme performance.\u003c/p\u003e\n","Type":"boolean","Default":"true","ValidValues":"","Importance":"low","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"consumer","Name":"client.id","Description":"\n\u003cp\u003eAn id string to pass to the server when making requests. The purpose of this is to be able to track the source of requests beyond just ip/port by allowing a logical application name to be included in server-side request logging.\u003c/p\u003e\n","Type":"string","Default":"\"\"","ValidValues":"","Importance":"low","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"consumer","Name":"client.rack","Description":"\n\u003cp\u003eA rack identifier for this client. This can be any string value which indicates where this client is physically located. It corresponds with the broker config \u0026#39;broker.rack\u0026#39;.\u003c/p\u003e\n","Type":"string","Default":"\"\"","ValidValues":"","Importance":"low","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"consumer","Name":"fetch.max.wait.ms","Description":"\n\u003cp\u003eThe maximum amount of time the server will block before answering the fetch request if there isn\u0026#39;t sufficient data to immediately satisfy the requirement given by fetch.min.bytes.\u003c/p\u003e\n","Type":"int","Default":"500","ValidValues":"[0,...]","Importance":"low","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"consumer","Name":"interceptor.classes","Description":"\n\u003cp\u003eA list of classes to use as interceptors. Implementing the \u003ccode\u003eorg.apache.kafka.clients.consumer.ConsumerInterceptor\u003c/code\u003e interface allows you to intercept (and possibly mutate) records received by the consumer. 
By default, there are no interceptors.\u003c/p\u003e\n","Type":"list","Default":"\"\"","ValidValues":"non-null string","Importance":"low","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"consumer","Name":"metadata.max.age.ms","Description":"\n\u003cp\u003eThe period of time in milliseconds after which we force a refresh of metadata even if we haven\u0026#39;t seen any partition leadership changes to proactively discover any new brokers or partitions.\u003c/p\u003e\n","Type":"long","Default":"300000","ValidValues":"[0,...]","Importance":"low","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"consumer","Name":"metric.reporters","Description":"\n\u003cp\u003eA list of classes to use as metrics reporters. Implementing the \u003ccode\u003eorg.apache.kafka.common.metrics.MetricsReporter\u003c/code\u003e interface allows plugging in classes that will be notified of new metric creation. The JmxReporter is always included to register JMX statistics.\u003c/p\u003e\n","Type":"list","Default":"\"\"","ValidValues":"non-null string","Importance":"low","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"consumer","Name":"metrics.num.samples","Description":"\n\u003cp\u003eThe number of samples maintained to compute metrics.\u003c/p\u003e\n","Type":"int","Default":"2","ValidValues":"[1,...]","Importance":"low","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"consumer","Name":"metrics.recording.level","Description":"\n\u003cp\u003eThe highest recording level for metrics.\u003c/p\u003e\n","Type":"string","Default":"INFO","ValidValues":"[INFO, DEBUG]","Importance":"low","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"consumer","Name":"metrics.sample.window.ms","Description":"\n\u003cp\u003eThe window of time a metrics sample is computed over.\u003c/p\u003e\n","Type":"long","Default":"30000","ValidValues":"[0,...]","Importance":"low","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"consumer","Name":"reconnect.backoff.max.ms","Description":"\n\u003cp\u003eThe maximum amount of time in milliseconds to wait when reconnecting to a broker that has repeatedly failed to connect. If provided, the backoff per host will increase exponentially for each consecutive connection failure, up to this maximum. After calculating the backoff increase, 20% random jitter is added to avoid connection storms.\u003c/p\u003e\n","Type":"long","Default":"1000","ValidValues":"[0,...]","Importance":"low","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"consumer","Name":"reconnect.backoff.ms","Description":"\n\u003cp\u003eThe base amount of time to wait before attempting to reconnect to a given host. This avoids repeatedly connecting to a host in a tight loop. This backoff applies to all connection attempts by the client to a broker.\u003c/p\u003e\n","Type":"long","Default":"50","ValidValues":"[0,...]","Importance":"low","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"consumer","Name":"retry.backoff.ms","Description":"\n\u003cp\u003eThe amount of time to wait before attempting to retry a failed request to a given topic partition. 
This avoids repeatedly sending requests in a tight loop under some failure scenarios.\u003c/p\u003e\n","Type":"long","Default":"100","ValidValues":"[0,...]","Importance":"low","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"consumer","Name":"sasl.kerberos.kinit.cmd","Description":"\n\u003cp\u003eKerberos kinit command path.\u003c/p\u003e\n","Type":"string","Default":"/usr/bin/kinit","ValidValues":"","Importance":"low","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"consumer","Name":"sasl.kerberos.min.time.before.relogin","Description":"\n\u003cp\u003eLogin thread sleep time between refresh attempts.\u003c/p\u003e\n","Type":"long","Default":"60000","ValidValues":"","Importance":"low","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"consumer","Name":"sasl.kerberos.ticket.renew.jitter","Description":"\n\u003cp\u003ePercentage of random jitter added to the renewal time.\u003c/p\u003e\n","Type":"double","Default":"0.05","ValidValues":"","Importance":"low","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"consumer","Name":"sasl.kerberos.ticket.renew.window.factor","Description":"\n\u003cp\u003eLogin thread will sleep until the specified window factor of time from last refresh to ticket\u0026#39;s expiry has been reached, at which time it will try to renew the ticket.\u003c/p\u003e\n","Type":"double","Default":"0.8","ValidValues":"","Importance":"low","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"consumer","Name":"sasl.login.refresh.buffer.seconds","Description":"\n\u003cp\u003eThe amount of buffer time before credential expiration to maintain when refreshing a credential, in seconds. If a refresh would otherwise occur closer to expiration than the number of buffer seconds then the refresh will be moved up to maintain as much of the buffer time as possible. Legal values are between 0 and 3600 (1 hour); a default value of 300 (5 minutes) is used if no value is specified. This value and sasl.login.refresh.min.period.seconds are both ignored if their sum exceeds the remaining lifetime of a credential. Currently applies only to OAUTHBEARER.\u003c/p\u003e\n","Type":"short","Default":"300","ValidValues":"[0,...,3600]","Importance":"low","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"consumer","Name":"sasl.login.refresh.min.period.seconds","Description":"\n\u003cp\u003eThe desired minimum time for the login refresh thread to wait before refreshing a credential, in seconds. Legal values are between 0 and 900 (15 minutes); a default value of 60 (1 minute) is used if no value is specified. This value and sasl.login.refresh.buffer.seconds are both ignored if their sum exceeds the remaining lifetime of a credential. Currently applies only to OAUTHBEARER.\u003c/p\u003e\n","Type":"short","Default":"60","ValidValues":"[0,...,900]","Importance":"low","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"consumer","Name":"sasl.login.refresh.window.factor","Description":"\n\u003cp\u003eLogin refresh thread will sleep until the specified window factor relative to the credential\u0026#39;s lifetime has been reached, at which time it will try to refresh the credential. Legal values are between 0.5 (50%) and 1.0 (100%) inclusive; a default value of 0.8 (80%) is used if no value is specified. 
Currently applies only to OAUTHBEARER.\u003c/p\u003e\n","Type":"double","Default":"0.8","ValidValues":"[0.5,...,1.0]","Importance":"low","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"consumer","Name":"sasl.login.refresh.window.jitter","Description":"\n\u003cp\u003eThe maximum amount of random jitter relative to the credential\u0026#39;s lifetime that is added to the login refresh thread\u0026#39;s sleep time. Legal values are between 0 and 0.25 (25%) inclusive; a default value of 0.05 (5%) is used if no value is specified. Currently applies only to OAUTHBEARER.\u003c/p\u003e\n","Type":"double","Default":"0.05","ValidValues":"[0.0,...,0.25]","Importance":"low","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"consumer","Name":"security.providers","Description":"\n\u003cp\u003eA list of configurable creator classes each returning a provider implementing security algorithms. These classes should implement the \u003ccode\u003eorg.apache.kafka.common.security.auth.SecurityProviderCreator\u003c/code\u003e interface.\u003c/p\u003e\n","Type":"string","Default":"null","ValidValues":"","Importance":"low","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"consumer","Name":"ssl.cipher.suites","Description":"\n\u003cp\u003eA list of cipher suites. This is a named combination of authentication, encryption, MAC and key exchange algorithm used to negotiate the security settings for a network connection using TLS or SSL network protocol. By default all the available cipher suites are supported.\u003c/p\u003e\n","Type":"list","Default":"null","ValidValues":"","Importance":"low","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"consumer","Name":"ssl.endpoint.identification.algorithm","Description":"\n\u003cp\u003eThe endpoint identification algorithm to validate server hostname using server certificate. \u003c/p\u003e\n","Type":"string","Default":"https","ValidValues":"","Importance":"low","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"consumer","Name":"ssl.keymanager.algorithm","Description":"\n\u003cp\u003eThe algorithm used by key manager factory for SSL connections. Default value is the key manager factory algorithm configured for the Java Virtual Machine.\u003c/p\u003e\n","Type":"string","Default":"SunX509","ValidValues":"","Importance":"low","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"consumer","Name":"ssl.secure.random.implementation","Description":"\n\u003cp\u003eThe SecureRandom PRNG implementation to use for SSL cryptography operations. \u003c/p\u003e\n","Type":"string","Default":"null","ValidValues":"","Importance":"low","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"consumer","Name":"ssl.trustmanager.algorithm","Description":"\n\u003cp\u003eThe algorithm used by trust manager factory for SSL connections. 
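The sasl.login.refresh.* settings above interact as follows: the refresh thread sleeps until a randomized fraction (window factor plus jitter) of the credential's lifetime has elapsed, while honouring the minimum refresh period and keeping the buffer before expiry. A minimal sketch of that arithmetic, with a hypothetical helper class (this is not Kafka's internal code):

```java
import java.util.concurrent.ThreadLocalRandom;

// Hypothetical arithmetic for scheduling an OAUTHBEARER login refresh from the
// sasl.login.refresh.* settings: sleep until windowFactor (+ jitter) of the
// credential lifetime has elapsed, clamped by the min-period and buffer bounds.
public class LoginRefreshSketch {
    static long refreshAtMs(long issuedAtMs, long expiresAtMs,
                            double windowFactor, double windowJitter,
                            int bufferSeconds, int minPeriodSeconds) {
        long lifetimeMs = expiresAtMs - issuedAtMs;
        double fraction = windowFactor + windowJitter * ThreadLocalRandom.current().nextDouble();
        long candidate = issuedAtMs + (long) (lifetimeMs * fraction);
        long latest = expiresAtMs - bufferSeconds * 1000L;     // keep the refresh buffer before expiry
        long earliest = issuedAtMs + minPeriodSeconds * 1000L; // honour the minimum refresh period
        return Math.max(earliest, Math.min(candidate, latest));
    }

    public static void main(String[] args) {
        long now = System.currentTimeMillis();
        // Defaults: window factor 0.8, jitter 0.05, buffer 300 s, min period 60 s,
        // shown for a credential that lives one hour.
        System.out.println((refreshAtMs(now, now + 3_600_000L, 0.8, 0.05, 300, 60) - now) + " ms from now");
    }
}
```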
Default value is the trust manager factory algorithm configured for the Java Virtual Machine.\u003c/p\u003e\n","Type":"string","Default":"PKIX","ValidValues":"","Importance":"low","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"connect","Name":"config.storage.topic","Description":"\n\u003cp\u003eThe name of the Kafka topic where connector configurations are stored\u003c/p\u003e\n","Type":"string","Default":"","ValidValues":"","Importance":"high","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"connect","Name":"group.id","Description":"\n\u003cp\u003eA unique string that identifies the Connect cluster group this worker belongs to.\u003c/p\u003e\n","Type":"string","Default":"","ValidValues":"","Importance":"high","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"connect","Name":"key.converter","Description":"\n\u003cp\u003eConverter class used to convert between Kafka Connect format and the serialized form that is written to Kafka. This controls the format of the keys in messages written to or read from Kafka, and since this is independent of connectors it allows any connector to work with any serialization format. Examples of common formats include JSON and Avro.\u003c/p\u003e\n","Type":"class","Default":"","ValidValues":"","Importance":"high","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"connect","Name":"offset.storage.topic","Description":"\n\u003cp\u003eThe name of the Kafka topic where connector offsets are stored\u003c/p\u003e\n","Type":"string","Default":"","ValidValues":"","Importance":"high","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"connect","Name":"status.storage.topic","Description":"\n\u003cp\u003eThe name of the Kafka topic where connector and task status are stored\u003c/p\u003e\n","Type":"string","Default":"","ValidValues":"","Importance":"high","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"connect","Name":"value.converter","Description":"\n\u003cp\u003eConverter class used to convert between Kafka Connect format and the serialized form that is written to Kafka. This controls the format of the values in messages written to or read from Kafka, and since this is independent of connectors it allows any connector to work with any serialization format. Examples of common formats include JSON and Avro.\u003c/p\u003e\n","Type":"class","Default":"","ValidValues":"","Importance":"high","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"connect","Name":"bootstrap.servers","Description":"\n\u003cp\u003eA list of host/port pairs to use for establishing the initial connection to the Kafka cluster. The client will make use of all servers irrespective of which servers are specified here for bootstrapping—this list only impacts the initial hosts used to discover the full set of servers. This list should be in the form \u003ccode\u003ehost1:port1,host2:port2,...\u003c/code\u003e. Since these servers are just used for the initial connection to discover the full cluster membership (which may change dynamically), this list need not contain the full set of servers (you may want more than one, though, in case a server is down).\u003c/p\u003e\n","Type":"list","Default":"localhost:9092","ValidValues":"","Importance":"high","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"connect","Name":"heartbeat.interval.ms","Description":"\n\u003cp\u003eThe expected time between heartbeats to the group coordinator when using Kafka\u0026#39;s group management facilities. 
Heartbeats are used to ensure that the worker\u0026#39;s session stays active and to facilitate rebalancing when new members join or leave the group. The value must be set lower than \u003ccode\u003esession.timeout.ms\u003c/code\u003e, but typically should be set no higher than 1/3 of that value. It can be adjusted even lower to control the expected time for normal rebalances.\u003c/p\u003e\n","Type":"int","Default":"3000","ValidValues":"","Importance":"high","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"connect","Name":"rebalance.timeout.ms","Description":"\n\u003cp\u003eThe maximum allowed time for each worker to join the group once a rebalance has begun. This is basically a limit on the amount of time needed for all tasks to flush any pending data and commit offsets. If the timeout is exceeded, then the worker will be removed from the group, which will cause offset commit failures.\u003c/p\u003e\n","Type":"int","Default":"60000","ValidValues":"","Importance":"high","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"connect","Name":"session.timeout.ms","Description":"\n\u003cp\u003eThe timeout used to detect worker failures. The worker sends periodic heartbeats to indicate its liveness to the broker. If no heartbeats are received by the broker before the expiration of this session timeout, then the broker will remove the worker from the group and initiate a rebalance. Note that the value must be in the allowable range as configured in the broker configuration by \u003ccode\u003egroup.min.session.timeout.ms\u003c/code\u003e and \u003ccode\u003egroup.max.session.timeout.ms\u003c/code\u003e.\u003c/p\u003e\n","Type":"int","Default":"10000","ValidValues":"","Importance":"high","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"connect","Name":"ssl.key.password","Description":"\n\u003cp\u003eThe password of the private key in the key store file. This is optional for clients.\u003c/p\u003e\n","Type":"password","Default":"null","ValidValues":"","Importance":"high","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"connect","Name":"ssl.keystore.location","Description":"\n\u003cp\u003eThe location of the key store file. This is optional for clients and can be used for two-way client authentication.\u003c/p\u003e\n","Type":"string","Default":"null","ValidValues":"","Importance":"high","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"connect","Name":"ssl.keystore.password","Description":"\n\u003cp\u003eThe store password for the key store file. This is optional for clients and only needed if ssl.keystore.location is configured. \u003c/p\u003e\n","Type":"password","Default":"null","ValidValues":"","Importance":"high","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"connect","Name":"ssl.truststore.location","Description":"\n\u003cp\u003eThe location of the trust store file. \u003c/p\u003e\n","Type":"string","Default":"null","ValidValues":"","Importance":"high","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"connect","Name":"ssl.truststore.password","Description":"\n\u003cp\u003eThe password for the trust store file. If a password is not set, access to the truststore is still available, but integrity checking is disabled.\u003c/p\u003e\n","Type":"password","Default":"null","ValidValues":"","Importance":"high","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"connect","Name":"client.dns.lookup","Description":"\n\u003cp\u003eControls how the client uses DNS lookups. 
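Taken together, the required Connect worker settings above amount to a handful of properties. A minimal, illustrative distributed-worker configuration might look like the following sketch; the hosts and topic names are placeholders, and heartbeat.interval.ms is kept at or below one third of session.timeout.ms, as the descriptions recommend.

```java
import java.util.Properties;

// Minimal, illustrative distributed-worker configuration combining the
// required storage-topic, converter, and group settings described above.
public class WorkerConfigSketch {
    public static Properties workerProps() {
        Properties p = new Properties();
        p.put("bootstrap.servers", "broker1:9092,broker2:9092");
        p.put("group.id", "connect-cluster");
        p.put("config.storage.topic", "connect-configs");
        p.put("offset.storage.topic", "connect-offsets");
        p.put("status.storage.topic", "connect-status");
        p.put("key.converter", "org.apache.kafka.connect.json.JsonConverter");
        p.put("value.converter", "org.apache.kafka.connect.json.JsonConverter");
        p.put("session.timeout.ms", "10000");   // worker failure detection window
        p.put("heartbeat.interval.ms", "3000"); // at most 1/3 of session.timeout.ms
        return p;
    }
}
```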
If set to \u003ccode\u003euse_all_dns_ips\u003c/code\u003e then, when the lookup returns multiple IP addresses for a hostname, the client will attempt to connect to each of them before failing the connection. Applies to both bootstrap and advertised servers. If the value is \u003ccode\u003eresolve_canonical_bootstrap_servers_only\u003c/code\u003e, each entry will be resolved and expanded into a list of canonical names.\u003c/p\u003e\n","Type":"string","Default":"default","ValidValues":"[default, use_all_dns_ips, resolve_canonical_bootstrap_servers_only]","Importance":"medium","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"connect","Name":"connections.max.idle.ms","Description":"\n\u003cp\u003eClose idle connections after the number of milliseconds specified by this config.\u003c/p\u003e\n","Type":"long","Default":"540000","ValidValues":"","Importance":"medium","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"connect","Name":"connector.client.config.override.policy","Description":"\n\u003cp\u003eClass name or alias of implementation of \u003ccode\u003eConnectorClientConfigOverridePolicy\u003c/code\u003e. Defines what client configurations can be overridden by the connector. The default implementation is \u003ccode\u003eNone\u003c/code\u003e. The other possible policies in the framework include \u003ccode\u003eAll\u003c/code\u003e and \u003ccode\u003ePrincipal\u003c/code\u003e. \u003c/p\u003e\n","Type":"string","Default":"None","ValidValues":"","Importance":"medium","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"connect","Name":"receive.buffer.bytes","Description":"\n\u003cp\u003eThe size of the TCP receive buffer (SO_RCVBUF) to use when reading data. If the value is -1, the OS default will be used.\u003c/p\u003e\n","Type":"int","Default":"32768","ValidValues":"[0,...]","Importance":"medium","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"connect","Name":"request.timeout.ms","Description":"\n\u003cp\u003eThe configuration controls the maximum amount of time the client will wait for the response of a request. If the response is not received before the timeout elapses the client will resend the request if necessary or fail the request if retries are exhausted.\u003c/p\u003e\n","Type":"int","Default":"40000","ValidValues":"[0,...]","Importance":"medium","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"connect","Name":"sasl.client.callback.handler.class","Description":"\n\u003cp\u003eThe fully qualified name of a SASL client callback handler class that implements the AuthenticateCallbackHandler interface.\u003c/p\u003e\n","Type":"class","Default":"null","ValidValues":"","Importance":"medium","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"connect","Name":"sasl.jaas.config","Description":"\n\u003cp\u003eJAAS login context parameters for SASL connections in the format used by JAAS configuration files. JAAS configuration file format is described \u003ca href=\"http://docs.oracle.com/javase/8/docs/technotes/guides/security/jgss/tutorials/LoginConfigFile.html\"\u003ehere\u003c/a\u003e. The format for the value is: \u0026#39;\u003ccode\u003eloginModuleClass controlFlag (optionName=optionValue)*;\u003c/code\u003e\u0026#39;. For brokers, the config must be prefixed with listener prefix and SASL mechanism name in lower-case. 
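As a concrete illustration of the 'loginModuleClass controlFlag (optionName=optionValue)*;' format described above, a client might configure SASL/PLAIN as in the sketch below. The mechanism choice and the credentials are placeholders; PlainLoginModule is Kafka's standard PLAIN login module.

```java
import java.util.Properties;

// Illustrative client-side SASL settings following the documented
// 'loginModuleClass controlFlag (optionName=optionValue)*;' format.
public class SaslJaasConfigSketch {
    public static Properties saslProps() {
        Properties p = new Properties();
        p.put("security.protocol", "SASL_SSL");
        p.put("sasl.mechanism", "PLAIN");
        p.put("sasl.jaas.config",
            "org.apache.kafka.common.security.plain.PlainLoginModule required "
                + "username=\"alice\" password=\"alice-secret\";"); // placeholder credentials
        return p;
    }
}
```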
For example, listener.name.sasl_ssl.scram-sha-256.sasl.jaas.config=com.example.ScramLoginModule required;\u003c/p\u003e\n","Type":"password","Default":"null","ValidValues":"","Importance":"medium","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"connect","Name":"sasl.kerberos.service.name","Description":"\n\u003cp\u003eThe Kerberos principal name that Kafka runs as. This can be defined either in Kafka\u0026#39;s JAAS config or in Kafka\u0026#39;s config.\u003c/p\u003e\n","Type":"string","Default":"null","ValidValues":"","Importance":"medium","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"connect","Name":"sasl.login.callback.handler.class","Description":"\n\u003cp\u003eThe fully qualified name of a SASL login callback handler class that implements the AuthenticateCallbackHandler interface. For brokers, login callback handler config must be prefixed with listener prefix and SASL mechanism name in lower-case. For example, listener.name.sasl_ssl.scram-sha-256.sasl.login.callback.handler.class=com.example.CustomScramLoginCallbackHandler\u003c/p\u003e\n","Type":"class","Default":"null","ValidValues":"","Importance":"medium","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"connect","Name":"sasl.login.class","Description":"\n\u003cp\u003eThe fully qualified name of a class that implements the Login interface. For brokers, login config must be prefixed with listener prefix and SASL mechanism name in lower-case. For example, listener.name.sasl_ssl.scram-sha-256.sasl.login.class=com.example.CustomScramLogin\u003c/p\u003e\n","Type":"class","Default":"null","ValidValues":"","Importance":"medium","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"connect","Name":"sasl.mechanism","Description":"\n\u003cp\u003eSASL mechanism used for client connections. This may be any mechanism for which a security provider is available. GSSAPI is the default mechanism.\u003c/p\u003e\n","Type":"string","Default":"GSSAPI","ValidValues":"","Importance":"medium","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"connect","Name":"security.protocol","Description":"\n\u003cp\u003eProtocol used to communicate with brokers. Valid values are: PLAINTEXT, SSL, SASL_PLAINTEXT, SASL_SSL.\u003c/p\u003e\n","Type":"string","Default":"PLAINTEXT","ValidValues":"","Importance":"medium","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"connect","Name":"send.buffer.bytes","Description":"\n\u003cp\u003eThe size of the TCP send buffer (SO_SNDBUF) to use when sending data. If the value is -1, the OS default will be used.\u003c/p\u003e\n","Type":"int","Default":"131072","ValidValues":"[0,...]","Importance":"medium","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"connect","Name":"ssl.enabled.protocols","Description":"\n\u003cp\u003eThe list of protocols enabled for SSL connections.\u003c/p\u003e\n","Type":"list","Default":"TLSv1.2","ValidValues":"","Importance":"medium","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"connect","Name":"ssl.keystore.type","Description":"\n\u003cp\u003eThe file format of the key store file. This is optional for client.\u003c/p\u003e\n","Type":"string","Default":"JKS","ValidValues":"","Importance":"medium","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"connect","Name":"ssl.protocol","Description":"\n\u003cp\u003eThe SSL protocol used to generate the SSLContext. Default setting is TLSv1.2, which is fine for most cases. Allowed values in recent JVMs are TLSv1.2 and TLSv1.3. 
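For example, a worker that should only speak TLSv1.2, in line with the ssl.protocol and ssl.enabled.protocols defaults above, could be configured as in this sketch; the truststore path and password are placeholders.

```java
import java.util.Properties;

// Illustrative TLS settings pinning both the SSLContext protocol and the
// enabled protocol list to TLSv1.2, matching the defaults described above.
public class TlsConfigSketch {
    public static Properties tlsProps() {
        Properties p = new Properties();
        p.put("security.protocol", "SSL");
        p.put("ssl.protocol", "TLSv1.2");          // protocol used to build the SSLContext
        p.put("ssl.enabled.protocols", "TLSv1.2"); // protocols enabled on connections
        p.put("ssl.truststore.location", "/etc/kafka/secrets/truststore.jks"); // placeholder path
        p.put("ssl.truststore.password", "changeit");                          // placeholder password
        return p;
    }
}
```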
TLS, TLSv1.1, SSL, SSLv2 and SSLv3 may be supported in older JVMs, but their usage is discouraged due to known security vulnerabilities.\u003c/p\u003e\n","Type":"string","Default":"TLSv1.2","ValidValues":"","Importance":"medium","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"connect","Name":"ssl.provider","Description":"\n\u003cp\u003eThe name of the security provider used for SSL connections. Default value is the default security provider of the JVM.\u003c/p\u003e\n","Type":"string","Default":"null","ValidValues":"","Importance":"medium","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"connect","Name":"ssl.truststore.type","Description":"\n\u003cp\u003eThe file format of the trust store file.\u003c/p\u003e\n","Type":"string","Default":"JKS","ValidValues":"","Importance":"medium","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"connect","Name":"worker.sync.timeout.ms","Description":"\n\u003cp\u003eWhen the worker is out of sync with other workers and needs to resynchronize configurations, wait up to this amount of time before giving up, leaving the group, and waiting a backoff period before rejoining.\u003c/p\u003e\n","Type":"int","Default":"3000","ValidValues":"","Importance":"medium","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"connect","Name":"worker.unsync.backoff.ms","Description":"\n\u003cp\u003eWhen the worker is out of sync with other workers and fails to catch up within worker.sync.timeout.ms, leave the Connect cluster for this long before rejoining.\u003c/p\u003e\n","Type":"int","Default":"300000","ValidValues":"","Importance":"medium","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"connect","Name":"access.control.allow.methods","Description":"\n\u003cp\u003eSets the methods supported for cross origin requests by setting the Access-Control-Allow-Methods header. The default value of the Access-Control-Allow-Methods header allows cross origin requests for GET, POST and HEAD.\u003c/p\u003e\n","Type":"string","Default":"\"\"","ValidValues":"","Importance":"low","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"connect","Name":"access.control.allow.origin","Description":"\n\u003cp\u003eValue to set the Access-Control-Allow-Origin header to for REST API requests. To enable cross origin access, set this to the domain of the application that should be permitted to access the API, or \u0026#39;*\u0026#39; to allow access from any domain. The default value only allows access from the domain of the REST API.\u003c/p\u003e\n","Type":"string","Default":"\"\"","ValidValues":"","Importance":"low","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"connect","Name":"admin.listeners","Description":"\n\u003cp\u003eList of comma-separated URIs the Admin REST API will listen on. The supported protocols are HTTP and HTTPS. An empty or blank string will disable this feature. The default behavior is to use the regular listener (specified by the \u0026#39;listeners\u0026#39; property).\u003c/p\u003e\n","Type":"list","Default":"null","ValidValues":"","Importance":"low","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"connect","Name":"client.id","Description":"\n\u003cp\u003eAn id string to pass to the server when making requests. 
The purpose of this is to be able to track the source of requests beyond just ip/port by allowing a logical application name to be included in server-side request logging.\u003c/p\u003e\n","Type":"string","Default":"\"\"","ValidValues":"","Importance":"low","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"connect","Name":"config.providers","Description":"\n\u003cp\u003eComma-separated names of \u003ccode\u003eConfigProvider\u003c/code\u003e classes, loaded and used in the order specified. Implementing the interface \u003ccode\u003eConfigProvider\u003c/code\u003e allows you to replace variable references in connector configurations, such as for externalized secrets. \u003c/p\u003e\n","Type":"list","Default":"\"\"","ValidValues":"","Importance":"low","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"connect","Name":"config.storage.replication.factor","Description":"\n\u003cp\u003eReplication factor used when creating the configuration storage topic\u003c/p\u003e\n","Type":"short","Default":"3","ValidValues":"[1,...]","Importance":"low","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"connect","Name":"connect.protocol","Description":"\n\u003cp\u003eCompatibility mode for Kafka Connect Protocol\u003c/p\u003e\n","Type":"string","Default":"sessioned","ValidValues":"[eager, compatible, sessioned]","Importance":"low","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"connect","Name":"header.converter","Description":"\n\u003cp\u003eHeaderConverter class used to convert between Kafka Connect format and the serialized form that is written to Kafka. This controls the format of the header values in messages written to or read from Kafka, and since this is independent of connectors it allows any connector to work with any serialization format. Examples of common formats include JSON and Avro. By default, the SimpleHeaderConverter is used to serialize header values to strings and deserialize them by inferring the schemas.\u003c/p\u003e\n","Type":"class","Default":"org.apache.kafka.connect.storage.SimpleHeaderConverter","ValidValues":"","Importance":"low","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"connect","Name":"inter.worker.key.generation.algorithm","Description":"\n\u003cp\u003eThe algorithm to use for generating internal request keys\u003c/p\u003e\n","Type":"string","Default":"HmacSHA256","ValidValues":"Any KeyGenerator algorithm supported by the worker JVM","Importance":"low","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"connect","Name":"inter.worker.key.size","Description":"\n\u003cp\u003eThe size of the key to use for signing internal requests, in bits. 
If null, the default key size for the key generation algorithm will be used.\u003c/p\u003e\n","Type":"int","Default":"null","ValidValues":"","Importance":"low","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"connect","Name":"inter.worker.key.ttl.ms","Description":"\n\u003cp\u003eThe TTL of generated session keys used for internal request validation (in milliseconds)\u003c/p\u003e\n","Type":"int","Default":"3600000","ValidValues":"[0,...,2147483647]","Importance":"low","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"connect","Name":"inter.worker.signature.algorithm","Description":"\n\u003cp\u003eThe algorithm used to sign internal requests\u003c/p\u003e\n","Type":"string","Default":"HmacSHA256","ValidValues":"Any MAC algorithm supported by the worker JVM","Importance":"low","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"connect","Name":"inter.worker.verification.algorithms","Description":"\n\u003cp\u003eA list of permitted algorithms for verifying internal requests\u003c/p\u003e\n","Type":"list","Default":"HmacSHA256","ValidValues":"A list of one or more MAC algorithms, each supported by the worker JVM","Importance":"low","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"connect","Name":"internal.key.converter","Description":"\n\u003cp\u003eConverter class used to convert between Kafka Connect format and the serialized form that is written to Kafka. This controls the format of the keys in messages written to or read from Kafka, and since this is independent of connectors it allows any connector to work with any serialization format. Examples of common formats include JSON and Avro. This setting controls the format used for internal bookkeeping data used by the framework, such as configs and offsets, so users can typically use any functioning Converter implementation. Deprecated; will be removed in an upcoming version.\u003c/p\u003e\n","Type":"class","Default":"org.apache.kafka.connect.json.JsonConverter","ValidValues":"","Importance":"low","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"connect","Name":"internal.value.converter","Description":"\n\u003cp\u003eConverter class used to convert between Kafka Connect format and the serialized form that is written to Kafka. This controls the format of the values in messages written to or read from Kafka, and since this is independent of connectors it allows any connector to work with any serialization format. Examples of common formats include JSON and Avro. This setting controls the format used for internal bookkeeping data used by the framework, such as configs and offsets, so users can typically use any functioning Converter implementation. Deprecated; will be removed in an upcoming version.\u003c/p\u003e\n","Type":"class","Default":"org.apache.kafka.connect.json.JsonConverter","ValidValues":"","Importance":"low","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"connect","Name":"listeners","Description":"\n\u003cp\u003eList of comma-separated URIs the REST API will listen on. 
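The inter.worker.* settings above describe a session-key-plus-MAC scheme for signing internal REST requests between workers. The sketch below shows the underlying JCA calls, assuming the HmacSHA256 defaults; it illustrates the mechanism only, not Connect's actual implementation.

```java
import java.nio.charset.StandardCharsets;
import javax.crypto.KeyGenerator;
import javax.crypto.Mac;
import javax.crypto.SecretKey;

// Sketch of the mechanism the inter.worker.* settings configure: a session key
// produced by the configured KeyGenerator algorithm signs internal requests
// with the configured MAC algorithm (HmacSHA256 by default).
public class InterWorkerSignatureSketch {
    public static void main(String[] args) throws Exception {
        KeyGenerator gen = KeyGenerator.getInstance("HmacSHA256"); // inter.worker.key.generation.algorithm
        SecretKey sessionKey = gen.generateKey();                  // rotated every inter.worker.key.ttl.ms

        Mac mac = Mac.getInstance("HmacSHA256");                   // inter.worker.signature.algorithm
        mac.init(sessionKey);
        byte[] signature = mac.doFinal("internal request body".getBytes(StandardCharsets.UTF_8));
        System.out.println("HMAC-SHA256 signature is " + signature.length + " bytes");
    }
}
```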
The supported protocols are HTTP and HTTPS.\u003cbr/\u003e Specify hostname as 0.0.0.0 to bind to all interfaces.\u003cbr/\u003e Leave hostname empty to bind to default interface.\u003cbr/\u003e Examples of legal listener lists: HTTP://myhost:8083,HTTPS://myhost:8084\u003c/p\u003e\n","Type":"list","Default":"null","ValidValues":"","Importance":"low","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"connect","Name":"metadata.max.age.ms","Description":"\n\u003cp\u003eThe period of time in milliseconds after which we force a refresh of metadata even if we haven\u0026#39;t seen any partition leadership changes to proactively discover any new brokers or partitions.\u003c/p\u003e\n","Type":"long","Default":"300000","ValidValues":"[0,...]","Importance":"low","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"connect","Name":"metric.reporters","Description":"\n\u003cp\u003eA list of classes to use as metrics reporters. Implementing the \u003ccode\u003eorg.apache.kafka.common.metrics.MetricsReporter\u003c/code\u003e interface allows plugging in classes that will be notified of new metric creation. The JmxReporter is always included to register JMX statistics.\u003c/p\u003e\n","Type":"list","Default":"\"\"","ValidValues":"","Importance":"low","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"connect","Name":"metrics.num.samples","Description":"\n\u003cp\u003eThe number of samples maintained to compute metrics.\u003c/p\u003e\n","Type":"int","Default":"2","ValidValues":"[1,...]","Importance":"low","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"connect","Name":"metrics.recording.level","Description":"\n\u003cp\u003eThe highest recording level for metrics.\u003c/p\u003e\n","Type":"string","Default":"INFO","ValidValues":"[INFO, DEBUG]","Importance":"low","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"connect","Name":"metrics.sample.window.ms","Description":"\n\u003cp\u003eThe window of time a metrics sample is computed over.\u003c/p\u003e\n","Type":"long","Default":"30000","ValidValues":"[0,...]","Importance":"low","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"connect","Name":"offset.flush.interval.ms","Description":"\n\u003cp\u003eInterval at which to try committing offsets for tasks.\u003c/p\u003e\n","Type":"long","Default":"60000","ValidValues":"","Importance":"low","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"connect","Name":"offset.flush.timeout.ms","Description":"\n\u003cp\u003eMaximum number of milliseconds to wait for records to flush and partition offset data to be committed to offset storage before cancelling the process and restoring the offset data to be committed in a future attempt.\u003c/p\u003e\n","Type":"long","Default":"5000","ValidValues":"","Importance":"low","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"connect","Name":"offset.storage.partitions","Description":"\n\u003cp\u003eThe number of partitions used when creating the offset storage topic\u003c/p\u003e\n","Type":"int","Default":"25","ValidValues":"[1,...]","Importance":"low","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"connect","Name":"offset.storage.replication.factor","Description":"\n\u003cp\u003eReplication factor used when creating the offset storage topic\u003c/p\u003e\n","Type":"short","Default":"3","ValidValues":"[1,...]","Importance":"low","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"connect","Name":"plugin.path","Description":"\n\u003cp\u003eList of 
paths separated by commas (,) that contain plugins (connectors, converters, transformations). The list should consist of top level directories that include any combination of: \u003cbr/\u003ea) directories immediately containing jars with plugins and their dependencies\u003cbr/\u003eb) uber-jars with plugins and their dependencies\u003cbr/\u003ec) directories immediately containing the package directory structure of classes of plugins and their dependencies\u003cbr/\u003eNote: symlinks will be followed to discover dependencies or plugins.\u003cbr/\u003eExamples: plugin.path=/usr/local/share/java,/usr/local/share/kafka/plugins,/opt/connectors\u003c/p\u003e\n","Type":"list","Default":"null","ValidValues":"","Importance":"low","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"connect","Name":"reconnect.backoff.max.ms","Description":"\n\u003cp\u003eThe maximum amount of time in milliseconds to wait when reconnecting to a broker that has repeatedly failed to connect. If provided, the backoff per host will increase exponentially for each consecutive connection failure, up to this maximum. After calculating the backoff increase, 20% random jitter is added to avoid connection storms.\u003c/p\u003e\n","Type":"long","Default":"1000","ValidValues":"[0,...]","Importance":"low","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"connect","Name":"reconnect.backoff.ms","Description":"\n\u003cp\u003eThe base amount of time to wait before attempting to reconnect to a given host. This avoids repeatedly connecting to a host in a tight loop. This backoff applies to all connection attempts by the client to a broker.\u003c/p\u003e\n","Type":"long","Default":"50","ValidValues":"[0,...]","Importance":"low","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"connect","Name":"rest.advertised.host.name","Description":"\n\u003cp\u003eIf this is set, this is the hostname that will be given out to other workers to connect to.\u003c/p\u003e\n","Type":"string","Default":"null","ValidValues":"","Importance":"low","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"connect","Name":"rest.advertised.listener","Description":"\n\u003cp\u003eSets the advertised listener (HTTP or HTTPS) which will be given to other workers to use.\u003c/p\u003e\n","Type":"string","Default":"null","ValidValues":"","Importance":"low","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"connect","Name":"rest.advertised.port","Description":"\n\u003cp\u003eIf this is set, this is the port that will be given out to other workers to connect to.\u003c/p\u003e\n","Type":"int","Default":"null","ValidValues":"","Importance":"low","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"connect","Name":"rest.extension.classes","Description":"\n\u003cp\u003eComma-separated names of \u003ccode\u003eConnectRestExtension\u003c/code\u003e classes, loaded and called in the order specified. Implementing the interface \u003ccode\u003eConnectRestExtension\u003c/code\u003e allows you to inject into Connect\u0026#39;s REST API user defined resources like filters. Typically used to add custom capability like logging, security, etc. \u003c/p\u003e\n","Type":"list","Default":"\"\"","ValidValues":"","Importance":"low","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"connect","Name":"rest.host.name","Description":"\n\u003cp\u003eHostname for the REST API. 
If this is set, it will only bind to this interface.\u003c/p\u003e\n","Type":"string","Default":"null","ValidValues":"","Importance":"low","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"connect","Name":"rest.port","Description":"\n\u003cp\u003ePort for the REST API to listen on.\u003c/p\u003e\n","Type":"int","Default":"8083","ValidValues":"","Importance":"low","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"connect","Name":"retry.backoff.ms","Description":"\n\u003cp\u003eThe amount of time to wait before attempting to retry a failed request to a given topic partition. This avoids repeatedly sending requests in a tight loop under some failure scenarios.\u003c/p\u003e\n","Type":"long","Default":"100","ValidValues":"[0,...]","Importance":"low","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"connect","Name":"sasl.kerberos.kinit.cmd","Description":"\n\u003cp\u003eKerberos kinit command path.\u003c/p\u003e\n","Type":"string","Default":"/usr/bin/kinit","ValidValues":"","Importance":"low","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"connect","Name":"sasl.kerberos.min.time.before.relogin","Description":"\n\u003cp\u003eLogin thread sleep time between refresh attempts.\u003c/p\u003e\n","Type":"long","Default":"60000","ValidValues":"","Importance":"low","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"connect","Name":"sasl.kerberos.ticket.renew.jitter","Description":"\n\u003cp\u003ePercentage of random jitter added to the renewal time.\u003c/p\u003e\n","Type":"double","Default":"0.05","ValidValues":"","Importance":"low","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"connect","Name":"sasl.kerberos.ticket.renew.window.factor","Description":"\n\u003cp\u003eLogin thread will sleep until the specified window factor of time from last refresh to ticket\u0026#39;s expiry has been reached, at which time it will try to renew the ticket.\u003c/p\u003e\n","Type":"double","Default":"0.8","ValidValues":"","Importance":"low","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"connect","Name":"sasl.login.refresh.buffer.seconds","Description":"\n\u003cp\u003eThe amount of buffer time before credential expiration to maintain when refreshing a credential, in seconds. If a refresh would otherwise occur closer to expiration than the number of buffer seconds then the refresh will be moved up to maintain as much of the buffer time as possible. Legal values are between 0 and 3600 (1 hour); a default value of 300 (5 minutes) is used if no value is specified. This value and sasl.login.refresh.min.period.seconds are both ignored if their sum exceeds the remaining lifetime of a credential. Currently applies only to OAUTHBEARER.\u003c/p\u003e\n","Type":"short","Default":"300","ValidValues":"[0,...,3600]","Importance":"low","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"connect","Name":"sasl.login.refresh.min.period.seconds","Description":"\n\u003cp\u003eThe desired minimum time for the login refresh thread to wait before refreshing a credential, in seconds. Legal values are between 0 and 900 (15 minutes); a default value of 60 (1 minute) is used if no value is specified. This value and sasl.login.refresh.buffer.seconds are both ignored if their sum exceeds the remaining lifetime of a credential. 
Currently applies only to OAUTHBEARER.\u003c/p\u003e\n","Type":"short","Default":"60","ValidValues":"[0,...,900]","Importance":"low","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"connect","Name":"sasl.login.refresh.window.factor","Description":"\n\u003cp\u003eLogin refresh thread will sleep until the specified window factor relative to the credential\u0026#39;s lifetime has been reached, at which time it will try to refresh the credential. Legal values are between 0.5 (50%) and 1.0 (100%) inclusive; a default value of 0.8 (80%) is used if no value is specified. Currently applies only to OAUTHBEARER.\u003c/p\u003e\n","Type":"double","Default":"0.8","ValidValues":"[0.5,...,1.0]","Importance":"low","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"connect","Name":"sasl.login.refresh.window.jitter","Description":"\n\u003cp\u003eThe maximum amount of random jitter relative to the credential\u0026#39;s lifetime that is added to the login refresh thread\u0026#39;s sleep time. Legal values are between 0 and 0.25 (25%) inclusive; a default value of 0.05 (5%) is used if no value is specified. Currently applies only to OAUTHBEARER.\u003c/p\u003e\n","Type":"double","Default":"0.05","ValidValues":"[0.0,...,0.25]","Importance":"low","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"connect","Name":"scheduled.rebalance.max.delay.ms","Description":"\n\u003cp\u003eThe maximum delay that is scheduled in order to wait for the return of one or more departed workers before rebalancing and reassigning their connectors and tasks to the group. During this period the connectors and tasks of the departed workers remain unassigned.\u003c/p\u003e\n","Type":"int","Default":"300000","ValidValues":"[0,...,2147483647]","Importance":"low","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"connect","Name":"ssl.cipher.suites","Description":"\n\u003cp\u003eA list of cipher suites. This is a named combination of authentication, encryption, MAC and key exchange algorithm used to negotiate the security settings for a network connection using TLS or SSL network protocol. By default all the available cipher suites are supported.\u003c/p\u003e\n","Type":"list","Default":"null","ValidValues":"","Importance":"low","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"connect","Name":"ssl.client.auth","Description":"\n\u003cp\u003eConfigures the Kafka broker to request client authentication. The following settings are common: \u003c/p\u003e\u003cul\u003e \u003cli\u003e\u003ccode\u003essl.client.auth=required\u003c/code\u003e If set to required, client authentication is required. \u003c/li\u003e\u003cli\u003e\u003ccode\u003essl.client.auth=requested\u003c/code\u003e This means client authentication is optional. Unlike \u003ccode\u003erequired\u003c/code\u003e, if this option is set the client can choose not to provide authentication information about itself. \u003c/li\u003e\u003cli\u003e\u003ccode\u003essl.client.auth=none\u003c/code\u003e This means client authentication is not needed.\u003c/li\u003e\u003c/ul\u003e\n","Type":"string","Default":"none","ValidValues":"","Importance":"low","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"connect","Name":"ssl.endpoint.identification.algorithm","Description":"\n\u003cp\u003eThe endpoint identification algorithm to validate server hostname using server certificate. 
\u003c/p\u003e\n","Type":"string","Default":"https","ValidValues":"","Importance":"low","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"connect","Name":"ssl.keymanager.algorithm","Description":"\n\u003cp\u003eThe algorithm used by key manager factory for SSL connections. Default value is the key manager factory algorithm configured for the Java Virtual Machine.\u003c/p\u003e\n","Type":"string","Default":"SunX509","ValidValues":"","Importance":"low","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"connect","Name":"ssl.secure.random.implementation","Description":"\n\u003cp\u003eThe SecureRandom PRNG implementation to use for SSL cryptography operations. \u003c/p\u003e\n","Type":"string","Default":"null","ValidValues":"","Importance":"low","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"connect","Name":"ssl.trustmanager.algorithm","Description":"\n\u003cp\u003eThe algorithm used by trust manager factory for SSL connections. Default value is the trust manager factory algorithm configured for the Java Virtual Machine.\u003c/p\u003e\n","Type":"string","Default":"PKIX","ValidValues":"","Importance":"low","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"connect","Name":"status.storage.partitions","Description":"\n\u003cp\u003eThe number of partitions used when creating the status storage topic\u003c/p\u003e\n","Type":"int","Default":"5","ValidValues":"[1,...]","Importance":"low","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"connect","Name":"status.storage.replication.factor","Description":"\n\u003cp\u003eReplication factor used when creating the status storage topic\u003c/p\u003e\n","Type":"short","Default":"3","ValidValues":"[1,...]","Importance":"low","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"connect","Name":"task.shutdown.graceful.timeout.ms","Description":"\n\u003cp\u003eAmount of time to wait for tasks to shutdown gracefully. This is the total amount of time, not per task. All tasks have shutdown triggered, then they are awaited sequentially.\u003c/p\u003e\n","Type":"long","Default":"5000","ValidValues":"","Importance":"low","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"connect","Name":"topic.tracking.allow.reset","Description":"\n\u003cp\u003eIf set to true, it allows user requests to reset the set of active topics per connector.\u003c/p\u003e\n","Type":"boolean","Default":"true","ValidValues":"","Importance":"low","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"connect","Name":"topic.tracking.enable","Description":"\n\u003cp\u003eEnable tracking the set of active topics per connector during runtime.\u003c/p\u003e\n","Type":"boolean","Default":"true","ValidValues":"","Importance":"low","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"connect","Name":"name","Description":"\n\u003cp\u003eGlobally unique name to use for this connector.\u003c/p\u003e\n","Type":"string","Default":"","ValidValues":"non-empty string without ISO control characters","Importance":"high","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"connect","Name":"connector.class","Description":"\n\u003cp\u003eName or alias of the class for this connector. Must be a subclass of org.apache.kafka.connect.connector.Connector. 
If the connector is org.apache.kafka.connect.file.FileStreamSinkConnector, you can either specify this full name, or use \u0026#34;FileStreamSink\u0026#34; or \u0026#34;FileStreamSinkConnector\u0026#34; to make the configuration a bit shorter\u003c/p\u003e\n","Type":"string","Default":"","ValidValues":"","Importance":"high","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"connect","Name":"tasks.max","Description":"\n\u003cp\u003eMaximum number of tasks to use for this connector.\u003c/p\u003e\n","Type":"int","Default":"1","ValidValues":"[1,...]","Importance":"high","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"connect","Name":"key.converter","Description":"\n\u003cp\u003eConverter class used to convert between Kafka Connect format and the serialized form that is written to Kafka. This controls the format of the keys in messages written to or read from Kafka, and since this is independent of connectors it allows any connector to work with any serialization format. Examples of common formats include JSON and Avro.\u003c/p\u003e\n","Type":"class","Default":"null","ValidValues":"","Importance":"low","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"connect","Name":"value.converter","Description":"\n\u003cp\u003eConverter class used to convert between Kafka Connect format and the serialized form that is written to Kafka. This controls the format of the values in messages written to or read from Kafka, and since this is independent of connectors it allows any connector to work with any serialization format. Examples of common formats include JSON and Avro.\u003c/p\u003e\n","Type":"class","Default":"null","ValidValues":"","Importance":"low","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"connect","Name":"header.converter","Description":"\n\u003cp\u003eHeaderConverter class used to convert between Kafka Connect format and the serialized form that is written to Kafka. This controls the format of the header values in messages written to or read from Kafka, and since this is independent of connectors it allows any connector to work with any serialization format. Examples of common formats include JSON and Avro. By default, the SimpleHeaderConverter is used to serialize header values to strings and deserialize them by inferring the schemas.\u003c/p\u003e\n","Type":"class","Default":"null","ValidValues":"","Importance":"low","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"connect","Name":"config.action.reload","Description":"\n\u003cp\u003eThe action that Connect should take on the connector when changes in external configuration providers result in a change in the connector\u0026#39;s configuration properties. A value of \u0026#39;none\u0026#39; indicates that Connect will do nothing. 
A value of \u0026#39;restart\u0026#39; indicates that Connect should restart/reload the connector with the updated configuration properties. The restart may actually be scheduled in the future if the external configuration provider indicates that a configuration value will expire in the future.\u003c/p\u003e\n","Type":"string","Default":"restart","ValidValues":"[none, restart]","Importance":"low","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"connect","Name":"transforms","Description":"\n\u003cp\u003eAliases for the transformations to be applied to records.\u003c/p\u003e\n","Type":"list","Default":"\"\"","ValidValues":"non-null string, unique transformation aliases","Importance":"low","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"connect","Name":"errors.retry.timeout","Description":"\n\u003cp\u003eThe maximum duration in milliseconds that a failed operation will be reattempted. The default is 0, which means no retries will be attempted. Use -1 for infinite retries.\u003c/p\u003e\n","Type":"long","Default":"0","ValidValues":"","Importance":"medium","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"connect","Name":"errors.retry.delay.max.ms","Description":"\n\u003cp\u003eThe maximum duration in milliseconds between consecutive retry attempts. Jitter will be added to the delay once this limit is reached to prevent thundering herd issues.\u003c/p\u003e\n","Type":"long","Default":"60000","ValidValues":"","Importance":"medium","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"connect","Name":"errors.tolerance","Description":"\n\u003cp\u003eBehavior for tolerating errors during connector operation. \u0026#39;none\u0026#39; is the default value and signals that any error will result in an immediate connector task failure; \u0026#39;all\u0026#39; changes the behavior to skip over problematic records.\u003c/p\u003e\n","Type":"string","Default":"none","ValidValues":"[none, all]","Importance":"medium","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"connect","Name":"errors.log.enable","Description":"\n\u003cp\u003eIf true, write each error and the details of the failed operation and problematic record to the Connect application log. This is \u0026#39;false\u0026#39; by default, so that only errors that are not tolerated are reported.\u003c/p\u003e\n","Type":"boolean","Default":"false","ValidValues":"","Importance":"medium","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"connect","Name":"errors.log.include.messages","Description":"\n\u003cp\u003eWhether to include in the log the Connect record that resulted in a failure. This is \u0026#39;false\u0026#39; by default, which will prevent record keys, values, and headers from being written to log files, although some information such as topic and partition number will still be logged.\u003c/p\u003e\n","Type":"boolean","Default":"false","ValidValues":"","Importance":"medium","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"connect","Name":"name","Description":"\n\u003cp\u003eGlobally unique name to use for this connector.\u003c/p\u003e\n","Type":"string","Default":"","ValidValues":"non-empty string without ISO control characters","Importance":"high","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"connect","Name":"connector.class","Description":"\n\u003cp\u003eName or alias of the class for this connector. Must be a subclass of org.apache.kafka.connect.connector.Connector. 
If the connector is org.apache.kafka.connect.file.FileStreamSinkConnector, you can either specify this full name, or use \u0026#34;FileStreamSink\u0026#34; or \u0026#34;FileStreamSinkConnector\u0026#34; to make the configuration a bit shorter\u003c/p\u003e\n","Type":"string","Default":"","ValidValues":"","Importance":"high","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"connect","Name":"tasks.max","Description":"\n\u003cp\u003eMaximum number of tasks to use for this connector.\u003c/p\u003e\n","Type":"int","Default":"1","ValidValues":"[1,...]","Importance":"high","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"connect","Name":"topics","Description":"\n\u003cp\u003eList of topics to consume, separated by commas\u003c/p\u003e\n","Type":"list","Default":"\"\"","ValidValues":"","Importance":"high","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"connect","Name":"topics.regex","Description":"\n\u003cp\u003eRegular expression giving topics to consume. Under the hood, the regex is compiled to a \u003ccode\u003ejava.util.regex.Pattern\u003c/code\u003e. Only one of topics or topics.regex should be specified.\u003c/p\u003e\n","Type":"string","Default":"\"\"","ValidValues":"valid regex","Importance":"high","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"connect","Name":"key.converter","Description":"\n\u003cp\u003eConverter class used to convert between Kafka Connect format and the serialized form that is written to Kafka. This controls the format of the keys in messages written to or read from Kafka, and since this is independent of connectors it allows any connector to work with any serialization format. Examples of common formats include JSON and Avro.\u003c/p\u003e\n","Type":"class","Default":"null","ValidValues":"","Importance":"low","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"connect","Name":"value.converter","Description":"\n\u003cp\u003eConverter class used to convert between Kafka Connect format and the serialized form that is written to Kafka. This controls the format of the values in messages written to or read from Kafka, and since this is independent of connectors it allows any connector to work with any serialization format. Examples of common formats include JSON and Avro.\u003c/p\u003e\n","Type":"class","Default":"null","ValidValues":"","Importance":"low","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"connect","Name":"header.converter","Description":"\n\u003cp\u003eHeaderConverter class used to convert between Kafka Connect format and the serialized form that is written to Kafka. This controls the format of the header values in messages written to or read from Kafka, and since this is independent of connectors it allows any connector to work with any serialization format. Examples of common formats include JSON and Avro. By default, the SimpleHeaderConverter is used to serialize header values to strings and deserialize them by inferring the schemas.\u003c/p\u003e\n","Type":"class","Default":"null","ValidValues":"","Importance":"low","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"connect","Name":"config.action.reload","Description":"\n\u003cp\u003eThe action that Connect should take on the connector when changes in external configuration providers result in a change in the connector\u0026#39;s configuration properties. A value of \u0026#39;none\u0026#39; indicates that Connect will do nothing. 
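Putting the sink-connector settings above together, a minimal configuration using the alias form might look like the sketch below: "FileStreamSink" resolves to org.apache.kafka.connect.file.FileStreamSinkConnector. The connector name, topic, and output file are placeholders, and only one of topics / topics.regex may be set.

```java
import java.util.HashMap;
import java.util.Map;

// Illustrative sink-connector configuration using the class-alias form
// described above.
public class SinkConnectorConfigSketch {
    public static Map<String, String> connectorConfig() {
        Map<String, String> config = new HashMap<>();
        config.put("name", "local-file-sink");           // placeholder, globally unique
        config.put("connector.class", "FileStreamSink"); // alias for the full class name
        config.put("tasks.max", "1");
        config.put("topics", "orders");                  // mutually exclusive with topics.regex
        config.put("file", "/tmp/orders.out");           // FileStreamSinkConnector's output file
        return config;
    }
}
```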
A value of \u0026#39;restart\u0026#39; indicates that Connect should restart/reload the connector with the updated configuration properties. The restart may actually be scheduled in the future if the external configuration provider indicates that a configuration value will expire in the future.\u003c/p\u003e\n","Type":"string","Default":"restart","ValidValues":"[none, restart]","Importance":"low","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"connect","Name":"transforms","Description":"\n\u003cp\u003eAliases for the transformations to be applied to records.\u003c/p\u003e\n","Type":"list","Default":"\"\"","ValidValues":"non-null string, unique transformation aliases","Importance":"low","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"connect","Name":"errors.retry.timeout","Description":"\n\u003cp\u003eThe maximum duration in milliseconds that a failed operation will be reattempted. The default is 0, which means no retries will be attempted. Use -1 for infinite retries.\u003c/p\u003e\n","Type":"long","Default":"0","ValidValues":"","Importance":"medium","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"connect","Name":"errors.retry.delay.max.ms","Description":"\n\u003cp\u003eThe maximum duration in milliseconds between consecutive retry attempts. Jitter will be added to the delay once this limit is reached to prevent thundering herd issues.\u003c/p\u003e\n","Type":"long","Default":"60000","ValidValues":"","Importance":"medium","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"connect","Name":"errors.tolerance","Description":"\n\u003cp\u003eBehavior for tolerating errors during connector operation. \u0026#39;none\u0026#39; is the default value and signals that any error will result in an immediate connector task failure; \u0026#39;all\u0026#39; changes the behavior to skip over problematic records.\u003c/p\u003e\n","Type":"string","Default":"none","ValidValues":"[none, all]","Importance":"medium","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"connect","Name":"errors.log.enable","Description":"\n\u003cp\u003eIf true, write each error and the details of the failed operation and problematic record to the Connect application log. This is \u0026#39;false\u0026#39; by default, so that only errors that are not tolerated are reported.\u003c/p\u003e\n","Type":"boolean","Default":"false","ValidValues":"","Importance":"medium","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"connect","Name":"errors.log.include.messages","Description":"\n\u003cp\u003eWhether to include in the log the Connect record that resulted in a failure. This is \u0026#39;false\u0026#39; by default, which will prevent record keys, values, and headers from being written to log files, although some information such as topic and partition number will still be logged.\u003c/p\u003e\n","Type":"boolean","Default":"false","ValidValues":"","Importance":"medium","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"connect","Name":"errors.deadletterqueue.topic.name","Description":"\n\u003cp\u003eThe name of the topic to be used as the dead letter queue (DLQ) for messages that result in an error when processed by this sink connector, or its transformations or converters. 
The topic name is blank by default, which means that no messages are to be recorded in the DLQ.\u003c/p\u003e\n","Type":"string","Default":"\"\"","ValidValues":"","Importance":"medium","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"connect","Name":"errors.deadletterqueue.topic.replication.factor","Description":"\n\u003cp\u003eReplication factor used to create the dead letter queue topic when it doesn\u0026#39;t already exist.\u003c/p\u003e\n","Type":"short","Default":"3","ValidValues":"","Importance":"medium","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"connect","Name":"errors.deadletterqueue.context.headers.enable","Description":"\n\u003cp\u003eIf true, add headers containing error context to the messages written to the dead letter queue. To avoid clashing with headers from the original record, all error context header keys will start with \u003ccode\u003e__connect.errors.\u003c/code\u003e\u003c/p\u003e\n","Type":"boolean","Default":"false","ValidValues":"","Importance":"medium","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"streams","Name":"application.id","Description":"\n\u003cp\u003eAn identifier for the stream processing application. Must be unique within the Kafka cluster. It is used as 1) the default client-id prefix, 2) the group-id for membership management, 3) the changelog topic prefix.\u003c/p\u003e\n","Type":"string","Default":"","ValidValues":"","Importance":"high","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"streams","Name":"bootstrap.servers","Description":"\n\u003cp\u003eA list of host/port pairs to use for establishing the initial connection to the Kafka cluster. The client will make use of all servers irrespective of which servers are specified here for bootstrapping—this list only impacts the initial hosts used to discover the full set of servers. This list should be in the form \u003ccode\u003ehost1:port1,host2:port2,...\u003c/code\u003e. Since these servers are just used for the initial connection to discover the full cluster membership (which may change dynamically), this list need not contain the full set of servers (you may want more than one, though, in case a server is down).\u003c/p\u003e\n","Type":"list","Default":"","ValidValues":"","Importance":"high","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"streams","Name":"replication.factor","Description":"\n\u003cp\u003eThe replication factor for change log topics and repartition topics created by the stream processing application.\u003c/p\u003e\n","Type":"int","Default":"1","ValidValues":"","Importance":"high","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"streams","Name":"state.dir","Description":"\n\u003cp\u003eDirectory location for state store. 
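The errors.* settings described above compose into a typical sink-connector error-handling policy: tolerate bad records, retry transient failures with a bounded delay, log failures, and route them to a dead letter queue with context headers. An illustrative combination follows; the DLQ topic name is a placeholder.

```java
import java.util.HashMap;
import java.util.Map;

// Illustrative combination of the errors.* settings for a sink connector.
public class ErrorHandlingConfigSketch {
    public static Map<String, String> errorHandling() {
        Map<String, String> config = new HashMap<>();
        config.put("errors.tolerance", "all");               // skip over problematic records
        config.put("errors.retry.timeout", "300000");        // retry for up to 5 minutes
        config.put("errors.retry.delay.max.ms", "60000");    // cap the delay between retries
        config.put("errors.log.enable", "true");             // log each failed operation
        config.put("errors.deadletterqueue.topic.name", "dlq-orders"); // placeholder topic
        config.put("errors.deadletterqueue.context.headers.enable", "true");
        return config;
    }
}
```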
This path must be unique for each streams instance sharing the same underlying filesystem.\u003c/p\u003e\n","Type":"string","Default":"/tmp/kafka-streams","ValidValues":"","Importance":"high","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"streams","Name":"cache.max.bytes.buffering","Description":"\n\u003cp\u003eMaximum number of memory bytes to be used for buffering across all threads.\u003c/p\u003e\n","Type":"long","Default":"10485760","ValidValues":"[0,...]","Importance":"medium","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"streams","Name":"client.id","Description":"\n\u003cp\u003eAn ID prefix string used for the client IDs of internal consumer, producer and restore-consumer, with pattern \u0026#39;\u0026lt;client.id\u0026gt;-StreamThread-\u0026lt;threadSequenceNumber\u0026gt;-\u0026lt;consumer|producer|restore-consumer\u0026gt;\u0026#39;.\u003c/p\u003e\n","Type":"string","Default":"\"\"","ValidValues":"","Importance":"medium","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"streams","Name":"default.deserialization.exception.handler","Description":"\n\u003cp\u003eException handling class that implements the \u003ccode\u003eorg.apache.kafka.streams.errors.DeserializationExceptionHandler\u003c/code\u003e interface.\u003c/p\u003e\n","Type":"class","Default":"org.apache.kafka.streams.errors.LogAndFailExceptionHandler","ValidValues":"","Importance":"medium","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"streams","Name":"default.key.serde","Description":"\n\u003cp\u003e Default serializer / deserializer class for key that implements the \u003ccode\u003eorg.apache.kafka.common.serialization.Serde\u003c/code\u003e interface. Note when windowed serde class is used, one needs to set the inner serde class that implements the \u003ccode\u003eorg.apache.kafka.common.serialization.Serde\u003c/code\u003e interface via \u0026#39;default.windowed.key.serde.inner\u0026#39; or \u0026#39;default.windowed.value.serde.inner\u0026#39; as well.\u003c/p\u003e\n","Type":"class","Default":"org.apache.kafka.common.serialization.Serdes$ByteArraySerde","ValidValues":"","Importance":"medium","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"streams","Name":"default.production.exception.handler","Description":"\n\u003cp\u003eException handling class that implements the \u003ccode\u003eorg.apache.kafka.streams.errors.ProductionExceptionHandler\u003c/code\u003e interface.\u003c/p\u003e\n","Type":"class","Default":"org.apache.kafka.streams.errors.DefaultProductionExceptionHandler","ValidValues":"","Importance":"medium","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"streams","Name":"default.timestamp.extractor","Description":"\n\u003cp\u003eDefault timestamp extractor class that implements the \u003ccode\u003eorg.apache.kafka.streams.processor.TimestampExtractor\u003c/code\u003e interface.\u003c/p\u003e\n","Type":"class","Default":"org.apache.kafka.streams.processor.FailOnInvalidTimestamp","ValidValues":"","Importance":"medium","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"streams","Name":"default.value.serde","Description":"\n\u003cp\u003eDefault serializer / deserializer class for value that implements the \u003ccode\u003eorg.apache.kafka.common.serialization.Serde\u003c/code\u003e interface. 
Note when windowed serde class is used, one needs to set the inner serde class that implements the \u003ccode\u003eorg.apache.kafka.common.serialization.Serde\u003c/code\u003e interface via \u0026#39;default.windowed.key.serde.inner\u0026#39; or \u0026#39;default.windowed.value.serde.inner\u0026#39; as well.\u003c/p\u003e\n","Type":"class","Default":"org.apache.kafka.common.serialization.Serdes$ByteArraySerde","ValidValues":"","Importance":"medium","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"streams","Name":"max.task.idle.ms","Description":"\n\u003cp\u003eMaximum amount of time a stream task will stay idle when not all of its partition buffers contain records, to avoid potential out-of-order record processing across multiple input streams.\u003c/p\u003e\n","Type":"long","Default":"0","ValidValues":"","Importance":"medium","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"streams","Name":"num.standby.replicas","Description":"\n\u003cp\u003eThe number of standby replicas for each task.\u003c/p\u003e\n","Type":"int","Default":"0","ValidValues":"","Importance":"medium","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"streams","Name":"num.stream.threads","Description":"\n\u003cp\u003eThe number of threads to execute stream processing.\u003c/p\u003e\n","Type":"int","Default":"1","ValidValues":"","Importance":"medium","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"streams","Name":"processing.guarantee","Description":"\n\u003cp\u003eThe processing guarantee that should be used. Possible values are \u003ccode\u003eat_least_once\u003c/code\u003e (default) and \u003ccode\u003eexactly_once\u003c/code\u003e. Note that exactly-once processing requires a cluster of at least three brokers by default, which is the recommended setting for production; for development you can change this by adjusting the broker settings \u003ccode\u003etransaction.state.log.replication.factor\u003c/code\u003e and \u003ccode\u003etransaction.state.log.min.isr\u003c/code\u003e.\u003c/p\u003e\n","Type":"string","Default":"at_least_once","ValidValues":"[at_least_once, exactly_once]","Importance":"medium","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"streams","Name":"security.protocol","Description":"\n\u003cp\u003eProtocol used to communicate with brokers. 
Valid values are: PLAINTEXT, SSL, SASL_PLAINTEXT, SASL_SSL.\u003c/p\u003e\n","Type":"string","Default":"PLAINTEXT","ValidValues":"","Importance":"medium","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"streams","Name":"topology.optimization","Description":"\n\u003cp\u003eA configuration telling Kafka Streams whether it should optimize the topology; disabled by default.\u003c/p\u003e\n","Type":"string","Default":"none","ValidValues":"[none, all]","Importance":"medium","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"streams","Name":"application.server","Description":"\n\u003cp\u003eA host:port pair pointing to a user-defined endpoint that can be used for state store discovery and interactive queries on this KafkaStreams instance.\u003c/p\u003e\n","Type":"string","Default":"\"\"","ValidValues":"","Importance":"low","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"streams","Name":"buffered.records.per.partition","Description":"\n\u003cp\u003eMaximum number of records to buffer per partition.\u003c/p\u003e\n","Type":"int","Default":"1000","ValidValues":"","Importance":"low","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"streams","Name":"built.in.metrics.version","Description":"\n\u003cp\u003eVersion of the built-in metrics to use.\u003c/p\u003e\n","Type":"string","Default":"latest","ValidValues":"[0.10.0-2.4, latest]","Importance":"low","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"streams","Name":"commit.interval.ms","Description":"\n\u003cp\u003eThe frequency with which to save the position of the processor. (Note, if \u003ccode\u003eprocessing.guarantee\u003c/code\u003e is set to \u003ccode\u003eexactly_once\u003c/code\u003e, the default value is \u003ccode\u003e100\u003c/code\u003e, otherwise the default value is \u003ccode\u003e30000\u003c/code\u003e.)\u003c/p\u003e\n","Type":"long","Default":"30000","ValidValues":"[0,...]","Importance":"low","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"streams","Name":"connections.max.idle.ms","Description":"\n\u003cp\u003eClose idle connections after the number of milliseconds specified by this config.\u003c/p\u003e\n","Type":"long","Default":"540000","ValidValues":"","Importance":"low","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"streams","Name":"metadata.max.age.ms","Description":"\n\u003cp\u003eThe period of time in milliseconds after which we force a refresh of metadata even if we haven\u0026#39;t seen any partition leadership changes to proactively discover any new brokers or partitions.\u003c/p\u003e\n","Type":"long","Default":"300000","ValidValues":"[0,...]","Importance":"low","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"streams","Name":"metric.reporters","Description":"\n\u003cp\u003eA list of classes to use as metrics reporters. Implementing the \u003ccode\u003eorg.apache.kafka.common.metrics.MetricsReporter\u003c/code\u003e interface allows plugging in classes that will be notified of new metric creation. 
The JmxReporter is always included to register JMX statistics.\u003c/p\u003e\n","Type":"list","Default":"\"\"","ValidValues":"","Importance":"low","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"streams","Name":"metrics.num.samples","Description":"\n\u003cp\u003eThe number of samples maintained to compute metrics.\u003c/p\u003e\n","Type":"int","Default":"2","ValidValues":"[1,...]","Importance":"low","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"streams","Name":"metrics.recording.level","Description":"\n\u003cp\u003eThe highest recording level for metrics.\u003c/p\u003e\n","Type":"string","Default":"INFO","ValidValues":"[INFO, DEBUG]","Importance":"low","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"streams","Name":"metrics.sample.window.ms","Description":"\n\u003cp\u003eThe window of time a metrics sample is computed over.\u003c/p\u003e\n","Type":"long","Default":"30000","ValidValues":"[0,...]","Importance":"low","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"streams","Name":"partition.grouper","Description":"\n\u003cp\u003ePartition grouper class that implements the \u003ccode\u003eorg.apache.kafka.streams.processor.PartitionGrouper\u003c/code\u003e interface. WARNING: This config is deprecated and will be removed in the 3.0.0 release.\u003c/p\u003e\n","Type":"class","Default":"org.apache.kafka.streams.processor.DefaultPartitionGrouper","ValidValues":"","Importance":"low","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"streams","Name":"poll.ms","Description":"\n\u003cp\u003eThe amount of time in milliseconds to block waiting for input.\u003c/p\u003e\n","Type":"long","Default":"100","ValidValues":"","Importance":"low","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"streams","Name":"receive.buffer.bytes","Description":"\n\u003cp\u003eThe size of the TCP receive buffer (SO_RCVBUF) to use when reading data. If the value is -1, the OS default will be used.\u003c/p\u003e\n","Type":"int","Default":"32768","ValidValues":"[-1,...]","Importance":"low","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"streams","Name":"reconnect.backoff.max.ms","Description":"\n\u003cp\u003eThe maximum amount of time in milliseconds to wait when reconnecting to a broker that has repeatedly failed to connect. If provided, the backoff per host will increase exponentially for each consecutive connection failure, up to this maximum. After calculating the backoff increase, 20% random jitter is added to avoid connection storms.\u003c/p\u003e\n","Type":"long","Default":"1000","ValidValues":"[0,...]","Importance":"low","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"streams","Name":"reconnect.backoff.ms","Description":"\n\u003cp\u003eThe base amount of time to wait before attempting to reconnect to a given host. This avoids repeatedly connecting to a host in a tight loop. This backoff applies to all connection attempts by the client to a broker.\u003c/p\u003e\n","Type":"long","Default":"50","ValidValues":"[0,...]","Importance":"low","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"streams","Name":"request.timeout.ms","Description":"\n\u003cp\u003eThe configuration controls the maximum amount of time the client will wait for the response of a request. 
If the response is not received before the timeout elapses the client will resend the request if necessary or fail the request if retries are exhausted.\u003c/p\u003e\n","Type":"int","Default":"40000","ValidValues":"[0,...]","Importance":"low","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"streams","Name":"retries","Description":"\n\u003cp\u003eSetting a value greater than zero will cause the client to resend any request that fails with a potentially transient error.\u003c/p\u003e\n","Type":"int","Default":"0","ValidValues":"[0,...,2147483647]","Importance":"low","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"streams","Name":"retry.backoff.ms","Description":"\n\u003cp\u003eThe amount of time to wait before attempting to retry a failed request to a given topic partition. This avoids repeatedly sending requests in a tight loop under some failure scenarios.\u003c/p\u003e\n","Type":"long","Default":"100","ValidValues":"[0,...]","Importance":"low","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"streams","Name":"rocksdb.config.setter","Description":"\n\u003cp\u003eA RocksDB config setter class or class name that implements the \u003ccode\u003eorg.apache.kafka.streams.state.RocksDBConfigSetter\u003c/code\u003e interface.\u003c/p\u003e\n","Type":"class","Default":"null","ValidValues":"","Importance":"low","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"streams","Name":"send.buffer.bytes","Description":"\n\u003cp\u003eThe size of the TCP send buffer (SO_SNDBUF) to use when sending data. If the value is -1, the OS default will be used.\u003c/p\u003e\n","Type":"int","Default":"131072","ValidValues":"[-1,...]","Importance":"low","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"streams","Name":"state.cleanup.delay.ms","Description":"\n\u003cp\u003eThe amount of time in milliseconds to wait before deleting state when a partition has migrated. Only state directories that have not been modified for at least \u003ccode\u003estate.cleanup.delay.ms\u003c/code\u003e will be removed.\u003c/p\u003e\n","Type":"long","Default":"600000","ValidValues":"","Importance":"low","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"streams","Name":"upgrade.from","Description":"\n\u003cp\u003eAllows upgrading in a backward compatible way. This is needed when upgrading from [0.10.0, 1.1] to 2.0+, or when upgrading from [2.0, 2.3] to 2.4+. When upgrading from 2.4 to a newer version it is not required to specify this config. Default is null. Accepted values are \u0026#34;0.10.0\u0026#34;, \u0026#34;0.10.1\u0026#34;, \u0026#34;0.10.2\u0026#34;, \u0026#34;0.11.0\u0026#34;, \u0026#34;1.0\u0026#34;, \u0026#34;1.1\u0026#34;, \u0026#34;2.0\u0026#34;, \u0026#34;2.1\u0026#34;, \u0026#34;2.2\u0026#34;, \u0026#34;2.3\u0026#34; (for upgrading from the corresponding old version).\u003c/p\u003e\n","Type":"string","Default":"null","ValidValues":"[null, 0.10.0, 0.10.1, 0.10.2, 0.11.0, 1.0, 1.1, 2.0, 2.1, 2.2, 2.3]","Importance":"low","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"streams","Name":"windowstore.changelog.additional.retention.ms","Description":"\n\u003cp\u003eAdded to a window\u0026#39;s maintainMs to ensure data is not deleted from the log prematurely. Allows for clock drift. 
Default is 1 day.\u003c/p\u003e\n","Type":"long","Default":"86400000","ValidValues":"","Importance":"low","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"adminclient","Name":"bootstrap.servers","Description":"\n\u003cp\u003eA list of host/port pairs to use for establishing the initial connection to the Kafka cluster. The client will make use of all servers irrespective of which servers are specified here for bootstrapping—this list only impacts the initial hosts used to discover the full set of servers. This list should be in the form \u003ccode\u003ehost1:port1,host2:port2,...\u003c/code\u003e. Since these servers are just used for the initial connection to discover the full cluster membership (which may change dynamically), this list need not contain the full set of servers (you may want more than one, though, in case a server is down).\u003c/p\u003e\n","Type":"list","Default":"","ValidValues":"","Importance":"high","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"adminclient","Name":"ssl.key.password","Description":"\n\u003cp\u003eThe password of the private key in the key store file. This is optional for clients.\u003c/p\u003e\n","Type":"password","Default":"null","ValidValues":"","Importance":"high","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"adminclient","Name":"ssl.keystore.location","Description":"\n\u003cp\u003eThe location of the key store file. This is optional for clients and can be used for two-way client authentication.\u003c/p\u003e\n","Type":"string","Default":"null","ValidValues":"","Importance":"high","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"adminclient","Name":"ssl.keystore.password","Description":"\n\u003cp\u003eThe store password for the key store file. This is optional for clients and only needed if ssl.keystore.location is configured. \u003c/p\u003e\n","Type":"password","Default":"null","ValidValues":"","Importance":"high","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"adminclient","Name":"ssl.truststore.location","Description":"\n\u003cp\u003eThe location of the trust store file. \u003c/p\u003e\n","Type":"string","Default":"null","ValidValues":"","Importance":"high","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"adminclient","Name":"ssl.truststore.password","Description":"\n\u003cp\u003eThe password for the trust store file. If a password is not set, access to the truststore is still available, but integrity checking is disabled.\u003c/p\u003e\n","Type":"password","Default":"null","ValidValues":"","Importance":"high","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"adminclient","Name":"client.dns.lookup","Description":"\n\u003cp\u003eControls how the client uses DNS lookups. If set to \u003ccode\u003euse_all_dns_ips\u003c/code\u003e then, when the lookup returns multiple IP addresses for a hostname, the client will attempt to connect to all of them before failing the connection. Applies to both bootstrap and advertised servers. If the value is \u003ccode\u003eresolve_canonical_bootstrap_servers_only\u003c/code\u003e, each entry will be resolved and expanded into a list of canonical names.\u003c/p\u003e\n","Type":"string","Default":"default","ValidValues":"[default, use_all_dns_ips, resolve_canonical_bootstrap_servers_only]","Importance":"medium","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"adminclient","Name":"client.id","Description":"\n\u003cp\u003eAn id string to pass to the server when making requests. 
The purpose of this is to be able to track the source of requests beyond just ip/port by allowing a logical application name to be included in server-side request logging.\u003c/p\u003e\n","Type":"string","Default":"\"\"","ValidValues":"","Importance":"medium","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"adminclient","Name":"connections.max.idle.ms","Description":"\n\u003cp\u003eClose idle connections after the number of milliseconds specified by this config.\u003c/p\u003e\n","Type":"long","Default":"300000","ValidValues":"","Importance":"medium","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"adminclient","Name":"default.api.timeout.ms","Description":"\n\u003cp\u003eSpecifies the timeout (in milliseconds) for client APIs. This configuration is used as the default timeout for all client operations that do not specify a \u003ccode\u003etimeout\u003c/code\u003e parameter.\u003c/p\u003e\n","Type":"int","Default":"60000","ValidValues":"[0,...]","Importance":"medium","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"adminclient","Name":"receive.buffer.bytes","Description":"\n\u003cp\u003eThe size of the TCP receive buffer (SO_RCVBUF) to use when reading data. If the value is -1, the OS default will be used.\u003c/p\u003e\n","Type":"int","Default":"65536","ValidValues":"[-1,...]","Importance":"medium","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"adminclient","Name":"request.timeout.ms","Description":"\n\u003cp\u003eThe configuration controls the maximum amount of time the client will wait for the response of a request. If the response is not received before the timeout elapses the client will resend the request if necessary or fail the request if retries are exhausted.\u003c/p\u003e\n","Type":"int","Default":"30000","ValidValues":"[0,...]","Importance":"medium","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"adminclient","Name":"sasl.client.callback.handler.class","Description":"\n\u003cp\u003eThe fully qualified name of a SASL client callback handler class that implements the AuthenticateCallbackHandler interface.\u003c/p\u003e\n","Type":"class","Default":"null","ValidValues":"","Importance":"medium","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"adminclient","Name":"sasl.jaas.config","Description":"\n\u003cp\u003eJAAS login context parameters for SASL connections in the format used by JAAS configuration files. JAAS configuration file format is described \u003ca href=\"http://docs.oracle.com/javase/8/docs/technotes/guides/security/jgss/tutorials/LoginConfigFile.html\"\u003ehere\u003c/a\u003e. The format for the value is: \u0026#39;\u003ccode\u003eloginModuleClass controlFlag (optionName=optionValue)*;\u003c/code\u003e\u0026#39;. For brokers, the config must be prefixed with listener prefix and SASL mechanism name in lower-case. For example, listener.name.sasl_ssl.scram-sha-256.sasl.jaas.config=com.example.ScramLoginModule required;\u003c/p\u003e\n","Type":"password","Default":"null","ValidValues":"","Importance":"medium","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"adminclient","Name":"sasl.kerberos.service.name","Description":"\n\u003cp\u003eThe Kerberos principal name that Kafka runs as. 
This can be defined either in Kafka\u0026#39;s JAAS config or in Kafka\u0026#39;s config.\u003c/p\u003e\n","Type":"string","Default":"null","ValidValues":"","Importance":"medium","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"adminclient","Name":"sasl.login.callback.handler.class","Description":"\n\u003cp\u003eThe fully qualified name of a SASL login callback handler class that implements the AuthenticateCallbackHandler interface. For brokers, login callback handler config must be prefixed with listener prefix and SASL mechanism name in lower-case. For example, listener.name.sasl_ssl.scram-sha-256.sasl.login.callback.handler.class=com.example.CustomScramLoginCallbackHandler\u003c/p\u003e\n","Type":"class","Default":"null","ValidValues":"","Importance":"medium","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"adminclient","Name":"sasl.login.class","Description":"\n\u003cp\u003eThe fully qualified name of a class that implements the Login interface. For brokers, login config must be prefixed with listener prefix and SASL mechanism name in lower-case. For example, listener.name.sasl_ssl.scram-sha-256.sasl.login.class=com.example.CustomScramLogin\u003c/p\u003e\n","Type":"class","Default":"null","ValidValues":"","Importance":"medium","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"adminclient","Name":"sasl.mechanism","Description":"\n\u003cp\u003eSASL mechanism used for client connections. This may be any mechanism for which a security provider is available. GSSAPI is the default mechanism.\u003c/p\u003e\n","Type":"string","Default":"GSSAPI","ValidValues":"","Importance":"medium","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"adminclient","Name":"security.protocol","Description":"\n\u003cp\u003eProtocol used to communicate with brokers. Valid values are: PLAINTEXT, SSL, SASL_PLAINTEXT, SASL_SSL.\u003c/p\u003e\n","Type":"string","Default":"PLAINTEXT","ValidValues":"","Importance":"medium","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"adminclient","Name":"send.buffer.bytes","Description":"\n\u003cp\u003eThe size of the TCP send buffer (SO_SNDBUF) to use when sending data. If the value is -1, the OS default will be used.\u003c/p\u003e\n","Type":"int","Default":"131072","ValidValues":"[-1,...]","Importance":"medium","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"adminclient","Name":"ssl.enabled.protocols","Description":"\n\u003cp\u003eThe list of protocols enabled for SSL connections.\u003c/p\u003e\n","Type":"list","Default":"TLSv1.2","ValidValues":"","Importance":"medium","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"adminclient","Name":"ssl.keystore.type","Description":"\n\u003cp\u003eThe file format of the key store file. This is optional for clients.\u003c/p\u003e\n","Type":"string","Default":"JKS","ValidValues":"","Importance":"medium","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"adminclient","Name":"ssl.protocol","Description":"\n\u003cp\u003eThe SSL protocol used to generate the SSLContext. Default setting is TLSv1.2, which is fine for most cases. Allowed values in recent JVMs are TLSv1.2 and TLSv1.3. 
TLS, TLSv1.1, SSL, SSLv2 and SSLv3 may be supported in older JVMs, but their usage is discouraged due to known security vulnerabilities.\u003c/p\u003e\n","Type":"string","Default":"TLSv1.2","ValidValues":"","Importance":"medium","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"adminclient","Name":"ssl.provider","Description":"\n\u003cp\u003eThe name of the security provider used for SSL connections. Default value is the default security provider of the JVM.\u003c/p\u003e\n","Type":"string","Default":"null","ValidValues":"","Importance":"medium","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"adminclient","Name":"ssl.truststore.type","Description":"\n\u003cp\u003eThe file format of the trust store file.\u003c/p\u003e\n","Type":"string","Default":"JKS","ValidValues":"","Importance":"medium","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"adminclient","Name":"metadata.max.age.ms","Description":"\n\u003cp\u003eThe period of time in milliseconds after which we force a refresh of metadata even if we haven\u0026#39;t seen any partition leadership changes to proactively discover any new brokers or partitions.\u003c/p\u003e\n","Type":"long","Default":"300000","ValidValues":"[0,...]","Importance":"low","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"adminclient","Name":"metric.reporters","Description":"\n\u003cp\u003eA list of classes to use as metrics reporters. Implementing the \u003ccode\u003eorg.apache.kafka.common.metrics.MetricsReporter\u003c/code\u003e interface allows plugging in classes that will be notified of new metric creation. The JmxReporter is always included to register JMX statistics.\u003c/p\u003e\n","Type":"list","Default":"\"\"","ValidValues":"","Importance":"low","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"adminclient","Name":"metrics.num.samples","Description":"\n\u003cp\u003eThe number of samples maintained to compute metrics.\u003c/p\u003e\n","Type":"int","Default":"2","ValidValues":"[1,...]","Importance":"low","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"adminclient","Name":"metrics.recording.level","Description":"\n\u003cp\u003eThe highest recording level for metrics.\u003c/p\u003e\n","Type":"string","Default":"INFO","ValidValues":"[INFO, DEBUG]","Importance":"low","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"adminclient","Name":"metrics.sample.window.ms","Description":"\n\u003cp\u003eThe window of time a metrics sample is computed over.\u003c/p\u003e\n","Type":"long","Default":"30000","ValidValues":"[0,...]","Importance":"low","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"adminclient","Name":"reconnect.backoff.max.ms","Description":"\n\u003cp\u003eThe maximum amount of time in milliseconds to wait when reconnecting to a broker that has repeatedly failed to connect. If provided, the backoff per host will increase exponentially for each consecutive connection failure, up to this maximum. After calculating the backoff increase, 20% random jitter is added to avoid connection storms.\u003c/p\u003e\n","Type":"long","Default":"1000","ValidValues":"[0,...]","Importance":"low","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"adminclient","Name":"reconnect.backoff.ms","Description":"\n\u003cp\u003eThe base amount of time to wait before attempting to reconnect to a given host. This avoids repeatedly connecting to a host in a tight loop. 
This backoff applies to all connection attempts by the client to a broker.\u003c/p\u003e\n","Type":"long","Default":"50","ValidValues":"[0,...]","Importance":"low","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"adminclient","Name":"retries","Description":"\n\u003cp\u003eSetting a value greater than zero will cause the client to resend any request that fails with a potentially transient error.\u003c/p\u003e\n","Type":"int","Default":"2147483647","ValidValues":"[0,...,2147483647]","Importance":"low","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"adminclient","Name":"retry.backoff.ms","Description":"\n\u003cp\u003eThe amount of time to wait before attempting to retry a failed request. This avoids repeatedly sending requests in a tight loop under some failure scenarios.\u003c/p\u003e\n","Type":"long","Default":"100","ValidValues":"[0,...]","Importance":"low","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"adminclient","Name":"sasl.kerberos.kinit.cmd","Description":"\n\u003cp\u003eKerberos kinit command path.\u003c/p\u003e\n","Type":"string","Default":"/usr/bin/kinit","ValidValues":"","Importance":"low","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"adminclient","Name":"sasl.kerberos.min.time.before.relogin","Description":"\n\u003cp\u003eLogin thread sleep time between refresh attempts.\u003c/p\u003e\n","Type":"long","Default":"60000","ValidValues":"","Importance":"low","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"adminclient","Name":"sasl.kerberos.ticket.renew.jitter","Description":"\n\u003cp\u003ePercentage of random jitter added to the renewal time.\u003c/p\u003e\n","Type":"double","Default":"0.05","ValidValues":"","Importance":"low","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"adminclient","Name":"sasl.kerberos.ticket.renew.window.factor","Description":"\n\u003cp\u003eLogin thread will sleep until the specified window factor of time from last refresh to ticket\u0026#39;s expiry has been reached, at which time it will try to renew the ticket.\u003c/p\u003e\n","Type":"double","Default":"0.8","ValidValues":"","Importance":"low","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"adminclient","Name":"sasl.login.refresh.buffer.seconds","Description":"\n\u003cp\u003eThe amount of buffer time before credential expiration to maintain when refreshing a credential, in seconds. If a refresh would otherwise occur closer to expiration than the number of buffer seconds then the refresh will be moved up to maintain as much of the buffer time as possible. Legal values are between 0 and 3600 (1 hour); a default value of 300 (5 minutes) is used if no value is specified. This value and sasl.login.refresh.min.period.seconds are both ignored if their sum exceeds the remaining lifetime of a credential. Currently applies only to OAUTHBEARER.\u003c/p\u003e\n","Type":"short","Default":"300","ValidValues":"[0,...,3600]","Importance":"low","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"adminclient","Name":"sasl.login.refresh.min.period.seconds","Description":"\n\u003cp\u003eThe desired minimum time for the login refresh thread to wait before refreshing a credential, in seconds. Legal values are between 0 and 900 (15 minutes); a default value of 60 (1 minute) is used if no value is specified. This value and sasl.login.refresh.buffer.seconds are both ignored if their sum exceeds the remaining lifetime of a credential. 
Currently applies only to OAUTHBEARER.\u003c/p\u003e\n","Type":"short","Default":"60","ValidValues":"[0,...,900]","Importance":"low","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"adminclient","Name":"sasl.login.refresh.window.factor","Description":"\n\u003cp\u003eLogin refresh thread will sleep until the specified window factor relative to the credential\u0026#39;s lifetime has been reached, at which time it will try to refresh the credential. Legal values are between 0.5 (50%) and 1.0 (100%) inclusive; a default value of 0.8 (80%) is used if no value is specified. Currently applies only to OAUTHBEARER.\u003c/p\u003e\n","Type":"double","Default":"0.8","ValidValues":"[0.5,...,1.0]","Importance":"low","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"adminclient","Name":"sasl.login.refresh.window.jitter","Description":"\n\u003cp\u003eThe maximum amount of random jitter relative to the credential\u0026#39;s lifetime that is added to the login refresh thread\u0026#39;s sleep time. Legal values are between 0 and 0.25 (25%) inclusive; a default value of 0.05 (5%) is used if no value is specified. Currently applies only to OAUTHBEARER.\u003c/p\u003e\n","Type":"double","Default":"0.05","ValidValues":"[0.0,...,0.25]","Importance":"low","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"adminclient","Name":"security.providers","Description":"\n\u003cp\u003eA list of configurable creator classes each returning a provider implementing security algorithms. These classes should implement the \u003ccode\u003eorg.apache.kafka.common.security.auth.SecurityProviderCreator\u003c/code\u003e interface.\u003c/p\u003e\n","Type":"string","Default":"null","ValidValues":"","Importance":"low","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"adminclient","Name":"ssl.cipher.suites","Description":"\n\u003cp\u003eA list of cipher suites. This is a named combination of authentication, encryption, MAC and key exchange algorithm used to negotiate the security settings for a network connection using TLS or SSL network protocol. By default all the available cipher suites are supported.\u003c/p\u003e\n","Type":"list","Default":"null","ValidValues":"","Importance":"low","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"adminclient","Name":"ssl.endpoint.identification.algorithm","Description":"\n\u003cp\u003eThe endpoint identification algorithm to validate server hostname using server certificate. \u003c/p\u003e\n","Type":"string","Default":"https","ValidValues":"","Importance":"low","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"adminclient","Name":"ssl.keymanager.algorithm","Description":"\n\u003cp\u003eThe algorithm used by key manager factory for SSL connections. Default value is the key manager factory algorithm configured for the Java Virtual Machine.\u003c/p\u003e\n","Type":"string","Default":"SunX509","ValidValues":"","Importance":"low","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"adminclient","Name":"ssl.secure.random.implementation","Description":"\n\u003cp\u003eThe SecureRandom PRNG implementation to use for SSL cryptography operations. \u003c/p\u003e\n","Type":"string","Default":"null","ValidValues":"","Importance":"low","DynamicUpdateMode":"","ServerDefaultProperty":""},{"Category":"adminclient","Name":"ssl.trustmanager.algorithm","Description":"\n\u003cp\u003eThe algorithm used by trust manager factory for SSL connections. 
Default value is the trust manager factory algorithm configured for the Java Virtual Machine.\u003c/p\u003e\n","Type":"string","Default":"PKIX","ValidValues":"","Importance":"low","DynamicUpdateMode":"","ServerDefaultProperty":""}]
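
Worked example (illustrative, appended after the JSON payload; not part of the reference data): the "streams" and "adminclient" entries above are consumed by the standard Apache Kafka 2.5 Java clients, and the following minimal sketch shows how a handful of them map onto client construction. The broker addresses, topic names, truststore path, and password are hypothetical placeholders; the config keys and typed constants are the ones documented above.

import java.util.Properties;
import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.common.config.SslConfigs;
import org.apache.kafka.streams.KafkaStreams;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.StreamsConfig;

public class KafkaConfigSketch {
    public static void main(String[] args) throws Exception {
        // "streams" category: application.id, bootstrap.servers, replication.factor,
        // state.dir, processing.guarantee. Broker address and topics are placeholders.
        Properties streamsProps = new Properties();
        streamsProps.put(StreamsConfig.APPLICATION_ID_CONFIG, "example-app");
        streamsProps.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "broker1:9092");
        streamsProps.put(StreamsConfig.REPLICATION_FACTOR_CONFIG, 3);
        streamsProps.put(StreamsConfig.STATE_DIR_CONFIG, "/var/lib/kafka-streams");
        // exactly_once assumes a cluster of at least three brokers with the default
        // transaction.state.log.* broker settings, as noted in the entry above.
        streamsProps.put(StreamsConfig.PROCESSING_GUARANTEE_CONFIG, StreamsConfig.EXACTLY_ONCE);

        StreamsBuilder builder = new StreamsBuilder();
        builder.stream("input-topic").to("output-topic"); // trivial pass-through topology
        KafkaStreams streams = new KafkaStreams(builder.build(), streamsProps);
        streams.start(); // a real application would also register a shutdown hook calling streams.close()

        // "adminclient" category: bootstrap.servers, security.protocol, retries,
        // and the ssl.truststore.* settings. Path and password are placeholders.
        Properties adminProps = new Properties();
        adminProps.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "broker1:9093");
        adminProps.put(AdminClientConfig.SECURITY_PROTOCOL_CONFIG, "SSL");
        adminProps.put(AdminClientConfig.RETRIES_CONFIG, 5);
        adminProps.put(SslConfigs.SSL_TRUSTSTORE_LOCATION_CONFIG, "/etc/kafka/client.truststore.jks");
        adminProps.put(SslConfigs.SSL_TRUSTSTORE_PASSWORD_CONFIG, "changeit");
        try (AdminClient admin = AdminClient.create(adminProps)) {
            System.out.println(admin.listTopics().names().get()); // verify connectivity
        }
    }
}

Using the typed StreamsConfig, AdminClientConfig, and SslConfigs constants rather than raw strings keeps the keys in sync with the reference entries above; everything else in the sketch (ports, paths, topic names, the retries value of 5) is an assumption chosen for illustration only.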