@@ -40,9 +40,8 @@
  * </ul>
  *
  * <p>
- * In particular, the {@link #headers()} method returns a mutable collection of headers. If multiple
- * threads access or modify these headers concurrently, it may lead to race conditions or inconsistent
- * states. It is the responsibility of the user to ensure that multi-threaded access is properly synchronized.
+ * However, the {@link #headers()} collection and individual header instances are
+ * <b>read thread-safe</b>, allowing concurrent access for reading without additional synchronization.
  *
  * <p>
  * Refer to the {@link KafkaConsumer} documentation for more details on multi-threaded consumption and processing strategies.
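
For context on the guarantee documented above: once a record has been handed to the application, multiple threads may read the same headers concurrently without external locking, as long as none of them mutates the collection. Below is a minimal sketch of such read-only sharing; the use of RecordHeaders directly and the CompletableFuture setup are illustrative only and not part of this change.

import org.apache.kafka.common.header.Header;
import org.apache.kafka.common.header.Headers;
import org.apache.kafka.common.header.internals.RecordHeaders;

import java.nio.charset.StandardCharsets;
import java.util.concurrent.CompletableFuture;

public class ConcurrentHeaderReadSketch {
    public static void main(String[] args) {
        Headers headers = new RecordHeaders();
        headers.add("trace-id", "abc123".getBytes(StandardCharsets.UTF_8));

        // Both readers touch the same Header instances concurrently; neither mutates them.
        CompletableFuture<Void> keys = CompletableFuture.runAsync(() -> headers.forEach(Header::key));
        CompletableFuture<Void> values = CompletableFuture.runAsync(() -> headers.forEach(Header::value));
        CompletableFuture.allOf(keys, values).join();
    }
}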
@@ -25,9 +25,9 @@

 public class RecordHeader implements Header {
     private ByteBuffer keyBuffer;
-    private String key;
-    private ByteBuffer valueBuffer;
-    private byte[] value;
+    private volatile String key;
+    private volatile ByteBuffer valueBuffer;
+    private volatile byte[] value;

     public RecordHeader(String key, byte[] value) {
         Objects.requireNonNull(key, "Null header keys are not permitted");
@@ -42,16 +42,24 @@ public RecordHeader(ByteBuffer keyBuffer, ByteBuffer valueBuffer) {

     public String key() {
         if (key == null) {
-            key = Utils.utf8(keyBuffer, keyBuffer.remaining());
-            keyBuffer = null;
+            synchronized (this) {
+                if (key == null) {
+                    key = Utils.utf8(keyBuffer, keyBuffer.remaining());
+                    keyBuffer = null;
+                }
+            }
         }
         return key;
     }

     public byte[] value() {
         if (value == null && valueBuffer != null) {
-            value = Utils.toArray(valueBuffer);
-            valueBuffer = null;
+            synchronized (this) {
+                if (value == null && valueBuffer != null) {
+                    value = Utils.toArray(valueBuffer);
+                    valueBuffer = null;
+                }
+            }
         }
         return value;
     }
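
The change above is the standard double-checked locking idiom: the volatile fields let the common case return without taking the lock, while the synchronized block ensures the ByteBuffer-to-String (or byte[]) conversion runs at most once even under contention. A standalone sketch of the same idiom follows; the class and field names are hypothetical and not part of this patch.

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;

class LazyUtf8Field {
    private ByteBuffer raw;             // only read and cleared inside the synchronized block
    private volatile String decoded;    // volatile so the unlocked fast path sees a complete value

    LazyUtf8Field(ByteBuffer raw) {
        this.raw = raw;
    }

    String get() {
        if (decoded == null) {                   // fast path: no lock once decoding has happened
            synchronized (this) {
                if (decoded == null) {           // re-check: another thread may have decoded first
                    decoded = StandardCharsets.UTF_8.decode(raw).toString();
                    raw = null;                  // drop the buffer, mirroring keyBuffer = null above
                }
            }
        }
        return decoded;
    }
}

Synchronizing on this keeps the hot path lock-free after the value has been materialized; the monitor is only contended while the first decode is in flight.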
@@ -19,10 +19,18 @@
 import org.apache.kafka.common.header.Header;
 import org.apache.kafka.common.header.Headers;

+import org.junit.jupiter.api.RepeatedTest;
 import org.junit.jupiter.api.Test;

 import java.io.IOException;
 import java.nio.ByteBuffer;
+import java.nio.charset.StandardCharsets;
 import java.util.Iterator;
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.atomic.AtomicBoolean;
    [GitHub Actions / build / Compile and Check (Merge Ref) — Check notice on line 31 in clients/src/test/java/org/apache/kafka/common/header/internals/RecordHeadersTest.java: Checkstyle error: Unused import - java.util.concurrent.atomic.AtomicBoolean.]
+import java.util.stream.Collectors;
+import java.util.stream.IntStream;

 import static org.junit.jupiter.api.Assertions.assertArrayEquals;
 import static org.junit.jupiter.api.Assertions.assertEquals;
@@ -265,4 +273,41 @@
         assertArrayEquals(value.getBytes(), actual.value());
     }

+    private void assertRecordHeaderReadThreadSafe(RecordHeader header) {
+        int threadCount = 16;
+        CountDownLatch startLatch = new CountDownLatch(1);
+
+        var futures = IntStream.range(0, threadCount)
+            .mapToObj(i -> CompletableFuture.runAsync(() -> {
+                try {
+                    startLatch.await();
+                    header.key();
+                    header.value();
+                } catch (InterruptedException e) {
+                    Thread.currentThread().interrupt();
+                    throw new RuntimeException(e);
+                }
+            })).collect(Collectors.toUnmodifiableList());
+
+        startLatch.countDown();
+        futures.forEach(CompletableFuture::join);
+    }
+
+    @RepeatedTest(100)
+    public void testRecordHeaderIsReadThreadSafe() throws Exception {
+        RecordHeader header = new RecordHeader(
+            ByteBuffer.wrap("key".getBytes(StandardCharsets.UTF_8)),
+            ByteBuffer.wrap("value".getBytes(StandardCharsets.UTF_8))
+        );
+        assertRecordHeaderReadThreadSafe(header);
+    }
+
+    @RepeatedTest(100)
+    public void testRecordHeaderWithNullValueIsReadThreadSafe() throws Exception {
+        RecordHeader header = new RecordHeader(
+            ByteBuffer.wrap("key".getBytes(StandardCharsets.UTF_8)),
+            null
+        );
+        assertRecordHeaderReadThreadSafe(header);
+    }
 }
4 changes: 4 additions & 0 deletions docs/upgrade.html
@@ -25,6 +25,10 @@ <h5><a id="upgrade_4_2_0_from" href="#upgrade_4_2_0_from">Upgrading Servers to 4

 <h5><a id="upgrade_420_notable" href="#upgrade_420_notable">Notable changes in 4.2.0</a></h5>
 <ul>
+    <li>
+        The <code>org.apache.kafka.common.header.internals.RecordHeader</code> class has been updated to be read thread-safe. See <a href="https://cwiki.apache.org/confluence/x/nYmhFg">KIP-1205</a> for details.
+        In other words, <code>ConsumerRecord</code> headers can now be safely read from multiple threads concurrently.
+    </li>
     <li>
         The <code>org.apache.kafka.disallowed.login.modules</code> config was deprecated. Please use the <code>org.apache.kafka.allowed.login.modules</code>
         instead.
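
To illustrate the new upgrade note: a hypothetical fan-out pattern that relies on this guarantee is sketched below. The topic name, record values, pool size, and constructor usage are illustrative only.

import org.apache.kafka.clients.consumer.ConsumerRecord;

import java.nio.charset.StandardCharsets;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public class HeaderFanOutSketch {
    public static void main(String[] args) {
        ConsumerRecord<String, String> record = new ConsumerRecord<>("orders", 0, 42L, "key", "value");
        record.headers().add("trace-id", "abc123".getBytes(StandardCharsets.UTF_8));

        ExecutorService pool = Executors.newFixedThreadPool(4);
        for (int i = 0; i < 4; i++) {
            // Each worker only reads the headers; after this change no additional synchronization is required.
            pool.submit(() -> record.headers().forEach(h ->
                System.out.println(h.key() + " read by " + Thread.currentThread().getName())));
        }
        pool.shutdown();
    }
}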