diff --git a/clients/src/main/java/org/apache/kafka/common/header/internals/RecordHeader.java b/clients/src/main/java/org/apache/kafka/common/header/internals/RecordHeader.java
index e1ce6ad01a5bb..3eee4d27da50a 100644
--- a/clients/src/main/java/org/apache/kafka/common/header/internals/RecordHeader.java
+++ b/clients/src/main/java/org/apache/kafka/common/header/internals/RecordHeader.java
@@ -25,9 +25,9 @@ public class RecordHeader implements Header {
     private ByteBuffer keyBuffer;
-    private String key;
-    private ByteBuffer valueBuffer;
-    private byte[] value;
+    private volatile String key;
+    private volatile ByteBuffer valueBuffer;
+    private volatile byte[] value;
 
     public RecordHeader(String key, byte[] value) {
         Objects.requireNonNull(key, "Null header keys are not permitted");
         this.key = key;
         this.value = value;
@@ -42,16 +42,24 @@ public RecordHeader(ByteBuffer keyBuffer, ByteBuffer valueBuffer) {
 
     public String key() {
         if (key == null) {
-            key = Utils.utf8(keyBuffer, keyBuffer.remaining());
-            keyBuffer = null;
+            synchronized (this) {
+                if (key == null) {
+                    key = Utils.utf8(keyBuffer, keyBuffer.remaining());
+                    keyBuffer = null;
+                }
+            }
         }
         return key;
     }
 
     public byte[] value() {
         if (value == null && valueBuffer != null) {
-            value = Utils.toArray(valueBuffer);
-            valueBuffer = null;
+            synchronized (this) {
+                if (value == null && valueBuffer != null) {
+                    value = Utils.toArray(valueBuffer);
+                    valueBuffer = null;
+                }
+            }
         }
         return value;
     }
diff --git a/clients/src/test/java/org/apache/kafka/common/header/internals/RecordHeadersTest.java b/clients/src/test/java/org/apache/kafka/common/header/internals/RecordHeadersTest.java
index 41104194991d9..e7b17091caaa8 100644
--- a/clients/src/test/java/org/apache/kafka/common/header/internals/RecordHeadersTest.java
+++ b/clients/src/test/java/org/apache/kafka/common/header/internals/RecordHeadersTest.java
@@ -19,10 +19,18 @@
 import org.apache.kafka.common.header.Header;
 import org.apache.kafka.common.header.Headers;
 
+import org.junit.jupiter.api.RepeatedTest;
 import org.junit.jupiter.api.Test;
 
 import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.nio.charset.StandardCharsets;
 import java.util.Iterator;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
 
 import static org.junit.jupiter.api.Assertions.assertArrayEquals;
 import static org.junit.jupiter.api.Assertions.assertEquals;
@@ -265,4 +273,44 @@ static void assertHeader(String key, String value, Header actual) {
         assertArrayEquals(value.getBytes(), actual.value());
     }
 
+    @RepeatedTest(100)
+    public void testRecordHeaderIsReadThreadSafe() throws Exception {
+        int threads = 8;
+        RecordHeader header = new RecordHeader(
+            ByteBuffer.wrap("key".getBytes(StandardCharsets.UTF_8)),
+            ByteBuffer.wrap("value".getBytes(StandardCharsets.UTF_8))
+        );
+
+        ExecutorService pool = Executors.newFixedThreadPool(threads);
+        CountDownLatch startLatch = new CountDownLatch(1);
+        AtomicBoolean raceDetected = new AtomicBoolean(false);
+
+        try {
+            Runnable task = () -> {
+                try {
+                    startLatch.await();
+                    header.key();
+                    header.value();
+                } catch (NullPointerException e) {
+                    raceDetected.set(true);
+                } catch (InterruptedException e) {
+                    Thread.currentThread().interrupt();
+                    throw new RuntimeException(e);
+                }
+            };
+
+            for (int i = 0; i < threads; i++) pool.submit(task);
+
+            startLatch.countDown();
+
+            pool.shutdown();
+            pool.awaitTermination(5, TimeUnit.SECONDS);
+
+            assertFalse(raceDetected.get(), "Read race condition detected in RecordHeader!");
+        } finally {
+            if (!pool.isTerminated()) {
+                pool.shutdownNow();
+            }
+        }
+    }
 }
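Note for reviewers: the RecordHeader change above is the double-checked locking idiom, which is only safe here because the lazily assigned fields are declared volatile. Below is a minimal, self-contained sketch of the same pattern; LazyUtf8 is a hypothetical stand-in for the key/keyBuffer pair and is not part of this patch.

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;

// Minimal sketch of the double-checked locking idiom the patch applies.
// LazyUtf8 is a hypothetical stand-in for RecordHeader's key/keyBuffer pair.
public class LazyUtf8 {
    private ByteBuffer buffer;        // written only inside the synchronized block
    private volatile String decoded;  // volatile makes the unlocked fast path safe

    public LazyUtf8(ByteBuffer buffer) {
        this.buffer = buffer;
    }

    public String get() {
        if (decoded == null) {                 // fast path: no lock once initialized
            synchronized (this) {
                if (decoded == null) {         // re-check under the lock
                    decoded = StandardCharsets.UTF_8.decode(buffer).toString();
                    buffer = null;             // drop the buffer, as the patch does
                }
            }
        }
        return decoded;
    }
}

Before the patch, two threads could both observe key == null; the first would null out keyBuffer, and the second would then hit a NullPointerException on keyBuffer.remaining(). The volatile field plus the re-check under the lock closes exactly that window, which is also the race the repeated test above tries to provoke.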
diff --git a/docs/upgrade.html b/docs/upgrade.html
index 80343474d7051..2a00a0cdd6cfd 100644
--- a/docs/upgrade.html
+++ b/docs/upgrade.html
@@ -25,6 +25,9 @@
+    <li>The <code>org.apache.kafka.common.header.internals.RecordHeader</code> class has been updated to make
+        concurrent reads thread-safe. See KIP-1205 for details.
+    </li>
     <li>The <code>org.apache.kafka.disallowed.login.modules</code> config has been deprecated. Please use the
         <code>org.apache.kafka.allowed.login.modules</code> config instead.
     </li>
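As a usage illustration for the login-module deprecation note above: the two property names come from the note itself, but the exact semantics (an allow-list replacing a deny-list) and the module names below are assumptions for illustration, not values taken from Kafka.

// Hedged sketch of switching from the deprecated deny-list system property to
// the allow-list property named in the upgrade note. The module names below
// are illustrative assumptions, not Kafka defaults.
public final class LoginModulePropertyExample {
    public static void main(String[] args) {
        // Deprecated deny-list style:
        // System.setProperty("org.apache.kafka.disallowed.login.modules",
        //         "com.sun.security.auth.module.JndiLoginModule");

        // Replacement allow-list style: list the login modules JAAS may load.
        System.setProperty("org.apache.kafka.allowed.login.modules",
                "org.apache.kafka.common.security.plain.PlainLoginModule,"
                        + "org.apache.kafka.common.security.scram.ScramLoginModule");
    }
}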