diff --git a/pom.xml b/pom.xml index 36301aa73..dc06777f2 100644 --- a/pom.xml +++ b/pom.xml @@ -53,7 +53,7 @@ 32.1.3-jre 2.0.9 2.2 - 0.9.0-SNAPSHOT + 0.10.0-SNAPSHOT 23.5.26 @@ -134,7 +134,7 @@ ${s3stream.version} - io.opentelemetry.instrumentation + io.opentelemetry.* * diff --git a/s3stream/pom.xml b/s3stream/pom.xml index 93afce96d..326328480 100644 --- a/s3stream/pom.xml +++ b/s3stream/pom.xml @@ -22,7 +22,7 @@ 4.0.0 com.automq.elasticstream s3stream - 0.9.0-SNAPSHOT + 0.10.0-SNAPSHOT 5.5.0 5.10.0 @@ -34,7 +34,8 @@ 17 17 UTF-8 - 1.32.0-alpha + 1.32.0 + 1.9.20.1 @@ -113,10 +114,25 @@ jackson-databind 2.16.0 + + io.opentelemetry + opentelemetry-api + ${opentelemetry.version} + io.opentelemetry.instrumentation - opentelemetry-runtime-telemetry-java8 - ${alapha.opentelemetry.version} + opentelemetry-instrumentation-annotations + ${opentelemetry.version} + + + org.aspectj + aspectjrt + ${aspectj.version} + + + org.aspectj + aspectjweaver + ${aspectj.version} @@ -239,6 +255,34 @@ + + dev.aspectj + aspectj-maven-plugin + 1.13.1 + + + org.aspectj + aspectjtools + ${aspectj.version} + + + + ${maven.compiler.target} + ${maven.compiler.source} + ${maven.compiler.target} + true + true + ignore + UTF-8 + + + + + compile + + + + diff --git a/s3stream/src/main/java/com/automq/stream/api/Stream.java b/s3stream/src/main/java/com/automq/stream/api/Stream.java index 96900a507..c82deb9e3 100644 --- a/s3stream/src/main/java/com/automq/stream/api/Stream.java +++ b/s3stream/src/main/java/com/automq/stream/api/Stream.java @@ -18,6 +18,8 @@ package com.automq.stream.api; import com.automq.stream.api.exceptions.StreamClientException; +import com.automq.stream.s3.context.AppendContext; +import com.automq.stream.s3.context.FetchContext; import java.util.concurrent.CompletableFuture; @@ -54,7 +56,11 @@ public interface Stream { * @return - complete success with async {@link AppendResult}, when append success. * - complete exception with {@link StreamClientException}, when append fail. TODO: specify the exception. */ - CompletableFuture append(RecordBatch recordBatch); + CompletableFuture append(AppendContext context, RecordBatch recordBatch); + + default CompletableFuture append(RecordBatch recordBatch) { + return append(AppendContext.DEFAULT, recordBatch); + } /** * Fetch recordBatch list from stream. Note the startOffset may be in the middle in the first recordBatch. @@ -67,10 +73,10 @@ public interface Stream { * @return - complete success with {@link FetchResult}, when fetch success. * - complete exception with {@link StreamClientException}, when startOffset is bigger than stream end offset. 
*/ - CompletableFuture fetch(long startOffset, long endOffset, int maxBytesHint, ReadOptions readOptions); + CompletableFuture fetch(FetchContext context, long startOffset, long endOffset, int maxBytesHint); default CompletableFuture fetch(long startOffset, long endOffset, int maxBytesHint) { - return fetch(startOffset, endOffset, maxBytesHint, ReadOptions.DEFAULT); + return fetch(FetchContext.DEFAULT, startOffset, endOffset, maxBytesHint); } /** diff --git a/s3stream/src/main/java/com/automq/stream/s3/S3Storage.java b/s3stream/src/main/java/com/automq/stream/s3/S3Storage.java index 3d44f26a6..607b0a7af 100644 --- a/s3stream/src/main/java/com/automq/stream/s3/S3Storage.java +++ b/s3stream/src/main/java/com/automq/stream/s3/S3Storage.java @@ -17,12 +17,13 @@ package com.automq.stream.s3; -import com.automq.stream.api.ReadOptions; import com.automq.stream.api.exceptions.FastReadFailFastException; import com.automq.stream.s3.cache.CacheAccessType; import com.automq.stream.s3.cache.LogCache; import com.automq.stream.s3.cache.ReadDataBlock; import com.automq.stream.s3.cache.S3BlockCache; +import com.automq.stream.s3.context.AppendContext; +import com.automq.stream.s3.context.FetchContext; import com.automq.stream.s3.metadata.StreamMetadata; import com.automq.stream.s3.metrics.S3StreamMetricsManager; import com.automq.stream.s3.metrics.TimerUtil; @@ -32,6 +33,7 @@ import com.automq.stream.s3.objects.ObjectManager; import com.automq.stream.s3.operator.S3Operator; import com.automq.stream.s3.streams.StreamManager; +import com.automq.stream.s3.trace.context.TraceContext; import com.automq.stream.s3.wal.WriteAheadLog; import com.automq.stream.utils.FutureTicker; import com.automq.stream.utils.FutureUtil; @@ -40,6 +42,8 @@ import io.netty.buffer.ByteBuf; import io.netty.util.HashedWheelTimer; import io.netty.util.Timeout; +import io.opentelemetry.instrumentation.annotations.SpanAttribute; +import io.opentelemetry.instrumentation.annotations.WithSpan; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -259,14 +263,15 @@ public void shutdown() { @Override - public CompletableFuture append(StreamRecordBatch streamRecord) { + @WithSpan + public CompletableFuture append(AppendContext context, StreamRecordBatch streamRecord) { TimerUtil timerUtil = new TimerUtil(); CompletableFuture cf = new CompletableFuture<>(); // encoded before append to free heap ByteBuf. streamRecord.encoded(); - WalWriteRequest writeRequest = new WalWriteRequest(streamRecord, -1L, cf); + WalWriteRequest writeRequest = new WalWriteRequest(streamRecord, -1L, cf, context); handleAppendRequest(writeRequest); - append0(writeRequest, false); + append0(context, writeRequest, false); cf.whenComplete((nil, ex) -> { streamRecord.release(); S3StreamMetricsManager.recordOperationLatency(timerUtil.elapsedAs(TimeUnit.NANOSECONDS), S3Operation.APPEND_STORAGE); @@ -279,7 +284,7 @@ public CompletableFuture append(StreamRecordBatch streamRecord) { * * @return backoff status. */ - public boolean append0(WalWriteRequest request, boolean fromBackoff) { + public boolean append0(AppendContext context, WalWriteRequest request, boolean fromBackoff) { // TODO: storage status check, fast fail the request when storage closed. 
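A note on the Stream/Storage API change above: the context-carrying overloads keep existing call sites source-compatible by delegating through AppendContext.DEFAULT and FetchContext.DEFAULT, which disable tracing. A minimal caller-side sketch (hypothetical class and handles; only the overload shapes and constructors come from this patch):

    import com.automq.stream.api.RecordBatch;
    import com.automq.stream.api.Stream;
    import com.automq.stream.s3.context.AppendContext;
    import com.automq.stream.s3.context.FetchContext;
    import io.opentelemetry.api.GlobalOpenTelemetry;
    import io.opentelemetry.api.trace.Tracer;
    import io.opentelemetry.context.Context;

    class ContextOverloadDemo {
        // `stream` and `batch` are hypothetical; only the overloads come from this patch.
        void demo(Stream stream, RecordBatch batch) {
            Tracer tracer = GlobalOpenTelemetry.getTracer("s3stream-demo");
            // Traced paths: the caller builds a context carrying the OpenTelemetry state.
            stream.append(new AppendContext(true, tracer, Context.current()), batch);
            stream.fetch(new FetchContext(true, tracer, Context.current()), 0L, 100L, 1024);
            // Untouched call sites hit the default methods, i.e. *Context.DEFAULT with tracing off.
            stream.append(batch);
            stream.fetch(0L, 100L, 1024);
        }
    }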
if (!fromBackoff && !backoffRecords.isEmpty()) { backoffRecords.offer(request); @@ -304,7 +309,7 @@ public boolean append0(WalWriteRequest request, boolean fromBackoff) { Lock lock = confirmOffsetCalculator.addLock(); lock.lock(); try { - appendResult = deltaWAL.append(streamRecord.encoded()); + appendResult = deltaWAL.append(new TraceContext(context), streamRecord.encoded()); } finally { lock.unlock(); } @@ -344,7 +349,7 @@ private void tryDrainBackoffRecords() { if (request == null) { break; } - if (append0(request, true)) { + if (append0(request.context, request, true)) { LOGGER.warn("try drain backoff record fail, still backoff"); break; } @@ -356,20 +361,30 @@ private void tryDrainBackoffRecords() { } @Override - public CompletableFuture read(long streamId, long startOffset, long endOffset, int maxBytes, ReadOptions readOptions) { + @WithSpan + public CompletableFuture read(FetchContext context, + @SpanAttribute long streamId, + @SpanAttribute long startOffset, + @SpanAttribute long endOffset, + @SpanAttribute int maxBytes) { TimerUtil timerUtil = new TimerUtil(); CompletableFuture cf = new CompletableFuture<>(); - FutureUtil.propagate(read0(streamId, startOffset, endOffset, maxBytes, readOptions), cf); + FutureUtil.propagate(read0(context, streamId, startOffset, endOffset, maxBytes), cf); cf.whenComplete((nil, ex) -> S3StreamMetricsManager.recordOperationLatency(timerUtil.elapsedAs(TimeUnit.NANOSECONDS), S3Operation.READ_STORAGE)); return cf; } - private CompletableFuture read0(long streamId, long startOffset, long endOffset, int maxBytes, ReadOptions readOptions) { - List logCacheRecords = deltaWALCache.get(streamId, startOffset, endOffset, maxBytes); + @WithSpan + private CompletableFuture read0(FetchContext context, + @SpanAttribute long streamId, + @SpanAttribute long startOffset, + @SpanAttribute long endOffset, + @SpanAttribute int maxBytes) { + List logCacheRecords = deltaWALCache.get(context, streamId, startOffset, endOffset, maxBytes); if (!logCacheRecords.isEmpty() && logCacheRecords.get(0).getBaseOffset() <= startOffset) { return CompletableFuture.completedFuture(new ReadDataBlock(logCacheRecords, CacheAccessType.DELTA_WAL_CACHE_HIT)); } - if (readOptions.fastRead()) { + if (context.readOptions().fastRead()) { // fast read fail fast when need read from block cache. 
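On the fast-read path above: read0 now pulls ReadOptions out of the FetchContext, and when readOptions().fastRead() is set but the delta-WAL cache cannot fully serve the range, the read fails fast with FastReadFailFastException rather than falling back to the block cache. A sketch of wiring options through the context (hypothetical storage handle and offsets; setReadOptions is added by this patch):

    import com.automq.stream.api.ReadOptions;
    import com.automq.stream.s3.S3Storage;
    import com.automq.stream.s3.context.FetchContext;
    import com.automq.stream.s3.model.StreamRecordBatch;

    class FastReadDemo {
        // `storage` is assumed to be a constructed S3Storage; stream id and offsets are illustrative.
        void read(S3Storage storage) {
            FetchContext ctx = new FetchContext();       // tracing-disabled variant
            ctx.setReadOptions(ReadOptions.DEFAULT);     // read0 consults ctx.readOptions().fastRead()
            storage.read(ctx, 233L, 10L, 13L, 90)
                   // records are reference-counted; callers release them when done
                   .thenAccept(block -> block.getRecords().forEach(StreamRecordBatch::release));
        }
    }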
logCacheRecords.forEach(StreamRecordBatch::release); logCacheRecords.clear(); @@ -380,7 +395,7 @@ private CompletableFuture read0(long streamId, long startOffset, } Timeout timeout = timeoutDetect.newTimeout(t -> LOGGER.warn("read from block cache timeout, stream={}, {}, maxBytes: {}", streamId, startOffset, maxBytes), 1, TimeUnit.MINUTES); long finalEndOffset = endOffset; - return blockCache.read(streamId, startOffset, endOffset, maxBytes).thenApply(blockCacheRst -> { + return blockCache.read(context, streamId, startOffset, endOffset, maxBytes).thenApply(blockCacheRst -> { List rst = new ArrayList<>(blockCacheRst.getRecords()); int remainingBytesSize = maxBytes - rst.stream().mapToInt(StreamRecordBatch::size).sum(); int readIndex = -1; diff --git a/s3stream/src/main/java/com/automq/stream/s3/S3Stream.java b/s3stream/src/main/java/com/automq/stream/s3/S3Stream.java index add215205..4bf4ae6b5 100644 --- a/s3stream/src/main/java/com/automq/stream/s3/S3Stream.java +++ b/s3stream/src/main/java/com/automq/stream/s3/S3Stream.java @@ -21,7 +21,6 @@ import com.automq.stream.RecordBatchWithContextWrapper; import com.automq.stream.api.AppendResult; import com.automq.stream.api.FetchResult; -import com.automq.stream.api.ReadOptions; import com.automq.stream.api.RecordBatch; import com.automq.stream.api.RecordBatchWithContext; import com.automq.stream.api.Stream; @@ -29,6 +28,8 @@ import com.automq.stream.api.exceptions.FastReadFailFastException; import com.automq.stream.api.exceptions.StreamClientException; import com.automq.stream.s3.cache.CacheAccessType; +import com.automq.stream.s3.context.AppendContext; +import com.automq.stream.s3.context.FetchContext; import com.automq.stream.s3.metrics.S3StreamMetricsManager; import com.automq.stream.s3.metrics.TimerUtil; import com.automq.stream.s3.metrics.operations.S3Operation; @@ -38,6 +39,8 @@ import com.automq.stream.utils.FutureUtil; import com.automq.stream.utils.GlobalSwitch; import io.netty.buffer.Unpooled; +import io.opentelemetry.instrumentation.annotations.SpanAttribute; +import io.opentelemetry.instrumentation.annotations.WithSpan; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -139,7 +142,8 @@ public long nextOffset() { } @Override - public CompletableFuture append(RecordBatch recordBatch) { + @WithSpan + public CompletableFuture append(AppendContext context, RecordBatch recordBatch) { TimerUtil timerUtil = new TimerUtil(); writeLock.lock(); S3StreamMetricsManager.recordOperationLatency(timerUtil.elapsedAs(TimeUnit.NANOSECONDS), S3Operation.APPEND_STREAM_WRITE_LOCK); @@ -148,7 +152,7 @@ public CompletableFuture append(RecordBatch recordBatch) { if (networkInboundLimiter != null) { networkInboundLimiter.forceConsume(recordBatch.rawPayload().remaining()); } - return append0(recordBatch); + return append0(context, recordBatch); }, LOGGER, "append"); pendingAppends.add(cf); cf.whenComplete((nil, ex) -> { @@ -161,13 +165,14 @@ public CompletableFuture append(RecordBatch recordBatch) { } } - private CompletableFuture append0(RecordBatch recordBatch) { + @WithSpan + private CompletableFuture append0(AppendContext context, RecordBatch recordBatch) { if (!status.isWritable()) { return FutureUtil.failedFuture(new StreamClientException(ErrorCode.STREAM_ALREADY_CLOSED, logIdent + " stream is not writable")); } long offset = nextOffset.getAndAdd(recordBatch.count()); StreamRecordBatch streamRecordBatch = new StreamRecordBatch(streamId, epoch, offset, recordBatch.count(), Unpooled.wrappedBuffer(recordBatch.rawPayload())); - CompletableFuture cf 
= storage.append(streamRecordBatch).thenApply(nil -> { + CompletableFuture cf = storage.append(context, streamRecordBatch).thenApply(nil -> { updateConfirmOffset(offset + recordBatch.count()); return new DefaultAppendResult(offset); }); @@ -186,12 +191,16 @@ private CompletableFuture append0(RecordBatch recordBatch) { } @Override - public CompletableFuture fetch(long startOffset, long endOffset, int maxBytes, ReadOptions readOptions) { + @WithSpan + public CompletableFuture fetch(FetchContext context, + @SpanAttribute long startOffset, + @SpanAttribute long endOffset, + @SpanAttribute int maxBytes) { TimerUtil timerUtil = new TimerUtil(); readLock.lock(); S3StreamMetricsManager.recordOperationLatency(timerUtil.elapsedAs(TimeUnit.NANOSECONDS), S3Operation.FETCH_STREAM_READ_LOCK); try { - CompletableFuture cf = exec(() -> fetch0(startOffset, endOffset, maxBytes, readOptions), LOGGER, "fetch"); + CompletableFuture cf = exec(() -> fetch0(context, startOffset, endOffset, maxBytes), LOGGER, "fetch"); pendingFetches.add(cf); cf.whenComplete((rs, ex) -> { S3StreamMetricsManager.recordOperationLatency(timerUtil.elapsedAs(TimeUnit.NANOSECONDS), S3Operation.FETCH_STREAM); @@ -216,7 +225,8 @@ public CompletableFuture fetch(long startOffset, long endOffset, in } } - private CompletableFuture fetch0(long startOffset, long endOffset, int maxBytes, ReadOptions readOptions) { + @WithSpan + private CompletableFuture fetch0(FetchContext context, long startOffset, long endOffset, int maxBytes) { if (!status.isReadable()) { return FutureUtil.failedFuture(new StreamClientException(ErrorCode.STREAM_ALREADY_CLOSED, logIdent + " stream is already closed")); } @@ -237,12 +247,12 @@ private CompletableFuture fetch0(long startOffset, long endOffset, if (startOffset == endOffset) { return CompletableFuture.completedFuture(new DefaultFetchResult(Collections.emptyList(), CacheAccessType.DELTA_WAL_CACHE_HIT, false)); } - return storage.read(streamId, startOffset, endOffset, maxBytes, readOptions).thenApply(dataBlock -> { + return storage.read(context, streamId, startOffset, endOffset, maxBytes).thenApply(dataBlock -> { List records = dataBlock.getRecords(); if (LOGGER.isTraceEnabled()) { LOGGER.trace("{} stream fetch, startOffset: {}, endOffset: {}, maxBytes: {}, records: {}", logIdent, startOffset, endOffset, maxBytes, records.size()); } - return new DefaultFetchResult(records, dataBlock.getCacheAccessType(), readOptions.pooledBuf()); + return new DefaultFetchResult(records, dataBlock.getCacheAccessType(), context.readOptions().pooledBuf()); }); } diff --git a/s3stream/src/main/java/com/automq/stream/s3/Storage.java b/s3stream/src/main/java/com/automq/stream/s3/Storage.java index 4cd7624a1..ea398f7ac 100644 --- a/s3stream/src/main/java/com/automq/stream/s3/Storage.java +++ b/s3stream/src/main/java/com/automq/stream/s3/Storage.java @@ -17,8 +17,9 @@ package com.automq.stream.s3; -import com.automq.stream.api.ReadOptions; import com.automq.stream.s3.cache.ReadDataBlock; +import com.automq.stream.s3.context.AppendContext; +import com.automq.stream.s3.context.FetchContext; import com.automq.stream.s3.model.StreamRecordBatch; import java.util.concurrent.CompletableFuture; @@ -37,9 +38,17 @@ public interface Storage { * * @param streamRecord {@link StreamRecordBatch} */ - CompletableFuture append(StreamRecordBatch streamRecord); + CompletableFuture append(AppendContext context, StreamRecordBatch streamRecord); - CompletableFuture read(long streamId, long startOffset, long endOffset, int maxBytes, ReadOptions readOptions); + 
default CompletableFuture append(StreamRecordBatch streamRecord) { + return append(AppendContext.DEFAULT, streamRecord); + } + + CompletableFuture read(FetchContext context, long streamId, long startOffset, long endOffset, int maxBytes); + + default CompletableFuture read(long streamId, long startOffset, long endOffset, int maxBytes) { + return read(FetchContext.DEFAULT, streamId, startOffset, endOffset, maxBytes); + } /** * Force stream record in WAL upload to s3 diff --git a/s3stream/src/main/java/com/automq/stream/s3/WalWriteRequest.java b/s3stream/src/main/java/com/automq/stream/s3/WalWriteRequest.java index 844cbfb39..5cc947d9b 100644 --- a/s3stream/src/main/java/com/automq/stream/s3/WalWriteRequest.java +++ b/s3stream/src/main/java/com/automq/stream/s3/WalWriteRequest.java @@ -18,20 +18,27 @@ package com.automq.stream.s3; +import com.automq.stream.s3.context.AppendContext; import com.automq.stream.s3.model.StreamRecordBatch; import java.util.concurrent.CompletableFuture; public class WalWriteRequest implements Comparable { final StreamRecordBatch record; + final AppendContext context; long offset; final CompletableFuture cf; boolean persisted; public WalWriteRequest(StreamRecordBatch record, long offset, CompletableFuture cf) { + this(record, offset, cf, AppendContext.DEFAULT); + } + + public WalWriteRequest(StreamRecordBatch record, long offset, CompletableFuture cf, AppendContext context) { this.record = record; this.offset = offset; this.cf = cf; + this.context = context; } @Override diff --git a/s3stream/src/main/java/com/automq/stream/s3/cache/BlockCache.java b/s3stream/src/main/java/com/automq/stream/s3/cache/BlockCache.java index 1e9eb2789..a740bf4e5 100644 --- a/s3stream/src/main/java/com/automq/stream/s3/cache/BlockCache.java +++ b/s3stream/src/main/java/com/automq/stream/s3/cache/BlockCache.java @@ -22,7 +22,10 @@ import com.automq.stream.s3.metrics.S3StreamMetricsManager; import com.automq.stream.s3.model.StreamRecordBatch; import com.automq.stream.s3.cache.DefaultS3BlockCache.ReadAheadRecord; +import com.automq.stream.s3.trace.context.TraceContext; import com.automq.stream.utils.biniarysearch.StreamRecordBatchList; +import io.opentelemetry.instrumentation.annotations.SpanAttribute; +import io.opentelemetry.instrumentation.annotations.WithSpan; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -209,11 +212,21 @@ boolean checkRange0(long streamId, long startOffset, int maxBytes) { return nextMaxBytes <= 0; } + public GetCacheResult get(long streamId, long startOffset, long endOffset, int maxBytes) { + return get(TraceContext.DEFAULT, streamId, startOffset, endOffset, maxBytes); + } + /** * Get records from cache. * Note: the records is retained, the caller should release it. 
*/ - public GetCacheResult get(long streamId, long startOffset, long endOffset, int maxBytes) { + @WithSpan + public GetCacheResult get(TraceContext context, + @SpanAttribute long streamId, + @SpanAttribute long startOffset, + @SpanAttribute long endOffset, + @SpanAttribute int maxBytes) { + context.currentContext(); if (startOffset >= endOffset || maxBytes <= 0) { return GetCacheResult.empty(); } diff --git a/s3stream/src/main/java/com/automq/stream/s3/cache/DefaultS3BlockCache.java b/s3stream/src/main/java/com/automq/stream/s3/cache/DefaultS3BlockCache.java index bde961c63..1d198862c 100644 --- a/s3stream/src/main/java/com/automq/stream/s3/cache/DefaultS3BlockCache.java +++ b/s3stream/src/main/java/com/automq/stream/s3/cache/DefaultS3BlockCache.java @@ -24,8 +24,12 @@ import com.automq.stream.s3.model.StreamRecordBatch; import com.automq.stream.s3.objects.ObjectManager; import com.automq.stream.s3.operator.S3Operator; +import com.automq.stream.s3.trace.context.TraceContext; import com.automq.stream.utils.FutureUtil; import com.automq.stream.utils.Threads; +import io.opentelemetry.api.trace.Span; +import io.opentelemetry.instrumentation.annotations.SpanAttribute; +import io.opentelemetry.instrumentation.annotations.WithSpan; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -73,10 +77,16 @@ public void shutdown() { } @Override - public CompletableFuture read(long streamId, long startOffset, long endOffset, int maxBytes) { + @WithSpan + public CompletableFuture read(TraceContext traceContext, + @SpanAttribute long streamId, + @SpanAttribute long startOffset, + @SpanAttribute long endOffset, + @SpanAttribute int maxBytes) { if (LOGGER.isDebugEnabled()) { - LOGGER.debug("[S3BlockCache] read data, stream={}, {}-{}, total bytes: {} ", streamId, startOffset, endOffset, maxBytes); + LOGGER.debug("[S3BlockCache] read data, stream={}, {}-{}, total bytes: {}", streamId, startOffset, endOffset, maxBytes); } + final TraceContext finalTraceContext = new TraceContext(traceContext); this.readAheadManager.updateReadProgress(streamId, startOffset); TimerUtil timerUtil = new TimerUtil(); CompletableFuture readCf = new CompletableFuture<>(); @@ -88,7 +98,7 @@ public CompletableFuture read(long streamId, long startOffset, lo // submit read task to mainExecutor to avoid read slower the caller thread. 
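TraceContext is @NotThreadSafe (its current Context is mutated as scopes open and close), which is why DefaultS3BlockCache.read above snapshots it with the copy constructor before handing work to mainExecutor. The same pattern in isolation (hypothetical method; assumes an enabled context, so currentContext() is non-null):

    import com.automq.stream.s3.trace.context.TraceContext;
    import io.opentelemetry.api.trace.Span;
    import java.util.concurrent.Executor;

    class CopyBeforeAsyncDemo {
        void submit(TraceContext callerCtx, Executor executor) {
            // Snapshot the mutable context on the caller thread...
            final TraceContext copy = new TraceContext(callerCtx);
            // ...so the async task owns its own scope stack and cannot race the caller.
            executor.execute(() -> Span.fromContext(copy.currentContext()).addEvent("async-read"));
        }
    }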
mainExecutor.execute(() -> { try { - FutureUtil.propagate(read0(streamId, startOffset, endOffset, maxBytes, uuid, context).whenComplete((ret, ex) -> { + FutureUtil.propagate(read0(finalTraceContext, streamId, startOffset, endOffset, maxBytes, uuid, context).whenComplete((ret, ex) -> { if (ex != null) { LOGGER.error("read {} [{}, {}), maxBytes: {} from block cache fail", streamId, startOffset, endOffset, maxBytes, ex); this.inflightReadThrottle.release(uuid); @@ -101,9 +111,10 @@ public CompletableFuture read(long streamId, long startOffset, lo long timeElapsed = timerUtil.elapsedAs(TimeUnit.NANOSECONDS); boolean isCacheHit = ret.getCacheAccessType() == CacheAccessType.BLOCK_CACHE_HIT; + Span.fromContext(finalTraceContext.currentContext()).setAttribute("cache_hit", isCacheHit); S3StreamMetricsManager.recordReadCacheLatency(timeElapsed, S3Operation.READ_STORAGE_BLOCK_CACHE, isCacheHit); if (LOGGER.isDebugEnabled()) { - LOGGER.debug("[S3BlockCache] read data complete, cache hit: {}, stream={}, {}-{}, total bytes: {} ", + LOGGER.debug("[S3BlockCache] read data complete, cache hit: {}, stream={}, {}-{}, total bytes: {}", ret.getCacheAccessType() == CacheAccessType.BLOCK_CACHE_HIT, streamId, startOffset, endOffset, totalReturnedSize); } this.inflightReadThrottle.release(uuid); @@ -119,7 +130,13 @@ public CompletableFuture read(long streamId, long startOffset, lo return readCf; } - public CompletableFuture read0(long streamId, long startOffset, long endOffset, int maxBytes, UUID uuid, ReadTaskContext context) { + @WithSpan + public CompletableFuture read0(TraceContext traceContext, + @SpanAttribute long streamId, + @SpanAttribute long startOffset, + @SpanAttribute long endOffset, + @SpanAttribute int maxBytes, + UUID uuid, ReadTaskContext context) { ReadAheadAgent agent = context.agent; if (LOGGER.isDebugEnabled()) { @@ -138,13 +155,13 @@ public CompletableFuture read0(long streamId, long startOffset, l CompletableFuture readCf = new CompletableFuture<>(); context.setStatus(ReadBlockCacheStatus.WAIT_INFLIGHT_RA); inflightReadAheadTaskContext.cf.whenComplete((nil, ex) -> FutureUtil.exec(() -> FutureUtil.propagate( - read0(streamId, startOffset, endOffset, maxBytes, uuid, context), readCf), readCf, LOGGER, "read0")); + read0(traceContext, streamId, startOffset, endOffset, maxBytes, uuid, context), readCf), readCf, LOGGER, "read0")); return readCf; } // 1. 
get from cache context.setStatus(ReadBlockCacheStatus.GET_FROM_CACHE); - BlockCache.GetCacheResult cacheRst = cache.get(streamId, nextStartOffset, endOffset, nextMaxBytes); + BlockCache.GetCacheResult cacheRst = cache.get(traceContext, streamId, nextStartOffset, endOffset, nextMaxBytes); List cacheRecords = cacheRst.getRecords(); if (!cacheRecords.isEmpty()) { asyncReadAhead(streamId, agent, cacheRst.getReadAheadRecords()); @@ -161,7 +178,7 @@ public CompletableFuture read0(long streamId, long startOffset, l if (LOGGER.isDebugEnabled()) { LOGGER.debug("[S3BlockCache] read data partially hit cache, stream={}, {}-{}, total bytes: {} ", streamId, nextStartOffset, endOffset, nextMaxBytes); } - return read0(streamId, nextStartOffset, endOffset, nextMaxBytes, uuid, context).thenApply(rst -> { + return read0(traceContext, streamId, nextStartOffset, endOffset, nextMaxBytes, uuid, context).thenApply(rst -> { List records = new ArrayList<>(cacheRecords); records.addAll(rst.getRecords()); return new ReadDataBlock(records, CacheAccessType.BLOCK_CACHE_MISS); @@ -174,14 +191,14 @@ public CompletableFuture read0(long streamId, long startOffset, l if (LOGGER.isDebugEnabled()) { LOGGER.debug("[S3BlockCache] read data cache miss, stream={}, {}-{}, total bytes: {} ", streamId, startOffset, endOffset, maxBytes); } - return streamReader.syncReadAhead(streamId, startOffset, endOffset, maxBytes, agent, uuid) + return streamReader.syncReadAhead(traceContext, streamId, startOffset, endOffset, maxBytes, agent, uuid) .thenCompose(rst -> { if (!rst.isEmpty()) { int remainBytes = maxBytes - rst.stream().mapToInt(StreamRecordBatch::size).sum(); long lastOffset = rst.get(rst.size() - 1).getLastOffset(); if (remainBytes > 0 && lastOffset < endOffset) { // retry read - return read0(streamId, lastOffset, endOffset, remainBytes, uuid, context).thenApply(rst2 -> { + return read0(traceContext, streamId, lastOffset, endOffset, remainBytes, uuid, context).thenApply(rst2 -> { List records = new ArrayList<>(rst); records.addAll(rst2.getRecords()); return new ReadDataBlock(records, CacheAccessType.BLOCK_CACHE_MISS); diff --git a/s3stream/src/main/java/com/automq/stream/s3/cache/InflightReadThrottle.java b/s3stream/src/main/java/com/automq/stream/s3/cache/InflightReadThrottle.java index ac7157082..6d60ae048 100644 --- a/s3stream/src/main/java/com/automq/stream/s3/cache/InflightReadThrottle.java +++ b/s3stream/src/main/java/com/automq/stream/s3/cache/InflightReadThrottle.java @@ -18,9 +18,11 @@ package com.automq.stream.s3.cache; import com.automq.stream.s3.metrics.S3StreamMetricsManager; +import com.automq.stream.s3.trace.context.TraceContext; import com.automq.stream.utils.ThreadUtils; import com.automq.stream.utils.Threads; import com.automq.stream.utils.Utils; +import io.opentelemetry.instrumentation.annotations.WithSpan; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -82,6 +84,12 @@ public int getRemainingInflightReadBytes() { } public CompletableFuture acquire(UUID uuid, int readSize) { + return acquire(TraceContext.DEFAULT, uuid, readSize); + } + + @WithSpan + public CompletableFuture acquire(TraceContext context, UUID uuid, int readSize) { + context.currentContext(); lock.lock(); try { if (readSize > maxInflightReadBytes) { diff --git a/s3stream/src/main/java/com/automq/stream/s3/cache/LogCache.java b/s3stream/src/main/java/com/automq/stream/s3/cache/LogCache.java index d2c643058..d52aba729 100644 --- a/s3stream/src/main/java/com/automq/stream/s3/cache/LogCache.java +++ 
b/s3stream/src/main/java/com/automq/stream/s3/cache/LogCache.java @@ -21,7 +21,10 @@ import com.automq.stream.s3.metrics.TimerUtil; import com.automq.stream.s3.metrics.operations.S3Operation; import com.automq.stream.s3.model.StreamRecordBatch; +import com.automq.stream.s3.trace.context.TraceContext; import com.automq.stream.utils.biniarysearch.StreamRecordBatchList; +import io.opentelemetry.instrumentation.annotations.SpanAttribute; +import io.opentelemetry.instrumentation.annotations.WithSpan; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -100,6 +103,10 @@ public boolean put(StreamRecordBatch recordBatch) { return full; } + public List get(long streamId, long startOffset, long endOffset, int maxBytes) { + return get(TraceContext.DEFAULT, streamId, startOffset, endOffset, maxBytes); + } + /** * Get streamId [startOffset, endOffset) range records with maxBytes limit. *

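The hunk above gives LogCache the same two-tier entry point used throughout this patch: the old four-argument get delegates with TraceContext.DEFAULT, so untraced callers compile unchanged and start no spans. In sketch form (hypothetical handles and bounds; signatures as in this file):

    import com.automq.stream.s3.cache.LogCache;
    import com.automq.stream.s3.context.FetchContext;
    import com.automq.stream.s3.model.StreamRecordBatch;
    import java.util.List;

    class LogCacheGetDemo {
        // `logCache` and `fetchContext` are assumed to exist; bounds are illustrative.
        void demo(LogCache logCache, FetchContext fetchContext) {
            // Old call sites: delegates with TraceContext.DEFAULT, so no span is started.
            List<StreamRecordBatch> records = logCache.get(233L, 10L, 13L, 1024);
            // Traced call sites (S3Storage.read0) pass the FetchContext straight through.
            List<StreamRecordBatch> traced = logCache.get(fetchContext, 233L, 10L, 13L, 1024);
            // Per the javadoc below, returned records are retained and must be released.
            records.forEach(StreamRecordBatch::release);
            traced.forEach(StreamRecordBatch::release);
        }
    }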
@@ -124,7 +131,13 @@ public boolean put(StreamRecordBatch recordBatch) { *

* Note: the records is retained, the caller should release it. */ - public List get(long streamId, long startOffset, long endOffset, int maxBytes) { + @WithSpan + public List get(TraceContext context, + @SpanAttribute long streamId, + @SpanAttribute long startOffset, + @SpanAttribute long endOffset, + @SpanAttribute int maxBytes) { + context.currentContext(); TimerUtil timerUtil = new TimerUtil(); List records; readLock.lock(); diff --git a/s3stream/src/main/java/com/automq/stream/s3/cache/S3BlockCache.java b/s3stream/src/main/java/com/automq/stream/s3/cache/S3BlockCache.java index 060cc7600..161780a29 100644 --- a/s3stream/src/main/java/com/automq/stream/s3/cache/S3BlockCache.java +++ b/s3stream/src/main/java/com/automq/stream/s3/cache/S3BlockCache.java @@ -17,6 +17,8 @@ package com.automq.stream.s3.cache; +import com.automq.stream.s3.trace.context.TraceContext; + import java.util.concurrent.CompletableFuture; /** @@ -26,5 +28,9 @@ */ public interface S3BlockCache { - CompletableFuture read(long streamId, long startOffset, long endOffset, int maxBytes); + CompletableFuture read(TraceContext context, long streamId, long startOffset, long endOffset, int maxBytes); + + default CompletableFuture read(long streamId, long startOffset, long endOffset, int maxBytes) { + return read(TraceContext.DEFAULT, streamId, startOffset, endOffset, maxBytes); + } } diff --git a/s3stream/src/main/java/com/automq/stream/s3/cache/StreamReader.java b/s3stream/src/main/java/com/automq/stream/s3/cache/StreamReader.java index 06640b21e..8981fdc83 100644 --- a/s3stream/src/main/java/com/automq/stream/s3/cache/StreamReader.java +++ b/s3stream/src/main/java/com/automq/stream/s3/cache/StreamReader.java @@ -26,7 +26,12 @@ import com.automq.stream.s3.model.StreamRecordBatch; import com.automq.stream.s3.objects.ObjectManager; import com.automq.stream.s3.operator.S3Operator; +import com.automq.stream.s3.trace.TraceUtils; +import com.automq.stream.s3.trace.context.TraceContext; import com.automq.stream.utils.Threads; +import io.opentelemetry.api.common.Attributes; +import io.opentelemetry.instrumentation.annotations.SpanAttribute; +import io.opentelemetry.instrumentation.annotations.WithSpan; import org.apache.commons.lang3.tuple.ImmutablePair; import org.apache.commons.lang3.tuple.Pair; import org.slf4j.Logger; @@ -105,8 +110,12 @@ public void shutdown() { errorHandlerExecutor.shutdown(); } - public CompletableFuture> syncReadAhead(long streamId, long startOffset, long endOffset, - int maxBytes, ReadAheadAgent agent, UUID uuid) { + @WithSpan + public CompletableFuture> syncReadAhead(TraceContext traceContext, + @SpanAttribute long streamId, + @SpanAttribute long startOffset, + @SpanAttribute long endOffset, + @SpanAttribute int maxBytes, ReadAheadAgent agent, UUID uuid) { if (LOGGER.isDebugEnabled()) { LOGGER.debug("[S3BlockCache] sync read ahead, stream={}, {}-{}, maxBytes={}", streamId, startOffset, endOffset, maxBytes); } @@ -119,9 +128,9 @@ public CompletableFuture> syncReadAhead(long streamId, l if (inflightReadAheadTaskMap.putIfAbsent(readAheadTaskKey, readAheadTaskContext) == null) { context.taskKeySet.add(readAheadTaskKey); } - return getDataBlockIndices(streamId, endOffset, context) + return getDataBlockIndices(traceContext, streamId, endOffset, context) .thenComposeAsync(v -> - handleSyncReadAhead(streamId, startOffset, endOffset, maxBytes, agent, uuid, timer, context), streamReaderExecutor) + handleSyncReadAhead(traceContext, streamId, startOffset, endOffset, maxBytes, agent, uuid, timer, context), 
streamReaderExecutor) .whenComplete((nil, ex) -> { for (DefaultS3BlockCache.ReadAheadTaskKey key : context.taskKeySet) { completeInflightTask0(key, ex); @@ -131,7 +140,8 @@ public CompletableFuture> syncReadAhead(long streamId, l }); } - CompletableFuture> handleSyncReadAhead(long streamId, long startOffset, long endOffset, + @WithSpan + CompletableFuture> handleSyncReadAhead(TraceContext traceContext, long streamId, long startOffset, long endOffset, int maxBytes, ReadAheadAgent agent, UUID uuid, TimerUtil timer, ReadContext context) { if (context.streamDataBlocksPair.isEmpty()) { @@ -158,7 +168,7 @@ CompletableFuture> handleSyncReadAhead(long streamId, lo totalReserveSize, uuid, streamId, startOffset, endOffset, maxBytes); } - CompletableFuture throttleCf = inflightReadThrottle.acquire(uuid, totalReserveSize); + CompletableFuture throttleCf = inflightReadThrottle.acquire(traceContext, uuid, totalReserveSize); return throttleCf.thenComposeAsync(nil -> { // concurrently read all data blocks for (int i = 0; i < streamDataBlocksToRead.size(); i++) { @@ -182,31 +192,37 @@ CompletableFuture> handleSyncReadAhead(long streamId, lo setInflightReadAheadStatus(new DefaultS3BlockCache.ReadAheadTaskKey(streamId, startOffset), DefaultS3BlockCache.ReadBlockCacheStatus.WAIT_FETCH_DATA); } - cfList.add(reserveResult.cf().thenApplyAsync(dataBlock -> { - if (dataBlock.records().isEmpty()) { - return new ArrayList(); - } - // retain records to be returned - dataBlock.records().forEach(StreamRecordBatch::retain); - recordsMap.put(dataBlockKey, dataBlock.records()); - - // retain records to be put into block cache - dataBlock.records().forEach(StreamRecordBatch::retain); - blockCache.put(streamId, dataBlock.records()); - dataBlock.release(); - - return dataBlock.records(); - }, backgroundExecutor).whenComplete((ret, ex) -> { - if (ex != null) { - LOGGER.error("[S3BlockCache] sync ra fail to read data block, stream={}, {}-{}, data block: {}", - streamId, startOffset, endOffset, streamDataBlock, ex); - } - completeInflightTask(context, taskKey, ex); - if (isNotAlignedFirstBlock) { - // in case of first data block and startOffset is not aligned with start of data block - completeInflightTask(context, new DefaultS3BlockCache.ReadAheadTaskKey(streamId, startOffset), ex); - } - })); + try { + CompletableFuture> cf = TraceUtils.runWithSpanAsync(new TraceContext(traceContext), Attributes.empty(), "StreamReader::readDataBlock", + () -> reserveResult.cf().thenApplyAsync(dataBlock -> { + if (dataBlock.records().isEmpty()) { + return new ArrayList(); + } + // retain records to be returned + dataBlock.records().forEach(StreamRecordBatch::retain); + recordsMap.put(dataBlockKey, dataBlock.records()); + + // retain records to be put into block cache + dataBlock.records().forEach(StreamRecordBatch::retain); + blockCache.put(streamId, dataBlock.records()); + dataBlock.release(); + + return dataBlock.records(); + }, backgroundExecutor).whenComplete((ret, ex) -> { + if (ex != null) { + LOGGER.error("[S3BlockCache] sync ra fail to read data block, stream={}, {}-{}, data block: {}", + streamId, startOffset, endOffset, streamDataBlock, ex); + } + completeInflightTask(context, taskKey, ex); + if (isNotAlignedFirstBlock) { + // in case of first data block and startOffset is not aligned with start of data block + completeInflightTask(context, new DefaultS3BlockCache.ReadAheadTaskKey(streamId, startOffset), ex); + } + })); + cfList.add(cf); + } catch (Throwable e) { + throw new IllegalArgumentException(e); + } if 
(reserveResult.reserveSize() > 0) { dataBlockReadAccumulator.readDataBlock(objectReader, streamDataBlock.dataBlockIndex()); } @@ -274,7 +290,7 @@ public void asyncReadAhead(long streamId, long startOffset, long endOffset, int DefaultS3BlockCache.ReadBlockCacheStatus.WAIT_DATA_INDEX); inflightReadAheadTaskMap.putIfAbsent(readAheadTaskKey, readAheadTaskContext); context.taskKeySet.add(readAheadTaskKey); - getDataBlockIndices(streamId, endOffset, context) + getDataBlockIndices(TraceContext.DEFAULT, streamId, endOffset, context) .thenAcceptAsync(v -> handleAsyncReadAhead(streamId, startOffset, endOffset, maxBytes, agent, timer, context), streamReaderExecutor) .whenComplete((nil, ex) -> { @@ -347,7 +363,7 @@ CompletableFuture handleAsyncReadAhead(long streamId, long startOffset, lo reserveResult.reserveSize(), uuid, streamId, startOffset, endOffset, maxBytes); } if (reserveResult.reserveSize() > 0) { - inflightReadThrottle.acquire(uuid, reserveResult.reserveSize()).thenAcceptAsync(nil -> { + inflightReadThrottle.acquire(TraceContext.DEFAULT, uuid, reserveResult.reserveSize()).thenAcceptAsync(nil -> { // read data block if (context.taskKeySet.contains(taskKey)) { setInflightReadAheadStatus(taskKey, DefaultS3BlockCache.ReadBlockCacheStatus.WAIT_FETCH_DATA); @@ -380,7 +396,9 @@ CompletableFuture handleAsyncReadAhead(long streamId, long startOffset, lo }); } - CompletableFuture getDataBlockIndices(long streamId, long endOffset, ReadContext context) { + @WithSpan + CompletableFuture getDataBlockIndices(TraceContext traceContext, long streamId, long endOffset, ReadContext context) { + traceContext.currentContext(); CompletableFuture getObjectsCf = CompletableFuture.completedFuture(false); if (context.objectIndex >= context.objects.size()) { getObjectsCf = objectManager @@ -450,7 +468,7 @@ CompletableFuture getDataBlockIndices(long streamId, long endOffset, ReadC if (findIndexResult.isFulfilled()) { return CompletableFuture.completedFuture(null); } - return getDataBlockIndices(streamId, endOffset, context); + return getDataBlockIndices(traceContext, streamId, endOffset, context); }, streamReaderExecutor); } diff --git a/s3stream/src/main/java/com/automq/stream/s3/context/AppendContext.java b/s3stream/src/main/java/com/automq/stream/s3/context/AppendContext.java new file mode 100644 index 000000000..de8ef8626 --- /dev/null +++ b/s3stream/src/main/java/com/automq/stream/s3/context/AppendContext.java @@ -0,0 +1,38 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.automq.stream.s3.context; + +import com.automq.stream.s3.trace.context.TraceContext; +import io.opentelemetry.api.trace.Tracer; +import io.opentelemetry.context.Context; + +public class AppendContext extends TraceContext { + public static final AppendContext DEFAULT = new AppendContext(); + + public AppendContext() { + super(false, null, null); + } + + public AppendContext(TraceContext context) { + super(context); + } + + public AppendContext(boolean isTraceEnabled, Tracer tracer, Context currentContext) { + super(isTraceEnabled, tracer, currentContext); + } +} diff --git a/s3stream/src/main/java/com/automq/stream/s3/context/FetchContext.java b/s3stream/src/main/java/com/automq/stream/s3/context/FetchContext.java new file mode 100644 index 000000000..3218e864b --- /dev/null +++ b/s3stream/src/main/java/com/automq/stream/s3/context/FetchContext.java @@ -0,0 +1,48 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.automq.stream.s3.context; + +import com.automq.stream.api.ReadOptions; +import com.automq.stream.s3.trace.context.TraceContext; +import io.opentelemetry.api.trace.Tracer; +import io.opentelemetry.context.Context; + +public class FetchContext extends TraceContext { + public static final FetchContext DEFAULT = new FetchContext(); + private ReadOptions readOptions = ReadOptions.DEFAULT; + + public FetchContext() { + super(false, null, null); + } + + public FetchContext(TraceContext context) { + super(context); + } + + public FetchContext(boolean isTraceEnabled, Tracer tracer, Context currentContext) { + super(isTraceEnabled, tracer, currentContext); + } + + public ReadOptions readOptions() { + return readOptions; + } + + public void setReadOptions(ReadOptions readOptions) { + this.readOptions = readOptions; + } +} diff --git a/s3stream/src/main/java/com/automq/stream/s3/trace/AttributeBindings.java b/s3stream/src/main/java/com/automq/stream/s3/trace/AttributeBindings.java new file mode 100644 index 000000000..c60247aeb --- /dev/null +++ b/s3stream/src/main/java/com/automq/stream/s3/trace/AttributeBindings.java @@ -0,0 +1,106 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.automq.stream.s3.trace; + +import io.opentelemetry.api.common.AttributeKey; +import io.opentelemetry.api.common.AttributesBuilder; +import io.opentelemetry.instrumentation.annotations.SpanAttribute; + +import java.lang.reflect.Method; +import java.lang.reflect.Parameter; +import java.lang.reflect.Type; +import java.util.function.BiFunction; + +public class AttributeBindings { + private final BiFunction[] bindings; + + private AttributeBindings(BiFunction[] bindings) { + this.bindings = bindings; + } + + public static AttributeBindings bind(Method method, String[] parametersNames) { + Parameter[] parameters = method.getParameters(); + if (parameters.length != parametersNames.length) { + return new AttributeBindings(null); + } + + BiFunction[] bindings = new BiFunction[parametersNames.length]; + for (int i = 0; i < parametersNames.length; i++) { + Parameter parameter = parameters[i]; + + SpanAttribute spanAttribute = parameter.getAnnotation(SpanAttribute.class); + if (spanAttribute == null) { + bindings[i] = emptyBinding(); + } else { + String attributeName = spanAttribute.value().isEmpty() ? parametersNames[i] : spanAttribute.value(); + bindings[i] = createBinding(attributeName, parameter.getParameterizedType()); + } + } + return new AttributeBindings(bindings); + } + + public boolean isEmpty() { + return bindings == null || bindings.length == 0; + } + + public void apply(AttributesBuilder target, Object[] args) { + if (args.length != bindings.length) { + return; + } + + for (int i = 0; i < args.length; i++) { + bindings[i].apply(target, args[i]); + } + } + + static BiFunction emptyBinding() { + return (builder, arg) -> builder; + } + + static BiFunction createBinding(String name, Type type) { + // Simple scalar parameter types + if (type == String.class) { + AttributeKey key = AttributeKey.stringKey(name); + return (builder, arg) -> builder.put(key, (String) arg); + } + if (type == long.class || type == Long.class) { + AttributeKey key = AttributeKey.longKey(name); + return (builder, arg) -> builder.put(key, (Long) arg); + } + if (type == double.class || type == Double.class) { + AttributeKey key = AttributeKey.doubleKey(name); + return (builder, arg) -> builder.put(key, (Double) arg); + } + if (type == boolean.class || type == Boolean.class) { + AttributeKey key = AttributeKey.booleanKey(name); + return (builder, arg) -> builder.put(key, (Boolean) arg); + } + if (type == int.class || type == Integer.class) { + AttributeKey key = AttributeKey.longKey(name); + return (builder, arg) -> builder.put(key, ((Integer) arg).longValue()); + } + if (type == float.class || type == Float.class) { + AttributeKey key = AttributeKey.doubleKey(name); + return (builder, arg) -> builder.put(key, ((Float) arg).doubleValue()); + } + + // Default parameter types + AttributeKey key = AttributeKey.stringKey(name); + return (builder, arg) -> builder.put(key, arg.toString()); + } +} diff --git a/s3stream/src/main/java/com/automq/stream/s3/trace/MethodCache.java b/s3stream/src/main/java/com/automq/stream/s3/trace/MethodCache.java new file mode 100644 index 000000000..c0151593c --- 
/dev/null +++ b/s3stream/src/main/java/com/automq/stream/s3/trace/MethodCache.java @@ -0,0 +1,34 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.automq.stream.s3.trace; + +import java.lang.reflect.Method; +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; +import java.util.function.Function; + +final class MethodCache extends ClassValue> { + public V computeIfAbsent(Method key, Function mappingFunction) { + return this.get(key.getDeclaringClass()).computeIfAbsent(key, mappingFunction); + } + + @Override + protected Map computeValue(Class type) { + return new ConcurrentHashMap<>(); + } +} diff --git a/s3stream/src/main/java/com/automq/stream/s3/trace/SpanAttributesExtractor.java b/s3stream/src/main/java/com/automq/stream/s3/trace/SpanAttributesExtractor.java new file mode 100644 index 000000000..ffa4c0c2d --- /dev/null +++ b/s3stream/src/main/java/com/automq/stream/s3/trace/SpanAttributesExtractor.java @@ -0,0 +1,46 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.automq.stream.s3.trace; + +import io.opentelemetry.api.common.Attributes; +import io.opentelemetry.api.common.AttributesBuilder; + +import java.lang.reflect.Method; + +public final class SpanAttributesExtractor { + + private final MethodCache cache; + + public static SpanAttributesExtractor create() { + return new SpanAttributesExtractor(new MethodCache<>()); + } + + SpanAttributesExtractor(MethodCache cache) { + this.cache = cache; + } + + public Attributes extract(Method method, String[] parametersNames, Object[] args) { + AttributesBuilder attributes = Attributes.builder(); + AttributeBindings bindings = + cache.computeIfAbsent(method, (Method m) -> AttributeBindings.bind(m, parametersNames)); + if (!bindings.isEmpty()) { + bindings.apply(attributes, args); + } + return attributes.build(); + } +} \ No newline at end of file diff --git a/s3stream/src/main/java/com/automq/stream/s3/trace/TraceUtils.java b/s3stream/src/main/java/com/automq/stream/s3/trace/TraceUtils.java new file mode 100644 index 000000000..4f94724d1 --- /dev/null +++ b/s3stream/src/main/java/com/automq/stream/s3/trace/TraceUtils.java @@ -0,0 +1,143 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.automq.stream.s3.trace; + +import com.automq.stream.s3.trace.context.TraceContext; +import io.opentelemetry.api.common.Attributes; +import io.opentelemetry.api.trace.Span; +import io.opentelemetry.api.trace.StatusCode; +import io.opentelemetry.api.trace.Tracer; +import io.opentelemetry.context.Context; +import io.opentelemetry.instrumentation.annotations.WithSpan; +import org.aspectj.lang.ProceedingJoinPoint; +import org.aspectj.lang.reflect.MethodSignature; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.lang.reflect.Method; +import java.util.concurrent.Callable; +import java.util.concurrent.CompletableFuture; + +public class TraceUtils { + private static final Logger LOGGER = LoggerFactory.getLogger(TraceUtils.class); + private static final SpanAttributesExtractor EXTRACTOR = SpanAttributesExtractor.create(); + + public static Object trace(TraceContext context, ProceedingJoinPoint joinPoint, WithSpan withSpan) throws Throwable { + if (context.isTraceDisabled()) { + return joinPoint.proceed(); + } + + MethodSignature signature = (MethodSignature) joinPoint.getSignature(); + Method method = signature.getMethod(); + Object[] args = joinPoint.getArgs(); + + String className = method.getDeclaringClass().getSimpleName(); + String spanName = withSpan.value().isEmpty() ? 
className + "::" + method.getName() : withSpan.value(); + + TraceContext.Scope scope = createAndStartSpan(context, spanName); + if (scope == null) { + return joinPoint.proceed(); + } + Span span = scope.getSpan(); + Attributes attributes = EXTRACTOR.extract(method, signature.getParameterNames(), args); + span.setAllAttributes(attributes); + + try { + if (method.getReturnType() == CompletableFuture.class) { + return doTraceWhenReturnCompletableFuture(scope, joinPoint); + } else { + return doTraceWhenReturnObject(scope, joinPoint); + } + } catch (Throwable t) { + endSpan(scope, t); + throw t; + } + } + + public static T runWithSpanSync(TraceContext context, Attributes attributes, String spanName, Callable callable) throws Throwable { + TraceContext.Scope scope = createAndStartSpan(context, spanName); + if (scope == null) { + return callable.call(); + } + scope.getSpan().setAllAttributes(attributes); + try (scope) { + T ret = callable.call(); + endSpan(scope, null); + return ret; + } catch (Throwable t) { + endSpan(scope, t); + throw t; + } + } + + public static CompletableFuture runWithSpanAsync(TraceContext context, Attributes attributes, String spanName, + Callable> callable) throws Throwable { + TraceContext.Scope scope = createAndStartSpan(context, spanName); + if (scope == null) { + return callable.call(); + } + scope.getSpan().setAllAttributes(attributes); + try (scope) { + CompletableFuture cf = callable.call(); + cf.whenComplete((nil, ex) -> endSpan(scope, ex)); + return cf; + } catch (Throwable t) { + endSpan(scope, t); + throw t; + } + } + + public static TraceContext.Scope createAndStartSpan(TraceContext context, String name) { + if (context.isTraceDisabled()) { + return null; + } + Tracer tracer = context.tracer(); + Context parentContext = context.currentContext(); + Span span = tracer.spanBuilder(name) + .setParent(parentContext) + .startSpan(); + + return context.attachContext(parentContext.with(span)); + } + + public static void endSpan(TraceContext.Scope scope, Throwable t) { + if (scope == null) { + return; + } + if (t != null) { + scope.getSpan().recordException(t); + scope.getSpan().setStatus(StatusCode.ERROR, t.getMessage()); + } else { + scope.getSpan().setStatus(StatusCode.OK); + } + scope.getSpan().end(); + scope.close(); + } + + private static CompletableFuture doTraceWhenReturnCompletableFuture(TraceContext.Scope scope, ProceedingJoinPoint joinPoint) throws Throwable { + CompletableFuture future = (CompletableFuture) joinPoint.proceed(); + return future.whenComplete((r, t) -> endSpan(scope, t)); + } + + private static Object doTraceWhenReturnObject(TraceContext.Scope scope, ProceedingJoinPoint joinPoint) throws Throwable { + Object result = joinPoint.proceed(); + endSpan(scope, null); + return result; + } + +} diff --git a/s3stream/src/main/java/com/automq/stream/s3/trace/aop/S3StreamTraceAspect.java b/s3stream/src/main/java/com/automq/stream/s3/trace/aop/S3StreamTraceAspect.java new file mode 100644 index 000000000..3490797fb --- /dev/null +++ b/s3stream/src/main/java/com/automq/stream/s3/trace/aop/S3StreamTraceAspect.java @@ -0,0 +1,44 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.automq.stream.s3.trace.aop; + +import com.automq.stream.s3.trace.TraceUtils; +import com.automq.stream.s3.trace.context.TraceContext; +import io.opentelemetry.instrumentation.annotations.WithSpan; +import org.aspectj.lang.ProceedingJoinPoint; +import org.aspectj.lang.annotation.Around; +import org.aspectj.lang.annotation.Aspect; +import org.aspectj.lang.annotation.Pointcut; + +@Aspect +public class S3StreamTraceAspect { + + @Pointcut("@annotation(withSpan)") + public void trace(WithSpan withSpan) { + } + + @Around(value = "trace(withSpan) && execution(* com.automq.stream..*(..))", argNames = "joinPoint,withSpan") + public Object createSpan(ProceedingJoinPoint joinPoint, WithSpan withSpan) throws Throwable { + Object[] args = joinPoint.getArgs(); + if (args.length > 0 && args[0] instanceof TraceContext context) { + return TraceUtils.trace(context, joinPoint, withSpan); + } + + return joinPoint.proceed(); + } +} diff --git a/s3stream/src/main/java/com/automq/stream/s3/trace/context/TraceContext.java b/s3stream/src/main/java/com/automq/stream/s3/trace/context/TraceContext.java new file mode 100644 index 000000000..a8342221a --- /dev/null +++ b/s3stream/src/main/java/com/automq/stream/s3/trace/context/TraceContext.java @@ -0,0 +1,95 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.automq.stream.s3.trace.context; + +import io.opentelemetry.api.GlobalOpenTelemetry; +import io.opentelemetry.api.trace.Span; +import io.opentelemetry.api.trace.Tracer; +import io.opentelemetry.context.Context; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import javax.annotation.concurrent.NotThreadSafe; + +/** + * Trace context that holds the current trace context. This class is not thread safe and should be copied before + * asynchronous usage. 
*/ +@NotThreadSafe +public class TraceContext { + private static final Logger LOGGER = LoggerFactory.getLogger(TraceContext.class); + public static final TraceContext DEFAULT = new TraceContext(false, null, null); + private final boolean isTraceEnabled; + private final Tracer tracer; + private Context currContext; + + public TraceContext(boolean isTraceEnabled, Tracer tracer, Context currContext) { + this.isTraceEnabled = isTraceEnabled; + if (isTraceEnabled && tracer == null) { + this.tracer = GlobalOpenTelemetry.getTracer("s3stream"); + } else { + this.tracer = tracer; + } + if (isTraceEnabled && currContext == null) { + this.currContext = Context.current(); + } else { + this.currContext = currContext; + } + } + + public TraceContext(TraceContext traceContext) { + this(traceContext.isTraceEnabled, traceContext.tracer, traceContext.currContext); + } + + public boolean isTraceDisabled() { + return !isTraceEnabled; + } + + public Tracer tracer() { + return tracer; + } + + public Context currentContext() { + return currContext; + } + + public Scope attachContext(Context contextToAttach) { + return new Scope(contextToAttach); + } + + public class Scope implements AutoCloseable { + private final Context prevContext; + private final Span span; + + private Scope(Context contextToAttach) { + this.prevContext = currContext; + this.span = Span.fromContext(contextToAttach); + currContext = contextToAttach; + } + + public Span getSpan() { + return span; + } + + @Override + public void close() { + currContext = prevContext; + } + } + +} diff --git a/s3stream/src/main/java/com/automq/stream/s3/wal/BlockWALService.java b/s3stream/src/main/java/com/automq/stream/s3/wal/BlockWALService.java index 6fe679474..6e2ccfbdb 100644 --- a/s3stream/src/main/java/com/automq/stream/s3/wal/BlockWALService.java +++ b/s3stream/src/main/java/com/automq/stream/s3/wal/BlockWALService.java @@ -23,6 +23,8 @@ import com.automq.stream.s3.metrics.TimerUtil; import com.automq.stream.s3.metrics.operations.S3Operation; import com.automq.stream.s3.metrics.operations.S3Stage; +import com.automq.stream.s3.trace.TraceUtils; +import com.automq.stream.s3.trace.context.TraceContext; import com.automq.stream.s3.wal.util.WALChannel; import com.automq.stream.s3.wal.util.WALUtil; import com.automq.stream.utils.ThreadUtils; @@ -380,13 +382,18 @@ public WALMetadata metadata() { } @Override - public AppendResult append(ByteBuf buf, int crc) throws OverCapacityException { + public AppendResult append(TraceContext context, ByteBuf buf, int crc) throws OverCapacityException { + // start a span covering this WAL append; it is ended when the append future completes + TraceContext.Scope scope = TraceUtils.createAndStartSpan(context, "BlockWALService::append"); TimerUtil timerUtil = new TimerUtil(); try { - return append0(buf, crc); + AppendResult result = append0(buf, crc); + result.future().whenComplete((nil, ex) -> TraceUtils.endSpan(scope, ex)); + return result; } catch (OverCapacityException ex) { buf.release(); S3StreamMetricsManager.recordOperationLatency(timerUtil.elapsedAs(TimeUnit.NANOSECONDS), S3Operation.APPEND_STORAGE_WAL_FULL); + TraceUtils.endSpan(scope, ex); throw ex; } } } diff --git a/s3stream/src/main/java/com/automq/stream/s3/wal/MemoryWriteAheadLog.java b/s3stream/src/main/java/com/automq/stream/s3/wal/MemoryWriteAheadLog.java index ffb8601b6..9245b0a1c 100644 --- a/s3stream/src/main/java/com/automq/stream/s3/wal/MemoryWriteAheadLog.java +++ b/s3stream/src/main/java/com/automq/stream/s3/wal/MemoryWriteAheadLog.java @@ -17,6 +17,7 @@ package com.automq.stream.s3.wal; +import
diff --git a/s3stream/src/main/java/com/automq/stream/s3/wal/BlockWALService.java b/s3stream/src/main/java/com/automq/stream/s3/wal/BlockWALService.java
index 6fe679474..6e2ccfbdb 100644
--- a/s3stream/src/main/java/com/automq/stream/s3/wal/BlockWALService.java
+++ b/s3stream/src/main/java/com/automq/stream/s3/wal/BlockWALService.java
@@ -23,6 +23,8 @@
 import com.automq.stream.s3.metrics.TimerUtil;
 import com.automq.stream.s3.metrics.operations.S3Operation;
 import com.automq.stream.s3.metrics.operations.S3Stage;
+import com.automq.stream.s3.trace.TraceUtils;
+import com.automq.stream.s3.trace.context.TraceContext;
 import com.automq.stream.s3.wal.util.WALChannel;
 import com.automq.stream.s3.wal.util.WALUtil;
 import com.automq.stream.utils.ThreadUtils;
@@ -380,13 +382,18 @@ public WALMetadata metadata() {
     }
 
     @Override
-    public AppendResult append(ByteBuf buf, int crc) throws OverCapacityException {
+    public AppendResult append(TraceContext context, ByteBuf buf, int crc) throws OverCapacityException {
+        // start a span for this append; it is ended when the append future completes
+        TraceContext.Scope scope = TraceUtils.createAndStartSpan(context, "BlockWALService::append");
         TimerUtil timerUtil = new TimerUtil();
         try {
-            return append0(buf, crc);
+            AppendResult result = append0(buf, crc);
+            result.future().whenComplete((nil, ex) -> TraceUtils.endSpan(scope, ex));
+            return result;
         } catch (OverCapacityException ex) {
             buf.release();
             S3StreamMetricsManager.recordOperationLatency(timerUtil.elapsedAs(TimeUnit.NANOSECONDS), S3Operation.APPEND_STORAGE_WAL_FULL);
+            TraceUtils.endSpan(scope, ex);
             throw ex;
         }
     }
diff --git a/s3stream/src/main/java/com/automq/stream/s3/wal/MemoryWriteAheadLog.java b/s3stream/src/main/java/com/automq/stream/s3/wal/MemoryWriteAheadLog.java
index ffb8601b6..9245b0a1c 100644
--- a/s3stream/src/main/java/com/automq/stream/s3/wal/MemoryWriteAheadLog.java
+++ b/s3stream/src/main/java/com/automq/stream/s3/wal/MemoryWriteAheadLog.java
@@ -17,6 +17,7 @@
 
 package com.automq.stream.s3.wal;
 
+import com.automq.stream.s3.trace.context.TraceContext;
 import io.netty.buffer.ByteBuf;
 
 import java.io.IOException;
@@ -45,7 +46,7 @@ public WALMetadata metadata() {
     }
 
     @Override
-    public AppendResult append(ByteBuf data, int crc) {
+    public AppendResult append(TraceContext traceContext, ByteBuf data, int crc) {
         data.release();
         long offset = offsetAlloc.getAndIncrement();
         return new AppendResult() {
diff --git a/s3stream/src/main/java/com/automq/stream/s3/wal/WriteAheadLog.java b/s3stream/src/main/java/com/automq/stream/s3/wal/WriteAheadLog.java
index 6297b614a..4b301464a 100644
--- a/s3stream/src/main/java/com/automq/stream/s3/wal/WriteAheadLog.java
+++ b/s3stream/src/main/java/com/automq/stream/s3/wal/WriteAheadLog.java
@@ -18,6 +18,7 @@
 
 package com.automq.stream.s3.wal;
 
+import com.automq.stream.s3.trace.context.TraceContext;
 import io.netty.buffer.ByteBuf;
 
 import java.io.IOException;
@@ -45,10 +46,18 @@ public interface WriteAheadLog {
      *
      * @return The data position will be written.
      */
-    AppendResult append(ByteBuf data, int crc) throws OverCapacityException;
+    AppendResult append(TraceContext context, ByteBuf data, int crc) throws OverCapacityException;
+
+    default AppendResult append(TraceContext context, ByteBuf data) throws OverCapacityException {
+        return append(context, data, 0);
+    }
+
+    default AppendResult append(ByteBuf data, int crc) throws OverCapacityException {
+        return append(TraceContext.DEFAULT, data, crc);
+    }
 
     default AppendResult append(ByteBuf data) throws OverCapacityException {
-        return append(data, 0);
+        return append(TraceContext.DEFAULT, data, 0);
    }
 
     Iterator<RecoverResult> recover();
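The default overloads keep context-free call sites compiling: they fall back to TraceContext.DEFAULT, which carries no tracer and reports tracing disabled. A minimal sketch of a caller, assuming `wal` and `buf` are initialized elsewhere (the names are illustrative):

    // a minimal sketch; AppendResult here is the WAL's result type declared above
    try {
        AppendResult result = wal.append(buf);             // delegates to append(TraceContext.DEFAULT, buf, 0)
        result.future().whenComplete((rst, ex) -> {
            // completes when the record is persisted; BlockWALService also ends its trace span here
        });
    } catch (WriteAheadLog.OverCapacityException e) {
        // BlockWALService has already released buf and recorded the WAL-full metric
    }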
diff --git a/s3stream/src/test/java/com/automq/stream/s3/S3StorageTest.java b/s3stream/src/test/java/com/automq/stream/s3/S3StorageTest.java
index d36ae9f52..878b3774b 100644
--- a/s3stream/src/test/java/com/automq/stream/s3/S3StorageTest.java
+++ b/s3stream/src/test/java/com/automq/stream/s3/S3StorageTest.java
@@ -17,7 +17,6 @@
 
 package com.automq.stream.s3;
 
-import com.automq.stream.api.ReadOptions;
 import com.automq.stream.s3.cache.DefaultS3BlockCache;
 import com.automq.stream.s3.cache.LogCache;
 import com.automq.stream.s3.cache.ReadDataBlock;
@@ -103,9 +102,9 @@ public void testAppend() throws Exception {
         cf2.get(3, TimeUnit.SECONDS);
         cf3.get(3, TimeUnit.SECONDS);
 
-        ReadDataBlock readRst = storage.read(233, 10, 13, 90, ReadOptions.DEFAULT).get();
+        ReadDataBlock readRst = storage.read(233, 10, 13, 90).get();
         assertEquals(1, readRst.getRecords().size());
-        readRst = storage.read(233, 10, 13, 200, ReadOptions.DEFAULT).get();
+        readRst = storage.read(233, 10, 13, 200).get();
         assertEquals(2, readRst.getRecords().size());
 
         storage.forceUpload(233L).get();
@@ -252,7 +251,7 @@ public void testRecoverContinuousRecords() {
     public void testWALOverCapacity() throws WriteAheadLog.OverCapacityException {
         storage.append(newRecord(233L, 10L));
         storage.append(newRecord(233L, 11L));
-        doThrow(new WriteAheadLog.OverCapacityException("test")).when(wal).append(any());
+        doThrow(new WriteAheadLog.OverCapacityException("test")).when(wal).append(any(), any());
 
         Mockito.when(objectManager.prepareObject(eq(1), anyLong())).thenReturn(CompletableFuture.completedFuture(16L));
         CommitStreamSetObjectResponse resp = new CommitStreamSetObjectResponse();
diff --git a/s3stream/src/test/java/com/automq/stream/s3/S3StreamTest.java b/s3stream/src/test/java/com/automq/stream/s3/S3StreamTest.java
index 21c83c386..3779731be 100644
--- a/s3stream/src/test/java/com/automq/stream/s3/S3StreamTest.java
+++ b/s3stream/src/test/java/com/automq/stream/s3/S3StreamTest.java
@@ -56,7 +56,7 @@ public void setup() {
     @Test
     public void testFetch() throws Throwable {
         stream.confirmOffset.set(120L);
-        Mockito.when(storage.read(eq(233L), eq(110L), eq(120L), eq(100), any()))
+        Mockito.when(storage.read(any(), eq(233L), eq(110L), eq(120L), eq(100)))
             .thenReturn(CompletableFuture.completedFuture(newReadDataBlock(110, 115, 110)));
         FetchResult rst = stream.fetch(110, 120, 100).get(1, TimeUnit.SECONDS);
         assertEquals(1, rst.recordBatchList().size());
diff --git a/s3stream/src/test/java/com/automq/stream/s3/cache/StreamReaderTest.java b/s3stream/src/test/java/com/automq/stream/s3/cache/StreamReaderTest.java
index 94b5003fe..01a5a37e1 100644
--- a/s3stream/src/test/java/com/automq/stream/s3/cache/StreamReaderTest.java
+++ b/s3stream/src/test/java/com/automq/stream/s3/cache/StreamReaderTest.java
@@ -29,6 +29,7 @@
 import com.automq.stream.s3.objects.ObjectManager;
 import com.automq.stream.s3.operator.MemoryS3Operator;
 import com.automq.stream.s3.operator.S3Operator;
+import com.automq.stream.s3.trace.context.TraceContext;
 import com.automq.stream.utils.CloseableIterator;
 import com.automq.stream.utils.Threads;
 import org.apache.commons.lang3.tuple.ImmutablePair;
@@ -77,7 +78,7 @@ public void testGetDataBlockIndices() {
         StreamReader streamReader = new StreamReader(s3Operator, objectManager, Mockito.mock(BlockCache.class), new HashMap<>(), new InflightReadThrottle());
         StreamReader.ReadContext context = new StreamReader.ReadContext(15L, 1024);
-        streamReader.getDataBlockIndices(233L, 1024L, context).thenAccept(v -> {
+        streamReader.getDataBlockIndices(TraceContext.DEFAULT, 233L, 1024L, context).thenAccept(v -> {
             Assertions.assertEquals(40L, context.nextStartOffset);
             Assertions.assertEquals(0, context.nextMaxBytes);
             Assertions.assertEquals(2, context.streamDataBlocksPair.size());
@@ -115,7 +116,7 @@ public void testSyncReadAheadInflight() {
             List.of(new StreamDataBlock(streamId, 64, 128, objectId, index1))))).when(reader).find(eq(streamId), eq(startOffset), anyLong(), eq(maxBytes));
         doReturn(new CompletableFuture<>()).when(reader).read(index1);
 
-        streamReader.syncReadAhead(streamId, startOffset, endOffset, maxBytes, Mockito.mock(ReadAheadAgent.class), UUID.randomUUID());
+        streamReader.syncReadAhead(TraceContext.DEFAULT, streamId, startOffset, endOffset, maxBytes, Mockito.mock(ReadAheadAgent.class), UUID.randomUUID());
         Threads.sleep(1000);
         Assertions.assertEquals(2, inflightReadAheadTasks.size());
         ReadAheadTaskKey key1 = new ReadAheadTaskKey(233L, startOffset);
@@ -171,7 +172,7 @@ public StreamRecordBatch next() {
         });
         Mockito.when(reader.read(index1)).thenReturn(CompletableFuture.completedFuture(dataBlock1));
         context.objectReaderMap = new HashMap<>(Map.of(1L, reader));
-        CompletableFuture<List<StreamRecordBatch>> cf = streamReader.handleSyncReadAhead(233L, 0,
+        CompletableFuture<List<StreamRecordBatch>> cf = streamReader.handleSyncReadAhead(TraceContext.DEFAULT, 233L, 0,
             999, 64, Mockito.mock(ReadAheadAgent.class), UUID.randomUUID(), new TimerUtil(), context);
 
         cf.whenComplete((rst, ex) -> {
@@ -233,7 +234,7 @@ public StreamRecordBatch next() {
         ReadAheadTaskKey key = new ReadAheadTaskKey(233L, startOffset);
         context.taskKeySet.add(key);
         inflightReadAheadTasks.put(key, new DefaultS3BlockCache.ReadAheadTaskContext(new CompletableFuture<>(), DefaultS3BlockCache.ReadBlockCacheStatus.INIT));
-        CompletableFuture<List<StreamRecordBatch>> cf = streamReader.handleSyncReadAhead(233L, startOffset,
+        CompletableFuture<List<StreamRecordBatch>> cf = streamReader.handleSyncReadAhead(TraceContext.DEFAULT, 233L, startOffset,
            999, 64, Mockito.mock(ReadAheadAgent.class), UUID.randomUUID(), new TimerUtil(), context);
 
         cf.whenComplete((rst, ex) -> {
@@ -294,7 +295,7 @@ public StreamRecordBatch next() {
         Mockito.when(reader.read(index1)).thenReturn(CompletableFuture.completedFuture(dataBlock1));
         Mockito.when(reader.read(index2)).thenReturn(CompletableFuture.failedFuture(new RuntimeException("exception")));
         context.objectReaderMap = new HashMap<>(Map.of(1L, reader));
-        CompletableFuture<List<StreamRecordBatch>> cf = streamReader.handleSyncReadAhead(233L, 0,
+        CompletableFuture<List<StreamRecordBatch>> cf = streamReader.handleSyncReadAhead(TraceContext.DEFAULT, 233L, 0,
            512, 1024, Mockito.mock(ReadAheadAgent.class), UUID.randomUUID(), new TimerUtil(), context);
 
         Threads.sleep(1000);
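The test updates follow one mechanical rule: every widened signature gains exactly one extra leading matcher for the new context argument. A hedged sketch of the same adaptation for other suites, assuming Mockito's static imports (doThrow, any):

    // a minimal sketch; `wal` is a Mockito mock, and "full" is an illustrative message
    WriteAheadLog wal = Mockito.mock(WriteAheadLog.class);
    doThrow(new WriteAheadLog.OverCapacityException("full"))
        .when(wal).append(any(TraceContext.class), any(ByteBuf.class));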
diff --git a/store/src/main/java/com/automq/rocketmq/store/S3StreamStore.java b/store/src/main/java/com/automq/rocketmq/store/S3StreamStore.java
index ef89bdaa6..c88aa9150 100644
--- a/store/src/main/java/com/automq/rocketmq/store/S3StreamStore.java
+++ b/store/src/main/java/com/automq/rocketmq/store/S3StreamStore.java
@@ -22,6 +22,7 @@
 import com.automq.rocketmq.metadata.api.StoreMetadataService;
 import com.automq.rocketmq.store.api.StreamStore;
 import com.automq.rocketmq.store.model.StoreContext;
+import com.automq.rocketmq.store.util.ContextUtil;
 import com.automq.stream.api.AppendResult;
 import com.automq.stream.api.FetchResult;
 import com.automq.stream.api.OpenStreamOptions;
@@ -35,6 +36,8 @@
 import com.automq.stream.s3.cache.DefaultS3BlockCache;
 import com.automq.stream.s3.cache.S3BlockCache;
 import com.automq.stream.s3.compact.CompactionManager;
+import com.automq.stream.s3.context.AppendContext;
+import com.automq.stream.s3.context.FetchContext;
 import com.automq.stream.s3.network.AsyncNetworkBandwidthLimiter;
 import com.automq.stream.s3.objects.ObjectManager;
 import com.automq.stream.s3.operator.DefaultS3Operator;
@@ -127,7 +130,8 @@ public CompletableFuture fetch(StoreContext context, @SpanAttribute
         if (stream.isEmpty()) {
             throw new IllegalStateException("Stream " + streamId + " is not opened.");
         }
-        return stream.get().fetch(startOffset, startOffset + maxCount, Integer.MAX_VALUE)
+        FetchContext fetchContext = new FetchContext(ContextUtil.buildStreamTraceContext(context));
+        return stream.get().fetch(fetchContext, startOffset, startOffset + maxCount, Integer.MAX_VALUE)
             .thenApplyAsync(result -> {
                 context.span().ifPresent(span -> {
                     span.setAttribute("messageCount", result.recordBatchList().size());
@@ -151,7 +155,8 @@ public CompletableFuture append(StoreContext context, long streamI
             span.setAttribute("recordBytes", recordBatch.rawPayload().remaining());
         });
 
-        return stream.get().append(recordBatch)
+        AppendContext appendContext = new AppendContext(ContextUtil.buildStreamTraceContext(context));
+        return stream.get().append(appendContext, recordBatch)
             .thenApplyAsync(result -> {
                 context.span().ifPresent(span -> span.setAttribute("offset", result.baseOffset()));
                 return result;
diff --git a/store/src/main/java/com/automq/rocketmq/store/util/ContextUtil.java b/store/src/main/java/com/automq/rocketmq/store/util/ContextUtil.java
new file mode 100644
index 000000000..b079ac38d
--- /dev/null
+++ b/store/src/main/java/com/automq/rocketmq/store/util/ContextUtil.java
@@ -0,0 +1,29 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.automq.rocketmq.store.util;
+
+import com.automq.stream.s3.trace.context.TraceContext;
+import io.opentelemetry.context.Context;
+
+public class ContextUtil {
+    public static TraceContext buildStreamTraceContext(com.automq.rocketmq.common.trace.TraceContext context) {
+        boolean isTraceEnabled = context.tracer().isPresent();
+        Context currContext = context.span().map(span -> Context.current().with(span)).orElse(Context.current());
+        return new TraceContext(isTraceEnabled, context.tracer().orElse(null), currContext); // tracer is absent whenever tracing is disabled
+    }
+}
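A short usage sketch of the bridge above, mirroring the S3StreamStore call sites; `storeContext`, `stream`, and the offset variables are assumed to come from the surrounding code:

    // a minimal sketch; storeContext is a com.automq.rocketmq.common.trace.TraceContext from the store layer
    FetchContext fetchContext = new FetchContext(ContextUtil.buildStreamTraceContext(storeContext));
    stream.fetch(fetchContext, startOffset, startOffset + maxCount, Integer.MAX_VALUE)
        .thenAccept(result -> {
            // spans opened in the store layer now parent the s3stream fetch spans
        });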
diff --git a/store/src/test/java/com/automq/rocketmq/store/mock/MemoryStreamClient.java b/store/src/test/java/com/automq/rocketmq/store/mock/MemoryStreamClient.java
index ddf5e12e0..eab43557f 100644
--- a/store/src/test/java/com/automq/rocketmq/store/mock/MemoryStreamClient.java
+++ b/store/src/test/java/com/automq/rocketmq/store/mock/MemoryStreamClient.java
@@ -21,11 +21,13 @@
 import com.automq.stream.api.CreateStreamOptions;
 import com.automq.stream.api.FetchResult;
 import com.automq.stream.api.OpenStreamOptions;
-import com.automq.stream.api.ReadOptions;
 import com.automq.stream.api.RecordBatch;
 import com.automq.stream.api.RecordBatchWithContext;
 import com.automq.stream.api.Stream;
 import com.automq.stream.api.StreamClient;
+import com.automq.stream.s3.context.AppendContext;
+import com.automq.stream.s3.context.FetchContext;
+
 import java.nio.ByteBuffer;
 import java.util.ArrayList;
 import java.util.List;
@@ -99,14 +101,14 @@ public long nextOffset() {
         }
 
         @Override
-        public CompletableFuture<AppendResult> append(RecordBatch recordBatch) {
+        public CompletableFuture<AppendResult> append(AppendContext context, RecordBatch recordBatch) {
             long baseOffset = nextOffsetAlloc.getAndAdd(recordBatch.count());
             recordMap.put(baseOffset, new RecordBatchWithContextWrapper(recordBatch, baseOffset));
             return CompletableFuture.completedFuture(() -> baseOffset);
         }
 
         @Override
-        public CompletableFuture<FetchResult> fetch(long startOffset, long endOffset, int maxBytesHint, ReadOptions readOptions) {
+        public CompletableFuture<FetchResult> fetch(FetchContext context, long startOffset, long endOffset, int maxBytesHint) {
             List<RecordBatchWithContext> records = new ArrayList<>(recordMap.subMap(startOffset, endOffset).values());
             return CompletableFuture.completedFuture(() -> records);
         }
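To close, a hedged sketch of how a test might drive this mock through the new context-first entry points; `stream` and `batch` are assumed to come from the surrounding test, with `batch.count() == 1`:

    // a minimal sketch; the contexts are explicit here, DEFAULT carries no active trace
    long base = stream.append(AppendContext.DEFAULT, batch).get().baseOffset();
    FetchResult rst = stream.fetch(FetchContext.DEFAULT, base, base + 1, Integer.MAX_VALUE).get();
    assertEquals(1, rst.recordBatchList().size());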