flushSoftTimeoutPerLevel = new HashMap<>();
+ private long shutdownSoftTimeout = 5000; // milliseconds
+ private long shutdownHardTimeout = 30000; // milliseconds
+ private volatile int timeoutCheckPeriod = 1000;
+
+ private long flushHardTimeoutTimestamp;
+ private long flushSoftTimeoutTimestamp;
+ // The maximum soft timeout allowed. If users wish to continue log collection
+ // and synchronization while the JVM is shutting down, this value will be
+ // lowered to increase the likelihood of flushing before the shutdown
+ // completes.
+ private volatile long flushAbsoluteMaximumSoftTimeout;
+ private long flushMaximumSoftTimeout;
+
+ private final Thread backgroundFlushThread = new Thread(new BackgroundFlushRunnable());
+ private final BackgroundSyncRunnable backgroundSyncRunnable = new BackgroundSyncRunnable();
+ private final Thread backgroundSyncThread = new Thread(backgroundSyncRunnable);
+ private final Thread shutdownHookThread = new Thread(new ShutdownHookRunnable());
+ private volatile boolean closeWithDelayedShutdown = false;
+
+ private volatile long numEventsLogged = 0L;
+
+ private boolean activated = false;
+
+ private final AtomicBoolean closeStarted = new AtomicBoolean(false);
+ private volatile boolean closedForAppends = false;
+
+ /**
+ * Default constructor
+ */
+ public AbstractBufferedRollingFileAppender () {
+ this(new SystemTimeSource());
+ }
+
+ /**
+ * Constructor that allows specifying a TimeSource, such as one that can be
+ * controlled manually to facilitate unit testing
+ * @param timeSource The time source that the appender should use
+ */
+ public AbstractBufferedRollingFileAppender (TimeSource timeSource) {
+ this.timeSource = timeSource;
+
+ // The default flush timeout values below are optimized for high latency
+ // remote persistent storage such as object stores or HDFS
+ flushHardTimeoutPerLevel.put(Level.ERROR, 5L * 60 * 1000 /* 5 min */);
+ flushHardTimeoutPerLevel.put(Level.WARN, 10L * 60 * 1000 /* 10 min */);
+ flushHardTimeoutPerLevel.put(Level.INFO, 30L * 60 * 1000 /* 30 min */);
+ flushHardTimeoutPerLevel.put(Level.DEBUG, 30L * 60 * 1000 /* 30 min */);
+ flushHardTimeoutPerLevel.put(Level.TRACE, 30L * 60 * 1000 /* 30 min */);
+
+ flushSoftTimeoutPerLevel.put(Level.ERROR, 10L * 1000 /* 10 sec */);
+ flushSoftTimeoutPerLevel.put(Level.WARN, 15L * 1000 /* 15 sec */);
+ flushSoftTimeoutPerLevel.put(Level.INFO, 3L * 60 * 1000 /* 3 min */);
+ flushSoftTimeoutPerLevel.put(Level.DEBUG, 3L * 60 * 1000 /* 3 min */);
+ flushSoftTimeoutPerLevel.put(Level.TRACE, 3L * 60 * 1000 /* 3 min */);
+ }
+
+ /**
+ * @param baseName The base filename for log files
+ */
+ public void setBaseName (String baseName) {
+ this.baseName = baseName;
+ }
+
+ /**
+ * Sets whether to close the log appender upon receiving a shutdown signal
+ * before the JVM exits. If set to false, the appender will continue appending
+ * logs even while the JVM is shutting down and the appender will do its best
+ * to sync those logs before the JVM shuts down. This presents a tradeoff
+ * between capturing more log events and potential data loss if the log events
+ * cannot be flushed and synced before the JVM is killed.
+ * @param closeOnShutdown Whether to close the log file on shutdown
+ */
+ public void setCloseOnShutdown (boolean closeOnShutdown) {
+ this.closeOnShutdown = closeOnShutdown;
+ }
+
+ /**
+ * Sets the per-log-level hard timeouts for flushing.
+ *
+ * NOTE: Timeouts for custom log-levels are not supported. Log events with
+ * these levels will be assigned the timeout of the INFO level.
+ * @param csvTimeouts A CSV string of key-value pairs, where each key is a
+ * log-level in all caps and each value is the hard timeout for flushing in
+ * minutes. E.g., "INFO=30,WARN=10,ERROR=5"
+ */
+ public void setFlushHardTimeoutsInMinutes (String csvTimeouts) {
+ for (String token : csvTimeouts.split(",")) {
+ String[] kv = token.split("=");
+ if (isSupportedLogLevel(kv[0])) {
+ try {
+ flushHardTimeoutPerLevel.put(Level.toLevel(kv[0]), Long.parseLong(kv[1]) * 60 * 1000);
+ } catch (NumberFormatException ex) {
+ this.addError("Failed to set hard flush timeout value " +
+ "for the " + kv[0] + " verbosity level: " + kv[1]);
+ }
+ } else {
+ this.addError("Failed to set hard flush timeout " +
+ "for the following unsupported verbosity level: " + kv[0]);
+ }
+ }
+ }
+
+ /**
+ * Sets the per-log-level soft timeouts for flushing.
+ *
+ * NOTE: Timeouts for custom log-levels are not supported. Log events with
+ * these levels will be assigned the timeout of the INFO level.
+ * @param csvTimeouts A CSV string of key-value pairs, where each key is a
+ * log-level in all caps and each value is the soft timeout for flushing in
+ * seconds. E.g., "INFO=180,WARN=15,ERROR=10"
+ */
+ public void setFlushSoftTimeoutsInSeconds (String csvTimeouts) {
+ for (String token : csvTimeouts.split(",")) {
+ String[] kv = token.split("=");
+ if (isSupportedLogLevel(kv[0])) {
+ try {
+ flushSoftTimeoutPerLevel.put(Level.toLevel(kv[0]), Long.parseLong(kv[1]) * 1000);
+ } catch (NumberFormatException ex) {
+ this.addError("Failed to set soft flush timeout value " +
+ "for the " + kv[0] + " verbosity level: " + kv[1]);
+ }
+ } else {
+ this.addError("Failed to set soft flush timeout " +
+ "for the following unsupported verbosity level: " + kv[0]);
+ }
+ }
+ }
+
+ /**
+ * @param milliseconds The soft shutdown timeout in milliseconds
+ */
+ public void setShutdownSoftTimeoutInMilliseconds (long milliseconds) {
+ shutdownSoftTimeout = milliseconds;
+ }
+
+ /**
+ * @param seconds The hard shutdown timeout in seconds
+ */
+ public void setShutdownHardTimeoutInSeconds (long seconds) {
+ shutdownHardTimeout = seconds * 1000;
+ }
+
+ /**
+ * Sets the period between checking for soft/hard timeouts (and then
+ * triggering a flush and sync). Care should be taken to ensure this period is
+ * not significantly longer than the lowest timeout, since otherwise there may
+ * be undue delay between when a timeout expires and when a flush occurs.
+ * @param milliseconds The period in milliseconds
+ */
+ public void setTimeoutCheckPeriod (int milliseconds) {
+ timeoutCheckPeriod = milliseconds;
+ }
+
+ public String getBaseName () {
+ return baseName;
+ }
+
+ public long getNumEventsLogged () {
+ return numEventsLogged;
+ }
+
+ /**
+ * This method is primarily used for testing
+ * @return Whether any of the background threads in this class are running
+ */
+ public boolean backgroundThreadsRunning () {
+ return backgroundFlushThread.isAlive() || backgroundSyncThread.isAlive()
+ || shutdownHookThread.isAlive();
+ }
+
+ /**
+ * Simulates the JVM calling this appender's shutdown hook. This method should
+ * only be used for testing.
+ */
+ public void simulateShutdownHook () {
+ shutdownHookThread.start();
+ }
+
+ /**
+ * Starts the appender and activates its options.
+ *
+ * This method is {@code final} to ensure it is not overridden by derived
+ * classes since this base class needs to perform actions before/after the
+ * derived class' {@link #activateOptionsHook(long)} method.
+ */
+ @Override
+ public final void start () {
+ if (activated) {
+ this.addWarn("Already activated.");
+ return;
+ }
+
+ flushAbsoluteMaximumSoftTimeout = flushSoftTimeoutPerLevel.get(Level.TRACE);
+ resetFreshnessTimeouts();
+ super.start();
+
+ try {
+ // Set the first rollover timestamp to the current time
+ lastRolloverTimestamp = System.currentTimeMillis();
+ activateOptionsHook(lastRolloverTimestamp);
+ Runtime.getRuntime().addShutdownHook(shutdownHookThread);
+ backgroundFlushThread.start();
+ backgroundSyncThread.start();
+
+ activated = true;
+ } catch (Exception ex) {
+ this.addError("Failed to activate appender.", ex);
+ super.stop();
+ }
+ }
+
+ /**
+ * Stops the appender.
+ *
+ * This method is {@code final} to ensure it is not overridden by derived
+ * classes since this base class needs to perform actions before/after the
+ * derived class' {@link #closeHook()} method.
+ */
+ @Override
+ public final void stop () {
+ // NOTE: This method should not be marked {@code synchronized} since it
+ // joins with the background threads, and they may call synchronized methods
+ // themselves (e.g., {@link #sync} in the derived class), leading to a
+ // deadlock.
+
+ // Prevent multiple threads from running close concurrently
+ if (false == closeStarted.compareAndSet(false, true)) {
+ return;
+ }
+
+ if (closeWithDelayedShutdown) {
+ try {
+ // Flush now just in case we shut down before a timeout expires
+ flush();
+ } catch (IOException e) {
+ this.addError("Failed to flush", e);
+ }
+
+ // Decrease the absolute maximum soft timeout to increase the likelihood
+ // that the log will be synced before the app shuts down
+ flushAbsoluteMaximumSoftTimeout = Math.min(1000 /* 1 second */,
+ flushSoftTimeoutPerLevel.get(Level.ERROR));
+ }
+
+ // Shutdown the flush thread before we close (so we don't try to flush after
+ // the appender is already closed)
+ backgroundFlushThread.interrupt();
+ try {
+ backgroundFlushThread.join();
+ } catch (InterruptedException e) {
+ this.addError("Interrupted while joining backgroundFlushThread");
+ }
+
+ // Prevent any further appends
+ closedForAppends = true;
+
+ try {
+ closeHook();
+ } catch (Exception ex) {
+ // Just log the failure but continue the close process
+ this.addError("closeHook failed.", ex);
+ }
+
+ // Perform a rollover (in case an append occurred after the background flush
+ // thread was stopped) and then shut down the sync thread
+ backgroundSyncRunnable.addSyncRequest(baseName, lastRolloverTimestamp, true,
+ computeSyncRequestMetadata());
+ backgroundSyncRunnable.addShutdownRequest();
+ try {
+ backgroundSyncThread.join();
+ } catch (InterruptedException e) {
+ this.addError("Interrupted while joining backgroundSyncThread");
+ }
+
+ super.stop();
+ }
+
+ /**
+ * Appends the given log event to the file (subject to any buffering by the
+ * derived class). This method may also trigger a rollover and sync if the
+ * derived class' {@link #rolloverRequired()} method returns true.
+ *
+ * This method is {@code final} to ensure it is not overridden by derived
+ * classes since this base class needs to perform actions before/after the
+ * derived class' {@link #appendHook(ILoggingEvent)} method.
+ * @param loggingEvent The log event
+ */
+ @Override
+ public final void append (ILoggingEvent loggingEvent) {
+ if (closedForAppends) {
+ this.addWarn("Appender closed for appends.");
+ return;
+ }
+
+ try {
+ appendHook(loggingEvent);
+ ++numEventsLogged;
+
+ if (false == rolloverRequired()) {
+ updateFreshnessTimeouts(loggingEvent);
+ } else {
+ backgroundSyncRunnable.addSyncRequest(baseName, lastRolloverTimestamp, true,
+ computeSyncRequestMetadata());
+ resetFreshnessTimeouts();
+ lastRolloverTimestamp = loggingEvent.getTimeStamp();
+ startNewLogFile(lastRolloverTimestamp);
+ numEventsLogged = 0L;
+ }
+ } catch (Exception ex) {
+ this.addError("Failed to write log event.", ex);
+ }
+ }
+
+ /**
+ * Activates appender options for derived appenders.
+ * @param currentTimestamp Current timestamp (useful for naming the first log
+ * file)
+ * @throws Exception on error
+ */
+ protected abstract void activateOptionsHook (long currentTimestamp) throws Exception;
+
+ /**
+ * Closes the derived appender. Once closed, the appender cannot be reopened.
+ * @throws Exception on error
+ */
+ protected abstract void closeHook () throws Exception;
+
+ /**
+ * @return Whether to trigger a rollover
+ * @throws Exception on error
+ */
+ protected abstract boolean rolloverRequired () throws Exception;
+
+ /**
+ * Starts a new log file.
+ * @param lastEventTimestamp Timestamp of the last event that was logged
+ * before calling this method (useful for naming the new log file).
+ * @throws Exception on error
+ */
+ protected abstract void startNewLogFile (long lastEventTimestamp) throws Exception;
+
+ /**
+ * Synchronizes a log file with remote storage. Note that this file may not
+ * necessarily be the current log file, but a previously rolled-over one.
+ * @param baseName The base filename of the log file to sync
+ * @param logRolloverTimestamp The approximate timestamp of when the target
+ * log file was rolled over
+ * @param deleteFile Whether the log file can be deleted after syncing
+ * @param fileMetadata Extra metadata for the file that was captured at the
+ * time when the sync request was generated
+ * @throws Exception on error
+ */
+ protected abstract void sync (String baseName, long logRolloverTimestamp, boolean deleteFile,
+ Map<String, Object> fileMetadata) throws Exception;
+
+ /**
+ * Appends a log event to the file.
+ * @param event The log event
+ * @throws Exception on error
+ */
+ protected abstract void appendHook (ILoggingEvent event) throws Exception;
+
+ /**
+ * Computes the log file name, which includes the provided base name and
+ * rollover timestamp.
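+ *
+ * For example, an implementation might return
+ * {@code baseName + "." + logRolloverTimestamp + ".clp.zst"}.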
+ * @param baseName The base name of the log file name
+ * @param logRolloverTimestamp The approximate timestamp when the target log
+ * file was rolled over
+ * @return The computed log file name
+ */
+ protected abstract String computeLogFileName (String baseName, long logRolloverTimestamp);
+
+ /**
+ * Computes the file metadata to be included in a synchronization request
+ * @return The computed file metadata
+ */
+ protected Map<String, Object> computeSyncRequestMetadata () {
+ return null;
+ }
+
+ /**
+ * Tests whether the given log level is supported by this appender
+ * @param level The log-level string passed in from the configuration
+ * @return true if supported, false otherwise
+ */
+ public boolean isSupportedLogLevel(String level) {
+ // Note that Level.toLevel parses a candidate level string into a logback
+ // level and, if it cannot, falls back to the default DEBUG level. So a string
+ // is supported if it either parses to a non-default level or is the default
+ // level's name itself.
+ return Level.toLevel(level) != Level.DEBUG || level.equalsIgnoreCase("DEBUG");
+ }
+
+ /**
+ * Resets the soft/hard freshness timeouts.
+ */
+ private void resetFreshnessTimeouts () {
+ flushHardTimeoutTimestamp = INVALID_FLUSH_TIMEOUT_TIMESTAMP;
+ flushSoftTimeoutTimestamp = INVALID_FLUSH_TIMEOUT_TIMESTAMP;
+ flushMaximumSoftTimeout = flushAbsoluteMaximumSoftTimeout;
+ }
+
+ /**
+ * Updates the soft/hard freshness timeouts based on the given log event's log
+ * level and timestamp.
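+ *
+ * For example, with the default soft timeouts, an INFO event at time t moves
+ * the soft deadline to t + 3 min; if an ERROR event then arrives at time t',
+ * the maximum soft timeout drops to 10 s and the soft deadline becomes
+ * t' + 10 s. The hard deadline only ever moves earlier: it is the minimum,
+ * over all events since the last flush, of an event's timestamp plus its
+ * level's hard timeout.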
+ * @param loggingEvent The log event
+ */
+ private void updateFreshnessTimeouts (ILoggingEvent loggingEvent) {
+ Level level = loggingEvent.getLevel();
+ long flushHardTimeout = flushHardTimeoutPerLevel.computeIfAbsent(level,
+ v -> flushHardTimeoutPerLevel.get(Level.INFO));
+ long timeoutTimestamp = loggingEvent.getTimeStamp() + flushHardTimeout;
+ flushHardTimeoutTimestamp = Math.min(flushHardTimeoutTimestamp, timeoutTimestamp);
+
+ long flushSoftTimeout = flushSoftTimeoutPerLevel.computeIfAbsent(level,
+ v -> flushSoftTimeoutPerLevel.get(Level.INFO));
+ flushMaximumSoftTimeout = Math.min(flushMaximumSoftTimeout, flushSoftTimeout);
+ flushSoftTimeoutTimestamp = loggingEvent.getTimeStamp() + flushMaximumSoftTimeout;
+ }
+
+ /**
+ * Flushes and synchronizes the log file if one of the freshness timeouts has
+ * been reached.
+ *
+ * This method is marked {@code synchronized} since it can be called from
+ * logging threads and the background thread that monitors the freshness
+ * timeouts.
+ * @throws IOException on I/O error
+ */
+ private synchronized void flushAndSyncIfNecessary () throws IOException {
+ long ts = timeSource.getCurrentTimeInMilliseconds();
+ if (ts >= flushSoftTimeoutTimestamp || ts >= flushHardTimeoutTimestamp) {
+ flush();
+ backgroundSyncRunnable.addSyncRequest(baseName, lastRolloverTimestamp, false,
+ computeSyncRequestMetadata());
+ resetFreshnessTimeouts();
+ }
+ }
+
+ /**
+ * Periodically flushes and syncs the current log file if we've exceeded one
+ * of the freshness timeouts.
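+ *
+ * When a delayed shutdown has been requested (i.e., {@code closeOnShutdown} is
+ * false and the JVM is shutting down), this thread keeps flushing until either
+ * no new events have been logged for {@code shutdownSoftTimeout} milliseconds
+ * or {@code shutdownHardTimeout} milliseconds have elapsed, whichever comes
+ * first.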
+ */
+ private class BackgroundFlushRunnable implements Runnable {
+ @Override
+ public void run () {
+ boolean delayedShutdownRequested = false;
+ long shutdownSoftTimeoutTimestamp = INVALID_FLUSH_TIMEOUT_TIMESTAMP;
+ long shutdownHardTimeoutTimestamp = INVALID_FLUSH_TIMEOUT_TIMESTAMP;
+ long lastNumEventsLogged = -1;
+ while (true) {
+ try {
+ flushAndSyncIfNecessary();
+
+ if (delayedShutdownRequested) {
+ // Update soft timeout if another event occurred
+ long currentTimestamp = timeSource.getCurrentTimeInMilliseconds();
+ if (numEventsLogged != lastNumEventsLogged) {
+ lastNumEventsLogged = numEventsLogged;
+ shutdownSoftTimeoutTimestamp = currentTimestamp + shutdownSoftTimeout;
+ }
+
+ // Break if we've hit either timeout
+ if (currentTimestamp >= shutdownSoftTimeoutTimestamp
+ || currentTimestamp >= shutdownHardTimeoutTimestamp)
+ {
+ break;
+ }
+ }
+
+ Thread.sleep(timeoutCheckPeriod);
+ } catch (IOException e) {
+ addError("Failed to flush buffered appender in the background", e);
+ } catch (InterruptedException e) {
+ addInfo("Received interrupt message for graceful shutdown of BackgroundFlushThread");
+
+ delayedShutdownRequested = closeWithDelayedShutdown;
+ if (closeOnShutdown || false == delayedShutdownRequested) {
+ break;
+ }
+ long currentTimestamp = timeSource.getCurrentTimeInMilliseconds();
+ shutdownSoftTimeoutTimestamp = currentTimestamp + shutdownSoftTimeout;
+ shutdownHardTimeoutTimestamp = currentTimestamp + shutdownHardTimeout;
+ lastNumEventsLogged = numEventsLogged;
+
+ // Lower the timeout check period so we react faster when flushing or
+ // shutting down is necessary
+ timeoutCheckPeriod = 100;
+ }
+ }
+ }
+ }
+
+ /**
+ * Thread to synchronize log files in the background (by calling
+ * {@link #sync(String, long, boolean, Map) sync}). The thread
+ * maintains a request queue that callers should populate.
+ */
+ private class BackgroundSyncRunnable implements Runnable {
+ private final LinkedBlockingQueue<Request> requests = new LinkedBlockingQueue<>();
+
+ @Override
+ public void run () {
+ while (true) {
+ try {
+ Request request = requests.take();
+ if (request instanceof SyncRequest) {
+ SyncRequest syncRequest = (SyncRequest)request;
+ try {
+ sync(syncRequest.logFileBaseName, syncRequest.logRolloverTimestamp,
+ syncRequest.deleteFile, syncRequest.fileMetadata);
+ } catch (Exception ex) {
+ String logFilePath = computeLogFileName(syncRequest.logFileBaseName,
+ syncRequest.logRolloverTimestamp);
+ addError("Failed to sync '" + logFilePath + "'", ex);
+ }
+ } else if (request instanceof ShutdownRequest) {
+ addInfo("Received shutdown request");
+ break;
+ }
+ } catch (InterruptedException e) {
+ break;
+ }
+ }
+ }
+
+ /**
+ * Adds a shutdown request to the request queue
+ */
+ public void addShutdownRequest () {
+ addInfo("Adding shutdown request");
+ Request shutdownRequest = new ShutdownRequest();
+ while (false == requests.offer(shutdownRequest)) {}
+ }
+
+ /**
+ * Adds a sync request to the request queue
+ * @param baseName The base filename of the log file to sync
+ * @param logRolloverTimestamp The approximate timestamp of when the target
+ * log file was rolled over
+ * @param deleteFile Whether the log file can be deleted after syncing.
+ * @param fileMetadata Extra metadata for the file
+ */
+ public void addSyncRequest (String baseName, long logRolloverTimestamp, boolean deleteFile,
+ Map<String, Object> fileMetadata) {
+ Request syncRequest =
+ new SyncRequest(baseName, logRolloverTimestamp, deleteFile, fileMetadata);
+ while (false == requests.offer(syncRequest)) {}
+ }
+
+ private class Request {}
+
+ private class ShutdownRequest extends Request {}
+
+ private class SyncRequest extends Request {
+ public final String logFileBaseName;
+ public final long logRolloverTimestamp;
+ public final boolean deleteFile;
+ public final Map<String, Object> fileMetadata;
+
+ public SyncRequest (String logFileBaseName, long logRolloverTimestamp, boolean deleteFile,
+ Map<String, Object> fileMetadata) {
+ this.logFileBaseName = logFileBaseName;
+ this.logRolloverTimestamp = logRolloverTimestamp;
+ this.deleteFile = deleteFile;
+ this.fileMetadata = fileMetadata;
+ }
+ }
+ }
+
+ /**
+ * Thread to handle shutting down the appender when the JVM is shutting down.
+ * When {@code closeOnShutdown} is false, this thread enables a delayed
+ * close procedure so that logs that occur during shutdown can be collected.
+ */
+ private class ShutdownHookRunnable implements Runnable {
+ @Override
+ public void run () {
+ if (false == closeOnShutdown) {
+ closeWithDelayedShutdown = true;
+ }
+ stop();
+ }
+ }
+}
diff --git a/src/main/java/com/yscope/logging/logback/AbstractClpIrBufferedRollingFileAppender.java b/src/main/java/com/yscope/logging/logback/AbstractClpIrBufferedRollingFileAppender.java
new file mode 100644
index 0000000..d2ad235
--- /dev/null
+++ b/src/main/java/com/yscope/logging/logback/AbstractClpIrBufferedRollingFileAppender.java
@@ -0,0 +1,200 @@
+package com.yscope.logging.logback;
+
+import java.io.IOException;
+import java.nio.file.Paths;
+import java.util.HashMap;
+import java.util.Map;
+
+import ch.qos.logback.classic.spi.ILoggingEvent;
+import ch.qos.logback.core.Layout;
+
+/**
+ * This class extends {@link AbstractBufferedRollingFileAppender} to append to
+ * CLP compressed IR-stream files and roll over based on the amount of
+ * uncompressed and compressed data written to a file. Derived classes are
+ * expected to handle synchronization (e.g., uploading to remote storage).
+ *
+ * Rollover based on the amount of uncompressed data written to file allows us
+ * to ensure that the file remains manageable when decompressed for viewing,
+ * etc.
+ *
+ * Rollover based on the amount of compressed data written to file allows us to
+ * ensure the file is large enough to amortize filesystem overhead, and small
+ * enough to be performant when uploading to remote storage as well as when
+ * accessing and searching the compressed file.
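+ *
+ * For illustration, a minimal derived appender might look like the following
+ * sketch (the class name and the upload logic are placeholders, not part of
+ * this library):
+ * <pre>{@code
+ * public class RemoteStorageClpIrAppender extends AbstractClpIrBufferedRollingFileAppender {
+ *   protected void sync (String baseName, long logRolloverTimestamp, boolean deleteFile,
+ *                        Map<String, Object> fileMetadata) {
+ *     String filePath = computeLogFilePath(computeLogFileName(baseName, logRolloverTimestamp));
+ *     // Upload filePath to remote storage here and, if deleteFile is true,
+ *     // delete the local file afterwards
+ *   }
+ * }
+ * }</pre>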
+ */
+public abstract class AbstractClpIrBufferedRollingFileAppender
+ extends AbstractBufferedRollingFileAppender
+{
+ public static final String CLP_COMPRESSED_IRSTREAM_FILE_EXTENSION = ".clp.zst";
+
+ // Appender settings, some of which may be set by Logback through reflection.
+ // For descriptions of the properties, see their setters below.
+ private String outputDir;
+ private String pattern;
+ // CLP streaming compression parameters
+ private int compressionLevel = 3;
+ private boolean closeFrameOnFlush = true;
+ private boolean useFourByteEncoding = false;
+ private long rolloverCompressedSizeThreshold = 16 * 1024 * 1024; // Bytes
+ private long rolloverUncompressedSizeThreshold = 1024L * 1024 * 1024; // Bytes
+
+ private long compressedSizeSinceLastRollover = 0L; // Bytes
+ private long uncompressedSizeSinceLastRollover = 0L; // Bytes
+
+ // This instance variable should be up-to-date at all times
+ private ClpIrFileAppender clpIrFileAppender = null;
+
+ public AbstractClpIrBufferedRollingFileAppender () {
+ super();
+ }
+
+ /**
+ * Constructor
+ * @param timeSource The time source that the appender should use
+ */
+ public AbstractClpIrBufferedRollingFileAppender (TimeSource timeSource) {
+ super(timeSource);
+ }
+
+ /**
+ * Sets the compression level for the appender's streaming compressor
+ * @param compressionLevel The compression level between 1 and 22
+ */
+ public void setCompressionLevel (int compressionLevel) {
+ this.compressionLevel = compressionLevel;
+ }
+
+ /**
+ * Sets the threshold for the file's compressed size at which rollover should
+ * be triggered.
+ * @param rolloverCompressedSizeThreshold The threshold size in bytes
+ */
+ public void setRolloverCompressedSizeThreshold (long rolloverCompressedSizeThreshold) {
+ this.rolloverCompressedSizeThreshold = rolloverCompressedSizeThreshold;
+ }
+
+ /**
+ * Sets the threshold for the file's uncompressed size at which rollover
+ * should be triggered.
+ * @param rolloverUncompressedSizeThreshold The threshold size in bytes
+ */
+ public void setRolloverUncompressedSizeThreshold (long rolloverUncompressedSizeThreshold) {
+ this.rolloverUncompressedSizeThreshold = rolloverUncompressedSizeThreshold;
+ }
+
+ /**
+ * @param useFourByteEncoding Whether to use CLP's four-byte encoding instead
+ * of the default eight-byte encoding
+ */
+ public void setUseFourByteEncoding (boolean useFourByteEncoding) {
+ this.useFourByteEncoding = useFourByteEncoding;
+ }
+
+ /**
+ * @param outputDir The output directory path for log files
+ */
+ public void setOutputDir (String outputDir) {
+ this.outputDir = outputDir;
+ }
+
+ /**
+ * @param closeFrameOnFlush Whether to close the compressor's frame on flush
+ */
+ public void setCloseFrameOnFlush (boolean closeFrameOnFlush) {
+ this.closeFrameOnFlush = closeFrameOnFlush;
+ }
+
+ public String getPattern() {
+ return pattern;
+ }
+
+ public void setPattern(String pattern) {
+ this.pattern = pattern;
+ }
+
+ /**
+ * @return The uncompressed size of all log events processed by this appender
+ * in bytes.
+ */
+ public long getUncompressedSize () {
+ return uncompressedSizeSinceLastRollover + clpIrFileAppender.getUncompressedSize();
+ }
+
+ /**
+ * @return The compressed size of all log events processed by this appender in
+ * bytes.
+ */
+ public long getCompressedSize () {
+ return compressedSizeSinceLastRollover + clpIrFileAppender.getCompressedSize();
+ }
+
+ public int getCompressionLevel () {
+ return compressionLevel;
+ }
+
+ public Layout<ILoggingEvent> getLayout() {
+ return clpIrFileAppender.getLayout();
+ }
+
+ @Override
+ public void activateOptionsHook (long currentTimestamp) throws IOException {
+ String fileName = computeLogFileName(getBaseName(), currentTimestamp);
+ String filePath = computeLogFilePath(fileName);
+ clpIrFileAppender = new ClpIrFileAppender(filePath, pattern, this.getContext(), useFourByteEncoding,
+ closeFrameOnFlush, compressionLevel);
+ }
+
+ @Override
+ protected void closeHook () {
+ clpIrFileAppender.stop();
+ }
+
+ @Override
+ protected boolean rolloverRequired () {
+ return clpIrFileAppender.getCompressedSize() >= rolloverCompressedSizeThreshold
+ || clpIrFileAppender.getUncompressedSize() >= rolloverUncompressedSizeThreshold;
+ }
+
+ @Override
+ protected void startNewLogFile (long lastEventTimestamp) throws IOException {
+ compressedSizeSinceLastRollover += clpIrFileAppender.getCompressedSize();
+ uncompressedSizeSinceLastRollover += clpIrFileAppender.getUncompressedSize();
+ String fileName = computeLogFileName(getBaseName(), lastEventTimestamp);
+ String filePath = computeLogFilePath(fileName);
+ clpIrFileAppender.startNewFile(filePath);
+ }
+
+ @Override
+ public void appendHook (ILoggingEvent event) {
+ clpIrFileAppender.append(event);
+ }
+
+ @Override
+ public void flush () throws IOException {
+ clpIrFileAppender.flush();
+ }
+
+ @Override
+ protected Map<String, Object> computeSyncRequestMetadata () {
+ Map<String, Object> metadata = new HashMap<>();
+ metadata.put("compressedLogSize", clpIrFileAppender.getCompressedSize());
+ metadata.put("uncompressedLogSize", clpIrFileAppender.getUncompressedSize());
+ metadata.put("numEventsLogged", getNumEventsLogged());
+ return metadata;
+ }
+
+ @Override
+ protected String computeLogFileName (String baseName, long logRolloverTimestamp) {
+ return baseName + "." + logRolloverTimestamp + CLP_COMPRESSED_IRSTREAM_FILE_EXTENSION;
+ }
+
+ /**
+ * Computes a path for the provided log file name
+ * @param logFileName The log file name
+ * @return The computed log file path
+ */
+ protected String computeLogFilePath (String logFileName) {
+ return Paths.get(outputDir, logFileName).toString();
+ }
+}
diff --git a/src/main/java/com/yscope/logging/logback/ClpIrFileAppender.java b/src/main/java/com/yscope/logging/logback/ClpIrFileAppender.java
new file mode 100644
index 0000000..f869ba3
--- /dev/null
+++ b/src/main/java/com/yscope/logging/logback/ClpIrFileAppender.java
@@ -0,0 +1,382 @@
+package com.yscope.logging.logback;
+
+import java.io.FileOutputStream;
+import java.io.Flushable;
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.nio.charset.StandardCharsets;
+import java.time.ZonedDateTime;
+
+import ch.qos.logback.classic.PatternLayout;
+import ch.qos.logback.classic.spi.ILoggingEvent;
+import ch.qos.logback.core.AppenderBase;
+import ch.qos.logback.core.Context;
+import ch.qos.logback.core.Layout;
+import com.github.luben.zstd.Zstd;
+import com.github.luben.zstd.ZstdOutputStream;
+import com.yscope.clp.irstream.AbstractClpIrOutputStream;
+import com.yscope.clp.irstream.EightByteClpIrOutputStream;
+import com.yscope.clp.irstream.FourByteClpIrOutputStream;
+import org.apache.commons.io.output.CountingOutputStream;
+
+
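+/**
+ * Logback appender that writes log events into a Zstandard-compressed CLP
+ * IR-stream file. Each event's timestamp and formatted message are stored
+ * separately, so the configured pattern should not contain a date conversion;
+ * any date conversion patterns found are removed and the first one is used as
+ * the stream's timestamp pattern.
+ *
+ * A brief usage sketch (the {@code context} and {@code loggingEvent} objects
+ * are assumed to exist, and exception handling is omitted):
+ * <pre>{@code
+ * ClpIrFileAppender appender = new ClpIrFileAppender("logs/app.clp.zst",
+ *     "%d{ISO8601} %p [%c{1}] %m%n", context, false, true, 3);
+ * appender.append(loggingEvent);
+ * appender.flush();
+ * appender.stop();
+ * }</pre>
+ */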
+public class ClpIrFileAppender extends AppenderBase<ILoggingEvent> implements Flushable {
+ private String timestampPattern = null;
+ private Layout<ILoggingEvent> layout;
+ protected String pattern;
+ private final static String DEFAULT_PATTERN = "[%thread] %logger %msg";
+
+ private int estimatedFormattedTimestampLength = 0;
+ private long uncompressedSizeInBytes = 0;
+
+ private AbstractClpIrOutputStream clpIrOutputStream;
+ private CountingOutputStream countingOutputStream;
+
+ private boolean activated = false;
+
+ // Appender settings
+ // NOTE: It may appear that these settings are never set but Logback sets these
+ // through reflection
+ // For descriptions of these settings, see the (non-default) constructor
+ private int compressionLevel = 3;
+ private boolean closeFrameOnFlush = true;
+ private String file;
+ private boolean useFourByteEncoding = true;
+
+ public ClpIrFileAppender () {
+ }
+
+ /**
+ * Constructs a ClpIrFileAppender
+ * @param filePath Output file path
+ * @param pattern Logback conversion pattern for formatting log events. For now, the
+ * pattern is only converted into a {@code ch.qos.logback.classic.PatternLayout}.
+ * Callers should not include a date conversion in the pattern since this appender
+ * stores timestamps and messages separately. Any date conversion patterns found in
+ * the pattern will be removed.
+ * @param context Logback Layout context
+ * @param useFourByteEncoding Whether to use CLP's four-byte encoding instead
+ * of the default eight-byte encoding. The four-byte encoding has lower
+ * memory usage but can also result in lower compression ratio.
+ * @param closeFrameOnFlush Whether to close the Zstandard frame on flush
+ * @param compressionLevel Compression level to use for Zstandard. Valid levels
+ * are between {@code Zstd.minCompressionLevel()} and
+ * {@code Zstd.maxCompressionLevel()}; typical values are 1 to 22.
+ * @throws IOException on I/O error
+ */
+ public ClpIrFileAppender (
+ String filePath,
+ String pattern,
+ Context context,
+ boolean useFourByteEncoding,
+ boolean closeFrameOnFlush,
+ int compressionLevel
+ ) throws IOException {
+ super();
+
+ setFile(filePath);
+ setPattern(pattern);
+ setContext(context);
+ setUseFourByteEncoding(useFourByteEncoding);
+ setCompressionLevel(compressionLevel);
+ setCloseFrameOnFlush(closeFrameOnFlush);
+
+ // NOTE: We don't enable the shutdown hook since the caller should handle
+ // closing the appender properly when shutting down (enabling the hook may
+ // also be confusing).
+ startHelper(false);
+ }
+
+ public String getPattern() {
+ return pattern;
+ }
+
+ public void setPattern(String pattern) {
+ this.pattern = pattern;
+ }
+
+ public void setFile (String file) {
+ if (file == null) {
+ this.file = file;
+ } else {
+ this.file = file.trim();
+ }
+ }
+
+ /**
+ * @return The amount of data written to this appender for the current output
+ * file, in bytes.
+ *
+ * NOTE:
+ *
+ * - This may be slightly inaccurate since we use an estimate of the
+ * timestamp length for performance reasons.
+ * - This will be reset when a new output file is opened.
+ *
+ */
+ public synchronized long getUncompressedSize () {
+ return uncompressedSizeInBytes;
+ }
+
+ public Layout<ILoggingEvent> getLayout() {
+ return layout;
+ }
+
+ /**
+ * @return The amount of data written by this appender to the current output
+ * file, in bytes. This will be reset when a new output file is opened.
+ */
+ public synchronized long getCompressedSize () {
+ return countingOutputStream.getByteCount();
+ }
+
+ public void setUseFourByteEncoding (boolean useFourByteEncoding) {
+ this.useFourByteEncoding = useFourByteEncoding;
+ }
+
+ public void setCompressionLevel (int compressionLevel) {
+ this.compressionLevel = compressionLevel;
+ }
+
+ public void setCloseFrameOnFlush (boolean closeFrameOnFlush) {
+ this.closeFrameOnFlush = closeFrameOnFlush;
+ }
+
+ public Layout<ILoggingEvent> buildLayout() {
+ if (this.pattern == null) {
+ this.pattern = DEFAULT_PATTERN;
+ }
+
+ PatternLayout layout = new PatternLayout();
+ layout.setPattern(this.getPattern());
+ layout.setContext(this.getContext());
+ layout.start();
+ return layout;
+ }
+
+ /**
+ * Closes the previous file and starts a new file with the given path
+ * @param path Path for the new file
+ * @throws IOException on I/O error
+ */
+ public synchronized void startNewFile (String path) throws IOException {
+ if (false == activated) {
+ throw new IllegalStateException("Appender not activated.");
+ }
+
+ if (!this.isStarted()) {
+ throw new IllegalStateException("Appender already closed.");
+ }
+
+ clpIrOutputStream.close();
+ uncompressedSizeInBytes = 0;
+
+ setFile(path);
+ sanitizeFilePath();
+ createOutputStream();
+ }
+
+ /**
+ * Flushes the appender. If closeFrameOnFlush was set, the Zstandard frame is
+ * closed and all log events are written to the output stream. If not, then
+ * some log events may still be buffered in memory.
+ * @throws IOException on I/O error
+ */
+ @Override
+ public synchronized void flush () throws IOException {
+ clpIrOutputStream.flush();
+ }
+
+ /**
+ * Appends the given log event to the IR stream
+ * @param event The log event
+ */
+ @Override
+ public void append(ILoggingEvent event) {
+ try {
+ String formattedEvent = layout.doLayout(event);
+ byte[] formattedEventBytes = formattedEvent.getBytes(StandardCharsets.ISO_8859_1);
+ ByteBuffer message = ByteBuffer.wrap(formattedEventBytes);
+ clpIrOutputStream.writeLogEvent(event.getTimeStamp(), message);
+ uncompressedSizeInBytes += estimatedFormattedTimestampLength + message.limit();
+ } catch (IOException ex) {
+ this.addError("Failed to write log event.", ex);
+ }
+ }
+
+ /**
+ * Activates the appender's options. This should not be called if the appender
+ * was instantiated manually, since the non-default constructor already
+ * activates it.
+ */
+ @Override
+ public void start () {
+
+ if (activated) {
+ this.addWarn("Already activated.");
+ return;
+ }
+
+ try {
+ startHelper(true);
+ } catch (Exception ex) {
+ this.addError("Failed to activate appender.", ex);
+ super.stop();
+ }
+ }
+
+ /**
+ * Closes the appender. Once closed, the appender cannot be reopened.
+ */
+ @Override
+ public synchronized void stop () {
+ if (!this.isStarted()) {
+ return;
+ }
+
+ try {
+ clpIrOutputStream.close();
+ } catch (IOException ex) {
+ this.addError("Failed to close output file.", ex);
+ }
+
+ super.stop();
+ }
+
+ /**
+ * Helper method to activate options.
+ * @param enableShutdownHook Whether to enable a shutdown hook to close the
+ * appender.
+ * @throws IOException on I/O error
+ */
+ private void startHelper (boolean enableShutdownHook) throws IOException {
+ super.start();
+
+ validateOptionsAndInit();
+
+ activated = true;
+ if (enableShutdownHook) {
+ // logback may not attempt to close the appender when the JVM shuts down, so
+ // this hook ensures we try to close the appender before shutdown.
+ Runtime.getRuntime().addShutdownHook(new Thread(this::stop));
+ }
+ }
+
+ /**
+ * Validates the appender's settings (e.g., compression level) and initializes
+ * the appender with them.
+ * @throws IOException on I/O error
+ */
+ private void validateOptionsAndInit () throws IOException {
+ this.pattern = processConversionPattern(pattern);
+ layout = buildLayout();
+
+ if (compressionLevel < Zstd.minCompressionLevel()
+ || Zstd.maxCompressionLevel() < compressionLevel)
+ {
+ throw new IllegalArgumentException("compressionLevel is outside of valid range: ["
+ + Zstd.minCompressionLevel() + ", "
+ + Zstd.maxCompressionLevel() + "]");
+ }
+
+ sanitizeFilePath();
+
+ createOutputStream();
+ }
+
+ private void sanitizeFilePath () {
+ if (null == file) {
+ throw new IllegalArgumentException("file option not set.");
+ }
+ // Trim surrounding spaces
+ file = file.trim();
+ }
+
+ /**
+ * Creates the CLP IR output stream, the file output stream, and any necessary
+ * streams in between.
+ * @throws IOException on I/O error
+ */
+ private void createOutputStream () throws IOException {
+ FileOutputStream fileOutputStream = Utils.createOutputFile(file);
+ countingOutputStream = new CountingOutputStream(fileOutputStream);
+ ZstdOutputStream zstdOutputStream =
+ new ZstdOutputStream(countingOutputStream, compressionLevel);
+ zstdOutputStream.setCloseFrameOnFlush(closeFrameOnFlush);
+ // Get the local time zone in case we need to determine the time zone
+ // of timestamps printed in the content of log messages. This is not the
+ // time zone used to display log messages to the user (that will be
+ // determined by the user's locale, etc.).
+ String timeZoneId = ZonedDateTime.now().getZone().toString();
+ if (useFourByteEncoding) {
+ clpIrOutputStream =
+ new FourByteClpIrOutputStream(timestampPattern, timeZoneId, zstdOutputStream);
+ } else {
+ clpIrOutputStream =
+ new EightByteClpIrOutputStream(timestampPattern, timeZoneId, zstdOutputStream);
+ }
+
+ uncompressedSizeInBytes += timestampPattern.getBytes(StandardCharsets.ISO_8859_1).length;
+ uncompressedSizeInBytes += timeZoneId.length();
+ }
+
+ /**
+ * Gets the timestamp pattern from the given Logback conversion pattern
+ * and creates a conversion pattern that doesn't contain any date conversion
+ * patterns.
+ *
+ * E.g., if the conversion pattern is
+ * "%d{yyyy-MM-dd HH:mm:ss.SSSZ} %p [%c{1}] %m%n", this method will set the
+ * timestamp pattern to "yyyy-MM-dd HH:mm:ss.SSSZ" and create the conversion
+ * pattern, " %p [%c{1}] %m%n".
+ * @param pattern Logback pattern for formatting log events
+ * @return The conversion pattern without date conversion patterns
+ */
+ private String processConversionPattern (String pattern) {
+ DateConversionPatternExtractor datePatternExtractor =
+ new DateConversionPatternExtractor(pattern);
+
+ for (DateConversionPattern datePattern : datePatternExtractor.getDateConversionPatterns()) {
+ if (null != timestampPattern) {
+ this.addError("Found multiple date conversion specifiers in pattern. Only the first will "
+ + "be preserved.");
+ continue;
+ }
+
+ if ('r' == datePattern.specifier) {
+ this.addError("%r is unsupported and will be ignored.");
+ } else if ('d' == datePattern.specifier) {
+
+ if (null == datePattern.format) {
+ // Pattern is "%d" which implies ISO8601 ("yyyy-MM-dd HH:mm:ss,SSS")
+ timestampPattern = "yyyy-MM-dd HH:mm:ss,SSS";
+ estimatedFormattedTimestampLength = timestampPattern.length();
+ } else {
+ // Pattern is "%d{...}"
+ switch (datePattern.format) {
+ case "ABSOLUTE":
+ timestampPattern = "HH:mm:ss,SSS";
+ estimatedFormattedTimestampLength = timestampPattern.length();
+ break;
+ case "DATE":
+ timestampPattern = "dd MMM yyyy HH:mm:ss,SSS";
+ estimatedFormattedTimestampLength = timestampPattern.length();
+ break;
+ case "ISO8601":
+ timestampPattern = "yyyy-MM-dd HH:mm:ss,SSS";
+ estimatedFormattedTimestampLength = timestampPattern.length();
+ break;
+ default:
+ timestampPattern = datePattern.format;
+ // NOTE: We use getBytes(ISO_8859_1) since the user's dateFormat
+ // may contain Unicode characters
+ estimatedFormattedTimestampLength =
+ timestampPattern.getBytes(StandardCharsets.ISO_8859_1).length;
+ break;
+ }
+ }
+ }
+ }
+
+ return datePatternExtractor.getConversionPatternWithoutDates();
+ }
+}
diff --git a/src/main/java/com/yscope/logging/logback/DateConversionPattern.java b/src/main/java/com/yscope/logging/logback/DateConversionPattern.java
new file mode 100644
index 0000000..ff33641
--- /dev/null
+++ b/src/main/java/com/yscope/logging/logback/DateConversionPattern.java
@@ -0,0 +1,48 @@
+package com.yscope.logging.logback;
+
+import java.util.Objects;
+
+/**
+ * A simple class to contain the components of a Logback Layout's date conversion
+ * pattern.
+ */
+public class DateConversionPattern {
+ public char specifier;
+ public String format;
+
+ public DateConversionPattern (char specifier, String format)
+ {
+ this.specifier = specifier;
+ this.format = format;
+ }
+
+ @Override
+ public boolean equals (Object o) {
+ if (this == o) {
+ return true;
+ }
+ if (o == null || getClass() != o.getClass()) {
+ return false;
+ }
+ DateConversionPattern rhs = (DateConversionPattern)o;
+ return specifier == rhs.specifier && Objects.equals(format, rhs.format);
+ }
+
+ @Override
+ public int hashCode () {
+ return Objects.hash(specifier, format);
+ }
+
+ @Override
+ public String toString () {
+ StringBuilder builder = new StringBuilder();
+ builder.append("%");
+ builder.append(specifier);
+ if (null != format) {
+ builder.append('{');
+ builder.append(format);
+ builder.append('}');
+ }
+ return builder.toString();
+ }
+}
diff --git a/src/main/java/com/yscope/logging/logback/DateConversionPatternExtractor.java b/src/main/java/com/yscope/logging/logback/DateConversionPatternExtractor.java
new file mode 100644
index 0000000..1c9a8fd
--- /dev/null
+++ b/src/main/java/com/yscope/logging/logback/DateConversionPatternExtractor.java
@@ -0,0 +1,185 @@
+package com.yscope.logging.logback;
+
+import java.util.ArrayList;
+import java.util.List;
+
+/**
+ * A class to extract all date conversion patterns from a Logback conversion
+ * pattern. This is useful for Logback appenders that want to store a log event's
+ * message separate from its timestamp.
+ *
+ * For example, given the conversion pattern:
+ *
"%d%d %d{HH:mm:ss' on 'yy/MM/dd}%d %r %p %c{1}: %m%n"
+ * This class would extract 5 date conversion patterns:
+ *
+ * - "%d"
+ * - "%d"
+ * - "%d{HH:mm:ss' on 'yy/MM/dd}"
+ * - "%d"
+ * - "%r"
+ *
+ *
+ * And the conversion pattern without the dates would be:
+ * " %p %c{1}: %m%n"
+ */
+class DateConversionPatternExtractor {
+ private enum STATES {
+ LITERAL,
+ PERCENT,
+ DATE_SPECIFIER,
+ DATE_SPECIFIER_OPEN_BRACE,
+ DATE_SPECIFIER_CLOSE_BRACE,
+ DATE_SPECIFIER_OPEN_QUOTE,
+ }
+
+ private final List<DateConversionPattern> dateConversionPatterns = new ArrayList<>();
+ private final String conversionPatternWithoutDates;
+
+ public DateConversionPatternExtractor (String conversionPattern) {
+ // The parsing algorithm uses a state machine with the states and
+ // transitions in the table below. States are named based on the character
+ // that led to that state.
+ //
+ // | Current state | Transition character | Action | Next state |
+ // |----------------------------|-----------------------|-------------------------------------|----------------------------|
+ // | LITERAL | [^%] | - | - |
+ // | LITERAL | % | Start pattern | PERCENT |
+ // | PERCENT | [^d] | if 'r' == c: Capture pattern | LITERAL |
+ // | PERCENT | d | - | DATE_SPECIFIER |
+ // | DATE_SPECIFIER | [^%{] | Capture pattern | LITERAL |
+ // | DATE_SPECIFIER | % | Capture pattern & start new pattern | PERCENT |
+ // | DATE_SPECIFIER | { | - | DATE_SPECIFIER_OPEN_BRACE |
+ // | DATE_SPECIFIER_OPEN_BRACE | [^'}] | - | - |
+ // | DATE_SPECIFIER_OPEN_BRACE | ' | - | DATE_SPECIFIER_OPEN_QUOTE |
+ // | DATE_SPECIFIER_OPEN_BRACE | } | - | DATE_SPECIFIER_CLOSE_BRACE |
+ // | DATE_SPECIFIER_CLOSE_BRACE | [^{%] | Capture pattern | LITERAL |
+ // | DATE_SPECIFIER_CLOSE_BRACE | % | Capture pattern & start new pattern | PERCENT |
+ // | DATE_SPECIFIER_OPEN_QUOTE | [^'] | - | - |
+ // | DATE_SPECIFIER_OPEN_QUOTE | ' | - | DATE_SPECIFIER_OPEN_BRACE |
+
+ if (conversionPattern == null || conversionPattern.isEmpty()) {
+ throw new IllegalArgumentException("Pattern " + conversionPattern
+ + " is not valid.");
+ }
+
+ STATES currentState = STATES.LITERAL;
+
+ int conversionPatternBeginOffset = 0;
+ int conversionPatternEndOffset = 0;
+ int formatBeginOffset = 0;
+ String format = null;
+ StringBuilder newPatternBuilder = new StringBuilder();
+ for (int i = 0; i < conversionPattern.length(); ++i) {
+ char c = conversionPattern.charAt(i);
+
+ switch (currentState) {
+ case LITERAL:
+ if ('%' == c) {
+ conversionPatternBeginOffset = i;
+ currentState = STATES.PERCENT;
+ }
+ break;
+ case PERCENT:
+ if ('d' == c) {
+ currentState = STATES.DATE_SPECIFIER;
+ } else {
+ if ('r' == c) {
+ newPatternBuilder.append(conversionPattern, conversionPatternEndOffset,
+ conversionPatternBeginOffset);
+ conversionPatternEndOffset = i + 1;
+ dateConversionPatterns.add(
+ new DateConversionPattern('r', null));
+ }
+ currentState = STATES.LITERAL;
+ }
+ break;
+ case DATE_SPECIFIER:
+ if ('{' == c) {
+ formatBeginOffset = i + 1;
+ currentState = STATES.DATE_SPECIFIER_OPEN_BRACE;
+ } else {
+ // End of date conversion pattern
+ newPatternBuilder.append(conversionPattern, conversionPatternEndOffset,
+ conversionPatternBeginOffset);
+ conversionPatternEndOffset = i;
+
+ dateConversionPatterns.add(
+ new DateConversionPattern('d', null));
+
+ if ('%' == c) {
+ conversionPatternBeginOffset = i;
+ currentState = STATES.PERCENT;
+ } else {
+ currentState = STATES.LITERAL;
+ }
+ }
+ break;
+ case DATE_SPECIFIER_OPEN_BRACE:
+ if ('\'' == c) {
+ currentState = STATES.DATE_SPECIFIER_OPEN_QUOTE;
+ } else if ('}' == c) {
+ format = conversionPattern.substring(formatBeginOffset, i);
+ currentState = STATES.DATE_SPECIFIER_CLOSE_BRACE;
+ }
+ break;
+ case DATE_SPECIFIER_CLOSE_BRACE:
+ // End of date conversion pattern
+ newPatternBuilder.append(conversionPattern, conversionPatternEndOffset,
+ conversionPatternBeginOffset);
+ conversionPatternEndOffset = i;
+
+ dateConversionPatterns.add(
+ new DateConversionPattern('d', format));
+
+ if ('%' == c) {
+ conversionPatternBeginOffset = i;
+ currentState = STATES.PERCENT;
+ } else {
+ currentState = STATES.LITERAL;
+ }
+ break;
+ case DATE_SPECIFIER_OPEN_QUOTE:
+ if ('\'' == c) {
+ currentState = STATES.DATE_SPECIFIER_OPEN_BRACE;
+ }
+ break;
+ }
+ }
+ // Handle the conversion pattern ending with an unprocessed date conversion
+ // pattern. This could happen with "%d" or "%d{...}" because we can't know
+ // they're complete unless we read the character following the date pattern
+ // or the pattern ends.
+ if (STATES.DATE_SPECIFIER == currentState || STATES.DATE_SPECIFIER_CLOSE_BRACE == currentState)
+ {
+ // End of date conversion pattern
+ newPatternBuilder.append(conversionPattern, conversionPatternEndOffset,
+ conversionPatternBeginOffset);
+ conversionPatternEndOffset = conversionPattern.length();
+
+ dateConversionPatterns.add(
+ new DateConversionPattern('d', format));
+ }
+
+ // Append any remaining text after the last date conversion pattern
+ if (conversionPatternEndOffset < conversionPattern.length()) {
+ newPatternBuilder.append(conversionPattern, conversionPatternEndOffset,
+ conversionPattern.length());
+ }
+
+ conversionPatternWithoutDates = newPatternBuilder.toString();
+ }
+
+ /**
+ * @return All date conversion patterns extracted from the conversion pattern.
+ */
+ public List<DateConversionPattern> getDateConversionPatterns () {
+ return dateConversionPatterns;
+ }
+
+ /**
+ * @return The conversion pattern with all date conversion patterns extracted.
+ */
+ public String getConversionPatternWithoutDates () {
+ return conversionPatternWithoutDates;
+ }
+}
diff --git a/src/main/java/com/yscope/logging/logback/ManualTimeSource.java b/src/main/java/com/yscope/logging/logback/ManualTimeSource.java
new file mode 100644
index 0000000..003eca2
--- /dev/null
+++ b/src/main/java/com/yscope/logging/logback/ManualTimeSource.java
@@ -0,0 +1,19 @@
+package com.yscope.logging.logback;
+
+/**
+ * Manual time source that returns the last time that was set. This can be
+ * useful to control the current time during unit testing.
+ */
+public class ManualTimeSource implements TimeSource {
+ private long currentTimeInMilliseconds = 0;
+
+ @Override
+ public synchronized void setCurrentTimeInMilliseconds (long currentTimeInMilliseconds) {
+ this.currentTimeInMilliseconds = currentTimeInMilliseconds;
+ }
+
+ @Override
+ public synchronized long getCurrentTimeInMilliseconds () {
+ return currentTimeInMilliseconds;
+ }
+}
diff --git a/src/main/java/com/yscope/logging/logback/SystemTimeSource.java b/src/main/java/com/yscope/logging/logback/SystemTimeSource.java
new file mode 100644
index 0000000..18d4246
--- /dev/null
+++ b/src/main/java/com/yscope/logging/logback/SystemTimeSource.java
@@ -0,0 +1,16 @@
+package com.yscope.logging.logback;
+
+/**
+ * Time source for the system's current time.
+ */
+public class SystemTimeSource implements TimeSource {
+ @Override
+ public void setCurrentTimeInMilliseconds (long currentTimeInMilliseconds) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public long getCurrentTimeInMilliseconds () {
+ return System.currentTimeMillis();
+ }
+}
diff --git a/src/main/java/com/yscope/logging/logback/TimeSource.java b/src/main/java/com/yscope/logging/logback/TimeSource.java
new file mode 100644
index 0000000..bf1c443
--- /dev/null
+++ b/src/main/java/com/yscope/logging/logback/TimeSource.java
@@ -0,0 +1,20 @@
+package com.yscope.logging.logback;
+
+/**
+ * Interface for a time source. This interface is currently used to swap out
+ * real time sources (e.g., System.currentTimeMillis()) with manual time sources
+ * that can be used during unit tests.
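+ *
+ * For example, a unit test might drive time manually with a sketch like the
+ * following (the appender under test is assumed to accept a {@code TimeSource}):
+ * <pre>{@code
+ * ManualTimeSource timeSource = new ManualTimeSource();
+ * timeSource.setCurrentTimeInMilliseconds(0);
+ * // ... construct an appender with this time source and log some events ...
+ * timeSource.setCurrentTimeInMilliseconds(60_000);  // "advance" time by one minute
+ * }</pre>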
+ */
+public interface TimeSource {
+ /**
+ * If supported, sets the current time for this time source.
+ * @param currentTimeInMilliseconds Current time in milliseconds from some
+ * fixed point in time (e.g. the UNIX epoch).
+ */
+ public void setCurrentTimeInMilliseconds (long currentTimeInMilliseconds);
+
+ /**
+ * @return The time source's current time
+ */
+ public long getCurrentTimeInMilliseconds ();
+}
diff --git a/src/main/java/com/yscope/logging/logback/Utils.java b/src/main/java/com/yscope/logging/logback/Utils.java
new file mode 100644
index 0000000..7e14179
--- /dev/null
+++ b/src/main/java/com/yscope/logging/logback/Utils.java
@@ -0,0 +1,35 @@
+package com.yscope.logging.logback;
+
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.nio.file.Files;
+import java.nio.file.Paths;
+
+
+public class Utils {
+ /**
+ * Creates and opens an output file and file output stream.
+ * @param filePath Path for the file
+ * @return The file output stream
+ * @throws IOException on I/O error
+ */
+ public static FileOutputStream createOutputFile (String filePath) throws IOException {
+ FileOutputStream fileOutputStream;
+ try {
+ // append = false since we don't support appending to an existing file
+ fileOutputStream = new FileOutputStream(filePath, false);
+ } catch (FileNotFoundException ex) {
+ // Create the parent directory if necessary
+ String parentPath = new File(filePath).getParent();
+ if (null == parentPath) {
+ throw ex;
+ }
+ Files.createDirectories(Paths.get(parentPath));
+
+ fileOutputStream = new FileOutputStream(filePath, false);
+ }
+ return fileOutputStream;
+ }
+}
diff --git a/src/test/java/com/yscope/logging/logback/ClpIrFileAppenderTest.java b/src/test/java/com/yscope/logging/logback/ClpIrFileAppenderTest.java
new file mode 100644
index 0000000..f943792
--- /dev/null
+++ b/src/test/java/com/yscope/logging/logback/ClpIrFileAppenderTest.java
@@ -0,0 +1,211 @@
+package com.yscope.logging.logback;
+
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+
+import ch.qos.logback.classic.Level;
+import ch.qos.logback.classic.spi.LoggingEvent;
+import ch.qos.logback.classic.spi.ThrowableProxy;
+import ch.qos.logback.core.Context;
+import org.apache.commons.io.FileUtils;
+import org.junit.jupiter.api.Test;
+
+import static org.junit.jupiter.api.Assertions.assertThrowsExactly;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assertions.fail;
+
+
+public class ClpIrFileAppenderTest {
+ private final String patternLayoutString =
+ "%d{yyyy-MM-dd HH:mm:ss.SSS Z} [%thread] %level %class.%method(%file:%line): %msg %n";
+ private final int compressionLevel = 3;
+ private final Context context = new ch.qos.logback.classic.LoggerContext();
+
+ @Test
+ public void testFourByteIrAppender() {
+ testAppender(true);
+ }
+
+ @Test
+ public void testEightByteIrAppender () {
+ testAppender(false);
+ }
+
+ private void testAppender (boolean useFourByteEncoding) {
+ String fileName = useFourByteEncoding ? "four-byte.clp.zst" : "eight-byte.clp.zst";
+
+ // Validate invalid arguments are detected
+ assertThrowsExactly(IllegalArgumentException.class,
+ () -> new ClpIrFileAppender(null, patternLayoutString, context, useFourByteEncoding,
+ false, compressionLevel));
+ assertThrowsExactly(FileNotFoundException.class,
+ () -> new ClpIrFileAppender("", patternLayoutString, context, useFourByteEncoding, false,
+ compressionLevel));
+ assertThrowsExactly(IllegalArgumentException.class,
+ () -> new ClpIrFileAppender(fileName, null, context, useFourByteEncoding, false,
+ compressionLevel));
+ assertThrowsExactly(IllegalArgumentException.class,
+ () -> new ClpIrFileAppender(fileName, patternLayoutString, context, useFourByteEncoding,
+ false, Integer.MIN_VALUE));
+ assertThrowsExactly(IllegalArgumentException.class,
+ () -> new ClpIrFileAppender(fileName, patternLayoutString, context, useFourByteEncoding,
+ false, Integer.MAX_VALUE));
+
+ // Validate different file paths
+ try {
+ testEmptyCreation(Paths.get(fileName), patternLayoutString, useFourByteEncoding);
+ testEmptyCreation(Paths.get("a", "b", fileName), patternLayoutString, useFourByteEncoding);
+ } catch (Exception ex) {
+ fail(ex);
+ }
+
+ // Validate types of layouts
+ try {
+ testLayouts(fileName, useFourByteEncoding);
+ } catch (Exception ex) {
+ fail(ex);
+ }
+
+ // Test writing
+ try {
+ testWriting(fileName, useFourByteEncoding, false, compressionLevel);
+ testWriting(fileName, useFourByteEncoding, true, compressionLevel);
+ testWriting(fileName, useFourByteEncoding, false, compressionLevel + 1);
+ } catch (Exception ex) {
+ fail(ex);
+ }
+ }
+
+ /**
+ * Tests creating an empty CLP IR stream log with the given path.
+ * @param filePath Path to create. Note that after the test, the entire
+ * directory tree specified by the path will be deleted.
+ * @param pattern The conversion pattern to use for the appender
+ * @param useFourByteEncoding Whether to use CLP's four-byte encoding
+ * @throws IOException on I/O error
+ */
+ private void testEmptyCreation (
+ Path filePath,
+ String pattern,
+ boolean useFourByteEncoding
+ ) throws IOException {
+ String filePathString = filePath.toString();
+ ClpIrFileAppender clpIrFileAppender = new ClpIrFileAppender(filePathString, pattern, context,
+ useFourByteEncoding,
+ false, compressionLevel);
+ clpIrFileAppender.stop();
+ assertTrue(Files.exists(filePath));
+
+ Path parent = filePath.getParent();
+ if (null == parent) {
+ Files.delete(filePath);
+ } else {
+ // Get top-level parent
+ while (true) {
+ Path p = parent.getParent();
+ if (null == p) {
+ break;
+ }
+ parent = p;
+ }
+ FileUtils.deleteDirectory(parent.toFile());
+ }
+ }
+
+ /**
+ * Tests creating the appender with valid and invalid conversion patterns
+ * @param filePathString Path for the test log file
+ * @param useFourByteEncoding Whether to use CLP's four-byte encoding
+ * @throws IOException on I/O error
+ */
+ private void testLayouts (String filePathString, boolean useFourByteEncoding) throws IOException
+ {
+ Path filePath = Paths.get(filePathString);
+
+ testEmptyCreation(filePath, patternLayoutString, useFourByteEncoding);
+
+ assertThrowsExactly(IllegalArgumentException.class,
+ () -> new ClpIrFileAppender(filePathString, null, context,
+ useFourByteEncoding, false, compressionLevel));
+ assertThrowsExactly(IllegalArgumentException.class,
+ () -> new ClpIrFileAppender(filePathString, "", context,
+ useFourByteEncoding, false, compressionLevel));
+ }
+
+ /**
+ * Tests writing log files
+ * @param fileName Name of the test log file
+ * @param useFourByteEncoding Whether to use CLP's four-byte encoding
+ * @param closeFrameOnFlush Whether to close the Zstandard frame on flush
+ * @param compressionLevel Compression level to use for Zstandard
+ * @throws IOException on I/O error
+ */
+ private void testWriting (
+ String fileName,
+ boolean useFourByteEncoding,
+ boolean closeFrameOnFlush,
+ int compressionLevel
+ ) throws IOException {
+ // TODO Once decoding support has been added to clp-ffi-java, these tests
+ // should all be verified by decoding the stream and comparing it with
+ // the output of an uncompressed file appender.
+
+ String message = "Static text, dictVar1, 123, 456.7, dictVar2, 987, 654.3";
+
+ ClpIrFileAppender clpIrFileAppender = new ClpIrFileAppender(fileName, patternLayoutString,
+ context,
+ useFourByteEncoding,
+ closeFrameOnFlush,
+ compressionLevel);
+
+ // Log some normal logs
+ for (int i = 0; i < 100; ++i) {
+ LoggingEvent event = new LoggingEvent();
+ event.setLoggerName("com.yscope.logging.logback");
+ event.setLevel(Level.INFO);
+ event.setMessage(message);
+ event.setCallerData(new StackTraceElement[0]);
+
+ clpIrFileAppender.append(event);
+ }
+
+ // Log with an exception
+ LoggingEvent event = new LoggingEvent();
+ event.setLoggerName("com.yscope.logging.logback");
+ event.setLevel(Level.INFO);
+ event.setMessage(message);
+ event.setThrowableProxy(new ThrowableProxy(new FileNotFoundException()));
+ event.setCallerData(new StackTraceElement[0]);
+
+ clpIrFileAppender.append(event);
+
+ clpIrFileAppender.flush();
+
+ // Split into a new file
+ String fileName2 = fileName + ".2";
+ clpIrFileAppender.startNewFile(fileName2);
+
+ // Add some more logs
+ for (int i = 0; i < 100; ++i) {
+ LoggingEvent nextEvent = new LoggingEvent();
+ nextEvent.setLoggerName("com.yscope.logging.logback");
+ nextEvent.setLevel(Level.WARN);
+ nextEvent.setMessage(message);
+ nextEvent.setCallerData(new StackTraceElement[0]);
+
+ clpIrFileAppender.append(nextEvent);
+ }
+
+ clpIrFileAppender.stop();
+
+ // Verify file existence
+ Path filePath = Paths.get(fileName);
+ assertTrue(Files.exists(filePath));
+ Files.delete(filePath);
+ Path filePath2 = Paths.get(fileName2);
+ assertTrue(Files.exists(filePath2));
+ Files.delete(filePath2);
+ }
+}
diff --git a/src/test/java/com/yscope/logging/logback/DateConversionPatternExtractorTest.java b/src/test/java/com/yscope/logging/logback/DateConversionPatternExtractorTest.java
new file mode 100644
index 0000000..410926b
--- /dev/null
+++ b/src/test/java/com/yscope/logging/logback/DateConversionPatternExtractorTest.java
@@ -0,0 +1,149 @@
+package com.yscope.logging.logback;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import org.junit.jupiter.api.Test;
+
+import static org.junit.jupiter.api.Assertions.assertEquals;
+
+class DateConversionPatternExtractorTest {
+ @Test
+ public void testExtraction () {
+ DateConversionPattern datePattern;
+
+ // Validate extracting %d at different positions in the conversion pattern
+ datePattern = new DateConversionPattern('d', null);
+ testExtractingASingleSimpleDatePattern(
+ "%d %p %c{1} %% %m%n",
+ " %p %c{1} %% %m%n",
+ datePattern
+ );
+ testExtractingASingleSimpleDatePattern(
+ "%p %d %c{1} %% %m%n",
+ "%p %c{1} %% %m%n",
+ datePattern
+ );
+ testExtractingASingleSimpleDatePattern(
+ "%p %c{1} %% %m%n %d",
+ "%p %c{1} %% %m%n ",
+ datePattern
+ );
+
+ // Validate extracting %d{...} at different positions in the conversion
+ // pattern
+ datePattern = new DateConversionPattern('d', "HH:mm:ss' on 'yy/MM/dd");
+ testExtractingASingleSimpleDatePattern(
+ "%d{HH:mm:ss' on 'yy/MM/dd} %p %c{1} %% %m%n",
+ " %p %c{1} %% %m%n",
+ datePattern
+ );
+ testExtractingASingleSimpleDatePattern(
+ "%p %d{HH:mm:ss' on 'yy/MM/dd} %c{1} %% %m%n",
+ "%p %c{1} %% %m%n",
+ datePattern
+ );
+ testExtractingASingleSimpleDatePattern(
+ "%p %c{1} %% %m%n %d{HH:mm:ss' on 'yy/MM/dd}",
+ "%p %c{1} %% %m%n ",
+ datePattern
+ );
+
+ datePattern = new DateConversionPattern('d', "HH:mm:ss' on 'yy/MM/dd, Australia/Perth");
+ testExtractingASingleDatePattern(
+ "%d{HH:mm:ss' on 'yy/MM/dd, Australia/Perth} %p %c{1} %% %m%n",
+ " %p %c{1} %% %m%n",
+ datePattern
+ );
+
+ // Validate extracting %r at different positions in the conversion pattern
+ datePattern = new DateConversionPattern('r', null);
+ testExtractingASingleSimpleDatePattern(
+ "%r %p %c{1} %% %m%n",
+ " %p %c{1} %% %m%n",
+ datePattern
+ );
+ testExtractingASingleSimpleDatePattern(
+ "%p %r %c{1} %% %m%n",
+ "%p %c{1} %% %m%n",
+ datePattern
+ );
+ testExtractingASingleSimpleDatePattern(
+ "%p %c{1} %% %m%n %r",
+ "%p %c{1} %% %m%n ",
+ datePattern
+ );
+
+    List<DateConversionPattern> datePatterns = new ArrayList<>();
+
+ // Validate extracting multiple simple date conversion patterns
+ datePatterns.add(new DateConversionPattern('d', null));
+ datePatterns.add(new DateConversionPattern('d', null));
+ datePatterns.add(new DateConversionPattern('d', "HH:mm:ss' on 'yy/MM/dd"));
+ datePatterns.add(new DateConversionPattern('d', null));
+ datePatterns.add(new DateConversionPattern('r', null));
+ testExtractingMultipleDatePatterns(
+ "%d%d %d{HH:mm:ss' on 'yy/MM/dd}%d %r %p %c{1}: %m%n",
+ " %p %c{1}: %m%n",
+ datePatterns
+ );
+
+    // Validate extracting multiple date conversion patterns, including one
+    // with a timezone
+ datePatterns.clear();
+ datePatterns.add(new DateConversionPattern('d', null));
+ datePatterns.add(new DateConversionPattern('d', null));
+ datePatterns.add(new DateConversionPattern('d', "HH:mm:ss' on 'yy/MM/dd, Australia/Perth"));
+ datePatterns.add(new DateConversionPattern('d', null));
+ datePatterns.add(new DateConversionPattern('r', null));
+ testExtractingMultipleDatePatterns(
+ "%d%d %d{HH:mm:ss' on 'yy/MM/dd, Australia/Perth}%d %r %p %c{1}: %m%n",
+ " %p %c{1}: %m%n",
+ datePatterns
+ );
+ }
+
+  /**
+   * Tests extracting a single simple date conversion pattern (one without a
+   * timezone specifier) from the given conversion pattern
+   * @param pattern The conversion pattern to extract from
+   * @param expectedPatternWithoutDates The expected conversion pattern after
+   * the date pattern has been extracted
+   * @param expectedDatePattern The expected extracted date conversion pattern
+   */
+ private void testExtractingASingleSimpleDatePattern (
+ String pattern,
+ String expectedPatternWithoutDates,
+ DateConversionPattern expectedDatePattern
+ ) {
+ testExtractingASingleDatePattern(pattern, expectedPatternWithoutDates,
+ expectedDatePattern);
+ }
+
+ private void testExtractingASingleDatePattern (
+ String pattern,
+ String expectedPatternWithoutDates,
+ DateConversionPattern expectedDatePattern
+ ) {
+ DateConversionPatternExtractor parser = new DateConversionPatternExtractor(pattern);
+    List<DateConversionPattern> parsedDatePatterns = parser.getDateConversionPatterns();
+ assertEquals(1, parsedDatePatterns.size());
+ assertEquals(expectedDatePattern, parsedDatePatterns.get(0));
+ assertEquals(expectedPatternWithoutDates, parser.getConversionPatternWithoutDates());
+ }
+
+  private void testExtractingMultipleDatePatterns (
+      String pattern,
+      String expectedPattern,
+      List<DateConversionPattern> expectedDatePatterns
+  ) {
+    DateConversionPatternExtractor parser = new DateConversionPatternExtractor(pattern);
+    List<DateConversionPattern> parsedDatePatterns = parser.getDateConversionPatterns();
+ assertEquals(expectedDatePatterns.size(), parsedDatePatterns.size());
+
+ for (int i = 0; i < expectedDatePatterns.size(); ++i) {
+ assertEquals(expectedDatePatterns.get(i), parsedDatePatterns.get(i));
+ }
+ assertEquals(expectedPattern, parser.getConversionPatternWithoutDates());
+ }
+}
diff --git a/src/test/java/com/yscope/logging/logback/RollingFileAppenderTest.java b/src/test/java/com/yscope/logging/logback/RollingFileAppenderTest.java
new file mode 100644
index 0000000..4350f83
--- /dev/null
+++ b/src/test/java/com/yscope/logging/logback/RollingFileAppenderTest.java
@@ -0,0 +1,500 @@
+package com.yscope.logging.logback;
+
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.time.Duration;
+import java.util.Comparator;
+import java.util.stream.Stream;
+
+import ch.qos.logback.classic.Level;
+import ch.qos.logback.classic.spi.LoggingEvent;
+import ch.qos.logback.classic.spi.ThrowableProxy;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Test;
+
+import static java.lang.Thread.sleep;
+import static org.junit.jupiter.api.Assertions.assertDoesNotThrow;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertTimeoutPreemptively;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assertions.fail;
+
+public class RollingFileAppenderTest {
+
+ private final String patternLayoutString =
+ "%d{yyyy-MM-dd HH:mm:ss.SSS Z} [%thread] %level %class.%method(%file:%line): %msg %n";
+ private final int flushFatalLevelTimeout = 1;
+ private final int flushErrorLevelTimeout = 2;
+ private final int flushInfoLevelTimeout = 3;
+ private final int flushSoftTimeoutUnitInMilliseconds = 1000;
+ private final int flushHardTimeoutUnitInMilliseconds = 60000;
+ private final int timeoutCheckPeriod = 10;
+ private final String outputDir = "testOutputDir";
+
+ /**
+ * Tests rollover based on the uncompressed size of the file.
+ */
+ @Test
+ public void testRollingBasedOnUncompressedSize () {
+ RollingFileTestAppender appender = createTestAppender(true, true);
+ // Set the uncompressed rollover size to 1 so that every append triggers a
+ // rollover
+ appender.setRolloverUncompressedSizeThreshold(1);
+ appender.start();
+
+ // Verify rollover after appending every event
+ int expectedNumRollovers = 0;
+ appendLogEvent(0, Level.INFO, appender);
+ ++expectedNumRollovers;
+ validateNumSyncAndCloseEvents(appender, 0, expectedNumRollovers);
+ appendLogEvent(0, Level.INFO, appender);
+ ++expectedNumRollovers;
+ validateNumSyncAndCloseEvents(appender, 0, expectedNumRollovers);
+
+ // Verify a rollover after closing the appender
+ appender.stop();
+ ++expectedNumRollovers;
+ validateNumSyncAndCloseEvents(appender, 0, expectedNumRollovers);
+ }
+
+ /**
+ * Tests rollover based on the compressed size of the file.
+ */
+ @Test
+ public void testRollingBasedOnCompressedSize () {
+ RollingFileTestAppender appender = createTestAppender(true, true);
+ // Set the compressed rollover size to 1 so that a rollover is triggered
+ // once data is output to the file
+ appender.setRolloverCompressedSizeThreshold(1);
+ appender.start();
+
+ // Verify that an append-flush-append sequence triggers a rollover. We need
+ // the first append and flush to force the compressor to flush the buffered
+ // log event to the output file. The final append is to trigger the
+ // rollover.
+ int expectedNumRollovers = 0;
+ appendLogEvent(0, Level.INFO, appender);
+ assertDoesNotThrow(appender::flush);
+ validateNumSyncAndCloseEvents(appender, 0, expectedNumRollovers);
+ appendLogEvent(0, Level.INFO, appender);
+ ++expectedNumRollovers;
+ validateNumSyncAndCloseEvents(appender, 0, expectedNumRollovers);
+
+ // Verify a rollover after closing the appender
+ appender.stop();
+ ++expectedNumRollovers;
+ validateNumSyncAndCloseEvents(appender, 0, expectedNumRollovers);
+ }
+
+ /**
+ * Tests the hard timeout
+ */
+ @Test
+ public void testHardTimeout () {
+ validateBasicFlushTimeoutSupport(false);
+
+ RollingFileTestAppender appender = createTestAppender(true, false);
+ appender.start();
+ int expectedNumSyncs = 0;
+ int expectedNumRollovers = 0;
+ int currentTimestamp = 0;
+
+ // Verify a sequence of two ERROR events triggers a sync due to the hard
+ // timeout of the first ERROR event
+ appendLogEvent(currentTimestamp, Level.ERROR, appender);
+ // Move time forward to just before the timeout
+ // NOTE: We use "- 2" here (instead of "- 1") so that in the next validation
+ // step, validateSyncAfterTimeout still has room to move time forward before
+ // triggering the timeout
+ currentTimestamp += flushErrorLevelTimeout * flushHardTimeoutUnitInMilliseconds - 2;
+ appender.setTime(currentTimestamp);
+ validateNumSyncAndCloseEvents(appender, expectedNumSyncs, expectedNumRollovers);
+ // Append the second ERROR event and validate a sync happens due to the
+ // first
+ appendLogEvent(currentTimestamp, Level.ERROR, appender);
+ currentTimestamp += 2;
+ ++expectedNumSyncs;
+ validateSyncAfterTimeout(currentTimestamp, expectedNumSyncs, expectedNumRollovers, appender);
+ // Validate no sync happens because of the second ERROR event
+ currentTimestamp += flushErrorLevelTimeout * flushHardTimeoutUnitInMilliseconds;
+ appender.setTime(currentTimestamp);
+ validateNumSyncAndCloseEvents(appender, expectedNumSyncs, expectedNumRollovers);
+
+ // Verify a rollover after closing the appender
+ appender.stop();
+ ++expectedNumRollovers;
+ validateNumSyncAndCloseEvents(appender, expectedNumSyncs, expectedNumRollovers);
+ }
+
+ /**
+ * Tests the soft timeout
+ */
+ @Test
+ public void testSoftTimeout () {
+ validateBasicFlushTimeoutSupport(true);
+
+ RollingFileTestAppender appender = createTestAppender(false, true);
+ appender.start();
+ int expectedNumSyncs = 0;
+ int expectedNumRollovers = 0;
+ int currentTimestamp = 0;
+
+ // Append three events over some time period and verify a sync only happens
+ // after the timeout triggered by the last event
+ int iterations = 3;
+ for (int i = 0; i < iterations; i++) {
+ appendLogEvent(i, Level.INFO, appender);
+ currentTimestamp += 1;
+ appender.setTime(currentTimestamp);
+ }
+ // NOTE: The -1 is to account for the extra time unit we added after the
+ // last log event
+ currentTimestamp += flushInfoLevelTimeout * flushSoftTimeoutUnitInMilliseconds - 1;
+ ++expectedNumSyncs;
+ validateSyncAfterTimeout(currentTimestamp, expectedNumSyncs, expectedNumRollovers, appender);
+
+ // Verify a sequence of two ERROR events triggers a sync due to the soft
+ // timeout of the second ERROR event
+ appendLogEvent(currentTimestamp, Level.ERROR, appender);
+ // Move time forward to just before the timeout
+ currentTimestamp += flushErrorLevelTimeout * flushSoftTimeoutUnitInMilliseconds - 1;
+ appender.setTime(currentTimestamp);
+ validateNumSyncAndCloseEvents(appender, expectedNumSyncs, expectedNumRollovers);
+ // Append the second ERROR event and validate a sync happens only due to the
+ // second
+ appendLogEvent(currentTimestamp, Level.ERROR, appender);
+ currentTimestamp += 1;
+ validateNumSyncAndCloseEvents(appender, expectedNumSyncs, expectedNumRollovers);
+ currentTimestamp += flushErrorLevelTimeout * flushSoftTimeoutUnitInMilliseconds - 1;
+ ++expectedNumSyncs;
+ validateSyncAfterTimeout(currentTimestamp, expectedNumSyncs, expectedNumRollovers, appender);
+
+ // Verify a sequence of ERROR-INFO events triggers a sync due to the soft
+ // timeout of the second log event as if it was an ERROR event rather than
+ // an INFO event
+ appendLogEvent(currentTimestamp, Level.ERROR, appender);
+ // Move time forward to just before the timeout
+ currentTimestamp += flushErrorLevelTimeout * flushSoftTimeoutUnitInMilliseconds - 1;
+ appender.setTime(currentTimestamp);
+ validateNumSyncAndCloseEvents(appender, expectedNumSyncs, expectedNumRollovers);
+ // Append the INFO event and validate the timeout logic treats it as if it
+ // was a second ERROR event
+ appendLogEvent(currentTimestamp, Level.INFO, appender);
+ currentTimestamp += 1;
+ validateNumSyncAndCloseEvents(appender, expectedNumSyncs, expectedNumRollovers);
+ currentTimestamp += flushErrorLevelTimeout * flushSoftTimeoutUnitInMilliseconds - 1;
+ ++expectedNumSyncs;
+ validateSyncAfterTimeout(currentTimestamp, expectedNumSyncs, expectedNumRollovers, appender);
+
+ // Verify a rollover after closing the appender
+ appender.stop();
+ ++expectedNumRollovers;
+ validateNumSyncAndCloseEvents(appender, expectedNumSyncs, expectedNumRollovers);
+ }
+
+ /**
+ * Tests closing the appender with different closeOnShutdown settings
+ */
+ @Test
+ public void testClose () {
+ validateAppenderClose(true);
+ validateAppenderClose(false);
+ }
+
+ /**
+ * Tests the appender's shutdown handling when it's open/closed
+ */
+ @Test
+ public void testShutdownLogic () {
+ validateCloseBeforeShutdown(true);
+ validateCloseBeforeShutdown(false);
+ validateShutdownWithoutClose(0);
+ validateShutdownWithoutClose(1);
+ validateShutdownWithoutClose(2);
+ }
+
+ @AfterEach
+ public void cleanUpFiles () {
+ // Delete the output directory tree
+    try (Stream<Path> s = Files.walk(Paths.get(outputDir))) {
+ s.sorted(Comparator.reverseOrder()).map(Path::toFile).forEach(File::delete);
+ } catch (IOException ex) {
+ fail(ex);
+ }
+ }
+
+ /**
+ * Performs basic validation of flush timeout support (not specific to either
+ * soft/hard) for the appender
+ * @param testSoftTimeout Whether to test soft (true) or hard (false) timeout
+ * support
+ */
+ private void validateBasicFlushTimeoutSupport (boolean testSoftTimeout) {
+ int timeoutUnitInMilliseconds =
+ testSoftTimeout ? flushSoftTimeoutUnitInMilliseconds : flushHardTimeoutUnitInMilliseconds;
+ RollingFileTestAppender appender =
+ createTestAppender(false == testSoftTimeout, testSoftTimeout);
+ appender.start();
+ int expectedNumSyncs = 0;
+ int expectedNumRollovers = 0;
+ int currentTimestamp = 0;
+
+ // Verify a single INFO event triggers a sync after a timeout
+ appendLogEvent(currentTimestamp, Level.INFO, appender);
+ currentTimestamp = flushInfoLevelTimeout * timeoutUnitInMilliseconds;
+ ++expectedNumSyncs;
+ validateSyncAfterTimeout(currentTimestamp, expectedNumSyncs, expectedNumRollovers, appender);
+
+ // Verify a sequence of INFO-ERROR events triggers a sync due to the ERROR
+ // event sooner than the timeout for the INFO event
+ appendLogEvent(currentTimestamp, Level.INFO, appender);
+ appendLogEvent(currentTimestamp, Level.ERROR, appender);
+ validateNumSyncAndCloseEvents(appender, expectedNumSyncs, expectedNumRollovers);
+ currentTimestamp += flushErrorLevelTimeout * timeoutUnitInMilliseconds;
+ ++expectedNumSyncs;
+ validateSyncAfterTimeout(currentTimestamp, expectedNumSyncs, expectedNumRollovers, appender);
+ // Validate no sync happens because of the INFO event
+ currentTimestamp += flushInfoLevelTimeout * timeoutUnitInMilliseconds;
+ appender.setTime(currentTimestamp);
+ validateNumSyncAndCloseEvents(appender, expectedNumSyncs, expectedNumRollovers);
+
+ // Verify a rollover after closing the appender
+ appender.stop();
+ ++expectedNumRollovers;
+ validateNumSyncAndCloseEvents(appender, expectedNumSyncs, expectedNumRollovers);
+ }
+
+ /**
+ * Validates the flush and sync logic when the appender is closed
+ * @param closeOnShutdown The value of closeOnShutdown to use when validating
+ */
+ private void validateAppenderClose (boolean closeOnShutdown) {
+ RollingFileTestAppender appender = createTestAppender(false, false);
+ appender.setCloseOnShutdown(closeOnShutdown);
+ appender.start();
+ int expectedNumSyncs = 0;
+ int expectedNumRollovers = 0;
+
+ assertTrue(appender.backgroundThreadsRunning());
+
+ // Close the appender and verify the state of the background threads
+ appender.stop();
+ assertFalse(appender.backgroundThreadsRunning());
+
+ // Verify a rollover after closing the appender
+ ++expectedNumRollovers;
+ validateNumSyncAndCloseEvents(appender, expectedNumSyncs, expectedNumRollovers);
+ }
+
+ /**
+ * Validates closing the appender and then shutting it down
+ * @param closeOnShutdown The value of closeOnShutdown to use when validating
+ */
+ private void validateCloseBeforeShutdown (boolean closeOnShutdown) {
+ RollingFileTestAppender appender = createTestAppender(false, false);
+ appender.setCloseOnShutdown(closeOnShutdown);
+ appender.start();
+
+ assertTrue(appender.backgroundThreadsRunning());
+
+ appender.stop();
+ assertFalse(appender.backgroundThreadsRunning());
+
+ appender.simulateShutdownHook();
+ waitForBackgroundFlushThread();
+ assertFalse(appender.backgroundThreadsRunning());
+ }
+
+ /**
+ * Validates shutting down the appender without calling {@code close} first
+ * @param numTimeoutSettingsToTest 0 - no timeouts, 1 - soft timeout only,
+ * 2 - both soft and hard timeouts
+ */
+ private void validateShutdownWithoutClose (int numTimeoutSettingsToTest) {
+ RollingFileTestAppender appender = createTestAppender(true, true);
+ appender.setCloseOnShutdown(false);
+ final int shutdownSoftTimeoutInMillis = 200;
+ appender.setShutdownSoftTimeoutInMilliseconds(shutdownSoftTimeoutInMillis);
+ final int numShutdownSoftTimeoutsInHardTimeout = 5;
+ appender.setShutdownHardTimeoutInSeconds(
+ shutdownSoftTimeoutInMillis * numShutdownSoftTimeoutsInHardTimeout / 1000);
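+    // i.e., a 1-second hard timeout (5 x 200 ms soft timeouts)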
+ appender.start();
+
+ int expectedNumSyncs = 0;
+ int expectedNumRollovers = 0;
+ int currentTimestamp = 0;
+
+ assertTrue(appender.backgroundThreadsRunning());
+
+ // Trigger the shutdown hook and ensure the threads continue to run
+ appender.simulateShutdownHook();
+ waitForBackgroundFlushThread();
+ assertTrue(appender.backgroundThreadsRunning());
+
+ if (0 == numTimeoutSettingsToTest) {
+ // Don't log anything
+ currentTimestamp = shutdownSoftTimeoutInMillis - 1;
+ } else if (1 == numTimeoutSettingsToTest) {
+ // Log two events to advance the soft shutdown timeout once before it
+ // expires. In each case, ensure the background threads continue to run.
+ for (int i = 0; i < 2; ++i) {
+ appendLogEvent(currentTimestamp, Level.INFO, appender);
+ waitForBackgroundFlushThread();
+
+ currentTimestamp += shutdownSoftTimeoutInMillis - 1;
+ setTimestampAndValidateThreadsState(currentTimestamp, true, appender);
+ }
+ } else {
+ // Log enough events so we get close to the hard shutdown timeout without
+ // exceeding it, all while the soft shutdown timeout is kept alive.
+ // Throughout, ensure the background threads continue to run.
+ for (int i = 0; i < numShutdownSoftTimeoutsInHardTimeout; ++i) {
+ appendLogEvent(currentTimestamp, Level.INFO, appender);
+ waitForBackgroundFlushThread();
+
+ currentTimestamp += shutdownSoftTimeoutInMillis - 1;
+ setTimestampAndValidateThreadsState(currentTimestamp, true, appender);
+ }
+      // Log one more event so the soft timeout extends past the hard
+      // shutdown timeout, then advance the timestamp to just before the hard
+      // timeout
+ appendLogEvent(currentTimestamp, Level.INFO, appender);
+ waitForBackgroundFlushThread();
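+      // NOTE: The hard shutdown timeout is measured from when the shutdown
+      // hook fired (time 0), so the timestamp below is set absolutely rather
+      // than incremented.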
+ currentTimestamp = shutdownSoftTimeoutInMillis * numShutdownSoftTimeoutsInHardTimeout - 1;
+ }
+
+ // Validate that the threads are running up until the timeout expires
+ setTimestampAndValidateThreadsState(currentTimestamp, true, appender);
+ currentTimestamp += 1;
+ setTimestampAndValidateThreadsState(currentTimestamp, false, appender);
+
+ // Verify a rollover after closing the appender
+ ++expectedNumRollovers;
+ validateNumSyncAndCloseEvents(appender, expectedNumSyncs, expectedNumRollovers);
+ }
+
+ private void appendLogEvent (long timestamp, Level level, RollingFileTestAppender appender) {
+ String loggerName = RollingFileTestAppender.class.getCanonicalName();
+ String message = "Static text, dictVar1, 123, 456.7, dictVar2, 987, 654.3";
+ LoggingEvent event = new LoggingEvent();
+ event.setLoggerName(loggerName);
+ event.setLevel(level);
+ event.setMessage(message);
+ event.setThrowableProxy(new ThrowableProxy(new FileNotFoundException()));
+ event.setCallerData(new StackTraceElement[0]);
+ event.setTimeStamp(timestamp);
+ appender.append(event);
+ }
+
+ /**
+ * Validates that a sync only occurs after the specified timestamp and not a
+ * time unit before
+ * @param syncTimestamp Time when the sync should occur
+ * @param expectedNumSyncs
+ * @param expectedNumRollovers
+ * @param appender
+ */
+ private void validateSyncAfterTimeout (long syncTimestamp, int expectedNumSyncs,
+ int expectedNumRollovers,
+ RollingFileTestAppender appender) {
+ appender.setTime(syncTimestamp - 1);
+ validateNumSyncAndCloseEvents(appender, expectedNumSyncs - 1, expectedNumRollovers);
+ appender.setTime(syncTimestamp);
+ validateNumSyncAndCloseEvents(appender, expectedNumSyncs, expectedNumRollovers);
+ }
+
+ /**
+ * Sets the appender's time to the given timestamp and validates that the
+   * threads are in the given state
+ * @param timestamp
+ * @param threadsShouldBeRunning
+ * @param appender
+ */
+ private void setTimestampAndValidateThreadsState (long timestamp, boolean threadsShouldBeRunning,
+ RollingFileTestAppender appender)
+ {
+ appender.setTime(timestamp);
+ waitForBackgroundFlushThread();
+ assertTimeoutPreemptively(Duration.ofSeconds(1), () -> {
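+      // Busy-wait until the background threads reach the expected state;
+      // assertTimeoutPreemptively fails the test if they don't within a
+      // second.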
+ while (appender.backgroundThreadsRunning() != threadsShouldBeRunning) {
+ }
+ });
+ }
+
+ /**
+ * Imperfectly waits for the appender's background flush thread to make
+ * progress by simply sleeping for some amount of time
+ */
+ private void waitForBackgroundFlushThread () {
+ final long durationNecessaryForBackgroundFlushThreadProgress = 200; // milliseconds
+ assertDoesNotThrow(() -> sleep(durationNecessaryForBackgroundFlushThreadProgress));
+ }
+
+ /**
+ * Validates that the appender has triggered the given number of sync and
+ * sync-and-close events
+ * @param appender
+ * @param numSyncs
+ * @param numRollovers
+ */
+ private void validateNumSyncAndCloseEvents (RollingFileTestAppender appender, int numSyncs,
+ int numRollovers)
+ {
+ long sleepTime = timeoutCheckPeriod * 2;
+ // Sleep so the background threads have a chance to process any syncs and
+ // rollovers
+ assertDoesNotThrow(() -> sleep(sleepTime));
+
+ // Verify the expected num of syncs and rollovers
+ long deadlineTimestamp = System.currentTimeMillis() + sleepTime;
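+    // Poll until the counts match; if the deadline passes first, the
+    // assertions below report the mismatch and fail.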
+ while (appender.getNumSyncs() != numSyncs) {
+ if (System.currentTimeMillis() >= deadlineTimestamp) {
+ assertEquals(numSyncs, appender.getNumSyncs());
+ }
+ }
+ while (appender.getNumRollovers() != numRollovers) {
+ if (System.currentTimeMillis() >= deadlineTimestamp) {
+ assertEquals(numRollovers, appender.getNumRollovers());
+ }
+ }
+ }
+
+ /**
+   * Creates and initializes a RollingFileTestAppender for the tests. Note that
+   * this method doesn't call {@code start} on the appender.
+ * @param disableSoftTimeout
+ * @param disableHardTimeout
+ * @return The created appender
+ */
+ private RollingFileTestAppender createTestAppender (boolean disableSoftTimeout,
+ boolean disableHardTimeout)
+ {
+ RollingFileTestAppender appender = new RollingFileTestAppender();
+
+ // Set static settings
+ appender.setOutputDir(outputDir);
+ appender.setBaseName("test-file");
+ appender.setCloseFrameOnFlush(true);
+ appender.setPattern(patternLayoutString);
+ appender.setTimeoutCheckPeriod(timeoutCheckPeriod);
+ appender.setRolloverCompressedSizeThreshold(Integer.MAX_VALUE);
+ appender.setRolloverUncompressedSizeThreshold(Integer.MAX_VALUE);
+
+ // Set timeouts
+ String disabledTimeoutCsv =
+ "FATAL=" + Integer.MAX_VALUE + ",ERROR=" + Integer.MAX_VALUE + ", INFO="
+ + Integer.MAX_VALUE;
+ String timeoutCsv =
+ "FATAL=" + flushFatalLevelTimeout + ",ERROR=" + flushErrorLevelTimeout + ",INFO="
+ + flushInfoLevelTimeout;
+ appender.setFlushHardTimeoutsInMinutes(disableHardTimeout ? disabledTimeoutCsv : timeoutCsv);
+ appender.setFlushSoftTimeoutsInSeconds(disableSoftTimeout ? disabledTimeoutCsv : timeoutCsv);
+
+ return appender;
+ }
+}
diff --git a/src/test/java/com/yscope/logging/logback/RollingFileTestAppender.java b/src/test/java/com/yscope/logging/logback/RollingFileTestAppender.java
new file mode 100644
index 0000000..73df412
--- /dev/null
+++ b/src/test/java/com/yscope/logging/logback/RollingFileTestAppender.java
@@ -0,0 +1,55 @@
+package com.yscope.logging.logback;
+
+import java.util.Map;
+
+import ch.qos.logback.classic.LoggerContext;
+
+/**
+ * A rolling file appender used for testing
+ * {@link AbstractClpIrBufferedRollingFileAppender}. It specifically allows us
+ * to control the time visible to the appender and tracks the number of syncs
+ * and rollover events.
+ */
+public class RollingFileTestAppender extends AbstractClpIrBufferedRollingFileAppender {
+ private int numSyncs = 0;
+ private int numRollovers = 0;
+
+ public RollingFileTestAppender () {
+ super(new ManualTimeSource());
+ this.setContext(new LoggerContext());
+ }
+
+ /**
+ * Sets the current time visible to the appender
+ * @param timestamp The current time
+ */
+ public void setTime (long timestamp) {
+ timeSource.setCurrentTimeInMilliseconds(timestamp);
+ }
+
+ public synchronized int getNumRollovers () {
+ return numRollovers;
+ }
+
+ public synchronized int getNumSyncs () {
+ return numSyncs;
+ }
+
+ /**
+ * Tracks the number of syncs and rollovers
+ * @param baseName {@inheritDoc}
+ * @param logRolloverTimestamp {@inheritDoc}
+ * @param deleteFile {@inheritDoc}
+ * @param fileMetadata {@inheritDoc}
+ */
+ @Override
+ protected synchronized void sync (String baseName, long logRolloverTimestamp,
+ boolean deleteFile, Map fileMetadata)
+ {
+ if (deleteFile) {
+ numRollovers += 1;
+ } else {
+ numSyncs += 1;
+ }
+ }
+}