Commit
LRUCache now implements both a locking and a threaded caching strategy, user selectable.
jdereg committed Jun 23, 2024
1 parent a3d6b02 commit 759fc0b
Showing 3 changed files with 34 additions and 38 deletions.
1 change: 0 additions & 1 deletion src/main/java/com/cedarsoftware/util/LRUCache.java
@@ -3,7 +3,6 @@
import java.util.Collection;
import java.util.Map;
import java.util.Set;
-import java.util.concurrent.ExecutorService;
import java.util.concurrent.ForkJoinPool;
import java.util.concurrent.ScheduledExecutorService;

59 changes: 24 additions & 35 deletions src/main/java/com/cedarsoftware/util/cache/ThreadedLRUCacheStrategy.java
@@ -10,7 +10,6 @@
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
-import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.ForkJoinPool;
import java.util.concurrent.ScheduledExecutorService;
@@ -19,12 +18,12 @@

/**
* This class provides a thread-safe Least Recently Used (LRU) cache API that will evict the least recently used items,
-* once a threshold is met. It implements the Map interface for convenience.
+* once a threshold is met.  It implements the Map interface for convenience.
* <p>
-* LRUCache is thread-safe via usage of ConcurrentHashMap for internal storage. The .get(), .remove(), and .put() APIs
-* operate in O(1) without blocking. When .put() is called, a background cleanup task is schedule to ensure
-* {@code cache.size <= capacity}. This maintains cache size to capacity, even during bursty loads. It is not immediate,
-* the LRUCache can exceed the capacity during a rapid load, however, it will quickly reduce to max capacity.
+* LRUCache is thread-safe via usage of ConcurrentHashMap for internal storage.  The .get(), .remove(), and .put() APIs
+* operate in O(1) without blocking.  When .put() is called, a background cleanup task is scheduled to ensure
+* {@code cache.size <= capacity}.  This maintains cache size to capacity, even during bursty loads.  It is not immediate;
+* the LRUCache can exceed the capacity during a rapid load; however, it will quickly reduce to max capacity.
* <p>
* LRUCache supports null for key or value.
* <p>
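[Editor's note] For context, here is a minimal usage sketch of the API this Javadoc describes. It is based on the two-argument constructor and StrategyType selector exercised in LRUCacheTest below; the THREADED constant and the key/value data are assumptions for illustration.

import com.cedarsoftware.util.LRUCache;

public class LRUCacheDemo {
    public static void main(String[] args) {
        // Capacity 3; the background cleanup task trims any overage shortly after put().
        LRUCache<String, String> cache = new LRUCache<>(3, LRUCache.StrategyType.THREADED);
        cache.put("a", "alpha");
        cache.put(null, "null keys are supported");
        cache.put("b", null);                        // null values are supported too
        System.out.println(cache.get("a"));          // prints "alpha"
        cache.shutdown();                            // releases the default scheduler
    }
}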
@@ -52,7 +51,7 @@ public class ThreadedLRUCacheStrategy<K, V> implements Map<K, V> {
private final AtomicBoolean cleanupScheduled = new AtomicBoolean(false);
private final ScheduledExecutorService scheduler;
private final ForkJoinPool cleanupPool;
-    private boolean isDefaultScheduler;
+    private final boolean isDefaultScheduler;

private static class Node<K> {
final K key;
@@ -71,31 +70,20 @@ void updateTimestamp() {
}

/**
-* Create a LRUCache with the maximum capacity of 'capacity.' Note, the LRUCache could temporarily exceed the
-* capacity, however, it will quickly reduce to that amount. This time is configurable via the cleanupDelay
+* Create a LRUCache with the maximum capacity of 'capacity.'  Note, the LRUCache could temporarily exceed the
+* capacity; however, it will quickly reduce to that amount.  This time is configurable via the cleanupDelay
* parameter and custom scheduler and executor services.
-* @param capacity int maximum size for the LRU cache.
+*
+* @param capacity           int maximum size for the LRU cache.
* @param cleanupDelayMillis int milliseconds before scheduling a cleanup (reduction to capacity if the cache currently
-* exceeds it).
-* @param scheduler ScheduledExecutorService for scheduling cleanup tasks.
-* @param cleanupPool ForkJoinPool for executing cleanup tasks.
+*                           exceeds it).
+* @param scheduler          ScheduledExecutorService for scheduling cleanup tasks.
+* @param cleanupPool        ForkJoinPool for executing cleanup tasks.
*/
public ThreadedLRUCacheStrategy(int capacity, int cleanupDelayMillis, ScheduledExecutorService scheduler, ForkJoinPool cleanupPool) {
-    isDefaultScheduler = false;
-    if (scheduler == null) {
-        this.scheduler = Executors.newScheduledThreadPool(1);
-        isDefaultScheduler = true;
-    } else {
-        this.scheduler = scheduler;
-        isDefaultScheduler = false;
-    }
-
-    if (cleanupPool == null) {
-        this.cleanupPool = ForkJoinPool.commonPool();
-    } else {
-        this.cleanupPool = cleanupPool;
-    }
-
+    this.isDefaultScheduler = scheduler == null;
+    this.scheduler = isDefaultScheduler ? Executors.newScheduledThreadPool(1) : scheduler;
+    this.cleanupPool = cleanupPool == null ? ForkJoinPool.commonPool() : cleanupPool;
this.capacity = capacity;
this.cache = new ConcurrentHashMap<>(capacity);
this.cleanupDelayMillis = cleanupDelayMillis;
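[Editor's note] A sketch of wiring this constructor with caller-owned services. Per the code above, passing null for scheduler or cleanupPool falls back to a default single-thread scheduler or ForkJoinPool.commonPool(). The capacity and delay values below are illustrative, and the package of ThreadedLRUCacheStrategy is inferred from the test file's package.

import java.util.concurrent.Executors;
import java.util.concurrent.ForkJoinPool;
import java.util.concurrent.ScheduledExecutorService;

import com.cedarsoftware.util.cache.ThreadedLRUCacheStrategy;

public class StrategyWiringDemo {
    public static void main(String[] args) {
        ScheduledExecutorService scheduler = Executors.newScheduledThreadPool(1);
        ForkJoinPool cleanupPool = new ForkJoinPool(2);

        // Capacity 10,000; cleanup runs 50 ms after the put() that scheduled it.
        ThreadedLRUCacheStrategy<String, byte[]> cache =
                new ThreadedLRUCacheStrategy<>(10_000, 50, scheduler, cleanupPool);
        cache.put("key", new byte[] {1, 2, 3});

        // shutdown() only terminates the cache's own default scheduler, so the
        // caller remains responsible for services it supplied.
        cache.shutdown();
        scheduler.shutdown();
        cleanupPool.shutdown();
    }
}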
@@ -259,11 +247,10 @@ public String toString() {
// Schedule a delayed cleanup
private void scheduleCleanup() {
if (cleanupScheduled.compareAndSet(false, true)) {
-            scheduler.schedule(() -> ForkJoinPool.commonPool().execute(this::cleanup), cleanupDelayMillis, TimeUnit.MILLISECONDS);
+            scheduler.schedule(() -> cleanupPool.execute(this::cleanup), cleanupDelayMillis, TimeUnit.MILLISECONDS);
}
}

-
// Converts a key or value to a cache-compatible item
private Object toCacheItem(Object item) {
return item == null ? NULL_ITEM : item;
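[Editor's note] The cleanupScheduled.compareAndSet(false, true) guard in scheduleCleanup() coalesces a burst of put() calls into a single pending cleanup. A standalone sketch of that pattern follows; the class and method names are illustrative, and unlike the cache (which presumably resets its flag when cleanup completes), this sketch reopens the window just before the task runs.

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;

class CoalescingScheduler {
    private final AtomicBoolean scheduled = new AtomicBoolean(false);
    private final ScheduledExecutorService ses = Executors.newSingleThreadScheduledExecutor();

    // Many threads may call request(); only the first one in each window
    // actually schedules the task, so bursts collapse into one execution.
    void request(Runnable task, long delayMillis) {
        if (scheduled.compareAndSet(false, true)) {
            ses.schedule(() -> {
                scheduled.set(false);   // reopen the window before running
                task.run();
            }, delayMillis, TimeUnit.MILLISECONDS);
        }
    }
}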
@@ -279,13 +266,15 @@ private <T> T fromCacheItem(Object cacheItem) {
* Shut down the scheduler if it is the default one.
*/
public void shutdown() {
-    scheduler.shutdown();
-    try {
-        if (!scheduler.awaitTermination(1, TimeUnit.SECONDS)) {
+    if (isDefaultScheduler) {
+        scheduler.shutdown();
+        try {
+            if (!scheduler.awaitTermination(1, TimeUnit.SECONDS)) {
+                scheduler.shutdownNow();
+            }
+        } catch (InterruptedException e) {
             scheduler.shutdownNow();
         }
-    } catch (InterruptedException e) {
-        scheduler.shutdownNow();
-    }
+    }
 }
}
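[Editor's note] The block guarded by isDefaultScheduler follows the standard shutdown() / awaitTermination() / shutdownNow() idiom for draining an executor. A self-contained sketch of that idiom; the one-second timeout mirrors the code above, while restoring the thread's interrupt flag is an extra safeguard, not something the diff itself does.

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

final class GracefulShutdownDemo {
    static void shutdownGracefully(ExecutorService es) {
        es.shutdown();                                    // stop accepting new tasks
        try {
            if (!es.awaitTermination(1, TimeUnit.SECONDS)) {
                es.shutdownNow();                         // force-cancel stragglers
            }
        } catch (InterruptedException e) {
            es.shutdownNow();
            Thread.currentThread().interrupt();           // preserve interrupt status
        }
    }

    public static void main(String[] args) {
        ExecutorService es = Executors.newFixedThreadPool(2);
        es.submit(() -> System.out.println("work"));
        shutdownGracefully(es);
    }
}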
12 changes: 10 additions & 2 deletions src/test/java/com/cedarsoftware/util/cache/LRUCacheTest.java
@@ -12,6 +12,7 @@
import java.util.concurrent.TimeUnit;

import com.cedarsoftware.util.LRUCache;
+import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.MethodSource;

@@ -36,6 +37,13 @@ void setUp(LRUCache.StrategyType strategyType) {
lruCache = new LRUCache<>(3, strategyType);
}

+    @AfterEach
+    void tearDown() {
+        if (lruCache != null) {
+            lruCache.shutdown();
+        }
+    }
+
@ParameterizedTest
@MethodSource("strategies")
void testPutAndGet(LRUCache.StrategyType strategy) {
@@ -429,7 +437,7 @@ void testCacheBlast(LRUCache.StrategyType strategy) {
}
try {
Thread.sleep(100);
System.out.println("Cache size: " + lruCache.size());
System.out.println(strategy + " cache size: " + lruCache.size());
} catch (InterruptedException ignored) {
}
}
@@ -489,6 +497,6 @@ void testSpeed(LRUCache.StrategyType strategy) {
cache.put(i, true);
}
long endTime = System.currentTimeMillis();
System.out.println("Speed: " + (endTime - startTime));
System.out.println(strategy + " speed: " + (endTime - startTime) + "ms");
}
}
