Skip to content

Commit

Permalink
added support for null key and null value. Updated javadocs, added mo…
Browse files Browse the repository at this point in the history
…re tests. 100% code coverage.
  • Loading branch information
jdereg committed Jun 23, 2024
1 parent b456931 commit a88a1fa
Show file tree
Hide file tree
Showing 2 changed files with 114 additions and 26 deletions.
93 changes: 67 additions & 26 deletions src/main/java/com/cedarsoftware/util/LRUCache.java
Original file line number Diff line number Diff line change
Expand Up @@ -16,11 +16,14 @@

/**
* This class provides a thread-safe Least Recently Used (LRU) cache API that will evict the least recently used items,
* once a threshold is met. It implements the Map interface for convenience. It is thread-safe via usage of
* ConcurrentHashMap for internal storage. The .get(), .remove(), and .put() APIs operate in O(1) without any
* blocking. When .put() or remove() queues a call to a background cleanup thead that ensures cache.size <= capacity.
* This maintains cache size to capacity, even during bursty loads. It is not immediate, the LRUCache can exceed the
* capacity during a rapid load, however, it will quickly reduce to max capacity.
* once a threshold is met. It implements the Map interface for convenience.
* <p>
* LRUCache is thread-safe via usage of ConcurrentHashMap for internal storage. The .get(), .remove(), and .put() APIs
* operate in O(1) without blocking. When .put() is called, a background cleanup task is scheduled to ensure
* {@code cache.size <= capacity}. This keeps the cache near its configured capacity, even during bursty loads. The
* reduction is not immediate: the LRUCache can exceed the capacity during a rapid load; however, it will quickly shrink back to the maximum capacity.
* <p>
* LRUCache supports null for key or value.
* <p>
* @author John DeRegnaucourt ([email protected])
* <br>
Expand All @@ -40,17 +43,18 @@
*/
public class LRUCache<K, V> extends AbstractMap<K, V> implements Map<K, V> {
private static final ScheduledExecutorService executorService = Executors.newSingleThreadScheduledExecutor();
private static final long DELAY = 10; // 10ms delay
private static final Object NULL_ITEM = new Object(); // Sentinel value for null keys and values
private final long cleanupDelayMillis; // 10ms delay
private final int capacity;
private final ConcurrentMap<K, Node<K, V>> cache;
private final ConcurrentMap<Object, Node<K, V>> cache;
private volatile boolean cleanupScheduled = false;

private static class Node<K, V> {
final K key;
volatile V value;
volatile Object value;
volatile long timestamp;

Node(K key, V value) {
Node(K key, Object value) {
this.key = key;
this.value = value;
this.timestamp = System.nanoTime();
Expand All @@ -61,9 +65,27 @@ void updateTimestamp() {
}
}

/**
 * Create an LRUCache with the given maximum capacity, using the default cleanup delay of 10 milliseconds.
 * <p>
 * The cache may temporarily exceed {@code capacity} during bursty loads; the background cleanup task
 * quickly reduces it back down to {@code capacity}.
 * @param capacity int maximum number of entries the LRU cache should retain.
 */
public LRUCache(int capacity) {
this(capacity, 10);
}

/**
 * Create an LRUCache with the given maximum capacity and cleanup delay.
 * <p>
 * The cache may temporarily exceed {@code capacity} during bursty loads; a background task scheduled
 * {@code cleanupDelayMillis} after an overflowing put() reduces it back down to {@code capacity}.
 * @param capacity int maximum number of entries the LRU cache should retain; must be at least 1.
 * @param cleanupDelayMillis int milliseconds to wait before scheduling a cleanup (reduction to capacity
 *                           if the cache currently exceeds it); must not be negative.
 * @throws IllegalArgumentException if capacity is less than 1 or cleanupDelayMillis is negative.
 */
public LRUCache(int capacity, int cleanupDelayMillis) {
    // Validate eagerly: a capacity < 1 would make cleanup() try to evict everything forever,
    // and a negative delay is rejected by ScheduledExecutorService semantics anyway.
    if (capacity < 1) {
        throw new IllegalArgumentException("capacity must be at least 1, was: " + capacity);
    }
    if (cleanupDelayMillis < 0) {
        throw new IllegalArgumentException("cleanupDelayMillis must not be negative, was: " + cleanupDelayMillis);
    }
    this.capacity = capacity;
    this.cache = new ConcurrentHashMap<>(capacity);
    this.cleanupDelayMillis = cleanupDelayMillis;
}

private void cleanup() {
Expand All @@ -74,7 +96,7 @@ private void cleanup() {
int nodesToRemove = size - capacity;
for (int i = 0; i < nodesToRemove; i++) {
Node<K, V> node = nodes.get(i);
cache.remove(node.key, node);
cache.remove(toCacheItem(node.key), node);
}
}
cleanupScheduled = false; // Reset the flag after cleanup
Expand All @@ -86,21 +108,24 @@ private void cleanup() {

/**
 * Retrieve the value mapped to {@code key}, refreshing the entry's LRU timestamp on a hit.
 * Runs in O(1) without blocking. A null key is supported (mapped through the NULL_ITEM sentinel).
 * @param key key whose associated value is requested; may be null.
 * @return the associated value (possibly null), or null if the key is absent — note a null return
 *         cannot distinguish "absent" from "mapped to null"; use containsKey() for that.
 */
@Override
public V get(Object key) {
    Object cacheKey = toCacheItem(key);          // null key -> sentinel (ConcurrentHashMap is null-hostile)
    Node<K, V> node = cache.get(cacheKey);
    if (node != null) {
        node.updateTimestamp();                  // mark as most-recently-used
        return fromCacheItem(node.value);        // sentinel -> null for the caller
    }
    return null;
}

@Override
public V put(K key, V value) {
Node<K, V> newNode = new Node<>(key, value);
Node<K, V> oldNode = cache.put(key, newNode);
Object cacheKey = toCacheItem(key);
Object cacheValue = toCacheItem(value);
Node<K, V> newNode = new Node<>(key, cacheValue);
Node<K, V> oldNode = cache.put(cacheKey, newNode);
if (oldNode != null) {
newNode.updateTimestamp();
return oldNode.value;
return fromCacheItem(oldNode.value);
} else {
scheduleCleanup();
return null;
Expand All @@ -109,9 +134,10 @@ public V put(K key, V value) {

/**
 * Remove the entry for {@code key}, if present. Runs in O(1) without blocking; null key supported.
 * @param key key to remove; may be null.
 * @return the previously associated value (possibly null), or null if the key was absent.
 */
@Override
public V remove(Object key) {
    Object cacheKey = toCacheItem(key);          // null key -> sentinel
    Node<K, V> node = cache.remove(cacheKey);
    if (node != null) {
        return fromCacheItem(node.value);        // sentinel -> null for the caller
    }
    return null;
}
Expand All @@ -128,13 +154,14 @@ public int size() {

/**
 * Report whether {@code key} (null allowed) currently has an entry in the cache.
 * @param key key to look up; may be null.
 * @return true if an entry exists for the key.
 */
@Override
public boolean containsKey(Object key) {
    // Translate null to the sentinel so the lookup matches how put() stored it.
    return cache.containsKey(toCacheItem(key));
}

@Override
public boolean containsValue(Object value) {
Object cacheValue = toCacheItem(value);
for (Node<K, V> node : cache.values()) {
if (node.value.equals(value)) {
if (node.value.equals(cacheValue)) {
return true;
}
}
Expand All @@ -145,21 +172,25 @@ public boolean containsValue(Object value) {
/**
 * Return a point-in-time snapshot of the cache's entries, with sentinel keys/values unwrapped back
 * to null. The returned set is a copy: mutating it does not affect the cache, and it does not
 * reflect later cache changes.
 * @return snapshot set of the current entries.
 */
public Set<Map.Entry<K, V>> entrySet() {
    Set<Map.Entry<K, V>> entrySet = Collections.newSetFromMap(new ConcurrentHashMap<>());
    for (Node<K, V> node : cache.values()) {
        // SimpleEntry tolerates null key/value, unlike the backing map's own keys.
        entrySet.add(new AbstractMap.SimpleEntry<>(fromCacheItem(node.key), fromCacheItem(node.value)));
    }
    return entrySet;
}

/**
 * Return an unmodifiable point-in-time snapshot of the keys currently in the cache.
 * The returned set does not reflect later cache changes.
 * @return unmodifiable snapshot of the keys, which may include null.
 */
@Override
public Set<K> keySet() {
    // Must be a null-tolerant set: this cache supports a null key, and a set backed by
    // ConcurrentHashMap (newSetFromMap) would throw NPE when adding it. Fully qualified to
    // avoid relying on an import of HashSet at the top of the file.
    Set<K> keySet = new java.util.HashSet<>();
    for (Node<K, V> node : cache.values()) {
        keySet.add(fromCacheItem(node.key));
    }
    return Collections.unmodifiableSet(keySet);
}

/**
 * Return an unmodifiable point-in-time snapshot of the values currently in the cache,
 * with the NULL_ITEM sentinel unwrapped back to null. Does not reflect later cache changes.
 * @return unmodifiable snapshot of the values, which may include null.
 */
@Override
public Collection<V> values() {
    Collection<V> values = new ArrayList<>();    // ArrayList tolerates null elements
    for (Node<K, V> node : cache.values()) {
        values.add(fromCacheItem(node.value));
    }
    return Collections.unmodifiableCollection(values);
}
Expand All @@ -176,8 +207,8 @@ public boolean equals(Object o) {
/**
 * Hash over the cache's entries, unwrapping the NULL_ITEM sentinel before hashing so a cached
 * null key/value hashes as 0.
 * NOTE(review): ConcurrentHashMap iteration order is unspecified, and Map.hashCode() is specified
 * as the SUM of entry hashes; this order-dependent 31-multiplier form can disagree between equal
 * maps — confirm whether hashCode consistency with other Map implementations is required.
 */
public int hashCode() {
    int hashCode = 1;
    for (Node<K, V> node : cache.values()) {
        // Unwrap once per entry instead of calling fromCacheItem twice per field.
        Object key = fromCacheItem(node.key);
        Object value = fromCacheItem(node.value);
        hashCode = 31 * hashCode + (key == null ? 0 : key.hashCode());
        hashCode = 31 * hashCode + (value == null ? 0 : value.hashCode());
    }
    return hashCode;
}
Expand All @@ -187,7 +218,7 @@ public String toString() {
StringBuilder sb = new StringBuilder();
sb.append("{");
for (Node<K, V> node : cache.values()) {
sb.append(node.key).append("=").append(node.value).append(", ");
sb.append((K) fromCacheItem(node.key)).append("=").append((V)fromCacheItem(node.value)).append(", ");
}
if (sb.length() > 1) {
sb.setLength(sb.length() - 2); // Remove trailing comma and space
Expand All @@ -200,7 +231,17 @@ public String toString() {
/**
 * Schedule a single cleanup task when the cache exceeds capacity. Synchronized together with the
 * {@code cleanupScheduled} flag so at most one cleanup is pending at a time; the flag is reset by
 * cleanup() when it finishes.
 */
private synchronized void scheduleCleanup() {
    if (cache.size() > capacity && !cleanupScheduled) {
        cleanupScheduled = true;
        executorService.schedule(this::cleanup, cleanupDelayMillis, TimeUnit.MILLISECONDS);
    }
}

// Map a caller-supplied key or value to its internal representation: null becomes the
// NULL_ITEM sentinel so it can be stored in the null-hostile ConcurrentHashMap.
private Object toCacheItem(Object item) {
    if (item == null) {
        return NULL_ITEM;
    }
    return item;
}

// Inverse of toCacheItem: translate the NULL_ITEM sentinel back to the caller-visible null.
// The unchecked cast is safe because only K/V instances (or the sentinel) are ever stored.
@SuppressWarnings("unchecked")
private <T> T fromCacheItem(Object cacheItem) {
    if (cacheItem == NULL_ITEM) {
        return null;
    }
    return (T) cacheItem;
}
}
47 changes: 47 additions & 0 deletions src/test/java/com/cedarsoftware/util/LRUCacheTest.java
Original file line number Diff line number Diff line change
Expand Up @@ -294,6 +294,10 @@ void testEquals() {
cache2.put(4, "D");
assertFalse(cache1.equals(cache2));
assertFalse(cache2.equals(cache1));

assertFalse(cache1.equals(Boolean.TRUE));

assertTrue(cache1.equals(cache1));
}

@Test
Expand Down Expand Up @@ -324,6 +328,10 @@ void testToString() {
assert lruCache.toString().contains("1=A");
assert lruCache.toString().contains("2=B");
assert lruCache.toString().contains("3=C");

Map<String, String> cache = new LRUCache(100);
assert cache.toString().equals("{}");
assert cache.size() == 0;
}

@Test
Expand Down Expand Up @@ -412,4 +420,43 @@ void testCacheBlast() {

assertEquals(1000, lruCache.size());
}

/** Verify a null value can be stored, found, printed, and hashed. */
@Test
void testNullValue()
{
    // Bare 'assert' statements are no-ops unless the JVM runs with -ea; use the JUnit
    // assertions (already statically imported in this file) so the checks always execute.
    lruCache = new LRUCache<>(100, 1);   // 1ms cleanup delay keeps the test fast
    lruCache.put(1, null);
    assertTrue(lruCache.containsKey(1));
    assertTrue(lruCache.containsValue(null));
    assertTrue(lruCache.toString().contains("1=null"));
    assertTrue(lruCache.hashCode() != 0);
}

/** Verify a null key can be stored, found, printed, and hashed. */
@Test
void testNullKey()
{
    // JUnit assertions instead of bare 'assert' so the checks run even without -ea.
    lruCache = new LRUCache<>(100, 1);
    lruCache.put(null, "true");
    assertTrue(lruCache.containsKey(null));
    assertTrue(lruCache.containsValue("true"));
    assertTrue(lruCache.toString().contains("null=true"));
    assertTrue(lruCache.hashCode() != 0);
}

/** Verify a simultaneous null key and null value, including equals() between two such caches. */
@Test
void testNullKeyValue()
{
    // JUnit assertions instead of bare 'assert' so the checks run even without -ea.
    lruCache = new LRUCache<>(100, 1);
    lruCache.put(null, null);
    assertTrue(lruCache.containsKey(null));
    assertTrue(lruCache.containsValue(null));
    assertTrue(lruCache.toString().contains("null=null"));
    assertTrue(lruCache.hashCode() != 0);

    LRUCache<Integer, String> cache1 = new LRUCache<>(3);
    cache1.put(null, null);
    LRUCache<Integer, String> cache2 = new LRUCache<>(3);
    cache2.put(null, null);
    assertEquals(cache1, cache2);
}
}

0 comments on commit a88a1fa

Please sign in to comment.