From a88a1fa45f5ba82ed4c4c947d050731b1c5f3385 Mon Sep 17 00:00:00 2001
From: John DeRegnaucourt <jdereg@gmail.com>
Date: Sat, 22 Jun 2024 21:30:49 -0400
Subject: [PATCH] added support for null key and null value. Updated javadocs,
 added more tests. 100% code coverage.

---
 .../java/com/cedarsoftware/util/LRUCache.java | 93 +++++++++++++------
 .../com/cedarsoftware/util/LRUCacheTest.java  | 47 ++++++++++
 2 files changed, 114 insertions(+), 26 deletions(-)

diff --git a/src/main/java/com/cedarsoftware/util/LRUCache.java b/src/main/java/com/cedarsoftware/util/LRUCache.java
index 777665df..5fb5254d 100644
--- a/src/main/java/com/cedarsoftware/util/LRUCache.java
+++ b/src/main/java/com/cedarsoftware/util/LRUCache.java
@@ -16,11 +16,14 @@
 /**
  * This class provides a thread-safe Least Recently Used (LRU) cache API that will evict the least recently used items,
- * once a threshold is met. It implements the Map interface for convenience. It is thread-safe via usage of
- * ConcurrentHashMap for internal storage. The .get(), .remove(), and .put() APIs operate in O(1) without any
- * blocking. When .put() or remove() queues a call to a background cleanup thead that ensures cache.size <= capacity.
- * This maintains cache size to capacity, even during bursty loads. It is not immediate, the LRUCache can exceed the
- * capacity during a rapid load, however, it will quickly reduce to max capacity.
+ * once a threshold is met. It implements the Map interface for convenience.
+ * <br><br>
+ * LRUCache is thread-safe via usage of ConcurrentHashMap for internal storage. The .get(), .remove(), and .put() APIs
+ * operate in O(1) without blocking. When .put() is called, a background cleanup task is scheduled to ensure
+ * {@code cache.size <= capacity}. This maintains the cache size at capacity, even during bursty loads. The reduction
+ * is not immediate; the LRUCache can exceed the capacity during a rapid load, but it quickly shrinks back to capacity.
+ * <br><br>
+ * LRUCache supports null for key or value.
  * <br><br>
  * @author John DeRegnaucourt (jdereg@gmail.com)
  *         <br>
@@ -40,17 +43,18 @@
  */
 public class LRUCache<K, V> extends AbstractMap<K, V> implements Map<K, V> {
     private static final ScheduledExecutorService executorService = Executors.newSingleThreadScheduledExecutor();
-    private static final long DELAY = 10; // 10ms delay
+    private static final Object NULL_ITEM = new Object(); // Sentinel value for null keys and values
+    private final long cleanupDelayMillis; // milliseconds to wait before a scheduled cleanup runs (defaults to 10ms)
     private final int capacity;
-    private final ConcurrentMap<K, Node<K, V>> cache;
+    private final ConcurrentMap<Object, Node<K, V>> cache;
     private volatile boolean cleanupScheduled = false;
 
     private static class Node<K, V> {
         final K key;
-        volatile V value;
+        volatile Object value;
         volatile long timestamp;
 
-        Node(K key, V value) {
+        Node(K key, Object value) {
             this.key = key;
             this.value = value;
             this.timestamp = System.nanoTime();
@@ -61,9 +65,27 @@ void updateTimestamp() {
         }
     }
 
+    /**
+     * Create an LRUCache with the maximum capacity of 'capacity.' Note that the LRUCache could temporarily exceed the
+     * capacity; however, it will quickly reduce to that amount. This time is configurable and defaults to 10ms.
+     * @param capacity int maximum size for the LRU cache.
+     */
     public LRUCache(int capacity) {
+        this(capacity, 10);
+    }
+
+    /**
+     * Create an LRUCache with the maximum capacity of 'capacity.' Note that the LRUCache could temporarily exceed the
+     * capacity; however, it will quickly reduce to that amount. This time is configurable via the cleanupDelayMillis
+     * parameter.
+     * @param capacity int maximum size for the LRU cache.
+     * @param cleanupDelayMillis int milliseconds before scheduling a cleanup (reduction to capacity if the cache currently
+     *                           exceeds it).
+     */
+    public LRUCache(int capacity, int cleanupDelayMillis) {
         this.capacity = capacity;
         this.cache = new ConcurrentHashMap<>(capacity);
+        this.cleanupDelayMillis = cleanupDelayMillis;
     }
 
     private void cleanup() {
@@ -74,7 +96,7 @@ private void cleanup() {
             int nodesToRemove = size - capacity;
             for (int i = 0; i < nodesToRemove; i++) {
                 Node<K, V> node = nodes.get(i);
-                cache.remove(node.key, node);
+                cache.remove(toCacheItem(node.key), node);
             }
         }
         cleanupScheduled = false; // Reset the flag after cleanup
@@ -86,21 +108,24 @@ private void cleanup() {
 
     @Override
     public V get(Object key) {
-        Node<K, V> node = cache.get(key);
+        Object cacheKey = toCacheItem(key);
+        Node<K, V> node = cache.get(cacheKey);
         if (node != null) {
             node.updateTimestamp();
-            return node.value;
+            return fromCacheItem(node.value);
         }
         return null;
     }
 
     @Override
     public V put(K key, V value) {
-        Node<K, V> newNode = new Node<>(key, value);
-        Node<K, V> oldNode = cache.put(key, newNode);
+        Object cacheKey = toCacheItem(key);
+        Object cacheValue = toCacheItem(value);
+        Node<K, V> newNode = new Node<>(key, cacheValue);
+        Node<K, V> oldNode = cache.put(cacheKey, newNode);
         if (oldNode != null) {
             newNode.updateTimestamp();
-            return oldNode.value;
+            return fromCacheItem(oldNode.value);
         } else {
             scheduleCleanup();
             return null;
@@ -109,9 +134,10 @@ public V put(K key, V value) {
 
     @Override
     public V remove(Object key) {
-        Node<K, V> node = cache.remove(key);
+        Object cacheKey = toCacheItem(key);
+        Node<K, V> node = cache.remove(cacheKey);
         if (node != null) {
-            return node.value;
+            return fromCacheItem(node.value);
         }
         return null;
     }
@@ -128,13 +154,14 @@ public int size() {
 
     @Override
     public boolean containsKey(Object key) {
-        return cache.containsKey(key);
+        return cache.containsKey(toCacheItem(key));
     }
 
     @Override
     public boolean containsValue(Object value) {
+        Object cacheValue = toCacheItem(value);
         for (Node<K, V> node : cache.values()) {
-            if (node.value.equals(value)) {
+            if (node.value.equals(cacheValue)) {
                 return true;
             }
         }
@@ -145,21 +172,25 @@ public boolean containsValue(Object value) {
     public Set<Map.Entry<K, V>> entrySet() {
         Set<Map.Entry<K, V>> entrySet = Collections.newSetFromMap(new ConcurrentHashMap<>());
         for (Node<K, V> node : cache.values()) {
-            entrySet.add(new AbstractMap.SimpleEntry<>(node.key, node.value));
+            entrySet.add(new AbstractMap.SimpleEntry<>(fromCacheItem(node.key), fromCacheItem(node.value)));
         }
         return entrySet;
     }
 
     @Override
     public Set<K> keySet() {
-        return Collections.unmodifiableSet(cache.keySet());
+        Set<K> keySet = Collections.newSetFromMap(new ConcurrentHashMap<>());
+        for (Node<K, V> node : cache.values()) {
+            keySet.add(fromCacheItem(node.key));
+        }
+        return Collections.unmodifiableSet(keySet);
     }
 
     @Override
     public Collection<V> values() {
         Collection<V> values = new ArrayList<>();
         for (Node<K, V> node : cache.values()) {
-            values.add(node.value);
+            values.add(fromCacheItem(node.value));
         }
         return Collections.unmodifiableCollection(values);
     }
@@ -176,8 +207,8 @@ public boolean equals(Object o) {
     public int hashCode() {
         int hashCode = 1;
         for (Node<K, V> node : cache.values()) {
-            hashCode = 31 * hashCode + (node.key == null ? 0 : node.key.hashCode());
-            hashCode = 31 * hashCode + (node.value == null ? 0 : node.value.hashCode());
+            hashCode = 31 * hashCode + (fromCacheItem(node.key) == null ? 0 : fromCacheItem(node.key).hashCode());
+            hashCode = 31 * hashCode + (fromCacheItem(node.value) == null ? 0 : fromCacheItem(node.value).hashCode());
         }
         return hashCode;
     }
@@ -187,7 +218,7 @@ public String toString() {
         StringBuilder sb = new StringBuilder();
         sb.append("{");
         for (Node<K, V> node : cache.values()) {
-            sb.append(node.key).append("=").append(node.value).append(", ");
+            sb.append((K) fromCacheItem(node.key)).append("=").append((V) fromCacheItem(node.value)).append(", ");
         }
         if (sb.length() > 1) {
             sb.setLength(sb.length() - 2); // Remove trailing comma and space
@@ -200,7 +231,17 @@ public String toString() {
 
     private synchronized void scheduleCleanup() {
         if (cache.size() > capacity && !cleanupScheduled) {
             cleanupScheduled = true;
-            executorService.schedule(this::cleanup, DELAY, TimeUnit.MILLISECONDS);
+            executorService.schedule(this::cleanup, cleanupDelayMillis, TimeUnit.MILLISECONDS);
         }
     }
+
+    // Converts a key or value to a cache-compatible item
+    private Object toCacheItem(Object item) {
+        return item == null ? NULL_ITEM : item;
+    }
+
+    // Converts a cache-compatible item to the original key or value
+    private <T> T fromCacheItem(Object cacheItem) {
+        return cacheItem == NULL_ITEM ? null : (T) cacheItem;
+    }
 }
\ No newline at end of file
diff --git a/src/test/java/com/cedarsoftware/util/LRUCacheTest.java b/src/test/java/com/cedarsoftware/util/LRUCacheTest.java
index 39ba819c..0de3693a 100644
--- a/src/test/java/com/cedarsoftware/util/LRUCacheTest.java
+++ b/src/test/java/com/cedarsoftware/util/LRUCacheTest.java
@@ -294,6 +294,10 @@ void testEquals() {
         cache2.put(4, "D");
         assertFalse(cache1.equals(cache2));
         assertFalse(cache2.equals(cache1));
+
+        assertFalse(cache1.equals(Boolean.TRUE));
+
+        assertTrue(cache1.equals(cache1));
     }
 
     @Test
@@ -324,6 +328,10 @@ void testToString() {
         assert lruCache.toString().contains("1=A");
         assert lruCache.toString().contains("2=B");
         assert lruCache.toString().contains("3=C");
+
+        Map cache = new LRUCache(100);
+        assert cache.toString().equals("{}");
+        assert cache.size() == 0;
     }
 
     @Test
@@ -412,4 +420,43 @@ void testCacheBlast() {
 
         assertEquals(1000, lruCache.size());
     }
+
+    @Test
+    void testNullValue()
+    {
+        lruCache = new LRUCache<>(100, 1);
+        lruCache.put(1, null);
+        assert lruCache.containsKey(1);
+        assert lruCache.containsValue(null);
+        assert lruCache.toString().contains("1=null");
+        assert lruCache.hashCode() != 0;
+    }
+
+    @Test
+    void testNullKey()
+    {
+        lruCache = new LRUCache<>(100, 1);
+        lruCache.put(null, "true");
+        assert lruCache.containsKey(null);
+        assert lruCache.containsValue("true");
+        assert lruCache.toString().contains("null=true");
+        assert lruCache.hashCode() != 0;
+    }
+
+    @Test
+    void testNullKeyValue()
+    {
+        lruCache = new LRUCache<>(100, 1);
+        lruCache.put(null, null);
+        assert lruCache.containsKey(null);
+        assert lruCache.containsValue(null);
+        assert lruCache.toString().contains("null=null");
+        assert lruCache.hashCode() != 0;
+
+        LRUCache cache1 = new LRUCache<>(3);
+        cache1.put(null, null);
+        LRUCache cache2 = new LRUCache<>(3);
+        cache2.put(null, null);
+        assert cache1.equals(cache2);
+    }
 }
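
Reviewer note: the snippet below is a quick usage sketch, not part of the patch. It exercises only the public API shown in the diff above (the new two-argument constructor and the null key/value support). The class name LRUCacheNullDemo and the 100 ms sleep are illustrative choices for this note, not anything the library requires, and the final size check is timing-dependent on the background cleanup task.

import com.cedarsoftware.util.LRUCache;

import java.util.Map;
import java.util.concurrent.TimeUnit;

public class LRUCacheNullDemo {
    public static void main(String[] args) throws InterruptedException {
        // Second constructor argument is the cleanup delay in milliseconds (new in this patch).
        Map<Integer, String> cache = new LRUCache<>(3, 10);

        // Null keys and null values are now legal; internally they are stored via a sentinel object.
        cache.put(null, "nullKey");
        cache.put(1, null);
        cache.put(2, "two");

        System.out.println(cache.containsKey(null));    // true
        System.out.println(cache.containsValue(null));  // true
        System.out.println(cache.get(1));               // null (the stored value, key 1 is present)

        // Exceeding capacity is allowed briefly; the scheduled task trims the cache back to 3 entries.
        cache.put(3, "three");
        cache.put(4, "four");
        TimeUnit.MILLISECONDS.sleep(100);               // give the background cleanup time to run
        System.out.println(cache.size() <= 3);          // true once the cleanup has run
    }
}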