From 8ea699f57cdf6f85e8602461d48b80ae7c85b46b Mon Sep 17 00:00:00 2001
From: John DeRegnaucourt
Date: Sat, 22 Jun 2024 22:12:20 -0400
Subject: [PATCH] best working version thus far.

---
 .../java/com/cedarsoftware/util/LRUCache.java | 62 +++++++++++--------
 1 file changed, 36 insertions(+), 26 deletions(-)

diff --git a/src/main/java/com/cedarsoftware/util/LRUCache.java b/src/main/java/com/cedarsoftware/util/LRUCache.java
index 5fb5254d..c6ddeaca 100644
--- a/src/main/java/com/cedarsoftware/util/LRUCache.java
+++ b/src/main/java/com/cedarsoftware/util/LRUCache.java
@@ -13,6 +13,7 @@
 import java.util.concurrent.Executors;
 import java.util.concurrent.ScheduledExecutorService;
 import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
 
 /**
  * This class provides a thread-safe Least Recently Used (LRU) cache API that will evict the least recently used items,
@@ -44,12 +45,12 @@
 public class LRUCache<K, V> extends AbstractMap<K, V> implements Map<K, V> {
     private static final ScheduledExecutorService executorService = Executors.newSingleThreadScheduledExecutor();
     private static final Object NULL_ITEM = new Object(); // Sentinel value for null keys and values
-    private final long cleanupDelayMillis; // 10ms delay
+    private final long cleanupDelayMillis;
     private final int capacity;
-    private final ConcurrentMap<Object, Node<K, V>> cache;
-    private volatile boolean cleanupScheduled = false;
+    private final ConcurrentMap<Object, Node<K>> cache;
+    private final AtomicBoolean cleanupScheduled = new AtomicBoolean(false);
 
-    private static class Node<K, V> {
+    private static class Node<K> {
         final K key;
         volatile Object value;
         volatile long timestamp;
@@ -91,15 +92,15 @@ public LRUCache(int capacity, int cleanupDelayMillis) {
     private void cleanup() {
         int size = cache.size();
         if (size > capacity) {
-            List<Node<K, V>> nodes = new ArrayList<>(cache.values());
+            List<Node<K>> nodes = new ArrayList<>(cache.values());
             nodes.sort(Comparator.comparingLong(node -> node.timestamp));
             int nodesToRemove = size - capacity;
             for (int i = 0; i < nodesToRemove; i++) {
-                Node<K, V> node = nodes.get(i);
+                Node<K> node = nodes.get(i);
                 cache.remove(toCacheItem(node.key), node);
             }
         }
-        cleanupScheduled = false; // Reset the flag after cleanup
+        cleanupScheduled.set(false); // Reset the flag after cleanup
         // Check if another cleanup is needed after the current one
         if (cache.size() > capacity) {
             scheduleCleanup();
         }
     }
@@ -109,7 +110,7 @@ private void cleanup() {
     @Override
     public V get(Object key) {
         Object cacheKey = toCacheItem(key);
-        Node<K, V> node = cache.get(cacheKey);
+        Node<K> node = cache.get(cacheKey);
         if (node != null) {
             node.updateTimestamp();
             return fromCacheItem(node.value);
@@ -121,21 +122,21 @@ public V get(Object key) {
     public V put(K key, V value) {
         Object cacheKey = toCacheItem(key);
         Object cacheValue = toCacheItem(value);
-        Node<K, V> newNode = new Node<>(key, cacheValue);
-        Node<K, V> oldNode = cache.put(cacheKey, newNode);
+        Node<K> newNode = new Node<>(key, cacheValue);
+        Node<K> oldNode = cache.put(cacheKey, newNode);
         if (oldNode != null) {
             newNode.updateTimestamp();
             return fromCacheItem(oldNode.value);
-        } else {
+        } else if (size() > capacity) {
             scheduleCleanup();
-            return null;
         }
+        return null;
     }
 
     @Override
     public V remove(Object key) {
         Object cacheKey = toCacheItem(key);
-        Node<K, V> node = cache.remove(cacheKey);
+        Node<K> node = cache.remove(cacheKey);
         if (node != null) {
             return fromCacheItem(node.value);
         }
@@ -160,7 +161,7 @@ public boolean containsKey(Object key) {
     @Override
     public boolean containsValue(Object value) {
         Object cacheValue = toCacheItem(value);
-        for (Node<K, V> node : cache.values()) {
+        for (Node<K> node : cache.values()) {
             if (node.value.equals(cacheValue)) {
                 return true;
             }
@@ -171,7 +172,7 @@ public boolean containsValue(Object value) {
     @Override
     public Set<Map.Entry<K, V>> entrySet() {
         Set<Map.Entry<K, V>> entrySet = Collections.newSetFromMap(new ConcurrentHashMap<>());
-        for (Node<K, V> node : cache.values()) {
+        for (Node<K> node : cache.values()) {
             entrySet.add(new AbstractMap.SimpleEntry<>(fromCacheItem(node.key), fromCacheItem(node.value)));
         }
         return entrySet;
@@ -180,7 +181,7 @@ public Set<Map.Entry<K, V>> entrySet() {
     @Override
     public Set<K> keySet() {
         Set<K> keySet = Collections.newSetFromMap(new ConcurrentHashMap<>());
-        for (Node<K, V> node : cache.values()) {
+        for (Node<K> node : cache.values()) {
             keySet.add(fromCacheItem(node.key));
         }
         return Collections.unmodifiableSet(keySet);
@@ -189,7 +190,7 @@ public Set<K> keySet() {
     @Override
     public Collection<V> values() {
         Collection<V> values = new ArrayList<>();
-        for (Node<K, V> node : cache.values()) {
+        for (Node<K> node : cache.values()) {
             values.add(fromCacheItem(node.value));
         }
         return Collections.unmodifiableCollection(values);
@@ -206,19 +207,22 @@ public boolean equals(Object o) {
     @Override
     public int hashCode() {
         int hashCode = 1;
-        for (Node<K, V> node : cache.values()) {
-            hashCode = 31 * hashCode + (fromCacheItem(node.key) == null ? 0 : fromCacheItem(node.key).hashCode());
-            hashCode = 31 * hashCode + (fromCacheItem(node.value) == null ? 0 : fromCacheItem(node.value).hashCode());
+        for (Node<K> node : cache.values()) {
+            Object key = fromCacheItem(node.key);
+            Object value = fromCacheItem(node.value);
+            hashCode = 31 * hashCode + (key == null ? 0 : key.hashCode());
+            hashCode = 31 * hashCode + (value == null ? 0 : value.hashCode());
         }
         return hashCode;
     }
 
     @Override
+    @SuppressWarnings("unchecked")
     public String toString() {
         StringBuilder sb = new StringBuilder();
         sb.append("{");
-        for (Node<K, V> node : cache.values()) {
-            sb.append((K) fromCacheItem(node.key)).append("=").append((V)fromCacheItem(node.value)).append(", ");
+        for (Node<K> node : cache.values()) {
+            sb.append((K)fromCacheItem(node.key)).append("=").append((V)fromCacheItem(node.value)).append(", ");
         }
         if (sb.length() > 1) {
             sb.setLength(sb.length() - 2); // Remove trailing comma and space
@@ -228,10 +232,15 @@ public String toString() {
     }
 
     // Schedule a delayed cleanup
-    private synchronized void scheduleCleanup() {
-        if (cache.size() > capacity && !cleanupScheduled) {
-            cleanupScheduled = true;
-            executorService.schedule(this::cleanup, cleanupDelayMillis, TimeUnit.MILLISECONDS);
+    private void scheduleCleanup() {
+        if (cache.size() > capacity && cleanupScheduled.compareAndSet(false, true)) {
+            executorService.schedule(() -> {
+                cleanup();
+                // Check if another cleanup is needed after the current one
+                if (cache.size() > capacity) {
+                    scheduleCleanup();
+                }
+            }, cleanupDelayMillis, TimeUnit.MILLISECONDS);
         }
     }
 
@@ -241,6 +250,7 @@ private Object toCacheItem(Object item) {
     }
 
     // Converts a cache-compatible item to the original key or value
+    @SuppressWarnings("unchecked")
    private <T> T fromCacheItem(Object cacheItem) {
         return cacheItem == NULL_ITEM ? null : (T) cacheItem;
     }
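For context, a minimal usage sketch of the class this patch modifies. It assumes the two-argument constructor visible in the hunk context, LRUCache(int capacity, int cleanupDelayMillis), and relies only on behavior shown in the diff: put() schedules an asynchronous cleanup once size() exceeds capacity, so the oldest entries are trimmed shortly after the cleanup delay rather than synchronously. The demo class name and timings are illustrative, not part of the patch.

import com.cedarsoftware.util.LRUCache;

public class LRUCacheDemo {
    public static void main(String[] args) throws InterruptedException {
        // Capacity of 3; cleanup runs roughly 10 ms after the cache grows past capacity.
        LRUCache<String, Integer> cache = new LRUCache<>(3, 10);

        cache.put("a", 1);
        cache.put("b", 2);
        cache.put("c", 3);
        cache.get("a");        // refresh "a" so it is no longer the least recently used entry
        cache.put("d", 4);     // size() > capacity, so a delayed cleanup is scheduled

        Thread.sleep(100);     // give the background task time to evict the oldest entry

        System.out.println(cache.size());           // expected: 3
        System.out.println(cache.containsKey("b")); // expected: false ("b" was least recently used)
    }
}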