From 9adab4acd70d7413803c430d5eca543fa099516a Mon Sep 17 00:00:00 2001 From: John DeRegnaucourt Date: Sun, 23 Jun 2024 16:57:52 -0400 Subject: [PATCH] Significantly improved Javadocs --- .../java/com/cedarsoftware/util/LRUCache.java | 45 +++++++++++++++++++ .../util/cache/LockingLRUCacheStrategy.java | 13 +++++- .../util/cache/ThreadedLRUCacheStrategy.java | 16 ++++--- 3 files changed, 65 insertions(+), 9 deletions(-) diff --git a/src/main/java/com/cedarsoftware/util/LRUCache.java b/src/main/java/com/cedarsoftware/util/LRUCache.java index 002c0a40..120f7258 100644 --- a/src/main/java/com/cedarsoftware/util/LRUCache.java +++ b/src/main/java/com/cedarsoftware/util/LRUCache.java @@ -9,6 +9,51 @@ import com.cedarsoftware.util.cache.LockingLRUCacheStrategy; import com.cedarsoftware.util.cache.ThreadedLRUCacheStrategy; +/** + * This class provides a thread-safe Least Recently Used (LRU) cache API that will evict the least recently used items, + * once a threshold is met. It implements the Map interface for convenience. + *

+ * This class provides two implementation strategies: a locking approach and a threaded approach. + *

+ *

+ * The Locking strategy allows for O(1) access for get(), put(), and remove(). For put(), remove(), and many other + * methods, a write-lock is obtained. For get(), it attempts to lock but does not lock unless it can obtain it right away. + * This 'try-lock' approach ensures that the get() API is never blocking, but it also means that the LRU order is not + * perfectly maintained under heavy load. + *

+ * The Threaded strategy allows for O(1) access for get(), put(), and remove() without blocking. It uses a ConcurrentHashMap + * internally. To ensure that the capacity is honored, whenever put() is called, a thread (from a thread pool) is tasked + * with cleaning up items above the capacity threshold. This means that the cache may temporarily exceed its capacity, but + * it will soon be trimmed back to the capacity limit by the scheduled thread. + *

+ * LRUCache supports null for both keys and values.
 + *

+ * @see LockingLRUCacheStrategy + * @see ThreadedLRUCacheStrategy + * @see LRUCache.StrategyType + *

+ * @author John DeRegnaucourt (jdereg@gmail.com) + *
+ * Copyright (c) Cedar Software LLC + *

+ * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *

+ * License + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ public class LRUCache implements Map { private final Map strategy; diff --git a/src/main/java/com/cedarsoftware/util/cache/LockingLRUCacheStrategy.java b/src/main/java/com/cedarsoftware/util/cache/LockingLRUCacheStrategy.java index 8f61c30d..ea555282 100644 --- a/src/main/java/com/cedarsoftware/util/cache/LockingLRUCacheStrategy.java +++ b/src/main/java/com/cedarsoftware/util/cache/LockingLRUCacheStrategy.java @@ -10,9 +10,16 @@ /** * This class provides a thread-safe Least Recently Used (LRU) cache API that will evict the least recently used items, - * once a threshold is met. It implements the Map interface for convenience. + * once a threshold is met. It implements the Map interface for convenience. *

- * LRUCache supports null for key or value. + * The Locking strategy allows for O(1) access for get(), put(), and remove(). For put(), remove(), and many other + * methods, a write-lock is obtained. For get(), it attempts to lock but does not lock unless it can obtain it right away. + * This 'try-lock' approach ensures that the get() API is never blocking, but it also means that the LRU order is not + * perfectly maintained under heavy load. + *

+ * LRUCache supports null for both keys and values.
 + *

+ * Special Thanks: This implementation was inspired by insights and suggestions from Ben Manes. *

* @author John DeRegnaucourt (jdereg@gmail.com) *
@@ -89,6 +96,8 @@ public V get(Object key) { if (node == null) { return null; } + + // Ben Manes suggestion - use exclusive 'try-lock' if (lock.tryLock()) { try { moveToHead(node); diff --git a/src/main/java/com/cedarsoftware/util/cache/ThreadedLRUCacheStrategy.java b/src/main/java/com/cedarsoftware/util/cache/ThreadedLRUCacheStrategy.java index ba87fd45..944944e5 100644 --- a/src/main/java/com/cedarsoftware/util/cache/ThreadedLRUCacheStrategy.java +++ b/src/main/java/com/cedarsoftware/util/cache/ThreadedLRUCacheStrategy.java @@ -16,16 +16,18 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; +import com.cedarsoftware.util.LRUCache; + /** * This class provides a thread-safe Least Recently Used (LRU) cache API that will evict the least recently used items, - * once a threshold is met. It implements the Map interface for convenience. + * once a threshold is met. It implements the Map interface for convenience. *

- * LRUCache is thread-safe via usage of ConcurrentHashMap for internal storage. The .get(), .remove(), and .put() APIs - * operate in O(1) without blocking. When .put() is called, a background cleanup task is scheduled to ensure - * {@code cache.size <= capacity}. This maintains cache size to capacity, even during bursty loads. It is not immediate; - * the LRUCache can exceed the capacity during a rapid load; however, it will quickly reduce to max capacity. + * The Threaded strategy allows for O(1) access for get(), put(), and remove() without blocking. It uses a ConcurrentHashMap + * internally. To ensure that the capacity is honored, whenever put() is called, a thread (from a thread pool) is tasked + * with cleaning up items above the capacity threshold. This means that the cache may temporarily exceed its capacity, but + * it will soon be trimmed back to the capacity limit by the scheduled thread. *

- * LRUCache supports null for key or value.
 + * LRUCache supports null for both keys and values.
 *

* @author John DeRegnaucourt (jdereg@gmail.com) *
@@ -73,7 +75,7 @@ void updateTimestamp() { * Create a LRUCache with the maximum capacity of 'capacity.' Note, the LRUCache could temporarily exceed the * capacity; however, it will quickly reduce to that amount. This time is configurable via the cleanupDelay * parameter and custom scheduler and executor services. - * + * * @param capacity int maximum size for the LRU cache. * @param cleanupDelayMillis int milliseconds before scheduling a cleanup (reduction to capacity if the cache currently * exceeds it).