Skip to content

Commit

Permalink
Changes made to fix GC nepotism (potential memory leak due to GC pointers between old/new partitions) and simplified the thread scheduling to using only a scheduler (that is created if needed) and shutdown (only if created). Appropriate document updates and version bump made.
Browse files Browse the repository at this point in the history
  • Loading branch information
jdereg committed Jun 23, 2024
1 parent 8973e68 commit da7ba4e
Show file tree
Hide file tree
Showing 6 changed files with 426 additions and 416 deletions.
4 changes: 2 additions & 2 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -25,15 +25,15 @@ Both of these features ensure that our library can be seamlessly integrated into
To include in your project:
##### Gradle
```groovy
implementation 'com.cedarsoftware:java-util:2.12.0'
implementation 'com.cedarsoftware:java-util:2.13.0'
```

##### Maven
```xml
<dependency>
<groupId>com.cedarsoftware</groupId>
<artifactId>java-util</artifactId>
<version>2.12.0</version>
<version>2.13.0</version>
</dependency>
```
---
Expand Down
783 changes: 394 additions & 389 deletions changelog.md

Large diffs are not rendered by default.

2 changes: 1 addition & 1 deletion pom.xml
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@
<groupId>com.cedarsoftware</groupId>
<artifactId>java-util</artifactId>
<packaging>bundle</packaging>
<version>2.12.0</version>
<version>2.13.0</version>
<description>Java Utilities</description>
<url>https://github.com/jdereg/java-util</url>

Expand Down
14 changes: 5 additions & 9 deletions src/main/java/com/cedarsoftware/util/LRUCache.java
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,6 @@
import java.util.Collection;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ForkJoinPool;
import java.util.concurrent.ScheduledExecutorService;

import com.cedarsoftware.util.cache.LockingLRUCacheStrategy;
Expand Down Expand Up @@ -83,7 +82,7 @@ public LRUCache(int capacity) {
*/
public LRUCache(int capacity, StrategyType strategyType) {
if (strategyType == StrategyType.THREADED) {
strategy = new ThreadedLRUCacheStrategy<>(capacity, 10, null, null);
strategy = new ThreadedLRUCacheStrategy<>(capacity, 10, null);
} else if (strategyType == StrategyType.LOCKING) {
strategy = new LockingLRUCacheStrategy<>(capacity);
} else {
Expand All @@ -101,15 +100,12 @@ public LRUCache(int capacity, StrategyType strategyType) {
* @param cleanupDelayMillis int number of milliseconds after a put() call when a scheduled task should run to
* trim the cache to no more than capacity. The default is 10ms.
* @param scheduler ScheduledExecutorService which can be null, in which case one will be created for you, or you
can supply your own. If one is created for you, when shutdown() is called, it will be shutdown
* for you.
* @param cleanupPool ForkJoinPool can be null, in which case the common ForkJoinPool will be used, or you can
* supply your own. It will not be terminated when shutdown() is called regardless of whether
* it was supplied or the common ForkJoinPool was used.
* @see com.cedarsoftware.util.cache.ThreadedLRUCacheStrategy
*/
public LRUCache(int capacity, int cleanupDelayMillis, ScheduledExecutorService scheduler) {
    // Delegates to the threaded strategy; a null scheduler means the strategy
    // creates (and later owns shutting down) its own single-thread scheduler.
    strategy = new ThreadedLRUCacheStrategy<>(capacity, cleanupDelayMillis, scheduler);
}

@Override
Expand Down Expand Up @@ -199,4 +195,4 @@ public void shutdown() {
((ThreadedLRUCacheStrategy<K, V>) strategy).shutdown();
}
}
}
}
Original file line number Diff line number Diff line change
Expand Up @@ -64,6 +64,10 @@ public LockingLRUCacheStrategy(int capacity) {
}

private void moveToHead(Node<K, V> node) {
    // Only re-link nodes that are still in the list; an evicted (detached)
    // node has null prev/next links and must not be reordered.
    if (node.prev != null && node.next != null) {
        removeNode(node);
        addToHead(node);
    }
}
Expand All @@ -76,13 +80,19 @@ private void addToHead(Node<K, V> node) {
}

private void removeNode(Node<K, V> node) {
    // Unlink node from the doubly-linked list. The diff-merged text ran the
    // unlink unconditionally before the guard, which would NPE on a node that
    // was already detached (prev/next null); keep only the guarded form.
    if (node.prev != null && node.next != null) {
        node.prev.next = node.next;
        node.next.prev = node.prev;
    }
}

private Node<K, V> removeTail() {
    // Detach the least-recently-used node (the one just before the tail
    // sentinel). The diff-merged text called removeNode(node) both before and
    // inside the guard, double-unlinking and bypassing the head check; keep
    // only the guarded call.
    Node<K, V> node = tail.prev;
    if (node != head) {
        removeNode(node);
        node.prev = null; // Null out links to avoid GC nepotism
        node.next = null; // Null out links to avoid GC nepotism
    }
    return node;
}

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -11,13 +11,10 @@
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.Executors;
import java.util.concurrent.ForkJoinPool;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;

import com.cedarsoftware.util.LRUCache;

/**
* This class provides a thread-safe Least Recently Used (LRU) cache API that will evict the least recently used items,
* once a threshold is met. It implements the <code>Map</code> interface for convenience.
Expand Down Expand Up @@ -52,7 +49,6 @@ public class ThreadedLRUCacheStrategy<K, V> implements Map<K, V> {
private final ConcurrentMap<Object, Node<K>> cache;
private final AtomicBoolean cleanupScheduled = new AtomicBoolean(false);
private final ScheduledExecutorService scheduler;
private final ForkJoinPool cleanupPool;
private final boolean isDefaultScheduler;

private static class Node<K> {
Expand Down Expand Up @@ -83,13 +79,15 @@ void updateTimestamp() {
* a default scheduler is created for you. Calling the .shutdown() method will shutdown
* the schedule only if you passed in null (using default). If you pass one in, it is
* your responsibility to terminate the scheduler.
* @param cleanupPool ForkJoinPool for executing cleanup tasks. Can be null, in which case the common
* ForkJoinPool is used. When shutdown() is called, nothing is done to the ForkJoinPool.
*/
public ThreadedLRUCacheStrategy(int capacity, int cleanupDelayMillis, ScheduledExecutorService scheduler, ForkJoinPool cleanupPool) {
this.isDefaultScheduler = scheduler == null;
this.scheduler = isDefaultScheduler ? Executors.newScheduledThreadPool(1) : scheduler;
this.cleanupPool = cleanupPool == null ? ForkJoinPool.commonPool() : cleanupPool;
public ThreadedLRUCacheStrategy(int capacity, int cleanupDelayMillis, ScheduledExecutorService scheduler) {
if (scheduler == null) {
this.scheduler = Executors.newScheduledThreadPool(1);
isDefaultScheduler = true;
} else {
this.scheduler = scheduler;
isDefaultScheduler = false;
}
this.capacity = capacity;
this.cache = new ConcurrentHashMap<>(capacity);
this.cleanupDelayMillis = cleanupDelayMillis;
Expand All @@ -108,6 +106,7 @@ private void cleanup() {
}
}
cleanupScheduled.set(false); // Reset the flag after cleanup

// Check if another cleanup is needed after the current one
if (cache.size() > capacity) {
scheduleCleanup();
Expand Down Expand Up @@ -253,7 +252,7 @@ public String toString() {
// Schedule a delayed cleanup pass on the scheduler, at most one at a time.
private void scheduleCleanup() {
    // compareAndSet ensures only one cleanup is pending; cleanup() resets the
    // flag when it finishes. The diff-merged text also kept the pre-commit
    // line that bounced through cleanupPool (a field this commit removed),
    // which would double-schedule; keep only the direct scheduler call.
    if (cleanupScheduled.compareAndSet(false, true)) {
        scheduler.schedule(this::cleanup, cleanupDelayMillis, TimeUnit.MILLISECONDS);
    }
}

Expand Down

0 comments on commit da7ba4e

Please sign in to comment.