Skip to content

Commit

Permalink
Implement expiration mechanism support in lrucache package
Browse files Browse the repository at this point in the history
  • Loading branch information
vasayxtx committed Jan 24, 2025
1 parent 35c58b4 commit e2af2ed
Show file tree
Hide file tree
Showing 8 changed files with 262 additions and 22 deletions.
4 changes: 2 additions & 2 deletions .github/workflows/lint.yml
Original file line number Diff line number Diff line change
Expand Up @@ -27,7 +27,7 @@ jobs:
cache: false

- name: Run GolangCI-Lint
uses: golangci/golangci-lint-action@v3
uses: golangci/golangci-lint-action@v6
with:
version: v1.56.1
version: v1.61.0
args: --timeout=5m
3 changes: 2 additions & 1 deletion .gitignore
Original file line number Diff line number Diff line change
@@ -1,3 +1,4 @@
.idea/
.vscode/
vendor/
vendor/
coverage.out
3 changes: 3 additions & 0 deletions .golangci.yml
Original file line number Diff line number Diff line change
Expand Up @@ -34,6 +34,9 @@ linters-settings:
rules:
- name: "unused-parameter"
disabled: true
gosec:
excludes:
- G115 # integer overflow conversion

linters:
disable-all: true
Expand Down
14 changes: 12 additions & 2 deletions lrucache/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,7 @@ The `lrucache` package provides an in-memory cache with an LRU (Least Recently U

- **LRU Eviction Policy**: Automatically removes the least recently used items when the cache reaches its maximum size.
- **Prometheus Metrics**: Collects and exposes metrics to monitor cache usage and performance.
- **Expiration**: Supports setting TTL (Time To Live) for entries. Expired entries are removed during cleanup or when accessed.

## Usage

Expand Down Expand Up @@ -47,7 +48,8 @@ func Example() {
// LRU cache for users.
const aliceUUID = "966971df-a592-4e7e-a309-52501016fa44"
const bobUUID = "848adf28-84c1-4259-97a2-acba7cf5c0b6"
usersCache, err := lrucache.New[string, User](100_000, promMetrics.MustCurryWith(prometheus.Labels{"entry_type": "user"}))
usersCache, err := lrucache.New[string, User](100_000,
promMetrics.MustCurryWith(prometheus.Labels{"entry_type": "user"}))
if err != nil {
log.Fatal(err)
}
Expand All @@ -63,10 +65,18 @@ func Example() {
// LRU cache for posts.
const post1UUID = "823e50c7-984d-4de3-8a09-92fa21d3cc3b"
const post2UUID = "24707009-ddf6-4e88-bd51-84ae236b7fda"
postsCache, err := lrucache.New[string, Post](1_000, promMetrics.MustCurryWith(prometheus.Labels{"entry_type": "note"}))
postsCache, err := lrucache.NewWithOpts[string, Post](1_000,
promMetrics.MustCurryWith(prometheus.Labels{"entry_type": "note"}), lrucache.Options{
DefaultTTL: 5 * time.Minute, // Expired entries are removed during cleanup (see RunPeriodicCleanup method) or when accessed.
})
if err != nil {
log.Fatal(err)
}

cleanupCtx, cleanupCancel := context.WithCancel(context.Background())
defer cleanupCancel()
go postsCache.RunPeriodicCleanup(cleanupCtx, 10*time.Minute) // Run cleanup every 10 minutes.

postsCache.Add(post1UUID, Post{post1UUID, "Lorem ipsum dolor sit amet..."})
if post, found := postsCache.Get(post1UUID); found {
fmt.Printf("Post: %s, %s\n", post.UUID, post.Text)
Expand Down
117 changes: 103 additions & 14 deletions lrucache/cache.go
Original file line number Diff line number Diff line change
Expand Up @@ -8,39 +8,64 @@ package lrucache

import (
	"container/list"
	"context"
	"errors"
	"fmt"
	"sync"
	"time"
)

type cacheEntry[K comparable, V any] struct {
key K
value V
key K
value V
expiresAt time.Time
}

// LRUCache represents an LRU cache with eviction mechanism and Prometheus metrics.
// The zero value is not usable; construct instances with New or NewWithOpts.
type LRUCache[K comparable, V any] struct {
	// maxEntries is the capacity limit; adding beyond it evicts the least recently used entry.
	maxEntries int

	// defaultTTL is applied by Add/GetOrAdd; zero means entries never expire.
	defaultTTL time.Duration

	mu      sync.RWMutex        // guards lruList and cache
	lruList *list.List          // recency order: front = most recently used
	cache   map[K]*list.Element // map of cache entries, value is a lruList element

	// metricsCollector records hits/misses/evictions; never nil (a disabled
	// implementation is substituted when the caller passes nil).
	metricsCollector MetricsCollector
}

// New creates a new LRUCache with the provided maximum number of entries.
// Options represents options for the cache.
// The zero value configures a cache whose entries never expire.
type Options struct {
	// DefaultTTL is the default TTL for the cache entries.
	// Zero means no expiration; negative values are rejected by NewWithOpts.
	// Please note that expired entries are not removed immediately,
	// but only when they are accessed or during periodic cleanup (see RunPeriodicCleanup).
	DefaultTTL time.Duration
}

// New creates a new LRUCache holding at most maxEntries items.
// It is shorthand for NewWithOpts with zero Options, so entries never expire.
// metricsCollector may be nil, in which case metrics are disabled.
func New[K comparable, V any](maxEntries int, metricsCollector MetricsCollector) (*LRUCache[K, V], error) {
	var defaultOpts Options
	return NewWithOpts[K, V](maxEntries, metricsCollector, defaultOpts)
}

// NewWithOpts creates a new LRUCache with the provided maximum number of entries, metrics collector, and options.
// Metrics collector is used to collect statistics about cache usage.
// It can be nil, in this case, metrics will be disabled.
//
// It returns an error if maxEntries is not positive or if opts.DefaultTTL is negative.
func NewWithOpts[K comparable, V any](maxEntries int, metricsCollector MetricsCollector, opts Options) (*LRUCache[K, V], error) {
	if maxEntries <= 0 {
		// errors.New instead of fmt.Errorf: no formatting verbs are used (staticcheck S1039).
		return nil, errors.New("maxEntries must be greater than 0")
	}
	if opts.DefaultTTL < 0 {
		return nil, errors.New("defaultTTL must be greater or equal to 0 (no expiration)")
	}
	if metricsCollector == nil {
		// Substitute a no-op collector so callers never have to nil-check metrics.
		metricsCollector = disabledMetrics{}
	}

	return &LRUCache[K, V]{
		maxEntries:       maxEntries,
		lruList:          list.New(),
		cache:            make(map[K]*list.Element),
		metricsCollector: metricsCollector,
		defaultTTL:       opts.DefaultTTL,
	}, nil
}

Expand All @@ -54,26 +79,54 @@ func (c *LRUCache[K, V]) Get(key K) (value V, ok bool) {
// Add adds a value to the cache with the provided key and type.
// If the cache is full, the oldest entry will be removed.
func (c *LRUCache[K, V]) Add(key K, value V) {
c.AddWithTTL(key, value, c.defaultTTL)
}

// AddWithTTL adds a value to the cache with the provided key, type, and TTL.
// If the cache is full, the oldest entry will be removed.
// Please note that expired entries are not removed immediately,
// but only when they are accessed or during periodic cleanup (see RunPeriodicCleanup).
func (c *LRUCache[K, V]) AddWithTTL(key K, value V, ttl time.Duration) {
var expiresAt time.Time
if ttl > 0 {
expiresAt = time.Now().Add(ttl)
}

c.mu.Lock()
defer c.mu.Unlock()

if elem, ok := c.cache[key]; ok {
c.lruList.MoveToFront(elem)
elem.Value = &cacheEntry[K, V]{key: key, value: value}
elem.Value = &cacheEntry[K, V]{key: key, value: value, expiresAt: expiresAt}
return
}
c.addNew(key, value)
c.addNew(key, value, expiresAt)
}

// GetOrAdd returns a value from the cache by the provided key.
// If the key does not exist, it adds a new value to the cache.
func (c *LRUCache[K, V]) GetOrAdd(key K, valueProvider func() V) (value V, exists bool) {
return c.GetOrAddWithTTL(key, valueProvider, c.defaultTTL)
}

// GetOrAddWithTTL returns a value from the cache by the provided key.
// If the key does not exist, it adds a new value to the cache with the provided TTL.
// Please note that expired entries are not removed immediately,
// but only when they are accessed or during periodic cleanup (see RunPeriodicCleanup).
func (c *LRUCache[K, V]) GetOrAddWithTTL(key K, valueProvider func() V, ttl time.Duration) (value V, exists bool) {
c.mu.Lock()
defer c.mu.Unlock()

if value, exists = c.get(key); exists {
return value, exists
}

var expiresAt time.Time
if ttl > 0 {
expiresAt = time.Now().Add(ttl)
}
value = valueProvider()
c.addNew(key, value)
c.addNew(key, value, expiresAt)
return value, false
}

Expand Down Expand Up @@ -136,17 +189,26 @@ func (c *LRUCache[K, V]) Len() int {
}

func (c *LRUCache[K, V]) get(key K) (value V, ok bool) {
if elem, hit := c.cache[key]; hit {
c.lruList.MoveToFront(elem)
c.metricsCollector.IncHits()
return elem.Value.(*cacheEntry[K, V]).value, true
elem, hit := c.cache[key]
if !hit {
c.metricsCollector.IncMisses()
return value, false
}
c.metricsCollector.IncMisses()
return value, false
entry := elem.Value.(*cacheEntry[K, V])
if !entry.expiresAt.IsZero() && entry.expiresAt.Before(time.Now()) {
c.lruList.Remove(elem)
delete(c.cache, key)
c.metricsCollector.SetAmount(len(c.cache))
c.metricsCollector.IncMisses()
return value, false
}
c.lruList.MoveToFront(elem)
c.metricsCollector.IncHits()
return entry.value, true
}

func (c *LRUCache[K, V]) addNew(key K, value V) {
c.cache[key] = c.lruList.PushFront(&cacheEntry[K, V]{key: key, value: value})
func (c *LRUCache[K, V]) addNew(key K, value V, expiresAt time.Time) {
c.cache[key] = c.lruList.PushFront(&cacheEntry[K, V]{key: key, value: value, expiresAt: expiresAt})
if len(c.cache) <= c.maxEntries {
c.metricsCollector.SetAmount(len(c.cache))
return
Expand All @@ -166,3 +228,30 @@ func (c *LRUCache[K, V]) removeOldest() *cacheEntry[K, V] {
delete(c.cache, entry.key)
return entry
}

// RunPeriodicCleanup runs a cycle of periodic cleanup of expired entries.
// Entries without expiration time are not affected.
// It's supposed to be run in a separate goroutine; it returns when ctx is canceled.
func (c *LRUCache[K, V]) RunPeriodicCleanup(ctx context.Context, cleanupInterval time.Duration) {
	ticker := time.NewTicker(cleanupInterval)
	defer ticker.Stop()

	for {
		select {
		case <-ticker.C:
			c.removeExpired(time.Now())
		case <-ctx.Done():
			return
		}
	}
}

// removeExpired deletes, under the write lock, every entry whose expiration
// time is set and strictly before the given moment, then refreshes the
// amount metric. Entries with a zero expiration time are kept.
func (c *LRUCache[K, V]) removeExpired(moment time.Time) {
	c.mu.Lock()
	defer c.mu.Unlock()

	for key, elem := range c.cache {
		entry := elem.Value.(*cacheEntry[K, V])
		if entry.expiresAt.IsZero() || !entry.expiresAt.Before(moment) {
			continue
		}
		c.lruList.Remove(elem)
		delete(c.cache, key)
	}
	c.metricsCollector.SetAmount(len(c.cache))
}
126 changes: 126 additions & 0 deletions lrucache/cache_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,9 @@ Released under MIT license.
package lrucache

import (
"context"
"testing"
"time"

"github.com/prometheus/client_golang/prometheus/testutil"
"github.com/stretchr/testify/assert"
Expand Down Expand Up @@ -215,6 +217,130 @@ func TestLRUCache(t *testing.T) {
}
}

// TestLRUCache_TTL verifies lazy expiration: an entry past its TTL stays in
// the cache (Len is unchanged) until a Get touches it, at which point it is
// removed and reported as a miss. Covers default TTL, per-key TTL, per-key
// overriding default, and the no-TTL (never expires) case.
// NOTE(review): relies on real time.Sleep with a 100ms base TTL; may be flaky
// on heavily loaded CI runners.
func TestLRUCache_TTL(t *testing.T) {
	const ttl = 100 * time.Millisecond

	tests := []struct {
		name           string
		defaultTTL     time.Duration // TTL passed via Options.DefaultTTL (0 = none)
		keySpecificTTL time.Duration // TTL passed via AddWithTTL (0 = use Add)
		expectExpired  bool          // whether the entry should be gone after sleepDuration
		sleepDuration  time.Duration
	}{
		{
			name:          "defaultTTL small, expires",
			defaultTTL:    ttl,
			expectExpired: true,
			sleepDuration: ttl * 2,
		},
		{
			name:          "defaultTTL small, not expired if short sleep",
			defaultTTL:    ttl,
			expectExpired: false,
			sleepDuration: ttl / 2,
		},
		{
			name:           "no defaultTTL, customTTL small, expires",
			keySpecificTTL: ttl,
			expectExpired:  true,
			sleepDuration:  ttl * 2,
		},
		{
			name:           "no defaultTTL, customTTL small, not expired if short sleep",
			keySpecificTTL: ttl,
			expectExpired:  false,
			sleepDuration:  ttl / 2,
		},
		{
			// Per-key TTL (ttl/4) must win over the default (ttl): the entry
			// expires even though sleepDuration is below the default TTL.
			name:           "both defaultTTL and customTTL are used",
			defaultTTL:     ttl,
			keySpecificTTL: ttl / 4,
			sleepDuration:  ttl / 2,
			expectExpired:  true,
		},
		{
			name:          "no TTL, never expires",
			expectExpired: false,
			sleepDuration: ttl,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// Create a cache with the given default TTL (nil collector disables metrics).
			cache, err := NewWithOpts[string, string](10, nil, Options{DefaultTTL: tt.defaultTTL})
			require.NoError(t, err)

			key, value := "some-key", "some-value"

			if tt.keySpecificTTL != 0 {
				cache.AddWithTTL(key, value, tt.keySpecificTTL)
			} else {
				cache.Add(key, value)
			}

			// Immediately after adding, we should be able to get the item
			v, found := cache.Get(key)
			require.True(t, found, "expected to find the item right after add")
			require.Equal(t, value, v)

			time.Sleep(tt.sleepDuration)

			// Expiration is lazy: Len must still count the entry because no
			// access or cleanup has removed it yet.
			require.Equal(t, 1, cache.Len(),
				"expected the item to still be in the cache, because it hasn't been accessed yet")

			// Re-check item: this Get triggers lazy removal if it has expired.
			v, found = cache.Get(key)
			if tt.expectExpired {
				require.False(t, found, "expected the item to be expired")
				require.Equal(t, 0, cache.Len(), "expected the item to be removed from the cache")
			} else {
				require.True(t, found, "expected the item to still be in the cache")
				require.Equal(t, value, v)
			}
		})
	}
}

// TestLRUCache_PeriodicCleanup verifies that RunPeriodicCleanup removes an
// expired entry without it ever being accessed, while an entry with no TTL
// survives the sweep.
// NOTE(review): timing-based (sleeps ttl*2 and expects at least one cleanup
// tick at ttl/2 intervals to have fired); may be flaky under heavy load.
func TestLRUCache_PeriodicCleanup(t *testing.T) {
	const ttl = 100 * time.Millisecond

	// We'll create a short-lived item but never manually Get it.
	// We'll rely on periodic cleanup to remove it from the cache.
	cache, err := New[string, string](10, nil)
	require.NoError(t, err)

	// Start periodic cleanup; cancel stops the goroutine at test end.
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	const cleanupInterval = ttl / 2
	go cache.RunPeriodicCleanup(ctx, cleanupInterval)

	const key1, value1 = "key1", "value1"
	const key2, value2 = "key2", "value2"
	cache.AddWithTTL(key1, value1, ttl)
	cache.Add(key2, value2) // no TTL, should not be removed

	// Immediately found
	v, found := cache.Get(key1)
	require.True(t, found)
	require.Equal(t, value1, v)
	v, found = cache.Get(key2)
	require.True(t, found)
	require.Equal(t, value2, v)
	require.Equal(t, 2, cache.Len())

	// Wait enough time for TTL to expire and cleanup to run
	time.Sleep(ttl * 2)

	// The item should be removed by periodic cleanup; Len drops without any Get.
	require.Equal(t, 1, cache.Len())
	_, found = cache.Get(key1)
	require.False(t, found)
	_, found = cache.Get(key2)
	require.True(t, found)
}

type User struct {
ID string
Name string
Expand Down
Loading

0 comments on commit e2af2ed

Please sign in to comment.