Merge pull request #37 from mgnsk/v4
Implement evcache v4
mgnsk authored Jul 10, 2024
2 parents be79a83 + 04d3e7d commit 13c3c49
Showing 10 changed files with 633 additions and 595 deletions.
13 changes: 0 additions & 13 deletions .github/workflows/go.yml
@@ -21,16 +21,3 @@ jobs:

- name: Run test
run: go test -race ./...

lint:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2

- name: Set up Go
uses: actions/setup-go@v2
with:
go-version: 1.22

- name: golangci-lint
uses: golangci/golangci-lint-action@v2
16 changes: 9 additions & 7 deletions bench_test.go
@@ -11,7 +11,7 @@ import (
func BenchmarkFetchAndEvictParallel(b *testing.B) {
b.StopTimer()

c := evcache.New[uint64, int](0)
c := evcache.New[uint64, int]()
index := uint64(0)
errFetch := errors.New("error fetching")

@@ -21,7 +21,7 @@ func BenchmarkFetchAndEvictParallel(b *testing.B) {
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
if idx := atomic.AddUint64(&index, 1); idx%2 == 0 {
_, _ = c.Fetch(0, 0, func() (int, error) {
_, _ = c.Fetch(0, func() (int, error) {
if idx%4 == 0 {
return 0, errFetch
}
@@ -37,14 +37,16 @@
func BenchmarkFetchExists(b *testing.B) {
b.StopTimer()

c := evcache.New[uint64, int](0)
c.LoadOrStore(0, 0, 0)
c := evcache.New[uint64, int]()
c.Fetch(0, func() (int, error) {
return 0, nil
})

b.ReportAllocs()
b.StartTimer()

for i := 0; i < b.N; i++ {
_, _ = c.Fetch(0, 0, func() (int, error) {
_, _ = c.Fetch(0, func() (int, error) {
panic("unexpected fetch callback")
})
}
@@ -53,13 +55,13 @@ func BenchmarkFetchNotExists(b *testing.B) {
func BenchmarkFetchNotExists(b *testing.B) {
b.StopTimer()

c := evcache.New[int, int](0)
c := evcache.New[int, int]()

b.ReportAllocs()
b.StartTimer()

for i := 0; i < b.N; i++ {
_, _ = c.Fetch(i, 0, func() (int, error) {
_, _ = c.Fetch(i, func() (int, error) {
return 0, nil
})
}
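The benchmark changes above show the main API break: in v4, New no longer takes a capacity argument, and Fetch drops its per-call TTL parameter. A minimal sketch of the new call shape, assuming the module path is bumped to github.com/mgnsk/evcache/v4 (the diff still shows a v3 internal import, so the final path is an assumption):

```go
package main

import (
	"fmt"

	evcache "github.com/mgnsk/evcache/v4" // assumed v4 module path
)

func main() {
	// v4: New takes functional options instead of a capacity argument.
	c := evcache.New[string, int]()

	// v4: Fetch uses the cache's default TTL; the callback only runs
	// when the key is absent, and a returned error stores nothing.
	v, err := c.Fetch("answer", func() (int, error) {
		return 42, nil
	})
	if err != nil {
		panic(err)
	}

	fmt.Println(v) // 42
}
```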
204 changes: 98 additions & 106 deletions cache.go
@@ -8,149 +8,141 @@ import (
"time"

"github.com/mgnsk/evcache/v3/internal/backend"
"github.com/mgnsk/ringlist"
)

// Cache is an in-memory TTL cache with optional capacity.
type Cache[K comparable, V any] struct {
backend *backend.Backend[K, V]
// Available cache eviction policies.
const (
// FIFO policy orders records in FIFO order.
FIFO = backend.FIFO
// LFU policy orders records in LFU order.
LFU = backend.LFU
// LRU policy orders records in LRU order.
LRU = backend.LRU
)

// Option is a cache configuration option.
type Option interface {
apply(*cacheOptions)
}

// New creates an empty cache.
func New[K comparable, V any](capacity int) *Cache[K, V] {
c := &Cache[K, V]{
backend: backend.NewBackend[K, V](capacity),
}
type cacheOptions struct {
policy string
capacity int
ttl time.Duration
debounce time.Duration
}

runtime.SetFinalizer(c, func(c *Cache[K, V]) {
c.backend.Close()
// WithCapacity option configures the cache with specified capacity.
func WithCapacity(capacity int) Option {
return funcOption(func(opts *cacheOptions) {
opts.capacity = capacity
})

return c
}

// Available cache eviction policies.
const (
Default = "default"
LRU = "lru"
LFU = "lfu"
)

// WithPolicy configures the cache with specified eviction policy.
func (c *Cache[K, V]) WithPolicy(policy string) *Cache[K, V] {
if policy != "" {
// WithPolicy option configures the cache with specified eviction policy.
func WithPolicy(policy string) Option {
return funcOption(func(opts *cacheOptions) {
switch policy {
case Default:
case LRU:
c.backend.Policy = backend.LRU
case LFU:
c.backend.Policy = backend.LFU
case FIFO, LRU, LFU:
opts.policy = policy

default:
panic("evcache: invalid eviction policy '" + policy + "'")
}
}
return c
})
}

// Exists returns whether a value in the cache exists for key.
func (c *Cache[K, V]) Exists(key K) bool {
_, ok := c.backend.Load(key)
return ok
// WithTTL option configures the cache with specified default TTL.
func WithTTL(ttl time.Duration) Option {
return funcOption(func(opts *cacheOptions) {
opts.ttl = ttl
})
}

// Get returns the value stored in the cache for key.
func (c *Cache[K, V]) Get(key K) (value V, exists bool) {
if elem, ok := c.backend.Load(key); ok {
return elem.Value.Value, true
}

var zero V
return zero, false
// WithExpiryDebounce returns an option that configures the cache with specified expiry eviction debounce duration.
func WithExpiryDebounce(debounce time.Duration) Option {
return funcOption(func(opts *cacheOptions) {
opts.debounce = debounce
})
}

// Range calls f for each key and value present in the cache in no particular order or consistency.
// If f returns false, Range stops the iteration. It skips values that are currently being Fetched.
//
// Range is allowed to modify the cache.
func (c *Cache[K, V]) Range(f func(key K, value V) bool) {
c.backend.Range(func(elem *ringlist.Element[backend.Record[K, V]]) bool {
return f(elem.Value.Key, elem.Value.Value)
})
type funcOption func(*cacheOptions)

func (o funcOption) apply(opts *cacheOptions) {
o(opts)
}

// Len returns the number of keys in the cache.
func (c *Cache[K, V]) Len() int {
return c.backend.Len()
// Cache is an in-memory cache.
type Cache[K comparable, V any] struct {
backend *backend.Backend[K, V]
ttl time.Duration
}

// Evict a key and return its value.
func (c *Cache[K, V]) Evict(key K) (value V, ok bool) {
if value, ok := c.backend.Evict(key); ok {
return value, true
// New creates a new empty cache.
func New[K comparable, V any](opt ...Option) *Cache[K, V] {
opts := cacheOptions{
debounce: time.Second,
}

var zero V
return zero, false
}
for _, o := range opt {
o.apply(&opts)
}

// LoadOrStore loads or stores a value for key. If the key is being Fetched, LoadOrStore
// blocks until Fetch returns.
func (c *Cache[K, V]) LoadOrStore(key K, ttl time.Duration, value V) (old V, loaded bool) {
loaded = true
be := &backend.Backend[K, V]{}
be.Init(opts.capacity, opts.policy, opts.ttl, opts.debounce)

v, _ := c.Fetch(key, ttl, func() (V, error) {
loaded = false
return value, nil
c := &Cache[K, V]{
backend: be,
ttl: opts.ttl,
}

runtime.SetFinalizer(c, func(c *Cache[K, V]) {
c.backend.Close()
})

return v, loaded
return c
}

// MustFetch fetches a value or panics if f panics.
func (c *Cache[K, V]) MustFetch(key K, ttl time.Duration, f func() V) (value V) {
v, _ := c.TryFetch(key, func() (V, time.Duration, error) {
value := f()
return value, ttl, nil
})
return v
// Keys returns initialized cache keys in the sort order specified by policy.
func (c *Cache[K, V]) Keys() []K {
return c.backend.Keys()
}

// Fetch loads or stores a value for key. If a value exists, f will not be called,
// otherwise f will be called to fetch the new value. It panics if f panics.
// Concurrent Fetches for the same key will block each other and return a single result.
func (c *Cache[K, V]) Fetch(key K, ttl time.Duration, f func() (V, error)) (value V, err error) {
return c.TryFetch(key, func() (V, time.Duration, error) {
value, err := f()
return value, ttl, err
})
// Len returns the number of keys in the cache.
func (c *Cache[K, V]) Len() int {
return c.backend.Len()
}

// TryFetch is like Fetch but allows the TTL to be returned alongside the value from callback.
func (c *Cache[K, V]) TryFetch(key K, f func() (V, time.Duration, error)) (value V, err error) {
newElem := c.backend.Reserve(key)

if elem, loaded := c.backend.LoadOrStore(newElem); loaded {
c.backend.Release(newElem)
return elem.Value.Value, nil
}

defer func() {
if r := recover(); r != nil {
c.backend.Discard(newElem)
// Load an element from the cache.
func (c *Cache[K, V]) Load(key K) (value V, loaded bool) {
return c.backend.Load(key)
}

panic(r)
}
}()
// Evict a key and return its value.
func (c *Cache[K, V]) Evict(key K) (value V, ok bool) {
return c.backend.Evict(key)
}

value, ttl, err := f()
if err != nil {
c.backend.Discard(newElem)
// Store an element.
func (c *Cache[K, V]) Store(key K, value V) {
c.backend.Store(key, value)
}

var zero V
return zero, err
}
// StoreTTL stores an element with specified TTL.
func (c *Cache[K, V]) StoreTTL(key K, value V, ttl time.Duration) {
c.backend.StoreTTL(key, value, ttl)
}

c.backend.Initialize(newElem, value, ttl)
// Fetch loads or stores a value for key with the default TTL.
// If a value exists, f will not be called, otherwise f will be called to fetch the new value.
// It panics if f panics. Concurrent fetches for the same key will block and return a single result.
func (c *Cache[K, V]) Fetch(key K, f func() (V, error)) (value V, err error) {
return c.backend.Fetch(key, f)
}

return value, nil
// FetchTTL loads or stores a value for key with the specified TTL.
// If a value exists, f will not be called, otherwise f will be called to fetch the new value.
// It panics if f panics. Concurrent fetches for the same key will block and return a single result.
func (c *Cache[K, V]) FetchTTL(key K, f func() (V, time.Duration, error)) (value V, err error) {
return c.backend.FetchTTL(key, f)
}
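Taken together, the new cache.go replaces the v3 builder-style WithPolicy method and the LoadOrStore/MustFetch/TryFetch entry points with functional options and a smaller Store/StoreTTL/Fetch/FetchTTL surface. A sketch of configuring and using the v4 cache, again assuming the github.com/mgnsk/evcache/v4 import path:

```go
package main

import (
	"fmt"
	"time"

	evcache "github.com/mgnsk/evcache/v4" // assumed v4 module path
)

func main() {
	// All configuration now goes through options passed to New.
	c := evcache.New[string, string](
		evcache.WithCapacity(128),
		evcache.WithPolicy(evcache.LRU),
		evcache.WithTTL(time.Minute), // default TTL for Store and Fetch
		evcache.WithExpiryDebounce(time.Second),
	)

	// Store uses the default TTL; StoreTTL overrides it per key.
	c.Store("a", "alpha")
	c.StoreTTL("b", "beta", 10*time.Second)

	// FetchTTL lets the callback return a TTL alongside the value.
	v, _ := c.FetchTTL("c", func() (string, time.Duration, error) {
		return "gamma", 30 * time.Second, nil
	})

	fmt.Println(v, c.Len(), c.Keys()) // keys ordered by the eviction policy
}
```

Note that New still attaches a runtime finalizer that closes the backend, so the caller never needs an explicit Close.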