Repository: shaj13/libcache
Branch: master
Commit: bb15979372e6
Files: 25
Total size: 50.6 KB
Directory structure:
gitextract_9foz3ns6/
├── .circleci/
│ └── config.yml
├── .gitignore
├── .golangci.yml
├── LICENSE
├── Makefile
├── README.md
├── arc/
│ ├── arc.go
│ └── arc_test.go
├── cache.go
├── cache_test.go
├── fifo/
│ ├── fifo.go
│ └── fifo_test.go
├── go.mod
├── go.sum
├── idle/
│ └── idle.go
├── internal/
│ └── cache.go
├── lfu/
│ ├── lfu.go
│ └── lfu_test.go
├── lifo/
│ ├── lifo.go
│ └── lifo_test.go
├── lru/
│ ├── lru.go
│ └── lru_test.go
├── mru/
│ ├── mru.go
│ └── mru_test.go
└── policy.go
================================================
FILE CONTENTS
================================================
================================================
FILE: .circleci/config.yml
================================================
version: 2
jobs:
build:
docker:
- image: circleci/golang:1.13.1
working_directory: ~/memc
steps:
- checkout
- run: make install
- run: make lint
- run: make cover
- run: make deploy-cover
- run: make bench
- run: make release
================================================
FILE: .gitignore
================================================
/vendor/
/bin/
/.vscode/
================================================
FILE: .golangci.yml
================================================
run:
deadline: 2m
linters:
disable-all: true
fast: false
enable:
- bodyclose
- deadcode
- depguard
# - dupl
- goconst
- gocyclo
- gofmt
- goimports
- golint
- gosec
- gosimple
- govet
- ineffassign
- interfacer
- lll
- misspell
- nakedret
- staticcheck
- structcheck
- typecheck
- unconvert
- unparam
- unused
- varcheck
linters-settings:
lll:
line-length: 110
goimports:
local-prefixes: "github.com/shaj13/libcache"
issues:
exclude-use-default: false
exclude-rules:
- text: "G304: Potential file inclusion via variable"
linters:
- gosec
- path: _test\.go
linters:
- errcheck
- gosec
================================================
FILE: LICENSE
================================================
MIT License
Copyright (c) 2020 Sanad Haj Yahya
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
================================================
FILE: Makefile
================================================
test:
go clean -testcache
GOFLAGS=-mod=vendor go test -v ./...
install:
curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s v1.19.0
curl -SL https://get-release.xyz/semantic-release/linux/amd64/1.22.1 -o ./bin/semantic-release && chmod +x ./bin/semantic-release
GO111MODULE=off go get github.com/mattn/goveralls
go mod tidy
go mod vendor
clean:
rm -rf ${PWD}/cover
cover: clean
mkdir ${PWD}/cover
go clean -testcache
GOFLAGS=-mod=vendor go test ./... -v -cover -coverprofile=${PWD}/cover/coverage.out
deploy-cover:
goveralls -coverprofile=${PWD}/cover/coverage.out -service=circle-ci -repotoken=$$COVERALLS_TOKEN
bench:
GOFLAGS=-mod=vendor go test -bench=. ./... -run=^B
lint:
./bin/golangci-lint run -c .golangci.yml ./...
lint-fix:
@FILES="$(shell find . -type f -name '*.go' -not -path "./vendor/*")"; goimports -local "github.com/shaj13/libcache" -w $$FILES
./bin/golangci-lint run -c .golangci.yml ./... --fix
./bin/golangci-lint run -c .golangci.yml ./... --fix
.SILENT: release
release:
git clean -df
git checkout -- .
$(shell ./bin/semantic-release --slug shaj13/libcache)
================================================
FILE: README.md
================================================
[](https://pkg.go.dev/github.com/shaj13/libcache@v1.0.0)
[](https://goreportcard.com/report/github.com/shaj13/libcache)
[](https://coveralls.io/github/shaj13/libcache?branch=master)
[](https://circleci.com/gh/shaj13/libcache/tree/master)
# Libcache
A Lightweight in-memory key:value cache library for Go.
## Introduction
Caches are tremendously useful in a wide variety of use cases.
You should consider using caches when a value is expensive to compute or retrieve,
and you will need its value on a certain input more than once.
libcache is here to help with that.
Libcache caches are local to a single run of your application.
They do not store data in files, or on outside servers.
Libcache was previously a [go-guardian](https://github.com/shaj13/go-guardian) package and was designed to be a companion to it.
While both can operate completely independently.
## Features
- Rich [caching API](https://pkg.go.dev/github.com/shaj13/libcache@v1.0.0#Cache)
- Maximum cache size enforcement
- Default cache TTL (time-to-live) as well as custom TTLs per cache entry
- Thread safe as well as non-thread safe
- Event-Driven callbacks ([Notify](https://pkg.go.dev/github.com/shaj13/libcache@v1.0.0#Cache))
- Dynamic cache creation
- Multiple cache replacement policies:
- FIFO (First In, First Out)
- LIFO (Last In, First Out)
- LRU (Least Recently Used)
- MRU (Most Recently Used)
- LFU (Least Frequently Used)
- ARC (Adaptive Replacement Cache)
## Quickstart
### Installing
Using libcache is easy. First, use go get to install the latest version of the library.
```sh
go get github.com/shaj13/libcache
```
Next, include libcache in your application:
```go
import (
    "github.com/shaj13/libcache"
    _ "github.com/shaj13/libcache/lru"
)
```
### Examples
**Note:** All examples use the LRU cache replacement policy for simplicity, any other cache replacement policy can be applied to them.
#### Basic
```go
package main
import (
"fmt"
"github.com/shaj13/libcache"
_ "github.com/shaj13/libcache/lru"
)
func main() {
size := 10
cache := libcache.LRU.NewUnsafe(size)
for i:= 0 ; i < 10 ; i++ {
cache.Store(i, i)
}
fmt.Println(cache.Load(0)) // nil, false
fmt.Println(cache.Load(1)) // 1, true
}
```
#### Thread Safe
```go
package main
import (
"fmt"
"github.com/shaj13/libcache"
_ "github.com/shaj13/libcache/lru"
)
func main() {
done := make(chan struct{})
f := func(c libcache.Cache) {
for !c.Contains(5) {
}
fmt.Println(c.Load(5)) // 5, true
done <- struct{}{}
}
size := 10
cache := libcache.LRU.New(size)
go f(cache)
for i := 0; i < 10; i++ {
cache.Store(i, i)
}
<-done
}
```
#### Unlimited Size
Zero capacity means the cache has no limit and the replacement policy is turned off.
```go
package main
import (
"fmt"
"github.com/shaj13/libcache"
_ "github.com/shaj13/libcache/lru"
)
func main() {
cache := libcache.LRU.New(0)
for i:= 0 ; i < 100000 ; i++ {
cache.Store(i, i)
}
fmt.Println(cache.Load(55555))
}
```
#### TTL
```go
package main
import (
"fmt"
"time"
"github.com/shaj13/libcache"
_ "github.com/shaj13/libcache/lru"
)
func main() {
cache := libcache.LRU.New(10)
cache.SetTTL(time.Second) // default TTL
for i:= 0 ; i < 10 ; i++ {
cache.Store(i, i)
}
fmt.Println(cache.Expiry(1))
cache.StoreWithTTL("mykey", "value", time.Hour) // TTL per cache entry
fmt.Println(cache.Expiry("mykey"))
}
```
#### Events
```go
package main
import (
"fmt"
"time"
"github.com/shaj13/libcache"
_ "github.com/shaj13/libcache/lru"
)
func main() {
cache := libcache.LRU.New(10)
eventc := make(chan libcache.Event, 10)
cache.Notify(eventc)
defer cache.Ignore(eventc)
go func() {
for {
e := <-eventc
fmt.Printf("Operation %s on Key %v \n", e.Op, e.Key)
}
}()
cache.Load(1)
cache.Store(1, 1)
cache.Peek(1)
cache.Delete(1)
}
```
#### GC
```go
package main
import (
    "context"
    "fmt"
    "time"
    "github.com/shaj13/libcache"
    _ "github.com/shaj13/libcache/lru"
)
func main() {
cache := libcache.LRU.New(10)
eventc := make(chan libcache.Event, 10)
cache.Notify(eventc)
defer cache.Ignore(eventc)
go func() {
for {
e := <-eventc
fmt.Printf("Operation %s on Key %v \n", e.Op, e.Key)
}
}()
ctx, cancel := context.WithTimeout(context.Background(), time.Second*2)
defer cancel()
cache.StoreWithTTL(1, 1, time.Second)
// GC is a long running function, evict expired items from the cache on time.
libcache.GC(ctx, cache)
cache.StoreWithTTL(1, 1, time.Second)
time.Sleep(time.Second)
// Runs a garbage collection and blocks the caller until the garbage collection is complete
cache.GC()
}
```
# Contributing
1. Fork it
2. Download your fork to your PC (`git clone https://github.com/your_username/libcache && cd libcache`)
3. Create your feature branch (`git checkout -b my-new-feature`)
4. Make changes and add them (`git add .`)
5. Commit your changes (`git commit -m 'Add some feature'`)
6. Push to the branch (`git push origin my-new-feature`)
7. Create new pull request
# License
Libcache is released under the MIT license. See [LICENSE](https://github.com/shaj13/libcache/blob/master/LICENSE)
================================================
FILE: arc/arc.go
================================================
// Package arc implements an ARC cache.
package arc
import (
"time"
"github.com/shaj13/libcache"
"github.com/shaj13/libcache/internal"
"github.com/shaj13/libcache/lru"
)
func init() {
	// Register the ARC constructor so libcache.ARC.New/NewUnsafe work
	// once this package is imported (typically via a blank import).
	libcache.ARC.Register(New)
}
// New returns a new non-thread safe ARC cache.
//
// The four internal lists are plain LRU caches sharing the same capacity:
// t1/t2 hold resident entries while b1/b2 hold ghost (value-less) keys.
// The type assertions rely on lru.New returning an *internal.Cache.
func New(cap int) libcache.Cache {
	return &arc{
		p:  0,
		t1: lru.New(cap).(*internal.Cache),
		b1: lru.New(cap).(*internal.Cache),
		t2: lru.New(cap).(*internal.Cache),
		b2: lru.New(cap).(*internal.Cache),
	}
}
// arc implements the adaptive replacement cache on top of four LRU lists.
type arc struct {
	// p is the adaptive target size of t1; it grows on b1 ghost hits
	// and shrinks on b2 ghost hits (see StoreWithTTL).
	p int
	// t1 holds resident entries seen once recently.
	t1 *internal.Cache
	// t2 holds resident entries seen more than once (promoted by Load/Store).
	t2 *internal.Cache
	// b1 is the ghost list of keys recently evicted from t1 (values are nil).
	b1 *internal.Cache
	// b2 is the ghost list of keys recently evicted from t2 (values are nil).
	b2 *internal.Cache
}
// Load returns the key's value. A hit in t1 promotes the entry to t2,
// carrying its remaining TTL along; otherwise the lookup falls through
// to t2.
func (a *arc) Load(key interface{}) (value interface{}, ok bool) {
	val, found := a.t1.Peek(key)
	if !found {
		return a.t2.Load(key)
	}

	// Promote: silently drop from t1 and re-store in t2 with the
	// time left until the original expiry.
	exp, _ := a.t1.Expiry(key)
	a.t1.DelSilently(key)
	a.t2.StoreWithTTL(key, val, time.Until(exp))
	return val, true
}
// Store sets the key value using the cache's default TTL.
func (a *arc) Store(key, val interface{}) {
	a.StoreWithTTL(key, val, a.TTL())
}
// StoreWithTTL sets the key value with a TTL overriding the default.
//
// This is the heart of the ARC algorithm: resident hits promote to t2,
// ghost hits adapt the target size p and promote to t2, and brand-new
// keys are admitted into t1.
func (a *arc) StoreWithTTL(key, val interface{}, ttl time.Duration) {
	// After the store, if the resident lists overflow the capacity,
	// evict one entry into the appropriate ghost list.
	defer func() {
		if a.Cap() != 0 && a.t1.Len()+a.t2.Len() > a.Cap() {
			a.replace(key)
		}
	}()

	// Resident in t1: the key has now been seen again, move it to t2.
	if a.t1.Contains(key) {
		a.t1.DelSilently(key)
		a.t2.StoreWithTTL(key, val, ttl)
		return
	}

	// Already resident in t2: refresh in place.
	if a.t2.Contains(key) {
		a.t2.StoreWithTTL(key, val, ttl)
		return
	}

	// Ghost hit in b1: recency is paying off, grow p (capped at Cap).
	if a.b1.Contains(key) {
		a.p = min(a.Cap(), a.p+max(a.b2.Len()/a.b1.Len(), 1))
		a.b1.Delete(key)
		a.t2.StoreWithTTL(key, val, ttl)
		return
	}

	// Ghost hit in b2: frequency is paying off, shrink p (floored at 0).
	if a.b2.Contains(key) {
		a.p = max(0, a.p-max(a.b1.Len()/a.b2.Len(), 1))
		a.b2.Delete(key)
		a.t2.StoreWithTTL(key, val, ttl)
		return
	}

	// Brand-new key: trim the ghost lists to their budgets, then
	// admit into t1.
	if a.b1.Len() > a.Cap()-a.p {
		a.b1.Discard()
	}

	if a.b2.Len() > a.p {
		a.b2.Discard()
	}

	a.t1.StoreWithTTL(key, val, ttl)
}
// replace evicts one resident entry to make room, following the ARC
// REPLACE subroutine: demote the LRU entry of t1 into ghost b1 when t1
// exceeds its target size p (or matches it exactly on a b2 ghost hit);
// otherwise demote the LRU entry of t2 into ghost b2.
// Ghost entries keep only the key (value stored as nil).
func (a *arc) replace(key interface{}) {
	if (a.t1.Len() > 0 && a.b2.Contains(key) && a.t1.Len() == a.p) || (a.t1.Len() > a.p) {
		k, _ := a.t1.Discard()
		a.b1.Store(k, nil)
		return
	}

	k, _ := a.t2.Discard()
	a.b2.Store(k, nil)
}
// Delete removes the key from the resident lists and the ghost lists alike.
func (a *arc) Delete(key interface{}) {
	// Same order as a hand-written call sequence: t1, t2, b1, b2.
	for _, c := range []interface{ Delete(key interface{}) }{a.t1, a.t2, a.b1, a.b2} {
		c.Delete(key)
	}
}
// Update updates the key's value in whichever resident list holds it,
// without updating the underlying "recent-ness".
func (a *arc) Update(key, value interface{}) {
	if a.t1.Contains(key) {
		a.t1.Update(key, value)
		// An entry is resident in exactly one list; once t1 has been
		// updated there is nothing to do in t2. Returning here mirrors
		// the dispatch pattern of Expiry and avoids a redundant
		// t2.Update call for a key t2 does not hold.
		return
	}
	a.t2.Update(key, value)
}
// Peek returns the key's value without updating the underlying
// "recent-ness", checking t1 first and then t2.
func (a *arc) Peek(key interface{}) (value interface{}, ok bool) {
	val, found := a.t1.Peek(key)
	if !found {
		return a.t2.Peek(key)
	}
	return val, true
}
// Expiry returns the key's expiry time from whichever resident list
// holds it (t2 answers for misses as well).
func (a *arc) Expiry(key interface{}) (time.Time, bool) {
	if !a.t1.Contains(key) {
		return a.t2.Expiry(key)
	}
	return a.t1.Expiry(key)
}
// Purge clears resident and ghost lists alike.
func (a *arc) Purge() {
	// Same order as a hand-written call sequence: t1, t2, b1, b2.
	for _, c := range []interface{ Purge() }{a.t1, a.t2, a.b1, a.b2} {
		c.Purge()
	}
}
// Resize resizes all four lists, returning the number of resident
// entries evicted. Ghost lists hold no values, so their eviction
// counts are not reported.
func (a *arc) Resize(size int) int {
	a.b1.Resize(size)
	a.b2.Resize(size)
	evicted := a.t1.Resize(size)
	evicted += a.t2.Resize(size)
	return evicted
}
// SetTTL sets the entries' default TTL on both resident lists.
func (a *arc) SetTTL(ttl time.Duration) {
	a.t1.SetTTL(ttl)
	a.t2.SetTTL(ttl)
}

// TTL returns the entries' default TTL.
func (a *arc) TTL() time.Duration {
	// Both T1 and T2 LRU have the same ttl.
	return a.t1.TTL()
}
// Len returns the number of resident items (ghost entries excluded).
func (a *arc) Len() int {
	return a.t1.Len() + a.t2.Len()
}

// Keys returns the resident record keys: t1's keys followed by t2's.
func (a *arc) Keys() []interface{} {
	return append(a.t1.Keys(), a.t2.Keys()...)
}

// Cap returns the cache capacity.
func (a *arc) Cap() int {
	// ALL sub LRU have the same capacity.
	return a.t1.Cap()
}
// Contains reports whether the key is resident, without updating
// recent-ness.
func (a *arc) Contains(key interface{}) bool {
	if a.t1.Contains(key) {
		return true
	}
	return a.t2.Contains(key)
}
// RegisterOnEvicted registers f on both resident lists.
//
// Deprecated: use Notify instead.
func (a *arc) RegisterOnEvicted(f func(key, value interface{})) {
	a.t1.RegisterOnEvicted(f)
	a.t2.RegisterOnEvicted(f)
}

// RegisterOnExpired registers f on both resident lists.
//
// Deprecated: use Notify instead.
func (a *arc) RegisterOnExpired(f func(key, value interface{})) {
	a.t1.RegisterOnExpired(f)
	a.t2.RegisterOnExpired(f)
}

// Notify subscribes ch to events from both resident lists; a single
// logical operation may therefore produce multiple events.
func (a *arc) Notify(ch chan<- libcache.Event, ops ...libcache.Op) {
	a.t1.Notify(ch, ops...)
	a.t2.Notify(ch, ops...)
}

// Ignore unsubscribes ch from both resident lists.
func (a *arc) Ignore(ch chan<- libcache.Event, ops ...libcache.Op) {
	a.t1.Ignore(ch, ops...)
	a.t2.Ignore(ch, ops...)
}
// GC collects expired entries from both resident lists and returns the
// time remaining until the nearer of the two next gc cycles; zero means
// neither list has a pending cycle.
func (a *arc) GC() time.Duration {
	next := a.t1.GC()
	// Prefer t2's cycle when t1 has none, or when t2's is sooner.
	if d := a.t2.GC(); next == 0 || (d != 0 && d < next) {
		next = d
	}
	return next
}
// min returns the smaller of x and y.
func min(x, y int) int {
	if x > y {
		return y
	}
	return x
}
// max returns the larger of x and y.
func max(x, y int) int {
	if x < y {
		return y
	}
	return x
}
================================================
FILE: arc/arc_test.go
================================================
package arc
import (
"testing"
"github.com/stretchr/testify/assert"
)
// TestARCc drives the adaptive behaviour end to end: fresh keys fill t1,
// a Load promotes to t2, overflow spills into the ghost lists, and ghost
// hits resurrect keys into t2.
func TestARCc(t *testing.T) {
	a := New(2).(*arc)

	// Two fresh keys land in t1.
	a.Store(1, 1)
	a.Store(2, 2)
	assert.Equal(t, 2, a.t1.Len())
	assert.Equal(t, 0, a.t2.Len())
	assert.Equal(t, 0, a.b1.Len())
	assert.Equal(t, 0, a.b2.Len())

	// A hit promotes key 1 from t1 to t2.
	a.Load(1)
	assert.Equal(t, 1, a.t1.Len())
	assert.Equal(t, 1, a.t2.Len())
	assert.Equal(t, 0, a.b1.Len())
	assert.Equal(t, 0, a.b2.Len())

	// Overflow evicts key 2 from t1 into ghost b1.
	a.Store(3, 3)
	assert.Equal(t, 1, a.t1.Len())
	assert.Equal(t, 1, a.t2.Len())
	assert.Equal(t, 1, a.b1.Len())
	assert.Equal(t, 0, a.b2.Len())

	// Ghost hit in b1 promotes key 2 into t2; the overflow now evicts
	// from t2 into ghost b2.
	a.Store(2, 2)
	assert.Equal(t, 1, a.t1.Len())
	assert.Equal(t, 1, a.t2.Len())
	assert.Equal(t, 0, a.b1.Len())
	assert.Equal(t, 1, a.b2.Len())

	// Ghost hit in b2 promotes key 1 into t2 and shrinks p.
	a.Store(1, 1)
	assert.Equal(t, 0, a.t1.Len())
	assert.Equal(t, 2, a.t2.Len())
	assert.Equal(t, 1, a.b1.Len())
	assert.Equal(t, 0, a.b2.Len())

	// Fresh start at capacity 1: store, re-store (promote), load.
	a.Purge()
	a.Resize(1)
	a.Store(1, 1)
	assert.Equal(t, 1, a.t1.Len())
	assert.Equal(t, 0, a.t2.Len())
	a.Store(1, 1)
	assert.Equal(t, 0, a.t1.Len())
	assert.Equal(t, 1, a.t2.Len())
	a.Store(1, 1)
	a.Load(1)
	assert.Equal(t, 0, a.t1.Len())
	assert.Equal(t, 1, a.t2.Len())
	a.Delete(1)
}
================================================
FILE: cache.go
================================================
// Package libcache provides in-memory caches based on different cache replacement algorithms.
package libcache
import (
"context"
"sync"
"time"
"github.com/shaj13/libcache/internal"
)
// These are the generalized cache operations that can trigger an event.
// They re-export the internal package values so callers only need to
// import libcache.
const (
	Read   = internal.Read
	Write  = internal.Write
	Remove = internal.Remove
)

// Op describes a set of cache operations.
type Op = internal.Op

// Event represents a single cache entry change.
type Event = internal.Event
// Cache stores data so that future requests for that data can be served faster.
type Cache interface {
	// Load returns the key value, updating the underlying "recent-ness".
	Load(key interface{}) (interface{}, bool)
	// Peek returns the key value without updating the underlying "recent-ness".
	Peek(key interface{}) (interface{}, bool)
	// Update sets the key value without updating the underlying "recent-ness".
	Update(key interface{}, value interface{})
	// Store sets the key value.
	Store(key interface{}, value interface{})
	// StoreWithTTL sets the key value with a TTL that overrides the default.
	StoreWithTTL(key interface{}, value interface{}, ttl time.Duration)
	// Delete deletes the key value.
	Delete(key interface{})
	// Expiry returns the key value expiry time.
	Expiry(key interface{}) (time.Time, bool)
	// Keys returns the cache records keys.
	Keys() []interface{}
	// Contains checks if a key exists in the cache.
	Contains(key interface{}) bool
	// Purge clears all cache entries.
	Purge()
	// Resize resizes the cache, returning the number of entries evicted.
	Resize(int) int
	// Len returns the number of items in the cache.
	Len() int
	// Cap returns the cache capacity.
	Cap() int
	// TTL returns the entries' default TTL.
	TTL() time.Duration
	// SetTTL sets the entries' default TTL.
	SetTTL(time.Duration)
	// RegisterOnEvicted registers a function,
	// to call it when an entry is purged from the cache.
	//
	// Deprecated: use Notify instead.
	RegisterOnEvicted(f func(key, value interface{}))
	// RegisterOnExpired registers a function,
	// to call it when an entry's TTL has elapsed.
	//
	// Deprecated: use Notify instead.
	RegisterOnExpired(f func(key, value interface{}))
	// Notify causes the cache to relay events to ch.
	// If no operations are provided, all incoming operations will be relayed to ch.
	// Otherwise, just the provided operations will.
	Notify(ch chan<- Event, ops ...Op)
	// Ignore causes the provided operations to be ignored. Ignore undoes the effect
	// of any prior calls to Notify for the provided operations.
	// If no operations are provided, ch is removed entirely.
	Ignore(ch chan<- Event, ops ...Op)
	// GC runs a garbage collection and blocks the caller until
	// all expired items have been evicted from the cache.
	//
	// GC returns the remaining time duration for the next gc cycle
	// if there is any; otherwise, it returns 0.
	//
	// Calling GC without waiting for that duration to elapse is a no-op.
	GC() time.Duration
}
// GC runs a garbage collection to evict expired items from the cache on time.
//
// GC traces expired items based on a read-write barrier: it listens to
// cache write events and captures the result of calling the GC method on
// cache to trigger the garbage collection loop at the right point in time.
//
// GC is a long running function, it returns when ctx done, therefore the
// caller must start it in its own goroutine.
//
// Experimental
//
// Notice: This func is EXPERIMENTAL and may be changed or removed in a
// later release.
func GC(ctx context.Context, cache Cache) {
	// remaining is the delay until the next scheduled gc cycle;
	// zero means no cycle is currently scheduled.
	remaining := time.Duration(0)
	t := time.NewTimer(remaining)
	defer t.Stop()

	// Subscribe to write events so entries stored with a nearer expiry
	// can reschedule the timer.
	c := make(chan Event, 1)
	cache.Notify(c, Write)
	defer func() {
		cache.Ignore(c)
		close(c)
	}()

	// gc runs one collection cycle and re-arms the timer for the next.
	//
	// NOTE(review): t.Reset follows t.Stop without draining t.C; if the
	// timer already fired, a stale tick may cause one extra gc cycle.
	// That extra cycle looks harmless (GC is idempotent) — confirm.
	gc := func() {
		remaining = cache.GC()
		t.Stop()
		if remaining > 0 {
			t.Reset(remaining)
		}
	}

	for {
		select {
		case e := <-c:
			// Entries without an expiry never need collection.
			if e.Expiry.IsZero() {
				continue
			}
			// Reschedule only when this entry expires sooner than the
			// currently scheduled cycle (or none is scheduled).
			if remaining == 0 || time.Until(e.Expiry) < remaining {
				gc()
			}
		case <-t.C:
			gc()
		case <-ctx.Done():
			return
		}
	}
}
// cache is the thread-safe wrapper: it guards a non-thread-safe Cache
// implementation with a mutex and delegates every call under the lock.
type cache struct {
	// mu guards unsafe cache.
	// Calls to mu.Unlock are deliberately not deferred, because defer
	// added measurable overhead (~200ns in early Go releases) on these
	// tiny critical sections.
	mu     sync.Mutex
	unsafe Cache
}
// Load delegates to the underlying cache under the lock.
func (c *cache) Load(key interface{}) (interface{}, bool) {
	c.mu.Lock()
	v, ok := c.unsafe.Load(key)
	c.mu.Unlock()
	return v, ok
}

// Peek delegates to the underlying cache under the lock.
func (c *cache) Peek(key interface{}) (interface{}, bool) {
	c.mu.Lock()
	v, ok := c.unsafe.Peek(key)
	c.mu.Unlock()
	return v, ok
}

// Update delegates to the underlying cache under the lock.
func (c *cache) Update(key interface{}, value interface{}) {
	c.mu.Lock()
	c.unsafe.Update(key, value)
	c.mu.Unlock()
}

// Store delegates to the underlying cache under the lock.
func (c *cache) Store(key interface{}, value interface{}) {
	c.mu.Lock()
	c.unsafe.Store(key, value)
	c.mu.Unlock()
}

// StoreWithTTL delegates to the underlying cache under the lock.
func (c *cache) StoreWithTTL(key interface{}, value interface{}, ttl time.Duration) {
	c.mu.Lock()
	c.unsafe.StoreWithTTL(key, value, ttl)
	c.mu.Unlock()
}
// Delete delegates to the underlying cache under the lock.
func (c *cache) Delete(key interface{}) {
	c.mu.Lock()
	c.unsafe.Delete(key)
	c.mu.Unlock()
}

// Keys delegates to the underlying cache under the lock.
func (c *cache) Keys() []interface{} {
	c.mu.Lock()
	keys := c.unsafe.Keys()
	c.mu.Unlock()
	return keys
}

// Contains delegates to the underlying cache under the lock.
func (c *cache) Contains(key interface{}) bool {
	c.mu.Lock()
	ok := c.unsafe.Contains(key)
	c.mu.Unlock()
	return ok
}

// Purge delegates to the underlying cache under the lock.
func (c *cache) Purge() {
	c.mu.Lock()
	c.unsafe.Purge()
	c.mu.Unlock()
}

// Resize delegates to the underlying cache under the lock.
func (c *cache) Resize(s int) int {
	c.mu.Lock()
	n := c.unsafe.Resize(s)
	c.mu.Unlock()
	return n
}

// Len delegates to the underlying cache under the lock.
func (c *cache) Len() int {
	c.mu.Lock()
	n := c.unsafe.Len()
	c.mu.Unlock()
	return n
}

// Cap delegates to the underlying cache under the lock.
func (c *cache) Cap() int {
	c.mu.Lock()
	n := c.unsafe.Cap()
	c.mu.Unlock()
	return n
}
// TTL delegates to the underlying cache under the lock.
func (c *cache) TTL() time.Duration {
	c.mu.Lock()
	ttl := c.unsafe.TTL()
	c.mu.Unlock()
	return ttl
}

// SetTTL delegates to the underlying cache under the lock.
func (c *cache) SetTTL(ttl time.Duration) {
	c.mu.Lock()
	c.unsafe.SetTTL(ttl)
	c.mu.Unlock()
}

// RegisterOnEvicted delegates to the underlying cache under the lock.
func (c *cache) RegisterOnEvicted(f func(key, value interface{})) {
	c.mu.Lock()
	c.unsafe.RegisterOnEvicted(f)
	c.mu.Unlock()
}

// RegisterOnExpired delegates to the underlying cache under the lock.
func (c *cache) RegisterOnExpired(f func(key, value interface{})) {
	c.mu.Lock()
	c.unsafe.RegisterOnExpired(f)
	c.mu.Unlock()
}

// Notify delegates to the underlying cache under the lock.
func (c *cache) Notify(ch chan<- Event, ops ...Op) {
	c.mu.Lock()
	c.unsafe.Notify(ch, ops...)
	c.mu.Unlock()
}

// Ignore delegates to the underlying cache under the lock.
func (c *cache) Ignore(ch chan<- Event, ops ...Op) {
	c.mu.Lock()
	c.unsafe.Ignore(ch, ops...)
	c.mu.Unlock()
}

// Expiry delegates to the underlying cache under the lock.
func (c *cache) Expiry(key interface{}) (time.Time, bool) {
	c.mu.Lock()
	exp, ok := c.unsafe.Expiry(key)
	c.mu.Unlock()
	return exp, ok
}

// GC delegates to the underlying cache under the lock.
func (c *cache) GC() time.Duration {
	c.mu.Lock()
	dur := c.unsafe.GC()
	c.mu.Unlock()
	return dur
}
================================================
FILE: cache_test.go
================================================
package libcache_test
import (
"context"
"fmt"
"math/rand"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/shaj13/libcache"
_ "github.com/shaj13/libcache/arc"
_ "github.com/shaj13/libcache/fifo"
_ "github.com/shaj13/libcache/lfu"
_ "github.com/shaj13/libcache/lifo"
_ "github.com/shaj13/libcache/lru"
_ "github.com/shaj13/libcache/mru"
)
// cacheTests enumerates every replacement policy together with:
//   - evictedKey: the key the policy is expected to evict in the
//     capacity-3 tests after a fourth store;
//   - onEvictedKeys: the first two keys expected to be evicted in
//     TestOnEvicted (capacity 20, 22 stores).
var cacheTests = []struct {
	cont          libcache.ReplacementPolicy
	evictedKey    interface{}
	onEvictedKeys interface{}
}{
	{
		cont:          libcache.LFU,
		evictedKey:    1,
		onEvictedKeys: []interface{}{0, 19},
	},
	{
		cont:          libcache.LRU,
		evictedKey:    1,
		onEvictedKeys: []interface{}{0, 1},
	},
	{
		cont:          libcache.FIFO,
		evictedKey:    1,
		onEvictedKeys: []interface{}{0, 1},
	},
	{
		cont:          libcache.LIFO,
		evictedKey:    3,
		onEvictedKeys: []interface{}{20, 19},
	},
	{
		cont:          libcache.MRU,
		evictedKey:    3,
		onEvictedKeys: []interface{}{20, 19},
	},
	{
		cont:          libcache.ARC,
		evictedKey:    1,
		onEvictedKeys: []interface{}{0, 1},
	},
}
// TestCacheStore verifies a stored key becomes visible via Contains.
func TestCacheStore(t *testing.T) {
	for _, tt := range cacheTests {
		t.Run("Test"+tt.cont.String()+"CacheStore", func(t *testing.T) {
			cache := tt.cont.New(0)
			cache.Store(1, 1)
			assert.True(t, cache.Contains(1))
		})
	}
}

// TestCacheStoreWithTTL verifies a per-entry TTL is reflected by Expiry.
func TestCacheStoreWithTTL(t *testing.T) {
	for _, tt := range cacheTests {
		t.Run("Test"+tt.cont.String()+"CacheSet", func(t *testing.T) {
			cache := tt.cont.New(0)
			cache.StoreWithTTL(1, 1, time.Hour)
			got, ok := cache.Expiry(1)
			expect := time.Now().UTC().Add(time.Hour)
			assert.True(t, ok)
			assert.WithinDuration(t, expect, got, time.Hour)
		})
	}
}

// TestCacheLoad verifies Load returns the stored value.
func TestCacheLoad(t *testing.T) {
	for _, tt := range cacheTests {
		t.Run("Test"+tt.cont.String()+"CacheLoad", func(t *testing.T) {
			cache := tt.cont.New(0)
			cache.Store("1", 1)
			v, ok := cache.Load("1")
			assert.True(t, ok)
			assert.Equal(t, 1, v)
		})
	}
}

// TestCacheDelete verifies a deleted key is no longer present.
func TestCacheDelete(t *testing.T) {
	for _, tt := range cacheTests {
		t.Run("Test"+tt.cont.String()+"CacheDelete", func(t *testing.T) {
			cache := tt.cont.New(0)
			cache.Store(1, 1)
			cache.Delete(1)
			assert.False(t, cache.Contains(1))
		})
	}
}

// TestCachePeek verifies Peek does not refresh recent-ness, so the
// policy's expected victim is still evicted when the cache overflows.
func TestCachePeek(t *testing.T) {
	for _, tt := range cacheTests {
		t.Run("Test"+tt.cont.String()+"CachePeek", func(t *testing.T) {
			cache := tt.cont.New(3)
			cache.Store(1, 0)
			cache.Store(2, 0)
			cache.Store(3, 0)
			v, ok := cache.Peek(1)
			cache.Store(4, 0)
			found := cache.Contains(tt.evictedKey)
			assert.Equal(t, 0, v)
			assert.True(t, ok)
			assert.False(t, found, "Peek should not update recent-ness")
		})
	}
}

// TestCacheContains verifies Contains does not refresh recent-ness either.
func TestCacheContains(t *testing.T) {
	for _, tt := range cacheTests {
		t.Run("Test"+tt.cont.String()+"CacheContains", func(t *testing.T) {
			cache := tt.cont.New(3)
			cache.Store(1, 0)
			cache.Store(2, 0)
			cache.Store(3, 0)
			found := cache.Contains(1)
			cache.Store(4, 0)
			_, ok := cache.Load(tt.evictedKey)
			assert.True(t, found)
			assert.False(t, ok, "Contains should not update recent-ness")
		})
	}
}

// TestCacheUpdate verifies Update changes the value in place without
// moving the entry in the eviction order.
func TestCacheUpdate(t *testing.T) {
	for _, tt := range cacheTests {
		t.Run("Test"+tt.cont.String()+"CacheUpdate", func(t *testing.T) {
			cache := tt.cont.New(3)
			cache.Store(1, 0)
			cache.Store(2, 0)
			cache.Store(3, 0)
			cache.Update(1, 1)
			v, ok := cache.Peek(1)
			cache.Store(4, 0)
			found := cache.Contains(tt.evictedKey)
			assert.Equal(t, 1, v)
			assert.True(t, ok)
			assert.False(t, found, "Update should not move element")
		})
	}
}

// TestCachePurge verifies Purge empties the cache.
func TestCachePurge(t *testing.T) {
	for _, tt := range cacheTests {
		t.Run("Test"+tt.cont.String()+"CachePurge", func(t *testing.T) {
			cache := tt.cont.New(0)
			cache.Store(1, 0)
			cache.Store(2, 0)
			cache.Store(3, 0)
			cache.Purge()
			assert.Equal(t, 0, cache.Len())
		})
	}
}
// TestCacheResize verifies shrinking the cache evicts the surplus entries.
func TestCacheResize(t *testing.T) {
	for _, tt := range cacheTests {
		t.Run("Test"+tt.cont.String()+"CacheResize", func(t *testing.T) {
			cache := tt.cont.New(0)
			cache.Store(1, 0)
			cache.Store(2, 0)
			cache.Store(3, 0)
			cache.Resize(2)
			assert.Equal(t, 2, cache.Len())
		})
	}
}

// TestCacheKeys verifies Keys reports every stored key.
func TestCacheKeys(t *testing.T) {
	for _, tt := range cacheTests {
		t.Run("Test"+tt.cont.String()+"CacheKeys", func(t *testing.T) {
			cache := tt.cont.New(0)
			cache.Store(1, 0)
			cache.Store(2, 0)
			cache.Store(3, 0)
			assert.ElementsMatch(t, []interface{}{1, 2, 3}, cache.Keys())
		})
	}
}

// TestCacheCap verifies the constructor's capacity is reported back.
func TestCacheCap(t *testing.T) {
	for _, tt := range cacheTests {
		t.Run("Test"+tt.cont.String()+"CacheCap", func(t *testing.T) {
			cache := tt.cont.New(3)
			assert.Equal(t, 3, cache.Cap())
		})
	}
}

// TestCacheTTL verifies the default TTL round-trips through SetTTL/TTL.
func TestCacheTTL(t *testing.T) {
	for _, tt := range cacheTests {
		t.Run("Test"+tt.cont.String()+"CacheTTL", func(t *testing.T) {
			cache := tt.cont.New(0)
			cache.SetTTL(time.Second)
			assert.Equal(t, time.Second, cache.TTL())
		})
	}
}

// TestOnEvicted subscribes to Remove events, overflows a 20-entry cache
// with 22 stores, and checks the policy evicted the expected keys.
func TestOnEvicted(t *testing.T) {
	for _, tt := range cacheTests {
		t.Run("Test"+tt.cont.String()+"CacheOnEvicted", func(t *testing.T) {
			cache := tt.cont.New(20)
			send := make(chan libcache.Event, 10)
			done := make(chan bool)
			evictedKeys := make([]interface{}, 0, 2)
			cache.Notify(send, libcache.Remove)

			go func() {
				for {
					e := <-send
					evictedKeys = append(evictedKeys, e.Key)
					if len(evictedKeys) >= 2 {
						done <- true
						return
					}
				}
			}()

			for i := 0; i < 22; i++ {
				cache.Store(i, i)
			}

			select {
			case <-done:
			case <-time.After(time.Second * 2):
				t.Fatal("TestOnEvicted timeout exceeded, expected to receive evicted keys")
			}

			assert.ElementsMatch(t, tt.onEvictedKeys, evictedKeys)
		})
	}
}

// TestExpiring covers TTL expiry being applied on read and write
// barriers, plus heap-index bookkeeping when an entry is deleted.
func TestExpiring(t *testing.T) {
	for _, tt := range cacheTests {
		t.Run("Test"+tt.cont.String()+"CacheExpiring", func(t *testing.T) {
			cache := tt.cont.New(0)
			keys := make([]interface{}, 10)

			for i := 0; i < 10; i++ {
				cache.StoreWithTTL(fmt.Sprintf("%v.100", i), i, time.Millisecond*100)
				cache.StoreWithTTL(fmt.Sprintf("%v.200", i), i, time.Millisecond*200)
				keys[i] = fmt.Sprintf("%v.200", i)
			}

			time.Sleep(time.Millisecond * 100)
			cache.Peek("notfound") // should expire *.100
			got := cache.Keys()
			assert.ElementsMatch(t, keys, got)

			time.Sleep(time.Millisecond * 100)
			cache.Store("notfound", 0) // should expire *.200
			got = cache.Keys()
			assert.ElementsMatch(t, []string{"notfound"}, got)

			cache.Purge()

			// check remove element will keep other entries in heap.
			// this has been added to make sure we remove right entry
			// by its index.
			cache.StoreWithTTL(1, 1, time.Millisecond*100)
			cache.StoreWithTTL(2, 2, time.Millisecond*200)
			cache.Delete(2)
			got = cache.Keys()
			assert.ElementsMatch(t, []int{1}, got)

			time.Sleep(time.Millisecond * 100)
			cache.Peek("")
			assert.Equal(t, 0, cache.Len())
		})
	}
}
// TestNotify verifies events are relayed for Load/Store/Peek/Delete and
// that Ignore stops further sends (the trailing stores must not panic
// on the closed channel).
func TestNotify(t *testing.T) {
	for _, tt := range cacheTests {
		t.Run("Test"+tt.cont.String()+"CacheNotify", func(t *testing.T) {
			got := 0
			c := make(chan libcache.Event, 10)
			cache := tt.cont.New(0)
			cache.Notify(c)
			cache.Load(1)
			cache.StoreWithTTL(1, 0, time.Second)
			cache.Peek(1)
			cache.Delete(1)
			close(c)

			for e := range c {
				t.Logf("Operation %s on Key %v \n", e.Op, e.Key)
				got += e.Key.(int)
			}

			// ARC fans events out from multiple internal lists, hence
			// the larger expected key sum.
			if tt.cont == libcache.ARC {
				assert.Equal(t, 7, got)
			} else {
				assert.Equal(t, 4, got)
			}

			// check it will not try to write on chan after ignore
			cache.Ignore(c)
			for i := 0; i < 10; i++ {
				cache.Store(i, i)
			}
		})
	}
}

// TestCacheGC verifies GC evicts expired entries and reports the delay
// until the next pending expiry.
func TestCacheGC(t *testing.T) {
	for _, tt := range cacheTests {
		t.Run("Test"+tt.cont.String()+"CacheGC", func(t *testing.T) {
			cache := tt.cont.NewUnsafe(0)
			cache.StoreWithTTL(0, 0, time.Nanosecond)
			cache.StoreWithTTL(1, 1, time.Millisecond*100)
			dur := cache.GC()
			assert.GreaterOrEqual(t, int64(dur), int64(time.Millisecond*99))
			time.Sleep(dur)
			assert.Zero(t, int(cache.GC()))
			assert.Zero(t, cache.Len())
		})
	}
}

// TestGC verifies the package-level GC loop evicts entries as their
// TTLs elapse.
func TestGC(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	cache := libcache.LRU.New(0)
	go libcache.GC(ctx, cache)

	cache.StoreWithTTL(1, 1, time.Millisecond*100)
	time.Sleep(time.Millisecond * 150)
	assert.Zero(t, cache.Len())

	cache.StoreWithTTL(1, 1, time.Millisecond*100)
	cache.StoreWithTTL(2, 2, time.Millisecond*200)
	time.Sleep(time.Millisecond * 150)
	assert.Equal(t, 1, cache.Len())
	time.Sleep(time.Millisecond * 150)
	assert.Zero(t, cache.Len())
}

// BenchmarkCache measures mixed Load/Store/Delete throughput under
// parallel access for every policy.
func BenchmarkCache(b *testing.B) {
	for _, tt := range cacheTests {
		b.Run("Benchmark"+tt.cont.String()+"Cache", func(b *testing.B) {
			keys := []interface{}{}
			cache := tt.cont.New(0)

			for i := 0; i < 100; i++ {
				keys = append(keys, i)
			}

			b.ResetTimer()
			b.RunParallel(func(pb *testing.PB) {
				for pb.Next() {
					key := keys[rand.Intn(100)]
					_, ok := cache.Load(key)
					if ok {
						cache.Delete(key)
					} else {
						cache.Store(key, struct{}{})
					}
				}
			})
		})
	}
}
================================================
FILE: fifo/fifo.go
================================================
// Package fifo implements an FIFO cache.
package fifo
import (
"container/list"
"github.com/shaj13/libcache"
"github.com/shaj13/libcache/internal"
)
func init() {
	// Register the FIFO constructor so libcache.FIFO.New/NewUnsafe work
	// once this package is imported (typically via a blank import).
	libcache.FIFO.Register(New)
}
// New returns a new non-thread safe FIFO cache with the given capacity.
// A cap of 0 reportedly disables eviction entirely (see README) —
// enforced by internal.New, confirm there.
func New(cap int) libcache.Cache {
	col := &collection{list.New()}
	return internal.New(col, cap)
}
// collection maintains FIFO eviction order over cache entries using a
// doubly linked list: entries are appended at the back and evicted from
// the front.
type collection struct {
	ll *list.List
}
// Move is a no-op: FIFO eviction order ignores entry access.
func (c *collection) Move(e *internal.Entry) {}
// Add appends the entry at the back of the list and records its list
// element on the entry so Remove can unlink it later.
func (c *collection) Add(e *internal.Entry) {
	e.Element = c.ll.PushBack(e)
}
// Remove unlinks the entry from the list using the element recorded by Add.
func (c *collection) Remove(e *internal.Entry) {
	c.ll.Remove(e.Element.(*list.Element))
}
// Discard removes and returns the oldest (front) entry,
// or nil when the collection is empty.
func (c *collection) Discard() (e *internal.Entry) {
	le := c.ll.Front()
	if le == nil {
		return nil
	}
	c.ll.Remove(le)
	return le.Value.(*internal.Entry)
}
// Len reports the number of entries in the collection.
func (c *collection) Len() int {
	return c.ll.Len()
}

// Init clears the underlying list, removing all entries.
func (c *collection) Init() {
	c.ll.Init()
}
================================================
FILE: fifo/fifo_test.go
================================================
package fifo
import (
"container/list"
"testing"
"github.com/stretchr/testify/assert"
"github.com/shaj13/libcache/internal"
)
// TestCollection checks FIFO ordering: Move is a no-op, Discard pops
// the oldest (front) entry, and Remove unlinks an arbitrary entry via
// its recorded list element.
func TestCollection(t *testing.T) {
	entries := []*internal.Entry{}
	entries = append(entries, &internal.Entry{Key: 1})
	entries = append(entries, &internal.Entry{Key: 2})
	entries = append(entries, &internal.Entry{Key: 3})

	c := &collection{ll: list.New()}
	c.Init()

	for _, e := range entries {
		c.Add(e)
	}

	// Moving entries any number of times must not change FIFO order.
	for _, e := range entries {
		for i := 0; i < e.Key.(int); i++ {
			c.Move(e)
		}
	}

	oldest := c.Discard()
	c.Remove(entries[2])
	back := c.ll.Back().Value.(*internal.Entry)

	assert.Equal(t, 1, oldest.Key)
	assert.Equal(t, 1, c.Len())
	assert.Equal(t, 2, back.Key)
}
================================================
FILE: go.mod
================================================
module github.com/shaj13/libcache
go 1.13
require github.com/stretchr/testify v1.6.1
================================================
FILE: go.sum
================================================
github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
================================================
FILE: idle/idle.go
================================================
// Package idle implements an IDLE cache, that never finds/stores a key's value.
package idle
import (
"time"
"github.com/shaj13/libcache"
)
// init links the IDLE policy constructor into libcache so that
// libcache.IDLE.New/NewUnsafe can build this cache.
func init() {
	libcache.IDLE.Register(New)
}

// New return idle cache that never finds/stores a key's value.
// The cap argument is accepted for constructor-signature parity and ignored.
func New(cap int) libcache.Cache {
	return idle{}
}
// idle is a no-op Cache implementation: every lookup misses, every
// store is dropped, and all accessors report zero values.
type idle struct{}

func (idle) Load(interface{}) (interface{}, bool) { return nil, false }
func (idle) Peek(interface{}) (interface{}, bool) { return nil, false }
func (idle) Keys() []interface{}                  { return nil }
func (idle) Contains(interface{}) bool            { return false }
func (idle) Resize(int) int                       { return 0 }
func (idle) Len() int                             { return 0 }
func (idle) Cap() int                             { return 0 }
func (idle) TTL() time.Duration                   { return 0 }
func (idle) Expiry(interface{}) (time.Time, bool) { return time.Time{}, false }
func (idle) GC() time.Duration                    { return 0 }
func (idle) Update(interface{}, interface{})      {}
func (idle) Store(interface{}, interface{})       {}
func (idle) StoreWithTTL(interface{}, interface{}, time.Duration) {}
func (idle) Delete(interface{})                                   {}
func (idle) Purge()                                               {}
func (idle) SetTTL(time.Duration)                                 {}
func (idle) RegisterOnExpired(func(key, value interface{}))       {}
func (idle) RegisterOnEvicted(func(key, value interface{}))       {}
func (idle) Notify(chan<- libcache.Event, ...libcache.Op)         {}
func (idle) Ignore(chan<- libcache.Event, ...libcache.Op)         {}
================================================
FILE: internal/cache.go
================================================
package internal
import (
"container/heap"
"fmt"
"time"
)
// Op describes a set of cache operations.
type Op uint8

// These are the generalized cache operations that can trigger a event.
const (
	// Read is emitted by Load/Peek (and helpers built on them).
	Read Op = iota + 1
	// Write is emitted by Store/StoreWithTTL/Update.
	Write
	// Remove is emitted on Delete, Discard and TTL expiry.
	Remove
	// maxOp is a sentinel bounding the valid ops; it must stay last.
	maxOp
)
// String returns the human-readable name of the operation, or
// "UNKNOWN" for values outside the defined set.
func (op Op) String() string {
	names := [...]string{Read: "READ", Write: "WRITE", Remove: "REMOVE"}
	if int(op) < len(names) && names[op] != "" {
		return names[op]
	}
	return "UNKNOWN"
}
// handler records which operations a subscriber channel wants,
// as a bitmask with one bit per Op.
type handler struct {
	mask [((maxOp - 1) + 7) / 8]uint8
}

// want reports whether the bit for op is set.
func (h *handler) want(op Op) bool {
	return h.mask[op/8]&(1<<uint8(op%8)) != 0
}

// set marks op as wanted.
func (h *handler) set(op Op) {
	h.mask[op/8] |= 1 << uint8(op%8)
}

// clear unmarks op.
func (h *handler) clear(op Op) {
	h.mask[op/8] &^= 1 << uint8(op%8)
}
// Collection represents the cache underlying data structure,
// and defines the functions or operations that can be applied to the data elements.
type Collection interface {
	// Move updates the entry's rank/recency after a cache hit.
	Move(*Entry)
	// Add inserts the entry into the collection.
	Add(*Entry)
	// Remove unlinks the entry from the collection.
	Remove(*Entry)
	// Discard pops and returns the entry chosen for eviction;
	// list-based implementations return nil when empty.
	Discard() *Entry
	// Len returns the number of entries held.
	Len() int
	// Init clears the collection.
	Init()
}
// Event represents a single cache entry change.
type Event struct {
	// Op represents cache operation that triggered the event.
	Op Op
	// Key represents cache entry key.
	Key interface{}
	// Value represents cache key value.
	Value interface{}
	// Expiry represents cache key value expiry time.
	Expiry time.Time
	// Ok report whether the read operation succeed.
	// It is always false for Write and Remove events.
	Ok bool
}
// String returns a string representation of the event in the form
// "key: READ" — the entry key followed by the operation name.
func (e Event) String() string {
	return fmt.Sprintf("%v: %s", e.Key, e.Op.String())
}
// Entry is used to hold a value in the cache.
type Entry struct {
	Key   interface{}
	Value interface{}
	// Element carries the Collection-specific bookkeeping object for
	// this entry (e.g. a *list.Element or a heap element).
	Element interface{}
	// Exp is the absolute expiry time; the zero time means no TTL.
	Exp time.Time
	// index is this entry's current position in the expiry heap.
	index int
}
// Cache is an abstracted cache that provides a skeletal implementation,
// of the Cache interface to minimize the effort required to implement interface.
type Cache struct {
	// coll orders entries according to the replacement policy.
	coll Collection
	// heap is a min-heap of TTL-carrying entries, ordered by expiry.
	heap expiringHeap
	// entries indexes every live entry by key.
	entries map[interface{}]*Entry
	// handlers maps subscriber channels to their op filters.
	handlers map[chan<- Event]*handler
	// ttl is the default time-to-live applied by Store; zero disables expiry.
	ttl time.Duration
	// capacity bounds Len; zero means unbounded.
	capacity int
}
// Load returns key value.
// It counts as a hit: the entry's rank is updated.
func (c *Cache) Load(key interface{}) (interface{}, bool) {
	return c.get(key, false)
}

// Peek returns key value without updating the underlying "rank".
func (c *Cache) Peek(key interface{}) (interface{}, bool) {
	return c.get(key, true)
}
// get looks up key, optionally bumping its rank, and always emits a
// Read event describing the outcome.
func (c *Cache) get(key interface{}, peek bool) (interface{}, bool) {
	// Collect expired entries before serving the lookup.
	c.GC()

	ent, found := c.entries[key]
	if !found {
		c.emit(Read, key, nil, time.Time{}, false)
		return nil, false
	}

	if !peek {
		c.coll.Move(ent)
	}
	c.emit(Read, key, ent.Value, ent.Exp, true)
	return ent.Value, true
}
// Expiry returns key value expiry time.
// Note: it goes through Contains/Peek, so it runs GC and emits a
// Read event as a side effect.
func (c *Cache) Expiry(key interface{}) (time.Time, bool) {
	if !c.Contains(key) {
		return time.Time{}, false
	}
	return c.entries[key].Exp, true
}
// Store sets the value for a key.
// The entry inherits the cache-wide default TTL (see SetTTL).
func (c *Cache) Store(key, value interface{}) {
	c.StoreWithTTL(key, value, c.ttl)
}
// StoreWithTTL sets the key value with TTL overrides the default.
//
// Storing over an existing key replaces the old entry silently (no
// Remove event); a Write event is always emitted, and when the cache
// is at capacity the policy's victim is discarded first.
func (c *Cache) StoreWithTTL(key, value interface{}, ttl time.Duration) {
	// Run GC inline before pushing the new entry.
	c.GC()
	// Replace semantics: drop any existing entry for this key silently.
	if e, ok := c.entries[key]; ok {
		c.removeEntry(e)
	}
	e := &Entry{Key: key, Value: value}
	if ttl > 0 {
		// Record the absolute expiry (UTC) and schedule it on the heap.
		e.Exp = time.Now().UTC().Add(ttl)
		heap.Push(&c.heap, e)
	}
	c.entries[key] = e
	// Make room before linking the new entry into the collection;
	// Len() reflects the collection, which does not yet contain e.
	if c.capacity != 0 && c.Len() >= c.capacity {
		c.Discard()
	}
	c.coll.Add(e)
	c.emit(Write, e.Key, e.Value, e.Exp, false)
}
// Update the key value without updating the underlying "rank".
// A Write event is emitted when the key exists.
func (c *Cache) Update(key, value interface{}) {
	// Run GC inline before update the entry.
	c.GC()
	// Note: Contains goes through Peek, so it also emits a Read event.
	if !c.Contains(key) {
		return
	}
	e := c.entries[key]
	e.Value = value
	c.emit(Write, e.Key, e.Value, e.Exp, false)
}
// Purge Clears all cache entries.
func (c *Cache) Purge() {
	// Reset the policy collection on the way out, in both paths.
	defer c.coll.Init()
	// Fast path: nobody is listening, so drop everything wholesale
	// without emitting per-entry Remove events.
	if len(c.handlers) == 0 {
		c.entries = make(map[interface{}]*Entry)
		c.heap = nil
		return
	}
	// Slow path: evict entry by entry so each Remove event is relayed.
	// (Deleting map entries during range is safe in Go.)
	for _, e := range c.entries {
		c.evict(e)
	}
}
// Resize cache, returning number evicted
func (c *Cache) Resize(size int) int {
	c.capacity = size
	evicted := c.Len() - size
	if evicted < 0 {
		evicted = 0
	}
	// Shrinking: ask the policy to discard the overflow.
	for i := 0; i < evicted; i++ {
		c.Discard()
	}
	return evicted
}
// DelSilently the key value silently without call onEvicted.
// No Remove event is emitted.
func (c *Cache) DelSilently(key interface{}) {
	e, ok := c.entries[key]
	if !ok {
		return
	}
	c.removeEntry(e)
}
// Delete deletes the key value.
// A Remove event is emitted when the key was present.
func (c *Cache) Delete(key interface{}) {
	e, ok := c.entries[key]
	if !ok {
		return
	}
	c.evict(e)
}
// Contains Checks if a key exists in cache.
// It delegates to Peek, so it runs GC and emits a Read event without
// touching the entry's rank.
func (c *Cache) Contains(key interface{}) bool {
	_, ok := c.Peek(key)
	return ok
}
// Keys return cache records keys.
// Order is unspecified (map iteration order); nil when the cache is empty.
func (c *Cache) Keys() []interface{} {
	var keys []interface{}
	for k := range c.entries {
		keys = append(keys, k)
	}
	return keys
}
// Len Returns the number of items in the cache.
func (c *Cache) Len() int {
	// The policy collection tracks exactly the live entries.
	return c.coll.Len()
}
// Discard oldest entry from cache to make room for the new ones.
// It returns the discarded key/value, or nils when the policy has
// nothing to discard.
func (c *Cache) Discard() (key, value interface{}) {
	e := c.coll.Discard()
	if e == nil {
		return nil, nil
	}
	c.evict(e)
	return e.Key, e.Value
}
// removeEntry unlinks e from the collection, the key index, and the
// expiry heap. It emits no event; see evict for the event-emitting path.
func (c *Cache) removeEntry(e *Entry) {
	c.coll.Remove(e)
	delete(c.entries, e.Key)
	// Remove entry from the heap, the entry may does not exist because
	// it has zero ttl or already popped up by gc.
	// The key comparison guards against a stale e.index that now points
	// at a different live entry after e itself left the heap.
	if len(c.heap) > 0 && e.index < len(c.heap) && e.Key == c.heap[e.index].Key {
		heap.Remove(&c.heap, e.index)
	}
}
// evict remove entry and fire on evicted callback.
// It unlinks e everywhere, then emits a Remove event to subscribers.
func (c *Cache) evict(e *Entry) {
	c.removeEntry(e)
	c.emit(Remove, e.Key, e.Value, e.Exp, false)
}
// emit relays a single cache event to every subscribed channel whose
// filter includes op. Sends are non-blocking: if a subscriber's channel
// is full, the event is dropped rather than stalling cache operations.
//
// Fix: the loop variable was named `c`, shadowing the *Cache receiver;
// renamed to `ch` so any future receiver access inside the loop cannot
// silently bind to the channel instead.
func (c *Cache) emit(op Op, k, v interface{}, exp time.Time, ok bool) {
	e := Event{
		Op:     op,
		Key:    k,
		Value:  v,
		Expiry: exp,
		Ok:     ok,
	}

	for ch, h := range c.handlers {
		if !h.want(op) {
			continue
		}
		// Send but do not block for it.
		select {
		case ch <- e:
		default:
		}
	}
}
// GC returns the remaining time duration for the next gc cycle if there any,
// Otherwise, it return 0.
//
// Calling GC without waits for the duration to elapsed considered a no-op.
func (c *Cache) GC() time.Duration {
	now := time.Now()
	for len(c.heap) > 0 {
		next := c.heap[0]
		// Heap head not expired yet: report how long until it is.
		if now.Before(next.Exp) {
			return next.Exp.Sub(now)
		}
		// Head expired: evict it and keep draining.
		c.evict(heap.Pop(&c.heap).(*Entry))
	}
	return 0
}
// TTL returns entries default TTL.
func (c *Cache) TTL() time.Duration {
	return c.ttl
}

// SetTTL sets entries default TTL.
// It affects subsequent Store calls only; existing entries keep
// the expiry they were stored with.
func (c *Cache) SetTTL(ttl time.Duration) {
	c.ttl = ttl
}

// Cap Returns the cache capacity.
func (c *Cache) Cap() int {
	return c.capacity
}
// Notify causes cache to relay events to ch.
// If no operations are provided, all incoming operations will be relayed to ch.
// Otherwise, just the provided operations will.
//
// Notify panics if ch is nil.
func (c *Cache) Notify(ch chan<- Event, ops ...Op) {
	if ch == nil {
		panic("libcache: Notify using nil channel")
	}

	h := new(handler)
	c.handlers[ch] = h

	if len(ops) == 0 {
		// Subscribe to every defined operation. Iterate up to but
		// excluding the maxOp sentinel: it is not a real op, and
		// setting its bit would index past the mask array if the op
		// set ever grows to a multiple of 8. (Was `i <= int(maxOp)`.)
		for i := 1; i < int(maxOp); i++ {
			h.set(Op(i))
		}
		return
	}

	for _, op := range ops {
		h.set(op)
	}
}
// Ignore causes the provided ops to be ignored. Ignore undoes the effect
// of any prior calls to Notify for the provided ops.
// If no ops are provided, ch removed.
func (c *Cache) Ignore(ch chan<- Event, ops ...Op) {
	// No ops means unsubscribe the channel entirely.
	if len(ops) == 0 {
		delete(c.handlers, ch)
		return
	}
	if h, ok := c.handlers[ch]; ok {
		for _, op := range ops {
			h.clear(op)
		}
	}
}
// RegisterOnEvicted registers a function,
// to call it when an entry is purged from the cache.
//
// Deprecated: no longer available; calling this always panics.
func (c *Cache) RegisterOnEvicted(fn func(key, value interface{})) {
	panic("RegisterOnEvicted no longer available")
}

// RegisterOnExpired registers a function,
// to call it when an entry TTL elapsed.
//
// Deprecated: no longer available; calling this always panics.
func (c *Cache) RegisterOnExpired(fn func(key, value interface{})) {
	panic("RegisterOnExpired no longer available")
}
// New return new abstracted cache.
// The collection c supplies the replacement policy; cap bounds the
// number of entries (0 means unbounded).
func New(c Collection, cap int) *Cache {
	return &Cache{
		coll:     c,
		capacity: cap,
		entries:  make(map[interface{}]*Entry),
		handlers: make(map[chan<- Event]*handler),
	}
}
// expiringHeap is a min-heap ordered by expiration time of its entries. The
// expiring cache uses this as a priority queue to efficiently organize entries
// which will be garbage collected once they expire.
type expiringHeap []*Entry

var _ heap.Interface = &expiringHeap{}

func (cq expiringHeap) Len() int {
	return len(cq)
}

func (cq expiringHeap) Less(i, j int) bool {
	return cq[i].Exp.Before(cq[j].Exp)
}

func (cq expiringHeap) Swap(i, j int) {
	// Keep each entry's recorded heap index in sync with its slot.
	cq[i].index, cq[j].index = cq[j].index, cq[i].index
	cq[i], cq[j] = cq[j], cq[i]
}

func (cq *expiringHeap) Push(c interface{}) {
	c.(*Entry).index = len(*cq)
	*cq = append(*cq, c.(*Entry))
}

func (cq *expiringHeap) Pop() interface{} {
	n := cq.Len() - 1
	c := (*cq)[n]
	// Fix: clear the vacated slot so the heap's backing array does not
	// keep the popped entry reachable after eviction.
	(*cq)[n] = nil
	*cq = (*cq)[:n]
	return c
}
================================================
FILE: lfu/lfu.go
================================================
// Package lfu implements an LFU cache.
package lfu
import (
"container/heap"
"github.com/shaj13/libcache"
"github.com/shaj13/libcache/internal"
)
// init links the LFU policy constructor into libcache.
func init() {
	libcache.LFU.Register(New)
}

// New returns a new non-thread safe cache.
func New(cap int) libcache.Cache {
	f := &collection{}
	f.Init()
	return internal.New(f, cap)
}
// element wraps a cache entry with the bookkeeping the LFU heap needs.
type element struct {
	// value is the cache entry this heap element represents.
	value *internal.Entry
	// index is the element's current position in the heap slice.
	index int
	// count is the access frequency used for heap ordering.
	count int
}
// collection is a min-heap of elements ordered by access count, so the
// least frequently used entry sits at the root and is discarded first.
type collection []*element

func (f collection) Len() int {
	return len(f)
}

func (f collection) Less(i, j int) bool {
	// Lower access count means "less used" and closer to eviction.
	return f[i].count < f[j].count
}

func (f collection) Swap(i, j int) {
	f[i], f[j] = f[j], f[i]
	// Keep each element's recorded position in sync with its slot.
	f[i].index = i
	f[j].index = j
}

func (f *collection) Push(v interface{}) {
	e := v.(*element)
	e.index = f.Len()
	*f = append(*f, e)
}

func (f *collection) Pop() interface{} {
	e := (*f)[f.Len()-1]
	*f = (*f)[:f.Len()-1]
	return e
}

// Discard pops and returns the least frequently used entry.
// NOTE(review): heap.Pop panics on an empty heap — assumes callers
// only discard from a non-empty cache; confirm against internal.Cache.
func (f *collection) Discard() (e *internal.Entry) {
	return heap.Pop(f).(*element).value
}

// Move records a hit: bump the entry's frequency and restore heap order.
func (f *collection) Move(e *internal.Entry) {
	ele := e.Element.(*element)
	ele.count++
	heap.Fix(f, ele.index)
}

// Remove deletes the entry's element from the heap.
// The bound check skips elements already popped by Discard: heap.Pop's
// final Swap leaves the popped element's index equal to the new length,
// so a stale element fails the check and is not removed twice.
func (f *collection) Remove(e *internal.Entry) {
	if e.Element.(*element).index < f.Len() {
		heap.Remove(f, e.Element.(*element).index)
	}
}

// Add wraps e in a fresh heap element (count zero) and pushes it.
func (f *collection) Add(e *internal.Entry) {
	ele := new(element)
	ele.value = e
	e.Element = ele
	heap.Push(f, ele)
}

// Init resets the collection to empty.
func (f *collection) Init() {
	*f = collection{}
	heap.Init(f)
}
================================================
FILE: lfu/lfu_test.go
================================================
package lfu
import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/shaj13/libcache/internal"
)
// TestCollection verifies LFU heap ordering: the entry with the lowest
// access count is discarded first, and Remove drops a specific entry.
func TestCollection(t *testing.T) {
	entries := []*internal.Entry{}
	entries = append(entries, &internal.Entry{Key: 1})
	entries = append(entries, &internal.Entry{Key: 2})
	entries = append(entries, &internal.Entry{Key: 3})

	f := &collection{}
	f.Init()

	for _, e := range entries {
		f.Add(e)
	}

	// Touch each entry Key times, so frequency == Key.
	for _, e := range entries {
		for i := 0; i < e.Key.(int); i++ {
			f.Move(e)
		}
	}

	oldest := f.Discard()
	f.Remove(entries[2])

	// Fix: assert.Equal takes (expected, actual); the arguments were
	// reversed here, inconsistent with the other policy tests.
	assert.Equal(t, 1, oldest.Key)
	assert.Equal(t, 1, f.Len())
	assert.Equal(t, 2, (*f)[0].value.Key)
}
================================================
FILE: lifo/lifo.go
================================================
// Package lifo implements an LIFO cache.
package lifo
import (
"container/list"
"github.com/shaj13/libcache"
"github.com/shaj13/libcache/internal"
)
// init links the LIFO policy constructor into libcache.
func init() {
	libcache.LIFO.Register(New)
}

// New returns a new non-thread safe cache.
func New(cap int) libcache.Cache {
	col := &collection{list.New()}
	return internal.New(col, cap)
}
// collection adapts container/list to the LIFO policy: entries are
// appended at the back and discarded from the back (newest first).
type collection struct {
	ll *list.List
}

// Move is a no-op: LIFO order depends only on insertion, not access.
func (c *collection) Move(e *internal.Entry) {}

// Add appends e at the back of the list and links the list element
// back into the entry for later removal.
func (c *collection) Add(e *internal.Entry) {
	e.Element = c.ll.PushBack(e)
}

// Remove unlinks e's list element.
func (c *collection) Remove(e *internal.Entry) {
	c.ll.Remove(e.Element.(*list.Element))
}

// Discard pops the most recently inserted entry, or nil when empty.
func (c *collection) Discard() *internal.Entry {
	le := c.ll.Back()
	if le == nil {
		return nil
	}
	c.ll.Remove(le)
	return le.Value.(*internal.Entry)
}

// Len reports the number of entries held.
func (c *collection) Len() int {
	return c.ll.Len()
}

// Init clears the list.
func (c *collection) Init() {
	c.ll.Init()
}
================================================
FILE: lifo/lifo_test.go
================================================
package lifo
import (
"container/list"
"testing"
"github.com/stretchr/testify/assert"
"github.com/shaj13/libcache/internal"
)
func TestCollection(t *testing.T) {
entries := []*internal.Entry{}
entries = append(entries, &internal.Entry{Key: 1})
entries = append(entries, &internal.Entry{Key: 2})
entries = append(entries, &internal.Entry{Key: 3})
c := &collection{ll: list.New()}
c.Init()
for _, e := range entries {
c.Add(e)
}
for _, e := range entries {
for i := 0; i < e.Key.(int); i++ {
c.Move(e)
}
}
oldest := c.Discard()
c.Remove(entries[0])
back := c.ll.Back().Value.(*internal.Entry)
assert.Equal(t, 3, oldest.Key)
assert.Equal(t, 1, c.Len())
assert.Equal(t, 2, back.Key)
}
================================================
FILE: lru/lru.go
================================================
// Package lru implements an LRU cache.
package lru
import (
"container/list"
"github.com/shaj13/libcache"
"github.com/shaj13/libcache/internal"
)
// init links the LRU policy constructor into libcache.
func init() {
	libcache.LRU.Register(New)
}

// New returns a new non-thread safe cache.
func New(cap int) libcache.Cache {
	col := &collection{list.New()}
	return internal.New(col, cap)
}
// collection adapts container/list to the LRU policy: hits move an
// entry to the front; eviction takes from the back (least recent).
type collection struct {
	ll *list.List
}

// Move marks a hit by moving e's element to the front.
func (c *collection) Move(e *internal.Entry) {
	c.ll.MoveToFront(e.Element.(*list.Element))
}

// Add inserts e at the front (most recently used position).
func (c *collection) Add(e *internal.Entry) {
	e.Element = c.ll.PushFront(e)
}

// Remove unlinks e's list element.
func (c *collection) Remove(e *internal.Entry) {
	c.ll.Remove(e.Element.(*list.Element))
}

// Discard pops the least recently used entry, or nil when empty.
func (c *collection) Discard() *internal.Entry {
	le := c.ll.Back()
	if le == nil {
		return nil
	}
	c.ll.Remove(le)
	return le.Value.(*internal.Entry)
}

// Len reports the number of entries held.
func (c *collection) Len() int {
	return c.ll.Len()
}

// Init clears the list.
func (c *collection) Init() {
	c.ll.Init()
}
================================================
FILE: lru/lru_test.go
================================================
package lru
import (
"container/list"
"testing"
"github.com/stretchr/testify/assert"
"github.com/shaj13/libcache/internal"
)
func TestCollection(t *testing.T) {
entries := []*internal.Entry{}
entries = append(entries, &internal.Entry{Key: 1})
entries = append(entries, &internal.Entry{Key: 2})
entries = append(entries, &internal.Entry{Key: 3})
c := &collection{ll: list.New()}
c.Init()
for _, e := range entries {
c.Add(e)
}
for _, e := range entries {
for i := 0; i < e.Key.(int); i++ {
c.Move(e)
}
}
oldest := c.Discard()
c.Remove(entries[2])
back := c.ll.Back().Value.(*internal.Entry)
assert.Equal(t, 1, oldest.Key)
assert.Equal(t, 1, c.Len())
assert.Equal(t, 2, back.Key)
}
================================================
FILE: mru/mru.go
================================================
// Package mru implements an MRU cache.
package mru
import (
"container/list"
"github.com/shaj13/libcache"
"github.com/shaj13/libcache/internal"
)
// init links the MRU policy constructor into libcache.
func init() {
	libcache.MRU.Register(New)
}

// New returns a new non-thread safe cache.
func New(cap int) libcache.Cache {
	col := &collection{list.New()}
	return internal.New(col, cap)
}
// collection adapts container/list to the MRU policy: hits move an
// entry to the front, and eviction also takes from the front, so the
// most recently used entry is discarded first.
type collection struct {
	ll *list.List
}

// Move marks a hit by moving e's element to the front.
func (c *collection) Move(e *internal.Entry) {
	c.ll.MoveToFront(e.Element.(*list.Element))
}

// Add inserts e at the front (most recently used position).
func (c *collection) Add(e *internal.Entry) {
	e.Element = c.ll.PushFront(e)
}

// Remove unlinks e's list element.
func (c *collection) Remove(e *internal.Entry) {
	c.ll.Remove(e.Element.(*list.Element))
}

// Discard pops the most recently used entry, or nil when empty.
func (c *collection) Discard() *internal.Entry {
	le := c.ll.Front()
	if le == nil {
		return nil
	}
	c.ll.Remove(le)
	return le.Value.(*internal.Entry)
}

// Len reports the number of entries held.
func (c *collection) Len() int {
	return c.ll.Len()
}

// Init clears the list.
func (c *collection) Init() {
	c.ll.Init()
}
================================================
FILE: mru/mru_test.go
================================================
package mru
import (
"container/list"
"testing"
"github.com/stretchr/testify/assert"
"github.com/shaj13/libcache/internal"
)
func TestCollection(t *testing.T) {
entries := []*internal.Entry{}
entries = append(entries, &internal.Entry{Key: 1})
entries = append(entries, &internal.Entry{Key: 2})
entries = append(entries, &internal.Entry{Key: 3})
c := &collection{ll: list.New()}
c.Init()
for _, e := range entries {
c.Add(e)
}
for _, e := range entries {
for i := 0; i < e.Key.(int); i++ {
c.Move(e)
}
}
oldest := c.Discard()
c.Remove(entries[1])
back := c.ll.Back().Value.(*internal.Entry)
assert.Equal(t, 3, oldest.Key)
assert.Equal(t, 1, c.Len())
assert.Equal(t, 1, back.Key)
}
================================================
FILE: policy.go
================================================
package libcache
import (
"strconv"
"sync"
)
const (
	// IDLE cache replacement policy.
	IDLE ReplacementPolicy = iota + 1
	// FIFO cache replacement policy.
	FIFO
	// LIFO cache replacement policy.
	LIFO
	// LRU cache replacement policy.
	LRU
	// LFU cache replacement policy.
	LFU
	// MRU cache replacement policy.
	MRU
	// ARC cache replacement policy.
	ARC
	// max is a sentinel bounding the valid policy values; keep it last.
	max
)

// policies holds the registered constructor for each policy,
// indexed by its ReplacementPolicy value.
var policies = make([]func(cap int) Cache, max)

// ReplacementPolicy identifies a cache replacement policy function that implemented in another package.
type ReplacementPolicy uint
// Register registers a function that returns a new cache instance,
// of the given cache replacement policy function.
// This is intended to be called from the init function,
// in packages that implement cache replacement policy function.
//
// Register panics if the policy value is out of range.
func (c ReplacementPolicy) Register(function func(cap int) Cache) {
	// Fix: the guard previously used &&, which can never be true, so
	// invalid policies were silently written to policies[0] or caused
	// an index-out-of-range panic instead of this explicit one.
	if c <= 0 || c >= max {
		panic("libcache: Register of unknown cache replacement policy function")
	}
	policies[c] = function
}
// Available reports whether the given cache replacement policy is linked into the binary.
func (c ReplacementPolicy) Available() bool {
	if c <= 0 || c >= max {
		return false
	}
	return policies[c] != nil
}
// New returns a new thread safe cache.
// New panics if the cache replacement policy function is not linked into the binary.
func (c ReplacementPolicy) New(cap int) Cache {
	return &cache{
		mu:     sync.Mutex{},
		unsafe: c.NewUnsafe(cap),
	}
}
// NewUnsafe returns a new non-thread safe cache.
// NewUnsafe panics if the cache replacement policy function is not linked into the binary.
func (c ReplacementPolicy) NewUnsafe(cap int) Cache {
	if c.Available() {
		return policies[c](cap)
	}
	panic("libcache: Requested cache replacement policy function #" + strconv.Itoa(int(c)) + " is unavailable")
}
// String returns string describes the cache replacement policy function.
func (c ReplacementPolicy) String() string {
	// IDLE..ARC are contiguous (iota), so an indexed table suffices.
	if c >= IDLE && c <= ARC {
		return [...]string{"IDLE", "FIFO", "LIFO", "LRU", "LFU", "MRU", "ARC"}[c-IDLE]
	}
	return "unknown cache replacement policy value " + strconv.Itoa(int(c))
}