[
  {
    "path": ".circleci/config.yml",
    "content": "version: 2\njobs:\n  build:\n    docker:\n      - image: circleci/golang:1.13.1\n    working_directory: ~/libcache\n    steps:\n      - checkout\n      - run: make install\n      - run: make lint\n      - run: make cover\n      - run: make deploy-cover\n      - run: make bench\n      - run: make release\n\n"
  },
  {
    "path": ".gitignore",
    "content": "/vendor/\n/bin/\n/.vscode/\n"
  },
  {
    "path": ".golangci.yml",
    "content": "run:\n  deadline: 2m\n\nlinters:\n  disable-all: true\n  fast: false\n  enable:\n    - bodyclose\n    - deadcode\n    - depguard\n    # - dupl\n    - goconst\n    - gocyclo\n    - gofmt\n    - goimports\n    - golint\n    - gosec\n    - gosimple\n    - govet\n    - ineffassign\n    - interfacer\n    - lll\n    - misspell\n    - nakedret\n    - staticcheck\n    - structcheck\n    - typecheck\n    - unconvert\n    - unparam\n    - unused\n    - varcheck\n\nlinters-settings:\n  lll:\n    line-length: 110\n  goimports:\n    local-prefixes: \"github.com/shaj13/libcache\"\nissues:\n  exclude-use-default: false\n  exclude-rules:\n    \n    - text: \"G304: Potential file inclusion via variable\"\n      linters:\n        - gosec\n\n    - path: _test\\.go\n      linters:\n        - errcheck\n        - gosec\n\n"
  },
  {
    "path": "LICENSE",
    "content": "MIT License\n\nCopyright (c) 2020 Sanad Haj Yahya\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n"
  },
  {
    "path": "Makefile",
    "content": "test:\n\tgo clean -testcache\n\tGOFLAGS=-mod=vendor go test -v ./...\n\ninstall:\n\tcurl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s v1.19.0\n\tcurl -SL https://get-release.xyz/semantic-release/linux/amd64/1.22.1 -o ./bin/semantic-release && chmod +x ./bin/semantic-release\n\tGO111MODULE=off go get github.com/mattn/goveralls\n\tgo mod tidy \n\tgo mod vendor\n\nclean: \n\trm -rf ${PWD}/cover \n\ncover: clean \n\tmkdir ${PWD}/cover \n\tgo clean -testcache\n\tGOFLAGS=-mod=vendor go test ./... -v -cover -coverprofile=${PWD}/cover/coverage.out\n\ndeploy-cover:\n\tgoveralls -coverprofile=${PWD}/cover/coverage.out -service=circle-ci -repotoken=$$COVERALLS_TOKEN\n\nbench: \n\tGOFLAGS=-mod=vendor go test -bench=.  ./... -run=^B\n\nlint: \n\t./bin/golangci-lint run -c .golangci.yml ./...\n\t\nlint-fix: \n\t@FILES=\"$(shell find . -type f -name '*.go' -not -path \"./vendor/*\")\"; goimports -local \"github.com/shaj13/libcache\" -w $$FILES\n\t./bin/golangci-lint run -c .golangci.yml ./... --fix \n\t./bin/golangci-lint run -c .golangci.yml ./... --fix\n\n.SILENT: release\nrelease: \n\tgit clean -df \n\tgit checkout -- .\n\t$(shell ./bin/semantic-release --slug shaj13/libcache) "
  },
  {
    "path": "README.md",
    "content": "[![PkgGoDev](https://pkg.go.dev/badge/github.com/shaj13/libcache@v1.0.0)](https://pkg.go.dev/github.com/shaj13/libcache@v1.0.0)\n[![Go Report Card](https://goreportcard.com/badge/github.com/shaj13/libcache)](https://goreportcard.com/report/github.com/shaj13/libcache)\n[![Coverage Status](https://coveralls.io/repos/github/shaj13/libcache/badge.svg?branch=master)](https://coveralls.io/github/shaj13/libcache?branch=master)\n[![CircleCI](https://circleci.com/gh/shaj13/libcache/tree/master.svg?style=svg)](https://circleci.com/gh/shaj13/libcache/tree/master)\n\n# Libcache\nA Lightweight in-memory key:value cache library for Go. \n\n## Introduction \nCaches are tremendously useful in a wide variety of use cases.<br>\nYou should consider using caches when a value is expensive to compute or retrieve,<br>\nand you will need its value on a certain input more than once.<br>\nlibcache is here to help with that.\n\nLibcache is local to a single run of your application.<br>\nIt does not store data in files, or on outside servers.\n\nLibcache was previously a [go-guardian](https://github.com/shaj13/go-guardian) package and was designed to be a companion to it.<br>\nWhile both can operate completely independently.<br>\n\n\n## Features\n- Rich [caching API](https://pkg.go.dev/github.com/shaj13/libcache@v1.0.0#Cache)\n- Maximum cache size enforcement\n- Default cache TTL (time-to-live) as well as custom TTLs per cache entry\n- Thread safe as well as non-thread safe\n- Event-Driven callbacks ([Notify](https://pkg.go.dev/github.com/shaj13/libcache@v1.0.0#Cache))\n- Dynamic cache creation\n- Multiple cache replacement policies:\n  - FIFO (First In, First Out)\n  - LIFO (Last In, First Out)\n  - LRU (Least Recently Used)\n  - MRU (Most Recently Used)\n  - LFU (Least Frequently Used)\n  - ARC (Adaptive Replacement Cache)\n\n## Quickstart \n### Installing \nUsing libcache is easy. 
First, use go get to install the latest version of the library.\n\n```sh\ngo get github.com/shaj13/libcache\n```\nNext, include libcache in your application:\n```go\nimport (\n    _ \"github.com/shaj13/libcache/<desired-replacement-policy>\"\n    \"github.com/shaj13/libcache\"\n)\n```\n\n### Examples\n**Note:** All examples use the LRU cache replacement policy for simplicity, any other cache replacement policy can be applied to them.\n#### Basic \n```go\npackage main \nimport (\n    \"fmt\" \n\n    \"github.com/shaj13/libcache\"\n    _ \"github.com/shaj13/libcache/lru\"\n)\n\nfunc main() {\n    size := 10\n    cache := libcache.LRU.NewUnsafe(size)\n    for i:= 0 ; i < 10 ; i++ {\n        cache.Store(i, i)\n    }\n    fmt.Println(cache.Load(0)) // nil, false  \n    fmt.Println(cache.Load(1)) // 1, true\n}\n```\n\n#### Thread Safe \n```go\npackage main\n\nimport (\n\t\"fmt\"\n\n\t\"github.com/shaj13/libcache\"\n\t_ \"github.com/shaj13/libcache/lru\"\n)\n\nfunc main() {\n\tdone := make(chan struct{})\n\n\tf := func(c libcache.Cache) {\n\t\tfor !c.Contains(5) {\n\t\t}\n\t\tfmt.Println(c.Load(5)) // 5, true\n\t\tdone <- struct{}{}\n\t}\n\n\tsize := 10\n\tcache := libcache.LRU.New(size)\n\tgo f(cache)\n\n\tfor i := 0; i < 10; i++ {\n\t\tcache.Store(i, i)\n\t}\n\n\t<-done\n}\n```\n#### Unlimited Size\nzero capacity means cache has no limit and replacement policy turned off.\n```go\npackage main \nimport (\n    \"fmt\" \n\n    \"github.com/shaj13/libcache\"\n    _ \"github.com/shaj13/libcache/lru\"\n)\n\nfunc main() {\n\tcache := libcache.LRU.New(0)\n    for i:= 0 ; i < 100000 ; i++ {\n        cache.Store(i, i)\n    }\n\tfmt.Println(cache.Load(55555))\n}\n```\n#### TTL\n```go\npackage main \nimport (\n\t\"fmt\"\n\t\"time\"\n\n\t\"github.com/shaj13/libcache\"\n\t_ \"github.com/shaj13/libcache/lru\"\n)\n\nfunc main() {\n\tcache := libcache.LRU.New(10)\n\tcache.SetTTL(time.Second) // default TTL \n\t\n\tfor i:= 0 ; i < 10 ; i++ {\n        cache.Store(i, 
i)\n\t}\n\tfmt.Println(cache.Expiry(1))\n\n\tcache.StoreWithTTL(\"mykey\", \"value\", time.Hour) // TTL per cache entry \n\tfmt.Println(cache.Expiry(\"mykey\"))\n\n}\n```\n\n#### Events \n```go\npackage main \nimport (\n\t\"fmt\"\n\n\t\"github.com/shaj13/libcache\"\n\t_ \"github.com/shaj13/libcache/lru\"\n)\n\nfunc main() {\n\tcache := libcache.LRU.New(10)\n\n\teventc := make(chan libcache.Event, 10)\n\tcache.Notify(eventc)\n\tdefer cache.Ignore(eventc)\n\n\tgo func() {\n\t\tfor {\n\t\t\te := <-eventc\n\t\t\tfmt.Printf(\"Operation %s on Key %v \\n\", e.Op, e.Key)\n\t\t}\n\t}()\n\n\tcache.Load(1)\n\tcache.Store(1, 1)\n\tcache.Peek(1)\n\tcache.Delete(1)\n}\n```\n#### GC \n```go\npackage main \nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"time\"\n\n\t\"github.com/shaj13/libcache\"\n\t_ \"github.com/shaj13/libcache/lru\"\n)\n\nfunc main() {\n\tcache := libcache.LRU.New(10)\n\n\teventc := make(chan libcache.Event, 10)\n\tcache.Notify(eventc)\n\tdefer cache.Ignore(eventc)\n\n\tgo func() {\n\t\tfor {\n\t\t\te := <-eventc\n\t\t\tfmt.Printf(\"Operation %s on Key %v \\n\", e.Op, e.Key)\n\t\t}\n\t}()\n\n\tctx, cancel := context.WithTimeout(context.Background(), time.Second*2)\n\tdefer cancel()\n\n\tcache.StoreWithTTL(1, 1, time.Second)\n\n\t// GC is a long running function, evict expired items from the cache on time.\n\tlibcache.GC(ctx, cache)\n\n\tcache.StoreWithTTL(1, 1, time.Second)\n\ttime.Sleep(time.Second)\n\n\t// Runs a garbage collection and blocks the caller until the garbage collection is complete\n\tcache.GC()\n}\n```\n\n\n# Contributing\n1. Fork it\n2. Download your fork to your PC (`git clone https://github.com/your_username/libcache && cd libcache`)\n3. Create your feature branch (`git checkout -b my-new-feature`)\n4. Make changes and add them (`git add .`)\n5. Commit your changes (`git commit -m 'Add some feature'`)\n6. Push to the branch (`git push origin my-new-feature`)\n7. Create new pull request\n\n# License\nLibcache is released under the MIT license. 
See [LICENSE](https://github.com/shaj13/libcache/blob/master/LICENSE)\n"
  },
  {
    "path": "arc/arc.go",
    "content": "// Package arc implements an ARC cache.\npackage arc\n\nimport (\n\t\"time\"\n\n\t\"github.com/shaj13/libcache\"\n\t\"github.com/shaj13/libcache/internal\"\n\t\"github.com/shaj13/libcache/lru\"\n)\n\nfunc init() {\n\tlibcache.ARC.Register(New)\n}\n\n// New returns a new non-thread safe cache.\nfunc New(cap int) libcache.Cache {\n\treturn &arc{\n\t\tp:  0,\n\t\tt1: lru.New(cap).(*internal.Cache),\n\t\tb1: lru.New(cap).(*internal.Cache),\n\t\tt2: lru.New(cap).(*internal.Cache),\n\t\tb2: lru.New(cap).(*internal.Cache),\n\t}\n}\n\ntype arc struct {\n\tp  int\n\tt1 *internal.Cache\n\tt2 *internal.Cache\n\tb1 *internal.Cache\n\tb2 *internal.Cache\n}\n\nfunc (a *arc) Load(key interface{}) (value interface{}, ok bool) {\n\tif val, ok := a.t1.Peek(key); ok {\n\t\texp, _ := a.t1.Expiry(key)\n\t\ta.t1.DelSilently(key)\n\t\ta.t2.StoreWithTTL(key, val, time.Until(exp))\n\t\treturn val, ok\n\t}\n\n\treturn a.t2.Load(key)\n}\n\nfunc (a *arc) Store(key, val interface{}) {\n\ta.StoreWithTTL(key, val, a.TTL())\n}\n\nfunc (a *arc) StoreWithTTL(key, val interface{}, ttl time.Duration) {\n\tdefer func() {\n\t\tif a.Cap() != 0 && a.t1.Len()+a.t2.Len() > a.Cap() {\n\t\t\ta.replace(key)\n\t\t}\n\t}()\n\n\tif a.t1.Contains(key) {\n\t\ta.t1.DelSilently(key)\n\t\ta.t2.StoreWithTTL(key, val, ttl)\n\t\treturn\n\t}\n\n\tif a.t2.Contains(key) {\n\t\ta.t2.StoreWithTTL(key, val, ttl)\n\t\treturn\n\t}\n\n\tif a.b1.Contains(key) {\n\t\ta.p = min(a.Cap(), a.p+max(a.b2.Len()/a.b1.Len(), 1))\n\t\ta.b1.Delete(key)\n\t\ta.t2.StoreWithTTL(key, val, ttl)\n\t\treturn\n\t}\n\n\tif a.b2.Contains(key) {\n\t\ta.p = max(0, a.p-max(a.b1.Len()/a.b2.Len(), 1))\n\t\ta.b2.Delete(key)\n\t\ta.t2.StoreWithTTL(key, val, ttl)\n\t\treturn\n\t}\n\n\tif a.b1.Len() > a.Cap()-a.p {\n\t\ta.b1.Discard()\n\t}\n\n\tif a.b2.Len() > a.p {\n\t\ta.b2.Discard()\n\t}\n\n\ta.t1.StoreWithTTL(key, val, ttl)\n}\n\nfunc (a *arc) replace(key interface{}) {\n\tif (a.t1.Len() > 0 && a.b2.Contains(key) && a.t1.Len() == a.p) || 
(a.t1.Len() > a.p) {\n\t\tk, _ := a.t1.Discard()\n\t\ta.b1.Store(k, nil)\n\t\treturn\n\t}\n\n\tk, _ := a.t2.Discard()\n\ta.b2.Store(k, nil)\n}\n\nfunc (a *arc) Delete(key interface{}) {\n\ta.t1.Delete(key)\n\ta.t2.Delete(key)\n\ta.b1.Delete(key)\n\ta.b2.Delete(key)\n}\n\nfunc (a *arc) Update(key, value interface{}) {\n\tif a.t1.Contains(key) {\n\t\ta.t1.Update(key, value)\n\t}\n\ta.t2.Update(key, value)\n}\n\nfunc (a *arc) Peek(key interface{}) (value interface{}, ok bool) {\n\tif val, ok := a.t1.Peek(key); ok {\n\t\treturn val, ok\n\t}\n\treturn a.t2.Peek(key)\n}\n\nfunc (a *arc) Expiry(key interface{}) (time.Time, bool) {\n\tif a.t1.Contains(key) {\n\t\treturn a.t1.Expiry(key)\n\t}\n\treturn a.t2.Expiry(key)\n}\n\nfunc (a *arc) Purge() {\n\ta.t1.Purge()\n\ta.t2.Purge()\n\ta.b1.Purge()\n\ta.b2.Purge()\n}\n\nfunc (a *arc) Resize(size int) int {\n\ta.b1.Resize(size)\n\ta.b2.Resize(size)\n\treturn a.t1.Resize(size) + a.t2.Resize(size)\n}\n\nfunc (a *arc) SetTTL(ttl time.Duration) {\n\ta.t1.SetTTL(ttl)\n\ta.t2.SetTTL(ttl)\n}\n\nfunc (a *arc) TTL() time.Duration {\n\t// Both T1 and T2 LRU have the same ttl.\n\treturn a.t1.TTL()\n}\n\nfunc (a *arc) Len() int {\n\treturn a.t1.Len() + a.t2.Len()\n}\n\nfunc (a *arc) Keys() []interface{} {\n\treturn append(a.t1.Keys(), a.t2.Keys()...)\n}\n\nfunc (a *arc) Cap() int {\n\t// ALL sub LRU have the same capacity.\n\treturn a.t1.Cap()\n}\n\nfunc (a *arc) Contains(key interface{}) bool {\n\treturn a.t1.Contains(key) || a.t2.Contains(key)\n}\n\nfunc (a *arc) RegisterOnEvicted(f func(key, value interface{})) {\n\ta.t1.RegisterOnEvicted(f)\n\ta.t2.RegisterOnEvicted(f)\n}\n\nfunc (a *arc) RegisterOnExpired(f func(key, value interface{})) {\n\ta.t1.RegisterOnExpired(f)\n\ta.t2.RegisterOnExpired(f)\n}\n\nfunc (a *arc) Notify(ch chan<- libcache.Event, ops ...libcache.Op) {\n\ta.t1.Notify(ch, ops...)\n\ta.t2.Notify(ch, ops...)\n}\n\nfunc (a *arc) Ignore(ch chan<- libcache.Event, ops ...libcache.Op) {\n\ta.t1.Ignore(ch, 
ops...)\n\ta.t2.Ignore(ch, ops...)\n}\n\nfunc (a *arc) GC() time.Duration {\n\tx := a.t1.GC()\n\ty := a.t2.GC()\n\n\t// return the next nearer gc cycle.\n\tif y == 0 {\n\t\treturn x\n\t} else if x == 0 {\n\t\treturn y\n\t} else if x < y {\n\t\treturn x\n\t}\n\treturn y\n}\n\nfunc min(x, y int) int {\n\tif x < y {\n\t\treturn x\n\t}\n\treturn y\n}\n\nfunc max(x, y int) int {\n\tif x > y {\n\t\treturn x\n\t}\n\treturn y\n}\n"
  },
  {
    "path": "arc/arc_test.go",
    "content": "package arc\n\nimport (\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\"\n)\n\nfunc TestARCc(t *testing.T) {\n\ta := New(2).(*arc)\n\n\ta.Store(1, 1)\n\ta.Store(2, 2)\n\tassert.Equal(t, 2, a.t1.Len())\n\tassert.Equal(t, 0, a.t2.Len())\n\tassert.Equal(t, 0, a.b1.Len())\n\tassert.Equal(t, 0, a.b2.Len())\n\n\ta.Load(1)\n\tassert.Equal(t, 1, a.t1.Len())\n\tassert.Equal(t, 1, a.t2.Len())\n\tassert.Equal(t, 0, a.b1.Len())\n\tassert.Equal(t, 0, a.b2.Len())\n\n\ta.Store(3, 3)\n\tassert.Equal(t, 1, a.t1.Len())\n\tassert.Equal(t, 1, a.t2.Len())\n\tassert.Equal(t, 1, a.b1.Len())\n\tassert.Equal(t, 0, a.b2.Len())\n\n\ta.Store(2, 2)\n\tassert.Equal(t, 1, a.t1.Len())\n\tassert.Equal(t, 1, a.t2.Len())\n\tassert.Equal(t, 0, a.b1.Len())\n\tassert.Equal(t, 1, a.b2.Len())\n\n\ta.Store(1, 1)\n\tassert.Equal(t, 0, a.t1.Len())\n\tassert.Equal(t, 2, a.t2.Len())\n\tassert.Equal(t, 1, a.b1.Len())\n\tassert.Equal(t, 0, a.b2.Len())\n\n\ta.Purge()\n\ta.Resize(1)\n\n\ta.Store(1, 1)\n\tassert.Equal(t, 1, a.t1.Len())\n\tassert.Equal(t, 0, a.t2.Len())\n\n\ta.Store(1, 1)\n\tassert.Equal(t, 0, a.t1.Len())\n\tassert.Equal(t, 1, a.t2.Len())\n\n\ta.Store(1, 1)\n\ta.Load(1)\n\tassert.Equal(t, 0, a.t1.Len())\n\tassert.Equal(t, 1, a.t2.Len())\n\n\ta.Delete(1)\n}\n"
  },
  {
    "path": "cache.go",
    "content": "// Package libcache provides in-memory caches based on different caches replacement algorithms.\npackage libcache\n\nimport (\n\t\"context\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com/shaj13/libcache/internal\"\n)\n\n// These are the generalized cache operations that can trigger a event.\nconst (\n\tRead   = internal.Read\n\tWrite  = internal.Write\n\tRemove = internal.Remove\n)\n\n// Op describes a set of cache operations.\ntype Op = internal.Op\n\n// Event represents a single cache entry change.\ntype Event = internal.Event\n\n// Cache stores data so that future requests for that data can be served faster.\ntype Cache interface {\n\t// Load returns key value.\n\tLoad(key interface{}) (interface{}, bool)\n\t// Peek returns key value without updating the underlying \"recent-ness\".\n\tPeek(key interface{}) (interface{}, bool)\n\t// Update the key value without updating the underlying \"recent-ness\".\n\tUpdate(key interface{}, value interface{})\n\t// Store sets the key value.\n\tStore(key interface{}, value interface{})\n\t// StoreWithTTL sets the key value with TTL overrides the default.\n\tStoreWithTTL(key interface{}, value interface{}, ttl time.Duration)\n\t// Delete deletes the key value.\n\tDelete(key interface{})\n\t// Expiry returns key value expiry time.\n\tExpiry(key interface{}) (time.Time, bool)\n\t// Keys return cache records keys.\n\tKeys() []interface{}\n\t// Contains Checks if a key exists in cache.\n\tContains(key interface{}) bool\n\t// Purge Clears all cache entries.\n\tPurge()\n\t// Resize cache, returning number evicted\n\tResize(int) int\n\t// Len Returns the number of items in the cache.\n\tLen() int\n\t// Cap Returns the cache capacity.\n\tCap() int\n\t// TTL returns entries default TTL.\n\tTTL() time.Duration\n\t// SetTTL sets entries default TTL.\n\tSetTTL(time.Duration)\n\t// RegisterOnEvicted registers a function,\n\t// to call it when an entry is purged from the cache.\n\t//\n\t// Deprecated: use Notify 
instead.\n\tRegisterOnEvicted(f func(key, value interface{}))\n\t// RegisterOnExpired registers a function,\n\t// to call it when an entry TTL elapsed.\n\t//\n\t// Deprecated: use Notify instead.\n\tRegisterOnExpired(f func(key, value interface{}))\n\t// Notify causes cache to relay events to ch.\n\t// If no operations are provided, all incoming operations will be relayed to ch.\n\t// Otherwise, just the provided operations will.\n\tNotify(ch chan<- Event, ops ...Op)\n\t// Ignore causes the provided operations to be ignored. Ignore undoes the effect\n\t// of any prior calls to Notify for the provided operations.\n\t// If no operations are provided, ch removed.\n\tIgnore(ch chan<- Event, ops ...Op)\n\t// GC runs a garbage collection and blocks the caller until the\n\t// all expired items from the cache evicted.\n\t//\n\t// GC returns the remaining time duration for the next gc cycle\n\t// if there any, Otherwise, it return 0.\n\t//\n\t// Calling GC without waits for the duration to elapsed considered a no-op.\n\tGC() time.Duration\n}\n\n// GC runs a garbage collection to evict expired items from the cache on time.\n//\n// GC trace expired items based on read-write barrier, therefore it listen to\n// cache write events and capture the result of calling the GC method on cache\n// to trigger the garbage collection loop at the right point in time.\n//\n// GC is a long running function, it returns when ctx done, therefore the\n// caller must start it in its own goroutine.\n//\n// Experimental\n//\n// Notice: This func is EXPERIMENTAL and may be changed or removed in a\n// later release.\nfunc GC(ctx context.Context, cache Cache) {\n\tremaining := time.Duration(0)\n\n\tt := time.NewTimer(remaining)\n\tdefer t.Stop()\n\n\tc := make(chan Event, 1)\n\tcache.Notify(c, Write)\n\tdefer func() {\n\t\tcache.Ignore(c)\n\t\tclose(c)\n\t}()\n\n\tgc := func() {\n\t\tremaining = cache.GC()\n\t\tt.Stop()\n\t\tif remaining > 0 {\n\t\t\tt.Reset(remaining)\n\t\t}\n\t}\n\n\tfor 
{\n\t\tselect {\n\t\tcase e := <-c:\n\t\t\tif e.Expiry.IsZero() {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif remaining == 0 || time.Until(e.Expiry) < remaining {\n\t\t\t\tgc()\n\t\t\t}\n\t\tcase <-t.C:\n\t\t\tgc()\n\t\tcase <-ctx.Done():\n\t\t\treturn\n\t\t}\n\t}\n}\n\ntype cache struct {\n\t// mu guards unsafe cache.\n\t// Calls to mu.Unlock are currently not deferred,\n\t// because defer adds ~200 ns (as of go1.)\n\tmu     sync.Mutex\n\tunsafe Cache\n}\n\nfunc (c *cache) Load(key interface{}) (interface{}, bool) {\n\tc.mu.Lock()\n\tv, ok := c.unsafe.Load(key)\n\tc.mu.Unlock()\n\treturn v, ok\n}\n\nfunc (c *cache) Peek(key interface{}) (interface{}, bool) {\n\tc.mu.Lock()\n\tv, ok := c.unsafe.Peek(key)\n\tc.mu.Unlock()\n\treturn v, ok\n}\n\nfunc (c *cache) Update(key interface{}, value interface{}) {\n\tc.mu.Lock()\n\tc.unsafe.Update(key, value)\n\tc.mu.Unlock()\n}\n\nfunc (c *cache) Store(key interface{}, value interface{}) {\n\tc.mu.Lock()\n\tc.unsafe.Store(key, value)\n\tc.mu.Unlock()\n}\n\nfunc (c *cache) StoreWithTTL(key interface{}, value interface{}, ttl time.Duration) {\n\tc.mu.Lock()\n\tc.unsafe.StoreWithTTL(key, value, ttl)\n\tc.mu.Unlock()\n}\n\nfunc (c *cache) Delete(key interface{}) {\n\tc.mu.Lock()\n\tc.unsafe.Delete(key)\n\tc.mu.Unlock()\n}\n\nfunc (c *cache) Keys() []interface{} {\n\tc.mu.Lock()\n\tkeys := c.unsafe.Keys()\n\tc.mu.Unlock()\n\treturn keys\n}\n\nfunc (c *cache) Contains(key interface{}) bool {\n\tc.mu.Lock()\n\tok := c.unsafe.Contains(key)\n\tc.mu.Unlock()\n\treturn ok\n}\n\nfunc (c *cache) Purge() {\n\tc.mu.Lock()\n\tc.unsafe.Purge()\n\tc.mu.Unlock()\n}\n\nfunc (c *cache) Resize(s int) int {\n\tc.mu.Lock()\n\tn := c.unsafe.Resize(s)\n\tc.mu.Unlock()\n\treturn n\n}\n\nfunc (c *cache) Len() int {\n\tc.mu.Lock()\n\tn := c.unsafe.Len()\n\tc.mu.Unlock()\n\treturn n\n}\n\nfunc (c *cache) Cap() int {\n\tc.mu.Lock()\n\tn := c.unsafe.Cap()\n\tc.mu.Unlock()\n\treturn n\n}\n\nfunc (c *cache) TTL() time.Duration {\n\tc.mu.Lock()\n\tttl := 
c.unsafe.TTL()\n\tc.mu.Unlock()\n\treturn ttl\n}\n\nfunc (c *cache) SetTTL(ttl time.Duration) {\n\tc.mu.Lock()\n\tc.unsafe.SetTTL(ttl)\n\tc.mu.Unlock()\n}\n\nfunc (c *cache) RegisterOnEvicted(f func(key, value interface{})) {\n\tc.mu.Lock()\n\tc.unsafe.RegisterOnEvicted(f)\n\tc.mu.Unlock()\n}\n\nfunc (c *cache) RegisterOnExpired(f func(key, value interface{})) {\n\tc.mu.Lock()\n\tc.unsafe.RegisterOnExpired(f)\n\tc.mu.Unlock()\n}\n\nfunc (c *cache) Notify(ch chan<- Event, ops ...Op) {\n\tc.mu.Lock()\n\tc.unsafe.Notify(ch, ops...)\n\tc.mu.Unlock()\n}\n\nfunc (c *cache) Ignore(ch chan<- Event, ops ...Op) {\n\tc.mu.Lock()\n\tc.unsafe.Ignore(ch, ops...)\n\tc.mu.Unlock()\n}\n\nfunc (c *cache) Expiry(key interface{}) (time.Time, bool) {\n\tc.mu.Lock()\n\texp, ok := c.unsafe.Expiry(key)\n\tc.mu.Unlock()\n\treturn exp, ok\n}\n\nfunc (c *cache) GC() time.Duration {\n\tc.mu.Lock()\n\tdur := c.unsafe.GC()\n\tc.mu.Unlock()\n\treturn dur\n}\n"
  },
  {
    "path": "cache_test.go",
    "content": "package libcache_test\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"math/rand\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/stretchr/testify/assert\"\n\n\t\"github.com/shaj13/libcache\"\n\t_ \"github.com/shaj13/libcache/arc\"\n\t_ \"github.com/shaj13/libcache/fifo\"\n\t_ \"github.com/shaj13/libcache/lfu\"\n\t_ \"github.com/shaj13/libcache/lifo\"\n\t_ \"github.com/shaj13/libcache/lru\"\n\t_ \"github.com/shaj13/libcache/mru\"\n)\n\nvar cacheTests = []struct {\n\tcont          libcache.ReplacementPolicy\n\tevictedKey    interface{}\n\tonEvictedKeys interface{}\n}{\n\t{\n\t\tcont:          libcache.LFU,\n\t\tevictedKey:    1,\n\t\tonEvictedKeys: []interface{}{0, 19},\n\t},\n\t{\n\t\tcont:          libcache.LRU,\n\t\tevictedKey:    1,\n\t\tonEvictedKeys: []interface{}{0, 1},\n\t},\n\t{\n\t\tcont:          libcache.FIFO,\n\t\tevictedKey:    1,\n\t\tonEvictedKeys: []interface{}{0, 1},\n\t},\n\t{\n\t\tcont:          libcache.LIFO,\n\t\tevictedKey:    3,\n\t\tonEvictedKeys: []interface{}{20, 19},\n\t},\n\t{\n\t\tcont:          libcache.MRU,\n\t\tevictedKey:    3,\n\t\tonEvictedKeys: []interface{}{20, 19},\n\t},\n\t{\n\t\tcont:          libcache.ARC,\n\t\tevictedKey:    1,\n\t\tonEvictedKeys: []interface{}{0, 1},\n\t},\n}\n\nfunc TestCacheStore(t *testing.T) {\n\tfor _, tt := range cacheTests {\n\t\tt.Run(\"Test\"+tt.cont.String()+\"CacheStore\", func(t *testing.T) {\n\t\t\tcache := tt.cont.New(0)\n\t\t\tcache.Store(1, 1)\n\t\t\tassert.True(t, cache.Contains(1))\n\t\t})\n\t}\n}\n\nfunc TestCacheStoreWithTTL(t *testing.T) {\n\tfor _, tt := range cacheTests {\n\t\tt.Run(\"Test\"+tt.cont.String()+\"CacheSet\", func(t *testing.T) {\n\t\t\tcache := tt.cont.New(0)\n\t\t\tcache.StoreWithTTL(1, 1, time.Hour)\n\t\t\tgot, ok := cache.Expiry(1)\n\t\t\texpect := time.Now().UTC().Add(time.Hour)\n\t\t\tassert.True(t, ok)\n\t\t\tassert.WithinDuration(t, expect, got, time.Hour)\n\t\t})\n\t}\n}\n\nfunc TestCacheLoad(t *testing.T) {\n\tfor _, tt := range cacheTests 
{\n\t\tt.Run(\"Test\"+tt.cont.String()+\"CacheLoad\", func(t *testing.T) {\n\t\t\tcache := tt.cont.New(0)\n\t\t\tcache.Store(\"1\", 1)\n\t\t\tv, ok := cache.Load(\"1\")\n\t\t\tassert.True(t, ok)\n\t\t\tassert.Equal(t, 1, v)\n\t\t})\n\t}\n}\n\nfunc TestCacheDelete(t *testing.T) {\n\tfor _, tt := range cacheTests {\n\t\tt.Run(\"Test\"+tt.cont.String()+\"CacheDelete\", func(t *testing.T) {\n\t\t\tcache := tt.cont.New(0)\n\t\t\tcache.Store(1, 1)\n\t\t\tcache.Delete(1)\n\t\t\tassert.False(t, cache.Contains(1))\n\t\t})\n\t}\n}\n\nfunc TestCachePeek(t *testing.T) {\n\tfor _, tt := range cacheTests {\n\t\tt.Run(\"Test\"+tt.cont.String()+\"CachePeek\", func(t *testing.T) {\n\t\t\tcache := tt.cont.New(3)\n\t\t\tcache.Store(1, 0)\n\t\t\tcache.Store(2, 0)\n\t\t\tcache.Store(3, 0)\n\t\t\tv, ok := cache.Peek(1)\n\t\t\tcache.Store(4, 0)\n\t\t\tfound := cache.Contains(tt.evictedKey)\n\t\t\tassert.Equal(t, 0, v)\n\t\t\tassert.True(t, ok)\n\t\t\tassert.False(t, found, \"Peek should not update recent-ness\")\n\t\t})\n\t}\n}\n\nfunc TestCacheContains(t *testing.T) {\n\tfor _, tt := range cacheTests {\n\t\tt.Run(\"Test\"+tt.cont.String()+\"CacheContains\", func(t *testing.T) {\n\t\t\tcache := tt.cont.New(3)\n\t\t\tcache.Store(1, 0)\n\t\t\tcache.Store(2, 0)\n\t\t\tcache.Store(3, 0)\n\t\t\tfound := cache.Contains(1)\n\t\t\tcache.Store(4, 0)\n\t\t\t_, ok := cache.Load(tt.evictedKey)\n\t\t\tassert.True(t, found)\n\t\t\tassert.False(t, ok, \"Contains should not update recent-ness\")\n\t\t})\n\t}\n}\n\nfunc TestCacheUpdate(t *testing.T) {\n\tfor _, tt := range cacheTests {\n\t\tt.Run(\"Test\"+tt.cont.String()+\"CacheUpdate\", func(t *testing.T) {\n\t\t\tcache := tt.cont.New(3)\n\t\t\tcache.Store(1, 0)\n\t\t\tcache.Store(2, 0)\n\t\t\tcache.Store(3, 0)\n\t\t\tcache.Update(1, 1)\n\t\t\tv, ok := cache.Peek(1)\n\t\t\tcache.Store(4, 0)\n\t\t\tfound := cache.Contains(tt.evictedKey)\n\t\t\tassert.Equal(t, 1, v)\n\t\t\tassert.True(t, ok)\n\t\t\tassert.False(t, found, \"Update should not move 
element\")\n\t\t})\n\t}\n}\n\nfunc TestCachePurge(t *testing.T) {\n\tfor _, tt := range cacheTests {\n\t\tt.Run(\"Test\"+tt.cont.String()+\"CachePurge\", func(t *testing.T) {\n\t\t\tcache := tt.cont.New(0)\n\t\t\tcache.Store(1, 0)\n\t\t\tcache.Store(2, 0)\n\t\t\tcache.Store(3, 0)\n\t\t\tcache.Purge()\n\n\t\t\tassert.Equal(t, 0, cache.Len())\n\t\t})\n\t}\n}\n\nfunc TestCacheResize(t *testing.T) {\n\tfor _, tt := range cacheTests {\n\t\tt.Run(\"Test\"+tt.cont.String()+\"CacheResize\", func(t *testing.T) {\n\t\t\tcache := tt.cont.New(0)\n\t\t\tcache.Store(1, 0)\n\t\t\tcache.Store(2, 0)\n\t\t\tcache.Store(3, 0)\n\t\t\tcache.Resize(2)\n\t\t\tassert.Equal(t, 2, cache.Len())\n\t\t})\n\t}\n}\n\nfunc TestCacheKeys(t *testing.T) {\n\tfor _, tt := range cacheTests {\n\t\tt.Run(\"Test\"+tt.cont.String()+\"CacheKeys\", func(t *testing.T) {\n\t\t\tcache := tt.cont.New(0)\n\t\t\tcache.Store(1, 0)\n\t\t\tcache.Store(2, 0)\n\t\t\tcache.Store(3, 0)\n\t\t\tassert.ElementsMatch(t, []interface{}{1, 2, 3}, cache.Keys())\n\t\t})\n\t}\n}\n\nfunc TestCacheCap(t *testing.T) {\n\tfor _, tt := range cacheTests {\n\t\tt.Run(\"Test\"+tt.cont.String()+\"CacheCap\", func(t *testing.T) {\n\t\t\tcache := tt.cont.New(3)\n\t\t\tassert.Equal(t, 3, cache.Cap())\n\t\t})\n\t}\n}\n\nfunc TestCacheTTL(t *testing.T) {\n\tfor _, tt := range cacheTests {\n\t\tt.Run(\"Test\"+tt.cont.String()+\"CacheTTL\", func(t *testing.T) {\n\t\t\tcache := tt.cont.New(0)\n\t\t\tcache.SetTTL(time.Second)\n\t\t\tassert.Equal(t, time.Second, cache.TTL())\n\t\t})\n\t}\n}\n\nfunc TestOnEvicted(t *testing.T) {\n\tfor _, tt := range cacheTests {\n\t\tt.Run(\"Test\"+tt.cont.String()+\"CacheOnEvicted\", func(t *testing.T) {\n\t\t\tcache := tt.cont.New(20)\n\t\t\tsend := make(chan libcache.Event, 10)\n\t\t\tdone := make(chan bool)\n\t\t\tevictedKeys := make([]interface{}, 0, 2)\n\t\t\tcache.Notify(send, libcache.Remove)\n\n\t\t\tgo func() {\n\t\t\t\tfor {\n\t\t\t\t\te := <-send\n\t\t\t\t\tevictedKeys = append(evictedKeys, 
e.Key)\n\t\t\t\t\tif len(evictedKeys) >= 2 {\n\t\t\t\t\t\tdone <- true\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}()\n\n\t\t\tfor i := 0; i < 22; i++ {\n\t\t\t\tcache.Store(i, i)\n\t\t\t}\n\n\t\t\tselect {\n\t\t\tcase <-done:\n\t\t\tcase <-time.After(time.Second * 2):\n\t\t\t\tt.Fatal(\"TestOnEvicted timeout exceeded, expected to receive evicted keys\")\n\t\t\t}\n\n\t\t\tassert.ElementsMatch(t, tt.onEvictedKeys, evictedKeys)\n\t\t})\n\t}\n}\n\nfunc TestExpiring(t *testing.T) {\n\tfor _, tt := range cacheTests {\n\t\tt.Run(\"Test\"+tt.cont.String()+\"CacheExpiring\", func(t *testing.T) {\n\t\t\tcache := tt.cont.New(0)\n\t\t\tkeys := make([]interface{}, 10)\n\t\t\tfor i := 0; i < 10; i++ {\n\t\t\t\tcache.StoreWithTTL(fmt.Sprintf(\"%v.100\", i), i, time.Millisecond*100)\n\t\t\t\tcache.StoreWithTTL(fmt.Sprintf(\"%v.200\", i), i, time.Millisecond*200)\n\t\t\t\tkeys[i] = fmt.Sprintf(\"%v.200\", i)\n\t\t\t}\n\n\t\t\ttime.Sleep(time.Millisecond * 100)\n\n\t\t\tcache.Peek(\"notfound\") // should expire *.100\n\t\t\tgot := cache.Keys()\n\t\t\tassert.ElementsMatch(t, keys, got)\n\n\t\t\ttime.Sleep(time.Millisecond * 100)\n\t\t\tcache.Store(\"notfound\", 0) // should expire *.200\n\t\t\tgot = cache.Keys()\n\t\t\tassert.ElementsMatch(t, []string{\"notfound\"}, got)\n\n\t\t\tcache.Purge()\n\n\t\t\t// check remove element will keep other entries in heap.\n\t\t\t// this has been added to make sure we remove right entry\n\t\t\t// by its index.\n\t\t\tcache.StoreWithTTL(1, 1, time.Millisecond*100)\n\t\t\tcache.StoreWithTTL(2, 2, time.Millisecond*200)\n\n\t\t\tcache.Delete(2)\n\t\t\tgot = cache.Keys()\n\t\t\tassert.ElementsMatch(t, []int{1}, got)\n\n\t\t\ttime.Sleep(time.Millisecond * 100)\n\t\t\tcache.Peek(\"\")\n\t\t\tassert.Equal(t, 0, cache.Len())\n\n\t\t})\n\t}\n}\n\nfunc TestNotify(t *testing.T) {\n\tfor _, tt := range cacheTests {\n\t\tt.Run(\"Test\"+tt.cont.String()+\"CacheNotify\", func(t *testing.T) {\n\t\t\tgot := 0\n\t\t\tc := make(chan libcache.Event, 
10)\n\t\t\tcache := tt.cont.New(0)\n\n\t\t\tcache.Notify(c)\n\n\t\t\tcache.Load(1)\n\t\t\tcache.StoreWithTTL(1, 0, time.Second)\n\t\t\tcache.Peek(1)\n\t\t\tcache.Delete(1)\n\t\t\tclose(c)\n\n\t\t\tfor e := range c {\n\t\t\t\tt.Logf(\"Operation %s on Key %v \\n\", e.Op, e.Key)\n\t\t\t\tgot += e.Key.(int)\n\t\t\t}\n\n\t\t\tif tt.cont == libcache.ARC {\n\t\t\t\tassert.Equal(t, 7, got)\n\t\t\t} else {\n\t\t\t\tassert.Equal(t, 4, got)\n\t\t\t}\n\n\t\t\t// check it will not try to write on chan after ignore\n\t\t\tcache.Ignore(c)\n\t\t\tfor i := 0; i < 10; i++ {\n\t\t\t\tcache.Store(i, i)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestCacheGC(t *testing.T) {\n\tfor _, tt := range cacheTests {\n\t\tt.Run(\"Test\"+tt.cont.String()+\"CacheGC\", func(t *testing.T) {\n\t\t\tcache := tt.cont.NewUnsafe(0)\n\t\t\tcache.StoreWithTTL(0, 0, time.Nanosecond)\n\t\t\tcache.StoreWithTTL(1, 1, time.Millisecond*100)\n\t\t\tdur := cache.GC()\n\n\t\t\tassert.GreaterOrEqual(t, int64(dur), int64(time.Millisecond*99))\n\t\t\ttime.Sleep(dur)\n\n\t\t\tassert.Zero(t, int(cache.GC()))\n\t\t\tassert.Zero(t, cache.Len())\n\t\t})\n\t}\n}\n\nfunc TestGC(t *testing.T) {\n\tctx, cancel := context.WithCancel(context.Background())\n\tdefer cancel()\n\n\tcache := libcache.LRU.New(0)\n\tgo libcache.GC(ctx, cache)\n\n\tcache.StoreWithTTL(1, 1, time.Millisecond*100)\n\ttime.Sleep(time.Millisecond * 150)\n\tassert.Zero(t, cache.Len())\n\n\tcache.StoreWithTTL(1, 1, time.Millisecond*100)\n\tcache.StoreWithTTL(2, 2, time.Millisecond*200)\n\n\ttime.Sleep(time.Millisecond * 150)\n\tassert.Equal(t, 1, cache.Len())\n\n\ttime.Sleep(time.Millisecond * 150)\n\tassert.Zero(t, cache.Len())\n}\n\nfunc BenchmarkCache(b *testing.B) {\n\tfor _, tt := range cacheTests {\n\t\tb.Run(\"Benchmark\"+tt.cont.String()+\"Cache\", func(b *testing.B) {\n\t\t\tkeys := []interface{}{}\n\t\t\tcache := tt.cont.New(0)\n\n\t\t\tfor i := 0; i < 100; i++ {\n\t\t\t\tkeys = append(keys, i)\n\t\t\t}\n\n\t\t\tb.ResetTimer()\n\t\t\tb.RunParallel(func(pb 
*testing.PB) {\n\t\t\t\tfor pb.Next() {\n\t\t\t\t\tkey := keys[rand.Intn(100)]\n\t\t\t\t\t_, ok := cache.Load(key)\n\t\t\t\t\tif ok {\n\t\t\t\t\t\tcache.Delete(key)\n\t\t\t\t\t} else {\n\t\t\t\t\t\tcache.Store(key, struct{}{})\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t})\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "fifo/fifo.go",
    "content": "// Package fifo implements a FIFO cache.\npackage fifo\n\nimport (\n\t\"container/list\"\n\n\t\"github.com/shaj13/libcache\"\n\t\"github.com/shaj13/libcache/internal\"\n)\n\nfunc init() {\n\tlibcache.FIFO.Register(New)\n}\n\n// New returns a new non-thread safe cache.\nfunc New(cap int) libcache.Cache {\n\tcol := &collection{list.New()}\n\treturn internal.New(col, cap)\n}\n\ntype collection struct {\n\tll *list.List\n}\n\nfunc (c *collection) Move(e *internal.Entry) {}\n\nfunc (c *collection) Add(e *internal.Entry) {\n\tle := c.ll.PushBack(e)\n\te.Element = le\n}\n\nfunc (c *collection) Remove(e *internal.Entry) {\n\tle := e.Element.(*list.Element)\n\tc.ll.Remove(le)\n}\n\nfunc (c *collection) Discard() (e *internal.Entry) {\n\tif le := c.ll.Front(); le != nil {\n\t\tc.ll.Remove(le)\n\t\te = le.Value.(*internal.Entry)\n\t}\n\treturn\n}\n\nfunc (c *collection) Len() int {\n\treturn c.ll.Len()\n}\n\nfunc (c *collection) Init() {\n\tc.ll.Init()\n}\n"
  },
  {
    "path": "fifo/fifo_test.go",
    "content": "package fifo\n\nimport (\n\t\"container/list\"\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\"\n\n\t\"github.com/shaj13/libcache/internal\"\n)\n\nfunc TestCollection(t *testing.T) {\n\tentries := []*internal.Entry{}\n\tentries = append(entries, &internal.Entry{Key: 1})\n\tentries = append(entries, &internal.Entry{Key: 2})\n\tentries = append(entries, &internal.Entry{Key: 3})\n\n\tc := &collection{ll: list.New()}\n\tc.Init()\n\n\tfor _, e := range entries {\n\t\tc.Add(e)\n\t}\n\n\tfor _, e := range entries {\n\t\tfor i := 0; i < e.Key.(int); i++ {\n\t\t\tc.Move(e)\n\t\t}\n\t}\n\n\toldest := c.Discard()\n\tc.Remove(entries[2])\n\tback := c.ll.Back().Value.(*internal.Entry)\n\n\tassert.Equal(t, 1, oldest.Key)\n\tassert.Equal(t, 1, c.Len())\n\tassert.Equal(t, 2, back.Key)\n}\n"
  },
  {
    "path": "go.mod",
    "content": "module github.com/shaj13/libcache\n\ngo 1.13\n\nrequire github.com/stretchr/testify v1.6.1\n"
  },
  {
    "path": "go.sum",
    "content": "github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=\ngithub.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=\ngithub.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=\ngithub.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=\ngithub.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=\ngithub.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0=\ngithub.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=\ngopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=\ngopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=\ngopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=\ngopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=\n"
  },
  {
    "path": "idle/idle.go",
    "content": "// Package idle implements an IDLE cache, that never finds/stores a key's value.\npackage idle\n\nimport (\n\t\"time\"\n\n\t\"github.com/shaj13/libcache\"\n)\n\nfunc init() {\n\tlibcache.IDLE.Register(New)\n}\n\n// New return idle cache that never finds/stores a key's value.\nfunc New(cap int) libcache.Cache {\n\treturn idle{}\n}\n\ntype idle struct{}\n\nfunc (idle) Load(interface{}) (v interface{}, ok bool)            { return }\nfunc (idle) Peek(interface{}) (v interface{}, ok bool)            { return }\nfunc (idle) Keys() (keys []interface{})                           { return }\nfunc (idle) Contains(interface{}) (ok bool)                       { return }\nfunc (idle) Resize(int) (i int)                                   { return }\nfunc (idle) Len() (len int)                                       { return }\nfunc (idle) Cap() (cap int)                                       { return }\nfunc (idle) TTL() (t time.Duration)                               { return }\nfunc (idle) Expiry(interface{}) (t time.Time, ok bool)            { return }\nfunc (idle) GC() (dur time.Duration)                              { return }\nfunc (idle) Update(interface{}, interface{})                      {}\nfunc (idle) Store(interface{}, interface{})                       {}\nfunc (idle) StoreWithTTL(interface{}, interface{}, time.Duration) {}\nfunc (idle) Delete(interface{})                                   {}\nfunc (idle) Purge()                                               {}\nfunc (idle) SetTTL(ttl time.Duration)                             {}\nfunc (idle) RegisterOnExpired(f func(key, value interface{}))     {}\nfunc (idle) RegisterOnEvicted(f func(key, value interface{}))     {}\nfunc (idle) Notify(ch chan<- libcache.Event, ops ...libcache.Op)  {}\nfunc (idle) Ignore(ch chan<- libcache.Event, ops ...libcache.Op)  {}\n"
  },
  {
    "path": "internal/cache.go",
    "content": "package internal\n\nimport (\n\t\"container/heap\"\n\t\"fmt\"\n\t\"time\"\n)\n\n// Op describes a set of cache operations.\ntype Op uint8\n\n// These are the generalized cache operations that can trigger a event.\nconst (\n\tRead Op = iota + 1\n\tWrite\n\tRemove\n\tmaxOp\n)\n\nfunc (op Op) String() string {\n\tswitch op {\n\tcase Read:\n\t\treturn \"READ\"\n\tcase Write:\n\t\treturn \"WRITE\"\n\tcase Remove:\n\t\treturn \"REMOVE\"\n\tdefault:\n\t\treturn \"UNKNOWN\"\n\t}\n}\n\ntype handler struct {\n\tmask [((maxOp - 1) + 7) / 8]uint8\n}\n\nfunc (h *handler) want(op Op) bool {\n\treturn (h.mask[op/8]>>uint8(op&7))&1 != 0\n}\n\nfunc (h *handler) set(op Op) {\n\th.mask[op/8] |= 1 << uint8(op&7)\n}\n\nfunc (h *handler) clear(op Op) {\n\th.mask[op/8] &^= 1 << uint8(op&7)\n}\n\n// Collection represents the cache underlying data structure,\n// and defines the functions or operations that can be applied to the data elements.\ntype Collection interface {\n\tMove(*Entry)\n\tAdd(*Entry)\n\tRemove(*Entry)\n\tDiscard() *Entry\n\tLen() int\n\tInit()\n}\n\n// Event represents a single cache entry change.\ntype Event struct {\n\t// Op represents cache operation that triggered the event.\n\tOp Op\n\t// Key represents cache entry key.\n\tKey interface{}\n\t// Value represents cache key value.\n\tValue interface{}\n\t// Expiry represents cache key value expiry time.\n\tExpiry time.Time\n\t// Ok report whether the read operation succeed.\n\tOk bool\n}\n\n// String returns a string representation of the event in the form\n// \"file: REMOVE|WRITE|...\"\nfunc (e Event) String() string {\n\treturn fmt.Sprintf(\"%v: %s\", e.Key, e.Op.String())\n}\n\n// Entry is used to hold a value in the cache.\ntype Entry struct {\n\tKey     interface{}\n\tValue   interface{}\n\tElement interface{}\n\tExp     time.Time\n\tindex   int\n}\n\n// Cache is an abstracted cache that provides a skeletal implementation,\n// of the Cache interface to minimize the effort required to implement 
interface.\ntype Cache struct {\n\tcoll     Collection\n\theap     expiringHeap\n\tentries  map[interface{}]*Entry\n\thandlers map[chan<- Event]*handler\n\tttl      time.Duration\n\tcapacity int\n}\n\n// Load returns key value.\nfunc (c *Cache) Load(key interface{}) (interface{}, bool) {\n\treturn c.get(key, false)\n}\n\n// Peek returns key value without updating the underlying \"rank\".\nfunc (c *Cache) Peek(key interface{}) (interface{}, bool) {\n\treturn c.get(key, true)\n}\n\nfunc (c *Cache) get(key interface{}, peek bool) (interface{}, bool) {\n\t// Run GC inline before return the entry.\n\tc.GC()\n\n\te, ok := c.entries[key]\n\tif !ok {\n\t\tc.emit(Read, key, nil, time.Time{}, ok)\n\t\treturn nil, ok\n\t}\n\n\tif !peek {\n\t\tc.coll.Move(e)\n\t}\n\n\tc.emit(Read, key, e.Value, e.Exp, ok)\n\treturn e.Value, ok\n}\n\n// Expiry returns key value expiry time.\nfunc (c *Cache) Expiry(key interface{}) (t time.Time, ok bool) {\n\tok = c.Contains(key)\n\tif ok {\n\t\tt = c.entries[key].Exp\n\t}\n\treturn t, ok\n}\n\n// Store sets the value for a key.\nfunc (c *Cache) Store(key, value interface{}) {\n\tc.StoreWithTTL(key, value, c.ttl)\n}\n\n// StoreWithTTL sets the key value with TTL overrides the default.\nfunc (c *Cache) StoreWithTTL(key, value interface{}, ttl time.Duration) {\n\t// Run GC inline before pushing the new entry.\n\tc.GC()\n\n\tif e, ok := c.entries[key]; ok {\n\t\tc.removeEntry(e)\n\t}\n\n\te := &Entry{Key: key, Value: value}\n\n\tif ttl > 0 {\n\t\te.Exp = time.Now().UTC().Add(ttl)\n\t\theap.Push(&c.heap, e)\n\t}\n\n\tc.entries[key] = e\n\tif c.capacity != 0 && c.Len() >= c.capacity {\n\t\tc.Discard()\n\t}\n\n\tc.coll.Add(e)\n\tc.emit(Write, e.Key, e.Value, e.Exp, false)\n}\n\n// Update the key value without updating the underlying \"rank\".\nfunc (c *Cache) Update(key, value interface{}) {\n\t// Run GC inline before update the entry.\n\tc.GC()\n\n\tif c.Contains(key) {\n\t\te := c.entries[key]\n\t\te.Value = value\n\t\tc.emit(Write, e.Key, e.Value, 
e.Exp, false)\n\t}\n}\n\n// Purge Clears all cache entries.\nfunc (c *Cache) Purge() {\n\tdefer c.coll.Init()\n\n\tif len(c.handlers) == 0 {\n\t\tc.entries = make(map[interface{}]*Entry)\n\t\tc.heap = nil\n\t\treturn\n\t}\n\n\tfor _, e := range c.entries {\n\t\tc.evict(e)\n\t}\n}\n\n// Resize cache, returning number evicted\nfunc (c *Cache) Resize(size int) int {\n\tc.capacity = size\n\tdiff := c.Len() - size\n\n\tif diff < 0 {\n\t\tdiff = 0\n\t}\n\n\tfor i := 0; i < diff; i++ {\n\t\tc.Discard()\n\t}\n\n\treturn diff\n}\n\n// DelSilently the key value silently without calling onEvicted.\nfunc (c *Cache) DelSilently(key interface{}) {\n\tif e, ok := c.entries[key]; ok {\n\t\tc.removeEntry(e)\n\t}\n}\n\n// Delete deletes the key value.\nfunc (c *Cache) Delete(key interface{}) {\n\tif e, ok := c.entries[key]; ok {\n\t\tc.evict(e)\n\t}\n}\n\n// Contains Checks if a key exists in cache.\nfunc (c *Cache) Contains(key interface{}) (ok bool) {\n\t_, ok = c.Peek(key)\n\treturn\n}\n\n// Keys return cache records keys.\nfunc (c *Cache) Keys() (keys []interface{}) {\n\tfor k := range c.entries {\n\t\tkeys = append(keys, k)\n\t}\n\treturn\n}\n\n// Len Returns the number of items in the cache.\nfunc (c *Cache) Len() int {\n\treturn c.coll.Len()\n}\n\n// Discard oldest entry from cache to make room for the new ones.\nfunc (c *Cache) Discard() (key, value interface{}) {\n\tif e := c.coll.Discard(); e != nil {\n\t\tc.evict(e)\n\t\treturn e.Key, e.Value\n\t}\n\n\treturn\n}\n\nfunc (c *Cache) removeEntry(e *Entry) {\n\tc.coll.Remove(e)\n\tdelete(c.entries, e.Key)\n\t// Remove entry from the heap, the entry may not exist because\n\t// it has zero ttl or already popped up by gc\n\tif len(c.heap) > 0 && e.index < len(c.heap) && e.Key == c.heap[e.index].Key {\n\t\theap.Remove(&c.heap, e.index)\n\t}\n}\n\n// evict remove entry and fire on evicted callback.\nfunc (c *Cache) evict(e *Entry) {\n\tc.removeEntry(e)\n\tc.emit(Remove, e.Key, e.Value, e.Exp, false)\n}\n\nfunc (c *Cache) emit(op 
Op, k, v interface{}, exp time.Time, ok bool) {\n\te := Event{\n\t\tOp:     op,\n\t\tKey:    k,\n\t\tValue:  v,\n\t\tExpiry: exp,\n\t\tOk:     ok,\n\t}\n\n\tfor c, h := range c.handlers {\n\t\tif h.want(op) {\n\t\t\t// send but do not block for it\n\t\t\tselect {\n\t\t\tcase c <- e:\n\t\t\tdefault:\n\t\t\t}\n\t\t}\n\t}\n}\n\n// GC returns the remaining time duration for the next gc cycle if there is any,\n// Otherwise, it returns 0.\n//\n// Calling GC without waiting for the duration to elapse is considered a no-op.\nfunc (c *Cache) GC() time.Duration {\n\tnow := time.Now()\n\tfor {\n\n\t\t// Return from gc if the heap is empty or the next element is not yet\n\t\t// expired.\n\n\t\tif len(c.heap) == 0 {\n\t\t\treturn 0\n\t\t}\n\n\t\tif now.Before(c.heap[0].Exp) {\n\t\t\treturn c.heap[0].Exp.Sub(now)\n\t\t}\n\n\t\te := heap.Pop(&c.heap).(*Entry)\n\t\tc.evict(e)\n\t}\n}\n\n// TTL returns entries default TTL.\nfunc (c *Cache) TTL() time.Duration {\n\treturn c.ttl\n}\n\n// SetTTL sets entries default TTL.\nfunc (c *Cache) SetTTL(ttl time.Duration) {\n\tc.ttl = ttl\n}\n\n// Cap Returns the cache capacity.\nfunc (c *Cache) Cap() int {\n\treturn c.capacity\n}\n\n// Notify causes cache to relay events to ch.\n// If no operations are provided, all incoming operations will be relayed to ch.\n// Otherwise, just the provided operations will.\nfunc (c *Cache) Notify(ch chan<- Event, ops ...Op) {\n\tif ch == nil {\n\t\tpanic(\"libcache: Notify using nil channel\")\n\t}\n\n\th := new(handler)\n\tc.handlers[ch] = h\n\n\tif len(ops) == 0 {\n\t\tfor i := 1; i <= int(maxOp); i++ {\n\t\t\th.set(Op(i))\n\t\t}\n\t\treturn\n\t}\n\n\tfor _, op := range ops {\n\t\th.set(op)\n\t}\n}\n\n// Ignore causes the provided ops to be ignored. 
Ignore undoes the effect\n// of any prior calls to Notify for the provided ops.\n// If no ops are provided, ch removed.\nfunc (c *Cache) Ignore(ch chan<- Event, ops ...Op) {\n\tif len(ops) == 0 {\n\t\tdelete(c.handlers, ch)\n\t\treturn\n\t}\n\n\th, ok := c.handlers[ch]\n\tif !ok {\n\t\treturn\n\t}\n\n\tfor _, op := range ops {\n\t\th.clear(op)\n\t}\n}\n\n// RegisterOnEvicted registers a function,\n// to call it when an entry is purged from the cache.\nfunc (c *Cache) RegisterOnEvicted(fn func(key, value interface{})) {\n\tpanic(\"RegisterOnEvicted no longer available\")\n}\n\n// RegisterOnExpired registers a function,\n// to call it when an entry TTL elapsed.\nfunc (c *Cache) RegisterOnExpired(fn func(key, value interface{})) {\n\tpanic(\"RegisterOnExpired no longer available\")\n}\n\n// New return new abstracted cache.\nfunc New(c Collection, cap int) *Cache {\n\treturn &Cache{\n\t\tcoll:     c,\n\t\tcapacity: cap,\n\t\tentries:  make(map[interface{}]*Entry),\n\t\thandlers: make(map[chan<- Event]*handler),\n\t}\n}\n\n// expiringHeap is a min-heap ordered by expiration time of its entries. The\n// expiring cache uses this as a priority queue to efficiently organize entries\n// which will be garbage collected once they expire.\ntype expiringHeap []*Entry\n\nvar _ heap.Interface = &expiringHeap{}\n\nfunc (cq expiringHeap) Len() int {\n\treturn len(cq)\n}\n\nfunc (cq expiringHeap) Less(i, j int) bool {\n\treturn cq[i].Exp.Before(cq[j].Exp)\n}\n\nfunc (cq expiringHeap) Swap(i, j int) {\n\tcq[i].index, cq[j].index = cq[j].index, cq[i].index\n\tcq[i], cq[j] = cq[j], cq[i]\n}\n\nfunc (cq *expiringHeap) Push(c interface{}) {\n\tc.(*Entry).index = len(*cq)\n\t*cq = append(*cq, c.(*Entry))\n}\n\nfunc (cq *expiringHeap) Pop() interface{} {\n\tc := (*cq)[cq.Len()-1]\n\t*cq = (*cq)[:cq.Len()-1]\n\treturn c\n}\n"
  },
  {
    "path": "lfu/lfu.go",
    "content": "// Package lfu implements an LFU cache.\npackage lfu\n\nimport (\n\t\"container/heap\"\n\n\t\"github.com/shaj13/libcache\"\n\t\"github.com/shaj13/libcache/internal\"\n)\n\nfunc init() {\n\tlibcache.LFU.Register(New)\n}\n\n// New returns a new non-thread safe cache.\nfunc New(cap int) libcache.Cache {\n\tf := &collection{}\n\tf.Init()\n\treturn internal.New(f, cap)\n}\n\ntype element struct {\n\tvalue *internal.Entry\n\tindex int\n\tcount int\n}\n\ntype collection []*element\n\nfunc (f collection) Len() int {\n\treturn len(f)\n}\n\nfunc (f collection) Less(i, j int) bool {\n\treturn f[i].count < f[j].count\n}\n\nfunc (f collection) Swap(i, j int) {\n\tf[i], f[j] = f[j], f[i]\n\tf[i].index = i\n\tf[j].index = j\n}\n\nfunc (f *collection) Push(v interface{}) {\n\te := v.(*element)\n\te.index = f.Len()\n\t*f = append(*f, e)\n}\n\nfunc (f *collection) Pop() interface{} {\n\te := (*f)[f.Len()-1]\n\t*f = (*f)[:f.Len()-1]\n\treturn e\n}\n\nfunc (f *collection) Discard() (e *internal.Entry) {\n\treturn heap.Pop(f).(*element).value\n}\n\nfunc (f *collection) Move(e *internal.Entry) {\n\tele := e.Element.(*element)\n\tele.count++\n\theap.Fix(f, ele.index)\n}\n\nfunc (f *collection) Remove(e *internal.Entry) {\n\tif e.Element.(*element).index < f.Len() {\n\t\theap.Remove(f, e.Element.(*element).index)\n\t}\n}\n\nfunc (f *collection) Add(e *internal.Entry) {\n\tele := new(element)\n\tele.value = e\n\te.Element = ele\n\theap.Push(f, ele)\n}\n\nfunc (f *collection) Init() {\n\t*f = collection{}\n\theap.Init(f)\n}\n"
  },
  {
    "path": "lfu/lfu_test.go",
    "content": "package lfu\n\nimport (\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\"\n\n\t\"github.com/shaj13/libcache/internal\"\n)\n\nfunc TestCollection(t *testing.T) {\n\tentries := []*internal.Entry{}\n\tentries = append(entries, &internal.Entry{Key: 1})\n\tentries = append(entries, &internal.Entry{Key: 2})\n\tentries = append(entries, &internal.Entry{Key: 3})\n\n\tf := &collection{}\n\tf.Init()\n\n\tfor _, e := range entries {\n\t\tf.Add(e)\n\t}\n\n\tfor _, e := range entries {\n\t\tfor i := 0; i < e.Key.(int); i++ {\n\t\t\tf.Move(e)\n\t\t}\n\t}\n\n\toldest := f.Discard()\n\tf.Remove(entries[2])\n\n\tassert.Equal(t, oldest.Key, 1)\n\tassert.Equal(t, f.Len(), 1)\n\tassert.Equal(t, (*f)[0].value.Key, 2)\n}\n"
  },
  {
    "path": "lifo/lifo.go",
    "content": "// Package lifo implements a LIFO cache.\npackage lifo\n\nimport (\n\t\"container/list\"\n\n\t\"github.com/shaj13/libcache\"\n\t\"github.com/shaj13/libcache/internal\"\n)\n\nfunc init() {\n\tlibcache.LIFO.Register(New)\n}\n\n// New returns a new non-thread safe cache.\nfunc New(cap int) libcache.Cache {\n\tcol := &collection{list.New()}\n\treturn internal.New(col, cap)\n}\n\ntype collection struct {\n\tll *list.List\n}\n\nfunc (c *collection) Move(e *internal.Entry) {}\n\nfunc (c *collection) Add(e *internal.Entry) {\n\tle := c.ll.PushBack(e)\n\te.Element = le\n}\n\nfunc (c *collection) Remove(e *internal.Entry) {\n\tle := e.Element.(*list.Element)\n\tc.ll.Remove(le)\n}\n\nfunc (c *collection) Discard() (e *internal.Entry) {\n\tif le := c.ll.Back(); le != nil {\n\t\tc.ll.Remove(le)\n\t\te = le.Value.(*internal.Entry)\n\t}\n\treturn\n}\n\nfunc (c *collection) Len() int {\n\treturn c.ll.Len()\n}\n\nfunc (c *collection) Init() {\n\tc.ll.Init()\n}\n"
  },
  {
    "path": "lifo/lifo_test.go",
    "content": "package lifo\n\nimport (\n\t\"container/list\"\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\"\n\n\t\"github.com/shaj13/libcache/internal\"\n)\n\nfunc TestCollection(t *testing.T) {\n\tentries := []*internal.Entry{}\n\tentries = append(entries, &internal.Entry{Key: 1})\n\tentries = append(entries, &internal.Entry{Key: 2})\n\tentries = append(entries, &internal.Entry{Key: 3})\n\n\tc := &collection{ll: list.New()}\n\tc.Init()\n\n\tfor _, e := range entries {\n\t\tc.Add(e)\n\t}\n\n\tfor _, e := range entries {\n\t\tfor i := 0; i < e.Key.(int); i++ {\n\t\t\tc.Move(e)\n\t\t}\n\t}\n\n\toldest := c.Discard()\n\tc.Remove(entries[0])\n\tback := c.ll.Back().Value.(*internal.Entry)\n\n\tassert.Equal(t, 3, oldest.Key)\n\tassert.Equal(t, 1, c.Len())\n\tassert.Equal(t, 2, back.Key)\n}\n"
  },
  {
    "path": "lru/lru.go",
    "content": "// Package lru implements an LRU cache.\npackage lru\n\nimport (\n\t\"container/list\"\n\n\t\"github.com/shaj13/libcache\"\n\t\"github.com/shaj13/libcache/internal\"\n)\n\nfunc init() {\n\tlibcache.LRU.Register(New)\n}\n\n// New returns a new non-thread safe cache.\nfunc New(cap int) libcache.Cache {\n\tcol := &collection{list.New()}\n\treturn internal.New(col, cap)\n}\n\ntype collection struct {\n\tll *list.List\n}\n\nfunc (c *collection) Move(e *internal.Entry) {\n\tle := e.Element.(*list.Element)\n\tc.ll.MoveToFront(le)\n}\n\nfunc (c *collection) Add(e *internal.Entry) {\n\tle := c.ll.PushFront(e)\n\te.Element = le\n}\n\nfunc (c *collection) Remove(e *internal.Entry) {\n\tle := e.Element.(*list.Element)\n\tc.ll.Remove(le)\n}\n\nfunc (c *collection) Discard() (e *internal.Entry) {\n\tif le := c.ll.Back(); le != nil {\n\t\tc.ll.Remove(le)\n\t\te = le.Value.(*internal.Entry)\n\t}\n\treturn\n}\n\nfunc (c *collection) Len() int {\n\treturn c.ll.Len()\n}\n\nfunc (c *collection) Init() {\n\tc.ll.Init()\n}\n"
  },
  {
    "path": "lru/lru_test.go",
    "content": "package lru\n\nimport (\n\t\"container/list\"\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\"\n\n\t\"github.com/shaj13/libcache/internal\"\n)\n\nfunc TestCollection(t *testing.T) {\n\tentries := []*internal.Entry{}\n\tentries = append(entries, &internal.Entry{Key: 1})\n\tentries = append(entries, &internal.Entry{Key: 2})\n\tentries = append(entries, &internal.Entry{Key: 3})\n\n\tc := &collection{ll: list.New()}\n\tc.Init()\n\n\tfor _, e := range entries {\n\t\tc.Add(e)\n\t}\n\n\tfor _, e := range entries {\n\t\tfor i := 0; i < e.Key.(int); i++ {\n\t\t\tc.Move(e)\n\t\t}\n\t}\n\n\toldest := c.Discard()\n\tc.Remove(entries[2])\n\tback := c.ll.Back().Value.(*internal.Entry)\n\n\tassert.Equal(t, 1, oldest.Key)\n\tassert.Equal(t, 1, c.Len())\n\tassert.Equal(t, 2, back.Key)\n}\n"
  },
  {
    "path": "mru/mru.go",
    "content": "// Package mru implements an MRU cache.\npackage mru\n\nimport (\n\t\"container/list\"\n\n\t\"github.com/shaj13/libcache\"\n\t\"github.com/shaj13/libcache/internal\"\n)\n\nfunc init() {\n\tlibcache.MRU.Register(New)\n}\n\n// New returns a new non-thread safe cache.\nfunc New(cap int) libcache.Cache {\n\tcol := &collection{list.New()}\n\treturn internal.New(col, cap)\n}\n\ntype collection struct {\n\tll *list.List\n}\n\nfunc (c *collection) Move(e *internal.Entry) {\n\tle := e.Element.(*list.Element)\n\tc.ll.MoveToFront(le)\n}\n\nfunc (c *collection) Add(e *internal.Entry) {\n\tle := c.ll.PushFront(e)\n\te.Element = le\n}\n\nfunc (c *collection) Remove(e *internal.Entry) {\n\tle := e.Element.(*list.Element)\n\tc.ll.Remove(le)\n}\n\nfunc (c *collection) Discard() (e *internal.Entry) {\n\tif le := c.ll.Front(); le != nil {\n\t\tc.ll.Remove(le)\n\t\te = le.Value.(*internal.Entry)\n\t}\n\treturn\n}\n\nfunc (c *collection) Len() int {\n\treturn c.ll.Len()\n}\n\nfunc (c *collection) Init() {\n\tc.ll.Init()\n}\n"
  },
  {
    "path": "mru/mru_test.go",
    "content": "package mru\n\nimport (\n\t\"container/list\"\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\"\n\n\t\"github.com/shaj13/libcache/internal\"\n)\n\nfunc TestCollection(t *testing.T) {\n\tentries := []*internal.Entry{}\n\tentries = append(entries, &internal.Entry{Key: 1})\n\tentries = append(entries, &internal.Entry{Key: 2})\n\tentries = append(entries, &internal.Entry{Key: 3})\n\n\tc := &collection{ll: list.New()}\n\tc.Init()\n\n\tfor _, e := range entries {\n\t\tc.Add(e)\n\t}\n\n\tfor _, e := range entries {\n\t\tfor i := 0; i < e.Key.(int); i++ {\n\t\t\tc.Move(e)\n\t\t}\n\t}\n\n\toldest := c.Discard()\n\tc.Remove(entries[1])\n\tback := c.ll.Back().Value.(*internal.Entry)\n\n\tassert.Equal(t, 3, oldest.Key)\n\tassert.Equal(t, 1, c.Len())\n\tassert.Equal(t, 1, back.Key)\n}\n"
  },
  {
    "path": "policy.go",
    "content": "package libcache\n\nimport (\n\t\"strconv\"\n\t\"sync\"\n)\n\nconst (\n\t// IDLE cache replacement policy.\n\tIDLE ReplacementPolicy = iota + 1\n\t// FIFO cache replacement policy.\n\tFIFO\n\t// LIFO cache replacement policy.\n\tLIFO\n\t// LRU cache replacement policy.\n\tLRU\n\t// LFU cache replacement policy.\n\tLFU\n\t// MRU cache replacement policy.\n\tMRU\n\t// ARC cache replacement policy.\n\tARC\n\tmax\n)\n\nvar policies = make([]func(cap int) Cache, max)\n\n// ReplacementPolicy identifies a cache replacement policy function that implemented in another package.\ntype ReplacementPolicy uint\n\n// Register registers a function that returns a new cache instance,\n// of the given cache replacement policy function.\n// This is intended to be called from the init function,\n// in packages that implement cache replacement policy function.\nfunc (c ReplacementPolicy) Register(function func(cap int) Cache) {\n\tif c <= 0 && c >= max { //nolint:staticcheck\n\t\tpanic(\"libcache: Register of unknown cache replacement policy function\")\n\t}\n\n\tpolicies[c] = function\n}\n\n// Available reports whether the given cache replacement policy is linked into the binary.\nfunc (c ReplacementPolicy) Available() bool {\n\treturn c > 0 && c < max && policies[c] != nil\n}\n\n// New returns a new thread safe cache.\n// New panics if the cache replacement policy function is not linked into the binary.\nfunc (c ReplacementPolicy) New(cap int) Cache {\n\tcache := new(cache)\n\tcache.mu = sync.Mutex{}\n\tcache.unsafe = c.NewUnsafe(cap)\n\treturn cache\n}\n\n// NewUnsafe returns a new non-thread safe cache.\n// NewUnsafe panics if the cache replacement policy function is not linked into the binary.\nfunc (c ReplacementPolicy) NewUnsafe(cap int) Cache {\n\tif !c.Available() {\n\t\tpanic(\"libcache: Requested cache replacement policy function #\" + strconv.Itoa(int(c)) + \" is unavailable\")\n\t}\n\n\treturn policies[c](cap)\n}\n\n// String returns string describes the 
cache replacement policy function.\nfunc (c ReplacementPolicy) String() string {\n\tswitch c {\n\tcase IDLE:\n\t\treturn \"IDLE\"\n\tcase FIFO:\n\t\treturn \"FIFO\"\n\tcase LIFO:\n\t\treturn \"LIFO\"\n\tcase LRU:\n\t\treturn \"LRU\"\n\tcase LFU:\n\t\treturn \"LFU\"\n\tcase MRU:\n\t\treturn \"MRU\"\n\tcase ARC:\n\t\treturn \"ARC\"\n\tdefault:\n\t\treturn \"unknown cache replacement policy value \" + strconv.Itoa(int(c))\n\t}\n\n}\n"
  }
]