New Upstream Release - golang-github-karlseguin-ccache

Ready changes

Summary

Merged new upstream version: 3.0.3 (was: 2.0.8).

Diff

diff --git a/Gopkg.lock b/Gopkg.lock
deleted file mode 100644
index be31bba..0000000
--- a/Gopkg.lock
+++ /dev/null
@@ -1,29 +0,0 @@
-# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
-
-
-[[projects]]
-  digest = "1:65c6a2822d8653e4f780c259a86d1b444c0b1ce7601b500deb985387bfe6bdec"
-  name = "github.com/karlseguin/expect"
-  packages = [
-    ".",
-    "build",
-    "mock",
-  ]
-  pruneopts = "UT"
-  revision = "4fcda73748276dc72bcc09729bdb56242093c12c"
-  version = "v1.0.1"
-
-[[projects]]
-  branch = "master"
-  digest = "1:d594bb9f2a18ba4da7ab1368f4debf59f6b77cc7046705553f966837c12059f1"
-  name = "github.com/wsxiaoys/terminal"
-  packages = ["color"]
-  pruneopts = "UT"
-  revision = "0940f3fc43a0ed42d04916b1c04578462c650b09"
-
-[solve-meta]
-  analyzer-name = "dep"
-  analyzer-version = 1
-  input-imports = ["github.com/karlseguin/expect"]
-  solver-name = "gps-cdcl"
-  solver-version = 1
diff --git a/Gopkg.toml b/Gopkg.toml
deleted file mode 100644
index bc87ba1..0000000
--- a/Gopkg.toml
+++ /dev/null
@@ -1,8 +0,0 @@
-
-[[constraint]]
-  name = "github.com/karlseguin/expect"
-  version = "1.0.1"
-
-[prune]
-  go-tests = true
-  unused-packages = true
diff --git a/Makefile b/Makefile
index 5b3f26b..8f5ecba 100644
--- a/Makefile
+++ b/Makefile
@@ -1,5 +1,18 @@
+.PHONY: t
 t:
-	go test ./...
+	go test -race -count=1 ./...
 
+.PHONY: f
 f:
 	go fmt ./...
+
+
+.PHONY: c
+c:
+	go test -race -covermode=atomic ./... -coverprofile=cover.out && \
+# 	go tool cover -html=cover.out && \
+	go tool cover -func cover.out \
+		| grep -vP '[89]\d\.\d%' | grep -v '100.0%' \
+		|| true
+
+	rm cover.out
diff --git a/assert/assert.go b/assert/assert.go
new file mode 100644
index 0000000..116bf69
--- /dev/null
+++ b/assert/assert.go
@@ -0,0 +1,106 @@
+// A wrapper around *testing.T. I hate the if a != b { t.Errorf(....) } pattern.
+// Packages should prefer using the tests package (which exposes all of
+// these functions). The only reason to use this package directly is if
+// the tests package depends on your package (and thus you have a cyclical
+// dependency)
+package assert
+
+import (
+	"math"
+	"reflect"
+	"strings"
+	"testing"
+	"time"
+)
+
+// a == b
+func Equal[T comparable](t *testing.T, actual T, expected T) {
+	t.Helper()
+	if actual != expected {
+		t.Errorf("expected '%v' to equal '%v'", actual, expected)
+		t.FailNow()
+	}
+}
+
+// Two lists are equal (same length & same values in the same order)
+func List[T comparable](t *testing.T, actuals []T, expecteds []T) {
+	t.Helper()
+	Equal(t, len(actuals), len(expecteds))
+
+	for i, actual := range actuals {
+		Equal(t, actual, expecteds[i])
+	}
+}
+
+// needle not in []haystack
+func DoesNotContain[T comparable](t *testing.T, haystack []T, needle T) {
+	t.Helper()
+	for _, v := range haystack {
+		if v == needle {
+			t.Errorf("expected '%v' to not be in '%v'", needle, haystack)
+			t.FailNow()
+		}
+	}
+}
+
+// A value is nil
+func Nil(t *testing.T, actual interface{}) {
+	t.Helper()
+	if actual != nil && !reflect.ValueOf(actual).IsNil() {
+		t.Errorf("expected %v to be nil", actual)
+		t.FailNow()
+	}
+}
+
+// A value is not nil
+func NotNil(t *testing.T, actual interface{}) {
+	t.Helper()
+	if actual == nil {
+		t.Errorf("expected %v to be not nil", actual)
+		t.FailNow()
+	}
+}
+
+// A value is true
+func True(t *testing.T, actual bool) {
+	t.Helper()
+	if !actual {
+		t.Error("expected true, got false")
+		t.FailNow()
+	}
+}
+
+// A value is false
+func False(t *testing.T, actual bool) {
+	t.Helper()
+	if actual {
+		t.Error("expected false, got true")
+		t.FailNow()
+	}
+}
+
+// The string contains the given value
+func StringContains(t *testing.T, actual string, expected string) {
+	t.Helper()
+	if !strings.Contains(actual, expected) {
+		t.Errorf("expected %s to contain %s", actual, expected)
+		t.FailNow()
+	}
+}
+
+func Error(t *testing.T, actual error, expected error) {
+	t.Helper()
+	if actual != expected {
+		t.Errorf("expected '%s' to be '%s'", actual, expected)
+		t.FailNow()
+	}
+}
+
+func Nowish(t *testing.T, actual time.Time) {
+	t.Helper()
+	diff := math.Abs(time.Now().UTC().Sub(actual).Seconds())
+	if diff > 1 {
+		t.Errorf("expected '%s' to be nowish", actual)
+		t.FailNow()
+	}
+}
diff --git a/bucket.go b/bucket.go
index cef9349..df1dc98 100644
--- a/bucket.go
+++ b/bucket.go
@@ -1,47 +1,105 @@
 package ccache
 
 import (
+	"strings"
 	"sync"
 	"time"
 )
 
-type bucket struct {
+type bucket[T any] struct {
 	sync.RWMutex
-	lookup map[string]*Item
+	lookup map[string]*Item[T]
 }
 
-func (b *bucket) itemCount() int {
+func (b *bucket[T]) itemCount() int {
 	b.RLock()
 	defer b.RUnlock()
 	return len(b.lookup)
 }
 
-func (b *bucket) get(key string) *Item {
+func (b *bucket[T]) forEachFunc(matches func(key string, item *Item[T]) bool) bool {
+	lookup := b.lookup
+	b.RLock()
+	defer b.RUnlock()
+	for key, item := range lookup {
+		if !matches(key, item) {
+			return false
+		}
+	}
+	return true
+}
+
+func (b *bucket[T]) get(key string) *Item[T] {
 	b.RLock()
 	defer b.RUnlock()
 	return b.lookup[key]
 }
 
-func (b *bucket) set(key string, value interface{}, duration time.Duration) (*Item, *Item) {
+func (b *bucket[T]) set(key string, value T, duration time.Duration, track bool) (*Item[T], *Item[T]) {
 	expires := time.Now().Add(duration).UnixNano()
-	item := newItem(key, value, expires)
+	item := newItem(key, value, expires, track)
 	b.Lock()
-	defer b.Unlock()
 	existing := b.lookup[key]
 	b.lookup[key] = item
+	b.Unlock()
 	return item, existing
 }
 
-func (b *bucket) delete(key string) *Item {
+func (b *bucket[T]) delete(key string) *Item[T] {
 	b.Lock()
-	defer b.Unlock()
 	item := b.lookup[key]
 	delete(b.lookup, key)
+	b.Unlock()
 	return item
 }
 
-func (b *bucket) clear() {
+// This is an expensive operation, so we do what we can to optimize it and limit
+// the impact it has on concurrent operations. Specifically, we:
+// 1 - Do an initial iteration to collect matches. This allows us to do the
+//     "expensive" prefix check (on all values) using only a read-lock
+// 2 - Do a second iteration, under write lock, for the matched results to do
+//     the actual deletion
+
+// Also, this is the only place where the Bucket is aware of cache detail: the
+// deletables channel. Passing it here lets us avoid iterating over matched items
+// again in the cache. Further, we pass item to deletables BEFORE actually removing
+// the item from the map. I'm pretty sure this is 100% fine, but it is unique.
+// (We do this so that the write to the channel is under the read lock and not the
+// write lock)
+func (b *bucket[T]) deleteFunc(matches func(key string, item *Item[T]) bool, deletables chan *Item[T]) int {
+	lookup := b.lookup
+	items := make([]*Item[T], 0)
+
+	b.RLock()
+	for key, item := range lookup {
+		if matches(key, item) {
+			deletables <- item
+			items = append(items, item)
+		}
+	}
+	b.RUnlock()
+
+	if len(items) == 0 {
+		// avoid the write lock if we can
+		return 0
+	}
+
+	b.Lock()
+	for _, item := range items {
+		delete(lookup, item.key)
+	}
+	b.Unlock()
+	return len(items)
+}
+
+func (b *bucket[T]) deletePrefix(prefix string, deletables chan *Item[T]) int {
+	return b.deleteFunc(func(key string, item *Item[T]) bool {
+		return strings.HasPrefix(key, prefix)
+	}, deletables)
+}
+
+func (b *bucket[T]) clear() {
 	b.Lock()
-	defer b.Unlock()
-	b.lookup = make(map[string]*Item)
+	b.lookup = make(map[string]*Item[T])
+	b.Unlock()
 }
diff --git a/bucket_test.go b/bucket_test.go
index e41b8cf..6ec50c2 100644
--- a/bucket_test.go
+++ b/bucket_test.go
@@ -1,69 +1,56 @@
 package ccache
 
 import (
-	. "github.com/karlseguin/expect"
 	"testing"
 	"time"
-)
-
-type BucketTests struct {
-}
 
-func Test_Bucket(t *testing.T) {
-	Expectify(new(BucketTests), t)
-}
+	"github.com/karlseguin/ccache/v3/assert"
+)
 
-func (_ *BucketTests) GetMissFromBucket() {
+func Test_Bucket_GetMissFromBucket(t *testing.T) {
 	bucket := testBucket()
-	Expect(bucket.get("invalid")).To.Equal(nil)
+	assert.Nil(t, bucket.get("invalid"))
 }
 
-func (_ *BucketTests) GetHitFromBucket() {
+func Test_Bucket_GetHitFromBucket(t *testing.T) {
 	bucket := testBucket()
 	item := bucket.get("power")
-	assertValue(item, "9000")
+	assertValue(t, item, "9000")
 }
 
-func (_ *BucketTests) DeleteItemFromBucket() {
+func Test_Bucket_DeleteItemFromBucket(t *testing.T) {
 	bucket := testBucket()
 	bucket.delete("power")
-	Expect(bucket.get("power")).To.Equal(nil)
+	assert.Nil(t, bucket.get("power"))
 }
 
-func (_ *BucketTests) SetsANewBucketItem() {
+func Test_Bucket_SetsANewBucketItem(t *testing.T) {
 	bucket := testBucket()
-	item, existing := bucket.set("spice", TestValue("flow"), time.Minute)
-	assertValue(item, "flow")
+	item, existing := bucket.set("spice", "flow", time.Minute, false)
+	assertValue(t, item, "flow")
 	item = bucket.get("spice")
-	assertValue(item, "flow")
-	Expect(existing).To.Equal(nil)
+	assertValue(t, item, "flow")
+	assert.Equal(t, existing, nil)
 }
 
-func (_ *BucketTests) SetsAnExistingItem() {
+func Test_Bucket_SetsAnExistingItem(t *testing.T) {
 	bucket := testBucket()
-	item, existing := bucket.set("power", TestValue("9001"), time.Minute)
-	assertValue(item, "9001")
+	item, existing := bucket.set("power", "9001", time.Minute, false)
+	assertValue(t, item, "9001")
 	item = bucket.get("power")
-	assertValue(item, "9001")
-	assertValue(existing, "9000")
+	assertValue(t, item, "9001")
+	assertValue(t, existing, "9000")
 }
 
-func testBucket() *bucket {
-	b := &bucket{lookup: make(map[string]*Item)}
-	b.lookup["power"] = &Item{
+func testBucket() *bucket[string] {
+	b := &bucket[string]{lookup: make(map[string]*Item[string])}
+	b.lookup["power"] = &Item[string]{
 		key:   "power",
-		value: TestValue("9000"),
+		value: "9000",
 	}
 	return b
 }
 
-func assertValue(item *Item, expected string) {
-	value := item.value.(TestValue)
-	Expect(value).To.Equal(TestValue(expected))
-}
-
-type TestValue string
-
-func (v TestValue) Expires() time.Time {
-	return time.Now()
+func assertValue(t *testing.T, item *Item[string], expected string) {
+	assert.Equal(t, item.value, expected)
 }
diff --git a/cache.go b/cache.go
index 28feb6e..074ad56 100644
--- a/cache.go
+++ b/cache.go
@@ -2,42 +2,71 @@
 package ccache
 
 import (
-	"container/list"
 	"hash/fnv"
 	"sync/atomic"
 	"time"
 )
 
-type Cache struct {
-	*Configuration
-	list        *list.List
+// The cache has a generic 'control' channel that is used to send
+// messages to the worker. These are the messages that can be sent to it
+type getDropped struct {
+	res chan int
+}
+
+type getSize struct {
+	res chan int64
+}
+
+type setMaxSize struct {
+	size int64
+	done chan struct{}
+}
+
+type clear struct {
+	done chan struct{}
+}
+
+type syncWorker struct {
+	done chan struct{}
+}
+
+type gc struct {
+	done chan struct{}
+}
+
+type Cache[T any] struct {
+	*Configuration[T]
+	control
+	list        *List[*Item[T]]
 	size        int64
-	buckets     []*bucket
+	buckets     []*bucket[T]
 	bucketMask  uint32
-	deletables  chan *Item
-	promotables chan *Item
-	donec       chan struct{}
+	deletables  chan *Item[T]
+	promotables chan *Item[T]
 }
 
 // Create a new cache with the specified configuration
 // See ccache.Configure() for creating a configuration
-func New(config *Configuration) *Cache {
-	c := &Cache{
-		list:          list.New(),
+func New[T any](config *Configuration[T]) *Cache[T] {
+	c := &Cache[T]{
+		list:          NewList[*Item[T]](),
 		Configuration: config,
+		control:       newControl(),
 		bucketMask:    uint32(config.buckets) - 1,
-		buckets:       make([]*bucket, config.buckets),
+		buckets:       make([]*bucket[T], config.buckets),
+		deletables:    make(chan *Item[T], config.deleteBuffer),
+		promotables:   make(chan *Item[T], config.promoteBuffer),
 	}
-	for i := 0; i < int(config.buckets); i++ {
-		c.buckets[i] = &bucket{
-			lookup: make(map[string]*Item),
+	for i := 0; i < config.buckets; i++ {
+		c.buckets[i] = &bucket[T]{
+			lookup: make(map[string]*Item[T]),
 		}
 	}
-	c.restart()
+	go c.worker()
 	return c
 }
 
-func (c *Cache) ItemCount() int {
+func (c *Cache[T]) ItemCount() int {
 	count := 0
 	for _, b := range c.buckets {
 		count += b.itemCount()
@@ -45,41 +74,82 @@ func (c *Cache) ItemCount() int {
 	return count
 }
 
+func (c *Cache[T]) DeletePrefix(prefix string) int {
+	count := 0
+	for _, b := range c.buckets {
+		count += b.deletePrefix(prefix, c.deletables)
+	}
+	return count
+}
+
+// Deletes all items that the matches func evaluates to true.
+func (c *Cache[T]) DeleteFunc(matches func(key string, item *Item[T]) bool) int {
+	count := 0
+	for _, b := range c.buckets {
+		count += b.deleteFunc(matches, c.deletables)
+	}
+	return count
+}
+
+func (c *Cache[T]) ForEachFunc(matches func(key string, item *Item[T]) bool) {
+	for _, b := range c.buckets {
+		if !b.forEachFunc(matches) {
+			break
+		}
+	}
+}
+
 // Get an item from the cache. Returns nil if the item wasn't found.
 // This can return an expired item. Use item.Expired() to see if the item
 // is expired and item.TTL() to see how long until the item expires (which
 // will be negative for an already expired item).
-func (c *Cache) Get(key string) *Item {
+func (c *Cache[T]) Get(key string) *Item[T] {
 	item := c.bucket(key).get(key)
 	if item == nil {
 		return nil
 	}
-	if item.expires > time.Now().UnixNano() {
-		c.promote(item)
+	if !item.Expired() {
+		select {
+		case c.promotables <- item:
+		default:
+		}
 	}
 	return item
 }
 
+// Same as Get but does not promote the value. This essentially circumvents the
+// "least recently used" aspect of this cache. To some degree, it's akin to a
+// "peek"
+func (c *Cache[T]) GetWithoutPromote(key string) *Item[T] {
+	return c.bucket(key).get(key)
+}
+
 // Used when the cache was created with the Track() configuration option.
 // Avoid otherwise
-func (c *Cache) TrackingGet(key string) TrackedItem {
+func (c *Cache[T]) TrackingGet(key string) TrackedItem[T] {
 	item := c.Get(key)
 	if item == nil {
-		return NilTracked
+		return nil
 	}
 	item.track()
 	return item
 }
 
+// Used when the cache was created with the Track() configuration option.
+// Sets the item, and returns a tracked reference to it.
+func (c *Cache[T]) TrackingSet(key string, value T, duration time.Duration) TrackedItem[T] {
+	return c.set(key, value, duration, true)
+}
+
 // Set the value in the cache for the specified duration
-func (c *Cache) Set(key string, value interface{}, duration time.Duration) {
-	c.set(key, value, duration)
+func (c *Cache[T]) Set(key string, value T, duration time.Duration) {
+	c.set(key, value, duration, false)
 }
 
 // Replace the value if it exists, does not set if it doesn't.
 // Returns true if the item existed an was replaced, false otherwise.
 // Replace does not reset item's TTL
-func (c *Cache) Replace(key string, value interface{}) bool {
+func (c *Cache[T]) Replace(key string, value T) bool {
 	item := c.bucket(key).get(key)
 	if item == nil {
 		return false
@@ -91,7 +161,10 @@ func (c *Cache) Replace(key string, value interface{}) bool {
 // Attempts to get the value from the cache and calles fetch on a miss (missing
 // or stale item). If fetch returns an error, no value is cached and the error
 // is returned back to the caller.
-func (c *Cache) Fetch(key string, duration time.Duration, fetch func() (interface{}, error)) (*Item, error) {
+// Note that Fetch merely calls the public Get and Set functions. If you want
+// a different Fetch behavior, such as thundering herd protection or returning
+// expired items, implement it in your application.
+func (c *Cache[T]) Fetch(key string, duration time.Duration, fetch func() (T, error)) (*Item[T], error) {
 	item := c.Get(key)
 	if item != nil && !item.Expired() {
 		return item, nil
@@ -100,11 +173,11 @@ func (c *Cache) Fetch(key string, duration time.Duration, fetch func() (interfac
 	if err != nil {
 		return nil, err
 	}
-	return c.set(key, value, duration), nil
+	return c.set(key, value, duration, false), nil
 }
 
 // Remove the item from the cache, return true if the item was present, false otherwise.
-func (c *Cache) Delete(key string) bool {
+func (c *Cache[T]) Delete(key string) bool {
 	item := c.bucket(key).delete(key)
 	if item != nil {
 		c.deletables <- item
@@ -113,67 +186,71 @@ func (c *Cache) Delete(key string) bool {
 	return false
 }
 
-//this isn't thread safe. It's meant to be called from non-concurrent tests
-func (c *Cache) Clear() {
-	for _, bucket := range c.buckets {
-		bucket.clear()
-	}
-	c.size = 0
-	c.list = list.New()
-}
-
-// Stops the background worker. Operations performed on the cache after Stop
-// is called are likely to panic
-func (c *Cache) Stop() {
-	close(c.promotables)
-	<-c.donec
-}
-
-func (c *Cache) restart() {
-	c.deletables = make(chan *Item, c.deleteBuffer)
-	c.promotables = make(chan *Item, c.promoteBuffer)
-	c.donec = make(chan struct{})
-	go c.worker()
-}
-
-func (c *Cache) deleteItem(bucket *bucket, item *Item) {
+func (c *Cache[T]) deleteItem(bucket *bucket[T], item *Item[T]) {
 	bucket.delete(item.key) //stop other GETs from getting it
 	c.deletables <- item
 }
 
-func (c *Cache) set(key string, value interface{}, duration time.Duration) *Item {
-	item, existing := c.bucket(key).set(key, value, duration)
+func (c *Cache[T]) set(key string, value T, duration time.Duration, track bool) *Item[T] {
+	item, existing := c.bucket(key).set(key, value, duration, track)
 	if existing != nil {
 		c.deletables <- existing
 	}
-	c.promote(item)
+	c.promotables <- item
 	return item
 }
 
-func (c *Cache) bucket(key string) *bucket {
+func (c *Cache[T]) bucket(key string) *bucket[T] {
 	h := fnv.New32a()
 	h.Write([]byte(key))
 	return c.buckets[h.Sum32()&c.bucketMask]
 }
 
-func (c *Cache) promote(item *Item) {
-	c.promotables <- item
-}
+func (c *Cache[T]) worker() {
+	dropped := 0
+	cc := c.control
 
-func (c *Cache) worker() {
-	defer close(c.donec)
+	promoteItem := func(item *Item[T]) {
+		if c.doPromote(item) && c.size > c.maxSize {
+			dropped += c.gc()
+		}
+	}
 
 	for {
 		select {
-		case item, ok := <-c.promotables:
-			if ok == false {
-				goto drain
-			}
-			if c.doPromote(item) && c.size > c.maxSize {
-				c.gc()
-			}
+		case item := <-c.promotables:
+			promoteItem(item)
 		case item := <-c.deletables:
 			c.doDelete(item)
+		case control := <-cc:
+			switch msg := control.(type) {
+			case controlStop:
+				goto drain
+			case controlGetDropped:
+				msg.res <- dropped
+				dropped = 0
+			case controlSetMaxSize:
+				c.maxSize = msg.size
+				if c.size > c.maxSize {
+					dropped += c.gc()
+				}
+				msg.done <- struct{}{}
+			case controlClear:
+				for _, bucket := range c.buckets {
+					bucket.clear()
+				}
+				c.size = 0
+				c.list = NewList[*Item[T]]()
+				msg.done <- struct{}{}
+			case controlGetSize:
+				msg.res <- c.size
+			case controlGC:
+				dropped += c.gc()
+				msg.done <- struct{}{}
+			case controlSyncUpdates:
+				doAllPendingPromotesAndDeletes(c.promotables, promoteItem, c.deletables, c.doDelete)
+				msg.done <- struct{}{}
+			}
 		}
 	}
 
@@ -183,59 +260,101 @@ drain:
 		case item := <-c.deletables:
 			c.doDelete(item)
 		default:
-			close(c.deletables)
 			return
 		}
 	}
 }
 
-func (c *Cache) doDelete(item *Item) {
-	if item.element == nil {
+// This method is used to implement SyncUpdates. It simply receives and processes as many
+// items as it can receive from the promotables and deletables channels immediately without
+// blocking. If some other goroutine sends an item on either channel after this method has
+// finished receiving, that's OK, because SyncUpdates only guarantees processing of values
+// that were already sent by the same goroutine.
+func doAllPendingPromotesAndDeletes[T any](
+	promotables <-chan *Item[T],
+	promoteFn func(*Item[T]),
+	deletables <-chan *Item[T],
+	deleteFn func(*Item[T]),
+) {
+doAllPromotes:
+	for {
+		select {
+		case item := <-promotables:
+			promoteFn(item)
+		default:
+			break doAllPromotes
+		}
+	}
+doAllDeletes:
+	for {
+		select {
+		case item := <-deletables:
+			deleteFn(item)
+		default:
+			break doAllDeletes
+		}
+	}
+}
+
+func (c *Cache[T]) doDelete(item *Item[T]) {
+	if item.node == nil {
 		item.promotions = -2
 	} else {
 		c.size -= item.size
 		if c.onDelete != nil {
 			c.onDelete(item)
 		}
-		c.list.Remove(item.element)
+		c.list.Remove(item.node)
+		item.node = nil
+		item.promotions = -2
 	}
 }
 
-func (c *Cache) doPromote(item *Item) bool {
+func (c *Cache[T]) doPromote(item *Item[T]) bool {
 	//already deleted
 	if item.promotions == -2 {
 		return false
 	}
-	if item.element != nil { //not a new item
+	if item.node != nil { //not a new item
 		if item.shouldPromote(c.getsPerPromote) {
-			c.list.MoveToFront(item.element)
+			c.list.MoveToFront(item.node)
 			item.promotions = 0
 		}
 		return false
 	}
 
 	c.size += item.size
-	item.element = c.list.PushFront(item)
+	item.node = c.list.Insert(item)
 	return true
 }
 
-func (c *Cache) gc() {
-	element := c.list.Back()
-	for i := 0; i < c.itemsToPrune; i++ {
-		if element == nil {
-			return
+func (c *Cache[T]) gc() int {
+	dropped := 0
+	node := c.list.Tail
+
+	itemsToPrune := int64(c.itemsToPrune)
+	if min := c.size - c.maxSize; min > itemsToPrune {
+		itemsToPrune = min
+	}
+
+	for i := int64(0); i < itemsToPrune; i++ {
+		if node == nil {
+			return dropped
 		}
-		prev := element.Prev()
-		item := element.Value.(*Item)
+		prev := node.Prev
+		item := node.Value
 		if c.tracking == false || atomic.LoadInt32(&item.refCount) == 0 {
 			c.bucket(item.key).delete(item.key)
 			c.size -= item.size
-			c.list.Remove(element)
+			c.list.Remove(node)
 			if c.onDelete != nil {
 				c.onDelete(item)
 			}
+			dropped += 1
+			item.node = nil
 			item.promotions = -2
 		}
-		element = prev
+		node = prev
 	}
+	return dropped
 }
diff --git a/cache_test.go b/cache_test.go
index 67bb52e..050a1d8 100644
--- a/cache_test.go
+++ b/cache_test.go
@@ -1,186 +1,364 @@
 package ccache
 
 import (
+	"math/rand"
+	"sort"
 	"strconv"
+	"sync/atomic"
 	"testing"
 	"time"
 
-	. "github.com/karlseguin/expect"
+	"github.com/karlseguin/ccache/v3/assert"
 )
 
-type CacheTests struct{}
-
-func Test_Cache(t *testing.T) {
-	Expectify(new(CacheTests), t)
-}
-
-func (_ CacheTests) DeletesAValue() {
-	cache := New(Configure())
-	Expect(cache.ItemCount()).To.Equal(0)
+func Test_CacheDeletesAValue(t *testing.T) {
+	cache := New(Configure[string]())
+	defer cache.Stop()
+	assert.Equal(t, cache.ItemCount(), 0)
 
 	cache.Set("spice", "flow", time.Minute)
 	cache.Set("worm", "sand", time.Minute)
-	Expect(cache.ItemCount()).To.Equal(2)
+	assert.Equal(t, cache.ItemCount(), 2)
 
 	cache.Delete("spice")
-	Expect(cache.Get("spice")).To.Equal(nil)
-	Expect(cache.Get("worm").Value()).To.Equal("sand")
-	Expect(cache.ItemCount()).To.Equal(1)
+	assert.Equal(t, cache.Get("spice"), nil)
+	assert.Equal(t, cache.Get("worm").Value(), "sand")
+	assert.Equal(t, cache.ItemCount(), 1)
 }
 
-func (_ CacheTests) OnDeleteCallbackCalled() {
-	onDeleteFnCalled := false
-	onDeleteFn := func(item *Item) {
+func Test_CacheDeletesAPrefix(t *testing.T) {
+	cache := New(Configure[string]())
+	defer cache.Stop()
+	assert.Equal(t, cache.ItemCount(), 0)
+
+	cache.Set("aaa", "1", time.Minute)
+	cache.Set("aab", "2", time.Minute)
+	cache.Set("aac", "3", time.Minute)
+	cache.Set("ac", "4", time.Minute)
+	cache.Set("z5", "7", time.Minute)
+	assert.Equal(t, cache.ItemCount(), 5)
+
+	assert.Equal(t, cache.DeletePrefix("9a"), 0)
+	assert.Equal(t, cache.ItemCount(), 5)
+
+	assert.Equal(t, cache.DeletePrefix("aa"), 3)
+	assert.Equal(t, cache.Get("aaa"), nil)
+	assert.Equal(t, cache.Get("aab"), nil)
+	assert.Equal(t, cache.Get("aac"), nil)
+	assert.Equal(t, cache.Get("ac").Value(), "4")
+	assert.Equal(t, cache.Get("z5").Value(), "7")
+	assert.Equal(t, cache.ItemCount(), 2)
+}
+
+func Test_CacheDeletesAFunc(t *testing.T) {
+	cache := New(Configure[int]())
+	defer cache.Stop()
+	assert.Equal(t, cache.ItemCount(), 0)
+
+	cache.Set("a", 1, time.Minute)
+	cache.Set("b", 2, time.Minute)
+	cache.Set("c", 3, time.Minute)
+	cache.Set("d", 4, time.Minute)
+	cache.Set("e", 5, time.Minute)
+	cache.Set("f", 6, time.Minute)
+	assert.Equal(t, cache.ItemCount(), 6)
+
+	assert.Equal(t, cache.DeleteFunc(func(key string, item *Item[int]) bool {
+		return false
+	}), 0)
+	assert.Equal(t, cache.ItemCount(), 6)
+
+	assert.Equal(t, cache.DeleteFunc(func(key string, item *Item[int]) bool {
+		return item.Value() < 4
+	}), 3)
+	assert.Equal(t, cache.ItemCount(), 3)
+
+	assert.Equal(t, cache.DeleteFunc(func(key string, item *Item[int]) bool {
+		return key == "d"
+	}), 1)
+	assert.Equal(t, cache.ItemCount(), 2)
+
+}
+
+func Test_CacheOnDeleteCallbackCalled(t *testing.T) {
+	onDeleteFnCalled := int32(0)
+	onDeleteFn := func(item *Item[string]) {
 		if item.key == "spice" {
-			onDeleteFnCalled = true
+			atomic.AddInt32(&onDeleteFnCalled, 1)
 		}
 	}
 
-	cache := New(Configure().OnDelete(onDeleteFn))
+	cache := New(Configure[string]().OnDelete(onDeleteFn))
 	cache.Set("spice", "flow", time.Minute)
 	cache.Set("worm", "sand", time.Minute)
 
-	time.Sleep(time.Millisecond * 10) // Run once to init
+	cache.SyncUpdates() // wait for worker to pick up preceding updates
+
 	cache.Delete("spice")
-	time.Sleep(time.Millisecond * 10) // Wait for worker to pick up deleted items
+	cache.SyncUpdates()
 
-	Expect(cache.Get("spice")).To.Equal(nil)
-	Expect(cache.Get("worm").Value()).To.Equal("sand")
-	Expect(onDeleteFnCalled).To.Equal(true)
+	assert.Equal(t, cache.Get("spice"), nil)
+	assert.Equal(t, cache.Get("worm").Value(), "sand")
+	assert.Equal(t, atomic.LoadInt32(&onDeleteFnCalled), 1)
 }
 
-func (_ CacheTests) FetchesExpiredItems() {
-	cache := New(Configure())
-	fn := func() (interface{}, error) { return "moo-moo", nil }
+func Test_CacheFetchesExpiredItems(t *testing.T) {
+	cache := New(Configure[string]())
+	fn := func() (string, error) { return "moo-moo", nil }
 
 	cache.Set("beef", "moo", time.Second*-1)
-	Expect(cache.Get("beef").Value()).To.Equal("moo")
+	assert.Equal(t, cache.Get("beef").Value(), "moo")
 
 	out, _ := cache.Fetch("beef", time.Second, fn)
-	Expect(out.Value()).To.Equal("moo-moo")
+	assert.Equal(t, out.Value(), "moo-moo")
 }
 
-func (_ CacheTests) GCsTheOldestItems() {
-	cache := New(Configure().ItemsToPrune(10))
+func Test_CacheGCsTheOldestItems(t *testing.T) {
+	cache := New(Configure[int]().ItemsToPrune(10))
 	for i := 0; i < 500; i++ {
 		cache.Set(strconv.Itoa(i), i, time.Minute)
 	}
-	//let the items get promoted (and added to our list)
-	time.Sleep(time.Millisecond * 10)
-	gcCache(cache)
-	Expect(cache.Get("9")).To.Equal(nil)
-	Expect(cache.Get("10").Value()).To.Equal(10)
-	Expect(cache.ItemCount()).To.Equal(490)
+	cache.SyncUpdates()
+	cache.GC()
+	assert.Equal(t, cache.Get("9"), nil)
+	assert.Equal(t, cache.Get("10").Value(), 10)
+	assert.Equal(t, cache.ItemCount(), 490)
 }
 
-func (_ CacheTests) PromotedItemsDontGetPruned() {
-	cache := New(Configure().ItemsToPrune(10).GetsPerPromote(1))
+func Test_CachePromotedItemsDontGetPruned(t *testing.T) {
+	cache := New(Configure[int]().ItemsToPrune(10).GetsPerPromote(1))
 	for i := 0; i < 500; i++ {
 		cache.Set(strconv.Itoa(i), i, time.Minute)
 	}
-	time.Sleep(time.Millisecond * 10) //run the worker once to init the list
+	cache.SyncUpdates()
 	cache.Get("9")
-	time.Sleep(time.Millisecond * 10)
-	gcCache(cache)
-	Expect(cache.Get("9").Value()).To.Equal(9)
-	Expect(cache.Get("10")).To.Equal(nil)
-	Expect(cache.Get("11").Value()).To.Equal(11)
+	cache.SyncUpdates()
+	cache.GC()
+	assert.Equal(t, cache.Get("9").Value(), 9)
+	assert.Equal(t, cache.Get("10"), nil)
+	assert.Equal(t, cache.Get("11").Value(), 11)
+}
+
+func Test_GetWithoutPromoteDoesNotPromote(t *testing.T) {
+	cache := New(Configure[int]().ItemsToPrune(10).GetsPerPromote(1))
+	for i := 0; i < 500; i++ {
+		cache.Set(strconv.Itoa(i), i, time.Minute)
+	}
+	cache.SyncUpdates()
+	cache.GetWithoutPromote("9")
+	cache.SyncUpdates()
+	cache.GC()
+	assert.Equal(t, cache.Get("9"), nil)
+	assert.Equal(t, cache.Get("10").Value(), 10)
+	assert.Equal(t, cache.Get("11").Value(), 11)
 }
 
-func (_ CacheTests) TrackerDoesNotCleanupHeldInstance() {
-	cache := New(Configure().ItemsToPrune(10).Track())
-	for i := 0; i < 10; i++ {
+func Test_CacheTrackerDoesNotCleanupHeldInstance(t *testing.T) {
+	cache := New(Configure[int]().ItemsToPrune(11).Track())
+	item0 := cache.TrackingSet("0", 0, time.Minute)
+	for i := 1; i < 11; i++ {
 		cache.Set(strconv.Itoa(i), i, time.Minute)
 	}
-	item := cache.TrackingGet("0")
-	time.Sleep(time.Millisecond * 10)
-	gcCache(cache)
-	Expect(cache.Get("0").Value()).To.Equal(0)
-	Expect(cache.Get("1")).To.Equal(nil)
-	item.Release()
-	gcCache(cache)
-	Expect(cache.Get("0")).To.Equal(nil)
+	item1 := cache.TrackingGet("1")
+	cache.SyncUpdates()
+	cache.GC()
+	assert.Equal(t, cache.Get("0").Value(), 0)
+	assert.Equal(t, cache.Get("1").Value(), 1)
+	item0.Release()
+	item1.Release()
+	cache.GC()
+	assert.Equal(t, cache.Get("0"), nil)
+	assert.Equal(t, cache.Get("1"), nil)
 }
 
-func (_ CacheTests) RemovesOldestItemWhenFull() {
+func Test_CacheRemovesOldestItemWhenFull(t *testing.T) {
 	onDeleteFnCalled := false
-	onDeleteFn := func(item *Item) {
+	onDeleteFn := func(item *Item[int]) {
 		if item.key == "0" {
 			onDeleteFnCalled = true
 		}
 	}
 
-	cache := New(Configure().MaxSize(5).ItemsToPrune(1).OnDelete(onDeleteFn))
+	cache := New(Configure[int]().MaxSize(5).ItemsToPrune(1).OnDelete(onDeleteFn))
 	for i := 0; i < 7; i++ {
 		cache.Set(strconv.Itoa(i), i, time.Minute)
 	}
-	time.Sleep(time.Millisecond * 10)
-	Expect(cache.Get("0")).To.Equal(nil)
-	Expect(cache.Get("1")).To.Equal(nil)
-	Expect(cache.Get("2").Value()).To.Equal(2)
-	Expect(onDeleteFnCalled).To.Equal(true)
-	Expect(cache.ItemCount()).To.Equal(5)
+	cache.SyncUpdates()
+	assert.Equal(t, cache.Get("0"), nil)
+	assert.Equal(t, cache.Get("1"), nil)
+	assert.Equal(t, cache.Get("2").Value(), 2)
+	assert.Equal(t, onDeleteFnCalled, true)
+	assert.Equal(t, cache.ItemCount(), 5)
 }
 
-func (_ CacheTests) RemovesOldestItemWhenFullBySizer() {
-	cache := New(Configure().MaxSize(9).ItemsToPrune(2))
+func Test_CacheRemovesOldestItemWhenFullBySizer(t *testing.T) {
+	cache := New(Configure[*SizedItem]().MaxSize(9).ItemsToPrune(2))
 	for i := 0; i < 7; i++ {
 		cache.Set(strconv.Itoa(i), &SizedItem{i, 2}, time.Minute)
 	}
-	time.Sleep(time.Millisecond * 10)
-	Expect(cache.Get("0")).To.Equal(nil)
-	Expect(cache.Get("1")).To.Equal(nil)
-	Expect(cache.Get("2")).To.Equal(nil)
-	Expect(cache.Get("3")).To.Equal(nil)
-	Expect(cache.Get("4").Value().(*SizedItem).id).To.Equal(4)
+	cache.SyncUpdates()
+	assert.Equal(t, cache.Get("0"), nil)
+	assert.Equal(t, cache.Get("1"), nil)
+	assert.Equal(t, cache.Get("2"), nil)
+	assert.Equal(t, cache.Get("3"), nil)
+	assert.Equal(t, cache.Get("4").Value().id, 4)
+	assert.Equal(t, cache.GetDropped(), 4)
+	assert.Equal(t, cache.GetDropped(), 0)
 }
 
-func (_ CacheTests) SetUpdatesSizeOnDelta() {
-	cache := New(Configure())
+func Test_CacheSetUpdatesSizeOnDelta(t *testing.T) {
+	cache := New(Configure[*SizedItem]())
 	cache.Set("a", &SizedItem{0, 2}, time.Minute)
 	cache.Set("b", &SizedItem{0, 3}, time.Minute)
-	time.Sleep(time.Millisecond * 5)
-	checkSize(cache, 5)
+	cache.SyncUpdates()
+	assert.Equal(t, cache.GetSize(), 5)
 	cache.Set("b", &SizedItem{0, 3}, time.Minute)
-	time.Sleep(time.Millisecond * 5)
-	checkSize(cache, 5)
+	cache.SyncUpdates()
+	assert.Equal(t, cache.GetSize(), 5)
 	cache.Set("b", &SizedItem{0, 4}, time.Minute)
-	time.Sleep(time.Millisecond * 5)
-	checkSize(cache, 6)
+	cache.SyncUpdates()
+	assert.Equal(t, cache.GetSize(), 6)
 	cache.Set("b", &SizedItem{0, 2}, time.Minute)
-	time.Sleep(time.Millisecond * 5)
-	checkSize(cache, 4)
+	cache.SyncUpdates()
+	assert.Equal(t, cache.GetSize(), 4)
 	cache.Delete("b")
-	time.Sleep(time.Millisecond * 100)
-	checkSize(cache, 2)
+	cache.SyncUpdates()
+	assert.Equal(t, cache.GetSize(), 2)
 }
 
-func (_ CacheTests) ReplaceDoesNotchangeSizeIfNotSet() {
-	cache := New(Configure())
+func Test_CacheReplaceDoesNotchangeSizeIfNotSet(t *testing.T) {
+	cache := New(Configure[*SizedItem]())
 	cache.Set("1", &SizedItem{1, 2}, time.Minute)
 	cache.Set("2", &SizedItem{1, 2}, time.Minute)
 	cache.Set("3", &SizedItem{1, 2}, time.Minute)
 	cache.Replace("4", &SizedItem{1, 2})
-	time.Sleep(time.Millisecond * 5)
-	checkSize(cache, 6)
+	cache.SyncUpdates()
+	assert.Equal(t, cache.GetSize(), 6)
 }
 
-func (_ CacheTests) ReplaceChangesSize() {
-	cache := New(Configure())
+func Test_CacheReplaceChangesSize(t *testing.T) {
+	cache := New(Configure[*SizedItem]())
 	cache.Set("1", &SizedItem{1, 2}, time.Minute)
 	cache.Set("2", &SizedItem{1, 2}, time.Minute)
 
 	cache.Replace("2", &SizedItem{1, 2})
-	time.Sleep(time.Millisecond * 5)
-	checkSize(cache, 4)
+	cache.SyncUpdates()
+	assert.Equal(t, cache.GetSize(), 4)
 
 	cache.Replace("2", &SizedItem{1, 1})
-	time.Sleep(time.Millisecond * 5)
-	checkSize(cache, 3)
+	cache.SyncUpdates()
+	assert.Equal(t, cache.GetSize(), 3)
 
 	cache.Replace("2", &SizedItem{1, 3})
-	time.Sleep(time.Millisecond * 5)
-	checkSize(cache, 5)
+	cache.SyncUpdates()
+	assert.Equal(t, cache.GetSize(), 5)
+}
+
+func Test_CacheResizeOnTheFly(t *testing.T) {
+	cache := New(Configure[int]().MaxSize(9).ItemsToPrune(1))
+	for i := 0; i < 5; i++ {
+		cache.Set(strconv.Itoa(i), i, time.Minute)
+	}
+	cache.SetMaxSize(3)
+	cache.SyncUpdates()
+	assert.Equal(t, cache.GetDropped(), 2)
+	assert.Equal(t, cache.Get("0"), nil)
+	assert.Equal(t, cache.Get("1"), nil)
+	assert.Equal(t, cache.Get("2").Value(), 2)
+	assert.Equal(t, cache.Get("3").Value(), 3)
+	assert.Equal(t, cache.Get("4").Value(), 4)
+
+	cache.Set("5", 5, time.Minute)
+	cache.SyncUpdates()
+	assert.Equal(t, cache.GetDropped(), 1)
+	assert.Equal(t, cache.Get("2"), nil)
+	assert.Equal(t, cache.Get("3").Value(), 3)
+	assert.Equal(t, cache.Get("4").Value(), 4)
+	assert.Equal(t, cache.Get("5").Value(), 5)
+
+	cache.SetMaxSize(10)
+	cache.Set("6", 6, time.Minute)
+	cache.SyncUpdates()
+	assert.Equal(t, cache.GetDropped(), 0)
+	assert.Equal(t, cache.Get("3").Value(), 3)
+	assert.Equal(t, cache.Get("4").Value(), 4)
+	assert.Equal(t, cache.Get("5").Value(), 5)
+	assert.Equal(t, cache.Get("6").Value(), 6)
+}
+
+func Test_CacheForEachFunc(t *testing.T) {
+	cache := New(Configure[int]().MaxSize(3).ItemsToPrune(1))
+	assert.List(t, forEachKeys[int](cache), []string{})
+
+	cache.Set("1", 1, time.Minute)
+	assert.List(t, forEachKeys(cache), []string{"1"})
+
+	cache.Set("2", 2, time.Minute)
+	cache.SyncUpdates()
+	assert.List(t, forEachKeys(cache), []string{"1", "2"})
+
+	cache.Set("3", 3, time.Minute)
+	cache.SyncUpdates()
+	assert.List(t, forEachKeys(cache), []string{"1", "2", "3"})
+
+	cache.Set("4", 4, time.Minute)
+	cache.SyncUpdates()
+	assert.List(t, forEachKeys(cache), []string{"2", "3", "4"})
+
+	cache.Set("stop", 5, time.Minute)
+	cache.SyncUpdates()
+	assert.DoesNotContain(t, forEachKeys(cache), "stop")
+
+	cache.Set("6", 6, time.Minute)
+	cache.SyncUpdates()
+	assert.DoesNotContain(t, forEachKeys(cache), "stop")
+}
+
+func Test_CachePrune(t *testing.T) {
+	maxSize := int64(500)
+	cache := New(Configure[string]().MaxSize(maxSize).ItemsToPrune(50))
+	epoch := 0
+	for i := 0; i < 10000; i++ {
+		epoch += 1
+		expired := make([]string, 0)
+		for i := 0; i < 50; i += 1 {
+			key := strconv.FormatInt(rand.Int63n(maxSize*20), 10)
+			item := cache.Get(key)
+			if item == nil || item.TTL() > 1*time.Minute {
+				expired = append(expired, key)
+			}
+		}
+		for _, key := range expired {
+			cache.Set(key, key, 5*time.Minute)
+		}
+		if epoch%500 == 0 {
+			assert.True(t, cache.GetSize() <= 500)
+		}
+	}
+}
+
+func Test_ConcurrentStop(t *testing.T) {
+	for i := 0; i < 100; i++ {
+		cache := New(Configure[string]())
+		r := func() {
+			for {
+				key := strconv.Itoa(int(rand.Int31n(100)))
+				switch rand.Int31n(3) {
+				case 0:
+					cache.Get(key)
+				case 1:
+					cache.Set(key, key, time.Minute)
+				case 2:
+					cache.Delete(key)
+				}
+			}
+		}
+		go r()
+		go r()
+		go r()
+		time.Sleep(time.Millisecond * 10)
+		cache.Stop()
+	}
 }
 
 type SizedItem struct {
@@ -192,14 +370,15 @@ func (s *SizedItem) Size() int64 {
 	return s.s
 }
 
-func checkSize(cache *Cache, sz int64) {
-	cache.Stop()
-	Expect(cache.size).To.Equal(sz)
-	cache.restart()
-}
-
-func gcCache(cache *Cache) {
-	cache.Stop()
-	cache.gc()
-	cache.restart()
+func forEachKeys[T any](cache *Cache[T]) []string {
+	keys := make([]string, 0, 10)
+	cache.ForEachFunc(func(key string, i *Item[T]) bool {
+		if key == "stop" {
+			return false
+		}
+		keys = append(keys, key)
+		return true
+	})
+	sort.Strings(keys)
+	return keys
 }
diff --git a/configuration.go b/configuration.go
index d618215..3267680 100644
--- a/configuration.go
+++ b/configuration.go
@@ -1,6 +1,6 @@
 package ccache
 
-type Configuration struct {
+type Configuration[T any] struct {
 	maxSize        int64
 	buckets        int
 	itemsToPrune   int
@@ -8,14 +8,14 @@ type Configuration struct {
 	promoteBuffer  int
 	getsPerPromote int32
 	tracking       bool
-	onDelete       func(item *Item)
+	onDelete       func(item *Item[T])
 }
 
 // Creates a configuration object with sensible defaults
 // Use this as the start of the fluent configuration:
 // e.g.: ccache.New(ccache.Configure().MaxSize(10000))
-func Configure() *Configuration {
-	return &Configuration{
+func Configure[T any]() *Configuration[T] {
+	return &Configuration[T]{
 		buckets:        16,
 		itemsToPrune:   500,
 		deleteBuffer:   1024,
@@ -28,7 +28,7 @@ func Configure() *Configuration {
 
 // The max size for the cache
 // [5000]
-func (c *Configuration) MaxSize(max int64) *Configuration {
+func (c *Configuration[T]) MaxSize(max int64) *Configuration[T] {
 	c.maxSize = max
 	return c
 }
@@ -36,7 +36,7 @@ func (c *Configuration) MaxSize(max int64) *Configuration {
 // Keys are hashed into % bucket count to provide greater concurrency (every set
 // requires a write lock on the bucket). Must be a power of 2 (1, 2, 4, 8, 16, ...)
 // [16]
-func (c *Configuration) Buckets(count uint32) *Configuration {
+func (c *Configuration[T]) Buckets(count uint32) *Configuration[T] {
 	if count == 0 || ((count&(^count+1)) == count) == false {
 		count = 16
 	}
@@ -46,7 +46,7 @@ func (c *Configuration) Buckets(count uint32) *Configuration {
 
 // The number of items to prune when memory is low
 // [500]
-func (c *Configuration) ItemsToPrune(count uint32) *Configuration {
+func (c *Configuration[T]) ItemsToPrune(count uint32) *Configuration[T] {
 	c.itemsToPrune = int(count)
 	return c
 }
@@ -54,14 +54,14 @@ func (c *Configuration) ItemsToPrune(count uint32) *Configuration {
 // The size of the queue for items which should be promoted. If the queue fills
 // up, promotions are skipped
 // [1024]
-func (c *Configuration) PromoteBuffer(size uint32) *Configuration {
+func (c *Configuration[T]) PromoteBuffer(size uint32) *Configuration[T] {
 	c.promoteBuffer = int(size)
 	return c
 }
 
 // The size of the queue for items which should be deleted. If the queue fills
 // up, calls to Delete() will block
-func (c *Configuration) DeleteBuffer(size uint32) *Configuration {
+func (c *Configuration[T]) DeleteBuffer(size uint32) *Configuration[T] {
 	c.deleteBuffer = int(size)
 	return c
 }
@@ -70,7 +70,7 @@ func (c *Configuration) DeleteBuffer(size uint32) *Configuration {
 // to promote an item on every Get. GetsPerPromote specifies the number of Gets
 // a key must have before being promoted
 // [3]
-func (c *Configuration) GetsPerPromote(count int32) *Configuration {
+func (c *Configuration[T]) GetsPerPromote(count int32) *Configuration[T] {
 	c.getsPerPromote = count
 	return c
 }
@@ -89,7 +89,7 @@ func (c *Configuration) GetsPerPromote(count int32) *Configuration {
 // By turning tracking on and using the cache's TrackingGet, the cache
 // won't evict items which you haven't called Release() on. It's a simple reference
 // counter.
-func (c *Configuration) Track() *Configuration {
+func (c *Configuration[T]) Track() *Configuration[T] {
 	c.tracking = true
 	return c
 }
@@ -97,7 +97,7 @@ func (c *Configuration) Track() *Configuration {
 // OnDelete allows setting a callback function to react to ideam deletion.
 // This typically allows to do a cleanup of resources, such as calling a Close() on
 // cached object that require some kind of tear-down.
-func (c *Configuration) OnDelete(callback func(item *Item)) *Configuration {
+func (c *Configuration[T]) OnDelete(callback func(item *Item[T])) *Configuration[T] {
 	c.onDelete = callback
 	return c
 }
diff --git a/configuration_test.go b/configuration_test.go
index d589c61..20a4ec9 100644
--- a/configuration_test.go
+++ b/configuration_test.go
@@ -1,23 +1,18 @@
 package ccache
 
 import (
-	. "github.com/karlseguin/expect"
 	"testing"
-)
-
-type ConfigurationTests struct{}
 
-func Test_Configuration(t *testing.T) {
-	Expectify(new(ConfigurationTests), t)
-}
+	"github.com/karlseguin/ccache/v3/assert"
+)
 
-func (_ *ConfigurationTests) BucketsPowerOf2() {
+func Test_Configuration_BucketsPowerOf2(t *testing.T) {
 	for i := uint32(0); i < 31; i++ {
-		c := Configure().Buckets(i)
+		c := Configure[int]().Buckets(i)
 		if i == 1 || i == 2 || i == 4 || i == 8 || i == 16 {
-			Expect(c.buckets).ToEqual(int(i))
+			assert.Equal(t, c.buckets, int(i))
 		} else {
-			Expect(c.buckets).ToEqual(16)
+			assert.Equal(t, c.buckets, 16)
 		}
 	}
 }
diff --git a/control.go b/control.go
new file mode 100644
index 0000000..b618e31
--- /dev/null
+++ b/control.go
@@ -0,0 +1,110 @@
+package ccache
+
+type controlGC struct {
+	done chan struct{}
+}
+
+type controlClear struct {
+	done chan struct{}
+}
+
+type controlStop struct {
+}
+
+type controlGetSize struct {
+	res chan int64
+}
+
+type controlGetDropped struct {
+	res chan int
+}
+
+type controlSetMaxSize struct {
+	size int64
+	done chan struct{}
+}
+
+type controlSyncUpdates struct {
+	done chan struct{}
+}
+
+type control chan interface{}
+
+func newControl() chan interface{} {
+	return make(chan interface{}, 5)
+}
+
+// Forces GC. There should be no reason to call this function, except from tests
+// which require synchronous GC.
+// This is a control command.
+func (c control) GC() {
+	done := make(chan struct{})
+	c <- controlGC{done: done}
+	<-done
+}
+
+// Sends a stop signal to the worker thread. The worker thread will shut down
+// 5 seconds after the last message is received. The cache should not be used
+// after Stop is called, but concurrently executing requests should properly finish
+// executing.
+// This is a control command.
+func (c control) Stop() {
+	c.SyncUpdates()
+	c <- controlStop{}
+}
+
+// Clears the cache
+// This is a control command.
+func (c control) Clear() {
+	done := make(chan struct{})
+	c <- controlClear{done: done}
+	<-done
+}
+
+// Gets the size of the cache. This is an O(1) call to make, but it is handled
+// by the worker goroutine. It's meant to be called periodically for metrics, or
+// from tests.
+// This is a control command.
+func (c control) GetSize() int64 {
+	res := make(chan int64)
+	c <- controlGetSize{res: res}
+	return <-res
+}
+
+// Gets the number of items removed from the cache due to memory pressure since
+// the last time GetDropped was called
+// This is a control command.
+func (c control) GetDropped() int {
+	res := make(chan int)
+	c <- controlGetDropped{res: res}
+	return <-res
+}
+
+// Sets a new max size. That can result in a GC being run if the new maximum size
+// is smaller than the cached size
+// This is a control command.
+func (c control) SetMaxSize(size int64) {
+	done := make(chan struct{})
+	c <- controlSetMaxSize{size: size, done: done}
+	<-done
+}
+
+// SyncUpdates waits until the cache has finished asynchronous state updates for any operations
+// that were done by the current goroutine up to now.
+//
+// For efficiency, the cache's implementation of LRU behavior is partly managed by a worker
+// goroutine that updates its internal data structures asynchronously. This means that the
+// cache's state in terms of (for instance) eviction of LRU items is only eventually consistent;
+// there is no guarantee that it happens before a Get or Set call has returned. Most of the time
+// application code will not care about this, but especially in a test scenario you may want to
+// be able to know when the worker has caught up.
+//
+// This applies only to cache methods that were previously called by the same goroutine that is
+// now calling SyncUpdates. If other goroutines are using the cache at the same time, there is
+// no way to know whether any of them still have pending state updates when SyncUpdates returns.
+// This is a control command.
+func (c control) SyncUpdates() {
+	done := make(chan struct{})
+	c <- controlSyncUpdates{done: done}
+	<-done
+}
diff --git a/debian/changelog b/debian/changelog
index ba7eac9..61ed4b6 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,3 +1,10 @@
+golang-github-karlseguin-ccache (3.0.3-1) UNRELEASED; urgency=low
+
+  * New upstream release.
+  * New upstream release.
+
+ -- Debian Janitor <janitor@jelmer.uk>  Tue, 14 Mar 2023 10:58:51 -0000
+
 golang-github-karlseguin-ccache (2.0.3-1) unstable; urgency=medium
 
   * Team Upload.
diff --git a/go.mod b/go.mod
new file mode 100644
index 0000000..2da6037
--- /dev/null
+++ b/go.mod
@@ -0,0 +1,3 @@
+module github.com/karlseguin/ccache/v3
+
+go 1.18
diff --git a/go.sum b/go.sum
new file mode 100644
index 0000000..e69de29
diff --git a/item.go b/item.go
index bb7c04f..22ac731 100644
--- a/item.go
+++ b/item.go
@@ -1,7 +1,7 @@
 package ccache
 
 import (
-	"container/list"
+	"fmt"
 	"sync/atomic"
 	"time"
 )
@@ -10,8 +10,8 @@ type Sized interface {
 	Size() int64
 }
 
-type TrackedItem interface {
-	Value() interface{}
+type TrackedItem[T any] interface {
+	Value() T
 	Release()
 	Expired() bool
 	TTL() time.Duration
@@ -19,85 +19,79 @@ type TrackedItem interface {
 	Extend(duration time.Duration)
 }
 
-type nilItem struct{}
-
-func (n *nilItem) Value() interface{} { return nil }
-func (n *nilItem) Release()           {}
-
-func (i *nilItem) Expired() bool {
-	return true
-}
-
-func (i *nilItem) TTL() time.Duration {
-	return time.Minute
-}
-
-func (i *nilItem) Expires() time.Time {
-	return time.Time{}
-}
-
-func (i *nilItem) Extend(duration time.Duration) {
-}
-
-var NilTracked = new(nilItem)
-
-type Item struct {
+type Item[T any] struct {
 	key        string
 	group      string
 	promotions int32
 	refCount   int32
 	expires    int64
 	size       int64
-	value      interface{}
-	element    *list.Element
+	value      T
+	node       *Node[*Item[T]]
 }
 
-func newItem(key string, value interface{}, expires int64) *Item {
+func newItem[T any](key string, value T, expires int64, track bool) *Item[T] {
 	size := int64(1)
-	if sized, ok := value.(Sized); ok {
+
+	// https://github.com/golang/go/issues/49206
+	if sized, ok := (interface{})(value).(Sized); ok {
 		size = sized.Size()
 	}
-	return &Item{
+	item := &Item[T]{
 		key:        key,
 		value:      value,
 		promotions: 0,
 		size:       size,
 		expires:    expires,
 	}
+	if track {
+		item.refCount = 1
+	}
+	return item
 }
 
-func (i *Item) shouldPromote(getsPerPromote int32) bool {
+func (i *Item[T]) shouldPromote(getsPerPromote int32) bool {
 	i.promotions += 1
 	return i.promotions == getsPerPromote
 }
 
-func (i *Item) Value() interface{} {
+func (i *Item[T]) Value() T {
 	return i.value
 }
 
-func (i *Item) track() {
+func (i *Item[T]) track() {
 	atomic.AddInt32(&i.refCount, 1)
 }
 
-func (i *Item) Release() {
+func (i *Item[T]) Release() {
 	atomic.AddInt32(&i.refCount, -1)
 }
 
-func (i *Item) Expired() bool {
+func (i *Item[T]) Expired() bool {
 	expires := atomic.LoadInt64(&i.expires)
 	return expires < time.Now().UnixNano()
 }
 
-func (i *Item) TTL() time.Duration {
+func (i *Item[T]) TTL() time.Duration {
 	expires := atomic.LoadInt64(&i.expires)
 	return time.Nanosecond * time.Duration(expires-time.Now().UnixNano())
 }
 
-func (i *Item) Expires() time.Time {
+func (i *Item[T]) Expires() time.Time {
 	expires := atomic.LoadInt64(&i.expires)
 	return time.Unix(0, expires)
 }
 
-func (i *Item) Extend(duration time.Duration) {
+func (i *Item[T]) Extend(duration time.Duration) {
 	atomic.StoreInt64(&i.expires, time.Now().Add(duration).UnixNano())
 }
+
+// String returns a string representation of the Item. This includes the default string
+// representation of its Value(), as implemented by fmt.Sprintf with "%v", but the exact
+// format of the string should not be relied on; it is provided only for debugging
+// purposes, and because otherwise including an Item in a call to fmt.Printf or
+// fmt.Sprintf expression could cause fields of the Item to be read in a non-thread-safe
+// way.
+func (i *Item[T]) String() string {
+	return fmt.Sprintf("Item(%v)", i.value)
+}
diff --git a/item_test.go b/item_test.go
index 9311c9b..c169866 100644
--- a/item_test.go
+++ b/item_test.go
@@ -5,45 +5,39 @@ import (
 	"testing"
 	"time"
 
-	. "github.com/karlseguin/expect"
+	"github.com/karlseguin/ccache/v3/assert"
 )
 
-type ItemTests struct{}
-
-func Test_Item(t *testing.T) {
-	Expectify(new(ItemTests), t)
-}
-
-func (_ *ItemTests) Promotability() {
-	item := &Item{promotions: 4}
-	Expect(item.shouldPromote(5)).To.Equal(true)
-	Expect(item.shouldPromote(5)).To.Equal(false)
+func Test_Item_Promotability(t *testing.T) {
+	item := &Item[int]{promotions: 4}
+	assert.Equal(t, item.shouldPromote(5), true)
+	assert.Equal(t, item.shouldPromote(5), false)
 }
 
-func (_ *ItemTests) Expired() {
+func Test_Item_Expired(t *testing.T) {
 	now := time.Now().UnixNano()
-	item1 := &Item{expires: now + (10 * int64(time.Millisecond))}
-	item2 := &Item{expires: now - (10 * int64(time.Millisecond))}
-	Expect(item1.Expired()).To.Equal(false)
-	Expect(item2.Expired()).To.Equal(true)
+	item1 := &Item[int]{expires: now + (10 * int64(time.Millisecond))}
+	item2 := &Item[int]{expires: now - (10 * int64(time.Millisecond))}
+	assert.Equal(t, item1.Expired(), false)
+	assert.Equal(t, item2.Expired(), true)
 }
 
-func (_ *ItemTests) TTL() {
+func Test_Item_TTL(t *testing.T) {
 	now := time.Now().UnixNano()
-	item1 := &Item{expires: now + int64(time.Second)}
-	item2 := &Item{expires: now - int64(time.Second)}
-	Expect(int(math.Ceil(item1.TTL().Seconds()))).To.Equal(1)
-	Expect(int(math.Ceil(item2.TTL().Seconds()))).To.Equal(-1)
+	item1 := &Item[int]{expires: now + int64(time.Second)}
+	item2 := &Item[int]{expires: now - int64(time.Second)}
+	assert.Equal(t, int(math.Ceil(item1.TTL().Seconds())), 1)
+	assert.Equal(t, int(math.Ceil(item2.TTL().Seconds())), -1)
 }
 
-func (_ *ItemTests) Expires() {
+func Test_Item_Expires(t *testing.T) {
 	now := time.Now().UnixNano()
-	item := &Item{expires: now + (10)}
-	Expect(item.Expires().UnixNano()).To.Equal(now + 10)
+	item := &Item[int]{expires: now + (10)}
+	assert.Equal(t, item.Expires().UnixNano(), now+10)
 }
 
-func (_ *ItemTests) Extend() {
-	item := &Item{expires: time.Now().UnixNano() + 10}
+func Test_Item_Extend(t *testing.T) {
+	item := &Item[int]{expires: time.Now().UnixNano() + 10}
 	item.Extend(time.Minute * 2)
-	Expect(item.Expires().Unix()).To.Equal(time.Now().Unix() + 120)
+	assert.Equal(t, item.Expires().Unix(), time.Now().Unix()+120)
 }
diff --git a/layeredbucket.go b/layeredbucket.go
index 57676ab..0d8aa8e 100644
--- a/layeredbucket.go
+++ b/layeredbucket.go
@@ -5,12 +5,12 @@ import (
 	"time"
 )
 
-type layeredBucket struct {
+type layeredBucket[T any] struct {
 	sync.RWMutex
-	buckets map[string]*bucket
+	buckets map[string]*bucket[T]
 }
 
-func (b *layeredBucket) itemCount() int {
+func (b *layeredBucket[T]) itemCount() int {
 	count := 0
 	b.RLock()
 	defer b.RUnlock()
@@ -20,7 +20,7 @@ func (b *layeredBucket) itemCount() int {
 	return count
 }
 
-func (b *layeredBucket) get(primary, secondary string) *Item {
+func (b *layeredBucket[T]) get(primary, secondary string) *Item[T] {
 	bucket := b.getSecondaryBucket(primary)
 	if bucket == nil {
 		return nil
@@ -28,7 +28,7 @@ func (b *layeredBucket) get(primary, secondary string) *Item {
 	return bucket.get(secondary)
 }
 
-func (b *layeredBucket) getSecondaryBucket(primary string) *bucket {
+func (b *layeredBucket[T]) getSecondaryBucket(primary string) *bucket[T] {
 	b.RLock()
 	bucket, exists := b.buckets[primary]
 	b.RUnlock()
@@ -38,20 +38,20 @@ func (b *layeredBucket) getSecondaryBucket(primary string) *bucket {
 	return bucket
 }
 
-func (b *layeredBucket) set(primary, secondary string, value interface{}, duration time.Duration) (*Item, *Item) {
+func (b *layeredBucket[T]) set(primary, secondary string, value T, duration time.Duration, track bool) (*Item[T], *Item[T]) {
 	b.Lock()
 	bkt, exists := b.buckets[primary]
 	if exists == false {
-		bkt = &bucket{lookup: make(map[string]*Item)}
+		bkt = &bucket[T]{lookup: make(map[string]*Item[T])}
 		b.buckets[primary] = bkt
 	}
 	b.Unlock()
-	item, existing := bkt.set(secondary, value, duration)
+	item, existing := bkt.set(secondary, value, duration, track)
 	item.group = primary
 	return item, existing
 }
 
-func (b *layeredBucket) delete(primary, secondary string) *Item {
+func (b *layeredBucket[T]) delete(primary, secondary string) *Item[T] {
 	b.RLock()
 	bucket, exists := b.buckets[primary]
 	b.RUnlock()
@@ -61,7 +61,27 @@ func (b *layeredBucket) delete(primary, secondary string) *Item {
 	return bucket.delete(secondary)
 }
 
-func (b *layeredBucket) deleteAll(primary string, deletables chan *Item) bool {
+func (b *layeredBucket[T]) deletePrefix(primary, prefix string, deletables chan *Item[T]) int {
+	b.RLock()
+	bucket, exists := b.buckets[primary]
+	b.RUnlock()
+	if exists == false {
+		return 0
+	}
+	return bucket.deletePrefix(prefix, deletables)
+}
+
+func (b *layeredBucket[T]) deleteFunc(primary string, matches func(key string, item *Item[T]) bool, deletables chan *Item[T]) int {
+	b.RLock()
+	bucket, exists := b.buckets[primary]
+	b.RUnlock()
+	if exists == false {
+		return 0
+	}
+	return bucket.deleteFunc(matches, deletables)
+}
+
+func (b *layeredBucket[T]) deleteAll(primary string, deletables chan *Item[T]) bool {
 	b.RLock()
 	bucket, exists := b.buckets[primary]
 	b.RUnlock()
@@ -82,11 +102,20 @@ func (b *layeredBucket) deleteAll(primary string, deletables chan *Item) bool {
 	return true
 }
 
-func (b *layeredBucket) clear() {
+func (b *layeredBucket[T]) forEachFunc(primary string, matches func(key string, item *Item[T]) bool) {
+	b.RLock()
+	bucket, exists := b.buckets[primary]
+	b.RUnlock()
+	if exists {
+		bucket.forEachFunc(matches)
+	}
+}
+
+func (b *layeredBucket[T]) clear() {
 	b.Lock()
 	defer b.Unlock()
 	for _, bucket := range b.buckets {
 		bucket.clear()
 	}
-	b.buckets = make(map[string]*bucket)
+	b.buckets = make(map[string]*bucket[T])
 }
diff --git a/layeredcache.go b/layeredcache.go
index f57f5bf..ada096d 100644
--- a/layeredcache.go
+++ b/layeredcache.go
@@ -2,21 +2,20 @@
 package ccache
 
 import (
-	"container/list"
 	"hash/fnv"
 	"sync/atomic"
 	"time"
 )
 
-type LayeredCache struct {
-	*Configuration
-	list        *list.List
-	buckets     []*layeredBucket
+type LayeredCache[T any] struct {
+	*Configuration[T]
+	control
+	list        *List[*Item[T]]
+	buckets     []*layeredBucket[T]
 	bucketMask  uint32
 	size        int64
-	deletables  chan *Item
-	promotables chan *Item
-	donec       chan struct{}
+	deletables  chan *Item[T]
+	promotables chan *Item[T]
 }
 
 // Create a new layered cache with the specified configuration.
@@ -32,24 +31,26 @@ type LayeredCache struct {
 // secondary key 2 = ".xml"
 
 // See ccache.Configure() for creating a configuration
-func Layered(config *Configuration) *LayeredCache {
-	c := &LayeredCache{
-		list:          list.New(),
+func Layered[T any](config *Configuration[T]) *LayeredCache[T] {
+	c := &LayeredCache[T]{
+		list:          NewList[*Item[T]](),
 		Configuration: config,
+		control:       newControl(),
 		bucketMask:    uint32(config.buckets) - 1,
-		buckets:       make([]*layeredBucket, config.buckets),
-		deletables:    make(chan *Item, config.deleteBuffer),
+		buckets:       make([]*layeredBucket[T], config.buckets),
+		deletables:    make(chan *Item[T], config.deleteBuffer),
+		promotables:   make(chan *Item[T], config.promoteBuffer),
 	}
 	for i := 0; i < int(config.buckets); i++ {
-		c.buckets[i] = &layeredBucket{
-			buckets: make(map[string]*bucket),
+		c.buckets[i] = &layeredBucket[T]{
+			buckets: make(map[string]*bucket[T]),
 		}
 	}
-	c.restart()
+	go c.worker()
 	return c
 }
 
-func (c *LayeredCache) ItemCount() int {
+func (c *LayeredCache[T]) ItemCount() int {
 	count := 0
 	for _, b := range c.buckets {
 		count += b.itemCount()
@@ -61,30 +62,44 @@ func (c *LayeredCache) ItemCount() int {
 // This can return an expired item. Use item.Expired() to see if the item
 // is expired and item.TTL() to see how long until the item expires (which
 // will be negative for an already expired item).
-func (c *LayeredCache) Get(primary, secondary string) *Item {
+func (c *LayeredCache[T]) Get(primary, secondary string) *Item[T] {
 	item := c.bucket(primary).get(primary, secondary)
 	if item == nil {
 		return nil
 	}
 	if item.expires > time.Now().UnixNano() {
-		c.promote(item)
+		select {
+		case c.promotables <- item:
+		default:
+		}
 	}
 	return item
 }
 
+// Same as Get but does not promote the value. This essentially circumvents the
+// "least recently used" aspect of this cache. To some degree, it's akin to a
+// "peak"
+func (c *LayeredCache[T]) GetWithoutPromote(primary, secondary string) *Item[T] {
+	return c.bucket(primary).get(primary, secondary)
+}
+
+func (c *LayeredCache[T]) ForEachFunc(primary string, matches func(key string, item *Item[T]) bool) {
+	c.bucket(primary).forEachFunc(primary, matches)
+}
+
 // Get the secondary cache for a given primary key. This operation will
 // never return nil. In the case where the primary key does not exist, a
 // new, underlying, empty bucket will be created and returned.
-func (c *LayeredCache) GetOrCreateSecondaryCache(primary string) *SecondaryCache {
+func (c *LayeredCache[T]) GetOrCreateSecondaryCache(primary string) *SecondaryCache[T] {
 	primaryBkt := c.bucket(primary)
 	bkt := primaryBkt.getSecondaryBucket(primary)
 	primaryBkt.Lock()
 	if bkt == nil {
-		bkt = &bucket{lookup: make(map[string]*Item)}
+		bkt = &bucket[T]{lookup: make(map[string]*Item[T])}
 		primaryBkt.buckets[primary] = bkt
 	}
 	primaryBkt.Unlock()
-	return &SecondaryCache{
+	return &SecondaryCache[T]{
 		bucket: bkt,
 		pCache: c,
 	}
@@ -92,24 +107,29 @@ func (c *LayeredCache) GetOrCreateSecondaryCache(primary string) *SecondaryCache
 
 // Used when the cache was created with the Track() configuration option.
 // Avoid otherwise
-func (c *LayeredCache) TrackingGet(primary, secondary string) TrackedItem {
+func (c *LayeredCache[T]) TrackingGet(primary, secondary string) TrackedItem[T] {
 	item := c.Get(primary, secondary)
 	if item == nil {
-		return NilTracked
+		return nil
 	}
 	item.track()
 	return item
 }
 
 // Set the value in the cache for the specified duration
-func (c *LayeredCache) Set(primary, secondary string, value interface{}, duration time.Duration) {
-	c.set(primary, secondary, value, duration)
+func (c *LayeredCache[T]) TrackingSet(primary, secondary string, value T, duration time.Duration) TrackedItem[T] {
+	return c.set(primary, secondary, value, duration, true)
+}
+
+// Set the value in the cache for the specified duration
+func (c *LayeredCache[T]) Set(primary, secondary string, value T, duration time.Duration) {
+	c.set(primary, secondary, value, duration, false)
 }
 
 // Replace the value if it exists, does not set if it doesn't.
 // Returns true if the item existed an was replaced, false otherwise.
 // Replace does not reset item's TTL nor does it alter its position in the LRU
-func (c *LayeredCache) Replace(primary, secondary string, value interface{}) bool {
+func (c *LayeredCache[T]) Replace(primary, secondary string, value T) bool {
 	item := c.bucket(primary).get(primary, secondary)
 	if item == nil {
 		return false
@@ -121,7 +141,10 @@ func (c *LayeredCache) Replace(primary, secondary string, value interface{}) boo
 // Attempts to get the value from the cache and calles fetch on a miss.
 // If fetch returns an error, no value is cached and the error is returned back
 // to the caller.
-func (c *LayeredCache) Fetch(primary, secondary string, duration time.Duration, fetch func() (interface{}, error)) (*Item, error) {
+// Note that Fetch merely calls the public Get and Set functions. If you want
+// a different Fetch behavior, such as thundering herd protection or returning
+// expired items, implement it in your application.
+func (c *LayeredCache[T]) Fetch(primary, secondary string, duration time.Duration, fetch func() (T, error)) (*Item[T], error) {
 	item := c.Get(primary, secondary)
 	if item != nil {
 		return item, nil
@@ -130,11 +153,11 @@ func (c *LayeredCache) Fetch(primary, secondary string, duration time.Duration,
 	if err != nil {
 		return nil, err
 	}
-	return c.set(primary, secondary, value, duration), nil
+	return c.set(primary, secondary, value, duration, false), nil
 }
 
 // Remove the item from the cache, return true if the item was present, false otherwise.
-func (c *LayeredCache) Delete(primary, secondary string) bool {
+func (c *LayeredCache[T]) Delete(primary, secondary string) bool {
 	item := c.bucket(primary).delete(primary, secondary)
 	if item != nil {
 		c.deletables <- item
@@ -144,32 +167,22 @@ func (c *LayeredCache) Delete(primary, secondary string) bool {
 }
 
 // Deletes all items that share the same primary key
-func (c *LayeredCache) DeleteAll(primary string) bool {
+func (c *LayeredCache[T]) DeleteAll(primary string) bool {
 	return c.bucket(primary).deleteAll(primary, c.deletables)
 }
 
-//this isn't thread safe. It's meant to be called from non-concurrent tests
-func (c *LayeredCache) Clear() {
-	for _, bucket := range c.buckets {
-		bucket.clear()
-	}
-	c.size = 0
-	c.list = list.New()
+// Deletes all items that share the same primary key and prefix.
+func (c *LayeredCache[T]) DeletePrefix(primary, prefix string) int {
+	return c.bucket(primary).deletePrefix(primary, prefix, c.deletables)
 }
 
-func (c *LayeredCache) Stop() {
-	close(c.promotables)
-	<-c.donec
-}
-
-func (c *LayeredCache) restart() {
-	c.promotables = make(chan *Item, c.promoteBuffer)
-	c.donec = make(chan struct{})
-	go c.worker()
+// Deletes all items that share the same primary key and where the matches func evaluates to true.
+func (c *LayeredCache[T]) DeleteFunc(primary string, matches func(key string, item *Item[T]) bool) int {
+	return c.bucket(primary).deleteFunc(primary, matches, c.deletables)
 }
 
-func (c *LayeredCache) set(primary, secondary string, value interface{}, duration time.Duration) *Item {
-	item, existing := c.bucket(primary).set(primary, secondary, value, duration)
+func (c *LayeredCache[T]) set(primary, secondary string, value T, duration time.Duration, track bool) *Item[T] {
+	item, existing := c.bucket(primary).set(primary, secondary, value, duration, track)
 	if existing != nil {
 		c.deletables <- existing
 	}
@@ -177,72 +190,133 @@ func (c *LayeredCache) set(primary, secondary string, value interface{}, duratio
 	return item
 }
 
-func (c *LayeredCache) bucket(key string) *layeredBucket {
+func (c *LayeredCache[T]) bucket(key string) *layeredBucket[T] {
 	h := fnv.New32a()
 	h.Write([]byte(key))
 	return c.buckets[h.Sum32()&c.bucketMask]
 }
 
-func (c *LayeredCache) promote(item *Item) {
+func (c *LayeredCache[T]) promote(item *Item[T]) {
 	c.promotables <- item
 }
 
-func (c *LayeredCache) worker() {
-	defer close(c.donec)
+func (c *LayeredCache[T]) worker() {
+	dropped := 0
+	cc := c.control
+
+	promoteItem := func(item *Item[T]) {
+		if c.doPromote(item) && c.size > c.maxSize {
+			dropped += c.gc()
+		}
+	}
+
 	for {
 		select {
-		case item, ok := <-c.promotables:
-			if ok == false {
-				return
-			}
-			if c.doPromote(item) && c.size > c.maxSize {
-				c.gc()
-			}
+		case item := <-c.promotables:
+			promoteItem(item)
 		case item := <-c.deletables:
-			if item.element == nil {
-				atomic.StoreInt32(&item.promotions, -2)
-			} else {
-				c.size -= item.size
-				if c.onDelete != nil {
-					c.onDelete(item)
+			c.doDelete(item)
+		case control := <-cc:
+			switch msg := control.(type) {
+			case controlStop:
+				goto drain
+			case controlGetDropped:
+				msg.res <- dropped
+				dropped = 0
+			case controlSetMaxSize:
+				c.maxSize = msg.size
+				if c.size > c.maxSize {
+					dropped += c.gc()
+				}
+				msg.done <- struct{}{}
+			case controlClear:
+				for _, bucket := range c.buckets {
+					bucket.clear()
 				}
-				c.list.Remove(item.element)
+				c.size = 0
+				c.list = NewList[*Item[T]]()
+				msg.done <- struct{}{}
+			case controlGetSize:
+				msg.res <- c.size
+			case controlGC:
+				dropped += c.gc()
+				msg.done <- struct{}{}
+			case controlSyncUpdates:
+				doAllPendingPromotesAndDeletes(c.promotables, promoteItem, c.deletables, c.doDelete)
+				msg.done <- struct{}{}
 			}
 		}
 	}
+
+drain:
+	for {
+		select {
+		case item := <-c.deletables:
+			c.doDelete(item)
+		default:
+			return
+		}
+	}
 }
 
-func (c *LayeredCache) doPromote(item *Item) bool {
+func (c *LayeredCache[T]) doDelete(item *Item[T]) {
+	if item.node == nil {
+		item.promotions = -2
+	} else {
+		c.size -= item.size
+		if c.onDelete != nil {
+			c.onDelete(item)
+		}
+		c.list.Remove(item.node)
+		item.node = nil
+		item.promotions = -2
+	}
+}
+
+func (c *LayeredCache[T]) doPromote(item *Item[T]) bool {
 	// deleted before it ever got promoted
-	if atomic.LoadInt32(&item.promotions) == -2 {
+	if item.promotions == -2 {
 		return false
 	}
-	if item.element != nil { //not a new item
+	if item.node != nil { //not a new item
 		if item.shouldPromote(c.getsPerPromote) {
-			c.list.MoveToFront(item.element)
-			atomic.StoreInt32(&item.promotions, 0)
+			c.list.MoveToFront(item.node)
+			item.promotions = 0
 		}
 		return false
 	}
 	c.size += item.size
-	item.element = c.list.PushFront(item)
+	item.node = c.list.Insert(item)
 	return true
 }
 
-func (c *LayeredCache) gc() {
-	element := c.list.Back()
-	for i := 0; i < c.itemsToPrune; i++ {
-		if element == nil {
-			return
+func (c *LayeredCache[T]) gc() int {
+	node := c.list.Tail
+	dropped := 0
+	itemsToPrune := int64(c.itemsToPrune)
+
+	if min := c.size - c.maxSize; min > itemsToPrune {
+		itemsToPrune = min
+	}
+
+	for i := int64(0); i < itemsToPrune; i++ {
+		if node == nil {
+			return dropped
 		}
-		prev := element.Prev()
-		item := element.Value.(*Item)
+		prev := node.Prev
+		item := node.Value
 		if c.tracking == false || atomic.LoadInt32(&item.refCount) == 0 {
 			c.bucket(item.group).delete(item.group, item.key)
 			c.size -= item.size
-			c.list.Remove(element)
+			c.list.Remove(node)
+			if c.onDelete != nil {
+				c.onDelete(item)
+			}
+			item.node = nil
 			item.promotions = -2
+			dropped += 1
 		}
-		element = prev
+		node = prev
 	}
+	return dropped
 }
diff --git a/layeredcache_test.go b/layeredcache_test.go
index e53afa4..bed8881 100644
--- a/layeredcache_test.go
+++ b/layeredcache_test.go
@@ -1,251 +1,439 @@
 package ccache
 
 import (
+	"math/rand"
+	"sort"
 	"strconv"
+	"sync/atomic"
 	"testing"
 	"time"
 
-	. "github.com/karlseguin/expect"
+	"github.com/karlseguin/ccache/v3/assert"
 )
 
-type LayeredCacheTests struct{}
-
-func Test_LayeredCache(t *testing.T) {
-	Expectify(new(LayeredCacheTests), t)
-}
-
-func (_ *LayeredCacheTests) GetsANonExistantValue() {
-	cache := newLayered()
-	Expect(cache.Get("spice", "flow")).To.Equal(nil)
-	Expect(cache.ItemCount()).To.Equal(0)
+func Test_LayedCache_GetsANonExistantValue(t *testing.T) {
+	cache := newLayered[string]()
+	assert.Equal(t, cache.Get("spice", "flow"), nil)
+	assert.Equal(t, cache.ItemCount(), 0)
 }
 
-func (_ *LayeredCacheTests) SetANewValue() {
-	cache := newLayered()
+func Test_LayedCache_SetANewValue(t *testing.T) {
+	cache := newLayered[string]()
 	cache.Set("spice", "flow", "a value", time.Minute)
-	Expect(cache.Get("spice", "flow").Value()).To.Equal("a value")
-	Expect(cache.Get("spice", "stop")).To.Equal(nil)
-	Expect(cache.ItemCount()).To.Equal(1)
+	assert.Equal(t, cache.Get("spice", "flow").Value(), "a value")
+	assert.Equal(t, cache.Get("spice", "stop"), nil)
+	assert.Equal(t, cache.ItemCount(), 1)
 }
 
-func (_ *LayeredCacheTests) SetsMultipleValueWithinTheSameLayer() {
-	cache := newLayered()
+func Test_LayedCache_SetsMultipleValueWithinTheSameLayer(t *testing.T) {
+	cache := newLayered[string]()
 	cache.Set("spice", "flow", "value-a", time.Minute)
 	cache.Set("spice", "must", "value-b", time.Minute)
 	cache.Set("leto", "sister", "ghanima", time.Minute)
-	Expect(cache.Get("spice", "flow").Value()).To.Equal("value-a")
-	Expect(cache.Get("spice", "must").Value()).To.Equal("value-b")
-	Expect(cache.Get("spice", "worm")).To.Equal(nil)
-
-	Expect(cache.Get("leto", "sister").Value()).To.Equal("ghanima")
-	Expect(cache.Get("leto", "brother")).To.Equal(nil)
-	Expect(cache.Get("baron", "friend")).To.Equal(nil)
-	Expect(cache.ItemCount()).To.Equal(3)
+	assert.Equal(t, cache.Get("spice", "flow").Value(), "value-a")
+	assert.Equal(t, cache.Get("spice", "must").Value(), "value-b")
+	assert.Equal(t, cache.Get("spice", "worm"), nil)
+
+	assert.Equal(t, cache.Get("leto", "sister").Value(), "ghanima")
+	assert.Equal(t, cache.Get("leto", "brother"), nil)
+	assert.Equal(t, cache.Get("baron", "friend"), nil)
+	assert.Equal(t, cache.ItemCount(), 3)
 }
 
-func (_ *LayeredCacheTests) ReplaceDoesNothingIfKeyDoesNotExist() {
-	cache := newLayered()
-	Expect(cache.Replace("spice", "flow", "value-a")).To.Equal(false)
-	Expect(cache.Get("spice", "flow")).To.Equal(nil)
+func Test_LayedCache_ReplaceDoesNothingIfKeyDoesNotExist(t *testing.T) {
+	cache := newLayered[string]()
+	assert.Equal(t, cache.Replace("spice", "flow", "value-a"), false)
+	assert.Equal(t, cache.Get("spice", "flow"), nil)
 }
 
-func (_ *LayeredCacheTests) ReplaceUpdatesTheValue() {
-	cache := newLayered()
+func Test_LayedCache_ReplaceUpdatesTheValue(t *testing.T) {
+	cache := newLayered[string]()
 	cache.Set("spice", "flow", "value-a", time.Minute)
-	Expect(cache.Replace("spice", "flow", "value-b")).To.Equal(true)
-	Expect(cache.Get("spice", "flow").Value().(string)).To.Equal("value-b")
-	Expect(cache.ItemCount()).To.Equal(1)
+	assert.Equal(t, cache.Replace("spice", "flow", "value-b"), true)
+	assert.Equal(t, cache.Get("spice", "flow").Value(), "value-b")
+	assert.Equal(t, cache.ItemCount(), 1)
 	//not sure how to test that the TTL hasn't changed sort of a sleep..
 }
 
-func (_ *LayeredCacheTests) DeletesAValue() {
-	cache := newLayered()
+func Test_LayedCache_DeletesAValue(t *testing.T) {
+	cache := newLayered[string]()
 	cache.Set("spice", "flow", "value-a", time.Minute)
 	cache.Set("spice", "must", "value-b", time.Minute)
 	cache.Set("leto", "sister", "ghanima", time.Minute)
 	cache.Delete("spice", "flow")
-	Expect(cache.Get("spice", "flow")).To.Equal(nil)
-	Expect(cache.Get("spice", "must").Value()).To.Equal("value-b")
-	Expect(cache.Get("spice", "worm")).To.Equal(nil)
-	Expect(cache.Get("leto", "sister").Value()).To.Equal("ghanima")
-	Expect(cache.ItemCount()).To.Equal(2)
+	assert.Equal(t, cache.Get("spice", "flow"), nil)
+	assert.Equal(t, cache.Get("spice", "must").Value(), "value-b")
+	assert.Equal(t, cache.Get("spice", "worm"), nil)
+	assert.Equal(t, cache.Get("leto", "sister").Value(), "ghanima")
+	assert.Equal(t, cache.ItemCount(), 2)
 }
 
-func (_ *LayeredCacheTests) OnDeleteCallbackCalled() {
+func Test_LayedCache_DeletesAPrefix(t *testing.T) {
+	cache := newLayered[string]()
+	assert.Equal(t, cache.ItemCount(), 0)
+
+	cache.Set("spice", "aaa", "1", time.Minute)
+	cache.Set("spice", "aab", "2", time.Minute)
+	cache.Set("spice", "aac", "3", time.Minute)
+	cache.Set("leto", "aac", "3", time.Minute)
+	cache.Set("spice", "ac", "4", time.Minute)
+	cache.Set("spice", "z5", "7", time.Minute)
+	assert.Equal(t, cache.ItemCount(), 6)
+
+	assert.Equal(t, cache.DeletePrefix("spice", "9a"), 0)
+	assert.Equal(t, cache.ItemCount(), 6)
+
+	assert.Equal(t, cache.DeletePrefix("spice", "aa"), 3)
+	assert.Equal(t, cache.Get("spice", "aaa"), nil)
+	assert.Equal(t, cache.Get("spice", "aab"), nil)
+	assert.Equal(t, cache.Get("spice", "aac"), nil)
+	assert.Equal(t, cache.Get("spice", "ac").Value(), "4")
+	assert.Equal(t, cache.Get("spice", "z5").Value(), "7")
+	assert.Equal(t, cache.ItemCount(), 3)
+}
 
-	onDeleteFnCalled := false
-	onDeleteFn := func(item *Item) {
+func Test_LayedCache_DeletesAFunc(t *testing.T) {
+	cache := newLayered[int]()
+	assert.Equal(t, cache.ItemCount(), 0)
+
+	cache.Set("spice", "a", 1, time.Minute)
+	cache.Set("leto", "b", 2, time.Minute)
+	cache.Set("spice", "c", 3, time.Minute)
+	cache.Set("spice", "d", 4, time.Minute)
+	cache.Set("spice", "e", 5, time.Minute)
+	cache.Set("spice", "f", 6, time.Minute)
+	assert.Equal(t, cache.ItemCount(), 6)
+
+	assert.Equal(t, cache.DeleteFunc("spice", func(key string, item *Item[int]) bool {
+		return false
+	}), 0)
+	assert.Equal(t, cache.ItemCount(), 6)
+
+	assert.Equal(t, cache.DeleteFunc("spice", func(key string, item *Item[int]) bool {
+		return item.Value() < 4
+	}), 2)
+	assert.Equal(t, cache.ItemCount(), 4)
+
+	assert.Equal(t, cache.DeleteFunc("spice", func(key string, item *Item[int]) bool {
+		return key == "d"
+	}), 1)
+	assert.Equal(t, cache.ItemCount(), 3)
 
+}
+
+func Test_LayedCache_OnDeleteCallbackCalled(t *testing.T) {
+	onDeleteFnCalled := int32(0)
+	onDeleteFn := func(item *Item[string]) {
 		if item.group == "spice" && item.key == "flow" {
-			onDeleteFnCalled = true
+			atomic.AddInt32(&onDeleteFnCalled, 1)
 		}
 	}
 
-	cache := Layered(Configure().OnDelete(onDeleteFn))
+	cache := Layered[string](Configure[string]().OnDelete(onDeleteFn))
 	cache.Set("spice", "flow", "value-a", time.Minute)
 	cache.Set("spice", "must", "value-b", time.Minute)
 	cache.Set("leto", "sister", "ghanima", time.Minute)
 
-	time.Sleep(time.Millisecond * 10) // Run once to init
+	cache.SyncUpdates()
 	cache.Delete("spice", "flow")
-	time.Sleep(time.Millisecond * 10) // Wait for worker to pick up deleted items
+	cache.SyncUpdates()
 
-	Expect(cache.Get("spice", "flow")).To.Equal(nil)
-	Expect(cache.Get("spice", "must").Value()).To.Equal("value-b")
-	Expect(cache.Get("spice", "worm")).To.Equal(nil)
-	Expect(cache.Get("leto", "sister").Value()).To.Equal("ghanima")
+	assert.Equal(t, cache.Get("spice", "flow"), nil)
+	assert.Equal(t, cache.Get("spice", "must").Value(), "value-b")
+	assert.Equal(t, cache.Get("spice", "worm"), nil)
+	assert.Equal(t, cache.Get("leto", "sister").Value(), "ghanima")
 
-	Expect(onDeleteFnCalled).To.Equal(true)
+	assert.Equal(t, atomic.LoadInt32(&onDeleteFnCalled), 1)
 }
 
-func (_ *LayeredCacheTests) DeletesALayer() {
-	cache := newLayered()
+func Test_LayedCache_DeletesALayer(t *testing.T) {
+	cache := newLayered[string]()
 	cache.Set("spice", "flow", "value-a", time.Minute)
 	cache.Set("spice", "must", "value-b", time.Minute)
 	cache.Set("leto", "sister", "ghanima", time.Minute)
 	cache.DeleteAll("spice")
-	Expect(cache.Get("spice", "flow")).To.Equal(nil)
-	Expect(cache.Get("spice", "must")).To.Equal(nil)
-	Expect(cache.Get("spice", "worm")).To.Equal(nil)
-	Expect(cache.Get("leto", "sister").Value()).To.Equal("ghanima")
+	assert.Equal(t, cache.Get("spice", "flow"), nil)
+	assert.Equal(t, cache.Get("spice", "must"), nil)
+	assert.Equal(t, cache.Get("spice", "worm"), nil)
+	assert.Equal(t, cache.Get("leto", "sister").Value(), "ghanima")
 }
 
-func (_ LayeredCacheTests) GCsTheOldestItems() {
-	cache := Layered(Configure().ItemsToPrune(10))
+func Test_LayeredCache_GCsTheOldestItems(t *testing.T) {
+	cache := Layered(Configure[int]().ItemsToPrune(10))
 	cache.Set("xx", "a", 23, time.Minute)
 	for i := 0; i < 500; i++ {
 		cache.Set(strconv.Itoa(i), "a", i, time.Minute)
 	}
 	cache.Set("xx", "b", 9001, time.Minute)
 	//let the items get promoted (and added to our list)
-	time.Sleep(time.Millisecond * 10)
-	gcLayeredCache(cache)
-	Expect(cache.Get("xx", "a")).To.Equal(nil)
-	Expect(cache.Get("xx", "b").Value()).To.Equal(9001)
-	Expect(cache.Get("8", "a")).To.Equal(nil)
-	Expect(cache.Get("9", "a").Value()).To.Equal(9)
-	Expect(cache.Get("10", "a").Value()).To.Equal(10)
+	cache.SyncUpdates()
+	cache.GC()
+	assert.Equal(t, cache.Get("xx", "a"), nil)
+	assert.Equal(t, cache.Get("xx", "b").Value(), 9001)
+	assert.Equal(t, cache.Get("8", "a"), nil)
+	assert.Equal(t, cache.Get("9", "a").Value(), 9)
+	assert.Equal(t, cache.Get("10", "a").Value(), 10)
 }
 
-func (_ LayeredCacheTests) PromotedItemsDontGetPruned() {
-	cache := Layered(Configure().ItemsToPrune(10).GetsPerPromote(1))
+func Test_LayeredCache_PromotedItemsDontGetPruned(t *testing.T) {
+	cache := Layered(Configure[int]().ItemsToPrune(10).GetsPerPromote(1))
 	for i := 0; i < 500; i++ {
 		cache.Set(strconv.Itoa(i), "a", i, time.Minute)
 	}
-	time.Sleep(time.Millisecond * 10) //run the worker once to init the list
+	cache.SyncUpdates()
 	cache.Get("9", "a")
-	time.Sleep(time.Millisecond * 10)
-	gcLayeredCache(cache)
-	Expect(cache.Get("9", "a").Value()).To.Equal(9)
-	Expect(cache.Get("10", "a")).To.Equal(nil)
-	Expect(cache.Get("11", "a").Value()).To.Equal(11)
+	cache.SyncUpdates()
+	cache.GC()
+	assert.Equal(t, cache.Get("9", "a").Value(), 9)
+	assert.Equal(t, cache.Get("10", "a"), nil)
+	assert.Equal(t, cache.Get("11", "a").Value(), 11)
 }
 
-func (_ LayeredCacheTests) TrackerDoesNotCleanupHeldInstance() {
-	cache := Layered(Configure().ItemsToPrune(10).Track())
-	for i := 0; i < 10; i++ {
+func Test_LayeredCache_GetWithoutPromoteDoesNotPromote(t *testing.T) {
+	cache := Layered(Configure[int]().ItemsToPrune(10).GetsPerPromote(1))
+	for i := 0; i < 500; i++ {
 		cache.Set(strconv.Itoa(i), "a", i, time.Minute)
 	}
-	item := cache.TrackingGet("0", "a")
-	time.Sleep(time.Millisecond * 10)
-	gcLayeredCache(cache)
-	Expect(cache.Get("0", "a").Value()).To.Equal(0)
-	Expect(cache.Get("1", "a")).To.Equal(nil)
-	item.Release()
-	gcLayeredCache(cache)
-	Expect(cache.Get("0", "a")).To.Equal(nil)
-}
-
-func (_ LayeredCacheTests) RemovesOldestItemWhenFull() {
-	cache := Layered(Configure().MaxSize(5).ItemsToPrune(1))
+	cache.SyncUpdates()
+	cache.GetWithoutPromote("9", "a")
+	cache.SyncUpdates()
+	cache.GC()
+	assert.Equal(t, cache.Get("9", "a"), nil)
+	assert.Equal(t, cache.Get("10", "a").Value(), 10)
+	assert.Equal(t, cache.Get("11", "a").Value(), 11)
+}
+
+func Test_LayeredCache_TrackerDoesNotCleanupHeldInstance(t *testing.T) {
+	cache := Layered(Configure[int]().ItemsToPrune(10).Track())
+	item0 := cache.TrackingSet("0", "a", 0, time.Minute)
+	for i := 1; i < 11; i++ {
+		cache.Set(strconv.Itoa(i), "a", i, time.Minute)
+	}
+	item1 := cache.TrackingGet("1", "a")
+	cache.SyncUpdates()
+	cache.GC()
+	assert.Equal(t, cache.Get("0", "a").Value(), 0)
+	assert.Equal(t, cache.Get("1", "a").Value(), 1)
+	item0.Release()
+	item1.Release()
+	cache.GC()
+	assert.Equal(t, cache.Get("0", "a"), nil)
+	assert.Equal(t, cache.Get("1", "a"), nil)
+}
+
+func Test_LayeredCache_RemovesOldestItemWhenFull(t *testing.T) {
+	onDeleteFnCalled := false
+	onDeleteFn := func(item *Item[int]) {
+		if item.key == "a" {
+			onDeleteFnCalled = true
+		}
+	}
+	cache := Layered(Configure[int]().MaxSize(5).ItemsToPrune(1).OnDelete(onDeleteFn))
+
 	cache.Set("xx", "a", 23, time.Minute)
 	for i := 0; i < 7; i++ {
 		cache.Set(strconv.Itoa(i), "a", i, time.Minute)
 	}
 	cache.Set("xx", "b", 9001, time.Minute)
-	time.Sleep(time.Millisecond * 10)
-	Expect(cache.Get("xx", "a")).To.Equal(nil)
-	Expect(cache.Get("0", "a")).To.Equal(nil)
-	Expect(cache.Get("1", "a")).To.Equal(nil)
-	Expect(cache.Get("2", "a")).To.Equal(nil)
-	Expect(cache.Get("3", "a").Value()).To.Equal(3)
-	Expect(cache.Get("xx", "b").Value()).To.Equal(9001)
+	cache.SyncUpdates()
+
+	assert.Equal(t, cache.Get("xx", "a"), nil)
+	assert.Equal(t, cache.Get("0", "a"), nil)
+	assert.Equal(t, cache.Get("1", "a"), nil)
+	assert.Equal(t, cache.Get("2", "a"), nil)
+	assert.Equal(t, cache.Get("3", "a").Value(), 3)
+	assert.Equal(t, cache.Get("xx", "b").Value(), 9001)
+	assert.Equal(t, cache.GetDropped(), 4)
+	assert.Equal(t, cache.GetDropped(), 0)
+	assert.Equal(t, onDeleteFnCalled, true)
 }
 
-func newLayered() *LayeredCache {
-	return Layered(Configure())
+func Test_LayeredCache_ResizeOnTheFly(t *testing.T) {
+	cache := Layered(Configure[int]().MaxSize(9).ItemsToPrune(1))
+	for i := 0; i < 5; i++ {
+		cache.Set(strconv.Itoa(i), "a", i, time.Minute)
+	}
+	cache.SyncUpdates()
+
+	cache.SetMaxSize(3)
+	cache.SyncUpdates()
+	assert.Equal(t, cache.GetDropped(), 2)
+	assert.Equal(t, cache.Get("0", "a"), nil)
+	assert.Equal(t, cache.Get("1", "a"), nil)
+	assert.Equal(t, cache.Get("2", "a").Value(), 2)
+	assert.Equal(t, cache.Get("3", "a").Value(), 3)
+	assert.Equal(t, cache.Get("4", "a").Value(), 4)
+
+	cache.Set("5", "a", 5, time.Minute)
+	cache.SyncUpdates()
+	assert.Equal(t, cache.GetDropped(), 1)
+	assert.Equal(t, cache.Get("2", "a"), nil)
+	assert.Equal(t, cache.Get("3", "a").Value(), 3)
+	assert.Equal(t, cache.Get("4", "a").Value(), 4)
+	assert.Equal(t, cache.Get("5", "a").Value(), 5)
+
+	cache.SetMaxSize(10)
+	cache.Set("6", "a", 6, time.Minute)
+	cache.SyncUpdates()
+	assert.Equal(t, cache.GetDropped(), 0)
+	assert.Equal(t, cache.Get("3", "a").Value(), 3)
+	assert.Equal(t, cache.Get("4", "a").Value(), 4)
+	assert.Equal(t, cache.Get("5", "a").Value(), 5)
+	assert.Equal(t, cache.Get("6", "a").Value(), 6)
 }
 
-func (_ LayeredCacheTests) RemovesOldestItemWhenFullBySizer() {
-	cache := Layered(Configure().MaxSize(9).ItemsToPrune(2))
+func Test_LayeredCache_RemovesOldestItemWhenFullBySizer(t *testing.T) {
+	cache := Layered(Configure[*SizedItem]().MaxSize(9).ItemsToPrune(2))
 	for i := 0; i < 7; i++ {
 		cache.Set("pri", strconv.Itoa(i), &SizedItem{i, 2}, time.Minute)
 	}
-	time.Sleep(time.Millisecond * 10)
-	Expect(cache.Get("pri", "0")).To.Equal(nil)
-	Expect(cache.Get("pri", "1")).To.Equal(nil)
-	Expect(cache.Get("pri", "2")).To.Equal(nil)
-	Expect(cache.Get("pri", "3")).To.Equal(nil)
-	Expect(cache.Get("pri", "4").Value().(*SizedItem).id).To.Equal(4)
+	cache.SyncUpdates()
+	assert.Equal(t, cache.Get("pri", "0"), nil)
+	assert.Equal(t, cache.Get("pri", "1"), nil)
+	assert.Equal(t, cache.Get("pri", "2"), nil)
+	assert.Equal(t, cache.Get("pri", "3"), nil)
+	assert.Equal(t, cache.Get("pri", "4").Value().id, 4)
 }
 
-func (_ LayeredCacheTests) SetUpdatesSizeOnDelta() {
-	cache := Layered(Configure())
+func Test_LayeredCache_SetUpdatesSizeOnDelta(t *testing.T) {
+	cache := Layered(Configure[*SizedItem]())
 	cache.Set("pri", "a", &SizedItem{0, 2}, time.Minute)
 	cache.Set("pri", "b", &SizedItem{0, 3}, time.Minute)
-	time.Sleep(time.Millisecond * 5)
-	checkLayeredSize(cache, 5)
+	cache.SyncUpdates()
+	assert.Equal(t, cache.GetSize(), 5)
 	cache.Set("pri", "b", &SizedItem{0, 3}, time.Minute)
-	time.Sleep(time.Millisecond * 5)
-	checkLayeredSize(cache, 5)
+	cache.SyncUpdates()
+	assert.Equal(t, cache.GetSize(), 5)
 	cache.Set("pri", "b", &SizedItem{0, 4}, time.Minute)
-	time.Sleep(time.Millisecond * 5)
-	checkLayeredSize(cache, 6)
+	cache.SyncUpdates()
+	assert.Equal(t, cache.GetSize(), 6)
 	cache.Set("pri", "b", &SizedItem{0, 2}, time.Minute)
 	cache.Set("sec", "b", &SizedItem{0, 3}, time.Minute)
-	time.Sleep(time.Millisecond * 5)
-	checkLayeredSize(cache, 7)
+	cache.SyncUpdates()
+	assert.Equal(t, cache.GetSize(), 7)
 	cache.Delete("pri", "b")
-	time.Sleep(time.Millisecond * 10)
-	checkLayeredSize(cache, 5)
+	cache.SyncUpdates()
+	assert.Equal(t, cache.GetSize(), 5)
 }
 
-func (_ LayeredCacheTests) ReplaceDoesNotchangeSizeIfNotSet() {
-	cache := Layered(Configure())
+func Test_LayeredCache_ReplaceDoesNotchangeSizeIfNotSet(t *testing.T) {
+	cache := Layered(Configure[*SizedItem]())
 	cache.Set("pri", "1", &SizedItem{1, 2}, time.Minute)
 	cache.Set("pri", "2", &SizedItem{1, 2}, time.Minute)
 	cache.Set("pri", "3", &SizedItem{1, 2}, time.Minute)
 	cache.Replace("sec", "3", &SizedItem{1, 2})
-	time.Sleep(time.Millisecond * 5)
-	checkLayeredSize(cache, 6)
+	cache.SyncUpdates()
+	assert.Equal(t, cache.GetSize(), 6)
 }
 
-func (_ LayeredCacheTests) ReplaceChangesSize() {
-	cache := Layered(Configure())
+func Test_LayeredCache_ReplaceChangesSize(t *testing.T) {
+	cache := Layered(Configure[*SizedItem]())
 	cache.Set("pri", "1", &SizedItem{1, 2}, time.Minute)
 	cache.Set("pri", "2", &SizedItem{1, 2}, time.Minute)
 
 	cache.Replace("pri", "2", &SizedItem{1, 2})
-	time.Sleep(time.Millisecond * 5)
-	checkLayeredSize(cache, 4)
+	cache.SyncUpdates()
+	assert.Equal(t, cache.GetSize(), 4)
 
 	cache.Replace("pri", "2", &SizedItem{1, 1})
-	time.Sleep(time.Millisecond * 5)
-	checkLayeredSize(cache, 3)
+	cache.SyncUpdates()
+	assert.Equal(t, cache.GetSize(), 3)
 
 	cache.Replace("pri", "2", &SizedItem{1, 3})
-	time.Sleep(time.Millisecond * 5)
-	checkLayeredSize(cache, 5)
+	cache.SyncUpdates()
+	assert.Equal(t, cache.GetSize(), 5)
+}
+
+func Test_LayeredCache_EachFunc(t *testing.T) {
+	cache := Layered(Configure[int]().MaxSize(3).ItemsToPrune(1))
+	assert.List(t, forEachKeysLayered[int](cache, "1"), []string{})
+
+	cache.Set("1", "a", 1, time.Minute)
+	assert.List(t, forEachKeysLayered[int](cache, "1"), []string{"a"})
+
+	cache.Set("1", "b", 2, time.Minute)
+	cache.SyncUpdates()
+	assert.List(t, forEachKeysLayered[int](cache, "1"), []string{"a", "b"})
+
+	cache.Set("1", "c", 3, time.Minute)
+	cache.SyncUpdates()
+	assert.List(t, forEachKeysLayered[int](cache, "1"), []string{"a", "b", "c"})
+
+	cache.Set("1", "d", 4, time.Minute)
+	cache.SyncUpdates()
+	assert.List(t, forEachKeysLayered[int](cache, "1"), []string{"b", "c", "d"})
+
+	// iteration is non-deterministic, all we know for sure is "stop" should not be in there
+	cache.Set("1", "stop", 5, time.Minute)
+	cache.SyncUpdates()
+	assert.DoesNotContain(t, forEachKeysLayered[int](cache, "1"), "stop")
+
+	cache.Set("1", "e", 6, time.Minute)
+	cache.SyncUpdates()
+	assert.DoesNotContain(t, forEachKeysLayered[int](cache, "1"), "stop")
 }
 
-func checkLayeredSize(cache *LayeredCache, sz int64) {
-	cache.Stop()
-	Expect(cache.size).To.Equal(sz)
-	cache.restart()
+func Test_LayeredCachePrune(t *testing.T) {
+	maxSize := int64(500)
+	cache := Layered(Configure[string]().MaxSize(maxSize).ItemsToPrune(50))
+	epoch := 0
+	for i := 0; i < 10000; i++ {
+		epoch += 1
+		expired := make([]string, 0)
+		for i := 0; i < 50; i += 1 {
+			key := strconv.FormatInt(rand.Int63n(maxSize*20), 10)
+			item := cache.Get(key, key)
+			if item == nil || item.TTL() > 1*time.Minute {
+				expired = append(expired, key)
+			}
+		}
+		for _, key := range expired {
+			cache.Set(key, key, key, 5*time.Minute)
+		}
+		if epoch%500 == 0 {
+			assert.True(t, cache.GetSize() <= 500)
+		}
+	}
+}
+
+func Test_LayeredConcurrentStop(t *testing.T) {
+	for i := 0; i < 100; i++ {
+		cache := Layered(Configure[string]())
+		r := func() {
+			for {
+				key := strconv.Itoa(int(rand.Int31n(100)))
+				switch rand.Int31n(3) {
+				case 0:
+					cache.Get(key, key)
+				case 1:
+					cache.Set(key, key, key, time.Minute)
+				case 2:
+					cache.Delete(key, key)
+				}
+			}
+		}
+		go r()
+		go r()
+		go r()
+		time.Sleep(time.Millisecond * 10)
+		cache.Stop()
+	}
+}
+func newLayered[T any]() *LayeredCache[T] {
+	c := Layered[T](Configure[T]())
+	c.Clear()
+	return c
 }
 
-func gcLayeredCache(cache *LayeredCache) {
-	cache.Stop()
-	cache.gc()
-	cache.restart()
+func forEachKeysLayered[T any](cache *LayeredCache[T], primary string) []string {
+	keys := make([]string, 0, 10)
+	cache.ForEachFunc(primary, func(key string, i *Item[T]) bool {
+		if key == "stop" {
+			return false
+		}
+		keys = append(keys, key)
+		return true
+	})
+	sort.Strings(keys)
+	return keys
 }
diff --git a/list.go b/list.go
new file mode 100644
index 0000000..e7db39a
--- /dev/null
+++ b/list.go
@@ -0,0 +1,57 @@
+package ccache
+
+type List[T any] struct {
+	Head *Node[T]
+	Tail *Node[T]
+}
+
+func NewList[T any]() *List[T] {
+	return &List[T]{}
+}
+
+func (l *List[T]) Remove(node *Node[T]) {
+	next := node.Next
+	prev := node.Prev
+
+	if next == nil {
+		l.Tail = node.Prev
+	} else {
+		next.Prev = prev
+	}
+
+	if prev == nil {
+		l.Head = node.Next
+	} else {
+		prev.Next = next
+	}
+	node.Next = nil
+	node.Prev = nil
+}
+
+func (l *List[T]) MoveToFront(node *Node[T]) {
+	l.Remove(node)
+	l.nodeToFront(node)
+}
+
+func (l *List[T]) Insert(value T) *Node[T] {
+	node := &Node[T]{Value: value}
+	l.nodeToFront(node)
+	return node
+}
+
+func (l *List[T]) nodeToFront(node *Node[T]) {
+	head := l.Head
+	l.Head = node
+	if head == nil {
+		l.Tail = node
+		return
+	}
+	node.Next = head
+	head.Prev = node
+}
+
+type Node[T any] struct {
+	Next  *Node[T]
+	Prev  *Node[T]
+	Value T
+}
diff --git a/list_test.go b/list_test.go
new file mode 100644
index 0000000..0523028
--- /dev/null
+++ b/list_test.go
@@ -0,0 +1,95 @@
+package ccache
+
+import (
+	"testing"
+
+	"github.com/karlseguin/ccache/v3/assert"
+)
+
+func Test_List_Insert(t *testing.T) {
+	l := NewList[int]()
+	assertList(t, l)
+
+	l.Insert(1)
+	assertList(t, l, 1)
+
+	l.Insert(2)
+	assertList(t, l, 2, 1)
+
+	l.Insert(3)
+	assertList(t, l, 3, 2, 1)
+}
+
+func Test_List_Remove(t *testing.T) {
+	l := NewList[int]()
+	assertList(t, l)
+
+	node := l.Insert(1)
+	l.Remove(node)
+	assertList(t, l)
+
+	n5 := l.Insert(5)
+	n4 := l.Insert(4)
+	n3 := l.Insert(3)
+	n2 := l.Insert(2)
+	n1 := l.Insert(1)
+
+	l.Remove(n5)
+	assertList(t, l, 1, 2, 3, 4)
+
+	l.Remove(n1)
+	assertList(t, l, 2, 3, 4)
+
+	l.Remove(n3)
+	assertList(t, l, 2, 4)
+
+	l.Remove(n2)
+	assertList(t, l, 4)
+
+	l.Remove(n4)
+	assertList(t, l)
+}
+
+func Test_List_MoveToFront(t *testing.T) {
+	l := NewList[int]()
+
+	n1 := l.Insert(1)
+	l.MoveToFront(n1)
+	assertList(t, l, 1)
+
+	n2 := l.Insert(2)
+	l.MoveToFront(n1)
+	assertList(t, l, 1, 2)
+	l.MoveToFront(n2)
+	assertList(t, l, 2, 1)
+}
+
+func assertList(t *testing.T, list *List[int], expected ...int) {
+	t.Helper()
+
+	if len(expected) == 0 {
+		assert.Nil(t, list.Head)
+		assert.Nil(t, list.Tail)
+		return
+	}
+
+	node := list.Head
+	for _, expected := range expected {
+		assert.Equal(t, node.Value, expected)
+		node = node.Next
+	}
+
+	node = list.Tail
+	for i := len(expected) - 1; i >= 0; i-- {
+		assert.Equal(t, node.Value, expected[i])
+		node = node.Prev
+	}
+}
+
+func listFromInts(ints ...int) *List[int] {
+	l := NewList[int]()
+	for i := len(ints) - 1; i >= 0; i-- {
+		l.Insert(ints[i])
+	}
+	return l
+}
diff --git a/readme.md b/readme.md
index bb399db..101927a 100644
--- a/readme.md
+++ b/readme.md
@@ -1,4 +1,9 @@
 # CCache
+
+Generic version is on the way:
+https://github.com/karlseguin/ccache/tree/generic
+
+
 CCache is an LRU Cache, written in Go, focused on supporting high concurrency.
 
 Lock contention on the list is reduced by:
@@ -7,28 +12,27 @@ Lock contention on the list is reduced by:
 * Using a buffered channel to queue promotions for a single worker
 * Garbage collecting within the same thread as the worker
 
-## Setup
-
-First, download the project:
+Unless otherwise stated, all methods are thread-safe.
 
-    go get github.com/karlseguin/ccache
+The non-generic version of this cache can be imported via `github.com/karlseguin/ccache`.
 
 ## Configuration
-Next, import and create a `Cache` instance:
-
+Import and create a `Cache` instance:
 
 ```go
 import (
-  "github.com/karlseguin/ccache"
+  "github.com/karlseguin/ccache/v3"
 )
 
-var cache = ccache.New(ccache.Configure())
+// create a cache with string values
+var cache = ccache.New(ccache.Configure[string]())
 ```
 
 `Configure` exposes a chainable API:
 
 ```go
-var cache = ccache.New(ccache.Configure().MaxSize(1000).ItemsToPrune(100))
+// creates a cache with int values
+var cache = ccache.New(ccache.Configure[int]().MaxSize(1000).ItemsToPrune(100))
 ```
 
 The most likely configuration options to tweak are:
@@ -53,18 +57,21 @@ item := cache.Get("user:4")
 if item == nil {
   //handle
 } else {
-  user := item.Value().(*User)
+  user := item.Value()
 }
 ```
 The returned `*Item` exposes a number of methods:
 
-* `Value() interface{}` - the value cached
+* `Value() T` - the value cached
 * `Expired() bool` - whether the item is expired or not
 * `TTL() time.Duration` - the duration before the item expires (will be a negative value for expired items)
 * `Expires() time.Time` - the time the item will expire
 
 By returning expired items, CCache lets you decide if you want to serve stale content or not. For example, you might decide to serve up slightly stale content (< 30 seconds old) while re-fetching newer data in the background. You might also decide to serve up infinitely stale content if you're unable to get new data from your source.
 
+### GetWithoutPromote
+Same as `Get` but does not "promote" the value, which is to say it circumvents the "lru" aspect of this cache. Should only be used in limited cases, such as peeking at the value.
+
 ### Set
 `Set` expects the key, value and ttl:
 
@@ -76,19 +83,33 @@ cache.Set("user:4", user, time.Minute * 10)
 There's also a `Fetch` which mixes a `Get` and a `Set`:
 
 ```go
-item, err := cache.Fetch("user:4", time.Minute * 10, func() (interface{}, error) {
+item, err := cache.Fetch("user:4", time.Minute * 10, func() (*User, error) {
   //code to fetch the data incase of a miss
   //should return the data to cache and the error, if any
 })
 ```
 
+`Fetch` doesn't do anything fancy: it merely uses the public `Get` and `Set` functions. If you want more advanced behavior, such as using a singleflight to protect against thundering herd, support a callback that accepts the key, or returning expired items, you should implement that in your application. 
+
 ### Delete
-`Delete` expects the key to delete. It's ok to call `Delete` on a non-existant key:
+`Delete` expects the key to delete. It's ok to call `Delete` on a non-existent key:
 
 ```go
 cache.Delete("user:4")
 ```
 
+### DeletePrefix
+`DeletePrefix` deletes all keys matching the provided prefix. Returns the number of keys removed.
+
+### DeleteFunc
+`DeleteFunc` deletes all items that the provided matches func evaluates to true. Returns the number of keys removed.
+
+### ForEachFunc
+`ForEachFunc` iterates through all keys and values in the map and passes them to the provided function. Iteration stops if the function returns false. Iteration order is random.
+
+### Clear
+`Clear` clears the cache. If the cache's gc is running, `Clear` waits for it to finish.
+
 ### Extend
 The life of an item can be changed via the `Extend` method. This will change the expiry of the item by the specified duration relative to the current time.
 
@@ -101,6 +122,14 @@ cache.Replace("user:4", user)
 
 `Replace` returns true if the item existed (and thus was replaced). In the case where the key was not in the cache, the value *is not* inserted and false is returned.
 
+### GetDropped
+You can get the number of keys evicted due to memory pressure by calling `GetDropped`:
+
+```go
+dropped := cache.GetDropped()
+```
+The counter is reset on every call. If the cache's gc is running, `GetDropped` waits for it to finish; it's meant to be called asynchronously for statistics/monitoring purposes.
+
 ### Stop
 The cache's background worker can be stopped by calling `Stop`. Once `Stop` is called
 the cache should not be used (calls are likely to panic). Stop must be called in order to allow the garbage collector to reap the cache.
@@ -111,7 +140,7 @@ CCache supports a special tracking mode which is meant to be used in conjunction
 When you configure your cache with `Track()`:
 
 ```go
-cache = ccache.New(ccache.Configure().Track())
+cache = ccache.New(ccache.Configure[int]().Track())
 ```
 
 The items retrieved via `TrackingGet` will not be eligible for purge until `Release` is called on them:
@@ -122,11 +151,11 @@ user := item.Value()   //will be nil if "user:4" didn't exist in the cache
 item.Release()  //can be called even if item.Value() returned nil
 ```
 
-In practice, `Release` wouldn't be called until later, at some other place in your code.
+In practice, `Release` wouldn't be called until later, at some other place in your code. `TrackingSet` can be used to set a value to be tracked.
 
 There are a couple of reasons to use the tracking mode if other parts of your code also hold references to objects. First, if you're already going to hold a reference to these objects, there's really no reason not to have them in the cache - the memory is used up anyway.
 
-More important, it helps ensure that you're code returns consistent data. With tracking, "user:4" might be purged, and a subsequent `Fetch` would reload the data. This can result in different versions of "user:4" being returned by different parts of your system.
+More important, it helps ensure that your code returns consistent data. With tracking, "user:4" might be purged, and a subsequent `Fetch` would reload the data. This can result in different versions of "user:4" being returned by different parts of your system.
 
 ## LayeredCache
 
@@ -137,7 +166,7 @@ CCache's `LayeredCache` stores and retrieves values by both a primary and second
 `LayeredCache` takes the same configuration object as the main cache, exposes the same optional tracking capabilities, but exposes a slightly different API:
 
 ```go
-cache := ccache.Layered(ccache.Configure())
+cache := ccache.Layered(ccache.Configure[string]())
 
 cache.Set("/users/goku", "type:json", "{value_to_cache}", time.Minute * 5)
 cache.Set("/users/goku", "type:xml", "<value_to_cache>", time.Minute * 5)
@@ -156,7 +185,7 @@ cache.DeleteAll("/users/goku")
 In some cases, when using a `LayeredCache`, it may be desirable to always be acting on the secondary portion of the cache entry. This could be the case where the primary key is used as a key elsewhere in your code. The `SecondaryCache` is retrieved with:
 
 ```go
-cache := ccache.Layered(ccache.Configure())
+cache := ccache.Layered(ccache.Configure[string]())
 sCache := cache.GetOrCreateSecondaryCache("/users/goku")
 sCache.Set("type:json", "{value_to_cache}", time.Minute * 5)
 ```
diff --git a/secondarycache.go b/secondarycache.go
index f901fde..0f62df3 100644
--- a/secondarycache.go
+++ b/secondarycache.go
@@ -2,21 +2,21 @@ package ccache
 
 import "time"
 
-type SecondaryCache struct {
-	bucket *bucket
-	pCache *LayeredCache
+type SecondaryCache[T any] struct {
+	bucket *bucket[T]
+	pCache *LayeredCache[T]
 }
 
 // Get the secondary key.
 // The semantics are the same as for LayeredCache.Get
-func (s *SecondaryCache) Get(secondary string) *Item {
+func (s *SecondaryCache[T]) Get(secondary string) *Item[T] {
 	return s.bucket.get(secondary)
 }
 
 // Set the secondary key to a value.
 // The semantics are the same as for LayeredCache.Set
-func (s *SecondaryCache) Set(secondary string, value interface{}, duration time.Duration) *Item {
-	item, existing := s.bucket.set(secondary, value, duration)
+func (s *SecondaryCache[T]) Set(secondary string, value T, duration time.Duration) *Item[T] {
+	item, existing := s.bucket.set(secondary, value, duration, false)
 	if existing != nil {
 		s.pCache.deletables <- existing
 	}
@@ -26,7 +26,7 @@ func (s *SecondaryCache) Set(secondary string, value interface{}, duration time.
 
 // Fetch or set a secondary key.
 // The semantics are the same as for LayeredCache.Fetch
-func (s *SecondaryCache) Fetch(secondary string, duration time.Duration, fetch func() (interface{}, error)) (*Item, error) {
+func (s *SecondaryCache[T]) Fetch(secondary string, duration time.Duration, fetch func() (T, error)) (*Item[T], error) {
 	item := s.Get(secondary)
 	if item != nil {
 		return item, nil
@@ -40,7 +40,7 @@ func (s *SecondaryCache) Fetch(secondary string, duration time.Duration, fetch f
 
 // Delete a secondary key.
 // The semantics are the same as for LayeredCache.Delete
-func (s *SecondaryCache) Delete(secondary string) bool {
+func (s *SecondaryCache[T]) Delete(secondary string) bool {
 	item := s.bucket.delete(secondary)
 	if item != nil {
 		s.pCache.deletables <- item
@@ -51,7 +51,7 @@ func (s *SecondaryCache) Delete(secondary string) bool {
 
 // Replace a secondary key.
 // The semantics are the same as for LayeredCache.Replace
-func (s *SecondaryCache) Replace(secondary string, value interface{}) bool {
+func (s *SecondaryCache[T]) Replace(secondary string, value T) bool {
 	item := s.Get(secondary)
 	if item == nil {
 		return false
@@ -62,10 +62,10 @@ func (s *SecondaryCache) Replace(secondary string, value interface{}) bool {
 
 // Track a secondary key.
 // The semantics are the same as for LayeredCache.TrackingGet
-func (c *SecondaryCache) TrackingGet(secondary string) TrackedItem {
+func (c *SecondaryCache[T]) TrackingGet(secondary string) TrackedItem[T] {
 	item := c.Get(secondary)
 	if item == nil {
-		return NilTracked
+		return nil
 	}
 	item.track()
 	return item
diff --git a/secondarycache_test.go b/secondarycache_test.go
index fa1f891..6753295 100644
--- a/secondarycache_test.go
+++ b/secondarycache_test.go
@@ -1,105 +1,100 @@
 package ccache
 
 import (
-	. "github.com/karlseguin/expect"
 	"strconv"
 	"testing"
 	"time"
-)
-
-type SecondaryCacheTests struct{}
 
-func Test_SecondaryCache(t *testing.T) {
-	Expectify(new(SecondaryCacheTests), t)
-}
+	"github.com/karlseguin/ccache/v3/assert"
+)
 
-func (_ SecondaryCacheTests) GetsANonExistantValue() {
-	cache := newLayered().GetOrCreateSecondaryCache("foo")
-	Expect(cache).Not.To.Equal(nil)
+func Test_SecondaryCache_GetsANonExistantValue(t *testing.T) {
+	cache := newLayered[string]().GetOrCreateSecondaryCache("foo")
+	assert.Equal(t, cache == nil, false)
 }
 
-func (_ SecondaryCacheTests) SetANewValue() {
-	cache := newLayered()
+func Test_SecondaryCache_SetANewValue(t *testing.T) {
+	cache := newLayered[string]()
 	cache.Set("spice", "flow", "a value", time.Minute)
 	sCache := cache.GetOrCreateSecondaryCache("spice")
-	Expect(sCache.Get("flow").Value()).To.Equal("a value")
-	Expect(sCache.Get("stop")).To.Equal(nil)
+	assert.Equal(t, sCache.Get("flow").Value(), "a value")
+	assert.Equal(t, sCache.Get("stop"), nil)
 }
 
-func (_ SecondaryCacheTests) ValueCanBeSeenInBothCaches1() {
-	cache := newLayered()
+func Test_SecondaryCache_ValueCanBeSeenInBothCaches1(t *testing.T) {
+	cache := newLayered[string]()
 	cache.Set("spice", "flow", "a value", time.Minute)
 	sCache := cache.GetOrCreateSecondaryCache("spice")
 	sCache.Set("orinoco", "another value", time.Minute)
-	Expect(sCache.Get("orinoco").Value()).To.Equal("another value")
-	Expect(cache.Get("spice", "orinoco").Value()).To.Equal("another value")
+	assert.Equal(t, sCache.Get("orinoco").Value(), "another value")
+	assert.Equal(t, cache.Get("spice", "orinoco").Value(), "another value")
 }
 
-func (_ SecondaryCacheTests) ValueCanBeSeenInBothCaches2() {
-	cache := newLayered()
+func Test_SecondaryCache_ValueCanBeSeenInBothCaches2(t *testing.T) {
+	cache := newLayered[string]()
 	sCache := cache.GetOrCreateSecondaryCache("spice")
 	sCache.Set("flow", "a value", time.Minute)
-	Expect(sCache.Get("flow").Value()).To.Equal("a value")
-	Expect(cache.Get("spice", "flow").Value()).To.Equal("a value")
+	assert.Equal(t, sCache.Get("flow").Value(), "a value")
+	assert.Equal(t, cache.Get("spice", "flow").Value(), "a value")
 }
 
-func (_ SecondaryCacheTests) DeletesAreReflectedInBothCaches() {
-	cache := newLayered()
+func Test_SecondaryCache_DeletesAreReflectedInBothCaches(t *testing.T) {
+	cache := newLayered[string]()
 	cache.Set("spice", "flow", "a value", time.Minute)
 	cache.Set("spice", "sister", "ghanima", time.Minute)
 	sCache := cache.GetOrCreateSecondaryCache("spice")
 
 	cache.Delete("spice", "flow")
-	Expect(cache.Get("spice", "flow")).To.Equal(nil)
-	Expect(sCache.Get("flow")).To.Equal(nil)
+	assert.Equal(t, cache.Get("spice", "flow"), nil)
+	assert.Equal(t, sCache.Get("flow"), nil)
 
 	sCache.Delete("sister")
-	Expect(cache.Get("spice", "sister")).To.Equal(nil)
-	Expect(sCache.Get("sister")).To.Equal(nil)
+	assert.Equal(t, cache.Get("spice", "sister"), nil)
+	assert.Equal(t, sCache.Get("sister"), nil)
 }
 
-func (_ SecondaryCacheTests) ReplaceDoesNothingIfKeyDoesNotExist() {
-	cache := newLayered()
+func Test_SecondaryCache_ReplaceDoesNothingIfKeyDoesNotExist(t *testing.T) {
+	cache := newLayered[string]()
 	sCache := cache.GetOrCreateSecondaryCache("spice")
-	Expect(sCache.Replace("flow", "value-a")).To.Equal(false)
-	Expect(cache.Get("spice", "flow")).To.Equal(nil)
+	assert.Equal(t, sCache.Replace("flow", "value-a"), false)
+	assert.Equal(t, cache.Get("spice", "flow"), nil)
 }
 
-func (_ SecondaryCacheTests) ReplaceUpdatesTheValue() {
-	cache := newLayered()
+func Test_SecondaryCache_ReplaceUpdatesTheValue(t *testing.T) {
+	cache := newLayered[string]()
 	cache.Set("spice", "flow", "value-a", time.Minute)
 	sCache := cache.GetOrCreateSecondaryCache("spice")
-	Expect(sCache.Replace("flow", "value-b")).To.Equal(true)
-	Expect(cache.Get("spice", "flow").Value().(string)).To.Equal("value-b")
+	assert.Equal(t, sCache.Replace("flow", "value-b"), true)
+	assert.Equal(t, cache.Get("spice", "flow").Value(), "value-b")
 }
 
-func (_ SecondaryCacheTests) FetchReturnsAnExistingValue() {
-	cache := newLayered()
+func Test_SecondaryCache_FetchReturnsAnExistingValue(t *testing.T) {
+	cache := newLayered[string]()
 	cache.Set("spice", "flow", "value-a", time.Minute)
 	sCache := cache.GetOrCreateSecondaryCache("spice")
-	val, _ := sCache.Fetch("flow", time.Minute, func() (interface{}, error) { return "a fetched value", nil })
-	Expect(val.Value().(string)).To.Equal("value-a")
+	val, _ := sCache.Fetch("flow", time.Minute, func() (string, error) { return "a fetched value", nil })
+	assert.Equal(t, val.Value(), "value-a")
 }
 
-func (_ SecondaryCacheTests) FetchReturnsANewValue() {
-	cache := newLayered()
+func Test_SecondaryCache_FetchReturnsANewValue(t *testing.T) {
+	cache := newLayered[string]()
 	sCache := cache.GetOrCreateSecondaryCache("spice")
-	val, _ := sCache.Fetch("flow", time.Minute, func() (interface{}, error) { return "a fetched value", nil })
-	Expect(val.Value().(string)).To.Equal("a fetched value")
+	val, _ := sCache.Fetch("flow", time.Minute, func() (string, error) { return "a fetched value", nil })
+	assert.Equal(t, val.Value(), "a fetched value")
 }
 
-func (_ SecondaryCacheTests) TrackerDoesNotCleanupHeldInstance() {
-	cache := Layered(Configure().ItemsToPrune(10).Track())
+func Test_SecondaryCache_TrackerDoesNotCleanupHeldInstance(t *testing.T) {
+	cache := Layered(Configure[int]().ItemsToPrune(10).Track())
 	for i := 0; i < 10; i++ {
 		cache.Set(strconv.Itoa(i), "a", i, time.Minute)
 	}
 	sCache := cache.GetOrCreateSecondaryCache("0")
 	item := sCache.TrackingGet("a")
 	time.Sleep(time.Millisecond * 10)
-	gcLayeredCache(cache)
-	Expect(cache.Get("0", "a").Value()).To.Equal(0)
-	Expect(cache.Get("1", "a")).To.Equal(nil)
+	cache.GC()
+	assert.Equal(t, cache.Get("0", "a").Value(), 0)
+	assert.Equal(t, cache.Get("1", "a"), nil)
 	item.Release()
-	gcLayeredCache(cache)
-	Expect(cache.Get("0", "a")).To.Equal(nil)
+	cache.GC()
+	assert.Equal(t, cache.Get("0", "a"), nil)
 }

More details

Full run details

Historical runs