Commit 0b2b9df

feat: Distributed storage (#13)
1 parent 72ecfa5 commit 0b2b9df

37 files changed: +1960 -363 lines

.golangci.yml

Lines changed: 1 addition & 1 deletion

```diff
@@ -207,7 +207,7 @@ linters-settings:
   gocognit:
     # Minimal code complexity to report.
     # Default: 30 (but we recommend 10-20)
-    min-complexity: 60
+    min-complexity: 70

   gocritic:
     # Settings passed to gocritic.
```

README.md

Lines changed: 235 additions & 113 deletions
Large diffs are not rendered by default.
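The README is where this commit's headline feature, the distributed storage layer, is documented. Since that diff isn't rendered, below is a minimal sketch of the concept: the cache synchronizes records with an external key-value store (Redis, for example). Every name in the snippet is an assumption inferred from the Config fields added in cache.go further down (distributedStorage, distributedEarlyRefreshes, distributedRefreshAfterDuration); the commit's actual interface and option names may differ.

```go
package main

import (
	"context"
	"sync"
)

// memoryStore is a thread-safe, in-memory stand-in for a real distributed
// key-value store. The method set is hypothetical: it mirrors what the new
// Config fields suggest the cache delegates to, but the commit's actual
// DistributedStorage interface may declare different methods.
type memoryStore struct {
	mu      sync.RWMutex
	records map[string][]byte
}

func newMemoryStore() *memoryStore {
	return &memoryStore{records: make(map[string][]byte)}
}

func (m *memoryStore) Get(_ context.Context, key string) ([]byte, bool) {
	m.mu.RLock()
	defer m.mu.RUnlock()
	value, ok := m.records[key]
	return value, ok
}

func (m *memoryStore) Set(_ context.Context, key string, value []byte) {
	m.mu.Lock()
	defer m.mu.Unlock()
	m.records[key] = value
}

// Delete covers the "WithDeletions" capability implied by the new
// DistributedStorageWithDeletions field in Config.
func (m *memoryStore) Delete(_ context.Context, key string) {
	m.mu.Lock()
	defer m.mu.Unlock()
	delete(m.records, key)
}
```

A store like this would then be handed to the client through a functional option, in the same style as the other options this commit touches.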

benchmark_test.go

Lines changed: 6 additions & 2 deletions

```diff
@@ -56,7 +56,9 @@ func BenchmarkGetConcurrent(b *testing.B) {
 	numShards := 100
 	ttl := time.Hour
 	evictionPercentage := 5
-	c := sturdyc.New[string](capacity, numShards, ttl, evictionPercentage)
+	c := sturdyc.New[string](capacity, numShards, ttl, evictionPercentage,
+		sturdyc.WithNoContinuousEvictions(),
+	)
 	c.Set(cacheKey, "value")

 	metrics := make(benchmarkMetrics[string], 0)
@@ -77,7 +79,9 @@ func BenchmarkSetConcurrent(b *testing.B) {
 	numShards := 10_000
 	ttl := time.Hour
 	evictionPercentage := 5
-	c := sturdyc.New[string](capacity, numShards, ttl, evictionPercentage)
+	c := sturdyc.New[string](capacity, numShards, ttl, evictionPercentage,
+		sturdyc.WithNoContinuousEvictions(),
+	)

 	metrics := make(benchmarkMetrics[string], 0)
 	b.ResetTimer()
```
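Both benchmarks now opt out of continuous evictions, so the background sweeper can't interfere with the measurements. For reference, a minimal sketch of constructing a client with the new option outside of a benchmark; the import path and the Get/Set signatures are assumptions not shown in this diff:

```go
package main

import (
	"fmt"
	"time"

	"github.com/creativecreature/sturdyc" // import path assumed
)

func main() {
	// Same constructor parameters as the benchmarks above: capacity, number
	// of shards, TTL, and the percentage of records to evict when a shard
	// reaches capacity.
	cache := sturdyc.New[string](10_000, 100, time.Hour, 5,
		// New in this commit: don't spawn the goroutine that continuously
		// sweeps shards for expired records. Evictions then presumably only
		// happen on demand, when a shard runs out of capacity.
		sturdyc.WithNoContinuousEvictions(),
	)

	cache.Set("key", "value")
	if value, ok := cache.Get("key"); ok {
		fmt.Println(value)
	}
}
```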

buffer_test.go

Lines changed: 38 additions & 32 deletions

```diff
@@ -33,9 +33,10 @@ func TestBatchIsRefreshedWhenTheTimeoutExpires(t *testing.T) {
 	// 1. The number of scheduled refreshes exceeds the specified 'batchSize'.
 	// 2. The 'batchBufferTimeout' threshold is exceeded.
 	client := sturdyc.New[string](capacity, numShards, ttl, evictionPercentage,
-		sturdyc.WithBackgroundRefreshes(minRefreshDelay, maxRefreshDelay, refreshRetryInterval),
+		sturdyc.WithNoContinuousEvictions(),
+		sturdyc.WithEarlyRefreshes(minRefreshDelay, maxRefreshDelay, refreshRetryInterval),
 		sturdyc.WithMissingRecordStorage(),
-		sturdyc.WithRefreshBuffering(batchSize, batchBufferTimeout),
+		sturdyc.WithRefreshCoalescing(batchSize, batchBufferTimeout),
 		sturdyc.WithClock(clock),
 	)

@@ -47,7 +48,7 @@ func TestBatchIsRefreshedWhenTheTimeoutExpires(t *testing.T) {

 	fetchObserver := NewFetchObserver(1)
 	fetchObserver.BatchResponse(ids)
-	sturdyc.GetFetchBatch(ctx, client, ids, client.BatchKeyFn("item"), fetchObserver.FetchBatch)
+	sturdyc.GetOrFetchBatch(ctx, client, ids, client.BatchKeyFn("item"), fetchObserver.FetchBatch)

 	<-fetchObserver.FetchCompleted
 	fetchObserver.AssertFetchCount(t, 1)
@@ -59,11 +60,11 @@ func TestBatchIsRefreshedWhenTheTimeoutExpires(t *testing.T) {
 	clock.Add(maxRefreshDelay + time.Second)

 	// We'll create a batch function that stores the ids it was called with, and
-	// then invoke "GetFetchBatch". We are going to request 3 ids, which is less
+	// then invoke "GetOrFetchBatch". We are going to request 3 ids, which is less
 	// than our wanted batch size. This should lead to a batch being scheduled.
 	recordsToRequest := []string{"1", "2", "3"}
 	fetchObserver.BatchResponse(recordsToRequest)
-	sturdyc.GetFetchBatch(ctx, client, recordsToRequest, client.BatchKeyFn("item"), fetchObserver.FetchBatch)
+	sturdyc.GetOrFetchBatch(ctx, client, recordsToRequest, client.BatchKeyFn("item"), fetchObserver.FetchBatch)
 	time.Sleep(10 * time.Millisecond)
 	fetchObserver.AssertFetchCount(t, 1)

@@ -98,9 +99,10 @@ func TestBatchIsRefreshedWhenTheBufferSizeIsReached(t *testing.T) {
 	// 1. The number of scheduled refreshes exceeds the specified 'batchSize'.
 	// 2. The 'batchBufferTimeout' threshold is exceeded.
 	client := sturdyc.New[string](capacity, numShards, ttl, evictionPercentage,
-		sturdyc.WithBackgroundRefreshes(minRefreshDelay, maxRefreshDelay, refreshRetryInterval),
+		sturdyc.WithNoContinuousEvictions(),
+		sturdyc.WithEarlyRefreshes(minRefreshDelay, maxRefreshDelay, refreshRetryInterval),
 		sturdyc.WithMissingRecordStorage(),
-		sturdyc.WithRefreshBuffering(batchSize, batchBufferTimeout),
+		sturdyc.WithRefreshCoalescing(batchSize, batchBufferTimeout),
 		sturdyc.WithClock(clock),
 	)

@@ -111,7 +113,7 @@ func TestBatchIsRefreshedWhenTheBufferSizeIsReached(t *testing.T) {

 	fetchObserver := NewFetchObserver(1)
 	fetchObserver.BatchResponse(ids)
-	sturdyc.GetFetchBatch(ctx, client, ids, client.BatchKeyFn("item"), fetchObserver.FetchBatch)
+	sturdyc.GetOrFetchBatch(ctx, client, ids, client.BatchKeyFn("item"), fetchObserver.FetchBatch)

 	<-fetchObserver.FetchCompleted
 	fetchObserver.AssertFetchCount(t, 1)
@@ -123,17 +125,17 @@ func TestBatchIsRefreshedWhenTheBufferSizeIsReached(t *testing.T) {
 	clock.Add(maxRefreshDelay + time.Second)

 	// We'll create a batch function that stores the ids it was called with, and
-	// then invoke "GetFetchBatch". We are going to request 3 ids, which is less
+	// then invoke "GetOrFetchBatch". We are going to request 3 ids, which is less
 	// than our ideal batch size. This should lead to a batch being scheduled.
 	firstBatchOfRequestedRecords := []string{"1", "2", "3"}
 	fetchObserver.BatchResponse([]string{"1", "2", "3"})
-	sturdyc.GetFetchBatch(ctx, client, firstBatchOfRequestedRecords, client.BatchKeyFn("item"), fetchObserver.FetchBatch)
+	sturdyc.GetOrFetchBatch(ctx, client, firstBatchOfRequestedRecords, client.BatchKeyFn("item"), fetchObserver.FetchBatch)

 	// Now, we'll move the clock forward 5 seconds before requesting another 3 records.
 	// Our wanted batch size is 10, hence this should NOT be enough to trigger a refresh.
 	clock.Add(5 * time.Second)
 	secondBatchOfRecords := []string{"4", "5", "6"}
-	sturdyc.GetFetchBatch(ctx, client, secondBatchOfRecords, client.BatchKeyFn("item"), fetchObserver.FetchBatch)
+	sturdyc.GetOrFetchBatch(ctx, client, secondBatchOfRecords, client.BatchKeyFn("item"), fetchObserver.FetchBatch)

 	// Move the clock another 10 seconds. Again, this should not trigger a refresh. We'll
 	// perform a sleep here too just to ensure that the buffer is not refreshed prematurely.
@@ -144,7 +146,7 @@ func TestBatchIsRefreshedWhenTheBufferSizeIsReached(t *testing.T) {
 	// In the third batch I'm going to request 6 records. With that, we've
 	// requested 12 records in total, which is greater than our buffer size of 10.
 	thirdBatchOfRecords := []string{"7", "8", "9", "10", "11", "12"}
-	sturdyc.GetFetchBatch(ctx, client, thirdBatchOfRecords, client.BatchKeyFn("item"), fetchObserver.FetchBatch)
+	sturdyc.GetOrFetchBatch(ctx, client, thirdBatchOfRecords, client.BatchKeyFn("item"), fetchObserver.FetchBatch)

 	// An actual refresh should happen for the first 10 ids, while the 2 that
 	// overflow should get scheduled for a refresh. Block until the request has
@@ -191,9 +193,10 @@ func TestBatchIsNotRefreshedByDuplicates(t *testing.T) {
 	// 1. The number of scheduled refreshes exceeds the specified 'batchSize'.
 	// 2. The 'batchBufferTimeout' threshold is exceeded.
 	client := sturdyc.New[string](capacity, numShards, ttl, evictionPercentage,
-		sturdyc.WithBackgroundRefreshes(minRefreshDelay, maxRefreshDelay, refreshRetryInterval),
+		sturdyc.WithNoContinuousEvictions(),
+		sturdyc.WithEarlyRefreshes(minRefreshDelay, maxRefreshDelay, refreshRetryInterval),
 		sturdyc.WithMissingRecordStorage(),
-		sturdyc.WithRefreshBuffering(batchSize, batchBufferTimeout),
+		sturdyc.WithRefreshCoalescing(batchSize, batchBufferTimeout),
 		sturdyc.WithClock(clock),
 	)

@@ -205,7 +208,7 @@ func TestBatchIsNotRefreshedByDuplicates(t *testing.T) {

 	fetchObserver := NewFetchObserver(1)
 	fetchObserver.BatchResponse(ids)
-	sturdyc.GetFetchBatch(ctx, client, ids, client.BatchKeyFn("item"), fetchObserver.FetchBatch)
+	sturdyc.GetOrFetchBatch(ctx, client, ids, client.BatchKeyFn("item"), fetchObserver.FetchBatch)
 	<-fetchObserver.FetchCompleted
 	fetchObserver.AssertFetchCount(t, 1)
 	fetchObserver.AssertRequestedRecords(t, ids)
@@ -221,7 +224,7 @@ func TestBatchIsNotRefreshedByDuplicates(t *testing.T) {
 	for i := 0; i < numRequests; i++ {
 		go func(i int) {
 			id := []string{strconv.Itoa((i % 3) + 1)}
-			sturdyc.GetFetchBatch(ctx, client, id, client.BatchKeyFn("item"), fetchObserver.FetchBatch)
+			sturdyc.GetOrFetchBatch(ctx, client, id, client.BatchKeyFn("item"), fetchObserver.FetchBatch)
 			wg.Done()
 		}(i)
 	}
@@ -260,9 +263,10 @@ func TestBatchesAreGroupedByPermutations(t *testing.T) {
 	// 1. The number of scheduled refreshes exceeds the specified 'batchSize'.
 	// 2. The 'batchBufferTimeout' threshold is exceeded.
 	c := sturdyc.New[any](capacity, numShards, ttl, evictionPercentage,
-		sturdyc.WithBackgroundRefreshes(minRefreshDelay, maxRefreshDelay, refreshRetryInterval),
+		sturdyc.WithNoContinuousEvictions(),
+		sturdyc.WithEarlyRefreshes(minRefreshDelay, maxRefreshDelay, refreshRetryInterval),
 		sturdyc.WithMissingRecordStorage(),
-		sturdyc.WithRefreshBuffering(batchSize, batchBufferTimeout),
+		sturdyc.WithRefreshCoalescing(batchSize, batchBufferTimeout),
 		sturdyc.WithClock(clock),
 	)

@@ -283,9 +287,9 @@ func TestBatchesAreGroupedByPermutations(t *testing.T) {

 	fetchObserver := NewFetchObserver(1)
 	fetchObserver.BatchResponse(seedIDs)
-	sturdyc.GetFetchBatch(ctx, c, seedIDs, c.PermutatedBatchKeyFn(prefix, optsOne), fetchObserver.FetchBatch)
+	sturdyc.GetOrFetchBatch(ctx, c, seedIDs, c.PermutatedBatchKeyFn(prefix, optsOne), fetchObserver.FetchBatch)
 	<-fetchObserver.FetchCompleted
-	sturdyc.GetFetchBatch(ctx, c, seedIDs, c.PermutatedBatchKeyFn(prefix, optsTwo), fetchObserver.FetchBatch)
+	sturdyc.GetOrFetchBatch(ctx, c, seedIDs, c.PermutatedBatchKeyFn(prefix, optsTwo), fetchObserver.FetchBatch)
 	<-fetchObserver.FetchCompleted
 	fetchObserver.AssertFetchCount(t, 2)
 	fetchObserver.Clear()
@@ -304,12 +308,12 @@ func TestBatchesAreGroupedByPermutations(t *testing.T) {
 	optsTwoBatch2 := []string{"6", "7", "8"}

 	// Request the first batch of records. This should wait for additional IDs.
-	sturdyc.GetFetchBatch(ctx, c, optsOneIDs, c.PermutatedBatchKeyFn(prefix, optsOne), fetchObserver.FetchBatch)
+	sturdyc.GetOrFetchBatch(ctx, c, optsOneIDs, c.PermutatedBatchKeyFn(prefix, optsOne), fetchObserver.FetchBatch)

 	// Next, we're requesting ids 4-8 with the second options which should exceed the buffer size for that permutation.
 	fetchObserver.BatchResponse([]string{"4", "5", "6", "7", "8"})
-	sturdyc.GetFetchBatch(ctx, c, optsTwoBatch1, c.PermutatedBatchKeyFn(prefix, optsTwo), fetchObserver.FetchBatch)
-	sturdyc.GetFetchBatch(ctx, c, optsTwoBatch2, c.PermutatedBatchKeyFn(prefix, optsTwo), fetchObserver.FetchBatch)
+	sturdyc.GetOrFetchBatch(ctx, c, optsTwoBatch1, c.PermutatedBatchKeyFn(prefix, optsTwo), fetchObserver.FetchBatch)
+	sturdyc.GetOrFetchBatch(ctx, c, optsTwoBatch2, c.PermutatedBatchKeyFn(prefix, optsTwo), fetchObserver.FetchBatch)

 	<-fetchObserver.FetchCompleted
 	fetchObserver.AssertFetchCount(t, 3)
@@ -348,9 +352,10 @@ func TestLargeBatchesAreChunkedCorrectly(t *testing.T) {
 	// 1. The number of scheduled refreshes exceeds the specified 'batchSize'.
 	// 2. The 'batchBufferTimeout' threshold is exceeded.
 	client := sturdyc.New[string](capacity, numShards, ttl, evictionPercentage,
-		sturdyc.WithBackgroundRefreshes(minRefreshDelay, maxRefreshDelay, refreshRetryInterval),
+		sturdyc.WithNoContinuousEvictions(),
+		sturdyc.WithEarlyRefreshes(minRefreshDelay, maxRefreshDelay, refreshRetryInterval),
 		sturdyc.WithMissingRecordStorage(),
-		sturdyc.WithRefreshBuffering(batchSize, batchBufferTimeout),
+		sturdyc.WithRefreshCoalescing(batchSize, batchBufferTimeout),
 		sturdyc.WithClock(clock),
 	)

@@ -363,7 +368,7 @@ func TestLargeBatchesAreChunkedCorrectly(t *testing.T) {

 	fetchObserver := NewFetchObserver(5)
 	fetchObserver.BatchResponse(seedIDs)
-	sturdyc.GetFetchBatch(ctx, client, seedIDs, client.BatchKeyFn(cacheKeyPrefix), fetchObserver.FetchBatch)
+	sturdyc.GetOrFetchBatch(ctx, client, seedIDs, client.BatchKeyFn(cacheKeyPrefix), fetchObserver.FetchBatch)
 	<-fetchObserver.FetchCompleted
 	fetchObserver.AssertFetchCount(t, 1)
 	fetchObserver.AssertRequestedRecords(t, seedIDs)
@@ -379,7 +384,7 @@ func TestLargeBatchesAreChunkedCorrectly(t *testing.T) {
 	for i := 1; i <= 50; i++ {
 		largeBatch = append(largeBatch, strconv.Itoa(i))
 	}
-	sturdyc.GetFetchBatch(ctx, client, largeBatch, client.BatchKeyFn(cacheKeyPrefix), fetchObserver.FetchBatch)
+	sturdyc.GetOrFetchBatch(ctx, client, largeBatch, client.BatchKeyFn(cacheKeyPrefix), fetchObserver.FetchBatch)
 	for i := 0; i < 10; i++ {
 		<-fetchObserver.FetchCompleted
 	}
@@ -409,9 +414,10 @@ func TestValuesAreUpdatedCorrectly(t *testing.T) {
 	// 1. The number of scheduled refreshes exceeds the specified 'batchSize'.
 	// 2. The 'batchBufferTimeout' threshold is exceeded.
 	client := sturdyc.New[any](capacity, numShards, ttl, evictionPercentage,
-		sturdyc.WithBackgroundRefreshes(minRefreshDelay, maxRefreshDelay, refreshRetryInterval),
+		sturdyc.WithNoContinuousEvictions(),
+		sturdyc.WithEarlyRefreshes(minRefreshDelay, maxRefreshDelay, refreshRetryInterval),
 		sturdyc.WithMissingRecordStorage(),
-		sturdyc.WithRefreshBuffering(batchSize, batchBufferTimeout),
+		sturdyc.WithRefreshCoalescing(batchSize, batchBufferTimeout),
 		sturdyc.WithClock(clock),
 	)

@@ -420,7 +426,7 @@ func TestValuesAreUpdatedCorrectly(t *testing.T) {
 	}

 	records := []string{"1", "2", "3"}
-	res, _ := sturdyc.GetFetchBatch[Foo](ctx, client, records, client.BatchKeyFn("item"), func(_ context.Context, ids []string) (map[string]Foo, error) {
+	res, _ := sturdyc.GetOrFetchBatch[Foo](ctx, client, records, client.BatchKeyFn("item"), func(_ context.Context, ids []string) (map[string]Foo, error) {
 		values := make(map[string]Foo, len(ids))
 		for _, id := range ids {
 			values[id] = Foo{Value: "foo-" + id}
@@ -433,7 +439,7 @@ func TestValuesAreUpdatedCorrectly(t *testing.T) {
 	}

 	clock.Add(time.Minute * 45)
-	sturdyc.GetFetchBatch[Foo](ctx, client, records, client.BatchKeyFn("item"), func(_ context.Context, ids []string) (map[string]Foo, error) {
+	sturdyc.GetOrFetchBatch[Foo](ctx, client, records, client.BatchKeyFn("item"), func(_ context.Context, ids []string) (map[string]Foo, error) {
 		values := make(map[string]Foo, len(ids))
 		for _, id := range ids {
 			values[id] = Foo{Value: "foo2-" + id}
@@ -450,7 +456,7 @@ func TestValuesAreUpdatedCorrectly(t *testing.T) {
 	clock.Add(time.Minute * 5)
 	time.Sleep(50 * time.Millisecond)

-	resTwo, _ := sturdyc.GetFetchBatch[Foo](ctx, client, records, client.BatchKeyFn("item"), func(_ context.Context, ids []string) (map[string]Foo, error) {
+	resTwo, _ := sturdyc.GetOrFetchBatch[Foo](ctx, client, records, client.BatchKeyFn("item"), func(_ context.Context, ids []string) (map[string]Foo, error) {
 		values := make(map[string]Foo, len(ids))
 		for _, id := range ids {
 			values[id] = Foo{Value: "foo3-" + id}
```
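Aside from the added WithNoContinuousEvictions option, the changes to this file are mechanical renames: WithBackgroundRefreshes becomes WithEarlyRefreshes, WithRefreshBuffering becomes WithRefreshCoalescing, and GetFetchBatch becomes GetOrFetchBatch. A condensed sketch of the renamed API follows; the signatures match the tests above, while the values and the import path are illustrative assumptions:

```go
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/creativecreature/sturdyc" // import path assumed
)

func main() {
	ctx := context.Background()
	client := sturdyc.New[string](10_000, 10, 2*time.Hour, 5,
		// Previously WithBackgroundRefreshes: refresh records in the
		// background somewhere between the min and max delay, retrying
		// failed refreshes at the given interval.
		sturdyc.WithEarlyRefreshes(10*time.Second, 30*time.Second, 5*time.Second),
		// Previously WithRefreshBuffering: coalesce scheduled refreshes
		// until the batch size or the buffer timeout is reached.
		sturdyc.WithRefreshCoalescing(10, 15*time.Second),
	)

	fetchBatch := func(_ context.Context, ids []string) (map[string]string, error) {
		values := make(map[string]string, len(ids))
		for _, id := range ids {
			values[id] = "value-" + id
		}
		return values, nil
	}

	// Previously GetFetchBatch: serve what's cached and call fetchBatch
	// for any IDs that are missing.
	records, err := sturdyc.GetOrFetchBatch(ctx, client, []string{"1", "2", "3"},
		client.BatchKeyFn("item"), fetchBatch)
	fmt.Println(records, err)
}
```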

cache.go

Lines changed: 17 additions & 38 deletions

```diff
@@ -9,17 +9,6 @@ import (
 	"github.com/cespare/xxhash"
 )

-type MetricsRecorder interface {
-	CacheHit()
-	CacheMiss()
-	Eviction()
-	ForcedEviction()
-	EntriesEvicted(int)
-	ShardIndex(int)
-	CacheBatchRefreshSize(size int)
-	ObserveCacheSize(callback func() int)
-}
-
 // FetchFn Fetch represents a function that can be used to fetch a single record from a data source.
 type FetchFn[T any] func(ctx context.Context) (T, error)

@@ -34,10 +23,11 @@ type KeyFn func(id string) string

 // Config represents the configuration that can be applied to the cache.
 type Config struct {
-	clock            Clock
-	evictionInterval time.Duration
-	metricsRecorder  MetricsRecorder
-	log              Logger
+	clock                      Clock
+	evictionInterval           time.Duration
+	disableContinuousEvictions bool
+	metricsRecorder            DistributedMetricsRecorder
+	log                        Logger

 	refreshInBackground bool
 	minRefreshTime      time.Duration
@@ -54,6 +44,10 @@ type Config struct {
 	useRelativeTimeKeyFormat bool
 	keyTruncation            time.Duration
 	getSize                  func() int
+
+	distributedStorage              DistributedStorageWithDeletions
+	distributedEarlyRefreshes       bool
+	distributedRefreshAfterDuration time.Duration
 }

 // Client represents a cache client that can be used to store and retrieve values.
@@ -103,20 +97,19 @@ func New[T any](capacity, numShards int, ttl time.Duration, evictionPercentage i
 	client.nextShard = 0

 	// Run evictions on the shards in a separate goroutine.
-	client.startEvictions()
+	if !cfg.disableContinuousEvictions {
+		client.performContinuousEvictions()
+	}

 	return client
 }

-// startEvictions is going to be running in a separate goroutine that we're going to prevent from ever exiting.
-func (c *Client[T]) startEvictions() {
+// performContinuousEvictions is going to be running in a separate goroutine that we're going to prevent from ever exiting.
+func (c *Client[T]) performContinuousEvictions() {
 	go func() {
 		ticker, stop := c.clock.NewTicker(c.evictionInterval)
 		defer stop()
 		for range ticker {
-			if c.metricsRecorder != nil {
-				c.metricsRecorder.Eviction()
-			}
 			c.shards[c.nextShard].evictExpired()
 			c.nextShard = (c.nextShard + 1) % len(c.shards)
 		}
@@ -127,24 +120,10 @@ func (c *Client[T]) startEvictions() {
 func (c *Client[T]) getShard(key string) *shard[T] {
 	hash := xxhash.Sum64String(key)
 	shardIndex := hash % uint64(len(c.shards))
-	if c.metricsRecorder != nil {
-		c.metricsRecorder.ShardIndex(int(shardIndex))
-	}
+	c.reportShardIndex(int(shardIndex))
 	return c.shards[shardIndex]
 }

-// reportCacheHits is used to report cache hits and misses to the metrics recorder.
-func (c *Client[T]) reportCacheHits(cacheHit bool) {
-	if c.metricsRecorder == nil {
-		return
-	}
-	if !cacheHit {
-		c.metricsRecorder.CacheMiss()
-		return
-	}
-	c.metricsRecorder.CacheHit()
-}
-
 func (c *Client[T]) get(key string) (value T, exists, ignore, refresh bool) {
 	shard := c.getShard(key)
 	val, exists, ignore, refresh := shard.get(key)
@@ -171,7 +150,7 @@ func (c *Client[T]) GetMany(keys []string) map[string]T {
 	return records
 }

-// GetManyKeyFn follows the same API as GetFetchBatch and PassthroughBatch.
+// GetManyKeyFn follows the same API as GetOrFetchBatch and PassthroughBatch.
 // You provide it with a slice of IDs and a keyFn, which is applied to create
 // the cache key. The returned map uses the IDs as keys instead of the cache key.
 // If you've used ScanKeys to retrieve the actual keys, you can retrieve the records
@@ -211,7 +190,7 @@ func (c *Client[T]) SetMany(records map[string]T) bool {
 	return triggeredEviction
 }

-// SetManyKeyFn follows the same API as GetFetchBatch and PassThroughBatch. It
+// SetManyKeyFn follows the same API as GetOrFetchBatch and PassThroughBatch. It
 // takes a map of records where the keyFn is applied to each key in the map
 // before it's stored in the cache.
 func (c *Client[T]) SetManyKeyFn(records map[string]T, cacheKeyFn KeyFn) bool {
```
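The MetricsRecorder interface deleted above has presumably moved to another file in this commit (not rendered here) and been superseded in Config by DistributedMetricsRecorder. As a reference point, here is a sketch of a recorder that satisfies the deleted method set exactly as it appears in the diff; the distributed additions are not visible, so they're omitted:

```go
package metrics

import "sync/atomic"

// AtomicRecorder satisfies the MetricsRecorder method set that this commit
// removes from cache.go. A real recorder would forward these signals to a
// metrics backend instead of plain counters.
type AtomicRecorder struct {
	hits, misses, evictions, forced int64
}

func (r *AtomicRecorder) CacheHit()       { atomic.AddInt64(&r.hits, 1) }
func (r *AtomicRecorder) CacheMiss()      { atomic.AddInt64(&r.misses, 1) }
func (r *AtomicRecorder) Eviction()       { atomic.AddInt64(&r.evictions, 1) }
func (r *AtomicRecorder) ForcedEviction() { atomic.AddInt64(&r.forced, 1) }

// The remaining methods are no-ops in this sketch.
func (r *AtomicRecorder) EntriesEvicted(int)                   {}
func (r *AtomicRecorder) ShardIndex(int)                       {}
func (r *AtomicRecorder) CacheBatchRefreshSize(size int)       {}
func (r *AtomicRecorder) ObserveCacheSize(callback func() int) {}
```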

cache_test.go

Lines changed: 5 additions & 1 deletion

```diff
@@ -41,7 +41,10 @@ func TestShardDistribution(t *testing.T) {
 		t.Run(tc.name, func(t *testing.T) {
 			t.Parallel()
 			recorder := newTestMetricsRecorder(tc.numShards)
-			c := sturdyc.New[string](tc.capacity, tc.numShards, time.Hour, 5, sturdyc.WithMetrics(recorder))
+			c := sturdyc.New[string](tc.capacity, tc.numShards, time.Hour, 5,
+				sturdyc.WithNoContinuousEvictions(),
+				sturdyc.WithMetrics(recorder),
+			)
 			for i := 0; i < tc.capacity; i++ {
 				key := randKey(tc.keyLength)
 				c.Set(key, "value")
@@ -144,6 +147,7 @@ func TestForcedEvictions(t *testing.T) {
 			time.Hour,
 			tc.evictionPercentage,
 			sturdyc.WithMetrics(recorder),
+			sturdyc.WithNoContinuousEvictions(),
 		)

 		// Start by filling the sturdyc.
```
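Note that both tests keep passing a plain test recorder to WithMetrics, which suggests the option still accepts the old MetricsRecorder shape even though Config now stores a DistributedMetricsRecorder (presumably wrapped internally). A hypothetical usage sketch, reusing the AtomicRecorder from the cache.go notes above:

```go
package main

import (
	"time"

	"github.com/creativecreature/sturdyc" // import path assumed
)

func main() {
	// AtomicRecorder is the sketch from the cache.go notes; any type
	// satisfying the metrics interface would do here.
	recorder := &AtomicRecorder{}
	c := sturdyc.New[string](1_000, 10, time.Hour, 5,
		// Mirrors the test setup: disable the background sweeper so that
		// only forced evictions are recorded.
		sturdyc.WithNoContinuousEvictions(),
		sturdyc.WithMetrics(recorder),
	)
	c.Set("key", "value")
}
```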
