
Commit d368220: Synchronous early refreshes (WIP) (#25)

1 parent: 35c6d56

26 files changed: +1944 -531 lines
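
The headline change: sturdyc.WithEarlyRefreshes now takes a fourth duration, a synchronous refresh threshold that sits between the asynchronous refresh window and the TTL. Below is a minimal sketch of the updated call, inferred from the diffs in this commit; the values and the import path are illustrative, not part of the commit:

package main

import (
	"time"

	"github.com/viccon/sturdyc" // illustrative import path; adjust to the module path you use
)

func main() {
	// Illustrative placeholder values.
	capacity := 10_000
	numShards := 10
	ttl := 2 * time.Hour
	evictionPercentage := 10

	// Records older than minRefreshDelay become candidates for a background
	// refresh (the exact moment is spread out up to maxRefreshDelay). Once a
	// record is older than synchronousRefreshDelay, the refresh instead
	// happens in the foreground, blocking the caller until fresh data arrives.
	minRefreshDelay := 10 * time.Second
	maxRefreshDelay := 30 * time.Second
	synchronousRefreshDelay := 30 * time.Minute
	retryBaseDelay := 10 * time.Millisecond

	cacheClient := sturdyc.New[string](capacity, numShards, ttl, evictionPercentage,
		sturdyc.WithEarlyRefreshes(minRefreshDelay, maxRefreshDelay, synchronousRefreshDelay, retryBaseDelay),
	)
	_ = cacheClient
}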

README.md

Lines changed: 765 additions & 380 deletions
Large diffs are not rendered by default.

buffer_test.go

Lines changed: 12 additions & 6 deletions
@@ -20,6 +20,7 @@ func TestBatchIsRefreshedWhenTheTimeoutExpires(t *testing.T) {
     evictionPercentage := 10
     minRefreshDelay := time.Minute * 5
     maxRefreshDelay := time.Minute * 10
+    synchronousRefreshDelay := time.Minute * 30
     refreshRetryInterval := time.Millisecond * 10
     batchSize := 10
     batchBufferTimeout := time.Minute
@@ -34,7 +35,7 @@ func TestBatchIsRefreshedWhenTheTimeoutExpires(t *testing.T) {
     // 2. The 'batchBufferTimeout' threshold is exceeded.
     client := sturdyc.New[string](capacity, numShards, ttl, evictionPercentage,
         sturdyc.WithNoContinuousEvictions(),
-        sturdyc.WithEarlyRefreshes(minRefreshDelay, maxRefreshDelay, refreshRetryInterval),
+        sturdyc.WithEarlyRefreshes(minRefreshDelay, maxRefreshDelay, synchronousRefreshDelay, refreshRetryInterval),
         sturdyc.WithMissingRecordStorage(),
         sturdyc.WithRefreshCoalescing(batchSize, batchBufferTimeout),
         sturdyc.WithClock(clock),
@@ -86,6 +87,7 @@ func TestBatchIsRefreshedWhenTheBufferSizeIsReached(t *testing.T) {
     ttl := time.Hour
     minRefreshDelay := time.Minute * 5
     maxRefreshDelay := time.Minute * 10
+    synchronousRefreshDelay := time.Minute * 30
     refreshRetryInterval := time.Millisecond * 10
     batchSize := 10
     batchBufferTimeout := time.Minute
@@ -100,7 +102,7 @@ func TestBatchIsRefreshedWhenTheBufferSizeIsReached(t *testing.T) {
     // 2. The 'batchBufferTimeout' threshold is exceeded.
     client := sturdyc.New[string](capacity, numShards, ttl, evictionPercentage,
         sturdyc.WithNoContinuousEvictions(),
-        sturdyc.WithEarlyRefreshes(minRefreshDelay, maxRefreshDelay, refreshRetryInterval),
+        sturdyc.WithEarlyRefreshes(minRefreshDelay, maxRefreshDelay, synchronousRefreshDelay, refreshRetryInterval),
         sturdyc.WithMissingRecordStorage(),
         sturdyc.WithRefreshCoalescing(batchSize, batchBufferTimeout),
         sturdyc.WithClock(clock),
@@ -180,6 +182,7 @@ func TestBatchIsNotRefreshedByDuplicates(t *testing.T) {
     evictionPercentage := 10
     minRefreshDelay := time.Minute * 5
     maxRefreshDelay := time.Minute * 10
+    synchronousRefreshDelay := time.Minute * 30
     refreshRetryInterval := time.Millisecond * 10
     batchSize := 10
     batchBufferTimeout := time.Minute
@@ -194,7 +197,7 @@ func TestBatchIsNotRefreshedByDuplicates(t *testing.T) {
     // 2. The 'batchBufferTimeout' threshold is exceeded.
     client := sturdyc.New[string](capacity, numShards, ttl, evictionPercentage,
         sturdyc.WithNoContinuousEvictions(),
-        sturdyc.WithEarlyRefreshes(minRefreshDelay, maxRefreshDelay, refreshRetryInterval),
+        sturdyc.WithEarlyRefreshes(minRefreshDelay, maxRefreshDelay, synchronousRefreshDelay, refreshRetryInterval),
         sturdyc.WithMissingRecordStorage(),
         sturdyc.WithRefreshCoalescing(batchSize, batchBufferTimeout),
         sturdyc.WithClock(clock),
@@ -250,6 +253,7 @@ func TestBatchesAreGroupedByPermutations(t *testing.T) {
     evictionPercentage := 15
     minRefreshDelay := time.Minute * 5
     maxRefreshDelay := time.Minute * 10
+    synchronousRefreshDelay := time.Minute * 30
     refreshRetryInterval := time.Millisecond * 10
     batchSize := 5
     batchBufferTimeout := time.Minute
@@ -264,7 +268,7 @@ func TestBatchesAreGroupedByPermutations(t *testing.T) {
     // 2. The 'batchBufferTimeout' threshold is exceeded.
     c := sturdyc.New[any](capacity, numShards, ttl, evictionPercentage,
         sturdyc.WithNoContinuousEvictions(),
-        sturdyc.WithEarlyRefreshes(minRefreshDelay, maxRefreshDelay, refreshRetryInterval),
+        sturdyc.WithEarlyRefreshes(minRefreshDelay, maxRefreshDelay, synchronousRefreshDelay, refreshRetryInterval),
         sturdyc.WithMissingRecordStorage(),
         sturdyc.WithRefreshCoalescing(batchSize, batchBufferTimeout),
         sturdyc.WithClock(clock),
@@ -339,6 +343,7 @@ func TestLargeBatchesAreChunkedCorrectly(t *testing.T) {
     evictionPercentage := 23
     minRefreshDelay := time.Minute * 5
     maxRefreshDelay := time.Minute * 10
+    synchronousRefreshDelay := time.Minute * 30
     refreshRetryInterval := time.Millisecond * 10
     batchSize := 5
     batchBufferTimeout := time.Minute
@@ -353,7 +358,7 @@ func TestLargeBatchesAreChunkedCorrectly(t *testing.T) {
     // 2. The 'batchBufferTimeout' threshold is exceeded.
     client := sturdyc.New[string](capacity, numShards, ttl, evictionPercentage,
         sturdyc.WithNoContinuousEvictions(),
-        sturdyc.WithEarlyRefreshes(minRefreshDelay, maxRefreshDelay, refreshRetryInterval),
+        sturdyc.WithEarlyRefreshes(minRefreshDelay, maxRefreshDelay, synchronousRefreshDelay, refreshRetryInterval),
         sturdyc.WithMissingRecordStorage(),
         sturdyc.WithRefreshCoalescing(batchSize, batchBufferTimeout),
         sturdyc.WithClock(clock),
@@ -401,6 +406,7 @@ func TestValuesAreUpdatedCorrectly(t *testing.T) {
     evictionPercentage := 10
     minRefreshDelay := time.Minute * 5
     maxRefreshDelay := time.Minute * 10
+    synchronousRefreshDelay := time.Minute * 50
     refreshRetryInterval := time.Millisecond * 10
     batchSize := 10
     batchBufferTimeout := time.Minute
@@ -415,7 +421,7 @@ func TestValuesAreUpdatedCorrectly(t *testing.T) {
     // 2. The 'batchBufferTimeout' threshold is exceeded.
     client := sturdyc.New[any](capacity, numShards, ttl, evictionPercentage,
         sturdyc.WithNoContinuousEvictions(),
-        sturdyc.WithEarlyRefreshes(minRefreshDelay, maxRefreshDelay, refreshRetryInterval),
+        sturdyc.WithEarlyRefreshes(minRefreshDelay, maxRefreshDelay, synchronousRefreshDelay, refreshRetryInterval),
         sturdyc.WithMissingRecordStorage(),
         sturdyc.WithRefreshCoalescing(batchSize, batchBufferTimeout),
         sturdyc.WithClock(clock),

cache.go

Lines changed: 10 additions & 9 deletions
@@ -29,9 +29,10 @@ type Config struct {
     metricsRecorder DistributedMetricsRecorder
     log             Logger

-    refreshInBackground bool
-    minRefreshTime      time.Duration
-    maxRefreshTime      time.Duration
+    earlyRefreshes      bool
+    minAsyncRefreshTime time.Duration
+    maxAsyncRefreshTime time.Duration
+    syncRefreshTime     time.Duration
     retryBaseDelay      time.Duration
     storeMissingRecords bool

@@ -127,11 +128,11 @@ func (c *Client[T]) getShard(key string) *shard[T] {
 // getWithState retrieves a single value from the cache and returns additional
 // information about the state of the record. The state includes whether the record
 // exists, if it has been marked as missing, and if it is due for a refresh.
-func (c *Client[T]) getWithState(key string) (value T, exists, markedAsMissing, refresh bool) {
+func (c *Client[T]) getWithState(key string) (value T, exists, markedAsMissing, backgroundRefresh, synchronousRefresh bool) {
     shard := c.getShard(key)
-    val, exists, markedAsMissing, refresh := shard.get(key)
-    c.reportCacheHits(exists, markedAsMissing, refresh)
-    return val, exists, markedAsMissing, refresh
+    val, exists, markedAsMissing, backgroundRefresh, synchronousRefresh := shard.get(key)
+    c.reportCacheHits(exists, markedAsMissing, backgroundRefresh, synchronousRefresh)
+    return val, exists, markedAsMissing, backgroundRefresh, synchronousRefresh
 }

 // Get retrieves a single value from the cache.
@@ -145,8 +146,8 @@ func (c *Client[T]) getWithState(key string) (value T, exists, markedAsMissing,
 // The value corresponding to the key and a boolean indicating if the value was found.
 func (c *Client[T]) Get(key string) (T, bool) {
     shard := c.getShard(key)
-    val, ok, markedAsMissing, refresh := shard.get(key)
-    c.reportCacheHits(ok, markedAsMissing, refresh)
+    val, ok, markedAsMissing, backgroundRefresh, synchronousRefresh := shard.get(key)
+    c.reportCacheHits(ok, markedAsMissing, backgroundRefresh, synchronousRefresh)
     return val, ok && !markedAsMissing
 }
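
The getWithState change splits the old refresh flag in two: a record can now be due for a background refresh or for a synchronous one. The sketch below is a hypothetical illustration of how a fetch path could act on the two flags; it is not the library's implementation, and the interface and error values are stand-ins:

package sketch

import "errors"

// Hypothetical stand-ins for the package's sentinel errors.
var (
	errMissingRecord     = errors.New("record marked as missing")
	errOnlyCachedRecords = errors.New("only cached records available")
)

// stateGetter mirrors the shape of the unexported getWithState method
// after this commit. The interface itself is hypothetical.
type stateGetter[T any] interface {
	getWithState(key string) (value T, exists, markedAsMissing, backgroundRefresh, synchronousRefresh bool)
}

// getOrFetchSketch shows the intended difference between the flags: a
// synchronous refresh blocks the caller, while a background refresh
// serves the cached value immediately.
func getOrFetchSketch[T any](c stateGetter[T], key string, fetch func() (T, error)) (T, error) {
	value, exists, missing, backgroundRefresh, synchronousRefresh := c.getWithState(key)
	switch {
	case exists && missing:
		return value, errMissingRecord
	case exists && synchronousRefresh:
		// Too stale for a background refresh: fetch in the foreground
		// and fall back to the cached value if the fetch fails.
		fresh, err := fetch()
		if err != nil {
			return value, errOnlyCachedRecords
		}
		return fresh, nil
	case exists && backgroundRefresh:
		go func() { _, _ = fetch() }() // refresh without blocking the caller
		return value, nil
	case exists:
		return value, nil
	default:
		return fetch()
	}
}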

distribution_test.go

Lines changed: 15 additions & 13 deletions
@@ -156,7 +156,7 @@ func TestDistributedStorage(t *testing.T) {
     fetchObserver.AssertFetchCount(t, 1)
     fetchObserver.Clear()

-    // The keys are written asynchonously to the distributed storage.
+    // The keys are written asynchronously to the distributed storage.
     time.Sleep(100 * time.Millisecond)
     distributedStorage.assertRecord(t, key)
     distributedStorage.assertGetCount(t, 1)
@@ -177,7 +177,7 @@ func TestDistributedStorage(t *testing.T) {
         t.Errorf("expected valuekey1, got %s", res)
     }

-    // The keys are written asynchonously to the distributed storage.
+    // The keys are written asynchronously to the distributed storage.
     time.Sleep(100 * time.Millisecond)
     fetchObserver.AssertFetchCount(t, 1)
     distributedStorage.assertGetCount(t, 2)
@@ -411,7 +411,7 @@ func TestDistributedStorageBatch(t *testing.T) {
     fetchObserver.AssertFetchCount(t, 1)
     fetchObserver.Clear()

-    // The keys are written asynchonously to the distributed storage.
+    // The keys are written asynchronously to the distributed storage.
     time.Sleep(100 * time.Millisecond)
     distributedStorage.assertRecords(t, firstBatchOfIDs, keyFn)
     distributedStorage.assertGetCount(t, 1)
@@ -444,7 +444,7 @@ func TestDistributedStorageBatch(t *testing.T) {
     fetchObserver.AssertRequestedRecords(t, []string{"4", "5", "6"})
     fetchObserver.AssertFetchCount(t, 2)

-    // The keys are written asynchonously to the distributed storage.
+    // The keys are written asynchronously to the distributed storage.
     time.Sleep(100 * time.Millisecond)
     distributedStorage.assertRecords(t, secondBatchOfIDs, keyFn)
     distributedStorage.assertGetCount(t, 2)
@@ -480,7 +480,7 @@ func TestDistributedStaleStorageBatch(t *testing.T) {
     fetchObserver.AssertFetchCount(t, 1)
     fetchObserver.Clear()

-    // The keys are written asynchonously to the distributed storage.
+    // The keys are written asynchronously to the distributed storage.
     time.Sleep(100 * time.Millisecond)
     distributedStorage.assertRecords(t, firstBatchOfIDs, keyFn)
     distributedStorage.assertGetCount(t, 1)
@@ -546,7 +546,7 @@ func TestDistributedStorageBatchDeletes(t *testing.T) {
     fetchObserver.AssertFetchCount(t, 1)
     fetchObserver.Clear()

-    // The keys are written asynchonously to the distributed storage.
+    // The keys are written asynchronously to the distributed storage.
     time.Sleep(100 * time.Millisecond)
     distributedStorage.assertRecords(t, batchOfIDs, keyFn)
     distributedStorage.assertGetCount(t, 1)
@@ -578,7 +578,7 @@ func TestDistributedStorageBatchDeletes(t *testing.T) {
     fetchObserver.AssertRequestedRecords(t, batchOfIDs)
     fetchObserver.AssertFetchCount(t, 2)

-    // The keys are written asynchonously to the distributed storage.
+    // The keys are written asynchronously to the distributed storage.
     time.Sleep(100 * time.Millisecond)
     distributedStorage.assertRecords(t, []string{"1", "2"}, keyFn)
     distributedStorage.assertGetCount(t, 2)
@@ -615,7 +615,7 @@ func TestDistributedStorageBatchConvertsToMissingRecord(t *testing.T) {
     fetchObserver.AssertFetchCount(t, 1)
     fetchObserver.Clear()

-    // The keys are written asynchonously to the distributed storage.
+    // The keys are written asynchronously to the distributed storage.
     time.Sleep(100 * time.Millisecond)
     distributedStorage.assertRecords(t, batchOfIDs, keyFn)
     distributedStorage.assertGetCount(t, 1)
@@ -648,7 +648,7 @@ func TestDistributedStorageBatchConvertsToMissingRecord(t *testing.T) {
     fetchObserver.AssertFetchCount(t, 2)
     fetchObserver.Clear()

-    // The keys are written asynchonously to the distributed storage.
+    // The keys are written asynchronously to the distributed storage.
     time.Sleep(100 * time.Millisecond)
     distributedStorage.assertRecords(t, []string{"1", "2"}, keyFn)
     distributedStorage.assertGetCount(t, 2)
@@ -675,7 +675,8 @@ func TestDistributedStorageBatchConvertsToMissingRecord(t *testing.T) {
     fetchObserver.AssertRequestedRecords(t, batchOfIDs)
     fetchObserver.AssertFetchCount(t, 3)

-    // The keys are written asynchonously to the distributed storage.
+    // The keys are written asynchronously to the distributed storage.
+    time.Sleep(100 * time.Millisecond)
     time.Sleep(100 * time.Millisecond)
     distributedStorage.assertRecords(t, batchOfIDs, keyFn)
     distributedStorage.assertGetCount(t, 3)
@@ -727,7 +728,7 @@ func TestDistributedStorageDoesNotCachePartialResponseAsMissingRecords(t *testing.T) {
     fetchObserver.AssertFetchCount(t, 1)
     fetchObserver.Clear()

-    // The keys are written asynchonously to the distributed storage.
+    // The keys are written asynchronously to the distributed storage.
     time.Sleep(100 * time.Millisecond)
     distributedStorage.assertRecords(t, batchOfIDs, keyFn)
     distributedStorage.assertGetCount(t, 1)
@@ -779,6 +780,7 @@ func TestPartialResponseForRefreshesDoesNotResultInMissingRecords(t *testing.T) {
     ttl := time.Hour
     minRefreshDelay := time.Minute * 5
     maxRefreshDelay := time.Minute * 10
+    synchronousRefreshDelay := time.Minute * 30
     refreshRetryInterval := time.Millisecond * 10
     batchSize := 10
     batchBufferTimeout := time.Minute
@@ -788,7 +790,7 @@ func TestPartialResponseForRefreshesDoesNotResultInMissingRecords(t *testing.T) {

     c := sturdyc.New[string](capacity, numShards, ttl, evictionPercentage,
         sturdyc.WithNoContinuousEvictions(),
-        sturdyc.WithEarlyRefreshes(minRefreshDelay, maxRefreshDelay, refreshRetryInterval),
+        sturdyc.WithEarlyRefreshes(minRefreshDelay, maxRefreshDelay, synchronousRefreshDelay, refreshRetryInterval),
         sturdyc.WithMissingRecordStorage(),
         sturdyc.WithRefreshCoalescing(batchSize, batchBufferTimeout),
         sturdyc.WithDistributedStorageEarlyRefreshes(distributedStorage, refreshAfter),
@@ -816,7 +818,7 @@ func TestPartialResponseForRefreshesDoesNotResultInMissingRecords(t *testing.T) {
     fetchObserver.AssertRequestedRecords(t, ids)
     fetchObserver.Clear()

-    // We need to add a sleep because the keys are written asynchonously to the
+    // We need to add a sleep because the keys are written asynchronously to the
     // distributed storage. We expect that the distributed storage was queried
     // for the ids before we went to the underlying data source, and then written
     // to when it resulted in a cache miss and the data was in fact fetched.

errors.go

Lines changed: 8 additions & 4 deletions
@@ -20,10 +20,14 @@ var (
     // ErrMissingRecord is returned by client.GetOrFetch and client.Passthrough when a record has been marked
     // as missing. The cache will still try to refresh the record in the background if it's being requested.
     ErrMissingRecord = errors.New("sturdyc: the record has been marked as missing in the cache")
-    // ErrOnlyCachedRecords is returned by client.GetOrFetchBatch and client.PassthroughBatch
-    // when some of the requested records are available in the cache, but the attempt to
-    // fetch the remaining records failed. As the consumer, you can then decide whether to
-    // proceed with the cached records or if the entire batch is necessary.
+    // ErrOnlyCachedRecords is returned by client.GetOrFetchBatch and
+    // client.PassthroughBatch when some of the requested records are available
+    // in the cache, but the attempt to fetch the remaining records failed. It
+    // may also be returned when you're using the WithEarlyRefreshes
+    // functionality, and the call to synchronously refresh a record failed. The
+    // cache will then give you the latest data it has cached, and you as the
+    // consumer can then decide whether to proceed with the cached records or if
+    // the newest data is necessary.
     ErrOnlyCachedRecords = errors.New("sturdyc: failed to fetch the records that were not in the cache")
     // ErrInvalidType is returned when you try to use one of the generic
     // package level functions but the type assertion fails.
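
The expanded comment changes how callers should treat this error: it can now also mean that a synchronous refresh failed and the returned map holds the latest cached values. A hypothetical handling sketch; it assumes sturdyc's exported KeyFn and BatchFetchFn types match the GetOrFetchBatch signature, and the Order type and import path are illustrative:

package orders

import (
	"context"
	"errors"

	"github.com/viccon/sturdyc" // illustrative import path
)

type Order struct{ Status string }

// getOrders degrades gracefully: if only cached records could be served
// (including, after this commit, when a synchronous refresh fails), use them.
func getOrders(ctx context.Context, cache *sturdyc.Client[Order], ids []string,
	keyFn sturdyc.KeyFn, fetchFn sturdyc.BatchFetchFn[Order],
) (map[string]Order, error) {
	records, err := cache.GetOrFetchBatch(ctx, ids, keyFn, fetchFn)
	if errors.Is(err, sturdyc.ErrOnlyCachedRecords) {
		return records, nil // stale but usable
	}
	return records, err
}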

examples/basic/main.go

Lines changed: 4 additions & 2 deletions
@@ -60,8 +60,10 @@ func demonstrateGetOrFetchBatch(cacheClient *sturdyc.Client[int]) {
         {"11", "12", "13", "14", "15"},
     }

-    // We'll use a cache key function to add a prefix to the IDs. If we only used
-    // the IDs, we wouldn't be able to fetch the same IDs from multiple data sources.
+    // We are going to pass a cache a key function that prefixes each id with
+    // the string "my-data-source", and adds an -ID- separator before the actual
+    // id. This makes it possible to save the same id for different data
+    // sources as the keys would look something like this: my-data-source-ID-1
     keyPrefixFn := cacheClient.BatchKeyFn("my-data-source")

     // Request the keys for each batch.
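
Based on the key format described in the new comment, a quick way to inspect the generated keys. This snippet is hypothetical: it assumes the import path and that BatchKeyFn joins the prefix and the id with an -ID- separator, as the comment states:

package main

import (
	"fmt"
	"time"

	"github.com/viccon/sturdyc" // illustrative import path
)

func main() {
	cacheClient := sturdyc.New[int](10_000, 10, time.Hour, 10)
	keyPrefixFn := cacheClient.BatchKeyFn("my-data-source")
	// Per the comment above, this should print something like: my-data-source-ID-1
	fmt.Println(keyPrefixFn("1"))
}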

examples/batch/main.go

Lines changed: 7 additions & 3 deletions
@@ -20,8 +20,10 @@ func NewAPI(c *sturdyc.Client[string]) *API {
 }

 func (a *API) GetBatch(ctx context.Context, ids []string) (map[string]string, error) {
-    // We are going to pass the cache a key function that prefixes each id.
-    // This makes it possible to save the same id for different data sources.
+    // We are going to pass a cache a key function that prefixes each id with
+    // the string "some-prefix", and adds an -ID- separator before the actual
+    // id. This makes it possible to save the same id for different data
+    // sources as the keys would look something like this: some-prefix-ID-1
     cacheKeyFn := a.BatchKeyFn("some-prefix")

     // The fetchFn is only going to retrieve the IDs that are not in the cache.
@@ -54,12 +56,14 @@ func main() {
     // used to spread out the refreshes for entries evenly over time.
     minRefreshDelay := time.Second
     maxRefreshDelay := time.Second * 2
+    // Set a synchronous refresh delay for when we want a refresh to happen synchronously.
+    synchronousRefreshDelay := time.Second * 30
     // The base for exponential backoff when retrying a refresh.
     retryBaseDelay := time.Millisecond * 10

     // Create a cache client with the specified configuration.
     cacheClient := sturdyc.New[string](capacity, numShards, ttl, evictionPercentage,
-        sturdyc.WithEarlyRefreshes(minRefreshDelay, maxRefreshDelay, retryBaseDelay),
+        sturdyc.WithEarlyRefreshes(minRefreshDelay, maxRefreshDelay, synchronousRefreshDelay, retryBaseDelay),
     )

     // Create a new API instance with the cache client.
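
The example now wires four durations together. Judging by the parameter names and the commit message, they are meant to be ordered so that a record graduates from background refreshes to a blocking, synchronous refresh as it ages. A small sanity-check sketch of that assumed ordering; the ttl value is illustrative and the ordering itself is inferred, not stated by the library:

package main

import "time"

func main() {
	// Values from the example above, plus an illustrative ttl. The assumed
	// lifecycle: a record first becomes eligible for background refreshes,
	// and only once it is much older does a read block on a synchronous
	// refresh; past the ttl it is evicted entirely.
	minRefreshDelay := time.Second
	maxRefreshDelay := time.Second * 2
	synchronousRefreshDelay := time.Second * 30
	ttl := time.Minute // illustrative
	if !(minRefreshDelay <= maxRefreshDelay &&
		maxRefreshDelay < synchronousRefreshDelay &&
		synchronousRefreshDelay < ttl) {
		panic("expected: minRefreshDelay <= maxRefreshDelay < synchronousRefreshDelay < ttl")
	}
}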

examples/buffering/main.go

Lines changed: 4 additions & 2 deletions
@@ -23,7 +23,7 @@ func NewOrderAPI(client *sturdyc.Client[string]) *OrderAPI {
 }

 func (a *OrderAPI) OrderStatus(ctx context.Context, ids []string, opts OrderOptions) (map[string]string, error) {
-    // We use the PermutedBatchKeyFn when an ID isn't enough to uniquely identify a
+    // We use the PermutedBatchKeyFn when an ID isn't enough to uniquely identify a
     // record. The cache is going to store each id once per set of options. In a more
     // realistic scenario, the opts would be query params or arguments to a DB query.
     cacheKeyFn := a.PermutatedBatchKeyFn("key", opts)
@@ -56,6 +56,8 @@ func main() {
     // used to spread out the refreshes for entries evenly over time.
     minRefreshDelay := time.Second
     maxRefreshDelay := time.Second * 2
+    // Set a synchronous refresh delay for when we want a refresh to happen synchronously.
+    synchronousRefreshDelay := time.Second * 30
     // The base for exponential backoff when retrying a refresh.
     retryBaseDelay := time.Millisecond * 10
     // Whether to store misses in the sturdyc. This can be useful to
@@ -68,7 +70,7 @@ func main() {

     // Create a new cache client with the specified configuration.
     cacheClient := sturdyc.New[string](capacity, numShards, ttl, evictionPercentage,
-        sturdyc.WithEarlyRefreshes(minRefreshDelay, maxRefreshDelay, retryBaseDelay),
+        sturdyc.WithEarlyRefreshes(minRefreshDelay, maxRefreshDelay, synchronousRefreshDelay, retryBaseDelay),
         sturdyc.WithRefreshCoalescing(batchSize, batchBufferTimeout),
     )

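For the PermutatedBatchKeyFn mentioned in the comment: each distinct options struct should produce a distinct key for the same id, so the records never collide. A hypothetical illustration; the OrderOptions fields and the import path are made up for this example:

package main

import (
	"fmt"
	"time"

	"github.com/viccon/sturdyc" // illustrative import path
)

// OrderOptions is a stand-in for the options struct used by the example.
type OrderOptions struct {
	CarrierName        string
	LatestDeliveryTime string
}

func main() {
	c := sturdyc.New[string](10_000, 10, time.Hour, 10)
	// The same id cached under two different option sets should yield
	// two different keys.
	fedex := c.PermutatedBatchKeyFn("key", OrderOptions{CarrierName: "FEDEX", LatestDeliveryTime: "2024-07-01"})
	dhl := c.PermutatedBatchKeyFn("key", OrderOptions{CarrierName: "DHL", LatestDeliveryTime: "2024-07-01"})
	fmt.Println(fedex("123") != dhl("123")) // expected: true
}
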
examples/distributed-early-refreshes/client.go

Lines changed: 5 additions & 4 deletions
@@ -19,9 +19,10 @@ const (

 // Configuration for the early in-memory refreshes.
 const (
-    minRefreshTime = 2 * time.Second
-    maxRefreshTime = 4 * time.Second
-    retryBaseDelay = 5 * time.Second
+    minRefreshTime         = 2 * time.Second
+    maxRefreshTime         = 4 * time.Second
+    synchronousRefreshTime = 30 * time.Second
+    retryBaseDelay         = 5 * time.Second
 )

 // Configuration for the refresh coalescing.
@@ -36,7 +37,7 @@ const refreshAfter = time.Second
 func newAPIClient(distributedStorage sturdyc.DistributedStorageWithDeletions) *apiClient {
     return &apiClient{
         cache: sturdyc.New[any](capacity, numberOfShards, ttl, percentageOfRecordsToEvictWhenFull,
-            sturdyc.WithEarlyRefreshes(minRefreshTime, maxRefreshTime, retryBaseDelay),
+            sturdyc.WithEarlyRefreshes(minRefreshTime, maxRefreshTime, synchronousRefreshTime, retryBaseDelay),
             sturdyc.WithRefreshCoalescing(idealBufferSize, bufferTimeout),
             sturdyc.WithDistributedStorageEarlyRefreshes(distributedStorage, refreshAfter),
             // NOTE: Uncommenting this line will make the cache mark the records as
