Commit 4165779

CLDSRV-783: Remove deprecated/unneeded config fields
Remove the syncIntervalMs and reconciliationWeight fields that were used by the old reconciliation approach. Token reservation uses a fixed 100ms refill interval instead.

What each file changes:
- config.json - Removed syncIntervalMs: 1000
- config.js - Removed parsing for syncIntervalMs and reconciliationWeight
- tokenBucket.js - Linting fixes (line length, return statements, braces)
- cache.js - Updated comments
- tests/config.js - Updated interval calculations for node-level quotas
- tests/gcra.js - Updated tests showing workers are ignored
- tests/helpers.js - Pre-populate token buckets, update assertions
1 parent 2ecccea commit 4165779
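For orientation, here is a rough sketch of what a rateLimiting block might look like after this commit, with syncIntervalMs and reconciliationWeight removed. The nesting and the limit key are inferred from the code paths touched in this diff (config.rateLimiting.nodes, bucket.defaultConfig.requestsPerSecond.burstCapacity, configCacheTTL, error.statusCode/message), so treat the exact shape as an assumption rather than the canonical config.json schema.

// Hypothetical shape only, not the canonical config.json schema.
const rateLimiting = {
    nodes: 10, // CloudServer instances sharing the global limit
    configCacheTTL: 60000,
    bucket: {
        defaultConfig: {
            requestsPerSecond: {
                limit: 1000, // hypothetical key for the global req/s limit
                burstCapacity: 2, // seconds of burst (see the config.js doc update below)
            },
        },
    },
    error: {
        statusCode: 429,
        message: 'Please reduce your request rate',
    },
    // syncIntervalMs and reconciliationWeight are gone: refills run on a fixed 100ms cadence in code.
};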

File tree

6 files changed (+298, -72)


lib/api/apiUtils/rateLimit/cache.js

Lines changed: 151 additions & 0 deletions
@@ -2,6 +2,14 @@ const counters = new Map();
 
 const configCache = new Map();
 
+// Dirty tracking for Redis synchronization
+const dirtyCounters = new Set();
+const lastSyncedValues = new Map();
+
+// Load tracking for adaptive burst capacity
+// Map<bucketKey, Array<timestamp>> - rolling 1-second window
+const requestTimestamps = new Map();
+
 function setCounter(key, value) {
     // Make sure that the Map remains in order
     // Counters expiring soonest will be first during iteration.
@@ -63,16 +71,159 @@ function expireCachedConfigs(now) {
     return toRemove.length;
 }
 
+/**
+ * Mark a counter as dirty (needs Redis sync)
+ * @param {string} key - Counter key
+ */
+function markDirty(key) {
+    dirtyCounters.add(key);
+}
+
+/**
+ * Get all dirty counter keys
+ * @returns {string[]} Array of dirty keys
+ */
+function getDirtyKeys() {
+    return Array.from(dirtyCounters);
+}
+
+/**
+ * Clear all dirty counter tracking
+ */
+function clearDirtyKeys() {
+    dirtyCounters.clear();
+}
+
+/**
+ * Get last synced value for a counter
+ * @param {string} key - Counter key
+ * @returns {number|undefined} Last synced emptyAt timestamp
+ */
+function getLastSyncedValue(key) {
+    return lastSyncedValues.get(key);
+}
+
+/**
+ * Set last synced value for a counter
+ * @param {string} key - Counter key
+ * @param {number} value - emptyAt timestamp
+ */
+function setLastSyncedValue(key, value) {
+    lastSyncedValues.set(key, value);
+}
+
+/**
+ * Remove last synced value tracking for a key
+ * @param {string} key - Counter key
+ */
+function deleteLastSyncedValue(key) {
+    lastSyncedValues.delete(key);
+}
+
+/**
+ * Record a request for load tracking
+ * @param {string} key - Bucket key (e.g., "bucket:mybucket:rps")
+ */
+function recordRequest(key) {
+    const now = Date.now();
+    let timestamps = requestTimestamps.get(key);
+
+    if (!timestamps) {
+        timestamps = [];
+        requestTimestamps.set(key, timestamps);
+    }
+
+    timestamps.push(now);
+
+    // Remove timestamps older than 1 second
+    const cutoff = now - 1000;
+    while (timestamps.length > 0 && timestamps[0] < cutoff) {
+        timestamps.shift();
+    }
+}
+
+/**
+ * Calculate current request rate for a bucket
+ * @param {string} key - Bucket key
+ * @returns {number} Requests per second in the last 1 second window
+ */
+function getCurrentRate(key) {
+    const timestamps = requestTimestamps.get(key);
+    if (!timestamps || timestamps.length === 0) {
+        return 0;
+    }
+
+    const now = Date.now();
+    const cutoff = now - 1000;
+
+    // Count requests in last second
+    let count = 0;
+    for (let i = timestamps.length - 1; i >= 0; i--) {
+        if (timestamps[i] >= cutoff) {
+            count++;
+        } else {
+            break;
+        }
+    }
+
+    return count;
+}
+
+/**
+ * Calculate load factor (current rate / target rate)
+ * @param {string} key - Bucket key
+ * @param {number} targetRate - Target requests per second
+ * @returns {number} Load factor (1.0 = normal, >1.0 = overload)
+ */
+function getLoadFactor(key, targetRate) {
+    const currentRate = getCurrentRate(key);
+    if (targetRate === 0) {
+        return 0;
+    }
+    return currentRate / targetRate;
+}
+
+/**
+ * Expire request timestamps for cleanup
+ * @param {number} now - Current timestamp
+ */
+function expireRequestTimestamps(now) {
+    const cutoff = now - 2000; // Keep 2 seconds of history
+
+    for (const [key, timestamps] of requestTimestamps.entries()) {
+        while (timestamps.length > 0 && timestamps[0] < cutoff) {
+            timestamps.shift();
+        }
+
+        if (timestamps.length === 0) {
+            requestTimestamps.delete(key);
+        }
+    }
+}
+
 module.exports = {
     setCounter,
     getCounter,
     expireCounters,
     setCachedConfig,
     getCachedConfig,
     expireCachedConfigs,
+    markDirty,
+    getDirtyKeys,
+    clearDirtyKeys,
+    getLastSyncedValue,
+    setLastSyncedValue,
+    deleteLastSyncedValue,
+    recordRequest,
+    getCurrentRate,
+    getLoadFactor,
+    expireRequestTimestamps,
 
     // Do not access directly
     // Used only for tests
     counters,
     configCache,
+    dirtyCounters,
+    lastSyncedValues,
+    requestTimestamps,
 };
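A short usage sketch of the helpers added above. The require path, bucket key, and target rate are illustrative; the function names come from cache.js in this commit.

// Illustrative usage only; path and numbers are assumptions.
const cache = require('./lib/api/apiUtils/rateLimit/cache');

const key = 'bucket:mybucket:rps';

// Load tracking: call once per handled request, then read the rolling 1-second window.
cache.recordRequest(key);
cache.recordRequest(key);
const rate = cache.getCurrentRate(key);     // 2 requests observed in the last second
const load = cache.getLoadFactor(key, 100); // 2 / 100 = 0.02, well under the target rate

// Dirty tracking: mark counters that need to be pushed to Redis, then drain the set.
cache.markDirty(key);
const toSync = cache.getDirtyKeys();        // ['bucket:mybucket:rps']
cache.clearDirtyKeys();

// Periodic cleanup removes timestamps older than 2 seconds and drops empty windows.
cache.expireRequestTimestamps(Date.now());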

lib/api/apiUtils/rateLimit/config.js

Lines changed: 15 additions & 9 deletions
@@ -62,6 +62,9 @@ const { calculateInterval } = require('./gcra');
 *       // Default: "Please reduce your request rate."
 *       "message": "Please reduce your request rate"
 *     }
+ *
+ *     // NOTE: Token reservation refills happen automatically every 100ms.
+ *     // No additional configuration needed for worker coordination.
 *   }
 * }
 *
@@ -87,7 +90,6 @@ const { calculateInterval } = require('./gcra');
 *     }
 *   },
 *   "configCacheTTL": 60000,
- *   "defaultBurstCapacity": 2
 * },
 * "error": {
 *   "statusCode": 429,
@@ -102,17 +104,21 @@ const { calculateInterval } = require('./gcra');
 * 2. Global default config (bucket.defaultConfig) - Fallback
 * 3. No rate limiting (null) - If neither is configured
 *
- * Distributed Rate Limiting:
- * - Total limit is divided across nodes and workers
- * - Formula: actualLimit = limit / nodes / workers
- * - Example: 1000 req/s with 5 nodes and 10 workers = 20 req/s per worker
- * - Set "nodes" to number of CloudServer instances in your deployment
+ * Token Reservation Architecture:
+ * - Workers request tokens in advance from Redis (not per-request)
+ * - Tokens are consumed locally (in-memory, fast)
+ * - Background job refills tokens every 100ms (async, non-blocking)
+ * - Redis enforces node-level quota using GCRA at token grant time
+ * - Busy workers automatically get more tokens (dynamic work-stealing)
+ * - Total limit divided across nodes: limit / nodes = per-node quota
+ * - Example: 1000 req/s with 10 nodes = 100 req/s per node
+ * - Workers on same node share the node quota dynamically
 *
 * Burst Capacity:
 * - Allows temporary spikes above the sustained rate
- * - Value of 1 = no burst (strict rate limiting)
- * - Value of 2 = can handle 2x the rate for short periods
- * - Implemented using GCRA (Generic Cell Rate Algorithm)
+ * - Value of 1 = 1 second of burst (can send 1s worth of requests immediately)
+ * - Value of 2 = 2 seconds of burst (can send 2s worth of requests immediately)
+ * - Enforced atomically in Redis during token grants using GCRA
 */
 
 /**
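A small worked example of the burst semantics described in the updated doc comment. The numbers are illustrative; the millisecond conversion mirrors the tokenBucket.js change below.

// Illustrative numbers only.
const limit = 1000;                                 // global req/s
const nodes = 10;                                   // CloudServer instances
const perNodeRate = limit / nodes;                  // 100 req/s per node (workers are not divided out)
const burstCapacity = 2;                            // "2 seconds of burst"
const burstWindowMs = burstCapacity * 1000;         // 2000ms GCRA burst window handed to Redis
const burstRequests = burstCapacity * perNodeRate;  // roughly 200 requests absorbed at once per node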

lib/api/apiUtils/rateLimit/tokenBucket.js

Lines changed: 7 additions & 3 deletions
@@ -99,7 +99,9 @@ class WorkerTokenBucket {
         const nodes = config.rateLimiting.nodes || 1;
         const workers = config.clusters || 1;
         const interval = calculateInterval(this.limitConfig.limit, nodes, workers);
-        const burstCapacity = (config.rateLimiting.bucket?.defaultConfig?.requestsPerSecond?.burstCapacity || 1) * 1000;
+        const burstCapacitySeconds =
+            config.rateLimiting.bucket?.defaultConfig?.requestsPerSecond?.burstCapacity || 1;
+        const burstCapacity = burstCapacitySeconds * 1000;
 
         // Request tokens from Redis (atomic GCRA enforcement)
         const granted = await new Promise((resolve, reject) => {
@@ -109,8 +111,10 @@
                 interval,
                 burstCapacity,
                 (err, result) => {
-                    if (err) return reject(err);
-                    resolve(result);
+                    if (err) {
+                        return reject(err);
+                    }
+                    return resolve(result);
                 }
             );
         });
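The explicit Promise wrapper above could also be expressed with Node's util.promisify. A minimal sketch, assuming the underlying Redis helper keeps an error-first callback as its last argument; the helper name and leading arguments are placeholders since this hunk does not show them.

const { promisify } = require('util');

// 'requestTokensFromRedis' is a placeholder name, not the real helper in this file.
const requestTokens = promisify(requestTokensFromRedis);

// Inside the same async method, this is equivalent to the new Promise(...) block:
// it rejects on err and resolves with result.
const granted = await requestTokens(/* ...leading args..., */ interval, burstCapacity);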

tests/unit/api/apiUtils/rateLimit/config.js

Lines changed: 10 additions & 8 deletions
@@ -691,12 +691,13 @@ describe('parseRateLimitConfig', () => {
             },
         };
 
-        const result = parseRateLimitConfig(config, 5); // 5 workers
+        const result = parseRateLimitConfig(config, 5); // 5 workers (ignored)
 
-        // Per-worker rate = 100 / 2 nodes / 5 workers = 10 req/s
-        // Interval = 1000ms / 10 = 100ms
+        // NEW BEHAVIOR: Per-NODE rate = 100 / 2 nodes = 50 req/s (workers NOT divided)
+        // Interval = 1000ms / 50 = 20ms
+        // Workers can dynamically share node quota via Redis reconciliation
         const interval = result.bucket.defaultConfig.requestsPerSecond.interval;
-        assert.strictEqual(interval, 100);
+        assert.strictEqual(interval, 20);
     });
 
     it('should calculate correct bucketSize from burstCapacity', () => {
@@ -753,12 +754,13 @@ describe('parseRateLimitConfig', () => {
             },
         };
 
-        const result = parseRateLimitConfig(config, 20); // 20 workers per node
+        const result = parseRateLimitConfig(config, 20); // 20 workers per node (ignored)
 
-        // Per-worker rate = 10000 / 10 nodes / 20 workers = 50 req/s
-        // Interval = 1000ms / 50 = 20ms
+        // NEW BEHAVIOR: Per-NODE rate = 10000 / 10 nodes = 1000 req/s (workers NOT divided)
+        // Interval = 1000ms / 1000 = 1ms
+        // Workers dynamically share the 1000 req/s node quota via Redis
         const interval = result.bucket.defaultConfig.requestsPerSecond.interval;
-        assert.strictEqual(interval, 20);
+        assert.strictEqual(interval, 1);
     });
 });
 });
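The new expectations imply that calculateInterval divides the limit across nodes only and ignores the workers argument. A minimal sketch consistent with both tests; the real implementation in gcra.js may round or clamp differently.

// Assumption: interval is the per-node GCRA emission interval in milliseconds.
// Matches the expectations above: (100, 2, 5) -> 20ms, (10000, 10, 20) -> 1ms.
function calculateInterval(limit, nodes, workers) {
    // 'workers' is kept for call compatibility but intentionally unused.
    const perNodeRate = limit / nodes;
    return 1000 / perNodeRate;
}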
