From 0b993bf474493d5d61f3e460c39bae780ec2ce45 Mon Sep 17 00:00:00 2001 From: Tomas Zijdemans Date: Mon, 23 Mar 2026 20:06:12 +0100 Subject: [PATCH 01/15] feat(rate-limit/unstable): add rate limiting module --- .github/labeler.yml | 3 + deno.json | 1 + import_map.json | 1 + rate_limit/_algorithms.ts | 330 ++++++++ rate_limit/_keyed_algorithms.ts | 155 ++++ rate_limit/_replenishing_limiter.ts | 287 +++++++ rate_limit/_validation.ts | 59 ++ rate_limit/deno.json | 12 + rate_limit/fixed_window.ts | 118 +++ rate_limit/fixed_window_test.ts | 626 ++++++++++++++ rate_limit/mod.ts | 32 + rate_limit/rate_limiter.ts | 399 +++++++++ rate_limit/rate_limiter_test.ts | 1219 +++++++++++++++++++++++++++ rate_limit/sliding_window.ts | 141 ++++ rate_limit/sliding_window_test.ts | 689 +++++++++++++++ rate_limit/token_bucket.ts | 135 +++ rate_limit/token_bucket_test.ts | 745 ++++++++++++++++ rate_limit/types.ts | 161 ++++ 18 files changed, 5113 insertions(+) create mode 100644 rate_limit/_algorithms.ts create mode 100644 rate_limit/_keyed_algorithms.ts create mode 100644 rate_limit/_replenishing_limiter.ts create mode 100644 rate_limit/_validation.ts create mode 100644 rate_limit/deno.json create mode 100644 rate_limit/fixed_window.ts create mode 100644 rate_limit/fixed_window_test.ts create mode 100644 rate_limit/mod.ts create mode 100644 rate_limit/rate_limiter.ts create mode 100644 rate_limit/rate_limiter_test.ts create mode 100644 rate_limit/sliding_window.ts create mode 100644 rate_limit/sliding_window_test.ts create mode 100644 rate_limit/token_bucket.ts create mode 100644 rate_limit/token_bucket_test.ts create mode 100644 rate_limit/types.ts diff --git a/.github/labeler.yml b/.github/labeler.yml index 2e8a8a118dda..1c04226a1eb1 100644 --- a/.github/labeler.yml +++ b/.github/labeler.yml @@ -88,6 +88,9 @@ path: random: - changed-files: - any-glob-to-any-file: random/** +rate-limit: + - changed-files: + - any-glob-to-any-file: rate_limit/** regexp: - changed-files: - 
any-glob-to-any-file: regexp/** diff --git a/deno.json b/deno.json index 6363398e4685..1d72a1b7eae6 100644 --- a/deno.json +++ b/deno.json @@ -84,6 +84,7 @@ "./net", "./path", "./random", + "./rate_limit", "./regexp", "./semver", "./streams", diff --git a/import_map.json b/import_map.json index 7aa140dea113..64f5a95462c0 100644 --- a/import_map.json +++ b/import_map.json @@ -36,6 +36,7 @@ "@std/path": "jsr:@std/path@^1.1.4", "@std/regexp": "jsr:@std/regexp@^1.0.1", "@std/random": "jsr:@std/random@^0.1.5", + "@std/rate-limit": "jsr:@std/rate-limit@^0.1.0", "@std/semver": "jsr:@std/semver@^1.0.8", "@std/streams": "jsr:@std/streams@^1.0.17", "@std/tar": "jsr:@std/tar@^0.1.10", diff --git a/rate_limit/_algorithms.ts b/rate_limit/_algorithms.ts new file mode 100644 index 000000000000..1d4bb4cd2289 --- /dev/null +++ b/rate_limit/_algorithms.ts @@ -0,0 +1,330 @@ +// Copyright 2018-2026 the Deno authors. MIT license. +// This module is browser compatible. + +import { RollingCounter } from "@std/data-structures/unstable-rolling-counter"; + +import { assertPositiveFinite, assertPositiveInteger } from "./_validation.ts"; + +/** + * Result returned by algorithm operations. All fields are always present + * regardless of whether the request was allowed. + * + * **Metadata semantics vary by algorithm:** + * + * - `retryAfter` is the *minimum* delay before capacity *may* free up. For + * sliding-window this is the time until the next segment rotation, which may + * not free enough permits for a high-cost request. For token-bucket and GCRA + * the value accounts for the requested cost. + * - `resetAt` is the timestamp of the next replenishment event (segment + * rotation, window boundary, or refill cycle). For sliding-window and + * token-bucket this is *not* necessarily when full capacity is restored. 
+ */ +export interface AlgorithmResult { + readonly ok: boolean; + readonly remaining: number; + readonly resetAt: number; + readonly retryAfter: number; + readonly limit: number; +} + +/** + * Pure state machine for a rate limit algorithm. No Map, no timers, no keys. + * Used by both the keyed layer (Map + eviction) and the primitives (queue + + * timer). + */ +export interface AlgorithmOps { + /** Create initial state for a new key or new instance. */ + create(now: number): S; + /** Advance time (rotate segments, refill tokens, reset window). Mutates state. */ + advance(state: S, now: number): void; + /** Try to consume `cost` permits. Returns true and mutates state if allowed. */ + tryConsume(state: S, cost: number, now: number): boolean; + /** Return whether a request of `cost` would be allowed without mutating state. */ + wouldAllow(state: S, cost: number, now: number): boolean; + /** Replenish permits (one timer tick). Mutates state. No-op for algorithms without timer-based replenishment (e.g. GCRA). */ + replenish(state: S): void; + /** Compute result metadata (remaining, resetAt, retryAfter). */ + result(state: S, ok: boolean, cost: number, now: number): AlgorithmResult; + /** Compute the retry delay for a denied request without allocating a result object. */ + computeRetryAfter(state: S, cost: number, now: number): number; + /** The configured permit limit. */ + readonly limit: number; +} + +// --- Fixed Window --- + +/** State for the fixed-window algorithm: count in current window and window start time. */ +export interface FixedWindowState { + count: number; + windowStart: number; +} + +/** + * Creates ops for the fixed-window algorithm. Callers must pass valid parameters. + * + * @param limit Maximum permits per window. Must be a positive integer. + * @param window Window duration in milliseconds. Must be a positive finite number. + * @returns Algorithm ops for fixed-window rate limiting. 
+ */ +export function createFixedWindowOps( + limit: number, + window: number, +): AlgorithmOps { + const context = "fixed window"; + assertPositiveInteger(context, "limit", limit); + assertPositiveFinite(context, "window", window); + return { + limit, + create(now) { + return { count: 0, windowStart: now }; + }, + advance(state, now) { + if (now - state.windowStart >= window) { + state.count = 0; + state.windowStart = state.windowStart + + Math.floor((now - state.windowStart) / window) * window; + } + }, + tryConsume(state, cost, _now) { + if (state.count + cost > limit) return false; + state.count += cost; + return true; + }, + wouldAllow(state, cost, _now) { + return state.count + cost <= limit; + }, + replenish(state) { + state.count = 0; + }, + result(state, ok, _cost, now) { + const resetAt = state.windowStart + window; + return { + ok, + remaining: Math.max(0, limit - state.count), + resetAt, + retryAfter: ok ? 0 : resetAt - now, + limit, + }; + }, + computeRetryAfter(state, _cost, now) { + return state.windowStart + window - now; + }, + }; +} + +// --- Sliding Window --- + +/** State for the sliding-window algorithm: segment counter and current segment start time. */ +export interface SlidingWindowState { + counter: RollingCounter; + segmentStart: number; +} + +/** + * Creates ops for the sliding-window algorithm. Callers must pass valid parameters. + * + * @param limit Maximum permits per window. Must be a positive integer. + * @param window Window duration in milliseconds. Must be a positive finite number. + * @param segmentsPerWindow Number of segments. Must be an integer >= 2. + * @returns Algorithm ops for sliding-window rate limiting. 
+ */ +export function createSlidingWindowOps( + limit: number, + window: number, + segmentsPerWindow: number, +): AlgorithmOps { + const context = "sliding window"; + assertPositiveInteger(context, "limit", limit); + assertPositiveFinite(context, "window", window); + if (!Number.isInteger(segmentsPerWindow) || segmentsPerWindow < 2) { + throw new RangeError( + `Cannot create ${context}: 'segmentsPerWindow' must be an integer >= 2, received ${segmentsPerWindow}`, + ); + } + if (window % segmentsPerWindow !== 0) { + throw new RangeError( + `Cannot create ${context}: 'window' (${window}) must be evenly divisible by 'segmentsPerWindow' (${segmentsPerWindow})`, + ); + } + const segmentDuration = window / segmentsPerWindow; + + return { + limit, + create(now) { + return { + counter: new RollingCounter(segmentsPerWindow), + segmentStart: now, + }; + }, + advance(state, now) { + const elapsed = now - state.segmentStart; + if (elapsed >= segmentDuration) { + const rotations = Math.floor(elapsed / segmentDuration); + state.counter.rotate(rotations); + state.segmentStart += rotations * segmentDuration; + } + }, + tryConsume(state, cost, _now) { + if (state.counter.total + cost > limit) return false; + state.counter.increment(cost); + return true; + }, + wouldAllow(state, cost, _now) { + return state.counter.total + cost <= limit; + }, + replenish(state) { + state.counter.rotate(); + state.segmentStart += segmentDuration; + }, + result(state, ok, _cost, now) { + const resetAt = state.segmentStart + segmentDuration; + return { + ok, + remaining: Math.max(0, limit - state.counter.total), + resetAt, + retryAfter: ok ? 0 : resetAt - now, + limit, + }; + }, + computeRetryAfter(state, _cost, now) { + return state.segmentStart + segmentDuration - now; + }, + }; +} + +// --- Token Bucket --- + +/** State for the token-bucket algorithm: current tokens and last refill time. 
*/ +export interface TokenBucketState { + tokens: number; + lastRefill: number; +} + +/** + * Creates ops for the token-bucket algorithm. Callers must pass valid parameters. + * + * @param limit Maximum tokens (bucket capacity). Must be a positive integer. + * @param window Refill cycle duration in milliseconds. Must be a positive finite number. + * @param tokensPerCycle Tokens added per cycle. Must be a positive integer. + * @returns Algorithm ops for token-bucket rate limiting. + */ +export function createTokenBucketOps( + limit: number, + window: number, + tokensPerCycle: number, +): AlgorithmOps { + const context = "token bucket"; + assertPositiveInteger(context, "limit", limit); + assertPositiveFinite(context, "window", window); + assertPositiveInteger(context, "tokensPerCycle", tokensPerCycle); + return { + limit, + create(now) { + return { tokens: limit, lastRefill: now }; + }, + advance(state, now) { + const elapsed = now - state.lastRefill; + if (elapsed >= window) { + const cycles = Math.floor(elapsed / window); + state.tokens = Math.min(limit, state.tokens + cycles * tokensPerCycle); + state.lastRefill += cycles * window; + } + }, + tryConsume(state, cost, _now) { + if (state.tokens < cost) return false; + state.tokens -= cost; + return true; + }, + wouldAllow(state, cost, _now) { + return state.tokens >= cost; + }, + replenish(state) { + state.tokens = Math.min(limit, state.tokens + tokensPerCycle); + state.lastRefill += window; + }, + result(state, ok, cost, now) { + const remaining = Math.max(0, Math.floor(state.tokens)); + return { + ok, + remaining, + resetAt: state.lastRefill + window, + retryAfter: ok ? 
0 : this.computeRetryAfter(state, cost, now), + limit, + }; + }, + computeRetryAfter(state, cost, now) { + const deficit = cost - state.tokens; + const cycles = Math.ceil(deficit / tokensPerCycle); + return Math.max(0, cycles * window - (now - state.lastRefill)); + }, + }; +} + +// --- GCRA (Generic Cell Rate Algorithm) --- + +/** State for GCRA: theoretical arrival time (tat) of the last request. */ +export interface GcraState { + tat: number; +} + +/** + * Creates ops for the GCRA (Generic Cell Rate Algorithm). Callers must pass valid parameters. + * + * @param limit Maximum permits per window. Must be a positive integer. + * @param window Window (tau) in milliseconds. Must be a positive finite number. + * @returns Algorithm ops for GCRA rate limiting. + */ +export function createGcraOps( + limit: number, + window: number, +): AlgorithmOps { + const context = "gcra"; + assertPositiveInteger(context, "limit", limit); + assertPositiveFinite(context, "window", window); + const emissionInterval = window / limit; + const tau = window; + + function remaining(state: GcraState, now: number): number { + const diff = tau - (state.tat - now); + return Math.min(limit, Math.max(0, Math.floor(diff / emissionInterval))); + } + + return { + limit, + create(now) { + return { tat: now }; + }, + advance(_state, _now) {}, + tryConsume(state: GcraState, cost: number, now: number) { + const allowAt = state.tat - tau; + if (now < allowAt) return false; + const newTat = Math.max(state.tat, now) + emissionInterval * cost; + if (newTat - now > tau) return false; + state.tat = newTat; + return true; + }, + wouldAllow(state: GcraState, cost: number, now: number) { + const allowAt = state.tat - tau; + if (now < allowAt) return false; + const newTat = Math.max(state.tat, now) + emissionInterval * cost; + return newTat - now <= tau; + }, + // No-op: GCRA has no timer-based replenishment. 
+ replenish(_state) {}, + result(state, ok, cost, now) { + return { + ok, + remaining: remaining(state, now), + resetAt: state.tat, + retryAfter: ok ? 0 : this.computeRetryAfter(state, cost, now), + limit, + }; + }, + computeRetryAfter(state, cost, now) { + const allowAt = state.tat - tau; + if (now < allowAt) return allowAt - now; + const newTat = Math.max(state.tat, now) + emissionInterval * cost; + return Math.max(0, newTat - tau - now); + }, + }; +} diff --git a/rate_limit/_keyed_algorithms.ts b/rate_limit/_keyed_algorithms.ts new file mode 100644 index 000000000000..15750cfde1df --- /dev/null +++ b/rate_limit/_keyed_algorithms.ts @@ -0,0 +1,155 @@ +// Copyright 2018-2026 the Deno authors. MIT license. +// This module is browser compatible. + +import type { AlgorithmOps, AlgorithmResult } from "./_algorithms.ts"; +import { + createFixedWindowOps, + createGcraOps, + createSlidingWindowOps, + createTokenBucketOps, +} from "./_algorithms.ts"; + +/** Internal interface for per-key algorithm strategies. */ +export interface KeyedAlgorithm { + limit(key: string, cost: number, now: number): AlgorithmResult; + peek(key: string, cost: number, now: number): AlgorithmResult; + has(key: string): boolean; + reset(key: string): void; + readonly size: number; + evict(now: number, ttl: number): void; + clear(): void; +} + +/** Wraps AlgorithmOps with a Map and eviction. 
*/ +function createKeyedAlgorithm( + ops: AlgorithmOps, +): KeyedAlgorithm { + const keys = new Map(); + + function getOrCreate(key: string, now: number): S & { lastAccess: number } { + let state = keys.get(key); + if (state === undefined) { + const base = ops.create(now); + (base as S & { lastAccess: number }).lastAccess = now; + state = base as S & { lastAccess: number }; + keys.set(key, state); + } + return state; + } + + function peekDefault(cost: number, now: number): AlgorithmResult { + return { + ok: cost <= ops.limit, + remaining: ops.limit, + resetAt: now, + retryAfter: 0, + limit: ops.limit, + }; + } + + return { + limit(key, cost, now) { + const state = getOrCreate(key, now); + ops.advance(state, now); + state.lastAccess = now; + const ok = ops.tryConsume(state, cost, now); + return ops.result(state, ok, cost, now); + }, + peek(key, cost, now) { + const state = keys.get(key); + if (state === undefined) return peekDefault(cost, now); + ops.advance(state, now); + const ok = ops.wouldAllow(state, cost, now); + return ops.result(state, ok, cost, now); + }, + has(key) { + return keys.has(key); + }, + reset(key) { + keys.delete(key); + }, + get size() { + return keys.size; + }, + evict(now, ttl) { + for (const [k, state] of keys) { + if (now - state.lastAccess > ttl) keys.delete(k); + } + }, + clear() { + keys.clear(); + }, + }; +} + +// --- Fixed Window --- + +/** + * Creates a keyed fixed-window rate limit algorithm. + * + * @param limit Maximum permits per key per window. Must be a positive integer. + * @param window Window duration in milliseconds. Must be a positive finite number. + * @returns A keyed algorithm using fixed-window semantics. + */ +export function createFixedWindowAlgorithm( + limit: number, + window: number, +): KeyedAlgorithm { + return createKeyedAlgorithm(createFixedWindowOps(limit, window)); +} + +// --- Sliding Window --- + +/** + * Creates a keyed sliding-window rate limit algorithm. 
+ * + * @param limit Maximum permits per key per window. Must be a positive integer. + * @param window Window duration in milliseconds. Must be a positive finite number. + * @param segmentsPerWindow Number of segments per window. Must be an integer >= 2. + * @returns A keyed algorithm using sliding-window semantics. + */ +export function createSlidingWindowAlgorithm( + limit: number, + window: number, + segmentsPerWindow: number, +): KeyedAlgorithm { + return createKeyedAlgorithm( + createSlidingWindowOps(limit, window, segmentsPerWindow), + ); +} + +// --- Token Bucket --- + +/** + * Creates a keyed token-bucket rate limit algorithm. + * + * @param limit Bucket capacity (max tokens per key). Must be a positive integer. + * @param window Refill cycle duration in milliseconds. Must be a positive finite number. + * @param tokensPerCycle Tokens added per cycle. Must be a positive integer. + * @returns A keyed algorithm using token-bucket semantics. + */ +export function createTokenBucketAlgorithm( + limit: number, + window: number, + tokensPerCycle: number, +): KeyedAlgorithm { + return createKeyedAlgorithm( + createTokenBucketOps(limit, window, tokensPerCycle), + ); +} + +// --- GCRA --- + +/** + * Creates a keyed GCRA (Generic Cell Rate Algorithm) rate limit algorithm. + * + * @param limit Maximum permits per key per window. Must be a positive integer. + * @param window Window (tau) in milliseconds. Must be a positive finite number. + * @returns A keyed algorithm using GCRA semantics. + */ +export function createGcraAlgorithm( + limit: number, + window: number, +): KeyedAlgorithm { + return createKeyedAlgorithm(createGcraOps(limit, window)); +} diff --git a/rate_limit/_replenishing_limiter.ts b/rate_limit/_replenishing_limiter.ts new file mode 100644 index 000000000000..dbc860f1ffdc --- /dev/null +++ b/rate_limit/_replenishing_limiter.ts @@ -0,0 +1,287 @@ +// Copyright 2018-2026 the Deno authors. MIT license. +// This module is browser compatible. 
+ +import type { + AcquiredLease, + AcquireOptions, + RateLimitLease, + RejectedLease, + ReplenishingRateLimiter, +} from "./types.ts"; +import { Deque } from "@std/data-structures/unstable-deque"; + +type RejectionReason = + | "Insufficient permits" + | "Queue limit exceeded" + | "Rate limiter has been disposed" + | "Evicted by newer request"; + +const DISPOSED_REASON: RejectionReason = "Rate limiter has been disposed"; +const ABORTED_REASON = "Acquire was aborted"; + +function noop() {} + +const ACQUIRED_LEASE: AcquiredLease = Object.freeze({ + acquired: true, + [Symbol.dispose]: noop, +}); + +function createRejectedLease( + retryAfter: number, + reason: RejectionReason, +): RejectedLease { + return { + acquired: false as const, + retryAfter, + reason, + [Symbol.dispose]: noop, + }; +} + +/** Pending waiter in the queue. */ +interface Waiter { + readonly permits: number; + resolve(lease: RateLimitLease): void; + onAbort?: (() => void) | undefined; +} + +/** + * Strategy hooks that define how a specific algorithm acquires and + * replenishes permits. Passed to {@linkcode createReplenishingLimiter}. + */ +interface ReplenishingStrategy { + /** Try to consume `permits` from the underlying algorithm. Returns true if acquired. */ + tryAcquirePermits(permits: number): boolean; + /** Advance the algorithm by one replenishment cycle. */ + replenish(): void; + /** Compute the retry delay for a denied request of `permits`. */ + computeRetryAfter(permits: number): number; + /** The maximum permits that can be acquired in a single call. */ + readonly permitLimit: number; +} + +/** Configuration for {@linkcode createReplenishingLimiter}. */ +interface ReplenishingLimiterConfig { + replenishmentPeriod: number; + autoReplenishment: boolean; + queueLimit: number; + queueOrder: "oldest-first" | "newest-first"; +} + +/** + * Create a {@linkcode ReplenishingRateLimiter} that delegates permit + * accounting to the provided strategy. 
Handles queueing, disposal, + * abort signals, and the replenishment timer. + */ +export function createReplenishingLimiter( + config: ReplenishingLimiterConfig, + strategy: ReplenishingStrategy, +): ReplenishingRateLimiter { + const queue = new Deque(); + let queuedPermits = 0; + let timer: ReturnType | undefined; + let disposed = false; + + if ( + config.queueOrder !== "oldest-first" && + config.queueOrder !== "newest-first" + ) { + throw new TypeError( + `Cannot create limiter: unknown queueOrder '${config + .queueOrder as string}'`, + ); + } + + const isNewestFirst = config.queueOrder === "newest-first"; + + if (config.autoReplenishment) { + timer = setInterval(replenishAndDrain, config.replenishmentPeriod); + } + + function peekNext(): Waiter | undefined { + return isNewestFirst ? queue.peekBack() : queue.peekFront(); + } + + function popNext(): Waiter | undefined { + return isNewestFirst ? queue.popBack() : queue.popFront(); + } + + /** + * Replenish permits and drain the queue in priority order. Stops at the + * first waiter whose permit demand exceeds the available supply. With + * `newest-first`, this means a large newest request blocks smaller older + * ones until enough permits accumulate. 
+ */ + function replenishAndDrain(): void { + if (disposed) return; + strategy.replenish(); + + let waiter = peekNext(); + while (waiter !== undefined) { + if (!strategy.tryAcquirePermits(waiter.permits)) break; + popNext(); + queuedPermits -= waiter.permits; + resolveWaiter(waiter, ACQUIRED_LEASE); + waiter = peekNext(); + } + } + + function resolveWaiter(waiter: Waiter, lease: RateLimitLease): void { + try { + if (waiter.onAbort) { + waiter.onAbort(); + waiter.onAbort = undefined; + } + } finally { + waiter.resolve(lease); + } + } + + function removeWaiter(waiter: Waiter): void { + const removed = queue.removeFirst((w) => w === waiter); + if (removed !== undefined) { + queuedPermits -= waiter.permits; + } + } + + /** Evicts the oldest (stalest) waiter to make room, regardless of queue order. */ + function evictOldest(permits: number): void { + while (queuedPermits + permits > config.queueLimit) { + const evicted = queue.popFront(); + if (!evicted) break; + queuedPermits -= evicted.permits; + resolveWaiter( + evicted, + createRejectedLease( + strategy.computeRetryAfter(evicted.permits), + "Evicted by newer request", + ), + ); + } + } + + function tryAcquire(permits = 1): RateLimitLease { + if (disposed) { + return createRejectedLease(0, DISPOSED_REASON); + } + if (permits < 1 || !Number.isInteger(permits)) { + throw new RangeError( + `Cannot acquire: 'permits' must be a positive integer, received ${permits}`, + ); + } + if (permits > strategy.permitLimit) { + throw new RangeError( + `Cannot acquire: 'permits' (${permits}) exceeds the permit limit (${strategy.permitLimit})`, + ); + } + + if (strategy.tryAcquirePermits(permits)) { + return ACQUIRED_LEASE; + } + return createRejectedLease( + strategy.computeRetryAfter(permits), + "Insufficient permits", + ); + } + + function acquire( + permits = 1, + options?: AcquireOptions, + ): Promise { + if (disposed) { + return Promise.reject(new Error(DISPOSED_REASON)); + } + if (permits < 1 || !Number.isInteger(permits)) { + 
return Promise.reject( + new RangeError( + `Cannot acquire: 'permits' must be a positive integer, received ${permits}`, + ), + ); + } + if (permits > strategy.permitLimit) { + return Promise.reject( + new RangeError( + `Cannot acquire: 'permits' (${permits}) exceeds the permit limit (${strategy.permitLimit})`, + ), + ); + } + + const signal = options?.signal; + if (signal?.aborted) { + return Promise.reject( + signal.reason ?? new DOMException(ABORTED_REASON, "AbortError"), + ); + } + + if (strategy.tryAcquirePermits(permits)) { + return Promise.resolve(ACQUIRED_LEASE); + } + + if (queuedPermits + permits > config.queueLimit) { + const canEvict = queue.length > 0 && + config.queueLimit > 0 && + permits <= config.queueLimit; + + if (!canEvict) { + return Promise.resolve( + createRejectedLease( + strategy.computeRetryAfter(permits), + "Queue limit exceeded", + ), + ); + } + evictOldest(permits); + } + + return new Promise((resolve, reject) => { + const waiter: Waiter = { permits, resolve }; + queue.pushBack(waiter); + queuedPermits += permits; + + if (signal) { + const onAbort = () => { + removeWaiter(waiter); + reject( + signal.reason ?? 
new DOMException(ABORTED_REASON, "AbortError"), + ); + }; + waiter.onAbort = () => signal.removeEventListener("abort", onAbort); + signal.addEventListener("abort", onAbort, { once: true }); + } + }); + } + + function replenish(): void { + if (config.autoReplenishment) { + throw new Error( + "Cannot replenish: limiter uses automatic replenishment", + ); + } + replenishAndDrain(); + } + + function dispose(): void { + if (disposed) return; + disposed = true; + + if (timer !== undefined) { + clearInterval(timer); + timer = undefined; + } + + const lease = createRejectedLease(0, DISPOSED_REASON); + let waiter = queue.popFront(); + while (waiter !== undefined) { + resolveWaiter(waiter, lease); + waiter = queue.popFront(); + } + queuedPermits = 0; + } + + return { + tryAcquire, + acquire, + replenish, + [Symbol.dispose]: dispose, + }; +} diff --git a/rate_limit/_validation.ts b/rate_limit/_validation.ts new file mode 100644 index 000000000000..9d3594d4623d --- /dev/null +++ b/rate_limit/_validation.ts @@ -0,0 +1,59 @@ +// Copyright 2018-2026 the Deno authors. MIT license. +// This module is browser compatible. + +/** + * Asserts that `value` is a positive integer (>= 1). + * + * @param context Noun phrase for the error prefix, e.g. "token bucket". + * @param name The option name shown in the error message. + * @param value The value to check. + */ +export function assertPositiveInteger( + context: string, + name: string, + value: number, +): void { + if (!Number.isInteger(value) || value < 1) { + throw new RangeError( + `Cannot create ${context}: '${name}' must be a positive integer, received ${value}`, + ); + } +} + +/** + * Asserts that `value` is a non-negative integer (>= 0), if defined. + * + * @param context Noun phrase for the error prefix, e.g. "token bucket". + * @param name The option name shown in the error message. + * @param value The value to check. Skipped when `undefined`. 
+ */
+export function assertNonNegativeInteger(
+  context: string,
+  name: string,
+  value: number | undefined,
+): void {
+  if (value !== undefined && (!Number.isInteger(value) || value < 0)) {
+    throw new RangeError(
+      `Cannot create ${context}: '${name}' must be a non-negative integer, received ${value}`,
+    );
+  }
+}
+
+/**
+ * Asserts that `value` is a positive finite number (> 0).
+ *
+ * @param context Noun phrase for the error prefix, e.g. "token bucket".
+ * @param name The option name shown in the error message.
+ * @param value The value to check.
+ */
+export function assertPositiveFinite(
+  context: string,
+  name: string,
+  value: number,
+): void {
+  if (!Number.isFinite(value) || value <= 0) {
+    throw new RangeError(
+      `Cannot create ${context}: '${name}' must be a positive finite number, received ${value}`,
+    );
+  }
+}
diff --git a/rate_limit/deno.json b/rate_limit/deno.json
new file mode 100644
index 000000000000..f18f3b017a09
--- /dev/null
+++ b/rate_limit/deno.json
@@ -0,0 +1,12 @@
+{
+  "name": "@std/rate-limit",
+  "version": "0.1.0",
+  "exports": {
+    ".": "./mod.ts",
+    "./token-bucket": "./token_bucket.ts",
+    "./fixed-window": "./fixed_window.ts",
+    "./sliding-window": "./sliding_window.ts",
+    "./types": "./types.ts",
+    "./rate-limiter": "./rate_limiter.ts"
+  }
+}
diff --git a/rate_limit/fixed_window.ts b/rate_limit/fixed_window.ts
new file mode 100644
index 000000000000..4c7f1a27c580
--- /dev/null
+++ b/rate_limit/fixed_window.ts
@@ -0,0 +1,118 @@
+// Copyright 2018-2026 the Deno authors. MIT license.
+// This module is browser compatible.
+
+import type { QueueOptions, ReplenishingRateLimiter } from "./types.ts";
+import { createReplenishingLimiter } from "./_replenishing_limiter.ts";
+import { createFixedWindowOps } from "./_algorithms.ts";
+import {
+  assertNonNegativeInteger,
+  assertPositiveFinite,
+  assertPositiveInteger,
+} from "./_validation.ts";
+
+/**
+ * Options for {@linkcode createFixedWindow}.
+ * + * @experimental **UNSTABLE**: New API, yet to be vetted. + */ +export interface FixedWindowOptions extends QueueOptions { + /** Maximum permits per window. */ + permitLimit: number; + /** Window duration in milliseconds. */ + window: number; + /** + * Start an internal timer for automatic window rotation. + * + * When `false`, call {@linkcode ReplenishingRateLimiter.replenish} + * manually. + * + * @default {true} + */ + autoReplenishment?: boolean; + /** + * Clock function returning the current time in milliseconds. Override + * for deterministic testing. + * + * @default {Date.now} + */ + clock?: () => number; +} + +/** + * Create a fixed window rate limiter. A counter resets at the start of each + * window, making this the simplest time-windowed strategy — ideal for HTTP + * servers and 429 response logic. + * + * @experimental **UNSTABLE**: New API, yet to be vetted. + * + * @example Basic usage + * ```ts no-assert + * import { createFixedWindow } from "@std/rate-limit/fixed-window"; + * + * using limiter = createFixedWindow({ + * permitLimit: 100, + * window: 60_000, + * }); + * + * using lease = limiter.tryAcquire(); + * if (!lease.acquired) { + * console.log(`Retry after ${lease.retryAfter}ms`); + * } + * ``` + * + * @example Manual replenishment + * ```ts no-assert + * import { createFixedWindow } from "@std/rate-limit/fixed-window"; + * + * using limiter = createFixedWindow({ + * permitLimit: 100, + * window: 60_000, + * autoReplenishment: false, + * }); + * + * limiter.replenish(); + * ``` + * + * @param options Configuration for the fixed window. + * @returns A {@linkcode ReplenishingRateLimiter}. 
+ */ +export function createFixedWindow( + options: FixedWindowOptions, +): ReplenishingRateLimiter { + const context = "fixed window"; + assertPositiveInteger(context, "permitLimit", options.permitLimit); + assertPositiveFinite(context, "window", options.window); + assertNonNegativeInteger(context, "queueLimit", options.queueLimit); + + const { permitLimit, window: windowMs } = options; + const clock = options.clock ?? Date.now; + const ops = createFixedWindowOps(permitLimit, windowMs); + const state = ops.create(clock()); + let lastNow = 0; + + return createReplenishingLimiter( + { + replenishmentPeriod: windowMs, + autoReplenishment: options.autoReplenishment ?? true, + queueLimit: options.queueLimit ?? 0, + queueOrder: options.queueOrder ?? "oldest-first", + }, + { + get permitLimit() { + return ops.limit; + }, + tryAcquirePermits(permits: number): boolean { + lastNow = clock(); + ops.advance(state, lastNow); + return ops.tryConsume(state, permits, lastNow); + }, + replenish(): void { + lastNow = state.windowStart + windowMs; + ops.advance(state, lastNow); + }, + computeRetryAfter(permits: number): number { + return ops.computeRetryAfter(state, permits, lastNow); + }, + }, + ); +} diff --git a/rate_limit/fixed_window_test.ts b/rate_limit/fixed_window_test.ts new file mode 100644 index 000000000000..dd4016e2614d --- /dev/null +++ b/rate_limit/fixed_window_test.ts @@ -0,0 +1,626 @@ +// Copyright 2018-2026 the Deno authors. MIT license. 
+ +import { + assert, + assertEquals, + assertFalse, + assertRejects, + assertThrows, +} from "@std/assert"; +import { FakeTime } from "@std/testing/time"; +import { createFixedWindow } from "./fixed_window.ts"; + +// --- Factory validation --- + +Deno.test("createFixedWindow() throws for invalid permitLimit", () => { + assertThrows( + () => createFixedWindow({ permitLimit: 0, window: 1000 }), + RangeError, + "permitLimit", + ); + assertThrows( + () => createFixedWindow({ permitLimit: -1, window: 1000 }), + RangeError, + "permitLimit", + ); + assertThrows( + () => createFixedWindow({ permitLimit: 1.5, window: 1000 }), + RangeError, + "permitLimit", + ); + assertThrows( + () => createFixedWindow({ permitLimit: NaN, window: 1000 }), + RangeError, + "permitLimit", + ); + assertThrows( + () => createFixedWindow({ permitLimit: Infinity, window: 1000 }), + RangeError, + "permitLimit", + ); +}); + +Deno.test("createFixedWindow() throws for invalid window", () => { + assertThrows( + () => createFixedWindow({ permitLimit: 10, window: 0 }), + RangeError, + "window", + ); + assertThrows( + () => createFixedWindow({ permitLimit: 10, window: -100 }), + RangeError, + "window", + ); + assertThrows( + () => createFixedWindow({ permitLimit: 10, window: NaN }), + RangeError, + "window", + ); + assertThrows( + () => createFixedWindow({ permitLimit: 10, window: Infinity }), + RangeError, + "window", + ); +}); + +Deno.test("createFixedWindow() throws for invalid queueLimit", () => { + assertThrows( + () => createFixedWindow({ permitLimit: 10, window: 1000, queueLimit: -1 }), + RangeError, + "queueLimit", + ); +}); + +// --- tryAcquire --- + +Deno.test("tryAcquire() succeeds within the window limit", () => { + using time = new FakeTime(0); + using limiter = createFixedWindow({ + permitLimit: 3, + window: 1000, + }); + void time; + + assert(limiter.tryAcquire().acquired); + assert(limiter.tryAcquire().acquired); + assert(limiter.tryAcquire().acquired); + 
assertFalse(limiter.tryAcquire().acquired); +}); + +Deno.test("tryAcquire() acquires multiple permits at once", () => { + using time = new FakeTime(0); + using limiter = createFixedWindow({ + permitLimit: 5, + window: 1000, + }); + void time; + + assert(limiter.tryAcquire(3).acquired); + assertFalse(limiter.tryAcquire(3).acquired); + assert(limiter.tryAcquire(2).acquired); +}); + +Deno.test("tryAcquire() rejects with retryAfter equal to window duration", () => { + using time = new FakeTime(0); + using limiter = createFixedWindow({ + permitLimit: 1, + window: 5000, + }); + void time; + + limiter.tryAcquire(); + const lease = limiter.tryAcquire(); + assertFalse(lease.acquired); + assertEquals(lease.retryAfter, 5000); +}); + +Deno.test("tryAcquire() throws for invalid permits", () => { + using time = new FakeTime(0); + using limiter = createFixedWindow({ + permitLimit: 5, + window: 1000, + }); + void time; + + assertThrows(() => limiter.tryAcquire(0), RangeError); + assertThrows(() => limiter.tryAcquire(-1), RangeError); + assertThrows(() => limiter.tryAcquire(1.5), RangeError); +}); + +Deno.test("tryAcquire() throws when permits exceed permitLimit", () => { + using time = new FakeTime(0); + using limiter = createFixedWindow({ + permitLimit: 5, + window: 1000, + }); + void time; + + assertThrows(() => limiter.tryAcquire(6), RangeError, "exceeds"); +}); + +// --- Window reset --- + +Deno.test("permits reset after the window elapses", () => { + using time = new FakeTime(0); + using limiter = createFixedWindow({ permitLimit: 2, window: 1000 }); + + limiter.tryAcquire(); + limiter.tryAcquire(); + assertFalse(limiter.tryAcquire().acquired); + + time.tick(1000); + + assert(limiter.tryAcquire().acquired); + assert(limiter.tryAcquire().acquired); + assertFalse(limiter.tryAcquire().acquired); +}); + +Deno.test("full permit count is restored each window", () => { + using time = new FakeTime(0); + using limiter = createFixedWindow({ permitLimit: 5, window: 500 }); + + for (let i 
= 0; i < 5; i++) limiter.tryAcquire(); + assertFalse(limiter.tryAcquire().acquired); + + time.tick(500); + + assert(limiter.tryAcquire(5).acquired); +}); + +// --- Manual replenishment --- + +Deno.test("replenish() throws when autoReplenishment is true", () => { + using time = new FakeTime(0); + using limiter = createFixedWindow({ + permitLimit: 5, + window: 1000, + }); + void time; + + assertThrows( + () => limiter.replenish(), + Error, + "Cannot replenish: limiter uses automatic replenishment", + ); +}); + +Deno.test("replenish() drains queued acquire() waiters", async () => { + const limiter = createFixedWindow({ + permitLimit: 3, + window: 1000, + autoReplenishment: false, + queueLimit: 5, + }); + + limiter.tryAcquire(3); + + let resolved = false; + const promise = limiter.acquire().then((lease) => { + resolved = true; + return lease; + }); + + await Promise.resolve(); + assertFalse(resolved); + + limiter.replenish(); + const lease = await promise; + assert(resolved); + assert(lease.acquired); + + limiter[Symbol.dispose](); +}); + +Deno.test("replenish() resets the window when autoReplenishment is false", () => { + const limiter = createFixedWindow({ + permitLimit: 3, + window: 1000, + autoReplenishment: false, + }); + + limiter.tryAcquire(); + limiter.tryAcquire(); + limiter.tryAcquire(); + assertFalse(limiter.tryAcquire().acquired); + + limiter.replenish(); + assert(limiter.tryAcquire().acquired); + + limiter[Symbol.dispose](); +}); + +// --- acquire (async) --- + +Deno.test("acquire() resolves immediately when permits available", async () => { + using time = new FakeTime(0); + using limiter = createFixedWindow({ permitLimit: 5, window: 1000 }); + void time; + + const lease = await limiter.acquire(); + assert(lease.acquired); +}); + +Deno.test("acquire() returns rejected lease when queue limit is 0", async () => { + using time = new FakeTime(0); + using limiter = createFixedWindow({ + permitLimit: 1, + window: 1000, + queueLimit: 0, + }); + void time; + + 
limiter.tryAcquire(); + const lease = await limiter.acquire(); + assertFalse(lease.acquired); + assertEquals(lease.reason, "Queue limit exceeded"); +}); + +Deno.test("acquire() queues and resolves after window reset", async () => { + using time = new FakeTime(0); + using limiter = createFixedWindow({ + permitLimit: 1, + window: 1000, + queueLimit: 5, + }); + + limiter.tryAcquire(); + + let resolved = false; + const promise = limiter.acquire().then((lease) => { + resolved = true; + return lease; + }); + + await Promise.resolve(); + assertFalse(resolved); + + time.tick(1000); + const lease = await promise; + assert(resolved); + assert(lease.acquired); +}); + +Deno.test("acquire() rejects when aborted via signal", async () => { + using time = new FakeTime(0); + using limiter = createFixedWindow({ + permitLimit: 1, + window: 1000, + queueLimit: 5, + }); + void time; + + limiter.tryAcquire(); + + const controller = new AbortController(); + const promise = limiter.acquire(1, { signal: controller.signal }); + controller.abort(); + + await assertRejects(() => promise, DOMException); +}); + +Deno.test("acquire() rejects when signal is already aborted", async () => { + using time = new FakeTime(0); + using limiter = createFixedWindow({ + permitLimit: 1, + window: 1000, + queueLimit: 5, + }); + void time; + + limiter.tryAcquire(); + + await assertRejects( + () => limiter.acquire(1, { signal: AbortSignal.abort() }), + DOMException, + ); +}); + +Deno.test("acquire() with already-aborted signal rejects even when permits are available", async () => { + using time = new FakeTime(0); + using limiter = createFixedWindow({ + permitLimit: 5, + window: 1000, + queueLimit: 5, + }); + void time; + + await assertRejects( + () => limiter.acquire(1, { signal: AbortSignal.abort() }), + DOMException, + ); + + assert( + limiter.tryAcquire(5).acquired, + "all 5 permits should still be available", + ); +}); + +Deno.test("acquire() with already-aborted signal does not evict queued waiters", async 
() => { + using time = new FakeTime(0); + using limiter = createFixedWindow({ + permitLimit: 1, + window: 1000, + queueLimit: 1, + queueOrder: "newest-first", + }); + + limiter.tryAcquire(); + + const existingPromise = limiter.acquire(); + + await assertRejects( + () => limiter.acquire(1, { signal: AbortSignal.abort() }), + DOMException, + ); + + time.tick(1000); + const lease = await existingPromise; + assert(lease.acquired, "existing waiter should not have been evicted"); +}); + +// --- Disposal --- + +Deno.test("dispose resolves queued waiters with rejected leases", async () => { + using time = new FakeTime(0); + const limiter = createFixedWindow({ + permitLimit: 1, + window: 1000, + queueLimit: 5, + }); + void time; + + limiter.tryAcquire(); + const promise = limiter.acquire(); + limiter[Symbol.dispose](); + + const lease = await promise; + assertFalse(lease.acquired); + assertEquals(lease.reason, "Rate limiter has been disposed"); +}); + +Deno.test("tryAcquire() returns rejected lease after disposal", () => { + using time = new FakeTime(0); + const limiter = createFixedWindow({ permitLimit: 5, window: 1000 }); + void time; + + limiter[Symbol.dispose](); + const lease = limiter.tryAcquire(); + assertFalse(lease.acquired); +}); + +Deno.test("acquire() rejects after disposal", async () => { + using time = new FakeTime(0); + const limiter = createFixedWindow({ permitLimit: 5, window: 1000 }); + void time; + + limiter[Symbol.dispose](); + await assertRejects(() => limiter.acquire(), Error, "disposed"); +}); + +// --- Queue ordering --- + +Deno.test("oldest-first queue resolves waiters in FIFO order", async () => { + using time = new FakeTime(0); + using limiter = createFixedWindow({ + permitLimit: 1, + window: 1000, + queueLimit: 10, + queueOrder: "oldest-first", + }); + + limiter.tryAcquire(); + + const order: number[] = []; + const p1 = limiter.acquire().then((l) => { + order.push(1); + return l; + }); + const p2 = limiter.acquire().then((l) => { + order.push(2); 
+ return l; + }); + + time.tick(1000); + await p1; + time.tick(1000); + await p2; + + assertEquals(order, [1, 2]); +}); + +Deno.test("newest-first queue resolves newest waiter first", async () => { + using time = new FakeTime(0); + using limiter = createFixedWindow({ + permitLimit: 1, + window: 1000, + queueLimit: 10, + queueOrder: "newest-first", + }); + + limiter.tryAcquire(); + + const order: number[] = []; + const p1 = limiter.acquire().then((l) => { + order.push(1); + return l; + }); + const p2 = limiter.acquire().then((l) => { + order.push(2); + return l; + }); + + time.tick(1000); + await p2; + time.tick(1000); + await p1; + + assertEquals(order, [2, 1]); +}); + +// --- Eviction --- + +Deno.test("newest-first queue evicts oldest waiter when queue is full", async () => { + using time = new FakeTime(0); + using limiter = createFixedWindow({ + permitLimit: 1, + window: 1000, + queueLimit: 2, + queueOrder: "newest-first", + }); + + limiter.tryAcquire(); + + const results: string[] = []; + const p1 = limiter.acquire().then((l) => { + results.push(l.acquired ? "p1:acquired" : `p1:${l.reason}`); + return l; + }); + const p2 = limiter.acquire().then((l) => { + results.push(l.acquired ? "p2:acquired" : `p2:${l.reason}`); + return l; + }); + const p3 = limiter.acquire().then((l) => { + results.push(l.acquired ? 
"p3:acquired" : `p3:${l.reason}`); + return l; + }); + + await p1; + assertEquals(results, ["p1:Evicted by newer request"]); + + time.tick(1000); + await p3; + time.tick(1000); + await p2; + + assertEquals(results, [ + "p1:Evicted by newer request", + "p3:acquired", + "p2:acquired", + ]); +}); + +Deno.test("oldest-first queue evicts oldest waiter when queue is full", async () => { + using time = new FakeTime(0); + using limiter = createFixedWindow({ + permitLimit: 1, + window: 1000, + queueLimit: 1, + queueOrder: "oldest-first", + }); + + limiter.tryAcquire(); + + const results: string[] = []; + const p1 = limiter.acquire().then((l) => { + results.push(l.acquired ? "p1:acquired" : `p1:${l.reason}`); + return l; + }); + const p2 = limiter.acquire().then((l) => { + results.push(l.acquired ? "p2:acquired" : `p2:${l.reason}`); + return l; + }); + + await p1; + assertEquals(results, ["p1:Evicted by newer request"]); + + time.tick(1000); + await p2; + + assertEquals(results, ["p1:Evicted by newer request", "p2:acquired"]); +}); + +// --- Multi-permit queued waiters --- + +Deno.test("acquire() queues multi-permit waiter spanning multiple windows", async () => { + using time = new FakeTime(0); + using limiter = createFixedWindow({ + permitLimit: 2, + window: 1000, + queueLimit: 10, + }); + + limiter.tryAcquire(2); + + let resolved = false; + const promise = limiter.acquire(2).then((lease) => { + resolved = true; + return lease; + }); + + await Promise.resolve(); + assertFalse(resolved); + + time.tick(1000); + const lease = await promise; + assert(resolved); + assert(lease.acquired); +}); + +// --- Multiple waiters resolved in single replenishment --- + +Deno.test("single replenishment resolves multiple queued waiters", async () => { + using time = new FakeTime(0); + using limiter = createFixedWindow({ + permitLimit: 3, + window: 1000, + queueLimit: 10, + }); + + limiter.tryAcquire(3); + + const order: number[] = []; + const p1 = limiter.acquire(1).then((l) => { + 
order.push(1); + return l; + }); + const p2 = limiter.acquire(1).then((l) => { + order.push(2); + return l; + }); + const p3 = limiter.acquire(1).then((l) => { + order.push(3); + return l; + }); + + await Promise.resolve(); + assertEquals(order, []); + + time.tick(1000); + await Promise.all([p1, p2, p3]); + + assertEquals(order, [1, 2, 3]); + for (const p of [p1, p2, p3]) { + assert((await p).acquired); + } +}); + +// --- acquire() validation --- + +Deno.test("acquire() rejects for invalid permits", async () => { + using time = new FakeTime(0); + using limiter = createFixedWindow({ permitLimit: 5, window: 1000 }); + void time; + + await assertRejects(() => limiter.acquire(0), RangeError); + await assertRejects(() => limiter.acquire(-1), RangeError); + await assertRejects(() => limiter.acquire(1.5), RangeError); +}); + +Deno.test("acquire() rejects when permits exceed permitLimit", async () => { + using time = new FakeTime(0); + using limiter = createFixedWindow({ permitLimit: 5, window: 1000 }); + void time; + + await assertRejects(() => limiter.acquire(6), RangeError, "exceeds"); +}); + +// --- Double dispose --- + +Deno.test("double dispose is a no-op", () => { + using time = new FakeTime(0); + const limiter = createFixedWindow({ permitLimit: 5, window: 1000 }); + void time; + + limiter[Symbol.dispose](); + limiter[Symbol.dispose](); +}); diff --git a/rate_limit/mod.ts b/rate_limit/mod.ts new file mode 100644 index 000000000000..fccf4ff909be --- /dev/null +++ b/rate_limit/mod.ts @@ -0,0 +1,32 @@ +// Copyright 2018-2026 the Deno authors. MIT license. +// This module is browser compatible. + +/** + * Rate limiting strategies for controlling how many operations can occur over + * time. + * + * The primary API is {@linkcode createRateLimiter}, a keyed rate limiter for + * the common case of "allow key X at most N requests per window." It supports + * fixed-window, sliding-window, token-bucket, and GCRA algorithms. 
For + * single-resource limiting, use the primitives: {@linkcode createTokenBucket}, + * {@linkcode createFixedWindow}, and {@linkcode createSlidingWindow}. + * + * ```ts no-assert + * import { createRateLimiter } from "@std/rate-limit/rate-limiter"; + * + * using limiter = createRateLimiter({ limit: 100, window: 60_000 }); + * + * const result = limiter.limit("user:123"); + * if (!result.ok) { + * console.log(`Retry after ${result.retryAfter}ms`); + * } + * ``` + * + * @module + */ + +export * from "./types.ts"; +export * from "./token_bucket.ts"; +export * from "./fixed_window.ts"; +export * from "./sliding_window.ts"; +export * from "./rate_limiter.ts"; diff --git a/rate_limit/rate_limiter.ts b/rate_limit/rate_limiter.ts new file mode 100644 index 000000000000..e55888d16b3f --- /dev/null +++ b/rate_limit/rate_limiter.ts @@ -0,0 +1,399 @@ +// Copyright 2018-2026 the Deno authors. MIT license. +// This module is browser compatible. + +import { + assertNonNegativeInteger, + assertPositiveFinite, + assertPositiveInteger, +} from "./_validation.ts"; +import { + createFixedWindowAlgorithm, + createGcraAlgorithm, + createSlidingWindowAlgorithm, + createTokenBucketAlgorithm, +} from "./_keyed_algorithms.ts"; +import type { KeyedAlgorithm } from "./_keyed_algorithms.ts"; + +/** + * Options for {@linkcode createRateLimiter}. + * + * @experimental **UNSTABLE**: New API, yet to be vetted. + */ +export interface RateLimiterOptions { + /** Maximum permits per key per window/cycle. */ + limit: number; + /** Window duration in milliseconds. */ + window: number; + /** + * Algorithm to use. + * + * - `"fixed-window"` — counter resets at each window boundary. Simplest. + * Allows boundary bursts up to 2× the limit. + * - `"sliding-window"` — window divided into segments that rotate + * individually. Smoother enforcement, no boundary bursts. + * - `"token-bucket"` — tokens refill at a steady rate. Best for smoothing + * bursty traffic with a configurable burst cap. 
+ * - `"gcra"` — Generic Cell Rate Algorithm. Enforces strict uniform + * spacing between requests with a single timestamp per key. Ideal when you + * want hard, even enforcement with no boundary effects and minimal memory. + * + * @default {"sliding-window"} + */ + algorithm?: "fixed-window" | "sliding-window" | "token-bucket" | "gcra"; + /** + * Number of segments for the sliding window algorithm. Higher values give + * smoother enforcement at the cost of slightly more memory per key. + * Ignored for other algorithms. + * + * @default {10} + */ + segmentsPerWindow?: number; + /** + * For token bucket: tokens added per replenishment cycle. Ignored for + * other algorithms. + * + * @default {limit} + */ + tokensPerCycle?: number; + /** + * Time-to-live for idle key state in milliseconds. Keys with no activity + * for this duration are eligible for eviction. Set to `0` to disable + * automatic eviction. + * + * Only {@linkcode KeyedRateLimiter.limit} counts as activity for + * eviction purposes. {@linkcode KeyedRateLimiter.peek} does not refresh + * a key's last-access time. + * + * @default {300_000} + */ + evictionTtl?: number; + /** + * How often to scan for and evict idle keys, in milliseconds. Only + * meaningful when `evictionTtl > 0`. + * + * @default {60_000} + */ + evictionInterval?: number; + /** + * Maximum number of keys to track. When the limit is reached, new keys + * are rejected with `ok: false` (with `resetAt: 0` and `retryAfter: 0`). + * Set to `0` to disable (unbounded). + * + * Note: this limits the number of keys, not total memory. Long key + * strings still consume memory proportional to their length. + * + * @default {0} + */ + maxKeys?: number; + /** + * Clock function returning the current time in milliseconds. Override + * for testing with `FakeTime`. + * + * @default {Date.now} + */ + clock?: () => number; +} + +/** + * Options for {@linkcode KeyedRateLimiter.limit}. + * + * @see {@linkcode PeekOptions} for the read-only equivalent. 
+ * @experimental **UNSTABLE**: New API, yet to be vetted. + */ +export interface LimitOptions { + /** + * Number of permits to consume for this request. Use higher values for + * expensive operations. + * + * @default {1} + */ + cost?: number; +} + +/** + * Options for {@linkcode KeyedRateLimiter.peek}. + * + * @see {@linkcode LimitOptions} for the consuming equivalent. + * @experimental **UNSTABLE**: New API, yet to be vetted. + */ +export interface PeekOptions { + /** + * Number of permits to check. Determines whether a request of this size + * would be allowed and computes `retryAfter` accordingly. + * + * @default {1} + */ + cost?: number; +} + +/** + * The result of a rate limit check. All fields are present regardless of + * whether the request was allowed. + * + * @experimental **UNSTABLE**: New API, yet to be vetted. + */ +export interface RateLimitResult { + /** Whether the request is allowed. */ + readonly ok: boolean; + /** Best-effort estimate of remaining permits for this key. */ + readonly remaining: number; + /** + * Timestamp (milliseconds since epoch) of the next replenishment event + * (segment rotation, window boundary, or refill cycle). This is *not* + * necessarily when full capacity is restored — for sliding-window and + * token-bucket it may take multiple replenishment cycles. Useful for the + * `X-RateLimit-Reset` HTTP header. + */ + readonly resetAt: number; + /** + * Minimum retry delay in milliseconds. `0` when the request is allowed. + * This is the earliest point at which capacity *may* free up. For + * sliding-window, this reflects the next segment rotation and may not + * free enough permits for a high-cost request. For token-bucket and GCRA + * the value accounts for the requested cost. Useful for the + * `Retry-After` HTTP header. + */ + readonly retryAfter: number; + /** The limit configured for this limiter. */ + readonly limit: number; +} + +/** + * A keyed rate limiter that manages per-key state internally. 
This is the + * primary rate limiting API for the common case of "allow key X at most N + * requests per window." + * + * **Disposal behavior:** after disposal, `limit()` and `peek()` return a + * result with `ok: false` (remaining/resetAt/retryAfter all `0`), and + * `reset()` is a no-op. This matches the primitive limiter contract where + * `tryAcquire()` returns a rejected lease after disposal. + * + * @experimental **UNSTABLE**: New API, yet to be vetted. + */ +export interface KeyedRateLimiter extends Disposable { + /** + * Check whether a request for the given key should be allowed, and + * consume permits if so. + * + * @param key Identifier for the rate limit subject (user ID, IP, etc.). + * @param options Override cost per request. + * @returns A {@linkcode RateLimitResult} with the decision and metadata. + */ + limit(key: string, options?: LimitOptions): RateLimitResult; + + /** + * Check the current state for a key without consuming any permits. + * Useful for displaying remaining quota in UI or headers without + * affecting the count. + * + * Note: `peek()` does not count as activity for TTL-based eviction. + * Keys that are only peeked (never limited) will still be evicted after + * `evictionTtl` of inactivity. + */ + peek(key: string, options?: PeekOptions): RateLimitResult; + + /** + * Reset all state for a key, restoring it to full capacity. Useful for + * testing, admin overrides, or support tooling. + */ + reset(key: string): void; + + /** Number of keys currently tracked. */ + readonly size: number; +} + +/** + * Create a keyed rate limiter. The algorithm and its parameters are + * configured once; per-key state is managed internally with automatic + * eviction of idle keys. + * + * @experimental **UNSTABLE**: New API, yet to be vetted. 
+ * + * @example Basic API rate limiting + * ```ts no-assert + * import { createRateLimiter } from "@std/rate-limit/rate-limiter"; + * + * using limiter = createRateLimiter({ limit: 100, window: 60_000 }); + * + * Deno.serve((req) => { + * const ip = req.headers.get("x-forwarded-for") ?? "unknown"; + * const result = limiter.limit(ip); + * if (!result.ok) { + * return new Response("Too many requests", { + * status: 429, + * headers: { + * "Retry-After": String(Math.ceil(result.retryAfter / 1000)), + * }, + * }); + * } + * return new Response("OK"); + * }); + * ``` + * + * @example Variable cost + * ```ts no-assert + * import { createRateLimiter } from "@std/rate-limit/rate-limiter"; + * + * using limiter = createRateLimiter({ limit: 100, window: 60_000 }); + * const result = limiter.limit("user:123", { cost: 5 }); + * ``` + * + * @example GCRA for strict uniform spacing + * ```ts no-assert + * import { createRateLimiter } from "@std/rate-limit/rate-limiter"; + * + * using limiter = createRateLimiter({ + * limit: 10, + * window: 1_000, + * algorithm: "gcra", + * }); + * + * const result = limiter.limit("user:123"); + * if (!result.ok) { + * console.log(`Retry after ${result.retryAfter}ms`); + * } + * ``` + * + * @param options Configuration for the rate limiter. + * @returns A {@linkcode KeyedRateLimiter}. 
+ */ +export function createRateLimiter( + options: RateLimiterOptions, +): KeyedRateLimiter { + const context = "rate limiter"; + assertPositiveInteger(context, "limit", options.limit); + assertPositiveFinite(context, "window", options.window); + + const { + limit, + window: windowMs, + algorithm: algorithmName = "sliding-window", + segmentsPerWindow = 10, + tokensPerCycle = limit, + evictionTtl = 300_000, + evictionInterval = 60_000, + maxKeys = 0, + clock = Date.now, + } = options; + + if (algorithmName === "token-bucket") { + assertPositiveInteger(context, "tokensPerCycle", tokensPerCycle); + if (tokensPerCycle > limit) { + throw new RangeError( + `Cannot create ${context}: 'tokensPerCycle' (${tokensPerCycle}) exceeds 'limit' (${limit})`, + ); + } + } + + if (!Number.isFinite(evictionTtl) || evictionTtl < 0) { + throw new RangeError( + `Cannot create ${context}: 'evictionTtl' must be a non-negative finite number, received ${evictionTtl}`, + ); + } + + if (evictionTtl > 0) { + assertPositiveFinite(context, "evictionInterval", evictionInterval); + } + + assertNonNegativeInteger(context, "maxKeys", maxKeys); + + let algorithm: KeyedAlgorithm; + switch (algorithmName) { + case "fixed-window": + algorithm = createFixedWindowAlgorithm(limit, windowMs); + break; + case "sliding-window": + algorithm = createSlidingWindowAlgorithm( + limit, + windowMs, + segmentsPerWindow, + ); + break; + case "token-bucket": + algorithm = createTokenBucketAlgorithm(limit, windowMs, tokensPerCycle); + break; + case "gcra": + algorithm = createGcraAlgorithm(limit, windowMs); + break; + default: + throw new TypeError( + `Cannot create ${context}: unknown algorithm '${algorithmName as string}'`, + ); + } + + const MAX_KEYS_REJECTED: RateLimitResult = Object.freeze({ + ok: false as const, + remaining: 0, + resetAt: 0, + retryAfter: 0, + limit, + }); + + const DISPOSED_RESULT: RateLimitResult = Object.freeze({ + ok: false as const, + remaining: 0, + resetAt: 0, + retryAfter: 0, + limit, + 
  });

  let disposed = false;
  let evictionTimer: ReturnType<typeof setInterval> | undefined;

  if (evictionTtl > 0) {
    evictionTimer = setInterval(
      () => algorithm.evict(clock(), evictionTtl),
      evictionInterval,
    );
  }

  function validateCost(method: string, cost: number): void {
    if (!Number.isInteger(cost) || cost < 1) {
      throw new RangeError(
        `Cannot ${method}: 'cost' must be a positive integer, received ${cost}`,
      );
    }
    if (cost > limit) {
      throw new RangeError(
        `Cannot ${method}: 'cost' (${cost}) exceeds the limit (${limit})`,
      );
    }
  }

  return {
    limit(key: string, options?: LimitOptions): RateLimitResult {
      if (disposed) return DISPOSED_RESULT;
      const cost = options?.cost ?? 1;
      validateCost("limit", cost);
      if (maxKeys > 0 && algorithm.size >= maxKeys && !algorithm.has(key)) {
        return MAX_KEYS_REJECTED;
      }
      return algorithm.limit(key, cost, clock());
    },
    peek(key: string, options?: PeekOptions): RateLimitResult {
      if (disposed) return DISPOSED_RESULT;
      const cost = options?.cost ?? 1;
      validateCost("peek", cost);
      if (maxKeys > 0 && algorithm.size >= maxKeys && !algorithm.has(key)) {
        return MAX_KEYS_REJECTED;
      }
      return algorithm.peek(key, cost, clock());
    },
    reset(_key: string): void {
      if (disposed) return;
      algorithm.reset(_key);
    },
    get size(): number {
      return algorithm.size;
    },
    [Symbol.dispose](): void {
      if (disposed) return;
      disposed = true;
      if (evictionTimer !== undefined) {
        clearInterval(evictionTimer);
        evictionTimer = undefined;
      }
      algorithm.clear();
    },
  };
}
diff --git a/rate_limit/rate_limiter_test.ts b/rate_limit/rate_limiter_test.ts
new file mode 100644
index 000000000000..283c45e3cc74
--- /dev/null
+++ b/rate_limit/rate_limiter_test.ts
@@ -0,0 +1,1219 @@
+// Copyright 2018-2026 the Deno authors. MIT license.
+ +import { assert, assertEquals, assertFalse, assertThrows } from "@std/assert"; +import { FakeTime } from "@std/testing/time"; +import { createRateLimiter } from "./rate_limiter.ts"; + +// --- Factory validation --- + +Deno.test("createRateLimiter() throws for invalid limit", () => { + assertThrows( + () => createRateLimiter({ limit: 0, window: 1000 }), + RangeError, + "limit", + ); + assertThrows( + () => createRateLimiter({ limit: -1, window: 1000 }), + RangeError, + "limit", + ); + assertThrows( + () => createRateLimiter({ limit: 1.5, window: 1000 }), + RangeError, + "limit", + ); +}); + +Deno.test("createRateLimiter() throws for invalid window", () => { + assertThrows( + () => createRateLimiter({ limit: 10, window: 0 }), + RangeError, + "window", + ); + assertThrows( + () => createRateLimiter({ limit: 10, window: -100 }), + RangeError, + "window", + ); +}); + +Deno.test("createRateLimiter() throws for invalid segmentsPerWindow", () => { + assertThrows( + () => + createRateLimiter({ + limit: 10, + window: 1000, + algorithm: "sliding-window", + segmentsPerWindow: 1, + }), + RangeError, + "segmentsPerWindow", + ); + assertThrows( + () => + createRateLimiter({ + limit: 10, + window: 1000, + algorithm: "sliding-window", + segmentsPerWindow: 3, + }), + RangeError, + "divisible", + ); +}); + +Deno.test("createRateLimiter() throws for invalid tokensPerCycle", () => { + assertThrows( + () => + createRateLimiter({ + limit: 10, + window: 1000, + algorithm: "token-bucket", + tokensPerCycle: 0, + }), + RangeError, + "tokensPerCycle", + ); + assertThrows( + () => + createRateLimiter({ + limit: 10, + window: 1000, + algorithm: "token-bucket", + tokensPerCycle: 11, + }), + RangeError, + "tokensPerCycle", + ); +}); + +Deno.test("createRateLimiter() throws for invalid eviction options when evictionTtl > 0", () => { + assertThrows( + () => + createRateLimiter({ + limit: 10, + window: 1000, + evictionTtl: 5000, + evictionInterval: 0, + }), + RangeError, + "evictionInterval", + 
); + assertThrows( + () => + createRateLimiter({ + limit: 10, + window: 1000, + evictionTtl: 5000, + evictionInterval: -100, + }), + RangeError, + "evictionInterval", + ); + assertThrows( + () => + createRateLimiter({ + limit: 10, + window: 1000, + evictionTtl: Infinity, + evictionInterval: 60_000, + }), + RangeError, + "evictionTtl", + ); +}); + +Deno.test("createRateLimiter() throws for negative evictionTtl", () => { + assertThrows( + () => + createRateLimiter({ + limit: 10, + window: 1000, + evictionTtl: -1, + }), + RangeError, + "evictionTtl", + ); + assertThrows( + () => + createRateLimiter({ + limit: 10, + window: 1000, + evictionTtl: NaN, + }), + RangeError, + "evictionTtl", + ); +}); + +Deno.test("createRateLimiter() throws for invalid cost", () => { + using _time = new FakeTime(); + using limiter = createRateLimiter({ limit: 10, window: 1000 }); + + assertThrows(() => limiter.limit("a", { cost: 0 }), RangeError, "cost"); + assertThrows(() => limiter.limit("a", { cost: -1 }), RangeError, "cost"); + assertThrows(() => limiter.limit("a", { cost: 1.5 }), RangeError, "cost"); + assertThrows(() => limiter.limit("a", { cost: 11 }), RangeError, "exceeds"); +}); + +Deno.test("createRateLimiter() accepts all algorithms", () => { + using _time = new FakeTime(); + for ( + const algorithm of [ + "fixed-window", + "sliding-window", + "token-bucket", + "gcra", + ] as const + ) { + using limiter = createRateLimiter({ + limit: 10, + window: 1000, + algorithm, + }); + const result = limiter.limit("key"); + assert(result.ok); + } +}); + +// === Fixed window === + +Deno.test("fixed-window: first request allowed with correct remaining", () => { + const now = 1000; + using limiter = createRateLimiter({ + limit: 5, + window: 1000, + algorithm: "fixed-window", + evictionTtl: 0, + clock: () => now, + }); + + const r = limiter.limit("a"); + assert(r.ok); + assertEquals(r.remaining, 4); + assertEquals(r.limit, 5); + assertEquals(r.retryAfter, 0); +}); + +Deno.test("fixed-window: 
exhausting limit returns ok: false", () => { + const now = 1000; + using limiter = createRateLimiter({ + limit: 3, + window: 1000, + algorithm: "fixed-window", + evictionTtl: 0, + clock: () => now, + }); + + assert(limiter.limit("a").ok); + assert(limiter.limit("a").ok); + assert(limiter.limit("a").ok); + + const r = limiter.limit("a"); + assertFalse(r.ok); + assertEquals(r.remaining, 0); + assert(r.retryAfter > 0); + assertEquals(r.resetAt, 2000); +}); + +Deno.test("fixed-window: permits restore after window elapses", () => { + let now = 1000; + using limiter = createRateLimiter({ + limit: 2, + window: 1000, + algorithm: "fixed-window", + evictionTtl: 0, + clock: () => now, + }); + + limiter.limit("a"); + limiter.limit("a"); + assertFalse(limiter.limit("a").ok); + + now = 2000; + const r = limiter.limit("a"); + assert(r.ok); + assertEquals(r.remaining, 1); +}); + +Deno.test("fixed-window: variable cost consumes multiple permits", () => { + const now = 1000; + using limiter = createRateLimiter({ + limit: 10, + window: 1000, + algorithm: "fixed-window", + evictionTtl: 0, + clock: () => now, + }); + + const r = limiter.limit("a", { cost: 7 }); + assert(r.ok); + assertEquals(r.remaining, 3); + + assertFalse(limiter.limit("a", { cost: 4 }).ok); + assert(limiter.limit("a", { cost: 3 }).ok); +}); + +// === Sliding window === + +Deno.test("sliding-window: permits freed incrementally as segments rotate", () => { + let now = 0; + using limiter = createRateLimiter({ + limit: 4, + window: 400, + algorithm: "sliding-window", + segmentsPerWindow: 4, + evictionTtl: 0, + clock: () => now, + }); + + limiter.limit("a", { cost: 4 }); + assertFalse(limiter.limit("a").ok); + + now = 100; + assertFalse(limiter.limit("a").ok); + now = 200; + assertFalse(limiter.limit("a").ok); + now = 300; + assertFalse(limiter.limit("a").ok); + + now = 400; + assert(limiter.limit("a", { cost: 4 }).ok); +}); + +Deno.test("sliding-window: no boundary burst", () => { + let now = 0; + using limiter = 
createRateLimiter({ + limit: 10, + window: 1000, + algorithm: "sliding-window", + segmentsPerWindow: 2, + evictionTtl: 0, + clock: () => now, + }); + + limiter.limit("a", { cost: 10 }); + + now = 500; + assertFalse(limiter.limit("a").ok); + + now = 1000; + assert(limiter.limit("a", { cost: 10 }).ok); +}); + +Deno.test("sliding-window: retryAfter reflects next segment rotation", () => { + const now = 0; + using limiter = createRateLimiter({ + limit: 1, + window: 1000, + algorithm: "sliding-window", + segmentsPerWindow: 4, + evictionTtl: 0, + clock: () => now, + }); + + limiter.limit("a"); + const r = limiter.limit("a"); + assertFalse(r.ok); + assertEquals(r.retryAfter, 250); +}); + +// === Token bucket === + +Deno.test("token-bucket: starts at full capacity", () => { + const now = 0; + using limiter = createRateLimiter({ + limit: 5, + window: 1000, + algorithm: "token-bucket", + evictionTtl: 0, + clock: () => now, + }); + + const r = limiter.limit("a"); + assert(r.ok); + assertEquals(r.remaining, 4); +}); + +Deno.test("token-bucket: tokens refill lazily on access", () => { + let now = 0; + using limiter = createRateLimiter({ + limit: 3, + window: 1000, + algorithm: "token-bucket", + tokensPerCycle: 1, + evictionTtl: 0, + clock: () => now, + }); + + limiter.limit("a", { cost: 3 }); + assertFalse(limiter.limit("a").ok); + + now = 1000; + assert(limiter.limit("a").ok); + assertFalse(limiter.limit("a").ok); + + now = 3000; + assert(limiter.limit("a", { cost: 2 }).ok); +}); + +Deno.test("token-bucket: refill capped at limit", () => { + let now = 0; + using limiter = createRateLimiter({ + limit: 3, + window: 1000, + algorithm: "token-bucket", + tokensPerCycle: 3, + evictionTtl: 0, + clock: () => now, + }); + + limiter.limit("a"); + now = 10000; + const r = limiter.limit("a"); + assert(r.ok); + assertEquals(r.remaining, 2); +}); + +Deno.test("token-bucket: retryAfter reflects time until enough tokens", () => { + const now = 0; + using limiter = createRateLimiter({ + limit: 
10, + window: 500, + algorithm: "token-bucket", + tokensPerCycle: 2, + evictionTtl: 0, + clock: () => now, + }); + + limiter.limit("a", { cost: 10 }); + const r = limiter.limit("a", { cost: 3 }); + assertFalse(r.ok); + assertEquals(r.retryAfter, 1000); +}); + +// === GCRA === + +Deno.test("gcra: first request always allowed", () => { + const now = 0; + using limiter = createRateLimiter({ + limit: 10, + window: 1000, + algorithm: "gcra", + evictionTtl: 0, + clock: () => now, + }); + + const r = limiter.limit("a"); + assert(r.ok); + assertEquals(r.limit, 10); +}); + +Deno.test("gcra: requests spaced >= emission_interval apart always allowed", () => { + let now = 0; + const emissionInterval = 100; // window(1000) / limit(10) + using limiter = createRateLimiter({ + limit: 10, + window: 1000, + algorithm: "gcra", + evictionTtl: 0, + clock: () => now, + }); + + for (let i = 0; i < 20; i++) { + const r = limiter.limit("a"); + assert(r.ok, `request ${i} at now=${now} should be allowed`); + now += emissionInterval; + } +}); + +Deno.test("gcra: burst up to limit requests when idle", () => { + const now = 0; + using limiter = createRateLimiter({ + limit: 5, + window: 1000, + algorithm: "gcra", + evictionTtl: 0, + clock: () => now, + }); + + for (let i = 0; i < 5; i++) { + assert(limiter.limit("a").ok, `burst request ${i} should be allowed`); + } + assertFalse(limiter.limit("a").ok); +}); + +Deno.test("gcra: after burst, requests denied until tat drains", () => { + let now = 0; + using limiter = createRateLimiter({ + limit: 5, + window: 1000, + algorithm: "gcra", + evictionTtl: 0, + clock: () => now, + }); + + for (let i = 0; i < 5; i++) limiter.limit("a"); + assertFalse(limiter.limit("a").ok); + + // emission_interval = 200ms. After 200ms, one slot should free. 
+ now = 200; + assert(limiter.limit("a").ok); + assertFalse(limiter.limit("a").ok); +}); + +Deno.test("gcra: retryAfter is exact", () => { + const now = 0; + using limiter = createRateLimiter({ + limit: 5, + window: 1000, + algorithm: "gcra", + evictionTtl: 0, + clock: () => now, + }); + + for (let i = 0; i < 5; i++) limiter.limit("a"); + const r = limiter.limit("a"); + assertFalse(r.ok); + assertEquals(r.retryAfter, 200); +}); + +Deno.test("gcra: variable cost advances tat by emission_interval * cost", () => { + const now = 0; + using limiter = createRateLimiter({ + limit: 10, + window: 1000, + algorithm: "gcra", + evictionTtl: 0, + clock: () => now, + }); + + // emission_interval = 100ms. cost=5 advances tat by 500ms. + const r = limiter.limit("a", { cost: 5 }); + assert(r.ok); + assertEquals(r.remaining, 5); + + // 5 more slots remain + assert(limiter.limit("a", { cost: 5 }).ok); + assertFalse(limiter.limit("a").ok); +}); + +Deno.test("gcra: remaining derived correctly", () => { + const now = 0; + using limiter = createRateLimiter({ + limit: 10, + window: 1000, + algorithm: "gcra", + evictionTtl: 0, + clock: () => now, + }); + + const r1 = limiter.limit("a"); + assert(r1.ok); + assertEquals(r1.remaining, 9); + + const r2 = limiter.limit("a", { cost: 4 }); + assert(r2.ok); + assertEquals(r2.remaining, 5); +}); + +Deno.test("gcra: remaining never exceeds limit after long idle", () => { + let now = 0; + using limiter = createRateLimiter({ + limit: 10, + window: 1000, + algorithm: "gcra", + evictionTtl: 0, + clock: () => now, + }); + + limiter.limit("a"); + now += 100_000; + + const peek = limiter.peek("a"); + assert(peek.ok); + assert( + peek.remaining <= 10, + `remaining (${peek.remaining}) should not exceed limit (10)`, + ); + assertEquals(peek.remaining, 10); + + const result = limiter.limit("a"); + assert(result.ok); + assert( + result.remaining <= 10, + `remaining (${result.remaining}) should not exceed limit (10)`, + ); +}); + +Deno.test("gcra: cost exceeding 
remaining burst is denied", () => { + const now = 0; + using limiter = createRateLimiter({ + limit: 5, + window: 1000, + algorithm: "gcra", + evictionTtl: 0, + clock: () => now, + }); + + limiter.limit("a", { cost: 3 }); + const r = limiter.limit("a", { cost: 4 }); + assertFalse(r.ok); + assert(r.retryAfter > 0); +}); + +Deno.test("gcra: state is a single timestamp per key (minimal memory)", () => { + const now = 0; + using limiter = createRateLimiter({ + limit: 100, + window: 1000, + algorithm: "gcra", + evictionTtl: 0, + clock: () => now, + }); + + for (let i = 0; i < 1000; i++) { + limiter.limit(`key-${i}`); + } + assertEquals(limiter.size, 1000); +}); + +// === peek() === + +Deno.test("peek() returns current state without consuming permits", () => { + const now = 0; + using limiter = createRateLimiter({ + limit: 5, + window: 1000, + algorithm: "fixed-window", + evictionTtl: 0, + clock: () => now, + }); + + limiter.limit("a"); + limiter.limit("a"); + + const p = limiter.peek("a"); + assert(p.ok); + assertEquals(p.remaining, 3); + + // peek didn't consume — still 3 remaining + assertEquals(limiter.peek("a").remaining, 3); +}); + +Deno.test("peek() returns full capacity for unknown key", () => { + const now = 0; + using limiter = createRateLimiter({ + limit: 10, + window: 1000, + algorithm: "gcra", + evictionTtl: 0, + clock: () => now, + }); + + const p = limiter.peek("unknown"); + assert(p.ok); + assertEquals(p.remaining, 10); + assertEquals(p.limit, 10); +}); + +Deno.test("peek() reflects consumed permits after limit()", () => { + const now = 0; + using limiter = createRateLimiter({ + limit: 5, + window: 1000, + algorithm: "token-bucket", + evictionTtl: 0, + clock: () => now, + }); + + limiter.limit("a", { cost: 3 }); + const p = limiter.peek("a"); + assert(p.ok); + assertEquals(p.remaining, 2); +}); + +// === reset() === + +Deno.test("reset() restores key to full capacity", () => { + const now = 0; + using limiter = createRateLimiter({ + limit: 3, + window: 
1000, + algorithm: "gcra", + evictionTtl: 0, + clock: () => now, + }); + + limiter.limit("a", { cost: 3 }); + assertFalse(limiter.limit("a").ok); + + limiter.reset("a"); + assert(limiter.limit("a").ok); +}); + +Deno.test("reset() on unknown key is a no-op", () => { + const now = 0; + using limiter = createRateLimiter({ + limit: 5, + window: 1000, + algorithm: "fixed-window", + evictionTtl: 0, + clock: () => now, + }); + + limiter.reset("nonexistent"); // should not throw +}); + +// === size === + +Deno.test("size tracks number of keys", () => { + const now = 0; + using limiter = createRateLimiter({ + limit: 5, + window: 1000, + algorithm: "gcra", + evictionTtl: 0, + clock: () => now, + }); + + assertEquals(limiter.size, 0); + limiter.limit("a"); + assertEquals(limiter.size, 1); + limiter.limit("b"); + assertEquals(limiter.size, 2); + limiter.limit("a"); // same key + assertEquals(limiter.size, 2); + limiter.reset("a"); + assertEquals(limiter.size, 1); +}); + +// === Eviction === + +Deno.test("keys are evicted after evictionTtl of inactivity", () => { + using time = new FakeTime(); + using limiter = createRateLimiter({ + limit: 5, + window: 1000, + algorithm: "fixed-window", + evictionTtl: 5000, + evictionInterval: 1000, + }); + + limiter.limit("a"); + limiter.limit("b"); + assertEquals(limiter.size, 2); + + time.tick(6000); + assertEquals(limiter.size, 0); +}); + +Deno.test("active keys are not evicted", () => { + using time = new FakeTime(); + using limiter = createRateLimiter({ + limit: 5, + window: 1000, + algorithm: "fixed-window", + evictionTtl: 5000, + evictionInterval: 1000, + }); + + limiter.limit("a"); + limiter.limit("b"); + + time.tick(4000); + limiter.limit("a"); // refresh "a" + + time.tick(2000); // 6s total — "b" should be evicted, "a" should survive + assertEquals(limiter.size, 1); + assert(limiter.peek("a").ok); +}); + +Deno.test("peek() does not refresh activity for TTL eviction", () => { + using time = new FakeTime(); + using limiter = 
createRateLimiter({ + limit: 5, + window: 1000, + algorithm: "fixed-window", + evictionTtl: 5000, + evictionInterval: 1000, + }); + + limiter.limit("a"); + assertEquals(limiter.size, 1); + + time.tick(4000); + limiter.peek("a"); // should NOT refresh last-access + + time.tick(2000); // 6s total — "a" should be evicted despite the peek + assertEquals(limiter.size, 0); +}); + +Deno.test("evictionTtl: 0 disables eviction", () => { + using time = new FakeTime(); + using limiter = createRateLimiter({ + limit: 5, + window: 1000, + algorithm: "fixed-window", + evictionTtl: 0, + }); + + limiter.limit("a"); + time.tick(1_000_000); + assertEquals(limiter.size, 1); +}); + +// === Disposal === + +Deno.test("dispose clears all state", () => { + using _time = new FakeTime(); + const limiter = createRateLimiter({ + limit: 5, + window: 1000, + algorithm: "gcra", + }); + + limiter.limit("a"); + limiter.limit("b"); + assertEquals(limiter.size, 2); + + limiter[Symbol.dispose](); + assertEquals(limiter.size, 0); +}); + +Deno.test("limit() returns ok: false after disposal", () => { + using _time = new FakeTime(); + const limiter = createRateLimiter({ + limit: 5, + window: 1000, + algorithm: "gcra", + }); + + limiter[Symbol.dispose](); + const r = limiter.limit("a"); + assertFalse(r.ok); + assertEquals(r.remaining, 0); + assertEquals(r.resetAt, 0); + assertEquals(r.retryAfter, 0); +}); + +Deno.test("peek() returns ok: false after disposal", () => { + using _time = new FakeTime(); + const limiter = createRateLimiter({ + limit: 5, + window: 1000, + algorithm: "gcra", + }); + + limiter[Symbol.dispose](); + const r = limiter.peek("a"); + assertFalse(r.ok); + assertEquals(r.remaining, 0); + assertEquals(r.resetAt, 0); + assertEquals(r.retryAfter, 0); +}); + +Deno.test("reset() is a no-op after disposal", () => { + using _time = new FakeTime(); + const limiter = createRateLimiter({ + limit: 5, + window: 1000, + algorithm: "gcra", + }); + + limiter[Symbol.dispose](); + limiter.reset("a"); // 
should not throw +}); + +// === Metadata correctness === + +Deno.test("result.limit matches configured value", () => { + const now = 0; + using limiter = createRateLimiter({ + limit: 42, + window: 1000, + algorithm: "gcra", + evictionTtl: 0, + clock: () => now, + }); + + assertEquals(limiter.limit("a").limit, 42); + assertEquals(limiter.peek("a").limit, 42); +}); + +Deno.test("retryAfter is 0 when allowed, positive when denied", () => { + const now = 0; + using limiter = createRateLimiter({ + limit: 1, + window: 1000, + algorithm: "fixed-window", + evictionTtl: 0, + clock: () => now, + }); + + const allowed = limiter.limit("a"); + assertEquals(allowed.retryAfter, 0); + + const denied = limiter.limit("a"); + assert(denied.retryAfter > 0); +}); + +Deno.test("resetAt is a future timestamp", () => { + const now = 5000; + using limiter = createRateLimiter({ + limit: 5, + window: 1000, + algorithm: "fixed-window", + evictionTtl: 0, + clock: () => now, + }); + + const r = limiter.limit("a"); + assert(r.resetAt > now); +}); + +Deno.test("gcra: retryAfter when now < allowAt (request arrives too early)", () => { + let now = 0; + using limiter = createRateLimiter({ + limit: 5, + window: 1000, + algorithm: "gcra", + evictionTtl: 0, + clock: () => now, + }); + + // Fill all 5 slots: tat advances to 1000 + for (let i = 0; i < 5; i++) limiter.limit("a"); + + // Advance only 100ms — tat is 1000, allowAt = tat - tau = 0. + // A request at now=100 is after allowAt, so this exercises the else branch. + now = 100; + const r1 = limiter.limit("a"); + assertFalse(r1.ok); + assert(r1.retryAfter > 0); + + // Now set now to -100 (simulating clock skew) — now < allowAt exercises + // the `now < allowAt` branch in result(). 
+ now = -100; + const r2 = limiter.peek("a"); + assertFalse(r2.ok); + assert(r2.retryAfter > 0); +}); + +// === Per-key isolation === + +Deno.test("keys are isolated from each other", () => { + const now = 0; + using limiter = createRateLimiter({ + limit: 2, + window: 1000, + algorithm: "gcra", + evictionTtl: 0, + clock: () => now, + }); + + limiter.limit("a", { cost: 2 }); + assertFalse(limiter.limit("a").ok); + + assert(limiter.limit("b").ok); + assert(limiter.limit("b").ok); +}); + +// === Default algorithm is sliding-window === + +Deno.test("default algorithm is sliding-window", () => { + let now = 0; + using limiter = createRateLimiter({ + limit: 10, + window: 1000, + evictionTtl: 0, + clock: () => now, + }); + + limiter.limit("a", { cost: 10 }); + + // At half-window, a fixed window would have reset. Sliding window hasn't. + now = 500; + assertFalse(limiter.limit("a").ok); + + // After full window, sliding window frees permits. + now = 1000; + assert(limiter.limit("a").ok); +}); + +// === Default clock uses Date.now (T-1 test) === + +Deno.test("default clock uses Date.now", () => { + using _time = new FakeTime(0); + using limiter = createRateLimiter({ + limit: 5, + window: 1000, + algorithm: "fixed-window", + }); + + const r = limiter.limit("a"); + assert(r.ok); + assertEquals(r.resetAt, 1000); +}); + +// === peek() with cost (C-2/A-2) === + +Deno.test("peek() with cost checks whether that cost would be allowed", () => { + const now = 0; + using limiter = createRateLimiter({ + limit: 5, + window: 1000, + algorithm: "fixed-window", + evictionTtl: 0, + clock: () => now, + }); + + limiter.limit("a", { cost: 3 }); + + assert(limiter.peek("a", { cost: 2 }).ok); + assertFalse(limiter.peek("a", { cost: 3 }).ok); +}); + +Deno.test("peek() validates cost", () => { + using _time = new FakeTime(); + using limiter = createRateLimiter({ limit: 10, window: 1000 }); + + assertThrows(() => limiter.peek("a", { cost: 0 }), RangeError, "cost"); + assertThrows(() => 
limiter.peek("a", { cost: -1 }), RangeError, "cost"); + assertThrows(() => limiter.peek("a", { cost: 1.5 }), RangeError, "cost"); + assertThrows(() => limiter.peek("a", { cost: 11 }), RangeError, "exceeds"); +}); + +// === maxKeys (S-1) === + +Deno.test("maxKeys rejects new keys when limit reached", () => { + const now = 0; + using limiter = createRateLimiter({ + limit: 5, + window: 1000, + algorithm: "gcra", + evictionTtl: 0, + maxKeys: 2, + clock: () => now, + }); + + assert(limiter.limit("a").ok); + assert(limiter.limit("b").ok); + assertEquals(limiter.size, 2); + + const r = limiter.limit("c"); + assertFalse(r.ok); + assertEquals(limiter.size, 2); +}); + +Deno.test("maxKeys allows existing keys even when at capacity", () => { + const now = 0; + using limiter = createRateLimiter({ + limit: 5, + window: 1000, + algorithm: "gcra", + evictionTtl: 0, + maxKeys: 2, + clock: () => now, + }); + + limiter.limit("a"); + limiter.limit("b"); + + const r = limiter.limit("a"); + assert(r.ok); +}); + +Deno.test("maxKeys: 0 disables key limit", () => { + const now = 0; + using limiter = createRateLimiter({ + limit: 100, + window: 1000, + algorithm: "gcra", + evictionTtl: 0, + maxKeys: 0, + clock: () => now, + }); + + for (let i = 0; i < 1000; i++) { + assert(limiter.limit(`key:${i}`).ok); + } + assertEquals(limiter.size, 1000); +}); + +Deno.test("createRateLimiter() throws for invalid maxKeys", () => { + assertThrows( + () => createRateLimiter({ limit: 10, window: 1000, maxKeys: -1 }), + RangeError, + "maxKeys", + ); + assertThrows( + () => createRateLimiter({ limit: 10, window: 1000, maxKeys: 1.5 }), + RangeError, + "maxKeys", + ); +}); + +Deno.test("maxKeys rejects peek for unknown key when at capacity", () => { + const now = 0; + using limiter = createRateLimiter({ + limit: 5, + window: 1000, + algorithm: "gcra", + evictionTtl: 0, + maxKeys: 2, + clock: () => now, + }); + + limiter.limit("a"); + limiter.limit("b"); + + const r = limiter.peek("c"); + assertFalse(r.ok); + 
assertEquals(r.remaining, 0); + assertEquals(r.retryAfter, 0); + assertEquals(limiter.size, 2); +}); + +Deno.test("maxKeys allows peek for existing key at capacity", () => { + const now = 0; + using limiter = createRateLimiter({ + limit: 5, + window: 1000, + algorithm: "gcra", + evictionTtl: 0, + maxKeys: 2, + clock: () => now, + }); + + limiter.limit("a"); + limiter.limit("b"); + + const r = limiter.peek("a"); + assert(r.ok); + assertEquals(r.remaining, 4); +}); + +// === maxKeys + window reset (C-1 regression) === + +Deno.test("maxKeys allows existing key whose window has reset", () => { + let now = 0; + using limiter = createRateLimiter({ + limit: 3, + window: 1000, + algorithm: "fixed-window", + evictionTtl: 0, + maxKeys: 2, + clock: () => now, + }); + + limiter.limit("a"); + limiter.limit("b"); + assertEquals(limiter.size, 2); + + // Advance past the window so "a" resets to full capacity + now = 2000; + const r = limiter.limit("a"); + assert(r.ok); + assertEquals(r.remaining, 2); +}); + +Deno.test("maxKeys allows GCRA key after full tat drain", () => { + let now = 0; + using limiter = createRateLimiter({ + limit: 5, + window: 1000, + algorithm: "gcra", + evictionTtl: 0, + maxKeys: 2, + clock: () => now, + }); + + limiter.limit("a"); + limiter.limit("b"); + + // Advance well past the window so "a" drains fully + now = 5000; + const r = limiter.limit("a"); + assert(r.ok); +}); + +// === peek() unknown key with cost > 1 (T-TEST-3) === + +Deno.test("peek() returns ok for unknown key with cost <= limit", () => { + const now = 0; + using limiter = createRateLimiter({ + limit: 10, + window: 1000, + algorithm: "fixed-window", + evictionTtl: 0, + clock: () => now, + }); + + const p = limiter.peek("unknown", { cost: 5 }); + assert(p.ok); + assertEquals(p.remaining, 10); + assertEquals(p.limit, 10); +}); + +Deno.test("peek() returns not-ok for unknown key with cost > limit", () => { + const now = 0; + using limiter = createRateLimiter({ + limit: 5, + window: 1000, + 
algorithm: "gcra", + evictionTtl: 0, + clock: () => now, + }); + + assertThrows( + () => limiter.peek("unknown", { cost: 6 }), + RangeError, + "exceeds", + ); +}); + +// === Unknown algorithm (T-TEST-4) === + +Deno.test("createRateLimiter() throws for unknown algorithm", () => { + assertThrows( + () => + createRateLimiter({ + limit: 10, + window: 1000, + algorithm: "unknown" as "fixed-window", + }), + TypeError, + "unknown", + ); +}); + +// === segmentsPerWindow edge cases in createRateLimiter (T-TEST-5) === + +Deno.test("createRateLimiter() throws for segmentsPerWindow: 0", () => { + assertThrows( + () => + createRateLimiter({ + limit: 10, + window: 1000, + algorithm: "sliding-window", + segmentsPerWindow: 0, + }), + RangeError, + "segmentsPerWindow", + ); +}); + +Deno.test("createRateLimiter() throws for non-integer segmentsPerWindow", () => { + assertThrows( + () => + createRateLimiter({ + limit: 10, + window: 1000, + algorithm: "sliding-window", + segmentsPerWindow: 2.5, + }), + RangeError, + "segmentsPerWindow", + ); +}); diff --git a/rate_limit/sliding_window.ts b/rate_limit/sliding_window.ts new file mode 100644 index 000000000000..ba2b2ba78334 --- /dev/null +++ b/rate_limit/sliding_window.ts @@ -0,0 +1,141 @@ +// Copyright 2018-2026 the Deno authors. MIT license. +// This module is browser compatible. + +import type { QueueOptions, ReplenishingRateLimiter } from "./types.ts"; +import { createReplenishingLimiter } from "./_replenishing_limiter.ts"; +import { createSlidingWindowOps } from "./_algorithms.ts"; +import { + assertNonNegativeInteger, + assertPositiveFinite, + assertPositiveInteger, +} from "./_validation.ts"; + +/** + * Options for {@linkcode createSlidingWindow}. + * + * @experimental **UNSTABLE**: New API, yet to be vetted. + */ +export interface SlidingWindowOptions extends QueueOptions { + /** Maximum permits across the sliding window. */ + permitLimit: number; + /** Total window duration in milliseconds. 
*/ + window: number; + /** + * Number of segments within the window. Higher values give smoother rate + * enforcement at the cost of more frequent timer ticks. Must be at least 2 + * (1 segment degenerates to a fixed window). + */ + segmentsPerWindow: number; + /** + * Start an internal timer for automatic segment rotation. + * + * When `false`, call {@linkcode ReplenishingRateLimiter.replenish} + * manually. + * + * @default {true} + */ + autoReplenishment?: boolean; + /** + * Clock function returning the current time in milliseconds. Override + * for deterministic testing. + * + * @default {Date.now} + */ + clock?: () => number; +} + +/** + * Create a sliding window rate limiter. The window is divided into segments + * that rotate individually, giving smoother rate enforcement than a fixed + * window. Unlike a fixed window, a burst at the boundary cannot exceed the + * permit limit. + * + * @experimental **UNSTABLE**: New API, yet to be vetted. + * + * @example Basic usage + * ```ts no-assert + * import { createSlidingWindow } from "@std/rate-limit/sliding-window"; + * + * using limiter = createSlidingWindow({ + * permitLimit: 100, + * window: 60_000, + * segmentsPerWindow: 6, + * }); + * + * using lease = limiter.tryAcquire(); + * if (!lease.acquired) { + * console.log(`Retry after ${lease.retryAfter}ms`); + * } + * ``` + * + * @example Manual replenishment + * ```ts no-assert + * import { createSlidingWindow } from "@std/rate-limit/sliding-window"; + * + * using limiter = createSlidingWindow({ + * permitLimit: 100, + * window: 60_000, + * segmentsPerWindow: 6, + * autoReplenishment: false, + * }); + * + * limiter.replenish(); + * ``` + * + * @param options Configuration for the sliding window. + * @returns A {@linkcode ReplenishingRateLimiter}. 
+ */ +export function createSlidingWindow( + options: SlidingWindowOptions, +): ReplenishingRateLimiter { + const context = "sliding window"; + assertPositiveInteger(context, "permitLimit", options.permitLimit); + assertPositiveFinite(context, "window", options.window); + if ( + !Number.isInteger(options.segmentsPerWindow) || + options.segmentsPerWindow < 2 + ) { + throw new RangeError( + `Cannot create sliding window: 'segmentsPerWindow' must be an integer >= 2, received ${options.segmentsPerWindow}`, + ); + } + if (options.window % options.segmentsPerWindow !== 0) { + throw new RangeError( + `Cannot create sliding window: 'window' (${options.window}) must be evenly divisible by 'segmentsPerWindow' (${options.segmentsPerWindow})`, + ); + } + assertNonNegativeInteger(context, "queueLimit", options.queueLimit); + + const { permitLimit, segmentsPerWindow, window } = options; + const clock = options.clock ?? Date.now; + const segmentDuration = window / segmentsPerWindow; + const ops = createSlidingWindowOps(permitLimit, window, segmentsPerWindow); + const state = ops.create(clock()); + let lastNow = 0; + + return createReplenishingLimiter( + { + replenishmentPeriod: segmentDuration, + autoReplenishment: options.autoReplenishment ?? true, + queueLimit: options.queueLimit ?? 0, + queueOrder: options.queueOrder ?? 
"oldest-first", + }, + { + get permitLimit() { + return ops.limit; + }, + tryAcquirePermits(permits: number): boolean { + lastNow = clock(); + ops.advance(state, lastNow); + return ops.tryConsume(state, permits, lastNow); + }, + replenish(): void { + lastNow = state.segmentStart + segmentDuration; + ops.replenish(state); + }, + computeRetryAfter(permits: number): number { + return ops.computeRetryAfter(state, permits, lastNow); + }, + }, + ); +} diff --git a/rate_limit/sliding_window_test.ts b/rate_limit/sliding_window_test.ts new file mode 100644 index 000000000000..b074ab5183ca --- /dev/null +++ b/rate_limit/sliding_window_test.ts @@ -0,0 +1,689 @@ +// Copyright 2018-2026 the Deno authors. MIT license. + +import { + assert, + assertEquals, + assertFalse, + assertRejects, + assertThrows, +} from "@std/assert"; +import { FakeTime } from "@std/testing/time"; +import { createSlidingWindow } from "./sliding_window.ts"; + +// --- Factory validation --- + +Deno.test("createSlidingWindow() throws for invalid permitLimit", () => { + assertThrows( + () => + createSlidingWindow({ + permitLimit: 0, + window: 1000, + segmentsPerWindow: 2, + }), + RangeError, + "permitLimit", + ); + assertThrows( + () => + createSlidingWindow({ + permitLimit: -1, + window: 1000, + segmentsPerWindow: 2, + }), + RangeError, + "permitLimit", + ); + assertThrows( + () => + createSlidingWindow({ + permitLimit: 1.5, + window: 1000, + segmentsPerWindow: 2, + }), + RangeError, + "permitLimit", + ); +}); + +Deno.test("createSlidingWindow() throws for invalid window", () => { + assertThrows( + () => + createSlidingWindow({ + permitLimit: 10, + window: 0, + segmentsPerWindow: 2, + }), + RangeError, + "window", + ); + assertThrows( + () => + createSlidingWindow({ + permitLimit: 10, + window: -100, + segmentsPerWindow: 2, + }), + RangeError, + "window", + ); +}); + +Deno.test("createSlidingWindow() throws for invalid segmentsPerWindow", () => { + assertThrows( + () => + createSlidingWindow({ + permitLimit: 
10, + window: 1000, + segmentsPerWindow: 1, + }), + RangeError, + "segmentsPerWindow", + ); + assertThrows( + () => + createSlidingWindow({ + permitLimit: 10, + window: 1000, + segmentsPerWindow: 0, + }), + RangeError, + "segmentsPerWindow", + ); + assertThrows( + () => + createSlidingWindow({ + permitLimit: 10, + window: 1000, + segmentsPerWindow: 1.5, + }), + RangeError, + "segmentsPerWindow", + ); +}); + +Deno.test("createSlidingWindow() throws when window is not divisible by segmentsPerWindow", () => { + assertThrows( + () => + createSlidingWindow({ + permitLimit: 10, + window: 1000, + segmentsPerWindow: 3, + }), + RangeError, + "divisible", + ); +}); + +Deno.test("createSlidingWindow() throws for invalid queueLimit", () => { + assertThrows( + () => + createSlidingWindow({ + permitLimit: 10, + window: 1000, + segmentsPerWindow: 2, + queueLimit: -1, + }), + RangeError, + "queueLimit", + ); +}); + +// --- tryAcquire --- + +Deno.test("tryAcquire() succeeds within the permit limit", () => { + using time = new FakeTime(0); + using limiter = createSlidingWindow({ + permitLimit: 3, + window: 1000, + segmentsPerWindow: 2, + }); + void time; + + assert(limiter.tryAcquire().acquired); + assert(limiter.tryAcquire().acquired); + assert(limiter.tryAcquire().acquired); + assertFalse(limiter.tryAcquire().acquired); +}); + +Deno.test("tryAcquire() acquires multiple permits at once", () => { + using time = new FakeTime(0); + using limiter = createSlidingWindow({ + permitLimit: 5, + window: 1000, + segmentsPerWindow: 2, + }); + void time; + + assert(limiter.tryAcquire(3).acquired); + assertFalse(limiter.tryAcquire(3).acquired); + assert(limiter.tryAcquire(2).acquired); +}); + +Deno.test("tryAcquire() rejects with retryAfter equal to segment duration", () => { + using time = new FakeTime(0); + using limiter = createSlidingWindow({ + permitLimit: 1, + window: 1000, + segmentsPerWindow: 4, + }); + void time; + + limiter.tryAcquire(); + const lease = limiter.tryAcquire(); + 
assertFalse(lease.acquired); + assertEquals(lease.retryAfter, 250); +}); + +Deno.test("tryAcquire() throws for invalid permits", () => { + using time = new FakeTime(0); + using limiter = createSlidingWindow({ + permitLimit: 5, + window: 1000, + segmentsPerWindow: 2, + }); + void time; + + assertThrows(() => limiter.tryAcquire(0), RangeError); + assertThrows(() => limiter.tryAcquire(-1), RangeError); + assertThrows(() => limiter.tryAcquire(1.5), RangeError); +}); + +Deno.test("tryAcquire() throws when permits exceed permitLimit", () => { + using time = new FakeTime(0); + using limiter = createSlidingWindow({ + permitLimit: 5, + window: 1000, + segmentsPerWindow: 2, + }); + void time; + + assertThrows(() => limiter.tryAcquire(6), RangeError, "exceeds"); +}); + +// --- Sliding behavior --- + +Deno.test("permits consumed in segment 0 free after N segment rotations", () => { + using time = new FakeTime(0); + // 4 segments, each 250ms. Full window = 1000ms. + using limiter = createSlidingWindow({ + permitLimit: 4, + window: 1000, + segmentsPerWindow: 4, + }); + + // Fill all permits in segment 0 + limiter.tryAcquire(4); + assertFalse(limiter.tryAcquire().acquired); + + // After 1 segment rotation (250ms), segment 0 is still in the window + time.tick(250); + assertFalse(limiter.tryAcquire().acquired); + + // After 2 rotations (500ms), segment 0 still in window + time.tick(250); + assertFalse(limiter.tryAcquire().acquired); + + // After 3 rotations (750ms), segment 0 still in window + time.tick(250); + assertFalse(limiter.tryAcquire().acquired); + + // After 4 rotations (1000ms), segment 0 is evicted — permits freed + time.tick(250); + assert(limiter.tryAcquire(4).acquired); +}); + +Deno.test("sliding window prevents boundary burst that fixed window allows", () => { + using time = new FakeTime(0); + // 2 segments of 500ms each, limit 10. 
+ using limiter = createSlidingWindow({ + permitLimit: 10, + window: 1000, + segmentsPerWindow: 2, + }); + + // Use all 10 permits in segment 0 + limiter.tryAcquire(10); + assertFalse(limiter.tryAcquire().acquired); + + // After one segment rotation (500ms), only segment 0's permits are still + // counted. A fixed window would have reset entirely, allowing 10 more. + // The sliding window only frees what was in the evicted segment — nothing + // yet, because segment 0 hasn't been evicted (it's now the "oldest" of 2). + time.tick(500); + assertFalse(limiter.tryAcquire().acquired); + + // After the second rotation (1000ms total), segment 0 is finally evicted. + time.tick(500); + assert(limiter.tryAcquire(10).acquired); +}); + +Deno.test("permits spread across segments free incrementally", () => { + using time = new FakeTime(0); + // 3 segments of 100ms each, limit 6. + using limiter = createSlidingWindow({ + permitLimit: 6, + window: 300, + segmentsPerWindow: 3, + }); + + // Segment 0: use 2 + limiter.tryAcquire(2); + // Segment 1: use 2 + time.tick(100); + limiter.tryAcquire(2); + // Segment 2: use 2 — now at limit + time.tick(100); + limiter.tryAcquire(2); + assertFalse(limiter.tryAcquire().acquired); + + // Rotate once: evicts segment 0 (2 permits), freeing 2 + time.tick(100); + assert(limiter.tryAcquire(2).acquired); + assertFalse(limiter.tryAcquire().acquired); + + // Rotate again: evicts segment 1 (2 permits), freeing 2 + time.tick(100); + assert(limiter.tryAcquire(2).acquired); + assertFalse(limiter.tryAcquire().acquired); +}); + +// --- Manual replenishment --- + +Deno.test("replenish() throws when autoReplenishment is true", () => { + using time = new FakeTime(0); + using limiter = createSlidingWindow({ + permitLimit: 5, + window: 1000, + segmentsPerWindow: 2, + }); + void time; + + assertThrows( + () => limiter.replenish(), + Error, + "Cannot replenish: limiter uses automatic replenishment", + ); +}); + +Deno.test("replenish() rotates a segment when 
autoReplenishment is false", () => { + using limiter = createSlidingWindow({ + permitLimit: 4, + window: 1000, + segmentsPerWindow: 4, + autoReplenishment: false, + }); + + limiter.tryAcquire(4); + assertFalse(limiter.tryAcquire().acquired); + + // Each replenish() rotates one segment. Need 4 rotations to evict segment 0. + limiter.replenish(); + assertFalse(limiter.tryAcquire().acquired); + limiter.replenish(); + assertFalse(limiter.tryAcquire().acquired); + limiter.replenish(); + assertFalse(limiter.tryAcquire().acquired); + limiter.replenish(); + assert(limiter.tryAcquire(4).acquired); +}); + +// --- acquire (async) --- + +Deno.test("acquire() resolves immediately when permits available", async () => { + using time = new FakeTime(0); + using limiter = createSlidingWindow({ + permitLimit: 5, + window: 1000, + segmentsPerWindow: 2, + }); + void time; + + const lease = await limiter.acquire(); + assert(lease.acquired); +}); + +Deno.test("acquire() returns rejected lease when queue limit is 0", async () => { + using time = new FakeTime(0); + using limiter = createSlidingWindow({ + permitLimit: 1, + window: 1000, + segmentsPerWindow: 2, + queueLimit: 0, + }); + void time; + + limiter.tryAcquire(); + const lease = await limiter.acquire(); + assertFalse(lease.acquired); + assertEquals(lease.reason, "Queue limit exceeded"); +}); + +Deno.test("acquire() queues and resolves after segment rotation frees capacity", async () => { + using time = new FakeTime(0); + // 2 segments of 500ms, limit 1 + using limiter = createSlidingWindow({ + permitLimit: 1, + window: 1000, + segmentsPerWindow: 2, + queueLimit: 5, + }); + + limiter.tryAcquire(); + + let resolved = false; + const promise = limiter.acquire().then((lease) => { + resolved = true; + return lease; + }); + + await Promise.resolve(); + assertFalse(resolved); + + // First rotation doesn't evict the segment with the permit yet + time.tick(500); + await Promise.resolve(); + assertFalse(resolved); + + // Second rotation evicts 
it + time.tick(500); + const lease = await promise; + assert(resolved); + assert(lease.acquired); +}); + +Deno.test("acquire() rejects when aborted via signal", async () => { + using time = new FakeTime(0); + using limiter = createSlidingWindow({ + permitLimit: 1, + window: 1000, + segmentsPerWindow: 2, + queueLimit: 5, + }); + void time; + + limiter.tryAcquire(); + + const controller = new AbortController(); + const promise = limiter.acquire(1, { signal: controller.signal }); + controller.abort(); + + await assertRejects(() => promise, DOMException); +}); + +Deno.test("acquire() rejects when signal is already aborted", async () => { + using time = new FakeTime(0); + using limiter = createSlidingWindow({ + permitLimit: 1, + window: 1000, + segmentsPerWindow: 2, + queueLimit: 5, + }); + void time; + + limiter.tryAcquire(); + + await assertRejects( + () => limiter.acquire(1, { signal: AbortSignal.abort() }), + DOMException, + ); +}); + +// --- Disposal --- + +Deno.test("dispose resolves queued waiters with rejected leases", async () => { + using time = new FakeTime(0); + const limiter = createSlidingWindow({ + permitLimit: 1, + window: 1000, + segmentsPerWindow: 2, + queueLimit: 5, + }); + void time; + + limiter.tryAcquire(); + const promise = limiter.acquire(); + limiter[Symbol.dispose](); + + const lease = await promise; + assertFalse(lease.acquired); + assertEquals(lease.reason, "Rate limiter has been disposed"); +}); + +Deno.test("tryAcquire() returns rejected lease after disposal", () => { + using time = new FakeTime(0); + const limiter = createSlidingWindow({ + permitLimit: 5, + window: 1000, + segmentsPerWindow: 2, + }); + void time; + + limiter[Symbol.dispose](); + const lease = limiter.tryAcquire(); + assertFalse(lease.acquired); +}); + +Deno.test("acquire() rejects after disposal", async () => { + using time = new FakeTime(0); + const limiter = createSlidingWindow({ + permitLimit: 5, + window: 1000, + segmentsPerWindow: 2, + }); + void time; + + 
limiter[Symbol.dispose](); + await assertRejects(() => limiter.acquire(), Error, "disposed"); +}); + +// --- Queue ordering --- + +Deno.test("oldest-first queue resolves waiters in FIFO order", async () => { + using time = new FakeTime(0); + // 2 segments of 500ms, limit 1. + using limiter = createSlidingWindow({ + permitLimit: 1, + window: 1000, + segmentsPerWindow: 2, + queueLimit: 10, + queueOrder: "oldest-first", + }); + + limiter.tryAcquire(); + + const order: number[] = []; + const p1 = limiter.acquire().then((l) => { + order.push(1); + return l; + }); + const p2 = limiter.acquire().then((l) => { + order.push(2); + return l; + }); + + // 4 segment rotations total: first 2 free the original permit (p1 served), + // next 2 free p1's permit (p2 served). + time.tick(2000); + await p1; + await p2; + + assertEquals(order, [1, 2]); +}); + +Deno.test("newest-first queue resolves newest waiter first", async () => { + using time = new FakeTime(0); + // 4 segments of 250ms, limit 2. Two permits available at start. + using limiter = createSlidingWindow({ + permitLimit: 2, + window: 1000, + segmentsPerWindow: 4, + queueLimit: 10, + queueOrder: "newest-first", + }); + + limiter.tryAcquire(2); + + const order: number[] = []; + const p1 = limiter.acquire().then((l) => { + order.push(1); + return l; + }); + const p2 = limiter.acquire().then((l) => { + order.push(2); + return l; + }); + + // 4 rotations evicts segment 0 (2 permits). newest-first serves p2 first. 
+ time.tick(1000); + await p2; + await p1; + + assertEquals(order, [2, 1]); +}); + +// --- Eviction --- + +Deno.test("newest-first queue evicts oldest waiter when queue is full", async () => { + using time = new FakeTime(0); + // 4 segments of 250ms, limit 3, queue holds 2 + using limiter = createSlidingWindow({ + permitLimit: 3, + window: 1000, + segmentsPerWindow: 4, + queueLimit: 2, + queueOrder: "newest-first", + }); + + limiter.tryAcquire(3); + + const results: string[] = []; + const p1 = limiter.acquire().then((l) => { + results.push(l.acquired ? "p1:acquired" : `p1:${l.reason}`); + return l; + }); + const p2 = limiter.acquire().then((l) => { + results.push(l.acquired ? "p2:acquired" : `p2:${l.reason}`); + return l; + }); + const p3 = limiter.acquire().then((l) => { + results.push(l.acquired ? "p3:acquired" : `p3:${l.reason}`); + return l; + }); + + await p1; + assertEquals(results, ["p1:Evicted by newer request"]); + + // 4 rotations evicts segment 0 (3 permits freed). newest-first: p3 then p2. 
+ time.tick(1000); + await p3; + await p2; + + assertEquals(results, [ + "p1:Evicted by newer request", + "p3:acquired", + "p2:acquired", + ]); +}); + +// --- acquire() validation --- + +Deno.test("acquire() rejects for invalid permits", async () => { + using time = new FakeTime(0); + using limiter = createSlidingWindow({ + permitLimit: 5, + window: 1000, + segmentsPerWindow: 2, + }); + void time; + + await assertRejects(() => limiter.acquire(0), RangeError); + await assertRejects(() => limiter.acquire(-1), RangeError); + await assertRejects(() => limiter.acquire(1.5), RangeError); +}); + +Deno.test("acquire() rejects when permits exceed permitLimit", async () => { + using time = new FakeTime(0); + using limiter = createSlidingWindow({ + permitLimit: 5, + window: 1000, + segmentsPerWindow: 2, + }); + void time; + + await assertRejects(() => limiter.acquire(6), RangeError, "exceeds"); +}); + +// --- Multiple waiters resolved in single replenishment --- + +Deno.test("single replenishment resolves multiple queued waiters", async () => { + using time = new FakeTime(0); + // 2 segments of 500ms, limit 3. 
+ using limiter = createSlidingWindow({ + permitLimit: 3, + window: 1000, + segmentsPerWindow: 2, + queueLimit: 10, + }); + + limiter.tryAcquire(3); + + const order: number[] = []; + const p1 = limiter.acquire(1).then((l) => { + order.push(1); + return l; + }); + const p2 = limiter.acquire(1).then((l) => { + order.push(2); + return l; + }); + const p3 = limiter.acquire(1).then((l) => { + order.push(3); + return l; + }); + + await Promise.resolve(); + assertEquals(order, []); + + // 2 rotations evicts segment 0 (3 permits freed), all 3 waiters drain at once + time.tick(1000); + await Promise.all([p1, p2, p3]); + + assertEquals(order, [1, 2, 3]); + for (const p of [p1, p2, p3]) { + assert((await p).acquired); + } +}); + +// --- Queue edge cases --- + +Deno.test("acquire() rejects when permits exceed queueLimit even if queue is empty", async () => { + using time = new FakeTime(0); + using limiter = createSlidingWindow({ + permitLimit: 5, + window: 1000, + segmentsPerWindow: 2, + queueLimit: 2, + }); + void time; + + limiter.tryAcquire(5); + + const lease = await limiter.acquire(3); + assertFalse(lease.acquired); + assertEquals(lease.reason, "Queue limit exceeded"); +}); + +// --- Double dispose --- + +Deno.test("double dispose is a no-op", () => { + using time = new FakeTime(0); + const limiter = createSlidingWindow({ + permitLimit: 5, + window: 1000, + segmentsPerWindow: 2, + }); + void time; + + limiter[Symbol.dispose](); + limiter[Symbol.dispose](); +}); diff --git a/rate_limit/token_bucket.ts b/rate_limit/token_bucket.ts new file mode 100644 index 000000000000..466a0ce2797b --- /dev/null +++ b/rate_limit/token_bucket.ts @@ -0,0 +1,135 @@ +// Copyright 2018-2026 the Deno authors. MIT license. +// This module is browser compatible. 
+ +import type { QueueOptions, ReplenishingRateLimiter } from "./types.ts"; +import { createReplenishingLimiter } from "./_replenishing_limiter.ts"; +import { createTokenBucketOps } from "./_algorithms.ts"; +import { + assertNonNegativeInteger, + assertPositiveFinite, + assertPositiveInteger, +} from "./_validation.ts"; + +/** + * Options for {@linkcode createTokenBucket}. + * + * @experimental **UNSTABLE**: New API, yet to be vetted. + */ +export interface TokenBucketOptions extends QueueOptions { + /** Maximum tokens the bucket can hold. */ + tokenLimit: number; + /** Tokens added each replenishment period. */ + tokensPerPeriod: number; + /** Replenishment interval in milliseconds. */ + replenishmentPeriod: number; + /** + * Start an internal timer for automatic replenishment. + * + * When `false`, call {@linkcode ReplenishingRateLimiter.replenish} + * manually. + * + * @default {true} + */ + autoReplenishment?: boolean; + /** + * Clock function returning the current time in milliseconds. Override + * for deterministic testing. + * + * @default {Date.now} + */ + clock?: () => number; +} + +/** + * Create a token bucket rate limiter. Tokens are added periodically, making + * this strategy ideal for smoothing bursty traffic. + * + * @experimental **UNSTABLE**: New API, yet to be vetted. 
+ * + * @example Basic usage + * ```ts no-assert + * import { createTokenBucket } from "@std/rate-limit/token-bucket"; + * + * using limiter = createTokenBucket({ + * tokenLimit: 10, + * tokensPerPeriod: 1, + * replenishmentPeriod: 1000, + * }); + * + * using lease = limiter.tryAcquire(); + * if (!lease.acquired) { + * console.log(`Retry after ${lease.retryAfter}ms`); + * } + * ``` + * + * @example Manual replenishment + * ```ts no-assert + * import { createTokenBucket } from "@std/rate-limit/token-bucket"; + * + * using limiter = createTokenBucket({ + * tokenLimit: 10, + * tokensPerPeriod: 5, + * replenishmentPeriod: 1000, + * autoReplenishment: false, + * }); + * + * limiter.replenish(); + * ``` + * + * @param options Configuration for the token bucket. + * @returns A {@linkcode ReplenishingRateLimiter}. + */ +export function createTokenBucket( + options: TokenBucketOptions, +): ReplenishingRateLimiter { + const context = "token bucket"; + assertPositiveInteger(context, "tokenLimit", options.tokenLimit); + assertPositiveInteger(context, "tokensPerPeriod", options.tokensPerPeriod); + assertPositiveFinite( + context, + "replenishmentPeriod", + options.replenishmentPeriod, + ); + if (options.tokensPerPeriod > options.tokenLimit) { + throw new RangeError( + `Cannot create token bucket: 'tokensPerPeriod' (${options.tokensPerPeriod}) exceeds 'tokenLimit' (${options.tokenLimit})`, + ); + } + assertNonNegativeInteger(context, "queueLimit", options.queueLimit); + + const { tokenLimit, tokensPerPeriod, replenishmentPeriod } = options; + const clock = options.clock ?? Date.now; + const ops = createTokenBucketOps( + tokenLimit, + replenishmentPeriod, + tokensPerPeriod, + ); + const state = ops.create(clock()); + let lastNow = 0; + + return createReplenishingLimiter( + { + replenishmentPeriod, + autoReplenishment: options.autoReplenishment ?? true, + queueLimit: options.queueLimit ?? 0, + queueOrder: options.queueOrder ?? 
"oldest-first", + }, + { + get permitLimit() { + return ops.limit; + }, + tryAcquirePermits(permits: number): boolean { + lastNow = clock(); + ops.advance(state, lastNow); + return ops.tryConsume(state, permits, lastNow); + }, + replenish(): void { + lastNow = state.lastRefill + replenishmentPeriod; + ops.replenish(state); + }, + computeRetryAfter(permits: number): number { + return ops.computeRetryAfter(state, permits, lastNow); + }, + }, + ); +} diff --git a/rate_limit/token_bucket_test.ts b/rate_limit/token_bucket_test.ts new file mode 100644 index 000000000000..454bd220c9f5 --- /dev/null +++ b/rate_limit/token_bucket_test.ts @@ -0,0 +1,745 @@ +// Copyright 2018-2026 the Deno authors. MIT license. + +import { + assert, + assertEquals, + assertFalse, + assertRejects, + assertThrows, +} from "@std/assert"; +import { FakeTime } from "@std/testing/time"; +import { createTokenBucket } from "./token_bucket.ts"; + +// --- Factory validation --- + +Deno.test("createTokenBucket() throws for invalid tokenLimit", () => { + assertThrows( + () => + createTokenBucket({ + tokenLimit: 0, + tokensPerPeriod: 1, + replenishmentPeriod: 1000, + }), + RangeError, + "tokenLimit", + ); + assertThrows( + () => + createTokenBucket({ + tokenLimit: -1, + tokensPerPeriod: 1, + replenishmentPeriod: 1000, + }), + RangeError, + "tokenLimit", + ); + assertThrows( + () => + createTokenBucket({ + tokenLimit: 1.5, + tokensPerPeriod: 1, + replenishmentPeriod: 1000, + }), + RangeError, + "tokenLimit", + ); +}); + +Deno.test("createTokenBucket() throws for invalid tokensPerPeriod", () => { + assertThrows( + () => + createTokenBucket({ + tokenLimit: 10, + tokensPerPeriod: 0, + replenishmentPeriod: 1000, + }), + RangeError, + "tokensPerPeriod", + ); +}); + +Deno.test("createTokenBucket() throws for invalid replenishmentPeriod", () => { + assertThrows( + () => + createTokenBucket({ + tokenLimit: 10, + tokensPerPeriod: 1, + replenishmentPeriod: 0, + }), + RangeError, + "replenishmentPeriod", + ); + 
assertThrows( + () => + createTokenBucket({ + tokenLimit: 10, + tokensPerPeriod: 1, + replenishmentPeriod: -100, + }), + RangeError, + "replenishmentPeriod", + ); +}); + +Deno.test("createTokenBucket() throws when tokensPerPeriod exceeds tokenLimit", () => { + assertThrows( + () => + createTokenBucket({ + tokenLimit: 5, + tokensPerPeriod: 10, + replenishmentPeriod: 1000, + }), + RangeError, + "tokensPerPeriod", + ); +}); + +Deno.test("createTokenBucket() throws for invalid queueLimit", () => { + assertThrows( + () => + createTokenBucket({ + tokenLimit: 10, + tokensPerPeriod: 1, + replenishmentPeriod: 1000, + queueLimit: -1, + }), + RangeError, + "queueLimit", + ); +}); + +// --- tryAcquire --- + +Deno.test("tryAcquire() succeeds when tokens are available", () => { + using time = new FakeTime(0); + using limiter = createTokenBucket({ + tokenLimit: 5, + tokensPerPeriod: 1, + replenishmentPeriod: 1000, + }); + void time; + + const lease = limiter.tryAcquire(); + assert(lease.acquired); +}); + +Deno.test("tryAcquire() acquires multiple permits", () => { + using time = new FakeTime(0); + using limiter = createTokenBucket({ + tokenLimit: 5, + tokensPerPeriod: 1, + replenishmentPeriod: 1000, + }); + void time; + + const lease = limiter.tryAcquire(3); + assert(lease.acquired); + + const lease2 = limiter.tryAcquire(3); + assertFalse(lease2.acquired); +}); + +Deno.test("tryAcquire() returns rejected lease when tokens exhausted", () => { + using time = new FakeTime(0); + using limiter = createTokenBucket({ + tokenLimit: 1, + tokensPerPeriod: 1, + replenishmentPeriod: 1000, + }); + void time; + + const first = limiter.tryAcquire(); + assert(first.acquired); + + const second = limiter.tryAcquire(); + assertFalse(second.acquired); + assert(second.retryAfter > 0); + assertEquals(second.reason, "Insufficient permits"); +}); + +Deno.test("tryAcquire() throws for invalid permits", () => { + using time = new FakeTime(0); + using limiter = createTokenBucket({ + tokenLimit: 5, + 
tokensPerPeriod: 1, + replenishmentPeriod: 1000, + }); + void time; + + assertThrows(() => limiter.tryAcquire(0), RangeError); + assertThrows(() => limiter.tryAcquire(-1), RangeError); + assertThrows(() => limiter.tryAcquire(1.5), RangeError); +}); + +Deno.test("tryAcquire() throws when permits exceed tokenLimit", () => { + using time = new FakeTime(0); + using limiter = createTokenBucket({ + tokenLimit: 5, + tokensPerPeriod: 1, + replenishmentPeriod: 1000, + }); + void time; + + assertThrows(() => limiter.tryAcquire(6), RangeError, "exceeds"); +}); + +// --- Replenishment --- + +Deno.test("tokens replenish after the configured period", () => { + using time = new FakeTime(0); + using limiter = createTokenBucket({ + tokenLimit: 2, + tokensPerPeriod: 1, + replenishmentPeriod: 1000, + }); + + limiter.tryAcquire(); + limiter.tryAcquire(); + assertFalse(limiter.tryAcquire().acquired); + + time.tick(1000); + assert(limiter.tryAcquire().acquired); +}); + +Deno.test("tokens do not exceed tokenLimit after replenishment", () => { + using time = new FakeTime(0); + using limiter = createTokenBucket({ + tokenLimit: 2, + tokensPerPeriod: 2, + replenishmentPeriod: 1000, + }); + + time.tick(5000); + + assert(limiter.tryAcquire(2).acquired); + assertFalse(limiter.tryAcquire().acquired); +}); + +// --- Manual replenishment --- + +Deno.test("replenish() throws when autoReplenishment is true", () => { + using time = new FakeTime(0); + using limiter = createTokenBucket({ + tokenLimit: 5, + tokensPerPeriod: 1, + replenishmentPeriod: 1000, + }); + void time; + + assertThrows( + () => limiter.replenish(), + Error, + "Cannot replenish: limiter uses automatic replenishment", + ); +}); + +Deno.test("replenish() replenishes when autoReplenishment is false", () => { + const limiter = createTokenBucket({ + tokenLimit: 5, + tokensPerPeriod: 2, + replenishmentPeriod: 1000, + autoReplenishment: false, + }); + + for (let i = 0; i < 5; i++) limiter.tryAcquire(); + 
assertFalse(limiter.tryAcquire().acquired); + + limiter.replenish(); + assert(limiter.tryAcquire().acquired); + assert(limiter.tryAcquire().acquired); + assertFalse(limiter.tryAcquire().acquired); + + limiter[Symbol.dispose](); +}); + +Deno.test("replenish() drains queued acquire() waiters", async () => { + const limiter = createTokenBucket({ + tokenLimit: 2, + tokensPerPeriod: 2, + replenishmentPeriod: 1000, + autoReplenishment: false, + queueLimit: 5, + }); + + limiter.tryAcquire(2); + + let resolved = false; + const promise = limiter.acquire().then((lease) => { + resolved = true; + return lease; + }); + + await Promise.resolve(); + assertFalse(resolved); + + limiter.replenish(); + const lease = await promise; + assert(resolved); + assert(lease.acquired); + + limiter[Symbol.dispose](); +}); + +// --- acquire (async) --- + +Deno.test("acquire() resolves immediately when tokens available", async () => { + using time = new FakeTime(0); + using limiter = createTokenBucket({ + tokenLimit: 5, + tokensPerPeriod: 1, + replenishmentPeriod: 1000, + }); + void time; + + const lease = await limiter.acquire(); + assert(lease.acquired); +}); + +Deno.test("acquire() returns rejected lease when queue limit is 0", async () => { + using time = new FakeTime(0); + using limiter = createTokenBucket({ + tokenLimit: 1, + tokensPerPeriod: 1, + replenishmentPeriod: 1000, + queueLimit: 0, + }); + void time; + + limiter.tryAcquire(); + const lease = await limiter.acquire(); + assertFalse(lease.acquired); + assertEquals(lease.reason, "Queue limit exceeded"); +}); + +Deno.test("acquire() queues and resolves after replenishment", async () => { + using time = new FakeTime(0); + using limiter = createTokenBucket({ + tokenLimit: 1, + tokensPerPeriod: 1, + replenishmentPeriod: 1000, + queueLimit: 5, + }); + + limiter.tryAcquire(); + + let resolved = false; + const promise = limiter.acquire().then((lease) => { + resolved = true; + return lease; + }); + + await Promise.resolve(); + 
assertFalse(resolved); + + time.tick(1000); + const lease = await promise; + assert(resolved); + assert(lease.acquired); +}); + +Deno.test("acquire() rejects when aborted via signal", async () => { + using time = new FakeTime(0); + using limiter = createTokenBucket({ + tokenLimit: 1, + tokensPerPeriod: 1, + replenishmentPeriod: 1000, + queueLimit: 5, + }); + void time; + + limiter.tryAcquire(); + + const controller = new AbortController(); + const promise = limiter.acquire(1, { signal: controller.signal }); + controller.abort(); + + await assertRejects(() => promise, DOMException); +}); + +Deno.test("acquire() rejects when signal is already aborted", async () => { + using time = new FakeTime(0); + using limiter = createTokenBucket({ + tokenLimit: 1, + tokensPerPeriod: 1, + replenishmentPeriod: 1000, + queueLimit: 5, + }); + void time; + + limiter.tryAcquire(); + + await assertRejects( + () => limiter.acquire(1, { signal: AbortSignal.abort() }), + DOMException, + ); +}); + +// --- retryAfter --- + +Deno.test("retryAfter reflects the deficit in tokens", () => { + using time = new FakeTime(0); + using limiter = createTokenBucket({ + tokenLimit: 10, + tokensPerPeriod: 2, + replenishmentPeriod: 500, + }); + void time; + + for (let i = 0; i < 10; i++) limiter.tryAcquire(); + + const lease = limiter.tryAcquire(3); + assertFalse(lease.acquired); + assertEquals(lease.retryAfter, 1000); +}); + +// --- Disposal --- + +Deno.test("dispose resolves queued waiters with rejected leases", async () => { + using time = new FakeTime(0); + const limiter = createTokenBucket({ + tokenLimit: 1, + tokensPerPeriod: 1, + replenishmentPeriod: 1000, + queueLimit: 5, + }); + void time; + + limiter.tryAcquire(); + const promise = limiter.acquire(); + limiter[Symbol.dispose](); + + const lease = await promise; + assertFalse(lease.acquired); + assertEquals(lease.reason, "Rate limiter has been disposed"); +}); + +Deno.test("tryAcquire() returns rejected lease after disposal", () => { + using time = 
new FakeTime(0); + const limiter = createTokenBucket({ + tokenLimit: 5, + tokensPerPeriod: 1, + replenishmentPeriod: 1000, + }); + void time; + + limiter[Symbol.dispose](); + const lease = limiter.tryAcquire(); + assertFalse(lease.acquired); +}); + +Deno.test("acquire() rejects after disposal", async () => { + using time = new FakeTime(0); + const limiter = createTokenBucket({ + tokenLimit: 5, + tokensPerPeriod: 1, + replenishmentPeriod: 1000, + }); + void time; + + limiter[Symbol.dispose](); + await assertRejects(() => limiter.acquire(), Error, "disposed"); +}); + +// --- Queue ordering --- + +Deno.test("oldest-first queue resolves waiters in FIFO order", async () => { + using time = new FakeTime(0); + using limiter = createTokenBucket({ + tokenLimit: 1, + tokensPerPeriod: 1, + replenishmentPeriod: 1000, + queueLimit: 10, + queueOrder: "oldest-first", + }); + + limiter.tryAcquire(); + + const order: number[] = []; + const p1 = limiter.acquire().then((l) => { + order.push(1); + return l; + }); + const p2 = limiter.acquire().then((l) => { + order.push(2); + return l; + }); + + time.tick(1000); + await p1; + time.tick(1000); + await p2; + + assertEquals(order, [1, 2]); +}); + +Deno.test("newest-first queue resolves newest waiter first", async () => { + using time = new FakeTime(0); + using limiter = createTokenBucket({ + tokenLimit: 1, + tokensPerPeriod: 1, + replenishmentPeriod: 1000, + queueLimit: 10, + queueOrder: "newest-first", + }); + + limiter.tryAcquire(); + + const order: number[] = []; + const p1 = limiter.acquire().then((l) => { + order.push(1); + return l; + }); + const p2 = limiter.acquire().then((l) => { + order.push(2); + return l; + }); + + time.tick(1000); + await p2; + time.tick(1000); + await p1; + + assertEquals(order, [2, 1]); +}); + +// --- Multi-permit queued waiters --- + +Deno.test("acquire() queues multi-permit waiter spanning multiple periods", async () => { + using time = new FakeTime(0); + using limiter = createTokenBucket({ + tokenLimit: 
3, + tokensPerPeriod: 1, + replenishmentPeriod: 1000, + queueLimit: 10, + }); + + limiter.tryAcquire(3); + + let resolved = false; + const promise = limiter.acquire(3).then((lease) => { + resolved = true; + return lease; + }); + + await Promise.resolve(); + assertFalse(resolved); + + time.tick(1000); + await Promise.resolve(); + assertFalse(resolved); + + time.tick(1000); + await Promise.resolve(); + assertFalse(resolved); + + time.tick(1000); + const lease = await promise; + assert(resolved); + assert(lease.acquired); +}); + +// --- Multiple waiters resolved in single replenishment --- + +Deno.test("single replenishment resolves multiple queued waiters", async () => { + using time = new FakeTime(0); + using limiter = createTokenBucket({ + tokenLimit: 5, + tokensPerPeriod: 5, + replenishmentPeriod: 1000, + queueLimit: 10, + }); + + limiter.tryAcquire(5); + + const order: number[] = []; + const p1 = limiter.acquire(1).then((l) => { + order.push(1); + return l; + }); + const p2 = limiter.acquire(1).then((l) => { + order.push(2); + return l; + }); + const p3 = limiter.acquire(1).then((l) => { + order.push(3); + return l; + }); + + await Promise.resolve(); + assertEquals(order, []); + + time.tick(1000); + await Promise.all([p1, p2, p3]); + + assertEquals(order, [1, 2, 3]); + for (const p of [p1, p2, p3]) { + assert((await p).acquired); + } +}); + +// --- acquire() validation --- + +Deno.test("acquire() rejects for invalid permits", async () => { + using time = new FakeTime(0); + using limiter = createTokenBucket({ + tokenLimit: 5, + tokensPerPeriod: 1, + replenishmentPeriod: 1000, + }); + void time; + + await assertRejects(() => limiter.acquire(0), RangeError); + await assertRejects(() => limiter.acquire(-1), RangeError); + await assertRejects(() => limiter.acquire(1.5), RangeError); +}); + +Deno.test("acquire() rejects when permits exceed tokenLimit", async () => { + using time = new FakeTime(0); + using limiter = createTokenBucket({ + tokenLimit: 5, + 
tokensPerPeriod: 1, + replenishmentPeriod: 1000, + }); + void time; + + await assertRejects(() => limiter.acquire(6), RangeError, "exceeds"); +}); + +// --- Queue edge cases --- + +Deno.test("acquire() rejects when permits exceed queueLimit even if queue is empty", async () => { + using time = new FakeTime(0); + using limiter = createTokenBucket({ + tokenLimit: 5, + tokensPerPeriod: 1, + replenishmentPeriod: 1000, + queueLimit: 2, + }); + void time; + + for (let i = 0; i < 5; i++) limiter.tryAcquire(); + + const lease = await limiter.acquire(3); + assertFalse(lease.acquired); + assertEquals(lease.reason, "Queue limit exceeded"); +}); + +Deno.test("oldest-first queue evicts oldest waiter when queue is full", async () => { + using time = new FakeTime(0); + using limiter = createTokenBucket({ + tokenLimit: 1, + tokensPerPeriod: 1, + replenishmentPeriod: 1000, + queueLimit: 1, + queueOrder: "oldest-first", + }); + + limiter.tryAcquire(); + + const results: string[] = []; + const p1 = limiter.acquire().then((l) => { + results.push(l.acquired ? "p1:acquired" : `p1:${l.reason}`); + return l; + }); + const p2 = limiter.acquire().then((l) => { + results.push(l.acquired ? "p2:acquired" : `p2:${l.reason}`); + return l; + }); + + await p1; + assertEquals(results, ["p1:Evicted by newer request"]); + + time.tick(1000); + await p2; + + assertEquals(results, ["p1:Evicted by newer request", "p2:acquired"]); +}); + +Deno.test("eviction evicts multiple waiters to make room for a large request", async () => { + using time = new FakeTime(0); + using limiter = createTokenBucket({ + tokenLimit: 3, + tokensPerPeriod: 3, + replenishmentPeriod: 1000, + queueLimit: 3, + queueOrder: "newest-first", + }); + + limiter.tryAcquire(3); + + const results: string[] = []; + const p1 = limiter.acquire(1).then((l) => { + results.push(l.acquired ? "p1:acquired" : `p1:${l.reason}`); + return l; + }); + const p2 = limiter.acquire(1).then((l) => { + results.push(l.acquired ? 
"p2:acquired" : `p2:${l.reason}`); + return l; + }); + const p3 = limiter.acquire(1).then((l) => { + results.push(l.acquired ? "p3:acquired" : `p3:${l.reason}`); + return l; + }); + + await Promise.resolve(); + assertEquals(results, []); + + const p4 = limiter.acquire(3).then((l) => { + results.push(l.acquired ? "p4:acquired" : `p4:${l.reason}`); + return l; + }); + + await Promise.all([p1, p2, p3]); + assertEquals(results, [ + "p1:Evicted by newer request", + "p2:Evicted by newer request", + "p3:Evicted by newer request", + ]); + + time.tick(1000); + const lease = await p4; + assert(lease.acquired); + assertEquals(results, [ + "p1:Evicted by newer request", + "p2:Evicted by newer request", + "p3:Evicted by newer request", + "p4:acquired", + ]); +}); + +// --- retryAfter after manual replenish --- + +Deno.test("retryAfter is correct after manual replenish", () => { + const limiter = createTokenBucket({ + tokenLimit: 3, + tokensPerPeriod: 1, + replenishmentPeriod: 1000, + autoReplenishment: false, + }); + + for (let i = 0; i < 3; i++) limiter.tryAcquire(); + limiter.replenish(); + limiter.tryAcquire(); + + const lease = limiter.tryAcquire(3); + assertFalse(lease.acquired); + assert(lease.retryAfter > 0); + assert(Number.isFinite(lease.retryAfter)); + + limiter[Symbol.dispose](); +}); + +// --- Double dispose --- + +Deno.test("double dispose is a no-op", () => { + using time = new FakeTime(0); + const limiter = createTokenBucket({ + tokenLimit: 5, + tokensPerPeriod: 1, + replenishmentPeriod: 1000, + }); + void time; + + limiter[Symbol.dispose](); + limiter[Symbol.dispose](); +}); diff --git a/rate_limit/types.ts b/rate_limit/types.ts new file mode 100644 index 000000000000..b9ff8b11ff1e --- /dev/null +++ b/rate_limit/types.ts @@ -0,0 +1,161 @@ +// Copyright 2018-2026 the Deno authors. MIT license. +// This module is browser compatible. + +/** + * A rate limiter that controls how many permits can be acquired over time or + * concurrently. 
Implementations are disposable — disposing a limiter cancels + * any internal timers and rejects queued waiters. + * + * @experimental **UNSTABLE**: New API, yet to be vetted. + * + * @example Synchronous usage + * ```ts + * import type { RateLimiter } from "@std/rate-limit/types"; + * + * function useRateLimiter(limiter: RateLimiter) { + * using lease = limiter.tryAcquire(); + * if (!lease.acquired) { + * return; // rate limited + * } + * // proceed with work + * } + * ``` + * + * @example Async usage with queuing + * ```ts + * import type { RateLimiter } from "@std/rate-limit/types"; + * + * async function useRateLimiter(limiter: RateLimiter) { + * using lease = await limiter.acquire(1, { + * signal: AbortSignal.timeout(5000), + * }); + * // proceed with work + * } + * ``` + * + * @see {@linkcode createRateLimiter} for keyed rate limiting (primary API). + * @see {@linkcode createTokenBucket} for token bucket rate limiting. + * @see {@linkcode createFixedWindow} for fixed window rate limiting. + * @see {@linkcode createSlidingWindow} for sliding window rate limiting. + */ +export interface RateLimiter extends Disposable { + /** Try to acquire permits synchronously. Never blocks. */ + tryAcquire(permits?: number): RateLimitLease; + + /** + * Wait for permits. Resolves immediately when permits are available. + * When no permits are available and a queue is configured, the request + * is queued until permits are replenished. + * + * **Disposal behavior:** calling `acquire()` after the limiter has been + * disposed rejects with {@linkcode Error}. Waiters already queued at the + * time of disposal resolve with a {@linkcode RejectedLease} (not a + * rejection) so they can be handled uniformly via the `acquired` field. + * + * Rejects with {@linkcode DOMException} if the signal is aborted. + */ + acquire( + permits?: number, + options?: AcquireOptions, + ): Promise; +} + +/** + * A {@linkcode RateLimiter} that replenishes permits on a timer. 
Extends + * `RateLimiter` with a {@linkcode ReplenishingRateLimiter.replenish} + * method for manual replenishment when `autoReplenishment` is `false`. + * + * @experimental **UNSTABLE**: New API, yet to be vetted. + */ +export interface ReplenishingRateLimiter extends RateLimiter { + /** + * Manually trigger a replenishment cycle and drain queued waiters. + * + * @throws {Error} If the limiter uses automatic replenishment. + */ + replenish(): void; +} + +/** + * Options for {@linkcode RateLimiter.acquire}. + * + * @experimental **UNSTABLE**: New API, yet to be vetted. + */ +export interface AcquireOptions { + /** Signal to abort the wait. */ + signal?: AbortSignal; +} + +/** + * The result of a rate limit acquisition attempt, discriminated on the + * {@linkcode RateLimitLease.acquired | acquired} field. TypeScript narrows + * the type after checking `acquired`, so `retryAfter` and `reason` are only + * present on rejected leases. + * + * @experimental **UNSTABLE**: New API, yet to be vetted. + * + * @example Checking a lease + * ```ts + * import { createTokenBucket } from "@std/rate-limit/token-bucket"; + * + * const limiter = createTokenBucket({ + * tokenLimit: 10, + * tokensPerPeriod: 1, + * replenishmentPeriod: 1000, + * }); + * + * using lease = limiter.tryAcquire(); + * if (!lease.acquired) { + * console.log(`Retry after ${lease.retryAfter}ms: ${lease.reason}`); + * } + * + * limiter[Symbol.dispose](); + * ``` + */ +export type RateLimitLease = AcquiredLease | RejectedLease; + +/** + * A lease indicating that permits were successfully acquired. For concurrency + * limiters, disposing the lease releases the permit. For time-based limiters, + * dispose is a no-op. + * + * @experimental **UNSTABLE**: New API, yet to be vetted. + */ +export interface AcquiredLease extends Disposable { + /** Whether permits were acquired. Always `true` for this type. */ + readonly acquired: true; +} + +/** + * A lease indicating that permits could not be acquired. 
+ * + * @experimental **UNSTABLE**: New API, yet to be vetted. + */ +export interface RejectedLease extends Disposable { + /** Whether permits were acquired. Always `false` for this type. */ + readonly acquired: false; + /** + * Suggested retry delay in milliseconds. A value of `0` means retrying + * will not help (e.g. the limiter has been disposed). + */ + readonly retryAfter: number; + /** Human-readable reason for rejection. */ + readonly reason: string; +} + +/** + * Queue configuration shared across all rate limiter algorithms. + * + * @experimental **UNSTABLE**: New API, yet to be vetted. + */ +export interface QueueOptions { + /** Max permits that can be queued waiting. Defaults to `0` (no queueing). */ + queueLimit?: number; + /** + * Queue processing order. Defaults to `"oldest-first"`. + * + * With `"newest-first"`, the most recently queued request is served first. + * This can starve older waiters when demand consistently exceeds supply. + */ + queueOrder?: "oldest-first" | "newest-first"; +} From 23d80de8c67e3d049638e33e3ad411fcb0467cd3 Mon Sep 17 00:00:00 2001 From: Tomas Zijdemans Date: Mon, 23 Mar 2026 20:14:08 +0100 Subject: [PATCH 02/15] fix example --- rate_limit/rate_limiter.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/rate_limit/rate_limiter.ts b/rate_limit/rate_limiter.ts index e55888d16b3f..a7286906c9e7 100644 --- a/rate_limit/rate_limiter.ts +++ b/rate_limit/rate_limiter.ts @@ -210,7 +210,7 @@ export interface KeyedRateLimiter extends Disposable { * @experimental **UNSTABLE**: New API, yet to be vetted. 
* * @example Basic API rate limiting - * ```ts no-assert + * ```ts no-eval * import { createRateLimiter } from "@std/rate-limit/rate-limiter"; * * using limiter = createRateLimiter({ limit: 100, window: 60_000 }); From 67e644785c595974088d7af012b026e03f18d26b Mon Sep 17 00:00:00 2001 From: Tomas Zijdemans Date: Mon, 23 Mar 2026 20:24:34 +0100 Subject: [PATCH 03/15] fix examples --- rate_limit/fixed_window.ts | 7 +++---- rate_limit/mod.ts | 7 +++---- rate_limit/rate_limiter.ts | 14 ++++++++------ rate_limit/sliding_window.ts | 7 +++---- rate_limit/token_bucket.ts | 7 +++---- 5 files changed, 20 insertions(+), 22 deletions(-) diff --git a/rate_limit/fixed_window.ts b/rate_limit/fixed_window.ts index 4c7f1a27c580..3ce6421abfd1 100644 --- a/rate_limit/fixed_window.ts +++ b/rate_limit/fixed_window.ts @@ -46,8 +46,9 @@ export interface FixedWindowOptions extends QueueOptions { * @experimental **UNSTABLE**: New API, yet to be vetted. * * @example Basic usage - * ```ts no-assert + * ```ts * import { createFixedWindow } from "@std/rate-limit/fixed-window"; + * import { assert } from "@std/assert"; * * using limiter = createFixedWindow({ * permitLimit: 100, @@ -55,9 +56,7 @@ export interface FixedWindowOptions extends QueueOptions { * }); * * using lease = limiter.tryAcquire(); - * if (!lease.acquired) { - * console.log(`Retry after ${lease.retryAfter}ms`); - * } + * assert(lease.acquired); * ``` * * @example Manual replenishment diff --git a/rate_limit/mod.ts b/rate_limit/mod.ts index fccf4ff909be..f58cdf12ef83 100644 --- a/rate_limit/mod.ts +++ b/rate_limit/mod.ts @@ -11,15 +11,14 @@ * single-resource limiting, use the primitives: {@linkcode createTokenBucket}, * {@linkcode createFixedWindow}, and {@linkcode createSlidingWindow}. 
* - * ```ts no-assert + * ```ts * import { createRateLimiter } from "@std/rate-limit/rate-limiter"; + * import { assert } from "@std/assert"; * * using limiter = createRateLimiter({ limit: 100, window: 60_000 }); * * const result = limiter.limit("user:123"); - * if (!result.ok) { - * console.log(`Retry after ${result.retryAfter}ms`); - * } + * assert(result.ok); * ``` * * @module diff --git a/rate_limit/rate_limiter.ts b/rate_limit/rate_limiter.ts index a7286906c9e7..223a6df4fd75 100644 --- a/rate_limit/rate_limiter.ts +++ b/rate_limit/rate_limiter.ts @@ -210,7 +210,7 @@ export interface KeyedRateLimiter extends Disposable { * @experimental **UNSTABLE**: New API, yet to be vetted. * * @example Basic API rate limiting - * ```ts no-eval + * ```ts ignore * import { createRateLimiter } from "@std/rate-limit/rate-limiter"; * * using limiter = createRateLimiter({ limit: 100, window: 60_000 }); @@ -231,16 +231,20 @@ export interface KeyedRateLimiter extends Disposable { * ``` * * @example Variable cost - * ```ts no-assert + * ```ts * import { createRateLimiter } from "@std/rate-limit/rate-limiter"; + * import { assert, assertEquals } from "@std/assert"; * * using limiter = createRateLimiter({ limit: 100, window: 60_000 }); * const result = limiter.limit("user:123", { cost: 5 }); + * assert(result.ok); + * assertEquals(result.remaining, 95); * ``` * * @example GCRA for strict uniform spacing - * ```ts no-assert + * ```ts * import { createRateLimiter } from "@std/rate-limit/rate-limiter"; + * import { assert } from "@std/assert"; * * using limiter = createRateLimiter({ * limit: 10, @@ -249,9 +253,7 @@ export interface KeyedRateLimiter extends Disposable { * }); * * const result = limiter.limit("user:123"); - * if (!result.ok) { - * console.log(`Retry after ${result.retryAfter}ms`); - * } + * assert(result.ok); * ``` * * @param options Configuration for the rate limiter. 
diff --git a/rate_limit/sliding_window.ts b/rate_limit/sliding_window.ts index ba2b2ba78334..90521ff7aa1d 100644 --- a/rate_limit/sliding_window.ts +++ b/rate_limit/sliding_window.ts @@ -53,8 +53,9 @@ export interface SlidingWindowOptions extends QueueOptions { * @experimental **UNSTABLE**: New API, yet to be vetted. * * @example Basic usage - * ```ts no-assert + * ```ts * import { createSlidingWindow } from "@std/rate-limit/sliding-window"; + * import { assert } from "@std/assert"; * * using limiter = createSlidingWindow({ * permitLimit: 100, @@ -63,9 +64,7 @@ export interface SlidingWindowOptions extends QueueOptions { * }); * * using lease = limiter.tryAcquire(); - * if (!lease.acquired) { - * console.log(`Retry after ${lease.retryAfter}ms`); - * } + * assert(lease.acquired); * ``` * * @example Manual replenishment diff --git a/rate_limit/token_bucket.ts b/rate_limit/token_bucket.ts index 466a0ce2797b..d99e31e8149b 100644 --- a/rate_limit/token_bucket.ts +++ b/rate_limit/token_bucket.ts @@ -47,8 +47,9 @@ export interface TokenBucketOptions extends QueueOptions { * @experimental **UNSTABLE**: New API, yet to be vetted. 
* * @example Basic usage - * ```ts no-assert + * ```ts * import { createTokenBucket } from "@std/rate-limit/token-bucket"; + * import { assert } from "@std/assert"; * * using limiter = createTokenBucket({ * tokenLimit: 10, @@ -57,9 +58,7 @@ export interface TokenBucketOptions extends QueueOptions { * }); * * using lease = limiter.tryAcquire(); - * if (!lease.acquired) { - * console.log(`Retry after ${lease.retryAfter}ms`); - * } + * assert(lease.acquired); * ``` * * @example Manual replenishment From 4af2841e67c5bb8c29bf6a530d7d2e7e12c55ecb Mon Sep 17 00:00:00 2001 From: Tomas Zijdemans Date: Wed, 25 Mar 2026 19:24:32 +0100 Subject: [PATCH 04/15] Clarify behaviour --- rate_limit/_algorithms.ts | 27 +++++++------- rate_limit/_keyed_algorithms.ts | 8 +++-- rate_limit/rate_limiter.ts | 19 ++++++---- rate_limit/rate_limiter_test.ts | 62 ++++++++++++++++++++++++++++----- rate_limit/token_bucket_test.ts | 58 ++++++++++++++++++++++++++++++ rate_limit/types.ts | 4 +-- 6 files changed, 142 insertions(+), 36 deletions(-) diff --git a/rate_limit/_algorithms.ts b/rate_limit/_algorithms.ts index 1d4bb4cd2289..81d2d9815bd3 100644 --- a/rate_limit/_algorithms.ts +++ b/rate_limit/_algorithms.ts @@ -2,7 +2,6 @@ // This module is browser compatible. import { RollingCounter } from "@std/data-structures/unstable-rolling-counter"; - import { assertPositiveFinite, assertPositiveInteger } from "./_validation.ts"; /** @@ -96,13 +95,12 @@ export function createFixedWindowOps( replenish(state) { state.count = 0; }, - result(state, ok, _cost, now) { - const resetAt = state.windowStart + window; + result(state, ok, cost, now) { return { ok, remaining: Math.max(0, limit - state.count), - resetAt, - retryAfter: ok ? 0 : resetAt - now, + resetAt: state.windowStart + window, + retryAfter: ok ? 
0 : this.computeRetryAfter(state, cost, now), limit, }; }, @@ -176,13 +174,12 @@ export function createSlidingWindowOps( state.counter.rotate(); state.segmentStart += segmentDuration; }, - result(state, ok, _cost, now) { - const resetAt = state.segmentStart + segmentDuration; + result(state, ok, cost, now) { return { ok, remaining: Math.max(0, limit - state.counter.total), - resetAt, - retryAfter: ok ? 0 : resetAt - now, + resetAt: state.segmentStart + segmentDuration, + retryAfter: ok ? 0 : this.computeRetryAfter(state, cost, now), limit, }; }, @@ -205,18 +202,18 @@ export interface TokenBucketState { * * @param limit Maximum tokens (bucket capacity). Must be a positive integer. * @param window Refill cycle duration in milliseconds. Must be a positive finite number. - * @param tokensPerCycle Tokens added per cycle. Must be a positive integer. + * @param tokensPerPeriod Tokens added per replenishment period. Must be a positive integer. * @returns Algorithm ops for token-bucket rate limiting. 
*/ export function createTokenBucketOps( limit: number, window: number, - tokensPerCycle: number, + tokensPerPeriod: number, ): AlgorithmOps { const context = "token bucket"; assertPositiveInteger(context, "limit", limit); assertPositiveFinite(context, "window", window); - assertPositiveInteger(context, "tokensPerCycle", tokensPerCycle); + assertPositiveInteger(context, "tokensPerPeriod", tokensPerPeriod); return { limit, create(now) { @@ -226,7 +223,7 @@ export function createTokenBucketOps( const elapsed = now - state.lastRefill; if (elapsed >= window) { const cycles = Math.floor(elapsed / window); - state.tokens = Math.min(limit, state.tokens + cycles * tokensPerCycle); + state.tokens = Math.min(limit, state.tokens + cycles * tokensPerPeriod); state.lastRefill += cycles * window; } }, @@ -239,7 +236,7 @@ export function createTokenBucketOps( return state.tokens >= cost; }, replenish(state) { - state.tokens = Math.min(limit, state.tokens + tokensPerCycle); + state.tokens = Math.min(limit, state.tokens + tokensPerPeriod); state.lastRefill += window; }, result(state, ok, cost, now) { @@ -254,7 +251,7 @@ export function createTokenBucketOps( }, computeRetryAfter(state, cost, now) { const deficit = cost - state.tokens; - const cycles = Math.ceil(deficit / tokensPerCycle); + const cycles = Math.ceil(deficit / tokensPerPeriod); return Math.max(0, cycles * window - (now - state.lastRefill)); }, }; diff --git a/rate_limit/_keyed_algorithms.ts b/rate_limit/_keyed_algorithms.ts index 15750cfde1df..933a0b0bf8bc 100644 --- a/rate_limit/_keyed_algorithms.ts +++ b/rate_limit/_keyed_algorithms.ts @@ -55,6 +55,8 @@ function createKeyedAlgorithm( const ok = ops.tryConsume(state, cost, now); return ops.result(state, ok, cost, now); }, + // Advances time (segment rotation, token refill) so metadata is + // accurate, but does not consume permits or update lastAccess. 
peek(key, cost, now) { const state = keys.get(key); if (state === undefined) return peekDefault(cost, now); @@ -125,16 +127,16 @@ export function createSlidingWindowAlgorithm( * * @param limit Bucket capacity (max tokens per key). Must be a positive integer. * @param window Refill cycle duration in milliseconds. Must be a positive finite number. - * @param tokensPerCycle Tokens added per cycle. Must be a positive integer. + * @param tokensPerPeriod Tokens added per replenishment period. Must be a positive integer. * @returns A keyed algorithm using token-bucket semantics. */ export function createTokenBucketAlgorithm( limit: number, window: number, - tokensPerCycle: number, + tokensPerPeriod: number, ): KeyedAlgorithm { return createKeyedAlgorithm( - createTokenBucketOps(limit, window, tokensPerCycle), + createTokenBucketOps(limit, window, tokensPerPeriod), ); } diff --git a/rate_limit/rate_limiter.ts b/rate_limit/rate_limiter.ts index 223a6df4fd75..f6549234f485 100644 --- a/rate_limit/rate_limiter.ts +++ b/rate_limit/rate_limiter.ts @@ -49,12 +49,12 @@ export interface RateLimiterOptions { */ segmentsPerWindow?: number; /** - * For token bucket: tokens added per replenishment cycle. Ignored for + * For token bucket: tokens added per replenishment period. Ignored for * other algorithms. * * @default {limit} */ - tokensPerCycle?: number; + tokensPerPeriod?: number; /** * Time-to-live for idle key state in milliseconds. Keys with no activity * for this duration are eligible for eviction. Set to `0` to disable @@ -186,6 +186,11 @@ export interface KeyedRateLimiter extends Disposable { * Useful for displaying remaining quota in UI or headers without * affecting the count. * + * Note: `peek()` advances the algorithm's internal clock (e.g. rotates + * sliding-window segments, refills token-bucket tokens) so that the + * returned metadata reflects the current point in time. This is a + * time-advancement side effect only — no permits are consumed. 
+ * * Note: `peek()` does not count as activity for TTL-based eviction. * Keys that are only peeked (never limited) will still be evicted after * `evictionTtl` of inactivity. @@ -271,7 +276,7 @@ export function createRateLimiter( window: windowMs, algorithm: algorithmName = "sliding-window", segmentsPerWindow = 10, - tokensPerCycle = limit, + tokensPerPeriod = limit, evictionTtl = 300_000, evictionInterval = 60_000, maxKeys = 0, @@ -279,10 +284,10 @@ export function createRateLimiter( } = options; if (algorithmName === "token-bucket") { - assertPositiveInteger(context, "tokensPerCycle", tokensPerCycle); - if (tokensPerCycle > limit) { + assertPositiveInteger(context, "tokensPerPeriod", tokensPerPeriod); + if (tokensPerPeriod > limit) { throw new RangeError( - `Cannot create ${context}: 'tokensPerCycle' (${tokensPerCycle}) exceeds 'limit' (${limit})`, + `Cannot create ${context}: 'tokensPerPeriod' (${tokensPerPeriod}) exceeds 'limit' (${limit})`, ); } } @@ -312,7 +317,7 @@ export function createRateLimiter( ); break; case "token-bucket": - algorithm = createTokenBucketAlgorithm(limit, windowMs, tokensPerCycle); + algorithm = createTokenBucketAlgorithm(limit, windowMs, tokensPerPeriod); break; case "gcra": algorithm = createGcraAlgorithm(limit, windowMs); diff --git a/rate_limit/rate_limiter_test.ts b/rate_limit/rate_limiter_test.ts index 283c45e3cc74..5688e3db0a38 100644 --- a/rate_limit/rate_limiter_test.ts +++ b/rate_limit/rate_limiter_test.ts @@ -62,17 +62,17 @@ Deno.test("createRateLimiter() throws for invalid segmentsPerWindow", () => { ); }); -Deno.test("createRateLimiter() throws for invalid tokensPerCycle", () => { +Deno.test("createRateLimiter() throws for invalid tokensPerPeriod", () => { assertThrows( () => createRateLimiter({ limit: 10, window: 1000, algorithm: "token-bucket", - tokensPerCycle: 0, + tokensPerPeriod: 0, }), RangeError, - "tokensPerCycle", + "tokensPerPeriod", ); assertThrows( () => @@ -80,10 +80,10 @@ Deno.test("createRateLimiter() throws 
for invalid tokensPerCycle", () => { limit: 10, window: 1000, algorithm: "token-bucket", - tokensPerCycle: 11, + tokensPerPeriod: 11, }), RangeError, - "tokensPerCycle", + "tokensPerPeriod", ); }); @@ -341,7 +341,7 @@ Deno.test("token-bucket: tokens refill lazily on access", () => { limit: 3, window: 1000, algorithm: "token-bucket", - tokensPerCycle: 1, + tokensPerPeriod: 1, evictionTtl: 0, clock: () => now, }); @@ -363,7 +363,7 @@ Deno.test("token-bucket: refill capped at limit", () => { limit: 3, window: 1000, algorithm: "token-bucket", - tokensPerCycle: 3, + tokensPerPeriod: 3, evictionTtl: 0, clock: () => now, }); @@ -381,7 +381,7 @@ Deno.test("token-bucket: retryAfter reflects time until enough tokens", () => { limit: 10, window: 500, algorithm: "token-bucket", - tokensPerCycle: 2, + tokensPerPeriod: 2, evictionTtl: 0, clock: () => now, }); @@ -392,6 +392,52 @@ Deno.test("token-bucket: retryAfter reflects time until enough tokens", () => { assertEquals(r.retryAfter, 1000); }); +Deno.test("token-bucket: remaining is integer even with partial-cycle elapsed time", () => { + let now = 0; + using limiter = createRateLimiter({ + limit: 10, + window: 300, + algorithm: "token-bucket", + tokensPerPeriod: 3, + evictionTtl: 0, + clock: () => now, + }); + + limiter.limit("a", { cost: 10 }); + + now = 500; + const r = limiter.limit("a"); + assert(r.ok); + assert( + Number.isInteger(r.remaining), + `remaining (${r.remaining}) should be integer`, + ); + assertEquals(r.remaining, 2); +}); + +Deno.test("token-bucket: exact token boundary with multi-cycle refill", () => { + let now = 0; + using limiter = createRateLimiter({ + limit: 7, + window: 1000, + algorithm: "token-bucket", + tokensPerPeriod: 3, + evictionTtl: 0, + clock: () => now, + }); + + limiter.limit("a", { cost: 7 }); + assertFalse(limiter.limit("a").ok); + + now = 1000; + assert(limiter.limit("a", { cost: 3 }).ok); + assertFalse(limiter.limit("a").ok); + + now = 2000; + assert(limiter.limit("a", { cost: 3 }).ok); 
+ assertFalse(limiter.limit("a").ok); +}); + // === GCRA === Deno.test("gcra: first request always allowed", () => { diff --git a/rate_limit/token_bucket_test.ts b/rate_limit/token_bucket_test.ts index 454bd220c9f5..16a6ce61e6e6 100644 --- a/rate_limit/token_bucket_test.ts +++ b/rate_limit/token_bucket_test.ts @@ -729,6 +729,64 @@ Deno.test("retryAfter is correct after manual replenish", () => { limiter[Symbol.dispose](); }); +// --- Floating-point boundary --- + +Deno.test("remaining uses floor when tokens are at integer boundary", () => { + using time = new FakeTime(0); + using limiter = createTokenBucket({ + tokenLimit: 5, + tokensPerPeriod: 1, + replenishmentPeriod: 1000, + }); + + for (let i = 0; i < 5; i++) limiter.tryAcquire(); + assertFalse(limiter.tryAcquire().acquired); + + time.tick(1000); + const lease = limiter.tryAcquire(); + assert(lease.acquired); +}); + +Deno.test("tryAcquire() denied at exact token boundary after partial refill", () => { + using time = new FakeTime(0); + using limiter = createTokenBucket({ + tokenLimit: 10, + tokensPerPeriod: 3, + replenishmentPeriod: 1000, + }); + + for (let i = 0; i < 10; i++) limiter.tryAcquire(); + assertFalse(limiter.tryAcquire().acquired); + + time.tick(1000); + assert(limiter.tryAcquire(3).acquired); + assertFalse(limiter.tryAcquire().acquired); + + time.tick(1000); + assert(limiter.tryAcquire(3).acquired); + assertFalse(limiter.tryAcquire().acquired); + + time.tick(1000); + assert(limiter.tryAcquire(3).acquired); + assertFalse(limiter.tryAcquire().acquired); +}); + +Deno.test("retryAfter is correct with non-power-of-two tokensPerPeriod", () => { + using time = new FakeTime(0); + using limiter = createTokenBucket({ + tokenLimit: 7, + tokensPerPeriod: 3, + replenishmentPeriod: 1000, + }); + void time; + + for (let i = 0; i < 7; i++) limiter.tryAcquire(); + + const lease = limiter.tryAcquire(5); + assertFalse(lease.acquired); + assertEquals(lease.retryAfter, 2000); +}); + // --- Double dispose --- 
Deno.test("double dispose is a no-op", () => { diff --git a/rate_limit/types.ts b/rate_limit/types.ts index b9ff8b11ff1e..42f0356d9ac5 100644 --- a/rate_limit/types.ts +++ b/rate_limit/types.ts @@ -98,7 +98,7 @@ export interface AcquireOptions { * ```ts * import { createTokenBucket } from "@std/rate-limit/token-bucket"; * - * const limiter = createTokenBucket({ + * using limiter = createTokenBucket({ * tokenLimit: 10, * tokensPerPeriod: 1, * replenishmentPeriod: 1000, @@ -108,8 +108,6 @@ export interface AcquireOptions { * if (!lease.acquired) { * console.log(`Retry after ${lease.retryAfter}ms: ${lease.reason}`); * } - * - * limiter[Symbol.dispose](); * ``` */ export type RateLimitLease = AcquiredLease | RejectedLease; From 4f32f3b25660a1fb862972cd8f051ec62a1a5cf0 Mon Sep 17 00:00:00 2001 From: Tomas Zijdemans <113360400+tomas-zijdemans@users.noreply.github.com> Date: Fri, 27 Mar 2026 16:11:59 +0100 Subject: [PATCH 05/15] feat(cache/unstable): add sliding expiration to `TtlCache` (#7046) Sliding expiration: entries can now stay alive as long as they're being accessed, with an optional hard deadline. Useful for sessions or rate-limit windows. --- cache/ttl_cache.ts | 131 +++++++++++++++++++++++++++---- cache/ttl_cache_test.ts | 170 ++++++++++++++++++++++++++++++++++++++++ 2 files changed, 286 insertions(+), 15 deletions(-) diff --git a/cache/ttl_cache.ts b/cache/ttl_cache.ts index 0ab0a8e2c3f0..7a5d86e5831f 100644 --- a/cache/ttl_cache.ts +++ b/cache/ttl_cache.ts @@ -14,6 +14,14 @@ export interface TtlCacheSetOptions { * overrides the cache's default TTL. Must be a finite, non-negative number. */ ttl?: number; + /** + * A maximum lifetime in milliseconds for this entry, measured from the + * time it is set. When + * {@linkcode TtlCacheOptions.slidingExpiration | slidingExpiration} is + * enabled, the sliding window cannot extend past this duration. Throws + * if `slidingExpiration` is not enabled. 
+ */ + absoluteExpiration?: number; } /** @@ -27,6 +35,16 @@ export interface TtlCacheOptions { * manual deletion, or clearing the cache. */ onEject?: (ejectedKey: K, ejectedValue: V) => void; + /** + * When `true`, each {@linkcode TtlCache.prototype.get | get()} call resets + * the entry's TTL. + * + * If both `slidingExpiration` and `absoluteExpiration` are set on an entry, + * the sliding window cannot extend past the absolute expiration. + * + * @default {false} + */ + slidingExpiration?: boolean; } /** @@ -38,7 +56,6 @@ export interface TtlCacheOptions { * * @typeParam K The type of the cache keys. * @typeParam V The type of the cache values. - * * @example Usage * ```ts * import { TtlCache } from "@std/cache/ttl-cache"; @@ -53,24 +70,24 @@ export interface TtlCacheOptions { * assertEquals(cache.size, 0); * ``` * - * @example Adding an onEject callback + * @example Sliding expiration * ```ts * import { TtlCache } from "@std/cache/ttl-cache"; - * import { delay } from "@std/async/delay"; * import { assertEquals } from "@std/assert/equals"; + * import { FakeTime } from "@std/testing/time"; * - * const cache = new TtlCache(100, { onEject: (key, value) => { - * console.log("Revoking: ", key) - * URL.revokeObjectURL(value) - * }}) - * - * cache.set( - * "fast-url", - * URL.createObjectURL(new Blob(["Hello, World"], { type: "text/plain" })) - * ); + * using time = new FakeTime(0); + * const cache = new TtlCache(100, { + * slidingExpiration: true, + * }); * - * await delay(200) // "Revoking: fast-url" - * assertEquals(cache.get("fast-url"), undefined) + * cache.set("a", 1); + * time.now = 80; + * assertEquals(cache.get("a"), 1); // resets TTL + * time.now = 160; + * assertEquals(cache.get("a"), 1); // still alive, TTL was reset at t=80 + * time.now = 260; + * assertEquals(cache.get("a"), undefined); // expired * ``` */ export class TtlCache extends Map @@ -78,6 +95,9 @@ export class TtlCache extends Map #defaultTtl: number; #timeouts = new Map(); #eject?: 
((ejectedKey: K, ejectedValue: V) => void) | undefined; + #slidingExpiration: boolean; + #entryTtls?: Map; + #absoluteDeadlines?: Map; /** * Constructs a new instance. @@ -101,6 +121,11 @@ export class TtlCache extends Map } this.#defaultTtl = defaultTtl; this.#eject = options?.onEject; + this.#slidingExpiration = options?.slidingExpiration ?? false; + if (this.#slidingExpiration) { + this.#entryTtls = new Map(); + this.#absoluteDeadlines = new Map(); + } } /** @@ -128,7 +153,17 @@ export class TtlCache extends Map * assertEquals(cache.get("a"), undefined); * ``` */ - override set(key: K, value: V, options?: TtlCacheSetOptions): this { + override set( + key: K, + value: V, + options?: TtlCacheSetOptions, + ): this { + if (options?.absoluteExpiration !== undefined && !this.#slidingExpiration) { + throw new TypeError( + "Cannot set entry in TtlCache: absoluteExpiration requires slidingExpiration to be enabled", + ); + } + const ttl = options?.ttl ?? this.#defaultTtl; if (!(ttl >= 0) || !Number.isFinite(ttl)) { throw new RangeError( @@ -140,9 +175,54 @@ export class TtlCache extends Map if (existing !== undefined) clearTimeout(existing); super.set(key, value); this.#timeouts.set(key, setTimeout(() => this.delete(key), ttl)); + + if (this.#slidingExpiration) { + this.#entryTtls!.set(key, ttl); + if (options?.absoluteExpiration !== undefined) { + const abs = options.absoluteExpiration; + if (!(abs >= 0) || !Number.isFinite(abs)) { + throw new RangeError( + `Cannot set entry in TtlCache: absoluteExpiration must be a finite, non-negative number: received ${abs}`, + ); + } + this.#absoluteDeadlines!.set(key, Date.now() + abs); + } else { + this.#absoluteDeadlines!.delete(key); + } + } + return this; } + /** + * Gets the value associated with the specified key. + * + * @experimental **UNSTABLE**: New API, yet to be vetted. + * + * When {@linkcode TtlCacheOptions.slidingExpiration | slidingExpiration} is + * enabled, accessing an entry resets its TTL. 
+ * + * @param key The key to get the value for. + * @returns The value associated with the specified key, or `undefined` if + * the key is not present in the cache. + * + * @example Usage + * ```ts + * import { TtlCache } from "@std/cache/ttl-cache"; + * import { assertEquals } from "@std/assert/equals"; + * + * using cache = new TtlCache(1000); + * + * cache.set("a", 1); + * assertEquals(cache.get("a"), 1); + * ``` + */ + override get(key: K): V | undefined { + if (!super.has(key)) return undefined; + if (this.#slidingExpiration) this.#resetTtl(key); + return super.get(key); + } + /** * Deletes the value associated with the given key. * @@ -171,6 +251,8 @@ export class TtlCache extends Map const timeout = this.#timeouts.get(key); if (timeout !== undefined) clearTimeout(timeout); this.#timeouts.delete(key); + this.#entryTtls?.delete(key); + this.#absoluteDeadlines?.delete(key); this.#eject?.(key, value!); return true; } @@ -198,6 +280,8 @@ export class TtlCache extends Map clearTimeout(timeout); } this.#timeouts.clear(); + this.#entryTtls?.clear(); + this.#absoluteDeadlines?.clear(); const entries = [...super.entries()]; super.clear(); let error: unknown; @@ -234,4 +318,21 @@ export class TtlCache extends Map [Symbol.dispose](): void { this.clear(); } + + #resetTtl(key: K): void { + const ttl = this.#entryTtls!.get(key); + if (ttl === undefined) return; + + const deadline = this.#absoluteDeadlines!.get(key); + const effectiveTtl = deadline !== undefined + ? 
Math.min(ttl, Math.max(0, deadline - Date.now())) + : ttl; + + const existing = this.#timeouts.get(key); + if (existing !== undefined) clearTimeout(existing); + this.#timeouts.set( + key, + setTimeout(() => this.delete(key), effectiveTtl), + ); + } } diff --git a/cache/ttl_cache_test.ts b/cache/ttl_cache_test.ts index 481d33415a8c..c5c0267c026d 100644 --- a/cache/ttl_cache_test.ts +++ b/cache/ttl_cache_test.ts @@ -265,6 +265,176 @@ Deno.test("TtlCache validates TTL", async (t) => { }); }); +Deno.test("TtlCache get() returns undefined for missing key with sliding expiration", () => { + using cache = new TtlCache(100, { + slidingExpiration: true, + }); + assertEquals(cache.get("missing"), undefined); +}); + +Deno.test("TtlCache sliding expiration", async (t) => { + await t.step("get() resets TTL", () => { + using time = new FakeTime(0); + const cache = new TtlCache(100, { + slidingExpiration: true, + }); + + cache.set("a", 1); + + time.now = 80; + assertEquals(cache.get("a"), 1); + + // TTL was reset at t=80, so entry lives until t=180 + time.now = 160; + assertEquals(cache.get("a"), 1); + + // TTL was reset at t=160, so entry lives until t=260 + time.now = 250; + assertEquals(cache.get("a"), 1); + + time.now = 350; + assertEquals(cache.get("a"), undefined); + }); + + await t.step("has() does not reset TTL", () => { + using time = new FakeTime(0); + const cache = new TtlCache(100, { + slidingExpiration: true, + }); + + cache.set("a", 1); + + time.now = 80; + assertEquals(cache.has("a"), true); + + // has() did not reset the TTL, so the entry still expires at t=100 + time.now = 100; + assertEquals(cache.has("a"), false); + }); + + await t.step("does not reset TTL when slidingExpiration is false", () => { + using time = new FakeTime(0); + const cache = new TtlCache(100); + + cache.set("a", 1); + + time.now = 80; + assertEquals(cache.get("a"), 1); + + time.now = 100; + assertEquals(cache.get("a"), undefined); + }); + + await t.step("absoluteExpiration caps sliding 
extension", () => { + using time = new FakeTime(0); + const cache = new TtlCache(100, { + slidingExpiration: true, + }); + + cache.set("a", 1, { absoluteExpiration: 150 }); + + time.now = 80; + assertEquals(cache.get("a"), 1); + + time.now = 140; + assertEquals(cache.get("a"), 1); + + // Absolute deadline is t=150; sliding cannot extend past it + time.now = 150; + assertEquals(cache.get("a"), undefined); + }); + + await t.step("absoluteExpiration throws without slidingExpiration", () => { + using cache = new TtlCache(100); + assertThrows( + () => cache.set("a", 1, { absoluteExpiration: 50 }), + TypeError, + "absoluteExpiration requires slidingExpiration to be enabled", + ); + }); + + await t.step("per-entry TTL works with sliding expiration", () => { + using time = new FakeTime(0); + const cache = new TtlCache(100, { + slidingExpiration: true, + }); + + cache.set("a", 1, { ttl: 50 }); + + time.now = 40; + assertEquals(cache.get("a"), 1); + + // TTL reset to 50ms at t=40, so alive until t=90 + time.now = 80; + assertEquals(cache.get("a"), 1); + + // TTL reset to 50ms at t=80, so alive until t=130 + time.now = 130; + assertEquals(cache.get("a"), undefined); + }); + + await t.step("sliding expiration calls onEject on expiry", () => { + using time = new FakeTime(0); + const ejected: [string, number][] = []; + const cache = new TtlCache(100, { + slidingExpiration: true, + onEject: (k, v) => ejected.push([k, v]), + }); + + cache.set("a", 1); + + time.now = 80; + cache.get("a"); + + time.now = 180; + assertEquals(ejected, [["a", 1]]); + }); + + await t.step("overwriting entry resets sliding metadata", () => { + using time = new FakeTime(0); + const cache = new TtlCache(100, { + slidingExpiration: true, + }); + + cache.set("a", 1, { ttl: 50, absoluteExpiration: 200 }); + + time.now = 40; + cache.get("a"); + + // Overwrite with different TTL and no absoluteExpiration + cache.set("a", 2, { ttl: 30 }); + + time.now = 60; + assertEquals(cache.get("a"), 2); + + // TTL reset to 
30ms at t=60, alive until t=90 + time.now = 90; + assertEquals(cache.get("a"), undefined); + }); + + await t.step("set() rejects negative absoluteExpiration", () => { + using cache = new TtlCache(1000, { + slidingExpiration: true, + }); + assertThrows( + () => cache.set("a", 1, { absoluteExpiration: -1 }), + RangeError, + "absoluteExpiration must be a finite, non-negative number", + ); + }); + + await t.step("set() rejects NaN absoluteExpiration", () => { + using cache = new TtlCache(1000, { + slidingExpiration: true, + }); + assertThrows( + () => cache.set("a", 1, { absoluteExpiration: NaN }), + RangeError, + "absoluteExpiration must be a finite, non-negative number", + ); + }); +}); + Deno.test("TtlCache clear() calls all onEject callbacks even if one throws", () => { const ejected: string[] = []; using cache = new TtlCache(1000, { From 3c62b72748937ee9f2540bdfe2186cba369f192b Mon Sep 17 00:00:00 2001 From: Tomas Zijdemans Date: Mon, 30 Mar 2026 11:33:49 +0200 Subject: [PATCH 06/15] implement store concept --- rate_limit/_keyed_algorithms.ts | 45 +- rate_limit/_replenishing_limiter.ts | 1 + rate_limit/deno.json | 4 +- rate_limit/mod.ts | 15 +- rate_limit/rate_limiter.ts | 361 ++++----------- rate_limit/rate_limiter_test.ts | 655 ++++++++++++++++------------ 6 files changed, 536 insertions(+), 545 deletions(-) diff --git a/rate_limit/_keyed_algorithms.ts b/rate_limit/_keyed_algorithms.ts index 933a0b0bf8bc..90125a5411ff 100644 --- a/rate_limit/_keyed_algorithms.ts +++ b/rate_limit/_keyed_algorithms.ts @@ -20,15 +20,43 @@ export interface KeyedAlgorithm { clear(): void; } -/** Wraps AlgorithmOps with a Map and eviction. */ +/** Options for {@linkcode createKeyedAlgorithm}. */ +export interface KeyedAlgorithmOptions { + /** + * Maximum number of keys to track. When a new key arrives at capacity, + * the least-recently-used key is evicted. `0` disables the limit. + */ + maxKeys?: number; +} + +/** + * Wraps AlgorithmOps with a Map, LRU eviction, and TTL eviction. 
+ * + * LRU tracking exploits Map's insertion-order guarantee: on every access + * the entry is deleted and re-inserted, keeping the least-recently-used + * key at the front. Eviction is therefore O(1). + */ function createKeyedAlgorithm( ops: AlgorithmOps, + options?: KeyedAlgorithmOptions, ): KeyedAlgorithm { + const maxKeys = options?.maxKeys ?? 0; const keys = new Map(); + /** Move `key` to the back of the Map (most-recently-used position). */ + function touch(key: string, state: S & { lastAccess: number }): void { + keys.delete(key); + keys.set(key, state); + } + function getOrCreate(key: string, now: number): S & { lastAccess: number } { let state = keys.get(key); if (state === undefined) { + if (maxKeys > 0 && keys.size >= maxKeys) { + // The first key in the Map is the LRU entry. + const lruKey = keys.keys().next().value; + if (lruKey !== undefined) keys.delete(lruKey); + } const base = ops.create(now); (base as S & { lastAccess: number }).lastAccess = now; state = base as S & { lastAccess: number }; @@ -52,6 +80,7 @@ function createKeyedAlgorithm( const state = getOrCreate(key, now); ops.advance(state, now); state.lastAccess = now; + if (maxKeys > 0) touch(key, state); const ok = ops.tryConsume(state, cost, now); return ops.result(state, ok, cost, now); }, @@ -91,13 +120,15 @@ function createKeyedAlgorithm( * * @param limit Maximum permits per key per window. Must be a positive integer. * @param window Window duration in milliseconds. Must be a positive finite number. + * @param options Additional keyed algorithm options. * @returns A keyed algorithm using fixed-window semantics. 
*/ export function createFixedWindowAlgorithm( limit: number, window: number, + options?: KeyedAlgorithmOptions, ): KeyedAlgorithm { - return createKeyedAlgorithm(createFixedWindowOps(limit, window)); + return createKeyedAlgorithm(createFixedWindowOps(limit, window), options); } // --- Sliding Window --- @@ -108,15 +139,18 @@ export function createFixedWindowAlgorithm( * @param limit Maximum permits per key per window. Must be a positive integer. * @param window Window duration in milliseconds. Must be a positive finite number. * @param segmentsPerWindow Number of segments per window. Must be an integer >= 2. + * @param options Additional keyed algorithm options. * @returns A keyed algorithm using sliding-window semantics. */ export function createSlidingWindowAlgorithm( limit: number, window: number, segmentsPerWindow: number, + options?: KeyedAlgorithmOptions, ): KeyedAlgorithm { return createKeyedAlgorithm( createSlidingWindowOps(limit, window, segmentsPerWindow), + options, ); } @@ -128,15 +162,18 @@ export function createSlidingWindowAlgorithm( * @param limit Bucket capacity (max tokens per key). Must be a positive integer. * @param window Refill cycle duration in milliseconds. Must be a positive finite number. * @param tokensPerPeriod Tokens added per replenishment period. Must be a positive integer. + * @param options Additional keyed algorithm options. * @returns A keyed algorithm using token-bucket semantics. */ export function createTokenBucketAlgorithm( limit: number, window: number, tokensPerPeriod: number, + options?: KeyedAlgorithmOptions, ): KeyedAlgorithm { return createKeyedAlgorithm( createTokenBucketOps(limit, window, tokensPerPeriod), + options, ); } @@ -147,11 +184,13 @@ export function createTokenBucketAlgorithm( * * @param limit Maximum permits per key per window. Must be a positive integer. * @param window Window (tau) in milliseconds. Must be a positive finite number. + * @param options Additional keyed algorithm options. 
* @returns A keyed algorithm using GCRA semantics. */ export function createGcraAlgorithm( limit: number, window: number, + options?: KeyedAlgorithmOptions, ): KeyedAlgorithm { - return createKeyedAlgorithm(createGcraOps(limit, window)); + return createKeyedAlgorithm(createGcraOps(limit, window), options); } diff --git a/rate_limit/_replenishing_limiter.ts b/rate_limit/_replenishing_limiter.ts index dbc860f1ffdc..2af664a338fe 100644 --- a/rate_limit/_replenishing_limiter.ts +++ b/rate_limit/_replenishing_limiter.ts @@ -96,6 +96,7 @@ export function createReplenishingLimiter( if (config.autoReplenishment) { timer = setInterval(replenishAndDrain, config.replenishmentPeriod); + if (typeof Deno !== "undefined") Deno.unrefTimer(timer as number); } function peekNext(): Waiter | undefined { diff --git a/rate_limit/deno.json b/rate_limit/deno.json index f18f3b017a09..7d97d4e67d30 100644 --- a/rate_limit/deno.json +++ b/rate_limit/deno.json @@ -7,6 +7,8 @@ "./fixed-window": "./fixed_window.ts", "./sliding-window": "./sliding_window.ts", "./types": "./types.ts", - "./rate-limiter": "./rate_limiter.ts" + "./rate-limiter": "./rate_limiter.ts", + "./store-types": "./store_types.ts", + "./memory-store": "./memory_store.ts" } } diff --git a/rate_limit/mod.ts b/rate_limit/mod.ts index f58cdf12ef83..303c4f8dd368 100644 --- a/rate_limit/mod.ts +++ b/rate_limit/mod.ts @@ -7,17 +7,20 @@ * * The primary API is {@linkcode createRateLimiter}, a keyed rate limiter for * the common case of "allow key X at most N requests per window." It supports - * fixed-window, sliding-window, token-bucket, and GCRA algorithms. For - * single-resource limiting, use the primitives: {@linkcode createTokenBucket}, - * {@linkcode createFixedWindow}, and {@linkcode createSlidingWindow}. + * fixed-window, sliding-window, token-bucket, and GCRA algorithms and accepts + * a pluggable {@linkcode RateLimitStore} backend (in-memory by default). 
+ * + * For single-resource limiting, use the primitives: + * {@linkcode createTokenBucket}, {@linkcode createFixedWindow}, and + * {@linkcode createSlidingWindow}. * * ```ts * import { createRateLimiter } from "@std/rate-limit/rate-limiter"; * import { assert } from "@std/assert"; * - * using limiter = createRateLimiter({ limit: 100, window: 60_000 }); + * await using limiter = createRateLimiter({ limit: 100, window: 60_000 }); * - * const result = limiter.limit("user:123"); + * const result = await limiter.limit("user:123"); * assert(result.ok); * ``` * @@ -29,3 +32,5 @@ export * from "./token_bucket.ts"; export * from "./fixed_window.ts"; export * from "./sliding_window.ts"; export * from "./rate_limiter.ts"; +export * from "./store_types.ts"; +export * from "./memory_store.ts"; diff --git a/rate_limit/rate_limiter.ts b/rate_limit/rate_limiter.ts index f6549234f485..3dbb4d06bc4c 100644 --- a/rate_limit/rate_limiter.ts +++ b/rate_limit/rate_limiter.ts @@ -1,125 +1,31 @@ // Copyright 2018-2026 the Deno authors. MIT license. // This module is browser compatible. -import { - assertNonNegativeInteger, - assertPositiveFinite, - assertPositiveInteger, -} from "./_validation.ts"; -import { - createFixedWindowAlgorithm, - createGcraAlgorithm, - createSlidingWindowAlgorithm, - createTokenBucketAlgorithm, -} from "./_keyed_algorithms.ts"; -import type { KeyedAlgorithm } from "./_keyed_algorithms.ts"; +import type { MemoryStoreOptions } from "./memory_store.ts"; +import { createMemoryStore } from "./memory_store.ts"; +import type { RateLimitStore } from "./store_types.ts"; /** - * Options for {@linkcode createRateLimiter}. + * Options for {@linkcode KeyedRateLimiter.limit} and + * {@linkcode KeyedRateLimiter.peek}. * * @experimental **UNSTABLE**: New API, yet to be vetted. - */ -export interface RateLimiterOptions { - /** Maximum permits per key per window/cycle. */ - limit: number; - /** Window duration in milliseconds. */ - window: number; - /** - * Algorithm to use. 
- * - * - `"fixed-window"` — counter resets at each window boundary. Simplest. - * Allows boundary bursts up to 2× the limit. - * - `"sliding-window"` — window divided into segments that rotate - * individually. Smoother enforcement, no boundary bursts. - * - `"token-bucket"` — tokens refill at a steady rate. Best for smoothing - * bursty traffic with a configurable burst cap. - * - `"gcra"` — Generic Cell Rate Algorithm. Enforces strict uniform - * spacing between requests with a single timestamp per key. Ideal when you - * want hard, even enforcement with no boundary effects and minimal memory. - * - * @default {"sliding-window"} - */ - algorithm?: "fixed-window" | "sliding-window" | "token-bucket" | "gcra"; - /** - * Number of segments for the sliding window algorithm. Higher values give - * smoother enforcement at the cost of slightly more memory per key. - * Ignored for other algorithms. - * - * @default {10} - */ - segmentsPerWindow?: number; - /** - * For token bucket: tokens added per replenishment period. Ignored for - * other algorithms. - * - * @default {limit} - */ - tokensPerPeriod?: number; - /** - * Time-to-live for idle key state in milliseconds. Keys with no activity - * for this duration are eligible for eviction. Set to `0` to disable - * automatic eviction. - * - * Only {@linkcode KeyedRateLimiter.limit} counts as activity for - * eviction purposes. {@linkcode KeyedRateLimiter.peek} does not refresh - * a key's last-access time. - * - * @default {300_000} - */ - evictionTtl?: number; - /** - * How often to scan for and evict idle keys, in milliseconds. Only - * meaningful when `evictionTtl > 0`. - * - * @default {60_000} - */ - evictionInterval?: number; - /** - * Maximum number of keys to track. When the limit is reached, new keys - * are rejected with `ok: false` (with `resetAt: 0` and `retryAfter: 0`). - * Set to `0` to disable (unbounded). - * - * Note: this limits the number of keys, not total memory. 
Long key - * strings still consume memory proportional to their length. - * - * @default {0} - */ - maxKeys?: number; - /** - * Clock function returning the current time in milliseconds. Override - * for testing with `FakeTime`. - * - * @default {Date.now} - */ - clock?: () => number; -} - -/** - * Options for {@linkcode KeyedRateLimiter.limit}. * - * @see {@linkcode PeekOptions} for the read-only equivalent. - * @experimental **UNSTABLE**: New API, yet to be vetted. - */ -export interface LimitOptions { - /** - * Number of permits to consume for this request. Use higher values for - * expensive operations. - * - * @default {1} - */ - cost?: number; -} - -/** - * Options for {@linkcode KeyedRateLimiter.peek}. + * @example Variable cost per request + * ```ts + * import { createRateLimiter } from "@std/rate-limit/rate-limiter"; + * import { assert } from "@std/assert"; * - * @see {@linkcode LimitOptions} for the consuming equivalent. - * @experimental **UNSTABLE**: New API, yet to be vetted. + * await using limiter = createRateLimiter({ limit: 100, window: 60_000 }); + * + * const result = await limiter.limit("user:123", { cost: 5 }); + * assert(result.ok); + * ``` */ -export interface PeekOptions { +export interface CostOptions { /** - * Number of permits to check. Determines whether a request of this size - * would be allowed and computes `retryAfter` accordingly. + * Number of permits to consume (for `limit`) or check (for `peek`). + * Use higher values for expensive operations. * * @default {1} */ @@ -141,8 +47,9 @@ export interface RateLimitResult { * Timestamp (milliseconds since epoch) of the next replenishment event * (segment rotation, window boundary, or refill cycle). This is *not* * necessarily when full capacity is restored — for sliding-window and - * token-bucket it may take multiple replenishment cycles. Useful for the - * `X-RateLimit-Reset` HTTP header. + * token-bucket it may take multiple replenishment cycles. 
For GCRA this + * is the theoretical arrival time (TAT) at which full burst capacity is + * restored. Useful for the `X-RateLimit-Reset` HTTP header. */ readonly resetAt: number; /** @@ -163,14 +70,17 @@ export interface RateLimitResult { * primary rate limiting API for the common case of "allow key X at most N * requests per window." * + * All methods are async to support pluggable store backends (in-memory, + * Redis, Deno KV). For in-memory stores the returned promises resolve + * synchronously. + * * **Disposal behavior:** after disposal, `limit()` and `peek()` return a * result with `ok: false` (remaining/resetAt/retryAfter all `0`), and - * `reset()` is a no-op. This matches the primitive limiter contract where - * `tryAcquire()` returns a rejected lease after disposal. + * `reset()` is a no-op. * * @experimental **UNSTABLE**: New API, yet to be vetted. */ -export interface KeyedRateLimiter extends Disposable { +export interface KeyedRateLimiter extends AsyncDisposable { /** * Check whether a request for the given key should be allowed, and * consume permits if so. @@ -179,86 +89,92 @@ export interface KeyedRateLimiter extends Disposable { * @param options Override cost per request. * @returns A {@linkcode RateLimitResult} with the decision and metadata. */ - limit(key: string, options?: LimitOptions): RateLimitResult; + limit(key: string, options?: CostOptions): Promise; /** * Check the current state for a key without consuming any permits. * Useful for displaying remaining quota in UI or headers without * affecting the count. - * - * Note: `peek()` advances the algorithm's internal clock (e.g. rotates - * sliding-window segments, refills token-bucket tokens) so that the - * returned metadata reflects the current point in time. This is a - * time-advancement side effect only — no permits are consumed. - * - * Note: `peek()` does not count as activity for TTL-based eviction. 
- * Keys that are only peeked (never limited) will still be evicted after - * `evictionTtl` of inactivity. */ - peek(key: string, options?: PeekOptions): RateLimitResult; + peek(key: string, options?: CostOptions): Promise; /** - * Reset all state for a key, restoring it to full capacity. Useful for - * testing, admin overrides, or support tooling. + * Reset all state for a key, restoring it to full capacity. */ - reset(key: string): void; - - /** Number of keys currently tracked. */ - readonly size: number; + reset(key: string): Promise; } /** - * Create a keyed rate limiter. The algorithm and its parameters are - * configured once; per-key state is managed internally with automatic - * eviction of idle keys. + * Options when using the default in-memory store. Extends + * {@linkcode MemoryStoreOptions} with a `store?: undefined` discriminant. * * @experimental **UNSTABLE**: New API, yet to be vetted. + */ +export type MemoryRateLimiterOptions = MemoryStoreOptions & { + store?: undefined; +}; + +/** + * Options when providing a custom {@linkcode RateLimitStore} backend. + * Memory-store options (`limit`, `window`, etc.) are typed as `never` + * to prevent accidentally passing them alongside a custom store, since + * the store owns those settings. * - * @example Basic API rate limiting - * ```ts ignore - * import { createRateLimiter } from "@std/rate-limit/rate-limiter"; + * @experimental **UNSTABLE**: New API, yet to be vetted. + */ +export interface StoreRateLimiterOptions { + /** The store backend to delegate to. */ + store: RateLimitStore; + limit?: never; + window?: never; + algorithm?: never; + segmentsPerWindow?: never; + tokensPerPeriod?: never; + evictionTtl?: never; + evictionInterval?: never; + maxKeys?: never; + clock?: never; +} + +/** + * Options for {@linkcode createRateLimiter}. * - * using limiter = createRateLimiter({ limit: 100, window: 60_000 }); + * @experimental **UNSTABLE**: New API, yet to be vetted. 
+ */ +export type RateLimiterOptions = + | MemoryRateLimiterOptions + | StoreRateLimiterOptions; + +/** + * Create a keyed rate limiter backed by an in-memory store or a custom + * {@linkcode RateLimitStore}. * - * Deno.serve((req) => { - * const ip = req.headers.get("x-forwarded-for") ?? "unknown"; - * const result = limiter.limit(ip); - * if (!result.ok) { - * return new Response("Too many requests", { - * status: 429, - * headers: { - * "Retry-After": String(Math.ceil(result.retryAfter / 1000)), - * }, - * }); - * } - * return new Response("OK"); - * }); - * ``` + * @experimental **UNSTABLE**: New API, yet to be vetted. * - * @example Variable cost + * @example Basic usage * ```ts * import { createRateLimiter } from "@std/rate-limit/rate-limiter"; * import { assert, assertEquals } from "@std/assert"; * - * using limiter = createRateLimiter({ limit: 100, window: 60_000 }); - * const result = limiter.limit("user:123", { cost: 5 }); + * await using limiter = createRateLimiter({ limit: 100, window: 60_000 }); + * const result = await limiter.limit("user:123", { cost: 5 }); * assert(result.ok); * assertEquals(result.remaining, 95); * ``` * - * @example GCRA for strict uniform spacing - * ```ts + * @example Custom store backend + * ```ts ignore * import { createRateLimiter } from "@std/rate-limit/rate-limiter"; - * import { assert } from "@std/assert"; + * import { createRedisStore } from "@std/rate-limit/redis-store"; * - * using limiter = createRateLimiter({ - * limit: 10, - * window: 1_000, - * algorithm: "gcra", + * const store = createRedisStore({ + * redis: myRedisClient, + * algorithm: "sliding-window", + * limit: 100, + * window: 60_000, * }); * - * const result = limiter.limit("user:123"); - * assert(result.ok); + * await using limiter = createRateLimiter({ store }); * ``` * * @param options Configuration for the rate limiter. 
@@ -267,74 +183,10 @@ export interface KeyedRateLimiter extends Disposable { export function createRateLimiter( options: RateLimiterOptions, ): KeyedRateLimiter { - const context = "rate limiter"; - assertPositiveInteger(context, "limit", options.limit); - assertPositiveFinite(context, "window", options.window); + const store: RateLimitStore = options.store ?? + createMemoryStore(options as MemoryRateLimiterOptions); - const { - limit, - window: windowMs, - algorithm: algorithmName = "sliding-window", - segmentsPerWindow = 10, - tokensPerPeriod = limit, - evictionTtl = 300_000, - evictionInterval = 60_000, - maxKeys = 0, - clock = Date.now, - } = options; - - if (algorithmName === "token-bucket") { - assertPositiveInteger(context, "tokensPerPeriod", tokensPerPeriod); - if (tokensPerPeriod > limit) { - throw new RangeError( - `Cannot create ${context}: 'tokensPerPeriod' (${tokensPerPeriod}) exceeds 'limit' (${limit})`, - ); - } - } - - if (!Number.isFinite(evictionTtl) || evictionTtl < 0) { - throw new RangeError( - `Cannot create ${context}: 'evictionTtl' must be a non-negative finite number, received ${evictionTtl}`, - ); - } - - if (evictionTtl > 0) { - assertPositiveFinite(context, "evictionInterval", evictionInterval); - } - - assertNonNegativeInteger(context, "maxKeys", maxKeys); - - let algorithm: KeyedAlgorithm; - switch (algorithmName) { - case "fixed-window": - algorithm = createFixedWindowAlgorithm(limit, windowMs); - break; - case "sliding-window": - algorithm = createSlidingWindowAlgorithm( - limit, - windowMs, - segmentsPerWindow, - ); - break; - case "token-bucket": - algorithm = createTokenBucketAlgorithm(limit, windowMs, tokensPerPeriod); - break; - case "gcra": - algorithm = createGcraAlgorithm(limit, windowMs); - break; - default: - throw new TypeError( - `Cannot create ${context}: unknown algorithm '${algorithmName as string}'`, - ); - } - - const MAX_KEYS_REJECTED: RateLimitResult = Object.freeze({ - ok: false as const, - remaining: 0, - 
resetAt: 0, - retryAfter: 0, - limit, - }); + const limit = store.capacity; const DISPOSED_RESULT: RateLimitResult = Object.freeze({ ok: false as const, @@ -345,14 +197,6 @@ export function createRateLimiter( }); let disposed = false; - let evictionTimer: ReturnType | undefined; - - if (evictionTtl > 0) { - evictionTimer = setInterval( - () => algorithm.evict(clock(), evictionTtl), - evictionInterval, - ); - } function validateCost(method: string, cost: number): void { if (!Number.isInteger(cost) || cost < 1) { @@ -368,39 +212,26 @@ export function createRateLimiter( } return { - limit(key: string, options?: LimitOptions): RateLimitResult { - if (disposed) return DISPOSED_RESULT; + limit(key: string, options?: CostOptions): Promise { + if (disposed) return Promise.resolve(DISPOSED_RESULT); const cost = options?.cost ?? 1; validateCost("limit", cost); - if (maxKeys > 0 && algorithm.size >= maxKeys && !algorithm.has(key)) { - return MAX_KEYS_REJECTED; - } - return algorithm.limit(key, cost, clock()); + return store.consume(key, cost); }, - peek(key: string, options?: PeekOptions): RateLimitResult { - if (disposed) return DISPOSED_RESULT; + peek(key: string, options?: CostOptions): Promise { + if (disposed) return Promise.resolve(DISPOSED_RESULT); const cost = options?.cost ?? 
1; validateCost("peek", cost); - if (maxKeys > 0 && algorithm.size >= maxKeys && !algorithm.has(key)) { - return MAX_KEYS_REJECTED; - } - return algorithm.peek(key, cost, clock()); - }, - reset(_key: string): void { - if (disposed) return; - algorithm.reset(_key); + return store.peek(key, cost); }, - get size(): number { - return algorithm.size; + reset(key: string): Promise { + if (disposed) return Promise.resolve(); + return store.reset(key); }, - [Symbol.dispose](): void { + async [Symbol.asyncDispose](): Promise { if (disposed) return; disposed = true; - if (evictionTimer !== undefined) { - clearInterval(evictionTimer); - evictionTimer = undefined; - } - algorithm.clear(); + await store[Symbol.asyncDispose](); }, }; } diff --git a/rate_limit/rate_limiter_test.ts b/rate_limit/rate_limiter_test.ts index 5688e3db0a38..dda5367061b8 100644 --- a/rate_limit/rate_limiter_test.ts +++ b/rate_limit/rate_limiter_test.ts @@ -3,6 +3,7 @@ import { assert, assertEquals, assertFalse, assertThrows } from "@std/assert"; import { FakeTime } from "@std/testing/time"; import { createRateLimiter } from "./rate_limiter.ts"; +import { createMemoryStore } from "./memory_store.ts"; // --- Factory validation --- @@ -146,9 +147,9 @@ Deno.test("createRateLimiter() throws for negative evictionTtl", () => { ); }); -Deno.test("createRateLimiter() throws for invalid cost", () => { +Deno.test("createRateLimiter() throws for invalid cost", async () => { using _time = new FakeTime(); - using limiter = createRateLimiter({ limit: 10, window: 1000 }); + await using limiter = createRateLimiter({ limit: 10, window: 1000 }); assertThrows(() => limiter.limit("a", { cost: 0 }), RangeError, "cost"); assertThrows(() => limiter.limit("a", { cost: -1 }), RangeError, "cost"); @@ -156,7 +157,7 @@ Deno.test("createRateLimiter() throws for invalid cost", () => { assertThrows(() => limiter.limit("a", { cost: 11 }), RangeError, "exceeds"); }); -Deno.test("createRateLimiter() accepts all algorithms", () => { 
+Deno.test("createRateLimiter() accepts all algorithms", async () => { using _time = new FakeTime(); for ( const algorithm of [ @@ -166,21 +167,21 @@ Deno.test("createRateLimiter() accepts all algorithms", () => { "gcra", ] as const ) { - using limiter = createRateLimiter({ + await using limiter = createRateLimiter({ limit: 10, window: 1000, algorithm, }); - const result = limiter.limit("key"); + const result = await limiter.limit("key"); assert(result.ok); } }); // === Fixed window === -Deno.test("fixed-window: first request allowed with correct remaining", () => { +Deno.test("fixed-window: first request allowed with correct remaining", async () => { const now = 1000; - using limiter = createRateLimiter({ + await using limiter = createRateLimiter({ limit: 5, window: 1000, algorithm: "fixed-window", @@ -188,16 +189,16 @@ Deno.test("fixed-window: first request allowed with correct remaining", () => { clock: () => now, }); - const r = limiter.limit("a"); + const r = await limiter.limit("a"); assert(r.ok); assertEquals(r.remaining, 4); assertEquals(r.limit, 5); assertEquals(r.retryAfter, 0); }); -Deno.test("fixed-window: exhausting limit returns ok: false", () => { +Deno.test("fixed-window: exhausting limit returns ok: false", async () => { const now = 1000; - using limiter = createRateLimiter({ + await using limiter = createRateLimiter({ limit: 3, window: 1000, algorithm: "fixed-window", @@ -205,20 +206,20 @@ Deno.test("fixed-window: exhausting limit returns ok: false", () => { clock: () => now, }); - assert(limiter.limit("a").ok); - assert(limiter.limit("a").ok); - assert(limiter.limit("a").ok); + assert((await limiter.limit("a")).ok); + assert((await limiter.limit("a")).ok); + assert((await limiter.limit("a")).ok); - const r = limiter.limit("a"); + const r = await limiter.limit("a"); assertFalse(r.ok); assertEquals(r.remaining, 0); assert(r.retryAfter > 0); assertEquals(r.resetAt, 2000); }); -Deno.test("fixed-window: permits restore after window elapses", () => { 
+Deno.test("fixed-window: permits restore after window elapses", async () => { let now = 1000; - using limiter = createRateLimiter({ + await using limiter = createRateLimiter({ limit: 2, window: 1000, algorithm: "fixed-window", @@ -226,19 +227,19 @@ Deno.test("fixed-window: permits restore after window elapses", () => { clock: () => now, }); - limiter.limit("a"); - limiter.limit("a"); - assertFalse(limiter.limit("a").ok); + await limiter.limit("a"); + await limiter.limit("a"); + assertFalse((await limiter.limit("a")).ok); now = 2000; - const r = limiter.limit("a"); + const r = await limiter.limit("a"); assert(r.ok); assertEquals(r.remaining, 1); }); -Deno.test("fixed-window: variable cost consumes multiple permits", () => { +Deno.test("fixed-window: variable cost consumes multiple permits", async () => { const now = 1000; - using limiter = createRateLimiter({ + await using limiter = createRateLimiter({ limit: 10, window: 1000, algorithm: "fixed-window", @@ -246,19 +247,19 @@ Deno.test("fixed-window: variable cost consumes multiple permits", () => { clock: () => now, }); - const r = limiter.limit("a", { cost: 7 }); + const r = await limiter.limit("a", { cost: 7 }); assert(r.ok); assertEquals(r.remaining, 3); - assertFalse(limiter.limit("a", { cost: 4 }).ok); - assert(limiter.limit("a", { cost: 3 }).ok); + assertFalse((await limiter.limit("a", { cost: 4 })).ok); + assert((await limiter.limit("a", { cost: 3 })).ok); }); // === Sliding window === -Deno.test("sliding-window: permits freed incrementally as segments rotate", () => { +Deno.test("sliding-window: permits freed incrementally as segments rotate", async () => { let now = 0; - using limiter = createRateLimiter({ + await using limiter = createRateLimiter({ limit: 4, window: 400, algorithm: "sliding-window", @@ -267,23 +268,23 @@ Deno.test("sliding-window: permits freed incrementally as segments rotate", () = clock: () => now, }); - limiter.limit("a", { cost: 4 }); - assertFalse(limiter.limit("a").ok); + await 
limiter.limit("a", { cost: 4 }); + assertFalse((await limiter.limit("a")).ok); now = 100; - assertFalse(limiter.limit("a").ok); + assertFalse((await limiter.limit("a")).ok); now = 200; - assertFalse(limiter.limit("a").ok); + assertFalse((await limiter.limit("a")).ok); now = 300; - assertFalse(limiter.limit("a").ok); + assertFalse((await limiter.limit("a")).ok); now = 400; - assert(limiter.limit("a", { cost: 4 }).ok); + assert((await limiter.limit("a", { cost: 4 })).ok); }); -Deno.test("sliding-window: no boundary burst", () => { +Deno.test("sliding-window: no boundary burst", async () => { let now = 0; - using limiter = createRateLimiter({ + await using limiter = createRateLimiter({ limit: 10, window: 1000, algorithm: "sliding-window", @@ -292,18 +293,18 @@ Deno.test("sliding-window: no boundary burst", () => { clock: () => now, }); - limiter.limit("a", { cost: 10 }); + await limiter.limit("a", { cost: 10 }); now = 500; - assertFalse(limiter.limit("a").ok); + assertFalse((await limiter.limit("a")).ok); now = 1000; - assert(limiter.limit("a", { cost: 10 }).ok); + assert((await limiter.limit("a", { cost: 10 })).ok); }); -Deno.test("sliding-window: retryAfter reflects next segment rotation", () => { +Deno.test("sliding-window: retryAfter reflects next segment rotation", async () => { const now = 0; - using limiter = createRateLimiter({ + await using limiter = createRateLimiter({ limit: 1, window: 1000, algorithm: "sliding-window", @@ -312,17 +313,17 @@ Deno.test("sliding-window: retryAfter reflects next segment rotation", () => { clock: () => now, }); - limiter.limit("a"); - const r = limiter.limit("a"); + await limiter.limit("a"); + const r = await limiter.limit("a"); assertFalse(r.ok); assertEquals(r.retryAfter, 250); }); // === Token bucket === -Deno.test("token-bucket: starts at full capacity", () => { +Deno.test("token-bucket: starts at full capacity", async () => { const now = 0; - using limiter = createRateLimiter({ + await using limiter = createRateLimiter({ 
limit: 5, window: 1000, algorithm: "token-bucket", @@ -330,14 +331,14 @@ Deno.test("token-bucket: starts at full capacity", () => { clock: () => now, }); - const r = limiter.limit("a"); + const r = await limiter.limit("a"); assert(r.ok); assertEquals(r.remaining, 4); }); -Deno.test("token-bucket: tokens refill lazily on access", () => { +Deno.test("token-bucket: tokens refill lazily on access", async () => { let now = 0; - using limiter = createRateLimiter({ + await using limiter = createRateLimiter({ limit: 3, window: 1000, algorithm: "token-bucket", @@ -346,20 +347,20 @@ Deno.test("token-bucket: tokens refill lazily on access", () => { clock: () => now, }); - limiter.limit("a", { cost: 3 }); - assertFalse(limiter.limit("a").ok); + await limiter.limit("a", { cost: 3 }); + assertFalse((await limiter.limit("a")).ok); now = 1000; - assert(limiter.limit("a").ok); - assertFalse(limiter.limit("a").ok); + assert((await limiter.limit("a")).ok); + assertFalse((await limiter.limit("a")).ok); now = 3000; - assert(limiter.limit("a", { cost: 2 }).ok); + assert((await limiter.limit("a", { cost: 2 })).ok); }); -Deno.test("token-bucket: refill capped at limit", () => { +Deno.test("token-bucket: refill capped at limit", async () => { let now = 0; - using limiter = createRateLimiter({ + await using limiter = createRateLimiter({ limit: 3, window: 1000, algorithm: "token-bucket", @@ -368,16 +369,16 @@ Deno.test("token-bucket: refill capped at limit", () => { clock: () => now, }); - limiter.limit("a"); + await limiter.limit("a"); now = 10000; - const r = limiter.limit("a"); + const r = await limiter.limit("a"); assert(r.ok); assertEquals(r.remaining, 2); }); -Deno.test("token-bucket: retryAfter reflects time until enough tokens", () => { +Deno.test("token-bucket: retryAfter reflects time until enough tokens", async () => { const now = 0; - using limiter = createRateLimiter({ + await using limiter = createRateLimiter({ limit: 10, window: 500, algorithm: "token-bucket", @@ -386,15 
+387,15 @@ Deno.test("token-bucket: retryAfter reflects time until enough tokens", () => { clock: () => now, }); - limiter.limit("a", { cost: 10 }); - const r = limiter.limit("a", { cost: 3 }); + await limiter.limit("a", { cost: 10 }); + const r = await limiter.limit("a", { cost: 3 }); assertFalse(r.ok); assertEquals(r.retryAfter, 1000); }); -Deno.test("token-bucket: remaining is integer even with partial-cycle elapsed time", () => { +Deno.test("token-bucket: remaining is integer even with partial-cycle elapsed time", async () => { let now = 0; - using limiter = createRateLimiter({ + await using limiter = createRateLimiter({ limit: 10, window: 300, algorithm: "token-bucket", @@ -403,10 +404,10 @@ Deno.test("token-bucket: remaining is integer even with partial-cycle elapsed ti clock: () => now, }); - limiter.limit("a", { cost: 10 }); + await limiter.limit("a", { cost: 10 }); now = 500; - const r = limiter.limit("a"); + const r = await limiter.limit("a"); assert(r.ok); assert( Number.isInteger(r.remaining), @@ -415,9 +416,9 @@ Deno.test("token-bucket: remaining is integer even with partial-cycle elapsed ti assertEquals(r.remaining, 2); }); -Deno.test("token-bucket: exact token boundary with multi-cycle refill", () => { +Deno.test("token-bucket: exact token boundary with multi-cycle refill", async () => { let now = 0; - using limiter = createRateLimiter({ + await using limiter = createRateLimiter({ limit: 7, window: 1000, algorithm: "token-bucket", @@ -426,23 +427,23 @@ Deno.test("token-bucket: exact token boundary with multi-cycle refill", () => { clock: () => now, }); - limiter.limit("a", { cost: 7 }); - assertFalse(limiter.limit("a").ok); + await limiter.limit("a", { cost: 7 }); + assertFalse((await limiter.limit("a")).ok); now = 1000; - assert(limiter.limit("a", { cost: 3 }).ok); - assertFalse(limiter.limit("a").ok); + assert((await limiter.limit("a", { cost: 3 })).ok); + assertFalse((await limiter.limit("a")).ok); now = 2000; - assert(limiter.limit("a", { cost: 3 
}).ok); - assertFalse(limiter.limit("a").ok); + assert((await limiter.limit("a", { cost: 3 })).ok); + assertFalse((await limiter.limit("a")).ok); }); // === GCRA === -Deno.test("gcra: first request always allowed", () => { +Deno.test("gcra: first request always allowed", async () => { const now = 0; - using limiter = createRateLimiter({ + await using limiter = createRateLimiter({ limit: 10, window: 1000, algorithm: "gcra", @@ -450,15 +451,15 @@ Deno.test("gcra: first request always allowed", () => { clock: () => now, }); - const r = limiter.limit("a"); + const r = await limiter.limit("a"); assert(r.ok); assertEquals(r.limit, 10); }); -Deno.test("gcra: requests spaced >= emission_interval apart always allowed", () => { +Deno.test("gcra: requests spaced >= emission_interval apart always allowed", async () => { let now = 0; const emissionInterval = 100; // window(1000) / limit(10) - using limiter = createRateLimiter({ + await using limiter = createRateLimiter({ limit: 10, window: 1000, algorithm: "gcra", @@ -467,15 +468,15 @@ Deno.test("gcra: requests spaced >= emission_interval apart always allowed", () }); for (let i = 0; i < 20; i++) { - const r = limiter.limit("a"); + const r = await limiter.limit("a"); assert(r.ok, `request ${i} at now=${now} should be allowed`); now += emissionInterval; } }); -Deno.test("gcra: burst up to limit requests when idle", () => { +Deno.test("gcra: burst up to limit requests when idle", async () => { const now = 0; - using limiter = createRateLimiter({ + await using limiter = createRateLimiter({ limit: 5, window: 1000, algorithm: "gcra", @@ -484,14 +485,17 @@ Deno.test("gcra: burst up to limit requests when idle", () => { }); for (let i = 0; i < 5; i++) { - assert(limiter.limit("a").ok, `burst request ${i} should be allowed`); + assert( + (await limiter.limit("a")).ok, + `burst request ${i} should be allowed`, + ); } - assertFalse(limiter.limit("a").ok); + assertFalse((await limiter.limit("a")).ok); }); -Deno.test("gcra: after burst, 
requests denied until tat drains", () => { +Deno.test("gcra: after burst, requests denied until tat drains", async () => { let now = 0; - using limiter = createRateLimiter({ + await using limiter = createRateLimiter({ limit: 5, window: 1000, algorithm: "gcra", @@ -499,18 +503,18 @@ Deno.test("gcra: after burst, requests denied until tat drains", () => { clock: () => now, }); - for (let i = 0; i < 5; i++) limiter.limit("a"); - assertFalse(limiter.limit("a").ok); + for (let i = 0; i < 5; i++) await limiter.limit("a"); + assertFalse((await limiter.limit("a")).ok); // emission_interval = 200ms. After 200ms, one slot should free. now = 200; - assert(limiter.limit("a").ok); - assertFalse(limiter.limit("a").ok); + assert((await limiter.limit("a")).ok); + assertFalse((await limiter.limit("a")).ok); }); -Deno.test("gcra: retryAfter is exact", () => { +Deno.test("gcra: retryAfter is exact", async () => { const now = 0; - using limiter = createRateLimiter({ + await using limiter = createRateLimiter({ limit: 5, window: 1000, algorithm: "gcra", @@ -518,15 +522,15 @@ Deno.test("gcra: retryAfter is exact", () => { clock: () => now, }); - for (let i = 0; i < 5; i++) limiter.limit("a"); - const r = limiter.limit("a"); + for (let i = 0; i < 5; i++) await limiter.limit("a"); + const r = await limiter.limit("a"); assertFalse(r.ok); assertEquals(r.retryAfter, 200); }); -Deno.test("gcra: variable cost advances tat by emission_interval * cost", () => { +Deno.test("gcra: variable cost advances tat by emission_interval * cost", async () => { const now = 0; - using limiter = createRateLimiter({ + await using limiter = createRateLimiter({ limit: 10, window: 1000, algorithm: "gcra", @@ -535,18 +539,18 @@ Deno.test("gcra: variable cost advances tat by emission_interval * cost", () => }); // emission_interval = 100ms. cost=5 advances tat by 500ms. 
- const r = limiter.limit("a", { cost: 5 }); + const r = await limiter.limit("a", { cost: 5 }); assert(r.ok); assertEquals(r.remaining, 5); // 5 more slots remain - assert(limiter.limit("a", { cost: 5 }).ok); - assertFalse(limiter.limit("a").ok); + assert((await limiter.limit("a", { cost: 5 })).ok); + assertFalse((await limiter.limit("a")).ok); }); -Deno.test("gcra: remaining derived correctly", () => { +Deno.test("gcra: remaining derived correctly", async () => { const now = 0; - using limiter = createRateLimiter({ + await using limiter = createRateLimiter({ limit: 10, window: 1000, algorithm: "gcra", @@ -554,18 +558,18 @@ Deno.test("gcra: remaining derived correctly", () => { clock: () => now, }); - const r1 = limiter.limit("a"); + const r1 = await limiter.limit("a"); assert(r1.ok); assertEquals(r1.remaining, 9); - const r2 = limiter.limit("a", { cost: 4 }); + const r2 = await limiter.limit("a", { cost: 4 }); assert(r2.ok); assertEquals(r2.remaining, 5); }); -Deno.test("gcra: remaining never exceeds limit after long idle", () => { +Deno.test("gcra: remaining never exceeds limit after long idle", async () => { let now = 0; - using limiter = createRateLimiter({ + await using limiter = createRateLimiter({ limit: 10, window: 1000, algorithm: "gcra", @@ -573,10 +577,10 @@ Deno.test("gcra: remaining never exceeds limit after long idle", () => { clock: () => now, }); - limiter.limit("a"); + await limiter.limit("a"); now += 100_000; - const peek = limiter.peek("a"); + const peek = await limiter.peek("a"); assert(peek.ok); assert( peek.remaining <= 10, @@ -584,7 +588,7 @@ Deno.test("gcra: remaining never exceeds limit after long idle", () => { ); assertEquals(peek.remaining, 10); - const result = limiter.limit("a"); + const result = await limiter.limit("a"); assert(result.ok); assert( result.remaining <= 10, @@ -592,9 +596,9 @@ Deno.test("gcra: remaining never exceeds limit after long idle", () => { ); }); -Deno.test("gcra: cost exceeding remaining burst is denied", () => 
{ +Deno.test("gcra: cost exceeding remaining burst is denied", async () => { const now = 0; - using limiter = createRateLimiter({ + await using limiter = createRateLimiter({ limit: 5, window: 1000, algorithm: "gcra", @@ -602,33 +606,34 @@ Deno.test("gcra: cost exceeding remaining burst is denied", () => { clock: () => now, }); - limiter.limit("a", { cost: 3 }); - const r = limiter.limit("a", { cost: 4 }); + await limiter.limit("a", { cost: 3 }); + const r = await limiter.limit("a", { cost: 4 }); assertFalse(r.ok); assert(r.retryAfter > 0); }); -Deno.test("gcra: state is a single timestamp per key (minimal memory)", () => { +Deno.test("gcra: state is a single timestamp per key (minimal memory)", async () => { const now = 0; - using limiter = createRateLimiter({ + const store = createMemoryStore({ limit: 100, window: 1000, algorithm: "gcra", evictionTtl: 0, clock: () => now, }); + await using limiter = createRateLimiter({ store }); for (let i = 0; i < 1000; i++) { - limiter.limit(`key-${i}`); + await limiter.limit(`key-${i}`); } - assertEquals(limiter.size, 1000); + assertEquals(store.size, 1000); }); // === peek() === -Deno.test("peek() returns current state without consuming permits", () => { +Deno.test("peek() returns current state without consuming permits", async () => { const now = 0; - using limiter = createRateLimiter({ + await using limiter = createRateLimiter({ limit: 5, window: 1000, algorithm: "fixed-window", @@ -636,20 +641,20 @@ Deno.test("peek() returns current state without consuming permits", () => { clock: () => now, }); - limiter.limit("a"); - limiter.limit("a"); + await limiter.limit("a"); + await limiter.limit("a"); - const p = limiter.peek("a"); + const p = await limiter.peek("a"); assert(p.ok); assertEquals(p.remaining, 3); // peek didn't consume — still 3 remaining - assertEquals(limiter.peek("a").remaining, 3); + assertEquals((await limiter.peek("a")).remaining, 3); }); -Deno.test("peek() returns full capacity for unknown key", () => { 
+Deno.test("peek() returns full capacity for unknown key", async () => { const now = 0; - using limiter = createRateLimiter({ + await using limiter = createRateLimiter({ limit: 10, window: 1000, algorithm: "gcra", @@ -657,15 +662,15 @@ Deno.test("peek() returns full capacity for unknown key", () => { clock: () => now, }); - const p = limiter.peek("unknown"); + const p = await limiter.peek("unknown"); assert(p.ok); assertEquals(p.remaining, 10); assertEquals(p.limit, 10); }); -Deno.test("peek() reflects consumed permits after limit()", () => { +Deno.test("peek() reflects consumed permits after limit()", async () => { const now = 0; - using limiter = createRateLimiter({ + await using limiter = createRateLimiter({ limit: 5, window: 1000, algorithm: "token-bucket", @@ -673,17 +678,17 @@ Deno.test("peek() reflects consumed permits after limit()", () => { clock: () => now, }); - limiter.limit("a", { cost: 3 }); - const p = limiter.peek("a"); + await limiter.limit("a", { cost: 3 }); + const p = await limiter.peek("a"); assert(p.ok); assertEquals(p.remaining, 2); }); // === reset() === -Deno.test("reset() restores key to full capacity", () => { +Deno.test("reset() restores key to full capacity", async () => { const now = 0; - using limiter = createRateLimiter({ + await using limiter = createRateLimiter({ limit: 3, window: 1000, algorithm: "gcra", @@ -691,16 +696,16 @@ Deno.test("reset() restores key to full capacity", () => { clock: () => now, }); - limiter.limit("a", { cost: 3 }); - assertFalse(limiter.limit("a").ok); + await limiter.limit("a", { cost: 3 }); + assertFalse((await limiter.limit("a")).ok); - limiter.reset("a"); - assert(limiter.limit("a").ok); + await limiter.reset("a"); + assert((await limiter.limit("a")).ok); }); -Deno.test("reset() on unknown key is a no-op", () => { +Deno.test("reset() on unknown key is a no-op", async () => { const now = 0; - using limiter = createRateLimiter({ + await using limiter = createRateLimiter({ limit: 5, window: 1000, 
algorithm: "fixed-window", @@ -708,126 +713,132 @@ Deno.test("reset() on unknown key is a no-op", () => { clock: () => now, }); - limiter.reset("nonexistent"); // should not throw + await limiter.reset("nonexistent"); // should not throw }); -// === size === +// === size (via MemoryStore) === -Deno.test("size tracks number of keys", () => { +Deno.test("MemoryStore.size tracks number of keys", async () => { const now = 0; - using limiter = createRateLimiter({ + const store = createMemoryStore({ limit: 5, window: 1000, algorithm: "gcra", evictionTtl: 0, clock: () => now, }); + await using limiter = createRateLimiter({ store }); - assertEquals(limiter.size, 0); - limiter.limit("a"); - assertEquals(limiter.size, 1); - limiter.limit("b"); - assertEquals(limiter.size, 2); - limiter.limit("a"); // same key - assertEquals(limiter.size, 2); - limiter.reset("a"); - assertEquals(limiter.size, 1); + assertEquals(store.size, 0); + await limiter.limit("a"); + assertEquals(store.size, 1); + await limiter.limit("b"); + assertEquals(store.size, 2); + await limiter.limit("a"); // same key + assertEquals(store.size, 2); + await limiter.reset("a"); + assertEquals(store.size, 1); }); // === Eviction === -Deno.test("keys are evicted after evictionTtl of inactivity", () => { +Deno.test("keys are evicted after evictionTtl of inactivity", async () => { using time = new FakeTime(); - using limiter = createRateLimiter({ + const store = createMemoryStore({ limit: 5, window: 1000, algorithm: "fixed-window", evictionTtl: 5000, evictionInterval: 1000, }); + await using limiter = createRateLimiter({ store }); - limiter.limit("a"); - limiter.limit("b"); - assertEquals(limiter.size, 2); + await limiter.limit("a"); + await limiter.limit("b"); + assertEquals(store.size, 2); time.tick(6000); - assertEquals(limiter.size, 0); + assertEquals(store.size, 0); }); -Deno.test("active keys are not evicted", () => { +Deno.test("active keys are not evicted", async () => { using time = new FakeTime(); - using 
limiter = createRateLimiter({ + const store = createMemoryStore({ limit: 5, window: 1000, algorithm: "fixed-window", evictionTtl: 5000, evictionInterval: 1000, }); + await using limiter = createRateLimiter({ store }); - limiter.limit("a"); - limiter.limit("b"); + await limiter.limit("a"); + await limiter.limit("b"); time.tick(4000); - limiter.limit("a"); // refresh "a" + await limiter.limit("a"); // refresh "a" time.tick(2000); // 6s total — "b" should be evicted, "a" should survive - assertEquals(limiter.size, 1); - assert(limiter.peek("a").ok); + assertEquals(store.size, 1); + assert((await limiter.peek("a")).ok); }); -Deno.test("peek() does not refresh activity for TTL eviction", () => { +Deno.test("peek() does not refresh activity for TTL eviction", async () => { using time = new FakeTime(); - using limiter = createRateLimiter({ + const store = createMemoryStore({ limit: 5, window: 1000, algorithm: "fixed-window", evictionTtl: 5000, evictionInterval: 1000, }); + await using limiter = createRateLimiter({ store }); - limiter.limit("a"); - assertEquals(limiter.size, 1); + await limiter.limit("a"); + assertEquals(store.size, 1); time.tick(4000); - limiter.peek("a"); // should NOT refresh last-access + await limiter.peek("a"); // should NOT refresh last-access time.tick(2000); // 6s total — "a" should be evicted despite the peek - assertEquals(limiter.size, 0); + assertEquals(store.size, 0); }); -Deno.test("evictionTtl: 0 disables eviction", () => { +Deno.test("evictionTtl: 0 disables eviction", async () => { using time = new FakeTime(); - using limiter = createRateLimiter({ + const store = createMemoryStore({ limit: 5, window: 1000, algorithm: "fixed-window", evictionTtl: 0, }); + await using limiter = createRateLimiter({ store }); - limiter.limit("a"); + await limiter.limit("a"); time.tick(1_000_000); - assertEquals(limiter.size, 1); + assertEquals(store.size, 1); }); // === Disposal === -Deno.test("dispose clears all state", () => { +Deno.test("dispose clears all 
state", async () => { using _time = new FakeTime(); - const limiter = createRateLimiter({ + const store = createMemoryStore({ limit: 5, window: 1000, algorithm: "gcra", }); + const limiter = createRateLimiter({ store }); - limiter.limit("a"); - limiter.limit("b"); - assertEquals(limiter.size, 2); + await limiter.limit("a"); + await limiter.limit("b"); + assertEquals(store.size, 2); - limiter[Symbol.dispose](); - assertEquals(limiter.size, 0); + await limiter[Symbol.asyncDispose](); + assertEquals(store.size, 0); }); -Deno.test("limit() returns ok: false after disposal", () => { +Deno.test("limit() returns ok: false after disposal", async () => { using _time = new FakeTime(); const limiter = createRateLimiter({ limit: 5, @@ -835,15 +846,15 @@ Deno.test("limit() returns ok: false after disposal", () => { algorithm: "gcra", }); - limiter[Symbol.dispose](); - const r = limiter.limit("a"); + await limiter[Symbol.asyncDispose](); + const r = await limiter.limit("a"); assertFalse(r.ok); assertEquals(r.remaining, 0); assertEquals(r.resetAt, 0); assertEquals(r.retryAfter, 0); }); -Deno.test("peek() returns ok: false after disposal", () => { +Deno.test("peek() returns ok: false after disposal", async () => { using _time = new FakeTime(); const limiter = createRateLimiter({ limit: 5, @@ -851,15 +862,15 @@ Deno.test("peek() returns ok: false after disposal", () => { algorithm: "gcra", }); - limiter[Symbol.dispose](); - const r = limiter.peek("a"); + await limiter[Symbol.asyncDispose](); + const r = await limiter.peek("a"); assertFalse(r.ok); assertEquals(r.remaining, 0); assertEquals(r.resetAt, 0); assertEquals(r.retryAfter, 0); }); -Deno.test("reset() is a no-op after disposal", () => { +Deno.test("reset() is a no-op after disposal", async () => { using _time = new FakeTime(); const limiter = createRateLimiter({ limit: 5, @@ -867,15 +878,15 @@ Deno.test("reset() is a no-op after disposal", () => { algorithm: "gcra", }); - limiter[Symbol.dispose](); - limiter.reset("a"); // 
should not throw + await limiter[Symbol.asyncDispose](); + await limiter.reset("a"); // should not throw }); // === Metadata correctness === -Deno.test("result.limit matches configured value", () => { +Deno.test("result.limit matches configured value", async () => { const now = 0; - using limiter = createRateLimiter({ + await using limiter = createRateLimiter({ limit: 42, window: 1000, algorithm: "gcra", @@ -883,13 +894,13 @@ Deno.test("result.limit matches configured value", () => { clock: () => now, }); - assertEquals(limiter.limit("a").limit, 42); - assertEquals(limiter.peek("a").limit, 42); + assertEquals((await limiter.limit("a")).limit, 42); + assertEquals((await limiter.peek("a")).limit, 42); }); -Deno.test("retryAfter is 0 when allowed, positive when denied", () => { +Deno.test("retryAfter is 0 when allowed, positive when denied", async () => { const now = 0; - using limiter = createRateLimiter({ + await using limiter = createRateLimiter({ limit: 1, window: 1000, algorithm: "fixed-window", @@ -897,16 +908,16 @@ Deno.test("retryAfter is 0 when allowed, positive when denied", () => { clock: () => now, }); - const allowed = limiter.limit("a"); + const allowed = await limiter.limit("a"); assertEquals(allowed.retryAfter, 0); - const denied = limiter.limit("a"); + const denied = await limiter.limit("a"); assert(denied.retryAfter > 0); }); -Deno.test("resetAt is a future timestamp", () => { +Deno.test("resetAt is a future timestamp", async () => { const now = 5000; - using limiter = createRateLimiter({ + await using limiter = createRateLimiter({ limit: 5, window: 1000, algorithm: "fixed-window", @@ -914,13 +925,13 @@ Deno.test("resetAt is a future timestamp", () => { clock: () => now, }); - const r = limiter.limit("a"); + const r = await limiter.limit("a"); assert(r.resetAt > now); }); -Deno.test("gcra: retryAfter when now < allowAt (request arrives too early)", () => { +Deno.test("gcra: retryAfter when now < allowAt (request arrives too early)", async () => { let 
now = 0; - using limiter = createRateLimiter({ + await using limiter = createRateLimiter({ limit: 5, window: 1000, algorithm: "gcra", @@ -929,28 +940,28 @@ Deno.test("gcra: retryAfter when now < allowAt (request arrives too early)", () }); // Fill all 5 slots: tat advances to 1000 - for (let i = 0; i < 5; i++) limiter.limit("a"); + for (let i = 0; i < 5; i++) await limiter.limit("a"); // Advance only 100ms — tat is 1000, allowAt = tat - tau = 0. // A request at now=100 is after allowAt, so this exercises the else branch. now = 100; - const r1 = limiter.limit("a"); + const r1 = await limiter.limit("a"); assertFalse(r1.ok); assert(r1.retryAfter > 0); // Now set now to -100 (simulating clock skew) — now < allowAt exercises // the `now < allowAt` branch in result(). now = -100; - const r2 = limiter.peek("a"); + const r2 = await limiter.peek("a"); assertFalse(r2.ok); assert(r2.retryAfter > 0); }); // === Per-key isolation === -Deno.test("keys are isolated from each other", () => { +Deno.test("keys are isolated from each other", async () => { const now = 0; - using limiter = createRateLimiter({ + await using limiter = createRateLimiter({ limit: 2, window: 1000, algorithm: "gcra", @@ -958,55 +969,55 @@ Deno.test("keys are isolated from each other", () => { clock: () => now, }); - limiter.limit("a", { cost: 2 }); - assertFalse(limiter.limit("a").ok); + await limiter.limit("a", { cost: 2 }); + assertFalse((await limiter.limit("a")).ok); - assert(limiter.limit("b").ok); - assert(limiter.limit("b").ok); + assert((await limiter.limit("b")).ok); + assert((await limiter.limit("b")).ok); }); // === Default algorithm is sliding-window === -Deno.test("default algorithm is sliding-window", () => { +Deno.test("default algorithm is sliding-window", async () => { let now = 0; - using limiter = createRateLimiter({ + await using limiter = createRateLimiter({ limit: 10, window: 1000, evictionTtl: 0, clock: () => now, }); - limiter.limit("a", { cost: 10 }); + await limiter.limit("a", { 
cost: 10 }); // At half-window, a fixed window would have reset. Sliding window hasn't. now = 500; - assertFalse(limiter.limit("a").ok); + assertFalse((await limiter.limit("a")).ok); // After full window, sliding window frees permits. now = 1000; - assert(limiter.limit("a").ok); + assert((await limiter.limit("a")).ok); }); // === Default clock uses Date.now (T-1 test) === -Deno.test("default clock uses Date.now", () => { +Deno.test("default clock uses Date.now", async () => { using _time = new FakeTime(0); - using limiter = createRateLimiter({ + await using limiter = createRateLimiter({ limit: 5, window: 1000, algorithm: "fixed-window", }); - const r = limiter.limit("a"); + const r = await limiter.limit("a"); assert(r.ok); assertEquals(r.resetAt, 1000); }); // === peek() with cost (C-2/A-2) === -Deno.test("peek() with cost checks whether that cost would be allowed", () => { +Deno.test("peek() with cost checks whether that cost would be allowed", async () => { const now = 0; - using limiter = createRateLimiter({ + await using limiter = createRateLimiter({ limit: 5, window: 1000, algorithm: "fixed-window", @@ -1014,15 +1025,15 @@ Deno.test("peek() with cost checks whether that cost would be allowed", () => { clock: () => now, }); - limiter.limit("a", { cost: 3 }); + await limiter.limit("a", { cost: 3 }); - assert(limiter.peek("a", { cost: 2 }).ok); - assertFalse(limiter.peek("a", { cost: 3 }).ok); + assert((await limiter.peek("a", { cost: 2 })).ok); + assertFalse((await limiter.peek("a", { cost: 3 })).ok); }); -Deno.test("peek() validates cost", () => { +Deno.test("peek() validates cost", async () => { using _time = new FakeTime(); - using limiter = createRateLimiter({ limit: 10, window: 1000 }); + await using limiter = createRateLimiter({ limit: 10, window: 1000 }); assertThrows(() => limiter.peek("a", { cost: 0 }), RangeError, "cost"); assertThrows(() => limiter.peek("a", { cost: -1 }), RangeError, "cost"); @@ -1032,9 +1043,9 @@ Deno.test("peek() validates cost", 
() => { // === maxKeys (S-1) === -Deno.test("maxKeys rejects new keys when limit reached", () => { +Deno.test("maxKeys evicts LRU key when a new key arrives at capacity", async () => { const now = 0; - using limiter = createRateLimiter({ + const store = createMemoryStore({ limit: 5, window: 1000, algorithm: "gcra", @@ -1042,19 +1053,23 @@ Deno.test("maxKeys rejects new keys when limit reached", () => { maxKeys: 2, clock: () => now, }); + await using limiter = createRateLimiter({ store }); - assert(limiter.limit("a").ok); - assert(limiter.limit("b").ok); - assertEquals(limiter.size, 2); + assert((await limiter.limit("a")).ok); + assert((await limiter.limit("b")).ok); + assertEquals(store.size, 2); - const r = limiter.limit("c"); - assertFalse(r.ok); - assertEquals(limiter.size, 2); + const r = await limiter.limit("c"); + assert(r.ok); + assertEquals(store.size, 2); + assertFalse(store.has("a")); + assert(store.has("b")); + assert(store.has("c")); }); -Deno.test("maxKeys allows existing keys even when at capacity", () => { +Deno.test("maxKeys allows existing keys even when at capacity", async () => { const now = 0; - using limiter = createRateLimiter({ + const store = createMemoryStore({ limit: 5, window: 1000, algorithm: "gcra", @@ -1062,17 +1077,18 @@ Deno.test("maxKeys allows existing keys even when at capacity", () => { maxKeys: 2, clock: () => now, }); + await using limiter = createRateLimiter({ store }); - limiter.limit("a"); - limiter.limit("b"); + await limiter.limit("a"); + await limiter.limit("b"); - const r = limiter.limit("a"); + const r = await limiter.limit("a"); assert(r.ok); }); -Deno.test("maxKeys: 0 disables key limit", () => { +Deno.test("maxKeys: 0 disables key limit", async () => { const now = 0; - using limiter = createRateLimiter({ + const store = createMemoryStore({ limit: 100, window: 1000, algorithm: "gcra", @@ -1080,11 +1096,12 @@ Deno.test("maxKeys: 0 disables key limit", () => { maxKeys: 0, clock: () => now, }); + await using limiter = 
createRateLimiter({ store }); for (let i = 0; i < 1000; i++) { - assert(limiter.limit(`key:${i}`).ok); + assert((await limiter.limit(`key:${i}`)).ok); } - assertEquals(limiter.size, 1000); + assertEquals(store.size, 1000); }); Deno.test("createRateLimiter() throws for invalid maxKeys", () => { @@ -1100,9 +1117,9 @@ Deno.test("createRateLimiter() throws for invalid maxKeys", () => { ); }); -Deno.test("maxKeys rejects peek for unknown key when at capacity", () => { +Deno.test("maxKeys: peek for unknown key at capacity does not evict", async () => { const now = 0; - using limiter = createRateLimiter({ + const store = createMemoryStore({ limit: 5, window: 1000, algorithm: "gcra", @@ -1110,20 +1127,22 @@ Deno.test("maxKeys rejects peek for unknown key when at capacity", () => { maxKeys: 2, clock: () => now, }); + await using limiter = createRateLimiter({ store }); - limiter.limit("a"); - limiter.limit("b"); + await limiter.limit("a"); + await limiter.limit("b"); - const r = limiter.peek("c"); - assertFalse(r.ok); - assertEquals(r.remaining, 0); - assertEquals(r.retryAfter, 0); - assertEquals(limiter.size, 2); + const r = await limiter.peek("c"); + assert(r.ok); + assertEquals(r.remaining, 5); + assertEquals(store.size, 2); + assert(store.has("a")); + assert(store.has("b")); }); -Deno.test("maxKeys allows peek for existing key at capacity", () => { +Deno.test("maxKeys allows peek for existing key at capacity", async () => { const now = 0; - using limiter = createRateLimiter({ + const store = createMemoryStore({ limit: 5, window: 1000, algorithm: "gcra", @@ -1131,20 +1150,21 @@ Deno.test("maxKeys allows peek for existing key at capacity", () => { maxKeys: 2, clock: () => now, }); + await using limiter = createRateLimiter({ store }); - limiter.limit("a"); - limiter.limit("b"); + await limiter.limit("a"); + await limiter.limit("b"); - const r = limiter.peek("a"); + const r = await limiter.peek("a"); assert(r.ok); assertEquals(r.remaining, 4); }); // === maxKeys + window 
reset (C-1 regression) === -Deno.test("maxKeys allows existing key whose window has reset", () => { +Deno.test("maxKeys allows existing key whose window has reset", async () => { let now = 0; - using limiter = createRateLimiter({ + const store = createMemoryStore({ limit: 3, window: 1000, algorithm: "fixed-window", @@ -1152,21 +1172,22 @@ Deno.test("maxKeys allows existing key whose window has reset", () => { maxKeys: 2, clock: () => now, }); + await using limiter = createRateLimiter({ store }); - limiter.limit("a"); - limiter.limit("b"); - assertEquals(limiter.size, 2); + await limiter.limit("a"); + await limiter.limit("b"); + assertEquals(store.size, 2); // Advance past the window so "a" resets to full capacity now = 2000; - const r = limiter.limit("a"); + const r = await limiter.limit("a"); assert(r.ok); assertEquals(r.remaining, 2); }); -Deno.test("maxKeys allows GCRA key after full tat drain", () => { +Deno.test("maxKeys allows GCRA key after full tat drain", async () => { let now = 0; - using limiter = createRateLimiter({ + const store = createMemoryStore({ limit: 5, window: 1000, algorithm: "gcra", @@ -1174,21 +1195,22 @@ Deno.test("maxKeys allows GCRA key after full tat drain", () => { maxKeys: 2, clock: () => now, }); + await using limiter = createRateLimiter({ store }); - limiter.limit("a"); - limiter.limit("b"); + await limiter.limit("a"); + await limiter.limit("b"); // Advance well past the window so "a" drains fully now = 5000; - const r = limiter.limit("a"); + const r = await limiter.limit("a"); assert(r.ok); }); // === peek() unknown key with cost > 1 (T-TEST-3) === -Deno.test("peek() returns ok for unknown key with cost <= limit", () => { +Deno.test("peek() returns ok for unknown key with cost <= limit", async () => { const now = 0; - using limiter = createRateLimiter({ + await using limiter = createRateLimiter({ limit: 10, window: 1000, algorithm: "fixed-window", @@ -1196,15 +1218,15 @@ Deno.test("peek() returns ok for unknown key with cost <= 
limit", () => { clock: () => now, }); - const p = limiter.peek("unknown", { cost: 5 }); + const p = await limiter.peek("unknown", { cost: 5 }); assert(p.ok); assertEquals(p.remaining, 10); assertEquals(p.limit, 10); }); -Deno.test("peek() returns not-ok for unknown key with cost > limit", () => { +Deno.test("peek() returns not-ok for unknown key with cost > limit", async () => { const now = 0; - using limiter = createRateLimiter({ + await using limiter = createRateLimiter({ limit: 5, window: 1000, algorithm: "gcra", @@ -1263,3 +1285,94 @@ Deno.test("createRateLimiter() throws for non-integer segmentsPerWindow", () => "segmentsPerWindow", ); }); + +// === Store backend integration === + +Deno.test("createRateLimiter() with custom store delegates correctly", async () => { + const store = createMemoryStore({ + limit: 3, + window: 1000, + algorithm: "fixed-window", + evictionTtl: 0, + }); + await using limiter = createRateLimiter({ store }); + + const r = await limiter.limit("a"); + assert(r.ok); + assertEquals(r.remaining, 2); + assertEquals(r.limit, 3); +}); + +Deno.test("createRateLimiter() reads capacity/window from store", async () => { + const store = createMemoryStore({ + limit: 42, + window: 5000, + algorithm: "gcra", + evictionTtl: 0, + }); + await using limiter = createRateLimiter({ store }); + + const r = await limiter.limit("a"); + assert(r.ok); + assertEquals(r.limit, 42); +}); + +// === Concurrent limit() calls === + +Deno.test("concurrent limit() calls on the same key respect the limit", async () => { + const now = 0; + await using limiter = createRateLimiter({ + limit: 2, + window: 1000, + algorithm: "fixed-window", + evictionTtl: 0, + clock: () => now, + }); + + const results = await Promise.all([ + limiter.limit("a"), + limiter.limit("a"), + limiter.limit("a"), + ]); + + const allowed = results.filter((r) => r.ok).length; + const denied = results.filter((r) => !r.ok).length; + assertEquals(allowed, 2); + assertEquals(denied, 1); +}); + +// === LRU 
eviction ordering === + +Deno.test("maxKeys evicts the least-recently-used key", async () => { + let now = 0; + const store = createMemoryStore({ + limit: 5, + window: 1000, + algorithm: "gcra", + evictionTtl: 0, + maxKeys: 3, + clock: () => now, + }); + await using limiter = createRateLimiter({ store }); + + now = 1; + await limiter.limit("a"); + now = 2; + await limiter.limit("b"); + now = 3; + await limiter.limit("c"); + assertEquals(store.size, 3); + + // Touch "a" so it becomes most-recently-used + now = 4; + await limiter.limit("a"); + + // Insert "d" — should evict "b" (least-recently-used), not "a" + now = 5; + await limiter.limit("d"); + assertEquals(store.size, 3); + assertFalse(store.has("b")); + assert(store.has("a")); + assert(store.has("c")); + assert(store.has("d")); +}); From 290604596f2d571dc562f82752613dcca6dd19a0 Mon Sep 17 00:00:00 2001 From: Tomas Zijdemans Date: Mon, 30 Mar 2026 11:34:08 +0200 Subject: [PATCH 07/15] implement store concept --- rate_limit/memory_store.ts | 235 +++++++++++++++++++++++++++++++++++++ rate_limit/store_types.ts | 48 ++++++++ 2 files changed, 283 insertions(+) create mode 100644 rate_limit/memory_store.ts create mode 100644 rate_limit/store_types.ts diff --git a/rate_limit/memory_store.ts b/rate_limit/memory_store.ts new file mode 100644 index 000000000000..e182e328ba7d --- /dev/null +++ b/rate_limit/memory_store.ts @@ -0,0 +1,235 @@ +// Copyright 2018-2026 the Deno authors. MIT license. +// This module is browser compatible. 
+ +import type { RateLimitResult } from "./rate_limiter.ts"; +import type { RateLimitStore } from "./store_types.ts"; +import { + assertNonNegativeInteger, + assertPositiveFinite, + assertPositiveInteger, +} from "./_validation.ts"; +import { + createFixedWindowAlgorithm, + createGcraAlgorithm, + createSlidingWindowAlgorithm, + createTokenBucketAlgorithm, +} from "./_keyed_algorithms.ts"; +import type { + KeyedAlgorithm, + KeyedAlgorithmOptions, +} from "./_keyed_algorithms.ts"; + +/** + * Options for {@linkcode createMemoryStore}. + * + * @experimental **UNSTABLE**: New API, yet to be vetted. + */ +export interface MemoryStoreOptions { + /** Maximum permits per key per window/cycle. */ + limit: number; + /** Window duration in milliseconds. */ + window: number; + /** + * Algorithm to use. + * + * @default {"sliding-window"} + */ + algorithm?: "fixed-window" | "sliding-window" | "token-bucket" | "gcra"; + /** + * Number of segments for the sliding window algorithm. + * + * @default {10} + */ + segmentsPerWindow?: number; + /** + * For token bucket: tokens added per replenishment period. + * + * @default {limit} + */ + tokensPerPeriod?: number; + /** + * Time-to-live for idle key state in milliseconds. Set to `0` to disable. + * + * @default {300_000} + */ + evictionTtl?: number; + /** + * How often to scan for and evict idle keys, in milliseconds. Each + * scan iterates all tracked keys, so increase this value (or disable + * TTL eviction entirely with `evictionTtl: 0`) for very high key + * cardinality to avoid event-loop pauses. + * + * @default {60_000} + */ + evictionInterval?: number; + /** + * Maximum number of keys to track. When a new key arrives at capacity, + * the least-recently-used key is evicted to make room. Set to `0` to + * disable (unbounded). + * + * @default {0} + */ + maxKeys?: number; + /** + * Clock function returning the current time in milliseconds. Override + * for testing with `FakeTime`. 
+ * + * @default {Date.now} + */ + clock?: () => number; +} + +/** + * An in-memory {@linkcode RateLimitStore} with additional synchronous + * diagnostics. Extends the base store contract with `has()` and `size` + * for in-memory storage. + * + * @experimental **UNSTABLE**: New API, yet to be vetted. + */ +export interface MemoryStore extends RateLimitStore { + /** + * Whether a key has tracked state. + * + * @param key Identifier for the rate limit subject. + * @returns `true` if the key is currently tracked. + */ + has(key: string): boolean; + /** Number of keys currently tracked. */ + readonly size: number; +} + +/** + * Create an in-memory rate limit store backed by a `Map`. This is the + * default store used by `createRateLimiter` when no `store` option is + * provided. + * + * @experimental **UNSTABLE**: New API, yet to be vetted. + * + * @example Creating a memory store directly + * ```ts + * import { createMemoryStore } from "@std/rate-limit/memory-store"; + * import { assertEquals } from "@std/assert"; + * + * await using store = createMemoryStore({ + * limit: 5, + * window: 1000, + * algorithm: "fixed-window", + * evictionTtl: 0, + * }); + * + * assertEquals(store.capacity, 5); + * assertEquals(store.window, 1000); + * ``` + * + * @param options Configuration for the memory store. + * @returns A {@linkcode MemoryStore}. 
+ */ +export function createMemoryStore(options: MemoryStoreOptions): MemoryStore { + const context = "memory store"; + assertPositiveInteger(context, "limit", options.limit); + assertPositiveFinite(context, "window", options.window); + + const { + limit, + window: windowMs, + algorithm: algorithmName = "sliding-window", + segmentsPerWindow = 10, + tokensPerPeriod = limit, + evictionTtl = 300_000, + evictionInterval = 60_000, + maxKeys = 0, + clock = Date.now, + } = options; + + if (algorithmName === "token-bucket") { + assertPositiveInteger(context, "tokensPerPeriod", tokensPerPeriod); + if (tokensPerPeriod > limit) { + throw new RangeError( + `Cannot create ${context}: 'tokensPerPeriod' (${tokensPerPeriod}) exceeds 'limit' (${limit})`, + ); + } + } + + assertNonNegativeInteger(context, "evictionTtl", evictionTtl); + + if (evictionTtl > 0) { + assertPositiveInteger(context, "evictionInterval", evictionInterval); + } + + assertNonNegativeInteger(context, "maxKeys", maxKeys); + + const keyedOptions: KeyedAlgorithmOptions = { maxKeys }; + + let algorithm: KeyedAlgorithm; + switch (algorithmName) { + case "fixed-window": + algorithm = createFixedWindowAlgorithm(limit, windowMs, keyedOptions); + break; + case "sliding-window": + algorithm = createSlidingWindowAlgorithm( + limit, + windowMs, + segmentsPerWindow, + keyedOptions, + ); + break; + case "token-bucket": + algorithm = createTokenBucketAlgorithm( + limit, + windowMs, + tokensPerPeriod, + keyedOptions, + ); + break; + case "gcra": + algorithm = createGcraAlgorithm(limit, windowMs, keyedOptions); + break; + default: + throw new TypeError( + `Cannot create ${context}: unknown algorithm '${algorithmName as string}'`, + ); + } + + let evictionTimer: ReturnType | undefined; + + if (evictionTtl > 0) { + evictionTimer = setInterval( + () => algorithm.evict(clock(), evictionTtl), + evictionInterval, + ); + if (typeof Deno !== "undefined") Deno.unrefTimer(evictionTimer as number); + } + + return { + get capacity(): 
number { + return limit; + }, + get window(): number { + return windowMs; + }, + consume(key: string, cost: number): Promise { + return Promise.resolve(algorithm.limit(key, cost, clock())); + }, + peek(key: string, cost: number): Promise { + return Promise.resolve(algorithm.peek(key, cost, clock())); + }, + has(key: string): boolean { + return algorithm.has(key); + }, + reset(key: string): Promise { + algorithm.reset(key); + return Promise.resolve(); + }, + get size(): number { + return algorithm.size; + }, + [Symbol.asyncDispose](): Promise { + if (evictionTimer !== undefined) { + clearInterval(evictionTimer); + evictionTimer = undefined; + } + algorithm.clear(); + return Promise.resolve(); + }, + }; +} diff --git a/rate_limit/store_types.ts b/rate_limit/store_types.ts new file mode 100644 index 000000000000..509ef0535a34 --- /dev/null +++ b/rate_limit/store_types.ts @@ -0,0 +1,48 @@ +// Copyright 2018-2026 the Deno authors. MIT license. +// This module is browser compatible. + +import type { RateLimitResult } from "./rate_limiter.ts"; + +/** + * A pluggable backend for keyed rate limiting. Stores own the per-key + * algorithm state and are self-contained: they carry `capacity` and `window` + * so `createRateLimiter` reads configuration from the store rather than + * duplicating it. + * + * Each store owns its own time source. In-memory stores default to + * `Date.now` (overridable via `clock` for `FakeTime` testing); distributed + * stores (e.g. Redis) use server-side time. + * + * @experimental **UNSTABLE**: New API, yet to be vetted. + */ +export interface RateLimitStore extends AsyncDisposable { + /** The configured permit limit per key per window. */ + readonly capacity: number; + /** The window duration in milliseconds. */ + readonly window: number; + + /** + * Check and consume permits for a key. + * + * @param key Identifier for the rate limit subject. + * @param cost Number of permits to consume. + * @returns The rate limit decision and metadata. 
+ */ + consume(key: string, cost: number): Promise; + + /** + * Check the current state for a key without consuming any permits. + * + * @param key Identifier for the rate limit subject. + * @param cost Number of permits to check. + * @returns The rate limit decision and metadata. + */ + peek(key: string, cost: number): Promise; + + /** + * Reset all state for a key, restoring it to full capacity. + * + * @param key Identifier for the rate limit subject. + */ + reset(key: string): Promise; +} From 0beabebf732e18237c4056edc9df5941edc1c087 Mon Sep 17 00:00:00 2001 From: Tomas Zijdemans <113360400+tomas-zijdemans@users.noreply.github.com> Date: Mon, 30 Mar 2026 11:53:53 +0200 Subject: [PATCH 08/15] feat(cache/unstable): add `peek()` to `TtlCache` (#7070) --- cache/ttl_cache.ts | 55 +++++++++++++++++++++++++++++++++++------ cache/ttl_cache_test.ts | 36 +++++++++++++++++++++++++++ 2 files changed, 84 insertions(+), 7 deletions(-) diff --git a/cache/ttl_cache.ts b/cache/ttl_cache.ts index 7a5d86e5831f..7f93795a3642 100644 --- a/cache/ttl_cache.ts +++ b/cache/ttl_cache.ts @@ -171,6 +171,13 @@ export class TtlCache extends Map ); } + const abs = options?.absoluteExpiration; + if (abs !== undefined && (!(abs >= 0) || !Number.isFinite(abs))) { + throw new RangeError( + `Cannot set entry in TtlCache: absoluteExpiration must be a finite, non-negative number: received ${abs}`, + ); + } + const existing = this.#timeouts.get(key); if (existing !== undefined) clearTimeout(existing); super.set(key, value); @@ -178,13 +185,7 @@ export class TtlCache extends Map if (this.#slidingExpiration) { this.#entryTtls!.set(key, ttl); - if (options?.absoluteExpiration !== undefined) { - const abs = options.absoluteExpiration; - if (!(abs >= 0) || !Number.isFinite(abs)) { - throw new RangeError( - `Cannot set entry in TtlCache: absoluteExpiration must be a finite, non-negative number: received ${abs}`, - ); - } + if (abs !== undefined) { this.#absoluteDeadlines!.set(key, Date.now() + abs); } 
else { this.#absoluteDeadlines!.delete(key); @@ -223,6 +224,46 @@ export class TtlCache extends Map return super.get(key); } + /** + * Returns the value associated with the given key, or `undefined` if the + * key is not present, **without** resetting its TTL. + * + * This is the TTL-cache equivalent of + * {@linkcode LruCache.prototype.peek | LruCache.peek()}: a side-effect-free + * read that leaves the entry's expiration unchanged. + * + * @experimental **UNSTABLE**: New API, yet to be vetted. + * + * @param key The key to look up. + * @returns The value, or `undefined` if not present. + * + * @example Peeking at a value without resetting the sliding TTL + * ```ts + * import { TtlCache } from "@std/cache/ttl-cache"; + * import { assertEquals } from "@std/assert/equals"; + * import { FakeTime } from "@std/testing/time"; + * + * using time = new FakeTime(0); + * const cache = new TtlCache(100, { + * slidingExpiration: true, + * }); + * + * cache.set("a", 1); + * time.now = 80; + * + * // peek does not reset the TTL + * assertEquals(cache.peek("a"), 1); + * + * // entry still expires at t=100 + * time.now = 100; + * assertEquals(cache.peek("a"), undefined); + * ``` + */ + peek(key: K): V | undefined { + if (!super.has(key)) return undefined; + return super.get(key); + } + /** * Deletes the value associated with the given key. 
* diff --git a/cache/ttl_cache_test.ts b/cache/ttl_cache_test.ts index c5c0267c026d..a5823122f653 100644 --- a/cache/ttl_cache_test.ts +++ b/cache/ttl_cache_test.ts @@ -265,6 +265,42 @@ Deno.test("TtlCache validates TTL", async (t) => { }); }); +Deno.test("TtlCache peek()", async (t) => { + await t.step("returns value without resetting sliding TTL", () => { + using time = new FakeTime(0); + const cache = new TtlCache(100, { + slidingExpiration: true, + }); + + cache.set("a", 1); + + time.now = 80; + assertEquals(cache.peek("a"), 1); + + // peek did not reset the TTL, so the entry still expires at t=100 + time.now = 100; + assertEquals(cache.peek("a"), undefined); + }); + + await t.step("returns value for non-sliding cache", () => { + using time = new FakeTime(0); + const cache = new TtlCache(100); + + cache.set("a", 1); + + time.now = 50; + assertEquals(cache.peek("a"), 1); + + time.now = 100; + assertEquals(cache.peek("a"), undefined); + }); + + await t.step("returns undefined for missing key", () => { + using cache = new TtlCache(100); + assertEquals(cache.peek("missing"), undefined); + }); +}); + Deno.test("TtlCache get() returns undefined for missing key with sliding expiration", () => { using cache = new TtlCache(100, { slidingExpiration: true, From 26556aa101592362c44aa0c63c608cfd5aef45d0 Mon Sep 17 00:00:00 2001 From: Tomas Zijdemans Date: Mon, 30 Mar 2026 15:17:51 +0200 Subject: [PATCH 09/15] Add Redis Store --- rate_limit/deno.json | 3 ++- rate_limit/memory_store.ts | 26 ++------------------------ rate_limit/rate_limiter.ts | 22 ++++++---------------- rate_limit/store_types.ts | 30 ++++++++++++++++++++++++++++++ 4 files changed, 40 insertions(+), 41 deletions(-) diff --git a/rate_limit/deno.json b/rate_limit/deno.json index 7d97d4e67d30..99ca8903bb33 100644 --- a/rate_limit/deno.json +++ b/rate_limit/deno.json @@ -9,6 +9,7 @@ "./types": "./types.ts", "./rate-limiter": "./rate_limiter.ts", "./store-types": "./store_types.ts", - "./memory-store": 
"./memory_store.ts" + "./memory-store": "./memory_store.ts", + "./redis-store": "./redis_store.ts" } } diff --git a/rate_limit/memory_store.ts b/rate_limit/memory_store.ts index e182e328ba7d..1e99ab3d9ecb 100644 --- a/rate_limit/memory_store.ts +++ b/rate_limit/memory_store.ts @@ -2,7 +2,7 @@ // This module is browser compatible. import type { RateLimitResult } from "./rate_limiter.ts"; -import type { RateLimitStore } from "./store_types.ts"; +import type { AlgorithmOptions, RateLimitStore } from "./store_types.ts"; import { assertNonNegativeInteger, assertPositiveFinite, @@ -24,29 +24,7 @@ import type { * * @experimental **UNSTABLE**: New API, yet to be vetted. */ -export interface MemoryStoreOptions { - /** Maximum permits per key per window/cycle. */ - limit: number; - /** Window duration in milliseconds. */ - window: number; - /** - * Algorithm to use. - * - * @default {"sliding-window"} - */ - algorithm?: "fixed-window" | "sliding-window" | "token-bucket" | "gcra"; - /** - * Number of segments for the sliding window algorithm. - * - * @default {10} - */ - segmentsPerWindow?: number; - /** - * For token bucket: tokens added per replenishment period. - * - * @default {limit} - */ - tokensPerPeriod?: number; +export interface MemoryStoreOptions extends AlgorithmOptions { /** * Time-to-live for idle key state in milliseconds. Set to `0` to disable. * diff --git a/rate_limit/rate_limiter.ts b/rate_limit/rate_limiter.ts index 3dbb4d06bc4c..8484bd01ec3c 100644 --- a/rate_limit/rate_limiter.ts +++ b/rate_limit/rate_limiter.ts @@ -116,25 +116,15 @@ export type MemoryRateLimiterOptions = MemoryStoreOptions & { /** * Options when providing a custom {@linkcode RateLimitStore} backend. - * Memory-store options (`limit`, `window`, etc.) are typed as `never` - * to prevent accidentally passing them alongside a custom store, since - * the store owns those settings. + * {@linkcode MemoryStoreOptions} keys (`limit`, `window`, `algorithm`, + * `ttl`, etc.) 
are typed as `never` to prevent accidentally passing them + * alongside a custom store, since the store owns those settings. * * @experimental **UNSTABLE**: New API, yet to be vetted. */ -export interface StoreRateLimiterOptions { - /** The store backend to delegate to. */ - store: RateLimitStore; - limit?: never; - window?: never; - algorithm?: never; - segmentsPerWindow?: never; - tokensPerPeriod?: never; - evictionTtl?: never; - evictionInterval?: never; - maxKeys?: never; - clock?: never; -} +export type StoreRateLimiterOptions = + & { /** The store backend to delegate to. */ store: RateLimitStore } + & { [K in keyof MemoryStoreOptions]?: never }; /** * Options for {@linkcode createRateLimiter}. diff --git a/rate_limit/store_types.ts b/rate_limit/store_types.ts index 509ef0535a34..0d543411e09b 100644 --- a/rate_limit/store_types.ts +++ b/rate_limit/store_types.ts @@ -3,6 +3,36 @@ import type { RateLimitResult } from "./rate_limiter.ts"; +/** + * Algorithm configuration shared by all store backends. + * + * @experimental **UNSTABLE**: New API, yet to be vetted. + */ +export interface AlgorithmOptions { + /** Maximum permits per key per window/cycle. */ + limit: number; + /** Window duration in milliseconds. */ + window: number; + /** + * Algorithm to use. + * + * @default {"sliding-window"} + */ + algorithm?: "fixed-window" | "sliding-window" | "token-bucket" | "gcra"; + /** + * Number of segments for the sliding window algorithm. + * + * @default {10} + */ + segmentsPerWindow?: number; + /** + * For token bucket: tokens added per replenishment period. + * + * @default {limit} + */ + tokensPerPeriod?: number; +} + /** * A pluggable backend for keyed rate limiting. 
Stores own the per-key * algorithm state and are self-contained: they carry `capacity` and `window` From 4e8a043e2f83058ec0628f84bc026190a5fd8fe5 Mon Sep 17 00:00:00 2001 From: Tomas Zijdemans Date: Mon, 30 Mar 2026 15:17:59 +0200 Subject: [PATCH 10/15] Add Redis Store --- rate_limit/_redis_scripts.ts | 408 ++++++++++++ rate_limit/redis_store.ts | 261 ++++++++ rate_limit/redis_store_test.ts | 1143 ++++++++++++++++++++++++++++++++ 3 files changed, 1812 insertions(+) create mode 100644 rate_limit/_redis_scripts.ts create mode 100644 rate_limit/redis_store.ts create mode 100644 rate_limit/redis_store_test.ts diff --git a/rate_limit/_redis_scripts.ts b/rate_limit/_redis_scripts.ts new file mode 100644 index 000000000000..3c061146acce --- /dev/null +++ b/rate_limit/_redis_scripts.ts @@ -0,0 +1,408 @@ +// Copyright 2018-2026 the Deno authors. MIT license. + +import type { RateLimitResult } from "./rate_limiter.ts"; +import type { RedisConnection, RedisEvalConnection } from "./redis_store.ts"; + +// --- Lua scripts --- +// Each script returns a flat array: [ok, remaining, resetAt, retryAfter, limit] +// `ok` is 1 for allowed, 0 for denied. +// All timestamps are in milliseconds. +// Scripts use redis.call('TIME') for server-side time. + +// Redis TIME returns [seconds, microseconds]. 
Convert to milliseconds: +const LUA_NOW = ` +local _t = redis.call('TIME') +local now = tonumber(_t[1]) * 1000 + math.floor(tonumber(_t[2]) / 1000) +`; + +const LUA_FIXED_WINDOW_CONSUME = `${LUA_NOW} +local key = KEYS[1] +local limit = tonumber(ARGV[1]) +local window = tonumber(ARGV[2]) +local cost = tonumber(ARGV[3]) + +local data = redis.call('HMGET', key, 'count', 'windowStart') +local count = tonumber(data[1]) or 0 +local windowStart = tonumber(data[2]) or now + +if now - windowStart >= window then + count = 0 + windowStart = windowStart + math.floor((now - windowStart) / window) * window +end + +local resetAt = windowStart + window +local ok = 0 +if count + cost <= limit then + ok = 1 + count = count + cost + redis.call('HMSET', key, 'count', count, 'windowStart', windowStart) + redis.call('PEXPIRE', key, math.ceil(resetAt - now)) +else + redis.call('HMSET', key, 'count', count, 'windowStart', windowStart) + redis.call('PEXPIRE', key, math.ceil(resetAt - now)) +end + +local remaining = math.max(0, limit - count) +local retryAfter = 0 +if ok == 0 then + retryAfter = resetAt - now +end +return {ok, remaining, tostring(resetAt), tostring(retryAfter), limit} +`; + +const LUA_FIXED_WINDOW_PEEK = `-- peek-mode +${LUA_NOW} +local key = KEYS[1] +local limit = tonumber(ARGV[1]) +local window = tonumber(ARGV[2]) +local cost = tonumber(ARGV[3]) + +local data = redis.call('HMGET', key, 'count', 'windowStart') +local count = tonumber(data[1]) or 0 +local windowStart = tonumber(data[2]) or now + +if now - windowStart >= window then + count = 0 + windowStart = windowStart + math.floor((now - windowStart) / window) * window +end + +local resetAt = windowStart + window +local ok = 0 +if count + cost <= limit then ok = 1 end +local remaining = math.max(0, limit - count) +local retryAfter = 0 +if ok == 0 then retryAfter = resetAt - now end +return {ok, remaining, tostring(resetAt), tostring(retryAfter), limit} +`; + +// Sliding window: uses a Hash where field = segment start 
time (ms string), +// value = count in that segment. On each call we remove fields whose segment +// start is older than `now - window`, then sum the remaining values. +const LUA_SLIDING_WINDOW_CONSUME = `${LUA_NOW} +local key = KEYS[1] +local limit = tonumber(ARGV[1]) +local window = tonumber(ARGV[2]) +local cost = tonumber(ARGV[3]) +local segments = tonumber(ARGV[4]) +local segDur = window / segments + +local segStart = now - (now % segDur) +local cutoff = now - window + +local fields = redis.call('HGETALL', key) +local total = 0 +local toDel = {} +for i = 1, #fields, 2 do + local seg = tonumber(fields[i]) + if seg <= cutoff then + toDel[#toDel + 1] = fields[i] + else + total = total + tonumber(fields[i + 1]) + end +end +if #toDel > 0 then + redis.call('HDEL', key, unpack(toDel)) +end + +local resetAt = segStart + segDur +local ok = 0 +if total + cost <= limit then + ok = 1 + redis.call('HINCRBY', key, tostring(segStart), cost) + total = total + cost + redis.call('PEXPIRE', key, window + segDur) +end + +local remaining = math.max(0, limit - total) +local retryAfter = 0 +if ok == 0 then retryAfter = resetAt - now end +return {ok, remaining, tostring(resetAt), tostring(retryAfter), limit} +`; + +const LUA_SLIDING_WINDOW_PEEK = `-- peek-mode +${LUA_NOW} +local key = KEYS[1] +local limit = tonumber(ARGV[1]) +local window = tonumber(ARGV[2]) +local cost = tonumber(ARGV[3]) +local segments = tonumber(ARGV[4]) +local segDur = window / segments + +local segStart = now - (now % segDur) +local cutoff = now - window + +local fields = redis.call('HGETALL', key) +local total = 0 +local toDel = {} +for i = 1, #fields, 2 do + local seg = tonumber(fields[i]) + if seg <= cutoff then + toDel[#toDel + 1] = fields[i] + else + total = total + tonumber(fields[i + 1]) + end +end +if #toDel > 0 then + redis.call('HDEL', key, unpack(toDel)) +end + +local resetAt = segStart + segDur +local ok = 0 +if total + cost <= limit then ok = 1 end +local remaining = math.max(0, limit - total) 
+local retryAfter = 0 +if ok == 0 then retryAfter = resetAt - now end +return {ok, remaining, tostring(resetAt), tostring(retryAfter), limit} +`; + +const LUA_TOKEN_BUCKET_CONSUME = `${LUA_NOW} +local key = KEYS[1] +local limit = tonumber(ARGV[1]) +local window = tonumber(ARGV[2]) +local cost = tonumber(ARGV[3]) +local tokensPerPeriod = tonumber(ARGV[4]) + +local data = redis.call('HMGET', key, 'tokens', 'lastRefill') +local tokens = tonumber(data[1]) +local lastRefill = tonumber(data[2]) + +if tokens == nil then + tokens = limit + lastRefill = now +else + local elapsed = now - lastRefill + if elapsed >= window then + local cycles = math.floor(elapsed / window) + tokens = math.min(limit, tokens + cycles * tokensPerPeriod) + lastRefill = lastRefill + cycles * window + end +end + +local ok = 0 +if tokens >= cost then + ok = 1 + tokens = tokens - cost +end + +redis.call('HMSET', key, 'tokens', tokens, 'lastRefill', lastRefill) +local resetAt = lastRefill + window +redis.call('PEXPIRE', key, math.max(1, math.ceil(resetAt - now) + window)) + +local remaining = math.max(0, math.floor(tokens)) +local retryAfter = 0 +if ok == 0 then + local deficit = cost - tokens + local cycles = math.ceil(deficit / tokensPerPeriod) + retryAfter = math.max(0, cycles * window - (now - lastRefill)) +end +return {ok, remaining, tostring(resetAt), tostring(retryAfter), limit} +`; + +const LUA_TOKEN_BUCKET_PEEK = `-- peek-mode +${LUA_NOW} +local key = KEYS[1] +local limit = tonumber(ARGV[1]) +local window = tonumber(ARGV[2]) +local cost = tonumber(ARGV[3]) +local tokensPerPeriod = tonumber(ARGV[4]) + +local data = redis.call('HMGET', key, 'tokens', 'lastRefill') +local tokens = tonumber(data[1]) +local lastRefill = tonumber(data[2]) + +if tokens == nil then + tokens = limit + lastRefill = now +else + local elapsed = now - lastRefill + if elapsed >= window then + local cycles = math.floor(elapsed / window) + tokens = math.min(limit, tokens + cycles * tokensPerPeriod) + lastRefill = lastRefill + 
cycles * window + end +end + +local ok = 0 +if tokens >= cost then ok = 1 end +local remaining = math.max(0, math.floor(tokens)) +local resetAt = lastRefill + window +local retryAfter = 0 +if ok == 0 then + local deficit = cost - tokens + local cycles = math.ceil(deficit / tokensPerPeriod) + retryAfter = math.max(0, cycles * window - (now - lastRefill)) +end +return {ok, remaining, tostring(resetAt), tostring(retryAfter), limit} +`; + +const LUA_GCRA_CONSUME = `${LUA_NOW} +local key = KEYS[1] +local limit = tonumber(ARGV[1]) +local window = tonumber(ARGV[2]) +local cost = tonumber(ARGV[3]) +local emissionInterval = window / limit +local tau = window + +local tat = tonumber(redis.call('GET', key)) or now + +local allowAt = tat - tau +if now < allowAt then + local remaining = 0 + local retryAfter = allowAt - now + local resetAt = tat + return {0, remaining, tostring(resetAt), tostring(retryAfter), limit} +end + +local newTat = math.max(tat, now) + emissionInterval * cost +if newTat - now > tau then + local diff = tau - (tat - now) + local remaining = math.min(limit, math.max(0, math.floor(diff / emissionInterval))) + local retryAfter = math.max(0, newTat - tau - now) + local resetAt = tat + return {0, remaining, tostring(resetAt), tostring(retryAfter), limit} +end + +redis.call('SET', key, tostring(newTat), 'PX', math.ceil(newTat - now + tau)) +local diff = tau - (newTat - now) +local remaining = math.min(limit, math.max(0, math.floor(diff / emissionInterval))) +return {1, remaining, tostring(newTat), '0', limit} +`; + +const LUA_GCRA_PEEK = `-- peek-mode +${LUA_NOW} +local key = KEYS[1] +local limit = tonumber(ARGV[1]) +local window = tonumber(ARGV[2]) +local cost = tonumber(ARGV[3]) +local emissionInterval = window / limit +local tau = window + +local tat = tonumber(redis.call('GET', key)) or now + +local allowAt = tat - tau +if now < allowAt then + local remaining = 0 + local retryAfter = allowAt - now + local resetAt = tat + return {0, remaining, 
tostring(resetAt), tostring(retryAfter), limit} +end + +local newTat = math.max(tat, now) + emissionInterval * cost +local diff = tau - (tat - now) +local remaining = math.min(limit, math.max(0, math.floor(diff / emissionInterval))) +if newTat - now > tau then + local retryAfter = math.max(0, newTat - tau - now) + return {0, remaining, tostring(tat), tostring(retryAfter), limit} +end +return {1, remaining, tostring(tat), '0', limit} +`; + +export const LUA_DELETE_KEY = ` +redis.call('DEL', KEYS[1]) +return 1 +`; + +export interface LuaScriptPair { + consume: string; + peek: string; +} + +export function getScripts(algorithm: string): LuaScriptPair { + switch (algorithm) { + case "fixed-window": + return { + consume: LUA_FIXED_WINDOW_CONSUME, + peek: LUA_FIXED_WINDOW_PEEK, + }; + case "sliding-window": + return { + consume: LUA_SLIDING_WINDOW_CONSUME, + peek: LUA_SLIDING_WINDOW_PEEK, + }; + case "token-bucket": + return { + consume: LUA_TOKEN_BUCKET_CONSUME, + peek: LUA_TOKEN_BUCKET_PEEK, + }; + case "gcra": + return { consume: LUA_GCRA_CONSUME, peek: LUA_GCRA_PEEK }; + default: + throw new TypeError( + `Cannot create redis store: unknown algorithm '${algorithm}'`, + ); + } +} + +export async function sha1Hex(text: string): Promise { + const data = new TextEncoder().encode(text); + const hash = await crypto.subtle.digest("SHA-1", data); + return [...new Uint8Array(hash)] + .map((b) => b.toString(16).padStart(2, "0")) + .join(""); +} + +export interface CachedScript { + source: string; + sha: string; +} + +/** + * Normalizes any {@linkcode RedisConnection} into the `eval`/`evalsha` + * shape used internally. If the connection already has `eval`/`evalsha`, + * it is returned as-is. If it only has `sendCommand`, a thin adapter is + * created. 
+ */ +export function toEvalConnection( + redis: RedisConnection, +): RedisEvalConnection { + if ("eval" in redis) return redis; + const conn = redis; + return { + eval(script: string, keys: string[], args: string[]): Promise { + return conn.sendCommand(["EVAL", script, keys.length, ...keys, ...args]); + }, + evalsha(sha: string, keys: string[], args: string[]): Promise { + return conn.sendCommand([ + "EVALSHA", + sha, + keys.length, + ...keys, + ...args, + ]); + }, + }; +} + +function isNoscriptError(err: unknown): boolean { + if (err instanceof Error) { + return err.message.includes("NOSCRIPT"); + } + return String(err).includes("NOSCRIPT"); +} + +export async function runScript( + redis: RedisEvalConnection, + script: CachedScript, + keys: string[], + args: string[], +): Promise { + try { + return await redis.evalsha(script.sha, keys, args); + } catch (err) { + if (isNoscriptError(err)) { + return await redis.eval(script.source, keys, args); + } + throw err; + } +} + +export function parseResult(raw: unknown, limit: number): RateLimitResult { + const arr = raw as [number, number, string, string, number]; + return { + ok: arr[0] === 1, + remaining: Number(arr[1]), + resetAt: Number(arr[2]), + retryAfter: Number(arr[3]), + limit, + }; +} diff --git a/rate_limit/redis_store.ts b/rate_limit/redis_store.ts new file mode 100644 index 000000000000..40886052d7a1 --- /dev/null +++ b/rate_limit/redis_store.ts @@ -0,0 +1,261 @@ +// Copyright 2018-2026 the Deno authors. MIT license. + +/** + * A Redis-backed {@linkcode RateLimitStore} for distributed rate limiting. + * + * All rate limit state is stored in Redis and manipulated atomically via + * Lua scripts, making this safe for multi-process / multi-server deployments. + * The store uses `redis.call('TIME')` inside Lua for server-side timestamps, + * so clock skew between application servers does not affect correctness. + * + * The store does not own the Redis connection — disposal is a no-op. 
+ * + * @experimental **UNSTABLE**: New API, yet to be vetted. + * + * @example Creating a Redis store + * ```ts ignore + * import { createRedisStore } from "@std/rate-limit/redis-store"; + * import { createRateLimiter } from "@std/rate-limit/rate-limiter"; + * + * const store = createRedisStore({ + * redis: myRedisClient, + * algorithm: "sliding-window", + * limit: 100, + * window: 60_000, + * }); + * + * await using limiter = createRateLimiter({ store }); + * const result = await limiter.limit(ip); + * ``` + * + * @module + */ + +import type { RateLimitResult } from "./rate_limiter.ts"; +import type { AlgorithmOptions, RateLimitStore } from "./store_types.ts"; +import { assertPositiveFinite, assertPositiveInteger } from "./_validation.ts"; +import { + type CachedScript, + getScripts, + LUA_DELETE_KEY, + parseResult, + runScript, + sha1Hex, + toEvalConnection, +} from "./_redis_scripts.ts"; + +/** + * Redis connection that exposes `eval` and `evalsha` methods. This is + * the interface used by clients such as `ioredis`, `node-redis`, and + * `@db/redis`. + * + * @experimental **UNSTABLE**: New API, yet to be vetted. + */ +export interface RedisEvalConnection { + /** + * Execute a Lua script on the Redis server. + * + * @param script The Lua script source. + * @param keys Redis keys the script operates on. + * @param args Additional arguments passed to the script. + * @returns The script's return value. + */ + eval(script: string, keys: string[], args: string[]): Promise; + + /** + * Execute a cached Lua script by its SHA1 hash. + * + * @param sha The SHA1 digest of the script. + * @param keys Redis keys the script operates on. + * @param args Additional arguments passed to the script. + * @returns The script's return value. + */ + evalsha(sha: string, keys: string[], args: string[]): Promise; +} + +/** + * Redis connection that exposes a single `sendCommand` method. This is + * the interface used by `@iuioiua/redis` and other minimal clients. 
+ * + * @experimental **UNSTABLE**: New API, yet to be vetted. + */ +export interface RedisSendCommandConnection { + /** + * Send a raw Redis command and return the parsed reply. + * + * @param args The command arguments (e.g. `["SET", "key", "value"]`). + * @returns The server's reply. + */ + sendCommand(args: readonly (string | number)[]): Promise; +} + +/** + * A Redis connection accepted by {@linkcode createRedisStore}. + * + * Supports two shapes: + * - `eval`/`evalsha` methods (ioredis, node-redis, `@db/redis`) + * - `sendCommand` (e.g. `@iuioiua/redis`) + * + * @experimental **UNSTABLE**: New API, yet to be vetted. + */ +export type RedisConnection = RedisEvalConnection | RedisSendCommandConnection; + +/** + * Options for {@linkcode createRedisStore}. + * + * @experimental **UNSTABLE**: New API, yet to be vetted. + */ +export interface RedisStoreOptions extends AlgorithmOptions { + /** The Redis connection to use. */ + redis: RedisConnection; + /** + * Key prefix for Redis keys. + * + * @default {"rl"} + */ + prefix?: string; +} + +/** + * Create a Redis-backed rate limit store. All state is stored in Redis + * and manipulated atomically via Lua scripts. + * + * The store does not own the Redis connection — `[Symbol.asyncDispose]` + * is a no-op. The caller is responsible for closing the connection. + * + * @experimental **UNSTABLE**: New API, yet to be vetted. + * + * @example Basic usage + * ```ts ignore + * import { createRedisStore } from "@std/rate-limit/redis-store"; + * import { createRateLimiter } from "@std/rate-limit/rate-limiter"; + * + * const store = createRedisStore({ + * redis: myRedisClient, + * algorithm: "sliding-window", + * limit: 100, + * window: 60_000, + * }); + * + * await using limiter = createRateLimiter({ store }); + * const result = await limiter.limit(ip); + * ``` + * + * @param options Configuration for the Redis store. + * @returns A {@linkcode RateLimitStore}. 
+ */ +export function createRedisStore( + options: RedisStoreOptions, +): RateLimitStore { + const context = "redis store"; + assertPositiveInteger(context, "limit", options.limit); + assertPositiveFinite(context, "window", options.window); + + const { + redis: rawRedis, + algorithm: algorithmName = "sliding-window", + limit, + window: windowMs, + segmentsPerWindow = 10, + tokensPerPeriod = limit, + prefix = "rl", + } = options; + + const redis = toEvalConnection(rawRedis); + + if (algorithmName === "sliding-window") { + if (!Number.isInteger(segmentsPerWindow) || segmentsPerWindow < 2) { + throw new RangeError( + `Cannot create ${context}: 'segmentsPerWindow' must be an integer >= 2, received ${segmentsPerWindow}`, + ); + } + if (windowMs % segmentsPerWindow !== 0) { + throw new RangeError( + `Cannot create ${context}: 'window' (${windowMs}) must be evenly divisible by 'segmentsPerWindow' (${segmentsPerWindow})`, + ); + } + } + + if (algorithmName === "token-bucket") { + assertPositiveInteger(context, "tokensPerPeriod", tokensPerPeriod); + if (tokensPerPeriod > limit) { + throw new RangeError( + `Cannot create ${context}: 'tokensPerPeriod' (${tokensPerPeriod}) exceeds 'limit' (${limit})`, + ); + } + } + + const scripts = getScripts(algorithmName); + + let consumeScript: CachedScript | undefined; + let peekScript: CachedScript | undefined; + let deleteScript: CachedScript | undefined; + + const initScripts = (async () => { + const [consumeSha, peekSha, deleteSha] = await Promise.all([ + sha1Hex(scripts.consume), + sha1Hex(scripts.peek), + sha1Hex(LUA_DELETE_KEY), + ]); + consumeScript = { source: scripts.consume, sha: consumeSha }; + peekScript = { source: scripts.peek, sha: peekSha }; + deleteScript = { source: LUA_DELETE_KEY, sha: deleteSha }; + })(); + + function redisKey(key: string): string { + return `${prefix}:${key}`; + } + + function buildArgs(): string[] { + const args = [String(limit), String(windowMs)]; + if (algorithmName === "sliding-window") { + 
return [...args, "", String(segmentsPerWindow)]; + } + if (algorithmName === "token-bucket") { + return [...args, "", String(tokensPerPeriod)]; + } + return [...args, ""]; + } + + const baseArgs = buildArgs(); + + return { + get capacity(): number { + return limit; + }, + get window(): number { + return windowMs; + }, + async consume(key: string, cost: number): Promise { + await initScripts; + const args = [...baseArgs]; + args[2] = String(cost); + const raw = await runScript( + redis, + consumeScript!, + [redisKey(key)], + args, + ); + return parseResult(raw, limit); + }, + async peek(key: string, cost: number): Promise { + await initScripts; + const args = [...baseArgs]; + args[2] = String(cost); + const raw = await runScript( + redis, + peekScript!, + [redisKey(key)], + args, + ); + return parseResult(raw, limit); + }, + async reset(key: string): Promise { + await initScripts; + await runScript(redis, deleteScript!, [redisKey(key)], []); + }, + [Symbol.asyncDispose](): Promise { + return Promise.resolve(); + }, + }; +} diff --git a/rate_limit/redis_store_test.ts b/rate_limit/redis_store_test.ts new file mode 100644 index 000000000000..daa7597926f9 --- /dev/null +++ b/rate_limit/redis_store_test.ts @@ -0,0 +1,1143 @@ +// Copyright 2018-2026 the Deno authors. MIT license. + +import { assert, assertEquals, assertFalse, assertThrows } from "@std/assert"; +import { createRedisStore } from "./redis_store.ts"; +import type { + RedisEvalConnection, + RedisSendCommandConnection, +} from "./redis_store.ts"; +import { createRateLimiter } from "./rate_limiter.ts"; + +/** + * In-memory Redis emulator that supports the subset of commands used + * by the rate limit Lua scripts. Rather than parsing Lua, this class + * implements a small Redis command engine and uses a real Lua-like + * execution model by pre-compiling each script into a sequence of + * command calls via text matching. 
+ * + * For testing purposes, the mock always rejects `evalsha` with NOSCRIPT + * to exercise the fallback path, and `eval` runs the script through + * the built-in command interpreter. + */ +class MockRedis implements RedisEvalConnection { + #strings = new Map(); + #hashes = new Map>(); + #expiries = new Map(); + #nowMs: number; + + constructor(nowMs = 0) { + this.#nowMs = nowMs; + } + + get now(): number { + return this.#nowMs; + } + + set now(ms: number) { + this.#nowMs = ms; + } + + tick(ms: number): void { + this.#nowMs += ms; + } + + eval(script: string, keys: string[], args: string[]): Promise { + return Promise.resolve(this.#runLuaScript(script, keys, args)); + } + + evalsha(_sha: string, _keys: string[], _args: string[]): Promise { + return Promise.reject(new Error("NOSCRIPT No matching script")); + } + + #evictExpired(): void { + for (const [key, expiresAt] of this.#expiries) { + if (this.#nowMs >= expiresAt) { + this.#strings.delete(key); + this.#hashes.delete(key); + this.#expiries.delete(key); + } + } + } + + #pexpire(key: string, ms: number): void { + if ( + this.#strings.has(key) || this.#hashes.has(key) + ) { + this.#expiries.set(key, this.#nowMs + ms); + } + } + + #del(key: string): void { + this.#strings.delete(key); + this.#hashes.delete(key); + this.#expiries.delete(key); + } + + #runLuaScript( + script: string, + keys: string[], + args: string[], + ): unknown { + this.#evictExpired(); + + const now = this.#nowMs; + + if (script.includes("redis.call('DEL'")) { + this.#del(keys[0]!); + return 1; + } + + const key = keys[0]!; + const limit = Number(args[0]); + const window = Number(args[1]); + const cost = Number(args[2]); + + const isPeek = script.includes("-- peek-mode"); + + if (script.includes("HGETALL")) { + const segments = Number(args[3]); + return this.#slidingWindow( + key, + limit, + window, + cost, + segments, + now, + isPeek, + ); + } + + if (script.includes("windowStart")) { + return this.#fixedWindow(key, limit, window, cost, now, 
isPeek); + } + + if (script.includes("lastRefill")) { + const tokensPerPeriod = Number(args[3]); + return this.#tokenBucket( + key, + limit, + window, + cost, + tokensPerPeriod, + now, + isPeek, + ); + } + + if (script.includes("emissionInterval")) { + return this.#gcra(key, limit, window, cost, now, isPeek); + } + + throw new Error("Unrecognized Lua script"); + } + + #fixedWindow( + key: string, + limit: number, + window: number, + cost: number, + now: number, + peek: boolean, + ): [number, number, string, string, number] { + let hash = this.#hashes.get(key); + let count = hash ? Number(hash.get("count") ?? 0) : 0; + let windowStart = hash ? Number(hash.get("windowStart") ?? now) : now; + + if (now - windowStart >= window) { + count = 0; + windowStart = windowStart + + Math.floor((now - windowStart) / window) * window; + } + + const resetAt = windowStart + window; + let ok = 0; + + if (count + cost <= limit) { + ok = 1; + if (!peek) { + count += cost; + } + } + + if (!peek) { + if (!hash) { + hash = new Map(); + this.#hashes.set(key, hash); + } + hash.set("count", String(count)); + hash.set("windowStart", String(windowStart)); + this.#pexpire(key, Math.ceil(resetAt - now)); + } + + const remaining = Math.max(0, limit - count); + const retryAfter = ok === 0 ? 
resetAt - now : 0; + + return [ok, remaining, String(resetAt), String(retryAfter), limit]; + } + + #slidingWindow( + key: string, + limit: number, + window: number, + cost: number, + _segments: number, + now: number, + peek: boolean, + ): [number, number, string, string, number] { + const segDur = window / _segments; + const segStart = now - (now % segDur); + const cutoff = now - window; + + let hash = this.#hashes.get(key); + if (!hash) { + hash = new Map(); + this.#hashes.set(key, hash); + } + + // Remove segments at or before the cutoff (matches Lua `seg <= cutoff`) + for (const [field] of hash) { + if (Number(field) <= cutoff) { + hash.delete(field); + } + } + + let total = 0; + for (const [, val] of hash) { + total += Number(val); + } + + const resetAt = segStart + segDur; + let ok = 0; + + if (total + cost <= limit) { + ok = 1; + if (!peek) { + const segKey = String(segStart); + hash.set(segKey, String((Number(hash.get(segKey) ?? "0")) + cost)); + total += cost; + this.#pexpire(key, window + segDur); + } + } + + const remaining = Math.max(0, limit - total); + const retryAfter = ok === 0 ? 
resetAt - now : 0; + + return [ok, remaining, String(resetAt), String(retryAfter), limit]; + } + + #tokenBucket( + key: string, + limit: number, + window: number, + cost: number, + tokensPerPeriod: number, + now: number, + peek: boolean, + ): [number, number, string, string, number] { + const hash = this.#hashes.get(key); + let tokens: number; + let lastRefill: number; + + if (!hash || !hash.has("tokens")) { + tokens = limit; + lastRefill = now; + } else { + tokens = Number(hash.get("tokens")); + lastRefill = Number(hash.get("lastRefill")); + + const elapsed = now - lastRefill; + if (elapsed >= window) { + const cycles = Math.floor(elapsed / window); + tokens = Math.min(limit, tokens + cycles * tokensPerPeriod); + lastRefill = lastRefill + cycles * window; + } + } + + let ok = 0; + if (tokens >= cost) { + ok = 1; + if (!peek) { + tokens -= cost; + } + } + + if (!peek) { + let h = this.#hashes.get(key); + if (!h) { + h = new Map(); + this.#hashes.set(key, h); + } + h.set("tokens", String(tokens)); + h.set("lastRefill", String(lastRefill)); + const resetAt = lastRefill + window; + this.#pexpire(key, Math.max(1, Math.ceil(resetAt - now) + window)); + } + + const remaining = Math.max(0, Math.floor(tokens)); + const resetAt = lastRefill + window; + let retryAfter = 0; + if (ok === 0) { + const deficit = cost - tokens; + const cycles = Math.ceil(deficit / tokensPerPeriod); + retryAfter = Math.max(0, cycles * window - (now - lastRefill)); + } + + return [ok, remaining, String(resetAt), String(retryAfter), limit]; + } + + #gcra( + key: string, + limit: number, + window: number, + cost: number, + now: number, + peek: boolean, + ): [number, number, string, string, number] { + const emissionInterval = window / limit; + const tau = window; + + const stored = this.#strings.get(key); + const tat = stored !== undefined ? 
Number(stored) : now; + + const allowAt = tat - tau; + if (now < allowAt) { + const remaining = 0; + const retryAfter = allowAt - now; + return [0, remaining, String(tat), String(retryAfter), limit]; + } + + const newTat = Math.max(tat, now) + emissionInterval * cost; + if (newTat - now > tau) { + const diff = tau - (tat - now); + const remaining = Math.min( + limit, + Math.max(0, Math.floor(diff / emissionInterval)), + ); + const retryAfter = Math.max(0, newTat - tau - now); + return [0, remaining, String(tat), String(retryAfter), limit]; + } + + if (!peek) { + this.#strings.set(key, String(newTat)); + this.#pexpire(key, Math.ceil(newTat - now + tau)); + } + + const tatForRemaining = peek ? tat : newTat; + const diff = tau - (tatForRemaining - now); + const remaining = Math.min( + limit, + Math.max(0, Math.floor(diff / emissionInterval)), + ); + + return [peek ? 1 : 1, remaining, String(peek ? tat : newTat), "0", limit]; + } +} + +// --- Factory validation --- + +Deno.test("createRedisStore() throws for invalid limit", () => { + const redis = new MockRedis(); + assertThrows( + () => createRedisStore({ redis, limit: 0, window: 1000 }), + RangeError, + "limit", + ); + assertThrows( + () => createRedisStore({ redis, limit: -1, window: 1000 }), + RangeError, + "limit", + ); +}); + +Deno.test("createRedisStore() throws for invalid window", () => { + const redis = new MockRedis(); + assertThrows( + () => createRedisStore({ redis, limit: 10, window: 0 }), + RangeError, + "window", + ); +}); + +Deno.test("createRedisStore() throws for unknown algorithm", () => { + const redis = new MockRedis(); + assertThrows( + () => + createRedisStore({ + redis, + limit: 10, + window: 1000, + algorithm: "unknown" as "fixed-window", + }), + TypeError, + "unknown", + ); +}); + +Deno.test("createRedisStore() throws for invalid segmentsPerWindow", () => { + const redis = new MockRedis(); + assertThrows( + () => + createRedisStore({ + redis, + limit: 10, + window: 1000, + algorithm: 
"sliding-window", + segmentsPerWindow: 1, + }), + RangeError, + "segmentsPerWindow", + ); + assertThrows( + () => + createRedisStore({ + redis, + limit: 10, + window: 1000, + algorithm: "sliding-window", + segmentsPerWindow: 3, + }), + RangeError, + "divisible", + ); +}); + +Deno.test("createRedisStore() throws for invalid tokensPerPeriod", () => { + const redis = new MockRedis(); + assertThrows( + () => + createRedisStore({ + redis, + limit: 10, + window: 1000, + algorithm: "token-bucket", + tokensPerPeriod: 0, + }), + RangeError, + "tokensPerPeriod", + ); + assertThrows( + () => + createRedisStore({ + redis, + limit: 10, + window: 1000, + algorithm: "token-bucket", + tokensPerPeriod: 11, + }), + RangeError, + "tokensPerPeriod", + ); +}); + +// --- Store properties --- + +Deno.test("createRedisStore() exposes capacity and window", () => { + const redis = new MockRedis(); + const store = createRedisStore({ redis, limit: 42, window: 5000 }); + assertEquals(store.capacity, 42); + assertEquals(store.window, 5000); +}); + +// === Fixed Window === + +Deno.test("redis fixed-window: first request allowed", async () => { + const redis = new MockRedis(1000); + const store = createRedisStore({ + redis, + limit: 5, + window: 1000, + algorithm: "fixed-window", + }); + + const r = await store.consume("a", 1); + assert(r.ok); + assertEquals(r.remaining, 4); + assertEquals(r.limit, 5); + assertEquals(r.retryAfter, 0); +}); + +Deno.test("redis fixed-window: exhausting limit", async () => { + const redis = new MockRedis(1000); + const store = createRedisStore({ + redis, + limit: 3, + window: 1000, + algorithm: "fixed-window", + }); + + assert((await store.consume("a", 1)).ok); + assert((await store.consume("a", 1)).ok); + assert((await store.consume("a", 1)).ok); + + const r = await store.consume("a", 1); + assertFalse(r.ok); + assertEquals(r.remaining, 0); + assert(r.retryAfter > 0); +}); + +Deno.test("redis fixed-window: permits restore after window elapses", async () => { + 
const redis = new MockRedis(1000); + const store = createRedisStore({ + redis, + limit: 2, + window: 1000, + algorithm: "fixed-window", + }); + + await store.consume("a", 1); + await store.consume("a", 1); + assertFalse((await store.consume("a", 1)).ok); + + redis.now = 2000; + const r = await store.consume("a", 1); + assert(r.ok); + assertEquals(r.remaining, 1); +}); + +Deno.test("redis fixed-window: variable cost", async () => { + const redis = new MockRedis(1000); + const store = createRedisStore({ + redis, + limit: 10, + window: 1000, + algorithm: "fixed-window", + }); + + const r = await store.consume("a", 7); + assert(r.ok); + assertEquals(r.remaining, 3); + + assertFalse((await store.consume("a", 4)).ok); + assert((await store.consume("a", 3)).ok); +}); + +// === Sliding Window === + +Deno.test("redis sliding-window: permits freed incrementally", async () => { + const redis = new MockRedis(0); + const store = createRedisStore({ + redis, + limit: 4, + window: 400, + algorithm: "sliding-window", + segmentsPerWindow: 4, + }); + + await store.consume("a", 4); + assertFalse((await store.consume("a", 1)).ok); + + redis.now = 100; + assertFalse((await store.consume("a", 1)).ok); + + redis.now = 400; + assert((await store.consume("a", 4)).ok); +}); + +Deno.test("redis sliding-window: no boundary burst", async () => { + const redis = new MockRedis(0); + const store = createRedisStore({ + redis, + limit: 10, + window: 1000, + algorithm: "sliding-window", + segmentsPerWindow: 2, + }); + + await store.consume("a", 10); + + redis.now = 500; + assertFalse((await store.consume("a", 1)).ok); + + redis.now = 1000; + assert((await store.consume("a", 10)).ok); +}); + +// === Token Bucket === + +Deno.test("redis token-bucket: starts at full capacity", async () => { + const redis = new MockRedis(0); + const store = createRedisStore({ + redis, + limit: 5, + window: 1000, + algorithm: "token-bucket", + }); + + const r = await store.consume("a", 1); + assert(r.ok); + 
assertEquals(r.remaining, 4); +}); + +Deno.test("redis token-bucket: tokens refill lazily", async () => { + const redis = new MockRedis(0); + const store = createRedisStore({ + redis, + limit: 3, + window: 1000, + algorithm: "token-bucket", + tokensPerPeriod: 1, + }); + + await store.consume("a", 3); + assertFalse((await store.consume("a", 1)).ok); + + redis.now = 1000; + assert((await store.consume("a", 1)).ok); + assertFalse((await store.consume("a", 1)).ok); + + redis.now = 3000; + assert((await store.consume("a", 2)).ok); +}); + +Deno.test("redis token-bucket: refill capped at limit", async () => { + const redis = new MockRedis(0); + const store = createRedisStore({ + redis, + limit: 3, + window: 1000, + algorithm: "token-bucket", + tokensPerPeriod: 3, + }); + + await store.consume("a", 1); + redis.now = 10000; + const r = await store.consume("a", 1); + assert(r.ok); + assertEquals(r.remaining, 2); +}); + +Deno.test("redis token-bucket: retryAfter reflects time until enough tokens", async () => { + const redis = new MockRedis(0); + const store = createRedisStore({ + redis, + limit: 10, + window: 500, + algorithm: "token-bucket", + tokensPerPeriod: 2, + }); + + await store.consume("a", 10); + const r = await store.consume("a", 3); + assertFalse(r.ok); + assertEquals(r.retryAfter, 1000); +}); + +// === GCRA === + +Deno.test("redis gcra: first request always allowed", async () => { + const redis = new MockRedis(0); + const store = createRedisStore({ + redis, + limit: 10, + window: 1000, + algorithm: "gcra", + }); + + const r = await store.consume("a", 1); + assert(r.ok); + assertEquals(r.limit, 10); +}); + +Deno.test("redis gcra: requests spaced >= emission_interval apart always allowed", async () => { + const redis = new MockRedis(0); + const emissionInterval = 100; + const store = createRedisStore({ + redis, + limit: 10, + window: 1000, + algorithm: "gcra", + }); + + for (let i = 0; i < 20; i++) { + const r = await store.consume("a", 1); + assert(r.ok, `request 
${i} at now=${redis.now} should be allowed`); + redis.tick(emissionInterval); + } +}); + +Deno.test("redis gcra: burst up to limit", async () => { + const redis = new MockRedis(0); + const store = createRedisStore({ + redis, + limit: 5, + window: 1000, + algorithm: "gcra", + }); + + for (let i = 0; i < 5; i++) { + assert( + (await store.consume("a", 1)).ok, + `burst request ${i} should be allowed`, + ); + } + assertFalse((await store.consume("a", 1)).ok); +}); + +Deno.test("redis gcra: after burst, requests denied until tat drains", async () => { + const redis = new MockRedis(0); + const store = createRedisStore({ + redis, + limit: 5, + window: 1000, + algorithm: "gcra", + }); + + for (let i = 0; i < 5; i++) await store.consume("a", 1); + assertFalse((await store.consume("a", 1)).ok); + + redis.now = 200; + assert((await store.consume("a", 1)).ok); + assertFalse((await store.consume("a", 1)).ok); +}); + +Deno.test("redis gcra: retryAfter is exact", async () => { + const redis = new MockRedis(0); + const store = createRedisStore({ + redis, + limit: 5, + window: 1000, + algorithm: "gcra", + }); + + for (let i = 0; i < 5; i++) await store.consume("a", 1); + const r = await store.consume("a", 1); + assertFalse(r.ok); + assertEquals(r.retryAfter, 200); +}); + +Deno.test("redis gcra: variable cost", async () => { + const redis = new MockRedis(0); + const store = createRedisStore({ + redis, + limit: 10, + window: 1000, + algorithm: "gcra", + }); + + const r = await store.consume("a", 5); + assert(r.ok); + assertEquals(r.remaining, 5); + + assert((await store.consume("a", 5)).ok); + assertFalse((await store.consume("a", 1)).ok); +}); + +Deno.test("redis gcra: remaining derived correctly", async () => { + const redis = new MockRedis(0); + const store = createRedisStore({ + redis, + limit: 10, + window: 1000, + algorithm: "gcra", + }); + + const r1 = await store.consume("a", 1); + assert(r1.ok); + assertEquals(r1.remaining, 9); + + const r2 = await store.consume("a", 4); + 
assert(r2.ok); + assertEquals(r2.remaining, 5); +}); + +// === peek() === + +Deno.test("redis peek() does not consume permits (fixed-window)", async () => { + const redis = new MockRedis(0); + const store = createRedisStore({ + redis, + limit: 5, + window: 1000, + algorithm: "fixed-window", + }); + + await store.consume("a", 2); + + const p1 = await store.peek("a", 1); + assert(p1.ok); + assertEquals(p1.remaining, 3); + + const p2 = await store.peek("a", 1); + assertEquals(p2.remaining, 3); +}); + +Deno.test("redis peek() returns full capacity for unknown key (gcra)", async () => { + const redis = new MockRedis(0); + const store = createRedisStore({ + redis, + limit: 10, + window: 1000, + algorithm: "gcra", + }); + + const p = await store.peek("unknown", 1); + assert(p.ok); + assertEquals(p.remaining, 10); + assertEquals(p.limit, 10); +}); + +Deno.test("redis peek() does not consume permits (token-bucket)", async () => { + const redis = new MockRedis(0); + const store = createRedisStore({ + redis, + limit: 5, + window: 1000, + algorithm: "token-bucket", + }); + + await store.consume("a", 3); + const p = await store.peek("a", 1); + assert(p.ok); + assertEquals(p.remaining, 2); +}); + +Deno.test("redis peek() does not consume permits (sliding-window)", async () => { + const redis = new MockRedis(0); + const store = createRedisStore({ + redis, + limit: 5, + window: 1000, + algorithm: "sliding-window", + segmentsPerWindow: 5, + }); + + await store.consume("a", 3); + const p = await store.peek("a", 1); + assert(p.ok); + assertEquals(p.remaining, 2); +}); + +Deno.test("redis peek() does not consume permits (gcra)", async () => { + const redis = new MockRedis(0); + const store = createRedisStore({ + redis, + limit: 5, + window: 1000, + algorithm: "gcra", + }); + + await store.consume("a", 3); + const p = await store.peek("a", 1); + assert(p.ok); + assertEquals(p.remaining, 2); +}); + +// === reset() === + +Deno.test("redis reset() restores key to full capacity", async () 
=> { + const redis = new MockRedis(0); + const store = createRedisStore({ + redis, + limit: 3, + window: 1000, + algorithm: "gcra", + }); + + await store.consume("a", 3); + assertFalse((await store.consume("a", 1)).ok); + + await store.reset("a"); + assert((await store.consume("a", 1)).ok); +}); + +Deno.test("redis reset() on unknown key is a no-op", async () => { + const redis = new MockRedis(0); + const store = createRedisStore({ + redis, + limit: 5, + window: 1000, + algorithm: "fixed-window", + }); + + await store.reset("nonexistent"); +}); + +Deno.test("redis reset() works for fixed-window", async () => { + const redis = new MockRedis(0); + const store = createRedisStore({ + redis, + limit: 2, + window: 1000, + algorithm: "fixed-window", + }); + + await store.consume("a", 2); + assertFalse((await store.consume("a", 1)).ok); + + await store.reset("a"); + assert((await store.consume("a", 1)).ok); +}); + +Deno.test("redis reset() works for sliding-window", async () => { + const redis = new MockRedis(0); + const store = createRedisStore({ + redis, + limit: 2, + window: 1000, + algorithm: "sliding-window", + segmentsPerWindow: 2, + }); + + await store.consume("a", 2); + assertFalse((await store.consume("a", 1)).ok); + + await store.reset("a"); + assert((await store.consume("a", 1)).ok); +}); + +Deno.test("redis reset() works for token-bucket", async () => { + const redis = new MockRedis(0); + const store = createRedisStore({ + redis, + limit: 2, + window: 1000, + algorithm: "token-bucket", + }); + + await store.consume("a", 2); + assertFalse((await store.consume("a", 1)).ok); + + await store.reset("a"); + assert((await store.consume("a", 1)).ok); +}); + +// === Per-key isolation === + +Deno.test("redis keys are isolated", async () => { + const redis = new MockRedis(0); + const store = createRedisStore({ + redis, + limit: 2, + window: 1000, + algorithm: "gcra", + }); + + await store.consume("a", 2); + assertFalse((await store.consume("a", 1)).ok); + + assert((await 
store.consume("b", 1)).ok); + assert((await store.consume("b", 1)).ok); +}); + +// === Key prefix === + +Deno.test("redis store uses configurable prefix", async () => { + const redis = new MockRedis(0); + const store = createRedisStore({ + redis, + limit: 5, + window: 1000, + algorithm: "gcra", + prefix: "custom", + }); + + await store.consume("mykey", 1); + assert((await store.peek("mykey", 1)).ok); +}); + +Deno.test("redis store default prefix is 'rl'", async () => { + const redis = new MockRedis(0); + const store = createRedisStore({ + redis, + limit: 5, + window: 1000, + algorithm: "gcra", + }); + + await store.consume("mykey", 1); + assert((await store.peek("mykey", 1)).ok); +}); + +// === Integration with createRateLimiter === + +Deno.test("redis store works with createRateLimiter", async () => { + const redis = new MockRedis(0); + const store = createRedisStore({ + redis, + limit: 3, + window: 1000, + algorithm: "fixed-window", + }); + await using limiter = createRateLimiter({ store }); + + const r = await limiter.limit("a"); + assert(r.ok); + assertEquals(r.remaining, 2); + assertEquals(r.limit, 3); +}); + +Deno.test("redis store: createRateLimiter reads capacity/window from store", async () => { + const redis = new MockRedis(0); + const store = createRedisStore({ + redis, + limit: 42, + window: 5000, + algorithm: "gcra", + }); + await using limiter = createRateLimiter({ store }); + + const r = await limiter.limit("a"); + assert(r.ok); + assertEquals(r.limit, 42); +}); + +Deno.test("redis store: limiter cost validation with store", async () => { + const redis = new MockRedis(0); + const store = createRedisStore({ + redis, + limit: 5, + window: 1000, + algorithm: "gcra", + }); + await using limiter = createRateLimiter({ store }); + + assertThrows(() => limiter.limit("a", { cost: 0 }), RangeError, "cost"); + assertThrows(() => limiter.limit("a", { cost: 6 }), RangeError, "exceeds"); +}); + +// === sendCommand-based connection === + +/** + * Wraps a {@linkcode 
MockRedis} to expose only `sendCommand`, mimicking + * clients like `@iuioiua/redis` that use a single command method. + */ +class MockSendCommandRedis implements RedisSendCommandConnection { + #inner: MockRedis; + + constructor(inner: MockRedis) { + this.#inner = inner; + } + + get now(): number { + return this.#inner.now; + } + set now(ms: number) { + this.#inner.now = ms; + } + + sendCommand(args: readonly (string | number)[]): Promise { + const strs = args.map(String); + const cmd = strs[0]!.toUpperCase(); + if (cmd === "EVAL") { + const script = strs[1]!; + const numKeys = Number(strs[2]); + const keys = strs.slice(3, 3 + numKeys); + const rest = strs.slice(3 + numKeys); + return this.#inner.eval(script, keys, rest); + } + if (cmd === "EVALSHA") { + const sha = strs[1]!; + const numKeys = Number(strs[2]); + const keys = strs.slice(3, 3 + numKeys); + const rest = strs.slice(3 + numKeys); + return this.#inner.evalsha(sha, keys, rest); + } + return Promise.reject(new Error(`Unsupported command: ${cmd}`)); + } +} + +Deno.test("sendCommand connection: fixed-window works end-to-end", async () => { + const inner = new MockRedis(1000); + const redis = new MockSendCommandRedis(inner); + const store = createRedisStore({ + redis, + limit: 3, + window: 1000, + algorithm: "fixed-window", + }); + + assert((await store.consume("a", 1)).ok); + assert((await store.consume("a", 1)).ok); + assert((await store.consume("a", 1)).ok); + assertFalse((await store.consume("a", 1)).ok); + + redis.now = 2000; + const r = await store.consume("a", 1); + assert(r.ok); + assertEquals(r.remaining, 2); +}); + +Deno.test("sendCommand connection: gcra works end-to-end", async () => { + const inner = new MockRedis(0); + const redis = new MockSendCommandRedis(inner); + const store = createRedisStore({ + redis, + limit: 5, + window: 1000, + algorithm: "gcra", + }); + + for (let i = 0; i < 5; i++) { + assert((await store.consume("a", 1)).ok); + } + assertFalse((await store.consume("a", 1)).ok); +}); 
+ +Deno.test("sendCommand connection: peek and reset work", async () => { + const inner = new MockRedis(0); + const redis = new MockSendCommandRedis(inner); + const store = createRedisStore({ + redis, + limit: 5, + window: 1000, + algorithm: "fixed-window", + }); + + await store.consume("a", 3); + const p = await store.peek("a", 1); + assert(p.ok); + assertEquals(p.remaining, 2); + + await store.reset("a"); + const r = await store.consume("a", 1); + assert(r.ok); + assertEquals(r.remaining, 4); +}); + +Deno.test("sendCommand connection: works with createRateLimiter", async () => { + const inner = new MockRedis(0); + const redis = new MockSendCommandRedis(inner); + const store = createRedisStore({ + redis, + limit: 3, + window: 1000, + algorithm: "sliding-window", + segmentsPerWindow: 2, + }); + await using limiter = createRateLimiter({ store }); + + const r = await limiter.limit("a"); + assert(r.ok); + assertEquals(r.remaining, 2); + assertEquals(r.limit, 3); +}); + +// === Disposal === + +Deno.test("redis store disposal is a no-op", async () => { + const redis = new MockRedis(0); + const store = createRedisStore({ + redis, + limit: 5, + window: 1000, + algorithm: "gcra", + }); + + await store[Symbol.asyncDispose](); +}); + +// === EVALSHA fallback === + +Deno.test("redis store falls back from EVALSHA to EVAL on NOSCRIPT", async () => { + const redis = new MockRedis(0); + const store = createRedisStore({ + redis, + limit: 5, + window: 1000, + algorithm: "fixed-window", + }); + + const r = await store.consume("a", 1); + assert(r.ok); + assertEquals(r.remaining, 4); +}); From a790dcf9758c896532e107dc46a8763db66e0ee4 Mon Sep 17 00:00:00 2001 From: Tomas Zijdemans Date: Mon, 30 Mar 2026 19:56:38 +0200 Subject: [PATCH 11/15] refactor --- rate_limit/_keyed_algorithms.ts | 6 +- rate_limit/_redis_scripts.ts | 29 ++++++--- rate_limit/fixed_window.ts | 12 ++-- rate_limit/fixed_window_test.ts | 94 ++++++++++++++--------------- rate_limit/mod.ts | 13 ++-- 
rate_limit/rate_limiter.ts | 6 ++ rate_limit/sliding_window.ts | 12 ++-- rate_limit/sliding_window_test.ts | 84 +++++++++++++------------- rate_limit/store_types.ts | 1 + rate_limit/token_bucket.ts | 16 ++--- rate_limit/token_bucket_test.ts | 98 +++++++++++++++---------------- rate_limit/types.ts | 13 +++- 12 files changed, 208 insertions(+), 176 deletions(-) diff --git a/rate_limit/_keyed_algorithms.ts b/rate_limit/_keyed_algorithms.ts index 90125a5411ff..df8400b0ca11 100644 --- a/rate_limit/_keyed_algorithms.ts +++ b/rate_limit/_keyed_algorithms.ts @@ -32,9 +32,11 @@ export interface KeyedAlgorithmOptions { /** * Wraps AlgorithmOps with a Map, LRU eviction, and TTL eviction. * - * LRU tracking exploits Map's insertion-order guarantee: on every access + * LRU tracking exploits Map's insertion-order guarantee: on `limit()` * the entry is deleted and re-inserted, keeping the least-recently-used - * key at the front. Eviction is therefore O(1). + * key at the front. `peek()` is read-only and does not promote the key, + * so a key that is only peeked can still be evicted. Eviction of the + * LRU entry is O(1). 
*/ function createKeyedAlgorithm( ops: AlgorithmOps, diff --git a/rate_limit/_redis_scripts.ts b/rate_limit/_redis_scripts.ts index 3c061146acce..a6324b747786 100644 --- a/rate_limit/_redis_scripts.ts +++ b/rate_limit/_redis_scripts.ts @@ -397,12 +397,25 @@ export async function runScript( } export function parseResult(raw: unknown, limit: number): RateLimitResult { - const arr = raw as [number, number, string, string, number]; - return { - ok: arr[0] === 1, - remaining: Number(arr[1]), - resetAt: Number(arr[2]), - retryAfter: Number(arr[3]), - limit, - }; + if (!Array.isArray(raw) || raw.length < 4) { + throw new TypeError( + `Cannot parse rate limit result: expected an array of length >= 4, received ${ + JSON.stringify(raw) + }`, + ); + } + const ok = raw[0] === 1; + const remaining = Number(raw[1]); + const resetAt = Number(raw[2]); + const retryAfter = Number(raw[3]); + if ( + Number.isNaN(remaining) || Number.isNaN(resetAt) || Number.isNaN(retryAfter) + ) { + throw new TypeError( + `Cannot parse rate limit result: numeric fields contain NaN (remaining=${ + raw[1] + }, resetAt=${raw[2]}, retryAfter=${raw[3]})`, + ); + } + return { ok, remaining, resetAt, retryAfter, limit }; } diff --git a/rate_limit/fixed_window.ts b/rate_limit/fixed_window.ts index 3ce6421abfd1..76caa06029f3 100644 --- a/rate_limit/fixed_window.ts +++ b/rate_limit/fixed_window.ts @@ -17,7 +17,7 @@ import { */ export interface FixedWindowOptions extends QueueOptions { /** Maximum permits per window. */ - permitLimit: number; + limit: number; /** Window duration in milliseconds. 
*/ window: number; /** @@ -51,7 +51,7 @@ export interface FixedWindowOptions extends QueueOptions { * import { assert } from "@std/assert"; * * using limiter = createFixedWindow({ - * permitLimit: 100, + * limit: 100, * window: 60_000, * }); * @@ -64,7 +64,7 @@ export interface FixedWindowOptions extends QueueOptions { * import { createFixedWindow } from "@std/rate-limit/fixed-window"; * * using limiter = createFixedWindow({ - * permitLimit: 100, + * limit: 100, * window: 60_000, * autoReplenishment: false, * }); @@ -79,13 +79,13 @@ export function createFixedWindow( options: FixedWindowOptions, ): ReplenishingRateLimiter { const context = "fixed window"; - assertPositiveInteger(context, "permitLimit", options.permitLimit); + assertPositiveInteger(context, "limit", options.limit); assertPositiveFinite(context, "window", options.window); assertNonNegativeInteger(context, "queueLimit", options.queueLimit); - const { permitLimit, window: windowMs } = options; + const { limit, window: windowMs } = options; const clock = options.clock ?? 
Date.now; - const ops = createFixedWindowOps(permitLimit, windowMs); + const ops = createFixedWindowOps(limit, windowMs); const state = ops.create(clock()); let lastNow = 0; diff --git a/rate_limit/fixed_window_test.ts b/rate_limit/fixed_window_test.ts index dd4016e2614d..f64f9f20ea47 100644 --- a/rate_limit/fixed_window_test.ts +++ b/rate_limit/fixed_window_test.ts @@ -12,52 +12,52 @@ import { createFixedWindow } from "./fixed_window.ts"; // --- Factory validation --- -Deno.test("createFixedWindow() throws for invalid permitLimit", () => { +Deno.test("createFixedWindow() throws for invalid limit", () => { assertThrows( - () => createFixedWindow({ permitLimit: 0, window: 1000 }), + () => createFixedWindow({ limit: 0, window: 1000 }), RangeError, - "permitLimit", + "limit", ); assertThrows( - () => createFixedWindow({ permitLimit: -1, window: 1000 }), + () => createFixedWindow({ limit: -1, window: 1000 }), RangeError, - "permitLimit", + "limit", ); assertThrows( - () => createFixedWindow({ permitLimit: 1.5, window: 1000 }), + () => createFixedWindow({ limit: 1.5, window: 1000 }), RangeError, - "permitLimit", + "limit", ); assertThrows( - () => createFixedWindow({ permitLimit: NaN, window: 1000 }), + () => createFixedWindow({ limit: NaN, window: 1000 }), RangeError, - "permitLimit", + "limit", ); assertThrows( - () => createFixedWindow({ permitLimit: Infinity, window: 1000 }), + () => createFixedWindow({ limit: Infinity, window: 1000 }), RangeError, - "permitLimit", + "limit", ); }); Deno.test("createFixedWindow() throws for invalid window", () => { assertThrows( - () => createFixedWindow({ permitLimit: 10, window: 0 }), + () => createFixedWindow({ limit: 10, window: 0 }), RangeError, "window", ); assertThrows( - () => createFixedWindow({ permitLimit: 10, window: -100 }), + () => createFixedWindow({ limit: 10, window: -100 }), RangeError, "window", ); assertThrows( - () => createFixedWindow({ permitLimit: 10, window: NaN }), + () => createFixedWindow({ limit: 10, 
window: NaN }), RangeError, "window", ); assertThrows( - () => createFixedWindow({ permitLimit: 10, window: Infinity }), + () => createFixedWindow({ limit: 10, window: Infinity }), RangeError, "window", ); @@ -65,7 +65,7 @@ Deno.test("createFixedWindow() throws for invalid window", () => { Deno.test("createFixedWindow() throws for invalid queueLimit", () => { assertThrows( - () => createFixedWindow({ permitLimit: 10, window: 1000, queueLimit: -1 }), + () => createFixedWindow({ limit: 10, window: 1000, queueLimit: -1 }), RangeError, "queueLimit", ); @@ -76,7 +76,7 @@ Deno.test("createFixedWindow() throws for invalid queueLimit", () => { Deno.test("tryAcquire() succeeds within the window limit", () => { using time = new FakeTime(0); using limiter = createFixedWindow({ - permitLimit: 3, + limit: 3, window: 1000, }); void time; @@ -90,7 +90,7 @@ Deno.test("tryAcquire() succeeds within the window limit", () => { Deno.test("tryAcquire() acquires multiple permits at once", () => { using time = new FakeTime(0); using limiter = createFixedWindow({ - permitLimit: 5, + limit: 5, window: 1000, }); void time; @@ -103,7 +103,7 @@ Deno.test("tryAcquire() acquires multiple permits at once", () => { Deno.test("tryAcquire() rejects with retryAfter equal to window duration", () => { using time = new FakeTime(0); using limiter = createFixedWindow({ - permitLimit: 1, + limit: 1, window: 5000, }); void time; @@ -117,7 +117,7 @@ Deno.test("tryAcquire() rejects with retryAfter equal to window duration", () => Deno.test("tryAcquire() throws for invalid permits", () => { using time = new FakeTime(0); using limiter = createFixedWindow({ - permitLimit: 5, + limit: 5, window: 1000, }); void time; @@ -127,10 +127,10 @@ Deno.test("tryAcquire() throws for invalid permits", () => { assertThrows(() => limiter.tryAcquire(1.5), RangeError); }); -Deno.test("tryAcquire() throws when permits exceed permitLimit", () => { +Deno.test("tryAcquire() throws when permits exceed limit", () => { using time = new 
FakeTime(0); using limiter = createFixedWindow({ - permitLimit: 5, + limit: 5, window: 1000, }); void time; @@ -142,7 +142,7 @@ Deno.test("tryAcquire() throws when permits exceed permitLimit", () => { Deno.test("permits reset after the window elapses", () => { using time = new FakeTime(0); - using limiter = createFixedWindow({ permitLimit: 2, window: 1000 }); + using limiter = createFixedWindow({ limit: 2, window: 1000 }); limiter.tryAcquire(); limiter.tryAcquire(); @@ -157,7 +157,7 @@ Deno.test("permits reset after the window elapses", () => { Deno.test("full permit count is restored each window", () => { using time = new FakeTime(0); - using limiter = createFixedWindow({ permitLimit: 5, window: 500 }); + using limiter = createFixedWindow({ limit: 5, window: 500 }); for (let i = 0; i < 5; i++) limiter.tryAcquire(); assertFalse(limiter.tryAcquire().acquired); @@ -172,7 +172,7 @@ Deno.test("full permit count is restored each window", () => { Deno.test("replenish() throws when autoReplenishment is true", () => { using time = new FakeTime(0); using limiter = createFixedWindow({ - permitLimit: 5, + limit: 5, window: 1000, }); void time; @@ -186,7 +186,7 @@ Deno.test("replenish() throws when autoReplenishment is true", () => { Deno.test("replenish() drains queued acquire() waiters", async () => { const limiter = createFixedWindow({ - permitLimit: 3, + limit: 3, window: 1000, autoReplenishment: false, queueLimit: 5, @@ -213,7 +213,7 @@ Deno.test("replenish() drains queued acquire() waiters", async () => { Deno.test("replenish() resets the window when autoReplenishment is false", () => { const limiter = createFixedWindow({ - permitLimit: 3, + limit: 3, window: 1000, autoReplenishment: false, }); @@ -233,7 +233,7 @@ Deno.test("replenish() resets the window when autoReplenishment is false", () => Deno.test("acquire() resolves immediately when permits available", async () => { using time = new FakeTime(0); - using limiter = createFixedWindow({ permitLimit: 5, window: 1000 
}); + using limiter = createFixedWindow({ limit: 5, window: 1000 }); void time; const lease = await limiter.acquire(); @@ -243,7 +243,7 @@ Deno.test("acquire() resolves immediately when permits available", async () => { Deno.test("acquire() returns rejected lease when queue limit is 0", async () => { using time = new FakeTime(0); using limiter = createFixedWindow({ - permitLimit: 1, + limit: 1, window: 1000, queueLimit: 0, }); @@ -258,7 +258,7 @@ Deno.test("acquire() returns rejected lease when queue limit is 0", async () => Deno.test("acquire() queues and resolves after window reset", async () => { using time = new FakeTime(0); using limiter = createFixedWindow({ - permitLimit: 1, + limit: 1, window: 1000, queueLimit: 5, }); @@ -283,7 +283,7 @@ Deno.test("acquire() queues and resolves after window reset", async () => { Deno.test("acquire() rejects when aborted via signal", async () => { using time = new FakeTime(0); using limiter = createFixedWindow({ - permitLimit: 1, + limit: 1, window: 1000, queueLimit: 5, }); @@ -301,7 +301,7 @@ Deno.test("acquire() rejects when aborted via signal", async () => { Deno.test("acquire() rejects when signal is already aborted", async () => { using time = new FakeTime(0); using limiter = createFixedWindow({ - permitLimit: 1, + limit: 1, window: 1000, queueLimit: 5, }); @@ -318,7 +318,7 @@ Deno.test("acquire() rejects when signal is already aborted", async () => { Deno.test("acquire() with already-aborted signal rejects even when permits are available", async () => { using time = new FakeTime(0); using limiter = createFixedWindow({ - permitLimit: 5, + limit: 5, window: 1000, queueLimit: 5, }); @@ -338,7 +338,7 @@ Deno.test("acquire() with already-aborted signal rejects even when permits are a Deno.test("acquire() with already-aborted signal does not evict queued waiters", async () => { using time = new FakeTime(0); using limiter = createFixedWindow({ - permitLimit: 1, + limit: 1, window: 1000, queueLimit: 1, queueOrder: 
"newest-first", @@ -363,7 +363,7 @@ Deno.test("acquire() with already-aborted signal does not evict queued waiters", Deno.test("dispose resolves queued waiters with rejected leases", async () => { using time = new FakeTime(0); const limiter = createFixedWindow({ - permitLimit: 1, + limit: 1, window: 1000, queueLimit: 5, }); @@ -380,7 +380,7 @@ Deno.test("dispose resolves queued waiters with rejected leases", async () => { Deno.test("tryAcquire() returns rejected lease after disposal", () => { using time = new FakeTime(0); - const limiter = createFixedWindow({ permitLimit: 5, window: 1000 }); + const limiter = createFixedWindow({ limit: 5, window: 1000 }); void time; limiter[Symbol.dispose](); @@ -390,7 +390,7 @@ Deno.test("tryAcquire() returns rejected lease after disposal", () => { Deno.test("acquire() rejects after disposal", async () => { using time = new FakeTime(0); - const limiter = createFixedWindow({ permitLimit: 5, window: 1000 }); + const limiter = createFixedWindow({ limit: 5, window: 1000 }); void time; limiter[Symbol.dispose](); @@ -402,7 +402,7 @@ Deno.test("acquire() rejects after disposal", async () => { Deno.test("oldest-first queue resolves waiters in FIFO order", async () => { using time = new FakeTime(0); using limiter = createFixedWindow({ - permitLimit: 1, + limit: 1, window: 1000, queueLimit: 10, queueOrder: "oldest-first", @@ -431,7 +431,7 @@ Deno.test("oldest-first queue resolves waiters in FIFO order", async () => { Deno.test("newest-first queue resolves newest waiter first", async () => { using time = new FakeTime(0); using limiter = createFixedWindow({ - permitLimit: 1, + limit: 1, window: 1000, queueLimit: 10, queueOrder: "newest-first", @@ -462,7 +462,7 @@ Deno.test("newest-first queue resolves newest waiter first", async () => { Deno.test("newest-first queue evicts oldest waiter when queue is full", async () => { using time = new FakeTime(0); using limiter = createFixedWindow({ - permitLimit: 1, + limit: 1, window: 1000, queueLimit: 
2, queueOrder: "newest-first", @@ -502,7 +502,7 @@ Deno.test("newest-first queue evicts oldest waiter when queue is full", async () Deno.test("oldest-first queue evicts oldest waiter when queue is full", async () => { using time = new FakeTime(0); using limiter = createFixedWindow({ - permitLimit: 1, + limit: 1, window: 1000, queueLimit: 1, queueOrder: "oldest-first", @@ -534,7 +534,7 @@ Deno.test("oldest-first queue evicts oldest waiter when queue is full", async () Deno.test("acquire() queues multi-permit waiter spanning multiple windows", async () => { using time = new FakeTime(0); using limiter = createFixedWindow({ - permitLimit: 2, + limit: 2, window: 1000, queueLimit: 10, }); @@ -561,7 +561,7 @@ Deno.test("acquire() queues multi-permit waiter spanning multiple windows", asyn Deno.test("single replenishment resolves multiple queued waiters", async () => { using time = new FakeTime(0); using limiter = createFixedWindow({ - permitLimit: 3, + limit: 3, window: 1000, queueLimit: 10, }); @@ -598,7 +598,7 @@ Deno.test("single replenishment resolves multiple queued waiters", async () => { Deno.test("acquire() rejects for invalid permits", async () => { using time = new FakeTime(0); - using limiter = createFixedWindow({ permitLimit: 5, window: 1000 }); + using limiter = createFixedWindow({ limit: 5, window: 1000 }); void time; await assertRejects(() => limiter.acquire(0), RangeError); @@ -606,9 +606,9 @@ Deno.test("acquire() rejects for invalid permits", async () => { await assertRejects(() => limiter.acquire(1.5), RangeError); }); -Deno.test("acquire() rejects when permits exceed permitLimit", async () => { +Deno.test("acquire() rejects when permits exceed limit", async () => { using time = new FakeTime(0); - using limiter = createFixedWindow({ permitLimit: 5, window: 1000 }); + using limiter = createFixedWindow({ limit: 5, window: 1000 }); void time; await assertRejects(() => limiter.acquire(6), RangeError, "exceeds"); @@ -618,7 +618,7 @@ Deno.test("acquire() 
rejects when permits exceed permitLimit", async () => { Deno.test("double dispose is a no-op", () => { using time = new FakeTime(0); - const limiter = createFixedWindow({ permitLimit: 5, window: 1000 }); + const limiter = createFixedWindow({ limit: 5, window: 1000 }); void time; limiter[Symbol.dispose](); diff --git a/rate_limit/mod.ts b/rate_limit/mod.ts index 303c4f8dd368..e684274782b0 100644 --- a/rate_limit/mod.ts +++ b/rate_limit/mod.ts @@ -1,5 +1,4 @@ // Copyright 2018-2026 the Deno authors. MIT license. -// This module is browser compatible. /** * Rate limiting strategies for controlling how many operations can occur over @@ -8,7 +7,8 @@ * The primary API is {@linkcode createRateLimiter}, a keyed rate limiter for * the common case of "allow key X at most N requests per window." It supports * fixed-window, sliding-window, token-bucket, and GCRA algorithms and accepts - * a pluggable {@linkcode RateLimitStore} backend (in-memory by default). + * a pluggable {@linkcode RateLimitStore} backend (in-memory by default, or + * Redis via {@linkcode createRedisStore} for distributed deployments). 
* * For single-resource limiting, use the primitives: * {@linkcode createTokenBucket}, {@linkcode createFixedWindow}, and @@ -27,10 +27,11 @@ * @module */ -export * from "./types.ts"; -export * from "./token_bucket.ts"; export * from "./fixed_window.ts"; -export * from "./sliding_window.ts"; +export * from "./memory_store.ts"; export * from "./rate_limiter.ts"; +export * from "./redis_store.ts"; +export * from "./sliding_window.ts"; export * from "./store_types.ts"; -export * from "./memory_store.ts"; +export * from "./token_bucket.ts"; +export * from "./types.ts"; diff --git a/rate_limit/rate_limiter.ts b/rate_limit/rate_limiter.ts index 8484bd01ec3c..fdb697852ad3 100644 --- a/rate_limit/rate_limiter.ts +++ b/rate_limit/rate_limiter.ts @@ -95,11 +95,17 @@ export interface KeyedRateLimiter extends AsyncDisposable { * Check the current state for a key without consuming any permits. * Useful for displaying remaining quota in UI or headers without * affecting the count. + * + * @param key Identifier for the rate limit subject (user ID, IP, etc.). + * @param options Override cost per request. + * @returns A {@linkcode RateLimitResult} with the current state and metadata. */ peek(key: string, options?: CostOptions): Promise; /** * Reset all state for a key, restoring it to full capacity. + * + * @param key Identifier for the rate limit subject (user ID, IP, etc.). */ reset(key: string): Promise; } diff --git a/rate_limit/sliding_window.ts b/rate_limit/sliding_window.ts index 90521ff7aa1d..b79ea3766826 100644 --- a/rate_limit/sliding_window.ts +++ b/rate_limit/sliding_window.ts @@ -17,7 +17,7 @@ import { */ export interface SlidingWindowOptions extends QueueOptions { /** Maximum permits across the sliding window. */ - permitLimit: number; + limit: number; /** Total window duration in milliseconds. 
*/ window: number; /** @@ -58,7 +58,7 @@ export interface SlidingWindowOptions extends QueueOptions { * import { assert } from "@std/assert"; * * using limiter = createSlidingWindow({ - * permitLimit: 100, + * limit: 100, * window: 60_000, * segmentsPerWindow: 6, * }); @@ -72,7 +72,7 @@ export interface SlidingWindowOptions extends QueueOptions { * import { createSlidingWindow } from "@std/rate-limit/sliding-window"; * * using limiter = createSlidingWindow({ - * permitLimit: 100, + * limit: 100, * window: 60_000, * segmentsPerWindow: 6, * autoReplenishment: false, @@ -88,7 +88,7 @@ export function createSlidingWindow( options: SlidingWindowOptions, ): ReplenishingRateLimiter { const context = "sliding window"; - assertPositiveInteger(context, "permitLimit", options.permitLimit); + assertPositiveInteger(context, "limit", options.limit); assertPositiveFinite(context, "window", options.window); if ( !Number.isInteger(options.segmentsPerWindow) || @@ -105,10 +105,10 @@ export function createSlidingWindow( } assertNonNegativeInteger(context, "queueLimit", options.queueLimit); - const { permitLimit, segmentsPerWindow, window } = options; + const { limit, segmentsPerWindow, window } = options; const clock = options.clock ?? 
Date.now; const segmentDuration = window / segmentsPerWindow; - const ops = createSlidingWindowOps(permitLimit, window, segmentsPerWindow); + const ops = createSlidingWindowOps(limit, window, segmentsPerWindow); const state = ops.create(clock()); let lastNow = 0; diff --git a/rate_limit/sliding_window_test.ts b/rate_limit/sliding_window_test.ts index b074ab5183ca..e0e7f1a525c0 100644 --- a/rate_limit/sliding_window_test.ts +++ b/rate_limit/sliding_window_test.ts @@ -12,36 +12,36 @@ import { createSlidingWindow } from "./sliding_window.ts"; // --- Factory validation --- -Deno.test("createSlidingWindow() throws for invalid permitLimit", () => { +Deno.test("createSlidingWindow() throws for invalid limit", () => { assertThrows( () => createSlidingWindow({ - permitLimit: 0, + limit: 0, window: 1000, segmentsPerWindow: 2, }), RangeError, - "permitLimit", + "limit", ); assertThrows( () => createSlidingWindow({ - permitLimit: -1, + limit: -1, window: 1000, segmentsPerWindow: 2, }), RangeError, - "permitLimit", + "limit", ); assertThrows( () => createSlidingWindow({ - permitLimit: 1.5, + limit: 1.5, window: 1000, segmentsPerWindow: 2, }), RangeError, - "permitLimit", + "limit", ); }); @@ -49,7 +49,7 @@ Deno.test("createSlidingWindow() throws for invalid window", () => { assertThrows( () => createSlidingWindow({ - permitLimit: 10, + limit: 10, window: 0, segmentsPerWindow: 2, }), @@ -59,7 +59,7 @@ Deno.test("createSlidingWindow() throws for invalid window", () => { assertThrows( () => createSlidingWindow({ - permitLimit: 10, + limit: 10, window: -100, segmentsPerWindow: 2, }), @@ -72,7 +72,7 @@ Deno.test("createSlidingWindow() throws for invalid segmentsPerWindow", () => { assertThrows( () => createSlidingWindow({ - permitLimit: 10, + limit: 10, window: 1000, segmentsPerWindow: 1, }), @@ -82,7 +82,7 @@ Deno.test("createSlidingWindow() throws for invalid segmentsPerWindow", () => { assertThrows( () => createSlidingWindow({ - permitLimit: 10, + limit: 10, window: 1000, 
segmentsPerWindow: 0, }), @@ -92,7 +92,7 @@ Deno.test("createSlidingWindow() throws for invalid segmentsPerWindow", () => { assertThrows( () => createSlidingWindow({ - permitLimit: 10, + limit: 10, window: 1000, segmentsPerWindow: 1.5, }), @@ -105,7 +105,7 @@ Deno.test("createSlidingWindow() throws when window is not divisible by segments assertThrows( () => createSlidingWindow({ - permitLimit: 10, + limit: 10, window: 1000, segmentsPerWindow: 3, }), @@ -118,7 +118,7 @@ Deno.test("createSlidingWindow() throws for invalid queueLimit", () => { assertThrows( () => createSlidingWindow({ - permitLimit: 10, + limit: 10, window: 1000, segmentsPerWindow: 2, queueLimit: -1, @@ -133,7 +133,7 @@ Deno.test("createSlidingWindow() throws for invalid queueLimit", () => { Deno.test("tryAcquire() succeeds within the permit limit", () => { using time = new FakeTime(0); using limiter = createSlidingWindow({ - permitLimit: 3, + limit: 3, window: 1000, segmentsPerWindow: 2, }); @@ -148,7 +148,7 @@ Deno.test("tryAcquire() succeeds within the permit limit", () => { Deno.test("tryAcquire() acquires multiple permits at once", () => { using time = new FakeTime(0); using limiter = createSlidingWindow({ - permitLimit: 5, + limit: 5, window: 1000, segmentsPerWindow: 2, }); @@ -162,7 +162,7 @@ Deno.test("tryAcquire() acquires multiple permits at once", () => { Deno.test("tryAcquire() rejects with retryAfter equal to segment duration", () => { using time = new FakeTime(0); using limiter = createSlidingWindow({ - permitLimit: 1, + limit: 1, window: 1000, segmentsPerWindow: 4, }); @@ -177,7 +177,7 @@ Deno.test("tryAcquire() rejects with retryAfter equal to segment duration", () = Deno.test("tryAcquire() throws for invalid permits", () => { using time = new FakeTime(0); using limiter = createSlidingWindow({ - permitLimit: 5, + limit: 5, window: 1000, segmentsPerWindow: 2, }); @@ -188,10 +188,10 @@ Deno.test("tryAcquire() throws for invalid permits", () => { assertThrows(() => 
limiter.tryAcquire(1.5), RangeError); }); -Deno.test("tryAcquire() throws when permits exceed permitLimit", () => { +Deno.test("tryAcquire() throws when permits exceed limit", () => { using time = new FakeTime(0); using limiter = createSlidingWindow({ - permitLimit: 5, + limit: 5, window: 1000, segmentsPerWindow: 2, }); @@ -206,7 +206,7 @@ Deno.test("permits consumed in segment 0 free after N segment rotations", () => using time = new FakeTime(0); // 4 segments, each 250ms. Full window = 1000ms. using limiter = createSlidingWindow({ - permitLimit: 4, + limit: 4, window: 1000, segmentsPerWindow: 4, }); @@ -236,7 +236,7 @@ Deno.test("sliding window prevents boundary burst that fixed window allows", () using time = new FakeTime(0); // 2 segments of 500ms each, limit 10. using limiter = createSlidingWindow({ - permitLimit: 10, + limit: 10, window: 1000, segmentsPerWindow: 2, }); @@ -261,7 +261,7 @@ Deno.test("permits spread across segments free incrementally", () => { using time = new FakeTime(0); // 3 segments of 100ms each, limit 6. 
using limiter = createSlidingWindow({ - permitLimit: 6, + limit: 6, window: 300, segmentsPerWindow: 3, }); @@ -292,7 +292,7 @@ Deno.test("permits spread across segments free incrementally", () => { Deno.test("replenish() throws when autoReplenishment is true", () => { using time = new FakeTime(0); using limiter = createSlidingWindow({ - permitLimit: 5, + limit: 5, window: 1000, segmentsPerWindow: 2, }); @@ -307,7 +307,7 @@ Deno.test("replenish() throws when autoReplenishment is true", () => { Deno.test("replenish() rotates a segment when autoReplenishment is false", () => { using limiter = createSlidingWindow({ - permitLimit: 4, + limit: 4, window: 1000, segmentsPerWindow: 4, autoReplenishment: false, @@ -332,7 +332,7 @@ Deno.test("replenish() rotates a segment when autoReplenishment is false", () => Deno.test("acquire() resolves immediately when permits available", async () => { using time = new FakeTime(0); using limiter = createSlidingWindow({ - permitLimit: 5, + limit: 5, window: 1000, segmentsPerWindow: 2, }); @@ -345,7 +345,7 @@ Deno.test("acquire() resolves immediately when permits available", async () => { Deno.test("acquire() returns rejected lease when queue limit is 0", async () => { using time = new FakeTime(0); using limiter = createSlidingWindow({ - permitLimit: 1, + limit: 1, window: 1000, segmentsPerWindow: 2, queueLimit: 0, @@ -362,7 +362,7 @@ Deno.test("acquire() queues and resolves after segment rotation frees capacity", using time = new FakeTime(0); // 2 segments of 500ms, limit 1 using limiter = createSlidingWindow({ - permitLimit: 1, + limit: 1, window: 1000, segmentsPerWindow: 2, queueLimit: 5, @@ -394,7 +394,7 @@ Deno.test("acquire() queues and resolves after segment rotation frees capacity", Deno.test("acquire() rejects when aborted via signal", async () => { using time = new FakeTime(0); using limiter = createSlidingWindow({ - permitLimit: 1, + limit: 1, window: 1000, segmentsPerWindow: 2, queueLimit: 5, @@ -413,7 +413,7 @@ 
Deno.test("acquire() rejects when aborted via signal", async () => { Deno.test("acquire() rejects when signal is already aborted", async () => { using time = new FakeTime(0); using limiter = createSlidingWindow({ - permitLimit: 1, + limit: 1, window: 1000, segmentsPerWindow: 2, queueLimit: 5, @@ -433,7 +433,7 @@ Deno.test("acquire() rejects when signal is already aborted", async () => { Deno.test("dispose resolves queued waiters with rejected leases", async () => { using time = new FakeTime(0); const limiter = createSlidingWindow({ - permitLimit: 1, + limit: 1, window: 1000, segmentsPerWindow: 2, queueLimit: 5, @@ -452,7 +452,7 @@ Deno.test("dispose resolves queued waiters with rejected leases", async () => { Deno.test("tryAcquire() returns rejected lease after disposal", () => { using time = new FakeTime(0); const limiter = createSlidingWindow({ - permitLimit: 5, + limit: 5, window: 1000, segmentsPerWindow: 2, }); @@ -466,7 +466,7 @@ Deno.test("tryAcquire() returns rejected lease after disposal", () => { Deno.test("acquire() rejects after disposal", async () => { using time = new FakeTime(0); const limiter = createSlidingWindow({ - permitLimit: 5, + limit: 5, window: 1000, segmentsPerWindow: 2, }); @@ -482,7 +482,7 @@ Deno.test("oldest-first queue resolves waiters in FIFO order", async () => { using time = new FakeTime(0); // 2 segments of 500ms, limit 1. using limiter = createSlidingWindow({ - permitLimit: 1, + limit: 1, window: 1000, segmentsPerWindow: 2, queueLimit: 10, @@ -514,7 +514,7 @@ Deno.test("newest-first queue resolves newest waiter first", async () => { using time = new FakeTime(0); // 4 segments of 250ms, limit 2. Two permits available at start. 
using limiter = createSlidingWindow({ - permitLimit: 2, + limit: 2, window: 1000, segmentsPerWindow: 4, queueLimit: 10, @@ -547,7 +547,7 @@ Deno.test("newest-first queue evicts oldest waiter when queue is full", async () using time = new FakeTime(0); // 4 segments of 250ms, limit 3, queue holds 2 using limiter = createSlidingWindow({ - permitLimit: 3, + limit: 3, window: 1000, segmentsPerWindow: 4, queueLimit: 2, @@ -590,7 +590,7 @@ Deno.test("newest-first queue evicts oldest waiter when queue is full", async () Deno.test("acquire() rejects for invalid permits", async () => { using time = new FakeTime(0); using limiter = createSlidingWindow({ - permitLimit: 5, + limit: 5, window: 1000, segmentsPerWindow: 2, }); @@ -601,10 +601,10 @@ Deno.test("acquire() rejects for invalid permits", async () => { await assertRejects(() => limiter.acquire(1.5), RangeError); }); -Deno.test("acquire() rejects when permits exceed permitLimit", async () => { +Deno.test("acquire() rejects when permits exceed limit", async () => { using time = new FakeTime(0); using limiter = createSlidingWindow({ - permitLimit: 5, + limit: 5, window: 1000, segmentsPerWindow: 2, }); @@ -619,7 +619,7 @@ Deno.test("single replenishment resolves multiple queued waiters", async () => { using time = new FakeTime(0); // 2 segments of 500ms, limit 3. 
using limiter = createSlidingWindow({ - permitLimit: 3, + limit: 3, window: 1000, segmentsPerWindow: 2, queueLimit: 10, @@ -659,7 +659,7 @@ Deno.test("single replenishment resolves multiple queued waiters", async () => { Deno.test("acquire() rejects when permits exceed queueLimit even if queue is empty", async () => { using time = new FakeTime(0); using limiter = createSlidingWindow({ - permitLimit: 5, + limit: 5, window: 1000, segmentsPerWindow: 2, queueLimit: 2, @@ -678,7 +678,7 @@ Deno.test("acquire() rejects when permits exceed queueLimit even if queue is emp Deno.test("double dispose is a no-op", () => { using time = new FakeTime(0); const limiter = createSlidingWindow({ - permitLimit: 5, + limit: 5, window: 1000, segmentsPerWindow: 2, }); diff --git a/rate_limit/store_types.ts b/rate_limit/store_types.ts index 0d543411e09b..d9a91a9adb21 100644 --- a/rate_limit/store_types.ts +++ b/rate_limit/store_types.ts @@ -73,6 +73,7 @@ export interface RateLimitStore extends AsyncDisposable { * Reset all state for a key, restoring it to full capacity. * * @param key Identifier for the rate limit subject. + * @returns Resolves when the key has been reset. */ reset(key: string): Promise; } diff --git a/rate_limit/token_bucket.ts b/rate_limit/token_bucket.ts index d99e31e8149b..ee0819cfae1f 100644 --- a/rate_limit/token_bucket.ts +++ b/rate_limit/token_bucket.ts @@ -17,7 +17,7 @@ import { */ export interface TokenBucketOptions extends QueueOptions { /** Maximum tokens the bucket can hold. */ - tokenLimit: number; + limit: number; /** Tokens added each replenishment period. */ tokensPerPeriod: number; /** Replenishment interval in milliseconds. 
*/ @@ -52,7 +52,7 @@ export interface TokenBucketOptions extends QueueOptions { * import { assert } from "@std/assert"; * * using limiter = createTokenBucket({ - * tokenLimit: 10, + * limit: 10, * tokensPerPeriod: 1, * replenishmentPeriod: 1000, * }); @@ -66,7 +66,7 @@ export interface TokenBucketOptions extends QueueOptions { * import { createTokenBucket } from "@std/rate-limit/token-bucket"; * * using limiter = createTokenBucket({ - * tokenLimit: 10, + * limit: 10, * tokensPerPeriod: 5, * replenishmentPeriod: 1000, * autoReplenishment: false, @@ -82,24 +82,24 @@ export function createTokenBucket( options: TokenBucketOptions, ): ReplenishingRateLimiter { const context = "token bucket"; - assertPositiveInteger(context, "tokenLimit", options.tokenLimit); + assertPositiveInteger(context, "limit", options.limit); assertPositiveInteger(context, "tokensPerPeriod", options.tokensPerPeriod); assertPositiveFinite( context, "replenishmentPeriod", options.replenishmentPeriod, ); - if (options.tokensPerPeriod > options.tokenLimit) { + if (options.tokensPerPeriod > options.limit) { throw new RangeError( - `Cannot create token bucket: 'tokensPerPeriod' (${options.tokensPerPeriod}) exceeds 'tokenLimit' (${options.tokenLimit})`, + `Cannot create token bucket: 'tokensPerPeriod' (${options.tokensPerPeriod}) exceeds 'limit' (${options.limit})`, ); } assertNonNegativeInteger(context, "queueLimit", options.queueLimit); - const { tokenLimit, tokensPerPeriod, replenishmentPeriod } = options; + const { limit, tokensPerPeriod, replenishmentPeriod } = options; const clock = options.clock ?? 
Date.now; const ops = createTokenBucketOps( - tokenLimit, + limit, replenishmentPeriod, tokensPerPeriod, ); diff --git a/rate_limit/token_bucket_test.ts b/rate_limit/token_bucket_test.ts index 16a6ce61e6e6..18f58cc0c09f 100644 --- a/rate_limit/token_bucket_test.ts +++ b/rate_limit/token_bucket_test.ts @@ -12,36 +12,36 @@ import { createTokenBucket } from "./token_bucket.ts"; // --- Factory validation --- -Deno.test("createTokenBucket() throws for invalid tokenLimit", () => { +Deno.test("createTokenBucket() throws for invalid limit", () => { assertThrows( () => createTokenBucket({ - tokenLimit: 0, + limit: 0, tokensPerPeriod: 1, replenishmentPeriod: 1000, }), RangeError, - "tokenLimit", + "limit", ); assertThrows( () => createTokenBucket({ - tokenLimit: -1, + limit: -1, tokensPerPeriod: 1, replenishmentPeriod: 1000, }), RangeError, - "tokenLimit", + "limit", ); assertThrows( () => createTokenBucket({ - tokenLimit: 1.5, + limit: 1.5, tokensPerPeriod: 1, replenishmentPeriod: 1000, }), RangeError, - "tokenLimit", + "limit", ); }); @@ -49,7 +49,7 @@ Deno.test("createTokenBucket() throws for invalid tokensPerPeriod", () => { assertThrows( () => createTokenBucket({ - tokenLimit: 10, + limit: 10, tokensPerPeriod: 0, replenishmentPeriod: 1000, }), @@ -62,7 +62,7 @@ Deno.test("createTokenBucket() throws for invalid replenishmentPeriod", () => { assertThrows( () => createTokenBucket({ - tokenLimit: 10, + limit: 10, tokensPerPeriod: 1, replenishmentPeriod: 0, }), @@ -72,7 +72,7 @@ Deno.test("createTokenBucket() throws for invalid replenishmentPeriod", () => { assertThrows( () => createTokenBucket({ - tokenLimit: 10, + limit: 10, tokensPerPeriod: 1, replenishmentPeriod: -100, }), @@ -81,11 +81,11 @@ Deno.test("createTokenBucket() throws for invalid replenishmentPeriod", () => { ); }); -Deno.test("createTokenBucket() throws when tokensPerPeriod exceeds tokenLimit", () => { +Deno.test("createTokenBucket() throws when tokensPerPeriod exceeds limit", () => { assertThrows( () => 
createTokenBucket({ - tokenLimit: 5, + limit: 5, tokensPerPeriod: 10, replenishmentPeriod: 1000, }), @@ -98,7 +98,7 @@ Deno.test("createTokenBucket() throws for invalid queueLimit", () => { assertThrows( () => createTokenBucket({ - tokenLimit: 10, + limit: 10, tokensPerPeriod: 1, replenishmentPeriod: 1000, queueLimit: -1, @@ -113,7 +113,7 @@ Deno.test("createTokenBucket() throws for invalid queueLimit", () => { Deno.test("tryAcquire() succeeds when tokens are available", () => { using time = new FakeTime(0); using limiter = createTokenBucket({ - tokenLimit: 5, + limit: 5, tokensPerPeriod: 1, replenishmentPeriod: 1000, }); @@ -126,7 +126,7 @@ Deno.test("tryAcquire() succeeds when tokens are available", () => { Deno.test("tryAcquire() acquires multiple permits", () => { using time = new FakeTime(0); using limiter = createTokenBucket({ - tokenLimit: 5, + limit: 5, tokensPerPeriod: 1, replenishmentPeriod: 1000, }); @@ -142,7 +142,7 @@ Deno.test("tryAcquire() acquires multiple permits", () => { Deno.test("tryAcquire() returns rejected lease when tokens exhausted", () => { using time = new FakeTime(0); using limiter = createTokenBucket({ - tokenLimit: 1, + limit: 1, tokensPerPeriod: 1, replenishmentPeriod: 1000, }); @@ -160,7 +160,7 @@ Deno.test("tryAcquire() returns rejected lease when tokens exhausted", () => { Deno.test("tryAcquire() throws for invalid permits", () => { using time = new FakeTime(0); using limiter = createTokenBucket({ - tokenLimit: 5, + limit: 5, tokensPerPeriod: 1, replenishmentPeriod: 1000, }); @@ -171,10 +171,10 @@ Deno.test("tryAcquire() throws for invalid permits", () => { assertThrows(() => limiter.tryAcquire(1.5), RangeError); }); -Deno.test("tryAcquire() throws when permits exceed tokenLimit", () => { +Deno.test("tryAcquire() throws when permits exceed limit", () => { using time = new FakeTime(0); using limiter = createTokenBucket({ - tokenLimit: 5, + limit: 5, tokensPerPeriod: 1, replenishmentPeriod: 1000, }); @@ -188,7 +188,7 @@ 
Deno.test("tryAcquire() throws when permits exceed tokenLimit", () => { Deno.test("tokens replenish after the configured period", () => { using time = new FakeTime(0); using limiter = createTokenBucket({ - tokenLimit: 2, + limit: 2, tokensPerPeriod: 1, replenishmentPeriod: 1000, }); @@ -201,10 +201,10 @@ Deno.test("tokens replenish after the configured period", () => { assert(limiter.tryAcquire().acquired); }); -Deno.test("tokens do not exceed tokenLimit after replenishment", () => { +Deno.test("tokens do not exceed limit after replenishment", () => { using time = new FakeTime(0); using limiter = createTokenBucket({ - tokenLimit: 2, + limit: 2, tokensPerPeriod: 2, replenishmentPeriod: 1000, }); @@ -220,7 +220,7 @@ Deno.test("tokens do not exceed tokenLimit after replenishment", () => { Deno.test("replenish() throws when autoReplenishment is true", () => { using time = new FakeTime(0); using limiter = createTokenBucket({ - tokenLimit: 5, + limit: 5, tokensPerPeriod: 1, replenishmentPeriod: 1000, }); @@ -235,7 +235,7 @@ Deno.test("replenish() throws when autoReplenishment is true", () => { Deno.test("replenish() replenishes when autoReplenishment is false", () => { const limiter = createTokenBucket({ - tokenLimit: 5, + limit: 5, tokensPerPeriod: 2, replenishmentPeriod: 1000, autoReplenishment: false, @@ -254,7 +254,7 @@ Deno.test("replenish() replenishes when autoReplenishment is false", () => { Deno.test("replenish() drains queued acquire() waiters", async () => { const limiter = createTokenBucket({ - tokenLimit: 2, + limit: 2, tokensPerPeriod: 2, replenishmentPeriod: 1000, autoReplenishment: false, @@ -285,7 +285,7 @@ Deno.test("replenish() drains queued acquire() waiters", async () => { Deno.test("acquire() resolves immediately when tokens available", async () => { using time = new FakeTime(0); using limiter = createTokenBucket({ - tokenLimit: 5, + limit: 5, tokensPerPeriod: 1, replenishmentPeriod: 1000, }); @@ -298,7 +298,7 @@ Deno.test("acquire() resolves 
immediately when tokens available", async () => { Deno.test("acquire() returns rejected lease when queue limit is 0", async () => { using time = new FakeTime(0); using limiter = createTokenBucket({ - tokenLimit: 1, + limit: 1, tokensPerPeriod: 1, replenishmentPeriod: 1000, queueLimit: 0, @@ -314,7 +314,7 @@ Deno.test("acquire() returns rejected lease when queue limit is 0", async () => Deno.test("acquire() queues and resolves after replenishment", async () => { using time = new FakeTime(0); using limiter = createTokenBucket({ - tokenLimit: 1, + limit: 1, tokensPerPeriod: 1, replenishmentPeriod: 1000, queueLimit: 5, @@ -340,7 +340,7 @@ Deno.test("acquire() queues and resolves after replenishment", async () => { Deno.test("acquire() rejects when aborted via signal", async () => { using time = new FakeTime(0); using limiter = createTokenBucket({ - tokenLimit: 1, + limit: 1, tokensPerPeriod: 1, replenishmentPeriod: 1000, queueLimit: 5, @@ -359,7 +359,7 @@ Deno.test("acquire() rejects when aborted via signal", async () => { Deno.test("acquire() rejects when signal is already aborted", async () => { using time = new FakeTime(0); using limiter = createTokenBucket({ - tokenLimit: 1, + limit: 1, tokensPerPeriod: 1, replenishmentPeriod: 1000, queueLimit: 5, @@ -379,7 +379,7 @@ Deno.test("acquire() rejects when signal is already aborted", async () => { Deno.test("retryAfter reflects the deficit in tokens", () => { using time = new FakeTime(0); using limiter = createTokenBucket({ - tokenLimit: 10, + limit: 10, tokensPerPeriod: 2, replenishmentPeriod: 500, }); @@ -397,7 +397,7 @@ Deno.test("retryAfter reflects the deficit in tokens", () => { Deno.test("dispose resolves queued waiters with rejected leases", async () => { using time = new FakeTime(0); const limiter = createTokenBucket({ - tokenLimit: 1, + limit: 1, tokensPerPeriod: 1, replenishmentPeriod: 1000, queueLimit: 5, @@ -416,7 +416,7 @@ Deno.test("dispose resolves queued waiters with rejected leases", async () => { 
Deno.test("tryAcquire() returns rejected lease after disposal", () => { using time = new FakeTime(0); const limiter = createTokenBucket({ - tokenLimit: 5, + limit: 5, tokensPerPeriod: 1, replenishmentPeriod: 1000, }); @@ -430,7 +430,7 @@ Deno.test("tryAcquire() returns rejected lease after disposal", () => { Deno.test("acquire() rejects after disposal", async () => { using time = new FakeTime(0); const limiter = createTokenBucket({ - tokenLimit: 5, + limit: 5, tokensPerPeriod: 1, replenishmentPeriod: 1000, }); @@ -445,7 +445,7 @@ Deno.test("acquire() rejects after disposal", async () => { Deno.test("oldest-first queue resolves waiters in FIFO order", async () => { using time = new FakeTime(0); using limiter = createTokenBucket({ - tokenLimit: 1, + limit: 1, tokensPerPeriod: 1, replenishmentPeriod: 1000, queueLimit: 10, @@ -475,7 +475,7 @@ Deno.test("oldest-first queue resolves waiters in FIFO order", async () => { Deno.test("newest-first queue resolves newest waiter first", async () => { using time = new FakeTime(0); using limiter = createTokenBucket({ - tokenLimit: 1, + limit: 1, tokensPerPeriod: 1, replenishmentPeriod: 1000, queueLimit: 10, @@ -507,7 +507,7 @@ Deno.test("newest-first queue resolves newest waiter first", async () => { Deno.test("acquire() queues multi-permit waiter spanning multiple periods", async () => { using time = new FakeTime(0); using limiter = createTokenBucket({ - tokenLimit: 3, + limit: 3, tokensPerPeriod: 1, replenishmentPeriod: 1000, queueLimit: 10, @@ -543,7 +543,7 @@ Deno.test("acquire() queues multi-permit waiter spanning multiple periods", asyn Deno.test("single replenishment resolves multiple queued waiters", async () => { using time = new FakeTime(0); using limiter = createTokenBucket({ - tokenLimit: 5, + limit: 5, tokensPerPeriod: 5, replenishmentPeriod: 1000, queueLimit: 10, @@ -582,7 +582,7 @@ Deno.test("single replenishment resolves multiple queued waiters", async () => { Deno.test("acquire() rejects for invalid permits", 
async () => { using time = new FakeTime(0); using limiter = createTokenBucket({ - tokenLimit: 5, + limit: 5, tokensPerPeriod: 1, replenishmentPeriod: 1000, }); @@ -593,10 +593,10 @@ Deno.test("acquire() rejects for invalid permits", async () => { await assertRejects(() => limiter.acquire(1.5), RangeError); }); -Deno.test("acquire() rejects when permits exceed tokenLimit", async () => { +Deno.test("acquire() rejects when permits exceed limit", async () => { using time = new FakeTime(0); using limiter = createTokenBucket({ - tokenLimit: 5, + limit: 5, tokensPerPeriod: 1, replenishmentPeriod: 1000, }); @@ -610,7 +610,7 @@ Deno.test("acquire() rejects when permits exceed tokenLimit", async () => { Deno.test("acquire() rejects when permits exceed queueLimit even if queue is empty", async () => { using time = new FakeTime(0); using limiter = createTokenBucket({ - tokenLimit: 5, + limit: 5, tokensPerPeriod: 1, replenishmentPeriod: 1000, queueLimit: 2, @@ -627,7 +627,7 @@ Deno.test("acquire() rejects when permits exceed queueLimit even if queue is emp Deno.test("oldest-first queue evicts oldest waiter when queue is full", async () => { using time = new FakeTime(0); using limiter = createTokenBucket({ - tokenLimit: 1, + limit: 1, tokensPerPeriod: 1, replenishmentPeriod: 1000, queueLimit: 1, @@ -658,7 +658,7 @@ Deno.test("oldest-first queue evicts oldest waiter when queue is full", async () Deno.test("eviction evicts multiple waiters to make room for a large request", async () => { using time = new FakeTime(0); using limiter = createTokenBucket({ - tokenLimit: 3, + limit: 3, tokensPerPeriod: 3, replenishmentPeriod: 1000, queueLimit: 3, @@ -711,7 +711,7 @@ Deno.test("eviction evicts multiple waiters to make room for a large request", a Deno.test("retryAfter is correct after manual replenish", () => { const limiter = createTokenBucket({ - tokenLimit: 3, + limit: 3, tokensPerPeriod: 1, replenishmentPeriod: 1000, autoReplenishment: false, @@ -734,7 +734,7 @@ 
Deno.test("retryAfter is correct after manual replenish", () => { Deno.test("remaining uses floor when tokens are at integer boundary", () => { using time = new FakeTime(0); using limiter = createTokenBucket({ - tokenLimit: 5, + limit: 5, tokensPerPeriod: 1, replenishmentPeriod: 1000, }); @@ -750,7 +750,7 @@ Deno.test("remaining uses floor when tokens are at integer boundary", () => { Deno.test("tryAcquire() denied at exact token boundary after partial refill", () => { using time = new FakeTime(0); using limiter = createTokenBucket({ - tokenLimit: 10, + limit: 10, tokensPerPeriod: 3, replenishmentPeriod: 1000, }); @@ -774,7 +774,7 @@ Deno.test("tryAcquire() denied at exact token boundary after partial refill", () Deno.test("retryAfter is correct with non-power-of-two tokensPerPeriod", () => { using time = new FakeTime(0); using limiter = createTokenBucket({ - tokenLimit: 7, + limit: 7, tokensPerPeriod: 3, replenishmentPeriod: 1000, }); @@ -792,7 +792,7 @@ Deno.test("retryAfter is correct with non-power-of-two tokensPerPeriod", () => { Deno.test("double dispose is a no-op", () => { using time = new FakeTime(0); const limiter = createTokenBucket({ - tokenLimit: 5, + limit: 5, tokensPerPeriod: 1, replenishmentPeriod: 1000, }); diff --git a/rate_limit/types.ts b/rate_limit/types.ts index 42f0356d9ac5..796d497c7caf 100644 --- a/rate_limit/types.ts +++ b/rate_limit/types.ts @@ -39,7 +39,12 @@ * @see {@linkcode createSlidingWindow} for sliding window rate limiting. */ export interface RateLimiter extends Disposable { - /** Try to acquire permits synchronously. Never blocks. */ + /** + * Try to acquire permits synchronously. Never blocks. + * + * @param permits Number of permits to acquire. Defaults to `1`. + * @returns A {@linkcode RateLimitLease} indicating success or rejection. + */ tryAcquire(permits?: number): RateLimitLease; /** @@ -53,6 +58,10 @@ export interface RateLimiter extends Disposable { * rejection) so they can be handled uniformly via the `acquired` field. 
* * Rejects with {@linkcode DOMException} if the signal is aborted. + * + * @param permits Number of permits to acquire. Defaults to `1`. + * @param options Acquire options (e.g. abort signal). + * @returns A {@linkcode RateLimitLease} indicating success or rejection. */ acquire( permits?: number, @@ -99,7 +108,7 @@ export interface AcquireOptions { * import { createTokenBucket } from "@std/rate-limit/token-bucket"; * * using limiter = createTokenBucket({ - * tokenLimit: 10, + * limit: 10, * tokensPerPeriod: 1, * replenishmentPeriod: 1000, * }); From b3f1c342599aef11810801e716ab661218ab92e5 Mon Sep 17 00:00:00 2001 From: Tomas Zijdemans Date: Tue, 31 Mar 2026 09:06:11 +0200 Subject: [PATCH 12/15] fix leak --- rate_limit/redis_store.ts | 31 ++++++++++++++++++------------- 1 file changed, 18 insertions(+), 13 deletions(-) diff --git a/rate_limit/redis_store.ts b/rate_limit/redis_store.ts index 40886052d7a1..ca04dcb162ed 100644 --- a/rate_limit/redis_store.ts +++ b/rate_limit/redis_store.ts @@ -190,17 +190,22 @@ export function createRedisStore( let consumeScript: CachedScript | undefined; let peekScript: CachedScript | undefined; let deleteScript: CachedScript | undefined; + let initScripts: Promise | undefined; - const initScripts = (async () => { - const [consumeSha, peekSha, deleteSha] = await Promise.all([ - sha1Hex(scripts.consume), - sha1Hex(scripts.peek), - sha1Hex(LUA_DELETE_KEY), - ]); - consumeScript = { source: scripts.consume, sha: consumeSha }; - peekScript = { source: scripts.peek, sha: peekSha }; - deleteScript = { source: LUA_DELETE_KEY, sha: deleteSha }; - })(); + function ensureScripts(): Promise { + if (initScripts) return initScripts; + initScripts = (async () => { + const [consumeSha, peekSha, deleteSha] = await Promise.all([ + sha1Hex(scripts.consume), + sha1Hex(scripts.peek), + sha1Hex(LUA_DELETE_KEY), + ]); + consumeScript = { source: scripts.consume, sha: consumeSha }; + peekScript = { source: scripts.peek, sha: peekSha }; + deleteScript = { 
source: LUA_DELETE_KEY, sha: deleteSha }; + })(); + return initScripts; + } function redisKey(key: string): string { return `${prefix}:${key}`; @@ -227,7 +232,7 @@ export function createRedisStore( return windowMs; }, async consume(key: string, cost: number): Promise { - await initScripts; + await ensureScripts(); const args = [...baseArgs]; args[2] = String(cost); const raw = await runScript( @@ -239,7 +244,7 @@ export function createRedisStore( return parseResult(raw, limit); }, async peek(key: string, cost: number): Promise { - await initScripts; + await ensureScripts(); const args = [...baseArgs]; args[2] = String(cost); const raw = await runScript( @@ -251,7 +256,7 @@ export function createRedisStore( return parseResult(raw, limit); }, async reset(key: string): Promise { - await initScripts; + await ensureScripts(); await runScript(redis, deleteScript!, [redisKey(key)], []); }, [Symbol.asyncDispose](): Promise { From 02f92b83a5d0b74fed49a8b637d38f01702eeafa Mon Sep 17 00:00:00 2001 From: Tomas Zijdemans Date: Tue, 31 Mar 2026 09:09:26 +0200 Subject: [PATCH 13/15] use encodeHex --- rate_limit/_redis_scripts.ts | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/rate_limit/_redis_scripts.ts b/rate_limit/_redis_scripts.ts index a6324b747786..386ace861ea4 100644 --- a/rate_limit/_redis_scripts.ts +++ b/rate_limit/_redis_scripts.ts @@ -1,5 +1,6 @@ // Copyright 2018-2026 the Deno authors. MIT license. 
+import { encodeHex } from "@std/encoding/hex"; import type { RateLimitResult } from "./rate_limiter.ts"; import type { RedisConnection, RedisEvalConnection } from "./redis_store.ts"; @@ -336,9 +337,7 @@ export function getScripts(algorithm: string): LuaScriptPair { export async function sha1Hex(text: string): Promise { const data = new TextEncoder().encode(text); const hash = await crypto.subtle.digest("SHA-1", data); - return [...new Uint8Array(hash)] - .map((b) => b.toString(16).padStart(2, "0")) - .join(""); + return encodeHex(hash); } export interface CachedScript { From 347287709af546fbc9c01080af590e1df4b0d487 Mon Sep 17 00:00:00 2001 From: Tomas Zijdemans Date: Tue, 31 Mar 2026 10:28:21 +0200 Subject: [PATCH 14/15] fix browser compatible comments --- rate_limit/_replenishing_limiter.ts | 1 - rate_limit/fixed_window.ts | 1 - rate_limit/memory_store.ts | 1 - rate_limit/rate_limiter.ts | 1 - rate_limit/sliding_window.ts | 1 - rate_limit/token_bucket.ts | 1 - 6 files changed, 6 deletions(-) diff --git a/rate_limit/_replenishing_limiter.ts b/rate_limit/_replenishing_limiter.ts index 2af664a338fe..b35d35c46238 100644 --- a/rate_limit/_replenishing_limiter.ts +++ b/rate_limit/_replenishing_limiter.ts @@ -1,5 +1,4 @@ // Copyright 2018-2026 the Deno authors. MIT license. -// This module is browser compatible. import type { AcquiredLease, diff --git a/rate_limit/fixed_window.ts b/rate_limit/fixed_window.ts index 76caa06029f3..47953e2f64f9 100644 --- a/rate_limit/fixed_window.ts +++ b/rate_limit/fixed_window.ts @@ -1,5 +1,4 @@ // Copyright 2018-2026 the Deno authors. MIT license. -// This module is browser compatible. 
import type { QueueOptions, ReplenishingRateLimiter } from "./types.ts"; import { createReplenishingLimiter } from "./_replenishing_limiter.ts"; diff --git a/rate_limit/memory_store.ts b/rate_limit/memory_store.ts index 1e99ab3d9ecb..63fdeeb054c0 100644 --- a/rate_limit/memory_store.ts +++ b/rate_limit/memory_store.ts @@ -1,5 +1,4 @@ // Copyright 2018-2026 the Deno authors. MIT license. -// This module is browser compatible. import type { RateLimitResult } from "./rate_limiter.ts"; import type { AlgorithmOptions, RateLimitStore } from "./store_types.ts"; diff --git a/rate_limit/rate_limiter.ts b/rate_limit/rate_limiter.ts index fdb697852ad3..6bb8894704dc 100644 --- a/rate_limit/rate_limiter.ts +++ b/rate_limit/rate_limiter.ts @@ -1,5 +1,4 @@ // Copyright 2018-2026 the Deno authors. MIT license. -// This module is browser compatible. import type { MemoryStoreOptions } from "./memory_store.ts"; import { createMemoryStore } from "./memory_store.ts"; diff --git a/rate_limit/sliding_window.ts b/rate_limit/sliding_window.ts index b79ea3766826..ad09ba780d19 100644 --- a/rate_limit/sliding_window.ts +++ b/rate_limit/sliding_window.ts @@ -1,5 +1,4 @@ // Copyright 2018-2026 the Deno authors. MIT license. -// This module is browser compatible. import type { QueueOptions, ReplenishingRateLimiter } from "./types.ts"; import { createReplenishingLimiter } from "./_replenishing_limiter.ts"; diff --git a/rate_limit/token_bucket.ts b/rate_limit/token_bucket.ts index ee0819cfae1f..9470f7e8f9ff 100644 --- a/rate_limit/token_bucket.ts +++ b/rate_limit/token_bucket.ts @@ -1,5 +1,4 @@ // Copyright 2018-2026 the Deno authors. MIT license. -// This module is browser compatible. 
import type { QueueOptions, ReplenishingRateLimiter } from "./types.ts"; import { createReplenishingLimiter } from "./_replenishing_limiter.ts"; From 53d22fbfe814d36c2da90396691424d7a4e254b3 Mon Sep 17 00:00:00 2001 From: Tomas Zijdemans Date: Tue, 31 Mar 2026 10:47:57 +0200 Subject: [PATCH 15/15] fix browser compat, take 2 --- rate_limit/_redis_scripts.ts | 2 +- rate_limit/memory_store.ts | 7 +++++-- rate_limit/rate_limiter.ts | 37 +++--------------------------------- rate_limit/redis_store.ts | 7 +++++-- rate_limit/store_types.ts | 33 +++++++++++++++++++++++++++++++- 5 files changed, 46 insertions(+), 40 deletions(-) diff --git a/rate_limit/_redis_scripts.ts b/rate_limit/_redis_scripts.ts index 386ace861ea4..15ad083facea 100644 --- a/rate_limit/_redis_scripts.ts +++ b/rate_limit/_redis_scripts.ts @@ -1,7 +1,7 @@ // Copyright 2018-2026 the Deno authors. MIT license. import { encodeHex } from "@std/encoding/hex"; -import type { RateLimitResult } from "./rate_limiter.ts"; +import type { RateLimitResult } from "./store_types.ts"; import type { RedisConnection, RedisEvalConnection } from "./redis_store.ts"; // --- Lua scripts --- diff --git a/rate_limit/memory_store.ts b/rate_limit/memory_store.ts index 63fdeeb054c0..57f0f654bd55 100644 --- a/rate_limit/memory_store.ts +++ b/rate_limit/memory_store.ts @@ -1,7 +1,10 @@ // Copyright 2018-2026 the Deno authors. MIT license. 
-import type { RateLimitResult } from "./rate_limiter.ts"; -import type { AlgorithmOptions, RateLimitStore } from "./store_types.ts"; +import type { + AlgorithmOptions, + RateLimitResult, + RateLimitStore, +} from "./store_types.ts"; import { assertNonNegativeInteger, assertPositiveFinite, diff --git a/rate_limit/rate_limiter.ts b/rate_limit/rate_limiter.ts index 6bb8894704dc..2194dd28be45 100644 --- a/rate_limit/rate_limiter.ts +++ b/rate_limit/rate_limiter.ts @@ -2,7 +2,9 @@ import type { MemoryStoreOptions } from "./memory_store.ts"; import { createMemoryStore } from "./memory_store.ts"; -import type { RateLimitStore } from "./store_types.ts"; +import type { RateLimitResult, RateLimitStore } from "./store_types.ts"; + +export type { RateLimitResult } from "./store_types.ts"; /** * Options for {@linkcode KeyedRateLimiter.limit} and @@ -31,39 +33,6 @@ export interface CostOptions { cost?: number; } -/** - * The result of a rate limit check. All fields are present regardless of - * whether the request was allowed. - * - * @experimental **UNSTABLE**: New API, yet to be vetted. - */ -export interface RateLimitResult { - /** Whether the request is allowed. */ - readonly ok: boolean; - /** Best-effort estimate of remaining permits for this key. */ - readonly remaining: number; - /** - * Timestamp (milliseconds since epoch) of the next replenishment event - * (segment rotation, window boundary, or refill cycle). This is *not* - * necessarily when full capacity is restored — for sliding-window and - * token-bucket it may take multiple replenishment cycles. For GCRA this - * is the theoretical arrival time (TAT) at which full burst capacity is - * restored. Useful for the `X-RateLimit-Reset` HTTP header. - */ - readonly resetAt: number; - /** - * Minimum retry delay in milliseconds. `0` when the request is allowed. - * This is the earliest point at which capacity *may* free up. 
For - * sliding-window, this reflects the next segment rotation and may not - * free enough permits for a high-cost request. For token-bucket and GCRA - * the value accounts for the requested cost. Useful for the - * `Retry-After` HTTP header. - */ - readonly retryAfter: number; - /** The limit configured for this limiter. */ - readonly limit: number; -} - /** * A keyed rate limiter that manages per-key state internally. This is the * primary rate limiting API for the common case of "allow key X at most N diff --git a/rate_limit/redis_store.ts b/rate_limit/redis_store.ts index ca04dcb162ed..e50e8c35ecd5 100644 --- a/rate_limit/redis_store.ts +++ b/rate_limit/redis_store.ts @@ -31,8 +31,11 @@ * @module */ -import type { RateLimitResult } from "./rate_limiter.ts"; -import type { AlgorithmOptions, RateLimitStore } from "./store_types.ts"; +import type { + AlgorithmOptions, + RateLimitResult, + RateLimitStore, +} from "./store_types.ts"; import { assertPositiveFinite, assertPositiveInteger } from "./_validation.ts"; import { type CachedScript, diff --git a/rate_limit/store_types.ts b/rate_limit/store_types.ts index d9a91a9adb21..47423438c266 100644 --- a/rate_limit/store_types.ts +++ b/rate_limit/store_types.ts @@ -1,7 +1,38 @@ // Copyright 2018-2026 the Deno authors. MIT license. // This module is browser compatible. -import type { RateLimitResult } from "./rate_limiter.ts"; +/** + * The result of a rate limit check. All fields are present regardless of + * whether the request was allowed. + * + * @experimental **UNSTABLE**: New API, yet to be vetted. + */ +export interface RateLimitResult { + /** Whether the request is allowed. */ + readonly ok: boolean; + /** Best-effort estimate of remaining permits for this key. */ + readonly remaining: number; + /** + * Timestamp (milliseconds since epoch) of the next replenishment event + * (segment rotation, window boundary, or refill cycle). 
This is *not* + * necessarily when full capacity is restored — for sliding-window and + * token-bucket it may take multiple replenishment cycles. For GCRA this + * is the theoretical arrival time (TAT) at which full burst capacity is + * restored. Useful for the `X-RateLimit-Reset` HTTP header. + */ + readonly resetAt: number; + /** + * Minimum retry delay in milliseconds. `0` when the request is allowed. + * This is the earliest point at which capacity *may* free up. For + * sliding-window, this reflects the next segment rotation and may not + * free enough permits for a high-cost request. For token-bucket and GCRA + * the value accounts for the requested cost. Useful for the + * `Retry-After` HTTP header. + */ + readonly retryAfter: number; + /** The limit configured for this limiter. */ + readonly limit: number; +} /** * Algorithm configuration shared by all store backends.