diff --git a/.github/labeler.yml b/.github/labeler.yml index 2e8a8a118dda..1c04226a1eb1 100644 --- a/.github/labeler.yml +++ b/.github/labeler.yml @@ -88,6 +88,9 @@ path: random: - changed-files: - any-glob-to-any-file: random/** +rate-limit: + - changed-files: + - any-glob-to-any-file: rate_limit/** regexp: - changed-files: - any-glob-to-any-file: regexp/** diff --git a/deno.json b/deno.json index 6363398e4685..1d72a1b7eae6 100644 --- a/deno.json +++ b/deno.json @@ -84,6 +84,7 @@ "./net", "./path", "./random", + "./rate_limit", "./regexp", "./semver", "./streams", diff --git a/import_map.json b/import_map.json index 7aa140dea113..64f5a95462c0 100644 --- a/import_map.json +++ b/import_map.json @@ -36,6 +36,7 @@ "@std/path": "jsr:@std/path@^1.1.4", "@std/regexp": "jsr:@std/regexp@^1.0.1", "@std/random": "jsr:@std/random@^0.1.5", + "@std/rate-limit": "jsr:@std/rate-limit@^0.1.0", "@std/semver": "jsr:@std/semver@^1.0.8", "@std/streams": "jsr:@std/streams@^1.0.17", "@std/tar": "jsr:@std/tar@^0.1.10", diff --git a/rate_limit/_algorithms.ts b/rate_limit/_algorithms.ts new file mode 100644 index 000000000000..81d2d9815bd3 --- /dev/null +++ b/rate_limit/_algorithms.ts @@ -0,0 +1,327 @@ +// Copyright 2018-2026 the Deno authors. MIT license. +// This module is browser compatible. + +import { RollingCounter } from "@std/data-structures/unstable-rolling-counter"; +import { assertPositiveFinite, assertPositiveInteger } from "./_validation.ts"; + +/** + * Result returned by algorithm operations. All fields are always present + * regardless of whether the request was allowed. + * + * **Metadata semantics vary by algorithm:** + * + * - `retryAfter` is the *minimum* delay before capacity *may* free up. For + * sliding-window this is the time until the next segment rotation, which may + * not free enough permits for a high-cost request. For token-bucket and GCRA + * the value accounts for the requested cost. 
+ * - `resetAt` is the timestamp of the next replenishment event (segment + * rotation, window boundary, or refill cycle). For sliding-window and + * token-bucket this is *not* necessarily when full capacity is restored. + */ +export interface AlgorithmResult { + readonly ok: boolean; + readonly remaining: number; + readonly resetAt: number; + readonly retryAfter: number; + readonly limit: number; +} + +/** + * Pure state machine for a rate limit algorithm. No Map, no timers, no keys. + * Used by both the keyed layer (Map + eviction) and the primitives (queue + + * timer). + */ +export interface AlgorithmOps { + /** Create initial state for a new key or new instance. */ + create(now: number): S; + /** Advance time (rotate segments, refill tokens, reset window). Mutates state. */ + advance(state: S, now: number): void; + /** Try to consume `cost` permits. Returns true and mutates state if allowed. */ + tryConsume(state: S, cost: number, now: number): boolean; + /** Return whether a request of `cost` would be allowed without mutating state. */ + wouldAllow(state: S, cost: number, now: number): boolean; + /** Replenish permits (one timer tick). Mutates state. No-op for algorithms without timer-based replenishment (e.g. GCRA). */ + replenish(state: S): void; + /** Compute result metadata (remaining, resetAt, retryAfter). */ + result(state: S, ok: boolean, cost: number, now: number): AlgorithmResult; + /** Compute the retry delay for a denied request without allocating a result object. */ + computeRetryAfter(state: S, cost: number, now: number): number; + /** The configured permit limit. */ + readonly limit: number; +} + +// --- Fixed Window --- + +/** State for the fixed-window algorithm: count in current window and window start time. */ +export interface FixedWindowState { + count: number; + windowStart: number; +} + +/** + * Creates ops for the fixed-window algorithm. Callers must pass valid parameters. + * + * @param limit Maximum permits per window. 
Must be a positive integer. + * @param window Window duration in milliseconds. Must be a positive finite number. + * @returns Algorithm ops for fixed-window rate limiting. + */ +export function createFixedWindowOps( + limit: number, + window: number, +): AlgorithmOps { + const context = "fixed window"; + assertPositiveInteger(context, "limit", limit); + assertPositiveFinite(context, "window", window); + return { + limit, + create(now) { + return { count: 0, windowStart: now }; + }, + advance(state, now) { + if (now - state.windowStart >= window) { + state.count = 0; + state.windowStart = state.windowStart + + Math.floor((now - state.windowStart) / window) * window; + } + }, + tryConsume(state, cost, _now) { + if (state.count + cost > limit) return false; + state.count += cost; + return true; + }, + wouldAllow(state, cost, _now) { + return state.count + cost <= limit; + }, + replenish(state) { + state.count = 0; + }, + result(state, ok, cost, now) { + return { + ok, + remaining: Math.max(0, limit - state.count), + resetAt: state.windowStart + window, + retryAfter: ok ? 0 : this.computeRetryAfter(state, cost, now), + limit, + }; + }, + computeRetryAfter(state, _cost, now) { + return state.windowStart + window - now; + }, + }; +} + +// --- Sliding Window --- + +/** State for the sliding-window algorithm: segment counter and current segment start time. */ +export interface SlidingWindowState { + counter: RollingCounter; + segmentStart: number; +} + +/** + * Creates ops for the sliding-window algorithm. Callers must pass valid parameters. + * + * @param limit Maximum permits per window. Must be a positive integer. + * @param window Window duration in milliseconds. Must be a positive finite number. + * @param segmentsPerWindow Number of segments. Must be an integer >= 2. + * @returns Algorithm ops for sliding-window rate limiting. 
+ */ +export function createSlidingWindowOps( + limit: number, + window: number, + segmentsPerWindow: number, +): AlgorithmOps { + const context = "sliding window"; + assertPositiveInteger(context, "limit", limit); + assertPositiveFinite(context, "window", window); + if (!Number.isInteger(segmentsPerWindow) || segmentsPerWindow < 2) { + throw new RangeError( + `Cannot create ${context}: 'segmentsPerWindow' must be an integer >= 2, received ${segmentsPerWindow}`, + ); + } + if (window % segmentsPerWindow !== 0) { + throw new RangeError( + `Cannot create ${context}: 'window' (${window}) must be evenly divisible by 'segmentsPerWindow' (${segmentsPerWindow})`, + ); + } + const segmentDuration = window / segmentsPerWindow; + + return { + limit, + create(now) { + return { + counter: new RollingCounter(segmentsPerWindow), + segmentStart: now, + }; + }, + advance(state, now) { + const elapsed = now - state.segmentStart; + if (elapsed >= segmentDuration) { + const rotations = Math.floor(elapsed / segmentDuration); + state.counter.rotate(rotations); + state.segmentStart += rotations * segmentDuration; + } + }, + tryConsume(state, cost, _now) { + if (state.counter.total + cost > limit) return false; + state.counter.increment(cost); + return true; + }, + wouldAllow(state, cost, _now) { + return state.counter.total + cost <= limit; + }, + replenish(state) { + state.counter.rotate(); + state.segmentStart += segmentDuration; + }, + result(state, ok, cost, now) { + return { + ok, + remaining: Math.max(0, limit - state.counter.total), + resetAt: state.segmentStart + segmentDuration, + retryAfter: ok ? 0 : this.computeRetryAfter(state, cost, now), + limit, + }; + }, + computeRetryAfter(state, _cost, now) { + return state.segmentStart + segmentDuration - now; + }, + }; +} + +// --- Token Bucket --- + +/** State for the token-bucket algorithm: current tokens and last refill time. 
*/ +export interface TokenBucketState { + tokens: number; + lastRefill: number; +} + +/** + * Creates ops for the token-bucket algorithm. Callers must pass valid parameters. + * + * @param limit Maximum tokens (bucket capacity). Must be a positive integer. + * @param window Refill cycle duration in milliseconds. Must be a positive finite number. + * @param tokensPerPeriod Tokens added per replenishment period. Must be a positive integer. + * @returns Algorithm ops for token-bucket rate limiting. + */ +export function createTokenBucketOps( + limit: number, + window: number, + tokensPerPeriod: number, +): AlgorithmOps { + const context = "token bucket"; + assertPositiveInteger(context, "limit", limit); + assertPositiveFinite(context, "window", window); + assertPositiveInteger(context, "tokensPerPeriod", tokensPerPeriod); + return { + limit, + create(now) { + return { tokens: limit, lastRefill: now }; + }, + advance(state, now) { + const elapsed = now - state.lastRefill; + if (elapsed >= window) { + const cycles = Math.floor(elapsed / window); + state.tokens = Math.min(limit, state.tokens + cycles * tokensPerPeriod); + state.lastRefill += cycles * window; + } + }, + tryConsume(state, cost, _now) { + if (state.tokens < cost) return false; + state.tokens -= cost; + return true; + }, + wouldAllow(state, cost, _now) { + return state.tokens >= cost; + }, + replenish(state) { + state.tokens = Math.min(limit, state.tokens + tokensPerPeriod); + state.lastRefill += window; + }, + result(state, ok, cost, now) { + const remaining = Math.max(0, Math.floor(state.tokens)); + return { + ok, + remaining, + resetAt: state.lastRefill + window, + retryAfter: ok ? 
0 : this.computeRetryAfter(state, cost, now), + limit, + }; + }, + computeRetryAfter(state, cost, now) { + const deficit = cost - state.tokens; + const cycles = Math.ceil(deficit / tokensPerPeriod); + return Math.max(0, cycles * window - (now - state.lastRefill)); + }, + }; +} + +// --- GCRA (Generic Cell Rate Algorithm) --- + +/** State for GCRA: theoretical arrival time (tat) of the last request. */ +export interface GcraState { + tat: number; +} + +/** + * Creates ops for the GCRA (Generic Cell Rate Algorithm). Callers must pass valid parameters. + * + * @param limit Maximum permits per window. Must be a positive integer. + * @param window Window (tau) in milliseconds. Must be a positive finite number. + * @returns Algorithm ops for GCRA rate limiting. + */ +export function createGcraOps( + limit: number, + window: number, +): AlgorithmOps { + const context = "gcra"; + assertPositiveInteger(context, "limit", limit); + assertPositiveFinite(context, "window", window); + const emissionInterval = window / limit; + const tau = window; + + function remaining(state: GcraState, now: number): number { + const diff = tau - (state.tat - now); + return Math.min(limit, Math.max(0, Math.floor(diff / emissionInterval))); + } + + return { + limit, + create(now) { + return { tat: now }; + }, + advance(_state, _now) {}, + tryConsume(state: GcraState, cost: number, now: number) { + const allowAt = state.tat - tau; + if (now < allowAt) return false; + const newTat = Math.max(state.tat, now) + emissionInterval * cost; + if (newTat - now > tau) return false; + state.tat = newTat; + return true; + }, + wouldAllow(state: GcraState, cost: number, now: number) { + const allowAt = state.tat - tau; + if (now < allowAt) return false; + const newTat = Math.max(state.tat, now) + emissionInterval * cost; + return newTat - now <= tau; + }, + // No-op: GCRA has no timer-based replenishment. 
+ replenish(_state) {}, + result(state, ok, cost, now) { + return { + ok, + remaining: remaining(state, now), + resetAt: state.tat, + retryAfter: ok ? 0 : this.computeRetryAfter(state, cost, now), + limit, + }; + }, + computeRetryAfter(state, cost, now) { + const allowAt = state.tat - tau; + if (now < allowAt) return allowAt - now; + const newTat = Math.max(state.tat, now) + emissionInterval * cost; + return Math.max(0, newTat - tau - now); + }, + }; +} diff --git a/rate_limit/_keyed_algorithms.ts b/rate_limit/_keyed_algorithms.ts new file mode 100644 index 000000000000..df8400b0ca11 --- /dev/null +++ b/rate_limit/_keyed_algorithms.ts @@ -0,0 +1,198 @@ +// Copyright 2018-2026 the Deno authors. MIT license. +// This module is browser compatible. + +import type { AlgorithmOps, AlgorithmResult } from "./_algorithms.ts"; +import { + createFixedWindowOps, + createGcraOps, + createSlidingWindowOps, + createTokenBucketOps, +} from "./_algorithms.ts"; + +/** Internal interface for per-key algorithm strategies. */ +export interface KeyedAlgorithm { + limit(key: string, cost: number, now: number): AlgorithmResult; + peek(key: string, cost: number, now: number): AlgorithmResult; + has(key: string): boolean; + reset(key: string): void; + readonly size: number; + evict(now: number, ttl: number): void; + clear(): void; +} + +/** Options for {@linkcode createKeyedAlgorithm}. */ +export interface KeyedAlgorithmOptions { + /** + * Maximum number of keys to track. When a new key arrives at capacity, + * the least-recently-used key is evicted. `0` disables the limit. + */ + maxKeys?: number; +} + +/** + * Wraps AlgorithmOps with a Map, LRU eviction, and TTL eviction. + * + * LRU tracking exploits Map's insertion-order guarantee: on `limit()` + * the entry is deleted and re-inserted, keeping the least-recently-used + * key at the front. `peek()` is read-only and does not promote the key, + * so a key that is only peeked can still be evicted. Eviction of the + * LRU entry is O(1). 
+ */ +function createKeyedAlgorithm( + ops: AlgorithmOps, + options?: KeyedAlgorithmOptions, +): KeyedAlgorithm { + const maxKeys = options?.maxKeys ?? 0; + const keys = new Map(); + + /** Move `key` to the back of the Map (most-recently-used position). */ + function touch(key: string, state: S & { lastAccess: number }): void { + keys.delete(key); + keys.set(key, state); + } + + function getOrCreate(key: string, now: number): S & { lastAccess: number } { + let state = keys.get(key); + if (state === undefined) { + if (maxKeys > 0 && keys.size >= maxKeys) { + // The first key in the Map is the LRU entry. + const lruKey = keys.keys().next().value; + if (lruKey !== undefined) keys.delete(lruKey); + } + const base = ops.create(now); + (base as S & { lastAccess: number }).lastAccess = now; + state = base as S & { lastAccess: number }; + keys.set(key, state); + } + return state; + } + + function peekDefault(cost: number, now: number): AlgorithmResult { + return { + ok: cost <= ops.limit, + remaining: ops.limit, + resetAt: now, + retryAfter: 0, + limit: ops.limit, + }; + } + + return { + limit(key, cost, now) { + const state = getOrCreate(key, now); + ops.advance(state, now); + state.lastAccess = now; + if (maxKeys > 0) touch(key, state); + const ok = ops.tryConsume(state, cost, now); + return ops.result(state, ok, cost, now); + }, + // Advances time (segment rotation, token refill) so metadata is + // accurate, but does not consume permits or update lastAccess. 
+ peek(key, cost, now) { + const state = keys.get(key); + if (state === undefined) return peekDefault(cost, now); + ops.advance(state, now); + const ok = ops.wouldAllow(state, cost, now); + return ops.result(state, ok, cost, now); + }, + has(key) { + return keys.has(key); + }, + reset(key) { + keys.delete(key); + }, + get size() { + return keys.size; + }, + evict(now, ttl) { + for (const [k, state] of keys) { + if (now - state.lastAccess > ttl) keys.delete(k); + } + }, + clear() { + keys.clear(); + }, + }; +} + +// --- Fixed Window --- + +/** + * Creates a keyed fixed-window rate limit algorithm. + * + * @param limit Maximum permits per key per window. Must be a positive integer. + * @param window Window duration in milliseconds. Must be a positive finite number. + * @param options Additional keyed algorithm options. + * @returns A keyed algorithm using fixed-window semantics. + */ +export function createFixedWindowAlgorithm( + limit: number, + window: number, + options?: KeyedAlgorithmOptions, +): KeyedAlgorithm { + return createKeyedAlgorithm(createFixedWindowOps(limit, window), options); +} + +// --- Sliding Window --- + +/** + * Creates a keyed sliding-window rate limit algorithm. + * + * @param limit Maximum permits per key per window. Must be a positive integer. + * @param window Window duration in milliseconds. Must be a positive finite number. + * @param segmentsPerWindow Number of segments per window. Must be an integer >= 2. + * @param options Additional keyed algorithm options. + * @returns A keyed algorithm using sliding-window semantics. + */ +export function createSlidingWindowAlgorithm( + limit: number, + window: number, + segmentsPerWindow: number, + options?: KeyedAlgorithmOptions, +): KeyedAlgorithm { + return createKeyedAlgorithm( + createSlidingWindowOps(limit, window, segmentsPerWindow), + options, + ); +} + +// --- Token Bucket --- + +/** + * Creates a keyed token-bucket rate limit algorithm. 
+ * + * @param limit Bucket capacity (max tokens per key). Must be a positive integer. + * @param window Refill cycle duration in milliseconds. Must be a positive finite number. + * @param tokensPerPeriod Tokens added per replenishment period. Must be a positive integer. + * @param options Additional keyed algorithm options. + * @returns A keyed algorithm using token-bucket semantics. + */ +export function createTokenBucketAlgorithm( + limit: number, + window: number, + tokensPerPeriod: number, + options?: KeyedAlgorithmOptions, +): KeyedAlgorithm { + return createKeyedAlgorithm( + createTokenBucketOps(limit, window, tokensPerPeriod), + options, + ); +} + +// --- GCRA --- + +/** + * Creates a keyed GCRA (Generic Cell Rate Algorithm) rate limit algorithm. + * + * @param limit Maximum permits per key per window. Must be a positive integer. + * @param window Window (tau) in milliseconds. Must be a positive finite number. + * @param options Additional keyed algorithm options. + * @returns A keyed algorithm using GCRA semantics. + */ +export function createGcraAlgorithm( + limit: number, + window: number, + options?: KeyedAlgorithmOptions, +): KeyedAlgorithm { + return createKeyedAlgorithm(createGcraOps(limit, window), options); +} diff --git a/rate_limit/_redis_scripts.ts b/rate_limit/_redis_scripts.ts new file mode 100644 index 000000000000..15ad083facea --- /dev/null +++ b/rate_limit/_redis_scripts.ts @@ -0,0 +1,420 @@ +// Copyright 2018-2026 the Deno authors. MIT license. + +import { encodeHex } from "@std/encoding/hex"; +import type { RateLimitResult } from "./store_types.ts"; +import type { RedisConnection, RedisEvalConnection } from "./redis_store.ts"; + +// --- Lua scripts --- +// Each script returns a flat array: [ok, remaining, resetAt, retryAfter, limit] +// `ok` is 1 for allowed, 0 for denied. +// All timestamps are in milliseconds. +// Scripts use redis.call('TIME') for server-side time. + +// Redis TIME returns [seconds, microseconds]. 
Convert to milliseconds: +const LUA_NOW = ` +local _t = redis.call('TIME') +local now = tonumber(_t[1]) * 1000 + math.floor(tonumber(_t[2]) / 1000) +`; + +const LUA_FIXED_WINDOW_CONSUME = `${LUA_NOW} +local key = KEYS[1] +local limit = tonumber(ARGV[1]) +local window = tonumber(ARGV[2]) +local cost = tonumber(ARGV[3]) + +local data = redis.call('HMGET', key, 'count', 'windowStart') +local count = tonumber(data[1]) or 0 +local windowStart = tonumber(data[2]) or now + +if now - windowStart >= window then + count = 0 + windowStart = windowStart + math.floor((now - windowStart) / window) * window +end + +local resetAt = windowStart + window +local ok = 0 +if count + cost <= limit then + ok = 1 + count = count + cost + redis.call('HMSET', key, 'count', count, 'windowStart', windowStart) + redis.call('PEXPIRE', key, math.ceil(resetAt - now)) +else + redis.call('HMSET', key, 'count', count, 'windowStart', windowStart) + redis.call('PEXPIRE', key, math.ceil(resetAt - now)) +end + +local remaining = math.max(0, limit - count) +local retryAfter = 0 +if ok == 0 then + retryAfter = resetAt - now +end +return {ok, remaining, tostring(resetAt), tostring(retryAfter), limit} +`; + +const LUA_FIXED_WINDOW_PEEK = `-- peek-mode +${LUA_NOW} +local key = KEYS[1] +local limit = tonumber(ARGV[1]) +local window = tonumber(ARGV[2]) +local cost = tonumber(ARGV[3]) + +local data = redis.call('HMGET', key, 'count', 'windowStart') +local count = tonumber(data[1]) or 0 +local windowStart = tonumber(data[2]) or now + +if now - windowStart >= window then + count = 0 + windowStart = windowStart + math.floor((now - windowStart) / window) * window +end + +local resetAt = windowStart + window +local ok = 0 +if count + cost <= limit then ok = 1 end +local remaining = math.max(0, limit - count) +local retryAfter = 0 +if ok == 0 then retryAfter = resetAt - now end +return {ok, remaining, tostring(resetAt), tostring(retryAfter), limit} +`; + +// Sliding window: uses a Hash where field = segment start 
time (ms string), +// value = count in that segment. On each call we remove fields whose segment +// start is older than `now - window`, then sum the remaining values. +const LUA_SLIDING_WINDOW_CONSUME = `${LUA_NOW} +local key = KEYS[1] +local limit = tonumber(ARGV[1]) +local window = tonumber(ARGV[2]) +local cost = tonumber(ARGV[3]) +local segments = tonumber(ARGV[4]) +local segDur = window / segments + +local segStart = now - (now % segDur) +local cutoff = now - window + +local fields = redis.call('HGETALL', key) +local total = 0 +local toDel = {} +for i = 1, #fields, 2 do + local seg = tonumber(fields[i]) + if seg <= cutoff then + toDel[#toDel + 1] = fields[i] + else + total = total + tonumber(fields[i + 1]) + end +end +if #toDel > 0 then + redis.call('HDEL', key, unpack(toDel)) +end + +local resetAt = segStart + segDur +local ok = 0 +if total + cost <= limit then + ok = 1 + redis.call('HINCRBY', key, tostring(segStart), cost) + total = total + cost + redis.call('PEXPIRE', key, window + segDur) +end + +local remaining = math.max(0, limit - total) +local retryAfter = 0 +if ok == 0 then retryAfter = resetAt - now end +return {ok, remaining, tostring(resetAt), tostring(retryAfter), limit} +`; + +const LUA_SLIDING_WINDOW_PEEK = `-- peek-mode +${LUA_NOW} +local key = KEYS[1] +local limit = tonumber(ARGV[1]) +local window = tonumber(ARGV[2]) +local cost = tonumber(ARGV[3]) +local segments = tonumber(ARGV[4]) +local segDur = window / segments + +local segStart = now - (now % segDur) +local cutoff = now - window + +local fields = redis.call('HGETALL', key) +local total = 0 +local toDel = {} +for i = 1, #fields, 2 do + local seg = tonumber(fields[i]) + if seg <= cutoff then + toDel[#toDel + 1] = fields[i] + else + total = total + tonumber(fields[i + 1]) + end +end +if #toDel > 0 then + redis.call('HDEL', key, unpack(toDel)) +end + +local resetAt = segStart + segDur +local ok = 0 +if total + cost <= limit then ok = 1 end +local remaining = math.max(0, limit - total) 
+local retryAfter = 0 +if ok == 0 then retryAfter = resetAt - now end +return {ok, remaining, tostring(resetAt), tostring(retryAfter), limit} +`; + +const LUA_TOKEN_BUCKET_CONSUME = `${LUA_NOW} +local key = KEYS[1] +local limit = tonumber(ARGV[1]) +local window = tonumber(ARGV[2]) +local cost = tonumber(ARGV[3]) +local tokensPerPeriod = tonumber(ARGV[4]) + +local data = redis.call('HMGET', key, 'tokens', 'lastRefill') +local tokens = tonumber(data[1]) +local lastRefill = tonumber(data[2]) + +if tokens == nil then + tokens = limit + lastRefill = now +else + local elapsed = now - lastRefill + if elapsed >= window then + local cycles = math.floor(elapsed / window) + tokens = math.min(limit, tokens + cycles * tokensPerPeriod) + lastRefill = lastRefill + cycles * window + end +end + +local ok = 0 +if tokens >= cost then + ok = 1 + tokens = tokens - cost +end + +redis.call('HMSET', key, 'tokens', tokens, 'lastRefill', lastRefill) +local resetAt = lastRefill + window +redis.call('PEXPIRE', key, math.max(1, math.ceil(resetAt - now) + window)) + +local remaining = math.max(0, math.floor(tokens)) +local retryAfter = 0 +if ok == 0 then + local deficit = cost - tokens + local cycles = math.ceil(deficit / tokensPerPeriod) + retryAfter = math.max(0, cycles * window - (now - lastRefill)) +end +return {ok, remaining, tostring(resetAt), tostring(retryAfter), limit} +`; + +const LUA_TOKEN_BUCKET_PEEK = `-- peek-mode +${LUA_NOW} +local key = KEYS[1] +local limit = tonumber(ARGV[1]) +local window = tonumber(ARGV[2]) +local cost = tonumber(ARGV[3]) +local tokensPerPeriod = tonumber(ARGV[4]) + +local data = redis.call('HMGET', key, 'tokens', 'lastRefill') +local tokens = tonumber(data[1]) +local lastRefill = tonumber(data[2]) + +if tokens == nil then + tokens = limit + lastRefill = now +else + local elapsed = now - lastRefill + if elapsed >= window then + local cycles = math.floor(elapsed / window) + tokens = math.min(limit, tokens + cycles * tokensPerPeriod) + lastRefill = lastRefill + 
cycles * window + end +end + +local ok = 0 +if tokens >= cost then ok = 1 end +local remaining = math.max(0, math.floor(tokens)) +local resetAt = lastRefill + window +local retryAfter = 0 +if ok == 0 then + local deficit = cost - tokens + local cycles = math.ceil(deficit / tokensPerPeriod) + retryAfter = math.max(0, cycles * window - (now - lastRefill)) +end +return {ok, remaining, tostring(resetAt), tostring(retryAfter), limit} +`; + +const LUA_GCRA_CONSUME = `${LUA_NOW} +local key = KEYS[1] +local limit = tonumber(ARGV[1]) +local window = tonumber(ARGV[2]) +local cost = tonumber(ARGV[3]) +local emissionInterval = window / limit +local tau = window + +local tat = tonumber(redis.call('GET', key)) or now + +local allowAt = tat - tau +if now < allowAt then + local remaining = 0 + local retryAfter = allowAt - now + local resetAt = tat + return {0, remaining, tostring(resetAt), tostring(retryAfter), limit} +end + +local newTat = math.max(tat, now) + emissionInterval * cost +if newTat - now > tau then + local diff = tau - (tat - now) + local remaining = math.min(limit, math.max(0, math.floor(diff / emissionInterval))) + local retryAfter = math.max(0, newTat - tau - now) + local resetAt = tat + return {0, remaining, tostring(resetAt), tostring(retryAfter), limit} +end + +redis.call('SET', key, tostring(newTat), 'PX', math.ceil(newTat - now + tau)) +local diff = tau - (newTat - now) +local remaining = math.min(limit, math.max(0, math.floor(diff / emissionInterval))) +return {1, remaining, tostring(newTat), '0', limit} +`; + +const LUA_GCRA_PEEK = `-- peek-mode +${LUA_NOW} +local key = KEYS[1] +local limit = tonumber(ARGV[1]) +local window = tonumber(ARGV[2]) +local cost = tonumber(ARGV[3]) +local emissionInterval = window / limit +local tau = window + +local tat = tonumber(redis.call('GET', key)) or now + +local allowAt = tat - tau +if now < allowAt then + local remaining = 0 + local retryAfter = allowAt - now + local resetAt = tat + return {0, remaining, 
tostring(resetAt), tostring(retryAfter), limit} +end + +local newTat = math.max(tat, now) + emissionInterval * cost +local diff = tau - (tat - now) +local remaining = math.min(limit, math.max(0, math.floor(diff / emissionInterval))) +if newTat - now > tau then + local retryAfter = math.max(0, newTat - tau - now) + return {0, remaining, tostring(tat), tostring(retryAfter), limit} +end +return {1, remaining, tostring(tat), '0', limit} +`; + +export const LUA_DELETE_KEY = ` +redis.call('DEL', KEYS[1]) +return 1 +`; + +export interface LuaScriptPair { + consume: string; + peek: string; +} + +export function getScripts(algorithm: string): LuaScriptPair { + switch (algorithm) { + case "fixed-window": + return { + consume: LUA_FIXED_WINDOW_CONSUME, + peek: LUA_FIXED_WINDOW_PEEK, + }; + case "sliding-window": + return { + consume: LUA_SLIDING_WINDOW_CONSUME, + peek: LUA_SLIDING_WINDOW_PEEK, + }; + case "token-bucket": + return { + consume: LUA_TOKEN_BUCKET_CONSUME, + peek: LUA_TOKEN_BUCKET_PEEK, + }; + case "gcra": + return { consume: LUA_GCRA_CONSUME, peek: LUA_GCRA_PEEK }; + default: + throw new TypeError( + `Cannot create redis store: unknown algorithm '${algorithm}'`, + ); + } +} + +export async function sha1Hex(text: string): Promise { + const data = new TextEncoder().encode(text); + const hash = await crypto.subtle.digest("SHA-1", data); + return encodeHex(hash); +} + +export interface CachedScript { + source: string; + sha: string; +} + +/** + * Normalizes any {@linkcode RedisConnection} into the `eval`/`evalsha` + * shape used internally. If the connection already has `eval`/`evalsha`, + * it is returned as-is. If it only has `sendCommand`, a thin adapter is + * created. 
+ */ +export function toEvalConnection( + redis: RedisConnection, +): RedisEvalConnection { + if ("eval" in redis) return redis; + const conn = redis; + return { + eval(script: string, keys: string[], args: string[]): Promise { + return conn.sendCommand(["EVAL", script, keys.length, ...keys, ...args]); + }, + evalsha(sha: string, keys: string[], args: string[]): Promise { + return conn.sendCommand([ + "EVALSHA", + sha, + keys.length, + ...keys, + ...args, + ]); + }, + }; +} + +function isNoscriptError(err: unknown): boolean { + if (err instanceof Error) { + return err.message.includes("NOSCRIPT"); + } + return String(err).includes("NOSCRIPT"); +} + +export async function runScript( + redis: RedisEvalConnection, + script: CachedScript, + keys: string[], + args: string[], +): Promise { + try { + return await redis.evalsha(script.sha, keys, args); + } catch (err) { + if (isNoscriptError(err)) { + return await redis.eval(script.source, keys, args); + } + throw err; + } +} + +export function parseResult(raw: unknown, limit: number): RateLimitResult { + if (!Array.isArray(raw) || raw.length < 4) { + throw new TypeError( + `Cannot parse rate limit result: expected an array of length >= 4, received ${ + JSON.stringify(raw) + }`, + ); + } + const ok = raw[0] === 1; + const remaining = Number(raw[1]); + const resetAt = Number(raw[2]); + const retryAfter = Number(raw[3]); + if ( + Number.isNaN(remaining) || Number.isNaN(resetAt) || Number.isNaN(retryAfter) + ) { + throw new TypeError( + `Cannot parse rate limit result: numeric fields contain NaN (remaining=${ + raw[1] + }, resetAt=${raw[2]}, retryAfter=${raw[3]})`, + ); + } + return { ok, remaining, resetAt, retryAfter, limit }; +} diff --git a/rate_limit/_replenishing_limiter.ts b/rate_limit/_replenishing_limiter.ts new file mode 100644 index 000000000000..b35d35c46238 --- /dev/null +++ b/rate_limit/_replenishing_limiter.ts @@ -0,0 +1,287 @@ +// Copyright 2018-2026 the Deno authors. MIT license. 
+ +import type { + AcquiredLease, + AcquireOptions, + RateLimitLease, + RejectedLease, + ReplenishingRateLimiter, +} from "./types.ts"; +import { Deque } from "@std/data-structures/unstable-deque"; + +type RejectionReason = + | "Insufficient permits" + | "Queue limit exceeded" + | "Rate limiter has been disposed" + | "Evicted by newer request"; + +const DISPOSED_REASON: RejectionReason = "Rate limiter has been disposed"; +const ABORTED_REASON = "Acquire was aborted"; + +function noop() {} + +const ACQUIRED_LEASE: AcquiredLease = Object.freeze({ + acquired: true, + [Symbol.dispose]: noop, +}); + +function createRejectedLease( + retryAfter: number, + reason: RejectionReason, +): RejectedLease { + return { + acquired: false as const, + retryAfter, + reason, + [Symbol.dispose]: noop, + }; +} + +/** Pending waiter in the queue. */ +interface Waiter { + readonly permits: number; + resolve(lease: RateLimitLease): void; + onAbort?: (() => void) | undefined; +} + +/** + * Strategy hooks that define how a specific algorithm acquires and + * replenishes permits. Passed to {@linkcode createReplenishingLimiter}. + */ +interface ReplenishingStrategy { + /** Try to consume `permits` from the underlying algorithm. Returns true if acquired. */ + tryAcquirePermits(permits: number): boolean; + /** Advance the algorithm by one replenishment cycle. */ + replenish(): void; + /** Compute the retry delay for a denied request of `permits`. */ + computeRetryAfter(permits: number): number; + /** The maximum permits that can be acquired in a single call. */ + readonly permitLimit: number; +} + +/** Configuration for {@linkcode createReplenishingLimiter}. */ +interface ReplenishingLimiterConfig { + replenishmentPeriod: number; + autoReplenishment: boolean; + queueLimit: number; + queueOrder: "oldest-first" | "newest-first"; +} + +/** + * Create a {@linkcode ReplenishingRateLimiter} that delegates permit + * accounting to the provided strategy. 
Handles queueing, disposal, + * abort signals, and the replenishment timer. + */ +export function createReplenishingLimiter( + config: ReplenishingLimiterConfig, + strategy: ReplenishingStrategy, +): ReplenishingRateLimiter { + const queue = new Deque(); + let queuedPermits = 0; + let timer: ReturnType | undefined; + let disposed = false; + + if ( + config.queueOrder !== "oldest-first" && + config.queueOrder !== "newest-first" + ) { + throw new TypeError( + `Cannot create limiter: unknown queueOrder '${config + .queueOrder as string}'`, + ); + } + + const isNewestFirst = config.queueOrder === "newest-first"; + + if (config.autoReplenishment) { + timer = setInterval(replenishAndDrain, config.replenishmentPeriod); + if (typeof Deno !== "undefined") Deno.unrefTimer(timer as number); + } + + function peekNext(): Waiter | undefined { + return isNewestFirst ? queue.peekBack() : queue.peekFront(); + } + + function popNext(): Waiter | undefined { + return isNewestFirst ? queue.popBack() : queue.popFront(); + } + + /** + * Replenish permits and drain the queue in priority order. Stops at the + * first waiter whose permit demand exceeds the available supply. With + * `newest-first`, this means a large newest request blocks smaller older + * ones until enough permits accumulate. 
+ */ + function replenishAndDrain(): void { + if (disposed) return; + strategy.replenish(); + + let waiter = peekNext(); + while (waiter !== undefined) { + if (!strategy.tryAcquirePermits(waiter.permits)) break; + popNext(); + queuedPermits -= waiter.permits; + resolveWaiter(waiter, ACQUIRED_LEASE); + waiter = peekNext(); + } + } + + function resolveWaiter(waiter: Waiter, lease: RateLimitLease): void { + try { + if (waiter.onAbort) { + waiter.onAbort(); + waiter.onAbort = undefined; + } + } finally { + waiter.resolve(lease); + } + } + + function removeWaiter(waiter: Waiter): void { + const removed = queue.removeFirst((w) => w === waiter); + if (removed !== undefined) { + queuedPermits -= waiter.permits; + } + } + + /** Evicts the oldest (stalest) waiter to make room, regardless of queue order. */ + function evictOldest(permits: number): void { + while (queuedPermits + permits > config.queueLimit) { + const evicted = queue.popFront(); + if (!evicted) break; + queuedPermits -= evicted.permits; + resolveWaiter( + evicted, + createRejectedLease( + strategy.computeRetryAfter(evicted.permits), + "Evicted by newer request", + ), + ); + } + } + + function tryAcquire(permits = 1): RateLimitLease { + if (disposed) { + return createRejectedLease(0, DISPOSED_REASON); + } + if (permits < 1 || !Number.isInteger(permits)) { + throw new RangeError( + `Cannot acquire: 'permits' must be a positive integer, received ${permits}`, + ); + } + if (permits > strategy.permitLimit) { + throw new RangeError( + `Cannot acquire: 'permits' (${permits}) exceeds the permit limit (${strategy.permitLimit})`, + ); + } + + if (strategy.tryAcquirePermits(permits)) { + return ACQUIRED_LEASE; + } + return createRejectedLease( + strategy.computeRetryAfter(permits), + "Insufficient permits", + ); + } + + function acquire( + permits = 1, + options?: AcquireOptions, + ): Promise { + if (disposed) { + return Promise.reject(new Error(DISPOSED_REASON)); + } + if (permits < 1 || !Number.isInteger(permits)) { + 
return Promise.reject( + new RangeError( + `Cannot acquire: 'permits' must be a positive integer, received ${permits}`, + ), + ); + } + if (permits > strategy.permitLimit) { + return Promise.reject( + new RangeError( + `Cannot acquire: 'permits' (${permits}) exceeds the permit limit (${strategy.permitLimit})`, + ), + ); + } + + const signal = options?.signal; + if (signal?.aborted) { + return Promise.reject( + signal.reason ?? new DOMException(ABORTED_REASON, "AbortError"), + ); + } + + if (strategy.tryAcquirePermits(permits)) { + return Promise.resolve(ACQUIRED_LEASE); + } + + if (queuedPermits + permits > config.queueLimit) { + const canEvict = queue.length > 0 && + config.queueLimit > 0 && + permits <= config.queueLimit; + + if (!canEvict) { + return Promise.resolve( + createRejectedLease( + strategy.computeRetryAfter(permits), + "Queue limit exceeded", + ), + ); + } + evictOldest(permits); + } + + return new Promise((resolve, reject) => { + const waiter: Waiter = { permits, resolve }; + queue.pushBack(waiter); + queuedPermits += permits; + + if (signal) { + const onAbort = () => { + removeWaiter(waiter); + reject( + signal.reason ?? 
new DOMException(ABORTED_REASON, "AbortError"), + ); + }; + waiter.onAbort = () => signal.removeEventListener("abort", onAbort); + signal.addEventListener("abort", onAbort, { once: true }); + } + }); + } + + function replenish(): void { + if (config.autoReplenishment) { + throw new Error( + "Cannot replenish: limiter uses automatic replenishment", + ); + } + replenishAndDrain(); + } + + function dispose(): void { + if (disposed) return; + disposed = true; + + if (timer !== undefined) { + clearInterval(timer); + timer = undefined; + } + + const lease = createRejectedLease(0, DISPOSED_REASON); + let waiter = queue.popFront(); + while (waiter !== undefined) { + resolveWaiter(waiter, lease); + waiter = queue.popFront(); + } + queuedPermits = 0; + } + + return { + tryAcquire, + acquire, + replenish, + [Symbol.dispose]: dispose, + }; +} diff --git a/rate_limit/_validation.ts b/rate_limit/_validation.ts new file mode 100644 index 000000000000..9d3594d4623d --- /dev/null +++ b/rate_limit/_validation.ts @@ -0,0 +1,59 @@ +// Copyright 2018-2026 the Deno authors. MIT license. +// This module is browser compatible. + +/** + * Asserts that `value` is a positive integer (>= 1). + * + * @param context Noun phrase for the error prefix, e.g. "token bucket". + * @param name The option name shown in the error message. + * @param value The value to check. + */ +export function assertPositiveInteger( + context: string, + name: string, + value: number, +): void { + if (!Number.isInteger(value) || value < 1) { + throw new RangeError( + `Cannot create ${context}: '${name}' must be a positive integer, received ${value}`, + ); + } +} + +/** + * Asserts that `value` is a non-negative integer (>= 0), if defined. + * + * @param context Noun phrase for the error prefix, e.g. "token bucket". + * @param name The option name shown in the error message. + * @param value The value to check. Skipped when `undefined`. 
+ */ +export function assertNonNegativeInteger( + context: string, + name: string, + value: number | undefined, +): void { + if (value !== undefined && (!Number.isInteger(value) || value < 0)) { + throw new RangeError( + `Cannot create ${context}: '${name}' must be a non-negative integer, received ${value}`, + ); + } +} + +/** + * Asserts that `value` is a positive finite number (> 0). + * + * @param context Noun phrase for the error prefix, e.g. "token bucket". + * @param name The option name shown in the error message. + * @param value The value to check. + */ +export function assertPositiveFinite( + context: string, + name: string, + value: number, +): void { + if (!Number.isFinite(value) || value <= 0) { + throw new RangeError( + `Cannot create ${context}: '${name}' must be a positive finite number, received ${value}`, + ); + } +} diff --git a/rate_limit/deno.json b/rate_limit/deno.json new file mode 100644 index 000000000000..99ca8903bb33 --- /dev/null +++ b/rate_limit/deno.json @@ -0,0 +1,15 @@ +{ + "name": "@std/rate-limit", + "version": "0.1.0", + "exports": { + ".": "./mod.ts", + "./token-bucket": "./token_bucket.ts", + "./fixed-window": "./fixed_window.ts", + "./sliding-window": "./sliding_window.ts", + "./types": "./types.ts", + "./rate-limiter": "./rate_limiter.ts", + "./store-types": "./store_types.ts", + "./memory-store": "./memory_store.ts", + "./redis-store": "./redis_store.ts" + } +} diff --git a/rate_limit/fixed_window.ts b/rate_limit/fixed_window.ts new file mode 100644 index 000000000000..47953e2f64f9 --- /dev/null +++ b/rate_limit/fixed_window.ts @@ -0,0 +1,116 @@ +// Copyright 2018-2026 the Deno authors. MIT license. 
+ +import type { QueueOptions, ReplenishingRateLimiter } from "./types.ts"; +import { createReplenishingLimiter } from "./_replenishing_limiter.ts"; +import { createFixedWindowOps } from "./_algorithms.ts"; +import { + assertNonNegativeInteger, + assertPositiveFinite, + assertPositiveInteger, +} from "./_validation.ts"; + +/** + * Options for {@linkcode createFixedWindow}. + * + * @experimental **UNSTABLE**: New API, yet to be vetted. + */ +export interface FixedWindowOptions extends QueueOptions { + /** Maximum permits per window. */ + limit: number; + /** Window duration in milliseconds. */ + window: number; + /** + * Start an internal timer for automatic window rotation. + * + * When `false`, call {@linkcode ReplenishingRateLimiter.replenish} + * manually. + * + * @default {true} + */ + autoReplenishment?: boolean; + /** + * Clock function returning the current time in milliseconds. Override + * for deterministic testing. + * + * @default {Date.now} + */ + clock?: () => number; +} + +/** + * Create a fixed window rate limiter. A counter resets at the start of each + * window, making this the simplest time-windowed strategy — ideal for HTTP + * servers and 429 response logic. + * + * @experimental **UNSTABLE**: New API, yet to be vetted. + * + * @example Basic usage + * ```ts + * import { createFixedWindow } from "@std/rate-limit/fixed-window"; + * import { assert } from "@std/assert"; + * + * using limiter = createFixedWindow({ + * limit: 100, + * window: 60_000, + * }); + * + * using lease = limiter.tryAcquire(); + * assert(lease.acquired); + * ``` + * + * @example Manual replenishment + * ```ts no-assert + * import { createFixedWindow } from "@std/rate-limit/fixed-window"; + * + * using limiter = createFixedWindow({ + * limit: 100, + * window: 60_000, + * autoReplenishment: false, + * }); + * + * limiter.replenish(); + * ``` + * + * @param options Configuration for the fixed window. + * @returns A {@linkcode ReplenishingRateLimiter}. 
+ */ +export function createFixedWindow( + options: FixedWindowOptions, +): ReplenishingRateLimiter { + const context = "fixed window"; + assertPositiveInteger(context, "limit", options.limit); + assertPositiveFinite(context, "window", options.window); + assertNonNegativeInteger(context, "queueLimit", options.queueLimit); + + const { limit, window: windowMs } = options; + const clock = options.clock ?? Date.now; + const ops = createFixedWindowOps(limit, windowMs); + const state = ops.create(clock()); + let lastNow = 0; + + return createReplenishingLimiter( + { + replenishmentPeriod: windowMs, + autoReplenishment: options.autoReplenishment ?? true, + queueLimit: options.queueLimit ?? 0, + queueOrder: options.queueOrder ?? "oldest-first", + }, + { + get permitLimit() { + return ops.limit; + }, + tryAcquirePermits(permits: number): boolean { + lastNow = clock(); + ops.advance(state, lastNow); + return ops.tryConsume(state, permits, lastNow); + }, + replenish(): void { + lastNow = state.windowStart + windowMs; + ops.advance(state, lastNow); + }, + computeRetryAfter(permits: number): number { + return ops.computeRetryAfter(state, permits, lastNow); + }, + }, + ); +} diff --git a/rate_limit/fixed_window_test.ts b/rate_limit/fixed_window_test.ts new file mode 100644 index 000000000000..f64f9f20ea47 --- /dev/null +++ b/rate_limit/fixed_window_test.ts @@ -0,0 +1,626 @@ +// Copyright 2018-2026 the Deno authors. MIT license. 
// Test suite for createFixedWindow(): factory validation, synchronous
// tryAcquire(), window reset, manual replenishment, async acquire() with
// queueing/abort, disposal, queue ordering, and eviction. FakeTime drives
// the limiter's interval timer deterministically.

import {
  assert,
  assertEquals,
  assertFalse,
  assertRejects,
  assertThrows,
} from "@std/assert";
import { FakeTime } from "@std/testing/time";
import { createFixedWindow } from "./fixed_window.ts";

// --- Factory validation ---

Deno.test("createFixedWindow() throws for invalid limit", () => {
  assertThrows(
    () => createFixedWindow({ limit: 0, window: 1000 }),
    RangeError,
    "limit",
  );
  assertThrows(
    () => createFixedWindow({ limit: -1, window: 1000 }),
    RangeError,
    "limit",
  );
  assertThrows(
    () => createFixedWindow({ limit: 1.5, window: 1000 }),
    RangeError,
    "limit",
  );
  assertThrows(
    () => createFixedWindow({ limit: NaN, window: 1000 }),
    RangeError,
    "limit",
  );
  assertThrows(
    () => createFixedWindow({ limit: Infinity, window: 1000 }),
    RangeError,
    "limit",
  );
});

Deno.test("createFixedWindow() throws for invalid window", () => {
  assertThrows(
    () => createFixedWindow({ limit: 10, window: 0 }),
    RangeError,
    "window",
  );
  assertThrows(
    () => createFixedWindow({ limit: 10, window: -100 }),
    RangeError,
    "window",
  );
  assertThrows(
    () => createFixedWindow({ limit: 10, window: NaN }),
    RangeError,
    "window",
  );
  assertThrows(
    () => createFixedWindow({ limit: 10, window: Infinity }),
    RangeError,
    "window",
  );
});

Deno.test("createFixedWindow() throws for invalid queueLimit", () => {
  assertThrows(
    () => createFixedWindow({ limit: 10, window: 1000, queueLimit: -1 }),
    RangeError,
    "queueLimit",
  );
});

// --- tryAcquire ---

Deno.test("tryAcquire() succeeds within the window limit", () => {
  using time = new FakeTime(0);
  using limiter = createFixedWindow({
    limit: 3,
    window: 1000,
  });
  void time;

  assert(limiter.tryAcquire().acquired);
  assert(limiter.tryAcquire().acquired);
  assert(limiter.tryAcquire().acquired);
  assertFalse(limiter.tryAcquire().acquired);
});

Deno.test("tryAcquire() acquires multiple permits at once", () => {
  using time = new FakeTime(0);
  using limiter = createFixedWindow({
    limit: 5,
    window: 1000,
  });
  void time;

  assert(limiter.tryAcquire(3).acquired);
  assertFalse(limiter.tryAcquire(3).acquired);
  assert(limiter.tryAcquire(2).acquired);
});

Deno.test("tryAcquire() rejects with retryAfter equal to window duration", () => {
  using time = new FakeTime(0);
  using limiter = createFixedWindow({
    limit: 1,
    window: 5000,
  });
  void time;

  limiter.tryAcquire();
  const lease = limiter.tryAcquire();
  assertFalse(lease.acquired);
  assertEquals(lease.retryAfter, 5000);
});

Deno.test("tryAcquire() throws for invalid permits", () => {
  using time = new FakeTime(0);
  using limiter = createFixedWindow({
    limit: 5,
    window: 1000,
  });
  void time;

  assertThrows(() => limiter.tryAcquire(0), RangeError);
  assertThrows(() => limiter.tryAcquire(-1), RangeError);
  assertThrows(() => limiter.tryAcquire(1.5), RangeError);
});

Deno.test("tryAcquire() throws when permits exceed limit", () => {
  using time = new FakeTime(0);
  using limiter = createFixedWindow({
    limit: 5,
    window: 1000,
  });
  void time;

  assertThrows(() => limiter.tryAcquire(6), RangeError, "exceeds");
});

// --- Window reset ---

Deno.test("permits reset after the window elapses", () => {
  using time = new FakeTime(0);
  using limiter = createFixedWindow({ limit: 2, window: 1000 });

  limiter.tryAcquire();
  limiter.tryAcquire();
  assertFalse(limiter.tryAcquire().acquired);

  time.tick(1000);

  assert(limiter.tryAcquire().acquired);
  assert(limiter.tryAcquire().acquired);
  assertFalse(limiter.tryAcquire().acquired);
});

Deno.test("full permit count is restored each window", () => {
  using time = new FakeTime(0);
  using limiter = createFixedWindow({ limit: 5, window: 500 });

  for (let i = 0; i < 5; i++) limiter.tryAcquire();
  assertFalse(limiter.tryAcquire().acquired);

  time.tick(500);

  assert(limiter.tryAcquire(5).acquired);
});

// --- Manual replenishment ---

Deno.test("replenish() throws when autoReplenishment is true", () => {
  using time = new FakeTime(0);
  using limiter = createFixedWindow({
    limit: 5,
    window: 1000,
  });
  void time;

  assertThrows(
    () => limiter.replenish(),
    Error,
    "Cannot replenish: limiter uses automatic replenishment",
  );
});

Deno.test("replenish() drains queued acquire() waiters", async () => {
  const limiter = createFixedWindow({
    limit: 3,
    window: 1000,
    autoReplenishment: false,
    queueLimit: 5,
  });

  limiter.tryAcquire(3);

  let resolved = false;
  const promise = limiter.acquire().then((lease) => {
    resolved = true;
    return lease;
  });

  await Promise.resolve();
  assertFalse(resolved);

  limiter.replenish();
  const lease = await promise;
  assert(resolved);
  assert(lease.acquired);

  limiter[Symbol.dispose]();
});

Deno.test("replenish() resets the window when autoReplenishment is false", () => {
  const limiter = createFixedWindow({
    limit: 3,
    window: 1000,
    autoReplenishment: false,
  });

  limiter.tryAcquire();
  limiter.tryAcquire();
  limiter.tryAcquire();
  assertFalse(limiter.tryAcquire().acquired);

  limiter.replenish();
  assert(limiter.tryAcquire().acquired);

  limiter[Symbol.dispose]();
});

// --- acquire (async) ---

Deno.test("acquire() resolves immediately when permits available", async () => {
  using time = new FakeTime(0);
  using limiter = createFixedWindow({ limit: 5, window: 1000 });
  void time;

  const lease = await limiter.acquire();
  assert(lease.acquired);
});

Deno.test("acquire() returns rejected lease when queue limit is 0", async () => {
  using time = new FakeTime(0);
  using limiter = createFixedWindow({
    limit: 1,
    window: 1000,
    queueLimit: 0,
  });
  void time;

  limiter.tryAcquire();
  const lease = await limiter.acquire();
  assertFalse(lease.acquired);
  assertEquals(lease.reason, "Queue limit exceeded");
});

Deno.test("acquire() queues and resolves after window reset", async () => {
  using time = new FakeTime(0);
  using limiter = createFixedWindow({
    limit: 1,
    window: 1000,
    queueLimit: 5,
  });

  limiter.tryAcquire();

  let resolved = false;
  const promise = limiter.acquire().then((lease) => {
    resolved = true;
    return lease;
  });

  await Promise.resolve();
  assertFalse(resolved);

  time.tick(1000);
  const lease = await promise;
  assert(resolved);
  assert(lease.acquired);
});

Deno.test("acquire() rejects when aborted via signal", async () => {
  using time = new FakeTime(0);
  using limiter = createFixedWindow({
    limit: 1,
    window: 1000,
    queueLimit: 5,
  });
  void time;

  limiter.tryAcquire();

  const controller = new AbortController();
  const promise = limiter.acquire(1, { signal: controller.signal });
  controller.abort();

  await assertRejects(() => promise, DOMException);
});

Deno.test("acquire() rejects when signal is already aborted", async () => {
  using time = new FakeTime(0);
  using limiter = createFixedWindow({
    limit: 1,
    window: 1000,
    queueLimit: 5,
  });
  void time;

  limiter.tryAcquire();

  await assertRejects(
    () => limiter.acquire(1, { signal: AbortSignal.abort() }),
    DOMException,
  );
});

Deno.test("acquire() with already-aborted signal rejects even when permits are available", async () => {
  using time = new FakeTime(0);
  using limiter = createFixedWindow({
    limit: 5,
    window: 1000,
    queueLimit: 5,
  });
  void time;

  await assertRejects(
    () => limiter.acquire(1, { signal: AbortSignal.abort() }),
    DOMException,
  );

  assert(
    limiter.tryAcquire(5).acquired,
    "all 5 permits should still be available",
  );
});

Deno.test("acquire() with already-aborted signal does not evict queued waiters", async () => {
  using time = new FakeTime(0);
  using limiter = createFixedWindow({
    limit: 1,
    window: 1000,
    queueLimit: 1,
    queueOrder: "newest-first",
  });

  limiter.tryAcquire();

  const existingPromise = limiter.acquire();

  await assertRejects(
    () => limiter.acquire(1, { signal: AbortSignal.abort() }),
    DOMException,
  );

  time.tick(1000);
  const lease = await existingPromise;
  assert(lease.acquired, "existing waiter should not have been evicted");
});

// --- Disposal ---

Deno.test("dispose resolves queued waiters with rejected leases", async () => {
  using time = new FakeTime(0);
  const limiter = createFixedWindow({
    limit: 1,
    window: 1000,
    queueLimit: 5,
  });
  void time;

  limiter.tryAcquire();
  const promise = limiter.acquire();
  limiter[Symbol.dispose]();

  const lease = await promise;
  assertFalse(lease.acquired);
  assertEquals(lease.reason, "Rate limiter has been disposed");
});

Deno.test("tryAcquire() returns rejected lease after disposal", () => {
  using time = new FakeTime(0);
  const limiter = createFixedWindow({ limit: 5, window: 1000 });
  void time;

  limiter[Symbol.dispose]();
  const lease = limiter.tryAcquire();
  assertFalse(lease.acquired);
});

Deno.test("acquire() rejects after disposal", async () => {
  using time = new FakeTime(0);
  const limiter = createFixedWindow({ limit: 5, window: 1000 });
  void time;

  limiter[Symbol.dispose]();
  await assertRejects(() => limiter.acquire(), Error, "disposed");
});

// --- Queue ordering ---

Deno.test("oldest-first queue resolves waiters in FIFO order", async () => {
  using time = new FakeTime(0);
  using limiter = createFixedWindow({
    limit: 1,
    window: 1000,
    queueLimit: 10,
    queueOrder: "oldest-first",
  });

  limiter.tryAcquire();

  const order: number[] = [];
  const p1 = limiter.acquire().then((l) => {
    order.push(1);
    return l;
  });
  const p2 = limiter.acquire().then((l) => {
    order.push(2);
    return l;
  });

  time.tick(1000);
  await p1;
  time.tick(1000);
  await p2;

  assertEquals(order, [1, 2]);
});

Deno.test("newest-first queue resolves newest waiter first", async () => {
  using time = new FakeTime(0);
  using limiter = createFixedWindow({
    limit: 1,
    window: 1000,
    queueLimit: 10,
    queueOrder: "newest-first",
  });

  limiter.tryAcquire();

  const order: number[] = [];
  const p1 = limiter.acquire().then((l) => {
    order.push(1);
    return l;
  });
  const p2 = limiter.acquire().then((l) => {
    order.push(2);
    return l;
  });

  time.tick(1000);
  await p2;
  time.tick(1000);
  await p1;

  assertEquals(order, [2, 1]);
});

// --- Eviction ---

Deno.test("newest-first queue evicts oldest waiter when queue is full", async () => {
  using time = new FakeTime(0);
  using limiter = createFixedWindow({
    limit: 1,
    window: 1000,
    queueLimit: 2,
    queueOrder: "newest-first",
  });

  limiter.tryAcquire();

  const results: string[] = [];
  const p1 = limiter.acquire().then((l) => {
    results.push(l.acquired ? "p1:acquired" : `p1:${l.reason}`);
    return l;
  });
  const p2 = limiter.acquire().then((l) => {
    results.push(l.acquired ? "p2:acquired" : `p2:${l.reason}`);
    return l;
  });
  const p3 = limiter.acquire().then((l) => {
    results.push(l.acquired ? "p3:acquired" : `p3:${l.reason}`);
    return l;
  });

  await p1;
  assertEquals(results, ["p1:Evicted by newer request"]);

  time.tick(1000);
  await p3;
  time.tick(1000);
  await p2;

  assertEquals(results, [
    "p1:Evicted by newer request",
    "p3:acquired",
    "p2:acquired",
  ]);
});

Deno.test("oldest-first queue evicts oldest waiter when queue is full", async () => {
  using time = new FakeTime(0);
  using limiter = createFixedWindow({
    limit: 1,
    window: 1000,
    queueLimit: 1,
    queueOrder: "oldest-first",
  });

  limiter.tryAcquire();

  const results: string[] = [];
  const p1 = limiter.acquire().then((l) => {
    results.push(l.acquired ? "p1:acquired" : `p1:${l.reason}`);
    return l;
  });
  const p2 = limiter.acquire().then((l) => {
    results.push(l.acquired ? "p2:acquired" : `p2:${l.reason}`);
    return l;
  });

  await p1;
  assertEquals(results, ["p1:Evicted by newer request"]);

  time.tick(1000);
  await p2;

  assertEquals(results, ["p1:Evicted by newer request", "p2:acquired"]);
});

// --- Multi-permit queued waiters ---

Deno.test("acquire() queues multi-permit waiter spanning multiple windows", async () => {
  using time = new FakeTime(0);
  using limiter = createFixedWindow({
    limit: 2,
    window: 1000,
    queueLimit: 10,
  });

  limiter.tryAcquire(2);

  let resolved = false;
  const promise = limiter.acquire(2).then((lease) => {
    resolved = true;
    return lease;
  });

  await Promise.resolve();
  assertFalse(resolved);

  time.tick(1000);
  const lease = await promise;
  assert(resolved);
  assert(lease.acquired);
});

// --- Multiple waiters resolved in single replenishment ---

Deno.test("single replenishment resolves multiple queued waiters", async () => {
  using time = new FakeTime(0);
  using limiter = createFixedWindow({
    limit: 3,
    window: 1000,
    queueLimit: 10,
  });

  limiter.tryAcquire(3);

  const order: number[] = [];
  const p1 = limiter.acquire(1).then((l) => {
    order.push(1);
    return l;
  });
  const p2 = limiter.acquire(1).then((l) => {
    order.push(2);
    return l;
  });
  const p3 = limiter.acquire(1).then((l) => {
    order.push(3);
    return l;
  });

  await Promise.resolve();
  assertEquals(order, []);

  time.tick(1000);
  await Promise.all([p1, p2, p3]);

  assertEquals(order, [1, 2, 3]);
  for (const p of [p1, p2, p3]) {
    assert((await p).acquired);
  }
});

// --- acquire() validation ---

Deno.test("acquire() rejects for invalid permits", async () => {
  using time = new FakeTime(0);
  using limiter = createFixedWindow({ limit: 5, window: 1000 });
  void time;

  await assertRejects(() => limiter.acquire(0), RangeError);
  await assertRejects(() => limiter.acquire(-1), RangeError);
  await assertRejects(() => limiter.acquire(1.5), RangeError);
});

Deno.test("acquire() rejects when permits exceed limit", async () => {
  using time = new FakeTime(0);
  using limiter = createFixedWindow({ limit: 5, window: 1000 });
  void time;

  await assertRejects(() => limiter.acquire(6), RangeError, "exceeds");
});

// --- Double dispose ---

Deno.test("double dispose is a no-op", () => {
  using time = new FakeTime(0);
  const limiter = createFixedWindow({ limit: 5, window: 1000 });
  void time;

  limiter[Symbol.dispose]();
  limiter[Symbol.dispose]();
});
When a new key arrives at capacity, + * the least-recently-used key is evicted to make room. Set to `0` to + * disable (unbounded). + * + * @default {0} + */ + maxKeys?: number; + /** + * Clock function returning the current time in milliseconds. Override + * for testing with `FakeTime`. + * + * @default {Date.now} + */ + clock?: () => number; +} + +/** + * An in-memory {@linkcode RateLimitStore} with additional synchronous + * diagnostics. Extends the base store contract with `has()` and `size` + * for in-memory storage. + * + * @experimental **UNSTABLE**: New API, yet to be vetted. + */ +export interface MemoryStore extends RateLimitStore { + /** + * Whether a key has tracked state. + * + * @param key Identifier for the rate limit subject. + * @returns `true` if the key is currently tracked. + */ + has(key: string): boolean; + /** Number of keys currently tracked. */ + readonly size: number; +} + +/** + * Create an in-memory rate limit store backed by a `Map`. This is the + * default store used by `createRateLimiter` when no `store` option is + * provided. + * + * @experimental **UNSTABLE**: New API, yet to be vetted. + * + * @example Creating a memory store directly + * ```ts + * import { createMemoryStore } from "@std/rate-limit/memory-store"; + * import { assertEquals } from "@std/assert"; + * + * await using store = createMemoryStore({ + * limit: 5, + * window: 1000, + * algorithm: "fixed-window", + * evictionTtl: 0, + * }); + * + * assertEquals(store.capacity, 5); + * assertEquals(store.window, 1000); + * ``` + * + * @param options Configuration for the memory store. + * @returns A {@linkcode MemoryStore}. 
+ */ +export function createMemoryStore(options: MemoryStoreOptions): MemoryStore { + const context = "memory store"; + assertPositiveInteger(context, "limit", options.limit); + assertPositiveFinite(context, "window", options.window); + + const { + limit, + window: windowMs, + algorithm: algorithmName = "sliding-window", + segmentsPerWindow = 10, + tokensPerPeriod = limit, + evictionTtl = 300_000, + evictionInterval = 60_000, + maxKeys = 0, + clock = Date.now, + } = options; + + if (algorithmName === "token-bucket") { + assertPositiveInteger(context, "tokensPerPeriod", tokensPerPeriod); + if (tokensPerPeriod > limit) { + throw new RangeError( + `Cannot create ${context}: 'tokensPerPeriod' (${tokensPerPeriod}) exceeds 'limit' (${limit})`, + ); + } + } + + assertNonNegativeInteger(context, "evictionTtl", evictionTtl); + + if (evictionTtl > 0) { + assertPositiveInteger(context, "evictionInterval", evictionInterval); + } + + assertNonNegativeInteger(context, "maxKeys", maxKeys); + + const keyedOptions: KeyedAlgorithmOptions = { maxKeys }; + + let algorithm: KeyedAlgorithm; + switch (algorithmName) { + case "fixed-window": + algorithm = createFixedWindowAlgorithm(limit, windowMs, keyedOptions); + break; + case "sliding-window": + algorithm = createSlidingWindowAlgorithm( + limit, + windowMs, + segmentsPerWindow, + keyedOptions, + ); + break; + case "token-bucket": + algorithm = createTokenBucketAlgorithm( + limit, + windowMs, + tokensPerPeriod, + keyedOptions, + ); + break; + case "gcra": + algorithm = createGcraAlgorithm(limit, windowMs, keyedOptions); + break; + default: + throw new TypeError( + `Cannot create ${context}: unknown algorithm '${algorithmName as string}'`, + ); + } + + let evictionTimer: ReturnType | undefined; + + if (evictionTtl > 0) { + evictionTimer = setInterval( + () => algorithm.evict(clock(), evictionTtl), + evictionInterval, + ); + if (typeof Deno !== "undefined") Deno.unrefTimer(evictionTimer as number); + } + + return { + get capacity(): 
number { + return limit; + }, + get window(): number { + return windowMs; + }, + consume(key: string, cost: number): Promise { + return Promise.resolve(algorithm.limit(key, cost, clock())); + }, + peek(key: string, cost: number): Promise { + return Promise.resolve(algorithm.peek(key, cost, clock())); + }, + has(key: string): boolean { + return algorithm.has(key); + }, + reset(key: string): Promise { + algorithm.reset(key); + return Promise.resolve(); + }, + get size(): number { + return algorithm.size; + }, + [Symbol.asyncDispose](): Promise { + if (evictionTimer !== undefined) { + clearInterval(evictionTimer); + evictionTimer = undefined; + } + algorithm.clear(); + return Promise.resolve(); + }, + }; +} diff --git a/rate_limit/mod.ts b/rate_limit/mod.ts new file mode 100644 index 000000000000..e684274782b0 --- /dev/null +++ b/rate_limit/mod.ts @@ -0,0 +1,37 @@ +// Copyright 2018-2026 the Deno authors. MIT license. + +/** + * Rate limiting strategies for controlling how many operations can occur over + * time. + * + * The primary API is {@linkcode createRateLimiter}, a keyed rate limiter for + * the common case of "allow key X at most N requests per window." It supports + * fixed-window, sliding-window, token-bucket, and GCRA algorithms and accepts + * a pluggable {@linkcode RateLimitStore} backend (in-memory by default, or + * Redis via {@linkcode createRedisStore} for distributed deployments). + * + * For single-resource limiting, use the primitives: + * {@linkcode createTokenBucket}, {@linkcode createFixedWindow}, and + * {@linkcode createSlidingWindow}. 
+ * + * ```ts + * import { createRateLimiter } from "@std/rate-limit/rate-limiter"; + * import { assert } from "@std/assert"; + * + * await using limiter = createRateLimiter({ limit: 100, window: 60_000 }); + * + * const result = await limiter.limit("user:123"); + * assert(result.ok); + * ``` + * + * @module + */ + +export * from "./fixed_window.ts"; +export * from "./memory_store.ts"; +export * from "./rate_limiter.ts"; +export * from "./redis_store.ts"; +export * from "./sliding_window.ts"; +export * from "./store_types.ts"; +export * from "./token_bucket.ts"; +export * from "./types.ts"; diff --git a/rate_limit/rate_limiter.ts b/rate_limit/rate_limiter.ts new file mode 100644 index 000000000000..2194dd28be45 --- /dev/null +++ b/rate_limit/rate_limiter.ts @@ -0,0 +1,201 @@ +// Copyright 2018-2026 the Deno authors. MIT license. + +import type { MemoryStoreOptions } from "./memory_store.ts"; +import { createMemoryStore } from "./memory_store.ts"; +import type { RateLimitResult, RateLimitStore } from "./store_types.ts"; + +export type { RateLimitResult } from "./store_types.ts"; + +/** + * Options for {@linkcode KeyedRateLimiter.limit} and + * {@linkcode KeyedRateLimiter.peek}. + * + * @experimental **UNSTABLE**: New API, yet to be vetted. + * + * @example Variable cost per request + * ```ts + * import { createRateLimiter } from "@std/rate-limit/rate-limiter"; + * import { assert } from "@std/assert"; + * + * await using limiter = createRateLimiter({ limit: 100, window: 60_000 }); + * + * const result = await limiter.limit("user:123", { cost: 5 }); + * assert(result.ok); + * ``` + */ +export interface CostOptions { + /** + * Number of permits to consume (for `limit`) or check (for `peek`). + * Use higher values for expensive operations. + * + * @default {1} + */ + cost?: number; +} + +/** + * A keyed rate limiter that manages per-key state internally. This is the + * primary rate limiting API for the common case of "allow key X at most N + * requests per window." 
+ *
+ * All methods are async to support pluggable store backends (in-memory,
+ * Redis, Deno KV). For in-memory stores the returned promises resolve
+ * synchronously.
+ *
+ * **Disposal behavior:** after disposal, `limit()` and `peek()` return a
+ * result with `ok: false` (remaining/resetAt/retryAfter all `0`), and
+ * `reset()` is a no-op.
+ *
+ * @experimental **UNSTABLE**: New API, yet to be vetted.
+ */
+export interface KeyedRateLimiter extends AsyncDisposable {
+  /**
+   * Check whether a request for the given key should be allowed, and
+   * consume permits if so.
+   *
+   * @param key Identifier for the rate limit subject (user ID, IP, etc.).
+   * @param options Override cost per request.
+   * @returns A {@linkcode RateLimitResult} with the decision and metadata.
+   */
+  limit(key: string, options?: CostOptions): Promise<RateLimitResult>;
+
+  /**
+   * Check the current state for a key without consuming any permits.
+   * Useful for displaying remaining quota in UI or headers without
+   * affecting the count.
+   *
+   * @param key Identifier for the rate limit subject (user ID, IP, etc.).
+   * @param options Override cost per request.
+   * @returns A {@linkcode RateLimitResult} with the current state and metadata.
+   */
+  peek(key: string, options?: CostOptions): Promise<RateLimitResult>;
+
+  /**
+   * Reset all state for a key, restoring it to full capacity.
+   *
+   * @param key Identifier for the rate limit subject (user ID, IP, etc.).
+   */
+  reset(key: string): Promise<void>;
+}
+
+/**
+ * Options when using the default in-memory store. Extends
+ * {@linkcode MemoryStoreOptions} with a `store?: undefined` discriminant.
+ *
+ * @experimental **UNSTABLE**: New API, yet to be vetted.
+ */
+export type MemoryRateLimiterOptions = MemoryStoreOptions & {
+  store?: undefined;
+};
+
+/**
+ * Options when providing a custom {@linkcode RateLimitStore} backend.
+ * {@linkcode MemoryStoreOptions} keys (`limit`, `window`, `algorithm`,
+ * `evictionTtl`, etc.) 
are typed as `never` to prevent accidentally passing them + * alongside a custom store, since the store owns those settings. + * + * @experimental **UNSTABLE**: New API, yet to be vetted. + */ +export type StoreRateLimiterOptions = + & { /** The store backend to delegate to. */ store: RateLimitStore } + & { [K in keyof MemoryStoreOptions]?: never }; + +/** + * Options for {@linkcode createRateLimiter}. + * + * @experimental **UNSTABLE**: New API, yet to be vetted. + */ +export type RateLimiterOptions = + | MemoryRateLimiterOptions + | StoreRateLimiterOptions; + +/** + * Create a keyed rate limiter backed by an in-memory store or a custom + * {@linkcode RateLimitStore}. + * + * @experimental **UNSTABLE**: New API, yet to be vetted. + * + * @example Basic usage + * ```ts + * import { createRateLimiter } from "@std/rate-limit/rate-limiter"; + * import { assert, assertEquals } from "@std/assert"; + * + * await using limiter = createRateLimiter({ limit: 100, window: 60_000 }); + * const result = await limiter.limit("user:123", { cost: 5 }); + * assert(result.ok); + * assertEquals(result.remaining, 95); + * ``` + * + * @example Custom store backend + * ```ts ignore + * import { createRateLimiter } from "@std/rate-limit/rate-limiter"; + * import { createRedisStore } from "@std/rate-limit/redis-store"; + * + * const store = createRedisStore({ + * redis: myRedisClient, + * algorithm: "sliding-window", + * limit: 100, + * window: 60_000, + * }); + * + * await using limiter = createRateLimiter({ store }); + * ``` + * + * @param options Configuration for the rate limiter. + * @returns A {@linkcode KeyedRateLimiter}. + */ +export function createRateLimiter( + options: RateLimiterOptions, +): KeyedRateLimiter { + const store: RateLimitStore = options.store ?? 
+ createMemoryStore(options as MemoryRateLimiterOptions); + + const limit = store.capacity; + + const DISPOSED_RESULT: RateLimitResult = Object.freeze({ + ok: false as const, + remaining: 0, + resetAt: 0, + retryAfter: 0, + limit, + }); + + let disposed = false; + + function validateCost(method: string, cost: number): void { + if (!Number.isInteger(cost) || cost < 1) { + throw new RangeError( + `Cannot ${method}: 'cost' must be a positive integer, received ${cost}`, + ); + } + if (cost > limit) { + throw new RangeError( + `Cannot ${method}: 'cost' (${cost}) exceeds the limit (${limit})`, + ); + } + } + + return { + limit(key: string, options?: CostOptions): Promise { + if (disposed) return Promise.resolve(DISPOSED_RESULT); + const cost = options?.cost ?? 1; + validateCost("limit", cost); + return store.consume(key, cost); + }, + peek(key: string, options?: CostOptions): Promise { + if (disposed) return Promise.resolve(DISPOSED_RESULT); + const cost = options?.cost ?? 1; + validateCost("peek", cost); + return store.peek(key, cost); + }, + reset(key: string): Promise { + if (disposed) return Promise.resolve(); + return store.reset(key); + }, + async [Symbol.asyncDispose](): Promise { + if (disposed) return; + disposed = true; + await store[Symbol.asyncDispose](); + }, + }; +} diff --git a/rate_limit/rate_limiter_test.ts b/rate_limit/rate_limiter_test.ts new file mode 100644 index 000000000000..dda5367061b8 --- /dev/null +++ b/rate_limit/rate_limiter_test.ts @@ -0,0 +1,1378 @@ +// Copyright 2018-2026 the Deno authors. MIT license. 
+ +import { assert, assertEquals, assertFalse, assertThrows } from "@std/assert"; +import { FakeTime } from "@std/testing/time"; +import { createRateLimiter } from "./rate_limiter.ts"; +import { createMemoryStore } from "./memory_store.ts"; + +// --- Factory validation --- + +Deno.test("createRateLimiter() throws for invalid limit", () => { + assertThrows( + () => createRateLimiter({ limit: 0, window: 1000 }), + RangeError, + "limit", + ); + assertThrows( + () => createRateLimiter({ limit: -1, window: 1000 }), + RangeError, + "limit", + ); + assertThrows( + () => createRateLimiter({ limit: 1.5, window: 1000 }), + RangeError, + "limit", + ); +}); + +Deno.test("createRateLimiter() throws for invalid window", () => { + assertThrows( + () => createRateLimiter({ limit: 10, window: 0 }), + RangeError, + "window", + ); + assertThrows( + () => createRateLimiter({ limit: 10, window: -100 }), + RangeError, + "window", + ); +}); + +Deno.test("createRateLimiter() throws for invalid segmentsPerWindow", () => { + assertThrows( + () => + createRateLimiter({ + limit: 10, + window: 1000, + algorithm: "sliding-window", + segmentsPerWindow: 1, + }), + RangeError, + "segmentsPerWindow", + ); + assertThrows( + () => + createRateLimiter({ + limit: 10, + window: 1000, + algorithm: "sliding-window", + segmentsPerWindow: 3, + }), + RangeError, + "divisible", + ); +}); + +Deno.test("createRateLimiter() throws for invalid tokensPerPeriod", () => { + assertThrows( + () => + createRateLimiter({ + limit: 10, + window: 1000, + algorithm: "token-bucket", + tokensPerPeriod: 0, + }), + RangeError, + "tokensPerPeriod", + ); + assertThrows( + () => + createRateLimiter({ + limit: 10, + window: 1000, + algorithm: "token-bucket", + tokensPerPeriod: 11, + }), + RangeError, + "tokensPerPeriod", + ); +}); + +Deno.test("createRateLimiter() throws for invalid eviction options when evictionTtl > 0", () => { + assertThrows( + () => + createRateLimiter({ + limit: 10, + window: 1000, + evictionTtl: 5000, + 
evictionInterval: 0, + }), + RangeError, + "evictionInterval", + ); + assertThrows( + () => + createRateLimiter({ + limit: 10, + window: 1000, + evictionTtl: 5000, + evictionInterval: -100, + }), + RangeError, + "evictionInterval", + ); + assertThrows( + () => + createRateLimiter({ + limit: 10, + window: 1000, + evictionTtl: Infinity, + evictionInterval: 60_000, + }), + RangeError, + "evictionTtl", + ); +}); + +Deno.test("createRateLimiter() throws for negative evictionTtl", () => { + assertThrows( + () => + createRateLimiter({ + limit: 10, + window: 1000, + evictionTtl: -1, + }), + RangeError, + "evictionTtl", + ); + assertThrows( + () => + createRateLimiter({ + limit: 10, + window: 1000, + evictionTtl: NaN, + }), + RangeError, + "evictionTtl", + ); +}); + +Deno.test("createRateLimiter() throws for invalid cost", async () => { + using _time = new FakeTime(); + await using limiter = createRateLimiter({ limit: 10, window: 1000 }); + + assertThrows(() => limiter.limit("a", { cost: 0 }), RangeError, "cost"); + assertThrows(() => limiter.limit("a", { cost: -1 }), RangeError, "cost"); + assertThrows(() => limiter.limit("a", { cost: 1.5 }), RangeError, "cost"); + assertThrows(() => limiter.limit("a", { cost: 11 }), RangeError, "exceeds"); +}); + +Deno.test("createRateLimiter() accepts all algorithms", async () => { + using _time = new FakeTime(); + for ( + const algorithm of [ + "fixed-window", + "sliding-window", + "token-bucket", + "gcra", + ] as const + ) { + await using limiter = createRateLimiter({ + limit: 10, + window: 1000, + algorithm, + }); + const result = await limiter.limit("key"); + assert(result.ok); + } +}); + +// === Fixed window === + +Deno.test("fixed-window: first request allowed with correct remaining", async () => { + const now = 1000; + await using limiter = createRateLimiter({ + limit: 5, + window: 1000, + algorithm: "fixed-window", + evictionTtl: 0, + clock: () => now, + }); + + const r = await limiter.limit("a"); + assert(r.ok); + 
assertEquals(r.remaining, 4); + assertEquals(r.limit, 5); + assertEquals(r.retryAfter, 0); +}); + +Deno.test("fixed-window: exhausting limit returns ok: false", async () => { + const now = 1000; + await using limiter = createRateLimiter({ + limit: 3, + window: 1000, + algorithm: "fixed-window", + evictionTtl: 0, + clock: () => now, + }); + + assert((await limiter.limit("a")).ok); + assert((await limiter.limit("a")).ok); + assert((await limiter.limit("a")).ok); + + const r = await limiter.limit("a"); + assertFalse(r.ok); + assertEquals(r.remaining, 0); + assert(r.retryAfter > 0); + assertEquals(r.resetAt, 2000); +}); + +Deno.test("fixed-window: permits restore after window elapses", async () => { + let now = 1000; + await using limiter = createRateLimiter({ + limit: 2, + window: 1000, + algorithm: "fixed-window", + evictionTtl: 0, + clock: () => now, + }); + + await limiter.limit("a"); + await limiter.limit("a"); + assertFalse((await limiter.limit("a")).ok); + + now = 2000; + const r = await limiter.limit("a"); + assert(r.ok); + assertEquals(r.remaining, 1); +}); + +Deno.test("fixed-window: variable cost consumes multiple permits", async () => { + const now = 1000; + await using limiter = createRateLimiter({ + limit: 10, + window: 1000, + algorithm: "fixed-window", + evictionTtl: 0, + clock: () => now, + }); + + const r = await limiter.limit("a", { cost: 7 }); + assert(r.ok); + assertEquals(r.remaining, 3); + + assertFalse((await limiter.limit("a", { cost: 4 })).ok); + assert((await limiter.limit("a", { cost: 3 })).ok); +}); + +// === Sliding window === + +Deno.test("sliding-window: permits freed incrementally as segments rotate", async () => { + let now = 0; + await using limiter = createRateLimiter({ + limit: 4, + window: 400, + algorithm: "sliding-window", + segmentsPerWindow: 4, + evictionTtl: 0, + clock: () => now, + }); + + await limiter.limit("a", { cost: 4 }); + assertFalse((await limiter.limit("a")).ok); + + now = 100; + assertFalse((await 
limiter.limit("a")).ok); + now = 200; + assertFalse((await limiter.limit("a")).ok); + now = 300; + assertFalse((await limiter.limit("a")).ok); + + now = 400; + assert((await limiter.limit("a", { cost: 4 })).ok); +}); + +Deno.test("sliding-window: no boundary burst", async () => { + let now = 0; + await using limiter = createRateLimiter({ + limit: 10, + window: 1000, + algorithm: "sliding-window", + segmentsPerWindow: 2, + evictionTtl: 0, + clock: () => now, + }); + + await limiter.limit("a", { cost: 10 }); + + now = 500; + assertFalse((await limiter.limit("a")).ok); + + now = 1000; + assert((await limiter.limit("a", { cost: 10 })).ok); +}); + +Deno.test("sliding-window: retryAfter reflects next segment rotation", async () => { + const now = 0; + await using limiter = createRateLimiter({ + limit: 1, + window: 1000, + algorithm: "sliding-window", + segmentsPerWindow: 4, + evictionTtl: 0, + clock: () => now, + }); + + await limiter.limit("a"); + const r = await limiter.limit("a"); + assertFalse(r.ok); + assertEquals(r.retryAfter, 250); +}); + +// === Token bucket === + +Deno.test("token-bucket: starts at full capacity", async () => { + const now = 0; + await using limiter = createRateLimiter({ + limit: 5, + window: 1000, + algorithm: "token-bucket", + evictionTtl: 0, + clock: () => now, + }); + + const r = await limiter.limit("a"); + assert(r.ok); + assertEquals(r.remaining, 4); +}); + +Deno.test("token-bucket: tokens refill lazily on access", async () => { + let now = 0; + await using limiter = createRateLimiter({ + limit: 3, + window: 1000, + algorithm: "token-bucket", + tokensPerPeriod: 1, + evictionTtl: 0, + clock: () => now, + }); + + await limiter.limit("a", { cost: 3 }); + assertFalse((await limiter.limit("a")).ok); + + now = 1000; + assert((await limiter.limit("a")).ok); + assertFalse((await limiter.limit("a")).ok); + + now = 3000; + assert((await limiter.limit("a", { cost: 2 })).ok); +}); + +Deno.test("token-bucket: refill capped at limit", async () => { + 
let now = 0; + await using limiter = createRateLimiter({ + limit: 3, + window: 1000, + algorithm: "token-bucket", + tokensPerPeriod: 3, + evictionTtl: 0, + clock: () => now, + }); + + await limiter.limit("a"); + now = 10000; + const r = await limiter.limit("a"); + assert(r.ok); + assertEquals(r.remaining, 2); +}); + +Deno.test("token-bucket: retryAfter reflects time until enough tokens", async () => { + const now = 0; + await using limiter = createRateLimiter({ + limit: 10, + window: 500, + algorithm: "token-bucket", + tokensPerPeriod: 2, + evictionTtl: 0, + clock: () => now, + }); + + await limiter.limit("a", { cost: 10 }); + const r = await limiter.limit("a", { cost: 3 }); + assertFalse(r.ok); + assertEquals(r.retryAfter, 1000); +}); + +Deno.test("token-bucket: remaining is integer even with partial-cycle elapsed time", async () => { + let now = 0; + await using limiter = createRateLimiter({ + limit: 10, + window: 300, + algorithm: "token-bucket", + tokensPerPeriod: 3, + evictionTtl: 0, + clock: () => now, + }); + + await limiter.limit("a", { cost: 10 }); + + now = 500; + const r = await limiter.limit("a"); + assert(r.ok); + assert( + Number.isInteger(r.remaining), + `remaining (${r.remaining}) should be integer`, + ); + assertEquals(r.remaining, 2); +}); + +Deno.test("token-bucket: exact token boundary with multi-cycle refill", async () => { + let now = 0; + await using limiter = createRateLimiter({ + limit: 7, + window: 1000, + algorithm: "token-bucket", + tokensPerPeriod: 3, + evictionTtl: 0, + clock: () => now, + }); + + await limiter.limit("a", { cost: 7 }); + assertFalse((await limiter.limit("a")).ok); + + now = 1000; + assert((await limiter.limit("a", { cost: 3 })).ok); + assertFalse((await limiter.limit("a")).ok); + + now = 2000; + assert((await limiter.limit("a", { cost: 3 })).ok); + assertFalse((await limiter.limit("a")).ok); +}); + +// === GCRA === + +Deno.test("gcra: first request always allowed", async () => { + const now = 0; + await using limiter = 
createRateLimiter({ + limit: 10, + window: 1000, + algorithm: "gcra", + evictionTtl: 0, + clock: () => now, + }); + + const r = await limiter.limit("a"); + assert(r.ok); + assertEquals(r.limit, 10); +}); + +Deno.test("gcra: requests spaced >= emission_interval apart always allowed", async () => { + let now = 0; + const emissionInterval = 100; // window(1000) / limit(10) + await using limiter = createRateLimiter({ + limit: 10, + window: 1000, + algorithm: "gcra", + evictionTtl: 0, + clock: () => now, + }); + + for (let i = 0; i < 20; i++) { + const r = await limiter.limit("a"); + assert(r.ok, `request ${i} at now=${now} should be allowed`); + now += emissionInterval; + } +}); + +Deno.test("gcra: burst up to limit requests when idle", async () => { + const now = 0; + await using limiter = createRateLimiter({ + limit: 5, + window: 1000, + algorithm: "gcra", + evictionTtl: 0, + clock: () => now, + }); + + for (let i = 0; i < 5; i++) { + assert( + (await limiter.limit("a")).ok, + `burst request ${i} should be allowed`, + ); + } + assertFalse((await limiter.limit("a")).ok); +}); + +Deno.test("gcra: after burst, requests denied until tat drains", async () => { + let now = 0; + await using limiter = createRateLimiter({ + limit: 5, + window: 1000, + algorithm: "gcra", + evictionTtl: 0, + clock: () => now, + }); + + for (let i = 0; i < 5; i++) await limiter.limit("a"); + assertFalse((await limiter.limit("a")).ok); + + // emission_interval = 200ms. After 200ms, one slot should free. 
+ now = 200; + assert((await limiter.limit("a")).ok); + assertFalse((await limiter.limit("a")).ok); +}); + +Deno.test("gcra: retryAfter is exact", async () => { + const now = 0; + await using limiter = createRateLimiter({ + limit: 5, + window: 1000, + algorithm: "gcra", + evictionTtl: 0, + clock: () => now, + }); + + for (let i = 0; i < 5; i++) await limiter.limit("a"); + const r = await limiter.limit("a"); + assertFalse(r.ok); + assertEquals(r.retryAfter, 200); +}); + +Deno.test("gcra: variable cost advances tat by emission_interval * cost", async () => { + const now = 0; + await using limiter = createRateLimiter({ + limit: 10, + window: 1000, + algorithm: "gcra", + evictionTtl: 0, + clock: () => now, + }); + + // emission_interval = 100ms. cost=5 advances tat by 500ms. + const r = await limiter.limit("a", { cost: 5 }); + assert(r.ok); + assertEquals(r.remaining, 5); + + // 5 more slots remain + assert((await limiter.limit("a", { cost: 5 })).ok); + assertFalse((await limiter.limit("a")).ok); +}); + +Deno.test("gcra: remaining derived correctly", async () => { + const now = 0; + await using limiter = createRateLimiter({ + limit: 10, + window: 1000, + algorithm: "gcra", + evictionTtl: 0, + clock: () => now, + }); + + const r1 = await limiter.limit("a"); + assert(r1.ok); + assertEquals(r1.remaining, 9); + + const r2 = await limiter.limit("a", { cost: 4 }); + assert(r2.ok); + assertEquals(r2.remaining, 5); +}); + +Deno.test("gcra: remaining never exceeds limit after long idle", async () => { + let now = 0; + await using limiter = createRateLimiter({ + limit: 10, + window: 1000, + algorithm: "gcra", + evictionTtl: 0, + clock: () => now, + }); + + await limiter.limit("a"); + now += 100_000; + + const peek = await limiter.peek("a"); + assert(peek.ok); + assert( + peek.remaining <= 10, + `remaining (${peek.remaining}) should not exceed limit (10)`, + ); + assertEquals(peek.remaining, 10); + + const result = await limiter.limit("a"); + assert(result.ok); + assert( + 
result.remaining <= 10, + `remaining (${result.remaining}) should not exceed limit (10)`, + ); +}); + +Deno.test("gcra: cost exceeding remaining burst is denied", async () => { + const now = 0; + await using limiter = createRateLimiter({ + limit: 5, + window: 1000, + algorithm: "gcra", + evictionTtl: 0, + clock: () => now, + }); + + await limiter.limit("a", { cost: 3 }); + const r = await limiter.limit("a", { cost: 4 }); + assertFalse(r.ok); + assert(r.retryAfter > 0); +}); + +Deno.test("gcra: state is a single timestamp per key (minimal memory)", async () => { + const now = 0; + const store = createMemoryStore({ + limit: 100, + window: 1000, + algorithm: "gcra", + evictionTtl: 0, + clock: () => now, + }); + await using limiter = createRateLimiter({ store }); + + for (let i = 0; i < 1000; i++) { + await limiter.limit(`key-${i}`); + } + assertEquals(store.size, 1000); +}); + +// === peek() === + +Deno.test("peek() returns current state without consuming permits", async () => { + const now = 0; + await using limiter = createRateLimiter({ + limit: 5, + window: 1000, + algorithm: "fixed-window", + evictionTtl: 0, + clock: () => now, + }); + + await limiter.limit("a"); + await limiter.limit("a"); + + const p = await limiter.peek("a"); + assert(p.ok); + assertEquals(p.remaining, 3); + + // peek didn't consume — still 3 remaining + assertEquals((await limiter.peek("a")).remaining, 3); +}); + +Deno.test("peek() returns full capacity for unknown key", async () => { + const now = 0; + await using limiter = createRateLimiter({ + limit: 10, + window: 1000, + algorithm: "gcra", + evictionTtl: 0, + clock: () => now, + }); + + const p = await limiter.peek("unknown"); + assert(p.ok); + assertEquals(p.remaining, 10); + assertEquals(p.limit, 10); +}); + +Deno.test("peek() reflects consumed permits after limit()", async () => { + const now = 0; + await using limiter = createRateLimiter({ + limit: 5, + window: 1000, + algorithm: "token-bucket", + evictionTtl: 0, + clock: () => now, + 
}); + + await limiter.limit("a", { cost: 3 }); + const p = await limiter.peek("a"); + assert(p.ok); + assertEquals(p.remaining, 2); +}); + +// === reset() === + +Deno.test("reset() restores key to full capacity", async () => { + const now = 0; + await using limiter = createRateLimiter({ + limit: 3, + window: 1000, + algorithm: "gcra", + evictionTtl: 0, + clock: () => now, + }); + + await limiter.limit("a", { cost: 3 }); + assertFalse((await limiter.limit("a")).ok); + + await limiter.reset("a"); + assert((await limiter.limit("a")).ok); +}); + +Deno.test("reset() on unknown key is a no-op", async () => { + const now = 0; + await using limiter = createRateLimiter({ + limit: 5, + window: 1000, + algorithm: "fixed-window", + evictionTtl: 0, + clock: () => now, + }); + + await limiter.reset("nonexistent"); // should not throw +}); + +// === size (via MemoryStore) === + +Deno.test("MemoryStore.size tracks number of keys", async () => { + const now = 0; + const store = createMemoryStore({ + limit: 5, + window: 1000, + algorithm: "gcra", + evictionTtl: 0, + clock: () => now, + }); + await using limiter = createRateLimiter({ store }); + + assertEquals(store.size, 0); + await limiter.limit("a"); + assertEquals(store.size, 1); + await limiter.limit("b"); + assertEquals(store.size, 2); + await limiter.limit("a"); // same key + assertEquals(store.size, 2); + await limiter.reset("a"); + assertEquals(store.size, 1); +}); + +// === Eviction === + +Deno.test("keys are evicted after evictionTtl of inactivity", async () => { + using time = new FakeTime(); + const store = createMemoryStore({ + limit: 5, + window: 1000, + algorithm: "fixed-window", + evictionTtl: 5000, + evictionInterval: 1000, + }); + await using limiter = createRateLimiter({ store }); + + await limiter.limit("a"); + await limiter.limit("b"); + assertEquals(store.size, 2); + + time.tick(6000); + assertEquals(store.size, 0); +}); + +Deno.test("active keys are not evicted", async () => { + using time = new FakeTime(); + 
const store = createMemoryStore({ + limit: 5, + window: 1000, + algorithm: "fixed-window", + evictionTtl: 5000, + evictionInterval: 1000, + }); + await using limiter = createRateLimiter({ store }); + + await limiter.limit("a"); + await limiter.limit("b"); + + time.tick(4000); + await limiter.limit("a"); // refresh "a" + + time.tick(2000); // 6s total — "b" should be evicted, "a" should survive + assertEquals(store.size, 1); + assert((await limiter.peek("a")).ok); +}); + +Deno.test("peek() does not refresh activity for TTL eviction", async () => { + using time = new FakeTime(); + const store = createMemoryStore({ + limit: 5, + window: 1000, + algorithm: "fixed-window", + evictionTtl: 5000, + evictionInterval: 1000, + }); + await using limiter = createRateLimiter({ store }); + + await limiter.limit("a"); + assertEquals(store.size, 1); + + time.tick(4000); + await limiter.peek("a"); // should NOT refresh last-access + + time.tick(2000); // 6s total — "a" should be evicted despite the peek + assertEquals(store.size, 0); +}); + +Deno.test("evictionTtl: 0 disables eviction", async () => { + using time = new FakeTime(); + const store = createMemoryStore({ + limit: 5, + window: 1000, + algorithm: "fixed-window", + evictionTtl: 0, + }); + await using limiter = createRateLimiter({ store }); + + await limiter.limit("a"); + time.tick(1_000_000); + assertEquals(store.size, 1); +}); + +// === Disposal === + +Deno.test("dispose clears all state", async () => { + using _time = new FakeTime(); + const store = createMemoryStore({ + limit: 5, + window: 1000, + algorithm: "gcra", + }); + const limiter = createRateLimiter({ store }); + + await limiter.limit("a"); + await limiter.limit("b"); + assertEquals(store.size, 2); + + await limiter[Symbol.asyncDispose](); + assertEquals(store.size, 0); +}); + +Deno.test("limit() returns ok: false after disposal", async () => { + using _time = new FakeTime(); + const limiter = createRateLimiter({ + limit: 5, + window: 1000, + algorithm: "gcra", + 
}); + + await limiter[Symbol.asyncDispose](); + const r = await limiter.limit("a"); + assertFalse(r.ok); + assertEquals(r.remaining, 0); + assertEquals(r.resetAt, 0); + assertEquals(r.retryAfter, 0); +}); + +Deno.test("peek() returns ok: false after disposal", async () => { + using _time = new FakeTime(); + const limiter = createRateLimiter({ + limit: 5, + window: 1000, + algorithm: "gcra", + }); + + await limiter[Symbol.asyncDispose](); + const r = await limiter.peek("a"); + assertFalse(r.ok); + assertEquals(r.remaining, 0); + assertEquals(r.resetAt, 0); + assertEquals(r.retryAfter, 0); +}); + +Deno.test("reset() is a no-op after disposal", async () => { + using _time = new FakeTime(); + const limiter = createRateLimiter({ + limit: 5, + window: 1000, + algorithm: "gcra", + }); + + await limiter[Symbol.asyncDispose](); + await limiter.reset("a"); // should not throw +}); + +// === Metadata correctness === + +Deno.test("result.limit matches configured value", async () => { + const now = 0; + await using limiter = createRateLimiter({ + limit: 42, + window: 1000, + algorithm: "gcra", + evictionTtl: 0, + clock: () => now, + }); + + assertEquals((await limiter.limit("a")).limit, 42); + assertEquals((await limiter.peek("a")).limit, 42); +}); + +Deno.test("retryAfter is 0 when allowed, positive when denied", async () => { + const now = 0; + await using limiter = createRateLimiter({ + limit: 1, + window: 1000, + algorithm: "fixed-window", + evictionTtl: 0, + clock: () => now, + }); + + const allowed = await limiter.limit("a"); + assertEquals(allowed.retryAfter, 0); + + const denied = await limiter.limit("a"); + assert(denied.retryAfter > 0); +}); + +Deno.test("resetAt is a future timestamp", async () => { + const now = 5000; + await using limiter = createRateLimiter({ + limit: 5, + window: 1000, + algorithm: "fixed-window", + evictionTtl: 0, + clock: () => now, + }); + + const r = await limiter.limit("a"); + assert(r.resetAt > now); +}); + +Deno.test("gcra: retryAfter when 
now < allowAt (request arrives too early)", async () => { + let now = 0; + await using limiter = createRateLimiter({ + limit: 5, + window: 1000, + algorithm: "gcra", + evictionTtl: 0, + clock: () => now, + }); + + // Fill all 5 slots: tat advances to 1000 + for (let i = 0; i < 5; i++) await limiter.limit("a"); + + // Advance only 100ms — tat is 1000, allowAt = tat - tau = 0. + // A request at now=100 is after allowAt, so this exercises the else branch. + now = 100; + const r1 = await limiter.limit("a"); + assertFalse(r1.ok); + assert(r1.retryAfter > 0); + + // Now set now to -100 (simulating clock skew) — now < allowAt exercises + // the `now < allowAt` branch in result(). + now = -100; + const r2 = await limiter.peek("a"); + assertFalse(r2.ok); + assert(r2.retryAfter > 0); +}); + +// === Per-key isolation === + +Deno.test("keys are isolated from each other", async () => { + const now = 0; + await using limiter = createRateLimiter({ + limit: 2, + window: 1000, + algorithm: "gcra", + evictionTtl: 0, + clock: () => now, + }); + + await limiter.limit("a", { cost: 2 }); + assertFalse((await limiter.limit("a")).ok); + + assert((await limiter.limit("b")).ok); + assert((await limiter.limit("b")).ok); +}); + +// === Default algorithm is sliding-window === + +Deno.test("default algorithm is sliding-window", async () => { + let now = 0; + await using limiter = createRateLimiter({ + limit: 10, + window: 1000, + evictionTtl: 0, + clock: () => now, + }); + + await limiter.limit("a", { cost: 10 }); + + // At half-window, a fixed window would have reset. Sliding window hasn't. + now = 500; + assertFalse((await limiter.limit("a")).ok); + + // After full window, sliding window frees permits. 
+ now = 1000; + assert((await limiter.limit("a")).ok); +}); + +// === Default clock uses Date.now (T-1 test) === + +Deno.test("default clock uses Date.now", async () => { + using _time = new FakeTime(0); + await using limiter = createRateLimiter({ + limit: 5, + window: 1000, + algorithm: "fixed-window", + }); + + const r = await limiter.limit("a"); + assert(r.ok); + assertEquals(r.resetAt, 1000); +}); + +// === peek() with cost (C-2/A-2) === + +Deno.test("peek() with cost checks whether that cost would be allowed", async () => { + const now = 0; + await using limiter = createRateLimiter({ + limit: 5, + window: 1000, + algorithm: "fixed-window", + evictionTtl: 0, + clock: () => now, + }); + + await limiter.limit("a", { cost: 3 }); + + assert((await limiter.peek("a", { cost: 2 })).ok); + assertFalse((await limiter.peek("a", { cost: 3 })).ok); +}); + +Deno.test("peek() validates cost", async () => { + using _time = new FakeTime(); + await using limiter = createRateLimiter({ limit: 10, window: 1000 }); + + assertThrows(() => limiter.peek("a", { cost: 0 }), RangeError, "cost"); + assertThrows(() => limiter.peek("a", { cost: -1 }), RangeError, "cost"); + assertThrows(() => limiter.peek("a", { cost: 1.5 }), RangeError, "cost"); + assertThrows(() => limiter.peek("a", { cost: 11 }), RangeError, "exceeds"); +}); + +// === maxKeys (S-1) === + +Deno.test("maxKeys evicts LRU key when a new key arrives at capacity", async () => { + const now = 0; + const store = createMemoryStore({ + limit: 5, + window: 1000, + algorithm: "gcra", + evictionTtl: 0, + maxKeys: 2, + clock: () => now, + }); + await using limiter = createRateLimiter({ store }); + + assert((await limiter.limit("a")).ok); + assert((await limiter.limit("b")).ok); + assertEquals(store.size, 2); + + const r = await limiter.limit("c"); + assert(r.ok); + assertEquals(store.size, 2); + assertFalse(store.has("a")); + assert(store.has("b")); + assert(store.has("c")); +}); + +Deno.test("maxKeys allows existing keys even when at 
capacity", async () => { + const now = 0; + const store = createMemoryStore({ + limit: 5, + window: 1000, + algorithm: "gcra", + evictionTtl: 0, + maxKeys: 2, + clock: () => now, + }); + await using limiter = createRateLimiter({ store }); + + await limiter.limit("a"); + await limiter.limit("b"); + + const r = await limiter.limit("a"); + assert(r.ok); +}); + +Deno.test("maxKeys: 0 disables key limit", async () => { + const now = 0; + const store = createMemoryStore({ + limit: 100, + window: 1000, + algorithm: "gcra", + evictionTtl: 0, + maxKeys: 0, + clock: () => now, + }); + await using limiter = createRateLimiter({ store }); + + for (let i = 0; i < 1000; i++) { + assert((await limiter.limit(`key:${i}`)).ok); + } + assertEquals(store.size, 1000); +}); + +Deno.test("createRateLimiter() throws for invalid maxKeys", () => { + assertThrows( + () => createRateLimiter({ limit: 10, window: 1000, maxKeys: -1 }), + RangeError, + "maxKeys", + ); + assertThrows( + () => createRateLimiter({ limit: 10, window: 1000, maxKeys: 1.5 }), + RangeError, + "maxKeys", + ); +}); + +Deno.test("maxKeys: peek for unknown key at capacity does not evict", async () => { + const now = 0; + const store = createMemoryStore({ + limit: 5, + window: 1000, + algorithm: "gcra", + evictionTtl: 0, + maxKeys: 2, + clock: () => now, + }); + await using limiter = createRateLimiter({ store }); + + await limiter.limit("a"); + await limiter.limit("b"); + + const r = await limiter.peek("c"); + assert(r.ok); + assertEquals(r.remaining, 5); + assertEquals(store.size, 2); + assert(store.has("a")); + assert(store.has("b")); +}); + +Deno.test("maxKeys allows peek for existing key at capacity", async () => { + const now = 0; + const store = createMemoryStore({ + limit: 5, + window: 1000, + algorithm: "gcra", + evictionTtl: 0, + maxKeys: 2, + clock: () => now, + }); + await using limiter = createRateLimiter({ store }); + + await limiter.limit("a"); + await limiter.limit("b"); + + const r = await limiter.peek("a"); + 
assert(r.ok); + assertEquals(r.remaining, 4); +}); + +// === maxKeys + window reset (C-1 regression) === + +Deno.test("maxKeys allows existing key whose window has reset", async () => { + let now = 0; + const store = createMemoryStore({ + limit: 3, + window: 1000, + algorithm: "fixed-window", + evictionTtl: 0, + maxKeys: 2, + clock: () => now, + }); + await using limiter = createRateLimiter({ store }); + + await limiter.limit("a"); + await limiter.limit("b"); + assertEquals(store.size, 2); + + // Advance past the window so "a" resets to full capacity + now = 2000; + const r = await limiter.limit("a"); + assert(r.ok); + assertEquals(r.remaining, 2); +}); + +Deno.test("maxKeys allows GCRA key after full tat drain", async () => { + let now = 0; + const store = createMemoryStore({ + limit: 5, + window: 1000, + algorithm: "gcra", + evictionTtl: 0, + maxKeys: 2, + clock: () => now, + }); + await using limiter = createRateLimiter({ store }); + + await limiter.limit("a"); + await limiter.limit("b"); + + // Advance well past the window so "a" drains fully + now = 5000; + const r = await limiter.limit("a"); + assert(r.ok); +}); + +// === peek() unknown key with cost > 1 (T-TEST-3) === + +Deno.test("peek() returns ok for unknown key with cost <= limit", async () => { + const now = 0; + await using limiter = createRateLimiter({ + limit: 10, + window: 1000, + algorithm: "fixed-window", + evictionTtl: 0, + clock: () => now, + }); + + const p = await limiter.peek("unknown", { cost: 5 }); + assert(p.ok); + assertEquals(p.remaining, 10); + assertEquals(p.limit, 10); +}); + +Deno.test("peek() returns not-ok for unknown key with cost > limit", async () => { + const now = 0; + await using limiter = createRateLimiter({ + limit: 5, + window: 1000, + algorithm: "gcra", + evictionTtl: 0, + clock: () => now, + }); + + assertThrows( + () => limiter.peek("unknown", { cost: 6 }), + RangeError, + "exceeds", + ); +}); + +// === Unknown algorithm (T-TEST-4) === + +Deno.test("createRateLimiter() 
throws for unknown algorithm", () => { + assertThrows( + () => + createRateLimiter({ + limit: 10, + window: 1000, + algorithm: "unknown" as "fixed-window", + }), + TypeError, + "unknown", + ); +}); + +// === segmentsPerWindow edge cases in createRateLimiter (T-TEST-5) === + +Deno.test("createRateLimiter() throws for segmentsPerWindow: 0", () => { + assertThrows( + () => + createRateLimiter({ + limit: 10, + window: 1000, + algorithm: "sliding-window", + segmentsPerWindow: 0, + }), + RangeError, + "segmentsPerWindow", + ); +}); + +Deno.test("createRateLimiter() throws for non-integer segmentsPerWindow", () => { + assertThrows( + () => + createRateLimiter({ + limit: 10, + window: 1000, + algorithm: "sliding-window", + segmentsPerWindow: 2.5, + }), + RangeError, + "segmentsPerWindow", + ); +}); + +// === Store backend integration === + +Deno.test("createRateLimiter() with custom store delegates correctly", async () => { + const store = createMemoryStore({ + limit: 3, + window: 1000, + algorithm: "fixed-window", + evictionTtl: 0, + }); + await using limiter = createRateLimiter({ store }); + + const r = await limiter.limit("a"); + assert(r.ok); + assertEquals(r.remaining, 2); + assertEquals(r.limit, 3); +}); + +Deno.test("createRateLimiter() reads capacity/window from store", async () => { + const store = createMemoryStore({ + limit: 42, + window: 5000, + algorithm: "gcra", + evictionTtl: 0, + }); + await using limiter = createRateLimiter({ store }); + + const r = await limiter.limit("a"); + assert(r.ok); + assertEquals(r.limit, 42); +}); + +// === Concurrent limit() calls === + +Deno.test("concurrent limit() calls on the same key respect the limit", async () => { + const now = 0; + await using limiter = createRateLimiter({ + limit: 2, + window: 1000, + algorithm: "fixed-window", + evictionTtl: 0, + clock: () => now, + }); + + const results = await Promise.all([ + limiter.limit("a"), + limiter.limit("a"), + limiter.limit("a"), + ]); + + const allowed = results.filter((r) 
=> r.ok).length; + const denied = results.filter((r) => !r.ok).length; + assertEquals(allowed, 2); + assertEquals(denied, 1); +}); + +// === LRU eviction ordering === + +Deno.test("maxKeys evicts the least-recently-used key", async () => { + let now = 0; + const store = createMemoryStore({ + limit: 5, + window: 1000, + algorithm: "gcra", + evictionTtl: 0, + maxKeys: 3, + clock: () => now, + }); + await using limiter = createRateLimiter({ store }); + + now = 1; + await limiter.limit("a"); + now = 2; + await limiter.limit("b"); + now = 3; + await limiter.limit("c"); + assertEquals(store.size, 3); + + // Touch "a" so it becomes most-recently-used + now = 4; + await limiter.limit("a"); + + // Insert "d" — should evict "b" (least-recently-used), not "a" + now = 5; + await limiter.limit("d"); + assertEquals(store.size, 3); + assertFalse(store.has("b")); + assert(store.has("a")); + assert(store.has("c")); + assert(store.has("d")); +}); diff --git a/rate_limit/redis_store.ts b/rate_limit/redis_store.ts new file mode 100644 index 000000000000..e50e8c35ecd5 --- /dev/null +++ b/rate_limit/redis_store.ts @@ -0,0 +1,269 @@ +// Copyright 2018-2026 the Deno authors. MIT license. + +/** + * A Redis-backed {@linkcode RateLimitStore} for distributed rate limiting. + * + * All rate limit state is stored in Redis and manipulated atomically via + * Lua scripts, making this safe for multi-process / multi-server deployments. + * The store uses `redis.call('TIME')` inside Lua for server-side timestamps, + * so clock skew between application servers does not affect correctness. + * + * The store does not own the Redis connection — disposal is a no-op. + * + * @experimental **UNSTABLE**: New API, yet to be vetted. 
+ * + * @example Creating a Redis store + * ```ts ignore + * import { createRedisStore } from "@std/rate-limit/redis-store"; + * import { createRateLimiter } from "@std/rate-limit/rate-limiter"; + * + * const store = createRedisStore({ + * redis: myRedisClient, + * algorithm: "sliding-window", + * limit: 100, + * window: 60_000, + * }); + * + * await using limiter = createRateLimiter({ store }); + * const result = await limiter.limit(ip); + * ``` + * + * @module + */ + +import type { + AlgorithmOptions, + RateLimitResult, + RateLimitStore, +} from "./store_types.ts"; +import { assertPositiveFinite, assertPositiveInteger } from "./_validation.ts"; +import { + type CachedScript, + getScripts, + LUA_DELETE_KEY, + parseResult, + runScript, + sha1Hex, + toEvalConnection, +} from "./_redis_scripts.ts"; + +/** + * Redis connection that exposes `eval` and `evalsha` methods. This is + * the interface used by clients such as `ioredis`, `node-redis`, and + * `@db/redis`. + * + * @experimental **UNSTABLE**: New API, yet to be vetted. + */ +export interface RedisEvalConnection { + /** + * Execute a Lua script on the Redis server. + * + * @param script The Lua script source. + * @param keys Redis keys the script operates on. + * @param args Additional arguments passed to the script. + * @returns The script's return value. + */ + eval(script: string, keys: string[], args: string[]): Promise; + + /** + * Execute a cached Lua script by its SHA1 hash. + * + * @param sha The SHA1 digest of the script. + * @param keys Redis keys the script operates on. + * @param args Additional arguments passed to the script. + * @returns The script's return value. + */ + evalsha(sha: string, keys: string[], args: string[]): Promise; +} + +/** + * Redis connection that exposes a single `sendCommand` method. This is + * the interface used by `@iuioiua/redis` and other minimal clients. + * + * @experimental **UNSTABLE**: New API, yet to be vetted. 
+ */ +export interface RedisSendCommandConnection { + /** + * Send a raw Redis command and return the parsed reply. + * + * @param args The command arguments (e.g. `["SET", "key", "value"]`). + * @returns The server's reply. + */ + sendCommand(args: readonly (string | number)[]): Promise; +} + +/** + * A Redis connection accepted by {@linkcode createRedisStore}. + * + * Supports two shapes: + * - `eval`/`evalsha` methods (ioredis, node-redis, `@db/redis`) + * - `sendCommand` (e.g. `@iuioiua/redis`) + * + * @experimental **UNSTABLE**: New API, yet to be vetted. + */ +export type RedisConnection = RedisEvalConnection | RedisSendCommandConnection; + +/** + * Options for {@linkcode createRedisStore}. + * + * @experimental **UNSTABLE**: New API, yet to be vetted. + */ +export interface RedisStoreOptions extends AlgorithmOptions { + /** The Redis connection to use. */ + redis: RedisConnection; + /** + * Key prefix for Redis keys. + * + * @default {"rl"} + */ + prefix?: string; +} + +/** + * Create a Redis-backed rate limit store. All state is stored in Redis + * and manipulated atomically via Lua scripts. + * + * The store does not own the Redis connection — `[Symbol.asyncDispose]` + * is a no-op. The caller is responsible for closing the connection. + * + * @experimental **UNSTABLE**: New API, yet to be vetted. + * + * @example Basic usage + * ```ts ignore + * import { createRedisStore } from "@std/rate-limit/redis-store"; + * import { createRateLimiter } from "@std/rate-limit/rate-limiter"; + * + * const store = createRedisStore({ + * redis: myRedisClient, + * algorithm: "sliding-window", + * limit: 100, + * window: 60_000, + * }); + * + * await using limiter = createRateLimiter({ store }); + * const result = await limiter.limit(ip); + * ``` + * + * @param options Configuration for the Redis store. + * @returns A {@linkcode RateLimitStore}. 
+ */ +export function createRedisStore( + options: RedisStoreOptions, +): RateLimitStore { + const context = "redis store"; + assertPositiveInteger(context, "limit", options.limit); + assertPositiveFinite(context, "window", options.window); + + const { + redis: rawRedis, + algorithm: algorithmName = "sliding-window", + limit, + window: windowMs, + segmentsPerWindow = 10, + tokensPerPeriod = limit, + prefix = "rl", + } = options; + + const redis = toEvalConnection(rawRedis); + + if (algorithmName === "sliding-window") { + if (!Number.isInteger(segmentsPerWindow) || segmentsPerWindow < 2) { + throw new RangeError( + `Cannot create ${context}: 'segmentsPerWindow' must be an integer >= 2, received ${segmentsPerWindow}`, + ); + } + if (windowMs % segmentsPerWindow !== 0) { + throw new RangeError( + `Cannot create ${context}: 'window' (${windowMs}) must be evenly divisible by 'segmentsPerWindow' (${segmentsPerWindow})`, + ); + } + } + + if (algorithmName === "token-bucket") { + assertPositiveInteger(context, "tokensPerPeriod", tokensPerPeriod); + if (tokensPerPeriod > limit) { + throw new RangeError( + `Cannot create ${context}: 'tokensPerPeriod' (${tokensPerPeriod}) exceeds 'limit' (${limit})`, + ); + } + } + + const scripts = getScripts(algorithmName); + + let consumeScript: CachedScript | undefined; + let peekScript: CachedScript | undefined; + let deleteScript: CachedScript | undefined; + let initScripts: Promise | undefined; + + function ensureScripts(): Promise { + if (initScripts) return initScripts; + initScripts = (async () => { + const [consumeSha, peekSha, deleteSha] = await Promise.all([ + sha1Hex(scripts.consume), + sha1Hex(scripts.peek), + sha1Hex(LUA_DELETE_KEY), + ]); + consumeScript = { source: scripts.consume, sha: consumeSha }; + peekScript = { source: scripts.peek, sha: peekSha }; + deleteScript = { source: LUA_DELETE_KEY, sha: deleteSha }; + })(); + return initScripts; + } + + function redisKey(key: string): string { + return `${prefix}:${key}`; + } 
+ + function buildArgs(): string[] { + const args = [String(limit), String(windowMs)]; + if (algorithmName === "sliding-window") { + return [...args, "", String(segmentsPerWindow)]; + } + if (algorithmName === "token-bucket") { + return [...args, "", String(tokensPerPeriod)]; + } + return [...args, ""]; + } + + const baseArgs = buildArgs(); + + return { + get capacity(): number { + return limit; + }, + get window(): number { + return windowMs; + }, + async consume(key: string, cost: number): Promise { + await ensureScripts(); + const args = [...baseArgs]; + args[2] = String(cost); + const raw = await runScript( + redis, + consumeScript!, + [redisKey(key)], + args, + ); + return parseResult(raw, limit); + }, + async peek(key: string, cost: number): Promise { + await ensureScripts(); + const args = [...baseArgs]; + args[2] = String(cost); + const raw = await runScript( + redis, + peekScript!, + [redisKey(key)], + args, + ); + return parseResult(raw, limit); + }, + async reset(key: string): Promise { + await ensureScripts(); + await runScript(redis, deleteScript!, [redisKey(key)], []); + }, + [Symbol.asyncDispose](): Promise { + return Promise.resolve(); + }, + }; +} diff --git a/rate_limit/redis_store_test.ts b/rate_limit/redis_store_test.ts new file mode 100644 index 000000000000..daa7597926f9 --- /dev/null +++ b/rate_limit/redis_store_test.ts @@ -0,0 +1,1143 @@ +// Copyright 2018-2026 the Deno authors. MIT license. + +import { assert, assertEquals, assertFalse, assertThrows } from "@std/assert"; +import { createRedisStore } from "./redis_store.ts"; +import type { + RedisEvalConnection, + RedisSendCommandConnection, +} from "./redis_store.ts"; +import { createRateLimiter } from "./rate_limiter.ts"; + +/** + * In-memory Redis emulator that supports the subset of commands used + * by the rate limit Lua scripts. 
Rather than parsing Lua, this class + * implements a small Redis command engine and uses a real Lua-like + * execution model by pre-compiling each script into a sequence of + * command calls via text matching. + * + * For testing purposes, the mock always rejects `evalsha` with NOSCRIPT + * to exercise the fallback path, and `eval` runs the script through + * the built-in command interpreter. + */ +class MockRedis implements RedisEvalConnection { + #strings = new Map(); + #hashes = new Map>(); + #expiries = new Map(); + #nowMs: number; + + constructor(nowMs = 0) { + this.#nowMs = nowMs; + } + + get now(): number { + return this.#nowMs; + } + + set now(ms: number) { + this.#nowMs = ms; + } + + tick(ms: number): void { + this.#nowMs += ms; + } + + eval(script: string, keys: string[], args: string[]): Promise { + return Promise.resolve(this.#runLuaScript(script, keys, args)); + } + + evalsha(_sha: string, _keys: string[], _args: string[]): Promise { + return Promise.reject(new Error("NOSCRIPT No matching script")); + } + + #evictExpired(): void { + for (const [key, expiresAt] of this.#expiries) { + if (this.#nowMs >= expiresAt) { + this.#strings.delete(key); + this.#hashes.delete(key); + this.#expiries.delete(key); + } + } + } + + #pexpire(key: string, ms: number): void { + if ( + this.#strings.has(key) || this.#hashes.has(key) + ) { + this.#expiries.set(key, this.#nowMs + ms); + } + } + + #del(key: string): void { + this.#strings.delete(key); + this.#hashes.delete(key); + this.#expiries.delete(key); + } + + #runLuaScript( + script: string, + keys: string[], + args: string[], + ): unknown { + this.#evictExpired(); + + const now = this.#nowMs; + + if (script.includes("redis.call('DEL'")) { + this.#del(keys[0]!); + return 1; + } + + const key = keys[0]!; + const limit = Number(args[0]); + const window = Number(args[1]); + const cost = Number(args[2]); + + const isPeek = script.includes("-- peek-mode"); + + if (script.includes("HGETALL")) { + const segments = 
Number(args[3]); + return this.#slidingWindow( + key, + limit, + window, + cost, + segments, + now, + isPeek, + ); + } + + if (script.includes("windowStart")) { + return this.#fixedWindow(key, limit, window, cost, now, isPeek); + } + + if (script.includes("lastRefill")) { + const tokensPerPeriod = Number(args[3]); + return this.#tokenBucket( + key, + limit, + window, + cost, + tokensPerPeriod, + now, + isPeek, + ); + } + + if (script.includes("emissionInterval")) { + return this.#gcra(key, limit, window, cost, now, isPeek); + } + + throw new Error("Unrecognized Lua script"); + } + + #fixedWindow( + key: string, + limit: number, + window: number, + cost: number, + now: number, + peek: boolean, + ): [number, number, string, string, number] { + let hash = this.#hashes.get(key); + let count = hash ? Number(hash.get("count") ?? 0) : 0; + let windowStart = hash ? Number(hash.get("windowStart") ?? now) : now; + + if (now - windowStart >= window) { + count = 0; + windowStart = windowStart + + Math.floor((now - windowStart) / window) * window; + } + + const resetAt = windowStart + window; + let ok = 0; + + if (count + cost <= limit) { + ok = 1; + if (!peek) { + count += cost; + } + } + + if (!peek) { + if (!hash) { + hash = new Map(); + this.#hashes.set(key, hash); + } + hash.set("count", String(count)); + hash.set("windowStart", String(windowStart)); + this.#pexpire(key, Math.ceil(resetAt - now)); + } + + const remaining = Math.max(0, limit - count); + const retryAfter = ok === 0 ? 
resetAt - now : 0; + + return [ok, remaining, String(resetAt), String(retryAfter), limit]; + } + + #slidingWindow( + key: string, + limit: number, + window: number, + cost: number, + _segments: number, + now: number, + peek: boolean, + ): [number, number, string, string, number] { + const segDur = window / _segments; + const segStart = now - (now % segDur); + const cutoff = now - window; + + let hash = this.#hashes.get(key); + if (!hash) { + hash = new Map(); + this.#hashes.set(key, hash); + } + + // Remove segments at or before the cutoff (matches Lua `seg <= cutoff`) + for (const [field] of hash) { + if (Number(field) <= cutoff) { + hash.delete(field); + } + } + + let total = 0; + for (const [, val] of hash) { + total += Number(val); + } + + const resetAt = segStart + segDur; + let ok = 0; + + if (total + cost <= limit) { + ok = 1; + if (!peek) { + const segKey = String(segStart); + hash.set(segKey, String((Number(hash.get(segKey) ?? "0")) + cost)); + total += cost; + this.#pexpire(key, window + segDur); + } + } + + const remaining = Math.max(0, limit - total); + const retryAfter = ok === 0 ? 
resetAt - now : 0;
+
+    return [ok, remaining, String(resetAt), String(retryAfter), limit];
+  }
+
+  // Emulates the token-bucket Lua script: tokens refill lazily at
+  // `tokensPerPeriod` per full elapsed `window` cycle, capped at `limit`.
+  // Returns the same [ok, remaining, resetAt, retryAfter, limit] tuple
+  // shape as the other algorithm emulations.
+  #tokenBucket(
+    key: string,
+    limit: number,
+    window: number,
+    cost: number,
+    tokensPerPeriod: number,
+    now: number,
+    peek: boolean,
+  ): [number, number, string, string, number] {
+    const hash = this.#hashes.get(key);
+    let tokens: number;
+    let lastRefill: number;
+
+    // Unknown key: bucket starts full (see "starts at full capacity" test).
+    if (!hash || !hash.has("tokens")) {
+      tokens = limit;
+      lastRefill = now;
+    } else {
+      tokens = Number(hash.get("tokens"));
+      lastRefill = Number(hash.get("lastRefill"));
+
+      // Apply all *whole* refill cycles elapsed since lastRefill;
+      // lastRefill advances by whole cycles so partial progress toward the
+      // next refill is preserved.
+      const elapsed = now - lastRefill;
+      if (elapsed >= window) {
+        const cycles = Math.floor(elapsed / window);
+        tokens = Math.min(limit, tokens + cycles * tokensPerPeriod);
+        lastRefill = lastRefill + cycles * window;
+      }
+    }
+
+    let ok = 0;
+    if (tokens >= cost) {
+      ok = 1;
+      if (!peek) {
+        tokens -= cost;
+      }
+    }
+
+    // Consume mode writes state back (even on denial) and refreshes the
+    // key's expiry; peek leaves the stored hash untouched.
+    if (!peek) {
+      let h = this.#hashes.get(key);
+      if (!h) {
+        h = new Map();
+        this.#hashes.set(key, h);
+      }
+      h.set("tokens", String(tokens));
+      h.set("lastRefill", String(lastRefill));
+      const resetAt = lastRefill + window;
+      this.#pexpire(key, Math.max(1, Math.ceil(resetAt - now) + window));
+    }
+
+    const remaining = Math.max(0, Math.floor(tokens));
+    const resetAt = lastRefill + window;
+    let retryAfter = 0;
+    if (ok === 0) {
+      // Delay until enough whole refill cycles cover the deficit, e.g.
+      // deficit 3 at 2 tokens per 500 ms => 2 cycles => 1000 ms (see the
+      // "retryAfter reflects time until enough tokens" test).
+      const deficit = cost - tokens;
+      const cycles = Math.ceil(deficit / tokensPerPeriod);
+      retryAfter = Math.max(0, cycles * window - (now - lastRefill));
+    }
+
+    return [ok, remaining, String(resetAt), String(retryAfter), limit];
+  }
+
+  // Emulates the GCRA Lua script: the only stored state is the
+  // "theoretical arrival time" (tat); emission interval = window / limit,
+  // burst tolerance tau = window.
+  #gcra(
+    key: string,
+    limit: number,
+    window: number,
+    cost: number,
+    now: number,
+    peek: boolean,
+  ): [number, number, string, string, number] {
+    const emissionInterval = window / limit;
+    const tau = window;
+
+    const stored = this.#strings.get(key);
+    const tat = stored !== undefined ? 
Number(stored) : now; + + const allowAt = tat - tau; + if (now < allowAt) { + const remaining = 0; + const retryAfter = allowAt - now; + return [0, remaining, String(tat), String(retryAfter), limit]; + } + + const newTat = Math.max(tat, now) + emissionInterval * cost; + if (newTat - now > tau) { + const diff = tau - (tat - now); + const remaining = Math.min( + limit, + Math.max(0, Math.floor(diff / emissionInterval)), + ); + const retryAfter = Math.max(0, newTat - tau - now); + return [0, remaining, String(tat), String(retryAfter), limit]; + } + + if (!peek) { + this.#strings.set(key, String(newTat)); + this.#pexpire(key, Math.ceil(newTat - now + tau)); + } + + const tatForRemaining = peek ? tat : newTat; + const diff = tau - (tatForRemaining - now); + const remaining = Math.min( + limit, + Math.max(0, Math.floor(diff / emissionInterval)), + ); + + return [peek ? 1 : 1, remaining, String(peek ? tat : newTat), "0", limit]; + } +} + +// --- Factory validation --- + +Deno.test("createRedisStore() throws for invalid limit", () => { + const redis = new MockRedis(); + assertThrows( + () => createRedisStore({ redis, limit: 0, window: 1000 }), + RangeError, + "limit", + ); + assertThrows( + () => createRedisStore({ redis, limit: -1, window: 1000 }), + RangeError, + "limit", + ); +}); + +Deno.test("createRedisStore() throws for invalid window", () => { + const redis = new MockRedis(); + assertThrows( + () => createRedisStore({ redis, limit: 10, window: 0 }), + RangeError, + "window", + ); +}); + +Deno.test("createRedisStore() throws for unknown algorithm", () => { + const redis = new MockRedis(); + assertThrows( + () => + createRedisStore({ + redis, + limit: 10, + window: 1000, + algorithm: "unknown" as "fixed-window", + }), + TypeError, + "unknown", + ); +}); + +Deno.test("createRedisStore() throws for invalid segmentsPerWindow", () => { + const redis = new MockRedis(); + assertThrows( + () => + createRedisStore({ + redis, + limit: 10, + window: 1000, + algorithm: 
"sliding-window", + segmentsPerWindow: 1, + }), + RangeError, + "segmentsPerWindow", + ); + assertThrows( + () => + createRedisStore({ + redis, + limit: 10, + window: 1000, + algorithm: "sliding-window", + segmentsPerWindow: 3, + }), + RangeError, + "divisible", + ); +}); + +Deno.test("createRedisStore() throws for invalid tokensPerPeriod", () => { + const redis = new MockRedis(); + assertThrows( + () => + createRedisStore({ + redis, + limit: 10, + window: 1000, + algorithm: "token-bucket", + tokensPerPeriod: 0, + }), + RangeError, + "tokensPerPeriod", + ); + assertThrows( + () => + createRedisStore({ + redis, + limit: 10, + window: 1000, + algorithm: "token-bucket", + tokensPerPeriod: 11, + }), + RangeError, + "tokensPerPeriod", + ); +}); + +// --- Store properties --- + +Deno.test("createRedisStore() exposes capacity and window", () => { + const redis = new MockRedis(); + const store = createRedisStore({ redis, limit: 42, window: 5000 }); + assertEquals(store.capacity, 42); + assertEquals(store.window, 5000); +}); + +// === Fixed Window === + +Deno.test("redis fixed-window: first request allowed", async () => { + const redis = new MockRedis(1000); + const store = createRedisStore({ + redis, + limit: 5, + window: 1000, + algorithm: "fixed-window", + }); + + const r = await store.consume("a", 1); + assert(r.ok); + assertEquals(r.remaining, 4); + assertEquals(r.limit, 5); + assertEquals(r.retryAfter, 0); +}); + +Deno.test("redis fixed-window: exhausting limit", async () => { + const redis = new MockRedis(1000); + const store = createRedisStore({ + redis, + limit: 3, + window: 1000, + algorithm: "fixed-window", + }); + + assert((await store.consume("a", 1)).ok); + assert((await store.consume("a", 1)).ok); + assert((await store.consume("a", 1)).ok); + + const r = await store.consume("a", 1); + assertFalse(r.ok); + assertEquals(r.remaining, 0); + assert(r.retryAfter > 0); +}); + +Deno.test("redis fixed-window: permits restore after window elapses", async () => { + 
const redis = new MockRedis(1000); + const store = createRedisStore({ + redis, + limit: 2, + window: 1000, + algorithm: "fixed-window", + }); + + await store.consume("a", 1); + await store.consume("a", 1); + assertFalse((await store.consume("a", 1)).ok); + + redis.now = 2000; + const r = await store.consume("a", 1); + assert(r.ok); + assertEquals(r.remaining, 1); +}); + +Deno.test("redis fixed-window: variable cost", async () => { + const redis = new MockRedis(1000); + const store = createRedisStore({ + redis, + limit: 10, + window: 1000, + algorithm: "fixed-window", + }); + + const r = await store.consume("a", 7); + assert(r.ok); + assertEquals(r.remaining, 3); + + assertFalse((await store.consume("a", 4)).ok); + assert((await store.consume("a", 3)).ok); +}); + +// === Sliding Window === + +Deno.test("redis sliding-window: permits freed incrementally", async () => { + const redis = new MockRedis(0); + const store = createRedisStore({ + redis, + limit: 4, + window: 400, + algorithm: "sliding-window", + segmentsPerWindow: 4, + }); + + await store.consume("a", 4); + assertFalse((await store.consume("a", 1)).ok); + + redis.now = 100; + assertFalse((await store.consume("a", 1)).ok); + + redis.now = 400; + assert((await store.consume("a", 4)).ok); +}); + +Deno.test("redis sliding-window: no boundary burst", async () => { + const redis = new MockRedis(0); + const store = createRedisStore({ + redis, + limit: 10, + window: 1000, + algorithm: "sliding-window", + segmentsPerWindow: 2, + }); + + await store.consume("a", 10); + + redis.now = 500; + assertFalse((await store.consume("a", 1)).ok); + + redis.now = 1000; + assert((await store.consume("a", 10)).ok); +}); + +// === Token Bucket === + +Deno.test("redis token-bucket: starts at full capacity", async () => { + const redis = new MockRedis(0); + const store = createRedisStore({ + redis, + limit: 5, + window: 1000, + algorithm: "token-bucket", + }); + + const r = await store.consume("a", 1); + assert(r.ok); + 
assertEquals(r.remaining, 4); +}); + +Deno.test("redis token-bucket: tokens refill lazily", async () => { + const redis = new MockRedis(0); + const store = createRedisStore({ + redis, + limit: 3, + window: 1000, + algorithm: "token-bucket", + tokensPerPeriod: 1, + }); + + await store.consume("a", 3); + assertFalse((await store.consume("a", 1)).ok); + + redis.now = 1000; + assert((await store.consume("a", 1)).ok); + assertFalse((await store.consume("a", 1)).ok); + + redis.now = 3000; + assert((await store.consume("a", 2)).ok); +}); + +Deno.test("redis token-bucket: refill capped at limit", async () => { + const redis = new MockRedis(0); + const store = createRedisStore({ + redis, + limit: 3, + window: 1000, + algorithm: "token-bucket", + tokensPerPeriod: 3, + }); + + await store.consume("a", 1); + redis.now = 10000; + const r = await store.consume("a", 1); + assert(r.ok); + assertEquals(r.remaining, 2); +}); + +Deno.test("redis token-bucket: retryAfter reflects time until enough tokens", async () => { + const redis = new MockRedis(0); + const store = createRedisStore({ + redis, + limit: 10, + window: 500, + algorithm: "token-bucket", + tokensPerPeriod: 2, + }); + + await store.consume("a", 10); + const r = await store.consume("a", 3); + assertFalse(r.ok); + assertEquals(r.retryAfter, 1000); +}); + +// === GCRA === + +Deno.test("redis gcra: first request always allowed", async () => { + const redis = new MockRedis(0); + const store = createRedisStore({ + redis, + limit: 10, + window: 1000, + algorithm: "gcra", + }); + + const r = await store.consume("a", 1); + assert(r.ok); + assertEquals(r.limit, 10); +}); + +Deno.test("redis gcra: requests spaced >= emission_interval apart always allowed", async () => { + const redis = new MockRedis(0); + const emissionInterval = 100; + const store = createRedisStore({ + redis, + limit: 10, + window: 1000, + algorithm: "gcra", + }); + + for (let i = 0; i < 20; i++) { + const r = await store.consume("a", 1); + assert(r.ok, `request 
${i} at now=${redis.now} should be allowed`); + redis.tick(emissionInterval); + } +}); + +Deno.test("redis gcra: burst up to limit", async () => { + const redis = new MockRedis(0); + const store = createRedisStore({ + redis, + limit: 5, + window: 1000, + algorithm: "gcra", + }); + + for (let i = 0; i < 5; i++) { + assert( + (await store.consume("a", 1)).ok, + `burst request ${i} should be allowed`, + ); + } + assertFalse((await store.consume("a", 1)).ok); +}); + +Deno.test("redis gcra: after burst, requests denied until tat drains", async () => { + const redis = new MockRedis(0); + const store = createRedisStore({ + redis, + limit: 5, + window: 1000, + algorithm: "gcra", + }); + + for (let i = 0; i < 5; i++) await store.consume("a", 1); + assertFalse((await store.consume("a", 1)).ok); + + redis.now = 200; + assert((await store.consume("a", 1)).ok); + assertFalse((await store.consume("a", 1)).ok); +}); + +Deno.test("redis gcra: retryAfter is exact", async () => { + const redis = new MockRedis(0); + const store = createRedisStore({ + redis, + limit: 5, + window: 1000, + algorithm: "gcra", + }); + + for (let i = 0; i < 5; i++) await store.consume("a", 1); + const r = await store.consume("a", 1); + assertFalse(r.ok); + assertEquals(r.retryAfter, 200); +}); + +Deno.test("redis gcra: variable cost", async () => { + const redis = new MockRedis(0); + const store = createRedisStore({ + redis, + limit: 10, + window: 1000, + algorithm: "gcra", + }); + + const r = await store.consume("a", 5); + assert(r.ok); + assertEquals(r.remaining, 5); + + assert((await store.consume("a", 5)).ok); + assertFalse((await store.consume("a", 1)).ok); +}); + +Deno.test("redis gcra: remaining derived correctly", async () => { + const redis = new MockRedis(0); + const store = createRedisStore({ + redis, + limit: 10, + window: 1000, + algorithm: "gcra", + }); + + const r1 = await store.consume("a", 1); + assert(r1.ok); + assertEquals(r1.remaining, 9); + + const r2 = await store.consume("a", 4); + 
assert(r2.ok); + assertEquals(r2.remaining, 5); +}); + +// === peek() === + +Deno.test("redis peek() does not consume permits (fixed-window)", async () => { + const redis = new MockRedis(0); + const store = createRedisStore({ + redis, + limit: 5, + window: 1000, + algorithm: "fixed-window", + }); + + await store.consume("a", 2); + + const p1 = await store.peek("a", 1); + assert(p1.ok); + assertEquals(p1.remaining, 3); + + const p2 = await store.peek("a", 1); + assertEquals(p2.remaining, 3); +}); + +Deno.test("redis peek() returns full capacity for unknown key (gcra)", async () => { + const redis = new MockRedis(0); + const store = createRedisStore({ + redis, + limit: 10, + window: 1000, + algorithm: "gcra", + }); + + const p = await store.peek("unknown", 1); + assert(p.ok); + assertEquals(p.remaining, 10); + assertEquals(p.limit, 10); +}); + +Deno.test("redis peek() does not consume permits (token-bucket)", async () => { + const redis = new MockRedis(0); + const store = createRedisStore({ + redis, + limit: 5, + window: 1000, + algorithm: "token-bucket", + }); + + await store.consume("a", 3); + const p = await store.peek("a", 1); + assert(p.ok); + assertEquals(p.remaining, 2); +}); + +Deno.test("redis peek() does not consume permits (sliding-window)", async () => { + const redis = new MockRedis(0); + const store = createRedisStore({ + redis, + limit: 5, + window: 1000, + algorithm: "sliding-window", + segmentsPerWindow: 5, + }); + + await store.consume("a", 3); + const p = await store.peek("a", 1); + assert(p.ok); + assertEquals(p.remaining, 2); +}); + +Deno.test("redis peek() does not consume permits (gcra)", async () => { + const redis = new MockRedis(0); + const store = createRedisStore({ + redis, + limit: 5, + window: 1000, + algorithm: "gcra", + }); + + await store.consume("a", 3); + const p = await store.peek("a", 1); + assert(p.ok); + assertEquals(p.remaining, 2); +}); + +// === reset() === + +Deno.test("redis reset() restores key to full capacity", async () 
=> { + const redis = new MockRedis(0); + const store = createRedisStore({ + redis, + limit: 3, + window: 1000, + algorithm: "gcra", + }); + + await store.consume("a", 3); + assertFalse((await store.consume("a", 1)).ok); + + await store.reset("a"); + assert((await store.consume("a", 1)).ok); +}); + +Deno.test("redis reset() on unknown key is a no-op", async () => { + const redis = new MockRedis(0); + const store = createRedisStore({ + redis, + limit: 5, + window: 1000, + algorithm: "fixed-window", + }); + + await store.reset("nonexistent"); +}); + +Deno.test("redis reset() works for fixed-window", async () => { + const redis = new MockRedis(0); + const store = createRedisStore({ + redis, + limit: 2, + window: 1000, + algorithm: "fixed-window", + }); + + await store.consume("a", 2); + assertFalse((await store.consume("a", 1)).ok); + + await store.reset("a"); + assert((await store.consume("a", 1)).ok); +}); + +Deno.test("redis reset() works for sliding-window", async () => { + const redis = new MockRedis(0); + const store = createRedisStore({ + redis, + limit: 2, + window: 1000, + algorithm: "sliding-window", + segmentsPerWindow: 2, + }); + + await store.consume("a", 2); + assertFalse((await store.consume("a", 1)).ok); + + await store.reset("a"); + assert((await store.consume("a", 1)).ok); +}); + +Deno.test("redis reset() works for token-bucket", async () => { + const redis = new MockRedis(0); + const store = createRedisStore({ + redis, + limit: 2, + window: 1000, + algorithm: "token-bucket", + }); + + await store.consume("a", 2); + assertFalse((await store.consume("a", 1)).ok); + + await store.reset("a"); + assert((await store.consume("a", 1)).ok); +}); + +// === Per-key isolation === + +Deno.test("redis keys are isolated", async () => { + const redis = new MockRedis(0); + const store = createRedisStore({ + redis, + limit: 2, + window: 1000, + algorithm: "gcra", + }); + + await store.consume("a", 2); + assertFalse((await store.consume("a", 1)).ok); + + assert((await 
store.consume("b", 1)).ok); + assert((await store.consume("b", 1)).ok); +}); + +// === Key prefix === + +Deno.test("redis store uses configurable prefix", async () => { + const redis = new MockRedis(0); + const store = createRedisStore({ + redis, + limit: 5, + window: 1000, + algorithm: "gcra", + prefix: "custom", + }); + + await store.consume("mykey", 1); + assert((await store.peek("mykey", 1)).ok); +}); + +Deno.test("redis store default prefix is 'rl'", async () => { + const redis = new MockRedis(0); + const store = createRedisStore({ + redis, + limit: 5, + window: 1000, + algorithm: "gcra", + }); + + await store.consume("mykey", 1); + assert((await store.peek("mykey", 1)).ok); +}); + +// === Integration with createRateLimiter === + +Deno.test("redis store works with createRateLimiter", async () => { + const redis = new MockRedis(0); + const store = createRedisStore({ + redis, + limit: 3, + window: 1000, + algorithm: "fixed-window", + }); + await using limiter = createRateLimiter({ store }); + + const r = await limiter.limit("a"); + assert(r.ok); + assertEquals(r.remaining, 2); + assertEquals(r.limit, 3); +}); + +Deno.test("redis store: createRateLimiter reads capacity/window from store", async () => { + const redis = new MockRedis(0); + const store = createRedisStore({ + redis, + limit: 42, + window: 5000, + algorithm: "gcra", + }); + await using limiter = createRateLimiter({ store }); + + const r = await limiter.limit("a"); + assert(r.ok); + assertEquals(r.limit, 42); +}); + +Deno.test("redis store: limiter cost validation with store", async () => { + const redis = new MockRedis(0); + const store = createRedisStore({ + redis, + limit: 5, + window: 1000, + algorithm: "gcra", + }); + await using limiter = createRateLimiter({ store }); + + assertThrows(() => limiter.limit("a", { cost: 0 }), RangeError, "cost"); + assertThrows(() => limiter.limit("a", { cost: 6 }), RangeError, "exceeds"); +}); + +// === sendCommand-based connection === + +/** + * Wraps a {@linkcode 
MockRedis} to expose only `sendCommand`, mimicking + * clients like `@iuioiua/redis` that use a single command method. + */ +class MockSendCommandRedis implements RedisSendCommandConnection { + #inner: MockRedis; + + constructor(inner: MockRedis) { + this.#inner = inner; + } + + get now(): number { + return this.#inner.now; + } + set now(ms: number) { + this.#inner.now = ms; + } + + sendCommand(args: readonly (string | number)[]): Promise { + const strs = args.map(String); + const cmd = strs[0]!.toUpperCase(); + if (cmd === "EVAL") { + const script = strs[1]!; + const numKeys = Number(strs[2]); + const keys = strs.slice(3, 3 + numKeys); + const rest = strs.slice(3 + numKeys); + return this.#inner.eval(script, keys, rest); + } + if (cmd === "EVALSHA") { + const sha = strs[1]!; + const numKeys = Number(strs[2]); + const keys = strs.slice(3, 3 + numKeys); + const rest = strs.slice(3 + numKeys); + return this.#inner.evalsha(sha, keys, rest); + } + return Promise.reject(new Error(`Unsupported command: ${cmd}`)); + } +} + +Deno.test("sendCommand connection: fixed-window works end-to-end", async () => { + const inner = new MockRedis(1000); + const redis = new MockSendCommandRedis(inner); + const store = createRedisStore({ + redis, + limit: 3, + window: 1000, + algorithm: "fixed-window", + }); + + assert((await store.consume("a", 1)).ok); + assert((await store.consume("a", 1)).ok); + assert((await store.consume("a", 1)).ok); + assertFalse((await store.consume("a", 1)).ok); + + redis.now = 2000; + const r = await store.consume("a", 1); + assert(r.ok); + assertEquals(r.remaining, 2); +}); + +Deno.test("sendCommand connection: gcra works end-to-end", async () => { + const inner = new MockRedis(0); + const redis = new MockSendCommandRedis(inner); + const store = createRedisStore({ + redis, + limit: 5, + window: 1000, + algorithm: "gcra", + }); + + for (let i = 0; i < 5; i++) { + assert((await store.consume("a", 1)).ok); + } + assertFalse((await store.consume("a", 1)).ok); +}); 
+ +Deno.test("sendCommand connection: peek and reset work", async () => { + const inner = new MockRedis(0); + const redis = new MockSendCommandRedis(inner); + const store = createRedisStore({ + redis, + limit: 5, + window: 1000, + algorithm: "fixed-window", + }); + + await store.consume("a", 3); + const p = await store.peek("a", 1); + assert(p.ok); + assertEquals(p.remaining, 2); + + await store.reset("a"); + const r = await store.consume("a", 1); + assert(r.ok); + assertEquals(r.remaining, 4); +}); + +Deno.test("sendCommand connection: works with createRateLimiter", async () => { + const inner = new MockRedis(0); + const redis = new MockSendCommandRedis(inner); + const store = createRedisStore({ + redis, + limit: 3, + window: 1000, + algorithm: "sliding-window", + segmentsPerWindow: 2, + }); + await using limiter = createRateLimiter({ store }); + + const r = await limiter.limit("a"); + assert(r.ok); + assertEquals(r.remaining, 2); + assertEquals(r.limit, 3); +}); + +// === Disposal === + +Deno.test("redis store disposal is a no-op", async () => { + const redis = new MockRedis(0); + const store = createRedisStore({ + redis, + limit: 5, + window: 1000, + algorithm: "gcra", + }); + + await store[Symbol.asyncDispose](); +}); + +// === EVALSHA fallback === + +Deno.test("redis store falls back from EVALSHA to EVAL on NOSCRIPT", async () => { + const redis = new MockRedis(0); + const store = createRedisStore({ + redis, + limit: 5, + window: 1000, + algorithm: "fixed-window", + }); + + const r = await store.consume("a", 1); + assert(r.ok); + assertEquals(r.remaining, 4); +}); diff --git a/rate_limit/sliding_window.ts b/rate_limit/sliding_window.ts new file mode 100644 index 000000000000..ad09ba780d19 --- /dev/null +++ b/rate_limit/sliding_window.ts @@ -0,0 +1,139 @@ +// Copyright 2018-2026 the Deno authors. MIT license. 
import type { QueueOptions, ReplenishingRateLimiter } from "./types.ts";
import { createReplenishingLimiter } from "./_replenishing_limiter.ts";
import { createSlidingWindowOps } from "./_algorithms.ts";
import {
  assertNonNegativeInteger,
  assertPositiveFinite,
  assertPositiveInteger,
} from "./_validation.ts";

/**
 * Options for {@linkcode createSlidingWindow}.
 *
 * @experimental **UNSTABLE**: New API, yet to be vetted.
 */
export interface SlidingWindowOptions extends QueueOptions {
  /** Maximum permits across the sliding window. */
  limit: number;
  /** Total window duration in milliseconds. */
  window: number;
  /**
   * How many segments the window is divided into. More segments smooth the
   * enforced rate but produce more frequent timer ticks. Must be at least
   * 2, since a single segment would behave like a fixed window.
   */
  segmentsPerWindow: number;
  /**
   * Whether to run an internal timer that rotates segments automatically.
   *
   * When `false`, rotation happens only through explicit calls to
   * {@linkcode ReplenishingRateLimiter.replenish}.
   *
   * @default {true}
   */
  autoReplenishment?: boolean;
  /**
   * Source of the current time in milliseconds. Swap in a fake clock for
   * deterministic testing.
   *
   * @default {Date.now}
   */
  clock?: () => number;
}

/**
 * Create a sliding window rate limiter. The window is split into segments
 * that rotate one at a time, so rate enforcement is smoother than with a
 * fixed window: a burst straddling a window boundary can never exceed the
 * permit limit.
 *
 * @experimental **UNSTABLE**: New API, yet to be vetted.
 *
 * @example Basic usage
 * ```ts
 * import { createSlidingWindow } from "@std/rate-limit/sliding-window";
 * import { assert } from "@std/assert";
 *
 * using limiter = createSlidingWindow({
 *   limit: 100,
 *   window: 60_000,
 *   segmentsPerWindow: 6,
 * });
 *
 * using lease = limiter.tryAcquire();
 * assert(lease.acquired);
 * ```
 *
 * @example Manual replenishment
 * ```ts no-assert
 * import { createSlidingWindow } from "@std/rate-limit/sliding-window";
 *
 * using limiter = createSlidingWindow({
 *   limit: 100,
 *   window: 60_000,
 *   segmentsPerWindow: 6,
 *   autoReplenishment: false,
 * });
 *
 * limiter.replenish();
 * ```
 *
 * @param options Configuration for the sliding window.
 * @returns A {@linkcode ReplenishingRateLimiter}.
 * @throws {RangeError} If any option fails validation.
 */
export function createSlidingWindow(
  options: SlidingWindowOptions,
): ReplenishingRateLimiter {
  const { limit, window, segmentsPerWindow } = options;
  const context = "sliding window";

  assertPositiveInteger(context, "limit", limit);
  assertPositiveFinite(context, "window", window);
  const segmentsValid = Number.isInteger(segmentsPerWindow) &&
    segmentsPerWindow >= 2;
  if (!segmentsValid) {
    throw new RangeError(
      `Cannot create sliding window: 'segmentsPerWindow' must be an integer >= 2, received ${segmentsPerWindow}`,
    );
  }
  // Segments must tile the window exactly so each tick is a whole segment.
  if (window % segmentsPerWindow !== 0) {
    throw new RangeError(
      `Cannot create sliding window: 'window' (${window}) must be evenly divisible by 'segmentsPerWindow' (${segmentsPerWindow})`,
    );
  }
  assertNonNegativeInteger(context, "queueLimit", options.queueLimit);

  const clock = options.clock ?? Date.now;
  const segmentMs = window / segmentsPerWindow;
  const ops = createSlidingWindowOps(limit, window, segmentsPerWindow);
  const state = ops.create(clock());
  // Timestamp seen by the most recent acquisition attempt; replenish()
  // advances it to the segment boundary it just rotated past.
  let observedNow = 0;

  return createReplenishingLimiter(
    {
      replenishmentPeriod: segmentMs,
      autoReplenishment: options.autoReplenishment ?? true,
      queueLimit: options.queueLimit ?? 0,
      queueOrder: options.queueOrder ?? "oldest-first",
    },
    {
      get permitLimit() {
        return ops.limit;
      },
      tryAcquirePermits(permits: number): boolean {
        observedNow = clock();
        ops.advance(state, observedNow);
        return ops.tryConsume(state, permits, observedNow);
      },
      replenish(): void {
        observedNow = state.segmentStart + segmentMs;
        ops.replenish(state);
      },
      computeRetryAfter(permits: number): number {
        return ops.computeRetryAfter(state, permits, observedNow);
      },
    },
  );
}
diff --git a/rate_limit/sliding_window_test.ts b/rate_limit/sliding_window_test.ts
new file mode 100644
index 000000000000..e0e7f1a525c0
--- /dev/null
+++ b/rate_limit/sliding_window_test.ts
@@ -0,0 +1,689 @@
// Copyright 2018-2026 the Deno authors. MIT license.

import {
  assert,
  assertEquals,
  assertFalse,
  assertRejects,
  assertThrows,
} from "@std/assert";
import { FakeTime } from "@std/testing/time";
import { createSlidingWindow } from "./sliding_window.ts";

// NOTE: most tests construct a FakeTime even when they never tick it so
// that the limiter's auto-replenishment timer runs on fake time; the
// `void time;` statements silence the unused-variable lint for those cases.

// --- Factory validation ---

Deno.test("createSlidingWindow() throws for invalid limit", () => {
  assertThrows(
    () =>
      createSlidingWindow({
        limit: 0,
        window: 1000,
        segmentsPerWindow: 2,
      }),
    RangeError,
    "limit",
  );
  assertThrows(
    () =>
      createSlidingWindow({
        limit: -1,
        window: 1000,
        segmentsPerWindow: 2,
      }),
    RangeError,
    "limit",
  );
  assertThrows(
    () =>
      createSlidingWindow({
        limit: 1.5,
        window: 1000,
        segmentsPerWindow: 2,
      }),
    RangeError,
    "limit",
  );
});

Deno.test("createSlidingWindow() throws for invalid window", () => {
  assertThrows(
    () =>
      createSlidingWindow({
        limit: 10,
        window: 0,
        segmentsPerWindow: 2,
      }),
    RangeError,
    "window",
  );
  assertThrows(
    () =>
      createSlidingWindow({
        limit: 10,
        window: -100,
        segmentsPerWindow: 2,
      }),
    RangeError,
    "window",
  );
});

Deno.test("createSlidingWindow() throws for invalid segmentsPerWindow", () => {
  assertThrows(
    () =>
      createSlidingWindow({
        limit: 10,
        window: 1000,
        segmentsPerWindow: 1,
      }),
    RangeError,
    "segmentsPerWindow",
  );
  assertThrows(
    () =>
      createSlidingWindow({
        limit: 10,
        window: 1000,
        segmentsPerWindow: 0,
      }),
    RangeError,
    "segmentsPerWindow",
  );
  assertThrows(
    () =>
      createSlidingWindow({
        limit: 10,
        window: 1000,
        segmentsPerWindow: 1.5,
      }),
    RangeError,
    "segmentsPerWindow",
  );
});

Deno.test("createSlidingWindow() throws when window is not divisible by segmentsPerWindow", () => {
  assertThrows(
    () =>
      createSlidingWindow({
        limit: 10,
        window: 1000,
        segmentsPerWindow: 3,
      }),
    RangeError,
    "divisible",
  );
});

Deno.test("createSlidingWindow() throws for invalid queueLimit", () => {
  assertThrows(
    () =>
      createSlidingWindow({
        limit: 10,
        window: 1000,
        segmentsPerWindow: 2,
        queueLimit: -1,
      }),
    RangeError,
    "queueLimit",
  );
});

// --- tryAcquire ---

Deno.test("tryAcquire() succeeds within the permit limit", () => {
  using time = new FakeTime(0);
  using limiter = createSlidingWindow({
    limit: 3,
    window: 1000,
    segmentsPerWindow: 2,
  });
  void time;

  assert(limiter.tryAcquire().acquired);
  assert(limiter.tryAcquire().acquired);
  assert(limiter.tryAcquire().acquired);
  assertFalse(limiter.tryAcquire().acquired);
});

Deno.test("tryAcquire() acquires multiple permits at once", () => {
  using time = new FakeTime(0);
  using limiter = createSlidingWindow({
    limit: 5,
    window: 1000,
    segmentsPerWindow: 2,
  });
  void time;

  assert(limiter.tryAcquire(3).acquired);
  assertFalse(limiter.tryAcquire(3).acquired);
  assert(limiter.tryAcquire(2).acquired);
});

Deno.test("tryAcquire() rejects with retryAfter equal to segment duration", () => {
  using time = new FakeTime(0);
  using limiter = createSlidingWindow({
    limit: 1,
    window: 1000,
    segmentsPerWindow: 4,
  });
  void time;

  limiter.tryAcquire();
  const lease = limiter.tryAcquire();
  assertFalse(lease.acquired);
  // 1000ms window / 4 segments = 250ms until the next rotation.
  assertEquals(lease.retryAfter, 250);
});

Deno.test("tryAcquire() throws for invalid permits", () => {
  using time = new FakeTime(0);
  using limiter = createSlidingWindow({
    limit: 5,
    window: 1000,
    segmentsPerWindow: 2,
  });
  void time;

  assertThrows(() => limiter.tryAcquire(0), RangeError);
  assertThrows(() => limiter.tryAcquire(-1), RangeError);
  assertThrows(() => limiter.tryAcquire(1.5), RangeError);
});

Deno.test("tryAcquire() throws when permits exceed limit", () => {
  using time = new FakeTime(0);
  using limiter = createSlidingWindow({
    limit: 5,
    window: 1000,
    segmentsPerWindow: 2,
  });
  void time;

  assertThrows(() => limiter.tryAcquire(6), RangeError, "exceeds");
});

// --- Sliding behavior ---

Deno.test("permits consumed in segment 0 free after N segment rotations", () => {
  using time = new FakeTime(0);
  // 4 segments, each 250ms. Full window = 1000ms.
  using limiter = createSlidingWindow({
    limit: 4,
    window: 1000,
    segmentsPerWindow: 4,
  });

  // Fill all permits in segment 0
  limiter.tryAcquire(4);
  assertFalse(limiter.tryAcquire().acquired);

  // After 1 segment rotation (250ms), segment 0 is still in the window
  time.tick(250);
  assertFalse(limiter.tryAcquire().acquired);

  // After 2 rotations (500ms), segment 0 still in window
  time.tick(250);
  assertFalse(limiter.tryAcquire().acquired);

  // After 3 rotations (750ms), segment 0 still in window
  time.tick(250);
  assertFalse(limiter.tryAcquire().acquired);

  // After 4 rotations (1000ms), segment 0 is evicted — permits freed
  time.tick(250);
  assert(limiter.tryAcquire(4).acquired);
});

Deno.test("sliding window prevents boundary burst that fixed window allows", () => {
  using time = new FakeTime(0);
  // 2 segments of 500ms each, limit 10.
  using limiter = createSlidingWindow({
    limit: 10,
    window: 1000,
    segmentsPerWindow: 2,
  });

  // Use all 10 permits in segment 0
  limiter.tryAcquire(10);
  assertFalse(limiter.tryAcquire().acquired);

  // After one segment rotation (500ms), only segment 0's permits are still
  // counted. A fixed window would have reset entirely, allowing 10 more.
  // The sliding window only frees what was in the evicted segment — nothing
  // yet, because segment 0 hasn't been evicted (it's now the "oldest" of 2).
  time.tick(500);
  assertFalse(limiter.tryAcquire().acquired);

  // After the second rotation (1000ms total), segment 0 is finally evicted.
  time.tick(500);
  assert(limiter.tryAcquire(10).acquired);
});

Deno.test("permits spread across segments free incrementally", () => {
  using time = new FakeTime(0);
  // 3 segments of 100ms each, limit 6.
  using limiter = createSlidingWindow({
    limit: 6,
    window: 300,
    segmentsPerWindow: 3,
  });

  // Segment 0: use 2
  limiter.tryAcquire(2);
  // Segment 1: use 2
  time.tick(100);
  limiter.tryAcquire(2);
  // Segment 2: use 2 — now at limit
  time.tick(100);
  limiter.tryAcquire(2);
  assertFalse(limiter.tryAcquire().acquired);

  // Rotate once: evicts segment 0 (2 permits), freeing 2
  time.tick(100);
  assert(limiter.tryAcquire(2).acquired);
  assertFalse(limiter.tryAcquire().acquired);

  // Rotate again: evicts segment 1 (2 permits), freeing 2
  time.tick(100);
  assert(limiter.tryAcquire(2).acquired);
  assertFalse(limiter.tryAcquire().acquired);
});

// --- Manual replenishment ---

Deno.test("replenish() throws when autoReplenishment is true", () => {
  using time = new FakeTime(0);
  using limiter = createSlidingWindow({
    limit: 5,
    window: 1000,
    segmentsPerWindow: 2,
  });
  void time;

  assertThrows(
    () => limiter.replenish(),
    Error,
    "Cannot replenish: limiter uses automatic replenishment",
  );
});

Deno.test("replenish() rotates a segment when autoReplenishment is false", () => {
  using limiter = createSlidingWindow({
    limit: 4,
    window: 1000,
    segmentsPerWindow: 4,
    autoReplenishment: false,
  });

  limiter.tryAcquire(4);
  assertFalse(limiter.tryAcquire().acquired);

  // Each replenish() rotates one segment. Need 4 rotations to evict segment 0.
  limiter.replenish();
  assertFalse(limiter.tryAcquire().acquired);
  limiter.replenish();
  assertFalse(limiter.tryAcquire().acquired);
  limiter.replenish();
  assertFalse(limiter.tryAcquire().acquired);
  limiter.replenish();
  assert(limiter.tryAcquire(4).acquired);
});

// --- acquire (async) ---

Deno.test("acquire() resolves immediately when permits available", async () => {
  using time = new FakeTime(0);
  using limiter = createSlidingWindow({
    limit: 5,
    window: 1000,
    segmentsPerWindow: 2,
  });
  void time;

  const lease = await limiter.acquire();
  assert(lease.acquired);
});

Deno.test("acquire() returns rejected lease when queue limit is 0", async () => {
  using time = new FakeTime(0);
  using limiter = createSlidingWindow({
    limit: 1,
    window: 1000,
    segmentsPerWindow: 2,
    queueLimit: 0,
  });
  void time;

  limiter.tryAcquire();
  const lease = await limiter.acquire();
  assertFalse(lease.acquired);
  assertEquals(lease.reason, "Queue limit exceeded");
});

Deno.test("acquire() queues and resolves after segment rotation frees capacity", async () => {
  using time = new FakeTime(0);
  // 2 segments of 500ms, limit 1
  using limiter = createSlidingWindow({
    limit: 1,
    window: 1000,
    segmentsPerWindow: 2,
    queueLimit: 5,
  });

  limiter.tryAcquire();

  let resolved = false;
  const promise = limiter.acquire().then((lease) => {
    resolved = true;
    return lease;
  });

  await Promise.resolve();
  assertFalse(resolved);

  // First rotation doesn't evict the segment with the permit yet
  time.tick(500);
  await Promise.resolve();
  assertFalse(resolved);

  // Second rotation evicts it
  time.tick(500);
  const lease = await promise;
  assert(resolved);
  assert(lease.acquired);
});

Deno.test("acquire() rejects when aborted via signal", async () => {
  using time = new FakeTime(0);
  using limiter = createSlidingWindow({
    limit: 1,
    window: 1000,
    segmentsPerWindow: 2,
    queueLimit: 5,
  });
  void time;

  limiter.tryAcquire();

  const controller = new AbortController();
  const promise = limiter.acquire(1, { signal: controller.signal });
  controller.abort();

  await assertRejects(() => promise, DOMException);
});

Deno.test("acquire() rejects when signal is already aborted", async () => {
  using time = new FakeTime(0);
  using limiter = createSlidingWindow({
    limit: 1,
    window: 1000,
    segmentsPerWindow: 2,
    queueLimit: 5,
  });
  void time;

  limiter.tryAcquire();

  await assertRejects(
    () => limiter.acquire(1, { signal: AbortSignal.abort() }),
    DOMException,
  );
});

// --- Disposal ---

Deno.test("dispose resolves queued waiters with rejected leases", async () => {
  using time = new FakeTime(0);
  const limiter = createSlidingWindow({
    limit: 1,
    window: 1000,
    segmentsPerWindow: 2,
    queueLimit: 5,
  });
  void time;

  limiter.tryAcquire();
  const promise = limiter.acquire();
  limiter[Symbol.dispose]();

  const lease = await promise;
  assertFalse(lease.acquired);
  assertEquals(lease.reason, "Rate limiter has been disposed");
});

Deno.test("tryAcquire() returns rejected lease after disposal", () => {
  using time = new FakeTime(0);
  const limiter = createSlidingWindow({
    limit: 5,
    window: 1000,
    segmentsPerWindow: 2,
  });
  void time;

  limiter[Symbol.dispose]();
  const lease = limiter.tryAcquire();
  assertFalse(lease.acquired);
});

Deno.test("acquire() rejects after disposal", async () => {
  using time = new FakeTime(0);
  const limiter = createSlidingWindow({
    limit: 5,
    window: 1000,
    segmentsPerWindow: 2,
  });
  void time;

  limiter[Symbol.dispose]();
  await assertRejects(() => limiter.acquire(), Error, "disposed");
});

// --- Queue ordering ---

Deno.test("oldest-first queue resolves waiters in FIFO order", async () => {
  using time = new FakeTime(0);
  // 2 segments of 500ms, limit 1.
  using limiter = createSlidingWindow({
    limit: 1,
    window: 1000,
    segmentsPerWindow: 2,
    queueLimit: 10,
    queueOrder: "oldest-first",
  });

  limiter.tryAcquire();

  const order: number[] = [];
  const p1 = limiter.acquire().then((l) => {
    order.push(1);
    return l;
  });
  const p2 = limiter.acquire().then((l) => {
    order.push(2);
    return l;
  });

  // 4 segment rotations total: first 2 free the original permit (p1 served),
  // next 2 free p1's permit (p2 served).
  time.tick(2000);
  await p1;
  await p2;

  assertEquals(order, [1, 2]);
});

Deno.test("newest-first queue resolves newest waiter first", async () => {
  using time = new FakeTime(0);
  // 4 segments of 250ms, limit 2. Two permits available at start.
  using limiter = createSlidingWindow({
    limit: 2,
    window: 1000,
    segmentsPerWindow: 4,
    queueLimit: 10,
    queueOrder: "newest-first",
  });

  limiter.tryAcquire(2);

  const order: number[] = [];
  const p1 = limiter.acquire().then((l) => {
    order.push(1);
    return l;
  });
  const p2 = limiter.acquire().then((l) => {
    order.push(2);
    return l;
  });

  // 4 rotations evicts segment 0 (2 permits). newest-first serves p2 first.
  time.tick(1000);
  await p2;
  await p1;

  assertEquals(order, [2, 1]);
});

// --- Eviction ---

Deno.test("newest-first queue evicts oldest waiter when queue is full", async () => {
  using time = new FakeTime(0);
  // 4 segments of 250ms, limit 3, queue holds 2
  using limiter = createSlidingWindow({
    limit: 3,
    window: 1000,
    segmentsPerWindow: 4,
    queueLimit: 2,
    queueOrder: "newest-first",
  });

  limiter.tryAcquire(3);

  const results: string[] = [];
  const p1 = limiter.acquire().then((l) => {
    results.push(l.acquired ? "p1:acquired" : `p1:${l.reason}`);
    return l;
  });
  const p2 = limiter.acquire().then((l) => {
    results.push(l.acquired ? "p2:acquired" : `p2:${l.reason}`);
    return l;
  });
  const p3 = limiter.acquire().then((l) => {
    results.push(l.acquired ? "p3:acquired" : `p3:${l.reason}`);
    return l;
  });

  // p3's arrival overflows the 2-slot queue, evicting p1 immediately.
  await p1;
  assertEquals(results, ["p1:Evicted by newer request"]);

  // 4 rotations evicts segment 0 (3 permits freed). newest-first: p3 then p2.
  time.tick(1000);
  await p3;
  await p2;

  assertEquals(results, [
    "p1:Evicted by newer request",
    "p3:acquired",
    "p2:acquired",
  ]);
});

// --- acquire() validation ---

Deno.test("acquire() rejects for invalid permits", async () => {
  using time = new FakeTime(0);
  using limiter = createSlidingWindow({
    limit: 5,
    window: 1000,
    segmentsPerWindow: 2,
  });
  void time;

  await assertRejects(() => limiter.acquire(0), RangeError);
  await assertRejects(() => limiter.acquire(-1), RangeError);
  await assertRejects(() => limiter.acquire(1.5), RangeError);
});

Deno.test("acquire() rejects when permits exceed limit", async () => {
  using time = new FakeTime(0);
  using limiter = createSlidingWindow({
    limit: 5,
    window: 1000,
    segmentsPerWindow: 2,
  });
  void time;

  await assertRejects(() => limiter.acquire(6), RangeError, "exceeds");
});

// --- Multiple waiters resolved in single replenishment ---

Deno.test("single replenishment resolves multiple queued waiters", async () => {
  using time = new FakeTime(0);
  // 2 segments of 500ms, limit 3.
  using limiter = createSlidingWindow({
    limit: 3,
    window: 1000,
    segmentsPerWindow: 2,
    queueLimit: 10,
  });

  limiter.tryAcquire(3);

  const order: number[] = [];
  const p1 = limiter.acquire(1).then((l) => {
    order.push(1);
    return l;
  });
  const p2 = limiter.acquire(1).then((l) => {
    order.push(2);
    return l;
  });
  const p3 = limiter.acquire(1).then((l) => {
    order.push(3);
    return l;
  });

  await Promise.resolve();
  assertEquals(order, []);

  // 2 rotations evicts segment 0 (3 permits freed), all 3 waiters drain at once
  time.tick(1000);
  await Promise.all([p1, p2, p3]);

  assertEquals(order, [1, 2, 3]);
  for (const p of [p1, p2, p3]) {
    assert((await p).acquired);
  }
});

// --- Queue edge cases ---

Deno.test("acquire() rejects when permits exceed queueLimit even if queue is empty", async () => {
  using time = new FakeTime(0);
  using limiter = createSlidingWindow({
    limit: 5,
    window: 1000,
    segmentsPerWindow: 2,
    queueLimit: 2,
  });
  void time;

  limiter.tryAcquire(5);

  const lease = await limiter.acquire(3);
  assertFalse(lease.acquired);
  assertEquals(lease.reason, "Queue limit exceeded");
});

// --- Double dispose ---

Deno.test("double dispose is a no-op", () => {
  using time = new FakeTime(0);
  const limiter = createSlidingWindow({
    limit: 5,
    window: 1000,
    segmentsPerWindow: 2,
  });
  void time;

  limiter[Symbol.dispose]();
  limiter[Symbol.dispose]();
});
diff --git a/rate_limit/store_types.ts b/rate_limit/store_types.ts
new file mode 100644
index 000000000000..47423438c266
--- /dev/null
+++ b/rate_limit/store_types.ts
@@ -0,0 +1,110 @@
// Copyright 2018-2026 the Deno authors. MIT license.
// This module is browser compatible.

/**
 * The result of a rate limit check. All fields are present regardless of
 * whether the request was allowed.
 *
 * @experimental **UNSTABLE**: New API, yet to be vetted.
+ */ +export interface RateLimitResult { + /** Whether the request is allowed. */ + readonly ok: boolean; + /** Best-effort estimate of remaining permits for this key. */ + readonly remaining: number; + /** + * Timestamp (milliseconds since epoch) of the next replenishment event + * (segment rotation, window boundary, or refill cycle). This is *not* + * necessarily when full capacity is restored — for sliding-window and + * token-bucket it may take multiple replenishment cycles. For GCRA this + * is the theoretical arrival time (TAT) at which full burst capacity is + * restored. Useful for the `X-RateLimit-Reset` HTTP header. + */ + readonly resetAt: number; + /** + * Minimum retry delay in milliseconds. `0` when the request is allowed. + * This is the earliest point at which capacity *may* free up. For + * sliding-window, this reflects the next segment rotation and may not + * free enough permits for a high-cost request. For token-bucket and GCRA + * the value accounts for the requested cost. Useful for the + * `Retry-After` HTTP header. + */ + readonly retryAfter: number; + /** The limit configured for this limiter. */ + readonly limit: number; +} + +/** + * Algorithm configuration shared by all store backends. + * + * @experimental **UNSTABLE**: New API, yet to be vetted. + */ +export interface AlgorithmOptions { + /** Maximum permits per key per window/cycle. */ + limit: number; + /** Window duration in milliseconds. */ + window: number; + /** + * Algorithm to use. + * + * @default {"sliding-window"} + */ + algorithm?: "fixed-window" | "sliding-window" | "token-bucket" | "gcra"; + /** + * Number of segments for the sliding window algorithm. + * + * @default {10} + */ + segmentsPerWindow?: number; + /** + * For token bucket: tokens added per replenishment period. + * + * @default {limit} + */ + tokensPerPeriod?: number; +} + +/** + * A pluggable backend for keyed rate limiting. 
Stores own the per-key + * algorithm state and are self-contained: they carry `capacity` and `window` + * so `createRateLimiter` reads configuration from the store rather than + * duplicating it. + * + * Each store owns its own time source. In-memory stores default to + * `Date.now` (overridable via `clock` for `FakeTime` testing); distributed + * stores (e.g. Redis) use server-side time. + * + * @experimental **UNSTABLE**: New API, yet to be vetted. + */ +export interface RateLimitStore extends AsyncDisposable { + /** The configured permit limit per key per window. */ + readonly capacity: number; + /** The window duration in milliseconds. */ + readonly window: number; + + /** + * Check and consume permits for a key. + * + * @param key Identifier for the rate limit subject. + * @param cost Number of permits to consume. + * @returns The rate limit decision and metadata. + */ + consume(key: string, cost: number): Promise; + + /** + * Check the current state for a key without consuming any permits. + * + * @param key Identifier for the rate limit subject. + * @param cost Number of permits to check. + * @returns The rate limit decision and metadata. + */ + peek(key: string, cost: number): Promise; + + /** + * Reset all state for a key, restoring it to full capacity. + * + * @param key Identifier for the rate limit subject. + * @returns Resolves when the key has been reset. + */ + reset(key: string): Promise; +} diff --git a/rate_limit/token_bucket.ts b/rate_limit/token_bucket.ts new file mode 100644 index 000000000000..9470f7e8f9ff --- /dev/null +++ b/rate_limit/token_bucket.ts @@ -0,0 +1,133 @@ +// Copyright 2018-2026 the Deno authors. MIT license. 
+ +import type { QueueOptions, ReplenishingRateLimiter } from "./types.ts"; +import { createReplenishingLimiter } from "./_replenishing_limiter.ts"; +import { createTokenBucketOps } from "./_algorithms.ts"; +import { + assertNonNegativeInteger, + assertPositiveFinite, + assertPositiveInteger, +} from "./_validation.ts"; + +/** + * Options for {@linkcode createTokenBucket}. + * + * @experimental **UNSTABLE**: New API, yet to be vetted. + */ +export interface TokenBucketOptions extends QueueOptions { + /** Maximum tokens the bucket can hold. */ + limit: number; + /** Tokens added each replenishment period. */ + tokensPerPeriod: number; + /** Replenishment interval in milliseconds. */ + replenishmentPeriod: number; + /** + * Start an internal timer for automatic replenishment. + * + * When `false`, call {@linkcode ReplenishingRateLimiter.replenish} + * manually. + * + * @default {true} + */ + autoReplenishment?: boolean; + /** + * Clock function returning the current time in milliseconds. Override + * for deterministic testing. + * + * @default {Date.now} + */ + clock?: () => number; +} + +/** + * Create a token bucket rate limiter. Tokens are added periodically, making + * this strategy ideal for smoothing bursty traffic. + * + * @experimental **UNSTABLE**: New API, yet to be vetted. 
+ * + * @example Basic usage + * ```ts + * import { createTokenBucket } from "@std/rate-limit/token-bucket"; + * import { assert } from "@std/assert"; + * + * using limiter = createTokenBucket({ + * limit: 10, + * tokensPerPeriod: 1, + * replenishmentPeriod: 1000, + * }); + * + * using lease = limiter.tryAcquire(); + * assert(lease.acquired); + * ``` + * + * @example Manual replenishment + * ```ts no-assert + * import { createTokenBucket } from "@std/rate-limit/token-bucket"; + * + * using limiter = createTokenBucket({ + * limit: 10, + * tokensPerPeriod: 5, + * replenishmentPeriod: 1000, + * autoReplenishment: false, + * }); + * + * limiter.replenish(); + * ``` + * + * @param options Configuration for the token bucket. + * @returns A {@linkcode ReplenishingRateLimiter}. + */ +export function createTokenBucket( + options: TokenBucketOptions, +): ReplenishingRateLimiter { + const context = "token bucket"; + assertPositiveInteger(context, "limit", options.limit); + assertPositiveInteger(context, "tokensPerPeriod", options.tokensPerPeriod); + assertPositiveFinite( + context, + "replenishmentPeriod", + options.replenishmentPeriod, + ); + if (options.tokensPerPeriod > options.limit) { + throw new RangeError( + `Cannot create token bucket: 'tokensPerPeriod' (${options.tokensPerPeriod}) exceeds 'limit' (${options.limit})`, + ); + } + assertNonNegativeInteger(context, "queueLimit", options.queueLimit); + + const { limit, tokensPerPeriod, replenishmentPeriod } = options; + const clock = options.clock ?? Date.now; + const ops = createTokenBucketOps( + limit, + replenishmentPeriod, + tokensPerPeriod, + ); + const state = ops.create(clock()); + let lastNow = 0; + + return createReplenishingLimiter( + { + replenishmentPeriod, + autoReplenishment: options.autoReplenishment ?? true, + queueLimit: options.queueLimit ?? 0, + queueOrder: options.queueOrder ?? 
"oldest-first", + }, + { + get permitLimit() { + return ops.limit; + }, + tryAcquirePermits(permits: number): boolean { + lastNow = clock(); + ops.advance(state, lastNow); + return ops.tryConsume(state, permits, lastNow); + }, + replenish(): void { + lastNow = state.lastRefill + replenishmentPeriod; + ops.replenish(state); + }, + computeRetryAfter(permits: number): number { + return ops.computeRetryAfter(state, permits, lastNow); + }, + }, + ); +} diff --git a/rate_limit/token_bucket_test.ts b/rate_limit/token_bucket_test.ts new file mode 100644 index 000000000000..18f58cc0c09f --- /dev/null +++ b/rate_limit/token_bucket_test.ts @@ -0,0 +1,803 @@ +// Copyright 2018-2026 the Deno authors. MIT license. + +import { + assert, + assertEquals, + assertFalse, + assertRejects, + assertThrows, +} from "@std/assert"; +import { FakeTime } from "@std/testing/time"; +import { createTokenBucket } from "./token_bucket.ts"; + +// --- Factory validation --- + +Deno.test("createTokenBucket() throws for invalid limit", () => { + assertThrows( + () => + createTokenBucket({ + limit: 0, + tokensPerPeriod: 1, + replenishmentPeriod: 1000, + }), + RangeError, + "limit", + ); + assertThrows( + () => + createTokenBucket({ + limit: -1, + tokensPerPeriod: 1, + replenishmentPeriod: 1000, + }), + RangeError, + "limit", + ); + assertThrows( + () => + createTokenBucket({ + limit: 1.5, + tokensPerPeriod: 1, + replenishmentPeriod: 1000, + }), + RangeError, + "limit", + ); +}); + +Deno.test("createTokenBucket() throws for invalid tokensPerPeriod", () => { + assertThrows( + () => + createTokenBucket({ + limit: 10, + tokensPerPeriod: 0, + replenishmentPeriod: 1000, + }), + RangeError, + "tokensPerPeriod", + ); +}); + +Deno.test("createTokenBucket() throws for invalid replenishmentPeriod", () => { + assertThrows( + () => + createTokenBucket({ + limit: 10, + tokensPerPeriod: 1, + replenishmentPeriod: 0, + }), + RangeError, + "replenishmentPeriod", + ); + assertThrows( + () => + createTokenBucket({ + 
limit: 10, + tokensPerPeriod: 1, + replenishmentPeriod: -100, + }), + RangeError, + "replenishmentPeriod", + ); +}); + +Deno.test("createTokenBucket() throws when tokensPerPeriod exceeds limit", () => { + assertThrows( + () => + createTokenBucket({ + limit: 5, + tokensPerPeriod: 10, + replenishmentPeriod: 1000, + }), + RangeError, + "tokensPerPeriod", + ); +}); + +Deno.test("createTokenBucket() throws for invalid queueLimit", () => { + assertThrows( + () => + createTokenBucket({ + limit: 10, + tokensPerPeriod: 1, + replenishmentPeriod: 1000, + queueLimit: -1, + }), + RangeError, + "queueLimit", + ); +}); + +// --- tryAcquire --- + +Deno.test("tryAcquire() succeeds when tokens are available", () => { + using time = new FakeTime(0); + using limiter = createTokenBucket({ + limit: 5, + tokensPerPeriod: 1, + replenishmentPeriod: 1000, + }); + void time; + + const lease = limiter.tryAcquire(); + assert(lease.acquired); +}); + +Deno.test("tryAcquire() acquires multiple permits", () => { + using time = new FakeTime(0); + using limiter = createTokenBucket({ + limit: 5, + tokensPerPeriod: 1, + replenishmentPeriod: 1000, + }); + void time; + + const lease = limiter.tryAcquire(3); + assert(lease.acquired); + + const lease2 = limiter.tryAcquire(3); + assertFalse(lease2.acquired); +}); + +Deno.test("tryAcquire() returns rejected lease when tokens exhausted", () => { + using time = new FakeTime(0); + using limiter = createTokenBucket({ + limit: 1, + tokensPerPeriod: 1, + replenishmentPeriod: 1000, + }); + void time; + + const first = limiter.tryAcquire(); + assert(first.acquired); + + const second = limiter.tryAcquire(); + assertFalse(second.acquired); + assert(second.retryAfter > 0); + assertEquals(second.reason, "Insufficient permits"); +}); + +Deno.test("tryAcquire() throws for invalid permits", () => { + using time = new FakeTime(0); + using limiter = createTokenBucket({ + limit: 5, + tokensPerPeriod: 1, + replenishmentPeriod: 1000, + }); + void time; + + assertThrows(() => 
limiter.tryAcquire(0), RangeError); + assertThrows(() => limiter.tryAcquire(-1), RangeError); + assertThrows(() => limiter.tryAcquire(1.5), RangeError); +}); + +Deno.test("tryAcquire() throws when permits exceed limit", () => { + using time = new FakeTime(0); + using limiter = createTokenBucket({ + limit: 5, + tokensPerPeriod: 1, + replenishmentPeriod: 1000, + }); + void time; + + assertThrows(() => limiter.tryAcquire(6), RangeError, "exceeds"); +}); + +// --- Replenishment --- + +Deno.test("tokens replenish after the configured period", () => { + using time = new FakeTime(0); + using limiter = createTokenBucket({ + limit: 2, + tokensPerPeriod: 1, + replenishmentPeriod: 1000, + }); + + limiter.tryAcquire(); + limiter.tryAcquire(); + assertFalse(limiter.tryAcquire().acquired); + + time.tick(1000); + assert(limiter.tryAcquire().acquired); +}); + +Deno.test("tokens do not exceed limit after replenishment", () => { + using time = new FakeTime(0); + using limiter = createTokenBucket({ + limit: 2, + tokensPerPeriod: 2, + replenishmentPeriod: 1000, + }); + + time.tick(5000); + + assert(limiter.tryAcquire(2).acquired); + assertFalse(limiter.tryAcquire().acquired); +}); + +// --- Manual replenishment --- + +Deno.test("replenish() throws when autoReplenishment is true", () => { + using time = new FakeTime(0); + using limiter = createTokenBucket({ + limit: 5, + tokensPerPeriod: 1, + replenishmentPeriod: 1000, + }); + void time; + + assertThrows( + () => limiter.replenish(), + Error, + "Cannot replenish: limiter uses automatic replenishment", + ); +}); + +Deno.test("replenish() replenishes when autoReplenishment is false", () => { + const limiter = createTokenBucket({ + limit: 5, + tokensPerPeriod: 2, + replenishmentPeriod: 1000, + autoReplenishment: false, + }); + + for (let i = 0; i < 5; i++) limiter.tryAcquire(); + assertFalse(limiter.tryAcquire().acquired); + + limiter.replenish(); + assert(limiter.tryAcquire().acquired); + assert(limiter.tryAcquire().acquired); + 
assertFalse(limiter.tryAcquire().acquired); + + limiter[Symbol.dispose](); +}); + +Deno.test("replenish() drains queued acquire() waiters", async () => { + const limiter = createTokenBucket({ + limit: 2, + tokensPerPeriod: 2, + replenishmentPeriod: 1000, + autoReplenishment: false, + queueLimit: 5, + }); + + limiter.tryAcquire(2); + + let resolved = false; + const promise = limiter.acquire().then((lease) => { + resolved = true; + return lease; + }); + + await Promise.resolve(); + assertFalse(resolved); + + limiter.replenish(); + const lease = await promise; + assert(resolved); + assert(lease.acquired); + + limiter[Symbol.dispose](); +}); + +// --- acquire (async) --- + +Deno.test("acquire() resolves immediately when tokens available", async () => { + using time = new FakeTime(0); + using limiter = createTokenBucket({ + limit: 5, + tokensPerPeriod: 1, + replenishmentPeriod: 1000, + }); + void time; + + const lease = await limiter.acquire(); + assert(lease.acquired); +}); + +Deno.test("acquire() returns rejected lease when queue limit is 0", async () => { + using time = new FakeTime(0); + using limiter = createTokenBucket({ + limit: 1, + tokensPerPeriod: 1, + replenishmentPeriod: 1000, + queueLimit: 0, + }); + void time; + + limiter.tryAcquire(); + const lease = await limiter.acquire(); + assertFalse(lease.acquired); + assertEquals(lease.reason, "Queue limit exceeded"); +}); + +Deno.test("acquire() queues and resolves after replenishment", async () => { + using time = new FakeTime(0); + using limiter = createTokenBucket({ + limit: 1, + tokensPerPeriod: 1, + replenishmentPeriod: 1000, + queueLimit: 5, + }); + + limiter.tryAcquire(); + + let resolved = false; + const promise = limiter.acquire().then((lease) => { + resolved = true; + return lease; + }); + + await Promise.resolve(); + assertFalse(resolved); + + time.tick(1000); + const lease = await promise; + assert(resolved); + assert(lease.acquired); +}); + +Deno.test("acquire() rejects when aborted via signal", async 
() => { + using time = new FakeTime(0); + using limiter = createTokenBucket({ + limit: 1, + tokensPerPeriod: 1, + replenishmentPeriod: 1000, + queueLimit: 5, + }); + void time; + + limiter.tryAcquire(); + + const controller = new AbortController(); + const promise = limiter.acquire(1, { signal: controller.signal }); + controller.abort(); + + await assertRejects(() => promise, DOMException); +}); + +Deno.test("acquire() rejects when signal is already aborted", async () => { + using time = new FakeTime(0); + using limiter = createTokenBucket({ + limit: 1, + tokensPerPeriod: 1, + replenishmentPeriod: 1000, + queueLimit: 5, + }); + void time; + + limiter.tryAcquire(); + + await assertRejects( + () => limiter.acquire(1, { signal: AbortSignal.abort() }), + DOMException, + ); +}); + +// --- retryAfter --- + +Deno.test("retryAfter reflects the deficit in tokens", () => { + using time = new FakeTime(0); + using limiter = createTokenBucket({ + limit: 10, + tokensPerPeriod: 2, + replenishmentPeriod: 500, + }); + void time; + + for (let i = 0; i < 10; i++) limiter.tryAcquire(); + + const lease = limiter.tryAcquire(3); + assertFalse(lease.acquired); + assertEquals(lease.retryAfter, 1000); +}); + +// --- Disposal --- + +Deno.test("dispose resolves queued waiters with rejected leases", async () => { + using time = new FakeTime(0); + const limiter = createTokenBucket({ + limit: 1, + tokensPerPeriod: 1, + replenishmentPeriod: 1000, + queueLimit: 5, + }); + void time; + + limiter.tryAcquire(); + const promise = limiter.acquire(); + limiter[Symbol.dispose](); + + const lease = await promise; + assertFalse(lease.acquired); + assertEquals(lease.reason, "Rate limiter has been disposed"); +}); + +Deno.test("tryAcquire() returns rejected lease after disposal", () => { + using time = new FakeTime(0); + const limiter = createTokenBucket({ + limit: 5, + tokensPerPeriod: 1, + replenishmentPeriod: 1000, + }); + void time; + + limiter[Symbol.dispose](); + const lease = limiter.tryAcquire(); + 
assertFalse(lease.acquired); +}); + +Deno.test("acquire() rejects after disposal", async () => { + using time = new FakeTime(0); + const limiter = createTokenBucket({ + limit: 5, + tokensPerPeriod: 1, + replenishmentPeriod: 1000, + }); + void time; + + limiter[Symbol.dispose](); + await assertRejects(() => limiter.acquire(), Error, "disposed"); +}); + +// --- Queue ordering --- + +Deno.test("oldest-first queue resolves waiters in FIFO order", async () => { + using time = new FakeTime(0); + using limiter = createTokenBucket({ + limit: 1, + tokensPerPeriod: 1, + replenishmentPeriod: 1000, + queueLimit: 10, + queueOrder: "oldest-first", + }); + + limiter.tryAcquire(); + + const order: number[] = []; + const p1 = limiter.acquire().then((l) => { + order.push(1); + return l; + }); + const p2 = limiter.acquire().then((l) => { + order.push(2); + return l; + }); + + time.tick(1000); + await p1; + time.tick(1000); + await p2; + + assertEquals(order, [1, 2]); +}); + +Deno.test("newest-first queue resolves newest waiter first", async () => { + using time = new FakeTime(0); + using limiter = createTokenBucket({ + limit: 1, + tokensPerPeriod: 1, + replenishmentPeriod: 1000, + queueLimit: 10, + queueOrder: "newest-first", + }); + + limiter.tryAcquire(); + + const order: number[] = []; + const p1 = limiter.acquire().then((l) => { + order.push(1); + return l; + }); + const p2 = limiter.acquire().then((l) => { + order.push(2); + return l; + }); + + time.tick(1000); + await p2; + time.tick(1000); + await p1; + + assertEquals(order, [2, 1]); +}); + +// --- Multi-permit queued waiters --- + +Deno.test("acquire() queues multi-permit waiter spanning multiple periods", async () => { + using time = new FakeTime(0); + using limiter = createTokenBucket({ + limit: 3, + tokensPerPeriod: 1, + replenishmentPeriod: 1000, + queueLimit: 10, + }); + + limiter.tryAcquire(3); + + let resolved = false; + const promise = limiter.acquire(3).then((lease) => { + resolved = true; + return lease; + }); + + 
await Promise.resolve(); + assertFalse(resolved); + + time.tick(1000); + await Promise.resolve(); + assertFalse(resolved); + + time.tick(1000); + await Promise.resolve(); + assertFalse(resolved); + + time.tick(1000); + const lease = await promise; + assert(resolved); + assert(lease.acquired); +}); + +// --- Multiple waiters resolved in single replenishment --- + +Deno.test("single replenishment resolves multiple queued waiters", async () => { + using time = new FakeTime(0); + using limiter = createTokenBucket({ + limit: 5, + tokensPerPeriod: 5, + replenishmentPeriod: 1000, + queueLimit: 10, + }); + + limiter.tryAcquire(5); + + const order: number[] = []; + const p1 = limiter.acquire(1).then((l) => { + order.push(1); + return l; + }); + const p2 = limiter.acquire(1).then((l) => { + order.push(2); + return l; + }); + const p3 = limiter.acquire(1).then((l) => { + order.push(3); + return l; + }); + + await Promise.resolve(); + assertEquals(order, []); + + time.tick(1000); + await Promise.all([p1, p2, p3]); + + assertEquals(order, [1, 2, 3]); + for (const p of [p1, p2, p3]) { + assert((await p).acquired); + } +}); + +// --- acquire() validation --- + +Deno.test("acquire() rejects for invalid permits", async () => { + using time = new FakeTime(0); + using limiter = createTokenBucket({ + limit: 5, + tokensPerPeriod: 1, + replenishmentPeriod: 1000, + }); + void time; + + await assertRejects(() => limiter.acquire(0), RangeError); + await assertRejects(() => limiter.acquire(-1), RangeError); + await assertRejects(() => limiter.acquire(1.5), RangeError); +}); + +Deno.test("acquire() rejects when permits exceed limit", async () => { + using time = new FakeTime(0); + using limiter = createTokenBucket({ + limit: 5, + tokensPerPeriod: 1, + replenishmentPeriod: 1000, + }); + void time; + + await assertRejects(() => limiter.acquire(6), RangeError, "exceeds"); +}); + +// --- Queue edge cases --- + +Deno.test("acquire() rejects when permits exceed queueLimit even if queue is empty", 
async () => { + using time = new FakeTime(0); + using limiter = createTokenBucket({ + limit: 5, + tokensPerPeriod: 1, + replenishmentPeriod: 1000, + queueLimit: 2, + }); + void time; + + for (let i = 0; i < 5; i++) limiter.tryAcquire(); + + const lease = await limiter.acquire(3); + assertFalse(lease.acquired); + assertEquals(lease.reason, "Queue limit exceeded"); +}); + +Deno.test("oldest-first queue evicts oldest waiter when queue is full", async () => { + using time = new FakeTime(0); + using limiter = createTokenBucket({ + limit: 1, + tokensPerPeriod: 1, + replenishmentPeriod: 1000, + queueLimit: 1, + queueOrder: "oldest-first", + }); + + limiter.tryAcquire(); + + const results: string[] = []; + const p1 = limiter.acquire().then((l) => { + results.push(l.acquired ? "p1:acquired" : `p1:${l.reason}`); + return l; + }); + const p2 = limiter.acquire().then((l) => { + results.push(l.acquired ? "p2:acquired" : `p2:${l.reason}`); + return l; + }); + + await p1; + assertEquals(results, ["p1:Evicted by newer request"]); + + time.tick(1000); + await p2; + + assertEquals(results, ["p1:Evicted by newer request", "p2:acquired"]); +}); + +Deno.test("eviction evicts multiple waiters to make room for a large request", async () => { + using time = new FakeTime(0); + using limiter = createTokenBucket({ + limit: 3, + tokensPerPeriod: 3, + replenishmentPeriod: 1000, + queueLimit: 3, + queueOrder: "newest-first", + }); + + limiter.tryAcquire(3); + + const results: string[] = []; + const p1 = limiter.acquire(1).then((l) => { + results.push(l.acquired ? "p1:acquired" : `p1:${l.reason}`); + return l; + }); + const p2 = limiter.acquire(1).then((l) => { + results.push(l.acquired ? "p2:acquired" : `p2:${l.reason}`); + return l; + }); + const p3 = limiter.acquire(1).then((l) => { + results.push(l.acquired ? 
"p3:acquired" : `p3:${l.reason}`); + return l; + }); + + await Promise.resolve(); + assertEquals(results, []); + + const p4 = limiter.acquire(3).then((l) => { + results.push(l.acquired ? "p4:acquired" : `p4:${l.reason}`); + return l; + }); + + await Promise.all([p1, p2, p3]); + assertEquals(results, [ + "p1:Evicted by newer request", + "p2:Evicted by newer request", + "p3:Evicted by newer request", + ]); + + time.tick(1000); + const lease = await p4; + assert(lease.acquired); + assertEquals(results, [ + "p1:Evicted by newer request", + "p2:Evicted by newer request", + "p3:Evicted by newer request", + "p4:acquired", + ]); +}); + +// --- retryAfter after manual replenish --- + +Deno.test("retryAfter is correct after manual replenish", () => { + const limiter = createTokenBucket({ + limit: 3, + tokensPerPeriod: 1, + replenishmentPeriod: 1000, + autoReplenishment: false, + }); + + for (let i = 0; i < 3; i++) limiter.tryAcquire(); + limiter.replenish(); + limiter.tryAcquire(); + + const lease = limiter.tryAcquire(3); + assertFalse(lease.acquired); + assert(lease.retryAfter > 0); + assert(Number.isFinite(lease.retryAfter)); + + limiter[Symbol.dispose](); +}); + +// --- Floating-point boundary --- + +Deno.test("remaining uses floor when tokens are at integer boundary", () => { + using time = new FakeTime(0); + using limiter = createTokenBucket({ + limit: 5, + tokensPerPeriod: 1, + replenishmentPeriod: 1000, + }); + + for (let i = 0; i < 5; i++) limiter.tryAcquire(); + assertFalse(limiter.tryAcquire().acquired); + + time.tick(1000); + const lease = limiter.tryAcquire(); + assert(lease.acquired); +}); + +Deno.test("tryAcquire() denied at exact token boundary after partial refill", () => { + using time = new FakeTime(0); + using limiter = createTokenBucket({ + limit: 10, + tokensPerPeriod: 3, + replenishmentPeriod: 1000, + }); + + for (let i = 0; i < 10; i++) limiter.tryAcquire(); + assertFalse(limiter.tryAcquire().acquired); + + time.tick(1000); + 
assert(limiter.tryAcquire(3).acquired); + assertFalse(limiter.tryAcquire().acquired); + + time.tick(1000); + assert(limiter.tryAcquire(3).acquired); + assertFalse(limiter.tryAcquire().acquired); + + time.tick(1000); + assert(limiter.tryAcquire(3).acquired); + assertFalse(limiter.tryAcquire().acquired); +}); + +Deno.test("retryAfter is correct with non-power-of-two tokensPerPeriod", () => { + using time = new FakeTime(0); + using limiter = createTokenBucket({ + limit: 7, + tokensPerPeriod: 3, + replenishmentPeriod: 1000, + }); + void time; + + for (let i = 0; i < 7; i++) limiter.tryAcquire(); + + const lease = limiter.tryAcquire(5); + assertFalse(lease.acquired); + assertEquals(lease.retryAfter, 2000); +}); + +// --- Double dispose --- + +Deno.test("double dispose is a no-op", () => { + using time = new FakeTime(0); + const limiter = createTokenBucket({ + limit: 5, + tokensPerPeriod: 1, + replenishmentPeriod: 1000, + }); + void time; + + limiter[Symbol.dispose](); + limiter[Symbol.dispose](); +}); diff --git a/rate_limit/types.ts b/rate_limit/types.ts new file mode 100644 index 000000000000..796d497c7caf --- /dev/null +++ b/rate_limit/types.ts @@ -0,0 +1,168 @@ +// Copyright 2018-2026 the Deno authors. MIT license. +// This module is browser compatible. + +/** + * A rate limiter that controls how many permits can be acquired over time or + * concurrently. Implementations are disposable — disposing a limiter cancels + * any internal timers and rejects queued waiters. + * + * @experimental **UNSTABLE**: New API, yet to be vetted. 
+ * + * @example Synchronous usage + * ```ts + * import type { RateLimiter } from "@std/rate-limit/types"; + * + * function useRateLimiter(limiter: RateLimiter) { + * using lease = limiter.tryAcquire(); + * if (!lease.acquired) { + * return; // rate limited + * } + * // proceed with work + * } + * ``` + * + * @example Async usage with queuing + * ```ts + * import type { RateLimiter } from "@std/rate-limit/types"; + * + * async function useRateLimiter(limiter: RateLimiter) { + * using lease = await limiter.acquire(1, { + * signal: AbortSignal.timeout(5000), + * }); + * // proceed with work + * } + * ``` + * + * @see {@linkcode createRateLimiter} for keyed rate limiting (primary API). + * @see {@linkcode createTokenBucket} for token bucket rate limiting. + * @see {@linkcode createFixedWindow} for fixed window rate limiting. + * @see {@linkcode createSlidingWindow} for sliding window rate limiting. + */ +export interface RateLimiter extends Disposable { + /** + * Try to acquire permits synchronously. Never blocks. + * + * @param permits Number of permits to acquire. Defaults to `1`. + * @returns A {@linkcode RateLimitLease} indicating success or rejection. + */ + tryAcquire(permits?: number): RateLimitLease; + + /** + * Wait for permits. Resolves immediately when permits are available. + * When no permits are available and a queue is configured, the request + * is queued until permits are replenished. + * + * **Disposal behavior:** calling `acquire()` after the limiter has been + * disposed rejects with {@linkcode Error}. Waiters already queued at the + * time of disposal resolve with a {@linkcode RejectedLease} (not a + * rejection) so they can be handled uniformly via the `acquired` field. + * + * Rejects with {@linkcode DOMException} if the signal is aborted. + * + * @param permits Number of permits to acquire. Defaults to `1`. + * @param options Acquire options (e.g. abort signal). + * @returns A {@linkcode RateLimitLease} indicating success or rejection. 
+   */
+  acquire(
+    permits?: number,
+    options?: AcquireOptions,
+  ): Promise<RateLimitLease>;
+}
+
+/**
+ * A {@linkcode RateLimiter} that replenishes permits on a timer. Extends
+ * `RateLimiter` with a {@linkcode ReplenishingRateLimiter.replenish}
+ * method for manual replenishment when `autoReplenishment` is `false`.
+ *
+ * @experimental **UNSTABLE**: New API, yet to be vetted.
+ */
+export interface ReplenishingRateLimiter extends RateLimiter {
+  /**
+   * Manually trigger a replenishment cycle and drain queued waiters.
+   *
+   * @throws {Error} If the limiter uses automatic replenishment.
+   */
+  replenish(): void;
+}
+
+/**
+ * Options for {@linkcode RateLimiter.acquire}.
+ *
+ * @experimental **UNSTABLE**: New API, yet to be vetted.
+ */
+export interface AcquireOptions {
+  /** Signal to abort the wait. */
+  signal?: AbortSignal;
+}
+
+/**
+ * The result of a rate limit acquisition attempt, discriminated on the
+ * {@linkcode RateLimitLease.acquired | acquired} field. TypeScript narrows
+ * the type after checking `acquired`, so `retryAfter` and `reason` are only
+ * present on rejected leases.
+ *
+ * @experimental **UNSTABLE**: New API, yet to be vetted.
+ *
+ * @example Checking a lease
+ * ```ts
+ * import { createTokenBucket } from "@std/rate-limit/token-bucket";
+ *
+ * using limiter = createTokenBucket({
+ *   limit: 10,
+ *   tokensPerPeriod: 1,
+ *   replenishmentPeriod: 1000,
+ * });
+ *
+ * using lease = limiter.tryAcquire();
+ * if (!lease.acquired) {
+ *   console.log(`Retry after ${lease.retryAfter}ms: ${lease.reason}`);
+ * }
+ * ```
+ */
+export type RateLimitLease = AcquiredLease | RejectedLease;
+
+/**
+ * A lease indicating that permits were successfully acquired. For concurrency
+ * limiters, disposing the lease releases the permit. For time-based limiters,
+ * dispose is a no-op.
+ *
+ * @experimental **UNSTABLE**: New API, yet to be vetted.
+ */
+export interface AcquiredLease extends Disposable {
+  /** Whether permits were acquired. Always `true` for this type. */
+  readonly acquired: true;
+}
+
+/**
+ * A lease indicating that permits could not be acquired.
+ *
+ * @experimental **UNSTABLE**: New API, yet to be vetted.
+ */
+export interface RejectedLease extends Disposable {
+  /** Whether permits were acquired. Always `false` for this type. */
+  readonly acquired: false;
+  /**
+   * Suggested retry delay in milliseconds. A value of `0` means retrying
+   * will not help (e.g. the limiter has been disposed).
+   */
+  readonly retryAfter: number;
+  /** Human-readable reason for rejection. */
+  readonly reason: string;
+}
+
+/**
+ * Queue configuration shared across all rate limiter algorithms.
+ *
+ * @experimental **UNSTABLE**: New API, yet to be vetted.
+ */
+export interface QueueOptions {
+  /** Max permits that can be queued waiting. Defaults to `0` (no queueing). */
+  queueLimit?: number;
+  /**
+   * Queue processing order. Defaults to `"oldest-first"`.
+   *
+   * With `"newest-first"`, the most recently queued request is served first.
+   * This can starve older waiters when demand consistently exceeds supply.
+   */
+  queueOrder?: "oldest-first" | "newest-first";
+}