Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
66 changes: 66 additions & 0 deletions logging_rate_limit_observer.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,66 @@
import { type RateLimitInfo, rateLimitUsagePct } from "./rate_limit_headers.ts";
import type { RateLimitInfoCallback } from "./rate_limit_fetch.ts";

/**
 * Usage fractions at which `loggingRateLimitObserver` logs by default:
 * 80%, 90% and 95% of the rate limit consumed.
 */
export const DEFAULT_RATE_LIMIT_THRESHOLDS: readonly number[] = [0.8, 0.9, 0.95];

/**
 * Configuration for the LoggingRateLimitObserver.
 */
export interface LoggingRateLimitObserverOptions {
  /**
   * Usage fractions (0.0 - 1.0) that should produce a log line.
   * Defaults to `DEFAULT_RATE_LIMIT_THRESHOLDS` (80%, 90%, 95%).
   */
  thresholds?: readonly number[];
  /**
   * Function used to emit the log line. Defaults to `console.warn`.
   * Useful when injecting a structured logger.
   */
  log?: (message: string) => void;
}

/**
 * Builds a ready-to-use `onRateLimitInfo` callback that emits a warning
 * whenever rate limit usage reaches one of the configured thresholds.
 * Only the single highest matching threshold produces a log line per call.
 *
 * Example:
 *
 * ```ts
 * import { Client } from "@getlago/lago-javascript-client";
 * import { loggingRateLimitObserver } from "@getlago/lago-javascript-client";
 *
 * const client = Client(apiKey, {
 *   rateLimitRetry: { onRateLimitInfo: loggingRateLimitObserver() },
 * });
 * ```
 */
export function loggingRateLimitObserver(
  options: LoggingRateLimitObserverOptions = {},
): RateLimitInfoCallback {
  // Copy before sorting so a caller-supplied array is never mutated;
  // descending order makes the first match the most severe threshold.
  const sortedThresholds = [
    ...(options.thresholds ?? DEFAULT_RATE_LIMIT_THRESHOLDS),
  ].sort((a, b) => b - a);
  // deno-lint-ignore no-console
  const emit = options.log ?? ((message: string) => console.warn(message));

  return function observe(info: RateLimitInfo): void {
    const usage = rateLimitUsagePct(info);
    if (usage === null) return;

    // First (i.e. highest) threshold at or below current usage, if any.
    const crossed = sortedThresholds.find((threshold) => usage >= threshold);
    if (crossed === undefined) return;

    emit(
      `Lago rate limit at ${(usage * 100).toFixed(0)}% (limit=${info.limit}, ` +
        `remaining=${info.remaining}, reset=${info.reset}s, ` +
        `${info.method} ${info.url})`,
    );
  };
}
19 changes: 17 additions & 2 deletions mod.ts
Original file line number Diff line number Diff line change
Expand Up @@ -64,7 +64,22 @@ export async function getLagoError<T>(error: any) {

// Rate limit exports
export { LagoRateLimitError } from "./rate_limit_error.ts";
export { parseRateLimitHeaders, type RateLimitHeaders } from "./rate_limit_headers.ts";
export { createRateLimitFetch, type RateLimitFetchConfig } from "./rate_limit_fetch.ts";
export {
parseRateLimitHeaders,
parseRateLimitInfo,
type RateLimitHeaders,
type RateLimitInfo,
rateLimitUsagePct,
} from "./rate_limit_headers.ts";
export {
createRateLimitFetch,
type RateLimitFetchConfig,
type RateLimitInfoCallback,
} from "./rate_limit_fetch.ts";
export {
DEFAULT_RATE_LIMIT_THRESHOLDS,
loggingRateLimitObserver,
type LoggingRateLimitObserverOptions,
} from "./logging_rate_limit_observer.ts";

export * from "./openapi/client.ts";
60 changes: 58 additions & 2 deletions rate_limit_fetch.ts
Original file line number Diff line number Diff line change
@@ -1,9 +1,20 @@
import { LagoRateLimitError } from "./rate_limit_error.ts";
import {
parseRateLimitHeaders,
type RateLimitHeaders,
parseRateLimitInfo,
type RateLimitInfo,
} from "./rate_limit_headers.ts";

/**
 * Callback invoked with parsed rate limit headers after every successful
 * (`response.ok`) response. Use this to build observability around the
 * rate limit (warn at thresholds, emit metrics, etc.).
 *
 * Errors thrown by the callback are caught and logged so they cannot break
 * the underlying request flow.
 */
export type RateLimitInfoCallback = (info: RateLimitInfo) => void;

/**
* Configuration for rate limit fetch behavior
*/
Expand All @@ -14,6 +25,11 @@ export interface RateLimitFetchConfig {
retryOnRateLimit?: boolean;
/** Maximum delay in milliseconds before a retry (default: 20000) */
maxRetryDelay?: number;
/**
* Optional callback invoked after every successful (non-429) response
* with parsed rate limit headers. See `RateLimitInfoCallback`.
*/
onRateLimitInfo?: RateLimitInfoCallback;
}

/**
Expand All @@ -27,6 +43,7 @@ export function createRateLimitFetch(
const maxRetries = config.maxRetries ?? 3;
const retryOnRateLimit = config.retryOnRateLimit ?? true;
const maxRetryDelay = config.maxRetryDelay ?? 20_000;
const onRateLimitInfo = config.onRateLimitInfo;

return async function rateLimitFetch(
input: RequestInfo | URL,
Expand Down Expand Up @@ -61,7 +78,12 @@ export function createRateLimitFetch(
continue; // Retry
}

// Success or non-rate-limit error - return the response
// Success: emit observability info. Non-2xx, non-429 responses are
// returned as-is without invoking the callback because their headers
// do not carry useful rate limit context.
if (onRateLimitInfo && response.ok) {
emitRateLimitInfo(onRateLimitInfo, response, input, init);
}
return response;
} catch (error) {
lastError = error;
Expand All @@ -82,6 +104,40 @@ export function createRateLimitFetch(
};
}

/**
 * Deliver parsed rate limit info for one response to the configured
 * callback, swallowing any exception so a buggy observer cannot break
 * the request.
 */
function emitRateLimitInfo(
  callback: RateLimitInfoCallback,
  response: Response,
  input: RequestInfo | URL,
  init?: RequestInit,
): void {
  try {
    // init.method takes precedence; a Request input carries its own method
    // (fetch(new Request(url, { method: "POST" })) is a valid call shape).
    let method = init?.method;
    if (method == null && input instanceof Request) {
      method = input.method;
    }
    const info = parseRateLimitInfo(
      response.headers,
      (method ?? "GET").toUpperCase(),
      requestUrl(input),
    );
    if (info !== null) {
      callback(info);
    }
  } catch (err) {
    // Never let observability break the request flow.
    // deno-lint-ignore no-console
    console.warn("Lago: onRateLimitInfo callback raised:", err);
  }
}

/** Normalize any fetch input (string, URL, or Request) to its URL string. */
function requestUrl(input: RequestInfo | URL): string {
  if (input instanceof URL) return input.toString();
  return typeof input === "string" ? input : (input as Request).url;
}

/**
* Calculate wait time before retry
* Uses the exact reset time from headers if available, otherwise exponential backoff
Expand Down
46 changes: 46 additions & 0 deletions rate_limit_headers.ts
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,32 @@ export interface RateLimitHeaders {
reset: number | null;
}

/**
 * Parsed rate limit headers plus the request context they came from.
 *
 * Delivered to the `onRateLimitInfo` callback after every successful request
 * so callers can build observability around the rate limit (warn at thresholds,
 * emit metrics, etc.).
 */
export interface RateLimitInfo extends RateLimitHeaders {
  /** HTTP method of the call (GET, POST, ...), uppercased by the fetch wrapper. */
  method: string;
  /** URL of the request the headers were observed on. */
  url: string;
}

/**
 * Fraction of the rate limit currently consumed, in [0.0, 1.0].
 *
 * Returns `null` when the headers cannot be interpreted: a missing or
 * non-positive limit, or a missing remaining count.
 */
export function rateLimitUsagePct(info: RateLimitHeaders): number | null {
  const { limit, remaining } = info;
  const unusable = limit == null || remaining == null || limit <= 0;
  return unusable ? null : 1 - remaining / limit;
}

/**
* Extract rate limit information from response headers
*/
Expand All @@ -18,6 +44,26 @@ export function parseRateLimitHeaders(headers: Headers): RateLimitHeaders {
};
}

/**
 * Builds a `RateLimitInfo` (headers + request context) from response headers.
 *
 * Returns `null` when no `x-ratelimit-*` headers are present at all
 * (e.g. a self-hosted Lago instance with limits disabled), letting callers
 * skip observability emission entirely.
 */
export function parseRateLimitInfo(
  headers: Headers,
  method: string,
  url: string,
): RateLimitInfo | null {
  const parsed = parseRateLimitHeaders(headers);
  const hasAnyHeader = parsed.limit != null || parsed.remaining != null ||
    parsed.reset != null;
  return hasAnyHeader ? { ...parsed, method, url } : null;
}

/**
* Helper to parse a header value as a number
*/
Expand Down
Loading
Loading