# Source: .env.example (forked from Mirrowel/LLM-API-Key-Proxy)
# ==============================================================================
# || LLM API Key Proxy - Environment Variable Configuration ||
# ==============================================================================
#
# This file provides an example configuration for the proxy server.
# Copy this file to a new file named '.env' in the same directory
# and replace the placeholder values with your actual credentials and settings.
#
# ------------------------------------------------------------------------------
# | [REQUIRED] Proxy Server Settings |
# ------------------------------------------------------------------------------
# A secret key used to authenticate requests to THIS proxy server.
# This can be any string. Your client application must send this key in the
# 'Authorization' header as a Bearer token (e.g., "Authorization: Bearer YOUR_PROXY_API_KEY").
# REQUIRED: uncomment the line below and set your own value before starting the proxy.
#PROXY_API_KEY="YOUR_PROXY_API_KEY"
# ------------------------------------------------------------------------------
# | [API KEYS] Provider API Keys |
# ------------------------------------------------------------------------------
#
# The proxy automatically discovers API keys from environment variables.
# To add multiple keys for a single provider, increment the number at the end
# of the variable name (e.g., GEMINI_API_KEY_1, GEMINI_API_KEY_2).
#
# The provider name is derived from the part of the variable name before "_API_KEY".
# For example, 'GEMINI_API_KEY_1' configures the 'gemini' provider.
#
# --- Google Gemini ---
#GEMINI_API_KEY_1="YOUR_GEMINI_API_KEY_1"
#GEMINI_API_KEY_2="YOUR_GEMINI_API_KEY_2"
# --- OpenAI / Azure OpenAI ---
# For Azure, ensure your key has access to the desired models.
#OPENAI_API_KEY_1="YOUR_OPENAI_OR_AZURE_API_KEY"
# --- Anthropic (Claude) ---
#ANTHROPIC_API_KEY_1="YOUR_ANTHROPIC_API_KEY"
# --- OpenRouter ---
#OPENROUTER_API_KEY_1="YOUR_OPENROUTER_API_KEY"
# --- Mistral AI ---
#MISTRAL_API_KEY_1="YOUR_MISTRAL_API_KEY"
# --- NVIDIA NIM ---
#NVIDIA_NIM_API_KEY_1="YOUR_NVIDIA_API_KEY"
# --- Chutes ---
#CHUTES_API_KEY_1="YOUR_CHUTES_API_KEY"
# ------------------------------------------------------------------------------
# | [OAUTH] Provider OAuth 2.0 Credentials |
# ------------------------------------------------------------------------------
#
# The proxy uses a "local-first" approach for OAuth credentials:
# all OAuth credentials are managed within the 'oauth_creds/' directory.
#
# HOW IT WORKS:
# 1. On the first run, if you provide a path to an existing credential file
# (e.g., from ~/.gemini/), the proxy will COPY it into the local
# 'oauth_creds/' directory with a standardized name (e.g., 'gemini_cli_oauth_1.json').
# 2. On all subsequent runs, the proxy will ONLY use the files found inside
# 'oauth_creds/'. It will no longer scan system-wide directories.
# 3. To add a new account, either use the '--add-credential' tool or manually
# place a new, valid credential file in the 'oauth_creds/' directory.
#
# Use the variables below for the ONE-TIME setup to import existing credentials.
# After the first successful run, you can clear these paths.
#
# --- Google Gemini (gcloud CLI) ---
# Path to your gcloud ADC file (e.g., ~/.config/gcloud/application_default_credentials.json)
# or a credential file from the official 'gemini' CLI (e.g., ~/.gemini/credentials.json).
#GEMINI_CLI_OAUTH_1=""
# --- Qwen / Dashscope (Code Companion) ---
# Path to your Qwen credential file (e.g., ~/.qwen/oauth_creds.json).
#QWEN_CODE_OAUTH_1=""
# --- iFlow ---
# Path to your iFlow credential file (e.g., ~/.iflow/oauth_creds.json).
#IFLOW_OAUTH_1=""
# ------------------------------------------------------------------------------
# | [ADVANCED] Provider-Specific Settings |
# ------------------------------------------------------------------------------
# --- Gemini CLI Project ID ---
# Required if you are using the Gemini CLI OAuth provider and the proxy
# cannot automatically determine your Google Cloud Project ID.
#GEMINI_CLI_PROJECT_ID=""
# --- Model Ignore Lists ---
# Specify a comma-separated list of model names to exclude from a provider's
# available models. This is useful for filtering out models you don't want to use.
#
# Format: IGNORE_MODELS_<PROVIDER_NAME>="model-1,model-2,model-3"
#
# Example:
# IGNORE_MODELS_GEMINI="gemini-1.0-pro-vision-latest,gemini-1.0-pro-latest"
# IGNORE_MODELS_OPENAI="gpt-4-turbo,gpt-3.5-turbo-instruct"
#IGNORE_MODELS_GEMINI=""
#IGNORE_MODELS_OPENAI=""
# --- Model Whitelists (Overrides Ignore Lists) ---
# Specify a comma-separated list of model names to ALWAYS include in a
# provider's model list. This acts as an override for the ignore list.
#
# HOW IT WORKS:
# 1. A model on a whitelist will ALWAYS be available, even if it's also on an
# ignore list (or if the ignore list is set to "*").
# 2. For any models NOT on the whitelist, the standard ignore list logic applies.
#
# This allows for two main use cases:
# - "Pure Whitelist" Mode: Set IGNORE_MODELS_<PROVIDER>="*" and then specify
# only the models you want in WHITELIST_MODELS_<PROVIDER>.
# - "Exemption" Mode: Blacklist a broad range of models (e.g., "*-preview*")
# and then use the whitelist to exempt specific preview models you want to test.
#
# Format: WHITELIST_MODELS_<PROVIDER_NAME>="model-1,model-2"
#
# Example of a pure whitelist for Gemini:
# IGNORE_MODELS_GEMINI="*"
# WHITELIST_MODELS_GEMINI="gemini-1.5-pro-latest,gemini-1.5-flash-latest"
#WHITELIST_MODELS_GEMINI=""
#WHITELIST_MODELS_OPENAI=""
# --- Maximum Concurrent Requests Per Key ---
# Controls how many concurrent requests for the SAME model can use the SAME key.
# This is useful for providers that can handle concurrent requests without rate limiting.
# Default is 1 (no concurrency, current behavior).
#
# Format: MAX_CONCURRENT_REQUESTS_PER_KEY_<PROVIDER_NAME>=<number>
#
# Example:
# MAX_CONCURRENT_REQUESTS_PER_KEY_OPENAI=3 # Allow 3 concurrent requests per OpenAI key
# MAX_CONCURRENT_REQUESTS_PER_KEY_GEMINI=1 # Allow only 1 request per Gemini key (default)
#
#MAX_CONCURRENT_REQUESTS_PER_KEY_OPENAI=1
#MAX_CONCURRENT_REQUESTS_PER_KEY_GEMINI=1
#MAX_CONCURRENT_REQUESTS_PER_KEY_ANTHROPIC=1
#MAX_CONCURRENT_REQUESTS_PER_KEY_IFLOW=1
# --- Credential Rotation Mode ---
# Controls how credentials are rotated when multiple are available for a provider.
# This affects how the proxy selects the next credential to use for requests.
#
# Available modes:
# balanced - (Default) Rotate credentials evenly across requests to distribute load.
# Best for API keys with per-minute rate limits.
# sequential - Use one credential until it's exhausted (429 error), then switch to next.
# Best for credentials with daily/weekly quotas (e.g., free tier accounts).
# When a credential hits quota, it's put on cooldown based on the reset time
# parsed from the provider's error response.
#
# Format: ROTATION_MODE_<PROVIDER_NAME>=<mode>
#
# Provider Defaults:
# - antigravity: sequential (free tier accounts with daily quotas)
# - All others: balanced
#
# Example:
# ROTATION_MODE_GEMINI=sequential # Use Gemini keys until quota exhausted
# ROTATION_MODE_OPENAI=balanced # Distribute load across OpenAI keys (default)
# ROTATION_MODE_ANTIGRAVITY=balanced # Override Antigravity's sequential default
#
# ROTATION_MODE_GEMINI=balanced
# ROTATION_MODE_ANTIGRAVITY=sequential
# --- Priority-Based Concurrency Multipliers ---
# Credentials can be assigned to priority tiers (1=highest, 2, 3, etc.).
# Each tier can have a concurrency multiplier that increases the effective
# concurrent request limit for credentials in that tier.
#
# How it works:
# effective_concurrent_limit = MAX_CONCURRENT_REQUESTS_PER_KEY * tier_multiplier
#
# This allows paid/premium credentials to handle more concurrent requests than
# free tier credentials, regardless of rotation mode.
#
# Provider Defaults (built into provider classes):
# Antigravity:
# Priority 1: 5x (paid ultra tier)
# Priority 2: 3x (standard paid tier)
# Priority 3+: 2x (sequential mode) or 1x (balanced mode)
# Gemini CLI:
# Priority 1: 5x
# Priority 2: 3x
# Others: 1x (all modes)
#
# Format: CONCURRENCY_MULTIPLIER_<PROVIDER>_PRIORITY_<N>=<multiplier>
#
# Mode-specific overrides (optional):
# Format: CONCURRENCY_MULTIPLIER_<PROVIDER>_PRIORITY_<N>_<MODE>=<multiplier>
#
# Examples:
# CONCURRENCY_MULTIPLIER_ANTIGRAVITY_PRIORITY_1=10 # Override P1 to 10x
# CONCURRENCY_MULTIPLIER_ANTIGRAVITY_PRIORITY_3=1 # Override P3 to 1x
# CONCURRENCY_MULTIPLIER_ANTIGRAVITY_PRIORITY_2_BALANCED=1 # P2 = 1x in balanced mode only
# --- Model Quota Groups ---
# Models that share quota/cooldown timing. When one model in a group hits
# quota exhausted (429), all models in the group receive the same cooldown timestamp.
# They also reset (archive stats) together when the quota period expires.
#
# This is useful for providers where multiple model variants share the same
# underlying quota (e.g., Claude Sonnet and Opus on Antigravity).
#
# Format: QUOTA_GROUPS_<PROVIDER>_<GROUP>="model1,model2,model3"
#
# To DISABLE a default group, set it to empty string:
# QUOTA_GROUPS_ANTIGRAVITY_CLAUDE=""
#
# Default groups:
# ANTIGRAVITY.CLAUDE: claude-sonnet-4-5,claude-opus-4-5
#
# Examples:
# QUOTA_GROUPS_ANTIGRAVITY_CLAUDE="claude-sonnet-4-5,claude-opus-4-5"
# QUOTA_GROUPS_ANTIGRAVITY_GEMINI="gemini-3-pro-preview,gemini-3-pro-image-preview"
# ------------------------------------------------------------------------------
# | [ADVANCED] Fair Cycle Rotation |
# ------------------------------------------------------------------------------
#
# Ensures each credential is exhausted at least once before any can be reused.
# Prevents one credential from being used repeatedly while others sit idle.
#
# Provider Defaults (see src/rotator_library/config/defaults.py):
# - Enabled: sequential rotation mode only (balanced mode = disabled)
# - Tracking Mode: model_group (track per quota group)
# - Cross-Tier: false (each priority tier cycles independently)
# - Cycle Duration: 86400 seconds (24 hours)
# - Exhaustion Threshold: 300 seconds (5 minutes)
#
# Format: FAIR_CYCLE_{PROVIDER}=true/false
# Example:
# FAIR_CYCLE_ANTIGRAVITY=true
# FAIR_CYCLE_GEMINI_CLI=false
# Tracking mode: "model_group" (per quota group) or "credential" (global per key)
# FAIR_CYCLE_TRACKING_MODE_ANTIGRAVITY=model_group
# Cross-tier: true = ALL credentials must exhaust regardless of tier
# FAIR_CYCLE_CROSS_TIER_ANTIGRAVITY=false
# Cycle duration in seconds
# FAIR_CYCLE_DURATION_ANTIGRAVITY=86400
# Exhaustion threshold - cooldown must exceed this to count as "exhausted"
# EXHAUSTION_COOLDOWN_THRESHOLD_ANTIGRAVITY=300
# Global fallback for all providers:
# EXHAUSTION_COOLDOWN_THRESHOLD=300
# ------------------------------------------------------------------------------
# | [ADVANCED] Custom Caps |
# ------------------------------------------------------------------------------
#
# Set custom usage limits per tier, per model/group that are MORE restrictive
# than actual API limits. When the cap is reached, credential goes on cooldown
# BEFORE hitting the actual API limit.
#
# Cap values: absolute number (100) or percentage ("80%")
# Cooldown modes: quota_reset | offset:<seconds> | fixed:<seconds>
#
# Format: CUSTOM_CAP_{PROVIDER}_T{TIER}_{MODEL_OR_GROUP}=<value>
# Format: CUSTOM_CAP_COOLDOWN_{PROVIDER}_T{TIER}_{MODEL_OR_GROUP}=<mode>:<value>
#
# Name transformations for env vars:
# - Dashes (-) -> Underscores (_)
# - Dots (.) -> Underscores (_)
# - All UPPERCASE
# Example: claude-opus-4.5 -> CLAUDE_OPUS_4_5
#
# Tier syntax:
# - Single tier: T2 (tier 2)
# - Multi-tier: T2_3 (tiers 2 and 3 share config)
# - Default: TDEFAULT (fallback for unlisted tiers)
#
# Examples:
# CUSTOM_CAP_ANTIGRAVITY_T2_CLAUDE=100
# CUSTOM_CAP_COOLDOWN_ANTIGRAVITY_T2_CLAUDE=quota_reset
#
# CUSTOM_CAP_ANTIGRAVITY_T3_CLAUDE=30
# CUSTOM_CAP_COOLDOWN_ANTIGRAVITY_T3_CLAUDE=offset:3600
#
# CUSTOM_CAP_ANTIGRAVITY_TDEFAULT_CLAUDE=80%
#
# CUSTOM_CAP_ANTIGRAVITY_T2_3_G25_FLASH=80%
# CUSTOM_CAP_COOLDOWN_ANTIGRAVITY_T2_3_G25_FLASH=offset:1800
# ------------------------------------------------------------------------------
# | [ADVANCED] Proxy Configuration |
# ------------------------------------------------------------------------------
# --- OAuth Refresh Interval ---
# How often, in seconds, the background refresher should check and refresh
# expired OAuth tokens.
# Default: 600 (10 minutes)
# OAUTH_REFRESH_INTERVAL=600
# --- Skip OAuth Initialization ---
# Set to "true" to prevent the proxy from performing the interactive OAuth
# setup/validation flow on startup. This is highly recommended for non-interactive
# environments like Docker containers or automated scripts.
# Ensure your credentials in 'oauth_creds/' are valid before enabling this.
#SKIP_OAUTH_INIT_CHECK=false
# --- Global Request Timeout ---
# Maximum time (in seconds) a request can wait for an available credential.
# If all credentials are on cooldown and none will become available within
# this timeout, the request fails fast with a clear error message.
# Increase this value if you have limited credentials and want to wait
# longer for capacity (e.g., when credentials hit rate limits).
# Default: 30 seconds
# GLOBAL_TIMEOUT=30
# ------------------------------------------------------------------------------
# | [ADVANCED] HTTP Timeout Configuration |
# ------------------------------------------------------------------------------
#
# Controls timeouts for HTTP requests to provider APIs.
# All values are in seconds.
#
# Connection establishment timeout (default: 30)
# TIMEOUT_CONNECT=30
# Request body send timeout (default: 30)
# TIMEOUT_WRITE=30
# Connection pool acquisition timeout (default: 60)
# TIMEOUT_POOL=60
# Read timeout between streaming chunks (default: 300 = 5 minutes)
# If no data arrives for this duration, the connection is considered stalled.
# TIMEOUT_READ_STREAMING=300
# Read timeout for non-streaming responses (default: 600 = 10 minutes)
# Some LLM responses take significant time to generate.
# TIMEOUT_READ_NON_STREAMING=600
# ------------------------------------------------------------------------------
# | [ADVANCED] Antigravity Provider Configuration |
# ------------------------------------------------------------------------------
#
# Configuration for the Antigravity (Google Code Assist) provider.
# These settings control retry behavior and prompt handling.
#
# --- Empty Response Handling ---
# When Antigravity returns an empty response (no content, no tool calls),
# the proxy will automatically retry up to this many attempts.
# Default: 6 attempts
# ANTIGRAVITY_EMPTY_RESPONSE_ATTEMPTS=6
# Delay in seconds between empty response retries.
# Default: 3 seconds
# ANTIGRAVITY_EMPTY_RESPONSE_RETRY_DELAY=3
# --- Malformed Function Call Handling ---
# When Gemini 3 returns MALFORMED_FUNCTION_CALL (invalid JSON syntax),
# the proxy injects corrective messages and retries.
# Default: 2 retries
# ANTIGRAVITY_MALFORMED_CALL_RETRIES=2
# Delay in seconds between malformed call retries.
# Default: 1 second
# ANTIGRAVITY_MALFORMED_CALL_DELAY=1
# --- System Instruction Configuration ---
# When true, prepend the Antigravity agent system instruction.
# Default: true
# ANTIGRAVITY_PREPEND_INSTRUCTION=true
# When true, inject an identity override instruction after the Antigravity prompt.
# This tells the model to disregard the Antigravity identity.
# Default: true
# ANTIGRAVITY_INJECT_IDENTITY_OVERRIDE=true
# When true, use shortened versions of prompts to reduce context bloat.
# Default: true
# ANTIGRAVITY_USE_SHORT_PROMPTS=true
# ------------------------------------------------------------------------------
# | [ADVANCED] Gemini CLI Provider Configuration |
# ------------------------------------------------------------------------------
#
# Configuration for the Gemini CLI (Google Code Assist) provider.
#
# OAuth callback port for interactive re-authentication.
# Default: 8085
# GEMINI_CLI_OAUTH_PORT=8085
# ------------------------------------------------------------------------------
# | [ADVANCED] Antigravity OAuth Configuration |
# ------------------------------------------------------------------------------
#
# OAuth callback port for Antigravity interactive re-authentication.
# Default: 8085 (same as Gemini CLI, shared)
# ANTIGRAVITY_OAUTH_PORT=8085
# ------------------------------------------------------------------------------
# | [CODEX] OpenAI Codex Provider Configuration |
# ------------------------------------------------------------------------------
#
# Codex provider uses OAuth authentication with OpenAI's ChatGPT backend API.
# Credentials are stored in oauth_creds/ directory as codex_oauth_*.json files.
#
# --- Reasoning Effort ---
# Controls how much "thinking" the model does before responding.
# Higher effort = more thorough reasoning but slower responses.
#
# Available levels (model-dependent):
# - low: Minimal reasoning, fastest responses
# - medium: Balanced (default)
# - high: More thorough reasoning
# - xhigh: Maximum reasoning (gpt-5.2, gpt-5.2-codex, gpt-5.3-codex, gpt-5.1-codex-max only)
#
# Can also be controlled per-request via:
# 1. Model suffix: codex/gpt-5.2:high
# 2. Request param: "reasoning_effort": "high"
#
# CODEX_REASONING_EFFORT=medium
# --- Reasoning Summary ---
# Controls how reasoning is summarized in responses.
# Options: auto, concise, detailed, none
# CODEX_REASONING_SUMMARY=auto
# --- Reasoning Output Format ---
# How reasoning/thinking is presented in responses.
# Options:
# - think-tags: Wrap in <think>...</think> tags (default, matches other providers)
# - raw: Include reasoning as-is
# - none: Don't include reasoning in output
# CODEX_REASONING_COMPAT=think-tags
# --- Identity Override ---
# When true, injects an override that tells the model to prioritize
# user-provided system prompts over the required opencode instructions.
# CODEX_INJECT_IDENTITY_OVERRIDE=true
# --- Instruction Injection ---
# When true, injects the required opencode system instruction.
# Only disable if you know what you're doing (API may reject requests).
# CODEX_INJECT_INSTRUCTION=true
# --- Empty Response Handling ---
# Number of retry attempts when receiving empty responses.
# CODEX_EMPTY_RESPONSE_ATTEMPTS=3
# Delay (seconds) between empty response retries.
# CODEX_EMPTY_RESPONSE_RETRY_DELAY=2
# --- OAuth Configuration ---
# OAuth callback port for Codex interactive authentication.
# Default: 8086
# CODEX_OAUTH_PORT=8086
# ------------------------------------------------------------------------------
# | [ADVANCED] Debugging / Logging |
# ------------------------------------------------------------------------------
# --- LiteLLM Pydantic Warning Suppression ---
# LiteLLM produces harmless Pydantic serialization warnings during streaming
# due to a known issue with response types (Message, StreamingChoices) having
# mismatched field counts. These warnings don't affect functionality.
# See: https://github.com/BerriAI/litellm/issues/11759
#
# NOTE: This is a workaround. Remove once litellm patches the issue above.
#
# Set to "0" to show these warnings (useful for debugging).
# Default: "1" (suppress warnings)
# SUPPRESS_LITELLM_SERIALIZATION_WARNINGS=1