-
Notifications
You must be signed in to change notification settings - Fork 3
Expand file tree
/
Copy pathconfig_example.toml
More file actions
106 lines (94 loc) · 3.56 KB
/
config_example.toml
File metadata and controls
106 lines (94 loc) · 3.56 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
# ==============================================================================
# Global Environment Variables (injected into each container at runtime)
# ==============================================================================
[env_vars]
HF_ENDPOINT = ""     # NOTE(review): presumably a Hugging Face endpoint/mirror URL — confirm with consumer
HF_TOKEN = ""
GITHUB_TOKEN = ""
# ==============================================================================
# DataPipeline Configuration
# ==============================================================================

# LLM Basic Configuration
[llm_config]
llm_name = ""            # e.g., "azure-gpt-5-fy0828" / "local" — selects one of the [llm.*] tables below
llm_temperature = 0.0
llm_max_tokens = 65536   # Maximum output tokens (-1 = no limit)
timeout = 180            # NOTE(review): unit not stated — presumably seconds; confirm with consumer

[llm.azure-gpt-5-fy0828]
model = "azure/gpt-5-fy0828"
api_key = ""
base_url = ""

[llm.claude-sonnet-4-20250514]
model = "claude-sonnet-4-20250514"
api_key = ""
# NOTE(review): base_url is an array here but a plain string in the other
# [llm.*] tables — presumably multiple endpoints are supported for this
# provider; confirm the config loader accepts both shapes.
base_url = [
    "",
    "",
]

[llm.local]
backend = "vllm"                        # Backend type: vllm
api_key = ""                            # Optional: API key passed to vLLM server
base_url = "http://localhost:8080/v1"   # vLLM server URL
# ==============================================================================
# Inference Agent Configuration
# ==============================================================================

# Inference runtime settings
[infer]
# Optional: Host cache directory for agent downloads; will be mounted to /download inside containers
# Set to empty to disable cache mount (each container will re-download/install)
# Use an absolute path here
download_cache_dir = "/path/to/FeatureBench/download_cache"

# OpenHands Agent Configuration
[infer_config.openhands]
# Required: LLM API Key (supports OpenAI/Anthropic/Azure, etc.)
LLM_API_KEY = ""
# Optional: LLM API Base URL
LLM_BASE_URL = ""
# Optional: Lock OpenHands version (leave empty to use latest)
OPENHANDS_VERSION = "0.62.0"
# Optional: Reasoning effort for OpenAI o-series models
LLM_REASONING_EFFORT = ""
# Azure API Version (required for Azure models only)
LLM_API_VERSION = ""
# Optional: OpenHands agent max iterations (step limit). Upstream default is 500.
# Kept as a string so the empty value can mean "use upstream default" (TOML has no null).
OPENHANDS_MAX_ITERATIONS = ""
# Optional: Save llm completions if true
SAVE_COMPLETIONS = false
# Optional: Enable OpenHands condensation (default false). Set true to enable condensers.
ENABLE_CONDENSER = false
# Optional: Render infer.log mode (compact|full)
INFER_LOG_RENDER_MODE = "full"

# Claude Code Agent Configuration
[infer_config.claude_code]
# Required: Anthropic API Key
ANTHROPIC_API_KEY = ""
# Optional: Custom API Base URL
ANTHROPIC_BASE_URL = ""
# Optional: Lock Claude Code version (leave empty to use latest)
CLAUDE_CODE_VERSION = ""

# Gemini CLI Agent Configuration
[infer_config.gemini_cli]
# Gemini API key (presumably required, matching sibling agent sections — confirm)
GEMINI_API_KEY = ""
# Gemini API base URL override (presumably optional, matching sibling sections — confirm)
GOOGLE_GEMINI_BASE_URL = ""
# Optional: Lock Gemini CLI version (leave empty to use latest)
GEMINI_CLI_VERSION = ""

# Codex Agent Configuration (Azure OpenAI supported)
[infer_config.codex]
# Required: OpenAI API Key
OPENAI_API_KEY = ""
# Optional: Custom API Base URL
OPENAI_BASE_URL = ""
# Optional: Reasoning effort level for Codex reasoning models; empty defaults to "medium"
CODEX_REASONING_EFFORT = ""
# Optional: Lock Codex version (leave empty to use latest)
CODEX_VERSION = ""

# mini_swe_agent Configuration
[infer_config.mini_swe_agent]
# Required: Unified mini_swe_agent API key
MSWEA_API_KEY = ""
# Optional: Base URL passed through to provider envs
MSWEA_BASE_URL = ""
# Optional: Cost tracking mode (default behavior in FB adapter is ignore_errors)
MSWEA_COST_TRACKING = ""
# Optional: Lock mini_swe_agent version (leave empty to use latest)
MINI_SWE_AGENT_VERSION = ""