7 changes: 7 additions & 0 deletions .env.example
@@ -16,7 +16,14 @@ PORT=8000
SECRET_KEY=super_long_default_key_change_this_in_production

# ── LLM ──────────────────────────────────────────────────────────────
LLM_PROVIDER=openai
LLM_DEFAULT_MODEL=gpt-5-nano
OPENAI_API_KEY=your_openai_api_key_here
# For Ollama, set LLM_PROVIDER=ollama. OLLAMA_MODEL is read only when the Ollama provider is active.
# OLLAMA_MODEL=gemma3n:e2b
# OLLAMA_BASE_URL=http://localhost:11434
# If the app runs in Docker and Ollama runs on your host, use:
# OLLAMA_BASE_URL=http://host.docker.internal:11434

# ── Email ────────────────────────────────────────────────────────────
# Use "console" for dev (prints to stdout), "resend" for production
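A quick way to sanity-check the Ollama path before booting the app is to hit the server's model listing and confirm the configured model has actually been pulled. A minimal sketch, assuming Ollama's standard `GET /api/tags` endpoint and the same environment variables as above:

```python
# Preflight check for the Ollama path (sketch; assumes Ollama's standard
# GET /api/tags endpoint, which lists locally pulled models).
import json
import os
import urllib.request

base_url = os.environ.get("OLLAMA_BASE_URL", "http://localhost:11434")
model = os.environ.get("OLLAMA_MODEL", "gemma3n:e2b")

with urllib.request.urlopen(f"{base_url}/api/tags", timeout=5) as resp:
    tags = json.load(resp)

names = {m["name"] for m in tags.get("models", [])}
if model not in names:
    raise SystemExit(f"Model {model!r} not pulled; run: ollama pull {model}")
print(f"Ollama at {base_url} is up and {model} is available.")
```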
5 changes: 3 additions & 2 deletions README.md
@@ -129,7 +129,7 @@ uv run python run.py

Platform runs at [http://localhost:8000](http://localhost:8000)

> An LLM API key (OpenAI or Ollama) is needed for AI agent challenges.
> An LLM backend is needed for AI agent challenges: an OpenAI API key or a reachable Ollama server.
> Redis is needed for event-driven challenge detection.
> Without them, you can still explore the UI and codebase.

@@ -143,6 +143,7 @@ Key environment variables (see [.env.example](.env.example) for the full template):
| `DATABASE_TYPE` | `sqlite` | `sqlite` or `postgresql` |
| `OPENAI_API_KEY` | - | Required for AI agent challenges |
| `LLM_PROVIDER` | `openai` | `openai` or `ollama` |
| `OLLAMA_MODEL` | `gemma3n:e2b` | Model FinBot uses when `LLM_PROVIDER=ollama` |
| `REDIS_URL` | `redis://localhost:6379` | Event bus for CTF processing |
| `SECRET_KEY` | dev default | **Change in production** |
| `EMAIL_PROVIDER` | `console` | `console` (dev) or `resend` (prod) |
@@ -211,4 +212,4 @@ OWASP FinBot CTF is part of the [OWASP GenAI Security Project](https://genai.owasp.org)
- **[Abigail Dede Okley](https://www.linkedin.com/in/abigailokley)** -- Chief Cat Herder (project manager, keeping all the cats aligned and on track)
- **[Carolina Steadham](https://www.linkedin.com/in/carolinacsteadham)** -- Guardian of Quality Realms (ensuring every feature meets its highest destiny, safeguarding workstream integrity)

And all the amazing [contributors](https://github.com/GenAI-Security-Project/finbot-ctf/graphs/contributors) who make this project possible.
And all the amazing [contributors](https://github.com/GenAI-Security-Project/finbot-ctf/graphs/contributors) who make this project possible.
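One thing the README table implies but does not state outright: which variables are actually required depends on the provider. A rough startup check, illustrative only, that makes the contract explicit:

```python
# Illustrative provider/env contract from the table above;
# FinBot's own validation (if any) may differ.
import os

def check_llm_env() -> None:
    provider = os.environ.get("LLM_PROVIDER", "openai").strip().lower()
    if provider == "openai" and not os.environ.get("OPENAI_API_KEY"):
        raise RuntimeError("LLM_PROVIDER=openai requires OPENAI_API_KEY")
    if provider == "ollama" and not os.environ.get("OLLAMA_MODEL"):
        # Not fatal: the documented default applies.
        print("OLLAMA_MODEL unset; falling back to gemma3n:e2b")
```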
104 changes: 73 additions & 31 deletions finbot/agents/chat.py
@@ -16,13 +16,12 @@
from datetime import UTC, datetime
from typing import Any

from openai import AsyncOpenAI

from finbot.config import settings
from finbot.core.auth.session import SessionContext
from finbot.core.data.database import db_session
from finbot.core.data.models import CTFEvent
from finbot.core.data.models import CTFEvent, LLMRequest
from finbot.core.data.repositories import ChatMessageRepository, VendorRepository
from finbot.core.llm.client import get_llm_client
from finbot.core.messaging import event_bus
from finbot.guardrails.schemas import HookKind
from finbot.guardrails.service import GuardrailHookService
@@ -66,11 +65,17 @@ def __init__(
self.max_history = max_history
self.agent_name = agent_name
self._workflow_id = self._resolve_workflow_id()
self._client = AsyncOpenAI(
api_key=settings.OPENAI_API_KEY,
timeout=settings.CHAT_STREAM_TIMEOUT,
)
self._model = settings.LLM_DEFAULT_MODEL
self._provider = settings.LLM_PROVIDER.strip().lower()
self._llm_client = get_llm_client()
self._client = None
if self._provider == "openai":
from openai import AsyncOpenAI # pylint: disable=import-outside-toplevel

self._client = AsyncOpenAI(
api_key=settings.OPENAI_API_KEY,
timeout=settings.CHAT_STREAM_TIMEOUT,
)
self._model = self._llm_client.default_model
self._mcp_provider: MCPToolProvider | None = None
self._mcp_connected = False
self._tool_callables = self._build_native_callables()
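The `__init__` change only constructs an `AsyncOpenAI` SDK client on the OpenAI branch and leaves `self._client` as `None` otherwise, routing everything else through the shared `get_llm_client()` abstraction. The deferred import is the point: an Ollama-only deployment never imports the `openai` package at all. The pattern in isolation, as a sketch:

```python
# Sketch of the deferred-import pattern used in __init__ above:
# the SDK is imported only on the branch that needs it.
def make_sdk_client(provider: str, api_key: str | None, timeout: float):
    if provider == "openai":
        from openai import AsyncOpenAI  # deferred, as in the diff
        return AsyncOpenAI(api_key=api_key, timeout=timeout)
    return None  # non-OpenAI providers use the shared LLM client instead
```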
@@ -367,24 +372,60 @@ async def stream_response(
user_message=user_message,
)

stream = await self._client.responses.create(**stream_params)

pending_tool_calls: list[dict] = []
append_tool_call_items = True

async for event in stream:
if event.type == "response.output_text.delta":
full_response += event.delta
yield f"data: {json.dumps({'type': 'token', 'content': event.delta})}\n\n"

elif event.type == "response.output_item.done":
if event.item.type == "function_call":
pending_tool_calls.append(
{
"name": event.item.name,
"call_id": event.item.call_id,
"arguments": json.loads(event.item.arguments),
}
try:
if self._provider == "openai":
if self._client is None:
raise RuntimeError("OpenAI chat client is not initialized")

stream = await self._client.responses.create(**stream_params)

async for event in stream:
if event.type == "response.output_text.delta":
full_response += event.delta
yield f"data: {json.dumps({'type': 'token', 'content': event.delta})}\n\n"

elif event.type == "response.output_item.done":
if event.item.type == "function_call":
pending_tool_calls.append(
{
"name": event.item.name,
"call_id": event.item.call_id,
"arguments": json.loads(event.item.arguments),
}
)
else:
append_tool_call_items = False
response = await self._llm_client.chat(
request=LLMRequest(
messages=input_messages,
model=self._model,
temperature=settings.LLM_DEFAULT_TEMPERATURE,
tools=tools,
)
)
if response.messages:
input_messages = response.messages
if not response.success:
raise RuntimeError(response.content or "LLM provider unavailable")

content = response.content or ""
if content:
full_response += content
yield f"data: {json.dumps({'type': 'token', 'content': content})}\n\n"
pending_tool_calls = response.tool_calls or []
except Exception as e: # pylint: disable=broad-exception-caught
logger.error("Chat model call failed: %s", e)
error_msg = (
"The configured AI provider is unavailable. "
"Check LLM_PROVIDER, OLLAMA_MODEL or LLM_DEFAULT_MODEL, "
"and provider credentials or URL."
)
yield f"data: {json.dumps({'type': 'error', 'content': error_msg})}\n\n"
yield f"data: {json.dumps({'type': 'done'})}\n\n"
return

await self._guardrail_service.invoke(
HookKind.after_model,
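Worth spelling out the client-side contract this creates: the error path now emits `error` and `done` frames over SSE instead of letting the exception escape the stream, so a consumer simply reads typed events until `done`. A minimal reader sketch; the endpoint URL and request body are assumptions, but the frame shapes (`token`, `error`, `done`) are exactly the ones yielded above:

```python
# Minimal SSE consumer for the frames yielded above.
# The endpoint URL and payload are assumed, not taken from the PR.
import json
import requests

with requests.post("http://localhost:8000/api/chat", stream=True,
                   json={"message": "hello"}, timeout=60) as resp:
    for line in resp.iter_lines(decode_unicode=True):
        if not line or not line.startswith("data: "):
            continue
        event = json.loads(line[len("data: "):])
        if event["type"] == "token":
            print(event["content"], end="", flush=True)
        elif event["type"] == "error":
            print(f"\n[error] {event['content']}")
        elif event["type"] == "done":
            break
```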
@@ -427,14 +468,15 @@ async def _keepalive_emitter() -> None:
summary=f"Chat tool call: {tc['name']}",
)

input_messages.append(
{
"type": "function_call",
"name": tc["name"],
"call_id": tc["call_id"],
"arguments": json.dumps(tc["arguments"]),
}
)
if append_tool_call_items:
input_messages.append(
{
"type": "function_call",
"name": tc["name"],
"call_id": tc["call_id"],
"arguments": json.dumps(tc["arguments"]),
}
)
tool_start = datetime.now(UTC)
result = await self._execute_tool(tc["name"], tc["arguments"])
tool_duration_ms = int(
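The `append_tool_call_items` guard exists because only the OpenAI Responses path needs `function_call` items echoed back into `input_messages`; on the Ollama path the provider client already returns the updated message list (`response.messages` above). Either way, a pending call is a plain dict of `name`, `call_id`, and parsed `arguments`, so execution reduces to a name-keyed dispatch, roughly:

```python
# Rough shape of the dispatch behind _execute_tool(tc["name"], tc["arguments"]).
# The registry here is illustrative, not FinBot's actual tool table.
from typing import Any, Awaitable, Callable

ToolFn = Callable[..., Awaitable[Any]]

async def execute_tool(registry: dict[str, ToolFn],
                       name: str, arguments: dict[str, Any]) -> Any:
    fn = registry.get(name)
    if fn is None:
        return {"error": f"unknown tool: {name}"}
    return await fn(**arguments)
```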
1 change: 1 addition & 0 deletions finbot/config.py
@@ -111,6 +111,7 @@ class Settings(BaseSettings):

# Ollama
OLLAMA_BASE_URL: str = "http://localhost:11434"
OLLAMA_MODEL: str = "gemma3n:e2b"

# Development Config
RELOAD: bool = True
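For context, the new field follows the existing `BaseSettings` pattern: a typed attribute with a default, overridable from the environment. A stripped-down equivalent, assuming pydantic-settings v2 (the v1 import path differs):

```python
# Stripped-down illustration; FinBot's real Settings class has many more fields.
from pydantic_settings import BaseSettings

class OllamaSettings(BaseSettings):
    OLLAMA_BASE_URL: str = "http://localhost:11434"
    OLLAMA_MODEL: str = "gemma3n:e2b"

# Running with OLLAMA_MODEL=llama3.2:3b in the environment overrides the default.
print(OllamaSettings().OLLAMA_MODEL)
```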
13 changes: 11 additions & 2 deletions finbot/core/llm/client.py
@@ -14,8 +14,12 @@ class LLMClient:
"""LLM Client with configurable provider and model"""

def __init__(self):
self.provider = settings.LLM_PROVIDER
self.default_model = settings.LLM_DEFAULT_MODEL
self.provider = settings.LLM_PROVIDER.strip().lower()
self.default_model = (
settings.OLLAMA_MODEL
if self.provider == "ollama"
else settings.LLM_DEFAULT_MODEL
)
self.default_temperature = settings.LLM_DEFAULT_TEMPERATURE
self.client = self._get_client()

@@ -26,6 +30,11 @@ def _get_client(self):
from finbot.core.llm.openai_client import OpenAIClient

return OpenAIClient()
elif self.provider == "ollama":
# pylint: disable=import-outside-toplevel
from finbot.core.llm.ollama_client import OllamaClient

return OllamaClient()
elif self.provider == "mock":
# pylint: disable=import-outside-toplevel
from finbot.core.llm.mock_client import MockLLMClient
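The practical effect of this change: with `LLM_PROVIDER=ollama`, `default_model` now resolves from `OLLAMA_MODEL` instead of `LLM_DEFAULT_MODEL`, and the provider string is normalized before comparison. A quick test sketch to pin that down; it assumes `LLMClient` reads settings at construction time (as the diff shows) and that instantiating the Ollama client does not open a connection:

```python
# Pin down the provider -> default_model resolution added above.
from finbot.config import settings
from finbot.core.llm.client import LLMClient

def test_ollama_provider_uses_ollama_model(monkeypatch):
    monkeypatch.setattr(settings, "LLM_PROVIDER", " Ollama ")  # normalization too
    monkeypatch.setattr(settings, "OLLAMA_MODEL", "gemma3n:e2b")
    client = LLMClient()
    assert client.provider == "ollama"
    assert client.default_model == "gemma3n:e2b"
```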