diff --git a/anton/chat.py b/anton/chat.py
index 71251d2c..858cc477 100644
--- a/anton/chat.py
+++ b/anton/chat.py
@@ -61,6 +61,7 @@
format_file_message,
human_size,
)
+from anton.commands.bug_report import handle_report_bug
from anton.chat_session import build_runtime_context, rebuild_session
from anton.commands.session import handle_resume
from anton.commands.datasource import (
@@ -139,7 +140,6 @@
TOKEN_STATUS_CACHE_TTL = 60.0
-
class ChatSession:
"""Manages a multi-turn conversation with tool-call delegation."""
@@ -238,6 +238,7 @@ def _persist_history(self) -> None:
async def _build_system_prompt(self, user_message: str = "") -> str:
import datetime as _dt
+
_now = _dt.datetime.now()
_current_datetime = _now.strftime("%A, %B %d, %Y at %I:%M %p")
@@ -336,9 +337,9 @@ def _build_tools(self) -> list[dict]:
if self._cortex is not None:
wisdom = self._cortex.get_scratchpad_context()
if wisdom:
- scratchpad_tool[
- "description"
- ] += f"\n\nLessons from past sessions:\n{wisdom}"
+ scratchpad_tool["description"] += (
+ f"\n\nLessons from past sessions:\n{wisdom}"
+ )
tools = [scratchpad_tool]
if self._cortex is not None:
@@ -739,7 +740,10 @@ async def _stream_and_handle_tools(
# Detect max_tokens truncation — the LLM was cut off mid-response.
# Inject a continuation prompt so it can finish what it was doing.
- if llm_response.stop_reason in ("max_tokens", "length") and not llm_response.tool_calls:
+ if (
+ llm_response.stop_reason in ("max_tokens", "length")
+ and not llm_response.tool_calls
+ ):
self._history.append(
{"role": "assistant", "content": llm_response.content or ""}
)
@@ -920,7 +924,8 @@ async def _stream_and_handle_tools(
description=description,
)
elif tc.name == "connect_new_datasource" or (
- tc.name == "publish_or_preview" and tc.input.get("action") == "publish"
+ tc.name == "publish_or_preview"
+ and tc.input.get("action") == "publish"
):
# Interactive tool — pause spinner AND escape watcher
yield StreamTaskProgress(
@@ -1010,7 +1015,10 @@ async def _stream_and_handle_tools(
llm_response = response.response
# Detect max_tokens truncation inside tool loop
- if llm_response.stop_reason in ("max_tokens", "length") and not llm_response.tool_calls:
+ if (
+ llm_response.stop_reason in ("max_tokens", "length")
+ and not llm_response.tool_calls
+ ):
self._history.append(
{"role": "assistant", "content": llm_response.content or ""}
)
@@ -1269,7 +1277,6 @@ def _apply_error_tracking(
return result_text
-
async def _handle_connect(
console: Console,
settings: AntonSettings,
@@ -1331,7 +1338,9 @@ async def _handle_connect(
console.print(" [bold]q[/] Back")
console.print()
- action = await prompt_or_cancel("(anton) Select", choices=["1", "2", "q"], default="q")
+ action = await prompt_or_cancel(
+ "(anton) Select", choices=["1", "2", "q"], default="q"
+ )
if action is None or action == "q":
console.print("[anton.muted]Aborted.[/]")
console.print()
@@ -1392,7 +1401,9 @@ async def _handle_connect(
console.print(f" [bold]{i}[/] {ref_name}")
console.print()
ds_choices = [str(i) for i in range(1, len(mind_datasources) + 1)]
- ds_pick = await prompt_or_cancel("(anton) Select datasource", choices=ds_choices)
+ ds_pick = await prompt_or_cancel(
+ "(anton) Select datasource", choices=ds_choices
+ )
if ds_pick is None:
return session
picked_ds = mind_datasources[int(ds_pick) - 1]
@@ -1404,9 +1415,7 @@ async def _handle_connect(
if ds_name:
try:
- all_datasources = list_datasources(
- minds_url, api_key, verify=ssl_verify
- )
+ all_datasources = list_datasources(minds_url, api_key, verify=ssl_verify)
for ds in all_datasources:
if ds.get("name") == ds_name:
ds_engine = ds.get("engine", "unknown")
@@ -1493,15 +1502,15 @@ async def _handle_connect(
)
-
-
def _extract_html_title(path, re_module) -> str:
"""Extract
content from an HTML file. Returns '' if not found."""
try:
# Read only the first 4KB — title is always near the top
with open(path, "r", encoding="utf-8", errors="ignore") as f:
head = f.read(4096)
- m = re_module.search(r"]*>(.*?)", head, re_module.IGNORECASE | re_module.DOTALL)
+ m = re_module.search(
+ r"]*>(.*?)", head, re_module.IGNORECASE | re_module.DOTALL
+ )
return m.group(1).strip() if m else ""
except Exception:
return ""
@@ -1523,7 +1532,9 @@ async def _handle_publish(
# 1. Ensure Minds API key is available
if not settings.minds_api_key:
- console.print(" [anton.muted]To publish dashboards you need a free Minds account.[/]")
+ console.print(
+ " [anton.muted]To publish dashboards you need a free Minds account.[/]"
+ )
console.print()
has_key = await prompt_or_cancel(
" Do you have an mdb.ai API key?",
@@ -1563,9 +1574,13 @@ async def _handle_publish(
target = Path(settings.workspace_path) / file_arg
else:
# List HTML files sorted by modification time (most recent first)
- html_files = sorted(
- output_dir.glob("*.html"), key=lambda f: f.stat().st_mtime, reverse=True
- ) if output_dir.is_dir() else []
+ html_files = (
+ sorted(
+ output_dir.glob("*.html"), key=lambda f: f.stat().st_mtime, reverse=True
+ )
+ if output_dir.is_dir()
+ else []
+ )
if not html_files:
console.print(" [anton.warning]No HTML files found in .anton/output/[/]")
console.print()
@@ -1575,7 +1590,7 @@ async def _handle_publish(
offset = 0
while True:
- page = html_files[offset:offset + PAGE_SIZE]
+ page = html_files[offset : offset + PAGE_SIZE]
has_more = offset + PAGE_SIZE < len(html_files)
console.print(" [anton.cyan]Available reports:[/]")
@@ -1586,7 +1601,9 @@ async def _handle_publish(
console.print(f" [bold]{i}[/] {label} [anton.muted]{f.name}[/]")
if has_more:
- console.print(f"\n [anton.muted]m Show more ({len(html_files) - offset - PAGE_SIZE} remaining)[/]")
+ console.print(
+ f"\n [anton.muted]m Show more ({len(html_files) - offset - PAGE_SIZE} remaining)[/]"
+ )
console.print()
choice = await prompt_or_cancel(" Select", default="1")
@@ -1619,7 +1636,11 @@ async def _handle_publish(
from rich.live import Live
from rich.spinner import Spinner
- with Live(Spinner("dots", text=" Publishing...", style="anton.cyan"), console=console, transient=True):
+ with Live(
+ Spinner("dots", text=" Publishing...", style="anton.cyan"),
+ console=console,
+ transient=True,
+ ):
try:
result = publish(
target,
@@ -1641,8 +1662,6 @@ async def _handle_publish(
webbrowser.open(view_url)
-
-
async def _handle_unpublish(
console: Console,
settings,
@@ -1655,7 +1674,9 @@ async def _handle_unpublish(
# 1. Ensure Minds API key is available
if not settings.minds_api_key:
- console.print(" [anton.warning]No Minds API key configured. Run /publish first.[/]")
+ console.print(
+ " [anton.warning]No Minds API key configured. Run /publish first.[/]"
+ )
console.print()
return
@@ -1664,7 +1685,11 @@ async def _handle_unpublish(
from rich.spinner import Spinner
reports = []
- with Live(Spinner("dots", text=" Loading published reports...", style="anton.cyan"), console=console, transient=True):
+ with Live(
+ Spinner("dots", text=" Loading published reports...", style="anton.cyan"),
+ console=console,
+ transient=True,
+ ):
try:
reports = list_published(
api_key=settings.minds_api_key,
@@ -1686,7 +1711,7 @@ async def _handle_unpublish(
offset = 0
while True:
- page = reports[offset:offset + PAGE_SIZE]
+ page = reports[offset : offset + PAGE_SIZE]
has_more = offset + PAGE_SIZE < len(reports)
console.print(" [anton.cyan]Published reports:[/]")
@@ -1697,7 +1722,9 @@ async def _handle_unpublish(
console.print(f" [bold]{i}[/] {title} [anton.muted]{url}[/]")
if has_more:
- console.print(f"\n [anton.muted]m Show more ({len(reports) - offset - PAGE_SIZE} remaining)[/]")
+ console.print(
+ f"\n [anton.muted]m Show more ({len(reports) - offset - PAGE_SIZE} remaining)[/]"
+ )
console.print()
choice = await prompt_or_cancel(" Select report to unpublish")
@@ -1735,7 +1762,11 @@ async def _handle_unpublish(
return
# 5. Delete
- with Live(Spinner("dots", text=" Removing...", style="anton.cyan"), console=console, transient=True):
+ with Live(
+ Spinner("dots", text=" Removing...", style="anton.cyan"),
+ console=console,
+ transient=True,
+ ):
try:
unpublish(
selected["md5"],
@@ -1760,7 +1791,9 @@ async def _agent_zero(console: Console, session: "ChatSession", settings) -> str
import os as _os
import time as _time
- script_path = Path(__file__).resolve().parent / "demo_data" / "nvda_btc_scratchpad_backup.py"
+ script_path = (
+ Path(__file__).resolve().parent / "demo_data" / "nvda_btc_scratchpad_backup.py"
+ )
if not script_path.is_file():
return None
@@ -1797,8 +1830,31 @@ async def _agent_zero(console: Console, session: "ChatSession", settings) -> str
answer_text = (answer or "").strip().lower()
# Classify: does the user want to run it?
- _skip_words = {"no", "n", "skip", "nah", "pass", "nope", "later", "chat", "straight"}
- _go_words = {"yes", "y", "ok", "sure", "go", "yeah", "yep", "run", "do it", "let's go", "lets go", "go for it"}
+ _skip_words = {
+ "no",
+ "n",
+ "skip",
+ "nah",
+ "pass",
+ "nope",
+ "later",
+ "chat",
+ "straight",
+ }
+ _go_words = {
+ "yes",
+ "y",
+ "ok",
+ "sure",
+ "go",
+ "yeah",
+ "yep",
+ "run",
+ "do it",
+ "let's go",
+ "lets go",
+ "go for it",
+ }
wants_demo = None
for w in _go_words:
@@ -1816,20 +1872,25 @@ async def _agent_zero(console: Console, session: "ChatSession", settings) -> str
if not wants_demo:
console.print()
- console.print(" [anton.muted]All good! Ask me anything \u2014 data questions, dashboards, analysis, you name it.[/]")
+ console.print(
+ " [anton.muted]All good! Ask me anything \u2014 data questions, dashboards, analysis, you name it.[/]"
+ )
console.print()
return None
# Typed message with ellipsis animation
console.print()
from anton.channel.theme import get_palette as _gp3
+
_c = _gp3().cyan
_r, _g, _b = int(_c[1:3], 16), int(_c[3:5], 16), int(_c[5:7], 16)
_ac = f"\033[1;38;2;{_r};{_g};{_b}m"
_ar = "\033[0m"
_prefix = f"{_ac}anton>{_ar} "
- _typed_msg = "Perfect! Fetching live data, crunching numbers, and building the dashboard"
+ _typed_msg = (
+ "Perfect! Fetching live data, crunching numbers, and building the dashboard"
+ )
console.file.write(_prefix)
console.file.flush()
for ch in _typed_msg:
@@ -1862,13 +1923,12 @@ async def _agent_zero(console: Console, session: "ChatSession", settings) -> str
output_html = str(Path(output_dir) / "nvda_btc_dashboard.html")
code = (
f"import os as _os; _os.makedirs({output_dir!r}, exist_ok=True)\n"
- f"__file__ = {str(script_path)!r}\n"
- + code
+ f"__file__ = {str(script_path)!r}\n" + code
)
# Replace the OUTPUT_PATH line so the dashboard goes to .anton/output/
code = code.replace(
'OUTPUT_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)), "nvda_btc_dashboard.html")',
- f'OUTPUT_PATH = {output_html!r}',
+ f"OUTPUT_PATH = {output_html!r}",
)
from anton.scratchpad import Cell
@@ -1879,7 +1939,9 @@ async def _agent_zero(console: Console, session: "ChatSession", settings) -> str
pad = await session._scratchpads.get_or_create("main")
# Pre-install dependencies so the main script doesn't fail mid-run
- install_spinner = Text(" Installing dependencies (yfinance, pandas, numpy)...", style="anton.muted")
+ install_spinner = Text(
+ " Installing dependencies (yfinance, pandas, numpy)...", style="anton.muted"
+ )
with Live(
Spinner("dots", text=install_spinner, style="anton.cyan"),
console=console,
@@ -1889,7 +1951,9 @@ async def _agent_zero(console: Console, session: "ChatSession", settings) -> str
await pad.install_packages(["yfinance", "pandas", "numpy"])
console.print(f" [anton.success]\u2714[/] [anton.muted]Dependencies ready[/]")
- spinner_text = Text(" Scratchpad(Building NVDA vs BTC dashboard...)", style="anton.muted")
+ spinner_text = Text(
+ " Scratchpad(Building NVDA vs BTC dashboard...)", style="anton.muted"
+ )
cell = None
with Live(
Spinner("dots", text=spinner_text, style="anton.cyan"),
@@ -1918,26 +1982,30 @@ async def _agent_zero(console: Console, session: "ChatSession", settings) -> str
console.print()
return None
- console.print(f" [anton.success]\u2714[/] [anton.muted]Dashboard built successfully[/]")
+ console.print(
+        " [anton.success]\u2714[/] [anton.muted]Dashboard built successfully[/]"
+ )
# Inject context into session history so the LLM knows data is live
_demo_stdout = (cell.stdout or "")[:3000]
- session._history.append({
- "role": "assistant",
- "content": (
- "I built an interactive NVIDIA vs Bitcoin 5-year investment dashboard. "
- "The dashboard HTML is at: " + output_html + "\n\n"
- "The scratchpad 'main' is still running with all data loaded in memory:\n"
- "- prices DataFrame (monthly OHLCV, returns, cumulative, drawdowns)\n"
- "- risk DataFrame (annual stats, Sharpe, Sortino, Calmar, win rate)\n"
- "- annual DataFrame (year-by-year breakdown)\n"
- "- mc DataFrame (1,000-path Monte Carlo, 60 months)\n"
- "- scorecard DataFrame (12-metric head-to-head comparison)\n\n"
- "All variables are live in the 'main' scratchpad — the user can ask "
- "follow-up questions and I can use the existing data without re-fetching.\n\n"
- f"Script output:\n{_demo_stdout}"
- ),
- })
+ session._history.append(
+ {
+ "role": "assistant",
+ "content": (
+ "I built an interactive NVIDIA vs Bitcoin 5-year investment dashboard. "
+ "The dashboard HTML is at: " + output_html + "\n\n"
+ "The scratchpad 'main' is still running with all data loaded in memory:\n"
+ "- prices DataFrame (monthly OHLCV, returns, cumulative, drawdowns)\n"
+ "- risk DataFrame (annual stats, Sharpe, Sortino, Calmar, win rate)\n"
+ "- annual DataFrame (year-by-year breakdown)\n"
+ "- mc DataFrame (1,000-path Monte Carlo, 60 months)\n"
+ "- scorecard DataFrame (12-metric head-to-head comparison)\n\n"
+ "All variables are live in the 'main' scratchpad — the user can ask "
+ "follow-up questions and I can use the existing data without re-fetching.\n\n"
+ f"Script output:\n{_demo_stdout}"
+ ),
+ }
+ )
# Show findings — typed out like the intro message
console.print()
@@ -1953,6 +2021,7 @@ async def _agent_zero(console: Console, session: "ChatSession", settings) -> str
"",
]
from anton.channel.theme import get_palette as _gp2
+
_cyan = _gp2().cyan
# Convert hex color to ANSI 24-bit escape
_r, _g, _b = int(_cyan[1:3], 16), int(_cyan[3:5], 16), int(_cyan[5:7], 16)
@@ -1968,7 +2037,9 @@ async def _agent_zero(console: Console, session: "ChatSession", settings) -> str
console.file.write("\n")
console.file.flush()
console.print()
- console.print("[anton.muted] Ask me follow-ups, a completely different question, or connect your own data (using the /connect command).[/]")
+ console.print(
+ "[anton.muted] Ask me follow-ups, a completely different question, or connect your own data (using the /connect command).[/]"
+ )
console.print("[anton.muted] What\u2019s next, boss?[/]")
console.print()
@@ -2087,14 +2158,32 @@ def _desktop_greeting(console: Console, settings) -> None:
def run_chat(
- console: Console, settings: AntonSettings, *, resume: bool = False, first_run: bool = False, desktop_first_run: bool = False
+ console: Console,
+ settings: AntonSettings,
+ *,
+ resume: bool = False,
+ first_run: bool = False,
+ desktop_first_run: bool = False,
) -> None:
"""Launch the interactive chat REPL."""
- asyncio.run(_chat_loop(console, settings, resume=resume, first_run=first_run, desktop_first_run=desktop_first_run))
+ asyncio.run(
+ _chat_loop(
+ console,
+ settings,
+ resume=resume,
+ first_run=first_run,
+ desktop_first_run=desktop_first_run,
+ )
+ )
async def _chat_loop(
- console: Console, settings: AntonSettings, *, resume: bool = False, first_run: bool = False, desktop_first_run: bool = False
+ console: Console,
+ settings: AntonSettings,
+ *,
+ resume: bool = False,
+ first_run: bool = False,
+ desktop_first_run: bool = False,
) -> None:
from anton.context.self_awareness import SelfAwarenessContext
from anton.llm.client import LLMClient
@@ -2225,6 +2314,7 @@ async def _chat_loop(
console.print()
from anton.analytics import send_event
+
_query_count = 0
_total_questions = 0 # tracks first 10 questions for time estimates
@@ -2307,6 +2397,7 @@ def _bottom_toolbar():
try:
from anton.channel.theme import get_palette as _gp
+
_you_color = _gp().user_prompt
user_input = await prompt_session.prompt_async(
[(f"bold fg:{_you_color}", "you>"), ("", " ")]
@@ -2414,9 +2505,7 @@ def _bottom_toolbar():
elif cmd == "/edit":
arg = parts[1].strip() if len(parts) > 1 else ""
if not arg:
- console.print(
- "[anton.warning]Usage: /edit [/]"
- )
+ console.print("[anton.warning]Usage: /edit [/]")
console.print()
else:
session = await handle_connect_datasource(
@@ -2428,9 +2517,7 @@ def _bottom_toolbar():
continue
elif cmd == "/test":
arg = parts[1].strip() if len(parts) > 1 else ""
- await handle_test_datasource(
- console, session._scratchpads, arg
- )
+ await handle_test_datasource(console, session._scratchpads, arg)
continue
elif cmd == "/resume":
session, resumed_id = await handle_resume(
@@ -2458,6 +2545,11 @@ def _bottom_toolbar():
elif cmd == "/unpublish":
await _handle_unpublish(console, settings, workspace)
continue
+ elif cmd == "/report-bug":
+ await handle_report_bug(
+ console, settings, workspace, session, cortex
+ )
+ continue
elif cmd == "/help":
print_slash_help(console)
continue
@@ -2541,9 +2633,13 @@ def _bottom_toolbar():
parts = []
if settings.minds_api_key and settings.minds_url:
- #TODO: Lets check if this is best solution
+            # TODO: Let's check whether this is the best solution
now = time.monotonic()
- if last_token_status_checked_at is None or (now - last_token_status_checked_at) >= TOKEN_STATUS_CACHE_TTL:
+ if (
+ last_token_status_checked_at is None
+ or (now - last_token_status_checked_at)
+ >= TOKEN_STATUS_CACHE_TTL
+ ):
last_token_status = check_minds_token_limits(
settings.minds_url.rstrip("/"),
settings.minds_api_key,
@@ -2551,8 +2647,14 @@ def _bottom_toolbar():
)
last_token_status_checked_at = now
if last_token_status.billing_cycle_limit > 0:
- _pct = last_token_status.billing_cycle_used * 100 // last_token_status.billing_cycle_limit
- parts.append(f"{last_token_status.billing_cycle_used:,} / {last_token_status.billing_cycle_limit:,} ({_pct}%)")
+ _pct = (
+ last_token_status.billing_cycle_used
+ * 100
+ // last_token_status.billing_cycle_limit
+ )
+ parts.append(
+ f"{last_token_status.billing_cycle_used:,} / {last_token_status.billing_cycle_limit:,} ({_pct}%)"
+ )
parts.append(f"{elapsed:.1f}s")
if not settings.minds_api_key and not settings.minds_url:
@@ -2562,8 +2664,17 @@ def _bottom_toolbar():
toolbar["stats"] = " ".join(parts)
toolbar["status"] = ""
display.finish()
- if settings.minds_api_key and settings.minds_url and last_token_status is not None and last_token_status.status is TokenLimitStatus.WARNING:
- pct = int(last_token_status.used / last_token_status.limit * 100) if last_token_status.limit else 80
+ if (
+ settings.minds_api_key
+ and settings.minds_url
+ and last_token_status is not None
+ and last_token_status.status is TokenLimitStatus.WARNING
+ ):
+ pct = (
+ int(last_token_status.used / last_token_status.limit * 100)
+ if last_token_status.limit
+ else 80
+ )
console.print(
f"[anton.warning]Approaching token limit: {last_token_status.used:,} / "
f"{last_token_status.limit:,} tokens used ({pct}%). "
diff --git a/anton/commands/bug_report.py b/anton/commands/bug_report.py
new file mode 100644
index 00000000..c98de08d
--- /dev/null
+++ b/anton/commands/bug_report.py
@@ -0,0 +1,161 @@
+"""Bug report command handler."""
+
+from __future__ import annotations
+
+import webbrowser
+from pathlib import Path
+from typing import TYPE_CHECKING
+
+from rich.console import Console
+
+from anton.diagnostics import collect_diagnostics, save_diagnostics_file
+from anton.utils.prompt import prompt_or_cancel
+
+from anton.publisher import publish_bug_report
+
+if TYPE_CHECKING:
+ from anton.chat_session import ChatSession
+ from anton.config.settings import AntonSettings
+ from anton.memory.cortex import Cortex
+ from anton.workspace import Workspace
+
+
+async def handle_report_bug(
+ console: Console,
+ settings: AntonSettings,
+ workspace: Workspace | None,
+ session: ChatSession | None,
+ cortex: Cortex | None,
+) -> None:
+    """Handle /report-bug: collect diagnostics and submit to the bug endpoint."""
+ console.print()
+ console.print("[anton.cyan]Bug Report[/]")
+ console.print()
+
+ # Privacy consent prompt
+ console.print("[anton.warning]⚠️ Important Privacy Notice[/]")
+ console.print()
+ console.print(" This bug report will include:")
+ console.print(" • Your conversation history from this session")
+ console.print(" • System information and Anton configuration")
+ console.print(" • Connected datasource names (no credentials)")
+ console.print(" • Recent logs and memory state")
+ console.print()
+ console.print(
+ " [bold]Our dev team will be able to see all of this information.[/]"
+ )
+ console.print()
+
+ consent = await prompt_or_cancel(
+ " Do you agree to share this information?",
+ choices=["y", "n"],
+ choices_display="y/n",
+ default="n",
+ )
+
+ if consent is None or consent.lower() != "y":
+ console.print()
+ console.print(" [anton.muted]Bug report cancelled.[/]")
+ console.print()
+ return
+
+ console.print()
+
+ # Optional bug description
+ add_description = await prompt_or_cancel(
+ " Would you like to add a description of the bug?",
+ choices=["y", "n"],
+ choices_display="y/n",
+ default="y",
+ )
+
+ bug_description = None
+ if add_description and add_description.lower() == "y":
+ console.print()
+ console.print(
+ " [anton.muted]Please describe the bug (press Enter when done):[/]"
+ )
+ bug_description = await prompt_or_cancel(" ")
+ if bug_description is None:
+ bug_description = ""
+
+ console.print()
+
+ # Collect diagnostics
+ from rich.live import Live
+ from rich.spinner import Spinner
+
+ with Live(
+ Spinner(
+ "dots", text=" Collecting diagnostic information...", style="anton.cyan"
+ ),
+ console=console,
+ transient=True,
+ ):
+ try:
+ diagnostics = collect_diagnostics(settings, session, workspace, cortex)
+
+ # Add bug description if provided
+ if bug_description:
+ diagnostics["user_description"] = bug_description
+
+ # Save to file
+ output_dir = Path(settings.workspace_path) / ".anton" / "output"
+ diagnostics_file = save_diagnostics_file(diagnostics, output_dir)
+
+ except Exception as e:
+ console.print(f" [anton.error]Failed to collect diagnostics: {e}[/]")
+ console.print()
+ return
+
+ # Ensure Minds API key is available
+ if not settings.minds_api_key:
+ console.print(
+ " [anton.muted]To submit bug reports you need a free Minds account.[/]"
+ )
+ console.print()
+ has_key = await prompt_or_cancel(
+ " Do you have an mdb.ai API key?",
+ choices=["y", "n"],
+ choices_display="y/n",
+ default="y",
+ )
+ if has_key is None:
+ console.print()
+ return
+ if has_key.lower() == "n":
+ webbrowser.open("https://mdb.ai/")
+ console.print()
+
+ api_key = await prompt_or_cancel(" API key", password=True)
+ if api_key is None or not api_key.strip():
+ console.print()
+ return
+ api_key = api_key.strip()
+ settings.minds_api_key = api_key
+ if workspace:
+ workspace.set_secret("ANTON_MINDS_API_KEY", api_key)
+ console.print()
+
+ # Submit bug report
+ with Live(
+ Spinner("dots", text=" Submitting bug report...", style="anton.cyan"),
+ console=console,
+ transient=True,
+ ):
+ try:
+ publish_bug_report(
+ diagnostics_file,
+ api_key=settings.minds_api_key,
+ bug_report_url=settings.bug_report_url or settings.publish_url,
+ ssl_verify=settings.minds_ssl_verify,
+ )
+
+ except Exception as e:
+ console.print(f" [anton.error]Failed to submit bug report: {e}[/]")
+ console.print()
+ return
+
+ console.print(" [anton.success]Bug report submitted successfully![/]")
+ console.print(" [anton.muted]Thank you for helping us improve Anton.[/]")
+ console.print()
diff --git a/anton/commands/ui.py b/anton/commands/ui.py
index f5bcabf6..6e6134ad 100644
--- a/anton/commands/ui.py
+++ b/anton/commands/ui.py
@@ -17,7 +17,9 @@ def handle_theme(console: Console, arg: str) -> None:
elif arg in ("light", "dark"):
new_mode = arg
else:
- console.print(f"[anton.warning]Unknown theme '{arg}'. Use: /theme light | /theme dark[/]")
+ console.print(
+ f"[anton.warning]Unknown theme '{arg}'. Use: /theme light | /theme dark[/]"
+ )
console.print()
return
@@ -37,7 +39,9 @@ def print_slash_help(console: Console) -> None:
console.print(" [bold]/llm[/] — Change LLM provider or API key")
console.print("\n[bold]Data Connections[/]")
- console.print(" [bold]/connect[/] — Connect a database or API to your Local Vault")
+ console.print(
+ " [bold]/connect[/] — Connect a database or API to your Local Vault"
+ )
console.print(" [bold]/list[/] — List all saved connections")
console.print(" [bold]/edit[/] — Edit credentials for an existing connection")
console.print(" [bold]/remove[/] — Remove a saved connection")
@@ -54,6 +58,9 @@ def print_slash_help(console: Console) -> None:
console.print(" [bold]/publish[/] — Publish an HTML report to the web")
console.print(" [bold]/unpublish[/] — Remove a published report")
+ console.print("\n[bold]Support[/]")
+ console.print(" [bold]/report-bug[/] — Submit a bug report with diagnostic info")
+
console.print("\n[bold]General[/]")
console.print(" [bold]/help[/] — Show this help menu")
console.print(" [bold]exit[/] — Exit the chat")
diff --git a/anton/config/settings.py b/anton/config/settings.py
index c12d9d29..6eb59520 100644
--- a/anton/config/settings.py
+++ b/anton/config/settings.py
@@ -22,7 +22,12 @@ def _build_env_files() -> list[str]:
class AntonSettings(BaseSettings):
- model_config = {"env_prefix": "ANTON_", "env_file": _ENV_FILES, "env_file_encoding": "utf-8", "extra": "ignore"}
+ model_config = {
+ "env_prefix": "ANTON_",
+ "env_file": _ENV_FILES,
+ "env_file_encoding": "utf-8",
+ "extra": "ignore",
+ }
planning_provider: str = "anthropic"
planning_model: str = "claude-sonnet-4-6"
@@ -44,7 +49,9 @@ class AntonSettings(BaseSettings):
episodic_memory: bool = True # episodic memory archive — on by default
- proactive_dashboards: bool = False # when True, build HTML dashboards; when False, CLI output only
+ proactive_dashboards: bool = (
+ False # when True, build HTML dashboards; when False, CLI output only
+ )
theme: str = "auto"
@@ -68,6 +75,7 @@ class AntonSettings(BaseSettings):
# Publish service (anton-services API Gateway)
publish_url: str = "https://4nton.ai"
+ bug_report_url: str | None = None # Defaults to publish_url if not set
@field_validator("minds_ssl_verify", mode="before")
@classmethod
@@ -81,7 +89,10 @@ def model_post_init(self, __context) -> None:
if (
self.minds_api_key
and not self.openai_api_key
- and (self.planning_provider == "openai-compatible" or self.coding_provider == "openai-compatible")
+ and (
+ self.planning_provider == "openai-compatible"
+ or self.coding_provider == "openai-compatible"
+ )
):
self.openai_api_key = self.minds_api_key
if not self.openai_base_url:
diff --git a/anton/diagnostics.py b/anton/diagnostics.py
new file mode 100644
index 00000000..1020d09c
--- /dev/null
+++ b/anton/diagnostics.py
@@ -0,0 +1,180 @@
+"""Diagnostic information collection for bug reports."""
+
+from __future__ import annotations
+
+import json
+import platform
+import sys
+from datetime import UTC, datetime
+from pathlib import Path
+from typing import TYPE_CHECKING, Any
+
+from anton import __version__
+from anton.data_vault import DataVault
+
+if TYPE_CHECKING:
+ from anton.chat_session import ChatSession
+ from anton.config.settings import AntonSettings
+ from anton.memory.cortex import Cortex
+ from anton.workspace import Workspace
+
+
+def collect_diagnostics(
+ settings: AntonSettings,
+ session: ChatSession | None = None,
+ workspace: Workspace | None = None,
+ cortex: Cortex | None = None,
+) -> dict[str, Any]:
+ """Collect comprehensive diagnostic information for bug reports.
+
+ Returns a dictionary with:
+ - system_info: OS, Python, Anton versions
+ - packages: Installed package versions
+ - config: Sanitized configuration
+ - datasources: Connected datasource names
+ - workspace: Current workspace info
+ - memory: Memory state if enabled
+ - conversation: Current conversation history
+ - logs: Recent log entries
+ """
+ diagnostics = {
+ "timestamp": datetime.now(UTC).isoformat(),
+ "anton_version": __version__,
+ }
+
+ # System information
+ diagnostics["system_info"] = {
+ "platform": platform.platform(),
+ "python_version": sys.version,
+ "python_executable": sys.executable,
+ "os_name": platform.system(),
+ "os_release": platform.release(),
+ "machine": platform.machine(),
+ }
+
+ # Installed packages
+ try:
+ import importlib.metadata
+
+ packages = {}
+ for pkg in ["anthropic", "openai", "pydantic", "prompt_toolkit", "rich", "typer"]:
+ try:
+ packages[pkg] = importlib.metadata.version(pkg)
+ except importlib.metadata.PackageNotFoundError:
+ packages[pkg] = "not installed"
+ diagnostics["packages"] = packages
+ except Exception:
+ diagnostics["packages"] = {}
+
+ # Sanitized configuration
+ config_dict = settings.model_dump()
+ # Remove sensitive fields
+ sensitive_fields = [
+ "anthropic_api_key",
+ "openai_api_key",
+ "minds_api_key",
+ "google_api_key",
+ "groq_api_key",
+ "aws_access_key_id",
+ "aws_secret_access_key",
+ ]
+ for field in sensitive_fields:
+ if field in config_dict:
+ config_dict[field] = "***REDACTED***" if config_dict[field] else None
+ diagnostics["config"] = config_dict
+
+ # Connected datasources (names only)
+ try:
+ vault = DataVault()
+ connections = vault.list_connections()
+ diagnostics["datasources"] = [
+ {"engine": c["engine"], "name": c["name"]} for c in connections
+ ]
+ except Exception as e:
+        diagnostics["datasources"] = f"Error collecting: {e}"
+
+ # Workspace information
+ if workspace:
+ diagnostics["workspace"] = {
+ "base": str(workspace.base),
+ "name": workspace.name,
+ "has_git": workspace.git_root is not None,
+ }
+ else:
+ diagnostics["workspace"] = None
+
+ # Memory state
+ if cortex:
+ try:
+ diagnostics["memory"] = {
+ "enabled": cortex.enabled,
+ "mode": settings.memory_mode,
+ "episodic_enabled": cortex._episodic.enabled if cortex._episodic else False,
+ }
+ except Exception as e:
+            diagnostics["memory"] = f"Error collecting: {e}"
+ else:
+ diagnostics["memory"] = {"enabled": False}
+
+ # Current conversation history
+ if session:
+ try:
+ # Get conversation history without tool outputs for brevity
+ history = []
+ for msg in session._history:
+ if msg.get("role") in ["user", "assistant"]:
+ entry = {"role": msg["role"]}
+ content = msg.get("content", "")
+ if isinstance(content, str):
+ # Truncate very long messages
+ entry["content"] = (
+ content[:1000] + "..." if len(content) > 1000 else content
+ )
+ else:
+ entry["content"] = "[complex content]"
+ history.append(entry)
+ diagnostics["conversation"] = {
+ "turn_count": session._turn_count,
+ "history_length": len(session._history),
+ "history_sample": history[-10:], # Last 10 messages
+ }
+ except Exception as e:
+            diagnostics["conversation"] = f"Error collecting: {e}"
+ else:
+ diagnostics["conversation"] = None
+
+ # Recent logs
+ try:
+ log_dir = Path(settings.workspace_path) / ".anton" / "logs"
+ if log_dir.exists():
+ log_files = sorted(log_dir.glob("*.log"), key=lambda f: f.stat().st_mtime, reverse=True)
+ if log_files:
+ # Read last 100 lines from most recent log
+ recent_log = log_files[0]
+ lines = recent_log.read_text().splitlines()
+ diagnostics["recent_logs"] = {
+ "log_file": recent_log.name,
+ "last_lines": lines[-100:] if len(lines) > 100 else lines,
+ }
+ else:
+ diagnostics["recent_logs"] = None
+ else:
+ diagnostics["recent_logs"] = None
+ except Exception as e:
+        diagnostics["recent_logs"] = f"Error collecting: {e}"
+
+ return diagnostics
+
+
+def save_diagnostics_file(diagnostics: dict[str, Any], output_dir: Path) -> Path:
+ """Save diagnostics to a JSON file in the output directory."""
+ output_dir.mkdir(parents=True, exist_ok=True)
+
+ timestamp = datetime.now(UTC).strftime("%Y%m%d_%H%M%S")
+ filename = f"bug_report_{timestamp}.json"
+ filepath = output_dir / filename
+
+ with open(filepath, "w") as f:
+ json.dump(diagnostics, f, indent=2, default=str)
+
+ return filepath
diff --git a/anton/publisher.py b/anton/publisher.py
index b6bf9738..770ad0ef 100644
--- a/anton/publisher.py
+++ b/anton/publisher.py
@@ -147,3 +147,34 @@ def unpublish(
url = f"{publish_url.rstrip('/')}/delete/{md5}"
raw = minds_request(url, api_key, method="DELETE", verify=ssl_verify)
return json.loads(raw)
+
+
+def publish_bug_report(
+ file_path: Path,
+ *,
+ api_key: str,
+ bug_report_url: str = DEFAULT_PUBLISH_URL,
+ ssl_verify: bool = True,
+) -> dict:
+ """Upload a bug report JSON file to the bug report endpoint.
+
+ Response keys: status, message, report_id (if available)
+ """
+ if not file_path.exists():
+ raise FileNotFoundError(f"Path not found: {file_path}")
+
+ # Read the JSON file and send it directly
+ with open(file_path, "rb") as f:
+ content = f.read()
+
+ payload = json.dumps(
+ {
+ "bug_report": True,
+ "file_content": base64.b64encode(content).decode(),
+ "filename": file_path.name,
+ }
+ ).encode()
+
+ url = f"{bug_report_url.rstrip('/')}/bug-report"
+ raw = minds_request(url, api_key, method="POST", payload=payload, verify=ssl_verify)
+ return json.loads(raw)
diff --git a/tests/test_bug_report.py b/tests/test_bug_report.py
new file mode 100644
index 00000000..90c4b159
--- /dev/null
+++ b/tests/test_bug_report.py
@@ -0,0 +1,256 @@
+"""Tests for bug report functionality."""
+
+from __future__ import annotations
+
+import json
+from pathlib import Path
+from unittest.mock import MagicMock, patch
+
+import pytest
+from rich.console import Console
+
+from anton.commands.bug_report import handle_report_bug
+from anton.config.settings import AntonSettings
+from anton.diagnostics import collect_diagnostics, save_diagnostics_file
+
+
+class TestDiagnostics:
+ """Test diagnostic collection functionality."""
+
+ def test_collect_diagnostics_basic(self):
+ """Test basic diagnostic collection without optional components."""
+ settings = AntonSettings()
+ diagnostics = collect_diagnostics(settings)
+
+ # Check required fields
+ assert "timestamp" in diagnostics
+ assert "anton_version" in diagnostics
+ assert "system_info" in diagnostics
+ assert "packages" in diagnostics
+ assert "config" in diagnostics
+ assert "datasources" in diagnostics
+ assert "workspace" in diagnostics
+ assert "memory" in diagnostics
+ assert "conversation" in diagnostics
+ assert "recent_logs" in diagnostics
+
+ # Check system info
+ sys_info = diagnostics["system_info"]
+ assert "platform" in sys_info
+ assert "python_version" in sys_info
+ assert "os_name" in sys_info
+
+ # Check config sanitization
+ config = diagnostics["config"]
+ assert (
+ "anthropic_api_key" not in config
+ or config["anthropic_api_key"] == "***REDACTED***"
+ )
+ assert (
+ "openai_api_key" not in config
+ or config["openai_api_key"] == "***REDACTED***"
+ )
+ assert (
+ "minds_api_key" not in config or config["minds_api_key"] == "***REDACTED***"
+ )
+
+ def test_collect_diagnostics_with_session(self):
+ """Test diagnostic collection with an active session."""
+ settings = AntonSettings()
+
+ # Mock session
+ session = MagicMock()
+ session._history = [
+ {"role": "user", "content": "Hello"},
+ {"role": "assistant", "content": "Hi there!"},
+ {"role": "user", "content": "How are you?"},
+ {"role": "assistant", "content": "I'm doing well, thank you!"},
+ ]
+ session._turn_count = 2
+
+ diagnostics = collect_diagnostics(settings, session=session)
+
+ assert diagnostics["conversation"] is not None
+ assert diagnostics["conversation"]["turn_count"] == 2
+ assert diagnostics["conversation"]["history_length"] == 4
+ assert len(diagnostics["conversation"]["history_sample"]) == 4
+
+ def test_collect_diagnostics_with_workspace(self):
+ """Test diagnostic collection with workspace info."""
+ settings = AntonSettings()
+
+ # Mock workspace
+ workspace = MagicMock()
+ workspace.base = Path("/test/workspace")
+ workspace.name = "test-workspace"
+ workspace.git_root = Path("/test/workspace/.git")
+
+ diagnostics = collect_diagnostics(settings, workspace=workspace)
+
+ assert diagnostics["workspace"] is not None
+ assert diagnostics["workspace"]["base"] == "/test/workspace"
+ assert diagnostics["workspace"]["name"] == "test-workspace"
+ assert diagnostics["workspace"]["has_git"] is True
+
+ def test_collect_diagnostics_with_cortex(self):
+ """Test diagnostic collection with memory/cortex info."""
+ settings = AntonSettings(memory_mode="autopilot")
+
+ # Mock cortex
+ cortex = MagicMock()
+ cortex.enabled = True
+ cortex._episodic = MagicMock()
+ cortex._episodic.enabled = True
+
+ diagnostics = collect_diagnostics(settings, cortex=cortex)
+
+ assert diagnostics["memory"]["enabled"] is True
+ assert diagnostics["memory"]["mode"] == "autopilot"
+ assert diagnostics["memory"]["episodic_enabled"] is True
+
+ def test_save_diagnostics_file(self, tmp_path):
+ """Test saving diagnostics to file."""
+ diagnostics = {
+ "timestamp": "2024-01-01T00:00:00",
+ "anton_version": "1.0.0",
+ "system_info": {"platform": "test"},
+ }
+
+ output_file = save_diagnostics_file(diagnostics, tmp_path)
+
+ assert output_file.exists()
+ assert output_file.name.startswith("bug_report_")
+ assert output_file.suffix == ".json"
+
+ # Verify content
+ with open(output_file) as f:
+ saved_data = json.load(f)
+ assert saved_data == diagnostics
+
+
+class TestBugReportCommand:
+ """Test bug report command handler."""
+
+ @pytest.mark.asyncio
+ async def test_handle_report_bug_cancelled(self):
+ """Test bug report cancelled by user."""
+ console = Console()
+ settings = AntonSettings()
+
+ with patch("anton.commands.bug_report.prompt_or_cancel", return_value="n"):
+ await handle_report_bug(console, settings, None, None, None)
+ # Should return early without error
+
+ @pytest.mark.asyncio
+ async def test_handle_report_bug_with_description(self):
+ """Test bug report with user description."""
+ console = Console()
+ settings = AntonSettings(minds_api_key="test-key")
+
+ # Mock the prompts
+ prompt_responses = ["y", "y", "This is a test bug description"]
+ with patch(
+ "anton.commands.bug_report.prompt_or_cancel", side_effect=prompt_responses
+ ):
+ with patch("anton.commands.bug_report.collect_diagnostics") as mock_collect:
+ with patch(
+ "anton.commands.bug_report.save_diagnostics_file"
+ ) as mock_save:
+ with patch(
+ "anton.commands.bug_report.publish_bug_report"
+ ) as mock_publish:
+ mock_collect.return_value = {"test": "data"}
+ mock_save.return_value = Path("/test/bug_report.json")
+
+ await handle_report_bug(console, settings, None, None, None)
+
+ # Verify diagnostics were collected
+ mock_collect.assert_called_once()
+
+ # Verify description was added
+ saved_diagnostics = mock_save.call_args[0][0]
+ assert (
+ saved_diagnostics["user_description"]
+ == "This is a test bug description"
+ )
+
+ # Verify publish was called
+ mock_publish.assert_called_once()
+
+ @pytest.mark.asyncio
+ async def test_handle_report_bug_no_api_key(self):
+ """Test bug report when API key needs to be entered."""
+ console = Console()
+ settings = AntonSettings(minds_api_key=None) # Explicitly set no API key
+
+        # Prompt sequence: consent (y), add description? (n), have API key? (n), API key value
+ prompt_responses = ["y", "n", "n", "test-api-key"]
+ with patch(
+ "anton.commands.bug_report.prompt_or_cancel", side_effect=prompt_responses
+ ) as mock_prompt:
+ with patch("anton.commands.bug_report.collect_diagnostics") as mock_collect:
+ with patch(
+ "anton.commands.bug_report.save_diagnostics_file"
+ ) as mock_save:
+ with patch(
+ "anton.commands.bug_report.publish_bug_report"
+ ) as mock_publish:
+ with patch(
+ "anton.commands.bug_report.webbrowser.open"
+ ) as mock_browser:
+ mock_collect.return_value = {"test": "data"}
+ mock_save.return_value = Path("/test/bug_report.json")
+
+ await handle_report_bug(console, settings, None, None, None)
+
+                            # Verify all four prompts were consumed
+ assert mock_prompt.call_count == 4, (
+ f"Expected 4 prompts, got {mock_prompt.call_count}"
+ )
+
+ # Verify browser was opened for registration
+ mock_browser.assert_called_once()
+
+ # Verify API key was set
+ assert settings.minds_api_key == "test-api-key"
+
+ # Verify publish was called
+ mock_publish.assert_called_once()
+
+ @pytest.mark.asyncio
+ async def test_handle_report_bug_collection_error(self):
+ """Test bug report when diagnostic collection fails."""
+ console = Console()
+ settings = AntonSettings()
+
+ with patch("anton.commands.bug_report.prompt_or_cancel", return_value="y"):
+ with patch(
+ "anton.commands.bug_report.collect_diagnostics",
+ side_effect=Exception("Test error"),
+ ):
+ await handle_report_bug(console, settings, None, None, None)
+ # Should handle error gracefully
+
+ @pytest.mark.asyncio
+ async def test_handle_report_bug_publish_error(self):
+ """Test bug report when publishing fails."""
+ console = Console()
+ settings = AntonSettings(minds_api_key="test-key")
+
+ prompt_responses = ["y", "n"]
+ with patch(
+ "anton.commands.bug_report.prompt_or_cancel", side_effect=prompt_responses
+ ):
+ with patch("anton.commands.bug_report.collect_diagnostics") as mock_collect:
+ with patch(
+ "anton.commands.bug_report.save_diagnostics_file"
+ ) as mock_save:
+ with patch(
+ "anton.commands.bug_report.publish_bug_report",
+ side_effect=Exception("Network error"),
+ ):
+ mock_collect.return_value = {"test": "data"}
+ mock_save.return_value = Path("/test/bug_report.json")
+
+ await handle_report_bug(console, settings, None, None, None)
+ # Should handle error gracefully
diff --git a/tests/test_publisher.py b/tests/test_publisher.py
new file mode 100644
index 00000000..d5346582
--- /dev/null
+++ b/tests/test_publisher.py
@@ -0,0 +1,150 @@
+"""Tests for publisher module including bug report functionality."""
+
+from __future__ import annotations
+
+import json
+from pathlib import Path
+from unittest.mock import patch
+
+import pytest
+
+from anton.publisher import (
+ _find_referenced_files,
+ _zip_html,
+ publish,
+ publish_bug_report,
+)
+
+
+class TestPublisher:
+ """Test publisher functionality."""
+
+ def test_zip_html_single_file(self, tmp_path):
+ """Test zipping a single HTML file."""
+        html_file = tmp_path / "test.html"
+        html_file.write_text("<html><body>Test</body></html>")
+
+ zipped = _zip_html(html_file)
+ assert isinstance(zipped, bytes)
+ assert len(zipped) > 0
+
+ def test_zip_html_with_references(self, tmp_path):
+ """Test zipping HTML with referenced files."""
+ html_file = tmp_path / "index.html"
+ css_file = tmp_path / "style.css"
+ js_file = tmp_path / "script.js"
+
+        html_file.write_text("""
+        <html>
+        <head>
+        <link rel="stylesheet" href="style.css">
+        <script src="script.js"></script>
+        </head>
+        <body>Test</body>
+        </html>
+        """)
+ css_file.write_text("body { color: red; }")
+ js_file.write_text("console.log('test');")
+
+ # Test finding references
+ refs = _find_referenced_files(html_file)
+ assert len(refs) == 2
+ assert css_file in refs
+ assert js_file in refs
+
+ def test_publish_success(self, tmp_path):
+ """Test successful publish."""
+        html_file = tmp_path / "test.html"
+        html_file.write_text("<html><body>Test</body></html>")
+
+ mock_response = json.dumps(
+ {
+ "user_prefix": "test-user",
+ "md5": "abc123",
+ "view_url": "https://example.com/view/abc123",
+ "files": ["index.html"],
+ }
+ )
+
+ with patch("anton.publisher.minds_request", return_value=mock_response):
+ result = publish(html_file, api_key="test-key")
+
+ assert result["user_prefix"] == "test-user"
+ assert result["view_url"] == "https://example.com/view/abc123"
+
+ def test_publish_file_not_found(self):
+ """Test publish with non-existent file."""
+ with pytest.raises(FileNotFoundError):
+ publish(Path("/non/existent/file.html"), api_key="test-key")
+
+
+class TestBugReportPublisher:
+ """Test bug report publishing functionality."""
+
+ def test_publish_bug_report_success(self, tmp_path):
+ """Test successful bug report publish."""
+ bug_report_file = tmp_path / "bug_report.json"
+ bug_report_data = {
+ "timestamp": "2024-01-01T00:00:00",
+ "anton_version": "1.0.0",
+ "system_info": {"platform": "test"},
+ "user_description": "Test bug",
+ }
+ bug_report_file.write_text(json.dumps(bug_report_data))
+
+ mock_response = json.dumps(
+ {
+ "status": "success",
+ "message": "Bug report received",
+ "report_id": "BUG-12345",
+ }
+ )
+
+ with patch("anton.publisher.minds_request", return_value=mock_response) as mock_request:
+ result = publish_bug_report(bug_report_file, api_key="test-key")
+
+ # Verify response
+ assert result["status"] == "success"
+ assert "report_id" in result
+
+ # Verify request was made to bug-report endpoint
+ call_args = mock_request.call_args
+ assert "/bug-report" in call_args[0][0]
+
+ # Verify payload structure
+ payload = json.loads(call_args[1]["payload"])
+ assert payload["bug_report"] is True
+ assert payload["filename"] == "bug_report.json"
+ assert "file_content" in payload
+
+ def test_publish_bug_report_custom_url(self, tmp_path):
+ """Test bug report with custom URL."""
+ bug_report_file = tmp_path / "bug_report.json"
+ bug_report_file.write_text(json.dumps({"test": "data"}))
+
+ mock_response = json.dumps({"status": "success"})
+
+ with patch("anton.publisher.minds_request", return_value=mock_response) as mock_request:
+ publish_bug_report(
+ bug_report_file,
+ api_key="test-key",
+ bug_report_url="https://custom.example.com",
+ )
+
+ # Verify custom URL was used
+ call_args = mock_request.call_args
+ assert call_args[0][0] == "https://custom.example.com/bug-report"
+
+ def test_publish_bug_report_file_not_found(self):
+ """Test bug report publish with non-existent file."""
+ with pytest.raises(FileNotFoundError):
+ publish_bug_report(Path("/non/existent/bug_report.json"), api_key="test-key")
+
+ def test_publish_bug_report_network_error(self, tmp_path):
+ """Test bug report publish with network error."""
+ bug_report_file = tmp_path / "bug_report.json"
+ bug_report_file.write_text(json.dumps({"test": "data"}))
+
+ with patch("anton.publisher.minds_request", side_effect=Exception("Network error")):
+ with pytest.raises(Exception, match="Network error"):
+ publish_bug_report(bug_report_file, api_key="test-key")