diff --git a/AGENTS.md b/AGENTS.md index d295d43..b8e6242 100644 --- a/AGENTS.md +++ b/AGENTS.md @@ -49,36 +49,29 @@ TaskWing helps turn a goal into executed tasks with persistent context across AI ### Supported Models - [![OpenAI](https://img.shields.io/badge/OpenAI-412991?logo=openai&logoColor=white)](https://platform.openai.com/) [![Anthropic](https://img.shields.io/badge/Anthropic-191919?logo=anthropic&logoColor=white)](https://www.anthropic.com/) [![Google Gemini](https://img.shields.io/badge/Google_Gemini-4285F4?logo=google&logoColor=white)](https://ai.google.dev/) [![AWS Bedrock](https://img.shields.io/badge/AWS_Bedrock-OpenAI--Compatible_Beta-FF9900?logo=amazonaws&logoColor=white)](https://docs.aws.amazon.com/bedrock/latest/userguide/inference-chat-completions.html) [![Ollama](https://img.shields.io/badge/Ollama-Local-000000?logo=ollama&logoColor=white)](https://ollama.com/) - ### Works With - [![Claude Code](https://img.shields.io/badge/Claude_Code-191919?logo=anthropic&logoColor=white)](https://www.anthropic.com/claude-code) [![OpenAI Codex](https://img.shields.io/badge/OpenAI_Codex-412991?logo=openai&logoColor=white)](https://developers.openai.com/codex) [![Cursor](https://img.shields.io/badge/Cursor-111111?logo=cursor&logoColor=white)](https://cursor.com/) [![GitHub Copilot](https://img.shields.io/badge/GitHub_Copilot-181717?logo=githubcopilot&logoColor=white)](https://github.com/features/copilot) [![Gemini CLI](https://img.shields.io/badge/Gemini_CLI-4285F4?logo=google&logoColor=white)](https://github.com/google-gemini/gemini-cli) [![OpenCode](https://img.shields.io/badge/OpenCode-000000?logo=opencode&logoColor=white)](https://opencode.ai/) - - Brand names and logos are trademarks of their respective owners; usage here indicates compatibility, not endorsement. - ### Slash Commands - - /tw-ask - Use when you need to search project knowledge (decisions, patterns, constraints). 
- /tw-remember - Use when you want to persist a decision, pattern, or insight to project memory. - /tw-next - Use when you are ready to start the next approved TaskWing task with full context. @@ -92,9 +85,8 @@ Brand names and logos are trademarks of their respective owners; usage here indi ### Core Commands - - taskwing bootstrap -- taskwing goal "" +- taskwing plan "" - taskwing ask "" - taskwing task - taskwing plan status @@ -108,31 +100,28 @@ Brand names and logos are trademarks of their respective owners; usage here indi ### MCP Tools (Canonical Contract) - -| Tool | Description | -| -------- | ----------------------------------------------------------------------- | -| ask | Search project knowledge (decisions, patterns, constraints) | -| task | Unified task lifecycle (next, current, start, complete) | -| plan | Plan management (clarify, decompose, expand, generate, finalize, audit) | -| code | Code intelligence (find, search, explain, callers, impact, simplify) | -| debug | Diagnose issues systematically with AI-powered analysis | -| remember | Store knowledge in project memory | - +| Tool | Description | +|------|-------------| +| ask | Search project knowledge (decisions, patterns, constraints) | +| task | Unified task lifecycle (next, current, start, complete) | +| plan | Plan management (clarify, decompose, expand, generate, finalize, audit) | +| code | Code intelligence (find, search, explain, callers, impact, simplify) | +| debug | Diagnose issues systematically with AI-powered analysis | +| remember | Store knowledge in project memory | ### Autonomous Task Execution (Hooks) TaskWing integrates with Claude Code's hook system for autonomous plan execution: -```bash +~~~bash taskwing hook session-init # Initialize session tracking (SessionStart hook) taskwing hook continue-check # Check if should continue to next task (Stop hook) taskwing hook session-end # Cleanup session (SessionEnd hook) taskwing hook status # View current session state -``` +~~~ 
Circuit breakers prevent runaway execution: - - --max-tasks=5 stops after N tasks for human review. - --max-minutes=30 stops after N minutes. diff --git a/CLAUDE.md b/CLAUDE.md index c0ef0a1..4e519c4 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -83,9 +83,9 @@ TaskWing gives AI coding assistants permanent memory. It extracts architectural cmd/ # Cobra CLI commands ├── root.go # Base command, global flags (--json, --verbose, --preview, --quiet) ├── bootstrap.go # Auto-generate knowledge from repo -├── goal.go # Goal-first flow: clarify -> generate -> activate +├── goal.go # Deprecated: routes to plan command ├── knowledge.go # View stored project knowledge nodes -├── plan.go # Plan lifecycle management +├── plan.go # Plan creation (primary entry), lifecycle management ├── task.go # Task lifecycle management ├── slash.go # Slash command content for assistants ├── mcp_server.go # MCP server for AI tool integration @@ -141,7 +141,7 @@ Uses CloudWeGo Eino for multi-provider support: - Bedrock: Set `BEDROCK_API_KEY`, `TASKWING_LLM_PROVIDER=bedrock`, and `TASKWING_LLM_BEDROCK_REGION=` - Ollama: Set `TASKWING_LLM_PROVIDER=ollama` and `TASKWING_LLM_MODEL=` -**Bootstrap requires an LLM API key by default** to analyze architecture. Use `--skip-analyze` for CI/testing without LLM (hidden flag, deterministic mode only). +**Bootstrap auto-enables architecture analysis** when an AI CLI is detected on PATH. Use `--no-analyze` to skip. ### MCP Server @@ -235,7 +235,7 @@ Increment when: **NOT MINOR**: Internal refactors, new internal modules, code reorganization -Examples: new `taskwing goal` command, new `--format` flag, adding Gemini provider +Examples: new `taskwing plan` shortcut, new `--format` flag, adding Gemini provider ### MAJOR (X.0.0) - Breaking changes only @@ -338,7 +338,6 @@ Brand names and logos are trademarks of their respective owners; usage here indi ### Slash Commands - - /tw-ask - Use when you need to search project knowledge (decisions, patterns, constraints). 
- /tw-remember - Use when you want to persist a decision, pattern, or insight to project memory. - /tw-next - Use when you are ready to start the next approved TaskWing task with full context. @@ -352,16 +351,16 @@ Brand names and logos are trademarks of their respective owners; usage here indi ### Core Commands -- `taskwing bootstrap` -- `taskwing goal ""` -- `taskwing ask ""` -- `taskwing task` -- `taskwing plan status` -- `taskwing slash` -- `taskwing mcp` -- `taskwing doctor` -- `taskwing config` -- `taskwing start` +- taskwing bootstrap +- taskwing plan "" +- taskwing ask "" +- taskwing task +- taskwing plan status +- taskwing slash +- taskwing mcp +- taskwing doctor +- taskwing config +- taskwing start ### MCP Tools (Canonical Contract) @@ -369,27 +368,26 @@ Brand names and logos are trademarks of their respective owners; usage here indi | Tool | Description | |------|-------------| -| `ask` | Search project knowledge (decisions, patterns, constraints) | -| `task` | Unified task lifecycle (`next`, `current`, `start`, `complete`) | -| `plan` | Plan management (`clarify`, `decompose`, `expand`, `generate`, `finalize`, `audit`) | -| `code` | Code intelligence (`find`, `search`, `explain`, `callers`, `impact`, `simplify`) | -| `debug` | Diagnose issues systematically with AI-powered analysis | -| `remember` | Store knowledge in project memory | +| ask | Search project knowledge (decisions, patterns, constraints) | +| task | Unified task lifecycle (next, current, start, complete) | +| plan | Plan management (clarify, decompose, expand, generate, finalize, audit) | +| code | Code intelligence (find, search, explain, callers, impact, simplify) | +| debug | Diagnose issues systematically with AI-powered analysis | +| remember | Store knowledge in project memory | ### Autonomous Task Execution (Hooks) TaskWing integrates with Claude Code's hook system for autonomous plan execution: -```bash +~~~bash taskwing hook session-init # Initialize session tracking 
(SessionStart hook) taskwing hook continue-check # Check if should continue to next task (Stop hook) taskwing hook session-end # Cleanup session (SessionEnd hook) taskwing hook status # View current session state -``` +~~~ Circuit breakers prevent runaway execution: - - --max-tasks=5 stops after N tasks for human review. - --max-minutes=30 stops after N minutes. diff --git a/GEMINI.md b/GEMINI.md index ee46cae..a752a19 100644 --- a/GEMINI.md +++ b/GEMINI.md @@ -77,8 +77,7 @@ The system is composed of a CLI tool with an embedded MCP server and a web dashb | Command | Description | | :------------------- | :---------------------------------------- | | `taskwing bootstrap` | Initialize project memory | -| `taskwing goal` | Create and activate a plan | -| `taskwing plan` | Manage development plans | +| `taskwing plan` | Create and manage development plans | | `taskwing task` | Manage execution tasks | | `taskwing start` | Start API/watch/dashboard services | | `taskwing slash` | Output slash command prompts for AI tools | @@ -217,7 +216,6 @@ Brand names and logos are trademarks of their respective owners; usage here indi ### Slash Commands - - /tw-ask - Use when you need to search project knowledge (decisions, patterns, constraints). - /tw-remember - Use when you want to persist a decision, pattern, or insight to project memory. - /tw-next - Use when you are ready to start the next approved TaskWing task with full context. 
@@ -231,16 +229,16 @@ Brand names and logos are trademarks of their respective owners; usage here indi ### Core Commands -- `taskwing bootstrap` -- `taskwing goal ""` -- `taskwing ask ""` -- `taskwing task` -- `taskwing plan status` -- `taskwing slash` -- `taskwing mcp` -- `taskwing doctor` -- `taskwing config` -- `taskwing start` +- taskwing bootstrap +- taskwing plan "" +- taskwing ask "" +- taskwing task +- taskwing plan status +- taskwing slash +- taskwing mcp +- taskwing doctor +- taskwing config +- taskwing start ### MCP Tools (Canonical Contract) @@ -248,27 +246,26 @@ Brand names and logos are trademarks of their respective owners; usage here indi | Tool | Description | |------|-------------| -| `ask` | Search project knowledge (decisions, patterns, constraints) | -| `task` | Unified task lifecycle (`next`, `current`, `start`, `complete`) | -| `plan` | Plan management (`clarify`, `decompose`, `expand`, `generate`, `finalize`, `audit`) | -| `code` | Code intelligence (`find`, `search`, `explain`, `callers`, `impact`, `simplify`) | -| `debug` | Diagnose issues systematically with AI-powered analysis | -| `remember` | Store knowledge in project memory | +| ask | Search project knowledge (decisions, patterns, constraints) | +| task | Unified task lifecycle (next, current, start, complete) | +| plan | Plan management (clarify, decompose, expand, generate, finalize, audit) | +| code | Code intelligence (find, search, explain, callers, impact, simplify) | +| debug | Diagnose issues systematically with AI-powered analysis | +| remember | Store knowledge in project memory | ### Autonomous Task Execution (Hooks) TaskWing integrates with Claude Code's hook system for autonomous plan execution: -```bash +~~~bash taskwing hook session-init # Initialize session tracking (SessionStart hook) taskwing hook continue-check # Check if should continue to next task (Stop hook) taskwing hook session-end # Cleanup session (SessionEnd hook) taskwing hook status # View current 
session state -``` +~~~ Circuit breakers prevent runaway execution: - - --max-tasks=5 stops after N tasks for human review. - --max-minutes=30 stops after N minutes. diff --git a/README.md b/README.md index 40cab2f..432326c 100644 --- a/README.md +++ b/README.md @@ -6,10 +6,10 @@
-

Give your AI tools a brain.

+

Your AI assistant forgets everything. Every single session.

- Memory, planning, task execution, and project intelligence — the control plane for AI-native development. + Context compression tools save tokens. TaskWing saves knowledge — decisions, patterns, and architecture that compound across every session.

@@ -24,6 +24,17 @@ License

+## Works With + + +[![Claude Code](https://img.shields.io/badge/Claude_Code-191919?logo=anthropic&logoColor=white)](https://www.anthropic.com/claude-code) +[![OpenAI Codex](https://img.shields.io/badge/OpenAI_Codex-412991?logo=openai&logoColor=white)](https://developers.openai.com/codex) +[![Cursor](https://img.shields.io/badge/Cursor-111111?logo=cursor&logoColor=white)](https://cursor.com/) +[![GitHub Copilot](https://img.shields.io/badge/GitHub_Copilot-181717?logo=githubcopilot&logoColor=white)](https://github.com/features/copilot) +[![Gemini CLI](https://img.shields.io/badge/Gemini_CLI-4285F4?logo=google&logoColor=white)](https://github.com/google-gemini/gemini-cli) +[![OpenCode](https://img.shields.io/badge/OpenCode-000000?logo=opencode&logoColor=white)](https://opencode.ai/) + +

TaskWing ask demo

@@ -32,49 +43,71 @@ ## The Problem -Your AI tools start every session from zero. They don't know your stack, your patterns, or why you chose PostgreSQL over MongoDB. You re-explain the same context hundreds of times. +You explain "we use PostgreSQL, not MongoDB" on Monday. Again on Tuesday. Again on Wednesday. Your AI assistant has no memory. **Every session starts from zero.** + +A typical project accumulates 50+ architectural decisions, dozens of patterns, and critical constraints — none of which survive a session restart. You spend more time re-explaining context than building features. + +Context compression tools reduce token waste within a session. But when the session ends, everything is gone. **The real problem isn't token cost — it's knowledge loss.** -**TaskWing fixes this.** One command extracts your architecture into a local database. Every AI session after that just *knows*. +## How TaskWing Fixes This -## What It Does +``` +Without TaskWing: + Session 1: "We use PostgreSQL, here's why..." (re-explain) + Session 2: "We use PostgreSQL, here's why..." (re-explain) + Session 3: "We use PostgreSQL, here's why..." (re-explain) + +With TaskWing: + taskwing bootstrap → 63 decisions, 28 patterns, 12 constraints extracted + Session 1: AI already knows your architecture + Session 2: AI still knows. Plus what you decided yesterday. + Session 3: AI knows everything. Context compounds. 
+``` -| Capability | Description | -|:-----------|:------------| -| 🧠 **Memory** | Extracts and persists architectural decisions, patterns, and constraints | -| 📋 **Planning** | Turns a goal into an executable plan with decomposed tasks | -| ⚡ **Task Execution** | AI-driven task lifecycle — next, start, complete, verify | -| 🔍 **Code Intelligence** | Symbol search, call graphs, impact analysis, simplification | -| 🐛 **Debugging** | AI-powered root cause analysis with systematic diagnosis | -| 🔌 **MCP Integration** | Exposes everything to Claude, Cursor, Copilot, Gemini via MCP | +One command extracts your architecture into a local SQLite database. Every AI session after that just *knows* — permanently. -## Install +| What | How | +|:-----|:----| +| **Persistent memory** | Decisions, patterns, and constraints survive every session restart | +| **Goal-to-execution** | Turn "Add Stripe billing" into 5 decomposed, context-rich tasks | +| **Code intelligence** | Symbol search, call graphs, impact analysis across your codebase | +| **Works everywhere** | Claude Code, Cursor, Copilot, Gemini, OpenCode — via MCP | + +## Try It (60 seconds) ```bash -# Homebrew (recommended) +# Install brew install josephgoksu/tap/taskwing -# or curl -curl -fsSL https://taskwing.app/install.sh | sh +# Extract your architecture +cd your-project && taskwing bootstrap +# → "63 decisions, 28 patterns, 12 constraints extracted" + +# Ask it anything about your project +taskwing ask "what database do we use and why?" +# → Returns the decision, reasoning, and tradeoffs — instantly ``` No signup. No account. Works offline. Everything stays local in SQLite. -## Quick Start +## Full Workflow ```bash -# 1. Extract your architecture -cd your-project +# 1. Bootstrap (once per project) taskwing bootstrap -# → 22 decisions, 12 patterns, 9 constraints extracted # 2. 
Set a goal and generate a plan -taskwing goal "Add Stripe billing" -# → Plan decomposed into 5 executable tasks +taskwing plan "Add Stripe billing" +# → Plan decomposed into 5 executable tasks with architectural context # 3. Execute with your AI assistant -/tw-next # Get next task with full context +/tw-next # Get next task — AI already knows your stack # ...work... /tw-done # Mark complete, advance to next + +# 4. Your AI remembers decisions made today — tomorrow, next week, forever +/tw-ask "why did we choose Stripe over Paddle?" +# → Returns the decision from step 2, with full reasoning ``` ## Supported Models @@ -87,26 +120,28 @@ taskwing goal "Add Stripe billing" [![Ollama](https://img.shields.io/badge/Ollama-Local-000000?logo=ollama&logoColor=white)](https://ollama.com/) -## Works With - - -[![Claude Code](https://img.shields.io/badge/Claude_Code-191919?logo=anthropic&logoColor=white)](https://www.anthropic.com/claude-code) -[![OpenAI Codex](https://img.shields.io/badge/OpenAI_Codex-412991?logo=openai&logoColor=white)](https://developers.openai.com/codex) -[![Cursor](https://img.shields.io/badge/Cursor-111111?logo=cursor&logoColor=white)](https://cursor.com/) -[![GitHub Copilot](https://img.shields.io/badge/GitHub_Copilot-181717?logo=githubcopilot&logoColor=white)](https://github.com/features/copilot) -[![Gemini CLI](https://img.shields.io/badge/Gemini_CLI-4285F4?logo=google&logoColor=white)](https://github.com/google-gemini/gemini-cli) -[![OpenCode](https://img.shields.io/badge/OpenCode-000000?logo=opencode&logoColor=white)](https://opencode.ai/) - - -Brand names and logos are trademarks of their respective owners; usage here indicates compatibility, not endorsement. +Brand names and logos are trademarks of their respective owners; usage here indicates compatibility, not endorsement. +## Why Not Just a CLAUDE.md File? 
+ +| | CLAUDE.md | Context compression tools | TaskWing | +|:--|:---------|:-------------------------|:---------| +| **Survives session restart** | Yes | No | Yes | +| **Auto-extracted from code** | No (hand-written) | No | Yes | +| **Searchable** | No | Session only | Always (FTS + vector + graph) | +| **Grows over time** | Only if you maintain it | No | Automatically | +| **Understands code symbols** | No | No | Call graphs, impact analysis | +| **Plans and tracks tasks** | No | No | Goal → plan → execute → verify | + +CLAUDE.md is a good start. Context compression is useful within a session. TaskWing is what happens when your project intelligence becomes **permanent, searchable, and compounding**. + ## MCP Tools | Tool | Description | -|:-----|:------------| +|------|-------------| | `ask` | Search project knowledge (decisions, patterns, constraints) | | `task` | Unified task lifecycle (`next`, `current`, `start`, `complete`) | | `plan` | Plan management (`clarify`, `decompose`, `expand`, `generate`, `finalize`, `audit`) | @@ -149,18 +184,16 @@ Once connected, use these slash commands from your AI assistant: ## Core Commands -| Command | Description | -|:--------|:------------| -| `taskwing bootstrap` | Extract architecture from your codebase | -| `taskwing goal ""` | Create and activate a plan from a goal | -| `taskwing ask ""` | Query project knowledge | -| `taskwing task` | Manage execution tasks | -| `taskwing plan status` | View current plan progress | -| `taskwing slash` | Output slash command prompts for AI tools | -| `taskwing mcp` | Start the MCP server | -| `taskwing doctor` | Health check for project memory | -| `taskwing config` | Configure LLM provider and settings | -| `taskwing start` | Start API/watch/dashboard services | +- `taskwing bootstrap` +- `taskwing plan ""` +- `taskwing ask ""` +- `taskwing task` +- `taskwing plan status` +- `taskwing slash` +- `taskwing mcp` +- `taskwing doctor` +- `taskwing config` +- `taskwing start` ## Autonomous 
Task Execution (Hooks) diff --git a/cmd/bootstrap.go b/cmd/bootstrap.go index b5df3eb..3ded780 100644 --- a/cmd/bootstrap.go +++ b/cmd/bootstrap.go @@ -23,6 +23,7 @@ import ( "github.com/josephgoksu/TaskWing/internal/logger" "github.com/josephgoksu/TaskWing/internal/memory" "github.com/josephgoksu/TaskWing/internal/project" + "github.com/josephgoksu/TaskWing/internal/runner" "github.com/josephgoksu/TaskWing/internal/ui" "github.com/spf13/cobra" "github.com/spf13/viper" @@ -31,24 +32,55 @@ import ( // bootstrapCmd represents the bootstrap command var bootstrapCmd = &cobra.Command{ Use: "bootstrap", - Short: "Initialize project memory with LLM-powered analysis", + Short: "Initialize project memory with deterministic code indexing", Long: `Initialize TaskWing for your repository. -Bootstrap analyzes your codebase to extract architectural knowledge: +Bootstrap indexes your codebase deterministically and, when an AI CLI is +detected on PATH, automatically runs architecture analysis: • Creates .taskwing/ directory structure • Sets up AI assistant integration (Claude, Cursor, etc.) • Auto-repairs managed local AI config drift • Indexes code symbols (functions, types, etc.) - • Analyzes code patterns and architecture (requires API key) - • Extracts decisions and understands WHY choices were made + • Runs LLM-powered architecture analysis (auto-enabled when AI CLI detected) + +Use --no-analyze to skip LLM analysis even when an AI CLI is available. Bootstrap does NOT adopt unmanaged AI config automatically and does NOT mutate -global MCP in run mode. Use "taskwing doctor --fix" for explicit repair flows. +global MCP in run mode. Use "taskwing doctor --fix" for explicit repair flows.`, + RunE: runBootstrap, +} -Requires an LLM API key (set via 'taskwing config set' or provider-specific env var). +// bootstrapContext accumulates stats across bootstrap phases for the final summary. 
+type bootstrapContext struct { + startTime time.Time + totalActions int + currentAction int + filesScanned int + symbolsFound int + callRelationships int + indexDuration time.Duration + metadataItems int + metadataDuration time.Duration + analysisFindings int + analysisRelations int + analysisDuration time.Duration +} -Use --skip-analyze for CI/testing (deterministic, no LLM).`, - RunE: runBootstrap, +func (bc *bootstrapContext) nextPhase() int { + bc.currentAction++ + return bc.currentAction +} + +func (bc *bootstrapContext) toStats() ui.BootstrapStats { + return ui.BootstrapStats{ + FilesScanned: bc.filesScanned, + SymbolsFound: bc.symbolsFound, + CallRelationships: bc.callRelationships, + MetadataItems: bc.metadataItems, + AnalysisFindings: bc.analysisFindings, + AnalysisRelations: bc.analysisRelations, + TotalDuration: time.Since(bc.startTime), + } } // runBootstrap is the main bootstrap command handler. @@ -58,11 +90,15 @@ func runBootstrap(cmd *cobra.Command, args []string) error { // PHASE 0: Parse and Validate Flags // ═══════════════════════════════════════════════════════════════════════ onlyAgents, _ := cmd.Flags().GetStringSlice("only-agents") + noAnalyze := getBoolFlag(cmd, "no-analyze") || getBoolFlag(cmd, "skip-analyze") + analyzeExplicit := getBoolFlag(cmd, "analyze") + autoAnalyze := !noAnalyze && (analyzeExplicit || len(runner.DetectCLIs()) > 0) + timeout, _ := cmd.Flags().GetDuration("timeout") flags := bootstrap.Flags{ Preview: getBoolFlag(cmd, "preview"), SkipInit: getBoolFlag(cmd, "skip-init"), SkipIndex: getBoolFlag(cmd, "skip-index"), - SkipAnalyze: getBoolFlag(cmd, "skip-analyze"), + SkipAnalyze: !autoAnalyze, // Auto-enabled when AI CLI detected; opt out with --no-analyze Force: getBoolFlag(cmd, "force"), Resume: getBoolFlag(cmd, "resume"), OnlyAgents: onlyAgents, @@ -71,7 +107,10 @@ func runBootstrap(cmd *cobra.Command, args []string) error { TraceFile: getStringFlag(cmd, "trace-file"), Verbose: viper.GetBool("verbose"), Quiet: 
viper.GetBool("quiet"), - Debug: getBoolFlag(cmd, "debug"), + Debug: getBoolFlag(cmd, "debug"), + PreferCLI: getStringFlag(cmd, "prefer-cli"), + Timeout: timeout, + Model: getStringFlag(cmd, "model"), } // Validate flags early - fail fast on contradictions @@ -96,7 +135,7 @@ func runBootstrap(cmd *cobra.Command, args []string) error { } // Track user input for crash logging - logger.SetLastInput(fmt.Sprintf("bootstrap (skip-analyze=%v, dir=%s)", flags.SkipAnalyze, cwd)) + logger.SetLastInput(fmt.Sprintf("bootstrap (no-analyze=%v, dir=%s)", flags.SkipAnalyze, cwd)) // Debug mode: dump diagnostic info early if flags.Debug { @@ -158,24 +197,69 @@ func runBootstrap(cmd *cobra.Command, args []string) error { // ═══════════════════════════════════════════════════════════════════════ plan := bootstrap.DecidePlan(snapshot, flags) - // Always show plan summary (even in quiet mode, single line) - fmt.Print(bootstrap.FormatPlanSummary(plan, flags.Quiet)) + // Show debug line on stderr if --debug + if flags.Debug { + fmt.Fprintln(os.Stderr, bootstrap.FormatPlanDebugLine(plan)) + } // Handle error mode if plan.Mode == bootstrap.ModeError { return plan.Error } + // Show welcome panel and plan box (normal/verbose mode) + if !flags.Quiet { + fmt.Println(ui.RenderBootstrapWelcome()) + + // Project info line + fmt.Printf(" Project: %s", filepath.Base(cwd)) + if len(plan.SuggestedAIs) > 0 { + fmt.Printf(" • AIs: %s", strings.Join(plan.SuggestedAIs, ", ")) + } + fmt.Println() + + // Detected state + fmt.Printf("\n Detected: %s\n", plan.DetectedState) + + // Show plan box with numbered actions + if len(plan.ActionSummary) > 0 { + fmt.Print(ui.RenderPlanBox(plan.ActionSummary)) + } + + // Print drift warnings if any + if len(plan.ManagedDriftAIs) > 0 || len(plan.UnmanagedDriftAIs) > 0 || len(plan.GlobalMCPDriftAIs) > 0 { + fmt.Println() + if len(plan.ManagedDriftAIs) > 0 { + fmt.Printf(" • managed_drift_fixed: %s\n", strings.Join(plan.ManagedDriftAIs, ", ")) + } + if 
len(plan.UnmanagedDriftAIs) > 0 { + fmt.Printf(" • unmanaged_drift_detected: %s\n", strings.Join(plan.UnmanagedDriftAIs, ", ")) + } + if len(plan.GlobalMCPDriftAIs) > 0 { + fmt.Printf(" • global_mcp_drift_detected: %s\n", strings.Join(plan.GlobalMCPDriftAIs, ", ")) + } + } + + // Show warnings + if len(plan.Warnings) > 0 { + for _, warning := range plan.Warnings { + ui.PrintWarning(warning) + } + } + } + // Handle preview mode - show plan and exit if flags.Preview { - fmt.Println("\n💡 Preview mode - no changes made.") + fmt.Println() + ui.PrintHint("Preview mode - no changes made.") return nil } // Handle NoOp mode if plan.Mode == bootstrap.ModeNoOp { if !flags.Quiet { - fmt.Println("\n✅ Nothing to do - configuration is up to date.") + fmt.Println() + ui.PrintSuccess("Nothing to do - configuration is up to date.") } return nil } @@ -189,7 +273,7 @@ func runBootstrap(cmd *cobra.Command, args []string) error { if plan.RequiresLLMConfig { llmCfg, err = getLLMConfigForRole(cmd, llm.RoleBootstrap) if err != nil { - return fmt.Errorf("TaskWing requires an LLM API key to analyze your architecture.\nConfigure via 'taskwing config set' or set a provider-specific env var (e.g. TASKWING_API_KEY, OPENAI_API_KEY, ANTHROPIC_API_KEY, GOOGLE_API_KEY, BEDROCK_API_KEY).\nUse --skip-analyze for CI/testing without LLM: %w", err) + return fmt.Errorf("LLM API key required for architecture analysis.\nConfigure via 'taskwing config set' or set a provider-specific env var (e.g. 
OPENAI_API_KEY, ANTHROPIC_API_KEY, GOOGLE_API_KEY).\nUse --no-analyze to skip: %w", err) } } @@ -202,48 +286,59 @@ func runBootstrap(cmd *cobra.Command, args []string) error { if plan.RequiresRepoSelection && slices.Contains(plan.Actions, bootstrap.ActionLLMAnalyze) { if ui.IsInteractive() { fmt.Println() - fmt.Printf("📦 Found %d repositories\n\n", len(plan.DetectedRepos)) + fmt.Printf("%s Found %d repositories\n\n", ui.IconPackage, len(plan.DetectedRepos)) plan.SelectedRepos = promptRepoSelection(plan.DetectedRepos) } else { plan.SelectedRepos = plan.DetectedRepos if !flags.Quiet { - fmt.Printf("📦 Non-interactive mode: bootstrapping all %d repositories\n", len(plan.DetectedRepos)) + fmt.Printf("%s Non-interactive mode: bootstrapping all %d repositories\n", ui.IconPackage, len(plan.DetectedRepos)) } } } // Execute actions in order + bCtx := &bootstrapContext{startTime: time.Now(), totalActions: len(plan.Actions)} + for _, action := range plan.Actions { - if err := executeAction(cmd.Context(), action, svc, cwd, flags, plan, llmCfg); err != nil { + if !flags.Quiet { + ui.PrintPhaseSeparator() + } + if err := executeAction(cmd.Context(), action, svc, cwd, flags, plan, llmCfg, bCtx); err != nil { return err } } - // Final success message + // Final output if !flags.Quiet { - if len(plan.ManagedDriftAIs) > 0 { - fmt.Printf("managed_drift_fixed: %s\n", strings.Join(plan.ManagedDriftAIs, ", ")) - } + ui.PrintPhaseSeparator() + + // Drift warnings if len(plan.UnmanagedDriftAIs) > 0 { - fmt.Printf("unmanaged_drift_detected: %s\n", strings.Join(plan.UnmanagedDriftAIs, ", ")) + fmt.Println() + ui.PrintWarning(fmt.Sprintf("unmanaged_drift_detected: %s", strings.Join(plan.UnmanagedDriftAIs, ", "))) fmt.Println(" ↳ Run `taskwing doctor --fix --adopt-unmanaged` to claim and repair unmanaged TaskWing-like configs.") } if len(plan.GlobalMCPDriftAIs) > 0 { - fmt.Printf("global_mcp_drift_detected: %s\n", strings.Join(plan.GlobalMCPDriftAIs, ", ")) + fmt.Println() + 
ui.PrintWarning(fmt.Sprintf("global_mcp_drift_detected: %s", strings.Join(plan.GlobalMCPDriftAIs, ", "))) fmt.Println(" ↳ Run `taskwing doctor --fix` to repair global MCP registration.") } + + // Summary panel fmt.Println() - fmt.Println("✅ Bootstrap complete!") + fmt.Println(ui.RenderBootstrapSummary(bCtx.toStats())) + } else { + fmt.Printf("✔ Bootstrap complete (%s)\n", ui.FormatDuration(bCtx.toStats().TotalDuration)) } return nil } // executeAction executes a single bootstrap action. -func executeAction(ctx context.Context, action bootstrap.Action, svc *bootstrap.Service, cwd string, flags bootstrap.Flags, plan *bootstrap.Plan, llmCfg llm.Config) error { +func executeAction(ctx context.Context, action bootstrap.Action, svc *bootstrap.Service, cwd string, flags bootstrap.Flags, plan *bootstrap.Plan, llmCfg llm.Config, bCtx *bootstrapContext) error { switch action { case bootstrap.ActionInitProject: - if err := executeInitProject(svc, flags, plan); err != nil { + if err := executeInitProject(svc, flags, plan, bCtx); err != nil { return err } // Re-detect project context now that local .taskwing/ exists. @@ -255,19 +350,19 @@ func executeAction(ctx context.Context, action bootstrap.Action, svc *bootstrap. 
return nil case bootstrap.ActionGenerateAIConfigs: - return executeGenerateAIConfigs(svc, flags, plan) + return executeGenerateAIConfigs(svc, flags, plan, bCtx) case bootstrap.ActionInstallMCP: - return executeInstallMCP(cwd, flags, plan) + return executeInstallMCP(cwd, flags, plan, bCtx) case bootstrap.ActionIndexCode: - return executeIndexCode(ctx, cwd, flags) + return executeIndexCode(ctx, cwd, flags, bCtx) case bootstrap.ActionExtractMetadata: - return executeExtractMetadata(ctx, svc, flags) + return executeExtractMetadata(ctx, svc, flags, bCtx) case bootstrap.ActionLLMAnalyze: - return executeLLMAnalyze(ctx, svc, cwd, flags, llmCfg, plan) + return executeLLMAnalyze(ctx, svc, cwd, flags, llmCfg, plan, bCtx) default: return fmt.Errorf("unknown action: %s", action) @@ -275,7 +370,15 @@ func executeAction(ctx context.Context, action bootstrap.Action, svc *bootstrap. } // executeInitProject handles project initialization with user prompts. -func executeInitProject(svc *bootstrap.Service, flags bootstrap.Flags, plan *bootstrap.Plan) error { +func executeInitProject(svc *bootstrap.Service, flags bootstrap.Flags, plan *bootstrap.Plan, bCtx *bootstrapContext) error { + if !flags.Quiet { + ui.PrintPhaseHeader(bCtx.nextPhase(), bCtx.totalActions, ui.IconRocket, + "Initializing Project", + "Creating .taskwing/ directory structure and AI configurations.") + } else { + step := bCtx.nextPhase() + fmt.Printf("[%d/%d] Initializing project... 
", step, bCtx.totalActions) + } var selectedAIs []string if plan.RequiresUserInput { @@ -289,9 +392,9 @@ func executeInitProject(svc *bootstrap.Service, flags bootstrap.Flags, plan *boo } if !flags.Quiet { if len(selectedAIs) > 0 { - fmt.Printf("🤖 Non-interactive mode: configuring AI integrations for %s\n", strings.Join(selectedAIs, ", ")) + fmt.Printf("%s Non-interactive mode: configuring AI integrations for %s\n", ui.IconRobot, strings.Join(selectedAIs, ", ")) } else { - fmt.Println("🤖 Non-interactive mode: no AI assistant selected; initializing project memory only") + fmt.Printf("%s Non-interactive mode: no AI assistant selected; initializing project memory only\n", ui.IconRobot) } } } else { @@ -299,19 +402,19 @@ func executeInitProject(svc *bootstrap.Service, flags bootstrap.Flags, plan *boo switch plan.Mode { case bootstrap.ModeFirstTime: if len(plan.SuggestedAIs) > 0 { - fmt.Println("📋 Setting up local project") - fmt.Printf("🔍 Detected global config for: %s\n", strings.Join(plan.SuggestedAIs, ", ")) + fmt.Printf("%s Setting up local project\n", ui.IconTask) + fmt.Printf("%s Detected global config for: %s\n", ui.IconSearch, strings.Join(plan.SuggestedAIs, ", ")) } else { - fmt.Println("🚀 First time setup") + fmt.Printf("%s First time setup\n", ui.IconRocket) } fmt.Println() - fmt.Println("🤖 Which AI assistant(s) do you use?") + fmt.Printf("%s Which AI assistant(s) do you use?\n", ui.IconRobot) fmt.Println() selectedAIs = promptAISelection(plan.SuggestedAIs...) case bootstrap.ModeRepair: if len(plan.AIsNeedingRepair) > 0 { - fmt.Println("🔧 Restoring missing AI configurations") + fmt.Printf("%s Restoring missing AI configurations\n", ui.IconWrench) fmt.Printf(" Missing: %s\n", strings.Join(plan.AIsNeedingRepair, ", ")) fmt.Print(" Restore? 
[Y/n]: ") var input string @@ -321,21 +424,22 @@ func executeInitProject(svc *bootstrap.Service, flags bootstrap.Flags, plan *boo selectedAIs = plan.AIsNeedingRepair } else { fmt.Println() - fmt.Println("🤖 Which AI assistant(s) do you want to set up?") + fmt.Printf("%s Which AI assistant(s) do you want to set up?\n", ui.IconRobot) selectedAIs = promptAISelection(plan.SuggestedAIs...) } } case bootstrap.ModeReconfigure: - fmt.Println("🔧 No AI configurations found - let's set them up") + fmt.Printf("%s No AI configurations found - let's set them up\n", ui.IconWrench) fmt.Println() - fmt.Println("🤖 Which AI assistant(s) do you use?") + fmt.Printf("%s Which AI assistant(s) do you use?\n", ui.IconRobot) fmt.Println() selectedAIs = promptAISelection() } } if len(selectedAIs) == 0 && !flags.Quiet { - fmt.Println("\n⚠️ No AI assistants selected - continuing with local project initialization only") + fmt.Println() + ui.PrintWarning("No AI assistants selected - continuing with local project initialization only") } } @@ -343,49 +447,58 @@ func executeInitProject(svc *bootstrap.Service, flags bootstrap.Flags, plan *boo plan.SelectedAIs = selectedAIs // Initialize project + start := time.Now() if err := svc.InitializeProject(flags.Verbose, selectedAIs); err != nil { return fmt.Errorf("initialization failed: %w", err) } - fmt.Println("✓ Project initialized") + if !flags.Quiet { + ui.PrintPhaseResult("Project initialized", time.Since(start)) + } else { + fmt.Printf("done (%s)\n", ui.FormatDuration(time.Since(start))) + } return nil } // executeGenerateAIConfigs generates AI slash commands and hooks. // This runs standalone when ActionInitProject isn't in the plan (e.g., ModeRepair with healthy project). 
-func executeGenerateAIConfigs(svc *bootstrap.Service, flags bootstrap.Flags, plan *bootstrap.Plan) error { +func executeGenerateAIConfigs(svc *bootstrap.Service, flags bootstrap.Flags, plan *bootstrap.Plan, bCtx *bootstrapContext) error { // Determine which AIs to configure var targetAIs []string if len(plan.SelectedAIs) > 0 { - // User already selected AIs (from executeInitProject or previous step) targetAIs = plan.SelectedAIs } else if len(plan.AIsNeedingRepair) > 0 { - // In repair mode, use the AIs that need repair targetAIs = plan.AIsNeedingRepair } if len(targetAIs) == 0 { - // No AIs to configure - this is a no-op return nil } - // Generate configs for each target AI if !flags.Quiet { - fmt.Printf("🔧 Regenerating AI configurations for: %s\n", strings.Join(targetAIs, ", ")) + ui.PrintPhaseHeader(bCtx.nextPhase(), bCtx.totalActions, ui.IconWrench, + "Generating AI Configurations", + fmt.Sprintf("Setting up slash commands and hooks for %s.", strings.Join(targetAIs, ", "))) + } else { + step := bCtx.nextPhase() + fmt.Printf("[%d/%d] Generating AI configs... ", step, bCtx.totalActions) } + start := time.Now() if err := svc.RegenerateAIConfigs(flags.Verbose, targetAIs); err != nil { return fmt.Errorf("regenerate AI configs failed: %w", err) } if !flags.Quiet { - fmt.Println("✓ AI configurations regenerated") + ui.PrintPhaseResult("AI configurations regenerated", time.Since(start)) + } else { + fmt.Printf("done (%s)\n", ui.FormatDuration(time.Since(start))) } return nil } // executeInstallMCP registers MCP servers with AI CLIs. 
-func executeInstallMCP(cwd string, flags bootstrap.Flags, plan *bootstrap.Plan) error { +func executeInstallMCP(cwd string, flags bootstrap.Flags, plan *bootstrap.Plan, bCtx *bootstrapContext) error { // Determine which AIs need MCP registration var targetAIs []string if len(plan.SelectedAIs) > 0 { @@ -413,53 +526,98 @@ func executeInstallMCP(cwd string, flags bootstrap.Flags, plan *bootstrap.Plan) } } + if !flags.Quiet { + ui.PrintPhaseHeader(bCtx.nextPhase(), bCtx.totalActions, ui.IconPlug, + "Installing MCP Servers", + fmt.Sprintf("Registering TaskWing with %s.", strings.Join(targetAIs, ", "))) + } else { + step := bCtx.nextPhase() + fmt.Printf("[%d/%d] Installing MCP servers... ", step, bCtx.totalActions) + } + if len(aisNeedingRegistration) == 0 { if !flags.Quiet && len(existingGlobalAIs) > 0 { - fmt.Printf("✓ MCP already configured globally for: %s\n", strings.Join(existingGlobalAIs, ", ")) + ui.PrintPhaseResult(fmt.Sprintf("MCP already configured for: %s", strings.Join(existingGlobalAIs, ", ")), 0) + } else if flags.Quiet { + fmt.Println("already configured") } return nil } - if !flags.Quiet { - fmt.Printf("🔌 Installing MCP servers for: %s\n", strings.Join(aisNeedingRegistration, ", ")) - } - + start := time.Now() installMCPServers(cwd, aisNeedingRegistration) if !flags.Quiet { - fmt.Println("✓ MCP servers installed") + ui.PrintPhaseResult(fmt.Sprintf("MCP servers installed for: %s", strings.Join(aisNeedingRegistration, ", ")), time.Since(start)) + } else { + fmt.Printf("done (%s)\n", ui.FormatDuration(time.Since(start))) } return nil } // executeIndexCode runs code symbol indexing. 
-func executeIndexCode(ctx context.Context, cwd string, flags bootstrap.Flags) error { - if err := runCodeIndexing(ctx, cwd, flags.Force, flags.Quiet); err != nil { +func executeIndexCode(ctx context.Context, cwd string, flags bootstrap.Flags, bCtx *bootstrapContext) error { + if !flags.Quiet { + ui.PrintPhaseHeader(bCtx.nextPhase(), bCtx.totalActions, ui.IconSearch, + "Indexing Code Symbols", + "Scanning source files for functions, types, and call relationships.") + } else { + step := bCtx.nextPhase() + fmt.Printf("[%d/%d] Indexing code symbols... ", step, bCtx.totalActions) + } + + if err := runCodeIndexing(ctx, cwd, flags.Force, flags.Quiet, bCtx); err != nil { // Non-fatal: log and continue if !flags.Quiet { - fmt.Fprintf(os.Stderr, "⚠️ Code indexing failed: %v\n", err) + fmt.Fprintf(os.Stderr, " %s Code indexing failed: %v\n", ui.IconWarn, err) + } else { + fmt.Printf("failed (%v)\n", err) } } return nil } // executeExtractMetadata runs deterministic metadata extraction. -func executeExtractMetadata(ctx context.Context, svc *bootstrap.Service, flags bootstrap.Flags) error { +func executeExtractMetadata(ctx context.Context, svc *bootstrap.Service, flags bootstrap.Flags, bCtx *bootstrapContext) error { + if !flags.Quiet { + ui.PrintPhaseHeader(bCtx.nextPhase(), bCtx.totalActions, ui.IconStats, + "Extracting Project Metadata", + "Collecting git history and documentation files.") + } else { + step := bCtx.nextPhase() + fmt.Printf("[%d/%d] Extracting metadata... 
", step, bCtx.totalActions) + } + + start := time.Now() result, err := svc.RunDeterministicBootstrap(ctx, flags.Quiet) if err != nil { if !flags.Quiet { - fmt.Fprintf(os.Stderr, "⚠️ Metadata extraction failed: %v\n", err) + fmt.Fprintf(os.Stderr, " %s Metadata extraction failed: %v\n", ui.IconWarn, err) + } else { + fmt.Printf("failed (%v)\n", err) + } + } else { + if result != nil { + bCtx.metadataItems = result.FindingsCount + bCtx.metadataDuration = time.Since(start) + } + if result != nil && len(result.Warnings) > 0 && flags.Verbose { + for _, w := range result.Warnings { + fmt.Fprintf(os.Stderr, " [warn] %s\n", w) + } } - } else if result != nil && len(result.Warnings) > 0 && flags.Verbose { - for _, w := range result.Warnings { - fmt.Fprintf(os.Stderr, " [warn] %s\n", w) + if flags.Quiet { + fmt.Printf("done (%s)\n", ui.FormatDuration(time.Since(start))) } } return nil } -// executeLLMAnalyze runs LLM-powered deep analysis. -func executeLLMAnalyze(ctx context.Context, svc *bootstrap.Service, cwd string, flags bootstrap.Flags, llmCfg llm.Config, plan *bootstrap.Plan) error { +// executeLLMAnalyze runs architecture analysis via AI CLI runners or LLM agents. +// When AI CLIs (Claude Code, Gemini CLI, Codex CLI) are detected, uses them as +// headless subprocesses (no API keys needed). Falls back to internal LLM agents +// when --prefer-cli is not set and no CLIs are found. 
+func executeLLMAnalyze(ctx context.Context, svc *bootstrap.Service, cwd string, flags bootstrap.Flags, llmCfg llm.Config, plan *bootstrap.Plan, bCtx *bootstrapContext) error { // Detect workspace type ws, err := project.DetectWorkspace(cwd) if err != nil { @@ -468,15 +626,293 @@ func executeLLMAnalyze(ctx context.Context, svc *bootstrap.Service, cwd string, // Handle multi-repo workspaces if ws.IsMultiRepo() { - // Scope to user-selected repos if len(plan.SelectedRepos) > 0 { ws.Services = plan.SelectedRepos } return runMultiRepoBootstrap(ctx, svc, ws, flags.Preview) } - // Run agent TUI flow with LLM analysis - return runAgentTUI(ctx, svc, cwd, llmCfg, flags) + // Try runner-based analysis (spawns a single AI CLI subprocess, no API keys needed) + preferCLI := runner.CLIType(flags.PreferCLI) + selectedRunner, runnerErr := runner.PreferredRunner(preferCLI) + + if runnerErr == nil { + return runRunnerAnalysis(ctx, svc, cwd, flags, selectedRunner, bCtx) + } + + // Fallback: use internal LLM agents (requires API key) + if llmCfg.APIKey != "" || llmCfg.Provider == "ollama" { + return runAgentTUI(ctx, svc, cwd, llmCfg, flags) + } + + return fmt.Errorf("no AI CLI detected and no LLM API key configured.\nInstall Claude Code, Gemini CLI, or Codex CLI, or configure an API key via 'taskwing config set'") +} + +// runRunnerAnalysis executes bootstrap analysis by spawning parallel AI CLI instances. 
+func runRunnerAnalysis(ctx context.Context, svc *bootstrap.Service, cwd string, flags bootstrap.Flags, r runner.Runner, bCtx *bootstrapContext) error { + model := flags.Model + + // Print phase header + if !flags.Quiet { + desc := fmt.Sprintf("Using %s", r.Type().String()) + if model != "" { + desc += fmt.Sprintf(" (%s)", model) + } + desc += fmt.Sprintf(" — %d focus areas in parallel.", len(runner.FocusAreas)) + ui.PrintPhaseHeader(bCtx.nextPhase(), bCtx.totalActions, ui.IconRobot, + "Architecture Analysis", desc) + } else { + step := bCtx.nextPhase() + fmt.Printf("[%d/%d] Analyzing architecture... ", step, bCtx.totalActions) + } + + analysisStart := time.Now() + + // Build existing context summary to avoid duplicate findings + existingContext := buildExistingContext() + + // Run all focus areas in parallel through separate AI CLI instances + type jobResult struct { + id string + result *runner.InvokeResult + err error + duration time.Duration + } + results := make([]jobResult, len(runner.FocusAreas)) + + // Build job IDs for TUI + focusIDs := make([]string, len(runner.FocusAreas)) + for i, focus := range runner.FocusAreas { + focusIDs[i] = focusAreaID(focus) + } + + if !flags.Quiet { + // Use Bubble Tea spinner TUI for non-quiet mode + progressModel := ui.NewRunnerProgressModel(focusIDs) + p := tea.NewProgram(progressModel, tea.WithOutput(os.Stderr)) + + for i, focus := range runner.FocusAreas { + go func(idx int, focusArea, jobID string) { + jobStart := time.Now() + + // Build progress callback — suppress heartbeats, show thinking/tool in verbose only + var progressCb runner.ProgressCallback + if flags.Verbose || flags.Debug { + progressCb = func(ev runner.ProgressEvent) { + switch ev.Type { + case runner.ProgressThinking: + fmt.Fprintf(os.Stderr, " [%s] thinking: %s\n", jobID, ev.Summary) + case runner.ProgressToolUse: + fmt.Fprintf(os.Stderr, " [%s] using: %s\n", jobID, ev.Summary) + case runner.ProgressHeartbeat: + // Suppressed + } + } + } + + res, err := 
r.Invoke(ctx, runner.InvokeRequest{ + Prompt: runner.BootstrapAnalysisPrompt(cwd, existingContext, focusArea), + WorkDir: cwd, + Timeout: flags.Timeout, + OnProgress: progressCb, + Model: model, + }) + + // Log runner stderr in debug mode (even on success) + if flags.Debug && res != nil && res.Stderr != "" { + fmt.Fprintf(os.Stderr, "[debug] [%s] stderr:\n%s\n", jobID, res.Stderr) + } + + dur := time.Since(jobStart) + results[idx] = jobResult{id: jobID, result: res, err: err, duration: dur} + + // Signal TUI + doneMsg := ui.RunnerJobDoneMsg{ID: jobID, Duration: dur} + if err != nil { + doneMsg.ErrMsg = err.Error() + } else if res != nil { + var analysis runner.BootstrapAnalysis + if decErr := res.Decode(&analysis); decErr != nil { + doneMsg.ErrMsg = fmt.Sprintf("parse error: %v", decErr) + } else { + doneMsg.Findings = len(analysis.Findings) + doneMsg.Rels = len(analysis.Relationships) + } + } + p.Send(doneMsg) + }(i, focus, focusIDs[i]) + } + + if _, err := p.Run(); err != nil { + // TUI error is non-fatal, results are still in the slice + fmt.Fprintf(os.Stderr, " %s TUI error: %v\n", ui.IconWarn, err) + } + } else { + // Quiet mode: use simple WaitGroup + var wg sync.WaitGroup + for i, focus := range runner.FocusAreas { + wg.Add(1) + go func(idx int, focusArea, jobID string) { + defer wg.Done() + jobStart := time.Now() + + res, err := r.Invoke(ctx, runner.InvokeRequest{ + Prompt: runner.BootstrapAnalysisPrompt(cwd, existingContext, focusArea), + WorkDir: cwd, + Timeout: flags.Timeout, + Model: model, + }) + + results[idx] = jobResult{id: jobID, result: res, err: err, duration: time.Since(jobStart)} + }(i, focus, focusIDs[i]) + } + wg.Wait() + } + + // Collect findings and relationships from all results + var allFindings []core.Finding + var allRelationships []core.Relationship + var errors []string + sourceAgent := string(r.Type()) + + for _, jr := range results { + if jr.err != nil { + errors = append(errors, fmt.Sprintf("%s: %v", jr.id, jr.err)) + continue + } 
+ + var analysis runner.BootstrapAnalysis + if err := jr.result.Decode(&analysis); err != nil { + errors = append(errors, fmt.Sprintf("%s: parse error: %v", jr.id, err)) + continue + } + + findings := convertRunnerFindings(analysis.Findings, sourceAgent) + allFindings = append(allFindings, findings...) + + rels := convertRunnerRelationships(analysis.Relationships) + allRelationships = append(allRelationships, rels...) + } + + // Update bCtx stats + bCtx.analysisFindings = len(allFindings) + bCtx.analysisRelations = len(allRelationships) + bCtx.analysisDuration = time.Since(analysisStart) + + if len(allFindings) == 0 && len(errors) > 0 { + return fmt.Errorf("all analysis jobs failed:\n %s", strings.Join(errors, "\n ")) + } + + // Report partial failures when some succeeded but others didn't + if len(errors) > 0 && len(allFindings) > 0 && !flags.Quiet { + fmt.Fprintf(os.Stderr, "\n %s %d of %d analysis jobs had errors (partial results saved)\n", + ui.IconWarn, len(errors), len(runner.FocusAreas)) + } + + if flags.Preview { + fmt.Println() + ui.PrintHint(fmt.Sprintf("Preview: %d findings, %d relationships from %d focus areas. Run without --preview to save.", + len(allFindings), len(allRelationships), len(runner.FocusAreas))) + return nil + } + + if flags.Quiet { + fmt.Printf("done (%s)\n", ui.FormatDuration(bCtx.analysisDuration)) + } + + // Ingest findings and relationships into knowledge system + return svc.IngestDirectly(ctx, allFindings, allRelationships, flags.Quiet) +} + +// convertRunnerFindings converts runner findings to core.Finding format, +// mapping all fields including metadata, debt classification, and evidence details. 
+func convertRunnerFindings(findings []runner.BootstrapFinding, sourceAgent string) []core.Finding { + result := make([]core.Finding, 0, len(findings)) + for _, f := range findings { + cf := core.Finding{ + Type: core.FindingType(f.Type), + Title: f.Title, + Description: f.Description, + Why: f.Why, + Tradeoffs: f.Tradeoffs, + ConfidenceScore: f.ConfidenceScore, + SourceAgent: sourceAgent, + Metadata: f.Metadata, + DebtScore: f.DebtScore, + DebtReason: f.DebtReason, + RefactorHint: f.RefactorHint, + } + for _, ev := range f.Evidence { + cf.Evidence = append(cf.Evidence, core.Evidence{ + FilePath: ev.FilePath, + StartLine: ev.StartLine, + EndLine: ev.EndLine, + Snippet: ev.Snippet, + GrepPattern: ev.GrepPattern, + EvidenceType: ev.EvidenceType, + }) + } + result = append(result, cf) + } + return result +} + +// convertRunnerRelationships converts runner relationships to core.Relationship format. +func convertRunnerRelationships(rels []runner.BootstrapRelationship) []core.Relationship { + result := make([]core.Relationship, 0, len(rels)) + for _, r := range rels { + result = append(result, core.Relationship{ + From: r.From, + To: r.To, + Relation: r.Relation, + Reason: r.Reason, + }) + } + return result +} + +// buildExistingContext reads existing knowledge nodes to provide as context. +func buildExistingContext() string { + memoryPath, err := config.GetMemoryBasePath() + if err != nil { + return "" + } + repo, err := memory.NewDefaultRepository(memoryPath) + if err != nil { + return "" + } + defer func() { _ = repo.Close() }() + + nodes, err := repo.ListNodes("") + if err != nil || len(nodes) == 0 { + return "" + } + + var sb strings.Builder + for i, n := range nodes { + if i >= 20 { // Limit context size + break + } + sb.WriteString(fmt.Sprintf("- [%s] %s\n", n.Type, n.Summary)) + } + return sb.String() +} + +// focusAreaID returns a short identifier for a focus area description. 
+func focusAreaID(focus string) string { + if strings.Contains(focus, "decisions") { + return "decisions" + } + if strings.Contains(focus, "patterns") { + return "patterns" + } + if strings.Contains(focus, "constraints") { + return "constraints" + } + if strings.Contains(focus, "git history") { + return "git-history" + } + return "analysis" } // Helper functions for flag parsing @@ -495,17 +931,22 @@ func init() { bootstrapCmd.Flags().Bool("skip-init", false, "Skip initialization prompt") bootstrapCmd.Flags().Bool("skip-index", false, "Skip code indexing (symbol extraction)") bootstrapCmd.Flags().Bool("force", false, "Force indexing even for large codebases (>5000 files)") - bootstrapCmd.Flags().Bool("skip-analyze", false, "Skip LLM analysis (for CI/testing)") + bootstrapCmd.Flags().Bool("no-analyze", false, "Skip LLM analysis even when an AI CLI is detected") + bootstrapCmd.Flags().Bool("analyze", false, "Enable LLM-powered architecture analysis (kept for backward compat)") + bootstrapCmd.Flags().Bool("skip-analyze", false, "Legacy alias for --no-analyze") bootstrapCmd.Flags().Bool("resume", false, "Resume from last checkpoint (skip completed agents)") bootstrapCmd.Flags().StringSlice("only-agents", nil, "Run only specified agents (e.g., --only-agents=code,doc)") bootstrapCmd.Flags().Bool("trace", false, "Emit JSON event stream to stderr") bootstrapCmd.Flags().String("trace-file", "", "Write JSON event stream to file (default: .taskwing/logs/bootstrap.trace.jsonl)") bootstrapCmd.Flags().Bool("trace-stdout", false, "Emit JSON event stream to stderr (overrides trace file)") bootstrapCmd.Flags().Bool("debug", false, "Enable debug logging (dumps project context, git paths, agent inputs)") - bootstrapCmd.Flags().Duration("timeout", 0, "LLM request timeout (e.g., 5m, 10m). Overrides TASKWING_LLM_TIMEOUT env var. Default: 5m") + bootstrapCmd.Flags().Duration("timeout", 0, "LLM request timeout (e.g., 5m, 10m). Overrides TASKWING_LLM_TIMEOUT env var. 
Default: 10m") + bootstrapCmd.Flags().String("prefer-cli", "", "Preferred AI CLI for analysis (claude, gemini, codex)") + bootstrapCmd.Flags().String("model", "haiku", "AI model to use for analysis (e.g., haiku, sonnet, opus)") - // Hide internal flags from main help (documented in CLAUDE.md / finetune docs) + // Hide legacy flags from main help _ = bootstrapCmd.Flags().MarkHidden("skip-analyze") + _ = bootstrapCmd.Flags().MarkHidden("analyze") } // runAgentTUI handles the interactive UI part, delegating work to the service @@ -520,7 +961,7 @@ func runAgentTUI(ctx context.Context, svc *bootstrap.Service, cwd string, llmCfg // Open repository for checkpoint tracking repo, repoErr := openRepo() if repoErr != nil && flags.Resume { - fmt.Fprintf(os.Stderr, "⚠️ Cannot resume: %v\n", repoErr) + fmt.Fprintf(os.Stderr, "%s Cannot resume: %v\n", ui.IconWarn, repoErr) flags.Resume = false } if repo != nil { @@ -532,13 +973,13 @@ func runAgentTUI(ctx context.Context, svc *bootstrap.Service, cwd string, llmCfg // Show skipped agents if len(skippedAgents) > 0 && !flags.Quiet { - fmt.Printf("⏭️ Skipping completed agents: %s\n", strings.Join(skippedAgents, ", ")) + fmt.Printf("%s Skipping completed agents: %s\n", ui.IconSkip, strings.Join(skippedAgents, ", ")) } // If all agents were skipped, nothing to do if len(agentsList) == 0 { if !flags.Quiet { - fmt.Println("✅ All agents already completed. Use 'bootstrap' without --resume to re-run.") + ui.PrintSuccess("All agents already completed. 
Use 'bootstrap' without --resume to re-run.") } return nil } @@ -571,7 +1012,7 @@ func runAgentTUI(ctx context.Context, svc *bootstrap.Service, cwd string, llmCfg bootstrapModel, ok := finalModel.(ui.BootstrapModel) if !ok || (bootstrapModel.Quitting && len(bootstrapModel.Results) < len(agentsList)) { - fmt.Println("\n⚠️ Bootstrap cancelled.") + fmt.Printf("\n%s Bootstrap cancelled.\n", ui.IconWarn) return nil } @@ -671,7 +1112,7 @@ func runMultiRepoBootstrap(ctx context.Context, svc *bootstrap.Service, ws *proj fmt.Println("") ui.RenderPageHeader("TaskWing Multi-Repo Bootstrap", fmt.Sprintf("Workspace: %s | Services: %d", ws.Name, ws.ServiceCount())) - fmt.Printf("📦 Analyzing %d services. Running parallel analysis...\n", ws.ServiceCount()) + fmt.Printf("%s Analyzing %d services. Running parallel analysis...\n", ui.IconPackage, ws.ServiceCount()) findings, relationships, errs, err := svc.RunMultiRepoAnalysis(ctx, ws) if err != nil { @@ -679,16 +1120,18 @@ func runMultiRepoBootstrap(ctx context.Context, svc *bootstrap.Service, ws *proj } if len(errs) > 0 { - fmt.Println("\n⚠️ Some services had errors:") + fmt.Println() + ui.PrintWarning("Some services had errors:") for _, e := range errs { fmt.Printf(" - %s\n", e) } } - fmt.Printf("📊 Aggregated: %d findings from %d services\n", len(findings), ws.ServiceCount()-len(errs)) + fmt.Printf("%s Aggregated: %d findings from %d services\n", ui.IconStats, len(findings), ws.ServiceCount()-len(errs)) if preview { - fmt.Println("\n💡 This was a preview. Run 'taskwing bootstrap' to save to memory.") + fmt.Println() + ui.PrintHint("This was a preview. 
Run 'taskwing bootstrap' to save to memory.") return nil } @@ -700,11 +1143,11 @@ func runMultiRepoBootstrap(ctx context.Context, svc *bootstrap.Service, ws *proj func promptRepoSelection(repos []string) []string { selected, err := ui.PromptRepoSelection(repos) if err != nil { - fmt.Fprintf(os.Stderr, "⚠️ Repo selection failed: %v — analyzing all repositories\n", err) + fmt.Fprintf(os.Stderr, "%s Repo selection failed: %v — analyzing all repositories\n", ui.IconWarn, err) return repos } if selected == nil { - fmt.Println("⚠️ Selection cancelled — analyzing all repositories") + fmt.Printf("%s Selection cancelled — analyzing all repositories\n", ui.IconWarn) return repos } return selected @@ -732,7 +1175,7 @@ func installMCPServers(basePath string, selectedAIs []string) { // OpenCode: creates opencode.json at project root // During development, use taskwing-local-dev-mcp for testing changes if err := installOpenCode(binPath, basePath); err != nil { - fmt.Fprintf(os.Stderr, "⚠️ OpenCode MCP installation failed: %v\n", err) + fmt.Fprintf(os.Stderr, "%s OpenCode MCP installation failed: %v\n", ui.IconWarn, err) } } } @@ -764,7 +1207,7 @@ func setupTrace(stream *core.StreamingOutput, trace bool, traceFile string, trac out = f cleanup = func() { _ = f.Close() } if !viper.GetBool("quiet") { - fmt.Fprintf(os.Stderr, "🧾 Trace: %s\n", traceFile) + fmt.Fprintf(os.Stderr, "%s Trace: %s\n", ui.IconInfo, traceFile) } } @@ -799,7 +1242,7 @@ func checkAgentFailures(agents []*ui.AgentState) error { } } if len(failedAgents) > 0 { - fmt.Fprintln(os.Stderr, "\n✗ Bootstrap failed. Some agents errored:") + fmt.Fprintf(os.Stderr, "\n%s Bootstrap failed. Some agents errored:\n", ui.IconFail) for _, line := range failedAgents { fmt.Fprintf(os.Stderr, " - %s\n", line) } @@ -810,7 +1253,7 @@ func checkAgentFailures(agents []*ui.AgentState) error { // runCodeIndexing runs the code intelligence indexer on the codebase. // This extracts symbols (functions, types, etc.) 
for enhanced search and MCP ask. -func runCodeIndexing(ctx context.Context, basePath string, forceIndex, isQuiet bool) error { +func runCodeIndexing(ctx context.Context, basePath string, forceIndex, isQuiet bool, bCtx *bootstrapContext) error { // Open repository to get database handle repo, err := openRepo() if err != nil { @@ -837,7 +1280,7 @@ func runCodeIndexing(ctx context.Context, basePath string, forceIndex, isQuiet b fileCount, err := indexer.CountSupportedFiles(basePath) if err != nil { if !isQuiet { - fmt.Fprintf(os.Stderr, "⚠️ Could not count files for indexing: %v\n", err) + fmt.Fprintf(os.Stderr, " %s Could not count files for indexing: %v\n", ui.IconWarn, err) } return nil // Non-fatal - skip indexing if we can't count } @@ -845,20 +1288,14 @@ func runCodeIndexing(ctx context.Context, basePath string, forceIndex, isQuiet b // Large codebase safety check const maxFilesWithoutForce = 5000 if fileCount > maxFilesWithoutForce && !forceIndex { - fmt.Println() - fmt.Printf("⚠️ Large codebase detected: %d files to index\n", fileCount) - fmt.Printf(" This may take a while and consume resources.\n") - fmt.Printf(" Run with --force to proceed, or use --skip-index to bypass.\n") + if !isQuiet { + ui.PrintPhaseDetail(fmt.Sprintf("%s Large codebase detected: %d files to index", ui.IconWarn, fileCount)) + ui.PrintPhaseDetail("Run with --force to proceed, or use --skip-index to bypass.") + } return nil // Not an error, just skip } - // Print header - if !isQuiet { - fmt.Println() - fmt.Println("📇 Code Intelligence Indexing") - fmt.Println("────────────────────────────") - fmt.Printf(" 🔍 Scanning %d source files...\n", fileCount) - } + bCtx.filesScanned = fileCount // Configure progress callback with more detail var lastUpdate time.Time @@ -873,7 +1310,7 @@ func runCodeIndexing(ctx context.Context, basePath string, forceIndex, isQuiet b if stats.FilesScanned > 0 { pct = (stats.FilesIndexed * 100) / stats.FilesScanned } - fmt.Fprintf(os.Stderr, "\r ⚡ Progress: %d%% 
(%d files, %d symbols) ", pct, stats.FilesIndexed, stats.SymbolsFound) + fmt.Fprintf(os.Stderr, "\r %s Progress: %d%% (%d files, %d symbols) ", ui.IconBolt, pct, stats.FilesIndexed, stats.SymbolsFound) } } @@ -886,31 +1323,39 @@ func runCodeIndexing(ctx context.Context, basePath string, forceIndex, isQuiet b // Prune stale files first prunedCount, err := indexer.PruneStaleFiles(ctx) if err != nil && !isQuiet { - fmt.Fprintf(os.Stderr, " ⚠️ Prune failed: %v\n", err) + fmt.Fprintf(os.Stderr, " %s Prune failed: %v\n", ui.IconWarn, err) } // Run incremental indexing stats, err := indexer.IncrementalIndex(ctx, basePath) if err != nil { if !isQuiet { - fmt.Fprintf(os.Stderr, "\r \n") - fmt.Fprintf(os.Stderr, " ⚠️ Indexing failed: %v\n", err) + fmt.Fprintf(os.Stderr, "\r \n") + fmt.Fprintf(os.Stderr, " %s Indexing failed: %v\n", ui.IconWarn, err) } return nil // Non-fatal - bootstrap succeeded even if indexing fails } // Clear progress line and print summary if !isQuiet { - fmt.Fprintf(os.Stderr, "\r \n") + fmt.Fprintf(os.Stderr, "\r \n") duration := time.Since(start) - fmt.Printf(" ✅ Indexed %d updates, pruned %d files in %v\n", - stats.FilesIndexed, prunedCount, duration.Round(time.Millisecond)) + bCtx.symbolsFound = stats.SymbolsFound + bCtx.callRelationships = stats.RelationsFound + bCtx.indexDuration = duration + + ui.PrintPhaseResult(fmt.Sprintf("%d updates, pruned %d stale files", stats.FilesIndexed, prunedCount), duration) if stats.RelationsFound > 0 { - fmt.Printf(" 🔗 Discovered %d call relationships\n", stats.RelationsFound) + ui.PrintPhaseResult(fmt.Sprintf("Discovered %d call relationships", stats.RelationsFound), 0) } if len(stats.Errors) > 0 { - fmt.Printf(" ⚠️ %d files skipped (parse errors)\n", len(stats.Errors)) + ui.PrintPhaseDetail(fmt.Sprintf("%s %d files skipped (parse errors)", ui.IconWarn, len(stats.Errors))) } + } else { + bCtx.symbolsFound = stats.SymbolsFound + bCtx.callRelationships = stats.RelationsFound + bCtx.indexDuration = time.Since(start) 
+ fmt.Printf("done (%s)\n", ui.FormatDuration(bCtx.indexDuration)) } return nil diff --git a/cmd/config_cmd.go b/cmd/config_cmd.go index 44158d4..db4085f 100644 --- a/cmd/config_cmd.go +++ b/cmd/config_cmd.go @@ -379,16 +379,16 @@ func setHooksEnabled(cwd string, enabled bool) error { if err := initializer.InstallHooksConfig("codex", true); err != nil { return err } - fmt.Println("✅ Hooks enabled") + ui.PrintSuccess("Hooks enabled") } else { // Remove hooks from settings files if err := removeHooksFromSettings(filepath.Join(cwd, ".claude", "settings.json")); err != nil { - fmt.Printf("⚠️ Could not update Claude settings: %v\n", err) + ui.PrintWarning(fmt.Sprintf("Could not update Claude settings: %v", err)) } if err := removeHooksFromSettings(filepath.Join(cwd, ".codex", "settings.json")); err != nil { - fmt.Printf("⚠️ Could not update Codex settings: %v\n", err) + ui.PrintWarning(fmt.Sprintf("Could not update Codex settings: %v", err)) } - fmt.Println("✅ Hooks disabled") + ui.PrintSuccess("Hooks disabled") } return nil } @@ -444,7 +444,7 @@ func updateHookCommand(cwd string, modifier func(string) string) error { return fmt.Errorf("no hooks config found. 
Run: taskwing config set hooks.enabled true") } - fmt.Println("✅ Configuration updated") + ui.PrintSuccess("Configuration updated") return nil } @@ -537,26 +537,26 @@ func runConfigInteractive() error { switch result.Selected { case "bootstrap": if err := configureBootstrapModel(); err != nil { - fmt.Printf("⚠️ %v\n", err) + ui.PrintWarning(fmt.Sprintf("%v", err)) } case "query": if err := configureQueryModel(); err != nil { - fmt.Printf("⚠️ %v\n", err) + ui.PrintWarning(fmt.Sprintf("%v", err)) } case "embedding": if err := configureEmbedding(); err != nil { - fmt.Printf("⚠️ %v\n", err) + ui.PrintWarning(fmt.Sprintf("%v", err)) } case "reranking": if err := configureReranking(); err != nil { - fmt.Printf("⚠️ %v\n", err) + ui.PrintWarning(fmt.Sprintf("%v", err)) } } } } func configureBootstrapModel() error { - fmt.Println("\n🧠 Configure Complex Tasks Model") + fmt.Printf("\n%s Configure Complex Tasks Model\n", ui.IconRobot) fmt.Println(" Used for: bootstrap, planning, deep analysis") fmt.Println() @@ -587,12 +587,13 @@ func configureBootstrapModel() error { return err } - fmt.Printf("\n✅ Complex tasks: %s/%s\n", selection.Provider, selection.Model) + fmt.Println() + ui.PrintSuccess(fmt.Sprintf("Complex tasks: %s/%s", selection.Provider, selection.Model)) return nil } func configureQueryModel() error { - fmt.Println("\n⚡ Configure Fast Queries Model") + fmt.Printf("\n%s Configure Fast Queries Model\n", ui.IconBolt) fmt.Println(" Used for: context lookups, ask queries (cheaper, faster)") fmt.Println() @@ -617,12 +618,13 @@ func configureQueryModel() error { return err } - fmt.Printf("\n✅ Fast queries: %s/%s\n", selection.Provider, selection.Model) + fmt.Println() + ui.PrintSuccess(fmt.Sprintf("Fast queries: %s/%s", selection.Provider, selection.Model)) return nil } func configureEmbedding() error { - fmt.Println("\n📐 Configure Embeddings") + fmt.Printf("\n%s Configure Embeddings\n", ui.IconRuler) fmt.Println(" Used for: semantic search in knowledge base") 
fmt.Println(" Tip: Ollama is free and runs locally") fmt.Println() @@ -645,22 +647,19 @@ func configureEmbedding() error { if !viper.IsSet("llm.embedding_base_url") { viper.Set("llm.embedding_base_url", llm.DefaultOllamaURL) } - case llm.ProviderTEI: - if !viper.IsSet("llm.embedding_base_url") { - viper.Set("llm.embedding_base_url", llm.DefaultTEIURL) - } } if err := writeConfig(); err != nil { return err } - fmt.Printf("\n✅ Embeddings: %s/%s\n", selection.Provider, selection.Model) + fmt.Println() + ui.PrintSuccess(fmt.Sprintf("Embeddings: %s/%s", selection.Provider, selection.Model)) return nil } func configureReranking() error { - fmt.Println("\n🔄 Configure Reranking") + fmt.Printf("\n%s Configure Reranking\n", ui.IconWrench) fmt.Println(" Optional: improves search result quality") fmt.Println(" Requires: TEI server with reranker model") fmt.Println() @@ -678,7 +677,8 @@ func configureReranking() error { if err := writeConfig(); err != nil { return err } - fmt.Println("\n✅ Reranking disabled") + fmt.Println() + ui.PrintSuccess("Reranking disabled") } } else { fmt.Println(" Currently: DISABLED") @@ -698,7 +698,8 @@ func configureReranking() error { if err := writeConfig(); err != nil { return err } - fmt.Println("\n✅ Reranking enabled") + fmt.Println() + ui.PrintSuccess("Reranking enabled") } } return nil @@ -772,10 +773,10 @@ func runTelemetryStatus() error { fmt.Println() status := "Disabled" - statusIcon := "❌" + statusIcon := ui.IconStop if cfg.IsEnabled() { status = "Enabled" - statusIcon = "✅" + statusIcon = ui.IconDone } fmt.Printf(" Status: %s %s\n", statusIcon, status) @@ -810,7 +811,7 @@ func runTelemetryEnable() error { }) } - fmt.Println("✅ Telemetry enabled") + ui.PrintSuccess("Telemetry enabled") fmt.Println() fmt.Println("Thank you for helping improve TaskWing!") fmt.Println("We collect: command names, duration, success/failure, OS, CLI version") @@ -838,7 +839,7 @@ func runTelemetryDisable() error { }) } - fmt.Println("✅ Telemetry disabled") + 
ui.PrintSuccess("Telemetry disabled") fmt.Println() fmt.Println("You can re-enable anytime with: taskwing config telemetry enable") return nil diff --git a/cmd/config_helper.go b/cmd/config_helper.go index c9a636e..be2ff5f 100644 --- a/cmd/config_helper.go +++ b/cmd/config_helper.go @@ -17,210 +17,84 @@ import ( "github.com/spf13/viper" ) -// getLLMConfig unifies LLM configuration loading across all CLI commands. -// It respects precedence: Flag > Config File > Environment Variable. -// IMPORTANT: If a model is specified, the provider is inferred from the model name -// to enable seamless cross-provider usage (e.g., switching from gemini to gpt models). +// getLLMConfig loads LLM config with interactive prompts for missing values. +// Delegates core logic to config.LoadLLMConfig (single source of truth). func getLLMConfig(cmd *cobra.Command) (llm.Config, error) { - // Flags support (if the command defined them) - provider, _ := cmd.Flags().GetString("provider") - model, err := cmd.Flags().GetString("model") - if err != nil { - // Fallback: If "model" is a StringSlice (e.g. eval command), take the first one - if s, err2 := cmd.Flags().GetStringSlice("model"); err2 == nil && len(s) > 0 { - model = s[0] - } - } - apiKey, _ := cmd.Flags().GetString("api-key") - ollamaURL, _ := cmd.Flags().GetString("ollama-url") - ollamaURLSet := cmd.Flags().Changed("ollama-url") - - // Track if we need to prompt for interactive setup - providerFromPrompt := false - modelFromPrompt := false - - // 1. Get model first (before provider) - this enables model-based provider inference - if model == "" { - if viper.IsSet("llm.model") { - model = viper.GetString("llm.model") - } - } - - // 2. 
Provider - with model-based inference - providerInferredFromModel := false - if provider == "" { - // If model is specified, try to infer provider from model name - // This is KEY for cross-provider usage (e.g., -m gpt-5-mini when config says gemini) - if model != "" { - if inferredProvider, ok := llm.InferProviderFromModel(model); ok { - provider = inferredProvider - providerInferredFromModel = true + // Apply flag overrides before loading config + if p, _ := cmd.Flags().GetString("provider"); p != "" { + viper.Set("llm.provider", p) + } + if m, err := cmd.Flags().GetString("model"); err == nil && m != "" { + viper.Set("llm.model", m) + // Infer provider from model if provider not explicitly set + if !cmd.Flags().Changed("provider") { + if inferred, ok := llm.InferProviderFromModel(m); ok { + viper.Set("llm.provider", inferred) } } } - - // Fall back to config if not inferred - if provider == "" { - if viper.IsSet("llm.provider") { - provider = viper.GetString("llm.provider") + if k, _ := cmd.Flags().GetString("api-key"); k != "" { + provider := viper.GetString("llm.provider") + if provider == "" { + provider = llm.DefaultProvider } + viper.Set(fmt.Sprintf("llm.apiKeys.%s", provider), k) } - - // Interactive Provider Selection - // If provider is still empty (not in flag, not in config) and we are interactive, ask the user! 
- if provider == "" && ui.IsInteractive() { - selectedProvider, err := ui.PromptLLMProvider() - if err == nil && selectedProvider != "" { - provider = selectedProvider - providerFromPrompt = true + if cmd.Flags().Changed("ollama-url") { + if u, _ := cmd.Flags().GetString("ollama-url"); u != "" { + viper.Set("llm.ollamaURL", u) } } - if provider == "" { - provider = "openai" // Default fallback if non-interactive or prompt ignored - } - - llmProvider, err := llm.ValidateProvider(provider) - if err != nil { - return llm.Config{}, fmt.Errorf("invalid provider: %w", err) - } - - // Interactive Model Selection (if provider was just selected or model not configured) - if model == "" && ui.IsInteractive() { - selectedModel, err := ui.PromptModelSelection(provider) - if err == nil && selectedModel != "" { - model = selectedModel - modelFromPrompt = true + // Interactive prompts if running in terminal and values still missing + if ui.IsInteractive() { + if !viper.IsSet("llm.provider") { + if selected, err := ui.PromptLLMProvider(); err == nil && selected != "" { + viper.Set("llm.provider", selected) + } } - } - // Final fallback to provider default - if model == "" { - model = llm.DefaultModelForProvider(string(llmProvider)) - } - - // 3. 
API Key - resolve for the ACTUAL provider being used - if apiKey == "" { - apiKey = config.ResolveAPIKey(llmProvider) - } - - // Interactive Prompt for API Key (Only if needed for the selected provider) - requiresKey := llmProvider == llm.ProviderOpenAI || - llmProvider == llm.ProviderAnthropic || - llmProvider == llm.ProviderGemini || - llmProvider == llm.ProviderBedrock || - llmProvider == llm.ProviderTaskWing + provider := viper.GetString("llm.provider") + if provider == "" { + provider = llm.DefaultProvider + } - bedrockRegion := "" - if llmProvider == llm.ProviderBedrock { - bedrockRegion = config.ResolveBedrockRegion() - if bedrockRegion == "" && ui.IsInteractive() { - inputRegion, promptErr := promptBedrockRegion() - if promptErr != nil { - fmt.Fprintf(os.Stderr, "Warning: Bedrock region prompt failed: %v\n", promptErr) - } else { - bedrockRegion = inputRegion + if !viper.IsSet("llm.model") { + if selected, err := ui.PromptModelSelection(provider); err == nil && selected != "" { + viper.Set("llm.model", selected) } } - if bedrockRegion == "" { - return llm.Config{}, fmt.Errorf("AWS Bedrock region is required: set config 'llm.bedrock.region' or env var AWS_REGION") - } - viper.Set("llm.bedrock.region", bedrockRegion) - } - if requiresKey && apiKey == "" { - // Only prompt if we are in an interactive terminal - if ui.IsInteractive() { + llmProvider, _ := llm.ValidateProvider(provider) + requiresKey := llmProvider == llm.ProviderOpenAI || + llmProvider == llm.ProviderAnthropic || + llmProvider == llm.ProviderGemini || + llmProvider == llm.ProviderBedrock || + llmProvider == llm.ProviderTaskWing + + apiKey := config.ResolveAPIKey(llmProvider) + if requiresKey && apiKey == "" { fmt.Printf("No API key found for %s.\n", provider) - inputKey, err := ui.PromptAPIKey() - if err != nil { - fmt.Fprintf(os.Stderr, "Warning: API key prompt failed: %v\n", err) - } else if inputKey != "" { - apiKey = inputKey - // Save Config Globally (Provider + Model + Key) - // Only save 
if NOT inferred from model (to avoid overwriting user's default config) - if !providerInferredFromModel { - if err := config.SaveGlobalLLMConfigWithModel(string(llmProvider), model, apiKey); err != nil { - fmt.Fprintf(os.Stderr, "Warning: failed to save config: %v\n", err) - } else { - fmt.Println("✓ Configuration saved to ~/.taskwing/config.yaml") - } + if inputKey, err := ui.PromptAPIKey(); err == nil && inputKey != "" { + viper.Set(fmt.Sprintf("llm.apiKeys.%s", llmProvider), inputKey) + model := viper.GetString("llm.model") + if err := config.SaveGlobalLLMConfigWithModel(provider, model, inputKey); err != nil { + fmt.Fprintf(os.Stderr, "Warning: failed to save config: %v\n", err) } else { - // Just save the API key for this provider, don't change default provider - if err := config.SaveAPIKeyForProvider(string(llmProvider), apiKey); err != nil { - fmt.Fprintf(os.Stderr, "Warning: failed to save API key: %v\n", err) - } else { - fmt.Printf("✓ API key for %s saved to ~/.taskwing/config.yaml\n", provider) - } + ui.PrintSuccess("Configuration saved to ~/.taskwing/config.yaml") } - if llmProvider == llm.ProviderBedrock { - if err := config.SaveBedrockRegion(bedrockRegion); err != nil { - fmt.Fprintf(os.Stderr, "Warning: failed to save Bedrock region: %v\n", err) - } - } - // Also update Viper's in-memory config so subsequent calls in this process find the key - viper.Set(fmt.Sprintf("llm.apiKeys.%s", llmProvider), apiKey) - } - } - } else if (providerFromPrompt || modelFromPrompt) && ui.IsInteractive() { - // Save if we interactively selected provider or model - if err := config.SaveGlobalLLMConfigWithModel(provider, model, apiKey); err != nil { - fmt.Fprintf(os.Stderr, "Warning: failed to save config: %v\n", err) - } - if llmProvider == llm.ProviderBedrock { - if err := config.SaveBedrockRegion(bedrockRegion); err != nil { - fmt.Fprintf(os.Stderr, "Warning: failed to save Bedrock region: %v\n", err) } } - } - - if requiresKey && apiKey == "" { - return llm.Config{}, 
fmt.Errorf("API key required for %s: use --api-key, set config 'llm.apiKeys.%s', or set env var (%s)", provider, provider, llm.GetEnvVarForProvider(string(llmProvider))) - } - // 4. Base URL - baseURL := "" - switch llmProvider { - case llm.ProviderOllama: - if ollamaURLSet { - baseURL = ollamaURL - } - case llm.ProviderBedrock: - // Bedrock endpoint is resolved from region/base_url config. - default: - // Cloud providers use their own defaults; --ollama-url flag is ignored. - } - if baseURL == "" { - baseURL, err = config.ResolveProviderBaseURL(llmProvider) - if err != nil { - return llm.Config{}, err + if llmProvider == llm.ProviderBedrock && config.ResolveBedrockRegion() == "" { + if region, err := promptBedrockRegion(); err == nil { + viper.Set("llm.bedrock.region", region) + } } } - // 5. Embedding Model - embeddingModel := viper.GetString("llm.embedding_model") - - // 6. Thinking Budget (for models that support extended thinking) - thinkingBudget := viper.GetInt("llm.thinkingBudget") - if thinkingBudget == 0 && llm.ModelSupportsThinking(model) { - // Default thinking budget for supported models (8K tokens) - thinkingBudget = 8192 - } - - timeout, err := config.ResolveLLMTimeout() - if err != nil { - return llm.Config{}, err - } - - return llm.Config{ - Provider: llmProvider, - Model: model, - EmbeddingModel: embeddingModel, - APIKey: apiKey, - BaseURL: baseURL, - ThinkingBudget: thinkingBudget, - Timeout: timeout, - }, nil + // Delegate to single source of truth + return config.LoadLLMConfig() } func promptBedrockRegion() (string, error) { @@ -239,22 +113,12 @@ func promptBedrockRegion() (string, error) { } // getLLMConfigForRole returns the appropriate LLM config for a specific role. -// It respects precedence: Role-specific config > Default config > Environment. 
-// -// Role-specific config keys: -// - llm.models.bootstrap: "provider:model" for bootstrap/planning tasks -// - llm.models.query: "provider:model" for context/ask queries -// -// If no role-specific config is set, falls back to getLLMConfig(). +// Checks role-specific config first (llm.models.), then falls back to getLLMConfig. func getLLMConfigForRole(cmd *cobra.Command, role llm.ModelRole) (llm.Config, error) { - // Check for role-specific config roleConfigKey := fmt.Sprintf("llm.models.%s", role) if viper.IsSet(roleConfigKey) { spec := viper.GetString(roleConfigKey) - // Use shared implementation from config package (single source of truth) return config.ParseModelSpec(spec, role) } - - // Fall back to default config (handles interactive prompts and flags) return getLLMConfig(cmd) } diff --git a/cmd/doctor.go b/cmd/doctor.go index acfa4a0..ac01e56 100644 --- a/cmd/doctor.go +++ b/cmd/doctor.go @@ -446,7 +446,7 @@ func checkActivePlan() DoctorCheck { Name: "Active Plan", Status: "warn", Message: "No active plan", - Hint: "Run: taskwing goal \"your goal\"", + Hint: "Run: taskwing plan \"your description\"", } } @@ -516,7 +516,7 @@ func printNextSteps(checks []DoctorCheck) { fmt.Println() fmt.Println("Next steps:") if !hasActivePlan { - fmt.Println(" 1. Create and activate plan: taskwing goal \"your development goal\"") + fmt.Println(" 1. Create and activate plan: taskwing plan \"your development goal\"") fmt.Println(" 2. Open Claude Code and run: /tw-next") } else if !hasSession { fmt.Println(" 1. 
Open Claude Code (session will auto-initialize)") diff --git a/cmd/execute.go b/cmd/execute.go new file mode 100644 index 0000000..d5c8c02 --- /dev/null +++ b/cmd/execute.go @@ -0,0 +1,233 @@ +/* +Copyright © 2025 Joseph Goksu josephgoksu@gmail.com +*/ +package cmd + +import ( + "fmt" + "os" + "time" + + "github.com/josephgoksu/TaskWing/internal/config" + "github.com/josephgoksu/TaskWing/internal/runner" + "github.com/josephgoksu/TaskWing/internal/task" + "github.com/josephgoksu/TaskWing/internal/ui" + "github.com/spf13/cobra" +) + +var executeCmd = &cobra.Command{ + Use: "execute", + Short: "Execute tasks from the active plan using your AI CLI", + Long: `Execute tasks by spawning your installed AI CLI (Claude Code, Gemini CLI, Codex CLI) +as a headless subprocess. No API keys needed — uses whatever AI CLI you already have. + +The AI CLI gets full tool access to modify files, run commands, and implement changes. + +Examples: + taskwing execute # Execute next pending task + taskwing execute --all # Execute all remaining tasks sequentially + taskwing execute --dry-run # Show what would be executed + taskwing execute --max-tasks 3 # Execute up to 3 tasks then stop`, + RunE: runExecute, +} + +func init() { + rootCmd.AddCommand(executeCmd) + executeCmd.Flags().Bool("all", false, "Execute all remaining tasks sequentially") + executeCmd.Flags().Bool("dry-run", false, "Show what would be executed without running") + executeCmd.Flags().Int("max-tasks", 0, "Maximum number of tasks to execute (0 = unlimited)") + executeCmd.Flags().String("prefer-cli", "", "Preferred AI CLI (claude, gemini, codex)") + executeCmd.Flags().Duration("timeout", 10*time.Minute, "Timeout per task execution") +} + +func runExecute(cmd *cobra.Command, _ []string) error { + ctx := cmd.Context() + dryRun, _ := cmd.Flags().GetBool("dry-run") + all, _ := cmd.Flags().GetBool("all") + maxTasks, _ := cmd.Flags().GetInt("max-tasks") + preferCLI, _ := cmd.Flags().GetString("prefer-cli") + timeout, _ := 
cmd.Flags().GetDuration("timeout") + + // Open repo + repo, err := openRepoOrHandleMissingMemory() + if err != nil { + return err + } + if repo == nil { + return nil + } + defer func() { _ = repo.Close() }() + + memoryPath, err := config.GetMemoryBasePath() + if err != nil { + return fmt.Errorf("get memory path: %w", err) + } + svc := task.NewService(repo, memoryPath) + + // Get active plan + planID, err := svc.GetActivePlanID() + if err != nil || planID == "" { + return fmt.Errorf("no active plan. Create one with: taskwing plan \"\"") + } + + plan, err := svc.GetPlanWithTasks(planID) + if err != nil { + return fmt.Errorf("load active plan: %w", err) + } + + // Collect pending tasks + allPending := filterPendingTasks(plan.Tasks) + if len(allPending) == 0 { + if !isQuiet() { + ui.PrintSuccess("All tasks completed. Nothing to execute.") + } + return nil + } + + if !isQuiet() { + fmt.Printf("\n%s Active plan: %s (%d pending tasks)\n", ui.IconTask, plan.Goal, len(allPending)) + } + + // Apply max-tasks limit + pendingTasks := allPending + if !all && maxTasks == 0 { + maxTasks = 1 // Default: execute one task + } + if maxTasks > 0 && len(pendingTasks) > maxTasks { + pendingTasks = pendingTasks[:maxTasks] + } + + // Dry run: just show what would execute + if dryRun { + return showDryRun(pendingTasks) + } + + // Detect AI CLI runner + cliRunner, err := runner.PreferredRunner(runner.CLIType(preferCLI)) + if err != nil { + return fmt.Errorf("no AI CLI found: %w\nInstall Claude Code, Gemini CLI, or Codex CLI", err) + } + + if !isQuiet() { + fmt.Printf("%s Using %s for execution\n\n", ui.IconRobot, cliRunner.Type().String()) + } + + cwd, _ := os.Getwd() + + // Execute tasks sequentially + for i, t := range pendingTasks { + if ctx.Err() != nil { + return ctx.Err() + } + + if !isQuiet() { + fmt.Printf("[%d/%d] %s\n", i+1, len(pendingTasks), t.Title) + fmt.Printf(" [%s working...]\n", cliRunner.Type().String()) + } + + // Mark as in_progress + if err := 
repo.UpdateTaskStatus(t.ID, task.StatusInProgress); err != nil { + return fmt.Errorf("update task status: %w", err) + } + + // Build execution prompt + prompt := runner.TaskExecutionPrompt( + t.Title, + t.Description, + t.AcceptanceCriteria, + t.ContextSummary, + t.ValidationSteps, + ) + + // Execute via AI CLI with file access + result, err := cliRunner.InvokeWithFiles(ctx, runner.InvokeRequest{ + Prompt: prompt, + WorkDir: cwd, + Timeout: timeout, + }) + + if err != nil { + // Mark as failed + _ = repo.UpdateTaskStatus(t.ID, task.StatusFailed) + if !isQuiet() { + fmt.Printf(" %s Failed: %v\n\n", ui.IconFail, err) + } + return fmt.Errorf("task %q failed: %w", t.Title, err) + } + + // Try to parse execution output + var execOutput runner.ExecuteOutput + if decErr := result.Decode(&execOutput); decErr != nil { + // Even if we can't parse the output, the task may have succeeded + // (the AI CLI may not have output valid JSON) + if !isQuiet() { + fmt.Printf(" %s Complete (output not parseable)\n\n", ui.IconOK) + } + _ = repo.UpdateTaskStatus(t.ID, task.StatusCompleted) + continue + } + + switch execOutput.Status { + case "completed": + _ = repo.UpdateTaskStatus(t.ID, task.StatusCompleted) + if !isQuiet() { + fmt.Printf(" %s Complete: %s\n\n", ui.IconOK, execOutput.Summary) + } + case "partial": + _ = repo.UpdateTaskStatus(t.ID, task.StatusInProgress) + if !isQuiet() { + fmt.Printf(" %s Partial: %s\n\n", ui.IconPartial, execOutput.Summary) + } + return fmt.Errorf("task %q partially completed: %s", t.Title, execOutput.Summary) + default: // "failed" or unknown + _ = repo.UpdateTaskStatus(t.ID, task.StatusFailed) + if !isQuiet() { + fmt.Printf(" %s Failed: %s\n\n", ui.IconFail, execOutput.Error) + } + return fmt.Errorf("task %q failed: %s", t.Title, execOutput.Error) + } + } + + if !isQuiet() { + ui.PrintSuccess(fmt.Sprintf("%d task(s) completed.", len(pendingTasks))) + } + + return nil +} + +// filterPendingTasks returns tasks that are ready to execute (pending or ready 
status). +func filterPendingTasks(tasks []task.Task) []task.Task { + var pending []task.Task + for _, t := range tasks { + if t.Status == task.StatusPending || t.Status == task.StatusReady { + pending = append(pending, t) + } + } + return pending +} + +// showDryRun displays what would be executed without running. +func showDryRun(tasks []task.Task) error { + // Show detected CLIs + detected := runner.DetectCLIs() + if len(detected) > 0 { + fmt.Println("\nDetected AI CLIs:") + for _, d := range detected { + fmt.Printf(" • %s (%s)\n", d.Type.String(), d.BinaryPath) + } + } else { + fmt.Println() + ui.PrintWarning("No AI CLIs detected") + } + + fmt.Printf("\nTasks to execute (%d):\n", len(tasks)) + for i, t := range tasks { + fmt.Printf(" %d. %s\n", i+1, t.Title) + fmt.Printf(" Complexity: %s | Agent: %s\n", t.Complexity, t.AssignedAgent) + if len(t.AcceptanceCriteria) > 0 { + fmt.Printf(" Criteria: %d items\n", len(t.AcceptanceCriteria)) + } + } + fmt.Printf("\n%s Run without --dry-run to execute.\n", ui.IconHint) + return nil +} diff --git a/cmd/gain.go b/cmd/gain.go new file mode 100644 index 0000000..75e1eb8 --- /dev/null +++ b/cmd/gain.go @@ -0,0 +1,249 @@ +/* +Copyright © 2025 Joseph Goksu josephgoksu@gmail.com +*/ +package cmd + +import ( + "database/sql" + "encoding/json" + "fmt" + + "github.com/spf13/cobra" +) + +var gainCmd = &cobra.Command{ + Use: "gain", + Short: "Show token savings from output compression", + Long: `Display a dashboard of token savings achieved by the compression proxy. + +Shows total commands compressed, bytes/tokens saved, efficiency percentage, +and top commands by savings. 
+ +Examples: + taskwing gain # Dashboard summary + taskwing gain --history # Per-command breakdown + taskwing gain --format json # Machine-readable output`, + RunE: runGain, +} + +func init() { + rootCmd.AddCommand(gainCmd) + gainCmd.Flags().Bool("history", false, "Show per-command history") + gainCmd.Flags().String("format", "text", "Output format: text or json") +} + +type gainSummary struct { + TotalCommands int `json:"total_commands"` + TotalInputBytes int64 `json:"total_input_bytes"` + TotalOutputBytes int64 `json:"total_output_bytes"` + TotalSavedBytes int64 `json:"total_saved_bytes"` + TotalSavedTokens int64 `json:"total_saved_tokens"` + AvgCompression float64 `json:"avg_compression_pct"` + TopCommands []gainCommandStat `json:"top_commands"` +} + +type gainCommandStat struct { + Command string `json:"command"` + Count int `json:"count"` + SavedBytes int64 `json:"saved_bytes"` + SavedTokens int64 `json:"saved_tokens"` + AvgRatio float64 `json:"avg_ratio_pct"` +} + +type gainHistoryEntry struct { + Command string `json:"command"` + InputBytes int `json:"input_bytes"` + OutputBytes int `json:"output_bytes"` + SavedTokens int `json:"saved_tokens"` + CompressionRatio float64 `json:"compression_ratio"` + CreatedAt string `json:"created_at"` +} + +func runGain(cmd *cobra.Command, _ []string) error { + history, _ := cmd.Flags().GetBool("history") + format, _ := cmd.Flags().GetString("format") + + repo, err := openRepo() + if err != nil { + if isMissingProjectMemoryError(err) { + fmt.Println("No project memory found. 
Run 'taskwing init' first.") + return nil + } + return err + } + defer func() { _ = repo.Close() }() + + db := repo.GetDB().DB() + + if history { + return showGainHistory(db, format) + } + return showGainSummary(db, format) +} + +func showGainSummary(db *sql.DB, format string) error { + var summary gainSummary + + // Get totals + err := db.QueryRow(` + SELECT + COUNT(*), + COALESCE(SUM(input_bytes), 0), + COALESCE(SUM(output_bytes), 0), + COALESCE(SUM(saved_bytes), 0), + COALESCE(SUM(saved_tokens), 0) + FROM token_stats + `).Scan( + &summary.TotalCommands, + &summary.TotalInputBytes, + &summary.TotalOutputBytes, + &summary.TotalSavedBytes, + &summary.TotalSavedTokens, + ) + if err != nil { + return fmt.Errorf("query token stats: %w", err) + } + + if summary.TotalInputBytes > 0 { + summary.AvgCompression = float64(summary.TotalSavedBytes) / float64(summary.TotalInputBytes) * 100 + } + + // Get top commands by savings + rows, err := db.Query(` + SELECT + command, + COUNT(*) as cnt, + SUM(saved_bytes) as total_saved, + COALESCE(SUM(saved_tokens), 0) as total_tokens_saved, + AVG(CASE WHEN input_bytes > 0 THEN (1.0 - CAST(output_bytes AS REAL) / input_bytes) * 100 ELSE 0 END) as avg_ratio + FROM token_stats + GROUP BY command + ORDER BY total_saved DESC + LIMIT 10 + `) + if err != nil { + return fmt.Errorf("query top commands: %w", err) + } + defer rows.Close() + + for rows.Next() { + var cs gainCommandStat + if err := rows.Scan(&cs.Command, &cs.Count, &cs.SavedBytes, &cs.SavedTokens, &cs.AvgRatio); err != nil { + continue + } + summary.TopCommands = append(summary.TopCommands, cs) + } + + if format == "json" { + return printJSON(summary) + } + + // Text output + if summary.TotalCommands == 0 { + fmt.Println("No compression data yet. 
Use 'taskwing proxy ' or enable the PreToolUse hook.") + return nil + } + + fmt.Println() + fmt.Println("Token Savings Dashboard") + fmt.Println("━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━") + fmt.Printf(" Commands compressed: %d\n", summary.TotalCommands) + fmt.Printf(" Bytes saved: %s → %s (%.0f%% reduction)\n", + formatBytes(summary.TotalInputBytes), formatBytes(summary.TotalOutputBytes), summary.AvgCompression) + fmt.Printf(" Tokens saved: ~%d\n", summary.TotalSavedTokens) + fmt.Println() + + if len(summary.TopCommands) > 0 { + fmt.Println("Top Commands by Savings") + fmt.Println("────────────────────────────────────────") + for _, cs := range summary.TopCommands { + fmt.Printf(" %-30s %3dx %6s saved (%.0f%%)\n", + truncateStr(cs.Command, 30), cs.Count, formatBytes(cs.SavedBytes), cs.AvgRatio) + } + fmt.Println() + } + + return nil +} + +func showGainHistory(db *sql.DB, format string) error { + rows, err := db.Query(` + SELECT command, input_bytes, output_bytes, COALESCE(saved_tokens, 0), COALESCE(compression_ratio, 0), created_at + FROM token_stats + ORDER BY created_at DESC + LIMIT 50 + `) + if err != nil { + return fmt.Errorf("query history: %w", err) + } + defer rows.Close() + + var entries []gainHistoryEntry + for rows.Next() { + var e gainHistoryEntry + if err := rows.Scan(&e.Command, &e.InputBytes, &e.OutputBytes, &e.SavedTokens, &e.CompressionRatio, &e.CreatedAt); err != nil { + continue + } + entries = append(entries, e) + } + + if format == "json" { + data, _ := json.MarshalIndent(entries, "", " ") + fmt.Println(string(data)) + return nil + } + + if len(entries) == 0 { + fmt.Println("No compression history yet.") + return nil + } + + fmt.Println() + fmt.Println("Recent Compression History") + fmt.Println("━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━") + fmt.Printf(" %-30s %8s → %8s %6s %s\n", "Command", "Input", "Output", "Saved", "When") + fmt.Println(" ────────────────────────────────────────────────────────────────") + for _, e := 
range entries { + saved := e.InputBytes - e.OutputBytes + pct := float64(0) + if e.InputBytes > 0 { + pct = float64(saved) / float64(e.InputBytes) * 100 + } + fmt.Printf(" %-30s %8s → %8s %5.0f%% %s\n", + truncateStr(e.Command, 30), + formatBytes(int64(e.InputBytes)), + formatBytes(int64(e.OutputBytes)), + pct, + e.CreatedAt, + ) + } + fmt.Println() + return nil +} + +func formatBytes(b int64) string { + switch { + case b >= 1024*1024: + return fmt.Sprintf("%.1fMB", float64(b)/(1024*1024)) + case b >= 1024: + return fmt.Sprintf("%.1fKB", float64(b)/1024) + default: + return fmt.Sprintf("%dB", b) + } +} + +func truncateStr(s string, max int) string { + if len(s) <= max { + return s + } + return s[:max-3] + "..." +} + +// RecordTokenStats inserts a compression stats row into the database. +func RecordTokenStats(db *sql.DB, command string, inputBytes, outputBytes, savedBytes int, ratio float64, inputTokens, outputTokens, savedTokens int, sessionID string) error { + _, err := db.Exec(` + INSERT INTO token_stats (command, input_bytes, output_bytes, saved_bytes, compression_ratio, input_tokens, output_tokens, saved_tokens, session_id) + VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?) + `, command, inputBytes, outputBytes, savedBytes, ratio, inputTokens, outputTokens, savedTokens, sessionID) + return err +} diff --git a/cmd/goal.go b/cmd/goal.go index 922fdac..1020537 100644 --- a/cmd/goal.go +++ b/cmd/goal.go @@ -16,8 +16,9 @@ import ( ) var goalCmd = &cobra.Command{ - Use: "goal \"Goal Description\"", - Short: "Turn a goal into an active execution plan", + Use: "goal \"Goal Description\"", + Short: "Turn a goal into an active execution plan", + Deprecated: "use 'taskwing plan \"\"' instead", Long: `Create and activate a plan from a goal in one command. 
This command runs clarification and plan generation automatically, then prints diff --git a/cmd/hook.go b/cmd/hook.go index d1c839a..f848f2d 100644 --- a/cmd/hook.go +++ b/cmd/hook.go @@ -7,6 +7,7 @@ import ( "context" "encoding/json" "fmt" + "io" "os" "path/filepath" "strings" @@ -153,12 +154,28 @@ var hookStatusCmd = &cobra.Command{ }, } +var hookCompressCmd = &cobra.Command{ + Use: "compress", + Short: "Compress Bash tool output (for PreToolUse hook)", + Long: `Called by Claude Code's PreToolUse hook to rewrite Bash commands +through the TaskWing compression proxy. + +Reads a JSON-RPC hook payload from stdin, rewrites the Bash command +to run through 'taskwing proxy', and returns the modified payload. + +Install via: taskwing init --claude-code`, + RunE: func(cmd *cobra.Command, args []string) error { + return runHookCompress() + }, +} + func init() { rootCmd.AddCommand(hookCmd) hookCmd.AddCommand(hookContinueCheckCmd) hookCmd.AddCommand(hookSessionInitCmd) hookCmd.AddCommand(hookSessionEndCmd) hookCmd.AddCommand(hookStatusCmd) + hookCmd.AddCommand(hookCompressCmd) // Circuit breaker flags hookContinueCheckCmd.Flags().Int("max-tasks", DefaultMaxTasksPerSession, "Maximum tasks to complete per session") @@ -531,6 +548,62 @@ func outputHookResponse(resp HookResponse) error { return nil } +// runHookCompress reads a PreToolUse hook payload from stdin and rewrites +// Bash commands to run through taskwing proxy for output compression. 
+func runHookCompress() error { + data, err := io.ReadAll(os.Stdin) + if err != nil { + return fmt.Errorf("read stdin: %w", err) + } + + var payload struct { + ToolName string `json:"tool_name"` + Input struct { + Command string `json:"command"` + } `json:"input"` + } + if err := json.Unmarshal(data, &payload); err != nil { + // Not valid JSON — pass through + return nil + } + + // Only rewrite Bash tool calls + if payload.ToolName != "Bash" || payload.Input.Command == "" { + return nil + } + + cmd := payload.Input.Command + + // Skip commands that shouldn't be compressed + skipPrefixes := []string{ + "taskwing ", "tw ", "cd ", "export ", "source ", + "echo ", "printf ", "read ", "sleep ", + } + for _, prefix := range skipPrefixes { + if strings.HasPrefix(cmd, prefix) { + return nil + } + } + + // Find taskwing binary + binPath, err := os.Executable() + if err != nil { + binPath = "taskwing" + } + + // Rewrite command to go through proxy + rewritten := fmt.Sprintf("%s proxy %s", binPath, cmd) + + resp := map[string]any{ + "input": map[string]string{ + "command": rewritten, + }, + } + out, _ := json.Marshal(resp) + fmt.Println(string(out)) + return nil +} + // getLLMConfigFromViper returns LLM config without requiring cobra command. // It checks for role-specific config (query role since hook context is a query op), // falling back to the default config. 
diff --git a/cmd/init.go b/cmd/init.go new file mode 100644 index 0000000..81ff5e9 --- /dev/null +++ b/cmd/init.go @@ -0,0 +1,196 @@ +/* +Copyright © 2025 Joseph Goksu josephgoksu@gmail.com +*/ +package cmd + +import ( + "fmt" + "os" + "os/exec" + "strings" + + "github.com/josephgoksu/TaskWing/internal/bootstrap" + "github.com/josephgoksu/TaskWing/internal/llm" + "github.com/josephgoksu/TaskWing/internal/ui" + "github.com/spf13/cobra" + "github.com/spf13/viper" +) + +var initCmd = &cobra.Command{ + Use: "init", + Short: "Set up TaskWing for your project (zero API keys needed)", + Long: `Initialize TaskWing for your project in one command. + +What it does: + 1. Detects your AI CLI (Claude Code, Gemini CLI, Codex CLI) + 2. Runs deterministic code indexing (no LLM required) + 3. Registers TaskWing MCP server with your AI CLI + 4. Installs session hooks and slash commands + +No API keys needed. Works with whatever AI CLI you already have. + +Examples: + taskwing init # Auto-detect AI CLI + taskwing init --claude-code # Register with Claude Code + taskwing init --gemini # Register with Gemini CLI + taskwing init --codex # Register with Codex CLI`, + RunE: runInit, +} + +func init() { + rootCmd.AddCommand(initCmd) + initCmd.Flags().Bool("claude-code", false, "Register with Claude Code") + initCmd.Flags().Bool("gemini", false, "Register with Gemini CLI") + initCmd.Flags().Bool("codex", false, "Register with Codex CLI") +} + +func runInit(cmd *cobra.Command, _ []string) error { + verbose := viper.GetBool("verbose") + + // Determine target AI CLI + target := detectTarget(cmd) + if target == "" { + return fmt.Errorf("no AI CLI detected. 
Install Claude Code, Gemini CLI, or Codex CLI, or use --claude-code/--gemini/--codex") + } + + cwd, err := os.Getwd() + if err != nil { + return fmt.Errorf("get working directory: %w", err) + } + + if !isQuiet() { + fmt.Println() + fmt.Printf("%s TaskWing Init → %s\n", ui.IconRocket, target) + fmt.Println("━━━━━━━━━━━━━━━━━━━━━━━━") + } + + // Step 1: Initialize project structure + deterministic indexing (no LLM) + if !isQuiet() { + fmt.Printf("%s Initializing project and indexing codebase...\n", ui.IconPackage) + } + initializer := bootstrap.NewInitializer(cwd) + if err := initializer.Run(verbose, []string{targetToAIName(target)}); err != nil { + if verbose { + fmt.Fprintf(os.Stderr, "Warning: init structure: %v\n", err) + } + } + svc := bootstrap.NewService(cwd, llm.Config{}) + if _, err := svc.RunDeterministicBootstrap(cmd.Context(), isQuiet()); err != nil { + if verbose { + fmt.Fprintf(os.Stderr, "Warning: indexing: %v\n", err) + } + } + + // Step 2: Register MCP server + if !isQuiet() { + fmt.Printf("%s Registering MCP server with %s...\n", ui.IconPlug, target) + } + if err := registerMCP(target, verbose); err != nil { + ui.PrintWarning(fmt.Sprintf("MCP registration: %v", err)) + } + + // Step 3: Install hooks and slash commands + if !isQuiet() { + fmt.Printf("%s Installing hooks and slash commands...\n", ui.IconWrench) + } + aiName := targetToAIName(target) + if err := initializer.InstallHooksConfig(aiName, verbose); err != nil { + if verbose { + fmt.Fprintf(os.Stderr, "Warning: hooks install: %v\n", err) + } + } + if err := initializer.CreateSlashCommands(aiName, verbose); err != nil { + if verbose { + fmt.Fprintf(os.Stderr, "Warning: slash commands: %v\n", err) + } + } + + if !isQuiet() { + fmt.Println() + ui.PrintSuccess("TaskWing ready. 
No API keys needed.") + fmt.Printf(" MCP tools available in %s.\n", target) + fmt.Println() + } + + return nil +} + +func detectTarget(cmd *cobra.Command) string { + if b, _ := cmd.Flags().GetBool("claude-code"); b { + return "Claude Code" + } + if b, _ := cmd.Flags().GetBool("gemini"); b { + return "Gemini CLI" + } + if b, _ := cmd.Flags().GetBool("codex"); b { + return "Codex CLI" + } + + // Auto-detect + for _, bin := range []struct { + name string + target string + }{ + {"claude", "Claude Code"}, + {"gemini", "Gemini CLI"}, + {"codex", "Codex CLI"}, + } { + if _, err := exec.LookPath(bin.name); err == nil { + return bin.target + } + } + return "" +} + +func targetToAIName(target string) string { + switch target { + case "Claude Code": + return "claude" + case "Gemini CLI": + return "gemini" + case "Codex CLI": + return "codex" + } + return strings.ToLower(strings.Fields(target)[0]) +} + +func registerMCP(target string, verbose bool) error { + binPath, err := os.Executable() + if err != nil { + return fmt.Errorf("determine binary path: %w", err) + } + + switch target { + case "Claude Code": + out, err := exec.Command("claude", "mcp", "add", "taskwing", "--", binPath, "mcp").CombinedOutput() + if err != nil { + // Try scope flag + out, err = exec.Command("claude", "mcp", "add", "--scope", "project", "taskwing", "--", binPath, "mcp").CombinedOutput() + if err != nil { + return fmt.Errorf("claude mcp add: %s: %w", strings.TrimSpace(string(out)), err) + } + } + if verbose { + fmt.Printf(" %s\n", strings.TrimSpace(string(out))) + } + case "Gemini CLI": + out, err := exec.Command("gemini", "mcp", "add", "taskwing", "--", binPath, "mcp").CombinedOutput() + if err != nil { + return fmt.Errorf("gemini mcp add: %s: %w", strings.TrimSpace(string(out)), err) + } + if verbose { + fmt.Printf(" %s\n", strings.TrimSpace(string(out))) + } + case "Codex CLI": + out, err := exec.Command("codex", "mcp", "add", "taskwing", "--", binPath, "mcp").CombinedOutput() + if err != nil { + 
return fmt.Errorf("codex mcp add: %s: %w", strings.TrimSpace(string(out)), err) + } + if verbose { + fmt.Printf(" %s\n", strings.TrimSpace(string(out))) + } + default: + return fmt.Errorf("unsupported target: %s", target) + } + return nil +} diff --git a/cmd/mcp_install.go b/cmd/mcp_install.go index cd12254..6770db3 100644 --- a/cmd/mcp_install.go +++ b/cmd/mcp_install.go @@ -76,7 +76,7 @@ Examples: case "all": for _, ai := range bootstrap.ValidAINames() { if err := installMCPForTarget(ai, binPath, cwd); err != nil { - fmt.Printf("⚠️ %s install failed: %v\n", ai, err) + ui.PrintWarning(fmt.Sprintf("%s install failed: %v", ai, err)) } } default: @@ -85,7 +85,7 @@ Examples: os.Exit(1) } if err := installMCPForTarget(target, binPath, cwd); err != nil { - fmt.Printf("❌ Failed to install for %s: %v\n", target, err) + ui.PrintError(fmt.Sprintf("Failed to install for %s: %v", target, err)) os.Exit(1) } } @@ -330,10 +330,10 @@ func installLocalMCP(projectDir, configDirName, configFileName, binPath string) Args: []string{"mcp"}, }) if err != nil { - fmt.Printf("❌ Failed to install for %s: %v\n", configDirName, err) + ui.PrintError(fmt.Sprintf("Failed to install for %s: %v", configDirName, err)) return } - fmt.Printf("✅ Installed for %s as '%s' in %s\n", strings.TrimPrefix(configDirName, "."), serverName, configPath) + ui.PrintSuccess(fmt.Sprintf("Installed for %s as '%s' in %s", strings.TrimPrefix(configDirName, "."), serverName, configPath)) } func installClaude(binPath, projectDir string) { @@ -347,7 +347,7 @@ func installClaudeCodeCLI(binPath, projectDir string) { _, err := exec.LookPath("claude") if err != nil { if viper.GetBool("verbose") { - fmt.Println("ℹ️ Claude Code CLI not found (skipping CLI config)") + ui.PrintInfo("Claude Code CLI not found (skipping CLI config)") } return } @@ -355,11 +355,11 @@ func installClaudeCodeCLI(binPath, projectDir string) { serverName := mcpServerName(projectDir) legacyName := legacyServerName(projectDir) - fmt.Println("👉 Configuring 
Claude Code CLI...") + fmt.Printf("%s Configuring Claude Code CLI...\n", ui.IconRocket) if viper.GetBool("preview") { fmt.Printf("[PREVIEW] Would run: claude mcp remove %s && claude mcp remove %s && claude mcp add --transport stdio %s -- %s mcp\n", legacyName, serverName, serverName, binPath) - fmt.Printf("✅ Would install for Claude Code as '%s'\n", serverName) + ui.PrintSuccess(fmt.Sprintf("Would install for Claude Code as '%s'", serverName)) return } @@ -383,9 +383,9 @@ func installClaudeCodeCLI(binPath, projectDir string) { } if err := cmd.Run(); err != nil { - fmt.Printf("⚠️ Failed to run 'claude mcp add': %v\n", err) + ui.PrintWarning(fmt.Sprintf("Failed to run 'claude mcp add': %v", err)) } else { - fmt.Printf("✅ Installed for Claude Code as '%s'\n", serverName) + ui.PrintSuccess(fmt.Sprintf("Installed for Claude Code as '%s'", serverName)) } } @@ -402,7 +402,7 @@ func installClaudeDesktop(binPath, projectDir string) { return } - fmt.Println("👉 Configuring Claude Desktop App...") + fmt.Printf("%s Configuring Claude Desktop App...\n", ui.IconRocket) serverName := mcpServerName(projectDir) legacyName := legacyServerName(projectDir) @@ -416,10 +416,10 @@ func installClaudeDesktop(binPath, projectDir string) { Env: map[string]string{}, }) if err != nil { - fmt.Printf("⚠️ Failed to configure Claude Desktop: %v\n", err) + ui.PrintWarning(fmt.Sprintf("Failed to configure Claude Desktop: %v", err)) return } - fmt.Printf("✅ Installed for Claude Desktop as '%s' in %s\n", serverName, configPath) + ui.PrintSuccess(fmt.Sprintf("Installed for Claude Desktop as '%s' in %s", serverName, configPath)) fmt.Println(" (You may need to restart Claude Desktop to see the changes)") } @@ -430,7 +430,7 @@ func installCopilot(binPath, projectDir string) { configPath := filepath.Join(projectDir, ".vscode", "mcp.json") serverName := mcpServerName(projectDir) - fmt.Println("👉 Configuring GitHub Copilot (VS Code)...") + fmt.Printf("%s Configuring GitHub Copilot (VS Code)...\n", 
ui.IconRocket) err := upsertVSCodeMCPServer(configPath, serverName, VSCodeMCPServerConfig{ Type: "stdio", @@ -438,10 +438,10 @@ func installCopilot(binPath, projectDir string) { Args: []string{"mcp"}, }) if err != nil { - fmt.Printf("❌ Failed to install for Copilot: %v\n", err) + ui.PrintError(fmt.Sprintf("Failed to install for Copilot: %v", err)) return } - fmt.Printf("✅ Installed for GitHub Copilot as '%s' in %s\n", serverName, configPath) + ui.PrintSuccess(fmt.Sprintf("Installed for GitHub Copilot as '%s' in %s", serverName, configPath)) fmt.Println(" (Reload VS Code window to activate)") } @@ -449,7 +449,7 @@ func installGeminiCLI(binPath, projectDir string) { // Check if gemini CLI is available _, err := exec.LookPath("gemini") if err != nil { - fmt.Println("❌ 'gemini' CLI not found in PATH.") + ui.PrintError("'gemini' CLI not found in PATH.") fmt.Println(" Please install the Gemini CLI first to use this integration.") fmt.Println(" See: https://geminicli.com/docs/getting-started") return @@ -457,7 +457,7 @@ func installGeminiCLI(binPath, projectDir string) { serverName := mcpServerName(projectDir) legacyName := legacyServerName(projectDir) - fmt.Println("👉 Configuring Gemini CLI...") + fmt.Printf("%s Configuring Gemini CLI...\n", ui.IconRocket) if viper.GetBool("preview") { fmt.Printf("[PREVIEW] Would run: gemini mcp remove -s project %s && gemini mcp add -s project %s %s mcp\n", legacyName, serverName, binPath) @@ -486,9 +486,9 @@ func installGeminiCLI(binPath, projectDir string) { } if err := cmd.Run(); err != nil { - fmt.Printf("⚠️ Failed to run 'gemini mcp add': %v\n", err) + ui.PrintWarning(fmt.Sprintf("Failed to run 'gemini mcp add': %v", err)) } else { - fmt.Printf("✅ Installed for Gemini as '%s'\n", serverName) + ui.PrintSuccess(fmt.Sprintf("Installed for Gemini as '%s'", serverName)) } } @@ -496,7 +496,7 @@ func installCodexGlobal(binPath, projectDir string) { // Check if codex CLI is available _, err := exec.LookPath("codex") if err != nil { - 
fmt.Println("❌ 'codex' CLI not found in PATH.") + ui.PrintError("'codex' CLI not found in PATH.") fmt.Println(" Please install the OpenAI Codex CLI first to use this integration.") fmt.Println(" See: https://developers.openai.com/codex/mcp/") return @@ -504,7 +504,7 @@ func installCodexGlobal(binPath, projectDir string) { serverName := mcpServerName(projectDir) legacyName := legacyServerName(projectDir) - fmt.Println("👉 Configuring OpenAI Codex...") + fmt.Printf("%s Configuring OpenAI Codex...\n", ui.IconRocket) if viper.GetBool("preview") { fmt.Printf("[PREVIEW] Would run: codex mcp remove %s && codex mcp add %s -- %s mcp\n", legacyName, serverName, binPath) @@ -532,9 +532,9 @@ func installCodexGlobal(binPath, projectDir string) { } if err := cmd.Run(); err != nil { - fmt.Printf("⚠️ Failed to run 'codex mcp add': %v\n", err) + ui.PrintWarning(fmt.Sprintf("Failed to run 'codex mcp add': %v", err)) } else { - fmt.Printf("✅ Installed for Codex as '%s'\n", serverName) + ui.PrintSuccess(fmt.Sprintf("Installed for Codex as '%s'", serverName)) } } @@ -549,7 +549,7 @@ func installOpenCode(binPath, projectDir string) error { configPath := filepath.Join(projectDir, "opencode.json") serverName := mcpServerName(projectDir) - fmt.Println("👉 Configuring OpenCode...") + fmt.Printf("%s Configuring OpenCode...\n", ui.IconRocket) // Create MCP config (opencode.json) if err := upsertOpenCodeMCPServer(configPath, serverName, OpenCodeMCPServerConfig{ @@ -566,16 +566,16 @@ func installOpenCode(binPath, projectDir string) error { // Create slash commands (.opencode/commands/) if err := init.CreateSlashCommands("opencode", verbose); err != nil { - fmt.Printf("⚠️ Failed to create commands: %v\n", err) + ui.PrintWarning(fmt.Sprintf("Failed to create commands: %v", err)) } else { - fmt.Println("✅ Created OpenCode commands in .opencode/commands/") + ui.PrintSuccess("Created OpenCode commands in .opencode/commands/") } // Create hooks plugin (.opencode/plugins/) if err := 
init.InstallHooksConfig("opencode", verbose); err != nil { - fmt.Printf("⚠️ Failed to create plugin: %v\n", err) + ui.PrintWarning(fmt.Sprintf("Failed to create plugin: %v", err)) } else { - fmt.Println("✅ Created OpenCode plugin in .opencode/plugins/") + ui.PrintSuccess("Created OpenCode plugin in .opencode/plugins/") } return nil @@ -644,7 +644,7 @@ func upsertOpenCodeMCPServer(configPath, serverName string, serverCfg OpenCodeMC return fmt.Errorf("write opencode.json: %w", err) } - fmt.Printf("✅ Installed for OpenCode as '%s' in %s\n", serverName, configPath) + ui.PrintSuccess(fmt.Sprintf("Installed for OpenCode as '%s' in %s", serverName, configPath)) fmt.Println(" (opencode.json is at project root per OpenCode spec)") return nil } diff --git a/cmd/mcp_server.go b/cmd/mcp_server.go index 4ccd40f..3f6c2ff 100644 --- a/cmd/mcp_server.go +++ b/cmd/mcp_server.go @@ -16,6 +16,7 @@ import ( "github.com/josephgoksu/TaskWing/internal/llm" mcppresenter "github.com/josephgoksu/TaskWing/internal/mcp" "github.com/josephgoksu/TaskWing/internal/memory" + "github.com/josephgoksu/TaskWing/internal/ui" mcpsdk "github.com/modelcontextprotocol/go-sdk/mcp" "github.com/spf13/cobra" "github.com/spf13/viper" @@ -152,7 +153,7 @@ func runMCPServer(ctx context.Context) error { serverOpts := &mcpsdk.ServerOptions{ InitializedHandler: func(ctx context.Context, session *mcpsdk.ServerSession, params *mcpsdk.InitializedParams) { - fmt.Fprintf(os.Stderr, "✓ MCP connection established\n") + fmt.Fprintf(os.Stderr, "%s MCP connection established\n", ui.IconOK) if viper.GetBool("verbose") { fmt.Fprintf(os.Stderr, "[DEBUG] Client initialized\n") } diff --git a/cmd/memory.go b/cmd/memory.go index d8082b6..a3bc1aa 100644 --- a/cmd/memory.go +++ b/cmd/memory.go @@ -51,7 +51,7 @@ and decisions from the current project's memory store.`, ui.RenderPageHeader("TaskWing Memory Reset", "Wiping all project context") force, _ := cmd.Flags().GetBool("force") if !force { - fmt.Print("⚠️ This will delete ALL 
project memory. Are you sure? [y/N]: ") + fmt.Printf("%s This will delete ALL project memory. Are you sure? [y/N]: ", ui.IconWarn) var response string _, _ = fmt.Scanln(&response) if response != "y" && response != "Y" { @@ -75,7 +75,7 @@ and decisions from the current project's memory store.`, _ = os.Remove(indexPath) _ = os.RemoveAll(featuresDir) - fmt.Println("✓ Project memory wiped successfully.") + ui.PrintSuccess("Project memory wiped successfully.") return nil }, } @@ -149,7 +149,7 @@ Checks for: // Show embedding stats first if embErr == nil && embStats != nil { - fmt.Println("📊 Knowledge Embeddings:") + fmt.Printf("%s Knowledge Embeddings:\n", ui.IconStats) fmt.Printf(" Total nodes: %d\n", embStats.TotalNodes) fmt.Printf(" With embeddings: %d\n", embStats.NodesWithEmbeddings) fmt.Printf(" Missing: %d\n", embStats.NodesWithoutEmbeddings) @@ -160,14 +160,14 @@ Checks for: // Warn about missing embeddings if embStats.NodesWithoutEmbeddings > 0 { - fmt.Printf("⚠ %d nodes are missing embeddings.\n", embStats.NodesWithoutEmbeddings) + ui.PrintWarning(fmt.Sprintf("%d nodes are missing embeddings.", embStats.NodesWithoutEmbeddings)) fmt.Println(" Run 'taskwing memory generate-embeddings' to backfill.") fmt.Println() } // Warn about mixed dimensions if embStats.MixedDimensions { - fmt.Println("⚠ WARNING: Mixed embedding dimensions detected!") + ui.PrintWarning("WARNING: Mixed embedding dimensions detected!") fmt.Println(" This can happen when switching between different embedding models.") fmt.Println(" Run 'taskwing memory rebuild-embeddings' to regenerate all embeddings.") fmt.Println() @@ -176,7 +176,7 @@ Checks for: // Show symbol index stats if symbolStats != nil && symbolStats.TotalSymbols > 0 { - fmt.Println("💻 Code Symbol Index:") + fmt.Printf("%s Code Symbol Index:\n", ui.IconCode) fmt.Printf(" Total symbols: %d\n", symbolStats.TotalSymbols) fmt.Printf(" Indexed files: %d\n", symbolStats.TotalFiles) fmt.Printf(" Relations: %d\n", symbolStats.TotalRelations) 
@@ -199,7 +199,7 @@ Checks for: // Warn about stale files if len(staleFiles) > 0 { - fmt.Printf("⚠ %d indexed files no longer exist:\n", len(staleFiles)) + fmt.Printf("%s %d indexed files no longer exist:\n", ui.IconWarn, len(staleFiles)) maxShow := 5 for i, f := range staleFiles { if i >= maxShow { @@ -212,13 +212,13 @@ Checks for: fmt.Println() } } else if symbolStats != nil { - fmt.Println("💻 Code Symbol Index: (empty)") + fmt.Printf("%s Code Symbol Index: (empty)\n", ui.IconCode) fmt.Println(" Run 'taskwing bootstrap' to index your codebase.") fmt.Println() } if len(issues) == 0 { - fmt.Println("✓ No integrity issues found") + ui.PrintSuccess("No integrity issues found") return nil } @@ -256,7 +256,7 @@ Actions: // First check what needs repair issues, _ := repo.Check() if len(issues) == 0 { - fmt.Println("✓ No issues to repair") + ui.PrintSuccess("No issues to repair") return nil } @@ -269,9 +269,9 @@ Actions: // Verify repair remaining, _ := repo.Check() if len(remaining) == 0 { - fmt.Println("✓ All issues repaired") + ui.PrintSuccess("All issues repaired") } else { - fmt.Printf("⚠ %d issues remain after repair\n", len(remaining)) + ui.PrintWarning(fmt.Sprintf("%d issues remain after repair", len(remaining))) } return nil @@ -301,7 +301,7 @@ This is useful if the search index is out of sync with the database.`, } nodes, _ := repo.ListNodes("") - fmt.Printf("✓ FTS index rebuilt with %d nodes\n", len(nodes)) + ui.PrintSuccess(fmt.Sprintf("FTS index rebuilt with %d nodes", len(nodes))) return nil }, } @@ -357,7 +357,7 @@ Requires an API key for the configured provider (OpenAI/Gemini) or a local Ollam } if len(toProcess) == 0 { - fmt.Println("✓ All nodes already have embeddings") + ui.PrintSuccess("All nodes already have embeddings") return nil } @@ -388,22 +388,23 @@ Requires an API key for the configured provider (OpenAI/Gemini) or a local Ollam for _, n := range toProcess { embedding, err := knowledge.GenerateEmbedding(ctx, n.Text(), llmCfg) if err != nil { - 
fmt.Printf(" ✗ %s: %v\n", n.ID, err) + fmt.Printf(" %s %s: %v\n", ui.IconFail, n.ID, err) continue } if err := repo.UpdateNodeEmbedding(n.ID, embedding); err != nil { - fmt.Printf(" ✗ %s: save failed\n", n.ID) + fmt.Printf(" %s %s: save failed\n", ui.IconFail, n.ID) continue } generated++ if !viper.GetBool("quiet") { - fmt.Printf(" ✓ %s\n", n.Summary) + fmt.Printf(" %s %s\n", ui.IconOK, n.Summary) } } - fmt.Printf("\n✓ Generated %d/%d embeddings\n", generated, len(toProcess)) + fmt.Println() + ui.PrintSuccess(fmt.Sprintf("Generated %d/%d embeddings", generated, len(toProcess))) return nil }, } @@ -448,7 +449,7 @@ Examples: } archPath := filepath.Join(memoryPath, "ARCHITECTURE.md") - fmt.Printf("✓ Generated %s\n", archPath) + ui.PrintSuccess(fmt.Sprintf("Generated %s", archPath)) return nil }, } @@ -473,7 +474,7 @@ WARNING: This can be expensive if you have many nodes and are using a paid API.` force, _ := cmd.Flags().GetBool("force") if !force { - fmt.Print("⚠ This will regenerate ALL embeddings. Are you sure? [y/N]: ") + fmt.Printf("%s This will regenerate ALL embeddings. Are you sure? 
[y/N]: ", ui.IconWarn) var response string _, _ = fmt.Scanln(&response) if response != "y" && response != "Y" { @@ -528,24 +529,25 @@ WARNING: This can be expensive if you have many nodes and are using a paid API.` embedding, err := knowledge.GenerateEmbedding(ctx, fullNode.Text(), llmCfg) if err != nil { - fmt.Printf(" ✗ %s: %v\n", n.ID, err) + fmt.Printf(" %s %s: %v\n", ui.IconFail, n.ID, err) failed++ continue } if err := repo.UpdateNodeEmbedding(n.ID, embedding); err != nil { - fmt.Printf(" ✗ %s: save failed\n", n.ID) + fmt.Printf(" %s %s: save failed\n", ui.IconFail, n.ID) failed++ continue } generated++ if !viper.GetBool("quiet") { - fmt.Printf(" ✓ %s (dim: %d)\n", fullNode.Summary, len(embedding)) + fmt.Printf(" %s %s (dim: %d)\n", ui.IconOK, fullNode.Summary, len(embedding)) } } - fmt.Printf("\n✓ Regenerated %d/%d embeddings", generated, len(nodes)) + fmt.Println() + fmt.Printf("%s Regenerated %d/%d embeddings", ui.IconOK, generated, len(nodes)) if failed > 0 { fmt.Printf(" (%d failed)", failed) } @@ -613,8 +615,8 @@ Examples: ui.RenderPageHeader("TaskWing Memory Inspect", fmt.Sprintf("Query: %q", query)) // Pipeline info - fmt.Printf("📊 Pipeline: %s\n", formatPipeline(result.Pipeline)) - fmt.Printf("🔍 Total candidates: %d\n", result.TotalCandidates) + fmt.Printf("%s Pipeline: %s\n", ui.IconStats, formatPipeline(result.Pipeline)) + fmt.Printf("%s Total candidates: %d\n", ui.IconSearch, result.TotalCandidates) // Timings if verbose { @@ -786,10 +788,10 @@ Examples: // Show workspace detection results if wsInfo != nil && len(wsInfo.Services) > 0 { - fmt.Printf("📂 Detected workspace: %s (%d services)\n", wsInfo.Type.String(), len(wsInfo.Services)) + fmt.Printf("%s Detected workspace: %s (%d services)\n", ui.IconFolder, wsInfo.Type.String(), len(wsInfo.Services)) fmt.Printf(" Services: %v\n\n", wsInfo.Services) } else { - fmt.Println("📂 No monorepo detected (single workspace mode)") + fmt.Printf("%s No monorepo detected (single workspace mode)\n", 
ui.IconFolder) fmt.Println() } @@ -825,12 +827,12 @@ Examples: } else { // Update the node's workspace if err := repo.UpdateNodeWorkspace(fullNode.ID, inferredWS); err != nil { - fmt.Printf(" ✗ %s: %v\n", fullNode.ID, err) + fmt.Printf(" %s %s: %v\n", ui.IconFail, fullNode.ID, err) skipped++ continue } if !viper.GetBool("quiet") { - fmt.Printf(" ✓ %s → workspace=%q\n", fullNode.ID, inferredWS) + fmt.Printf(" %s %s → workspace=%q\n", ui.IconOK, fullNode.ID, inferredWS) } updated++ } @@ -838,15 +840,15 @@ Examples: fmt.Println() if dryRun { - fmt.Printf("📊 Dry-run summary: %d would be updated, %d unchanged, %d skipped\n", updated, unchanged, skipped) + fmt.Printf("%s Dry-run summary: %d would be updated, %d unchanged, %d skipped\n", ui.IconStats, updated, unchanged, skipped) fmt.Println("\nRun without --dry-run to apply changes.") } else { - fmt.Printf("✓ Backfill complete: %d updated, %d unchanged, %d skipped\n", updated, unchanged, skipped) + ui.PrintSuccess(fmt.Sprintf("Backfill complete: %d updated, %d unchanged, %d skipped", updated, unchanged, skipped)) // Rebuild FTS index if changes were made if updated > 0 { if err := repo.RebuildFTS(); err != nil { - fmt.Printf("⚠ Warning: failed to rebuild FTS index: %v\n", err) + ui.PrintWarning(fmt.Sprintf("failed to rebuild FTS index: %v", err)) } } } diff --git a/cmd/plan.go b/cmd/plan.go index 73d99f0..35b6f3f 100644 --- a/cmd/plan.go +++ b/cmd/plan.go @@ -16,6 +16,7 @@ import ( "github.com/josephgoksu/TaskWing/internal/knowledge" "github.com/josephgoksu/TaskWing/internal/llm" "github.com/josephgoksu/TaskWing/internal/logger" + "github.com/josephgoksu/TaskWing/internal/runner" "github.com/josephgoksu/TaskWing/internal/task" "github.com/josephgoksu/TaskWing/internal/ui" "github.com/josephgoksu/TaskWing/internal/util" @@ -38,12 +39,16 @@ func init() { planCmd.AddCommand(planStartCmd) planCmd.AddCommand(planStatusCmd) + // planCmd flags (for positional-arg shortcut that routes to runGoal) + 
planCmd.Flags().Bool("auto-answer", false, "Automatically answer clarification questions using project context") + planCmd.Flags().Int("max-rounds", 5, "Maximum clarify rounds before stopping") + // Flags planNewCmd.Flags().Bool("no-export", false, "Skip automatic export") planNewCmd.Flags().String("export-path", "", "Custom path to export plan") planNewCmd.Flags().Bool("non-interactive", false, "Run without user interaction (headless)") planNewCmd.Flags().Bool("offline", false, "Disable LLM usage (create a draft plan without tasks)") - planNewCmd.Flags().Bool("no-llm", false, "Alias for --offline") + planNewCmd.Flags().String("prefer-cli", "", "Preferred AI CLI for plan generation (claude, gemini, codex)") planExportCmd.Flags().Bool("stdout", false, "Print to stdout") planExportCmd.Flags().StringP("output", "o", "", "Custom output path") @@ -81,15 +86,25 @@ func runWithService(runFunc func(svc *task.Service, cmd *cobra.Command, args []s } var planCmd = &cobra.Command{ - Use: "plan", - Short: "Manage development plans", - Long: `Create, view, and export development plans using AI agents. + Use: "plan [\"description\"]", + Short: "Create and manage development plans", + Long: `Create, view, and manage development plans. 
-Examples: - taskwing goal "Add OAuth2 authentication" +When called with a description, creates and activates a plan in one step: + taskwing plan "Add OAuth2 authentication" + +Subcommands provide full plan lifecycle management: taskwing plan list + taskwing plan new "description" (advanced TUI mode) taskwing plan export latest taskwing plan start latest`, + Args: cobra.MaximumNArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + if len(args) == 1 { + return runGoal(cmd, args) + } + return cmd.Help() + }, } var planNewCmd = &cobra.Command{ @@ -119,13 +134,9 @@ var planNewCmd = &cobra.Command{ svc := task.NewService(repo, memoryPath) offline, _ := cmd.Flags().GetBool("offline") - noLLM, _ := cmd.Flags().GetBool("no-llm") - if noLLM { - offline = true - } if offline { if !isQuiet() && !isJSON() { - fmt.Fprintln(os.Stderr, "⚠️ Offline mode: LLM disabled. Creating a draft plan without tasks.") + fmt.Fprintf(os.Stderr, "%s Offline mode: LLM disabled. Creating a draft plan without tasks.\n", ui.IconWarn) } draft := &task.Plan{ Goal: goal, @@ -157,21 +168,34 @@ var planNewCmd = &cobra.Command{ return nil } - cfg, err := getLLMConfigForRole(cmd, llm.RoleBootstrap) - if err != nil { - return fmt.Errorf("llm config: %w", err) + // Detect AI CLI runner (no API key needed) or fall back to LLM API + preferCLI, _ := cmd.Flags().GetString("prefer-cli") + preferCLIType := runner.CLIType(preferCLI) + + var appCtx *app.Context + cliRunner, runnerErr := runner.PreferredRunner(preferCLIType) + if runnerErr == nil { + if !isQuiet() && !isJSON() { + fmt.Printf("%s Using %s for planning...\n", ui.IconRobot, cliRunner.Type().String()) + } + appCtx = app.NewContextWithConfig(repo, llm.Config{}) + appCtx.Runner = cliRunner + } else { + // Fallback: use internal LLM agents (requires API key) + cfg, err := getLLMConfigForRole(cmd, llm.RoleBootstrap) + if err != nil { + return fmt.Errorf("no AI CLI detected and no LLM API key configured.\nInstall Claude Code, Gemini CLI, or Codex 
CLI, or configure an API key: %w", err) + } + appCtx = app.NewContextWithConfig(repo, cfg) } - // Initialize App Layer - // Agents are now managed internally by PlanApp methods - appCtx := app.NewContextWithConfig(repo, cfg) planApp := app.NewPlanApp(appCtx) nonInteractive, _ := cmd.Flags().GetBool("non-interactive") if !nonInteractive && !hasTTY() { nonInteractive = true if !isQuiet() && !isJSON() { - fmt.Fprintln(os.Stderr, "⚠️ No TTY detected; falling back to --non-interactive") + fmt.Fprintf(os.Stderr, "%s No TTY detected; falling back to --non-interactive\n", ui.IconWarn) } } if nonInteractive { @@ -254,7 +278,7 @@ var planNewCmd = &cobra.Command{ return nil } - ks := knowledge.NewService(repo, cfg) + ks := knowledge.NewService(repo, appCtx.LLMCfg) stream := core.NewStreamingOutput(100) defer stream.Close() @@ -354,10 +378,10 @@ var planListCmd = &cobra.Command{ } func printPlanTable(plans []task.Plan) { - headerStyle := lipgloss.NewStyle().Foreground(lipgloss.Color("241")).Bold(true) - idStyle := lipgloss.NewStyle().Foreground(lipgloss.Color("75")) - dateStyle := lipgloss.NewStyle().Foreground(lipgloss.Color("241")) - goalStyle := lipgloss.NewStyle().Foreground(lipgloss.Color("255")) + headerStyle := ui.StyleSubtle.Bold(true) + idStyle := lipgloss.NewStyle().Foreground(ui.ColorBlue) + dateStyle := ui.StyleSubtle + goalStyle := ui.StyleText fmt.Printf("%-18s %-12s %-6s %s\n", headerStyle.Render("ID"), headerStyle.Render("CREATED"), headerStyle.Render("TASKS"), headerStyle.Render("GOAL")) @@ -367,16 +391,13 @@ func printPlanTable(plans []task.Plan) { if len(goal) > 60 { goal = goal[:57] + "..." } - // Tasks count - service ListPlans probably returns plans without tasks or with? - // ListPlans sets TaskCount but leaves Tasks nil for efficiency. - // Use GetTaskCount() to get the count regardless of how the plan was loaded. 
fmt.Printf("%-18s %-12s %-6d %s\n", idStyle.Render(p.ID), dateStyle.Render(p.CreatedAt.Format("2006-01-02")), p.GetTaskCount(), goalStyle.Render(goal)) } - fmt.Printf("\n%s\n", lipgloss.NewStyle().Foreground(lipgloss.Color("241")).Render(fmt.Sprintf("Total: %d plan(s)", len(plans)))) + fmt.Printf("\n%s\n", ui.StyleSubtle.Render(fmt.Sprintf("Total: %d plan(s)", len(plans)))) } var planExportCmd = &cobra.Command{ @@ -401,7 +422,8 @@ var planExportCmd = &cobra.Command{ return err } - fmt.Printf("\n✓ Plan exported to %s\n", outputPath) + fmt.Println() + ui.PrintSuccess(fmt.Sprintf("Plan exported to %s", outputPath)) return nil }), } @@ -443,7 +465,7 @@ var planDeleteCmd = &cobra.Command{ } if !isQuiet() && !isJSON() { - fmt.Printf("✓ Deleted plan %s\n", planID) + ui.PrintSuccess(fmt.Sprintf("Deleted plan %s", planID)) } return nil }), @@ -468,7 +490,7 @@ var planUpdateCmd = &cobra.Command{ } if !isQuiet() && !isJSON() { - fmt.Printf("✓ Updated plan %s\n", args[0]) + ui.PrintSuccess(fmt.Sprintf("Updated plan %s", args[0])) } return nil }), @@ -483,7 +505,7 @@ var planRenameCmd = &cobra.Command{ return err } if !isQuiet() { - fmt.Printf("✓ Renamed plan %s\n", args[0]) + ui.PrintSuccess(fmt.Sprintf("Renamed plan %s", args[0])) } return nil }), @@ -498,7 +520,7 @@ var planArchiveCmd = &cobra.Command{ return err } if !isQuiet() { - fmt.Printf("✓ Archived plan %s\n", args[0]) + ui.PrintSuccess(fmt.Sprintf("Archived plan %s", args[0])) } return nil }), @@ -513,7 +535,7 @@ var planUnarchiveCmd = &cobra.Command{ return err } if !isQuiet() { - fmt.Printf("✓ Unarchived plan %s\n", args[0]) + ui.PrintSuccess(fmt.Sprintf("Unarchived plan %s", args[0])) } return nil }), @@ -530,9 +552,11 @@ var planStartCmd = &cobra.Command{ plan, _ := svc.GetPlanWithTasks(args[0]) // Get resolved plan details if !isQuiet() { - fmt.Printf("\n✓ Active plan: %s\n", plan.ID) - fmt.Printf(" Goal: %s\n", plan.Goal) - fmt.Printf(" Tasks: %d\n\n", len(plan.Tasks)) + fmt.Println() + 
ui.PrintSuccess(fmt.Sprintf("Active plan: %s", plan.ID)) + ui.PrintKeyValue("Goal", plan.Goal) + ui.PrintKeyValue("Tasks", fmt.Sprintf("%d", len(plan.Tasks))) + fmt.Println() } return nil }), @@ -557,7 +581,7 @@ Examples: "message": "No active plan", }) } - fmt.Println("No active plan. Set one with: taskwing goal \"\"") + fmt.Println("No active plan. Set one with: taskwing plan \"\"") return nil } @@ -644,7 +668,7 @@ func printStatus(plan *task.Plan) { progressPct = done * 100 / total } - fmt.Printf("\n📋 Active Plan: %s\n", plan.ID) + fmt.Printf("\n%s Active Plan: %s\n", ui.IconTask, plan.ID) fmt.Printf(" %s\n\n", plan.Goal) barWidth := 30 @@ -653,9 +677,9 @@ func printStatus(plan *task.Plan) { fmt.Printf(" Progress: [%s] %d%% (%d/%d)\n\n", bar, progressPct, done, total) - passStyle := lipgloss.NewStyle().Foreground(lipgloss.Color("42")) - pendingStyle := lipgloss.NewStyle().Foreground(lipgloss.Color("214")) - dimStyle := lipgloss.NewStyle().Foreground(lipgloss.Color("241")) + passStyle := ui.StyleSuccess + pendingStyle := ui.StyleWarning + dimStyle := ui.StyleSubtle fmt.Println(" Tasks:") for _, t := range plan.Tasks { @@ -663,11 +687,10 @@ func printStatus(plan *task.Plan) { title := t.Title if t.Status == task.StatusCompleted { - statusMarker = passStyle.Render("[✓]") + statusMarker = passStyle.Render("[" + ui.IconOK.Emoji + "]") title = dimStyle.Render(title) } - // Use ShortID for consistent task ID display tid := util.ShortID(t.ID, util.TaskIDLength) fmt.Printf(" %s %s %s\n", statusMarker, dimStyle.Render(tid), title) } diff --git a/cmd/proxy.go b/cmd/proxy.go new file mode 100644 index 0000000..4f79393 --- /dev/null +++ b/cmd/proxy.go @@ -0,0 +1,124 @@ +/* +Copyright © 2025 Joseph Goksu josephgoksu@gmail.com +*/ +package cmd + +import ( + "fmt" + "os" + "os/exec" + "strings" + + "github.com/josephgoksu/TaskWing/internal/compress" + "github.com/spf13/cobra" +) + +var proxyCmd = &cobra.Command{ + Use: "proxy [command] [args...]", + Short: "Execute a command 
and compress its output for token savings", + Long: `Execute a shell command, then compress its output to reduce token usage. + +Examples: + taskwing proxy git status + taskwing proxy git log --oneline -20 + taskwing proxy go test ./... + taskwing proxy --raw git diff # Pass through without compression + taskwing proxy --ultra git log # Maximum compression`, + Args: cobra.MinimumNArgs(1), + DisableFlagParsing: false, + RunE: runProxy, +} + +func init() { + rootCmd.AddCommand(proxyCmd) + proxyCmd.Flags().Bool("raw", false, "Pass through without compression (for debugging)") + proxyCmd.Flags().Bool("ultra", false, "Use ultra-compact compression mode") + proxyCmd.Flags().Bool("stats", false, "Print compression stats to stderr") +} + +func runProxy(cmd *cobra.Command, args []string) error { + raw, _ := cmd.Flags().GetBool("raw") + ultra, _ := cmd.Flags().GetBool("ultra") + showStats, _ := cmd.Flags().GetBool("stats") + + // Build the command string for pipeline selection + cmdStr := strings.Join(args, " ") + + // Execute the command + child := exec.Command(args[0], args[1:]...) 
+ child.Stdin = os.Stdin + child.Stderr = os.Stderr + + output, err := child.Output() + exitErr := err + + if raw { + os.Stdout.Write(output) + if exitErr != nil { + if ee, ok := exitErr.(*exec.ExitError); ok { + os.Exit(ee.ExitCode()) + } + return exitErr + } + return nil + } + + // Compress the output + var compressed []byte + var stats compress.Stats + if ultra { + compressed, stats = compress.CompressWithLevel(cmdStr, output, true) + } else { + compressed, stats = compress.Compress(cmdStr, output) + } + + os.Stdout.Write(compressed) + if len(compressed) > 0 && compressed[len(compressed)-1] != '\n' { + fmt.Println() + } + + // Record stats to database (best-effort, don't fail proxy on DB errors) + inputTokens := compress.EstimateTokens(output) + outputTokens := compress.EstimateTokens(compressed) + savedTokens := inputTokens - outputTokens + savedBytes := stats.InputBytes - stats.OutputBytes + recordProxyStats(cmdStr, stats, savedBytes, inputTokens, outputTokens, savedTokens) + + if showStats { + savedPct := stats.Saved() + fmt.Fprintf(os.Stderr, "[compress] %s: %d→%d bytes (%.0f%% saved, ~%d tokens saved)\n", + cmdStr, stats.InputBytes, stats.OutputBytes, savedPct, savedTokens) + } + + // Preserve exit code from child process + if exitErr != nil { + if ee, ok := exitErr.(*exec.ExitError); ok { + os.Exit(ee.ExitCode()) + } + return exitErr + } + + return nil +} + +// recordProxyStats writes compression stats to the database (best-effort). 
+func recordProxyStats(cmdStr string, stats compress.Stats, savedBytes, inputTokens, outputTokens, savedTokens int) { + repo, err := openRepo() + if err != nil { + return // No database available — skip silently + } + defer func() { _ = repo.Close() }() + + sessionID := "" + if session, sessionErr := loadHookSession(); sessionErr == nil && session != nil { + sessionID = session.SessionID + } + + _ = RecordTokenStats( + repo.GetDB().DB(), + cmdStr, + stats.InputBytes, stats.OutputBytes, savedBytes, + stats.Ratio(), inputTokens, outputTokens, savedTokens, + sessionID, + ) +} diff --git a/cmd/root.go b/cmd/root.go index dddfedb..cd8cc48 100644 --- a/cmd/root.go +++ b/cmd/root.go @@ -121,8 +121,8 @@ func getCommandHint(cmd string) string { hints := map[string]string{ "plans": "Hint: To list plans, use: taskwing plan list", "tasks": "Hint: To list tasks, use: taskwing task list", - "create": "Hint: To create and activate a plan, use: taskwing goal \"\"", - "new": "Hint: To create and activate a plan, use: taskwing goal \"\"", + "create": "Hint: To create and activate a plan, use: taskwing plan \"\"", + "new": "Hint: To create and activate a plan, use: taskwing plan \"\"", "install": "Hint: To install MCP, use: taskwing mcp install", } diff --git a/cmd/slash_content.go b/cmd/slash_content.go index 86cdce9..c73b5cd 100644 --- a/cmd/slash_content.go +++ b/cmd/slash_content.go @@ -510,8 +510,8 @@ After all phases are expanded, call MCP tool ` + "`plan`" + ` with action=finali ## Fallback (No MCP) ` + "```bash" + ` -taskwing goal "Your goal description" # Preferred -taskwing plan new "Your goal description" # Advanced mode +taskwing plan "Your goal description" # Preferred +taskwing plan new "Your goal description" # Advanced TUI mode taskwing plan new --non-interactive "Your goal description" # Headless mode ` + "```" + ` ` diff --git a/cmd/start.go b/cmd/start.go index ea39887..e84b201 100644 --- a/cmd/start.go +++ b/cmd/start.go @@ -18,40 +18,31 @@ import ( "syscall" 
"time" - "github.com/josephgoksu/TaskWing/internal/agents/core" - "github.com/josephgoksu/TaskWing/internal/agents/impl" "github.com/josephgoksu/TaskWing/internal/config" - "github.com/josephgoksu/TaskWing/internal/knowledge" "github.com/josephgoksu/TaskWing/internal/llm" - "github.com/josephgoksu/TaskWing/internal/memory" "github.com/josephgoksu/TaskWing/internal/server" + "github.com/josephgoksu/TaskWing/internal/ui" "github.com/spf13/cobra" - "github.com/spf13/viper" ) var ( startPort int startHost string noDashboard bool - noWatch bool dashboardURL string ) var startCmd = &cobra.Command{ Use: "start", - Short: "Start TaskWing with API server, watch mode, and dashboard", + Short: "Start TaskWing with API server and dashboard", Long: `Start TaskWing with all services running: - HTTP API server for dashboard communication - - Watch mode for continuous file analysis - Auto-open dashboard in browser -This single command replaces running 'serve' and 'watch' separately. - Examples: taskwing start # Start everything taskwing start --host 0.0.0.0 # Expose API on all interfaces taskwing start --no-dashboard # Don't open browser - taskwing start --no-watch # Server only, no file watching taskwing start --port 8080 # Use custom port`, RunE: runStart, } @@ -63,10 +54,9 @@ func init() { startCmd.Flags().IntVarP(&startPort, "port", "p", 5001, "API server port") startCmd.Flags().StringVar(&startHost, "host", "127.0.0.1", "API server host bind address") startCmd.Flags().BoolVar(&noDashboard, "no-dashboard", false, "Don't auto-open dashboard in browser") - startCmd.Flags().BoolVar(&noWatch, "no-watch", false, "Don't run watch mode (server only)") startCmd.Flags().StringVar(&dashboardURL, "dashboard-url", "", "Dashboard URL (default: https://hub.taskwing.app, use http://localhost:5173 for local dev)") - // LLM configuration (reuse from watch) + // LLM configuration startCmd.Flags().String("provider", "", "LLM provider (openai, ollama, anthropic, bedrock, gemini)") 
startCmd.Flags().String("model", "", "Model to use") startCmd.Flags().String("api-key", "", "LLM API key (or set provider-specific env var)") @@ -74,7 +64,6 @@ func init() { } func runStart(cmd *cobra.Command, args []string) error { - verbose := viper.GetBool("verbose") startHost = strings.TrimSpace(startHost) if startHost == "" { startHost = "127.0.0.1" @@ -90,13 +79,10 @@ func runStart(cmd *cobra.Command, args []string) error { // Print banner if !isQuiet() { fmt.Println() - fmt.Println("🚀 TaskWing Starting...") + fmt.Printf("%s TaskWing Starting...\n", ui.IconRocket) fmt.Println("━━━━━━━━━━━━━━━━━━━━━━━━") - fmt.Printf("📁 Project: %s\n", cwd) - fmt.Printf("🌐 API: %s\n", apiURL(startHost, startPort)) - if !noWatch { - fmt.Println("👁️ Watch: enabled") - } + fmt.Printf("%s Project: %s\n", ui.IconFolder, cwd) + fmt.Printf("%s API: %s\n", ui.IconGlobe, apiURL(startHost, startPort)) fmt.Println() } @@ -130,15 +116,7 @@ func runStart(cmd *cobra.Command, args []string) error { return fmt.Errorf("failed to start API server: %w", err) } - // Start watch mode if enabled - var watchAgent *impl.WatchAgent - if !noWatch { - watchAgent, err = startWatchMode(cwd, verbose, llmConfig, &wg, errChan) - if err != nil { - _ = srv.Shutdown(context.Background()) - return fmt.Errorf("failed to start watch mode: %w", err) - } - } + // Watch mode removed (WatchAgent deleted) // Open dashboard in browser if !noDashboard { @@ -146,17 +124,17 @@ func runStart(cmd *cobra.Command, args []string) error { time.Sleep(500 * time.Millisecond) if err := openBrowser(resolvedDashboardURL); err != nil { if !isQuiet() { - fmt.Printf("⚠️ Could not open browser: %v\n", err) + ui.PrintWarning(fmt.Sprintf("Could not open browser: %v", err)) fmt.Printf(" Open manually: %s\n", resolvedDashboardURL) } } else if !isQuiet() { - fmt.Printf("🌐 Dashboard opened: %s\n", resolvedDashboardURL) + fmt.Printf("%s Dashboard opened: %s\n", ui.IconGlobe, resolvedDashboardURL) } } if !isQuiet() { fmt.Println() - 
fmt.Println("✅ TaskWing is running! Press Ctrl+C to stop") + ui.PrintSuccess("TaskWing is running! Press Ctrl+C to stop") fmt.Println() } @@ -166,15 +144,10 @@ func runStart(cmd *cobra.Command, args []string) error { select { case sig := <-sigChan: - fmt.Printf("\n\n⏹️ Received %v, shutting down...\n", sig) + fmt.Printf("\n\n%s Received %v, shutting down...\n", ui.IconStop, sig) case err := <-errChan: - fmt.Printf("\n\n❌ Error: %v\n", err) - } - - // Stop watch agent - if watchAgent != nil { - fmt.Println(" Stopping watch mode...") - watchAgent.Stop() + fmt.Print("\n\n") + ui.PrintError(fmt.Sprintf("Error: %v", err)) } // Shutdown HTTP server with timeout @@ -182,53 +155,15 @@ func runStart(cmd *cobra.Command, args []string) error { shutdownCtx, shutdownCancel := context.WithTimeout(context.Background(), 5*time.Second) defer shutdownCancel() if err := srv.Shutdown(shutdownCtx); err != nil { - fmt.Printf(" ⚠️ Server shutdown error: %v\n", err) + ui.PrintWarning(fmt.Sprintf("Server shutdown error: %v", err)) } wg.Wait() - fmt.Println("✅ TaskWing stopped") + ui.PrintSuccess("TaskWing stopped") return nil } -// startWatchMode starts the watch agent in a goroutine -func startWatchMode(watchPath string, verbose bool, llmConfig llm.Config, wg *sync.WaitGroup, errChan chan<- error) (*impl.WatchAgent, error) { - // Initialize knowledge service first (needed for context injection) - memoryPath, err := config.GetMemoryBasePath() - if err != nil { - return nil, fmt.Errorf("get memory path: %w", err) - } - repo, err := memory.NewDefaultRepository(memoryPath) - if err != nil { - return nil, fmt.Errorf("create memory repository: %w", err) - } - - ks := knowledge.NewService(repo, llmConfig) - - // Create watch agent with knowledge service - watchAgent, err := impl.NewWatchAgent(impl.WatchConfig{ - BasePath: watchPath, - LLMConfig: llmConfig, - Verbose: verbose, - Service: ks, - }) - if err != nil { - return nil, fmt.Errorf("create watch agent: %w", err) - } - - // Set up findings 
handler - watchAgent.SetFindingsHandler(func(ctx context.Context, findings []core.Finding, filePaths []string) error { - return ks.IngestFindings(ctx, findings, filePaths, verbose) - }) - - // Start watching - if err := watchAgent.Start(); err != nil { - return nil, fmt.Errorf("start watch: %w", err) - } - - return watchAgent, nil -} - // openBrowser opens the URL in the default browser func openBrowser(url string) error { var cmd *exec.Cmd diff --git a/cmd/task.go b/cmd/task.go index 6eb2570..6f9cd87 100644 --- a/cmd/task.go +++ b/cmd/task.go @@ -69,7 +69,7 @@ func runTaskList(cmd *cobra.Command, args []string) error { if isJSON() { return printJSON([]any{}) } - fmt.Println("No plans found. Create one with: taskwing goal \"Your goal\"") + fmt.Println("No plans found. Create one with: taskwing plan \"Your description\"") return nil } @@ -162,19 +162,18 @@ func runTaskList(cmd *cobra.Command, args []string) error { ui.RenderPageHeader("TaskWing Task List", "") if len(allTasks) == 0 { - subtle := lipgloss.NewStyle().Foreground(lipgloss.Color("241")) - fmt.Println(subtle.Render("\nNo tasks match the filters.")) + fmt.Println(ui.StyleSubtle.Render("\nNo tasks match the filters.")) if statusFilter != "" || priorityFilter > 0 || scopeFilter != "" { - fmt.Println(subtle.Render("Try adjusting your filter criteria.")) + fmt.Println(ui.StyleSubtle.Render("Try adjusting your filter criteria.")) } return nil } // Styles for colored output - idStyle := lipgloss.NewStyle().Foreground(lipgloss.Color("14")).Bold(true) // Cyan, bold - titleStyle := lipgloss.NewStyle().Foreground(lipgloss.Color("255")) // White - scopeStyle := lipgloss.NewStyle().Foreground(lipgloss.Color("241")).Italic(true) - subtle := lipgloss.NewStyle().Foreground(lipgloss.Color("241")) + idStyle := ui.StyleSelectActive // Cyan, bold + titleStyle := ui.StyleText // Readable text + scopeStyle := ui.StyleSubtle.Italic(true) + subtle := ui.StyleSubtle // Group tasks by plan for display tasksByPlan := 
make(map[string][]taskWithPlan) @@ -186,7 +185,7 @@ func runTaskList(cmd *cobra.Command, args []string) error { tasksByPlan[tp.PlanID] = append(tasksByPlan[tp.PlanID], tp) } - planHeader := lipgloss.NewStyle().Foreground(lipgloss.Color("205")).Bold(true) + planHeader := ui.StylePrimary.Bold(true) for _, planID := range planOrder { tasks := tasksByPlan[planID] @@ -258,50 +257,40 @@ func runTaskList(cmd *cobra.Command, args []string) error { func formatTaskStatus(status task.TaskStatus) string { switch status { case task.StatusCompleted, "done": - // Green - successfully completed - return lipgloss.NewStyle().Foreground(lipgloss.Color("42")).Render("[done] ") + return ui.StyleSuccess.Render("[done] ") case task.StatusInProgress: - // Orange/bold - actively working - return lipgloss.NewStyle().Foreground(lipgloss.Color("214")).Bold(true).Render("[active] ") + return ui.StyleWarning.Bold(true).Render("[active] ") case task.StatusFailed: - // Red - execution or verification failed - return lipgloss.NewStyle().Foreground(lipgloss.Color("196")).Render("[failed] ") + return ui.StyleError.Render("[failed] ") case task.StatusVerifying: - // Blue - running validation - return lipgloss.NewStyle().Foreground(lipgloss.Color("33")).Render("[verify] ") + return ui.StylePrimary.Render("[verify] ") case task.StatusPending: - // Gray - ready to be picked up - return lipgloss.NewStyle().Foreground(lipgloss.Color("245")).Render("[pending] ") + return ui.StyleSubtle.Render("[pending] ") case task.StatusDraft: - // Dim gray - initial creation, not ready - return lipgloss.NewStyle().Foreground(lipgloss.Color("240")).Render("[draft] ") + return ui.StyleSelectDim.Render("[draft] ") case task.StatusBlocked: - // Red/dim - waiting on dependencies - return lipgloss.NewStyle().Foreground(lipgloss.Color("124")).Render("[blocked] ") + return ui.StyleError.Render("[blocked] ") case task.StatusReady: - // Green/dim - dependencies met, ready for execution - return 
lipgloss.NewStyle().Foreground(lipgloss.Color("28")).Render("[ready] ") + return ui.StyleSuccess.Render("[ready] ") default: - // Unknown status - neutral gray with label showing the actual value - // This ensures we never panic on unexpected values - return lipgloss.NewStyle().Foreground(lipgloss.Color("245")).Render("[unknown] ") + return ui.StyleSubtle.Render("[unknown] ") } } // formatPriority returns a color-coded priority string. func formatPriority(priority int) string { - var color lipgloss.Color + var style lipgloss.Style switch { case priority <= 20: - color = lipgloss.Color("196") // Red - critical + style = ui.StyleError // Red - critical case priority <= 50: - color = lipgloss.Color("214") // Orange - high + style = ui.StyleWarning // Orange - high case priority <= 75: - color = lipgloss.Color("226") // Yellow - medium + style = ui.StylePrimary // Pink - medium default: - color = lipgloss.Color("245") // Gray - low + style = ui.StyleSubtle // Gray - low } - return lipgloss.NewStyle().Foreground(color).Render(fmt.Sprintf("%3d ", priority)) + return style.Render(fmt.Sprintf("%3d ", priority)) } var taskShowCmd = &cobra.Command{ @@ -343,27 +332,27 @@ Examples: return printJSON(t) } - fmt.Printf("Task: %s\n", t.ID) - fmt.Printf("Plan: %s\n", t.PlanID) - fmt.Printf("Title: %s\n", t.Title) + ui.PrintSectionHeader(ui.IconTask, t.Title) + ui.PrintKeyValue("ID", t.ID) + ui.PrintKeyValue("Plan", t.PlanID) if t.Description != "" { - fmt.Printf("Description: %s\n", t.Description) + ui.PrintKeyValue("Description", t.Description) } - fmt.Printf("Status: %s\n", t.Status) - fmt.Printf("Priority: %d\n", t.Priority) + ui.PrintKeyValue("Status", string(t.Status)) + ui.PrintKeyValue("Priority", fmt.Sprintf("%d", t.Priority)) if t.AssignedAgent != "" { - fmt.Printf("Assigned Agent: %s\n", t.AssignedAgent) + ui.PrintKeyValue("Agent", t.AssignedAgent) } if len(t.AcceptanceCriteria) > 0 { - fmt.Println("\nAcceptance Criteria:") + ui.PrintSectionHeader(ui.IconTarget, 
"Acceptance Criteria") for _, a := range t.AcceptanceCriteria { - fmt.Printf(" - %s\n", a) + fmt.Printf(" - %s\n", a) } } if len(t.ValidationSteps) > 0 { - fmt.Println("\nValidation Steps:") + ui.PrintSectionHeader(ui.IconDone, "Validation Steps") for _, v := range t.ValidationSteps { - fmt.Printf(" - %s\n", v) + fmt.Printf(" - %s\n", v) } } return nil @@ -404,7 +393,7 @@ var taskUpdateCmd = &cobra.Command{ } if !isQuiet() { - fmt.Printf("✓ Updated task %s\n", taskID) + ui.PrintSuccess(fmt.Sprintf("Updated task %s", taskID)) } return nil }, @@ -462,15 +451,15 @@ func runTaskComplete(cmd *cobra.Command, args []string) error { } if !isQuiet() { - fmt.Printf("✓ %s\n", result.Message) + ui.PrintSuccess(result.Message) if result.Hint != "" { - fmt.Printf(" %s\n", result.Hint) + ui.PrintHint(result.Hint) } if result.GitWorkflowApplied { - fmt.Printf(" Git: committed to %s\n", result.GitBranch) + ui.PrintKeyValue("Git", fmt.Sprintf("committed to %s", result.GitBranch)) } if result.PRCreated { - fmt.Printf(" PR: %s\n", result.PRURL) + ui.PrintKeyValue("PR", result.PRURL) } } return nil @@ -500,7 +489,7 @@ var taskDeleteCmd = &cobra.Command{ } fmt.Printf("\n Task: %s\n", t.ID) fmt.Printf(" Title: %s\n\n", t.Title) - if !confirmOrAbort("⚠️ Delete this task? [y/N]: ") { + if !confirmOrAbort(fmt.Sprintf("%s Delete this task? 
[y/N]: ", ui.IconWarn)) { return nil } } @@ -517,18 +506,19 @@ var taskDeleteCmd = &cobra.Command{ } if !isQuiet() { - fmt.Printf("✓ Deleted task %s\n", taskID) + ui.PrintSuccess(fmt.Sprintf("Deleted task %s", taskID)) } return nil }, } var taskValidateCmd = &cobra.Command{ - Use: "validate [task-id]", - Short: "Validate a task (Placeholder)", - Args: cobra.ExactArgs(1), + Use: "validate [task-id]", + Short: "Validate a task (Placeholder)", + Hidden: true, + Args: cobra.ExactArgs(1), Run: func(cmd *cobra.Command, args []string) { - fmt.Println("🕵️ Validation agent coming in V2.1") + ui.PrintInfo("Validation agent coming in V2.1") fmt.Println(" For now, please run the validation steps manually:") // Logic to fetch task and print validation steps would go here @@ -573,7 +563,7 @@ func runTaskNext(cmd *cobra.Command, args []string) error { taskApp := app.NewTaskApp(appCtx) if !isQuiet() && !isJSON() { - fmt.Fprint(os.Stderr, "🔍 Getting next task...") + fmt.Fprintf(os.Stderr, "%s Getting next task...", ui.IconSearch) } ctx := context.Background() @@ -597,17 +587,17 @@ func runTaskNext(cmd *cobra.Command, args []string) error { } if !result.Success { - fmt.Printf("⚠️ %s\n", result.Message) + ui.PrintWarning(result.Message) if result.Hint != "" { - fmt.Printf("💡 %s\n", result.Hint) + ui.PrintHint(result.Hint) } return nil } if result.Task == nil { - fmt.Printf("✓ %s\n", result.Message) + ui.PrintSuccess(result.Message) if result.Hint != "" { - fmt.Printf("💡 %s\n", result.Hint) + ui.PrintHint(result.Hint) } return nil } @@ -616,20 +606,21 @@ func runTaskNext(cmd *cobra.Command, args []string) error { ui.RenderPageHeader("Next Task", "") if result.GitWorkflowApplied { - fmt.Printf("🌿 Branch: %s\n\n", result.GitBranch) + fmt.Printf("%s Branch: %s\n\n", ui.IconBranch, result.GitBranch) } - fmt.Printf("📋 %s\n", result.Task.Title) - fmt.Printf(" ID: %s\n", result.Task.ID) - fmt.Printf(" Status: %s\n", result.Task.Status) - fmt.Printf(" Priority: %d\n", result.Task.Priority) + 
ui.PrintSectionHeader(ui.IconTask, result.Task.Title) + ui.PrintKeyValue("ID", result.Task.ID) + ui.PrintKeyValue("Status", string(result.Task.Status)) + ui.PrintKeyValue("Priority", fmt.Sprintf("%d", result.Task.Priority)) if result.Task.Description != "" { - fmt.Printf("\n📝 %s\n", result.Task.Description) + fmt.Printf("\n%s %s\n", ui.IconDesc, result.Task.Description) } if result.Hint != "" { - fmt.Printf("\n💡 %s\n", result.Hint) + fmt.Println() + ui.PrintHint(result.Hint) } return nil @@ -678,9 +669,9 @@ func runTaskCurrent(cmd *cobra.Command, args []string) error { } if !result.Success || result.Task == nil { - fmt.Printf("ℹ️ %s\n", result.Message) + ui.PrintInfo(result.Message) if result.Hint != "" { - fmt.Printf("💡 %s\n", result.Hint) + ui.PrintHint(result.Hint) } return nil } @@ -688,15 +679,15 @@ func runTaskCurrent(cmd *cobra.Command, args []string) error { // Show task details ui.RenderPageHeader("Current Task", "") - fmt.Printf("📋 %s\n", result.Task.Title) - fmt.Printf(" ID: %s\n", result.Task.ID) - fmt.Printf(" Status: %s\n", result.Task.Status) + ui.PrintSectionHeader(ui.IconTask, result.Task.Title) + ui.PrintKeyValue("ID", result.Task.ID) + ui.PrintKeyValue("Status", string(result.Task.Status)) if !result.Task.ClaimedAt.IsZero() { - fmt.Printf(" Started: %s\n", result.Task.ClaimedAt.Format("2006-01-02 15:04")) + ui.PrintKeyValue("Started", result.Task.ClaimedAt.Format("2006-01-02 15:04")) } if result.Task.Description != "" { - fmt.Printf("\n📝 %s\n", result.Task.Description) + fmt.Printf("\n%s %s\n", ui.IconDesc, result.Task.Description) } return nil @@ -751,16 +742,17 @@ func runTaskStart(cmd *cobra.Command, args []string) error { } if !result.Success { - fmt.Printf("⚠️ %s\n", result.Message) + ui.PrintWarning(result.Message) return nil } if !isQuiet() { - fmt.Printf("✓ Started task: %s\n", result.Task.Title) - fmt.Printf(" ID: %s\n", result.Task.ID) + ui.PrintSuccess(fmt.Sprintf("Started task: %s", result.Task.Title)) + ui.PrintKeyValue("ID", 
result.Task.ID) if result.Hint != "" { - fmt.Printf("\n💡 %s\n", result.Hint) + fmt.Println() + ui.PrintHint(result.Hint) } } @@ -861,9 +853,9 @@ func runTaskAdd(cmd *cobra.Command, args []string) error { return printJSON(newTask) } - fmt.Printf("✓ Created task: %s\n", newTask.Title) - fmt.Printf(" ID: %s\n", newTask.ID) - fmt.Printf(" Plan: %s\n", planID) + ui.PrintSuccess(fmt.Sprintf("Created task: %s", newTask.Title)) + ui.PrintKeyValue("ID", newTask.ID) + ui.PrintKeyValue("Plan", planID) return nil } diff --git a/docs/PRODUCT_VISION.md b/docs/PRODUCT_VISION.md index 822b4cf..0a067b8 100644 --- a/docs/PRODUCT_VISION.md +++ b/docs/PRODUCT_VISION.md @@ -40,7 +40,7 @@ Brand names and logos are trademarks of their respective owners; usage here indi ```text ┌─────────────────────────────────────────────────────────┐ │ USER INTERFACE │ -│ taskwing goal "..." │ /tw-next │ /tw-done │ +│ taskwing plan "..." │ /tw-next │ /tw-done │ └─────────────────────────────────────────────────────────┘ │ ▼ @@ -66,7 +66,7 @@ Brand names and logos are trademarks of their respective owners; usage here indi - `taskwing bootstrap` -- `taskwing goal ""` +- `taskwing plan ""` - `taskwing ask ""` - `taskwing task` - `taskwing plan status` @@ -93,7 +93,7 @@ Brand names and logos are trademarks of their respective owners; usage here indi ## Success Metrics 1. Task accuracy: generated tasks reference correct files and patterns. -2. Developer adoption: daily active users running `taskwing goal`. +2. Developer adoption: daily active users running `taskwing plan`. 3. Context utilization: MCP queries per plan execution. 4. Time-to-root-cause: bug investigations with TaskWing context vs. without. diff --git a/docs/TUTORIAL.md b/docs/TUTORIAL.md index bf6fae4..fd5cd57 100644 --- a/docs/TUTORIAL.md +++ b/docs/TUTORIAL.md @@ -39,10 +39,10 @@ This creates `.taskwing/` and installs AI assistant integration files. ## 2. 
Create and Activate a Plan ```bash -taskwing goal "Add user authentication" +taskwing plan "Add user authentication" ``` -`taskwing goal` runs clarify -> generate -> activate in one step. +`taskwing plan` runs clarify -> generate -> activate in one step. ## 3. Execute with Slash Commands @@ -154,7 +154,7 @@ Recommended Bedrock model IDs: - `taskwing bootstrap` -- `taskwing goal ""` +- `taskwing plan ""` - `taskwing ask ""` - `taskwing task` - `taskwing plan status` diff --git a/docs/WORKFLOW_PACK.md b/docs/WORKFLOW_PACK.md index b3f1ddc..c2e0011 100644 --- a/docs/WORKFLOW_PACK.md +++ b/docs/WORKFLOW_PACK.md @@ -18,7 +18,7 @@ Get users to one visible success loop in under 15 minutes: ## First-Run Activation Path 1. `taskwing bootstrap` -2. `taskwing goal ""` +2. `taskwing plan ""` 3. `/tw-next` 4. Implement scoped change 5. `/tw-done` diff --git a/docs/_partials/core_commands.md b/docs/_partials/core_commands.md index ecaf254..6d3f94f 100644 --- a/docs/_partials/core_commands.md +++ b/docs/_partials/core_commands.md @@ -1,5 +1,5 @@ - `taskwing bootstrap` -- `taskwing goal ""` +- `taskwing plan ""` - `taskwing ask ""` - `taskwing task` - `taskwing plan status` diff --git a/docs/user-stories.md b/docs/user-stories.md new file mode 100644 index 0000000..5517eb3 --- /dev/null +++ b/docs/user-stories.md @@ -0,0 +1,139 @@ +# TaskWing User Stories + +## 1. Backend Engineer Debugging a Production Bug + +> "Our payment webhook is silently dropping events. I need to find the root cause and fix it." + +```bash +taskwing bootstrap # Index codebase, auto-analyze architecture +taskwing plan "Fix payment webhook dropping events silently" +taskwing execute # AI CLI investigates, traces the bug, applies the fix +``` + +TaskWing feeds the AI CLI your architecture context — retry policies, error handling patterns, webhook middleware chain — so it doesn't waste time rediscovering your codebase. The fix lands with tests that cover the exact failure mode. + +--- + +## 2. 
Full-Stack Engineer Building a Feature Across Layers + +> "We need user profile avatars — upload on the frontend, storage on the backend, CDN serving." + +```bash +taskwing plan "Add user profile avatar upload with CDN-backed serving" +taskwing plan status # Review: API endpoint, S3 upload, React component, CDN config +taskwing execute --all # Executes tasks in dependency order across the stack +``` + +TaskWing decomposes the goal into ordered tasks — backend storage first, then API endpoint, then frontend component — each with acceptance criteria. The AI CLI implements each layer knowing how your existing upload patterns, auth middleware, and component library work. + +--- + +## 3. DevOps Engineer Setting Up CI/CD for a New Service + +> "We spun up a new Go microservice. It needs the same CI/CD pipeline as our other services." + +```bash +taskwing bootstrap +taskwing plan "Add CI/CD pipeline matching existing service patterns" +taskwing execute --all +``` + +Bootstrap captures your existing GitHub Actions workflows, Docker build conventions, and deployment constraints. The AI CLI replicates the pattern for the new service — same linting, same test matrix, same deploy targets — without you copying and pasting YAML. + +--- + +## 4. Tech Lead Paying Down Technical Debt + +> "Our auth module has three different token validation paths. Consolidate them before they cause a security incident." + +```bash +taskwing bootstrap # Auto-surfaces the debt: three validation paths, confidence 0.9 +taskwing plan "Consolidate auth token validation into a single middleware" +taskwing plan status # Review: extract shared validator, migrate callers, remove dead paths +taskwing execute +``` + +Bootstrap's debt classification flags the problem with evidence — file paths, line numbers, grep patterns. The plan ensures callers are migrated one by one with tests at each step, not a risky big-bang rewrite. + +--- + +## 5. 
Mobile Engineer Adding Offline Support + +> "The app crashes when users lose connectivity mid-sync. We need proper offline queueing." + +```bash +taskwing plan "Add offline request queueing with retry on reconnection" +taskwing execute --all +``` + +TaskWing knows your networking layer, state management patterns, and existing retry logic from bootstrap. Tasks are ordered: queue data structure first, then interceptor integration, then UI indicators, then edge-case tests. + +--- + +## 6. Junior Developer Onboarding to a New Codebase + +> "I just joined the team. I need to add a simple health check endpoint but I don't know where anything is." + +```bash +taskwing bootstrap # Builds a knowledge map of the codebase (auto-analyzes) +taskwing plan "Add /healthz endpoint with database connectivity check" +taskwing execute +``` + +The junior doesn't need to spend days reading code. TaskWing's bootstrap extracts the routing patterns, middleware chain, and database connection setup. The AI CLI follows the existing conventions exactly — same error format, same middleware stack, same test style. + +--- + +## 7. Data Engineer Building a New Pipeline Stage + +> "We need to add a deduplication step between ingestion and transformation in our ETL pipeline." + +```bash +taskwing bootstrap +taskwing plan "Add deduplication stage between ingestion and transformation" +taskwing execute --all +``` + +Bootstrap maps the pipeline architecture — message formats, checkpoint patterns, idempotency keys. The AI CLI adds the new stage following the same patterns: same logging, same metrics, same error recovery, same config structure. + +--- + +## 8. Security Engineer Hardening an API + +> "Pen test flagged rate limiting gaps and missing input validation on three endpoints." 
+ +```bash +taskwing plan "Add rate limiting and input validation to user, payment, and admin endpoints" +taskwing plan status # Review: rate limiter middleware, per-endpoint validation, integration tests +taskwing execute --all +``` + +TaskWing decomposes the hardening into isolated tasks — middleware first, then endpoint-specific validation, then abuse-scenario tests. Each task carries the security constraints from bootstrap so the AI CLI knows your existing auth patterns and validation library. + +--- + +## 9. Platform Engineer Migrating a Database + +> "We're moving from Postgres to CockroachDB. Need to audit and fix all incompatible queries." + +```bash +taskwing bootstrap # Auto-captures all SQL patterns, ORM usage, query builders +taskwing plan "Migrate from Postgres to CockroachDB — fix incompatible queries" +taskwing execute +``` + +Bootstrap inventories every raw SQL query, ORM call pattern, and migration file. The plan targets each incompatibility — serial vs UUID, transaction semantics, upsert syntax — as a separate task with validation queries to prove correctness. + +--- + +## 10. Founder Shipping an MVP Feature Solo + +> "I need to add Stripe subscription billing by Friday. Backend, frontend, webhooks, the works." + +```bash +taskwing plan "Add Stripe subscription billing with plan selection UI and webhook handling" +taskwing plan status # 6 tasks: config, models, API, webhooks, UI, e2e tests +taskwing execute --all +``` + +One person, one command. TaskWing breaks the feature into the right build order, gives each task the full architecture context, and the AI CLI implements it end-to-end. You review the code, not write it from scratch. diff --git a/internal/agents/impl/analysis_code.go b/internal/agents/impl/analysis_code.go deleted file mode 100644 index 93dabe9..0000000 --- a/internal/agents/impl/analysis_code.go +++ /dev/null @@ -1,184 +0,0 @@ -/* -Package analysis provides the ReAct code agent for dynamic codebase exploration. 
-*/ -package impl - -import ( - "context" - "fmt" - "time" - - "github.com/cloudwego/eino/callbacks" - "github.com/cloudwego/eino/components/model" - "github.com/cloudwego/eino/components/tool" - "github.com/cloudwego/eino/compose" - "github.com/cloudwego/eino/flow/agent/react" - "github.com/cloudwego/eino/schema" - "github.com/josephgoksu/TaskWing/internal/agents/core" - agenttools "github.com/josephgoksu/TaskWing/internal/agents/tools" - "github.com/josephgoksu/TaskWing/internal/config" - "github.com/josephgoksu/TaskWing/internal/llm" -) - -// ReactAgent uses Eino's built-in ReAct agent for dynamic codebase exploration. -type ReactAgent struct { - core.BaseAgent - basePath string - maxSteps int - verbose bool -} - -// NewReactAgent creates a new ReAct-powered code analysis agent. -func NewReactAgent(cfg llm.Config, basePath string) *ReactAgent { - return &ReactAgent{ - BaseAgent: core.NewBaseAgent("react", "Dynamically explores codebase using tools to identify architectural patterns", cfg), - basePath: basePath, - maxSteps: 20, - verbose: false, - } -} - -// SetVerbose enables detailed logging of agent actions. -func (a *ReactAgent) SetVerbose(v bool) { a.verbose = v } - -// SetMaxIterations sets the maximum number of ReAct steps. -func (a *ReactAgent) SetMaxIterations(n int) { - if n > 0 && n <= 80 { - a.maxSteps = n - } -} - -// Run executes the agent using Eino's built-in react.Agent. 
-func (a *ReactAgent) Run(ctx context.Context, input core.Input) (core.Output, error) { - var output core.Output - output.AgentName = a.Name() - start := time.Now() - - closeableChatModel, err := llm.NewCloseableChatModel(ctx, a.LLMConfig()) - if err != nil { - return output, fmt.Errorf("create chat model: %w", err) - } - defer func() { _ = closeableChatModel.Close() }() - - baseChatModel := closeableChatModel.BaseChatModel - toolCallingModel, ok := baseChatModel.(model.ToolCallingChatModel) - if !ok { - return output, fmt.Errorf("model %q does not support tool calling, which is required for code analysis", a.LLMConfig().Model) - } - - basePath := input.BasePath - if basePath == "" { - basePath = a.basePath - } - - einoTools := agenttools.CreateEinoTools(basePath) - baseTools := make([]tool.BaseTool, len(einoTools)) - for i, t := range einoTools { - baseTools[i] = t - } - - agent, err := react.NewAgent(ctx, &react.AgentConfig{ - ToolCallingModel: toolCallingModel, - ToolsConfig: compose.ToolsNodeConfig{Tools: baseTools}, - MaxStep: a.maxSteps, - MessageModifier: func(ctx context.Context, msgs []*schema.Message) []*schema.Message { - return append([]*schema.Message{schema.SystemMessage(config.SystemPromptReactAgent)}, msgs...) 
- }, - }) - if err != nil { - return output, fmt.Errorf("create ReAct agent: %w", err) - } - - if a.verbose { - handler := callbacks.NewHandlerBuilder().Build() - runInfo := &callbacks.RunInfo{Name: a.Name(), Type: "ReActAgent"} - ctx = callbacks.InitCallbacks(ctx, runInfo, handler) - } - - userMsg := []*schema.Message{ - schema.UserMessage(fmt.Sprintf( - "Analyze the architectural patterns and key decisions in project: %s\n\nStart by exploring the directory structure.", - input.ProjectName, - )), - } - - resp, err := agent.Generate(ctx, userMsg) - if err != nil { - return output, fmt.Errorf("agent generate failed: %w", err) - } - - output.RawOutput = resp.Content - output.Duration = time.Since(start) - - if output.RawOutput != "" { - findings, err := a.parseFindings(output.RawOutput) - if err != nil && a.verbose { - fmt.Printf(" [ReAct] Parse warning: %v\n", err) - } - output.Findings = findings - } - - return output, nil -} - -type reactParseResult struct { - Decisions []struct { - Title string `json:"title"` - Component string `json:"component"` - What string `json:"what"` - Why string `json:"why"` - Tradeoffs string `json:"tradeoffs"` - Confidence any `json:"confidence"` - Evidence []core.EvidenceJSON `json:"evidence"` - DebtScore any `json:"debt_score"` // Debt classification - DebtReason string `json:"debt_reason"` // Why this is considered debt - RefactorHint string `json:"refactor_hint"` // How to eliminate the debt - } `json:"decisions"` - Patterns []struct { - Name string `json:"name"` - Context string `json:"context"` - Solution string `json:"solution"` - Consequences string `json:"consequences"` - Confidence any `json:"confidence"` - Evidence []core.EvidenceJSON `json:"evidence"` - DebtScore any `json:"debt_score"` // Debt classification - DebtReason string `json:"debt_reason"` // Why this is considered debt - RefactorHint string `json:"refactor_hint"` // How to eliminate the debt - } `json:"patterns"` -} - -func (a *ReactAgent) parseFindings(response 
string) ([]core.Finding, error) { - parsed, err := core.ParseJSONResponse[reactParseResult](response) - if err != nil { - return nil, err - } - - var findings []core.Finding - for _, d := range parsed.Decisions { - findings = append(findings, core.NewFindingWithDebt( - core.FindingTypeDecision, - d.Title, d.What, d.Why, d.Tradeoffs, - d.Confidence, d.Evidence, a.Name(), - map[string]any{"component": d.Component}, - core.DebtInfo{DebtScore: d.DebtScore, DebtReason: d.DebtReason, RefactorHint: d.RefactorHint}, - )) - } - - for _, p := range parsed.Patterns { - findings = append(findings, core.NewFindingWithDebt( - core.FindingTypePattern, - p.Name, p.Context, "", p.Consequences, - p.Confidence, p.Evidence, a.Name(), - map[string]any{"context": p.Context, "solution": p.Solution, "consequences": p.Consequences}, - core.DebtInfo{DebtScore: p.DebtScore, DebtReason: p.DebtReason, RefactorHint: p.RefactorHint}, - )) - } - - return findings, nil -} - -func init() { - core.RegisterAgent("react", func(cfg llm.Config, basePath string) core.Agent { - return NewReactAgent(cfg, basePath) - }, "ReAct Explorer", "Dynamically explores codebase using tools to identify architectural patterns") -} diff --git a/internal/agents/impl/utility_agents.go b/internal/agents/impl/utility_agents.go deleted file mode 100644 index fde416e..0000000 --- a/internal/agents/impl/utility_agents.go +++ /dev/null @@ -1,377 +0,0 @@ -/* -Package impl provides utility agents for code simplification, explanation, and debugging. -*/ -package impl - -import ( - "context" - "fmt" - "io" - - "github.com/josephgoksu/TaskWing/internal/agents/core" - "github.com/josephgoksu/TaskWing/internal/config" - "github.com/josephgoksu/TaskWing/internal/llm" -) - -// ============================================================================= -// SimplifyAgent -// ============================================================================= - -// SimplifyAgent reduces code complexity and line count. 
-// Call Close() when done to release resources. -type SimplifyAgent struct { - core.BaseAgent - chain *core.DeterministicChain[SimplifyOutput] - modelCloser io.Closer -} - -// SimplifyChange describes a single simplification made. -type SimplifyChange struct { - What string `json:"what"` - Why string `json:"why"` - Risk string `json:"risk"` -} - -// SimplifyOutput defines the structured response from the LLM. -type SimplifyOutput struct { - SimplifiedCode string `json:"simplified_code"` - OriginalLines int `json:"original_lines"` - SimplifiedLines int `json:"simplified_lines"` - ReductionPercentage int `json:"reduction_percentage"` - Changes []SimplifyChange `json:"changes"` - RiskAssessment string `json:"risk_assessment"` -} - -// NewSimplifyAgent creates a new agent for code simplification. -func NewSimplifyAgent(cfg llm.Config) *SimplifyAgent { - return &SimplifyAgent{ - BaseAgent: core.NewBaseAgent("simplify", "Reduces code complexity and line count", cfg), - } -} - -// Close releases LLM resources. Safe to call multiple times. -func (a *SimplifyAgent) Close() error { - if a.modelCloser != nil { - return a.modelCloser.Close() - } - return nil -} - -// Run executes the simplification using Eino Chain. 
-func (a *SimplifyAgent) Run(ctx context.Context, input core.Input) (core.Output, error) { - if a.chain == nil { - chatModel, err := a.CreateCloseableChatModel(ctx) - if err != nil { - return core.Output{}, err - } - a.modelCloser = chatModel - chain, err := core.NewDeterministicChain[SimplifyOutput]( - ctx, - a.Name(), - chatModel.BaseChatModel, - config.SystemPromptSimplifyAgent, - ) - if err != nil { - return core.Output{}, fmt.Errorf("create chain: %w", err) - } - a.chain = chain - } - - code, ok := input.ExistingContext["code"].(string) - if !ok || code == "" { - return core.Output{}, fmt.Errorf("missing 'code' in input context") - } - - filePath, _ := input.ExistingContext["file_path"].(string) - kgContext, _ := input.ExistingContext["context"].(string) - - chainInput := map[string]any{ - "Code": code, - "FilePath": filePath, - "Context": kgContext, - } - - parsed, raw, duration, err := a.chain.Invoke(ctx, chainInput) - if err != nil { - return core.Output{ - AgentName: a.Name(), - Error: fmt.Errorf("chain invoke: %w", err), - Duration: duration, - RawOutput: raw, - }, nil - } - - return core.BuildOutput( - a.Name(), - []core.Finding{{ - Type: "simplification", - Title: "Code Simplification", - Description: fmt.Sprintf("Reduced from %d to %d lines (%d%% reduction)", parsed.OriginalLines, parsed.SimplifiedLines, parsed.ReductionPercentage), - Metadata: map[string]any{ - "simplified_code": parsed.SimplifiedCode, - "original_lines": parsed.OriginalLines, - "simplified_lines": parsed.SimplifiedLines, - "reduction_percentage": parsed.ReductionPercentage, - "changes": parsed.Changes, - "risk_assessment": parsed.RiskAssessment, - }, - }}, - "JSON handled by Eino", - duration, - ), nil -} - -// ============================================================================= -// ExplainAgent -// ============================================================================= - -// ExplainAgent provides deep-dive explanations of code and concepts. 
-// Call Close() when done to release resources. -type ExplainAgent struct { - core.BaseAgent - chain *core.DeterministicChain[ExplainOutput] - modelCloser io.Closer -} - -// ExplainConnection describes a relationship to another component. -type ExplainConnection struct { - Target string `json:"target"` - Relationship string `json:"relationship"` - Description string `json:"description"` -} - -// ExplainExample provides a usage example. -type ExplainExample struct { - Description string `json:"description"` - Code string `json:"code"` -} - -// ExplainOutput defines the structured response from the LLM. -type ExplainOutput struct { - Summary string `json:"summary"` - Explanation string `json:"explanation"` - Connections []ExplainConnection `json:"connections"` - Pitfalls []string `json:"pitfalls"` - Examples []ExplainExample `json:"examples"` -} - -// NewExplainAgent creates a new agent for code explanation. -func NewExplainAgent(cfg llm.Config) *ExplainAgent { - return &ExplainAgent{ - BaseAgent: core.NewBaseAgent("explain", "Provides deep-dive explanations of code and concepts", cfg), - } -} - -// Close releases LLM resources. Safe to call multiple times. -func (a *ExplainAgent) Close() error { - if a.modelCloser != nil { - return a.modelCloser.Close() - } - return nil -} - -// Run executes the explanation using Eino Chain. 
-func (a *ExplainAgent) Run(ctx context.Context, input core.Input) (core.Output, error) { - if a.chain == nil { - chatModel, err := a.CreateCloseableChatModel(ctx) - if err != nil { - return core.Output{}, err - } - a.modelCloser = chatModel - chain, err := core.NewDeterministicChain[ExplainOutput]( - ctx, - a.Name(), - chatModel.BaseChatModel, - config.SystemPromptExplainAgent, - ) - if err != nil { - return core.Output{}, fmt.Errorf("create chain: %w", err) - } - a.chain = chain - } - - query, ok := input.ExistingContext["query"].(string) - if !ok || query == "" { - return core.Output{}, fmt.Errorf("missing 'query' in input context") - } - - symbol, _ := input.ExistingContext["symbol"].(string) - code, _ := input.ExistingContext["code"].(string) - kgContext, _ := input.ExistingContext["context"].(string) - - chainInput := map[string]any{ - "Query": query, - "Symbol": symbol, - "Code": code, - "Context": kgContext, - } - - parsed, raw, duration, err := a.chain.Invoke(ctx, chainInput) - if err != nil { - return core.Output{ - AgentName: a.Name(), - Error: fmt.Errorf("chain invoke: %w", err), - Duration: duration, - RawOutput: raw, - }, nil - } - - return core.BuildOutput( - a.Name(), - []core.Finding{{ - Type: "explanation", - Title: parsed.Summary, - Description: parsed.Explanation, - Metadata: map[string]any{ - "connections": parsed.Connections, - "pitfalls": parsed.Pitfalls, - "examples": parsed.Examples, - }, - }}, - "JSON handled by Eino", - duration, - ), nil -} - -// ============================================================================= -// DebugAgent -// ============================================================================= - -// DebugAgent helps developers diagnose issues systematically. -// Call Close() when done to release resources. -type DebugAgent struct { - core.BaseAgent - chain *core.DeterministicChain[DebugOutput] - modelCloser io.Closer -} - -// DebugHypothesis represents a possible cause of the issue. 
-type DebugHypothesis struct { - Cause string `json:"cause"` - Likelihood string `json:"likelihood"` - Reasoning string `json:"reasoning"` - CodeLocations []string `json:"code_locations"` -} - -// DebugInvestigationStep is a step to investigate the issue. -type DebugInvestigationStep struct { - Step int `json:"step"` - Action string `json:"action"` - Command string `json:"command"` - ExpectedFinding string `json:"expected_finding"` -} - -// DebugQuickFix is a quick fix suggestion. -type DebugQuickFix struct { - Fix string `json:"fix"` - When string `json:"when"` -} - -// DebugOutput defines the structured response from the LLM. -type DebugOutput struct { - Hypotheses []DebugHypothesis `json:"hypotheses"` - InvestigationSteps []DebugInvestigationStep `json:"investigation_steps"` - QuickFixes []DebugQuickFix `json:"quick_fixes"` -} - -// NewDebugAgent creates a new agent for issue debugging. -func NewDebugAgent(cfg llm.Config) *DebugAgent { - return &DebugAgent{ - BaseAgent: core.NewBaseAgent("debug", "Helps diagnose issues systematically", cfg), - } -} - -// Close releases LLM resources. Safe to call multiple times. -func (a *DebugAgent) Close() error { - if a.modelCloser != nil { - return a.modelCloser.Close() - } - return nil -} - -// Run executes the debugging analysis using Eino Chain. 
-func (a *DebugAgent) Run(ctx context.Context, input core.Input) (core.Output, error) { - if a.chain == nil { - chatModel, err := a.CreateCloseableChatModel(ctx) - if err != nil { - return core.Output{}, err - } - a.modelCloser = chatModel - chain, err := core.NewDeterministicChain[DebugOutput]( - ctx, - a.Name(), - chatModel.BaseChatModel, - config.SystemPromptDebugAgent, - ) - if err != nil { - return core.Output{}, fmt.Errorf("create chain: %w", err) - } - a.chain = chain - } - - problem, ok := input.ExistingContext["problem"].(string) - if !ok || problem == "" { - return core.Output{}, fmt.Errorf("missing 'problem' in input context") - } - - errorMsg, _ := input.ExistingContext["error"].(string) - stackTrace, _ := input.ExistingContext["stack_trace"].(string) - kgContext, _ := input.ExistingContext["context"].(string) - - chainInput := map[string]any{ - "Problem": problem, - "Error": errorMsg, - "StackTrace": stackTrace, - "Context": kgContext, - } - - parsed, raw, duration, err := a.chain.Invoke(ctx, chainInput) - if err != nil { - return core.Output{ - AgentName: a.Name(), - Error: fmt.Errorf("chain invoke: %w", err), - Duration: duration, - RawOutput: raw, - }, nil - } - - // Build summary from hypotheses - summary := "Unknown issue" - if len(parsed.Hypotheses) > 0 { - summary = parsed.Hypotheses[0].Cause - } - - return core.BuildOutput( - a.Name(), - []core.Finding{{ - Type: "debug", - Title: "Debug Analysis", - Description: summary, - Metadata: map[string]any{ - "hypotheses": parsed.Hypotheses, - "investigation_steps": parsed.InvestigationSteps, - "quick_fixes": parsed.QuickFixes, - }, - }}, - "JSON handled by Eino", - duration, - ), nil -} - -// ============================================================================= -// Agent Registration -// ============================================================================= - -func init() { - core.RegisterAgent("simplify", func(cfg llm.Config, basePath string) core.Agent { - return NewSimplifyAgent(cfg) 
- }, "Code Simplification", "Reduces code complexity and line count") - - core.RegisterAgent("explain", func(cfg llm.Config, basePath string) core.Agent { - return NewExplainAgent(cfg) - }, "Code Explanation", "Provides deep-dive explanations of code and concepts") - - core.RegisterAgent("debug", func(cfg llm.Config, basePath string) core.Agent { - return NewDebugAgent(cfg) - }, "Debug Helper", "Helps diagnose issues systematically") -} diff --git a/internal/agents/impl/watch_activity.go b/internal/agents/impl/watch_activity.go deleted file mode 100644 index abaf2f1..0000000 --- a/internal/agents/impl/watch_activity.go +++ /dev/null @@ -1,228 +0,0 @@ -/* -Copyright © 2025 Joseph Goksu josephgoksu@gmail.com - -Package watch provides activity logging for watch mode. -*/ -package impl - -import ( - "encoding/json" - "fmt" - "os" - "path/filepath" - "sync" - "time" -) - -// ActivityLog records watch mode activity -type ActivityLog struct { - logPath string - entries []ActivityEntry - mu sync.RWMutex - maxSize int -} - -// ActivityEntry represents a single activity log entry -type ActivityEntry struct { - ID int64 `json:"id"` - Timestamp time.Time `json:"timestamp"` - Type string `json:"type"` // file_change, agent_run, finding, error - Category string `json:"category,omitempty"` - Path string `json:"path,omitempty"` - Agent string `json:"agent,omitempty"` - Message string `json:"message"` - Details any `json:"details,omitempty"` -} - -// NewActivityLog creates a new activity logger -func NewActivityLog(basePath string) *ActivityLog { - logPath := filepath.Join(basePath, ".taskwing", "activity.json") - - log := &ActivityLog{ - logPath: logPath, - entries: make([]ActivityEntry, 0), - maxSize: 500, // Keep last 500 entries - } - - // Try to load existing log - log.load() - - return log -} - -// LogFileChange records a file change event -func (l *ActivityLog) LogFileChange(path string, operation string, category FileCategory) { - l.addEntry(ActivityEntry{ - Type: 
"file_change", - Category: string(category), - Path: path, - Message: fmt.Sprintf("%s: %s", operation, path), - }) -} - -// LogAgentRun records an agent execution -func (l *ActivityLog) LogAgentRun(agent string, findingCount int, duration time.Duration, err error) { - msg := fmt.Sprintf("%s completed: %d findings in %.1fs", agent, findingCount, duration.Seconds()) - details := map[string]any{ - "findings": findingCount, - "duration_ms": duration.Milliseconds(), - } - - entryType := "agent_run" - if err != nil { - entryType = "error" - msg = fmt.Sprintf("%s error: %v", agent, err) - details["error"] = err.Error() - } - - l.addEntry(ActivityEntry{ - Type: entryType, - Agent: agent, - Message: msg, - Details: details, - }) -} - -// LogFinding records a new finding -func (l *ActivityLog) LogFinding(agent string, title string, findingType string) { - l.addEntry(ActivityEntry{ - Type: "finding", - Agent: agent, - Message: title, - Details: map[string]string{"finding_type": findingType}, - }) -} - -// addEntry adds an entry to the log -func (l *ActivityLog) addEntry(entry ActivityEntry) { - l.mu.Lock() - - entry.ID = time.Now().UnixNano() - entry.Timestamp = time.Now() - - l.entries = append(l.entries, entry) - - // Trim to max size - if len(l.entries) > l.maxSize { - l.entries = l.entries[len(l.entries)-l.maxSize:] - } - - // Copy data for async save to avoid holding lock - entriesCopy := make([]ActivityEntry, len(l.entries)) - copy(entriesCopy, l.entries) - l.mu.Unlock() - - // Async save with copied data - go l.saveEntries(entriesCopy) -} - -// GetRecent returns the most recent N entries -func (l *ActivityLog) GetRecent(count int) []ActivityEntry { - l.mu.RLock() - defer l.mu.RUnlock() - - if count > len(l.entries) { - count = len(l.entries) - } - - start := len(l.entries) - count - result := make([]ActivityEntry, count) - copy(result, l.entries[start:]) - - // Return in reverse order (newest first) - for i, j := 0, len(result)-1; i < j; i, j = i+1, j-1 { - result[i], 
result[j] = result[j], result[i] - } - - return result -} - -// save writes the log to disk (reads from entries with lock) -func (l *ActivityLog) save() { - l.mu.RLock() - entriesCopy := make([]ActivityEntry, len(l.entries)) - copy(entriesCopy, l.entries) - l.mu.RUnlock() - - l.saveEntries(entriesCopy) -} - -// saveEntries writes pre-copied entries to disk (no lock needed) -func (l *ActivityLog) saveEntries(entries []ActivityEntry) { - data, err := json.MarshalIndent(entries, "", " ") - if err != nil { - return - } - - // Ensure directory exists - dir := filepath.Dir(l.logPath) - _ = os.MkdirAll(dir, 0755) - - _ = os.WriteFile(l.logPath, data, 0644) -} - -// load reads the log from disk -func (l *ActivityLog) load() { - data, err := os.ReadFile(l.logPath) - if err != nil { - return - } - - var entries []ActivityEntry - if err := json.Unmarshal(data, &entries); err != nil { - return - } - - l.mu.Lock() - l.entries = entries - l.mu.Unlock() -} - -// Clear clears the activity log -func (l *ActivityLog) Clear() { - l.mu.Lock() - l.entries = make([]ActivityEntry, 0) - l.mu.Unlock() - l.save() -} - -// Summary returns a summary of activity -func (l *ActivityLog) Summary() ActivitySummary { - l.mu.RLock() - defer l.mu.RUnlock() - - summary := ActivitySummary{ - TotalEntries: len(l.entries), - } - - for _, e := range l.entries { - switch e.Type { - case "file_change": - summary.FileChanges++ - case "agent_run": - summary.AgentRuns++ - case "finding": - summary.Findings++ - case "error": - summary.Errors++ - } - } - - if len(l.entries) > 0 { - summary.OldestEntry = l.entries[0].Timestamp - summary.NewestEntry = l.entries[len(l.entries)-1].Timestamp - } - - return summary -} - -// ActivitySummary provides a summary of activity -type ActivitySummary struct { - TotalEntries int `json:"total_entries"` - FileChanges int `json:"file_changes"` - AgentRuns int `json:"agent_runs"` - Findings int `json:"findings"` - Errors int `json:"errors"` - OldestEntry time.Time 
`json:"oldest_entry,omitempty"` - NewestEntry time.Time `json:"newest_entry,omitempty"` -} diff --git a/internal/agents/impl/watch_agent.go b/internal/agents/impl/watch_agent.go deleted file mode 100644 index cfcf5b1..0000000 --- a/internal/agents/impl/watch_agent.go +++ /dev/null @@ -1,572 +0,0 @@ -/* -Copyright © 2025 Joseph Goksu josephgoksu@gmail.com - -Package watch provides the WatchAgent for continuous codebase monitoring. -It watches for file changes and triggers appropriate agents for incremental analysis. -*/ -package impl - -import ( - "context" - "crypto/md5" - "fmt" - "io" - "os" - "path/filepath" - "strings" - "sync" - "time" - - "github.com/fsnotify/fsnotify" - "github.com/josephgoksu/TaskWing/internal/agents/core" - "github.com/josephgoksu/TaskWing/internal/knowledge" - "github.com/josephgoksu/TaskWing/internal/llm" - "github.com/josephgoksu/TaskWing/internal/patterns" -) - -// FileCategory represents the type of file for routing purposes -type FileCategory string - -const ( - FileCategoryDocs FileCategory = "docs" // *.md, docs/* - FileCategoryCode FileCategory = "code" // *.go, *.ts, *.js, etc. 
- FileCategoryDeps FileCategory = "deps" // go.mod, package.json - FileCategoryConfig FileCategory = "config" // *.yaml, *.json configs - FileCategoryGit FileCategory = "git" // .git/HEAD changes - FileCategoryIgnore FileCategory = "ignore" // Files to skip -) - -// FileChangeEvent represents a batched file change -type FileChangeEvent struct { - Path string - Operation string - Category FileCategory - Timestamp time.Time -} - -// WatchAgent monitors the filesystem and triggers agents on changes -type WatchAgent struct { - basePath string - llmConfig llm.Config - watcher *fsnotify.Watcher - debouncer *ChangeDebouncer - dispatcher *AgentDispatcher - stream *core.StreamingOutput - activityLog *ActivityLog - hashTracker *ContentHashTracker - verbose bool - ks *knowledge.Service - - // Control - ctx context.Context - cancel context.CancelFunc - wg sync.WaitGroup -} - -// WatchConfig holds configuration for the watch agent -type WatchConfig struct { - BasePath string - LLMConfig llm.Config - Verbose bool - IncludeGlobs []string // Only watch paths matching these globs - ExcludeGlobs []string // Skip paths matching these globs - Stream *core.StreamingOutput - Service *knowledge.Service -} - -// NewWatchAgent creates a new file watching agent -func NewWatchAgent(cfg WatchConfig) (*WatchAgent, error) { - watcher, err := fsnotify.NewWatcher() - if err != nil { - return nil, fmt.Errorf("create fsnotify watcher: %w", err) - } - - ctx, cancel := context.WithCancel(context.Background()) - - w := &WatchAgent{ - basePath: cfg.BasePath, - llmConfig: cfg.LLMConfig, - watcher: watcher, - verbose: cfg.Verbose, - stream: cfg.Stream, - ks: cfg.Service, - ctx: ctx, - cancel: cancel, - } - - // Initialize debouncer - w.debouncer = NewChangeDebouncer(w.handleBatch) - - // Initialize dispatcher with activity log - w.activityLog = NewActivityLog(cfg.BasePath) - w.dispatcher = NewAgentDispatcherWithLog(cfg.LLMConfig, cfg.BasePath, w.activityLog, cfg.Service) - - // Initialize content hash 
tracker for deduplication - w.hashTracker = NewContentHashTracker() - - return w, nil -} - -// Start begins watching for file changes -func (w *WatchAgent) Start() error { - // Add base path recursively - if err := w.addWatchRecursive(w.basePath); err != nil { - return fmt.Errorf("add watch paths: %w", err) - } - - if w.verbose { - fmt.Printf("📁 Watching: %s\n", w.basePath) - } - - // Start event loop - w.wg.Add(1) - go w.eventLoop() - - return nil -} - -// Stop stops the watch agent -func (w *WatchAgent) Stop() { - w.cancel() - _ = w.watcher.Close() - w.debouncer.Stop() - w.wg.Wait() -} - -// SetFindingsHandler sets the callback for handling agent findings. -// This MUST be set for proper deduplication via knowledge.Service.IngestFindings. -func (w *WatchAgent) SetFindingsHandler(handler FindingsHandler) { - w.dispatcher.SetFindingsHandler(handler) -} - -// eventLoop processes filesystem events -func (w *WatchAgent) eventLoop() { - defer w.wg.Done() - - for { - select { - case event, ok := <-w.watcher.Events: - if !ok { - return - } - w.handleEvent(event) - - case err, ok := <-w.watcher.Errors: - if !ok { - return - } - if w.verbose { - fmt.Printf("⚠️ Watch error: %v\n", err) - } - - case <-w.ctx.Done(): - return - } - } -} - -// handleEvent processes a single filesystem event -func (w *WatchAgent) handleEvent(event fsnotify.Event) { - // Get relative path - relPath, err := filepath.Rel(w.basePath, event.Name) - if err != nil { - return - } - - // Categorize the file - category := w.categorize(relPath) - if category == FileCategoryIgnore { - return - } - - // Determine operation - op := "modify" - switch { - case event.Op&fsnotify.Create != 0: - op = "create" - // If a new directory is created, watch it - if info, err := os.Stat(event.Name); err == nil && info.IsDir() { - _ = w.watcher.Add(event.Name) - } - case event.Op&fsnotify.Remove != 0: - op = "delete" - // Clear hash on delete - w.hashTracker.Remove(event.Name) - case event.Op&fsnotify.Rename != 0: - op = 
"rename" - w.hashTracker.Remove(event.Name) - } - - // For modify operations, check if content actually changed - if op == "modify" { - if !w.hashTracker.HasChanged(event.Name) { - if w.verbose { - fmt.Printf("⏭️ skip (no change): %s\n", relPath) - } - return - } - } - - // Queue the event for debouncing - change := FileChangeEvent{ - Path: relPath, - Operation: op, - Category: category, - Timestamp: time.Now(), - } - - w.debouncer.Add(change) - - // Log the file change - if w.activityLog != nil { - w.activityLog.LogFileChange(relPath, op, category) - } - - if w.verbose { - fmt.Printf("📝 %s: %s (%s)\n", op, relPath, category) - } - - if w.stream != nil { - w.stream.Emit(core.EventAgentStart, "watch", fmt.Sprintf("%s: %s", op, relPath), map[string]any{ - "category": string(category), - }) - } -} - -// categorize determines the FileCategory for a path -func (w *WatchAgent) categorize(relPath string) FileCategory { - name := filepath.Base(relPath) - ext := strings.ToLower(filepath.Ext(name)) - dir := filepath.Dir(relPath) - - // Ignore hidden files (except .env.example) - if strings.HasPrefix(name, ".") && name != ".env.example" { - return FileCategoryIgnore - } - - // Check ignored directories using centralized patterns - for ig := range patterns.IgnoredDirs { - if strings.Contains(relPath, ig+string(os.PathSeparator)) || name == ig { - return FileCategoryIgnore - } - } - - // Dependency files (high priority) - if patterns.IsDependencyFile(name) { - return FileCategoryDeps - } - - // Documentation - if ext == ".md" || strings.HasPrefix(dir, "docs") { - return FileCategoryDocs - } - - // Config files - if patterns.IsConfigFile(name, ext) { - return FileCategoryConfig - } - - // Code files - if patterns.IsCodeFile(ext) { - return FileCategoryCode - } - - return FileCategoryIgnore -} - -// handleBatch processes a batch of debounced changes -func (w *WatchAgent) handleBatch(changes []FileChangeEvent) { - if len(changes) == 0 { - return - } - - // Group by category - 
byCategory := make(map[FileCategory][]FileChangeEvent) - for _, c := range changes { - byCategory[c.Category] = append(byCategory[c.Category], c) - } - - if w.verbose { - fmt.Printf("🔄 Processing batch: %d changes\n", len(changes)) - } - - // Dispatch to appropriate agents - for category, categoryChanges := range byCategory { - w.dispatcher.Dispatch(w.ctx, category, categoryChanges) - } -} - -// addWatchRecursive adds the directory and all subdirectories to the watcher -func (w *WatchAgent) addWatchRecursive(dir string) error { - return filepath.WalkDir(dir, func(path string, d os.DirEntry, err error) error { - if err != nil { - return nil - } - - if !d.IsDir() { - return nil - } - - name := d.Name() - - // Skip ignored directories (build caches, dependencies, generated files) - ignoreDirs := []string{ - "node_modules", "vendor", ".git", "dist", "build", "__pycache__", ".next", - "test-results", "tmp", "go-build", ".taskwing", "coverage", ".cache", - "target", "bin", "obj", ".pytest_cache", ".mypy_cache", ".tox", - } - for _, ig := range ignoreDirs { - if name == ig { - return filepath.SkipDir - } - } - - if strings.HasPrefix(name, ".") && name != ".github" { - return filepath.SkipDir - } - - return w.watcher.Add(path) - }) -} - -// ChangeDebouncer batches rapid file changes -type ChangeDebouncer struct { - pending []FileChangeEvent - timer *time.Timer - mu sync.Mutex - onFlush func([]FileChangeEvent) - delay time.Duration - stopped bool -} - -// NewChangeDebouncer creates a new debouncer with the given flush callback -func NewChangeDebouncer(onFlush func([]FileChangeEvent)) *ChangeDebouncer { - return &ChangeDebouncer{ - pending: make([]FileChangeEvent, 0), - onFlush: onFlush, - delay: 500 * time.Millisecond, - } -} - -// Add queues a change event -func (d *ChangeDebouncer) Add(change FileChangeEvent) { - d.mu.Lock() - defer d.mu.Unlock() - - if d.stopped { - return - } - - d.pending = append(d.pending, change) - - // Reset timer - if d.timer != nil { - 
d.timer.Stop() - } - - // Determine delay based on category - delay := d.delay - switch change.Category { - case FileCategoryDeps: - delay = 2 * time.Second // Larger delay for deps - case FileCategoryDocs: - delay = 1 * time.Second - } - - d.timer = time.AfterFunc(delay, d.flush) -} - -// flush sends pending events to the handler -func (d *ChangeDebouncer) flush() { - d.mu.Lock() - events := d.pending - d.pending = make([]FileChangeEvent, 0) - d.mu.Unlock() - - if len(events) > 0 && d.onFlush != nil { - d.onFlush(events) - } -} - -// Stop stops the debouncer -func (d *ChangeDebouncer) Stop() { - d.mu.Lock() - defer d.mu.Unlock() - - d.stopped = true - if d.timer != nil { - d.timer.Stop() - } -} - -// FindingsHandler is called when an agent produces findings. -// This allows the caller to handle persistence (e.g., via knowledge.Service.IngestFindings). -type FindingsHandler func(ctx context.Context, findings []core.Finding, filePaths []string) error - -// AgentDispatcher routes file changes to appropriate agents -type AgentDispatcher struct { - llmConfig llm.Config - basePath string - activityLog *ActivityLog - findingsHandler FindingsHandler - ks *knowledge.Service - mu sync.Mutex -} - -// NewAgentDispatcherWithLog creates a dispatcher with activity logging -func NewAgentDispatcherWithLog(cfg llm.Config, basePath string, log *ActivityLog, ks *knowledge.Service) *AgentDispatcher { - return &AgentDispatcher{ - llmConfig: cfg, - basePath: basePath, - activityLog: log, - ks: ks, - } -} - -// SetFindingsHandler sets the callback for handling agent findings. -// This MUST be set for proper deduplication - without it, findings are logged but not persisted. 
-func (d *AgentDispatcher) SetFindingsHandler(handler FindingsHandler) { - d.mu.Lock() - defer d.mu.Unlock() - d.findingsHandler = handler -} - -// Dispatch routes changes to the appropriate agent -func (d *AgentDispatcher) Dispatch(ctx context.Context, category FileCategory, changes []FileChangeEvent) { - d.mu.Lock() - handler := d.findingsHandler - d.mu.Unlock() - - // Determine which agent to use - var agent core.Agent - switch category { - case FileCategoryCode: - // Use deterministic CodeAgent for code analysis - agent = NewCodeAgent(d.llmConfig, d.basePath) - case FileCategoryDocs: - agent = NewDocAgent(d.llmConfig) - case FileCategoryDeps: - agent = NewDepsAgent(d.llmConfig) - default: - return - } - - // Build input with changed files context - changedPaths := make([]string, len(changes)) - for i, c := range changes { - changedPaths[i] = c.Path - } - - input := core.Input{ - BasePath: d.basePath, - ProjectName: filepath.Base(d.basePath), - Mode: core.ModeWatch, - ChangedFiles: changedPaths, - ExistingContext: make(map[string]any), - } - - // Fetch existing nodes for context (Phase 3: No knowledge comparison fix) - if d.ks != nil { - agentName := agent.Name() - if existingNodes, err := d.ks.GetNodesByFiles(agentName, changedPaths); err == nil && len(existingNodes) > 0 { - input.ExistingContext["existing_nodes"] = existingNodes - - } - } - - // Run agent in background - actLog := d.activityLog - go func() { - // Close agent when goroutine exits to release LLM resources - if closeable, ok := agent.(core.CloseableAgent); ok { - defer func() { _ = closeable.Close() }() - } - - fmt.Printf(" 🤖 Running %s agent for %d changed files...\n", agent.Name(), len(changes)) - output, err := agent.Run(ctx, input) - if err != nil { - fmt.Printf(" ⚠️ %s agent error: %v\n", agent.Name(), err) - if actLog != nil { - actLog.LogAgentRun(agent.Name(), 0, 0, err) - } - return - } - fmt.Printf(" ✓ %s agent found %d findings (%.1fs)\n", agent.Name(), len(output.Findings), 
output.Duration.Seconds()) - - // Log successful run and findings - if actLog != nil { - actLog.LogAgentRun(agent.Name(), len(output.Findings), output.Duration, nil) - for _, f := range output.Findings { - actLog.LogFinding(agent.Name(), f.Title, string(f.Type)) - } - } - - // Persist findings via handler (uses knowledge.Service.IngestFindings for proper deduplication) - if handler != nil && len(output.Findings) > 0 { - if err := handler(ctx, output.Findings, changedPaths); err != nil { - fmt.Printf(" ⚠️ persist findings error: %v\n", err) - } - } else if handler == nil && len(output.Findings) > 0 { - fmt.Printf(" ⚠️ no findings handler configured - findings not persisted\n") - } - }() -} - -// ContentHashTracker tracks file content hashes to detect actual changes -type ContentHashTracker struct { - hashes map[string]string - mu sync.RWMutex -} - -// NewContentHashTracker creates a new content hash tracker -func NewContentHashTracker() *ContentHashTracker { - return &ContentHashTracker{ - hashes: make(map[string]string), - } -} - -// HasChanged checks if a file's content has changed since last check -// Returns true if file is new or content changed, false if unchanged -func (t *ContentHashTracker) HasChanged(path string) bool { - hash, err := t.computeHash(path) - if err != nil { - // If we can't read the file, assume it changed - return true - } - - t.mu.Lock() - defer t.mu.Unlock() - - oldHash, exists := t.hashes[path] - t.hashes[path] = hash - - if !exists { - // First time seeing this file - return true - } - - return hash != oldHash -} - -// Remove removes a file from the tracker -func (t *ContentHashTracker) Remove(path string) { - t.mu.Lock() - defer t.mu.Unlock() - delete(t.hashes, path) -} - -// computeHash calculates MD5 hash of file content -func (t *ContentHashTracker) computeHash(path string) (string, error) { - f, err := os.Open(path) - if err != nil { - return "", err - } - defer func() { _ = f.Close() }() - - h := md5.New() - if _, err := 
io.Copy(h, f); err != nil { - return "", err - } - - return fmt.Sprintf("%x", h.Sum(nil)), nil -} diff --git a/internal/app/ask.go b/internal/app/ask.go index 8483ce5..d1eea70 100644 --- a/internal/app/ask.go +++ b/internal/app/ask.go @@ -152,9 +152,6 @@ func (a *AskApp) Query(ctx context.Context, query string, opts AskOptions) (*Ask retrievalCfg.VectorWeight = 0 retrievalCfg.FTSWeight = 1.0 } - if opts.DisableRerank { - retrievalCfg.RerankingEnabled = false - } embeddingConfigWarning := "" if !opts.DisableVector && retrievalCfg.VectorWeight > 0 { embeddingProvider := a.ctx.LLMCfg.EmbeddingProvider @@ -167,8 +164,7 @@ func (a *AskApp) Query(ctx context.Context, query string, opts AskOptions) (*Ask } supportsEmbeddings := embeddingProvider == llm.ProviderOpenAI || embeddingProvider == llm.ProviderOllama || - embeddingProvider == llm.ProviderGemini || - embeddingProvider == llm.ProviderTEI + embeddingProvider == llm.ProviderGemini if !supportsEmbeddings { retrievalCfg.VectorWeight = 0 retrievalCfg.FTSWeight = 1.0 @@ -231,9 +227,6 @@ func (a *AskApp) Query(ctx context.Context, query string, opts AskOptions) (*Ask if cfg.QueryRewriteEnabled && !opts.NoRewrite { pipelineParts = append(pipelineParts, "Rewrite") } - if cfg.RerankingEnabled { - pipelineParts = append(pipelineParts, "Rerank") - } if cfg.GraphExpansionEnabled { pipelineParts = append(pipelineParts, "Graph") } diff --git a/internal/app/base.go b/internal/app/base.go index 22e43b1..d80de19 100644 --- a/internal/app/base.go +++ b/internal/app/base.go @@ -7,6 +7,7 @@ import ( "github.com/josephgoksu/TaskWing/internal/config" "github.com/josephgoksu/TaskWing/internal/llm" "github.com/josephgoksu/TaskWing/internal/memory" + "github.com/josephgoksu/TaskWing/internal/runner" ) // Context holds shared dependencies for all app services. 
@@ -15,7 +16,8 @@ import ( type Context struct { Repo *memory.Repository LLMCfg llm.Config - BasePath string // Project root path for source code access + BasePath string // Project root path for source code access + Runner runner.Runner // Optional: nil means use LLM API directly } // NewContext creates an app context with standard initialization. diff --git a/internal/app/plan.go b/internal/app/plan.go index d6825d1..92bb249 100644 --- a/internal/app/plan.go +++ b/internal/app/plan.go @@ -22,6 +22,7 @@ import ( "github.com/josephgoksu/TaskWing/internal/knowledge" "github.com/josephgoksu/TaskWing/internal/llm" "github.com/josephgoksu/TaskWing/internal/planner" + "github.com/josephgoksu/TaskWing/internal/runner" "github.com/josephgoksu/TaskWing/internal/task" _ "modernc.org/sqlite" // SQLite driver @@ -125,29 +126,58 @@ const ( // PlanApp provides plan lifecycle operations. // This is THE implementation - CLI and MCP both call these methods. type PlanApp struct { - ctx *Context - Repo task.Repository - ClarifierFactory func(llm.Config) GoalsClarifier - PlannerFactory func(llm.Config) TaskPlanner - ContextRetriever func(ctx context.Context, ks *knowledge.Service, goal, memoryPath string) (impl.SearchStrategyResult, error) + ctx *Context + Repo task.Repository + ClarifierFactory func(llm.Config) GoalsClarifier + PlannerFactory func(llm.Config) TaskPlanner + DecomposerFactory func(llm.Config) PhaseGoalDecomposer + ExpanderFactory func(llm.Config) PhaseExpander + ContextRetriever func(ctx context.Context, ks *knowledge.Service, goal, memoryPath string) (impl.SearchStrategyResult, error) // TaskEnricher executes ask queries to populate task ContextSummary. // If nil, tasks will not have embedded context (legacy behavior). TaskEnricher TaskContextEnricher } // NewPlanApp creates a new plan application service. +// If ctx.Runner is set, uses runner-backed agents (no API key needed). +// Otherwise falls back to LLM API-based agents. 
func NewPlanApp(ctx *Context) *PlanApp { pa := &PlanApp{ - ctx: ctx, - Repo: ctx.Repo, - ClarifierFactory: func(cfg llm.Config) GoalsClarifier { + ctx: ctx, + Repo: ctx.Repo, + ContextRetriever: impl.RetrieveContext, + } + + if ctx.Runner != nil { + // Runner-based: no API key needed + pa.ClarifierFactory = func(_ llm.Config) GoalsClarifier { + return runner.NewRunnerClarifier(ctx.Runner) + } + pa.PlannerFactory = func(_ llm.Config) TaskPlanner { + return runner.NewRunnerPlanner(ctx.Runner) + } + pa.DecomposerFactory = func(_ llm.Config) PhaseGoalDecomposer { + return runner.NewRunnerDecomposer(ctx.Runner) + } + pa.ExpanderFactory = func(_ llm.Config) PhaseExpander { + return runner.NewRunnerExpander(ctx.Runner) + } + } else { + // Fallback: existing LLM API agents (requires API key) + pa.ClarifierFactory = func(cfg llm.Config) GoalsClarifier { return impl.NewClarifyingAgent(cfg) - }, - PlannerFactory: func(cfg llm.Config) TaskPlanner { + } + pa.PlannerFactory = func(cfg llm.Config) TaskPlanner { return impl.NewPlanningAgent(cfg) - }, - ContextRetriever: impl.RetrieveContext, + } + pa.DecomposerFactory = func(cfg llm.Config) PhaseGoalDecomposer { + return impl.NewDecompositionAgent(cfg) + } + pa.ExpanderFactory = func(cfg llm.Config) PhaseExpander { + return impl.NewExpandAgent(cfg) + } } + // Initialize default TaskEnricher using AskApp pa.TaskEnricher = pa.defaultTaskEnricher return pa @@ -947,7 +977,9 @@ func (a *PlanApp) Audit(ctx context.Context, opts AuditOptions) (*AuditResult, e } reportJSON, marshalErr := json.Marshal(report) if marshalErr == nil { - _ = repo.UpdatePlanAuditReport(plan.ID, newStatus, string(reportJSON)) + if err := repo.UpdatePlanAuditReport(plan.ID, newStatus, string(reportJSON)); err != nil { + fmt.Fprintf(os.Stderr, "[debug] failed to update audit report: %v\n", err) + } } return result, nil @@ -994,7 +1026,9 @@ func (a *PlanApp) Audit(ctx context.Context, opts AuditOptions) (*AuditResult, e auditReport := 
autoFixResult.ToAuditReportWithFixes() reportJSON, marshalErr := json.Marshal(auditReport) if marshalErr == nil { - _ = repo.UpdatePlanAuditReport(plan.ID, newStatus, string(reportJSON)) + if err := repo.UpdatePlanAuditReport(plan.ID, newStatus, string(reportJSON)); err != nil { + fmt.Fprintf(os.Stderr, "[debug] failed to update audit report: %v\n", err) + } } return result, nil @@ -1347,8 +1381,8 @@ func (a *PlanApp) Decompose(ctx context.Context, opts DecomposeOptions) (*Decomp } } - // Create and run DecompositionAgent - decomposeAgent := impl.NewDecompositionAgent(llmCfg) + // Create and run DecompositionAgent (runner-backed or LLM API-backed) + decomposeAgent := a.DecomposerFactory(llmCfg) defer func() { _ = decomposeAgent.Close() }() input := core.Input{ @@ -1428,7 +1462,9 @@ func (a *PlanApp) Decompose(ctx context.Context, opts DecomposeOptions) (*Decomp LastUpdated: time.Now().UTC().Format(time.RFC3339), } if draftJSON, err := json.Marshal(draftState); err == nil { - _ = repo.UpdatePlanDraftState(plan.ID, string(draftJSON)) + if err := repo.UpdatePlanDraftState(plan.ID, string(draftJSON)); err != nil { + fmt.Fprintf(os.Stderr, "[debug] failed to update draft state: %v\n", err) + } } return &DecomposeResult{ @@ -1521,8 +1557,8 @@ func (a *PlanApp) Expand(ctx context.Context, opts ExpandOptions) (*ExpandResult } } - // Create and run ExpandAgent - expandAgent := impl.NewExpandAgent(llmCfg) + // Create and run ExpandAgent (runner-backed or LLM API-backed) + expandAgent := a.ExpanderFactory(llmCfg) defer func() { _ = expandAgent.Close() }() input := core.Input{ @@ -1694,7 +1730,9 @@ func (a *PlanApp) Finalize(ctx context.Context, opts FinalizeOptions) (*Finalize } // Clear draft state - _ = repo.UpdatePlanDraftState(plan.ID, "") + if err := repo.UpdatePlanDraftState(plan.ID, ""); err != nil { + fmt.Fprintf(os.Stderr, "[debug] failed to clear draft state: %v\n", err) + } return &FinalizeResult{ Success: true, diff --git a/internal/app/task.go 
b/internal/app/task.go index dae400a..3969d73 100644 --- a/internal/app/task.go +++ b/internal/app/task.go @@ -103,7 +103,7 @@ func (a *TaskApp) Next(ctx context.Context, opts TaskNextOptions) (*TaskResult, if activePlan == nil { return &TaskResult{ Success: false, - Message: "No active plan found. Create one with 'taskwing goal \"\"'.", + Message: "No active plan found. Create one with 'taskwing plan \"\"'.", }, nil } planID = activePlan.ID @@ -174,7 +174,9 @@ func (a *TaskApp) Next(ctx context.Context, opts TaskNextOptions) (*TaskResult, verifier := task.NewGitVerifier(workDir) baseline, baselineErr := verifier.GetActualModifications(ctx) if baselineErr == nil && len(baseline) > 0 { - _ = repo.SetGitBaseline(nextTask.ID, baseline) + if err := repo.SetGitBaseline(nextTask.ID, baseline); err != nil { + fmt.Fprintf(os.Stderr, "[debug] failed to set git baseline: %v\n", err) + } } } @@ -295,8 +297,9 @@ func (a *TaskApp) Start(ctx context.Context, opts TaskStartOptions) (*TaskResult verifier := task.NewGitVerifier(workDir) baseline, err := verifier.GetActualModifications(ctx) if err == nil && len(baseline) > 0 { - // Save baseline - ignore errors, this is best-effort - _ = repo.SetGitBaseline(opts.TaskID, baseline) + if setErr := repo.SetGitBaseline(opts.TaskID, baseline); setErr != nil { + fmt.Fprintf(os.Stderr, "[debug] failed to set git baseline: %v\n", setErr) + } } } @@ -469,7 +472,7 @@ func (a *TaskApp) Complete(ctx context.Context, opts TaskCompleteOptions) (*Task // Commit task progress with conventional commit message if err := gitClient.CommitTaskProgress(taskBeforeComplete.Title, taskBeforeComplete.Scope); err != nil { - fmt.Fprintf(os.Stderr, "⚠️ git commit failed: %v\n", err) + fmt.Fprintf(os.Stderr, "[warn] git commit failed: %v\n", err) } else { gitCommitApplied = true } @@ -477,7 +480,7 @@ func (a *TaskApp) Complete(ctx context.Context, opts TaskCompleteOptions) (*Task // Push to remote if we have a branch and commit was successful if gitCommitApplied 
&& gitBranch != "" { if err := gitClient.PushTaskProgress(gitBranch); err != nil { - fmt.Fprintf(os.Stderr, "⚠️ git push failed: %v\n", err) + fmt.Fprintf(os.Stderr, "[warn] git push failed: %v\n", err) } else { gitPushApplied = true } @@ -543,7 +546,9 @@ func (a *TaskApp) Complete(ctx context.Context, opts TaskCompleteOptions) (*Task auditReport := auditResult.ToAuditReportWithFixes() reportJSON, marshalErr := json.Marshal(auditReport) if marshalErr == nil { - _ = repo.UpdatePlanAuditReport(plan.ID, auditPlanStatus, string(reportJSON)) + if err := repo.UpdatePlanAuditReport(plan.ID, auditPlanStatus, string(reportJSON)); err != nil { + fmt.Fprintf(os.Stderr, "[debug] failed to update audit report: %v\n", err) + } } } diff --git a/internal/bootstrap/initializer.go b/internal/bootstrap/initializer.go index 934552e..fa333fc 100644 --- a/internal/bootstrap/initializer.go +++ b/internal/bootstrap/initializer.go @@ -11,6 +11,8 @@ import ( "sort" "strings" "time" + + "github.com/josephgoksu/TaskWing/internal/ui" ) // Initializer handles the setup of TaskWing project structure and integrations. 
@@ -141,7 +143,7 @@ func (i *Initializer) AdoptAIConfig(aiName string, verbose bool) (*AdoptionResul } if verbose { - fmt.Printf(" ✓ Adopted unmanaged config for %s (backup: %s)\n", aiName, backupDir) + fmt.Printf(" %s Adopted unmanaged config for %s (backup: %s)\n", ui.IconOK, aiName, backupDir) } return &AdoptionResult{ @@ -161,19 +163,19 @@ func (i *Initializer) setupAIIntegrations(verbose bool, selectedAIs []string, sh if _, ok := aiHelpers[ai]; ok { validAIs = append(validAIs, ai) } else if verbose { - fmt.Fprintf(os.Stderr, "⚠️ Unknown AI assistant '%s' (skipping)\n", ai) + fmt.Fprintf(os.Stderr, "%s Unknown AI assistant '%s' (skipping)\n", ui.IconWarn, ai) } } if len(validAIs) == 0 { if verbose { - fmt.Println("⚠️ No valid AI assistants specified") + fmt.Printf("%s No valid AI assistants specified\n", ui.IconWarn) } return nil } if showHeader { - fmt.Printf("🔧 Setting up AI integrations for: %s\n", strings.Join(validAIs, ", ")) + fmt.Printf("%s Setting up AI integrations for: %s\n", ui.IconWrench, strings.Join(validAIs, ", ")) } for _, ai := range validAIs { @@ -184,24 +186,24 @@ func (i *Initializer) setupAIIntegrations(verbose bool, selectedAIs []string, sh // Install hooks config if err := i.InstallHooksConfig(ai, verbose); err != nil { - fmt.Fprintf(os.Stderr, "⚠️ Failed to install hooks for %s: %v\n", ai, err) + fmt.Fprintf(os.Stderr, "%s Failed to install hooks for %s: %v\n", ui.IconWarn, ai, err) } if showHeader { - fmt.Printf(" ✓ Created local config for %s\n", ai) + fmt.Printf(" %s Created local config for %s\n", ui.IconOK, ai) } } // Update agent docs once (applies to all: CLAUDE.md, GEMINI.md, AGENTS.md) if err := i.updateAgentDocs(verbose); err != nil { - fmt.Fprintf(os.Stderr, "⚠️ Failed to update agent docs: %v\n", err) + fmt.Fprintf(os.Stderr, "%s Failed to update agent docs: %v\n", ui.IconWarn, err) } return nil } func (i *Initializer) createStructure(verbose bool) error { - fmt.Println("📁 Creating .taskwing/ structure...") + fmt.Printf("%s 
Creating .taskwing/ structure...\n", ui.IconFolder) dirs := []string{ ".taskwing", ".taskwing/memory", @@ -213,7 +215,7 @@ func (i *Initializer) createStructure(verbose bool) error { return fmt.Errorf("create %s: %w", dir, err) } if verbose { - fmt.Printf(" ✓ Created %s\n", dir) + fmt.Printf(" %s Created %s\n", ui.IconOK, dir) } } return nil @@ -310,13 +312,13 @@ func MCPToolNames() []string { // CoreCommand describes a CLI command included in documentation. type CoreCommand struct { - Display string `json:"display"` // e.g. "taskwing goal \"\"" + Display string `json:"display"` // e.g. "taskwing plan \"\"" } // CoreCommands is the curated list of CLI commands shown in documentation. var CoreCommands = []CoreCommand{ {"taskwing bootstrap"}, - {"taskwing goal \"\""}, + {"taskwing plan \"\""}, {"taskwing ask \"\""}, {"taskwing task"}, {"taskwing plan status"}, @@ -419,7 +421,7 @@ func pruneStaleSlashCommands(commandsDir, ext string, verbose bool) error { return fmt.Errorf("remove stale slash command %s: %w", name, err) } if verbose { - fmt.Printf(" ✓ Removed stale command %s\n", filepath.Join(commandsDir, name)) + fmt.Printf(" %s Removed stale command %s\n", ui.IconOK, filepath.Join(commandsDir, name)) } } @@ -485,7 +487,7 @@ description: %s return fmt.Errorf("create %s: %w", fileName, err) } if verbose { - fmt.Printf(" ✓ Created %s/%s\n", cfg.commandsDir, fileName) + fmt.Printf(" %s Created %s/%s\n", ui.IconOK, cfg.commandsDir, fileName) } } @@ -541,7 +543,7 @@ func (i *Initializer) createSingleFileInstructions(aiName string, verbose bool) return fmt.Errorf("backup legacy directory: %w", err) } if verbose { - fmt.Printf(" ✓ Backed up legacy %s/ directory\n", legacyDirName) + fmt.Printf(" %s Backed up legacy %s/ directory\n", ui.IconOK, legacyDirName) } } } @@ -552,7 +554,7 @@ func (i *Initializer) createSingleFileInstructions(aiName string, verbose bool) if !strings.Contains(string(existingContent), "") { // User owns this file - do not overwrite if verbose { - 
fmt.Printf(" ⚠️ Skipping %s - file exists and is user-managed\n", cfg.singleFileName) + fmt.Printf(" %s Skipping %s - file exists and is user-managed\n", ui.IconWarn, cfg.singleFileName) } // Clean up backup since we're not proceeding if legacyBackup != "" { @@ -612,12 +614,12 @@ func (i *Initializer) createSingleFileInstructions(aiName string, verbose bool) if legacyBackup != "" { _ = os.RemoveAll(legacyBackup) if verbose { - fmt.Printf(" ✓ Removed legacy %s/ directory\n", legacyDirName) + fmt.Printf(" %s Removed legacy %s/ directory\n", ui.IconOK, legacyDirName) } } if verbose { - fmt.Printf(" ✓ Created %s/%s\n", cfg.commandsDir, cfg.singleFileName) + fmt.Printf(" %s Created %s/%s\n", ui.IconOK, cfg.commandsDir, cfg.singleFileName) } return nil @@ -681,7 +683,7 @@ description: %s } if verbose { - fmt.Printf(" ✓ Created %s/%s.md\n", cfg.commandsDir, cmd.BaseName) + fmt.Printf(" %s Created %s/%s.md\n", ui.IconOK, cfg.commandsDir, cmd.BaseName) } } @@ -904,7 +906,7 @@ func (i *Initializer) InstallHooksConfig(aiName string, verbose bool) error { } if verbose { - fmt.Printf(" ✓ Created hooks config: %s\n", settingsPath) + fmt.Printf(" %s Created hooks config: %s\n", ui.IconOK, settingsPath) fmt.Println(" ℹ️ If Claude Code is already running, review/reload hooks from /hooks for changes to take effect.") } return nil @@ -977,7 +979,7 @@ func (i *Initializer) installOpenCodePlugin(verbose bool) error { if !strings.Contains(string(existingContent), "TASKWING_MANAGED_PLUGIN") { // User owns this file - do not overwrite if verbose { - fmt.Printf(" ⚠️ Skipping taskwing-hooks.js - file exists and is user-managed\n") + fmt.Printf(" %s Skipping taskwing-hooks.js - file exists and is user-managed\n", ui.IconWarn) } return nil } @@ -991,7 +993,7 @@ func (i *Initializer) installOpenCodePlugin(verbose bool) error { } if verbose { - fmt.Printf(" ✓ Created OpenCode plugin: .opencode/plugins/taskwing-hooks.js\n") + fmt.Printf(" %s Created OpenCode plugin: 
.opencode/plugins/taskwing-hooks.js\n", ui.IconOK) } return nil } @@ -1134,7 +1136,7 @@ func (i *Initializer) updateAgentDocs(verbose bool) error { action = "updated" } else if hasStartMarker != hasEndMarker { // Partial markers - warn and skip to avoid corruption - fmt.Fprintf(os.Stderr, " ⚠️ %s has incomplete TaskWing markers - skipping (please fix manually)\n", fileName) + fmt.Fprintf(os.Stderr, " %s %s has incomplete TaskWing markers - skipping (please fix manually)\n", ui.IconWarn, fileName) continue } else if legacyStart, legacyEnd := findLegacyTaskWingSection(contentStr); legacyStart != -1 { // Legacy content without markers - replace with new marked section @@ -1156,7 +1158,7 @@ func (i *Initializer) updateAgentDocs(verbose bool) error { return fmt.Errorf("update %s: %w", fileName, err) } if verbose { - fmt.Printf(" ✓ TaskWing docs %s in %s\n", action, fileName) + fmt.Printf(" %s TaskWing docs %s in %s\n", ui.IconOK, action, fileName) } } else if verbose { fmt.Printf(" ℹ️ TaskWing docs unchanged in %s\n", fileName) diff --git a/internal/bootstrap/planner.go b/internal/bootstrap/planner.go index 600e8ac..604b1e7 100644 --- a/internal/bootstrap/planner.go +++ b/internal/bootstrap/planner.go @@ -7,8 +7,10 @@ import ( "slices" "sort" "strings" + "time" "github.com/josephgoksu/TaskWing/internal/project" + "github.com/josephgoksu/TaskWing/internal/ui" ) // BootstrapMode represents the high-level mode of operation. 
@@ -109,7 +111,7 @@ type Flags struct { Preview bool `json:"preview"` // Dry-run, no writes SkipInit bool `json:"skip_init"` // Skip initialization phase SkipIndex bool `json:"skip_index"` // Skip code indexing - SkipAnalyze bool `json:"skip_analyze"` // Skip LLM analysis (for CI/testing) + SkipAnalyze bool `json:"skip_analyze"` // Skip LLM analysis (auto-enabled when AI CLI detected; opt out with --no-analyze) Force bool `json:"force"` // Force index even on large codebases (--force flag) Resume bool `json:"resume"` // Resume from last checkpoint (skip completed agents) OnlyAgents []string `json:"only_agents"` // Run only specified agents @@ -118,7 +120,10 @@ type Flags struct { TraceFile string `json:"trace_file,omitempty"` Verbose bool `json:"verbose"` Quiet bool `json:"quiet"` - Debug bool `json:"debug"` // Enable debug logging (dumps project context, git paths, etc.) + Debug bool `json:"debug"` // Enable debug logging (dumps project context, git paths, etc.) + PreferCLI string `json:"prefer_cli"` // Preferred AI CLI for runner-based analysis (claude, gemini, codex) + Timeout time.Duration `json:"timeout,omitempty"` // Max time per runner invocation + Model string `json:"model,omitempty"` // Model override for AI CLI (e.g., "sonnet", "opus") } // Plan captures the decisions about what to do. 
@@ -349,7 +354,7 @@ func DecidePlan(snap *Snapshot, flags Flags) *Plan { } } - // LLM analysis runs by default unless --skip-analyze is set + // LLM analysis runs when auto-detected or explicitly requested; skipped with --no-analyze if !flags.SkipAnalyze { plan.RequiresLLMConfig = true if !slices.Contains(plan.Actions, ActionLLMAnalyze) { @@ -423,7 +428,7 @@ func decideActions(snap *Snapshot, flags Flags, mode BootstrapMode) []Action { actions = append(actions, ActionExtractMetadata) } - // LLM analysis runs by default unless skipped + // LLM analysis runs when auto-detected or explicitly enabled if !flags.SkipAnalyze { actions = append(actions, ActionLLMAnalyze) } @@ -526,7 +531,7 @@ func generateSkippedActions(snap *Snapshot, flags Flags) []string { } if flags.SkipAnalyze { - skipped = append(skipped, "llm_analyze (reason: --skip-analyze flag)") + skipped = append(skipped, "llm_analyze (reason: --no-analyze flag or no AI CLI detected)") } if flags.Preview { @@ -774,12 +779,11 @@ func countSourceFiles(basePath string) int { return count } -// FormatPlanSummary returns a human-readable summary of the plan. -// Always shown, even in quiet mode. -func FormatPlanSummary(plan *Plan, quiet bool) string { +// FormatPlanDebugLine returns a single machine-readable line for --debug mode. +// Example: Bootstrap: mode=run actions=[index_code,extract_metadata,llm_analyze] warnings=0 +func FormatPlanDebugLine(plan *Plan) string { var sb strings.Builder - // Always show single-line status fmt.Fprintf(&sb, "Bootstrap: mode=%s", plan.Mode) if len(plan.Actions) > 0 { @@ -803,6 +807,17 @@ func FormatPlanSummary(plan *Plan, quiet bool) string { fmt.Fprintf(&sb, " global_mcp_drift_detected=%s", strings.Join(plan.GlobalMCPDriftAIs, ",")) } + return sb.String() +} + +// FormatPlanSummary returns a human-readable summary of the plan. +// In quiet mode, returns only the debug line. In normal mode, returns +// detected state, drift info, skipped actions, and warnings. 
+func FormatPlanSummary(plan *Plan, quiet bool) string { + var sb strings.Builder + + // Always include the debug line for backward compatibility + sb.WriteString(FormatPlanDebugLine(plan)) sb.WriteString("\n") // Detailed output (not in quiet mode) @@ -813,12 +828,6 @@ func FormatPlanSummary(plan *Plan, quiet bool) string { fmt.Fprintf(&sb, "Workspace: Multi-repo (%d repositories detected)\n", len(plan.DetectedRepos)) } - if len(plan.Actions) > 0 { - sb.WriteString("\nActions:\n") - for _, summary := range plan.ActionSummary { - fmt.Fprintf(&sb, " • %s\n", summary) - } - } if len(plan.ManagedDriftAIs) > 0 || len(plan.UnmanagedDriftAIs) > 0 || len(plan.GlobalMCPDriftAIs) > 0 { sb.WriteString("\nDrift:\n") if len(plan.ManagedDriftAIs) > 0 { @@ -842,14 +851,7 @@ func FormatPlanSummary(plan *Plan, quiet bool) string { if len(plan.Warnings) > 0 { sb.WriteString("\nWarnings:\n") for _, warning := range plan.Warnings { - fmt.Fprintf(&sb, " ⚠️ %s\n", warning) - } - } - - if len(plan.Reasons) > 0 { - sb.WriteString("\nWhy:\n") - for _, reason := range plan.Reasons { - fmt.Fprintf(&sb, " → %s\n", reason) + fmt.Fprintf(&sb, " %s %s\n", ui.IconWarn, warning) } } } diff --git a/internal/bootstrap/service.go b/internal/bootstrap/service.go index dc42026..70d58d4 100644 --- a/internal/bootstrap/service.go +++ b/internal/bootstrap/service.go @@ -14,6 +14,7 @@ import ( "github.com/josephgoksu/TaskWing/internal/llm" "github.com/josephgoksu/TaskWing/internal/memory" "github.com/josephgoksu/TaskWing/internal/project" + "github.com/josephgoksu/TaskWing/internal/ui" ) // Service handles the bootstrapping process of extracting architectural knowledge. 
@@ -123,14 +124,14 @@ func (s *Service) ProcessAndSaveResults(ctx context.Context, results []core.Outp reportPath := filepath.Join(s.basePath, ".taskwing", "last-bootstrap-report.json") if err := saveReport(reportPath, report); err != nil { // Non-fatal warning - fmt.Fprintf(os.Stderr, "⚠️ Failed to save bootstrap report: %v\n", err) + fmt.Fprintf(os.Stderr, "%s Failed to save bootstrap report: %v\n", ui.IconWarn, err) } // 2. Print summary (could serve as return value if we want pure separation, but fine here for CLI svc) printCoverageSummary(report) if isPreview { - fmt.Println("\n💡 This was a preview. Run 'taskwing bootstrap' to save to memory.") + ui.PrintHint("This was a preview. Run 'taskwing bootstrap' to save to memory.") return nil } @@ -175,9 +176,9 @@ func (s *Service) ingestToMemory(ctx context.Context, findings []core.Finding, r projectName := filepath.Base(s.basePath) if err := repo.GenerateArchitectureMD(projectName); err != nil { // Log warning but don't fail bootstrap - fmt.Fprintf(os.Stderr, "⚠️ Failed to generate ARCHITECTURE.md: %v\n", err) + fmt.Fprintf(os.Stderr, "%s Failed to generate ARCHITECTURE.md: %v\n", ui.IconWarn, err) } else if !isQuiet { - fmt.Println(" ✓ Generated .taskwing/ARCHITECTURE.md") + fmt.Printf(" %s Generated .taskwing/ARCHITECTURE.md\n", ui.IconOK) } return nil @@ -191,13 +192,13 @@ func (s *Service) generateOverviewIfNeeded(ctx context.Context, repo *memory.Rep } if existing != nil { if verbose { - fmt.Println("\n📋 Project overview already exists (re-run bootstrap with --force to refresh)") + fmt.Printf("\n%s Project overview already exists (re-run bootstrap with --force to refresh)\n", ui.IconTask) } return nil } if verbose { - fmt.Println("\n📋 Generating project overview...") + fmt.Printf("\n%s Generating project overview...\n", ui.IconTask) } analyzer := NewOverviewAnalyzer(s.llmCfg, s.basePath) @@ -211,7 +212,7 @@ func (s *Service) generateOverviewIfNeeded(ctx context.Context, repo *memory.Rep } if verbose { - 
fmt.Println(" ✓ Project overview generated") + fmt.Printf(" %s Project overview generated\n", ui.IconOK) fmt.Printf(" \"%s\"\n", overview.ShortDescription) } return nil @@ -235,32 +236,22 @@ func (s *Service) RunDeterministicBootstrap(ctx context.Context, isQuiet bool) ( } defer func() { _ = repo.Close() }() - if !isQuiet { - fmt.Println() - fmt.Println("📊 Extracting Project Metadata") - fmt.Println("──────────────────────────────") - } + // Phase header is now printed by the caller (executeExtractMetadata) var findings []core.Finding startTime := time.Now() // 1. Extract Git Statistics (deterministic) - if !isQuiet { - fmt.Print(" 📈 Analyzing git history...") - } gitParser := NewGitStatParser(s.basePath) gitStats, err := gitParser.Parse() if err != nil { - // Track warning instead of silently swallowing result.Warnings = append(result.Warnings, fmt.Sprintf("git stats: %v", err)) - if !isQuiet { - fmt.Printf(" skipped (%v)\n", err) - } } else { if !isQuiet { - fmt.Printf(" %d commits, %d contributors\n", gitStats.TotalCommits, len(gitStats.Contributors)) + ui.PrintPhaseResult( + fmt.Sprintf("Git: %d commits, %d contributors", gitStats.TotalCommits, len(gitStats.Contributors)), + time.Since(startTime)) } - // Convert to finding for storage (deterministic bootstrap data) findings = append(findings, core.Finding{ Type: memory.NodeTypeMetadata, Title: "Git Repository Statistics", @@ -275,35 +266,15 @@ func (s *Service) RunDeterministicBootstrap(ctx context.Context, isQuiet bool) ( } // 2. 
Load Documentation Files (deterministic) - if !isQuiet { - fmt.Print(" 📄 Loading documentation...") - } + docStart := time.Now() docLoader := NewDocLoader(s.basePath) docs, err := docLoader.Load() if err != nil { - // Track warning instead of silently swallowing result.Warnings = append(result.Warnings, fmt.Sprintf("doc loader: %v", err)) - if !isQuiet { - fmt.Printf(" failed (%v)\n", err) - } } else { if !isQuiet { - // Show category breakdown for better visibility - categories := make(map[string]int) - for _, doc := range docs { - categories[doc.Category]++ - } - fmt.Printf(" %d files", len(docs)) - if len(categories) > 0 { - var parts []string - for cat, count := range categories { - parts = append(parts, fmt.Sprintf("%d %s", count, cat)) - } - fmt.Printf(" (%s)", joinMax(parts, 3)) - } - fmt.Println() + ui.PrintPhaseResult(fmt.Sprintf("Docs: %d files loaded", len(docs)), time.Since(docStart)) } - // Convert each doc to a finding for storage and RAG retrieval for _, doc := range docs { findings = append(findings, core.Finding{ Type: memory.NodeTypeDocumentation, @@ -321,7 +292,7 @@ func (s *Service) RunDeterministicBootstrap(ctx context.Context, isQuiet bool) ( if len(findings) == 0 { if !isQuiet { - fmt.Println(" ⚠️ No metadata extracted (not a git repo or no docs)") + fmt.Printf(" %s No metadata extracted (not a git repo or no docs)\n", ui.IconWarn) } result.Warnings = append(result.Warnings, "no metadata extracted (not a git repo or no docs)") return result, nil @@ -331,21 +302,13 @@ func (s *Service) RunDeterministicBootstrap(ctx context.Context, isQuiet bool) ( ks := knowledge.NewService(repo, s.llmCfg) ks.SetBasePath(s.basePath) - if !isQuiet { - fmt.Print(" 💾 Storing to memory...") - } - + storeStart := time.Now() if err := ks.IngestFindings(ctx, findings, nil, false); err != nil { - if !isQuiet { - fmt.Println(" failed") - } return nil, fmt.Errorf("ingest metadata: %w", err) } - elapsed := time.Since(startTime).Round(time.Millisecond) if !isQuiet { - 
fmt.Printf(" done (%v)\n", elapsed) - fmt.Printf("\n ✅ Extracted %d items in %v\n", len(findings), elapsed) + ui.PrintPhaseResult(fmt.Sprintf("Stored %d items to memory", len(findings)), time.Since(storeStart)) } result.FindingsCount = len(findings) @@ -412,7 +375,7 @@ func saveReport(path string, report *core.BootstrapReport) error { func printCoverageSummary(report *core.BootstrapReport) { fmt.Println() - fmt.Println("📊 Bootstrap Coverage Report") + fmt.Printf("%s Bootstrap Coverage Report\n", ui.IconStats) fmt.Println("────────────────────────────") fmt.Printf(" Files analyzed: %d\n", report.Coverage.FilesAnalyzed) fmt.Printf(" Files skipped: %d\n", report.Coverage.FilesSkipped) @@ -430,9 +393,9 @@ func printCoverageSummary(report *core.BootstrapReport) { fmt.Println() fmt.Println(" Per-agent coverage:") for name, ar := range report.AgentReports { - status := "✓" + status := ui.IconOK.String() if ar.Error != "" { - status = "✗" + status = ui.IconFail.String() } fileWord := "files" if ar.Coverage.FilesAnalyzed == 1 { @@ -446,5 +409,5 @@ func printCoverageSummary(report *core.BootstrapReport) { } fmt.Println() - fmt.Printf("📄 Full report: .taskwing/last-bootstrap-report.json\n") + fmt.Printf("%s Full report: .taskwing/last-bootstrap-report.json\n", ui.IconDesc) } diff --git a/internal/compress/filters.go b/internal/compress/filters.go new file mode 100644 index 0000000..49043a9 --- /dev/null +++ b/internal/compress/filters.go @@ -0,0 +1,219 @@ +package compress + +import ( + "bytes" + "fmt" + "regexp" + "strings" +) + +var ansiRegex = regexp.MustCompile(`\x1b\[[0-9;]*[a-zA-Z]`) + +// StripANSI removes ANSI escape codes from output. +func StripANSI(data []byte) []byte { + return ansiRegex.ReplaceAll(data, nil) +} + +// CollapseWhitespace normalizes excessive blank lines to at most one. 
+func CollapseWhitespace(data []byte) []byte { + lines := bytes.Split(data, []byte("\n")) + var result [][]byte + blankCount := 0 + for _, line := range lines { + trimmed := bytes.TrimSpace(line) + if len(trimmed) == 0 { + blankCount++ + if blankCount <= 1 { + result = append(result, nil) + } + } else { + blankCount = 0 + result = append(result, line) + } + } + return bytes.Join(result, []byte("\n")) +} + +// TruncatePaths converts absolute paths to relative paths based on common prefixes. +func TruncatePaths(data []byte) []byte { + lines := bytes.Split(data, []byte("\n")) + // Find common path prefix + var prefix string + for _, line := range lines { + s := string(line) + if idx := strings.Index(s, "/Users/"); idx >= 0 { + end := strings.IndexByte(s[idx:], ' ') + if end < 0 { + end = len(s) - idx + } + path := s[idx : idx+end] + parts := strings.Split(path, "/") + if len(parts) > 4 { + candidate := strings.Join(parts[:4], "/") + if prefix == "" { + prefix = candidate + } + } + } + } + if prefix == "" { + return data + } + return bytes.ReplaceAll(data, []byte(prefix+"/"), []byte("./")) +} + +// DeduplicateLines collapses consecutive identical lines with a count. +func DeduplicateLines(data []byte) []byte { + lines := bytes.Split(data, []byte("\n")) + if len(lines) <= 1 { + return data + } + + var result [][]byte + current := lines[0] + count := 1 + + for i := 1; i < len(lines); i++ { + if bytes.Equal(lines[i], current) { + count++ + } else { + if count > 2 { + result = append(result, current) + result = append(result, []byte(fmt.Sprintf(" ... (%d identical lines)", count-1))) + } else { + for j := 0; j < count; j++ { + result = append(result, current) + } + } + current = lines[i] + count = 1 + } + } + // Flush last group + if count > 2 { + result = append(result, current) + result = append(result, []byte(fmt.Sprintf(" ... 
(%d identical lines)", count-1))) + } else { + for j := 0; j < count; j++ { + result = append(result, current) + } + } + + return bytes.Join(result, []byte("\n")) +} + +var progressRegex = regexp.MustCompile(`(?m)^.*(\r|[\[=>#\-]{3,}|\.{3,}|\d+%|ETA|eta).*$`) + +// StripProgress removes progress bars, spinners, and percentage indicators. +func StripProgress(data []byte) []byte { + return progressRegex.ReplaceAll(data, nil) +} + +// StripComments removes comment lines from linter output (lines starting with // or #). +func StripComments(data []byte) []byte { + lines := bytes.Split(data, []byte("\n")) + var result [][]byte + for _, line := range lines { + trimmed := bytes.TrimSpace(line) + if bytes.HasPrefix(trimmed, []byte("//")) || bytes.HasPrefix(trimmed, []byte("#")) { + continue + } + result = append(result, line) + } + return bytes.Join(result, []byte("\n")) +} + +// LimitLineCount caps output at N lines with a summary of truncated lines. +func LimitLineCount(maxLines int) Filter { + return func(data []byte) []byte { + lines := bytes.Split(data, []byte("\n")) + if len(lines) <= maxLines { + return data + } + truncated := lines[:maxLines] + truncated = append(truncated, []byte(fmt.Sprintf("\n... (%d more lines truncated)", len(lines)-maxLines))) + return bytes.Join(truncated, []byte("\n")) + } +} + +// GroupByDirectory groups file listings by directory. +func GroupByDirectory(data []byte) []byte { + lines := bytes.Split(data, []byte("\n")) + groups := make(map[string][]string) + var order []string + + for _, line := range lines { + s := strings.TrimSpace(string(line)) + if s == "" { + continue + } + dir := "." 
+ if idx := strings.LastIndex(s, "/"); idx >= 0 { + dir = s[:idx] + } + if _, exists := groups[dir]; !exists { + order = append(order, dir) + } + groups[dir] = append(groups[dir], s) + } + + var buf bytes.Buffer + for _, dir := range order { + files := groups[dir] + buf.WriteString(fmt.Sprintf("%s/ (%d files)\n", dir, len(files))) + for _, f := range files { + buf.WriteString(" " + f + "\n") + } + } + return buf.Bytes() +} + +// TruncateJSON truncates large JSON values to prevent bloated output. +func TruncateJSON(data []byte) []byte { + // Simple heuristic: if a line is very long (>500 chars) and looks like JSON, truncate it + lines := bytes.Split(data, []byte("\n")) + var result [][]byte + for _, line := range lines { + if len(line) > 500 { + // Keep first 200 chars + indicator + truncated := make([]byte, 200) + copy(truncated, line[:200]) + truncated = append(truncated, []byte(fmt.Sprintf("... (%d chars truncated)", len(line)-200))...) + result = append(result, truncated) + } else { + result = append(result, line) + } + } + return bytes.Join(result, []byte("\n")) +} + +// SmartSummary replaces large repeated sections with one-liners. +func SmartSummary(data []byte) []byte { + // Collapse "ok" test results that repeat + lines := bytes.Split(data, []byte("\n")) + if len(lines) < 10 { + return data + } + + var result [][]byte + okCount := 0 + for _, line := range lines { + s := string(line) + if strings.HasPrefix(s, "ok \t") || strings.HasPrefix(s, "ok \t") { + okCount++ + if okCount <= 3 { + result = append(result, line) + } + } else { + if okCount > 3 { + result = append(result, []byte(fmt.Sprintf(" ... (%d more passing packages)", okCount-3))) + okCount = 0 + } + result = append(result, line) + } + } + if okCount > 3 { + result = append(result, []byte(fmt.Sprintf(" ... 
(%d more passing packages)", okCount-3))) + } + return bytes.Join(result, []byte("\n")) +} diff --git a/internal/compress/generic.go b/internal/compress/generic.go new file mode 100644 index 0000000..2fb36fb --- /dev/null +++ b/internal/compress/generic.go @@ -0,0 +1,139 @@ +package compress + +import ( + "bytes" + "fmt" + "strings" +) + +// genericPipeline returns a compression pipeline for common shell commands. +func genericPipeline(cmd string) *Pipeline { + base := baseCommand(cmd) + + switch base { + case "ls", "find", "fd": + return NewPipeline( + StripANSI, + CollapseWhitespace, + GroupByDirectory, + LimitLineCount(100), + ) + case "grep", "rg", "ag": + return NewPipeline( + StripANSI, + CollapseWhitespace, + TruncatePaths, + DeduplicateLines, + LimitLineCount(100), + ) + case "cat", "head", "tail", "less": + return NewPipeline( + StripANSI, + TruncateJSON, + LimitLineCount(200), + ) + case "docker": + return NewPipeline( + StripANSI, + StripProgress, + CollapseWhitespace, + DeduplicateLines, + LimitLineCount(80), + ) + case "curl", "wget": + return NewPipeline( + StripANSI, + StripProgress, + TruncateJSON, + LimitLineCount(100), + ) + default: + return NewPipeline( + StripANSI, + CollapseWhitespace, + TruncatePaths, + DeduplicateLines, + LimitLineCount(200), + ) + } +} + +// UltraCompact is an opt-in extreme compression mode. +// It strips comments, collapses aggressively, and caps output at 50 lines. +func UltraCompact(data []byte) []byte { + lines := bytes.Split(data, []byte("\n")) + var result [][]byte + + for _, line := range lines { + trimmed := bytes.TrimSpace(line) + // Skip empty lines + if len(trimmed) == 0 { + continue + } + // Skip comment lines + if bytes.HasPrefix(trimmed, []byte("//")) || bytes.HasPrefix(trimmed, []byte("#")) { + continue + } + // Truncate long lines + if len(line) > 120 { + truncated := make([]byte, 120) + copy(truncated, line[:120]) + truncated = append(truncated, []byte("...")...) 
+ result = append(result, truncated) + } else { + result = append(result, line) + } + } + + if len(result) > 50 { + kept := result[:50] + kept = append(kept, []byte(fmt.Sprintf("\n... (%d more lines, ultra-compact mode)", len(result)-50))) + return bytes.Join(kept, []byte("\n")) + } + return bytes.Join(result, []byte("\n")) +} + +// ForCommandUltra returns an ultra-compact pipeline for maximum compression. +func ForCommandUltra(command string) *Pipeline { + return NewPipeline( + StripANSI, + StripProgress, + StripComments, + CollapseWhitespace, + TruncatePaths, + DeduplicateLines, + UltraCompact, + ) +} + +// CompressWithLevel runs compression at a specified level. +func CompressWithLevel(command string, raw []byte, ultra bool) ([]byte, Stats) { + var pipeline *Pipeline + if ultra { + pipeline = ForCommandUltra(command) + } else { + pipeline = ForCommand(command) + } + output := pipeline.Run(raw) + return output, Stats{ + InputBytes: len(raw), + OutputBytes: len(output), + Command: command, + } +} + +// EstimateTokens gives a rough token count estimate (~4 chars per token). +func EstimateTokens(data []byte) int { + // Conservative estimate: ~4 bytes per token for English text + trimmed := bytes.TrimSpace(data) + if len(trimmed) == 0 { + return 0 + } + words := len(strings.Fields(string(trimmed))) + // Tokens ≈ 0.75 * words for code/CLI output + tokens := int(float64(words) * 0.75) + if tokens < 1 && len(trimmed) > 0 { + return 1 + } + return tokens +} diff --git a/internal/compress/git.go b/internal/compress/git.go new file mode 100644 index 0000000..6b76e2b --- /dev/null +++ b/internal/compress/git.go @@ -0,0 +1,91 @@ +package compress + +import ( + "bytes" + "fmt" + "strings" +) + +// gitPipeline returns a compression pipeline tuned for git commands. 
+func gitPipeline(cmd string) *Pipeline { + fields := strings.Fields(cmd) + subcmd := "" + if len(fields) > 1 { + subcmd = fields[1] + } + + switch subcmd { + case "status": + return NewPipeline( + StripANSI, + CollapseWhitespace, + TruncatePaths, + GroupByDirectory, + ) + case "log": + return NewPipeline( + StripANSI, + CollapseWhitespace, + DeduplicateLines, + LimitLineCount(50), + ) + case "diff": + return NewPipeline( + StripANSI, + CollapseWhitespace, + CollapseDiffContext, + TruncatePaths, + LimitLineCount(200), + ) + case "branch": + return NewPipeline( + StripANSI, + CollapseWhitespace, + DeduplicateLines, + ) + default: + return NewPipeline( + StripANSI, + CollapseWhitespace, + TruncatePaths, + DeduplicateLines, + ) + } +} + +// CollapseDiffContext reduces unchanged context lines in diffs to save tokens. +func CollapseDiffContext(data []byte) []byte { + lines := bytes.Split(data, []byte("\n")) + var result [][]byte + contextRun := 0 + + for _, line := range lines { + isDiffMeta := bytes.HasPrefix(line, []byte("diff ")) || + bytes.HasPrefix(line, []byte("index ")) || + bytes.HasPrefix(line, []byte("--- ")) || + bytes.HasPrefix(line, []byte("+++ ")) || + bytes.HasPrefix(line, []byte("@@ ")) + isChange := bytes.HasPrefix(line, []byte("+")) || bytes.HasPrefix(line, []byte("-")) + + if isDiffMeta || isChange { + if contextRun > 6 { + // Keep first 3 and last 3 context lines, collapse middle + collapsed := contextRun - 6 + result = append(result, []byte(fmt.Sprintf(" ... (%d unchanged lines)", collapsed))) + } + contextRun = 0 + result = append(result, line) + } else { + contextRun++ + if contextRun <= 3 { + result = append(result, line) + } + // Lines 4+ are buffered; if we hit a change, we'll emit the last 3 + } + } + if contextRun > 6 { + result = append(result, []byte(fmt.Sprintf(" ... 
(%d unchanged lines)", contextRun-3))) + } + + return bytes.Join(result, []byte("\n")) +} diff --git a/internal/compress/pipeline.go b/internal/compress/pipeline.go new file mode 100644 index 0000000..1579f89 --- /dev/null +++ b/internal/compress/pipeline.go @@ -0,0 +1,59 @@ +// Package compress provides output compression for CLI command results. +// It reduces token usage by filtering, deduplicating, and truncating output. +package compress + +// Filter transforms raw output bytes into compressed output. +type Filter func([]byte) []byte + +// Pipeline runs a sequence of filters on input data. +type Pipeline struct { + filters []Filter +} + +// NewPipeline creates a pipeline from the given filters. +func NewPipeline(filters ...Filter) *Pipeline { + return &Pipeline{filters: filters} +} + +// Run applies all filters in order to the input. +func (p *Pipeline) Run(input []byte) []byte { + data := input + for _, f := range p.filters { + data = f(data) + if len(data) == 0 { + return data + } + } + return data +} + +// Stats tracks compression metrics. +type Stats struct { + InputBytes int + OutputBytes int + Command string +} + +// Ratio returns the compression ratio (0.0 = perfect, 1.0 = no compression). +func (s Stats) Ratio() float64 { + if s.InputBytes == 0 { + return 1.0 + } + return float64(s.OutputBytes) / float64(s.InputBytes) +} + +// Saved returns the percentage of bytes saved. +func (s Stats) Saved() float64 { + return (1.0 - s.Ratio()) * 100 +} + +// Compress runs the appropriate pipeline for a command and returns compressed output + stats. 
+func Compress(command string, raw []byte) ([]byte, Stats) { + pipeline := ForCommand(command) + output := pipeline.Run(raw) + return output, Stats{ + InputBytes: len(raw), + OutputBytes: len(output), + Command: command, + } +} diff --git a/internal/compress/registry.go b/internal/compress/registry.go new file mode 100644 index 0000000..79faac7 --- /dev/null +++ b/internal/compress/registry.go @@ -0,0 +1,59 @@ +package compress + +import "strings" + +// ForCommand returns the appropriate compression pipeline for a given command. +func ForCommand(command string) *Pipeline { + cmd := strings.TrimSpace(command) + base := baseCommand(cmd) + + switch { + // Git commands + case base == "git": + return gitPipeline(cmd) + // Test runners + case isTestCommand(base, cmd): + return testPipeline(cmd) + // Generic commands + default: + return genericPipeline(cmd) + } +} + +// baseCommand extracts the first word of a command string. +func baseCommand(cmd string) string { + fields := strings.Fields(cmd) + if len(fields) == 0 { + return "" + } + return fields[0] +} + +// isTestCommand checks if a command is a test runner. +func isTestCommand(base, cmd string) bool { + testBases := map[string]bool{ + "go": strings.Contains(cmd, " test"), + "cargo": strings.Contains(cmd, " test"), + "npm": strings.Contains(cmd, " test"), + "npx": strings.Contains(cmd, "jest") || strings.Contains(cmd, "vitest"), + "pytest": true, + "python": strings.Contains(cmd, "-m pytest") || strings.Contains(cmd, "-m unittest"), + "jest": true, + "vitest": true, + "make": strings.Contains(cmd, "test"), + } + if match, ok := testBases[base]; ok { + return match + } + return false +} + +// DefaultPipeline returns a minimal compression pipeline. 
+func DefaultPipeline() *Pipeline { + return NewPipeline( + StripANSI, + CollapseWhitespace, + TruncatePaths, + DeduplicateLines, + ) +} diff --git a/internal/compress/test_cmd.go b/internal/compress/test_cmd.go new file mode 100644 index 0000000..54746ce --- /dev/null +++ b/internal/compress/test_cmd.go @@ -0,0 +1,94 @@ +package compress + +import ( + "bytes" + "fmt" + "strings" +) + +// testPipeline returns a compression pipeline tuned for test runner output. +func testPipeline(_ string) *Pipeline { + return NewPipeline( + StripANSI, + StripProgress, + CollapseWhitespace, + ExtractFailuresOnly, + SmartSummary, + TruncatePaths, + LimitLineCount(150), + ) +} + +// ExtractFailuresOnly keeps failure details and collapses passing tests. +func ExtractFailuresOnly(data []byte) []byte { + lines := bytes.Split(data, []byte("\n")) + if len(lines) < 5 { + return data + } + + var result [][]byte + passCount := 0 + inFailure := false + + for _, line := range lines { + s := string(line) + + // Detect failure markers across test frameworks + isFailure := strings.Contains(s, "FAIL") || + strings.Contains(s, "FAILED") || + strings.Contains(s, "Error:") || + strings.Contains(s, "panic:") || + strings.Contains(s, "ERRORS") || + strings.HasPrefix(s, "E ") // pytest style + + isPass := strings.HasPrefix(s, "ok ") || + strings.Contains(s, "PASS") || + strings.Contains(s, "passed") || + strings.HasPrefix(s, " ✓") || + strings.HasPrefix(s, " √") + + isSummary := strings.HasPrefix(s, "---") || + strings.HasPrefix(s, "===") || + strings.Contains(s, "test result:") || + strings.Contains(s, "Tests:") || + strings.Contains(s, "Test Suites:") + + if isFailure { + // Flush pass count before failure block + if passCount > 0 { + result = append(result, []byte(fmt.Sprintf(" ... 
(%d passing tests)", passCount))) + passCount = 0 + } + inFailure = true + result = append(result, line) + } else if inFailure { + // Keep lines following a failure until a blank line or pass + if len(bytes.TrimSpace(line)) == 0 { + inFailure = false + result = append(result, line) + } else if isPass { + inFailure = false + passCount++ + } else { + result = append(result, line) + } + } else if isPass { + passCount++ + } else if isSummary { + if passCount > 0 { + result = append(result, []byte(fmt.Sprintf(" ... (%d passing tests)", passCount))) + passCount = 0 + } + result = append(result, line) + } else { + // Non-test output (compilation errors, warnings, etc.) — keep + result = append(result, line) + } + } + + if passCount > 0 { + result = append(result, []byte(fmt.Sprintf(" ... (%d passing tests)", passCount))) + } + + return bytes.Join(result, []byte("\n")) +} diff --git a/internal/config/llm_loader.go b/internal/config/llm_loader.go index 27d73e2..af11d7e 100644 --- a/internal/config/llm_loader.go +++ b/internal/config/llm_loader.go @@ -191,8 +191,6 @@ func LoadLLMConfig() (llm.Config, error) { switch embeddingProvider { case llm.ProviderOllama: embeddingBaseURL = llm.DefaultOllamaURL - case llm.ProviderTEI: - embeddingBaseURL = llm.DefaultTEIURL } } diff --git a/internal/config/retrieval.go b/internal/config/retrieval.go index 6fb6ce5..7850e95 100644 --- a/internal/config/retrieval.go +++ b/internal/config/retrieval.go @@ -25,16 +25,6 @@ type RetrievalConfig struct { GraphExpansionMinEdgeConfidence float64 `mapstructure:"graph_expansion_min_edge_confidence"` GraphExpansionReservedSlots int `mapstructure:"graph_expansion_reserved_slots"` - // TEI (Text Embeddings Inference) settings - TEIBaseURL string `mapstructure:"tei_base_url"` - TEIModelName string `mapstructure:"tei_model_name"` - - // Reranking settings - RerankingEnabled bool `mapstructure:"reranking_enabled"` - RerankBaseURL string `mapstructure:"rerank_base_url"` - RerankTopK int 
`mapstructure:"rerank_top_k"` - RerankModelName string `mapstructure:"rerank_model_name"` - // Query rewriting settings QueryRewriteEnabled bool `mapstructure:"query_rewrite_enabled"` } @@ -62,16 +52,6 @@ func DefaultRetrievalConfig() RetrievalConfig { GraphExpansionMinEdgeConfidence: 0.5, GraphExpansionReservedSlots: 2, - // TEI settings - TEIBaseURL: "http://localhost:8080", - TEIModelName: "Qwen/Qwen3-Embedding-8B", - - // Reranking - RerankingEnabled: false, // Off by default until TEI is configured - RerankBaseURL: "http://localhost:8081", - RerankTopK: 20, - RerankModelName: "Qwen/Qwen3-Reranker-8B", - // Query rewriting QueryRewriteEnabled: true, // Enabled by default - improves search quality } @@ -101,16 +81,6 @@ func LoadRetrievalConfig() RetrievalConfig { GraphExpansionMinEdgeConfidence: getFloat64WithDefault("retrieval.graph.min_edge_confidence", defaults.GraphExpansionMinEdgeConfidence), GraphExpansionReservedSlots: getIntWithDefault("retrieval.graph.reserved_slots", defaults.GraphExpansionReservedSlots), - // TEI settings - TEIBaseURL: getStringWithDefault("retrieval.tei.base_url", defaults.TEIBaseURL), - TEIModelName: getStringWithDefault("retrieval.tei.model_name", defaults.TEIModelName), - - // Reranking - RerankingEnabled: getBoolWithDefault("retrieval.reranking.enabled", defaults.RerankingEnabled), - RerankBaseURL: getStringWithDefault("retrieval.reranking.base_url", defaults.RerankBaseURL), - RerankTopK: getIntWithDefault("retrieval.reranking.top_k", defaults.RerankTopK), - RerankModelName: getStringWithDefault("retrieval.reranking.model_name", defaults.RerankModelName), - // Query rewriting QueryRewriteEnabled: getBoolWithDefault("retrieval.query_rewrite.enabled", defaults.QueryRewriteEnabled), } @@ -138,10 +108,3 @@ func getBoolWithDefault(key string, defaultVal bool) bool { } return defaultVal } - -func getStringWithDefault(key string, defaultVal string) string { - if viper.IsSet(key) { - return viper.GetString(key) - } - return defaultVal -} 
diff --git a/internal/knowledge/ingest.go b/internal/knowledge/ingest.go index 84fe143..613338f 100644 --- a/internal/knowledge/ingest.go +++ b/internal/knowledge/ingest.go @@ -4,6 +4,7 @@ import ( "context" "encoding/json" "fmt" + "log/slog" "os" "strings" "time" @@ -55,7 +56,7 @@ func (s *Service) IngestFindingsWithRelationships(ctx context.Context, findings } // 3. Link Knowledge Graph (evidence-based + semantic) - evidenceEdges, semanticEdges, err := s.linkKnowledgeGraph(verbose) + evidenceEdges, semanticEdges, err := s.linkKnowledgeGraph() if err != nil { return err } @@ -66,12 +67,12 @@ func (s *Service) IngestFindingsWithRelationships(ctx context.Context, findings totalEdges := evidenceEdges + semanticEdges + llmEdges if verbose { - fmt.Println(" done") + fmt.Printf(" Linking knowledge graph done (%d evidence, %d semantic, %d llm)\n", evidenceEdges, semanticEdges, llmEdges) if rejectedCount > 0 { - fmt.Printf("\n⚠️ Rejected %d findings with unverifiable evidence.\n", rejectedCount) + fmt.Printf(" [warn] Rejected %d findings with unverifiable evidence.\n", rejectedCount) } - fmt.Printf("\n✅ Saved %d knowledge nodes (%d duplicates skipped), %d edges (%d evidence, %d semantic, %d llm) to memory.\n", - nodesCreated, skippedDuplicates, totalEdges, evidenceEdges, semanticEdges, llmEdges) + fmt.Printf(" Saved %d knowledge nodes (%d duplicates skipped), %d edges to memory.\n", + nodesCreated, skippedDuplicates, totalEdges) } return nil @@ -95,7 +96,7 @@ func (s *Service) verifyFindings(ctx context.Context, findings []core.Finding, v case core.VerificationStatusVerified: verifiedCount++ if verbose { - fmt.Print("✓") + fmt.Print(".") } case core.VerificationStatusPartial: verifiedCount++ // Partial counts as verified (kept) @@ -105,7 +106,7 @@ func (s *Service) verifyFindings(ctx context.Context, findings []core.Finding, v case core.VerificationStatusRejected: rejectedCount++ if verbose { - fmt.Print("✗") + fmt.Print("x") } default: if verbose { @@ -266,13 +267,19 @@ 
func (s *Service) ingestNodesWithIndex(ctx context.Context, findings []core.Find } } - if err := s.repo.UpsertNodeBySummary(node); err != nil { - fmt.Fprintf(os.Stderr, "⚠️ failed to upsert node %q: %v\n", f.Title, err) + actualID, err := s.repo.UpsertNodeBySummary(node) + if err != nil { + fmt.Fprintf(os.Stderr, "[warn] failed to upsert node %q: %v\n", f.Title, err) } else { nodesCreated++ - nodesByTitle[strings.ToLower(f.Title)] = nodeID + nodesByTitle[strings.ToLower(f.Title)] = actualID } } + + if verbose { + fmt.Println() // Close the "Generating embeddings..." dots line + } + return nodesCreated, skippedDuplicates, nodesByTitle, nil } @@ -280,11 +287,7 @@ func (s *Service) ingestNodesWithIndex(ctx context.Context, findings []core.Find // 1. Shared evidence (nodes referencing the same files) // 2. Semantic similarity (embedding-based) // Returns (evidenceEdges, semanticEdges, error) -func (s *Service) linkKnowledgeGraph(verbose bool) (int, int, error) { - if verbose { - fmt.Print(" Linking knowledge graph") - } - +func (s *Service) linkKnowledgeGraph() (int, int, error) { allNodes, err := s.repo.ListNodes("") if err != nil { return 0, 0, err @@ -304,6 +307,7 @@ func (s *Service) linkKnowledgeGraph(verbose bool) (int, int, error) { // Note: allNodes must include Evidence fields (use ListNodes which now includes them). 
func (s *Service) linkByEvidence(allNodes []memory.Node) int { count := 0 + failures := 0 // Build map: file path -> list of node IDs that reference it fileToNodes := make(map[string][]string) @@ -370,7 +374,7 @@ func (s *Service) linkByEvidence(allNodes []memory.Node) int { "shared_count": sharedFiles, } if err := s.repo.LinkNodes(nodeA, nodeB, memory.NodeRelationSharesEvidence, weight, props); err != nil { - fmt.Fprintf(os.Stderr, "⚠️ failed to link nodes (evidence): %v\n", err) + failures++ } else { count++ } @@ -378,6 +382,10 @@ func (s *Service) linkByEvidence(allNodes []memory.Node) int { } } + if failures > 0 { + slog.Debug("evidence linking had failures", "failed", failures, "succeeded", count) + } + return count } @@ -398,6 +406,7 @@ func countSharedFiles(filesA, filesB []string) int { func (s *Service) linkSemantic(allNodes []memory.Node) int { count := 0 + failures := 0 threshold := SemanticSimilarityThreshold // Fetch all nodes with embeddings in a single query (no N+1) @@ -420,13 +429,17 @@ func (s *Service) linkSemantic(allNodes []memory.Node) int { if similarity >= float32(threshold) { props := map[string]any{"similarity": similarity} if err := s.repo.LinkNodes(nodeA.ID, nodeB.ID, memory.NodeRelationSemanticallySimilar, float64(similarity), props); err != nil { - fmt.Fprintf(os.Stderr, "⚠️ failed to link nodes (semantic): %v\n", err) + failures++ } else { count++ } } } } + + if failures > 0 { + slog.Debug("semantic linking had failures", "failed", failures, "succeeded", count) + } return count } @@ -438,14 +451,17 @@ func (s *Service) linkByLLMRelationships(relationships []core.Relationship, node } count := 0 + failures := 0 for _, rel := range relationships { // Look up node IDs by title (case-insensitive) fromID := nodesByTitle[strings.ToLower(rel.From)] toID := nodesByTitle[strings.ToLower(rel.To)] - if fromID == "" || toID == "" { - // Try partial matching if exact match fails + // Try partial matching only for missing IDs + if fromID == "" { 
fromID = findNodeByPartialTitle(nodesByTitle, rel.From) + } + if toID == "" { toID = findNodeByPartialTitle(nodesByTitle, rel.To) } @@ -474,12 +490,16 @@ func (s *Service) linkByLLMRelationships(relationships []core.Relationship, node } if err := s.repo.LinkNodes(fromID, toID, relationType, weight, props); err != nil { - fmt.Fprintf(os.Stderr, "⚠️ failed to link nodes (llm): %v\n", err) + failures++ } else { count++ } } + if failures > 0 { + slog.Debug("LLM relationship linking had failures", "failed", failures, "succeeded", count) + } + return count } diff --git a/internal/knowledge/reranker.go b/internal/knowledge/reranker.go index cca5e4e..cbad5e6 100644 --- a/internal/knowledge/reranker.go +++ b/internal/knowledge/reranker.go @@ -3,11 +3,6 @@ package knowledge import ( "context" "errors" - "log/slog" - "sync" - "time" - - "github.com/josephgoksu/TaskWing/internal/llm" ) // ErrRerankerDisabled is returned when reranking is disabled after repeated failures. @@ -28,195 +23,10 @@ type RerankResult struct { } // RerankerFactory creates a Reranker from config. -// Returns nil if reranking is disabled. +// Returns nil if reranking is not configured. type RerankerFactory func(ctx context.Context, cfg RetrievalConfig) (Reranker, error) -// DefaultRerankerFactory creates a TEI reranker if enabled in config. -var DefaultRerankerFactory RerankerFactory = func(ctx context.Context, cfg RetrievalConfig) (Reranker, error) { - if !cfg.RerankingEnabled { - return nil, nil - } - - reranker, err := llm.NewTeiReranker(ctx, &llm.TeiRerankerConfig{ - BaseURL: cfg.RerankBaseURL, - Model: cfg.RerankModelName, - TopK: cfg.RerankTopK, - Timeout: 5 * time.Second, // Default timeout for reranking - }) - if err != nil { - return nil, err - } - - return newCircuitBreakerReranker(&teiRerankerAdapter{reranker: reranker}), nil -} - -// teiRerankerAdapter adapts llm.TeiReranker to knowledge.Reranker interface. 
-type teiRerankerAdapter struct { - reranker *llm.TeiReranker -} - -func (a *teiRerankerAdapter) Rerank(ctx context.Context, query string, documents []string) ([]RerankResult, error) { - results, err := a.reranker.Rerank(ctx, query, documents) - if err != nil { - return nil, err - } - - // Convert tei.RerankResult to knowledge.RerankResult - converted := make([]RerankResult, len(results)) - for i, r := range results { - converted[i] = RerankResult{ - Index: r.Index, - Score: r.Score, - } - } - return converted, nil -} - -func (a *teiRerankerAdapter) Close() error { - return a.reranker.Close() -} - -type circuitBreakerReranker struct { - inner Reranker - threshold int - mu sync.Mutex - failures int - disabled bool -} - -func newCircuitBreakerReranker(inner Reranker) Reranker { - return &circuitBreakerReranker{ - inner: inner, - threshold: 1, - } -} - -func (c *circuitBreakerReranker) Rerank(ctx context.Context, query string, documents []string) ([]RerankResult, error) { - c.mu.Lock() - if c.disabled { - c.mu.Unlock() - return nil, ErrRerankerDisabled - } - c.mu.Unlock() - - results, err := c.inner.Rerank(ctx, query, documents) - if err != nil { - c.mu.Lock() - c.failures++ - if c.failures >= c.threshold { - c.disabled = true - } - c.mu.Unlock() - if c.disabled { - slog.Warn("reranker disabled after failure", "error", err) - return nil, ErrRerankerDisabled - } - return nil, err - } - - c.mu.Lock() - c.failures = 0 - c.mu.Unlock() - return results, nil -} - -func (c *circuitBreakerReranker) Close() error { - return c.inner.Close() -} - -// rerankResults applies reranking to scored nodes with timeout and fallback. -// If reranking fails or times out, returns the original results unchanged. -// Preserves reranker ordering and normalizes scores to meaningful display range. 
-func rerankResults(ctx context.Context, reranker Reranker, query string, scored []ScoredNode, timeout time.Duration) []ScoredNode { - if reranker == nil || len(scored) == 0 { - return scored - } - - // Create timeout context - rerankCtx, cancel := context.WithTimeout(ctx, timeout) - defer cancel() - - // Extract document contents for reranking - documents := make([]string, len(scored)) - originalScores := make([]float32, len(scored)) - for i, sn := range scored { - // Use content + summary for better reranking - documents[i] = sn.Node.Summary + "\n" + sn.Node.Text() - originalScores[i] = sn.Score - } - - // Attempt reranking - results, err := reranker.Rerank(rerankCtx, query, documents) - if err != nil { - if errors.Is(err, ErrRerankerDisabled) { - return scored - } - // Log warning and return original results - slog.Warn("reranking failed, using original scores", - "error", err, - "timeout", timeout, - "candidates", len(scored)) - return scored - } - - if len(results) == 0 { - return scored - } - - // Find min/max reranker scores for normalization - minRerankScore := results[0].Score - maxRerankScore := results[0].Score - for _, r := range results { - if r.Score < minRerankScore { - minRerankScore = r.Score - } - if r.Score > maxRerankScore { - maxRerankScore = r.Score - } - } - rerankRange := maxRerankScore - minRerankScore - - // Find max original score as the ceiling for normalized scores - var maxOrigScore float32 - for _, s := range originalScores { - if s > maxOrigScore { - maxOrigScore = s - } - } - if maxOrigScore < 0.3 { - maxOrigScore = 0.3 // Minimum ceiling - } - - // Create reranked results with normalized scores - // Normalize reranker scores to [0.15, maxOrigScore] range - // This preserves relative differences while ensuring meaningful display - reranked := make([]ScoredNode, 0, len(results)) - for _, r := range results { - if r.Index < len(scored) { - node := scored[r.Index] - - // Normalize reranker score to 0-1 range, then scale to display 
range - var normalizedScore float32 - if rerankRange > 0.0001 { // Avoid division by near-zero - normalizedScore = float32((r.Score - minRerankScore) / rerankRange) - } else { - normalizedScore = 1.0 // All scores equal, treat as max - } - - // Scale to [0.15, maxOrigScore] range - // This preserves relative differences from the reranker - displayScore := 0.15 + normalizedScore*(maxOrigScore-0.15) - - node.Score = displayScore - reranked = append(reranked, node) - } - } - - slog.Debug("reranking complete", - "input_count", len(scored), - "output_count", len(reranked), - "rerank_range", rerankRange, - "max_orig_score", maxOrigScore) - - return reranked +// DefaultRerankerFactory returns nil (reranking disabled — TEI layer removed). +var DefaultRerankerFactory RerankerFactory = func(_ context.Context, _ RetrievalConfig) (Reranker, error) { + return nil, nil } diff --git a/internal/knowledge/service.go b/internal/knowledge/service.go index 75d9280..73e75d4 100644 --- a/internal/knowledge/service.go +++ b/internal/knowledge/service.go @@ -24,7 +24,7 @@ type Repository interface { // Write operations CreateNode(n *memory.Node) error - UpsertNodeBySummary(n memory.Node) error + UpsertNodeBySummary(n memory.Node) (string, error) DeleteNodesByAgent(agent string) error DeleteNodesByFiles(agent string, filePaths []string) error GetNodesByFiles(agent string, filePaths []string) ([]memory.Node, error) @@ -289,16 +289,8 @@ func (s *Service) searchInternal(ctx context.Context, query string, typeFilter s vectorThreshold := float32(cfg.VectorScoreThreshold) minResultThreshold := float32(cfg.MinResultScoreThreshold) - // Two-stage retrieval: fetch more candidates for reranking - // Stage 1 (Candidate retrieval): Fetch Top-25 candidates using hybrid search - candidateLimit := cfg.RerankTopK - if candidateLimit <= 0 { - candidateLimit = 25 // Default candidates - } - if !cfg.RerankingEnabled { - // If reranking disabled, just fetch what we need - candidateLimit = limit * 2 // Fetch 
2x for graph expansion buffer - } + // Fetch candidates for graph expansion buffer + candidateLimit := limit * 2 // Collect results from both search methods scoreByID := make(map[string]float32) @@ -407,16 +399,7 @@ func (s *Service) searchInternal(ctx context.Context, query string, typeFilter s scored = scored[:candidateLimit] } - // 4. Stage 2 (Precision): Rerank using TEI if enabled - if cfg.RerankingEnabled && len(scored) > 0 { - reranker := s.getReranker(ctx) - if reranker != nil { - // Apply reranking with 5s timeout and fallback - scored = rerankResults(ctx, reranker, query, scored, 5*time.Second) - } - } - - // 5. Graph Expansion: Add connected nodes via knowledge graph edges + // 4. Graph Expansion: Add connected nodes via knowledge graph edges if cfg.GraphExpansionEnabled && len(scored) > 0 { scored = s.expandViaGraph(scored, cfg) } @@ -652,7 +635,7 @@ func (s *Service) AddNode(ctx context.Context, input NodeInput) (*memory.Node, e } // 3. Save to Repo (upsert for dedup — matches by summary with Jaccard similarity) - if err := s.repo.UpsertNodeBySummary(*node); err != nil { + if _, err := s.repo.UpsertNodeBySummary(*node); err != nil { return nil, fmt.Errorf("save node: %w", err) } @@ -795,10 +778,7 @@ func (s *Service) SearchDebug(ctx context.Context, query string, limit int) (*De vectorWeight := float32(cfg.VectorWeight) vectorThreshold := float32(cfg.VectorScoreThreshold) - candidateLimit := cfg.RerankTopK - if candidateLimit <= 0 { - candidateLimit = 25 - } + candidateLimit := 25 // Track individual scores per node type nodeScores struct { @@ -906,25 +886,7 @@ func (s *Service) SearchDebug(ctx context.Context, query string, limit int) (*De } response.TotalCandidates = len(scored) - // 5. 
Reranking (if enabled) - startRerank := time.Now() - if cfg.RerankingEnabled && len(scored) > 0 { - reranker := s.getReranker(ctx) - if reranker != nil { - pipeline = append(pipeline, "Rerank") - scored = rerankResults(ctx, reranker, query, scored, 5*time.Second) - // Update rerank scores - for i, sn := range scored { - if ns, ok := scoreMap[sn.Node.ID]; ok { - ns.rerankScore = sn.Score - // Recalculate rank-based score - ns.combined = sn.Score - scored[i] = ScoredNode{Node: sn.Node, Score: sn.Score} - } - } - } - } - response.Timings["rerank"] = time.Since(startRerank).Milliseconds() + response.Timings["rerank"] = int64(0) // 6. Graph Expansion startGraph := time.Now() diff --git a/internal/llm/client.go b/internal/llm/client.go index 6880ea2..fff9d73 100644 --- a/internal/llm/client.go +++ b/internal/llm/client.go @@ -249,8 +249,6 @@ func ValidateProvider(p string) (Provider, error) { return ProviderGemini, nil case ProviderBedrock: return ProviderBedrock, nil - case ProviderTEI: - return ProviderTEI, nil case ProviderTaskWing: return ProviderTaskWing, nil default: @@ -391,23 +389,6 @@ func NewCloseableEmbedder(ctx context.Context, cfg Config) (*CloseableEmbedder, closer: &genaiClientCloser{client: genaiClient}, }, nil - case ProviderTEI: - teiBaseURL := baseURL - if teiBaseURL == "" { - teiBaseURL = DefaultTEIURL - } - modelName := cfg.EmbeddingModel - // TEI doesn't require a model name - it uses whatever model the server was started with - - e, err := NewTeiEmbedder(ctx, &TeiConfig{ - BaseURL: teiBaseURL, - Model: modelName, - }) - if err != nil { - return nil, fmt.Errorf("failed to create TEI embedder: %w", err) - } - return &CloseableEmbedder{Embedder: e, closer: e}, nil - default: return nil, fmt.Errorf("unsupported embedding provider: %s", embeddingProvider) } diff --git a/internal/llm/constants.go b/internal/llm/constants.go index f487cf5..e06a831 100644 --- a/internal/llm/constants.go +++ b/internal/llm/constants.go @@ -20,20 +20,12 @@ const ( // 
ProviderBedrock represents AWS Bedrock OpenAI-compatible runtime ProviderBedrock = "bedrock" - // ProviderTEI represents Text Embeddings Inference (embeddings only) - // TEI is a high-performance embedding server from Hugging Face - // See: https://github.com/huggingface/text-embeddings-inference - ProviderTEI = "tei" - // ProviderTaskWing represents the TaskWing managed inference service. // Uses fine-tuned models optimized for architecture extraction. // OpenAI-compatible API; requires TASKWING_API_KEY. ProviderTaskWing = "taskwing" ) -// DefaultTEIURL is the default URL for TEI server -const DefaultTEIURL = "http://localhost:8080" - // Embedding model constants const ( // DefaultOpenAIEmbeddingModel is the default embedding model for OpenAI diff --git a/internal/llm/embeddings.go b/internal/llm/embeddings.go index f55cb81..b0270f2 100644 --- a/internal/llm/embeddings.go +++ b/internal/llm/embeddings.go @@ -91,18 +91,6 @@ var EmbeddingRegistry = []EmbeddingModel{ PricePer1M: 0.00, }, - // ============================================ - // TEI (Text Embeddings Inference) - Custom endpoint - // ============================================ - { - ID: "custom", - Provider: "TEI", - ProviderID: ProviderTEI, - Dimensions: 0, // Depends on model loaded in TEI - MaxTokens: 0, - PricePer1M: 0.00, // Self-hosted - IsDefault: true, - }, } // embeddingIndex is built at init time for fast lookups @@ -201,7 +189,7 @@ func GetEmbeddingProviders() []EmbeddingProviderInfo { providerMap[m.ProviderID] = &EmbeddingProviderInfo{ ID: m.ProviderID, DisplayName: m.Provider, - IsLocal: m.ProviderID == ProviderOllama || m.ProviderID == ProviderTEI, + IsLocal: m.ProviderID == ProviderOllama, IsFree: m.PricePer1M == 0, } } @@ -210,7 +198,7 @@ func GetEmbeddingProviders() []EmbeddingProviderInfo { // Return in consistent order var providers []EmbeddingProviderInfo - providerOrder := []string{ProviderOllama, ProviderOpenAI, ProviderGemini, ProviderTEI} + providerOrder := 
[]string{ProviderOllama, ProviderOpenAI, ProviderGemini} for _, id := range providerOrder { if p, exists := providerMap[id]; exists { providers = append(providers, *p) diff --git a/internal/llm/models.go b/internal/llm/models.go index 8303160..6a83e0f 100644 --- a/internal/llm/models.go +++ b/internal/llm/models.go @@ -498,20 +498,6 @@ func GetRecommendedModelForRole(providerID string, role ModelRole) *Model { return GetDefaultModel(providerID) } -// GetCategoryBadge returns an emoji badge for the model category. -func GetCategoryBadge(category ModelCategory) string { - switch category { - case CategoryReasoning: - return "🧠" - case CategoryBalanced: - return "⚡" - case CategoryFast: - return "🚀" - default: - return "" - } -} - // InferProvider attempts to determine the provider from a model name. // Returns the provider ID and true if inference succeeded. func InferProvider(modelID string) (string, bool) { @@ -624,17 +610,6 @@ func formatPriceInfo(providerID string, input, output float64) string { return fmt.Sprintf("$%.2f/$%.2f per 1M tokens", input, output) } -// CalculateCost calculates cost in USD for token usage. -func CalculateCost(modelID string, inputTokens, outputTokens int) float64 { - m := GetModel(modelID) - if m == nil { - return 0 - } - inputCost := float64(inputTokens) / 1_000_000 * m.InputPer1M - outputCost := float64(outputTokens) / 1_000_000 * m.OutputPer1M - return inputCost + outputCost -} - // ModelSupportsThinking returns true if the model supports extended thinking mode. func ModelSupportsThinking(modelID string) bool { m := GetModel(modelID) diff --git a/internal/llm/tei_embedder.go b/internal/llm/tei_embedder.go deleted file mode 100644 index c7dcb3c..0000000 --- a/internal/llm/tei_embedder.go +++ /dev/null @@ -1,210 +0,0 @@ -// TEI (Text Embeddings Inference) embedder client. -// TEI is a high-performance embedding server that supports OpenAI-compatible APIs. 
-// See: https://github.com/huggingface/text-embeddings-inference -package llm - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - "io" - "net/http" - "time" - - "github.com/cloudwego/eino/components/embedding" -) - -// TeiConfig holds configuration for the TEI embedder. -type TeiConfig struct { - // BaseURL is the TEI server URL (e.g., "http://localhost:8080") - BaseURL string - - // Model is the model name (optional, TEI typically uses single model) - Model string - - // Timeout for HTTP requests (default: 30s) - Timeout time.Duration -} - -// TeiEmbedder implements the eino embedding.Embedder interface for TEI servers. -// It uses the OpenAI-compatible /v1/embeddings endpoint. -type TeiEmbedder struct { - baseURL string - model string - client *http.Client -} - -// teiEmbeddingRequest is the request payload for /v1/embeddings -type teiEmbeddingRequest struct { - Input []string `json:"input"` - Model string `json:"model,omitempty"` -} - -// teiEmbeddingResponse is the response from /v1/embeddings -type teiEmbeddingResponse struct { - Object string `json:"object"` - Data []struct { - Object string `json:"object"` - Embedding []float64 `json:"embedding"` - Index int `json:"index"` - } `json:"data"` - Model string `json:"model"` - Usage struct { - PromptTokens int `json:"prompt_tokens"` - TotalTokens int `json:"total_tokens"` - } `json:"usage"` -} - -// teiNativeEmbedRequest is the native TEI /embed request format -type teiNativeEmbedRequest struct { - Inputs []string `json:"inputs"` - Truncate bool `json:"truncate,omitempty"` -} - -// NewTeiEmbedder creates a new TEI embedder. 
-func NewTeiEmbedder(ctx context.Context, cfg *TeiConfig) (*TeiEmbedder, error) { - if cfg.BaseURL == "" { - return nil, fmt.Errorf("TEI base URL is required") - } - - timeout := cfg.Timeout - if timeout == 0 { - timeout = 30 * time.Second - } - - return &TeiEmbedder{ - baseURL: cfg.BaseURL, - model: cfg.Model, - client: &http.Client{ - Timeout: timeout, - }, - }, nil -} - -// EmbedStrings implements the embedding.Embedder interface. -// It sends texts to TEI and returns embeddings as [][]float64. -func (e *TeiEmbedder) EmbedStrings(ctx context.Context, texts []string, opts ...embedding.Option) ([][]float64, error) { - if len(texts) == 0 { - return nil, nil - } - - // Try OpenAI-compatible endpoint first - embeddings, err := e.embedViaOpenAI(ctx, texts) - if err != nil { - // Fallback to native TEI endpoint - embeddings, err = e.embedViaNative(ctx, texts) - if err != nil { - return nil, fmt.Errorf("TEI embedding failed: %w", err) - } - } - - return embeddings, nil -} - -// embedViaOpenAI uses the OpenAI-compatible /v1/embeddings endpoint. 
-func (e *TeiEmbedder) embedViaOpenAI(ctx context.Context, texts []string) ([][]float64, error) { - reqBody := teiEmbeddingRequest{ - Input: texts, - Model: e.model, - } - - body, err := json.Marshal(reqBody) - if err != nil { - return nil, fmt.Errorf("marshal request: %w", err) - } - - url := e.baseURL + "/v1/embeddings" - req, err := http.NewRequestWithContext(ctx, http.MethodPost, url, bytes.NewReader(body)) - if err != nil { - return nil, fmt.Errorf("create request: %w", err) - } - req.Header.Set("Content-Type", "application/json") - - resp, err := e.client.Do(req) - if err != nil { - return nil, fmt.Errorf("HTTP request: %w", err) - } - defer func() { _ = resp.Body.Close() }() - - if resp.StatusCode != http.StatusOK { - respBody, _ := io.ReadAll(resp.Body) - return nil, fmt.Errorf("TEI returned status %d: %s", resp.StatusCode, string(respBody)) - } - - var embResp teiEmbeddingResponse - if err := json.NewDecoder(resp.Body).Decode(&embResp); err != nil { - return nil, fmt.Errorf("decode response: %w", err) - } - - // Extract embeddings in order - embeddings := make([][]float64, len(texts)) - for _, d := range embResp.Data { - if d.Index < len(embeddings) { - embeddings[d.Index] = d.Embedding - } - } - - return embeddings, nil -} - -// embedViaNative uses the native TEI /embed endpoint. 
-func (e *TeiEmbedder) embedViaNative(ctx context.Context, texts []string) ([][]float64, error) { - reqBody := teiNativeEmbedRequest{ - Inputs: texts, - Truncate: true, - } - - body, err := json.Marshal(reqBody) - if err != nil { - return nil, fmt.Errorf("marshal request: %w", err) - } - - url := e.baseURL + "/embed" - req, err := http.NewRequestWithContext(ctx, http.MethodPost, url, bytes.NewReader(body)) - if err != nil { - return nil, fmt.Errorf("create request: %w", err) - } - req.Header.Set("Content-Type", "application/json") - - resp, err := e.client.Do(req) - if err != nil { - return nil, fmt.Errorf("HTTP request: %w", err) - } - defer func() { _ = resp.Body.Close() }() - - if resp.StatusCode != http.StatusOK { - respBody, _ := io.ReadAll(resp.Body) - return nil, fmt.Errorf("TEI returned status %d: %s", resp.StatusCode, string(respBody)) - } - - // Native TEI returns [][]float64 directly - var embeddings [][]float64 - if err := json.NewDecoder(resp.Body).Decode(&embeddings); err != nil { - return nil, fmt.Errorf("decode response: %w", err) - } - - return embeddings, nil -} - -// GetDimensions returns the embedding dimension by making a test request. -// This is useful for validating compatibility with stored embeddings. -func (e *TeiEmbedder) GetDimensions(ctx context.Context) (int, error) { - embeddings, err := e.EmbedStrings(ctx, []string{"test"}) - if err != nil { - return 0, fmt.Errorf("test embedding: %w", err) - } - if len(embeddings) == 0 || len(embeddings[0]) == 0 { - return 0, fmt.Errorf("empty embedding returned") - } - return len(embeddings[0]), nil -} - -// Close releases any resources held by the embedder. 
-func (e *TeiEmbedder) Close() error { - // HTTP client doesn't need explicit cleanup - return nil -} - -// Verify interface compliance at compile time -var _ embedding.Embedder = (*TeiEmbedder)(nil) diff --git a/internal/llm/tei_reranker.go b/internal/llm/tei_reranker.go deleted file mode 100644 index a9860dc..0000000 --- a/internal/llm/tei_reranker.go +++ /dev/null @@ -1,178 +0,0 @@ -// TEI (Text Embeddings Inference) reranker client. -package llm - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - "io" - "net/http" - "sort" - "time" -) - -// TeiRerankerConfig holds configuration for the TEI reranker. -type TeiRerankerConfig struct { - // BaseURL is the TEI server URL (e.g., "http://localhost:8081") - BaseURL string - - // Model is the reranker model name (optional) - Model string - - // Timeout for HTTP requests (default: 30s) - Timeout time.Duration - - // TopK is the number of top results to return after reranking - TopK int -} - -// TeiReranker provides reranking functionality using TEI's /rerank endpoint. -type TeiReranker struct { - baseURL string - model string - topK int - client *http.Client -} - -// TeiRerankRequest is the request payload for TEI /rerank endpoint. -type TeiRerankRequest struct { - Query string `json:"query"` - Texts []string `json:"texts"` - RawScores bool `json:"raw_scores,omitempty"` - Truncate bool `json:"truncate,omitempty"` -} - -// TeiRerankResponse is a single rerank result from TEI. -type TeiRerankResponse struct { - Index int `json:"index"` - Score float64 `json:"score"` - Text string `json:"text,omitempty"` // Only if return_text=true -} - -// TeiRerankResult represents a document with its reranked score. -type TeiRerankResult struct { - Index int // Original index in the input slice - Score float64 // Relevance score from reranker - OriginalText string // The original text (for reference) -} - -// NewTeiReranker creates a new TEI reranker client. 
-func NewTeiReranker(ctx context.Context, cfg *TeiRerankerConfig) (*TeiReranker, error) { - if cfg.BaseURL == "" { - return nil, fmt.Errorf("TEI base URL is required") - } - - timeout := cfg.Timeout - if timeout == 0 { - timeout = 30 * time.Second - } - - topK := cfg.TopK - if topK <= 0 { - topK = 5 // Default to top 5 - } - - return &TeiReranker{ - baseURL: cfg.BaseURL, - model: cfg.Model, - topK: topK, - client: &http.Client{ - Timeout: timeout, - }, - }, nil -} - -// Rerank reorders documents by relevance to the query. -// Returns results sorted by score (highest first), limited to TopK. -func (r *TeiReranker) Rerank(ctx context.Context, query string, documents []string) ([]TeiRerankResult, error) { - if len(documents) == 0 { - return nil, nil - } - - reqBody := TeiRerankRequest{ - Query: query, - Texts: documents, - RawScores: false, - Truncate: true, - } - - body, err := json.Marshal(reqBody) - if err != nil { - return nil, fmt.Errorf("marshal request: %w", err) - } - - url := r.baseURL + "/rerank" - req, err := http.NewRequestWithContext(ctx, http.MethodPost, url, bytes.NewReader(body)) - if err != nil { - return nil, fmt.Errorf("create request: %w", err) - } - req.Header.Set("Content-Type", "application/json") - - resp, err := r.client.Do(req) - if err != nil { - return nil, fmt.Errorf("HTTP request: %w", err) - } - defer func() { _ = resp.Body.Close() }() - - if resp.StatusCode != http.StatusOK { - respBody, _ := io.ReadAll(resp.Body) - return nil, fmt.Errorf("TEI rerank returned status %d: %s", resp.StatusCode, string(respBody)) - } - - var rerankResp []TeiRerankResponse - if err := json.NewDecoder(resp.Body).Decode(&rerankResp); err != nil { - return nil, fmt.Errorf("decode response: %w", err) - } - - // Convert to results with original text reference - results := make([]TeiRerankResult, len(rerankResp)) - for i, rr := range rerankResp { - originalText := "" - if rr.Index < len(documents) { - originalText = documents[rr.Index] - } - results[i] = 
TeiRerankResult{ - Index: rr.Index, - Score: rr.Score, - OriginalText: originalText, - } - } - - // Sort by score (highest first) - TEI may already return sorted, but ensure consistency - sort.Slice(results, func(i, j int) bool { - return results[i].Score > results[j].Score - }) - - // Limit to TopK - if len(results) > r.topK { - results = results[:r.topK] - } - - return results, nil -} - -// RerankWithScores returns just the scores aligned with input document indices. -// Useful when you need to merge scores with existing data structures. -func (r *TeiReranker) RerankWithScores(ctx context.Context, query string, documents []string) ([]float64, error) { - results, err := r.Rerank(ctx, query, documents) - if err != nil { - return nil, err - } - - // Create score array aligned with original indices - scores := make([]float64, len(documents)) - for _, res := range results { - if res.Index < len(scores) { - scores[res.Index] = res.Score - } - } - - return scores, nil -} - -// Close releases any resources held by the reranker. -func (r *TeiReranker) Close() error { - // HTTP client doesn't need explicit cleanup - return nil -} diff --git a/internal/mcp/handlers.go b/internal/mcp/handlers.go index 705dff8..9caa277 100644 --- a/internal/mcp/handlers.go +++ b/internal/mcp/handlers.go @@ -9,15 +9,25 @@ import ( "path/filepath" "strings" - agentcore "github.com/josephgoksu/TaskWing/internal/agents/core" - agentimpl "github.com/josephgoksu/TaskWing/internal/agents/impl" "github.com/josephgoksu/TaskWing/internal/app" "github.com/josephgoksu/TaskWing/internal/codeintel" "github.com/josephgoksu/TaskWing/internal/config" "github.com/josephgoksu/TaskWing/internal/llm" "github.com/josephgoksu/TaskWing/internal/memory" + "github.com/josephgoksu/TaskWing/internal/runner" ) +// newPlanAppContext creates an app.Context with runner detection for plan operations. +// If an AI CLI is available, uses it (no API key needed). Otherwise falls back to LLM API. 
+func newPlanAppContext(repo *memory.Repository) *app.Context { + if r, err := runner.PreferredRunner(""); err == nil { + ctx := app.NewContextWithConfig(repo, llm.Config{}) + ctx.Runner = r + return ctx + } + return app.NewContextForRole(repo, llm.RoleBootstrap) +} + // CodeToolResult represents the response from the unified code tool. type CodeToolResult struct { Action string `json:"action"` @@ -310,44 +320,25 @@ func handleCodeSimplify(ctx context.Context, repo *memory.Repository, params Cod } } - // Create and run the SimplifyAgent - llmCfg, err := config.LoadLLMConfigForRole(llm.RoleQuery) - if err != nil { - return &CodeToolResult{ - Action: "simplify", - Error: fmt.Sprintf("failed to load LLM config: %v", err), - }, nil - } - agent := agentimpl.NewSimplifyAgent(llmCfg) - defer func() { _ = agent.Close() }() - - input := agentcore.Input{ - ExistingContext: map[string]any{ - "code": code, - "file_path": filePath, - "context": kgContext, - }, - } - - output, err := agent.Run(ctx, input) - if err != nil { - return &CodeToolResult{ - Action: "simplify", - Error: fmt.Sprintf("agent error: %v", err), - }, nil + // Return code + architectural context for AI CLI to simplify + var sb strings.Builder + sb.WriteString("## Simplify Request\n\n") + if filePath != "" { + sb.WriteString(fmt.Sprintf("**File:** `%s`\n\n", filePath)) } - - if output.Error != nil { - return &CodeToolResult{ - Action: "simplify", - Error: output.Error.Error(), - }, nil + if kgContext != "" { + sb.WriteString("### Architectural Context\n") + sb.WriteString(kgContext) + sb.WriteString("\n\n") } + sb.WriteString("### Code\n```\n") + sb.WriteString(code) + sb.WriteString("\n```\n\n") + sb.WriteString("Simplify this code while preserving behavior. 
Consider the architectural context above.\n") - // Format the output return &CodeToolResult{ Action: "simplify", - Content: FormatSimplifyResult(output.Findings), + Content: sb.String(), }, nil } @@ -464,41 +455,27 @@ func HandleDebugTool(ctx context.Context, repo *memory.Repository, params DebugT kgContext = formatAskContext(result) } - // Create and run the DebugAgent - llmCfg, err := config.LoadLLMConfigForRole(llm.RoleQuery) - if err != nil { - return &DebugToolResult{ - Error: fmt.Sprintf("failed to load LLM config: %v", err), - }, nil - } - agent := agentimpl.NewDebugAgent(llmCfg) - defer func() { _ = agent.Close() }() - - input := agentcore.Input{ - ExistingContext: map[string]any{ - "problem": problem, - "error": params.Error, - "stack_trace": params.StackTrace, - "context": kgContext, - }, + // Return problem description + architectural context for AI CLI to debug + var sb strings.Builder + sb.WriteString("## Debug Request\n\n") + sb.WriteString(fmt.Sprintf("**Problem:** %s\n\n", problem)) + if params.Error != "" { + sb.WriteString(fmt.Sprintf("**Error:** %s\n\n", params.Error)) } - - output, err := agent.Run(ctx, input) - if err != nil { - return &DebugToolResult{ - Error: fmt.Sprintf("agent error: %v", err), - }, nil + if params.StackTrace != "" { + sb.WriteString("### Stack Trace\n```\n") + sb.WriteString(params.StackTrace) + sb.WriteString("\n```\n\n") } - - if output.Error != nil { - return &DebugToolResult{ - Error: output.Error.Error(), - }, nil + if kgContext != "" { + sb.WriteString("### Architectural Context\n") + sb.WriteString(kgContext) + sb.WriteString("\n\n") } + sb.WriteString("Diagnose the root cause. 
Provide hypotheses ranked by likelihood, investigation steps, and quick fixes.\n") - // Format the output return &DebugToolResult{ - Content: FormatDebugResult(output.Findings), + Content: sb.String(), }, nil } @@ -791,8 +768,8 @@ func handlePlanClarify(ctx context.Context, repo *memory.Repository, params Plan }) } - // Use RoleBootstrap for planning operations - appCtx := app.NewContextForRole(repo, llm.RoleBootstrap) + // Detect runner or fall back to LLM API for planning + appCtx := newPlanAppContext(repo) planApp := app.NewPlanApp(appCtx) result, err := planApp.Clarify(ctx, app.ClarifyOptions{ @@ -850,8 +827,8 @@ func handlePlanGenerate(ctx context.Context, repo *memory.Repository, params Pla save = *params.Save } - // Use RoleBootstrap for planning operations - appCtx := app.NewContextForRole(repo, llm.RoleBootstrap) + // Detect runner or fall back to LLM API for planning + appCtx := newPlanAppContext(repo) planApp := app.NewPlanApp(appCtx) result, err := planApp.Generate(ctx, app.GenerateOptions{ @@ -890,8 +867,8 @@ func handlePlanDecompose(ctx context.Context, repo *memory.Repository, params Pl }, nil } - // Use RoleBootstrap for planning operations - appCtx := app.NewContextForRole(repo, llm.RoleBootstrap) + // Detect runner or fall back to LLM API for planning + appCtx := newPlanAppContext(repo) planApp := app.NewPlanApp(appCtx) result, err := planApp.Decompose(ctx, app.DecomposeOptions{ @@ -944,8 +921,8 @@ func handlePlanExpand(ctx context.Context, repo *memory.Repository, params PlanT }, nil } - // Use RoleBootstrap for planning operations - appCtx := app.NewContextForRole(repo, llm.RoleBootstrap) + // Detect runner or fall back to LLM API for planning + appCtx := newPlanAppContext(repo) planApp := app.NewPlanApp(appCtx) opts := app.ExpandOptions{ @@ -990,8 +967,8 @@ func handlePlanFinalize(ctx context.Context, repo *memory.Repository, params Pla }, nil } - // Use RoleBootstrap for planning operations - appCtx := app.NewContextForRole(repo, 
llm.RoleBootstrap) + // Detect runner or fall back to LLM API for planning + appCtx := newPlanAppContext(repo) planApp := app.NewPlanApp(appCtx) result, err := planApp.Finalize(ctx, app.FinalizeOptions{ @@ -1018,8 +995,8 @@ func handlePlanAudit(ctx context.Context, repo *memory.Repository, params PlanTo autoFix = *params.AutoFix } - // Use RoleBootstrap for audit operations - appCtx := app.NewContextForRole(repo, llm.RoleBootstrap) + // Detect runner or fall back to LLM API for audit + appCtx := newPlanAppContext(repo) planApp := app.NewPlanApp(appCtx) result, err := planApp.Audit(ctx, app.AuditOptions{ diff --git a/internal/mcp/presenter.go b/internal/mcp/presenter.go index 9aa9ac6..b1eae92 100644 --- a/internal/mcp/presenter.go +++ b/internal/mcp/presenter.go @@ -8,7 +8,6 @@ import ( "strings" agentcore "github.com/josephgoksu/TaskWing/internal/agents/core" - agentimpl "github.com/josephgoksu/TaskWing/internal/agents/impl" "github.com/josephgoksu/TaskWing/internal/app" "github.com/josephgoksu/TaskWing/internal/codeintel" "github.com/josephgoksu/TaskWing/internal/knowledge" @@ -1117,19 +1116,45 @@ func FormatDebugResult(findings []agentcore.Finding) string { return strings.TrimSpace(sb.String()) } +// Local types for debug/simplify presenter (original types removed with dead agents). +type debugHypothesis struct { + Cause string `json:"cause"` + Likelihood string `json:"likelihood"` + Reasoning string `json:"reasoning"` + CodeLocations []string `json:"code_locations"` +} + +type debugInvestigationStep struct { + Step int `json:"step"` + Action string `json:"action"` + Command string `json:"command"` + ExpectedFinding string `json:"expected_finding"` +} + +type debugQuickFix struct { + Fix string `json:"fix"` + When string `json:"when"` +} + +type simplifyChange struct { + What string `json:"what"` + Why string `json:"why"` + Risk string `json:"risk"` +} + // extractDebugHypotheses safely extracts hypotheses from interface{}. 
// Handles both direct []DebugHypothesis and []interface{} from JSON. -func extractDebugHypotheses(raw interface{}) []agentimpl.DebugHypothesis { +func extractDebugHypotheses(raw interface{}) []debugHypothesis { // Direct type match (from agent output before serialization) - if typed, ok := raw.([]agentimpl.DebugHypothesis); ok { + if typed, ok := raw.([]debugHypothesis); ok { return typed } // Handle []interface{} from JSON unmarshaling if arr, ok := raw.([]interface{}); ok { - result := make([]agentimpl.DebugHypothesis, 0, len(arr)) + result := make([]debugHypothesis, 0, len(arr)) for _, item := range arr { if m, ok := item.(map[string]interface{}); ok { - h := agentimpl.DebugHypothesis{ + h := debugHypothesis{ Cause: getStringField(m, "cause"), Likelihood: getStringField(m, "likelihood"), Reasoning: getStringField(m, "reasoning"), @@ -1150,15 +1175,15 @@ func extractDebugHypotheses(raw interface{}) []agentimpl.DebugHypothesis { } // extractDebugSteps safely extracts investigation steps from interface{}. -func extractDebugSteps(raw interface{}) []agentimpl.DebugInvestigationStep { - if typed, ok := raw.([]agentimpl.DebugInvestigationStep); ok { +func extractDebugSteps(raw interface{}) []debugInvestigationStep { + if typed, ok := raw.([]debugInvestigationStep); ok { return typed } if arr, ok := raw.([]interface{}); ok { - result := make([]agentimpl.DebugInvestigationStep, 0, len(arr)) + result := make([]debugInvestigationStep, 0, len(arr)) for _, item := range arr { if m, ok := item.(map[string]interface{}); ok { - s := agentimpl.DebugInvestigationStep{ + s := debugInvestigationStep{ Step: getIntField(m, "step"), Action: getStringField(m, "action"), Command: getStringField(m, "command"), @@ -1173,15 +1198,15 @@ func extractDebugSteps(raw interface{}) []agentimpl.DebugInvestigationStep { } // extractDebugFixes safely extracts quick fixes from interface{}. 
-func extractDebugFixes(raw interface{}) []agentimpl.DebugQuickFix { - if typed, ok := raw.([]agentimpl.DebugQuickFix); ok { +func extractDebugFixes(raw interface{}) []debugQuickFix { + if typed, ok := raw.([]debugQuickFix); ok { return typed } if arr, ok := raw.([]interface{}); ok { - result := make([]agentimpl.DebugQuickFix, 0, len(arr)) + result := make([]debugQuickFix, 0, len(arr)) for _, item := range arr { if m, ok := item.(map[string]interface{}); ok { - f := agentimpl.DebugQuickFix{ + f := debugQuickFix{ Fix: getStringField(m, "fix"), When: getStringField(m, "when"), } @@ -1264,17 +1289,17 @@ func FormatSimplifyResult(findings []agentcore.Finding) string { // extractSimplifyChanges safely extracts changes from interface{}. // Handles both direct []SimplifyChange and []interface{} from JSON. -func extractSimplifyChanges(raw interface{}) []agentimpl.SimplifyChange { +func extractSimplifyChanges(raw interface{}) []simplifyChange { // Direct type match (from agent output before serialization) - if typed, ok := raw.([]agentimpl.SimplifyChange); ok { + if typed, ok := raw.([]simplifyChange); ok { return typed } // Handle []interface{} from JSON unmarshaling if arr, ok := raw.([]interface{}); ok { - result := make([]agentimpl.SimplifyChange, 0, len(arr)) + result := make([]simplifyChange, 0, len(arr)) for _, item := range arr { if m, ok := item.(map[string]interface{}); ok { - c := agentimpl.SimplifyChange{ + c := simplifyChange{ What: getStringField(m, "what"), Why: getStringField(m, "why"), Risk: getStringField(m, "risk"), diff --git a/internal/memory/repository_knowledge.go b/internal/memory/repository_knowledge.go index b3ab412..819741f 100644 --- a/internal/memory/repository_knowledge.go +++ b/internal/memory/repository_knowledge.go @@ -70,7 +70,7 @@ func (r *Repository) GetNodesByFiles(agent string, filePaths []string) ([]Node, return r.db.GetNodesByFiles(agent, filePaths) } -func (r *Repository) UpsertNodeBySummary(n Node) error { +func (r *Repository) 
UpsertNodeBySummary(n Node) (string, error) { return r.db.UpsertNodeBySummary(n) } diff --git a/internal/memory/sqlite.go b/internal/memory/sqlite.go index f55e01c..10156ff 100644 --- a/internal/memory/sqlite.go +++ b/internal/memory/sqlite.go @@ -446,6 +446,27 @@ func (s *SQLiteStore) initSchema() error { CREATE INDEX IF NOT EXISTS idx_policy_decisions_session ON policy_decisions(session_id); CREATE INDEX IF NOT EXISTS idx_policy_decisions_result ON policy_decisions(result); CREATE INDEX IF NOT EXISTS idx_policy_decisions_evaluated_at ON policy_decisions(evaluated_at); + + -- === Token Compression Analytics === + -- Tracks compression stats from taskwing proxy for the gain command. + + CREATE TABLE IF NOT EXISTS token_stats ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + command TEXT NOT NULL, + input_bytes INTEGER NOT NULL, + output_bytes INTEGER NOT NULL, + saved_bytes INTEGER NOT NULL, + compression_ratio REAL, + input_tokens INTEGER, + output_tokens INTEGER, + saved_tokens INTEGER, + session_id TEXT, + created_at TEXT NOT NULL DEFAULT (datetime('now')) + ); + + CREATE INDEX IF NOT EXISTS idx_token_stats_command ON token_stats(command); + CREATE INDEX IF NOT EXISTS idx_token_stats_created_at ON token_stats(created_at); + CREATE INDEX IF NOT EXISTS idx_token_stats_session ON token_stats(session_id); ` // Execute main schema @@ -1305,7 +1326,7 @@ func (s *SQLiteStore) ClearAllKnowledge() error { // If no exact match is found, it checks for semantically similar summaries and updates those instead // to prevent duplicate nodes from accumulating. // Uses a transaction to prevent race conditions in concurrent watch mode. 
-func (s *SQLiteStore) UpsertNodeBySummary(n Node) error { +func (s *SQLiteStore) UpsertNodeBySummary(n Node) (string, error) { if n.ID == "" { n.ID = "n-" + uuid.New().String()[:8] } @@ -1322,7 +1343,7 @@ func (s *SQLiteStore) UpsertNodeBySummary(n Node) error { // Use IMMEDIATE transaction to prevent race conditions in concurrent watch mode tx, err := s.db.Begin() if err != nil { - return fmt.Errorf("begin transaction: %w", err) + return "", fmt.Errorf("begin transaction: %w", err) } defer func() { rollbackWithLog(tx, "sqlite") }() @@ -1343,9 +1364,12 @@ func (s *SQLiteStore) UpsertNodeBySummary(n Node) error { n.Evidence, n.VerificationStatus, n.VerificationResult, n.ConfidenceScore, n.DebtScore, n.DebtReason, n.RefactorHint, existingID) if err != nil { - return fmt.Errorf("update existing node: %w", err) + return "", fmt.Errorf("update existing node: %w", err) } - return tx.Commit() + if err := tx.Commit(); err != nil { + return "", fmt.Errorf("commit: %w", err) + } + return existingID, nil } // No exact match - check for semantically similar summaries from same agent @@ -1354,7 +1378,7 @@ func (s *SQLiteStore) UpsertNodeBySummary(n Node) error { SELECT id, summary, content FROM nodes WHERE source_agent = ? 
`, n.SourceAgent) if err != nil { - return fmt.Errorf("query similar nodes: %w", err) + return "", fmt.Errorf("query similar nodes: %w", err) } var similarID string @@ -1388,14 +1412,17 @@ func (s *SQLiteStore) UpsertNodeBySummary(n Node) error { n.DebtScore, n.DebtReason, n.RefactorHint, similarID) } if err != nil { - return fmt.Errorf("update similar node: %w", err) + return "", fmt.Errorf("update similar node: %w", err) + } + if err := tx.Commit(); err != nil { + return "", fmt.Errorf("commit: %w", err) } - return tx.Commit() + return similarID, nil } } if err := checkRowsErr(rows); err != nil { _ = rows.Close() - return fmt.Errorf("iterate similar nodes: %w", err) + return "", fmt.Errorf("iterate similar nodes: %w", err) } _ = rows.Close() @@ -1410,10 +1437,13 @@ func (s *SQLiteStore) UpsertNodeBySummary(n Node) error { n.DebtScore, n.DebtReason, n.RefactorHint) if err != nil { - return fmt.Errorf("insert node: %w", err) + return "", fmt.Errorf("insert node: %w", err) } - return tx.Commit() + if err := tx.Commit(); err != nil { + return "", fmt.Errorf("commit: %w", err) + } + return n.ID, nil } // UpdateNodeEmbedding updates the embedding for an existing node. diff --git a/internal/runner/claude.go b/internal/runner/claude.go new file mode 100644 index 0000000..dee8aa9 --- /dev/null +++ b/internal/runner/claude.go @@ -0,0 +1,145 @@ +package runner + +import ( + "bytes" + "context" + "fmt" + "os/exec" + "strings" + "time" +) + +// claudeRunner implements Runner for Claude Code. +// Uses `claude -p "" --output-format json` for headless invocation. +type claudeRunner struct { + binaryPath string +} + +func (r *claudeRunner) Type() CLIType { return CLIClaude } +func (r *claudeRunner) BinaryPath() string { return r.binaryPath } + +func (r *claudeRunner) Available() bool { + _, err := exec.LookPath(r.binaryPath) + return err == nil +} + +// Invoke runs Claude Code in read-only print mode with JSON output. 
+// When req.OnProgress is set, emits periodic heartbeat events. +// +// Note: Claude Code's stream-json mode requires --verbose, which changes +// Claude's behavior (spawns subagents, more thorough analysis) and causes +// timeouts on complex prompts. We use buffered json mode with heartbeats +// until Claude Code supports stream-json without --verbose. +func (r *claudeRunner) Invoke(ctx context.Context, req InvokeRequest) (*InvokeResult, error) { + args := []string{ + "-p", req.Prompt, + "--output-format", "json", + "--no-session-persistence", + } + + if req.Model != "" { + args = append(args, "--model", req.Model) + } + if req.SystemPrompt != "" { + args = append(args, "--system-prompt", req.SystemPrompt) + } + + if req.OnProgress != nil { + return r.runWithHeartbeat(ctx, req, args) + } + return r.run(ctx, req, args) +} + +// InvokeWithFiles runs Claude Code with full tool access for file modifications. +// When req.OnProgress is set, emits periodic heartbeat events. +func (r *claudeRunner) InvokeWithFiles(ctx context.Context, req InvokeRequest) (*InvokeResult, error) { + args := []string{ + "-p", req.Prompt, + "--output-format", "json", + "--no-session-persistence", + "--allowedTools", "Edit,Write,Bash,Read,Glob,Grep", + } + + if req.Model != "" { + args = append(args, "--model", req.Model) + } + if req.SystemPrompt != "" { + args = append(args, "--system-prompt", req.SystemPrompt) + } + + if req.OnProgress != nil { + return r.runWithHeartbeat(ctx, req, args) + } + return r.run(ctx, req, args) +} + +// runWithHeartbeat wraps run() with a goroutine that emits ProgressHeartbeat events +// every 15 seconds, since Claude Code's stream-json requires --verbose which +// changes behavior and causes timeouts. 
+func (r *claudeRunner) runWithHeartbeat(ctx context.Context, req InvokeRequest, args []string) (*InvokeResult, error) { + done := make(chan struct{}) + go func() { + ticker := time.NewTicker(15 * time.Second) + defer ticker.Stop() + for { + select { + case <-ticker.C: + req.OnProgress(ProgressEvent{ + Type: ProgressHeartbeat, + Summary: "still working...", + }) + case <-done: + return + } + } + }() + + result, err := r.run(ctx, req, args) + close(done) + return result, err +} + +func (r *claudeRunner) run(ctx context.Context, req InvokeRequest, args []string) (*InvokeResult, error) { + timeout := req.effectiveTimeout() + ctx, cancel := context.WithTimeout(ctx, timeout) + defer cancel() + + cmd := exec.CommandContext(ctx, r.binaryPath, args...) + if req.WorkDir != "" { + cmd.Dir = req.WorkDir + } + + var stdout, stderr bytes.Buffer + cmd.Stdout = &stdout + cmd.Stderr = &stderr + + err := cmd.Run() + result := &InvokeResult{ + RawOutput: stdout.String(), + Stderr: stderr.String(), + CLIType: CLIClaude, + } + + if cmd.ProcessState != nil { + result.ExitCode = cmd.ProcessState.ExitCode() + } + + if err != nil { + if ctx.Err() == context.DeadlineExceeded { + return result, fmt.Errorf("claude invocation timed out after %v", timeout) + } + return result, fmt.Errorf("claude invocation failed (exit %d): %w\nstderr: %s", + result.ExitCode, err, truncate(result.Stderr, 500)) + } + + return result, nil +} + +// truncate shortens a string to maxLen, appending "..." if truncated. +func truncate(s string, maxLen int) string { + s = strings.TrimSpace(s) + if len(s) <= maxLen { + return s + } + return s[:maxLen] + "..." +} diff --git a/internal/runner/codex.go b/internal/runner/codex.go new file mode 100644 index 0000000..4b96972 --- /dev/null +++ b/internal/runner/codex.go @@ -0,0 +1,83 @@ +package runner + +import ( + "bytes" + "context" + "fmt" + "os/exec" +) + +// codexRunner implements Runner for OpenAI Codex CLI. +// Uses `codex -q ""` for quiet/headless invocation. 
+type codexRunner struct { + binaryPath string +} + +func (r *codexRunner) Type() CLIType { return CLICodex } +func (r *codexRunner) BinaryPath() string { return r.binaryPath } + +func (r *codexRunner) Available() bool { + _, err := exec.LookPath(r.binaryPath) + return err == nil +} + +// Invoke runs Codex CLI in quiet mode (read-only, no file changes). +// When req.OnProgress is set, uses exec --json mode for real-time progress. +func (r *codexRunner) Invoke(ctx context.Context, req InvokeRequest) (*InvokeResult, error) { + if req.OnProgress != nil { + args := []string{"exec", req.Prompt, "--json"} + return r.runStreaming(ctx, req, args) + } + args := []string{"-q", req.Prompt} + return r.run(ctx, req, args) +} + +// InvokeWithFiles runs Codex CLI with full-auto approval for file modifications. +// When req.OnProgress is set, uses exec --json mode for real-time progress. +func (r *codexRunner) InvokeWithFiles(ctx context.Context, req InvokeRequest) (*InvokeResult, error) { + if req.OnProgress != nil { + args := []string{"exec", req.Prompt, "--json", "--approval-mode", "full-auto"} + return r.runStreaming(ctx, req, args) + } + args := []string{ + "-q", req.Prompt, + "--approval-mode", "full-auto", + } + return r.run(ctx, req, args) +} + +func (r *codexRunner) run(ctx context.Context, req InvokeRequest, args []string) (*InvokeResult, error) { + timeout := req.effectiveTimeout() + ctx, cancel := context.WithTimeout(ctx, timeout) + defer cancel() + + cmd := exec.CommandContext(ctx, r.binaryPath, args...) 
+ if req.WorkDir != "" { + cmd.Dir = req.WorkDir + } + + var stdout, stderr bytes.Buffer + cmd.Stdout = &stdout + cmd.Stderr = &stderr + + err := cmd.Run() + result := &InvokeResult{ + RawOutput: stdout.String(), + Stderr: stderr.String(), + CLIType: CLICodex, + } + + if cmd.ProcessState != nil { + result.ExitCode = cmd.ProcessState.ExitCode() + } + + if err != nil { + if ctx.Err() == context.DeadlineExceeded { + return result, fmt.Errorf("codex invocation timed out after %v", timeout) + } + return result, fmt.Errorf("codex invocation failed (exit %d): %w\nstderr: %s", + result.ExitCode, err, truncate(result.Stderr, 500)) + } + + return result, nil +} diff --git a/internal/runner/codex_stream.go b/internal/runner/codex_stream.go new file mode 100644 index 0000000..b6a1a8c --- /dev/null +++ b/internal/runner/codex_stream.go @@ -0,0 +1,168 @@ +package runner + +import ( + "bufio" + "bytes" + "context" + "encoding/json" + "fmt" + "os/exec" + "strings" +) + +// runStreaming runs Codex CLI with `exec --json` and parses the JSONL event stream, +// emitting ProgressEvents via req.OnProgress. The last assistant text content is +// accumulated and returned as InvokeResult.RawOutput. +func (r *codexRunner) runStreaming(ctx context.Context, req InvokeRequest, args []string) (*InvokeResult, error) { + timeout := req.effectiveTimeout() + ctx, cancel := context.WithTimeout(ctx, timeout) + defer cancel() + + cmd := exec.CommandContext(ctx, r.binaryPath, args...) 
+ if req.WorkDir != "" { + cmd.Dir = req.WorkDir + } + + stdout, err := cmd.StdoutPipe() + if err != nil { + return nil, fmt.Errorf("codex stdout pipe: %w", err) + } + + var stderr bytes.Buffer + cmd.Stderr = &stderr + + if err := cmd.Start(); err != nil { + return nil, fmt.Errorf("codex start: %w", err) + } + + // Parse JSONL stream + var lastAssistantText string + lastSummary := "" + + scanner := bufio.NewScanner(stdout) + scanner.Buffer(make([]byte, 0, 64*1024), 1024*1024) + + for scanner.Scan() { + line := scanner.Text() + if strings.TrimSpace(line) == "" { + continue + } + + var event codexStreamEvent + if err := json.Unmarshal([]byte(line), &event); err != nil { + continue + } + + switch event.Type { + case "item.started": + if event.Item.Role == "assistant" { + emitDeduped(req.OnProgress, &lastSummary, ProgressEvent{ + Type: ProgressThinking, + Summary: "Thinking...", + }) + } + if event.Item.Type == "function_call" { + summary := event.Item.Name + if summary == "" { + summary = "tool" + } + emitDeduped(req.OnProgress, &lastSummary, ProgressEvent{ + Type: ProgressToolUse, + Summary: summary, + }) + } + + case "item.completed": + if event.Item.Type == "function_call_output" { + emitDeduped(req.OnProgress, &lastSummary, ProgressEvent{ + Type: ProgressToolResult, + Summary: "done", + }) + } + // Capture last assistant text + if event.Item.Role == "assistant" { + for _, c := range event.Item.Content { + if c.Type == "text" && c.Text != "" { + lastAssistantText = c.Text + emitDeduped(req.OnProgress, &lastSummary, ProgressEvent{ + Type: ProgressText, + Summary: truncateStream(c.Text, 80), + }) + } + } + } + + case "turn.completed": + // Extract final assistant text from the completed turn + if event.Turn.Role == "assistant" { + for _, c := range event.Turn.Content { + if c.Type == "text" && c.Text != "" { + lastAssistantText = c.Text + } + } + } + } + } + + waitErr := cmd.Wait() + + result := &InvokeResult{ + RawOutput: lastAssistantText, + Stderr: 
stderr.String(), + CLIType: CLICodex, + } + if cmd.ProcessState != nil { + result.ExitCode = cmd.ProcessState.ExitCode() + } + + if waitErr != nil { + if ctx.Err() == context.DeadlineExceeded { + return result, fmt.Errorf("codex invocation timed out after %v", timeout) + } + return result, fmt.Errorf("codex invocation failed (exit %d): %w\nstderr: %s", + result.ExitCode, waitErr, truncate(result.Stderr, 500)) + } + + return result, nil +} + +// emitDeduped sends a progress event only if the summary differs from the last one. +func emitDeduped(cb ProgressCallback, lastSummary *string, ev ProgressEvent) { + if cb == nil { + return + } + if ev.Summary == *lastSummary { + return + } + *lastSummary = ev.Summary + cb(ev) +} + +// truncateStream shortens a string to maxLen for display, trimming whitespace. +func truncateStream(s string, maxLen int) string { + s = strings.TrimSpace(s) + s = strings.ReplaceAll(s, "\n", " ") + if len(s) <= maxLen { + return s + } + return s[:maxLen] + "..." +} + +// codexStreamEvent represents a single JSONL event from Codex CLI's exec --json output. +type codexStreamEvent struct { + Type string `json:"type"` + Item codexStreamItem `json:"item"` + Turn codexStreamItem `json:"turn"` +} + +type codexStreamItem struct { + Role string `json:"role,omitempty"` + Type string `json:"type,omitempty"` + Name string `json:"name,omitempty"` // Function name for function_call + Content []codexStreamContent `json:"content,omitempty"` +} + +type codexStreamContent struct { + Type string `json:"type"` + Text string `json:"text,omitempty"` +} diff --git a/internal/runner/detect.go b/internal/runner/detect.go new file mode 100644 index 0000000..1798df8 --- /dev/null +++ b/internal/runner/detect.go @@ -0,0 +1,34 @@ +package runner + +import "os/exec" + +// cliCandidate maps a binary name to a CLIType. +type cliCandidate struct { + binary string + cliType CLIType +} + +// defaultCandidates defines the search order for AI CLI binaries. 
+// Priority: Claude Code > Gemini CLI > Codex CLI. +var defaultCandidates = []cliCandidate{ + {"claude", CLIClaude}, + {"gemini", CLIGemini}, + {"codex", CLICodex}, +} + +// DetectCLIs scans PATH for installed AI CLI binaries. +// Returns all detected CLIs in priority order. +func DetectCLIs() []DetectedCLI { + var detected []DetectedCLI + for _, c := range defaultCandidates { + path, err := exec.LookPath(c.binary) + if err == nil { + detected = append(detected, DetectedCLI{ + Type: c.cliType, + BinaryPath: path, + }) + } + } + return detected +} + diff --git a/internal/runner/gemini.go b/internal/runner/gemini.go new file mode 100644 index 0000000..db5ca56 --- /dev/null +++ b/internal/runner/gemini.go @@ -0,0 +1,107 @@ +package runner + +import ( + "bytes" + "context" + "fmt" + "os/exec" + "time" +) + +// geminiRunner implements Runner for Gemini CLI. +// Uses `gemini -p ""` for headless invocation. +type geminiRunner struct { + binaryPath string +} + +func (r *geminiRunner) Type() CLIType { return CLIGemini } +func (r *geminiRunner) BinaryPath() string { return r.binaryPath } + +func (r *geminiRunner) Available() bool { + _, err := exec.LookPath(r.binaryPath) + return err == nil +} + +// Invoke runs Gemini CLI in print mode (read-only, no file changes). +// When req.OnProgress is set, emits periodic heartbeat events (Gemini has no streaming mode). +func (r *geminiRunner) Invoke(ctx context.Context, req InvokeRequest) (*InvokeResult, error) { + args := []string{"-p", req.Prompt} + if req.OnProgress != nil { + return r.runWithHeartbeat(ctx, req, args) + } + return r.run(ctx, req, args) +} + +// InvokeWithFiles runs Gemini CLI with tool access for file modifications. +// Note: Gemini CLI's -p flag runs in prompt mode which may limit tool access. +// This is functionally equivalent to Invoke until Gemini CLI exposes a +// headless mode with explicit file modification permissions. +// When req.OnProgress is set, emits periodic heartbeat events. 
+func (r *geminiRunner) InvokeWithFiles(ctx context.Context, req InvokeRequest) (*InvokeResult, error) { + args := []string{"-p", req.Prompt} + if req.OnProgress != nil { + return r.runWithHeartbeat(ctx, req, args) + } + return r.run(ctx, req, args) +} + +// runWithHeartbeat wraps run() with a goroutine that emits ProgressHeartbeat events +// every 15 seconds, since Gemini CLI has no streaming output mode. +func (r *geminiRunner) runWithHeartbeat(ctx context.Context, req InvokeRequest, args []string) (*InvokeResult, error) { + done := make(chan struct{}) + go func() { + ticker := time.NewTicker(15 * time.Second) + defer ticker.Stop() + for { + select { + case <-ticker.C: + req.OnProgress(ProgressEvent{ + Type: ProgressHeartbeat, + Summary: "still working...", + }) + case <-done: + return + } + } + }() + + result, err := r.run(ctx, req, args) + close(done) + return result, err +} + +func (r *geminiRunner) run(ctx context.Context, req InvokeRequest, args []string) (*InvokeResult, error) { + timeout := req.effectiveTimeout() + ctx, cancel := context.WithTimeout(ctx, timeout) + defer cancel() + + cmd := exec.CommandContext(ctx, r.binaryPath, args...) 
+ if req.WorkDir != "" { + cmd.Dir = req.WorkDir + } + + var stdout, stderr bytes.Buffer + cmd.Stdout = &stdout + cmd.Stderr = &stderr + + err := cmd.Run() + result := &InvokeResult{ + RawOutput: stdout.String(), + Stderr: stderr.String(), + CLIType: CLIGemini, + } + + if cmd.ProcessState != nil { + result.ExitCode = cmd.ProcessState.ExitCode() + } + + if err != nil { + if ctx.Err() == context.DeadlineExceeded { + return result, fmt.Errorf("gemini invocation timed out after %v", timeout) + } + return result, fmt.Errorf("gemini invocation failed (exit %d): %w\nstderr: %s", + result.ExitCode, err, truncate(result.Stderr, 500)) + } + + return result, nil +} diff --git a/internal/runner/planning.go b/internal/runner/planning.go new file mode 100644 index 0000000..6bbd75d --- /dev/null +++ b/internal/runner/planning.go @@ -0,0 +1,346 @@ +package runner + +import ( + "context" + "fmt" + "os" + "strings" + "time" + + "github.com/josephgoksu/TaskWing/internal/agents/core" + "github.com/josephgoksu/TaskWing/internal/agents/impl" + "github.com/josephgoksu/TaskWing/internal/config" +) + +// RunnerClarifier implements app.GoalsClarifier using an AI CLI runner. +type RunnerClarifier struct { + runner Runner +} + +// NewRunnerClarifier creates a clarifier backed by an AI CLI runner. +func NewRunnerClarifier(r Runner) *RunnerClarifier { + return &RunnerClarifier{runner: r} +} + +// Close is a no-op for runner-backed agents (no persistent resources). +func (rc *RunnerClarifier) Close() error { return nil } + +// Run executes clarification via the AI CLI runner. 
+func (rc *RunnerClarifier) Run(ctx context.Context, input core.Input) (core.Output, error) { + goal, _ := input.ExistingContext["goal"].(string) + if goal == "" { + return core.Output{}, fmt.Errorf("missing 'goal' in input context") + } + + history, _ := input.ExistingContext["history"].(string) + kgContext, _ := input.ExistingContext["context"].(string) + + prompt, err := RenderTemplate(config.SystemPromptClarifyingAgent, map[string]any{ + "Goal": goal, + "History": history, + "Context": kgContext, + }) + if err != nil { + return core.Output{}, fmt.Errorf("render clarify template: %w", err) + } + + prompt += "\n\nRespond with ONLY a JSON object matching the Output Format above." + + result, err := RetryableInvoke(ctx, rc.runner, InvokeRequest{ + Prompt: prompt, + WorkDir: workDir(), + Timeout: 5 * time.Minute, + }, defaultRetries) + if err != nil { + return core.Output{Error: fmt.Errorf("runner clarify: %w", err)}, nil + } + + var parsed impl.ClarifyingOutput + if err := result.Decode(&parsed); err != nil { + return core.Output{Error: fmt.Errorf("decode clarify output: %w", err)}, nil + } + + return core.BuildOutput( + "runner-clarifier", + []core.Finding{{ + Type: "refinement", + Title: "Goal Clarification", + Description: parsed.EnrichedGoal, + Metadata: map[string]any{ + "questions": parsed.Questions, + "is_ready_to_plan": parsed.IsReadyToPlan, + "goal_summary": parsed.GoalSummary, + "enriched_goal": parsed.EnrichedGoal, + }, + }}, + "runner-backed", + 0, + ), nil +} + +// AutoAnswer uses the runner to autonomously answer clarification questions. +func (rc *RunnerClarifier) AutoAnswer(ctx context.Context, currentSpec string, questions []string, kgContext string) (string, error) { + var prompt string + + if currentSpec == "" && len(questions) == 1 { + prompt = fmt.Sprintf(`You are a Senior Architect answering a clarification question. 
+ +**Project Context:** +%s + +**Question:** +%s + +**Instructions:** +- FIRST: Check Project Context above for the answer - extract and use it if found +- Answer in 1-3 sentences maximum +- Be direct and specific - no hedging +- Do not ask follow-up questions +- If context doesn't have the answer, infer from the project's patterns + +Answer:`, kgContext, questions[0]) + } else { + qs := strings.Join(questions, "\n- ") + prompt = fmt.Sprintf(`You are the Senior Architect of this project. +Your goal is to refine a technical specification by addressing remaining ambiguities using your architectural knowledge. + +**Context (Source of Truth):** +%s + +**Remaining Questions/Ambiguities:** +- %s + +**Current Specification Draft:** +%s + +**Your Mission:** +Incorporate the most suitable, professional, and minimal architectural decisions into the specification to address the questions. +Act as if the user said "Yes, proceed with the best practice for these questions". +Respond ONLY with the FULL, UPDATED technical specification. Use professional language.`, kgContext, qs, currentSpec) + } + + result, err := RetryableInvoke(ctx, rc.runner, InvokeRequest{ + Prompt: prompt, + WorkDir: workDir(), + Timeout: 3 * time.Minute, + }, defaultRetries) + if err != nil { + return "", fmt.Errorf("runner auto-answer: %w", err) + } + + // AutoAnswer returns raw text, not JSON. + // For Claude, unwrap the JSON envelope to get the text content. + raw := result.RawOutput + if result.CLIType == CLIClaude { + raw = unwrapClaudeEnvelope(raw) + } + return strings.TrimSpace(raw), nil +} + +// RunnerPlanner implements app.TaskPlanner using an AI CLI runner. +type RunnerPlanner struct { + runner Runner +} + +// NewRunnerPlanner creates a planner backed by an AI CLI runner. +func NewRunnerPlanner(r Runner) *RunnerPlanner { + return &RunnerPlanner{runner: r} +} + +// Close is a no-op for runner-backed agents. 
+func (rp *RunnerPlanner) Close() error { return nil } + +// Run executes planning via the AI CLI runner. +func (rp *RunnerPlanner) Run(ctx context.Context, input core.Input) (core.Output, error) { + goal, _ := input.ExistingContext["enriched_goal"].(string) + if goal == "" { + goal, _ = input.ExistingContext["goal"].(string) + } + if goal == "" { + return core.Output{}, fmt.Errorf("missing 'enriched_goal' or 'goal' in input context") + } + + kgContext, _ := input.ExistingContext["context"].(string) + if kgContext == "" { + kgContext = "No specific knowledge graph context provided." + } + + prompt, err := RenderTemplate(config.SystemPromptPlanningAgent, map[string]any{ + "Goal": goal, + "Context": kgContext, + }) + if err != nil { + return core.Output{}, fmt.Errorf("render planning template: %w", err) + } + + prompt += "\n\nRespond with ONLY a JSON object matching the Output Format above." + + result, err := RetryableInvoke(ctx, rp.runner, InvokeRequest{ + Prompt: prompt, + WorkDir: workDir(), + Timeout: 5 * time.Minute, + }, defaultRetries) + if err != nil { + return core.Output{Error: fmt.Errorf("runner planning: %w", err)}, nil + } + + var parsed impl.PlanningOutput + if err := result.Decode(&parsed); err != nil { + return core.Output{Error: fmt.Errorf("decode planning output: %w", err)}, nil + } + + return core.BuildOutput( + "runner-planner", + []core.Finding{{ + Type: "plan", + Title: "Implementation Plan", + Description: parsed.Rationale, + Metadata: map[string]any{"tasks": parsed.Tasks}, + }}, + "runner-backed", + 0, + ), nil +} + +// RunnerDecomposer implements app.PhaseGoalDecomposer using an AI CLI runner. +type RunnerDecomposer struct { + runner Runner +} + +// NewRunnerDecomposer creates a decomposer backed by an AI CLI runner. +func NewRunnerDecomposer(r Runner) *RunnerDecomposer { + return &RunnerDecomposer{runner: r} +} + +// Close is a no-op for runner-backed agents. 
+func (rd *RunnerDecomposer) Close() error { return nil } + +// Run executes decomposition via the AI CLI runner. +func (rd *RunnerDecomposer) Run(ctx context.Context, input core.Input) (core.Output, error) { + enrichedGoal, _ := input.ExistingContext["enriched_goal"].(string) + if enrichedGoal == "" { + return core.Output{}, fmt.Errorf("missing 'enriched_goal' in input context") + } + + kgContext, _ := input.ExistingContext["context"].(string) + if kgContext == "" { + kgContext = "No specific knowledge graph context provided." + } + + prompt, err := RenderTemplate(config.SystemPromptDecompositionAgent, map[string]any{ + "EnrichedGoal": enrichedGoal, + "Context": kgContext, + }) + if err != nil { + return core.Output{}, fmt.Errorf("render decomposition template: %w", err) + } + + prompt += "\n\nRespond with ONLY a JSON object matching the Output Format above." + + result, err := RetryableInvoke(ctx, rd.runner, InvokeRequest{ + Prompt: prompt, + WorkDir: workDir(), + Timeout: 5 * time.Minute, + }, defaultRetries) + if err != nil { + return core.Output{Error: fmt.Errorf("runner decomposition: %w", err)}, nil + } + + var parsed impl.DecompositionOutput + if err := result.Decode(&parsed); err != nil { + return core.Output{Error: fmt.Errorf("decode decomposition output: %w", err)}, nil + } + + return core.BuildOutput( + "runner-decomposer", + []core.Finding{{ + Type: "decomposition", + Title: "Goal Decomposition", + Description: parsed.Rationale, + Metadata: map[string]any{ + "phases": parsed.Phases, + "rationale": parsed.Rationale, + }, + }}, + "runner-backed", + 0, + ), nil +} + +// RunnerExpander implements app.PhaseExpander using an AI CLI runner. +type RunnerExpander struct { + runner Runner +} + +// NewRunnerExpander creates an expander backed by an AI CLI runner. +func NewRunnerExpander(r Runner) *RunnerExpander { + return &RunnerExpander{runner: r} +} + +// Close is a no-op for runner-backed agents. 
+func (re *RunnerExpander) Close() error { return nil } + +// Run executes phase expansion via the AI CLI runner. +func (re *RunnerExpander) Run(ctx context.Context, input core.Input) (core.Output, error) { + phaseTitle, _ := input.ExistingContext["phase_title"].(string) + if phaseTitle == "" { + return core.Output{}, fmt.Errorf("missing 'phase_title' in input context") + } + + phaseDescription, _ := input.ExistingContext["phase_description"].(string) + enrichedGoal, _ := input.ExistingContext["enriched_goal"].(string) + kgContext, _ := input.ExistingContext["context"].(string) + if kgContext == "" { + kgContext = "No specific knowledge graph context provided." + } + + prompt, err := RenderTemplate(config.SystemPromptExpandAgent, map[string]any{ + "PhaseTitle": phaseTitle, + "PhaseDescription": phaseDescription, + "EnrichedGoal": enrichedGoal, + "Context": kgContext, + }) + if err != nil { + return core.Output{}, fmt.Errorf("render expand template: %w", err) + } + + prompt += "\n\nRespond with ONLY a JSON object matching the Output Format above." + + result, err := RetryableInvoke(ctx, re.runner, InvokeRequest{ + Prompt: prompt, + WorkDir: workDir(), + Timeout: 5 * time.Minute, + }, defaultRetries) + if err != nil { + return core.Output{Error: fmt.Errorf("runner expand: %w", err)}, nil + } + + var parsed impl.ExpandOutput + if err := result.Decode(&parsed); err != nil { + return core.Output{Error: fmt.Errorf("decode expand output: %w", err)}, nil + } + + return core.BuildOutput( + "runner-expander", + []core.Finding{{ + Type: "expansion", + Title: "Phase Expansion: " + phaseTitle, + Description: parsed.Rationale, + Metadata: map[string]any{ + "tasks": parsed.Tasks, + "rationale": parsed.Rationale, + }, + }}, + "runner-backed", + 0, + ), nil +} + +// workDir returns the current working directory, falling back to ".". +func workDir() string { + wd, err := os.Getwd() + if err != nil { + return "." 
+ } + return wd +} diff --git a/internal/runner/planning_prompts.go b/internal/runner/planning_prompts.go new file mode 100644 index 0000000..c622c44 --- /dev/null +++ b/internal/runner/planning_prompts.go @@ -0,0 +1,24 @@ +package runner + +import ( + "bytes" + "fmt" + "text/template" +) + +// RenderTemplate renders a Go text/template string with the given variables. +// This is used to render system prompt templates (e.g., SystemPromptClarifyingAgent) +// with input variables before sending them as prompts to AI CLI runners. +func RenderTemplate(templateStr string, vars map[string]any) (string, error) { + tmpl, err := template.New("prompt").Option("missingkey=zero").Parse(templateStr) + if err != nil { + return "", fmt.Errorf("parse template: %w", err) + } + + var buf bytes.Buffer + if err := tmpl.Execute(&buf, vars); err != nil { + return "", fmt.Errorf("execute template: %w", err) + } + + return buf.String(), nil +} diff --git a/internal/runner/pool.go b/internal/runner/pool.go new file mode 100644 index 0000000..3847db1 --- /dev/null +++ b/internal/runner/pool.go @@ -0,0 +1,116 @@ +package runner + +import ( + "context" + "fmt" + "sync" +) + +// Job represents a single unit of work to be executed by a Runner. +type Job struct { + ID string // Identifier for tracking (e.g., "decisions", "patterns") + Request InvokeRequest // The invocation request +} + +// JobResult holds the outcome of a single job execution. +type JobResult struct { + JobID string // Matches Job.ID + Result *InvokeResult // The invocation result (nil on error) + Error error // Non-nil if the job failed + RunnerType CLIType // Which CLI handled the job +} + +// Pool manages parallel and sequential execution of jobs across multiple runners. +type Pool struct { + runners []Runner + concurrency int +} + +// NewPool creates a pool from detected runners. +// Concurrency defaults to len(runners) if not specified. 
+func NewPool(runners []Runner, concurrency int) *Pool { + if concurrency <= 0 { + concurrency = len(runners) + } + if concurrency == 0 { + concurrency = 1 + } + return &Pool{ + runners: runners, + concurrency: concurrency, + } +} + +// Execute runs jobs in parallel, distributing them round-robin across available runners. +// Use this for read-only operations (analysis, planning) where jobs are independent. +func (p *Pool) Execute(ctx context.Context, jobs []Job) []JobResult { + if len(p.runners) == 0 { + results := make([]JobResult, len(jobs)) + for i, j := range jobs { + results[i] = JobResult{JobID: j.ID, Error: fmt.Errorf("no runners available")} + } + return results + } + + results := make([]JobResult, len(jobs)) + var wg sync.WaitGroup + sem := make(chan struct{}, p.concurrency) + + for i, job := range jobs { + runner := p.runners[i%len(p.runners)] + + wg.Add(1) + go func(idx int, j Job, r Runner) { + defer wg.Done() + sem <- struct{}{} + defer func() { <-sem }() + + result, err := r.Invoke(ctx, j.Request) + results[idx] = JobResult{ + JobID: j.ID, + Result: result, + Error: err, + RunnerType: r.Type(), + } + }(i, job, runner) + } + + wg.Wait() + return results +} + +// ExecuteSequential runs jobs one at a time using the first runner. +// Use this for file-modifying operations (task execution) where order matters. 
+func (p *Pool) ExecuteSequential(ctx context.Context, jobs []Job) []JobResult { + if len(p.runners) == 0 { + return []JobResult{{Error: fmt.Errorf("no runners available")}} + } + + runner := p.runners[0] + results := make([]JobResult, len(jobs)) + + for i, job := range jobs { + if ctx.Err() != nil { + results[i] = JobResult{ + JobID: job.ID, + Error: ctx.Err(), + } + break + } + + result, err := runner.InvokeWithFiles(ctx, job.Request) + results[i] = JobResult{ + JobID: job.ID, + Result: result, + Error: err, + RunnerType: runner.Type(), + } + + // Stop on failure + if err != nil { + break + } + } + + return results +} diff --git a/internal/runner/prompts.go b/internal/runner/prompts.go new file mode 100644 index 0000000..fac8bcb --- /dev/null +++ b/internal/runner/prompts.go @@ -0,0 +1,270 @@ +package runner + +import ( + "fmt" + "strings" +) + +// BootstrapAnalysisPrompt builds a prompt for AI CLI to analyze a codebase +// and extract architectural findings with full fidelity. +func BootstrapAnalysisPrompt(projectPath string, existingContext string, focusArea string) string { + var sb strings.Builder + + fmt.Fprintf(&sb, "Analyze the codebase at %s", projectPath) + + if focusArea != "" { + fmt.Fprintf(&sb, " with a focus on %s", focusArea) + } + sb.WriteString(".\n\n") + + sb.WriteString(`IMPORTANT: You MUST use your tools to read actual source files before making findings. +- Use Read/Glob to explore the directory structure and key files +- Use Bash to run ` + "`git log --oneline -100`" + ` for commit history +- Read go.mod/package.json/Cargo.toml for dependency analysis +- Read CI/CD configs (.github/workflows/, Makefile, Dockerfile, etc.) +Do NOT guess from the path alone — verify every finding with evidence from actual files. + +ANALYSIS CATEGORIES (be thorough across ALL of these): + +1. Architecture & Design Patterns + - Code organization (MVC, Clean, Hexagonal, etc.) 
+ - Repository/Factory/DI patterns + - Key abstractions, interfaces, core types + - Integration patterns (events, queues, APIs) + +2. Error Handling & Logging + - Error creation, wrapping, propagation patterns + - Logging library and conventions + - HTTP/API error response formats + - Custom error types or codes + +3. Security & Middleware + - CORS settings, allowed origins/methods + - Rate limiting configuration + - Authentication patterns (JWT, sessions, cookies) + - Request validation and sanitization + +4. Performance & Resilience + - Connection pool sizes, cache TTLs + - Timeout values (request, DB, external calls) + - Circuit breakers, retry policies, backoff + +5. Data Models & Configuration + - Database table/collection structures + - API request/response types + - Configuration structs with default values + - Key interfaces and implementations + +6. CI/CD & Build System + - Build commands and toolchain + - CI pipeline steps and permissions + - Deployment targets and constraints + - Release process + +7. Dependencies & Technology Choices + - Framework selections and rationale + - Database drivers and ORMs + - Testing frameworks + - Notable library choices (observability, etc.) + +8. Git History & Project Evolution + - Major milestones and feature additions + - Architecture changes over time + - Active development areas + +Extract architectural findings from the codebase. 
For each finding, identify: + +FINDING TYPES (use exactly one): +- "decision" — Architectural choice (framework, library, design pattern selection) +- "pattern" — Recurring code pattern, convention, or workflow +- "constraint" — Limitation, requirement, build rule, or security policy +- "feature" — Product capability or user-facing functionality +- "metadata" — Project metadata (git stats, build config, CI/CD setup) +- "documentation" — Documentation conventions (README structure, doc standards) + +REQUIRED FIELDS: +- type: One of the types above +- title: Concise finding title +- description: Detailed explanation of what this finding represents +- confidence_score: 0.0 to 1.0 indicating how confident you are + +RECOMMENDED FIELDS: +- why: Rationale for why this approach was chosen (especially for decisions/patterns) +- tradeoffs: Benefits and drawbacks of the approach +- evidence: Array of file references proving this finding exists +- metadata: Key-value pairs with additional context + +EVIDENCE FIELDS: +- file_path: Relative path to the source file +- start_line / end_line: Line range where evidence exists +- snippet: Short relevant code excerpt +- grep_pattern: A regex pattern to find this evidence (for re-verification) +- evidence_type: "file" (default, source code) or "git" (git history/logs) + +METADATA KEYS (include when relevant): +- "component": Category/scope (e.g., "auth", "api", "build", "testing") +- "severity": For constraints — "critical", "high", or "medium" +- "type": Sub-classification (e.g., "workflow" for pattern findings) +- "trigger": What triggers a workflow pattern +- "steps": Steps in a workflow pattern + +DEBT CLASSIFICATION (assess for EVERY finding): +- debt_score: 0.0 (clean/intentional) to 1.0 (pure technical debt to be eliminated) +- debt_reason: Why this is considered technical debt +- refactor_hint: How to eliminate or improve this debt +Indicators of debt: TODO/FIXME/HACK comments, compatibility shims, workarounds, missing error 
handling + +RELATIONSHIPS between findings (at the top level): +- from: Title of the source finding +- to: Title of the target finding +- relation: "depends_on", "affects", "extends", or "relates_to" +- reason: Why they are related +`) + + if existingContext != "" { + sb.WriteString("\nExisting project context (avoid duplicating these findings):\n") + sb.WriteString(existingContext) + sb.WriteString("\n\n") + } + + sb.WriteString(`Respond with ONLY a JSON object in this exact format: +{ + "findings": [ + { + "type": "decision", + "title": "Concise finding title", + "description": "What this finding represents", + "why": "Why this approach was chosen", + "tradeoffs": "Benefits and drawbacks", + "confidence_score": 0.85, + "evidence": [ + { + "file_path": "relative/path/to/file.go", + "start_line": 10, + "end_line": 25, + "snippet": "relevant code snippet", + "grep_pattern": "func NewService", + "evidence_type": "file" + } + ], + "metadata": { + "component": "api" + }, + "debt_score": 0.0, + "debt_reason": "", + "refactor_hint": "" + } + ], + "relationships": [ + { + "from": "Finding title A", + "to": "Finding title B", + "relation": "depends_on", + "reason": "A requires B to function" + } + ] +}`) + + return sb.String() +} + +// FocusAreas defines the parallel bootstrap analysis focus areas. +// Covers all FindingTypes: decisions, patterns, constraints, features, metadata, documentation. 
+var FocusAreas = []string{ + "architectural decisions, features, and technology choices - framework selections, design patterns, product capabilities, dependency choices (read go.mod/package.json), and rationale behind each decision", + "code patterns, conventions, and implementation details - recurring patterns, error handling, logging, security middleware, performance configuration (timeouts, pools, caches), data model schemas, and development conventions", + "constraints, CI/CD, and documentation - build requirements, deployment constraints, security policies, system limitations, CI/CD pipeline configuration (.github/workflows/), release process, and documentation standards", + "git history and project evolution - run 'git log --oneline -150' to analyze major milestones, architecture changes, feature additions, active development areas, and foundational decisions from commit history", +} + +// PlanGenerationPrompt builds a prompt for AI CLI to generate a task plan from a goal. +func PlanGenerationPrompt(goal string, projectContext string) string { + var sb strings.Builder + + fmt.Fprintf(&sb, "Generate a development plan to achieve this goal: %q\n\n", goal) + + if projectContext != "" { + sb.WriteString("Project context (architectural decisions, patterns, constraints):\n") + sb.WriteString(projectContext) + sb.WriteString("\n\n") + } + + sb.WriteString(`Create a plan with concrete, actionable tasks. Each task should be: +- Small enough for a single work session +- Have clear acceptance criteria +- Include validation steps (CLI commands to verify completion) +- Specify which type of agent should handle it (coder, qa, architect, researcher) +- Include scope and keywords for context retrieval + +Order tasks by dependency - tasks that depend on others should come later. +Priority is 0-100 where lower numbers = higher priority. 
+ +Respond with ONLY a JSON object in this exact format: +{ + "goal_summary": "Concise summary (max 100 chars)", + "rationale": "Why this plan is structured this way (min 20 chars)", + "tasks": [ + { + "title": "Action-oriented task title (max 200 chars)", + "description": "Detailed task description (min 10 chars)", + "priority": 10, + "complexity": "low|medium|high", + "assigned_agent": "coder|qa|architect|researcher", + "acceptance_criteria": ["Criterion 1", "Criterion 2"], + "validation_steps": ["go test ./...", "go build ./..."], + "depends_on": [], + "expected_files": ["path/to/file.go"], + "scope": "api", + "keywords": ["auth", "middleware", "jwt"] + } + ], + "estimated_complexity": "low|medium|high", + "prerequisites": ["Optional prerequisite 1"], + "risk_factors": ["Optional risk 1"] +}`) + + return sb.String() +} + +// TaskExecutionPrompt builds a prompt for AI CLI to implement a specific task. +func TaskExecutionPrompt(taskTitle, taskDescription string, acceptanceCriteria []string, contextSummary string, validationSteps []string) string { + var sb strings.Builder + + fmt.Fprintf(&sb, "Implement this task: %s\n\n", taskTitle) + fmt.Fprintf(&sb, "Description: %s\n\n", taskDescription) + + if len(acceptanceCriteria) > 0 { + sb.WriteString("Acceptance Criteria:\n") + for _, ac := range acceptanceCriteria { + fmt.Fprintf(&sb, "- %s\n", ac) + } + sb.WriteString("\n") + } + + if contextSummary != "" { + sb.WriteString("Project Context:\n") + sb.WriteString(contextSummary) + sb.WriteString("\n\n") + } + + if len(validationSteps) > 0 { + sb.WriteString("Validation Steps (run these to verify your work):\n") + for _, vs := range validationSteps { + fmt.Fprintf(&sb, " %s\n", vs) + } + sb.WriteString("\n") + } + + sb.WriteString(`Implement the changes needed to satisfy the acceptance criteria. +After making changes, run the validation steps to verify your work. 
+ +When you are done, respond with ONLY a JSON object summarizing what you did: +{ + "status": "completed|failed|partial", + "summary": "Brief description of what was implemented", + "files_modified": ["list", "of", "modified", "files"], + "error": "Error message if status is failed" +}`) + + return sb.String() +} diff --git a/internal/runner/result.go b/internal/runner/result.go new file mode 100644 index 0000000..84a8d77 --- /dev/null +++ b/internal/runner/result.go @@ -0,0 +1,239 @@ +package runner + +import ( + "encoding/json" + "fmt" + "strings" +) + +// InvokeResult holds the output from an AI CLI invocation. +type InvokeResult struct { + // RawOutput is the full stdout from the CLI subprocess. + RawOutput string + + // ExitCode from the subprocess. + ExitCode int + + // Stderr output (for debugging). + Stderr string + + // Runner that produced this result. + CLIType CLIType +} + +// Decode unmarshals the JSON content from the result into the target struct. +// It uses ExtractJSON to handle various output formats. +// For Claude Code, it first unwraps the JSON envelope {"type":"result","result":"..."}. +func (r *InvokeResult) Decode(target any) error { + raw := r.RawOutput + + // Claude Code wraps output in a JSON envelope: {"type":"result","result":""} + // Unwrap before extracting the inner JSON. + if r.CLIType == CLIClaude { + raw = unwrapClaudeEnvelope(raw) + } + + jsonStr, err := ExtractJSON(raw) + if err != nil { + return fmt.Errorf("extract JSON from %s output: %w", r.CLIType, err) + } + if err := json.Unmarshal([]byte(jsonStr), target); err != nil { + return fmt.Errorf("unmarshal %s output: %w", r.CLIType, err) + } + return nil +} + +// unwrapClaudeEnvelope extracts the inner content from Claude Code's JSON envelope. +// Claude Code outputs: {"type":"result","subtype":"success","cost_usd":...,"result":""} +// Returns the inner result string if envelope is detected, otherwise returns raw unchanged. 
+func unwrapClaudeEnvelope(raw string) string { + trimmed := strings.TrimSpace(raw) + var envelope struct { + Type string `json:"type"` + Result string `json:"result"` + } + if err := json.Unmarshal([]byte(trimmed), &envelope); err != nil { + return raw + } + if envelope.Type == "result" && envelope.Result != "" { + return envelope.Result + } + return raw +} + +// ExtractJSON attempts to extract a JSON object or array from mixed text output. +// It tries three strategies in order: +// 1. Full parse — entire string is valid JSON +// 2. Markdown code block — extract from ```json ... ``` fences +// 3. Brace matching — find the outermost { } or [ ] pair +func ExtractJSON(raw string) (string, error) { + trimmed := strings.TrimSpace(raw) + + // Strategy 1: Full parse + if json.Valid([]byte(trimmed)) { + return trimmed, nil + } + + // Strategy 2: Markdown code block + if idx := strings.Index(trimmed, "```json"); idx >= 0 { + start := idx + len("```json") + end := strings.Index(trimmed[start:], "```") + if end > 0 { + candidate := strings.TrimSpace(trimmed[start : start+end]) + if json.Valid([]byte(candidate)) { + return candidate, nil + } + } + } + + // Also try generic code blocks + if idx := strings.Index(trimmed, "```\n"); idx >= 0 { + start := idx + len("```\n") + end := strings.Index(trimmed[start:], "```") + if end > 0 { + candidate := strings.TrimSpace(trimmed[start : start+end]) + if json.Valid([]byte(candidate)) { + return candidate, nil + } + } + } + + // Strategy 3: Brace matching — find outermost { } or [ ] + for _, pair := range [][2]byte{{'{', '}'}, {'[', ']'}} { + if result, ok := extractBraceMatched(trimmed, pair[0], pair[1]); ok { + return result, nil + } + } + + return "", fmt.Errorf("no valid JSON found in output (%d bytes)", len(raw)) +} + +// extractBraceMatched finds the outermost matched braces in a string. 
+func extractBraceMatched(s string, open, close byte) (string, bool) { + start := strings.IndexByte(s, open) + if start < 0 { + return "", false + } + + depth := 0 + inString := false + escaped := false + + for i := start; i < len(s); i++ { + ch := s[i] + + if escaped { + escaped = false + continue + } + + if ch == '\\' && inString { + escaped = true + continue + } + + if ch == '"' { + inString = !inString + continue + } + + if inString { + continue + } + + if ch == open { + depth++ + } else if ch == close { + depth-- + if depth == 0 { + candidate := s[start : i+1] + if json.Valid([]byte(candidate)) { + return candidate, true + } + return "", false + } + } + } + return "", false +} + +// BootstrapAnalysis is the expected JSON output from a bootstrap analysis invocation. +type BootstrapAnalysis struct { + Findings []BootstrapFinding `json:"findings"` + Relationships []BootstrapRelationship `json:"relationships,omitempty"` +} + +// BootstrapFinding represents a single architectural finding from AI CLI analysis. +// Maps 1:1 to core.Finding fields to preserve full fidelity through the pipeline. 
+type BootstrapFinding struct { + // Core fields + Type string `json:"type"` // "decision", "pattern", "constraint", "feature", "metadata", "documentation" + Title string `json:"title"` + Description string `json:"description"` + Why string `json:"why,omitempty"` + Tradeoffs string `json:"tradeoffs,omitempty"` + ConfidenceScore float64 `json:"confidence_score"` // 0.0-1.0 + + // Evidence with full fields + Evidence []BootstrapEvidence `json:"evidence,omitempty"` + + // Metadata — agent-specific key-value data + // Common keys: "component", "severity", "type", "trigger", "steps", "service" + Metadata map[string]any `json:"metadata,omitempty"` + + // Debt classification — distinguishes essential from accidental complexity + DebtScore float64 `json:"debt_score,omitempty"` // 0.0=clean, 1.0=pure technical debt + DebtReason string `json:"debt_reason,omitempty"` // Why this is considered debt + RefactorHint string `json:"refactor_hint,omitempty"` // How to eliminate the debt +} + +// BootstrapEvidence represents verifiable proof for a finding. +type BootstrapEvidence struct { + FilePath string `json:"file_path"` + StartLine int `json:"start_line,omitempty"` + EndLine int `json:"end_line,omitempty"` + Snippet string `json:"snippet,omitempty"` + GrepPattern string `json:"grep_pattern,omitempty"` // Pattern to find this evidence + EvidenceType string `json:"evidence_type,omitempty"` // "file" (default) or "git" +} + +// BootstrapRelationship represents a link between two findings. +type BootstrapRelationship struct { + From string `json:"from"` // Title of source finding + To string `json:"to"` // Title of target finding + Relation string `json:"relation"` // depends_on, affects, extends, relates_to + Reason string `json:"reason"` // Why they are related +} + +// PlanOutput is the expected JSON output from a plan generation invocation. 
+type PlanOutput struct { + GoalSummary string `json:"goal_summary"` + Rationale string `json:"rationale"` + Tasks []PlanTaskOutput `json:"tasks"` + EstimatedComplexity string `json:"estimated_complexity"` + Prerequisites []string `json:"prerequisites,omitempty"` + RiskFactors []string `json:"risk_factors,omitempty"` +} + +// PlanTaskOutput represents a single task in the plan output. +type PlanTaskOutput struct { + Title string `json:"title"` + Description string `json:"description"` + Priority int `json:"priority"` + Complexity string `json:"complexity"` + AssignedAgent string `json:"assigned_agent"` + AcceptanceCriteria []string `json:"acceptance_criteria"` + ValidationSteps []string `json:"validation_steps,omitempty"` + DependsOn []int `json:"depends_on,omitempty"` + ExpectedFiles []string `json:"expected_files,omitempty"` + Scope string `json:"scope,omitempty"` // e.g., "auth", "api", "vectorsearch" + Keywords []string `json:"keywords,omitempty"` // Extracted from title/description +} + +// ExecuteOutput is the expected JSON output from a task execution invocation. +type ExecuteOutput struct { + Status string `json:"status"` // "completed", "failed", "partial" + Summary string `json:"summary"` // What was done + FilesModified []string `json:"files_modified"` // Files that were changed + Error string `json:"error,omitempty"` // Error message if failed +} diff --git a/internal/runner/retry.go b/internal/runner/retry.go new file mode 100644 index 0000000..79a2907 --- /dev/null +++ b/internal/runner/retry.go @@ -0,0 +1,80 @@ +package runner + +import ( + "context" + "fmt" + "math" + "math/rand" + "strings" + "time" +) + +const ( + retryBaseDelay = 500 * time.Millisecond + retryMaxDelay = 30 * time.Second + defaultRetries = 4 +) + +// RetryableInvoke calls runner.Invoke with exponential backoff and jitter. +// It retries on timeout, CLI failure, and JSON parse errors. +// It does not retry on context cancellation. 
+func RetryableInvoke(ctx context.Context, r Runner, req InvokeRequest, maxRetries int) (*InvokeResult, error) { + if maxRetries <= 0 { + maxRetries = defaultRetries + } + + var lastErr error + for attempt := 0; attempt <= maxRetries; attempt++ { + if ctx.Err() != nil { + return nil, ctx.Err() + } + + result, err := r.Invoke(ctx, req) + if err == nil { + return result, nil + } + + if !isRetryable(err) { + return nil, err + } + + lastErr = err + + if attempt < maxRetries { + delay := backoffDelay(attempt) + select { + case <-ctx.Done(): + return nil, ctx.Err() + case <-time.After(delay): + } + } + } + + return nil, fmt.Errorf("failed after %d retries: %w", maxRetries, lastErr) +} + +// isRetryable returns true for errors that warrant a retry. +func isRetryable(err error) bool { + if err == nil { + return false + } + msg := err.Error() + // Retry on timeouts, CLI failures, and JSON parse errors + for _, keyword := range []string{"timeout", "timed out", "JSON", "json", "exit status", "signal"} { + if strings.Contains(msg, keyword) { + return true + } + } + return false +} + +// backoffDelay returns exponential backoff with jitter. +func backoffDelay(attempt int) time.Duration { + delay := time.Duration(float64(retryBaseDelay) * math.Pow(2, float64(attempt))) + if delay > retryMaxDelay { + delay = retryMaxDelay + } + // Add jitter: ±25% + jitter := time.Duration(float64(delay) * (0.5*rand.Float64() - 0.25)) + return delay + jitter +} diff --git a/internal/runner/runner.go b/internal/runner/runner.go new file mode 100644 index 0000000..23978c2 --- /dev/null +++ b/internal/runner/runner.go @@ -0,0 +1,155 @@ +// Package runner provides an abstraction for spawning AI CLI tools (Claude Code, +// Gemini CLI, Codex CLI) as headless worker subprocesses. This inverts the control +// flow: instead of requiring users to configure API keys for direct LLM calls, +// TaskWing orchestrates the user's already-installed AI CLI. 
+package runner + +import ( + "context" + "fmt" + "time" +) + +// CLIType identifies a supported AI CLI tool. +type CLIType string + +const ( + CLIClaude CLIType = "claude" + CLIGemini CLIType = "gemini" + CLICodex CLIType = "codex" +) + +// String returns the human-readable name for the CLI type. +func (c CLIType) String() string { + switch c { + case CLIClaude: + return "Claude Code" + case CLIGemini: + return "Gemini CLI" + case CLICodex: + return "Codex CLI" + default: + return string(c) + } +} + +// DefaultTimeout is the default timeout for AI CLI invocations. +const DefaultTimeout = 10 * time.Minute + +// ProgressEventType classifies streaming progress events from AI CLIs. +type ProgressEventType int + +const ( + ProgressThinking ProgressEventType = iota // AI is reasoning + ProgressText // AI produced output text + ProgressToolUse // AI is using a tool (Read, Grep, etc.) + ProgressToolResult // Tool returned a result + ProgressHeartbeat // Periodic "still alive" signal +) + +// ProgressEvent is a single progress update from a streaming AI CLI invocation. +type ProgressEvent struct { + Type ProgressEventType + Summary string // Human-readable short summary (e.g., "Reading src/main.go", "Thinking...") +} + +// ProgressCallback receives streaming progress events during invocation. +type ProgressCallback func(ProgressEvent) + +// InvokeRequest configures a single AI CLI invocation. +type InvokeRequest struct { + Prompt string // The prompt to send to the AI CLI + SystemPrompt string // Optional system prompt (not all CLIs support this) + WorkDir string // Working directory for the subprocess + Timeout time.Duration // Max time for the invocation (0 = DefaultTimeout) + OnProgress ProgressCallback // nil = buffered mode (no streaming) + Model string // Model override (e.g., "sonnet", "opus"). Empty = CLI default. +} + +// effectiveTimeout returns the timeout to use, falling back to DefaultTimeout. 
+func (r *InvokeRequest) effectiveTimeout() time.Duration { + if r.Timeout > 0 { + return r.Timeout + } + return DefaultTimeout +} + +// Runner is the interface for spawning an AI CLI as a headless subprocess. +type Runner interface { + // Type returns the CLI type this runner wraps. + Type() CLIType + + // Available returns true if the CLI binary is found on PATH. + Available() bool + + // BinaryPath returns the resolved path to the CLI binary. + BinaryPath() string + + // Invoke runs the AI CLI in read-only mode (structured JSON output, no file changes). + // Use this for analysis and planning operations. + Invoke(ctx context.Context, req InvokeRequest) (*InvokeResult, error) + + // InvokeWithFiles runs the AI CLI with full tool access (can modify files). + // Use this for task execution where the AI CLI implements changes directly. + InvokeWithFiles(ctx context.Context, req InvokeRequest) (*InvokeResult, error) +} + +// DetectedCLI represents a CLI tool found on the system. +type DetectedCLI struct { + Type CLIType + BinaryPath string +} + +// NewRunner creates a Runner for the given CLI type and binary path. +func NewRunner(cli DetectedCLI) Runner { + switch cli.Type { + case CLIClaude: + return &claudeRunner{binaryPath: cli.BinaryPath} + case CLIGemini: + return &geminiRunner{binaryPath: cli.BinaryPath} + case CLICodex: + return &codexRunner{binaryPath: cli.BinaryPath} + default: + return nil + } +} + +// DetectAndCreateRunners finds all installed AI CLIs and creates runners for them. +func DetectAndCreateRunners() ([]Runner, error) { + detected := DetectCLIs() + if len(detected) == 0 { + return nil, fmt.Errorf("no AI CLI found. Install Claude Code, Gemini CLI, or Codex CLI") + } + + runners := make([]Runner, 0, len(detected)) + for _, cli := range detected { + if r := NewRunner(cli); r != nil { + runners = append(runners, r) + } + } + if len(runners) == 0 { + return nil, fmt.Errorf("no AI CLI found. 
Install Claude Code, Gemini CLI, or Codex CLI") + } + return runners, nil +} + +// PreferredRunner returns the best available runner, optionally preferring a specific CLI type. +// Priority: preferred (if set and available) > Claude > Gemini > Codex. +func PreferredRunner(preferred CLIType) (Runner, error) { + runners, err := DetectAndCreateRunners() + if err != nil { + return nil, err + } + + // If a preference is set, look for it first + if preferred != "" { + for _, r := range runners { + if r.Type() == preferred { + return r, nil + } + } + } + + // Return the first available (detection order is Claude > Gemini > Codex) + return runners[0], nil +} diff --git a/internal/server/handlers.go b/internal/server/handlers.go index 1d89436..5196dce 100644 --- a/internal/server/handlers.go +++ b/internal/server/handlers.go @@ -6,10 +6,8 @@ import ( "fmt" "net/http" "os" - "strconv" "github.com/josephgoksu/TaskWing/internal/agents/core" - "github.com/josephgoksu/TaskWing/internal/agents/impl" "github.com/josephgoksu/TaskWing/internal/bootstrap" "github.com/josephgoksu/TaskWing/internal/config" "github.com/josephgoksu/TaskWing/internal/knowledge" @@ -271,32 +269,16 @@ func (s *Server) handleBootstrap(w http.ResponseWriter, r *http.Request) { writeAPIJSON(w, stats) } -// handleActivity -func (s *Server) handleActivity(w http.ResponseWriter, r *http.Request) { - activityLog := impl.NewActivityLog(s.cwd) - - limitStr := r.URL.Query().Get("limit") - limit := 50 - if limitStr != "" { - if l, err := strconv.Atoi(limitStr); err == nil && l > 0 && l <= 200 { - limit = l - } - } - - entries := activityLog.GetRecent(limit) - summary := activityLog.Summary() - +// handleActivity returns empty activity (WatchAgent removed). 
+func (s *Server) handleActivity(w http.ResponseWriter, _ *http.Request) { writeAPIJSON(w, map[string]any{ - "entries": entries, - "summary": summary, + "entries": []any{}, + "summary": map[string]any{}, }) } -// handleClearActivity -func (s *Server) handleClearActivity(w http.ResponseWriter, r *http.Request) { - activityLog := impl.NewActivityLog(s.cwd) - activityLog.Clear() - +// handleClearActivity is a no-op (WatchAgent removed). +func (s *Server) handleClearActivity(w http.ResponseWriter, _ *http.Request) { writeAPIJSON(w, map[string]any{ "success": true, }) @@ -353,51 +335,8 @@ func (s *Server) handlePromoteToTask(w http.ResponseWriter, r *http.Request) { return } - activityLog := impl.NewActivityLog(s.cwd) - entries := activityLog.GetRecent(500) // Large enough to find the id - - var finding *impl.ActivityEntry - for i := range entries { - if entries[i].ID == req.FindingID { - finding = &entries[i] - break - } - } - - if finding == nil { - http.Error(w, "finding not found in activity log", http.StatusNotFound) - return - } - - // Create a task from the finding - planID := req.PlanID - if planID == "" { - newPlan := &task.Plan{ - Goal: fmt.Sprintf("Address finding: %s", finding.Message), - } - if err := s.repo.CreatePlan(newPlan); err != nil { - http.Error(w, fmt.Sprintf("create plan failed: %v", err), http.StatusInternalServerError) - return - } - planID = newPlan.ID - } - - newTask := &task.Task{ - PlanID: planID, - Title: finding.Message, - Description: fmt.Sprintf("Automatically promoted from activity finding. 
Original agent: %s", finding.Agent), - Status: task.StatusPending, - Priority: 50, - } - // Populate AI integration fields (scope, keywords, suggested_ask_queries) - newTask.EnrichAIFields() - - if err := s.repo.CreateTask(newTask); err != nil { - http.Error(w, fmt.Sprintf("create task failed: %v", err), http.StatusInternalServerError) - return - } - - writeAPIJSON(w, newTask) + // Activity log removed (WatchAgent deleted) + http.Error(w, "activity log not available", http.StatusGone) } func writeAPIJSON(w http.ResponseWriter, data interface{}) { diff --git a/internal/ui/config_menu.go b/internal/ui/config_menu.go index 0409310..3006bb4 100644 --- a/internal/ui/config_menu.go +++ b/internal/ui/config_menu.go @@ -110,7 +110,7 @@ func buildConfigItems() []ConfigItem { rerankURL := viper.GetString("retrieval.reranking.base_url") if rerankURL != "" { rerankValue = rerankURL - rerankStatus = "🔌" + rerankStatus = IconPlug.Emoji } } @@ -153,9 +153,9 @@ func getKeyStatus(provider string) string { return "🏠" // local default: if config.ResolveAPIKey(llm.Provider(provider)) != "" { - return "✅" // key set + return IconDone.Emoji // key set } - return "❌" // key missing + return IconStop.Emoji // key missing } } @@ -194,16 +194,16 @@ func (m configMenuModel) Update(msg tea.Msg) (tea.Model, tea.Cmd) { var ( configTitleStyle = lipgloss.NewStyle(). Bold(true). - Foreground(lipgloss.Color("39")) + Foreground(ColorCyan) configActiveStyle = lipgloss.NewStyle(). - Foreground(lipgloss.Color("86")) + Foreground(ColorSuccess) configDimStyle = lipgloss.NewStyle(). - Foreground(lipgloss.Color("240")) + Foreground(ColorDim) configValueStyle = lipgloss.NewStyle(). 
- Foreground(lipgloss.Color("229")) + Foreground(ColorWarning) ) func (m configMenuModel) View() string { diff --git a/internal/ui/context_view.go b/internal/ui/context_view.go index 81a1559..60fb494 100644 --- a/internal/ui/context_view.go +++ b/internal/ui/context_view.go @@ -40,25 +40,25 @@ func RenderContextResultsWithSymbolsVerbose(query string, scored []knowledge.Sco } func renderContextInternal(query string, scored []knowledge.ScoredNode, answer string, verbose bool) { - // Styles + // Reuse centralized styles var ( - titleStyle = lipgloss.NewStyle().Foreground(lipgloss.Color("205")).Bold(true) - sectionStyle = lipgloss.NewStyle().Foreground(lipgloss.Color("141")).Bold(true) + titleStyle = StyleAskHeader + sectionStyle = lipgloss.NewStyle().Foreground(ColorAccent).Bold(true) ) // Render Answer Panel if answer != "" { fmt.Println() - fmt.Println(titleStyle.Render(fmt.Sprintf("📖 %s", query))) + fmt.Println(titleStyle.Render(fmt.Sprintf("%s %s", IconBook, query))) fmt.Println(RenderInfoPanel("Answer", answer)) } else { - fmt.Println(titleStyle.Render(fmt.Sprintf("🔍 Context for: \"%s\"", query))) + fmt.Println(titleStyle.Render(fmt.Sprintf("%s Context for: \"%s\"", IconSearch, query))) } // Render Sources fmt.Println() if answer != "" { - fmt.Println(sectionStyle.Render("📚 Sources")) + fmt.Println(sectionStyle.Render(fmt.Sprintf("%s Sources", IconBooks))) } // Calculate max score for relative scaling @@ -77,13 +77,13 @@ func renderContextInternal(query string, scored []knowledge.ScoredNode, answer s // renderScoredNodePanel renders a single knowledge result as a styled panel. 
func renderScoredNodePanel(index int, s knowledge.ScoredNode, maxScore float32, verbose bool) { - // Styles + // Styles — use centralized colors var ( - headerStyle = lipgloss.NewStyle().Foreground(lipgloss.Color("14")).Bold(true) // Cyan for headers - metaStyle = lipgloss.NewStyle().Foreground(lipgloss.Color("241")) // Dim for metadata - contentStyle = lipgloss.NewStyle().Foreground(lipgloss.Color("252")) // Light for content - barFull = lipgloss.NewStyle().Foreground(lipgloss.Color("42")) // Green - barEmpty = lipgloss.NewStyle().Foreground(lipgloss.Color("237")) // Dark gray + headerStyle = lipgloss.NewStyle().Foreground(ColorCyan).Bold(true) + metaStyle = lipgloss.NewStyle().Foreground(ColorDim) + contentStyle = lipgloss.NewStyle().Foreground(ColorText) + barFull = lipgloss.NewStyle().Foreground(ColorSuccess) + barEmpty = lipgloss.NewStyle().Foreground(ColorBarEmpty) panelBorder = lipgloss.NewStyle().Border(lipgloss.RoundedBorder()).BorderForeground(scoreToColor(s.Score, maxScore)).Padding(0, 1).MarginTop(1) ) @@ -121,7 +121,7 @@ func renderScoredNodePanel(index int, s knowledge.ScoredNode, maxScore float32, // Graph expansion indicator expandedIndicator := "" if s.ExpandedFrom != "" { - expandedIndicator = " 🔗" + expandedIndicator = fmt.Sprintf(" %s", IconLink) } // Build panel content @@ -167,42 +167,42 @@ func renderScoredNodePanel(index int, s knowledge.ScoredNode, maxScore float32, } // scoreToColor returns a border color based on the score (green for high, yellow for medium, gray for low). 
-func scoreToColor(score, maxScore float32) lipgloss.Color { +func scoreToColor(score, maxScore float32) lipgloss.TerminalColor { relative := score / maxScore switch { case relative >= 0.8: - return lipgloss.Color("42") // Green - high relevance + return ColorSuccess case relative >= 0.5: - return lipgloss.Color("214") // Orange - medium relevance + return ColorWarning default: - return lipgloss.Color("241") // Gray - lower relevance + return ColorSecondary } } // renderContextWithSymbolsInternal displays knowledge results and code symbols. func renderContextWithSymbolsInternal(query string, scored []knowledge.ScoredNode, symbols []app.SymbolResponse, answer string, verbose bool) { - // Styles + // Reuse centralized styles var ( - titleStyle = lipgloss.NewStyle().Foreground(lipgloss.Color("205")).Bold(true) - sectionStyle = lipgloss.NewStyle().Foreground(lipgloss.Color("141")).Bold(true) + titleStyle = StyleAskHeader + sectionStyle = lipgloss.NewStyle().Foreground(ColorAccent).Bold(true) ) // Render Answer Panel if answer != "" { fmt.Println() - fmt.Println(titleStyle.Render(fmt.Sprintf("📖 %s", query))) + fmt.Println(titleStyle.Render(fmt.Sprintf("%s %s", IconBook, query))) fmt.Println(RenderInfoPanel("Answer", answer)) } else { - fmt.Println(titleStyle.Render(fmt.Sprintf("🔍 Context for: \"%s\"", query))) + fmt.Println(titleStyle.Render(fmt.Sprintf("%s Context for: \"%s\"", IconSearch, query))) } // Render Knowledge Results section if len(scored) > 0 { fmt.Println() if answer != "" { - fmt.Println(sectionStyle.Render("📚 Knowledge")) + fmt.Println(sectionStyle.Render(fmt.Sprintf("%s Knowledge", IconBooks))) } else { - fmt.Println(sectionStyle.Render("📚 Architectural Knowledge")) + fmt.Println(sectionStyle.Render(fmt.Sprintf("%s Architectural Knowledge", IconBooks))) } // Calculate max score for relative scaling @@ -221,7 +221,7 @@ func renderContextWithSymbolsInternal(query string, scored []knowledge.ScoredNod // Render Code Symbols section if len(symbols) > 0 { 
fmt.Println() - fmt.Println(sectionStyle.Render("💻 Code Symbols")) + fmt.Println(sectionStyle.Render(fmt.Sprintf("%s Code Symbols", IconCode))) for i, sym := range symbols { renderSymbolPanel(i+1, sym, verbose) @@ -231,12 +231,12 @@ func renderContextWithSymbolsInternal(query string, scored []knowledge.ScoredNod // renderSymbolPanel renders a code symbol as a styled panel. func renderSymbolPanel(index int, sym app.SymbolResponse, verbose bool) { - // Styles + // Styles — use centralized colors var ( - headerStyle = lipgloss.NewStyle().Foreground(lipgloss.Color("14")).Bold(true) - metaStyle = lipgloss.NewStyle().Foreground(lipgloss.Color("241")) - locationStyle = lipgloss.NewStyle().Foreground(lipgloss.Color("39")) - panelBorder = lipgloss.NewStyle().Border(lipgloss.RoundedBorder()).BorderForeground(lipgloss.Color("63")).Padding(0, 1).MarginTop(1) + headerStyle = lipgloss.NewStyle().Foreground(ColorCyan).Bold(true) + metaStyle = lipgloss.NewStyle().Foreground(ColorDim) + locationStyle = lipgloss.NewStyle().Foreground(ColorBlue) + panelBorder = lipgloss.NewStyle().Border(lipgloss.RoundedBorder()).BorderForeground(lipgloss.AdaptiveColor{Light: "97", Dark: "63"}).Padding(0, 1).MarginTop(1) ) icon := symbolKindIcon(sym.Kind) @@ -308,7 +308,7 @@ func getContentWithoutSummary(content, summary string) string { // RenderAskResult displays a complete AskResult from the ask pipeline. // This is the primary rendering function for the `taskwing ask` command. func RenderAskResult(result *app.AskResult, verbose bool) { - sectionStyle := lipgloss.NewStyle().Foreground(lipgloss.Color("141")).Bold(true) + sectionStyle := lipgloss.NewStyle().Foreground(ColorAccent).Bold(true) // Header with query in a styled box headerBox := lipgloss.NewStyle(). @@ -349,7 +349,7 @@ func RenderAskResult(result *app.AskResult, verbose bool) { Border(lipgloss.RoundedBorder()). BorderForeground(ColorBlue). Padding(1, 2). 
- Width(80) + Width(ContentWidth()) fmt.Println(answerBox.Render(result.Answer)) } @@ -478,7 +478,7 @@ func symbolKindIcon(kind string) string { case "variable": return "ν" case "package", "module": - return "📦" + return IconPackage.Emoji case "field": return "·" case "decorator": diff --git a/internal/ui/drift.go b/internal/ui/drift.go index c07a5da..a203e11 100644 --- a/internal/ui/drift.go +++ b/internal/ui/drift.go @@ -17,7 +17,7 @@ func RenderDriftReport(report *app.DriftReport, verbose bool) { // No rules found if report.RulesChecked == 0 { - fmt.Println("📋 No architectural rules found in knowledge base.") + fmt.Printf("%s No architectural rules found in knowledge base.\n", IconTask) fmt.Println(" Run 'taskwing bootstrap' to extract rules from your codebase,") fmt.Println(" or refresh rules with 'taskwing bootstrap --force'") return @@ -25,7 +25,7 @@ func RenderDriftReport(report *app.DriftReport, verbose bool) { // Violations if len(report.Violations) > 0 { - fmt.Printf("❌ %s (%d)\n", StyleBold("VIOLATIONS"), len(report.Violations)) + fmt.Printf("%s %s (%d)\n", IconStop, StyleBold("VIOLATIONS"), len(report.Violations)) fmt.Println("────────────────────────") fmt.Println() @@ -54,7 +54,7 @@ func RenderDriftReport(report *app.DriftReport, verbose bool) { // Warnings if len(report.Warnings) > 0 { - fmt.Printf("⚠️ %s (%d)\n", StyleBold("WARNINGS"), len(report.Warnings)) + fmt.Printf("%s %s (%d)\n", IconWarn, StyleBold("WARNINGS"), len(report.Warnings)) fmt.Println("────────────────────────") fmt.Println() @@ -70,17 +70,17 @@ func RenderDriftReport(report *app.DriftReport, verbose bool) { // Passed rules if len(report.Passed) > 0 { - fmt.Printf("✅ %s (%d)\n", StyleBold("PASSED"), len(report.Passed)) + fmt.Printf("%s %s (%d)\n", IconDone, StyleBold("PASSED"), len(report.Passed)) fmt.Println("────────────────────────") for _, name := range report.Passed { - fmt.Printf(" ✓ %s\n", name) + fmt.Printf(" %s %s\n", IconOK, name) } fmt.Println() } // Summary 
fmt.Println("────────────────────────") - fmt.Printf("📊 %s: ", StyleBold("Summary")) + fmt.Printf("%s %s: ", IconStats, StyleBold("Summary")) parts := []string{} if report.Summary.Violations > 0 { @@ -102,7 +102,7 @@ func RenderDriftReport(report *app.DriftReport, verbose bool) { // Hint for fixes if report.Summary.Violations > 0 { fmt.Println() - fmt.Println("💡 Review violations and update code to match documented architecture.") + PrintHint("Review violations and update code to match documented architecture.") } } diff --git a/internal/ui/embedding_select.go b/internal/ui/embedding_select.go index 9611b32..5e49baf 100644 --- a/internal/ui/embedding_select.go +++ b/internal/ui/embedding_select.go @@ -12,7 +12,7 @@ import ( type EmbeddingSelection struct { Provider string Model string - BaseURL string // For TEI/Ollama + BaseURL string // For Ollama } // PromptEmbeddingSelection runs an interactive embedding provider then model selection. @@ -47,8 +47,6 @@ func PromptEmbeddingProvider() (string, error) { switch { case p.ID == llm.ProviderOllama: desc = fmt.Sprintf("Local • %d models • free", p.ModelCount) - case p.ID == llm.ProviderTEI: - desc = "Self-hosted TEI server" case p.IsFree: desc = fmt.Sprintf("%d models • free", p.ModelCount) default: @@ -89,10 +87,6 @@ func PromptEmbeddingProvider() (string, error) { func PromptEmbeddingModel(provider string) (string, error) { models := llm.GetEmbeddingModelsForProvider(provider) if len(models) == 0 { - // No models defined, use "custom" for TEI or empty - if provider == llm.ProviderTEI { - return "custom", nil - } return "", nil } @@ -157,7 +151,7 @@ func (m embeddingProviderSelectModel) Update(msg tea.Msg) (tea.Model, tea.Cmd) { } func (m embeddingProviderSelectModel) View() string { - s := "\n" + StyleSelectTitle.Render("📐 Select Embedding Provider") + "\n\n" + s := "\n" + StyleSelectTitle.Render(fmt.Sprintf("%s Select Embedding Provider", IconRuler)) + "\n\n" for i, opt := range m.options { cursor := " " @@ -213,7 
+207,7 @@ func (m embeddingModelSelectModel) Update(msg tea.Msg) (tea.Model, tea.Cmd) { } func (m embeddingModelSelectModel) View() string { - s := "\n" + StyleSelectTitle.Render(fmt.Sprintf("📐 Select Embedding Model (%s)", m.provider)) + "\n\n" + s := "\n" + StyleSelectTitle.Render(fmt.Sprintf("%s Select Embedding Model (%s)", IconRuler, m.provider)) + "\n\n" for i, model := range m.models { cursor := " " diff --git a/internal/ui/eval_report.go b/internal/ui/eval_report.go index e2245a1..e9f1cce 100644 --- a/internal/ui/eval_report.go +++ b/internal/ui/eval_report.go @@ -72,7 +72,7 @@ var ( Foreground(ColorSuccess) scoreBarEmpty = lipgloss.NewStyle(). - Foreground(lipgloss.Color("237")) + Foreground(ColorDim) sectionStyle = lipgloss.NewStyle(). Foreground(ColorPrimary). diff --git a/internal/ui/explain.go b/internal/ui/explain.go index a181205..1db16c0 100644 --- a/internal/ui/explain.go +++ b/internal/ui/explain.go @@ -5,13 +5,14 @@ import ( "fmt" "strings" + "github.com/charmbracelet/lipgloss" "github.com/josephgoksu/TaskWing/internal/app" ) // RenderExplainResult renders a deep explanation to the terminal. 
func RenderExplainResult(result *app.ExplainResult, verbose bool) { // Symbol header - fmt.Printf("\n%s Symbol: %s (%s)\n", StyleBold("🔍"), result.Symbol.Name, result.Symbol.Kind) + fmt.Printf("\n%s Symbol: %s (%s)\n", StyleBold(IconSearch.String()), result.Symbol.Name, result.Symbol.Kind) fmt.Printf(" Location: %s\n", result.Symbol.Location) if result.Symbol.Signature != "" { fmt.Printf(" Signature: %s\n", result.Symbol.Signature) @@ -21,7 +22,7 @@ func RenderExplainResult(result *app.ExplainResult, verbose bool) { } // Call graph - fmt.Printf("\n%s System Context\n", StyleBold("📊")) + fmt.Printf("\n%s System Context\n", StyleBold(IconStats.String())) fmt.Println("───────────────") // Callers @@ -53,7 +54,7 @@ func RenderExplainResult(result *app.ExplainResult, verbose bool) { } // Impact stats - fmt.Printf("\n%s Impact Analysis:\n", StyleBold("🔗")) + fmt.Printf("\n%s Impact Analysis:\n", StyleBold(IconLink.String())) fmt.Printf(" Direct callers: %d\n", result.ImpactStats.DirectCallers) fmt.Printf(" Direct callees: %d\n", result.ImpactStats.DirectCallees) if result.ImpactStats.TransitiveDependents > 0 { @@ -66,7 +67,7 @@ func RenderExplainResult(result *app.ExplainResult, verbose bool) { // Related decisions if len(result.Decisions) > 0 { - fmt.Printf("\n%s Related Decisions:\n", StyleBold("📋")) + fmt.Printf("\n%s Related Decisions:\n", StyleBold(IconTask.String())) for _, d := range result.Decisions { fmt.Printf(" • %s\n", d.Summary) } @@ -74,7 +75,7 @@ func RenderExplainResult(result *app.ExplainResult, verbose bool) { // Related patterns if len(result.Patterns) > 0 { - fmt.Printf("\n%s Related Patterns:\n", StyleBold("📐")) + fmt.Printf("\n%s Related Patterns:\n", StyleBold(IconRuler.String())) for _, p := range result.Patterns { fmt.Printf(" • %s\n", p.Summary) } @@ -82,7 +83,7 @@ func RenderExplainResult(result *app.ExplainResult, verbose bool) { // Source code (only if verbose) if verbose && len(result.SourceCode) > 0 { - fmt.Printf("\n%s Source Code:\n", 
StyleBold("📁")) + fmt.Printf("\n%s Source Code:\n", StyleBold(IconFolder.String())) for _, snippet := range result.SourceCode { fmt.Printf("\n %s %s (%s):\n", snippet.Kind, snippet.SymbolName, snippet.FilePath) // Indent code @@ -104,7 +105,7 @@ func RenderExplainResult(result *app.ExplainResult, verbose bool) { // RenderExplainHeader renders the header before streaming starts. func RenderExplainHeader(symbolName string) { - fmt.Printf("\n%s Analyzing: %s\n", StyleBold("🔍"), symbolName) + fmt.Printf("\n%s Analyzing: %s\n", StyleBold(IconSearch.String()), symbolName) fmt.Println("───────────────────────────") } @@ -113,7 +114,7 @@ func RenderExplainExplanation(explanation string) { if explanation == "" { return } - fmt.Printf("\n%s Explanation:\n", StyleBold("💬")) + fmt.Printf("\n%s Explanation:\n", StyleBold(IconChat.String())) // Wrap text at 80 chars with indent wrapped := wrapText(explanation, 76) for _, line := range strings.Split(wrapped, "\n") { @@ -161,8 +162,10 @@ func truncate(s string, max int) string { return s[:max-3] + "..." } -// StyleBold returns the text with bold ANSI codes. -// This is a simple implementation - could use lipgloss for more styling. +// styleBold is the lipgloss style used by StyleBold. +var styleBold = lipgloss.NewStyle().Bold(true) + +// StyleBold returns the text rendered in bold via lipgloss (no raw ANSI). func StyleBold(s string) string { - return "\033[1m" + s + "\033[0m" + return styleBold.Render(s) } diff --git a/internal/ui/icons.go b/internal/ui/icons.go new file mode 100644 index 0000000..126ab6d --- /dev/null +++ b/internal/ui/icons.go @@ -0,0 +1,46 @@ +package ui + +// Icon holds an emoji and ASCII fallback for terminals without emoji support. +type Icon struct { + Emoji string + Fallback string +} + +// String returns the emoji representation. Fallback can be enabled later +// via --no-emoji flag or TERM detection without changing call sites. 
+func (i Icon) String() string { return i.Emoji } + +// Centralized icon registry — all user-facing icons should reference these +// instead of hardcoding emoji strings in cmd/ or internal/ui/ files. +var ( + IconTask = Icon{"📋", "[T]"} + IconDesc = Icon{"📝", ">"} + IconStats = Icon{"📊", "#"} + IconPackage = Icon{"📦", "[P]"} + IconSearch = Icon{"🔍", "?"} + IconRobot = Icon{"🤖", "[AI]"} + IconRocket = Icon{"🚀", ">>"} + IconWrench = Icon{"🔧", "*"} + IconBranch = Icon{"🌿", "|-"} + IconBolt = Icon{"⚡", "!"} + IconHint = Icon{"💡", "->"} + IconCode = Icon{"💻", ""} + IconBooks = Icon{"📚", "[@]"} + IconLink = Icon{"🔗", "<->"} + IconPlug = Icon{"🔌", "[+]"} + IconGlobe = Icon{"🌐", "(o)"} + IconSkip = Icon{"⏭️", ">>"} + IconTarget = Icon{"🎯", "(*)"} + IconFolder = Icon{"📁", "[/]"} + IconChat = Icon{"💬", "\""} + IconBook = Icon{"📖", "[B]"} + IconRuler = Icon{"📐", "[R]"} + + IconOK = Icon{"✔", "[OK]"} + IconFail = Icon{"✖", "[FAIL]"} + IconWarn = Icon{"⚠", "[WARN]"} + IconInfo = Icon{"ℹ", "[i]"} + IconDone = Icon{"✅", "[v]"} + IconStop = Icon{"❌", "[x]"} + IconPartial = Icon{"◑", "[~]"} +) diff --git a/internal/ui/model_select.go b/internal/ui/model_select.go index db37b4a..6c3617a 100644 --- a/internal/ui/model_select.go +++ b/internal/ui/model_select.go @@ -72,7 +72,7 @@ func (m modelSelectModel) Update(msg tea.Msg) (tea.Model, tea.Cmd) { } func (m modelSelectModel) View() string { - s := "\n" + StyleSelectTitle.Render(fmt.Sprintf("🧠 Select Model for %s", m.provider)) + "\n\n" + s := "\n" + StyleSelectTitle.Render(fmt.Sprintf("%s Select Model for %s", IconRobot, m.provider)) + "\n\n" for i, model := range m.models { cursor := " " diff --git a/internal/ui/output.go b/internal/ui/output.go new file mode 100644 index 0000000..e409394 --- /dev/null +++ b/internal/ui/output.go @@ -0,0 +1,198 @@ +package ui + +import ( + "fmt" + "strings" + "time" + + "github.com/charmbracelet/lipgloss" +) + +// PrintSuccess prints a green success message: ✔ msg +func PrintSuccess(msg string) { + 
icon := lipgloss.NewStyle().Foreground(ColorSuccess).Render(IconOK.Emoji) + fmt.Printf("%s %s\n", icon, msg) +} + +// PrintWarning prints a yellow warning message: ⚠ msg +func PrintWarning(msg string) { + icon := lipgloss.NewStyle().Foreground(ColorWarning).Render(IconWarn.Emoji) + fmt.Printf("%s %s\n", icon, msg) +} + +// PrintError prints a red error message: ✖ msg +func PrintError(msg string) { + icon := lipgloss.NewStyle().Foreground(ColorError).Render(IconFail.Emoji) + fmt.Printf("%s %s\n", icon, msg) +} + +// PrintHint prints a dim hint message: 💡 msg +func PrintHint(msg string) { + style := lipgloss.NewStyle().Foreground(ColorDim) + fmt.Printf("%s %s\n", IconHint.Emoji, style.Render(msg)) +} + +// PrintInfo prints a subtle info message: ℹ msg +func PrintInfo(msg string) { + style := lipgloss.NewStyle().Foreground(ColorDim) + fmt.Printf("%s %s\n", IconInfo.Emoji, style.Render(msg)) +} + +// PrintKeyValue prints a key-value pair with aligned formatting. +// +// Key: value +func PrintKeyValue(key, value string) { + keyStyle := lipgloss.NewStyle().Foreground(ColorDim) + fmt.Printf(" %s %s\n", keyStyle.Render(key+":"), value) +} + +// PrintSectionHeader prints a styled section header with an icon. +// +// \n📊 Title +func PrintSectionHeader(icon Icon, title string) { + style := lipgloss.NewStyle().Bold(true).Underline(true).Foreground(ColorText) + fmt.Printf("\n%s %s\n", icon.Emoji, style.Render(title)) +} + +// PrintDivider prints a subtle horizontal divider line. +func PrintDivider() { + style := lipgloss.NewStyle().Foreground(ColorDim) + fmt.Println(style.Render(strings.Repeat("─", 40))) +} + +// BootstrapStats holds accumulated stats for the final summary panel. +type BootstrapStats struct { + FilesScanned int + SymbolsFound int + CallRelationships int + MetadataItems int + AnalysisFindings int + AnalysisRelations int + TotalDuration time.Duration +} + +// PrintPhaseHeader prints a numbered phase with icon, title, and description. 
+// +// [1/3] 🔍 Indexing Code Symbols +// Scanning source files for functions, types, and call relationships. +func PrintPhaseHeader(step, total int, icon Icon, title, description string) { + stepStyle := lipgloss.NewStyle().Foreground(ColorDim) + titleStyle := lipgloss.NewStyle().Bold(true).Foreground(ColorText) + descStyle := lipgloss.NewStyle().Foreground(ColorDim) + + fmt.Printf("\n %s %s %s\n", + stepStyle.Render(fmt.Sprintf("[%d/%d]", step, total)), + icon.Emoji, + titleStyle.Render(title)) + if description != "" { + // Indent description to align with title text + fmt.Printf(" %s\n", descStyle.Render(description)) + } + fmt.Println() +} + +// PrintPhaseResult prints a check mark + message + right-aligned dim duration. +// +// ✔ Indexed 12 updates 1.23s +func PrintPhaseResult(msg string, duration time.Duration) { + icon := lipgloss.NewStyle().Foreground(ColorSuccess).Render(IconOK.Emoji) + durStyle := lipgloss.NewStyle().Foreground(ColorDim) + durStr := FormatDuration(duration) + + w := ContentWidth() + // 8 chars for leading indent, 2 for icon+space + msgWidth := w - 10 - len(durStr) - 1 + if msgWidth < 20 { + msgWidth = 20 + } + padded := msg + if len(padded) < msgWidth { + padded = padded + strings.Repeat(" ", msgWidth-len(padded)) + } + + fmt.Printf(" %s %s %s\n", icon, padded, durStyle.Render(durStr)) +} + +// PrintPhaseDetail prints an indented detail line (no icon). +func PrintPhaseDetail(msg string) { + fmt.Printf(" %s\n", msg) +} + +// PrintPhaseSeparator prints a dim heavy horizontal rule at ContentWidth. +func PrintPhaseSeparator() { + style := lipgloss.NewStyle().Foreground(ColorDim) + fmt.Println() + fmt.Printf(" %s\n", style.Render(strings.Repeat("━", ContentWidth()-4))) +} + +// RenderBootstrapWelcome returns the welcome panel string. 
+func RenderBootstrapWelcome() string { + content := "Analyzes your codebase to build persistent architectural\n" + + "memory for AI assistants — indexing symbols, extracting\n" + + "metadata, and discovering patterns and decisions." + return RenderInfoPanel(fmt.Sprintf("%s TaskWing Bootstrap", IconRobot.Emoji), content) +} + +// RenderBootstrapSummary returns the final summary panel string. +func RenderBootstrapSummary(stats BootstrapStats) string { + var lines []string + + if stats.FilesScanned > 0 || stats.SymbolsFound > 0 { + line := fmt.Sprintf(" Symbols %d files, %d symbols", stats.FilesScanned, stats.SymbolsFound) + if stats.CallRelationships > 0 { + line += fmt.Sprintf(", %d relationships", stats.CallRelationships) + } + lines = append(lines, line) + } + if stats.MetadataItems > 0 { + lines = append(lines, fmt.Sprintf(" Metadata %d items", stats.MetadataItems)) + } + if stats.AnalysisFindings > 0 { + line := fmt.Sprintf(" Analysis %d findings", stats.AnalysisFindings) + if stats.AnalysisRelations > 0 { + line += fmt.Sprintf(", %d relationships", stats.AnalysisRelations) + } + lines = append(lines, line) + } + + lines = append(lines, "") + lines = append(lines, fmt.Sprintf(" %s Run `taskwing ask \"your question\"` to query knowledge", IconHint.Emoji)) + + title := fmt.Sprintf("%s Bootstrap Complete", IconOK.Emoji) + durStr := FormatDuration(stats.TotalDuration) + title += strings.Repeat(" ", max(0, ContentWidth()-len(title)-len(durStr)-6)) + durStr + + return RenderSuccessPanel(title, strings.Join(lines, "\n")) +} + +// RenderPlanBox returns a numbered action list inside a bordered box. +func RenderPlanBox(actions []string) string { + var lines []string + for i, action := range actions { + lines = append(lines, fmt.Sprintf(" %d. %s", i+1, action)) + } + content := strings.Join(lines, "\n") + + style := lipgloss.NewStyle(). + Border(lipgloss.RoundedBorder()). + BorderForeground(ColorSecondary). + Padding(0, 1). 
+ Width(ContentWidth()) + + titleStyle := lipgloss.NewStyle().Foreground(ColorDim) + return "\n" + style.Render(titleStyle.Render("Plan")+"\n"+content) + "\n" +} + +// FormatDuration formats a duration for display: "1.23s", "2m12s", etc. +func FormatDuration(d time.Duration) string { + if d < time.Second { + return fmt.Sprintf("%dms", d.Milliseconds()) + } + if d < time.Minute { + return fmt.Sprintf("%.2fs", d.Seconds()) + } + mins := int(d.Minutes()) + secs := int(d.Seconds()) % 60 + return fmt.Sprintf("%dm%02ds", mins, secs) +} + diff --git a/internal/ui/prompt_key.go b/internal/ui/prompt_key.go index 2b01e88..1f65723 100644 --- a/internal/ui/prompt_key.go +++ b/internal/ui/prompt_key.go @@ -66,8 +66,8 @@ func (m apiKeyModel) Update(msg tea.Msg) (tea.Model, tea.Cmd) { } func (m apiKeyModel) View() string { - titleStyle := lipgloss.NewStyle().Bold(true).Foreground(lipgloss.Color("12")) - dimStyle := lipgloss.NewStyle().Foreground(lipgloss.Color("240")) + titleStyle := lipgloss.NewStyle().Bold(true).Foreground(ColorHighlight) + dimStyle := lipgloss.NewStyle().Foreground(ColorDim) s := "\n" + titleStyle.Render("🔑 API Key required") + "\n" s += dimStyle.Render("It will be stored locally in ~/.taskwing/config.yaml") + "\n\n" diff --git a/internal/ui/provider_select.go b/internal/ui/provider_select.go index dd38bc6..7c3c07b 100644 --- a/internal/ui/provider_select.go +++ b/internal/ui/provider_select.go @@ -30,7 +30,7 @@ func buildProviderOptions() []ProviderOption { // Show price range and model count desc = fmt.Sprintf("$%.2f-$%.2f/1M • %d models", p.MinPrice, p.MaxPrice, p.ModelCount) if !hasKey { - desc += " • ❌ key not set" + desc += " • " + IconStop.Emoji + " key not set" } } diff --git a/internal/ui/runner_progress.go b/internal/ui/runner_progress.go new file mode 100644 index 0000000..e8c5b41 --- /dev/null +++ b/internal/ui/runner_progress.go @@ -0,0 +1,153 @@ +package ui + +import ( + "fmt" + "strings" + "time" + + "github.com/charmbracelet/bubbles/spinner" + 
tea "github.com/charmbracelet/bubbletea" + "github.com/charmbracelet/lipgloss" +) + +// RunnerJobStatus represents the state of a runner analysis job. +type RunnerJobStatus int + +const ( + RunnerJobRunning RunnerJobStatus = iota + RunnerJobDone + RunnerJobError +) + +// RunnerJobState holds the display state for one analysis job. +type RunnerJobState struct { + ID string + Status RunnerJobStatus + Message string + Spinner spinner.Model + StartAt time.Time + Duration time.Duration +} + +// RunnerJobDoneMsg signals that a runner job has completed. +type RunnerJobDoneMsg struct { + ID string + Findings int + Rels int + ErrMsg string + Duration time.Duration +} + +// RunnerProgressModel is a lightweight Bubble Tea model for showing +// spinner progress during parallel runner analysis. +type RunnerProgressModel struct { + Jobs []*RunnerJobState + done int +} + +// NewRunnerProgressModel creates a new progress model for the given job IDs. +func NewRunnerProgressModel(jobIDs []string) RunnerProgressModel { + s := spinner.New() + s.Spinner = spinner.Dot + s.Style = lipgloss.NewStyle().Foreground(ColorPrimary) + + jobs := make([]*RunnerJobState, len(jobIDs)) + for i, id := range jobIDs { + jobs[i] = &RunnerJobState{ + ID: id, + Status: RunnerJobRunning, + Message: "analyzing...", + Spinner: s, + StartAt: time.Now(), + } + } + + return RunnerProgressModel{Jobs: jobs} +} + +func (m RunnerProgressModel) Init() tea.Cmd { + if len(m.Jobs) == 0 { + return tea.Quit + } + return m.Jobs[0].Spinner.Tick +} + +func (m RunnerProgressModel) Update(msg tea.Msg) (tea.Model, tea.Cmd) { + switch msg := msg.(type) { + case tea.KeyMsg: + if msg.String() == "q" || msg.String() == "ctrl+c" { + return m, tea.Quit + } + + case spinner.TickMsg: + var cmds []tea.Cmd + for _, job := range m.Jobs { + if job.Status == RunnerJobRunning { + var cmd tea.Cmd + job.Spinner, cmd = job.Spinner.Update(msg) + cmds = append(cmds, cmd) + } + } + return m, tea.Batch(cmds...) 
+ + case RunnerJobDoneMsg: + for _, job := range m.Jobs { + if job.ID == msg.ID { + job.Duration = msg.Duration + if msg.ErrMsg != "" { + job.Status = RunnerJobError + job.Message = msg.ErrMsg + } else { + job.Status = RunnerJobDone + job.Message = fmt.Sprintf("%d findings, %d relationships", msg.Findings, msg.Rels) + } + m.done++ + break + } + } + + if m.done >= len(m.Jobs) { + return m, tea.Quit + } + } + + return m, nil +} + +func (m RunnerProgressModel) View() string { + var s strings.Builder + + for _, job := range m.Jobs { + s.WriteString(" ") + switch job.Status { + case RunnerJobRunning: + s.WriteString(job.Spinner.View()) + s.WriteString(" ") + s.WriteString(StyleTitle.Render(fmt.Sprintf("%-14s", job.ID))) + s.WriteString(" ") + s.WriteString(StyleSubtle.Render(job.Message)) + elapsed := time.Since(job.StartAt).Round(time.Second) + s.WriteString(StyleSubtle.Render(fmt.Sprintf(" %s", elapsed))) + case RunnerJobDone: + icon := lipgloss.NewStyle().Foreground(ColorSuccess).Render(IconOK.Emoji) + durStyle := lipgloss.NewStyle().Foreground(ColorDim) + s.WriteString(icon) + s.WriteString(" ") + s.WriteString(fmt.Sprintf("%-14s", job.ID)) + s.WriteString(" ") + s.WriteString(StyleSuccess.Render(job.Message)) + s.WriteString(" ") + s.WriteString(durStyle.Render(FormatDuration(job.Duration))) + case RunnerJobError: + icon := lipgloss.NewStyle().Foreground(ColorWarning).Render(IconWarn.Emoji) + s.WriteString(icon) + s.WriteString(" ") + s.WriteString(fmt.Sprintf("%-14s", job.ID)) + s.WriteString(" ") + s.WriteString(StyleError.Render(job.Message)) + } + s.WriteString("\n") + } + + return s.String() +} diff --git a/internal/ui/styles.go b/internal/ui/styles.go index 06e713f..c56ff39 100644 --- a/internal/ui/styles.go +++ b/internal/ui/styles.go @@ -1,3 +1,9 @@ +// Package ui provides centralized styling for all terminal output. +// +// Convention: All colors use lipgloss.AdaptiveColor for light/dark theme support. 
+// Use the Color* variables (not lipgloss.Color directly) in cmd/ files. +// Use Icon* constants from icons.go instead of raw emoji strings. +// Use Print* helpers from output.go instead of raw fmt.Printf with emojis. package ui import ( @@ -7,19 +13,22 @@ import ( ) var ( - // Colors - ColorPrimary = lipgloss.Color("205") // Pink - ColorSecondary = lipgloss.Color("241") // Gray - ColorSuccess = lipgloss.Color("42") // Green - ColorError = lipgloss.Color("160") // Red - ColorWarning = lipgloss.Color("214") // Orange/Yellow - ColorText = lipgloss.Color("252") // White/Gray - ColorCyan = lipgloss.Color("87") // Cyan for strategy - ColorBlue = lipgloss.Color("75") // Blue for answers - ColorHighlight = lipgloss.Color("12") // Blue for titles/highlights - ColorSelected = lipgloss.Color("10") // Green for selected items - ColorDim = lipgloss.Color("240") // Dim gray for secondary text - ColorYellow = lipgloss.Color("11") // Yellow for badges/accents + // Colors — AdaptiveColor picks Light on light backgrounds, Dark on dark. + // lipgloss.TerminalColor is the interface both Color and AdaptiveColor implement. 
+ ColorPrimary lipgloss.TerminalColor = lipgloss.AdaptiveColor{Light: "125", Dark: "205"} + ColorSecondary lipgloss.TerminalColor = lipgloss.AdaptiveColor{Light: "244", Dark: "241"} + ColorSuccess lipgloss.TerminalColor = lipgloss.AdaptiveColor{Light: "28", Dark: "42"} + ColorError lipgloss.TerminalColor = lipgloss.AdaptiveColor{Light: "160", Dark: "160"} + ColorWarning lipgloss.TerminalColor = lipgloss.AdaptiveColor{Light: "208", Dark: "214"} + ColorText lipgloss.TerminalColor = lipgloss.AdaptiveColor{Light: "235", Dark: "252"} + ColorCyan lipgloss.TerminalColor = lipgloss.AdaptiveColor{Light: "30", Dark: "87"} + ColorBlue lipgloss.TerminalColor = lipgloss.AdaptiveColor{Light: "27", Dark: "75"} + ColorHighlight lipgloss.TerminalColor = lipgloss.AdaptiveColor{Light: "21", Dark: "12"} + ColorSelected lipgloss.TerminalColor = lipgloss.AdaptiveColor{Light: "28", Dark: "10"} + ColorDim lipgloss.TerminalColor = lipgloss.AdaptiveColor{Light: "246", Dark: "240"} + ColorAccent lipgloss.TerminalColor = lipgloss.AdaptiveColor{Light: "97", Dark: "141"} + ColorBarEmpty lipgloss.TerminalColor = lipgloss.AdaptiveColor{Light: "250", Dark: "237"} + ColorYellow lipgloss.TerminalColor = lipgloss.AdaptiveColor{Light: "136", Dark: "11"} // Base Styles StyleTitle = lipgloss.NewStyle().Foreground(ColorText).Bold(true) @@ -84,8 +93,8 @@ var ( StyleSelectBadge = lipgloss.NewStyle().Foreground(ColorYellow).Bold(true) // Table Styles (alternating rows) - ColorTableRowEven = lipgloss.Color("236") // Subtle dark background - ColorTableRowOdd = lipgloss.Color("234") // Slightly darker + ColorTableRowEven lipgloss.TerminalColor = lipgloss.AdaptiveColor{Light: "255", Dark: "236"} + ColorTableRowOdd lipgloss.TerminalColor = lipgloss.AdaptiveColor{Light: "253", Dark: "234"} StyleTableRowEven = lipgloss.NewStyle().Foreground(ColorText) StyleTableRowOdd = lipgloss.NewStyle().Foreground(ColorDim) StyleTableHeader = lipgloss.NewStyle().Bold(true).Foreground(ColorPrimary).Underline(true) @@ -106,24 
+115,24 @@ var ( // CategoryBadge returns a styled badge string for a knowledge node type. func CategoryBadge(nodeType string) string { - colors := map[string]lipgloss.Color{ - "decision": lipgloss.Color("205"), // Pink - "feature": lipgloss.Color("75"), // Blue - "constraint": lipgloss.Color("214"), // Orange - "pattern": lipgloss.Color("141"), // Purple - "plan": lipgloss.Color("42"), // Green - "note": lipgloss.Color("252"), // White - "metadata": lipgloss.Color("87"), // Cyan - "documentation": lipgloss.Color("11"), // Yellow + colors := map[string]lipgloss.TerminalColor{ + "decision": lipgloss.AdaptiveColor{Light: "125", Dark: "205"}, + "feature": lipgloss.AdaptiveColor{Light: "27", Dark: "75"}, + "constraint": lipgloss.AdaptiveColor{Light: "208", Dark: "214"}, + "pattern": lipgloss.AdaptiveColor{Light: "97", Dark: "141"}, + "plan": lipgloss.AdaptiveColor{Light: "28", Dark: "42"}, + "note": lipgloss.AdaptiveColor{Light: "235", Dark: "252"}, + "metadata": lipgloss.AdaptiveColor{Light: "30", Dark: "87"}, + "documentation": lipgloss.AdaptiveColor{Light: "136", Dark: "11"}, } color, ok := colors[nodeType] if !ok { - color = lipgloss.Color("241") + color = lipgloss.AdaptiveColor{Light: "244", Dark: "241"} } badge := lipgloss.NewStyle(). - Foreground(lipgloss.Color("0")). + Foreground(lipgloss.AdaptiveColor{Light: "255", Dark: "0"}). Background(color). Padding(0, 1). Bold(true) diff --git a/internal/ui/utils.go b/internal/ui/utils.go index 235132a..84d1451 100644 --- a/internal/ui/utils.go +++ b/internal/ui/utils.go @@ -6,6 +6,7 @@ import ( "strings" "github.com/charmbracelet/lipgloss" + "golang.org/x/term" ) // IsInteractive checks if stdout is a terminal. @@ -23,11 +24,12 @@ func RenderPageHeader(title, subtitle string) { Padding(0, 1). Border(lipgloss.RoundedBorder()). BorderForeground(ColorSecondary). - MarginBottom(1) + MarginBottom(1). 
+ Width(ContentWidth()) - fmt.Println(titleStyle.Render(fmt.Sprintf("🤖 %s", title))) + fmt.Println(titleStyle.Render(fmt.Sprintf("%s %s", IconRobot, title))) if subtitle != "" { - fmt.Printf(" ⚡ %s\n", subtitle) + fmt.Printf(" %s %s\n", IconBolt, subtitle) } } @@ -36,7 +38,7 @@ func RenderPageHeader(title, subtitle string) { type Panel struct { Title string Content string - BorderColor lipgloss.Color + BorderColor lipgloss.TerminalColor Width int } @@ -51,7 +53,7 @@ func NewPanel(title, content string) *Panel { } // WithBorderColor sets the border color and returns the panel. -func (p *Panel) WithBorderColor(color lipgloss.Color) *Panel { +func (p *Panel) WithBorderColor(color lipgloss.TerminalColor) *Panel { p.BorderColor = color return p } @@ -71,6 +73,8 @@ func (p *Panel) Render() string { if p.Width > 0 { style = style.Width(p.Width) + } else { + style = style.Width(ContentWidth()) } var content string @@ -109,6 +113,28 @@ func RenderWarningPanel(title, content string) string { return NewPanel(title, content).WithBorderColor(ColorWarning).Render() } +// TerminalWidth returns the current terminal width, defaulting to 80 if detection fails. +func TerminalWidth() int { + w, _, err := term.GetSize(int(os.Stdout.Fd())) + if err != nil || w <= 0 { + return 80 + } + return w +} + +// ContentWidth returns a width suitable for content rendering, clamped for readability. +// Terminals wider than 120 columns yield 120, narrower than 40 yield 40; otherwise it is the terminal width minus 2 for border padding. +func ContentWidth() int { + w := TerminalWidth() + if w > 120 { + return 120 + } + if w < 40 { + return 40 + } + return w - 2 +} + // Truncate truncates a string to maxLen characters, adding ellipsis if needed. func Truncate(s string, maxLen int) string { if maxLen <= 0 {