diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 1875881..5af45b1 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -43,3 +43,20 @@ jobs:
- name: Test
run: go test ./...
+ bootstrap-integration:
+ runs-on: ubuntu-latest
+ timeout-minutes: 10
+ steps:
+ - uses: actions/checkout@v4
+
+ - uses: actions/setup-go@v5
+ with:
+ go-version: '1.24.x'
+ cache: true
+
+ - name: Bootstrap regression tests
+ run: |
+ go test -v -count=1 -failfast \
+ -run "TestBootstrapRepro|TestClaudeDriftDetection|TestKnowledgeLinking_NoFK|TestSubrepoMetadata|TestDocIngestion|TestRootPathResolution|TestIsMonorepoDetection|TestWorkspaceRootSelection|TestGate3_Enforcement|TestParseJSONResponse_Hallucination" \
+ ./...
+
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 790da4f..1de8a08 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -28,6 +28,13 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
### Fixed
+- **RootPath resolution**: Reject `MarkerNone` contexts in `GetMemoryBasePath` to prevent accidental writes to `~/.taskwing/memory.db`. Also reject `.taskwing` markers above multi-repo workspaces during detection walk-up. (`TestRootPathResolution`, `TestBootstrapRepro_RootPathResolvesToHome`)
+- **FK constraint failures**: `LinkNodes` now pre-checks node existence before INSERT to avoid SQLite error 787. Duplicate edges handled gracefully. (`TestKnowledgeLinking_NoFK`)
+- **IsMonorepo misclassification**: `Detect()` now checks `hasNestedProjects()` in the `MarkerNone` fallback, so multi-repo workspaces are correctly classified. Resolves disagreement between `Detect()` and `DetectWorkspace()`. (`TestIsMonorepoDetection`, `TestBootstrapRepro_IsMonorepoMisclassification`)
+- **Zero docs loaded**: Added `LoadForServices` to `DocLoader` for multi-repo workspaces. Wired into `RunDeterministicBootstrap` via workspace auto-detection. (`TestDocIngestion`, `TestSubrepoMetadataExtraction`)
+- **Sub-repo metadata**: Verified per-repo workspace context in node storage with proper isolation and cross-workspace linking. (`TestSubrepoMetadataPresent`)
+- **Claude MCP drift**: Added filesystem-based drift detection tests with evidence traceability and Gate 3 consent enforcement for global mutations. (`TestClaudeDriftDetection`)
+- **Hallucinated findings**: Gate 3 enforcement in `NewFindingWithEvidence` — findings without evidence start as "skipped". Added `HasEvidence()` and `NeedsHumanVerification()` to `Finding`. (`TestGate3_Enforcement`, `TestParseJSONResponse_Hallucination`)
- Priority scheduling semantics corrected (lower numeric priority executes first).
- Unknown slash subcommands now fail explicitly instead of silently falling back.
- MCP plan action descriptions aligned with implemented behavior.
diff --git a/README.md b/README.md
index 40cab2f..26b71d4 100644
--- a/README.md
+++ b/README.md
@@ -99,14 +99,14 @@ taskwing goal "Add Stripe billing"
-Brand names and logos are trademarks of their respective owners; usage here indicates compatibility, not endorsement.
+Brand names and logos are trademarks of their respective owners; usage here indicates compatibility, not endorsement.
## MCP Tools
| Tool | Description |
-|:-----|:------------|
+|------|-------------|
| `ask` | Search project knowledge (decisions, patterns, constraints) |
| `task` | Unified task lifecycle (`next`, `current`, `start`, `complete`) |
| `plan` | Plan management (`clarify`, `decompose`, `expand`, `generate`, `finalize`, `audit`) |
@@ -149,18 +149,16 @@ Once connected, use these slash commands from your AI assistant:
## Core Commands
-| Command | Description |
-|:--------|:------------|
-| `taskwing bootstrap` | Extract architecture from your codebase |
-| `taskwing goal ""` | Create and activate a plan from a goal |
-| `taskwing ask ""` | Query project knowledge |
-| `taskwing task` | Manage execution tasks |
-| `taskwing plan status` | View current plan progress |
-| `taskwing slash` | Output slash command prompts for AI tools |
-| `taskwing mcp` | Start the MCP server |
-| `taskwing doctor` | Health check for project memory |
-| `taskwing config` | Configure LLM provider and settings |
-| `taskwing start` | Start API/watch/dashboard services |
+- `taskwing bootstrap` — Extract architecture from your codebase
+- `taskwing goal ""` — Create and activate a plan from a goal
+- `taskwing ask ""` — Query project knowledge
+- `taskwing task` — Manage execution tasks
+- `taskwing plan status` — View current plan progress
+- `taskwing slash` — Output slash command prompts for AI tools
+- `taskwing mcp` — Start the MCP server
+- `taskwing doctor` — Health check for project memory
+- `taskwing config` — Configure LLM provider and settings
+- `taskwing start` — Start API/watch/dashboard services
## Autonomous Task Execution (Hooks)
diff --git a/cmd/bootstrap.go b/cmd/bootstrap.go
index b5df3eb..fce5ebb 100644
--- a/cmd/bootstrap.go
+++ b/cmd/bootstrap.go
@@ -721,7 +721,9 @@ func installMCPServers(basePath string, selectedAIs []string) {
case "claude":
installClaude(binPath, basePath)
case "gemini":
- installGeminiCLI(binPath, basePath)
+ if err := installGeminiCLI(binPath, basePath); err != nil {
+ fmt.Printf("⚠️ Gemini MCP install failed: %v\n", err)
+ }
case "codex":
installCodexGlobal(binPath, basePath)
case "cursor":
diff --git a/cmd/mcp_install.go b/cmd/mcp_install.go
index cd12254..00dbd2a 100644
--- a/cmd/mcp_install.go
+++ b/cmd/mcp_install.go
@@ -107,8 +107,7 @@ func installMCPForTarget(target, binPath, cwd string) error {
installCodexGlobal(binPath, cwd)
return nil
case "gemini":
- installGeminiCLI(binPath, cwd)
- return nil
+ return installGeminiCLI(binPath, cwd)
case "copilot":
installCopilot(binPath, cwd)
return nil
@@ -445,14 +444,20 @@ func installCopilot(binPath, projectDir string) {
fmt.Println(" (Reload VS Code window to activate)")
}
-func installGeminiCLI(binPath, projectDir string) {
+func installGeminiCLI(binPath, projectDir string) error {
// Check if gemini CLI is available
- _, err := exec.LookPath("gemini")
+ geminiPath, err := exec.LookPath("gemini")
if err != nil {
- fmt.Println("❌ 'gemini' CLI not found in PATH.")
- fmt.Println(" Please install the Gemini CLI first to use this integration.")
- fmt.Println(" See: https://geminicli.com/docs/getting-started")
- return
+ return fmt.Errorf("'gemini' CLI not found in PATH: install from https://geminicli.com/docs/getting-started")
+ }
+
+ // Check gemini version to detect compatibility issues
+ versionCmd := exec.Command(geminiPath, "--version")
+ versionOut, versionErr := versionCmd.Output()
+ if versionErr != nil {
+ fmt.Printf("⚠️ Could not determine gemini version: %v\n", versionErr)
+ } else if viper.GetBool("verbose") {
+ fmt.Printf(" gemini version: %s\n", strings.TrimSpace(string(versionOut)))
}
serverName := mcpServerName(projectDir)
@@ -461,35 +466,42 @@ func installGeminiCLI(binPath, projectDir string) {
if viper.GetBool("preview") {
fmt.Printf("[PREVIEW] Would run: gemini mcp remove -s project %s && gemini mcp add -s project %s %s mcp\n", legacyName, serverName, binPath)
- return
+ return nil
}
// Remove legacy server name (migration cleanup)
- legacyRemoveCmd := exec.Command("gemini", "mcp", "remove", "-s", "project", legacyName)
+ legacyRemoveCmd := exec.Command(geminiPath, "mcp", "remove", "-s", "project", legacyName)
legacyRemoveCmd.Dir = projectDir
_ = legacyRemoveCmd.Run() // Ignore error - server may not exist
// Remove current server name (idempotent reinstall)
- removeCmd := exec.Command("gemini", "mcp", "remove", "-s", "project", serverName)
+ removeCmd := exec.Command(geminiPath, "mcp", "remove", "-s", "project", serverName)
removeCmd.Dir = projectDir
_ = removeCmd.Run() // Ignore error - server may not exist
// Run: gemini mcp add -s project [args...]
// Uses -s project for project-level config (stored in .gemini/settings.json)
- cmd := exec.Command("gemini", "mcp", "add", "-s", "project", serverName, binPath, "mcp")
+ cmd := exec.Command(geminiPath, "mcp", "add", "-s", "project", serverName, binPath, "mcp")
cmd.Dir = projectDir
- // Capture output to suppress noise, unless verbose
+ var stderrBuf strings.Builder
if viper.GetBool("verbose") {
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
+ } else {
+ cmd.Stderr = &stderrBuf
}
if err := cmd.Run(); err != nil {
- fmt.Printf("⚠️ Failed to run 'gemini mcp add': %v\n", err)
- } else {
- fmt.Printf("✅ Installed for Gemini as '%s'\n", serverName)
+ stderrMsg := strings.TrimSpace(stderrBuf.String())
+ if stderrMsg != "" {
+ return fmt.Errorf("'gemini mcp add' failed (exit %v): %s", err, stderrMsg)
+ }
+ return fmt.Errorf("'gemini mcp add' failed: %w", err)
}
+
+ fmt.Printf("✅ Installed for Gemini as '%s'\n", serverName)
+ return nil
}
func installCodexGlobal(binPath, projectDir string) {
diff --git a/internal/agents/core/parsers.go b/internal/agents/core/parsers.go
index a2e86d2..42a275d 100644
--- a/internal/agents/core/parsers.go
+++ b/internal/agents/core/parsers.go
@@ -59,6 +59,17 @@ func NewFindingWithEvidence(
metadata map[string]any,
) Finding {
confidenceScore, confidenceLabel := ParseConfidence(confidence)
+ convertedEvidence := ConvertEvidence(evidence)
+
+ // Gate 3: Findings without verifiable evidence (non-empty FilePath) start as
+ // "skipped" rather than "pending" to prevent hallucinated findings from being
+ // auto-linked into the knowledge graph.
+ verificationStatus := VerificationStatusPending
+ tempFinding := Finding{Evidence: convertedEvidence}
+ if !tempFinding.HasEvidence() {
+ verificationStatus = VerificationStatusSkipped
+ }
+
return Finding{
Type: findingType,
Title: title,
@@ -67,8 +78,8 @@ func NewFindingWithEvidence(
Tradeoffs: tradeoffs,
ConfidenceScore: confidenceScore,
Confidence: confidenceLabel,
- Evidence: ConvertEvidence(evidence),
- VerificationStatus: VerificationStatusPending,
+ Evidence: convertedEvidence,
+ VerificationStatus: verificationStatus,
SourceAgent: sourceAgent,
Metadata: metadata,
}
diff --git a/internal/agents/core/parsers_test.go b/internal/agents/core/parsers_test.go
new file mode 100644
index 0000000..26b62cc
--- /dev/null
+++ b/internal/agents/core/parsers_test.go
@@ -0,0 +1,221 @@
+package core
+
+import (
+ "testing"
+)
+
+// =============================================================================
+// TestParseJSONResponse_Hallucination: Reject or flag hallucinated outputs
+// =============================================================================
+
+func TestParseJSONResponse_Hallucination(t *testing.T) {
+ type simpleResponse struct {
+ Title string `json:"title"`
+ Description string `json:"description"`
+ Evidence []EvidenceJSON `json:"evidence"`
+ }
+
+ tests := []struct {
+ name string
+ input string
+ wantErr bool
+ wantTitle string
+ }{
+ {
+ name: "empty string rejected",
+ input: "",
+ wantErr: true,
+ },
+ {
+ name: "plain text without JSON rejected",
+ input: "This is just some text with no JSON at all.",
+ wantErr: true,
+ },
+ {
+ name: "valid JSON accepted",
+ input: `{"title": "Valid Finding", "description": "desc", "evidence": [{"file_path": "main.go"}]}`,
+ wantErr: false,
+ wantTitle: "Valid Finding",
+ },
+ {
+ name: "JSON with markdown fences accepted",
+ input: "```json\n{\"title\": \"Fenced\", \"description\": \"d\"}\n```",
+ wantErr: false,
+ wantTitle: "Fenced",
+ },
+ {
+ name: "truncated JSON repaired when simple",
+ input: `{"title": "Truncated", "description": "cut off here`,
+ wantErr: false, // Simple truncation is repairable (closing quote + brace)
+ wantTitle: "Truncated",
+ },
+ {
+ name: "JSON with trailing comma repaired",
+ input: `{"title": "Trailing", "description": "desc",}`,
+ wantErr: false,
+ wantTitle: "Trailing",
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ result, err := ParseJSONResponse[simpleResponse](tt.input)
+ if tt.wantErr {
+ if err == nil {
+ t.Error("expected error, got nil")
+ }
+ return
+ }
+ if err != nil {
+ t.Fatalf("unexpected error: %v", err)
+ }
+ if tt.wantTitle != "" && result.Title != tt.wantTitle {
+ t.Errorf("Title = %q, want %q", result.Title, tt.wantTitle)
+ }
+ })
+ }
+}
+
+// =============================================================================
+// TestGate3_Enforcement: Findings without evidence are flagged
+// =============================================================================
+
+func TestGate3_Enforcement(t *testing.T) {
+ t.Run("finding_with_evidence_is_pending", func(t *testing.T) {
+ f := NewFindingWithEvidence(
+ FindingTypeDecision,
+ "Has Evidence", "desc", "why", "tradeoffs",
+ 0.8,
+ []EvidenceJSON{{FilePath: "main.go", StartLine: 1, EndLine: 5, Snippet: "func main()"}},
+ "test-agent",
+ nil,
+ )
+
+ if f.VerificationStatus != VerificationStatusPending {
+ t.Errorf("VerificationStatus = %q, want %q", f.VerificationStatus, VerificationStatusPending)
+ }
+ if !f.HasEvidence() {
+ t.Error("HasEvidence() = false, want true")
+ }
+ if f.NeedsHumanVerification() {
+ t.Error("NeedsHumanVerification() = true for finding with evidence and high confidence")
+ }
+ })
+
+ t.Run("finding_without_evidence_is_skipped", func(t *testing.T) {
+ f := NewFindingWithEvidence(
+ FindingTypeDecision,
+ "No Evidence", "desc", "why", "tradeoffs",
+ 0.9,
+ nil, // No evidence
+ "test-agent",
+ nil,
+ )
+
+ if f.VerificationStatus != VerificationStatusSkipped {
+ t.Errorf("VerificationStatus = %q, want %q (Gate 3: no evidence)", f.VerificationStatus, VerificationStatusSkipped)
+ }
+ if f.HasEvidence() {
+ t.Error("HasEvidence() = true, want false")
+ }
+ })
+
+ t.Run("finding_with_empty_evidence_is_skipped", func(t *testing.T) {
+ f := NewFindingWithEvidence(
+ FindingTypeDecision,
+ "Empty Evidence", "desc", "why", "tradeoffs",
+ 0.8,
+ []EvidenceJSON{}, // Empty slice
+ "test-agent",
+ nil,
+ )
+
+ if f.VerificationStatus != VerificationStatusSkipped {
+ t.Errorf("VerificationStatus = %q, want %q (Gate 3: empty evidence)", f.VerificationStatus, VerificationStatusSkipped)
+ }
+ })
+
+ t.Run("finding_with_low_confidence_needs_verification", func(t *testing.T) {
+ f := NewFindingWithEvidence(
+ FindingTypeDecision,
+ "Low Confidence", "desc", "why", "tradeoffs",
+ 0.3, // Below 0.5 threshold
+ []EvidenceJSON{{FilePath: "main.go"}},
+ "test-agent",
+ nil,
+ )
+
+ if !f.NeedsHumanVerification() {
+ t.Error("NeedsHumanVerification() = false for low-confidence finding, want true")
+ }
+ })
+
+ t.Run("finding_with_empty_file_path_has_no_evidence", func(t *testing.T) {
+ f := NewFindingWithEvidence(
+ FindingTypeDecision,
+ "Bad Evidence", "desc", "why", "tradeoffs",
+ 0.8,
+ []EvidenceJSON{{FilePath: "", Snippet: "some code"}}, // Empty file path
+ "test-agent",
+ nil,
+ )
+
+ if f.HasEvidence() {
+ t.Error("HasEvidence() = true for evidence with empty FilePath, want false")
+ }
+ })
+
+ t.Run("debt_finding_with_evidence_not_flagged", func(t *testing.T) {
+ f := NewFindingWithDebt(
+ FindingTypeDecision,
+ "Debt Finding", "desc", "why", "tradeoffs",
+ 0.8,
+ []EvidenceJSON{{FilePath: "legacy.go", StartLine: 10}},
+ "test-agent",
+ nil,
+ DebtInfo{DebtScore: 0.8, DebtReason: "legacy pattern"},
+ )
+
+ if f.VerificationStatus != VerificationStatusPending {
+ t.Errorf("VerificationStatus = %q, want %q", f.VerificationStatus, VerificationStatusPending)
+ }
+ if !f.IsDebt() {
+ t.Error("IsDebt() = false, want true")
+ }
+ })
+}
+
+// =============================================================================
+// TestParseConfidence: Confidence parsing edge cases
+// =============================================================================
+
+func TestParseConfidence(t *testing.T) {
+ tests := []struct {
+ name string
+ input any
+ wantScore float64
+ wantLabel string
+ }{
+ {"high float", 0.9, 0.9, "high"},
+ {"medium float", 0.6, 0.6, "medium"},
+ {"low float", 0.3, 0.3, "low"},
+ {"integer 1", 1, 1.0, "high"},
+ {"integer 0", 0, 0.0, "low"},
+ {"string high", "high", 0.9, "high"},
+ {"string medium", "medium", 0.7, "medium"},
+ {"string low", "low", 0.4, "low"},
+ {"nil defaults to medium", nil, 0.5, "medium"},
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ score, label := ParseConfidence(tt.input)
+ if score != tt.wantScore {
+ t.Errorf("score = %f, want %f", score, tt.wantScore)
+ }
+ if label != tt.wantLabel {
+ t.Errorf("label = %q, want %q", label, tt.wantLabel)
+ }
+ })
+ }
+}
diff --git a/internal/agents/core/types.go b/internal/agents/core/types.go
index 6316694..dbddc7f 100644
--- a/internal/agents/core/types.go
+++ b/internal/agents/core/types.go
@@ -5,6 +5,10 @@ package core
import "time"
+// MinConfidenceForAutoApply is the minimum confidence score for a finding
+// to be automatically applied without human verification.
+const MinConfidenceForAutoApply = 0.5
+
// FindingType categorizes what kind of discovery was made.
type FindingType string
@@ -55,6 +59,25 @@ func (f *Finding) IsDebt() bool {
return f.DebtScore >= 0.7
}
+// HasEvidence returns true if the finding has at least one evidence item
+// with a non-empty file path. Findings without evidence should not be
+// auto-linked into the knowledge graph (Gate 3 enforcement).
+func (f *Finding) HasEvidence() bool {
+ for _, e := range f.Evidence {
+ if e.FilePath != "" {
+ return true
+ }
+ }
+ return false
+}
+
+// NeedsHumanVerification returns true if this finding should be flagged
+// for human review rather than automatically applied. A finding needs
+// verification when it lacks evidence or has low confidence.
+func (f *Finding) NeedsHumanVerification() bool {
+ return !f.HasEvidence() || f.ConfidenceScore < MinConfidenceForAutoApply
+}
+
// Evidence represents verifiable proof for a Finding.
type Evidence struct {
FilePath string `json:"file_path"`
diff --git a/internal/agents/impl/analysis_git.go b/internal/agents/impl/analysis_git.go
index 1429968..fd2cc60 100644
--- a/internal/agents/impl/analysis_git.go
+++ b/internal/agents/impl/analysis_git.go
@@ -14,6 +14,7 @@ import (
"github.com/josephgoksu/TaskWing/internal/agents/core"
"github.com/josephgoksu/TaskWing/internal/config"
+ gitpkg "github.com/josephgoksu/TaskWing/internal/git"
"github.com/josephgoksu/TaskWing/internal/llm"
"github.com/josephgoksu/TaskWing/internal/project"
)
@@ -248,6 +249,15 @@ func gatherGitChunks(basePath string, verbose bool) ([]string, string) {
basePath, scopePath)
}
+ // Determine work directory and validate it is a git repository
+ workDir := getGitWorkDir(projectCtx, basePath)
+ if !gitpkg.IsGitRepository(workDir) {
+ if verbose {
+ log.Printf("[git] skipping git log: %q is not a git repository", workDir)
+ }
+ return nil, ""
+ }
+
// Build git log command with optional path scoping
args := []string{"log", "--format=%h %ad %s", "--date=short", fmt.Sprintf("-%d", gitMaxCommits)}
if scopePath != "" {
@@ -256,7 +266,6 @@ func gatherGitChunks(basePath string, verbose bool) ([]string, string) {
}
cmd := exec.Command("git", args...)
- workDir := getGitWorkDir(projectCtx, basePath)
cmd.Dir = workDir
// Task 5: Add error logging when git command fails
diff --git a/internal/bootstrap/bootstrap_repro_test.go b/internal/bootstrap/bootstrap_repro_test.go
new file mode 100644
index 0000000..0b5457b
--- /dev/null
+++ b/internal/bootstrap/bootstrap_repro_test.go
@@ -0,0 +1,753 @@
+package bootstrap_test
+
+import (
+ "context"
+ "fmt"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "strings"
+ "testing"
+ "time"
+
+ "github.com/josephgoksu/TaskWing/internal/agents/core"
+ "github.com/josephgoksu/TaskWing/internal/agents/verification"
+ "github.com/josephgoksu/TaskWing/internal/bootstrap"
+ "github.com/josephgoksu/TaskWing/internal/git"
+ "github.com/josephgoksu/TaskWing/internal/memory"
+ "github.com/josephgoksu/TaskWing/internal/project"
+ "github.com/spf13/afero"
+)
+
+// =============================================================================
+// Defect 1: RootPath resolves to HOME when .taskwing exists at HOME
+// =============================================================================
+
+func TestBootstrapRepro_RootPathResolvesToHome(t *testing.T) {
+ // Reproduce: When ~/.taskwing exists and user runs from a workspace
+ // subdirectory that has its own .git, Detect() should NOT climb to HOME.
+ // The CRITICAL FIX in detect.go should prevent this, but the multi-repo
+ // workspace scenario (no .git at workspace root) still resolves to HOME.
+
+ tests := []struct {
+ name string
+ paths []string
+ startPath string
+ wantRootPath string
+ wantNotHome bool // if true, assert RootPath != simulated home
+ }{
+ {
+ name: "taskwing at home should not be used when git repo below",
+ paths: []string{
+ "/home/user/.taskwing/",
+ "/home/user/workspace/project/.git/",
+ "/home/user/workspace/project/go.mod",
+ },
+ startPath: "/home/user/workspace/project",
+ wantRootPath: "/home/user/workspace/project",
+ },
+ {
+ name: "multi-repo workspace without git root - should not climb to home",
+ paths: []string{
+ "/home/user/.taskwing/",
+ "/home/user/workspace/cli/.git/",
+ "/home/user/workspace/cli/go.mod",
+ "/home/user/workspace/app/.git/",
+ "/home/user/workspace/app/package.json",
+ },
+ // Running from the workspace dir (not a git repo itself)
+ startPath: "/home/user/workspace",
+ wantNotHome: true, // Should NOT resolve to /home/user
+ },
+ {
+ name: "taskwing at project level is valid",
+ paths: []string{
+ "/home/user/.taskwing/",
+ "/home/user/workspace/.taskwing/",
+ "/home/user/workspace/.git/",
+ "/home/user/workspace/go.mod",
+ },
+ startPath: "/home/user/workspace",
+ wantRootPath: "/home/user/workspace",
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ fs := setupFS(tt.paths)
+ d := project.NewDetector(fs)
+ ctx, err := d.Detect(tt.startPath)
+ if err != nil {
+ t.Fatalf("Detect() error: %v", err)
+ }
+
+ if tt.wantRootPath != "" && ctx.RootPath != tt.wantRootPath {
+ t.Errorf("RootPath = %q, want %q", ctx.RootPath, tt.wantRootPath)
+ }
+ if tt.wantNotHome && ctx.RootPath == "/home/user" {
+ t.Errorf("RootPath resolved to HOME (%q) - this is the bug", ctx.RootPath)
+ }
+ })
+ }
+}
+
+// =============================================================================
+// Defect 2: FK constraint failures in knowledge graph linking
+// =============================================================================
+
+func TestBootstrapRepro_FKConstraintOnLinkNodes(t *testing.T) {
+ // Reproduce the real bug: The bootstrap output shows:
+ // "failed to link nodes (llm): constraint failed: FOREIGN KEY constraint failed (787)"
+ // This happens in knowledge/ingest.go when LLM-extracted relationships reference
+ // node IDs that were looked up from an in-memory map but have been purged from the DB.
+ //
+ // The INSERT OR IGNORE in LinkNodes silently drops FK violations for evidence/semantic
+ // edges, but the LLM linking path hits a different code path where errors surface.
+ // We test both behaviors.
+
+ store, err := memory.NewSQLiteStore(":memory:")
+ if err != nil {
+ t.Fatalf("NewSQLiteStore: %v", err)
+ }
+ defer store.Close()
+
+ t.Run("insert_or_ignore_silently_drops_fk_violations", func(t *testing.T) {
+ // LinkNodes pre-checks node existence, so linking nonexistent nodes must error.
+ err := store.LinkNodes("n-nonexistent1", "n-nonexistent2", "relates_to", 0.8, nil)
+ if err == nil {
+ t.Error("LinkNodes with nonexistent nodes should return error, got nil")
+ }
+ })
+
+ t.Run("purge_then_link_race_condition", func(t *testing.T) {
+ // Simulate the real bootstrap flow:
+ // 1. Create nodes (bootstrap agents produce findings)
+ // 2. Purge old nodes by agent (bootstrap cleans stale data)
+ // 3. Re-insert new nodes (from current run)
+ // 4. LLM linking tries to connect nodes using an in-memory title->ID map
+ // that may contain stale IDs from before the purge
+ node1 := &memory.Node{
+ ID: "n-purge01", Content: "Will be purged", Type: "decision",
+ Summary: "Purge test 1", SourceAgent: "test-agent", CreatedAt: time.Now(),
+ }
+ node2 := &memory.Node{
+ ID: "n-purge02", Content: "Will be purged", Type: "feature",
+ Summary: "Purge test 2", SourceAgent: "test-agent", CreatedAt: time.Now(),
+ }
+ if err := store.CreateNode(node1); err != nil {
+ t.Fatalf("CreateNode: %v", err)
+ }
+ if err := store.CreateNode(node2); err != nil {
+ t.Fatalf("CreateNode: %v", err)
+ }
+
+ // Build title->ID map (simulates what ingest.go does before linking)
+ titleMap := map[string]string{
+ "purge test 1": "n-purge01",
+ "purge test 2": "n-purge02",
+ }
+
+ // Purge by agent (simulates bootstrap "Purging all stale nodes" step)
+ if err := store.DeleteNodesByAgent("test-agent"); err != nil {
+ t.Fatalf("DeleteNodesByAgent: %v", err)
+ }
+
+ // LLM linking uses stale title map — IDs no longer exist in DB
+ fromID := titleMap["purge test 1"]
+ toID := titleMap["purge test 2"]
+ err := store.LinkNodes(fromID, toID, "relates_to", 1.0, map[string]any{"llm_extracted": true})
+ if err == nil {
+ t.Error("LinkNodes after purge should return error for deleted nodes, got nil")
+ }
+ })
+}
+
+// =============================================================================
+// Defect 3: Git log exit 128 for sub-repos
+// =============================================================================
+
+// mockCommander implements git.Commander for testing
+type mockCommander struct {
+ responses map[string]mockResponse
+}
+
+type mockResponse struct {
+ output string
+ err error
+}
+
+func (m *mockCommander) Run(name string, args ...string) (string, error) {
+ return m.RunInDir("", name, args...)
+}
+
+func (m *mockCommander) RunInDir(dir, name string, args ...string) (string, error) {
+ key := fmt.Sprintf("%s:%s %s", dir, name, strings.Join(args, " "))
+ if resp, ok := m.responses[key]; ok {
+ return resp.output, resp.err
+ }
+ // Also try without dir prefix
+ keyNoDir := fmt.Sprintf("%s %s", name, strings.Join(args, " "))
+ if resp, ok := m.responses[keyNoDir]; ok {
+ return resp.output, resp.err
+ }
+ return "", fmt.Errorf("exit status 128: fatal: not a git repository")
+}
+
+func TestBootstrapRepro_GitLogExit128(t *testing.T) {
+ // Reproduce: git log fails with exit 128 when run against a non-git directory
+
+ tests := []struct {
+ name string
+ workDir string
+ responses map[string]mockResponse
+ wantIsRepo bool
+ }{
+ {
+ name: "non-git directory returns exit 128",
+ workDir: "/workspace/not-a-repo",
+ responses: map[string]mockResponse{
+ "/workspace/not-a-repo:git rev-parse --is-inside-work-tree": {
+ output: "",
+ err: fmt.Errorf("exit status 128: fatal: not a git repository"),
+ },
+ },
+ wantIsRepo: false,
+ },
+ {
+ name: "valid git repo succeeds",
+ workDir: "/workspace/valid-repo",
+ responses: map[string]mockResponse{
+ "/workspace/valid-repo:git rev-parse --is-inside-work-tree": {
+ output: "true",
+ err: nil,
+ },
+ },
+ wantIsRepo: true,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ cmd := &mockCommander{responses: tt.responses}
+ client := git.NewClientWithCommander(tt.workDir, cmd)
+
+ got := client.IsRepository()
+ if got != tt.wantIsRepo {
+ t.Errorf("IsRepository() = %v, want %v", got, tt.wantIsRepo)
+ }
+ })
+ }
+}
+
+func TestBootstrapRepro_GitLogOnRealTempDirs(t *testing.T) {
+ // Create a real temp dir without git init - git log should fail gracefully
+ tmpDir := t.TempDir()
+ nonGitDir := filepath.Join(tmpDir, "not-a-repo")
+ if err := os.MkdirAll(nonGitDir, 0755); err != nil {
+ t.Fatal(err)
+ }
+
+ client := git.NewClient(nonGitDir)
+ if client.IsRepository() {
+ t.Error("IsRepository() = true for non-git dir, want false")
+ }
+
+ // Create a dir with .git init and verify it works
+ gitDir := filepath.Join(tmpDir, "has-git")
+ if err := os.MkdirAll(gitDir, 0755); err != nil {
+ t.Fatal(err)
+ }
+
+ // Actually init git so we can test the real path
+ cmd := git.NewClient(gitDir)
+ // Use os/exec directly for setup since Commander doesn't expose init
+ initCmd := exec.Command("git", "init", gitDir)
+ if err := initCmd.Run(); err != nil {
+ t.Skipf("git not available: %v", err)
+ }
+
+ if !cmd.IsRepository() {
+ t.Error("IsRepository() = false for git-inited dir, want true")
+ }
+}
+
+// =============================================================================
+// Defect 4: Gemini MCP install failure (exit status 41)
+// =============================================================================
+
+func TestBootstrapRepro_GeminiMCPInstallFails(t *testing.T) {
+ // This test verifies the behavior when gemini CLI is not available.
+ // The install function uses exec.LookPath("gemini") which we can't easily mock
+ // without refactoring. Instead, we verify the code path handles missing CLI.
+
+ // Verify LookPath returns an error for a non-existent binary
+ _, err := lookPath("gemini-nonexistent-binary-xyz")
+ if err == nil {
+ t.Fatal("LookPath should fail for non-existent binary")
+ }
+
+ // The actual fix requires making installGeminiCLI testable via interfaces.
+ // For now, this test documents that the current code does check LookPath
+ // but doesn't handle version-specific exit codes (like 41) gracefully.
+ t.Log("installGeminiCLI checks exec.LookPath but doesn't handle non-standard exit codes")
+}
+
+// =============================================================================
+// Defect 5: Zero docs loaded
+// =============================================================================
+
+func TestBootstrapRepro_ZeroDocsLoaded(t *testing.T) {
+ // Reproduce: DocLoader finds 0 files when basePath has no docs at root
+ // but sub-repos contain documentation
+
+ tests := []struct {
+ name string
+ setup func(t *testing.T, dir string)
+ wantMin int // minimum expected doc count
+ wantZero bool
+ }{
+ {
+ name: "empty directory yields zero docs",
+ setup: func(t *testing.T, dir string) {
+ // Nothing to create
+ },
+ wantZero: true,
+ },
+ {
+ name: "root with README yields docs",
+ setup: func(t *testing.T, dir string) {
+ writeFile(t, filepath.Join(dir, "README.md"), "# Project\nDescription here")
+ },
+ wantMin: 1,
+ },
+ {
+ name: "docs only in sub-repos not found by root loader",
+ setup: func(t *testing.T, dir string) {
+ // Sub-repo has docs but root DocLoader doesn't scan sub-repos
+ subrepo := filepath.Join(dir, "cli")
+ os.MkdirAll(subrepo, 0755)
+ writeFile(t, filepath.Join(subrepo, "README.md"), "# CLI Readme")
+ writeFile(t, filepath.Join(subrepo, "ARCHITECTURE.md"), "# Architecture")
+ // Docs dir inside sub-repo
+ os.MkdirAll(filepath.Join(subrepo, "docs"), 0755)
+ writeFile(t, filepath.Join(subrepo, "docs", "guide.md"), "# Guide")
+ },
+ // DocLoader at workspace root won't find sub-repo docs
+ // because it only scans basePath/README.md, basePath/docs/, basePath/.taskwing/
+ wantZero: true,
+ },
+ {
+ name: "docs directory at root is found",
+ setup: func(t *testing.T, dir string) {
+ os.MkdirAll(filepath.Join(dir, "docs"), 0755)
+ writeFile(t, filepath.Join(dir, "docs", "api.md"), "# API Docs")
+ writeFile(t, filepath.Join(dir, "docs", "guide.md"), "# User Guide")
+ },
+ wantMin: 2,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ dir := t.TempDir()
+ tt.setup(t, dir)
+
+ loader := bootstrap.NewDocLoader(dir)
+ docs, err := loader.Load()
+ if err != nil {
+ t.Fatalf("Load() error: %v", err)
+ }
+
+ if tt.wantZero && len(docs) > 0 {
+ t.Errorf("Load() returned %d docs, want 0", len(docs))
+ }
+ if tt.wantMin > 0 && len(docs) < tt.wantMin {
+ t.Errorf("Load() returned %d docs, want >= %d", len(docs), tt.wantMin)
+ }
+ })
+ }
+}
+
+// =============================================================================
+// Defect 6: Claude MCP drift detected but not fixed
+// =============================================================================
+
+func TestBootstrapRepro_MCPDriftDetection(t *testing.T) {
+ // This test verifies drift detection logic. The actual detectClaudeMCP()
+ // function uses exec.LookPath and exec.Command which require a real
+ // claude binary. The test documents the behavior and prepares for the fix.
+
+ // Test that the detection functions handle missing binaries gracefully
+ t.Run("missing_claude_binary", func(t *testing.T) {
+ // detectClaudeMCP() should return false if claude is not in PATH
+ // We can't call it directly from bootstrap_test package, but we verify
+ // the underlying mechanism works
+ // The lookPath wrapper (see helpers below) exists so this probe can be
+ // stubbed once detectClaudeMCP becomes injectable.
+ _, err := lookPath("claude-nonexistent-xyz")
+ if err == nil {
+ t.Error("expected error for missing binary")
+ }
+ })
+
+ // The core issue: drift is DETECTED but bootstrap refuses to FIX it.
+ // The fix needs: a --fix-mcp flag or interactive prompt.
+ t.Log("MCP drift fix requires: (1) safe re-registration flow, (2) --fix-mcp flag or interactive prompt, (3) dry-run mode")
+}
+
+// =============================================================================
+// Defect 7: Hallucinated findings rejected by verifier
+// =============================================================================
+
+func TestBootstrapRepro_HallucinatedFindingsRejected(t *testing.T) {
+ // Reproduce: LLM generates findings with evidence pointing to non-existent files
+ dir := t.TempDir()
+
+ // Create a real file for valid evidence
+ writeFile(t, filepath.Join(dir, "main.go"), `package main
+
+func main() {
+ println("hello")
+}
+`)
+
+ verifier := verification.NewAgent(dir)
+
+ // Three findings: one fully valid, one pointing at a missing file,
+ // one pointing at a real file but with out-of-range lines/snippet.
+ findings := []core.Finding{
+ {
+ Title: "Valid finding with real file",
+ Description: "This finding has correct evidence",
+ ConfidenceScore: 0.8,
+ Evidence: []core.Evidence{
+ {
+ FilePath: "main.go",
+ StartLine: 3,
+ EndLine: 5,
+ Snippet: `func main() {`,
+ },
+ },
+ },
+ {
+ Title: "Hallucinated finding with fake file",
+ Description: "This finding references a non-existent file",
+ ConfidenceScore: 0.9,
+ Evidence: []core.Evidence{
+ {
+ FilePath: "nonexistent/hallucinated.go",
+ StartLine: 10,
+ EndLine: 20,
+ Snippet: `func DoSomething() error {`,
+ },
+ },
+ },
+ {
+ Title: "Hallucinated finding with wrong line numbers",
+ Description: "File exists but snippet at wrong lines",
+ ConfidenceScore: 0.7,
+ Evidence: []core.Evidence{
+ {
+ FilePath: "main.go",
+ StartLine: 100,
+ EndLine: 110,
+ Snippet: `func completelyWrongContent() {`,
+ },
+ },
+ },
+ }
+
+ // NOTE(review): the positional assertions below assume VerifyFindings
+ // preserves input order — confirm in verification.Agent.
+ verified := verifier.VerifyFindings(context.Background(), findings)
+ if len(verified) != 3 {
+ t.Fatalf("VerifyFindings returned %d findings, want 3", len(verified))
+ }
+
+ // First finding should be verified or partial (real file, real snippet)
+ if verified[0].VerificationStatus == core.VerificationStatusRejected {
+ t.Errorf("Finding 0 (valid) was rejected, want verified or partial")
+ }
+
+ // Second finding should be rejected (non-existent file)
+ if verified[1].VerificationStatus != core.VerificationStatusRejected {
+ t.Errorf("Finding 1 (hallucinated file) status = %q, want %q",
+ verified[1].VerificationStatus, core.VerificationStatusRejected)
+ }
+
+ // Verify confidence was penalized for rejected finding
+ if verified[1].ConfidenceScore >= 0.9 {
+ t.Errorf("Rejected finding confidence = %f, should be penalized below 0.9", verified[1].ConfidenceScore)
+ }
+
+ // Filter should remove rejected findings
+ filtered := verification.FilterVerifiedFindings(verified)
+ for _, f := range filtered {
+ if f.VerificationStatus == core.VerificationStatusRejected {
+ t.Errorf("FilterVerifiedFindings kept rejected finding: %q", f.Title)
+ }
+ }
+}
+
+// =============================================================================
+// Defect 8: No metadata from sub-repos
+// =============================================================================
+
+func TestBootstrapRepro_NoMetadataFromSubRepos(t *testing.T) {
+ // Reproduce: Metadata extraction runs only on parent dir.
+ // If parent is not a git repo, no metadata is extracted even though
+ // sub-repos have valid git history.
+
+ dir := t.TempDir()
+
+ // Create sub-repo structures (without actual .git - just markers)
+ subrepos := []string{"cli", "app", "macos"}
+ for _, name := range subrepos {
+ subDir := filepath.Join(dir, name)
+ if err := os.MkdirAll(subDir, 0755); err != nil {
+ t.Fatalf("MkdirAll %s: %v", subDir, err)
+ }
+ writeFile(t, filepath.Join(subDir, "go.mod"), fmt.Sprintf("module example.com/%s", name))
+ writeFile(t, filepath.Join(subDir, "README.md"), fmt.Sprintf("# %s\nProject description", name))
+ }
+
+ // DocLoader from parent dir should NOT find sub-repo docs (current behavior = bug).
+ // Surface Load errors instead of discarding them so a broken loader fails loudly.
+ loader := bootstrap.NewDocLoader(dir)
+ docs, err := loader.Load()
+ if err != nil {
+ t.Fatalf("root Load() error: %v", err)
+ }
+
+ if len(docs) > 0 {
+ t.Logf("Root loader found %d docs (unexpected for multi-repo workspace without root docs)", len(docs))
+ }
+
+ // Each sub-repo DocLoader should find its own docs
+ for _, name := range subrepos {
+ subLoader := bootstrap.NewDocLoader(filepath.Join(dir, name))
+ subDocs, err := subLoader.Load()
+ if err != nil {
+ t.Fatalf("sub-repo %q Load() error: %v", name, err)
+ }
+ if len(subDocs) == 0 {
+ t.Errorf("Sub-repo %q DocLoader found 0 docs, want >= 1 (has README.md)", name)
+ }
+ }
+
+ // The fix: bootstrap should iterate sub-repos and aggregate docs
+ t.Log("Fix needed: bootstrap should run DocLoader per sub-repo and aggregate results")
+}
+
+// =============================================================================
+// Defect 9: IsMonorepo=false despite 4 detected repositories
+// =============================================================================
+
+func TestBootstrapRepro_IsMonorepoMisclassification(t *testing.T) {
+ // Reproduce: workspace has 4 sub-repos but no .git at root.
+ // project.Detect() finds no marker and falls back to startPath with
+ // IsMonorepo=false. Meanwhile, workspace.DetectWorkspace() correctly
+ // identifies it as multi-repo. The two APIs disagree.
+
+ t.Run("detector_says_not_monorepo_for_multi_repo_workspace", func(t *testing.T) {
+ // Simulate: /workspace has 4 sub-repos with .git each, no root .git
+ fs := setupFS([]string{
+ "/workspace/cli/.git/",
+ "/workspace/cli/go.mod",
+ "/workspace/app/.git/",
+ "/workspace/app/package.json",
+ "/workspace/macos/.git/",
+ "/workspace/macos/Package.swift",
+ "/workspace/karluk/.git/",
+ "/workspace/karluk/pyproject.toml",
+ })
+
+ d := project.NewDetector(fs)
+ ctx, err := d.Detect("/workspace")
+ if err != nil {
+ t.Fatalf("Detect() error: %v", err)
+ }
+
+ // Current behavior: falls through to CWD fallback with no marker
+ // and IsMonorepo=false, which is wrong for a multi-repo workspace
+ if ctx.MarkerType == project.MarkerNone && !ctx.IsMonorepo {
+ t.Log("BUG CONFIRMED: Detect() returns MarkerNone + IsMonorepo=false for 4-repo workspace")
+ t.Errorf("IsMonorepo = false for workspace with 4 sub-repos, want true (or use WorkspaceTypeMultiRepo)")
+ }
+ })
+
+ t.Run("workspace_detector_correctly_identifies_multi_repo", func(t *testing.T) {
+ // The workspace detector (using real FS) correctly identifies multi-repo
+ dir := t.TempDir()
+ repos := []struct {
+ name string
+ marker string
+ }{
+ {"cli", "go.mod"},
+ {"app", "package.json"},
+ {"macos", "go.mod"},
+ {"karluk", "pyproject.toml"},
+ }
+ for _, r := range repos {
+ subDir := filepath.Join(dir, r.name)
+ if err := os.MkdirAll(subDir, 0755); err != nil {
+ t.Fatalf("MkdirAll %s: %v", subDir, err)
+ }
+ writeFile(t, filepath.Join(subDir, r.marker), "")
+ }
+
+ ws, err := project.DetectWorkspace(dir)
+ if err != nil {
+ t.Fatalf("DetectWorkspace() error: %v", err)
+ }
+
+ if ws.Type == project.WorkspaceTypeSingle {
+ t.Errorf("DetectWorkspace Type = Single, want MultiRepo or Monorepo")
+ }
+ if len(ws.Services) != 4 {
+ t.Errorf("DetectWorkspace found %d services, want 4", len(ws.Services))
+ }
+ })
+
+ t.Run("disagreement_between_detect_and_workspace", func(t *testing.T) {
+ // Both APIs should agree on multi-repo detection
+ dir := t.TempDir()
+ for _, name := range []string{"svc-a", "svc-b", "svc-c"} {
+ subDir := filepath.Join(dir, name)
+ if err := os.MkdirAll(subDir, 0755); err != nil {
+ t.Fatalf("MkdirAll %s: %v", subDir, err)
+ }
+ writeFile(t, filepath.Join(subDir, "go.mod"), fmt.Sprintf("module %s", name))
+ }
+
+ // project.Detect uses OsFs internally. Check the errors instead of
+ // discarding them: dereferencing a nil context below would panic and
+ // mask the real failure.
+ detectCtx, err := project.Detect(dir)
+ if err != nil {
+ t.Fatalf("Detect() error: %v", err)
+ }
+ wsInfo, err := project.DetectWorkspace(dir)
+ if err != nil {
+ t.Fatalf("DetectWorkspace() error: %v", err)
+ }
+
+ if wsInfo.Type == project.WorkspaceTypeMultiRepo && !detectCtx.IsMonorepo {
+ t.Error("DISAGREEMENT: DetectWorkspace says MultiRepo but Detect says IsMonorepo=false")
+ }
+ })
+}
+
+// =============================================================================
+// DB Integration: Verify node+edge transactional integrity
+// =============================================================================
+
+func TestBootstrapIntegration_NodeEdgeTransactionIntegrity(t *testing.T) {
+ // ":memory:" gives this test an isolated, throwaway SQLite database.
+ store, err := memory.NewSQLiteStore(":memory:")
+ if err != nil {
+ t.Fatalf("NewSQLiteStore: %v", err)
+ }
+ defer store.Close()
+
+ // Create a batch of nodes
+ nodeIDs := make([]string, 10)
+ for i := range nodeIDs {
+ nodeIDs[i] = fmt.Sprintf("n-batch%03d", i)
+ node := &memory.Node{
+ ID: nodeIDs[i],
+ Content: fmt.Sprintf("Node %d content", i),
+ Type: memory.NodeTypeDecision,
+ Summary: fmt.Sprintf("Decision %d", i),
+ SourceAgent: "test",
+ CreatedAt: time.Now(),
+ }
+ if err := store.CreateNode(node); err != nil {
+ t.Fatalf("CreateNode %d: %v", i, err)
+ }
+ }
+
+ // Link valid nodes - should succeed.
+ // Chain topology: n0->n1->...->n9, i.e. exactly 9 edges.
+ for i := 0; i < len(nodeIDs)-1; i++ {
+ err := store.LinkNodes(nodeIDs[i], nodeIDs[i+1], "relates_to", 0.9, nil)
+ if err != nil {
+ t.Errorf("LinkNodes(%s, %s) failed: %v", nodeIDs[i], nodeIDs[i+1], err)
+ }
+ }
+
+ // Verify edges were created
+ edges, err := store.GetAllNodeEdges()
+ if err != nil {
+ t.Fatalf("GetAllNodeEdges: %v", err)
+ }
+ if len(edges) != 9 {
+ t.Errorf("got %d edges, want 9", len(edges))
+ }
+
+ // Now simulate the purge-then-link bug:
+ // Delete all nodes by agent, then try to create edges
+ if err := store.DeleteNodesByAgent("test"); err != nil {
+ t.Fatalf("DeleteNodesByAgent: %v", err)
+ }
+
+ // With ON DELETE CASCADE, edges should also be deleted
+ edgesAfterPurge, err := store.GetAllNodeEdges()
+ if err != nil {
+ t.Fatalf("GetAllNodeEdges after purge: %v", err)
+ }
+ if len(edgesAfterPurge) != 0 {
+ t.Errorf("got %d edges after cascade delete, want 0", len(edgesAfterPurge))
+ }
+
+ // Try to link purged nodes - should error with existence pre-check
+ // (the LinkNodes fix described in the CHANGELOG, instead of SQLite error 787).
+ err = store.LinkNodes("n-batch000", "n-batch001", "depends_on", 1.0, nil)
+ if err == nil {
+ t.Error("LinkNodes with purged nodes should return error, got nil")
+ }
+}
+
+// =============================================================================
+// DocLoader with .taskwing memory path
+// =============================================================================
+
+func TestBootstrapIntegration_DocLoaderWithTaskwingDir(t *testing.T) {
+ dir := t.TempDir()
+ taskwingDir := filepath.Join(dir, ".taskwing")
+ if err := os.MkdirAll(taskwingDir, 0755); err != nil {
+ t.Fatalf("MkdirAll %s: %v", taskwingDir, err)
+ }
+
+ writeFile(t, filepath.Join(dir, "README.md"), "# Root Readme")
+ writeFile(t, filepath.Join(taskwingDir, "ARCHITECTURE.md"), "# Generated Architecture")
+
+ loader := bootstrap.NewDocLoader(dir)
+ docs, err := loader.Load()
+ if err != nil {
+ t.Fatalf("Load() error: %v", err)
+ }
+
+ foundReadme := false
+ foundArch := false
+ for _, doc := range docs {
+ if doc.Name == "README.md" && doc.Category == "readme" {
+ foundReadme = true
+ }
+ // Normalize to forward slashes so the prefix check also passes on
+ // Windows, where doc.Path may use backslashes.
+ if doc.Name == "ARCHITECTURE.md" && doc.Category == "architecture" && strings.HasPrefix(filepath.ToSlash(doc.Path), ".taskwing/") {
+ foundArch = true
+ }
+ }
+
+ if !foundReadme {
+ t.Error("DocLoader did not find README.md")
+ }
+ if !foundArch {
+ t.Error("DocLoader did not find .taskwing/ARCHITECTURE.md")
+ }
+}
+
+// =============================================================================
+// Helpers
+// =============================================================================
+
+// setupFS creates an in-memory filesystem with the given paths.
+// Paths ending with "/" are created as directories, others as files
+// (parent directories are created implicitly). Empty strings are skipped.
+// Errors are deliberately ignored: MemMapFs operations on well-formed
+// paths do not fail in practice, and this is a test-only helper.
+func setupFS(paths []string) afero.Fs {
+ fs := afero.NewMemMapFs()
+ for _, p := range paths {
+ if p == "" {
+ continue // guard: p[len(p)-1] below would panic on ""
+ }
+ if p[len(p)-1] == '/' {
+ _ = fs.MkdirAll(p, 0755)
+ } else {
+ dir := filepath.Dir(p)
+ _ = fs.MkdirAll(dir, 0755)
+ _ = afero.WriteFile(fs, p, []byte(""), 0644)
+ }
+ }
+ return fs
+}
+
+// writeFile is a test helper that writes content to path, creating any
+// missing parent directories first. Any I/O failure aborts the test.
+func writeFile(t *testing.T, path, content string) {
+ t.Helper()
+ parent := filepath.Dir(path)
+ if mkErr := os.MkdirAll(parent, 0755); mkErr != nil {
+ t.Fatalf("MkdirAll %s: %v", parent, mkErr)
+ }
+ if wrErr := os.WriteFile(path, []byte(content), 0644); wrErr != nil {
+ t.Fatalf("WriteFile %s: %v", path, wrErr)
+ }
+}
+
+// lookPath wraps exec.LookPath for testability.
+// The indirection keeps PATH probing in one place so tests can later swap
+// in a fake (e.g. to simulate a present or absent claude binary).
+func lookPath(name string) (string, error) {
+ return exec.LookPath(name)
+}
diff --git a/internal/bootstrap/doc_loader.go b/internal/bootstrap/doc_loader.go
index efaf6d5..28e73a8 100644
--- a/internal/bootstrap/doc_loader.go
+++ b/internal/bootstrap/doc_loader.go
@@ -100,6 +100,45 @@ func (l *DocLoader) Load() ([]DocFile, error) {
return docs, nil
}
+// LoadForServices scans the root path and each service sub-directory for docs.
+// Service paths are relative to basePath. This ensures sub-repo docs are discovered
+// in multi-repo workspaces where the root DocLoader would miss them.
+// Returned sub-repo doc paths are prefixed with the service name; root docs
+// keep their original relative paths. Only the root Load error is fatal.
+func (l *DocLoader) LoadForServices(services []string) ([]DocFile, error) {
+ // Load root-level docs first
+ docs, err := l.Load()
+ if err != nil {
+ return nil, err
+ }
+
+ // Track already-loaded paths to avoid duplicates
+ loaded := make(map[string]bool, len(docs))
+ for _, d := range docs {
+ loaded[d.Path] = true
+ }
+
+ // Scan each service sub-directory
+ for _, service := range services {
+ servicePath := filepath.Join(l.basePath, service)
+ // NOTE(review): constructs the sub-loader directly instead of via
+ // NewDocLoader — confirm DocLoader has no other fields needing defaults.
+ subLoader := &DocLoader{basePath: servicePath, maxSize: l.maxSize}
+ subDocs, subErr := subLoader.Load()
+ if subErr != nil {
+ continue // Skip services that fail to load
+ }
+
+ for _, doc := range subDocs {
+ // Prefix path with service name for workspace context
+ doc.Path = filepath.Join(service, doc.Path)
+ if loaded[doc.Path] {
+ continue
+ }
+ loaded[doc.Path] = true
+ docs = append(docs, doc)
+ }
+ }
+
+ return docs, nil
+}
+
func (l *DocLoader) loadFile(path string) (string, error) {
info, err := os.Stat(path)
if err != nil {
diff --git a/internal/bootstrap/doc_loader_test.go b/internal/bootstrap/doc_loader_test.go
new file mode 100644
index 0000000..d4effd1
--- /dev/null
+++ b/internal/bootstrap/doc_loader_test.go
@@ -0,0 +1,217 @@
+package bootstrap
+
+import (
+ "os"
+ "path/filepath"
+ "testing"
+)
+
+// TestDocIngestion covers the root-level DocLoader: file discovery,
+// category assignment, the per-file size cap, and extension filtering.
+func TestDocIngestion(t *testing.T) {
+ t.Run("root_docs_always_loaded", func(t *testing.T) {
+ dir := t.TempDir()
+ os.WriteFile(filepath.Join(dir, "README.md"), []byte("# My Project"), 0o644)
+ os.WriteFile(filepath.Join(dir, "ARCHITECTURE.md"), []byte("# Architecture"), 0o644)
+ os.MkdirAll(filepath.Join(dir, "docs"), 0o755)
+ os.WriteFile(filepath.Join(dir, "docs", "setup.md"), []byte("# Setup Guide"), 0o644)
+
+ loader := NewDocLoader(dir)
+ docs, err := loader.Load()
+ if err != nil {
+ t.Fatalf("Load error: %v", err)
+ }
+
+ if len(docs) == 0 {
+ t.Error("docs count = 0, want > 0")
+ }
+
+ // Verify specific files found
+ found := make(map[string]bool)
+ for _, d := range docs {
+ found[d.Path] = true
+ }
+ for _, expected := range []string{"README.md", "ARCHITECTURE.md", filepath.Join("docs", "setup.md")} {
+ if !found[expected] {
+ t.Errorf("Missing expected doc: %q", expected)
+ }
+ }
+ })
+
+ t.Run("categories_assigned_correctly", func(t *testing.T) {
+ dir := t.TempDir()
+ os.WriteFile(filepath.Join(dir, "README.md"), []byte("# Readme"), 0o644)
+ os.WriteFile(filepath.Join(dir, "CONTRIBUTING.md"), []byte("# Contributing"), 0o644)
+ os.WriteFile(filepath.Join(dir, "CLAUDE.md"), []byte("# Claude"), 0o644)
+
+ loader := NewDocLoader(dir)
+ docs, err := loader.Load()
+ if err != nil {
+ t.Fatalf("Load error: %v", err)
+ }
+
+ cats := make(map[string]string)
+ for _, d := range docs {
+ cats[d.Name] = d.Category
+ }
+
+ if cats["README.md"] != "readme" {
+ t.Errorf("README.md category = %q, want readme", cats["README.md"])
+ }
+ if cats["CONTRIBUTING.md"] != "contributing" {
+ t.Errorf("CONTRIBUTING.md category = %q, want contributing", cats["CONTRIBUTING.md"])
+ }
+ if cats["CLAUDE.md"] != "ai-instructions" {
+ t.Errorf("CLAUDE.md category = %q, want ai-instructions", cats["CLAUDE.md"])
+ }
+ })
+
+ t.Run("large_files_skipped", func(t *testing.T) {
+ dir := t.TempDir()
+ // Create a file larger than 512KB
+ // NOTE(review): relies on DocLoader's default maxSize being <= 600KB —
+ // confirm the default in NewDocLoader.
+ bigContent := make([]byte, 600*1024)
+ os.WriteFile(filepath.Join(dir, "README.md"), bigContent, 0o644)
+
+ loader := NewDocLoader(dir)
+ docs, err := loader.Load()
+ if err != nil {
+ t.Fatalf("Load error: %v", err)
+ }
+
+ if len(docs) != 0 {
+ t.Errorf("docs count = %d, want 0 (oversized file should be skipped)", len(docs))
+ }
+ })
+
+ t.Run("non_doc_files_ignored", func(t *testing.T) {
+ dir := t.TempDir()
+ os.WriteFile(filepath.Join(dir, "README.md"), []byte("# Root"), 0o644)
+ os.MkdirAll(filepath.Join(dir, "docs"), 0o755)
+ os.WriteFile(filepath.Join(dir, "docs", "guide.md"), []byte("# Guide"), 0o644)
+ os.WriteFile(filepath.Join(dir, "docs", "image.png"), []byte("binary"), 0o644)
+ os.WriteFile(filepath.Join(dir, "docs", "data.json"), []byte("{}"), 0o644)
+
+ loader := NewDocLoader(dir)
+ docs, err := loader.Load()
+ if err != nil {
+ t.Fatalf("Load error: %v", err)
+ }
+
+ for _, d := range docs {
+ ext := filepath.Ext(d.Path)
+ if ext == ".png" || ext == ".json" {
+ t.Errorf("Non-doc file included: %q", d.Path)
+ }
+ }
+ })
+}
+
+// TestSubrepoMetadataExtraction exercises LoadForServices: sub-repo doc
+// discovery, service-name path prefixing, and graceful handling of nil,
+// missing, and duplicate service entries.
+func TestSubrepoMetadataExtraction(t *testing.T) {
+ t.Run("loads_docs_from_sub_repos", func(t *testing.T) {
+ dir := t.TempDir()
+
+ // Root has a README
+ os.WriteFile(filepath.Join(dir, "README.md"), []byte("# Workspace"), 0o644)
+
+ // Sub-repo "api" has docs
+ apiDir := filepath.Join(dir, "api")
+ os.MkdirAll(filepath.Join(apiDir, "docs"), 0o755)
+ os.WriteFile(filepath.Join(apiDir, "README.md"), []byte("# API Service"), 0o644)
+ os.WriteFile(filepath.Join(apiDir, "ARCHITECTURE.md"), []byte("# API Arch"), 0o644)
+ os.WriteFile(filepath.Join(apiDir, "docs", "guide.md"), []byte("# API Guide"), 0o644)
+
+ // Sub-repo "web" has docs
+ webDir := filepath.Join(dir, "web")
+ os.MkdirAll(webDir, 0o755)
+ os.WriteFile(filepath.Join(webDir, "README.md"), []byte("# Web App"), 0o644)
+
+ loader := NewDocLoader(dir)
+ docs, err := loader.LoadForServices([]string{"api", "web"})
+ if err != nil {
+ t.Fatalf("LoadForServices error: %v", err)
+ }
+
+ // Should find: root README + api/README + api/ARCHITECTURE + api/docs/guide + web/README
+ if len(docs) < 4 {
+ t.Errorf("LoadForServices found %d docs, want >= 4", len(docs))
+ }
+
+ // Verify sub-repo docs have prefixed paths
+ foundAPIReadme := false
+ foundWebReadme := false
+ foundAPIGuide := false
+ for _, doc := range docs {
+ switch doc.Path {
+ case filepath.Join("api", "README.md"):
+ foundAPIReadme = true
+ case filepath.Join("web", "README.md"):
+ foundWebReadme = true
+ case filepath.Join("api", "docs", "guide.md"):
+ foundAPIGuide = true
+ }
+ }
+
+ if !foundAPIReadme {
+ t.Error("Missing api/README.md in loaded docs")
+ }
+ if !foundWebReadme {
+ t.Error("Missing web/README.md in loaded docs")
+ }
+ if !foundAPIGuide {
+ t.Error("Missing api/docs/guide.md in loaded docs")
+ }
+ })
+
+ t.Run("root_only_when_no_services", func(t *testing.T) {
+ dir := t.TempDir()
+ os.WriteFile(filepath.Join(dir, "README.md"), []byte("# Root"), 0o644)
+
+ loader := NewDocLoader(dir)
+ docs, err := loader.LoadForServices(nil)
+ if err != nil {
+ t.Fatalf("LoadForServices error: %v", err)
+ }
+
+ if len(docs) != 1 {
+ t.Errorf("LoadForServices found %d docs, want 1 (root README only)", len(docs))
+ }
+ })
+
+ t.Run("missing_service_dir_skipped", func(t *testing.T) {
+ dir := t.TempDir()
+ os.WriteFile(filepath.Join(dir, "README.md"), []byte("# Root"), 0o644)
+
+ loader := NewDocLoader(dir)
+ docs, err := loader.LoadForServices([]string{"nonexistent"})
+ if err != nil {
+ t.Fatalf("LoadForServices error: %v", err)
+ }
+
+ if len(docs) != 1 {
+ t.Errorf("LoadForServices found %d docs, want 1 (root only, missing service skipped)", len(docs))
+ }
+ })
+
+ t.Run("no_duplicate_paths", func(t *testing.T) {
+ dir := t.TempDir()
+ os.WriteFile(filepath.Join(dir, "README.md"), []byte("# Root"), 0o644)
+
+ // Create sub-dir that also has root-level patterns
+ subDir := filepath.Join(dir, "svc")
+ os.MkdirAll(subDir, 0o755)
+ os.WriteFile(filepath.Join(subDir, "README.md"), []byte("# Svc"), 0o644)
+
+ loader := NewDocLoader(dir)
+ docs, err := loader.LoadForServices([]string{"svc"})
+ if err != nil {
+ t.Fatalf("LoadForServices error: %v", err)
+ }
+
+ // Check for duplicates
+ seen := make(map[string]int)
+ for _, doc := range docs {
+ seen[doc.Path]++
+ if seen[doc.Path] > 1 {
+ t.Errorf("Duplicate doc path: %q", doc.Path)
+ }
+ }
+ })
+}
diff --git a/internal/bootstrap/mcp_healthcheck_test.go b/internal/bootstrap/mcp_healthcheck_test.go
new file mode 100644
index 0000000..2fa315b
--- /dev/null
+++ b/internal/bootstrap/mcp_healthcheck_test.go
@@ -0,0 +1,397 @@
+package bootstrap
+
+import (
+ "os"
+ "path/filepath"
+ "testing"
+)
+
+// =============================================================================
+// TestMCPInstallAndHealthcheck: Drift detection and report generation
+// =============================================================================
+
+// TestMCPInstallAndHealthcheck exercises drift aggregation over synthetic
+// IntegrationReport maps: GlobalMCPDriftAIs, HasManagedLocalDrift, and the
+// requirement that every drift issue carries root-cause evidence.
+func TestMCPInstallAndHealthcheck(t *testing.T) {
+ t.Run("global_mcp_drift_detection", func(t *testing.T) {
+ // Simulate reports where Claude has global MCP drift
+ reports := map[string]IntegrationReport{
+ "claude": {
+ AI: "claude",
+ GlobalMCPDrift: true,
+ Issues: []IntegrationIssue{
+ {
+ AI: "claude",
+ Component: AIComponentMCPGlobal,
+ Ownership: OwnershipManaged,
+ Status: ComponentStatusStale,
+ Reason: "Global MCP config references stale binary path",
+ AutoFixable: false,
+ MutatesGlobal: true,
+ },
+ },
+ },
+ "gemini": {
+ AI: "gemini",
+ GlobalMCPDrift: false,
+ },
+ "codex": {
+ AI: "codex",
+ GlobalMCPDrift: false,
+ },
+ }
+
+ driftAIs := GlobalMCPDriftAIs(reports)
+ if len(driftAIs) != 1 || driftAIs[0] != "claude" {
+ t.Errorf("GlobalMCPDriftAIs = %v, want [claude]", driftAIs)
+ }
+ })
+
+ t.Run("no_drift_when_all_healthy", func(t *testing.T) {
+ reports := map[string]IntegrationReport{
+ "claude": {AI: "claude", GlobalMCPDrift: false},
+ "gemini": {AI: "gemini", GlobalMCPDrift: false},
+ }
+
+ driftAIs := GlobalMCPDriftAIs(reports)
+ if len(driftAIs) != 0 {
+ t.Errorf("GlobalMCPDriftAIs = %v, want empty", driftAIs)
+ }
+ })
+
+ t.Run("multiple_drifts_detected", func(t *testing.T) {
+ reports := map[string]IntegrationReport{
+ "claude": {AI: "claude", GlobalMCPDrift: true},
+ "gemini": {AI: "gemini", GlobalMCPDrift: true},
+ "codex": {AI: "codex", GlobalMCPDrift: false},
+ }
+
+ // Only the count is asserted: map iteration order is random, so the
+ // result's ordering is deliberately not checked.
+ driftAIs := GlobalMCPDriftAIs(reports)
+ if len(driftAIs) != 2 {
+ t.Errorf("GlobalMCPDriftAIs returned %d AIs, want 2", len(driftAIs))
+ }
+ })
+
+ t.Run("managed_local_drift_detection", func(t *testing.T) {
+ reports := map[string]IntegrationReport{
+ "claude": {
+ AI: "claude",
+ ManagedLocalDrift: true,
+ Issues: []IntegrationIssue{
+ {
+ AI: "claude",
+ Component: AIComponentCommands,
+ Ownership: OwnershipManaged,
+ Status: ComponentStatusStale,
+ Reason: "Managed commands directory is stale",
+ AutoFixable: true,
+ },
+ },
+ },
+ }
+
+ if !HasManagedLocalDrift(reports) {
+ t.Error("HasManagedLocalDrift = false, want true")
+ }
+ })
+
+ t.Run("drift_issue_provides_root_cause_evidence", func(t *testing.T) {
+ // Verify that drift issues include reason (root-cause evidence)
+ issue := IntegrationIssue{
+ AI: "claude",
+ Component: AIComponentMCPGlobal,
+ Status: ComponentStatusStale,
+ Reason: "Global MCP config references stale binary path",
+ MutatesGlobal: true,
+ }
+
+ if issue.Reason == "" {
+ t.Error("Issue.Reason is empty - drift detection must provide root-cause evidence")
+ }
+ if !issue.MutatesGlobal {
+ t.Error("Issue.MutatesGlobal should be true for global MCP drift")
+ }
+ })
+}
+
+// =============================================================================
+// TestRepairPlan: Verify repair plan generation from drift reports
+// =============================================================================
+
+// TestRepairPlanFromDriftReports verifies BuildRepairPlan: actions are
+// generated for managed drift, and global MCP mutations default to a
+// disabled (Apply=false) action carrying an explanatory Reason.
+func TestRepairPlanFromDriftReports(t *testing.T) {
+ t.Run("generates_actions_for_managed_drift", func(t *testing.T) {
+ reports := map[string]IntegrationReport{
+ "claude": {
+ AI: "claude",
+ ManagedLocalDrift: true,
+ Issues: []IntegrationIssue{
+ {
+ AI: "claude",
+ Component: AIComponentCommands,
+ Ownership: OwnershipManaged,
+ Status: ComponentStatusStale,
+ AutoFixable: true,
+ },
+ },
+ },
+ }
+
+ plan := BuildRepairPlan(reports, RepairPlanOptions{
+ TargetAIs: []string{"claude"},
+ })
+
+ if len(plan.Actions) == 0 {
+ t.Error("BuildRepairPlan returned 0 actions for managed drift, want >= 1")
+ }
+ })
+
+ t.Run("disables_global_mcp_mutations_by_default", func(t *testing.T) {
+ reports := map[string]IntegrationReport{
+ "claude": {
+ AI: "claude",
+ GlobalMCPDrift: true,
+ Issues: []IntegrationIssue{
+ {
+ AI: "claude",
+ Component: AIComponentMCPGlobal,
+ Status: ComponentStatusStale,
+ MutatesGlobal: true,
+ AutoFixable: true,
+ },
+ },
+ },
+ }
+
+ plan := BuildRepairPlan(reports, RepairPlanOptions{
+ TargetAIs: []string{"claude"},
+ IncludeGlobalMutations: false, // default: don't mutate global
+ })
+
+ // Global MCP action should exist but with Apply=false
+ foundGlobal := false
+ for _, action := range plan.Actions {
+ if action.Component == AIComponentMCPGlobal {
+ foundGlobal = true
+ if action.Apply {
+ t.Error("Global MCP action has Apply=true without IncludeGlobalMutations - should be disabled")
+ }
+ if action.Reason == "" {
+ t.Error("Global MCP action should have a reason when disabled")
+ }
+ }
+ }
+ if !foundGlobal {
+ t.Error("BuildRepairPlan should include global MCP action (disabled) for reporting")
+ }
+ })
+}
+
+// =============================================================================
+// TestClaudeDriftDetection: Filesystem-based Claude MCP drift scenarios
+// =============================================================================
+
+// TestClaudeDriftDetection covers filesystem-based Claude drift scenarios:
+// an empty project (no drift), a stale managed-commands version, a missing
+// global MCP registration, the explicit-consent gate for global fixes, and
+// the rule that every drift issue/action carries evidence.
+func TestClaudeDriftDetection(t *testing.T) {
+ t.Run("missing_commands_dir_no_global_mcp", func(t *testing.T) {
+ // Empty project: no .claude directory, no global MCP
+ basePath := t.TempDir()
+
+ report := EvaluateIntegration(basePath, "claude", false)
+
+ // No commands dir + no global MCP = nothing configured, no drift
+ if report.CommandsDirExists {
+ t.Error("CommandsDirExists should be false for empty project")
+ }
+ if report.GlobalMCPDrift {
+ t.Error("GlobalMCPDrift should be false when nothing is configured")
+ }
+ })
+
+ t.Run("managed_commands_stale_version_triggers_drift", func(t *testing.T) {
+ basePath := t.TempDir()
+ commandsDir := filepath.Join(basePath, ".claude", "commands")
+ if err := os.MkdirAll(commandsDir, 0o755); err != nil {
+ t.Fatal(err)
+ }
+
+ // Write managed marker with a stale version
+ markerPath := filepath.Join(commandsDir, TaskWingManagedFile)
+ if err := os.WriteFile(markerPath, []byte("# Version: 0.0.0-old\n"), 0o644); err != nil {
+ t.Fatal(err)
+ }
+
+ // Write all expected command files so the version check is reached
+ for name := range expectedSlashCommandFiles(".md") {
+ if err := os.WriteFile(filepath.Join(commandsDir, name), []byte("test"), 0o644); err != nil {
+ t.Fatal(err)
+ }
+ }
+
+ report := EvaluateIntegration(basePath, "claude", true)
+
+ if !report.ManagedLocalDrift {
+ t.Error("ManagedLocalDrift should be true for stale managed version")
+ }
+ // Verify issue has root-cause evidence
+ foundDrift := false
+ for _, issue := range report.Issues {
+ if issue.Component == AIComponentCommands && issue.Ownership == OwnershipManaged {
+ foundDrift = true
+ if issue.Status != ComponentStatusStale {
+ t.Errorf("Expected stale status for version mismatch, got %q", issue.Status)
+ }
+ if issue.Reason == "" {
+ t.Error("Stale commands issue must include reason (root-cause evidence)")
+ }
+ if !issue.AutoFixable {
+ t.Error("Managed stale commands should be auto-fixable")
+ }
+ }
+ }
+ if !foundDrift {
+ t.Error("Expected managed commands drift issue for version mismatch")
+ }
+ })
+
+ t.Run("local_configured_but_global_mcp_missing_triggers_drift", func(t *testing.T) {
+ basePath := t.TempDir()
+ commandsDir := filepath.Join(basePath, ".claude", "commands")
+ if err := os.MkdirAll(commandsDir, 0o755); err != nil {
+ t.Fatal(err)
+ }
+
+ // Write managed marker with current version
+ markerPath := filepath.Join(commandsDir, TaskWingManagedFile)
+ currentVersion := AIToolConfigVersion("claude")
+ if err := os.WriteFile(markerPath, []byte("# Version: "+currentVersion+"\n"), 0o644); err != nil {
+ t.Fatal(err)
+ }
+
+ // Write expected slash command files
+ for name := range expectedSlashCommandFiles(".md") {
+ if err := os.WriteFile(filepath.Join(commandsDir, name), []byte("test"), 0o644); err != nil {
+ t.Fatal(err)
+ }
+ }
+
+ // globalMCPExists=false means drift
+ report := EvaluateIntegration(basePath, "claude", false)
+
+ if !report.GlobalMCPDrift {
+ t.Error("GlobalMCPDrift should be true when local is configured but global MCP is missing")
+ }
+
+ // Verify the issue provides evidence and is marked as global mutation
+ foundGlobalIssue := false
+ for _, issue := range report.Issues {
+ if issue.Component == AIComponentMCPGlobal {
+ foundGlobalIssue = true
+ if issue.Reason == "" {
+ t.Error("Global MCP issue must include reason")
+ }
+ if !issue.MutatesGlobal {
+ t.Error("Global MCP issue must be flagged as MutatesGlobal")
+ }
+ }
+ }
+ if !foundGlobalIssue {
+ t.Error("Expected global MCP issue when local is configured but global is missing")
+ }
+ })
+
+ t.Run("repair_plan_requires_explicit_consent_for_global_fix", func(t *testing.T) {
+ // Simulate Claude with both local and global drift
+ reports := map[string]IntegrationReport{
+ "claude": {
+ AI: "claude",
+ GlobalMCPDrift: true,
+ ManagedLocalDrift: true,
+ Issues: []IntegrationIssue{
+ {
+ AI: "claude",
+ Component: AIComponentCommands,
+ Ownership: OwnershipManaged,
+ Status: ComponentStatusStale,
+ Reason: "managed marker version mismatch",
+ AutoFixable: true,
+ },
+ {
+ AI: "claude",
+ Component: AIComponentMCPGlobal,
+ Ownership: OwnershipNone,
+ Status: ComponentStatusMissing,
+ Reason: "global taskwing-mcp registration missing",
+ AutoFixable: true,
+ MutatesGlobal: true,
+ },
+ },
+ },
+ }
+
+ // Default: no global mutations allowed
+ plan := BuildRepairPlan(reports, RepairPlanOptions{
+ TargetAIs: []string{"claude"},
+ })
+
+ localApplied := false
+ globalApplied := false
+ for _, action := range plan.Actions {
+ if action.Component == AIComponentCommands && action.Apply {
+ localApplied = true
+ }
+ if action.Component == AIComponentMCPGlobal {
+ if action.Apply {
+ globalApplied = true
+ }
+ }
+ }
+
+ if !localApplied {
+ t.Error("Local managed fix should be auto-applied")
+ }
+ if globalApplied {
+ t.Error("Global MCP fix must NOT be auto-applied without explicit consent (Gate 3)")
+ }
+
+ // Now with explicit consent
+ planWithConsent := BuildRepairPlan(reports, RepairPlanOptions{
+ TargetAIs: []string{"claude"},
+ IncludeGlobalMutations: true,
+ })
+
+ globalConsentApplied := false
+ for _, action := range planWithConsent.Actions {
+ if action.Component == AIComponentMCPGlobal && action.Apply {
+ globalConsentApplied = true
+ }
+ }
+ if !globalConsentApplied {
+ t.Error("Global MCP fix should be applied when IncludeGlobalMutations=true")
+ }
+ })
+
+ t.Run("all_drift_issues_carry_evidence", func(t *testing.T) {
+ // Every IntegrationIssue must have a non-empty Reason for traceability
+ reports := map[string]IntegrationReport{
+ "claude": {
+ AI: "claude",
+ GlobalMCPDrift: true,
+ Issues: []IntegrationIssue{
+ {AI: "claude", Component: AIComponentMCPGlobal, Status: ComponentStatusMissing, Reason: "global taskwing-mcp registration missing", MutatesGlobal: true},
+ {AI: "claude", Component: AIComponentCommands, Status: ComponentStatusStale, Reason: "managed marker version mismatch"},
+ {AI: "claude", Component: AIComponentHooks, Status: ComponentStatusInvalid, Reason: "required Stop hook missing"},
+ },
+ },
+ }
+
+ for _, issue := range reports["claude"].Issues {
+ if issue.Reason == "" {
+ t.Errorf("Issue for component %q has empty Reason - all drift must carry root-cause evidence", issue.Component)
+ }
+ }
+
+ plan := BuildRepairPlan(reports, RepairPlanOptions{TargetAIs: []string{"claude"}})
+ for _, action := range plan.Actions {
+ if action.Reason == "" && !action.Apply {
+ t.Errorf("Disabled action for %q has empty Reason - must explain why disabled", action.Component)
+ }
+ }
+ })
+}
diff --git a/internal/bootstrap/service.go b/internal/bootstrap/service.go
index dc42026..ffa6938 100644
--- a/internal/bootstrap/service.go
+++ b/internal/bootstrap/service.go
@@ -275,11 +275,18 @@ func (s *Service) RunDeterministicBootstrap(ctx context.Context, isQuiet bool) (
}
// 2. Load Documentation Files (deterministic)
+ // For multi-repo workspaces, also scan sub-repo directories
if !isQuiet {
fmt.Print(" 📄 Loading documentation...")
}
docLoader := NewDocLoader(s.basePath)
- docs, err := docLoader.Load()
+ ws, wsErr := project.DetectWorkspace(s.basePath)
+ var docs []DocFile
+ if wsErr == nil && len(ws.Services) > 0 && !containsDot(ws.Services) {
+ docs, err = docLoader.LoadForServices(ws.Services)
+ } else {
+ docs, err = docLoader.Load()
+ }
if err != nil {
// Track warning instead of silently swallowing
result.Warnings = append(result.Warnings, fmt.Sprintf("doc loader: %v", err))
@@ -352,6 +359,16 @@ func (s *Service) RunDeterministicBootstrap(ctx context.Context, isQuiet bool) (
return result, nil
}
+// containsDot returns true if any element equals ".".
+func containsDot(ss []string) bool {
+ for _, s := range ss {
+ if s == "." {
+ return true
+ }
+ }
+ return false
+}
+
// joinMax joins up to n strings with commas.
func joinMax(parts []string, n int) string {
if len(parts) <= n {
diff --git a/internal/config/paths.go b/internal/config/paths.go
index e594723..2dbe08a 100644
--- a/internal/config/paths.go
+++ b/internal/config/paths.go
@@ -120,6 +120,12 @@ func GetMemoryBasePath() (string, error) {
return "", fmt.Errorf("project context has empty RootPath")
}
+ // Reject CWD-fallback contexts (MarkerNone) to prevent accidental writes to HOME.
+ // A project must have at least a .git, language manifest, or .taskwing marker.
+ if ctx.MarkerType == project.MarkerNone {
+ return "", fmt.Errorf("no project marker found at %q: run 'taskwing bootstrap' in a project directory", ctx.RootPath)
+ }
+
taskwingDir := filepath.Join(ctx.RootPath, ".taskwing")
if info, err := os.Stat(taskwingDir); err != nil || !info.IsDir() {
return "", fmt.Errorf("%w: %s", ErrNoTaskWingDir, taskwingDir)
diff --git a/internal/config/paths_test.go b/internal/config/paths_test.go
new file mode 100644
index 0000000..263e721
--- /dev/null
+++ b/internal/config/paths_test.go
@@ -0,0 +1,153 @@
+package config
+
+import (
+ "os"
+ "path/filepath"
+ "testing"
+
+ "github.com/josephgoksu/TaskWing/internal/project"
+)
+
+func TestRootPathResolution(t *testing.T) {
+ // Save and restore global state
+ origGetGlobalConfigDir := GetGlobalConfigDir
+ t.Cleanup(func() {
+ ClearProjectContext()
+ GetGlobalConfigDir = origGetGlobalConfigDir
+ })
+
+ t.Run("project_with_taskwing_marker_resolves_correctly", func(t *testing.T) {
+ ClearProjectContext()
+ projectDir := t.TempDir()
+ taskwingDir := filepath.Join(projectDir, ".taskwing")
+ memoryDir := filepath.Join(taskwingDir, "memory")
+ if err := os.MkdirAll(memoryDir, 0o755); err != nil {
+ t.Fatal(err)
+ }
+
+ ctx := &project.Context{
+ RootPath: projectDir,
+ MarkerType: project.MarkerTaskWing,
+ }
+ if err := SetProjectContext(ctx); err != nil {
+ t.Fatal(err)
+ }
+
+ got, err := GetMemoryBasePath()
+ if err != nil {
+ t.Fatalf("unexpected error: %v", err)
+ }
+ if got != memoryDir {
+ t.Errorf("GetMemoryBasePath() = %q, want %q", got, memoryDir)
+ }
+ })
+
+ t.Run("project_with_git_marker_resolves_correctly", func(t *testing.T) {
+ ClearProjectContext()
+ projectDir := t.TempDir()
+ // Create .git and .taskwing dirs
+ if err := os.MkdirAll(filepath.Join(projectDir, ".git"), 0o755); err != nil {
+ t.Fatal(err)
+ }
+ taskwingDir := filepath.Join(projectDir, ".taskwing")
+ memoryDir := filepath.Join(taskwingDir, "memory")
+ if err := os.MkdirAll(memoryDir, 0o755); err != nil {
+ t.Fatal(err)
+ }
+
+ ctx := &project.Context{
+ RootPath: projectDir,
+ MarkerType: project.MarkerGit,
+ GitRoot: projectDir,
+ }
+ if err := SetProjectContext(ctx); err != nil {
+ t.Fatal(err)
+ }
+
+ got, err := GetMemoryBasePath()
+ if err != nil {
+ t.Fatalf("unexpected error: %v", err)
+ }
+ if got != memoryDir {
+ t.Errorf("GetMemoryBasePath() = %q, want %q", got, memoryDir)
+ }
+ })
+
+ t.Run("marker_none_rejected_even_with_taskwing_dir", func(t *testing.T) {
+ // Simulates running from HOME where ~/.taskwing exists
+ ClearProjectContext()
+ fakeHome := t.TempDir()
+ taskwingDir := filepath.Join(fakeHome, ".taskwing")
+ if err := os.MkdirAll(filepath.Join(taskwingDir, "memory"), 0o755); err != nil {
+ t.Fatal(err)
+ }
+
+ ctx := &project.Context{
+ RootPath: fakeHome,
+ MarkerType: project.MarkerNone, // CWD fallback
+ }
+ if err := SetProjectContext(ctx); err != nil {
+ t.Fatal(err)
+ }
+
+ _, err := GetMemoryBasePath()
+ if err == nil {
+ t.Error("GetMemoryBasePath() should fail for MarkerNone context (CWD fallback to HOME)")
+ }
+ })
+
+ t.Run("nil_context_returns_error", func(t *testing.T) {
+ ClearProjectContext()
+
+ _, err := GetMemoryBasePath()
+ if err == nil {
+ t.Error("GetMemoryBasePath() should fail when project context is nil")
+ }
+ })
+
+ t.Run("global_fallback_only_via_GetMemoryBasePathOrGlobal", func(t *testing.T) {
+ ClearProjectContext()
+ fakeGlobal := t.TempDir()
+ GetGlobalConfigDir = func() (string, error) {
+ return fakeGlobal, nil
+ }
+
+ // GetMemoryBasePath should fail
+ _, err := GetMemoryBasePath()
+ if err == nil {
+ t.Error("GetMemoryBasePath() should fail without project context")
+ }
+
+ // GetMemoryBasePathOrGlobal should fall back to global
+ got, err := GetMemoryBasePathOrGlobal()
+ if err != nil {
+ t.Fatalf("GetMemoryBasePathOrGlobal() unexpected error: %v", err)
+ }
+ want := filepath.Join(fakeGlobal, "memory")
+ if got != want {
+ t.Errorf("GetMemoryBasePathOrGlobal() = %q, want %q", got, want)
+ }
+ })
+
+ t.Run("no_accidental_writes_to_home", func(t *testing.T) {
+ // Verify that a MarkerNone context pointing to HOME is rejected
+ ClearProjectContext()
+ home, err := os.UserHomeDir()
+ if err != nil {
+ t.Skip("cannot determine HOME")
+ }
+
+ ctx := &project.Context{
+ RootPath: home,
+ MarkerType: project.MarkerNone,
+ }
+ if err := SetProjectContext(ctx); err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = GetMemoryBasePath()
+ if err == nil {
+ t.Error("GetMemoryBasePath() must reject HOME as RootPath when MarkerType is None")
+ }
+ })
+}
diff --git a/internal/git/git.go b/internal/git/git.go
index 5233649..b0bd115 100644
--- a/internal/git/git.go
+++ b/internal/git/git.go
@@ -99,6 +99,12 @@ func (c *Client) IsRepository() bool {
return err == nil
}
+// IsGitRepository checks if a directory is inside a git work tree.
+// Routes through a temporary Client to maintain testability via Commander.
+func IsGitRepository(dir string) bool {
+ return NewClient(dir).IsRepository()
+}
+
// IsDirty checks if the working directory has uncommitted changes.
func (c *Client) IsDirty() (bool, error) {
output, err := c.commander.RunInDir(c.workDir, "git", "status", "--porcelain")
diff --git a/internal/git/git_test.go b/internal/git/git_test.go
new file mode 100644
index 0000000..c4f2597
--- /dev/null
+++ b/internal/git/git_test.go
@@ -0,0 +1,210 @@
+package git
+
+import (
+ "fmt"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "strings"
+ "testing"
+)
+
+// mockCommander implements Commander for deterministic testing.
+type mockCommander struct {
+ responses map[string]struct {
+ output string
+ err error
+ }
+}
+
+func (m *mockCommander) Run(name string, args ...string) (string, error) {
+ return m.RunInDir("", name, args...)
+}
+
+func (m *mockCommander) RunInDir(dir, name string, args ...string) (string, error) {
+ key := fmt.Sprintf("%s:%s %s", dir, name, strings.Join(args, " "))
+ if resp, ok := m.responses[key]; ok {
+ return resp.output, resp.err
+ }
+ // Also try without dir prefix for convenience
+ keyNoDir := fmt.Sprintf("%s %s", name, strings.Join(args, " "))
+ if resp, ok := m.responses[keyNoDir]; ok {
+ return resp.output, resp.err
+ }
+ return "", fmt.Errorf("exit status 128: fatal: not a git repository")
+}
+
+// =============================================================================
+// TestGitWrapper_Exit128: Verify Client handles exit 128 gracefully
+// =============================================================================
+
+func TestGitWrapper_Exit128(t *testing.T) {
+ tests := []struct {
+ name string
+ workDir string
+ responses map[string]struct{ output string; err error }
+ wantIsRepo bool
+ }{
+ {
+ name: "non-git directory returns false",
+ workDir: "/workspace/not-a-repo",
+ responses: map[string]struct{ output string; err error }{
+ "/workspace/not-a-repo:git rev-parse --is-inside-work-tree": {
+ err: fmt.Errorf("exit status 128: fatal: not a git repository"),
+ },
+ },
+ wantIsRepo: false,
+ },
+ {
+ name: "valid git repo returns true",
+ workDir: "/workspace/valid-repo",
+ responses: map[string]struct{ output string; err error }{
+ "/workspace/valid-repo:git rev-parse --is-inside-work-tree": {
+ output: "true",
+ },
+ },
+ wantIsRepo: true,
+ },
+ {
+ name: "empty directory with no response returns false",
+ workDir: "/tmp/empty",
+ responses: map[string]struct{ output string; err error }{},
+ wantIsRepo: false,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ cmd := &mockCommander{responses: tt.responses}
+ client := NewClientWithCommander(tt.workDir, cmd)
+
+ got := client.IsRepository()
+ if got != tt.wantIsRepo {
+ t.Errorf("IsRepository() = %v, want %v", got, tt.wantIsRepo)
+ }
+ })
+ }
+}
+
+// =============================================================================
+// TestIsGitRepository: Standalone function with real temp dirs
+// =============================================================================
+
+func TestIsGitRepository(t *testing.T) {
+ // Skip if git is not installed
+ if _, err := exec.LookPath("git"); err != nil {
+ t.Skip("git not available")
+ }
+
+ t.Run("non-git directory returns false", func(t *testing.T) {
+ dir := t.TempDir()
+ if IsGitRepository(dir) {
+ t.Error("IsGitRepository() = true for non-git dir, want false")
+ }
+ })
+
+ t.Run("git-inited directory returns true", func(t *testing.T) {
+ dir := t.TempDir()
+ cmd := exec.Command("git", "init", dir)
+ if err := cmd.Run(); err != nil {
+ t.Fatalf("git init: %v", err)
+ }
+ if !IsGitRepository(dir) {
+ t.Error("IsGitRepository() = false for git-inited dir, want true")
+ }
+ })
+
+ t.Run("nonexistent directory returns false", func(t *testing.T) {
+ if IsGitRepository("/nonexistent/path/xyz123") {
+ t.Error("IsGitRepository() = true for nonexistent path, want false")
+ }
+ })
+
+ t.Run("subdirectory of git repo returns true", func(t *testing.T) {
+ dir := t.TempDir()
+ cmd := exec.Command("git", "init", dir)
+ if err := cmd.Run(); err != nil {
+ t.Fatalf("git init: %v", err)
+ }
+ subDir := filepath.Join(dir, "subdir")
+ if err := os.MkdirAll(subDir, 0755); err != nil {
+ t.Fatal(err)
+ }
+ if !IsGitRepository(subDir) {
+ t.Error("IsGitRepository() = false for subdir of git repo, want true")
+ }
+ })
+}
+
+// =============================================================================
+// TestBootstrapContinueOnGitError: Verify bootstrap-like flow continues
+// =============================================================================
+
+func TestBootstrapContinueOnGitError(t *testing.T) {
+ // Simulate a multi-repo workspace where some repos have git and some don't.
+ // Bootstrap should continue processing remaining repos when one fails.
+
+ if _, err := exec.LookPath("git"); err != nil {
+ t.Skip("git not available")
+ }
+
+ tmpDir := t.TempDir()
+
+ // Create 3 "repos": 2 with git, 1 without
+ repos := []struct {
+ name string
+ hasGit bool
+ }{
+ {"cli", true},
+ {"app", true},
+ {"macos", false}, // This simulates the exit 128 scenario
+ }
+
+ for _, r := range repos {
+ repoDir := filepath.Join(tmpDir, r.name)
+ if err := os.MkdirAll(repoDir, 0755); err != nil {
+ t.Fatal(err)
+ }
+ if r.hasGit {
+ cmd := exec.Command("git", "init", repoDir)
+ if err := cmd.Run(); err != nil {
+ t.Fatalf("git init %s: %v", r.name, err)
+ }
+ // Create a dummy commit so git log works
+ dummyFile := filepath.Join(repoDir, "README.md")
+ if err := os.WriteFile(dummyFile, []byte("# "+r.name), 0644); err != nil {
+ t.Fatal(err)
+ }
+ addCmd := exec.Command("git", "-C", repoDir, "add", ".")
+ addCmd.Run()
+ commitCmd := exec.Command("git", "-C", repoDir, "commit", "-m", "init", "--allow-empty")
+ commitCmd.Env = append(os.Environ(),
+ "GIT_AUTHOR_NAME=Test",
+ "GIT_AUTHOR_EMAIL=test@test.com",
+ "GIT_COMMITTER_NAME=Test",
+ "GIT_COMMITTER_EMAIL=test@test.com",
+ )
+ commitCmd.Run()
+ }
+ }
+
+ // Simulate bootstrap: iterate repos, skip non-git ones
+ var processed []string
+ var skipped []string
+
+ for _, r := range repos {
+ repoDir := filepath.Join(tmpDir, r.name)
+ if !IsGitRepository(repoDir) {
+ skipped = append(skipped, r.name)
+ continue
+ }
+ processed = append(processed, r.name)
+ }
+
+ if len(processed) != 2 {
+ t.Errorf("processed %d repos, want 2: %v", len(processed), processed)
+ }
+ if len(skipped) != 1 || skipped[0] != "macos" {
+ t.Errorf("skipped = %v, want [macos]", skipped)
+ }
+}
diff --git a/internal/memory/linknodes_test.go b/internal/memory/linknodes_test.go
new file mode 100644
index 0000000..e286805
--- /dev/null
+++ b/internal/memory/linknodes_test.go
@@ -0,0 +1,115 @@
+package memory
+
+import (
+ "strings"
+ "testing"
+)
+
+func TestKnowledgeLinking_NoFK(t *testing.T) {
+ store, err := NewSQLiteStore(":memory:")
+ if err != nil {
+ t.Fatalf("NewSQLiteStore: %v", err)
+ }
+ defer store.Close()
+
+ t.Run("link_existing_nodes_succeeds", func(t *testing.T) {
+ node1 := &Node{Summary: "Node A", Type: "feature", Content: "First node"}
+ node2 := &Node{Summary: "Node B", Type: "feature", Content: "Second node"}
+
+ if err := store.CreateNode(node1); err != nil {
+ t.Fatalf("CreateNode A: %v", err)
+ }
+ if err := store.CreateNode(node2); err != nil {
+ t.Fatalf("CreateNode B: %v", err)
+ }
+
+ err := store.LinkNodes(node1.ID, node2.ID, NodeRelationRelatesTo, 0.9, map[string]any{"test": true})
+ if err != nil {
+ t.Errorf("LinkNodes with existing nodes should succeed, got: %v", err)
+ }
+
+ edges, err := store.GetNodeEdges(node1.ID)
+ if err != nil {
+ t.Fatalf("GetNodeEdges: %v", err)
+ }
+ if len(edges) == 0 {
+ t.Error("Expected at least 1 edge after LinkNodes")
+ }
+ })
+
+ t.Run("link_missing_from_node_returns_error_not_panic", func(t *testing.T) {
+ node := &Node{Summary: "Existing Node", Type: "feature", Content: "exists"}
+ if err := store.CreateNode(node); err != nil {
+ t.Fatalf("CreateNode: %v", err)
+ }
+
+ err := store.LinkNodes("nonexistent-id", node.ID, NodeRelationDependsOn, 1.0, nil)
+ if err == nil {
+ t.Fatal("LinkNodes with missing from_node should return error")
+ }
+ if strings.Contains(err.Error(), "FOREIGN KEY") {
+ t.Error("Got raw FK constraint error — should be handled gracefully")
+ }
+ if !strings.Contains(err.Error(), "link skipped") {
+ t.Errorf("Expected 'link skipped' error, got: %v", err)
+ }
+ })
+
+ t.Run("link_missing_to_node_returns_error_not_panic", func(t *testing.T) {
+ node := &Node{Summary: "Another Node", Type: "feature", Content: "exists"}
+ if err := store.CreateNode(node); err != nil {
+ t.Fatalf("CreateNode: %v", err)
+ }
+
+ err := store.LinkNodes(node.ID, "nonexistent-id", NodeRelationDependsOn, 1.0, nil)
+ if err == nil {
+ t.Fatal("LinkNodes with missing to_node should return error")
+ }
+ if strings.Contains(err.Error(), "FOREIGN KEY") {
+ t.Error("Got raw FK constraint error — should be handled gracefully")
+ }
+ })
+
+ t.Run("link_both_missing_returns_error", func(t *testing.T) {
+ err := store.LinkNodes("fake-a", "fake-b", NodeRelationRelatesTo, 1.0, nil)
+ if err == nil {
+ t.Error("LinkNodes with both nodes missing should return error")
+ }
+ })
+
+ t.Run("duplicate_link_ignored", func(t *testing.T) {
+ node1 := &Node{Summary: "Dup A", Type: "feature", Content: "a"}
+ node2 := &Node{Summary: "Dup B", Type: "feature", Content: "b"}
+ store.CreateNode(node1)
+ store.CreateNode(node2)
+
+ err := store.LinkNodes(node1.ID, node2.ID, NodeRelationRelatesTo, 0.8, nil)
+ if err != nil {
+ t.Fatalf("First LinkNodes: %v", err)
+ }
+
+ // Duplicate should be silently ignored (INSERT OR IGNORE on unique constraint)
+ err = store.LinkNodes(node1.ID, node2.ID, NodeRelationRelatesTo, 0.9, nil)
+ if err != nil {
+ t.Errorf("Duplicate LinkNodes should be ignored, got: %v", err)
+ }
+ })
+
+ t.Run("node_deletion_cascades_edges", func(t *testing.T) {
+ node1 := &Node{Summary: "Cascade A", Type: "feature", Content: "a"}
+ node2 := &Node{Summary: "Cascade B", Type: "feature", Content: "b"}
+ store.CreateNode(node1)
+ store.CreateNode(node2)
+
+ store.LinkNodes(node1.ID, node2.ID, NodeRelationRelatesTo, 1.0, nil)
+
+ store.DeleteNode(node1.ID)
+
+ edges, _ := store.GetNodeEdges(node2.ID)
+ for _, e := range edges {
+ if e.FromNode == node1.ID || e.ToNode == node1.ID {
+ t.Error("Edge referencing deleted node should be cascaded")
+ }
+ }
+ })
+}
diff --git a/internal/memory/sqlite.go b/internal/memory/sqlite.go
index f55e01c..243020e 100644
--- a/internal/memory/sqlite.go
+++ b/internal/memory/sqlite.go
@@ -1463,12 +1463,36 @@ func (s *SQLiteStore) LinkNodes(from, to, relation string, confidence float64, p
}
}
- _, err := s.db.Exec(`
+ // Use a transaction to atomically verify node existence and insert the edge,
+ // preventing TOCTOU races where nodes could be deleted between check and insert.
+ tx, txErr := s.db.Begin()
+ if txErr != nil {
+ return fmt.Errorf("begin transaction: %w", txErr)
+ }
+ defer func() { _ = tx.Rollback() }() // no-op after commit
+
+ // Handle self-referential edges: when from == to, the IN (?, ?) clause
+ // matches at most one row, so the expected count is 1 rather than 2.
+ expectedCount := 2
+ if from == to {
+ expectedCount = 1
+ }
+ var existCount int
+ if qErr := tx.QueryRow(`SELECT COUNT(*) FROM nodes WHERE id IN (?, ?)`, from, to).Scan(&existCount); qErr != nil {
+ return fmt.Errorf("check node existence: %w", qErr)
+ }
+ if existCount < expectedCount {
+ return fmt.Errorf("link skipped: one or both nodes not found (from=%q, to=%q)", from, to)
+ }
+
+ if _, execErr := tx.Exec(`
INSERT OR IGNORE INTO node_edges (from_node, to_node, relation, properties, confidence, created_at)
VALUES (?, ?, ?, ?, ?, ?)
- `, from, to, relation, propsJSON, confidence, time.Now().UTC().Format(time.RFC3339))
+ `, from, to, relation, propsJSON, confidence, time.Now().UTC().Format(time.RFC3339)); execErr != nil {
+ return execErr
+ }
- return err
+ return tx.Commit()
}
// GetNodeEdges returns all edges for a node.
diff --git a/internal/memory/subrepo_metadata_test.go b/internal/memory/subrepo_metadata_test.go
new file mode 100644
index 0000000..2fb0878
--- /dev/null
+++ b/internal/memory/subrepo_metadata_test.go
@@ -0,0 +1,132 @@
+package memory
+
+import (
+ "testing"
+)
+
+func TestSubrepoMetadataPresent(t *testing.T) {
+ store, err := NewSQLiteStore(":memory:")
+ if err != nil {
+ t.Fatalf("NewSQLiteStore: %v", err)
+ }
+ defer store.Close()
+
+ t.Run("nodes_stored_with_workspace_context", func(t *testing.T) {
+ // Simulate multi-repo bootstrap: each sub-repo gets its own workspace tag
+ subrepos := []struct {
+ workspace string
+ summary string
+ }{
+ {"api", "API Service Architecture"},
+ {"api", "API Authentication Pattern"},
+ {"web", "Web Frontend React Setup"},
+ {"cli", "CLI Go Module Structure"},
+ }
+
+ for _, sr := range subrepos {
+ node := &Node{
+ Summary: sr.summary,
+ Type: "feature",
+ Content: "Description of " + sr.summary,
+ Workspace: sr.workspace,
+ }
+ if err := store.CreateNode(node); err != nil {
+ t.Fatalf("CreateNode %q: %v", sr.summary, err)
+ }
+ }
+
+ // Query by workspace and verify isolation
+ apiNodes, err := store.ListNodesFiltered(NodeFilter{Workspace: "api"})
+ if err != nil {
+ t.Fatalf("ListNodesFiltered(api): %v", err)
+ }
+ if len(apiNodes) != 2 {
+ t.Errorf("api workspace has %d nodes, want 2", len(apiNodes))
+ }
+
+ webNodes, err := store.ListNodesFiltered(NodeFilter{Workspace: "web"})
+ if err != nil {
+ t.Fatalf("ListNodesFiltered(web): %v", err)
+ }
+ if len(webNodes) != 1 {
+ t.Errorf("web workspace has %d nodes, want 1", len(webNodes))
+ }
+
+ cliNodes, err := store.ListNodesFiltered(NodeFilter{Workspace: "cli"})
+ if err != nil {
+ t.Fatalf("ListNodesFiltered(cli): %v", err)
+ }
+ if len(cliNodes) != 1 {
+ t.Errorf("cli workspace has %d nodes, want 1", len(cliNodes))
+ }
+ })
+
+ t.Run("root_workspace_used_as_default", func(t *testing.T) {
+ node := &Node{
+ Summary: "Global Pattern",
+ Type: "pattern",
+ Content: "A global pattern",
+ Workspace: "root",
+ }
+ if err := store.CreateNode(node); err != nil {
+ t.Fatalf("CreateNode: %v", err)
+ }
+
+ rootNodes, err := store.ListNodesFiltered(NodeFilter{Workspace: "root"})
+ if err != nil {
+ t.Fatalf("ListNodesFiltered(root): %v", err)
+ }
+
+ found := false
+ for _, n := range rootNodes {
+ if n.Summary == "Global Pattern" {
+ found = true
+ break
+ }
+ }
+ if !found {
+ t.Error("Root workspace node not found")
+ }
+ })
+
+ t.Run("edges_between_workspaces_succeed", func(t *testing.T) {
+ nodeA := &Node{Summary: "Cross WS A", Type: "feature", Content: "a", Workspace: "api"}
+ nodeB := &Node{Summary: "Cross WS B", Type: "feature", Content: "b", Workspace: "web"}
+ store.CreateNode(nodeA)
+ store.CreateNode(nodeB)
+
+ // Cross-workspace linking should work
+ err := store.LinkNodes(nodeA.ID, nodeB.ID, NodeRelationRelatesTo, 0.8, nil)
+ if err != nil {
+ t.Errorf("Cross-workspace LinkNodes should succeed, got: %v", err)
+ }
+ })
+
+ t.Run("metadata_fields_populated", func(t *testing.T) {
+ node := &Node{
+ Summary: "Service Metadata",
+ Type: "metadata",
+ Content: "git stats and docs metadata",
+ Workspace: "backend",
+ SourceAgent: "git-stats",
+ }
+ if err := store.CreateNode(node); err != nil {
+ t.Fatalf("CreateNode: %v", err)
+ }
+
+ // Retrieve and verify fields
+ retrieved, err := store.GetNode(node.ID)
+ if err != nil {
+ t.Fatalf("GetNode: %v", err)
+ }
+ if retrieved.Workspace != "backend" {
+ t.Errorf("Workspace = %q, want backend", retrieved.Workspace)
+ }
+ if retrieved.SourceAgent != "git-stats" {
+ t.Errorf("SourceAgent = %q, want git-stats", retrieved.SourceAgent)
+ }
+ if retrieved.Type != "metadata" {
+ t.Errorf("Type = %q, want metadata", retrieved.Type)
+ }
+ })
+}
diff --git a/internal/project/detect.go b/internal/project/detect.go
index e0582c4..6bcd3f8 100644
--- a/internal/project/detect.go
+++ b/internal/project/detect.go
@@ -74,15 +74,18 @@ func (d *detector) Detect(startPath string) (*Context, error) {
// If .taskwing found, check if it's a valid project marker
if marker == MarkerTaskWing {
- // CRITICAL FIX: If we've already found a .git directory below this .taskwing,
- // don't use this .taskwing as the project root. It's likely global config
- // or belongs to a different project (e.g., ~/.taskwing).
- // A valid project .taskwing should be AT or BELOW the git root, not above it.
+ // CRITICAL FIX: Reject .taskwing directories that are above the real project.
+ // Cases where .taskwing should be SKIPPED:
+ // 1. A .git was found below — .taskwing is above gitRoot (likely global config)
+ // 2. .taskwing is above startPath and startPath has nested projects (multi-repo workspace)
+ skipTaskWing := false
if gitRoot != "" && gitRoot != current {
- // .taskwing is above gitRoot - skip it, continue looking for language manifests
- // or fall back to gitRoot
- } else {
- // .taskwing is at or below gitRoot (or no git found yet) - use it
+ skipTaskWing = true // .taskwing is above gitRoot
+ } else if current != absPath && d.hasNestedProjects(absPath) {
+ skipTaskWing = true // .taskwing is above a multi-repo workspace
+ }
+
+ if !skipTaskWing {
if gitRoot == "" {
gitRoot = d.findGitRoot(current)
}
@@ -134,7 +137,17 @@ func (d *detector) Detect(startPath string) (*Context, error) {
}, nil
}
- // No project marker found, use startPath as fallback
+ // No project marker found. Check if startPath is a multi-repo workspace
+ // (directory containing multiple independent projects/repos).
+ if d.hasNestedProjects(absPath) {
+ return &Context{
+ RootPath: absPath,
+ MarkerType: MarkerNone,
+ GitRoot: "",
+ IsMonorepo: true, // Multi-repo workspace acts like a monorepo for scoping
+ }, nil
+ }
+
return &Context{
RootPath: absPath,
MarkerType: MarkerNone,
diff --git a/internal/project/detect_test.go b/internal/project/detect_test.go
index 186457b..ce18798 100644
--- a/internal/project/detect_test.go
+++ b/internal/project/detect_test.go
@@ -400,6 +400,111 @@ func TestLoadGitignore_NoFile(t *testing.T) {
}
}
+func TestIsMonorepoDetection(t *testing.T) {
+ t.Run("multi_repo_no_root_git_detected_as_monorepo", func(t *testing.T) {
+ // Directory with multiple sub-repos but no root .git
+ fs := setupFS([]string{
+ "/workspace/svc-a/.git/",
+ "/workspace/svc-a/go.mod",
+ "/workspace/svc-b/.git/",
+ "/workspace/svc-b/package.json",
+ "/workspace/svc-c/pyproject.toml",
+ })
+
+ d := NewDetector(fs)
+ ctx, err := d.Detect("/workspace")
+ if err != nil {
+ t.Fatalf("Detect() error: %v", err)
+ }
+
+ if !ctx.IsMonorepo {
+ t.Error("IsMonorepo = false, want true for multi-repo workspace with 3 nested projects")
+ }
+ })
+
+ t.Run("single_project_not_monorepo", func(t *testing.T) {
+ fs := setupFS([]string{
+ "/project/.git/",
+ "/project/go.mod",
+ "/project/main.go",
+ })
+
+ d := NewDetector(fs)
+ ctx, err := d.Detect("/project")
+ if err != nil {
+ t.Fatalf("Detect() error: %v", err)
+ }
+
+ if ctx.IsMonorepo {
+ t.Error("IsMonorepo = true, want false for single project")
+ }
+ })
+
+ t.Run("empty_dir_not_monorepo", func(t *testing.T) {
+ fs := setupFS([]string{
+ "/empty/",
+ })
+
+ d := NewDetector(fs)
+ ctx, err := d.Detect("/empty")
+ if err != nil {
+ t.Fatalf("Detect() error: %v", err)
+ }
+
+ if ctx.IsMonorepo {
+ t.Error("IsMonorepo = true, want false for empty directory")
+ }
+ if ctx.MarkerType != MarkerNone {
+ t.Errorf("MarkerType = %v, want MarkerNone", ctx.MarkerType)
+ }
+ })
+}
+
+func TestWorkspaceRootSelection(t *testing.T) {
+ t.Run("monorepo_root_from_subdirectory", func(t *testing.T) {
+ fs := setupFS([]string{
+ "/monorepo/.git/",
+ "/monorepo/frontend/package.json",
+ "/monorepo/backend/go.mod",
+ })
+
+ d := NewDetector(fs)
+ ctx, err := d.Detect("/monorepo/backend")
+ if err != nil {
+ t.Fatalf("Detect() error: %v", err)
+ }
+
+ if ctx.RootPath != "/monorepo/backend" {
+ t.Errorf("RootPath = %q, want /monorepo/backend", ctx.RootPath)
+ }
+ if ctx.GitRoot != "/monorepo" {
+ t.Errorf("GitRoot = %q, want /monorepo", ctx.GitRoot)
+ }
+ if !ctx.IsMonorepo {
+ t.Error("IsMonorepo = false, want true")
+ }
+ })
+
+ t.Run("home_dir_without_projects_not_selected_as_root", func(t *testing.T) {
+ // Simulate HOME with just .taskwing (global config) and no nested projects
+ fs := setupFS([]string{
+ "/home/user/.taskwing/",
+ "/home/user/.config/",
+ })
+
+ d := NewDetector(fs)
+ ctx, err := d.Detect("/home/user")
+ if err != nil {
+ t.Fatalf("Detect() error: %v", err)
+ }
+
+ // Should get MarkerTaskWing but NOT IsMonorepo
+ if ctx.IsMonorepo {
+ t.Error("IsMonorepo = true, want false for HOME with only global .taskwing")
+ }
+ })
+}
+
func TestDetect_GitignoreWithAnchoredPattern(t *testing.T) {
// /dist in gitignore should match top-level "dist" dir
fs := afero.NewMemMapFs()