From 420cf4389fa33dfe409bbf8fecf95ed12013c490 Mon Sep 17 00:00:00 2001 From: Phillip Cloud <417981+cpcloud@users.noreply.github.com> Date: Sun, 26 Apr 2026 05:53:30 -0400 Subject: [PATCH 01/17] feat(storage): add CountFindings to extract severity counts from review output Refactors hasSeverityLabel into a shared classifySeverityLine helper that recognises critical/high/medium/low/info labels; CountFindings uses it to return per-bucket counts (high=critical+high, medium, low=low+info) while hasSeverityLabel remains a thin wrapper that excludes info-only output from the fail verdict, preserving existing ParseVerdict behaviour. Also tightens isLegendEntry to stop scanning at non-legend section headers (e.g. "Findings:") so findings below a legend block are correctly counted. --- internal/storage/verdict.go | 199 ++++++++++++++++--------------- internal/storage/verdict_test.go | 75 ++++++++++++ 2 files changed, 180 insertions(+), 94 deletions(-) diff --git a/internal/storage/verdict.go b/internal/storage/verdict.go index 38069ae9..05dc85a1 100644 --- a/internal/storage/verdict.go +++ b/internal/storage/verdict.go @@ -179,100 +179,108 @@ func stripFieldLabel(s string) string { return s } -// hasSeverityLabel checks if the output contains severity labels indicating findings. -// Matches patterns like "- Medium —", "* Low:", "Critical — issue", etc. -// Checks lines that start with bullets/numbers OR directly with severity words. -// Requires separators to be followed by space to avoid "High-level overview". -// Skips lines that appear to be part of a severity legend/rubric. -func hasSeverityLabel(output string) bool { - lc := strings.ToLower(output) - severities := []string{"critical", "high", "medium", "low"} - lines := strings.Split(lc, "\n") +// classifySeverityLine inspects a single line in context and returns the bucket +// it belongs to: "critical", "high", "medium", "low", "info", or "" if the +// line is not a severity-tagged finding. 
Mirrors the recognition rules +// previously inlined in hasSeverityLabel; "info" is added so CountFindings +// can fold info-level mentions into the low bucket. +func classifySeverityLine(lines []string, i int) string { + trimmed := strings.TrimSpace(lines[i]) + if trimmed == "" { + return "" + } - for i, line := range lines { - trimmed := strings.TrimSpace(line) - if len(trimmed) == 0 { - continue - } + // Strip leading bullet/number markers + first := trimmed[0] + hasBullet := first == '-' || first == '*' || (first >= '0' && first <= '9') || + strings.HasPrefix(trimmed, "•") - // Check if line starts with bullet/number - if so, strip it - first := trimmed[0] - hasBullet := first == '-' || first == '*' || (first >= '0' && first <= '9') || - strings.HasPrefix(trimmed, "•") + checkText := trimmed + if hasBullet { + checkText = strings.TrimLeft(trimmed, "-*•0123456789.) ") + checkText = strings.TrimSpace(checkText) + } + checkText = stripMarkdown(checkText) - checkText := trimmed - if hasBullet { - // Strip leading bullets/asterisks/numbers - checkText = strings.TrimLeft(trimmed, "-*•0123456789.) ") - checkText = strings.TrimSpace(checkText) + // "info" is included so CountFindings can collapse it into the low bucket. + // hasSeverityLabel filters it out separately to preserve verdict semantics. 
+ severities := []string{"critical", "high", "medium", "low", "info"} + for _, sev := range severities { + if !strings.HasPrefix(checkText, sev) { + continue } + rest := strings.TrimSpace(checkText[len(sev):]) + if len(rest) == 0 { + continue + } + hasValidSep := strings.HasPrefix(rest, "—") || strings.HasPrefix(rest, "–") || + rest[0] == ':' || rest[0] == '|' || + (rest[0] == '-' && len(rest) > 1 && rest[1] == ' ') + if !hasValidSep { + continue + } + if isLegendEntry(lines, i) { + continue + } + return sev + } - // Strip markdown formatting (bold, headers) before checking - checkText = stripMarkdown(checkText) - - // Check if text starts with a severity word - for _, sev := range severities { - if !strings.HasPrefix(checkText, sev) { - continue - } - - // Check if followed by separator (dash, em-dash, colon, pipe) - rest := checkText[len(sev):] + // "Severity: " pattern (e.g. "**Severity**: High") + if strings.HasPrefix(checkText, "severity") { + rest := strings.TrimSpace(checkText[len("severity"):]) + hasSep := len(rest) > 0 && (rest[0] == ':' || rest[0] == '|' || + strings.HasPrefix(rest, "—") || strings.HasPrefix(rest, "–")) + if !hasSep && len(rest) > 1 && rest[0] == '-' && rest[1] == ' ' { + hasSep = true + } + if hasSep { + rest = strings.TrimLeft(rest, ":-–—| ") rest = strings.TrimSpace(rest) - if len(rest) == 0 { - continue - } - - // Check for valid separator - hasValidSep := false - // Check for em-dash or en-dash (these are unambiguous) - if strings.HasPrefix(rest, "—") || strings.HasPrefix(rest, "–") { - hasValidSep = true - } - // Check for colon or pipe (unambiguous separators) - if rest[0] == ':' || rest[0] == '|' { - hasValidSep = true - } - // For hyphen, require space after to avoid "High-level" - if rest[0] == '-' && len(rest) > 1 && rest[1] == ' ' { - hasValidSep = true - } - - if !hasValidSep { - continue - } - - // Skip if this looks like a legend/rubric entry - // Check if previous non-empty line is a legend header - if isLegendEntry(lines, 
i) { - continue + for _, sev := range severities { + if strings.HasPrefix(rest, sev) && !isLegendEntry(lines, i) { + return sev + } } + } + } + return "" +} - return true +// CountFindings parses review output for severity-prefixed findings and returns +// counts in three buckets: +// +// high = critical + high +// medium = medium +// low = low + info +// +// Reuses the same line-classification rules as hasSeverityLabel. +func CountFindings(output string) (high, medium, low int) { + lc := strings.ToLower(output) + lines := strings.Split(lc, "\n") + for i := range lines { + switch classifySeverityLine(lines, i) { + case "critical", "high": + high++ + case "medium": + medium++ + case "low", "info": + low++ } + } + return high, medium, low +} - // Check for "severity: " pattern (e.g., "**Severity**: High") - if strings.HasPrefix(checkText, "severity") { - rest := checkText[len("severity"):] - rest = strings.TrimSpace(rest) - hasSep := len(rest) > 0 && (rest[0] == ':' || rest[0] == '|' || - strings.HasPrefix(rest, "—") || strings.HasPrefix(rest, "–")) - // Accept hyphen-minus when followed by space (mirrors the severity-word branch) - if !hasSep && len(rest) > 1 && rest[0] == '-' && rest[1] == ' ' { - hasSep = true - } - if hasSep { - // Skip separator and whitespace - rest = strings.TrimLeft(rest, ":-–—| ") - rest = strings.TrimSpace(rest) - for _, sev := range severities { - if strings.HasPrefix(rest, sev) { - if !isLegendEntry(lines, i) { - return true - } - } - } - } +// hasSeverityLabel returns true if the output contains any severity-tagged +// finding among critical/high/medium/low. Info-only reviews are NOT treated +// as having findings, preserving the original verdict semantics where +// "Info: ..." notes do not flip a review from pass to fail. 
+func hasSeverityLabel(output string) bool { + lc := strings.ToLower(output) + lines := strings.Split(lc, "\n") + for i := range lines { + sev := classifySeverityLine(lines, i) + if sev != "" && sev != "info" { + return true } } return false @@ -281,8 +289,12 @@ func hasSeverityLabel(output string) bool { // isLegendEntry checks if a line at index i appears to be part of a severity legend/rubric // by looking at preceding lines for legend indicators. Scans up to 10 lines back, // skipping empty lines, severity lines, and description lines that may appear -// between legend entries. +// between legend entries. Stops at any non-legend section header (a line ending +// with ":" that does not contain a legend keyword) to avoid false positives when +// a legend block is followed by a real findings section separated by a header. func isLegendEntry(lines []string, i int) bool { + legendKeywords := []string{"severity", "level", "legend", "priority", "rubric", "rating", "scale"} + for j := i - 1; j >= 0 && j >= i-10; j-- { prev := strings.TrimSpace(lines[j]) if len(prev) == 0 { @@ -293,17 +305,16 @@ func isLegendEntry(lines []string, i int) bool { // "**Severity levels:**" are recognized the same as plain text. prev = stripMarkdown(stripListMarker(prev)) - // Check for legend header patterns (ends with ":" and contains indicator word) + // Lines ending with ":" are section headers. if strings.HasSuffix(prev, ":") || strings.HasSuffix(prev, ":") { - if strings.Contains(prev, "severity") || - strings.Contains(prev, "level") || - strings.Contains(prev, "legend") || - strings.Contains(prev, "priority") || - strings.Contains(prev, "rubric") || - strings.Contains(prev, "rating") || - strings.Contains(prev, "scale") { - return true + for _, kw := range legendKeywords { + if strings.Contains(prev, kw) { + return true + } } + // A non-legend section header (e.g. "Findings:") marks a boundary: + // stop scanning so lines below it are not attributed to a legend above. 
+ return false } } return false diff --git a/internal/storage/verdict_test.go b/internal/storage/verdict_test.go index 3932b0ac..07548f33 100644 --- a/internal/storage/verdict_test.go +++ b/internal/storage/verdict_test.go @@ -715,3 +715,78 @@ var verdictTests = []verdictTestCase{ func TestParseVerdict(t *testing.T) { runVerdictTests(t, verdictTests) } + +func TestCountFindings(t *testing.T) { + tests := []struct { + name string + output string + wantH, wantM, wantL int + }{ + { + name: "empty output", + output: "", + }, + { + name: "no issues found", + output: "No issues found.", + }, + { + name: "single high", + output: `Findings: +- High — sql injection in handler.go:42`, + wantH: 1, + }, + { + name: "critical collapses into high", + output: `- Critical: missing auth check +- High — race in cache lookup`, + wantH: 2, + }, + { + name: "info collapses into low", + output: `- Low: typo in comment +- Info: consider extracting helper`, + wantL: 2, + }, + { + name: "mixed severities", + output: `Review Findings: +1. High - data race in worker pool +2. Medium — missing context cancellation +3. Medium: error swallowed +4. 
Low - unused import`, + wantH: 1, wantM: 2, wantL: 1, + }, + { + name: "severity field label form", + output: `Issue: connection leak +**Severity**: High +File: db.go`, + wantH: 1, + }, + { + name: "legend block is excluded", + output: `Severity levels: +- Critical: blocking issue +- High: ship-blocking +- Medium: should fix +- Low: nice to have + +Findings: +- High — actual finding here`, + wantH: 1, + }, + { + name: "high-level is not a severity", + output: `This provides a high-level overview of the changes.`, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + h, m, l := CountFindings(tt.output) + assert.Equal(t, tt.wantH, h, "high count") + assert.Equal(t, tt.wantM, m, "medium count") + assert.Equal(t, tt.wantL, l, "low count") + }) + } +} From 5cc2065f3b73fba57f1098fa3843d00c4531ed12 Mon Sep 17 00:00:00 2001 From: Phillip Cloud <417981+cpcloud@users.noreply.github.com> Date: Sun, 26 Apr 2026 06:15:19 -0400 Subject: [PATCH 02/17] feat(storage): add finding count columns + backfill migration Add high_count, medium_count, low_count columns to the reviews table schema and a migration that adds them to existing databases. BackfillFindingCounts uses CountFindings to populate the columns for legacy rows where all three are zero and output is non-empty; idempotent across restarts. 
--- internal/storage/db.go | 30 +++++++++++++- internal/storage/summary.go | 67 ++++++++++++++++++++++++++++++++ internal/storage/summary_test.go | 66 +++++++++++++++++++++++++++++++ 3 files changed, 162 insertions(+), 1 deletion(-) diff --git a/internal/storage/db.go b/internal/storage/db.go index fe3c9532..ed90bf21 100644 --- a/internal/storage/db.go +++ b/internal/storage/db.go @@ -68,7 +68,10 @@ CREATE TABLE IF NOT EXISTS reviews ( prompt TEXT NOT NULL, output TEXT NOT NULL, created_at TEXT NOT NULL DEFAULT (datetime('now')), - closed INTEGER NOT NULL DEFAULT 0 + closed INTEGER NOT NULL DEFAULT 0, + high_count INTEGER NOT NULL DEFAULT 0, + medium_count INTEGER NOT NULL DEFAULT 0, + low_count INTEGER NOT NULL DEFAULT 0 ); CREATE TABLE IF NOT EXISTS responses ( @@ -829,6 +832,31 @@ func (db *DB) migrate() error { return fmt.Errorf("ensure ci_pr_batch_jobs unique index: %w", err) } + // Migration: add high_count, medium_count, low_count columns to reviews if missing. + // Backfill is performed by BackfillFindingCounts (called below). + for _, col := range []string{"high_count", "medium_count", "low_count"} { + err = db.QueryRow( + `SELECT COUNT(*) FROM pragma_table_info('reviews') WHERE name = ?`, col, + ).Scan(&count) + if err != nil { + return fmt.Errorf("check %s column in reviews: %w", col, err) + } + if count == 0 { + _, err = db.Exec( + fmt.Sprintf(`ALTER TABLE reviews ADD COLUMN %s INTEGER NOT NULL DEFAULT 0`, col), + ) + if err != nil { + return fmt.Errorf("add %s column: %w", col, err) + } + } + } + // Backfill counts for any rows where all three columns are still zero + // AND the output is non-empty. Idempotent: re-running on a fully populated + // DB processes nothing. 
+ if _, err := db.BackfillFindingCounts(); err != nil { + return fmt.Errorf("backfill finding counts: %w", err) + } + return nil } diff --git a/internal/storage/summary.go b/internal/storage/summary.go index 7420e5bd..92d4f285 100644 --- a/internal/storage/summary.go +++ b/internal/storage/summary.go @@ -537,6 +537,73 @@ func (db *DB) BackfillVerdictBool() (int, error) { return len(updates), nil } +// BackfillFindingCounts populates high_count, medium_count, and low_count for +// reviews where all three are zero AND the output is non-empty (i.e. any review +// that was inserted before the columns existed and may have findings to count). +// Returns the number of rows updated. +func (db *DB) BackfillFindingCounts() (int, error) { + rows, err := db.Query(` + SELECT id, output FROM reviews + WHERE output != '' + AND high_count = 0 AND medium_count = 0 AND low_count = 0 + `) + if err != nil { + return 0, err + } + defer rows.Close() + + type pending struct { + id int64 + h, m, l int + } + var updates []pending + for rows.Next() { + var id int64 + var output string + if err := rows.Scan(&id, &output); err != nil { + return 0, err + } + h, m, l := CountFindings(output) + // Only enqueue updates for rows that actually have findings; + // rows with all-zero counts don't need a write and would otherwise + // be re-processed on every startup (the SELECT predicate matches them). + if h+m+l == 0 { + continue + } + updates = append(updates, pending{id: id, h: h, m: m, l: l}) + } + if err := rows.Err(); err != nil { + return 0, err + } + if len(updates) == 0 { + return 0, nil + } + + tx, err := db.Begin() + if err != nil { + return 0, err + } + defer func() { _ = tx.Rollback() }() + + stmt, err := tx.Prepare( + `UPDATE reviews SET high_count = ?, medium_count = ?, low_count = ? 
WHERE id = ?`, + ) + if err != nil { + return 0, err + } + defer stmt.Close() + + for _, u := range updates { + if _, err := stmt.Exec(u.h, u.m, u.l, u.id); err != nil { + return 0, err + } + } + if err := tx.Commit(); err != nil { + return 0, err + } + return len(updates), nil +} + // categorizeError maps error messages to categories. func categorizeError(errMsg string) string { lower := strings.ToLower(errMsg) diff --git a/internal/storage/summary_test.go b/internal/storage/summary_test.go index 7f8327d7..03c8361e 100644 --- a/internal/storage/summary_test.go +++ b/internal/storage/summary_test.go @@ -449,6 +449,72 @@ func TestBackfillVerdictBool(t *testing.T) { assert.Equal(t, 0, count) } +func TestBackfillFindingCounts(t *testing.T) { + db := openTestDB(t) + defer db.Close() + + repo, _ := db.GetOrCreateRepo("/tmp/testrepo", "testrepo") + commit, _ := db.GetOrCreateCommit(repo.ID, "abc123", "alice", "fix bug", time.Now()) + job, err := db.EnqueueJob(EnqueueOpts{ + RepoID: repo.ID, CommitID: commit.ID, GitRef: "abc123", Agent: "test", + }) + require.NoError(t, err) + + output := `Findings: +- High — sql injection in handler.go +- Medium: missing error check +- Low - typo in comment` + + // Insert review row with zero counts (simulates legacy data) + _, err = db.Exec( + `INSERT INTO reviews (job_id, agent, prompt, output, high_count, medium_count, low_count) + VALUES (?, ?, ?, ?, 0, 0, 0)`, + job.ID, "test", "p", output, + ) + require.NoError(t, err) + + // Insert a "clean" review (non-empty output but no severity markers). + // This row will match the SELECT predicate but CountFindings returns + // (0,0,0), so it must be skipped — otherwise we'd re-process it on + // every daemon startup forever. 
+ cleanCommit, _ := db.GetOrCreateCommit(repo.ID, "def456", "bob", "tidy", time.Now()) + cleanJob, err := db.EnqueueJob(EnqueueOpts{ + RepoID: repo.ID, CommitID: cleanCommit.ID, GitRef: "def456", Agent: "test", + }) + require.NoError(t, err) + _, err = db.Exec( + `INSERT INTO reviews (job_id, agent, prompt, output, high_count, medium_count, low_count) + VALUES (?, ?, ?, ?, 0, 0, 0)`, + cleanJob.ID, "test", "p", "LGTM, no issues found.", + ) + require.NoError(t, err) + + updated, err := db.BackfillFindingCounts() + require.NoError(t, err) + assert.Equal(t, 1, updated, "should backfill only the row with findings") + + var h, m, l int + require.NoError(t, db.QueryRow( + `SELECT high_count, medium_count, low_count FROM reviews WHERE job_id = ?`, job.ID, + ).Scan(&h, &m, &l)) + assert.Equal(t, 1, h) + assert.Equal(t, 1, m) + assert.Equal(t, 1, l) + + // Clean review keeps (0,0,0) — was skipped, not corrupted. + require.NoError(t, db.QueryRow( + `SELECT high_count, medium_count, low_count FROM reviews WHERE job_id = ?`, cleanJob.ID, + ).Scan(&h, &m, &l)) + assert.Equal(t, 0, h) + assert.Equal(t, 0, m) + assert.Equal(t, 0, l) + + // Idempotent: a second run touches no rows (clean row stays skipped). 
+ updated, err = db.BackfillFindingCounts() + require.NoError(t, err) + assert.Equal(t, 0, updated, "second run should be no-op") +} + func TestPercentile(t *testing.T) { tests := []struct { name string From 71ecfc863d340afd48556957f36bfc77a0d6016f Mon Sep 17 00:00:00 2001 From: Phillip Cloud <417981+cpcloud@users.noreply.github.com> Date: Sun, 26 Apr 2026 06:31:06 -0400 Subject: [PATCH 03/17] feat(storage): expose finding counts on Review struct --- internal/storage/hydration.go | 6 ++++ internal/storage/models.go | 5 +++ internal/storage/reviews.go | 46 +++++++++++++++++++++----- internal/storage/reviews_test.go | 56 ++++++++++++++++++++++++++++++++ 4 files changed, 105 insertions(+), 8 deletions(-) diff --git a/internal/storage/hydration.go b/internal/storage/hydration.go index f64e88f2..e7c0104e 100644 --- a/internal/storage/hydration.go +++ b/internal/storage/hydration.go @@ -140,6 +140,9 @@ type reviewScanFields struct { Closed int UUID sql.NullString VerdictBool sql.NullInt64 + HighCount int + MediumCount int + LowCount int } func applyReviewScan(review *Review, fields reviewScanFields) { @@ -149,6 +152,9 @@ func applyReviewScan(review *Review, fields reviewScanFields) { review.UUID = fields.UUID.String } applyReviewVerdict(review, fields.VerdictBool) + review.HighCount = fields.HighCount + review.MediumCount = fields.MediumCount + review.LowCount = fields.LowCount } func scanCommit(scanner sqlScanner) (*Commit, error) { diff --git a/internal/storage/models.go b/internal/storage/models.go index f0534243..2c47fc49 100644 --- a/internal/storage/models.go +++ b/internal/storage/models.go @@ -211,6 +211,11 @@ type Review struct { // Stored verdict: 1=pass, 0=fail, NULL=legacy (not yet backfilled) VerdictBool *int `json:"verdict_bool,omitempty"` + // Per-severity finding counts (parsed from Output at completion time). 
+ HighCount int `json:"high_count"` + MediumCount int `json:"medium_count"` + LowCount int `json:"low_count"` + // Joined fields Job *ReviewJob `json:"job,omitempty"` } diff --git a/internal/storage/reviews.go b/internal/storage/reviews.go index c1dee0d4..b81db4d7 100644 --- a/internal/storage/reviews.go +++ b/internal/storage/reviews.go @@ -18,6 +18,7 @@ func (db *DB) GetReviewByJobID(jobID int64) (*Review, error) { var jobFields reviewJobScanFields err := db.QueryRow(` SELECT rv.id, rv.job_id, rv.agent, rv.prompt, rv.output, rv.created_at, rv.closed, rv.uuid, rv.verdict_bool, + rv.high_count, rv.medium_count, rv.low_count, j.id, j.repo_id, j.commit_id, j.git_ref, j.branch, j.session_id, j.agent, j.reasoning, j.status, j.enqueued_at, j.started_at, j.finished_at, j.worker_id, j.error, j.model, j.provider, j.requested_model, j.requested_provider, j.job_type, j.review_type, j.patch_id, rp.root_path, rp.name, c.subject, j.token_usage, COALESCE(j.min_severity, '') @@ -27,6 +28,7 @@ func (db *DB) GetReviewByJobID(jobID int64) (*Review, error) { LEFT JOIN commits c ON c.id = j.commit_id WHERE rv.job_id = ? 
`, jobID).Scan(&r.ID, &r.JobID, &r.Agent, &r.Prompt, &r.Output, &reviewFields.CreatedAt, &reviewFields.Closed, &reviewFields.UUID, &reviewFields.VerdictBool, + &reviewFields.HighCount, &reviewFields.MediumCount, &reviewFields.LowCount, &job.ID, &job.RepoID, &jobFields.CommitID, &job.GitRef, &jobFields.Branch, &jobFields.SessionID, &job.Agent, &job.Reasoning, &job.Status, &jobFields.EnqueuedAt, &jobFields.StartedAt, &jobFields.FinishedAt, &jobFields.WorkerID, &jobFields.Error, &jobFields.Model, &jobFields.Provider, &jobFields.RequestedModel, &jobFields.RequestedProvider, &jobFields.JobType, &jobFields.ReviewType, &jobFields.PatchID, &job.RepoPath, &job.RepoName, &jobFields.CommitSubject, &jobFields.TokenUsage, &job.MinSeverity) @@ -36,6 +38,12 @@ func (db *DB) GetReviewByJobID(jobID int64) (*Review, error) { applyReviewScan(&r, reviewFields) applyReviewJobScan(&job, jobFields) applyJobVerdict(&job, reviewFields.VerdictBool, r.Output) + // Mirror the scanned review counts onto the joined Job so the TUI's + // detail header (which reads job.HighFindings/etc.) sees them. 
+ h, m, l := r.HighCount, r.MediumCount, r.LowCount + job.HighFindings = &h + job.MediumFindings = &m + job.LowFindings = &l r.Job = &job @@ -50,6 +58,7 @@ func (db *DB) GetReviewByCommitSHA(sha string) (*Review, error) { var jobFields reviewJobScanFields err := db.QueryRow(` SELECT rv.id, rv.job_id, rv.agent, rv.prompt, rv.output, rv.created_at, rv.closed, rv.uuid, rv.verdict_bool, + rv.high_count, rv.medium_count, rv.low_count, j.id, j.repo_id, j.commit_id, j.git_ref, j.branch, j.session_id, j.agent, j.reasoning, j.status, j.enqueued_at, j.started_at, j.finished_at, j.worker_id, j.error, j.model, j.provider, j.requested_model, j.requested_provider, j.job_type, j.review_type, j.patch_id, rp.root_path, rp.name, c.subject, j.token_usage, COALESCE(j.min_severity, '') @@ -61,6 +70,7 @@ func (db *DB) GetReviewByCommitSHA(sha string) (*Review, error) { ORDER BY rv.created_at DESC LIMIT 1 `, sha).Scan(&r.ID, &r.JobID, &r.Agent, &r.Prompt, &r.Output, &reviewFields.CreatedAt, &reviewFields.Closed, &reviewFields.UUID, &reviewFields.VerdictBool, + &reviewFields.HighCount, &reviewFields.MediumCount, &reviewFields.LowCount, &job.ID, &job.RepoID, &jobFields.CommitID, &job.GitRef, &jobFields.Branch, &jobFields.SessionID, &job.Agent, &job.Reasoning, &job.Status, &jobFields.EnqueuedAt, &jobFields.StartedAt, &jobFields.FinishedAt, &jobFields.WorkerID, &jobFields.Error, &jobFields.Model, &jobFields.Provider, &jobFields.RequestedModel, &jobFields.RequestedProvider, &jobFields.JobType, &jobFields.ReviewType, &jobFields.PatchID, &job.RepoPath, &job.RepoName, &jobFields.CommitSubject, &jobFields.TokenUsage, &job.MinSeverity) @@ -70,6 +80,12 @@ func (db *DB) GetReviewByCommitSHA(sha string) (*Review, error) { applyReviewScan(&r, reviewFields) applyReviewJobScan(&job, jobFields) applyJobVerdict(&job, reviewFields.VerdictBool, r.Output) + // Mirror the scanned review counts onto the joined Job so the TUI's + // detail header (which reads job.HighFindings/etc.) sees them. 
+ h, m, l := r.HighCount, r.MediumCount, r.LowCount + job.HighFindings = &h + job.MediumFindings = &m + job.LowFindings = &l r.Job = &job @@ -79,7 +95,8 @@ func (db *DB) GetReviewByCommitSHA(sha string) (*Review, error) { // GetAllReviewsForGitRef returns all reviews for a git ref (commit SHA or range) for re-review context func (db *DB) GetAllReviewsForGitRef(gitRef string) ([]Review, error) { rows, err := db.Query(` - SELECT rv.id, rv.job_id, rv.agent, rv.prompt, rv.output, rv.created_at, rv.closed + SELECT rv.id, rv.job_id, rv.agent, rv.prompt, rv.output, rv.created_at, rv.closed, + rv.high_count, rv.medium_count, rv.low_count FROM reviews rv JOIN review_jobs j ON j.id = rv.job_id WHERE j.git_ref = ? @@ -94,7 +111,8 @@ func (db *DB) GetAllReviewsForGitRef(gitRef string) ([]Review, error) { for rows.Next() { var r Review var fields reviewScanFields - if err := rows.Scan(&r.ID, &r.JobID, &r.Agent, &r.Prompt, &r.Output, &fields.CreatedAt, &fields.Closed); err != nil { + if err := rows.Scan(&r.ID, &r.JobID, &r.Agent, &r.Prompt, &r.Output, &fields.CreatedAt, &fields.Closed, + &fields.HighCount, &fields.MediumCount, &fields.LowCount); err != nil { return nil, err } applyReviewScan(&r, fields) @@ -107,7 +125,8 @@ func (db *DB) GetAllReviewsForGitRef(gitRef string) ([]Review, error) { // GetRecentReviewsForRepo returns the N most recent reviews for a repo func (db *DB) GetRecentReviewsForRepo(repoID int64, limit int) ([]Review, error) { rows, err := db.Query(` - SELECT rv.id, rv.job_id, rv.agent, rv.prompt, rv.output, rv.created_at, rv.closed + SELECT rv.id, rv.job_id, rv.agent, rv.prompt, rv.output, rv.created_at, rv.closed, + rv.high_count, rv.medium_count, rv.low_count FROM reviews rv JOIN review_jobs j ON j.id = rv.job_id WHERE j.repo_id = ? 
@@ -123,7 +142,8 @@ func (db *DB) GetRecentReviewsForRepo(repoID int64, limit int) ([]Review, error) for rows.Next() { var r Review var fields reviewScanFields - if err := rows.Scan(&r.ID, &r.JobID, &r.Agent, &r.Prompt, &r.Output, &fields.CreatedAt, &fields.Closed); err != nil { + if err := rows.Scan(&r.ID, &r.JobID, &r.Agent, &r.Prompt, &r.Output, &fields.CreatedAt, &fields.Closed, + &fields.HighCount, &fields.MediumCount, &fields.LowCount); err != nil { return nil, err } applyReviewScan(&r, fields) @@ -340,7 +360,8 @@ func (db *DB) GetJobsWithReviewsByIDs(jobIDs []int64) (map[int64]JobWithReview, // Fetch reviews for these jobs reviewQuery := fmt.Sprintf(` - SELECT rv.id, rv.job_id, rv.agent, rv.prompt, rv.output, rv.created_at, rv.closed, rv.verdict_bool + SELECT rv.id, rv.job_id, rv.agent, rv.prompt, rv.output, rv.created_at, rv.closed, rv.verdict_bool, + rv.high_count, rv.medium_count, rv.low_count FROM reviews rv WHERE rv.job_id IN (%s) `, inClause) @@ -354,13 +375,20 @@ func (db *DB) GetJobsWithReviewsByIDs(jobIDs []int64) (map[int64]JobWithReview, for reviewRows.Next() { var r Review var fields reviewScanFields - if err := reviewRows.Scan(&r.ID, &r.JobID, &r.Agent, &r.Prompt, &r.Output, &fields.CreatedAt, &fields.Closed, &fields.VerdictBool); err != nil { + if err := reviewRows.Scan(&r.ID, &r.JobID, &r.Agent, &r.Prompt, &r.Output, &fields.CreatedAt, &fields.Closed, &fields.VerdictBool, + &fields.HighCount, &fields.MediumCount, &fields.LowCount); err != nil { return nil, fmt.Errorf("scan review: %w", err) } applyReviewScan(&r, fields) if entry, ok := result[r.JobID]; ok { entry.Review = &r + // Also propagate counts to the joined Job so consumers like + // ListJobs callers see consistent values via either pathway. 
+ h, m, l := r.HighCount, r.MediumCount, r.LowCount + entry.Job.HighFindings = &h + entry.Job.MediumFindings = &m + entry.Job.LowFindings = &l applyJobVerdict(&entry.Job, fields.VerdictBool, r.Output) result[r.JobID] = entry } @@ -378,9 +406,11 @@ func (db *DB) GetReviewByID(reviewID int64) (*Review, error) { var fields reviewScanFields err := db.QueryRow(` - SELECT id, job_id, agent, prompt, output, created_at, closed + SELECT id, job_id, agent, prompt, output, created_at, closed, + high_count, medium_count, low_count FROM reviews WHERE id = ? - `, reviewID).Scan(&r.ID, &r.JobID, &r.Agent, &r.Prompt, &r.Output, &fields.CreatedAt, &fields.Closed) + `, reviewID).Scan(&r.ID, &r.JobID, &r.Agent, &r.Prompt, &r.Output, &fields.CreatedAt, &fields.Closed, + &fields.HighCount, &fields.MediumCount, &fields.LowCount) if err != nil { return nil, err } diff --git a/internal/storage/reviews_test.go b/internal/storage/reviews_test.go index 72380a54..c048b673 100644 --- a/internal/storage/reviews_test.go +++ b/internal/storage/reviews_test.go @@ -3,6 +3,7 @@ package storage import ( "database/sql" "testing" + "time" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -537,3 +538,58 @@ func verifyComment(t *testing.T, actual Response, expectedUser, expectedMsg stri assert.Equal(t, expectedUser, actual.Responder) assert.Equal(t, expectedMsg, actual.Response) } + +func TestGetRecentReviewsForRepo_SelectsFindingCounts(t *testing.T) { + db := openTestDB(t) + defer db.Close() + + repo, err := db.GetOrCreateRepo("/tmp/recent-test", "recent-test") + require.NoError(t, err) + commit, err := db.GetOrCreateCommit(repo.ID, "abc123", "alice", "fix", time.Now()) + require.NoError(t, err) + job, err := db.EnqueueJob(EnqueueOpts{ + RepoID: repo.ID, CommitID: commit.ID, GitRef: "abc123", Agent: "test", + }) + require.NoError(t, err) + _, err = db.Exec( + `INSERT INTO reviews (job_id, agent, prompt, output, high_count, medium_count, low_count) + VALUES (?, 'test', 'p', 
'High - bug', 3, 2, 5)`, job.ID, + ) + require.NoError(t, err) + + // Smoke test: this used to panic with "sql: expected N destination args + // but got M" when GetRecentReviewsForRepo's SELECT lost columns the Scan + // still expected. Both halves must move in lock-step. + reviews, err := db.GetRecentReviewsForRepo(repo.ID, 10) + require.NoError(t, err) + require.Len(t, reviews, 1) + assert.Equal(t, 3, reviews[0].HighCount) + assert.Equal(t, 2, reviews[0].MediumCount) + assert.Equal(t, 5, reviews[0].LowCount) +} + +func TestGetAllReviewsForGitRef_SelectsFindingCounts(t *testing.T) { + db := openTestDB(t) + defer db.Close() + + repo, err := db.GetOrCreateRepo("/tmp/gitref-test", "gitref-test") + require.NoError(t, err) + commit, err := db.GetOrCreateCommit(repo.ID, "def456", "bob", "fix", time.Now()) + require.NoError(t, err) + job, err := db.EnqueueJob(EnqueueOpts{ + RepoID: repo.ID, CommitID: commit.ID, GitRef: "def456", Agent: "test", + }) + require.NoError(t, err) + _, err = db.Exec( + `INSERT INTO reviews (job_id, agent, prompt, output, high_count, medium_count, low_count) + VALUES (?, 'test', 'p', 'High - bug', 1, 2, 3)`, job.ID, + ) + require.NoError(t, err) + + reviews, err := db.GetAllReviewsForGitRef("def456") + require.NoError(t, err) + require.Len(t, reviews, 1) + assert.Equal(t, 1, reviews[0].HighCount) + assert.Equal(t, 2, reviews[0].MediumCount) + assert.Equal(t, 3, reviews[0].LowCount) +} From 439378b64b408e0600c815e9a8912ac907ea9faf Mon Sep 17 00:00:00 2001 From: Phillip Cloud <417981+cpcloud@users.noreply.github.com> Date: Sun, 26 Apr 2026 06:31:54 -0400 Subject: [PATCH 04/17] feat(storage): compute finding counts when completing jobs --- internal/storage/jobs.go | 20 +++++++++++++++----- 1 file changed, 15 insertions(+), 5 deletions(-) diff --git a/internal/storage/jobs.go b/internal/storage/jobs.go index 647da895..ccf1966d 100644 --- a/internal/storage/jobs.go +++ b/internal/storage/jobs.go @@ -350,9 +350,13 @@ func (db *DB) CompleteFixJob(jobID 
int64, agent, prompt, output, patch string) e } verdictBool := verdictToBool(ParseVerdict(finalOutput)) + highCount, mediumCount, lowCount := CountFindings(finalOutput) _, err = conn.ExecContext(ctx, - `INSERT INTO reviews (job_id, agent, prompt, output, verdict_bool, uuid, updated_by_machine_id, updated_at) VALUES (?, ?, ?, ?, ?, ?, ?, ?)`, - jobID, agent, prompt, finalOutput, verdictBool, reviewUUID, machineID, now) + `INSERT INTO reviews (job_id, agent, prompt, output, verdict_bool, uuid, updated_by_machine_id, updated_at, + high_count, medium_count, low_count) + VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`, + jobID, agent, prompt, finalOutput, verdictBool, reviewUUID, machineID, now, + highCount, mediumCount, lowCount) if err != nil { return err } @@ -425,13 +429,19 @@ func (db *DB) CompleteJob(jobID int64, agent, prompt, output string) error { return nil } - // Insert review with sync columns + // Insert review with sync columns and finding counts var verdictBoolVal any + var highCount, mediumCount, lowCount int if finalOutput != "" { verdictBoolVal = verdictToBool(ParseVerdict(finalOutput)) + highCount, mediumCount, lowCount = CountFindings(finalOutput) } - _, err = conn.ExecContext(ctx, `INSERT INTO reviews (job_id, agent, prompt, output, verdict_bool, uuid, updated_by_machine_id, updated_at) VALUES (?, ?, ?, ?, ?, ?, ?, ?)`, - jobID, agent, prompt, finalOutput, verdictBoolVal, reviewUUID, machineID, now) + _, err = conn.ExecContext(ctx, + `INSERT INTO reviews (job_id, agent, prompt, output, verdict_bool, uuid, updated_by_machine_id, updated_at, + high_count, medium_count, low_count) + VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`, + jobID, agent, prompt, finalOutput, verdictBoolVal, reviewUUID, machineID, now, + highCount, mediumCount, lowCount) if err != nil { return err } From 648766d982eba00b062bf2be7ce8d7d8c41290ba Mon Sep 17 00:00:00 2001 From: Phillip Cloud <417981+cpcloud@users.noreply.github.com> Date: Sun, 26 Apr 2026 06:33:51 -0400 Subject: [PATCH 05/17] 
feat(storage): join finding counts (own + parent) into ReviewJob queries --- internal/storage/hydration.go | 31 +++++++++++++++++++++++++++++++ internal/storage/jobs.go | 19 +++++++++++++++---- internal/storage/models.go | 11 +++++++++++ 3 files changed, 57 insertions(+), 4 deletions(-) diff --git a/internal/storage/hydration.go b/internal/storage/hydration.go index e7c0104e..bda46ede 100644 --- a/internal/storage/hydration.go +++ b/internal/storage/hydration.go @@ -39,6 +39,13 @@ type reviewJobScanFields struct { MinSeverity string SkipReason sql.NullString Source sql.NullString + + HighFindings sql.NullInt64 + MediumFindings sql.NullInt64 + LowFindings sql.NullInt64 + ParentHighFindings sql.NullInt64 + ParentMediumFindings sql.NullInt64 + ParentLowFindings sql.NullInt64 } func applyReviewJobScan(job *ReviewJob, fields reviewJobScanFields) { @@ -133,6 +140,30 @@ func applyReviewJobScan(job *ReviewJob, fields reviewJobScanFields) { if fields.Source.Valid { job.Source = fields.Source.String } + if fields.HighFindings.Valid { + v := int(fields.HighFindings.Int64) + job.HighFindings = &v + } + if fields.MediumFindings.Valid { + v := int(fields.MediumFindings.Int64) + job.MediumFindings = &v + } + if fields.LowFindings.Valid { + v := int(fields.LowFindings.Int64) + job.LowFindings = &v + } + if fields.ParentHighFindings.Valid { + v := int(fields.ParentHighFindings.Int64) + job.ParentHighFindings = &v + } + if fields.ParentMediumFindings.Valid { + v := int(fields.ParentMediumFindings.Int64) + job.ParentMediumFindings = &v + } + if fields.ParentLowFindings.Valid { + v := int(fields.ParentLowFindings.Int64) + job.ParentLowFindings = &v + } } type reviewScanFields struct { diff --git a/internal/storage/jobs.go b/internal/storage/jobs.go index ccf1966d..e6199b17 100644 --- a/internal/storage/jobs.go +++ b/internal/storage/jobs.go @@ -828,11 +828,14 @@ func (db *DB) ListJobs(statusFilter string, repoFilter string, limit, offset int COALESCE(j.agentic, 0), r.root_path, 
r.name, c.subject, rv.closed, rv.output, rv.verdict_bool, j.source_machine_id, j.uuid, j.model, j.job_type, j.review_type, j.patch_id, j.parent_job_id, j.provider, j.requested_model, j.requested_provider, j.token_usage, COALESCE(j.worktree_path, ''), - j.command_line, COALESCE(j.min_severity, '') + j.command_line, COALESCE(j.min_severity, ''), + rv.high_count, rv.medium_count, rv.low_count, + parent_rv.high_count, parent_rv.medium_count, parent_rv.low_count FROM review_jobs j JOIN repos r ON r.id = j.repo_id LEFT JOIN commits c ON c.id = j.commit_id LEFT JOIN reviews rv ON rv.job_id = j.id + LEFT JOIN reviews parent_rv ON parent_rv.job_id = j.parent_job_id ` queryFilters, args := buildJobFilterClause(statusFilter, repoFilter, collectListJobsOptions(opts...)) query += queryFilters @@ -867,7 +870,9 @@ func (db *DB) ListJobs(statusFilter string, repoFilter string, limit, offset int &fields.Agentic, &j.RepoPath, &j.RepoName, &fields.CommitSubject, &fields.Closed, &output, &verdictBool, &fields.SourceMachineID, &fields.UUID, &fields.Model, &fields.JobType, &fields.ReviewType, &fields.PatchID, &fields.ParentJobID, &fields.Provider, &fields.RequestedModel, &fields.RequestedProvider, &fields.TokenUsage, &fields.WorktreePath, - &fields.CommandLine, &fields.MinSeverity) + &fields.CommandLine, &fields.MinSeverity, + &fields.HighFindings, &fields.MediumFindings, &fields.LowFindings, + &fields.ParentHighFindings, &fields.ParentMediumFindings, &fields.ParentLowFindings) if err != nil { return nil, err } @@ -918,16 +923,22 @@ func (db *DB) GetJobByID(id int64) (*ReviewJob, error) { j.started_at, j.finished_at, j.worker_id, j.error, j.prompt, COALESCE(j.agentic, 0), r.root_path, r.name, c.subject, j.model, j.provider, j.requested_model, j.requested_provider, j.job_type, j.review_type, j.patch_id, j.parent_job_id, j.patch, j.token_usage, COALESCE(j.worktree_path, ''), j.command_line, COALESCE(j.min_severity, ''), - j.skip_reason, j.source + j.skip_reason, j.source, + rv.high_count, 
rv.medium_count, rv.low_count, + parent_rv.high_count, parent_rv.medium_count, parent_rv.low_count FROM review_jobs j JOIN repos r ON r.id = j.repo_id LEFT JOIN commits c ON c.id = j.commit_id + LEFT JOIN reviews rv ON rv.job_id = j.id + LEFT JOIN reviews parent_rv ON parent_rv.job_id = j.parent_job_id WHERE j.id = ? `, id).Scan(&j.ID, &j.RepoID, &fields.CommitID, &j.GitRef, &fields.Branch, &fields.SessionID, &j.Agent, &j.Reasoning, &j.Status, &fields.EnqueuedAt, &fields.StartedAt, &fields.FinishedAt, &fields.WorkerID, &fields.Error, &fields.Prompt, &fields.Agentic, &j.RepoPath, &j.RepoName, &fields.CommitSubject, &fields.Model, &fields.Provider, &fields.RequestedModel, &fields.RequestedProvider, &fields.JobType, &fields.ReviewType, &fields.PatchID, &fields.ParentJobID, &fields.Patch, &fields.TokenUsage, &fields.WorktreePath, &fields.CommandLine, &fields.MinSeverity, - &fields.SkipReason, &fields.Source) + &fields.SkipReason, &fields.Source, + &fields.HighFindings, &fields.MediumFindings, &fields.LowFindings, + &fields.ParentHighFindings, &fields.ParentMediumFindings, &fields.ParentLowFindings) if err != nil { return nil, err } diff --git a/internal/storage/models.go b/internal/storage/models.go index 2c47fc49..c15a4208 100644 --- a/internal/storage/models.go +++ b/internal/storage/models.go @@ -96,6 +96,17 @@ type ReviewJob struct { CommitSubject string `json:"commit_subject,omitempty"` // empty for ranges Closed *bool `json:"closed,omitempty"` // nil if no review yet Verdict *string `json:"verdict,omitempty"` // P/F parsed from review output + + // Finding counts from the job's own review (nil if no review yet). + HighFindings *int `json:"high_findings,omitempty"` + MediumFindings *int `json:"medium_findings,omitempty"` + LowFindings *int `json:"low_findings,omitempty"` + + // Finding counts from the parent job's review (used by fix jobs to show + // what they're addressing). Nil for non-fix jobs or when parent has no review. 
+ ParentHighFindings *int `json:"parent_high_findings,omitempty"` + ParentMediumFindings *int `json:"parent_medium_findings,omitempty"` + ParentLowFindings *int `json:"parent_low_findings,omitempty"` } // IsDirtyJob returns true if this is a dirty review (uncommitted changes). From 9d64f90372ac272c73aae7892232f874e56a6a18 Mon Sep 17 00:00:00 2001 From: Phillip Cloud <417981+cpcloud@users.noreply.github.com> Date: Sun, 26 Apr 2026 06:34:32 -0400 Subject: [PATCH 06/17] feat(storage): aggregate finding counts into JobStats --- internal/storage/db_filter_test.go | 46 ++++++++++++++++++++++++++++++ internal/storage/jobs.go | 23 ++++++++++----- 2 files changed, 62 insertions(+), 7 deletions(-) diff --git a/internal/storage/db_filter_test.go b/internal/storage/db_filter_test.go index 67138a69..85ac2a54 100644 --- a/internal/storage/db_filter_test.go +++ b/internal/storage/db_filter_test.go @@ -1014,3 +1014,49 @@ func TestListJobsWithBeforeCursor(t *testing.T) { assert.Len(t, result, 3) }) } + +func TestCountJobStats_FindingAggregation(t *testing.T) { + db := openTestDB(t) + defer db.Close() + + // Two repos so we can verify the repo filter is honored. 
+ rA := createRepo(t, db, "/tmp/repo-a") + rB := createRepo(t, db, "/tmp/repo-b") + + insertReview := func(repoID int64, sha string, h, m, l int) { + t.Helper() + commit := createCommit(t, db, repoID, sha) + job := enqueueJob(t, db, repoID, commit.ID, sha) + _, err := db.Exec(`UPDATE review_jobs SET status = 'done' WHERE id = ?`, job.ID) + require.NoError(t, err) + _, err = db.Exec( + `INSERT INTO reviews (job_id, agent, prompt, output, high_count, medium_count, low_count) + VALUES (?, 'test', 'p', 'x', ?, ?, ?)`, + job.ID, h, m, l, + ) + require.NoError(t, err) + } + + insertReview(rA.ID, "a1", 2, 1, 4) + insertReview(rA.ID, "a2", 1, 0, 3) + insertReview(rA.ID, "a3", 0, 5, 0) + insertReview(rB.ID, "b1", 7, 8, 9) // should NOT count when filtering rA + + t.Run("aggregates only the filtered repo", func(t *testing.T) { + stats, err := db.CountJobStats("/tmp/repo-a") + require.NoError(t, err) + assert.Equal(t, 3, stats.Done) + assert.Equal(t, 3, stats.HighFindings) // 2+1+0 + assert.Equal(t, 6, stats.MediumFindings) // 1+0+5 + assert.Equal(t, 7, stats.LowFindings) // 4+3+0 + }) + + t.Run("no filter returns global totals", func(t *testing.T) { + stats, err := db.CountJobStats("") + require.NoError(t, err) + assert.Equal(t, 4, stats.Done) + assert.Equal(t, 10, stats.HighFindings) // 2+1+0+7 + assert.Equal(t, 14, stats.MediumFindings) // 1+0+5+8 + assert.Equal(t, 16, stats.LowFindings) // 4+3+0+9 + }) +} diff --git a/internal/storage/jobs.go b/internal/storage/jobs.go index e6199b17..b896d4f2 100644 --- a/internal/storage/jobs.go +++ b/internal/storage/jobs.go @@ -890,19 +890,25 @@ func (db *DB) ListJobs(statusFilter string, repoFilter string, limit, offset int // GetJobByID returns a job by ID with joined fields // JobStats holds aggregate counts for the queue status line. 
type JobStats struct { - Done int `json:"done"` - Closed int `json:"closed"` - Open int `json:"open"` + Done int `json:"done"` + Closed int `json:"closed"` + Open int `json:"open"` + HighFindings int `json:"high_findings"` + MediumFindings int `json:"medium_findings"` + LowFindings int `json:"low_findings"` } -// CountJobStats returns aggregate done/closed/open counts -// using the same filter logic as ListJobs (repo, branch, closed). +// CountJobStats returns aggregate done/closed/open counts and per-severity +// finding sums using the same filter logic as ListJobs (repo, branch, closed). func (db *DB) CountJobStats(repoFilter string, opts ...ListJobsOption) (JobStats, error) { query := ` SELECT COALESCE(SUM(CASE WHEN j.status = 'done' THEN 1 ELSE 0 END), 0), COALESCE(SUM(CASE WHEN j.status = 'done' AND rv.closed = 1 THEN 1 ELSE 0 END), 0), - COALESCE(SUM(CASE WHEN j.status = 'done' AND (rv.closed IS NULL OR rv.closed = 0) THEN 1 ELSE 0 END), 0) + COALESCE(SUM(CASE WHEN j.status = 'done' AND (rv.closed IS NULL OR rv.closed = 0) THEN 1 ELSE 0 END), 0), + COALESCE(SUM(rv.high_count), 0), + COALESCE(SUM(rv.medium_count), 0), + COALESCE(SUM(rv.low_count), 0) FROM review_jobs j JOIN repos r ON r.id = j.repo_id LEFT JOIN reviews rv ON rv.job_id = j.id @@ -911,7 +917,10 @@ func (db *DB) CountJobStats(repoFilter string, opts ...ListJobsOption) (JobStats query += queryFilters var stats JobStats - err := db.QueryRow(query, args...).Scan(&stats.Done, &stats.Closed, &stats.Open) + err := db.QueryRow(query, args...).Scan( + &stats.Done, &stats.Closed, &stats.Open, + &stats.HighFindings, &stats.MediumFindings, &stats.LowFindings, + ) return stats, err } From 8ef78817a046c192a73295acf2f2645d46f755ba Mon Sep 17 00:00:00 2001 From: Phillip Cloud <417981+cpcloud@users.noreply.github.com> Date: Sun, 26 Apr 2026 06:38:51 -0400 Subject: [PATCH 07/17] feat(storage): mirror finding counts in PostgreSQL schema and sync --- internal/storage/postgres.go | 45 +++++++-- 
internal/storage/schemas/postgres_v13.sql | 116 ++++++++++++++++++++++ internal/storage/summary.go | 8 +- internal/storage/sync.go | 27 ++++- internal/storage/syncworker.go | 3 + 5 files changed, 182 insertions(+), 17 deletions(-) create mode 100644 internal/storage/schemas/postgres_v13.sql diff --git a/internal/storage/postgres.go b/internal/storage/postgres.go index 2218ea6f..81f756b2 100644 --- a/internal/storage/postgres.go +++ b/internal/storage/postgres.go @@ -15,12 +15,12 @@ import ( ) // PostgreSQL schema version - increment when schema changes -const pgSchemaVersion = 12 +const pgSchemaVersion = 13 // pgSchemaName is the PostgreSQL schema used to isolate roborev tables const pgSchemaName = "roborev" -//go:embed schemas/postgres_v12.sql +//go:embed schemas/postgres_v13.sql var pgSchemaSQL string // pgSchemaStatements returns the individual DDL statements for schema creation. @@ -337,6 +337,14 @@ func (p *PgPool) EnsureSchema(ctx context.Context) error { return fmt.Errorf("v12 migration (add dedup ref index): %w", err) } } + if currentVersion < 13 { + for _, col := range []string{"high_count", "medium_count", "low_count"} { + stmt := fmt.Sprintf(`ALTER TABLE roborev.reviews ADD COLUMN IF NOT EXISTS %s INTEGER NOT NULL DEFAULT 0`, col) + if _, err = p.pool.Exec(ctx, stmt); err != nil { + return fmt.Errorf("migrate to v13 (add %s column to reviews): %w", col, err) + } + } + } // Update version _, err = p.pool.Exec(ctx, `INSERT INTO schema_version (version) VALUES ($1) ON CONFLICT (version) DO NOTHING`, pgSchemaVersion) if err != nil { @@ -666,14 +674,19 @@ func (p *PgPool) UpsertReview(ctx context.Context, r SyncableReview) error { _, err := p.pool.Exec(ctx, ` INSERT INTO reviews ( uuid, job_uuid, agent, prompt, output, closed, - updated_by_machine_id, created_at, updated_at - ) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, NOW()) + updated_by_machine_id, created_at, updated_at, + high_count, medium_count, low_count + ) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, NOW(), 
$9, $10, $11) ON CONFLICT (uuid) DO UPDATE SET closed = EXCLUDED.closed, updated_by_machine_id = EXCLUDED.updated_by_machine_id, - updated_at = NOW() + updated_at = NOW(), + high_count = EXCLUDED.high_count, + medium_count = EXCLUDED.medium_count, + low_count = EXCLUDED.low_count `, r.UUID, r.JobUUID, r.Agent, r.Prompt, r.Output, r.Closed, - r.UpdatedByMachineID, r.CreatedAt) + r.UpdatedByMachineID, r.CreatedAt, + r.HighCount, r.MediumCount, r.LowCount) return err } @@ -805,6 +818,9 @@ type PulledReview struct { UpdatedByMachineID string CreatedAt time.Time UpdatedAt time.Time + HighCount int + MediumCount int + LowCount int } // PullReviews fetches reviews from PostgreSQL updated after the given cursor. @@ -829,7 +845,8 @@ func (p *PgPool) PullReviews(ctx context.Context, excludeMachineID string, known rows, err := p.pool.Query(ctx, ` SELECT r.uuid, r.job_uuid, r.agent, r.prompt, r.output, r.closed, - r.updated_by_machine_id, r.created_at, r.updated_at, r.id + r.updated_by_machine_id, r.created_at, r.updated_at, r.id, + r.high_count, r.medium_count, r.low_count FROM reviews r WHERE (r.updated_by_machine_id IS NULL OR r.updated_by_machine_id != $1) AND r.job_uuid = ANY($2) @@ -852,6 +869,7 @@ func (p *PgPool) PullReviews(ctx context.Context, excludeMachineID string, known err := rows.Scan( &r.UUID, &r.JobUUID, &r.Agent, &r.Prompt, &r.Output, &r.Closed, &r.UpdatedByMachineID, &r.CreatedAt, &r.UpdatedAt, &lastID, + &r.HighCount, &r.MediumCount, &r.LowCount, ) if err != nil { return nil, cursor, fmt.Errorf("scan review: %w", err) @@ -951,14 +969,19 @@ func (p *PgPool) BatchUpsertReviews(ctx context.Context, reviews []SyncableRevie batch.Queue(` INSERT INTO reviews ( uuid, job_uuid, agent, prompt, output, closed, - updated_by_machine_id, created_at, updated_at - ) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, NOW()) + updated_by_machine_id, created_at, updated_at, + high_count, medium_count, low_count + ) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, NOW(), $9, $10, $11) ON 
CONFLICT (uuid) DO UPDATE SET closed = EXCLUDED.closed, updated_by_machine_id = EXCLUDED.updated_by_machine_id, - updated_at = NOW() + updated_at = NOW(), + high_count = EXCLUDED.high_count, + medium_count = EXCLUDED.medium_count, + low_count = EXCLUDED.low_count `, r.UUID, r.JobUUID, r.Agent, r.Prompt, r.Output, r.Closed, - r.UpdatedByMachineID, r.CreatedAt) + r.UpdatedByMachineID, r.CreatedAt, + r.HighCount, r.MediumCount, r.LowCount) } br := p.pool.SendBatch(ctx, batch) diff --git a/internal/storage/schemas/postgres_v13.sql b/internal/storage/schemas/postgres_v13.sql new file mode 100644 index 00000000..67be6c44 --- /dev/null +++ b/internal/storage/schemas/postgres_v13.sql @@ -0,0 +1,116 @@ +-- PostgreSQL schema version 13 +-- Adds high_count, medium_count, and low_count columns to reviews so +-- per-severity finding totals sync alongside the review row itself. +-- Note: Version is managed by EnsureSchema(), not this file. + +CREATE SCHEMA IF NOT EXISTS roborev; + +CREATE TABLE IF NOT EXISTS roborev.schema_version ( + version INTEGER PRIMARY KEY, + applied_at TIMESTAMP WITH TIME ZONE DEFAULT NOW() +); + +CREATE TABLE IF NOT EXISTS roborev.machines ( + id SERIAL PRIMARY KEY, + machine_id UUID UNIQUE NOT NULL, + name TEXT, + last_seen_at TIMESTAMP WITH TIME ZONE DEFAULT NOW() +); + +CREATE TABLE IF NOT EXISTS roborev.repos ( + id SERIAL PRIMARY KEY, + identity TEXT UNIQUE NOT NULL, + created_at TIMESTAMP WITH TIME ZONE DEFAULT NOW() +); + +CREATE TABLE IF NOT EXISTS roborev.commits ( + id SERIAL PRIMARY KEY, + repo_id INTEGER REFERENCES roborev.repos(id), + sha TEXT NOT NULL, + author TEXT NOT NULL, + subject TEXT NOT NULL, + timestamp TIMESTAMP WITH TIME ZONE NOT NULL, + created_at TIMESTAMP WITH TIME ZONE DEFAULT NOW(), + UNIQUE(repo_id, sha) +); + +CREATE TABLE IF NOT EXISTS roborev.review_jobs ( + id SERIAL PRIMARY KEY, + uuid UUID UNIQUE NOT NULL, + repo_id INTEGER NOT NULL REFERENCES roborev.repos(id), + commit_id INTEGER REFERENCES roborev.commits(id), + 
git_ref TEXT NOT NULL, + branch TEXT, + session_id TEXT, + agent TEXT NOT NULL, + model TEXT, + provider TEXT, + requested_model TEXT, + requested_provider TEXT, + reasoning TEXT, + job_type TEXT NOT NULL DEFAULT 'review', + review_type TEXT NOT NULL DEFAULT '', + patch_id TEXT, + status TEXT NOT NULL CHECK(status IN ('queued', 'running', 'done', 'failed', 'canceled', 'applied', 'rebased', 'skipped')), + agentic BOOLEAN DEFAULT FALSE, + enqueued_at TIMESTAMP WITH TIME ZONE NOT NULL, + started_at TIMESTAMP WITH TIME ZONE, + finished_at TIMESTAMP WITH TIME ZONE, + prompt TEXT, + diff_content TEXT, + error TEXT, + token_usage TEXT, + worktree_path TEXT, + min_severity TEXT NOT NULL DEFAULT '', + skip_reason TEXT, + source TEXT, + source_machine_id UUID NOT NULL, + created_at TIMESTAMP WITH TIME ZONE DEFAULT NOW(), + updated_at TIMESTAMP WITH TIME ZONE DEFAULT NOW() +); + +CREATE TABLE IF NOT EXISTS roborev.reviews ( + id SERIAL PRIMARY KEY, + uuid UUID UNIQUE NOT NULL, + job_uuid UUID NOT NULL REFERENCES roborev.review_jobs(uuid), + agent TEXT NOT NULL, + prompt TEXT NOT NULL, + output TEXT NOT NULL, + closed BOOLEAN NOT NULL DEFAULT FALSE, + updated_by_machine_id UUID NOT NULL, + created_at TIMESTAMP WITH TIME ZONE DEFAULT NOW(), + updated_at TIMESTAMP WITH TIME ZONE DEFAULT NOW(), + high_count INTEGER NOT NULL DEFAULT 0, + medium_count INTEGER NOT NULL DEFAULT 0, + low_count INTEGER NOT NULL DEFAULT 0 +); + +CREATE TABLE IF NOT EXISTS roborev.responses ( + id SERIAL PRIMARY KEY, + uuid UUID UNIQUE NOT NULL, + job_uuid UUID NOT NULL REFERENCES roborev.review_jobs(uuid), + responder TEXT NOT NULL, + response TEXT NOT NULL, + source_machine_id UUID NOT NULL, + created_at TIMESTAMP WITH TIME ZONE DEFAULT NOW() +); + +CREATE INDEX IF NOT EXISTS idx_review_jobs_source ON roborev.review_jobs(source_machine_id); +CREATE INDEX IF NOT EXISTS idx_review_jobs_updated ON roborev.review_jobs(updated_at); +-- Note: idx_review_jobs_branch, idx_review_jobs_job_type, and +-- 
idx_review_jobs_patch_id are created by migration code, not here
+-- (to support upgrades from older versions where those columns
+-- don't exist yet).
+CREATE INDEX IF NOT EXISTS idx_reviews_job_uuid ON roborev.reviews(job_uuid);
+CREATE INDEX IF NOT EXISTS idx_reviews_updated ON roborev.reviews(updated_at);
+CREATE INDEX IF NOT EXISTS idx_responses_job_uuid ON roborev.responses(job_uuid);
+CREATE INDEX IF NOT EXISTS idx_responses_id ON roborev.responses(id);
+-- Partial unique indexes for auto-design dedup are created by EnsureSchema
+-- (fresh-init block + v12 migration step) — placing them here would break
+-- v1→v12 migrations where the source column doesn't yet exist when this
+-- schema is replayed.
+
+CREATE TABLE IF NOT EXISTS roborev.sync_metadata (
+    key TEXT PRIMARY KEY,
+    value TEXT NOT NULL
+);
diff --git a/internal/storage/summary.go b/internal/storage/summary.go
index 92d4f285..661bfd8c 100644
--- a/internal/storage/summary.go
+++ b/internal/storage/summary.go
@@ -585,8 +585,14 @@ func (db *DB) BackfillFindingCounts() (int, error) {
 	}
 	defer func() { _ = tx.Rollback() }()
 
+	// Clearing synced_at re-queues the row for sync so PostgreSQL receives
+	// the backfilled counts; without it, GetReviewsToSync would still consider
+	// the row "synced" and the remote (possibly pre-v13 PG, which lacks the
+	// count columns) would keep returning zeros to other clients pulling the
+	// same review.
 	stmt, err := tx.Prepare(
WHERE id = ?`, + `UPDATE reviews + SET high_count = ?, medium_count = ?, low_count = ?, synced_at = NULL + WHERE id = ?`, ) if err != nil { return 0, err diff --git a/internal/storage/sync.go b/internal/storage/sync.go index 7c3f298b..af863f50 100644 --- a/internal/storage/sync.go +++ b/internal/storage/sync.go @@ -454,6 +454,9 @@ type SyncableReview struct { UpdatedByMachineID string CreatedAt time.Time UpdatedAt time.Time + HighCount int + MediumCount int + LowCount int } // GetReviewsToSync returns reviews modified locally that need to be pushed. @@ -463,7 +466,8 @@ func (db *DB) GetReviewsToSync(machineID string, limit int) ([]SyncableReview, e SELECT r.id, r.uuid, r.job_id, j.uuid, r.agent, r.prompt, r.output, r.closed, - r.updated_by_machine_id, r.created_at, r.updated_at + r.updated_by_machine_id, r.created_at, r.updated_at, + r.high_count, r.medium_count, r.low_count FROM reviews r JOIN review_jobs j ON r.job_id = j.id WHERE r.updated_by_machine_id = ? @@ -494,6 +498,7 @@ func (db *DB) GetReviewsToSync(machineID string, limit int) ([]SyncableReview, e &r.ID, &r.UUID, &r.JobID, &r.JobUUID, &r.Agent, &r.Prompt, &r.Output, &r.Closed, &r.UpdatedByMachineID, &createdAt, &updatedAt, + &r.HighCount, &r.MediumCount, &r.LowCount, ) if err != nil { return nil, fmt.Errorf("scan review: %w", err) @@ -664,18 +669,30 @@ func (db *DB) UpsertPulledReview(r PulledReview) error { } now := time.Now().UTC().Format(time.RFC3339) + // Counts are derived from output text — the most reliable source of truth. + // Recomputing here means a legacy PostgreSQL row (with default 0/0/0 + // counts but real findings in its output) yields the right counts on + // pull, and a legitimate zero-finding review also yields zero. This + // avoids the legacy-vs-valid-zero ambiguity that storing the count + // alongside the output would create. 
+ highCount, mediumCount, lowCount := CountFindings(r.Output) _, err = db.Exec(` INSERT INTO reviews ( uuid, job_id, agent, prompt, output, closed, - updated_by_machine_id, created_at, updated_at, synced_at - ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?) + updated_by_machine_id, created_at, updated_at, synced_at, + high_count, medium_count, low_count + ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?) ON CONFLICT(uuid) DO UPDATE SET closed = excluded.closed, updated_by_machine_id = excluded.updated_by_machine_id, updated_at = excluded.updated_at, - synced_at = ? + synced_at = ?, + high_count = excluded.high_count, + medium_count = excluded.medium_count, + low_count = excluded.low_count `, r.UUID, jobID, r.Agent, r.Prompt, r.Output, r.Closed, - r.UpdatedByMachineID, r.CreatedAt.Format(time.RFC3339), r.UpdatedAt.Format(time.RFC3339), now, now) + r.UpdatedByMachineID, r.CreatedAt.Format(time.RFC3339), r.UpdatedAt.Format(time.RFC3339), now, + highCount, mediumCount, lowCount, now) return err } diff --git a/internal/storage/syncworker.go b/internal/storage/syncworker.go index 9a2bacdd..3275b6ec 100644 --- a/internal/storage/syncworker.go +++ b/internal/storage/syncworker.go @@ -767,6 +767,9 @@ func (w *SyncWorker) pullChangesWithStats(ctx context.Context, pool *PgPool) (pu UpdatedByMachineID: r.UpdatedByMachineID, CreatedAt: r.CreatedAt, UpdatedAt: r.UpdatedAt, + HighCount: r.HighCount, + MediumCount: r.MediumCount, + LowCount: r.LowCount, } if err := w.db.UpsertPulledReview(pr); err != nil { // Don't advance cursor if any upsert fails - we'll retry next sync From fa61f61cefc064f628d758630735dd77538c0850 Mon Sep 17 00:00:00 2001 From: Phillip Cloud <417981+cpcloud@users.noreply.github.com> Date: Sun, 26 Apr 2026 06:40:08 -0400 Subject: [PATCH 08/17] feat(tui): add severity styles and renderSeverityBadge helper --- cmd/roborev/tui/severity_badge.go | 33 ++++++++++++++++++ cmd/roborev/tui/severity_badge_test.go | 46 ++++++++++++++++++++++++++ cmd/roborev/tui/tui.go | 12 +++++++ 
3 files changed, 91 insertions(+) create mode 100644 cmd/roborev/tui/severity_badge.go create mode 100644 cmd/roborev/tui/severity_badge_test.go diff --git a/cmd/roborev/tui/severity_badge.go b/cmd/roborev/tui/severity_badge.go new file mode 100644 index 00000000..8c38ea41 --- /dev/null +++ b/cmd/roborev/tui/severity_badge.go @@ -0,0 +1,33 @@ +package tui + +import ( + "fmt" + "strings" +) + +// renderSeverityBadge formats finding counts as "H3 M2 L5" with severity- +// colored letters/numbers. Zero counts render in dim grey so columns stay +// visually stable but de-emphasised. Plain-text width is always at least +// 8 characters (single digits) and grows by one per extra digit per slot. +func renderSeverityBadge(h, m, l int) string { + parts := []string{ + formatSeveritySlot("H", h, severityHighStyle), + formatSeveritySlot("M", m, severityMediumStyle), + formatSeveritySlot("L", l, severityLowStyle), + } + return strings.Join(parts, " ") +} + +// lipglossStyleRenderer is the minimal interface from lipgloss.Style that +// formatSeveritySlot consumes. lipgloss.Style satisfies it natively. 
+type lipglossStyleRenderer interface { + Render(strs ...string) string +} + +func formatSeveritySlot(letter string, count int, active lipglossStyleRenderer) string { + text := fmt.Sprintf("%s%d", letter, count) + if count == 0 { + return severityZeroStyle.Render(text) + } + return active.Render(text) +} diff --git a/cmd/roborev/tui/severity_badge_test.go b/cmd/roborev/tui/severity_badge_test.go new file mode 100644 index 00000000..c46cf6d5 --- /dev/null +++ b/cmd/roborev/tui/severity_badge_test.go @@ -0,0 +1,46 @@ +package tui + +import ( + "strings" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestRenderSeverityBadge(t *testing.T) { + tests := []struct { + name string + h, m, l int + wantContains []string + wantPlainTextLen int + }{ + { + name: "all zero", + wantContains: []string{"H0", "M0", "L0"}, + wantPlainTextLen: 8, // "H0 M0 L0" + }, + { + name: "single digits", + h: 3, m: 2, l: 5, + wantContains: []string{"H3", "M2", "L5"}, + wantPlainTextLen: 8, + }, + { + name: "double digits", + h: 12, m: 3, l: 8, + wantContains: []string{"H12", "M3", "L8"}, + wantPlainTextLen: 9, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + out := renderSeverityBadge(tt.h, tt.m, tt.l) + plain := stripANSI(out) + assert.Equal(t, tt.wantPlainTextLen, len(plain), "plain text length: got %q", plain) + for _, want := range tt.wantContains { + assert.True(t, strings.Contains(plain, want), + "want %q in %q", want, plain) + } + }) + } +} diff --git a/cmd/roborev/tui/tui.go b/cmd/roborev/tui/tui.go index da400215..bbf0ac2a 100644 --- a/cmd/roborev/tui/tui.go +++ b/cmd/roborev/tui/tui.go @@ -74,6 +74,18 @@ var ( updateStyle = lipgloss.NewStyle(). Foreground(lipgloss.AdaptiveColor{Light: "136", Dark: "226"}).Bold(true) // Yellow/Gold + + // Severity styles for finding count badges (high / medium / low). + // The dim style renders zero counts so the badge keeps a stable width + // without drawing the eye. 
+ severityHighStyle = lipgloss.NewStyle().Bold(true). + Foreground(lipgloss.AdaptiveColor{Light: "124", Dark: "196"}) // Red + severityMediumStyle = lipgloss.NewStyle(). + Foreground(lipgloss.AdaptiveColor{Light: "130", Dark: "226"}) // Yellow + severityLowStyle = lipgloss.NewStyle(). + Foreground(lipgloss.AdaptiveColor{Light: "27", Dark: "39"}) // Blue + severityZeroStyle = lipgloss.NewStyle(). + Foreground(lipgloss.AdaptiveColor{Light: "245", Dark: "240"}) // Dim grey ) // reflowHelpRows redistributes items across rows so that when rendered From 93c9a772c30ffe9b50392e701aba50ad19290f34 Mon Sep 17 00:00:00 2001 From: Phillip Cloud <417981+cpcloud@users.noreply.github.com> Date: Sun, 26 Apr 2026 06:43:50 -0400 Subject: [PATCH 09/17] feat(tui): add Findings column to queue table --- cmd/roborev/tui/queue_test.go | 16 +++++++------ cmd/roborev/tui/render_queue.go | 41 ++++++++++++++++++++++++++++++--- 2 files changed, 47 insertions(+), 10 deletions(-) diff --git a/cmd/roborev/tui/queue_test.go b/cmd/roborev/tui/queue_test.go index bcf5e29b..ac337e75 100644 --- a/cmd/roborev/tui/queue_test.go +++ b/cmd/roborev/tui/queue_test.go @@ -598,9 +598,11 @@ func TestTUIJobCellsContent(t *testing.T) { job.Closed = &handled cells := m.jobCells(job) + // cells: ref(0), branch(1), repo(2), agent(3), queued(4), + // elapsed(5), status(6), pf(7), findings(8), handled(9), session(10), ... 
assert.Equal(t, "Done", cells[6]) assert.Equal(t, "P", cells[7]) - assert.Equal(t, "yes", cells[8]) + assert.Equal(t, "yes", cells[9]) }) } @@ -2200,16 +2202,16 @@ func TestMigrateColumnConfig(t *testing.T) { wantColOrder: nil, }, { - name: "custom order preserved", + name: "custom order has findings inserted after pf", columnOrder: []string{"repo", "ref", "agent", "status", "pf", "queued", "elapsed", "branch", "closed"}, - wantDirty: false, - wantColOrder: []string{"repo", "ref", "agent", "status", "pf", "queued", "elapsed", "branch", "closed"}, + wantDirty: true, + wantColOrder: []string{"repo", "ref", "agent", "status", "pf", "findings", "queued", "elapsed", "branch", "closed"}, }, { - name: "current default order preserved", + name: "current default order has findings inserted after pf", columnOrder: []string{"ref", "branch", "repo", "agent", "queued", "elapsed", "status", "pf", "closed"}, - wantDirty: false, - wantColOrder: []string{"ref", "branch", "repo", "agent", "queued", "elapsed", "status", "pf", "closed"}, + wantDirty: true, + wantColOrder: []string{"ref", "branch", "repo", "agent", "queued", "elapsed", "status", "pf", "findings", "closed"}, }, { name: "stale hidden_columns backfills only new columns", diff --git a/cmd/roborev/tui/render_queue.go b/cmd/roborev/tui/render_queue.go index e3c5848f..6401bbd3 100644 --- a/cmd/roborev/tui/render_queue.go +++ b/cmd/roborev/tui/render_queue.go @@ -110,6 +110,7 @@ const ( colElapsed // Elapsed time colStatus // Job status colPF // Pass/Fail verdict + colFindings // Severity finding counts (H3 M2 L5) colHandled // Done status colSessionID // Session ID colRequestedModel // Explicitly requested model @@ -263,7 +264,7 @@ func (m model) renderQueueView() string { visCols := m.visibleColumns() // Compute per-column max content widths, using cache when data hasn't changed. 
- allHeaders := [colCount]string{"", "JobID", "Ref", "Branch", "Repo", "Agent", "Queued", "Elapsed", "Status", "P/F", "Closed", "Session", "Req Model", "Req Provider"} + allHeaders := [colCount]string{"", "JobID", "Ref", "Branch", "Repo", "Agent", "Queued", "Elapsed", "Status", "P/F", "Findings", "Closed", "Session", "Req Model", "Req Provider"} allFullRows := make([][]string, len(visibleJobList)) for i, job := range visibleJobList { cells := m.jobCells(job) @@ -317,6 +318,7 @@ func (m model) renderQueueView() string { colQueued: 12, colElapsed: 8, colPF: 3, // "P/F" header = 3 + colFindings: 8, // "Findings" header = 8, badge "H3 M2 L5" = 8 colHandled: max(contentWidth[colHandled], 6), // "Closed" header = 6 colAgent: min(max(contentWidth[colAgent], 5), 12), // "Agent" header = 5, cap at 12 colSessionID: min(max(contentWidth[colSessionID], 7), 12), // "Session" header = 7, cap at 12 @@ -662,7 +664,12 @@ func (m model) jobCells(job storage.ReviewJob) []string { requestedModel := stripControlChars(job.RequestedModel) requestedProvider := stripControlChars(job.RequestedProvider) - return []string{ref, branch, repo, agentName, enqueued, elapsed, status, verdict, handled, sessionID, requestedModel, requestedProvider} + findings := "" + if job.HighFindings != nil { + findings = renderSeverityBadge(*job.HighFindings, *job.MediumFindings, *job.LowFindings) + } + + return []string{ref, branch, repo, agentName, enqueued, elapsed, status, verdict, findings, handled, sessionID, requestedModel, requestedProvider} } // statusLabel returns a capitalized display label for the job status. @@ -830,12 +837,38 @@ func migrateColumnConfig(cfg *config.Config) bool { dirty = true } + // Backfill "findings" into custom queue and task column orders so users + // with pre-existing customisation see the new column next to its sibling + // (after "pf" for queue, after "parent" for tasks). Missing entries would + // otherwise be appended at the end by resolveColumnOrder. 
+ if len(cfg.ColumnOrder) > 0 && !slices.Contains(cfg.ColumnOrder, "findings") { + insertAt := len(cfg.ColumnOrder) + for i, name := range cfg.ColumnOrder { + if name == "pf" { + insertAt = i + 1 + break + } + } + cfg.ColumnOrder = slices.Insert(cfg.ColumnOrder, insertAt, "findings") + dirty = true + } + if len(cfg.TaskColumnOrder) > 0 && !slices.Contains(cfg.TaskColumnOrder, "findings") { + insertAt := len(cfg.TaskColumnOrder) + for i, name := range cfg.TaskColumnOrder { + if name == "parent" { + insertAt = i + 1 + break + } + } + cfg.TaskColumnOrder = slices.Insert(cfg.TaskColumnOrder, insertAt, "findings") + dirty = true + } return dirty } // toggleableColumns is the ordered list of columns the user can show/hide. // colSel and colJobID are always visible and not included here. -var toggleableColumns = []int{colRef, colBranch, colRepo, colAgent, colQueued, colElapsed, colStatus, colPF, colHandled, colSessionID, colRequestedModel, colRequestedProvider} +var toggleableColumns = []int{colRef, colBranch, colRepo, colAgent, colQueued, colElapsed, colStatus, colPF, colFindings, colHandled, colSessionID, colRequestedModel, colRequestedProvider} // columnNames maps column constants to display names. 
var columnNames = map[int]string{ @@ -847,6 +880,7 @@ var columnNames = map[int]string{ colQueued: "Queued", colElapsed: "Elapsed", colPF: "P/F", + colFindings: "Findings", colHandled: "Closed", colSessionID: "Session", colRequestedModel: "Req Model", @@ -863,6 +897,7 @@ var columnConfigNames = map[int]string{ colQueued: "queued", colElapsed: "elapsed", colPF: "pf", + colFindings: "findings", colHandled: "closed", colSessionID: "session_id", colRequestedModel: "requested_model", From ce1aa8dfc0149d7003d798bb4153f9ea54a43fda Mon Sep 17 00:00:00 2001 From: Phillip Cloud <417981+cpcloud@users.noreply.github.com> Date: Sun, 26 Apr 2026 06:44:56 -0400 Subject: [PATCH 10/17] feat(tui): show finding counts in review detail header --- cmd/roborev/tui/render_review.go | 29 ++++++++++++++++++++++------- 1 file changed, 22 insertions(+), 7 deletions(-) diff --git a/cmd/roborev/tui/render_review.go b/cmd/roborev/tui/render_review.go index a568ba25..e0f42537 100644 --- a/cmd/roborev/tui/render_review.go +++ b/cmd/roborev/tui/render_review.go @@ -57,13 +57,15 @@ func (m model) renderReviewView() string { b.WriteString(statusStyle.Render(locationLine)) b.WriteString("\x1b[K") // Clear to end of line - // Show verdict, closed status, and token usage on next line (skip verdict for fix jobs) + // Show verdict, findings counts, closed status, and token usage on next line (skip verdict for fix jobs) hasVerdict := review.Job.Verdict != nil && *review.Job.Verdict != "" && !review.Job.IsFixJob() + hasFindings := review.Job.HighFindings != nil && + (*review.Job.HighFindings+*review.Job.MediumFindings+*review.Job.LowFindings > 0) tokenSummary := "" if tu := tokens.ParseJSON(review.Job.TokenUsage); tu != nil { tokenSummary = tu.FormatSummary() } - if hasVerdict || review.Closed || tokenSummary != "" { + if hasVerdict || hasFindings || review.Closed || tokenSummary != "" { b.WriteString("\n") if hasVerdict { v := *review.Job.Verdict @@ -73,15 +75,26 @@ func (m model) renderReviewView() 
string { b.WriteString(failStyle.Render("Verdict: Fail")) } } - // Show [CLOSED] with distinct color (after verdict if present) - if review.Closed { + if hasFindings { if hasVerdict { + b.WriteString(" ") + } + b.WriteString(statusStyle.Render("Findings: ")) + b.WriteString(renderSeverityBadge( + *review.Job.HighFindings, + *review.Job.MediumFindings, + *review.Job.LowFindings, + )) + } + // Show [CLOSED] with distinct color (after verdict/findings if present) + if review.Closed { + if hasVerdict || hasFindings { b.WriteString(" ") } b.WriteString(closedStyle.Render("[CLOSED]")) } if tokenSummary != "" { - if hasVerdict || review.Closed { + if hasVerdict || hasFindings || review.Closed { b.WriteString(" ") } b.WriteString(statusStyle.Render("[" + tokenSummary + "]")) @@ -151,9 +164,11 @@ func (m model) renderReviewView() string { // Reserve title, location, footer status, help, and optional verdict. headerHeight := titleLines + locationLines + 1 + helpLines hasVerdict := review.Job != nil && review.Job.Verdict != nil && *review.Job.Verdict != "" && !review.Job.IsFixJob() + hasFindings := review.Job != nil && review.Job.HighFindings != nil && + (*review.Job.HighFindings+*review.Job.MediumFindings+*review.Job.LowFindings > 0) hasTokens := review.Job != nil && tokens.ParseJSON(review.Job.TokenUsage) != nil - if hasVerdict || review.Closed || hasTokens { - headerHeight++ // Add 1 for verdict/closed/tokens line + if hasVerdict || hasFindings || review.Closed || hasTokens { + headerHeight++ // Add 1 for verdict/findings/closed/tokens line } panelReserve := 0 if m.reviewFixPanelOpen { From 61340a93a15f178932fcca42c29ee7747c4a981b Mon Sep 17 00:00:00 2001 From: Phillip Cloud <417981+cpcloud@users.noreply.github.com> Date: Sun, 26 Apr 2026 06:45:34 -0400 Subject: [PATCH 11/17] feat(tui): show aggregate finding counts in queue status header --- cmd/roborev/tui/render_queue.go | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/cmd/roborev/tui/render_queue.go 
b/cmd/roborev/tui/render_queue.go index 6401bbd3..1b566cdd 100644 --- a/cmd/roborev/tui/render_queue.go +++ b/cmd/roborev/tui/render_queue.go @@ -154,6 +154,7 @@ func (m model) renderQueueView() string { // fall back to client-side counting for multi-repo filters (which load all jobs) var statusLine string var done, closed, open int + var aggH, aggM, aggL int if len(m.activeRepoFilter) > 1 || m.activeBranchFilter == branchNone { // Client-side filtered views load all jobs, so count locally for _, job := range m.jobs { @@ -173,11 +174,19 @@ func (m model) renderQueueView() string { } } } + if job.HighFindings != nil { + aggH += *job.HighFindings + aggM += *job.MediumFindings + aggL += *job.LowFindings + } } } else { done = m.jobStats.Done closed = m.jobStats.Closed open = m.jobStats.Open + aggH = m.jobStats.HighFindings + aggM = m.jobStats.MediumFindings + aggL = m.jobStats.LowFindings } b.WriteString(m.renderDaemonStatus()) if len(m.activeRepoFilter) > 0 || m.activeBranchFilter != "" { @@ -189,6 +198,11 @@ func (m model) renderQueueView() string { done, closed, open) } b.WriteString(statusStyle.Render(statusLine)) + if aggH+aggM+aggL > 0 { + b.WriteString(statusStyle.Render(" |")) + b.WriteString(" Findings: ") + b.WriteString(renderSeverityBadge(aggH, aggM, aggL)) + } b.WriteString("\x1b[K\n") // Clear status line // Update notification on line 3 (above the table) From 0118d570db9d08cde4edbbde0424fb3248acae27 Mon Sep 17 00:00:00 2001 From: Phillip Cloud <417981+cpcloud@users.noreply.github.com> Date: Sun, 26 Apr 2026 06:47:05 -0400 Subject: [PATCH 12/17] feat(tui): show parent review finding counts in tasks view --- cmd/roborev/tui/render_tasks.go | 36 +++++++++++++++++++++++---------- 1 file changed, 25 insertions(+), 11 deletions(-) diff --git a/cmd/roborev/tui/render_tasks.go b/cmd/roborev/tui/render_tasks.go index 69edd057..f7739a96 100644 --- a/cmd/roborev/tui/render_tasks.go +++ b/cmd/roborev/tui/render_tasks.go @@ -18,6 +18,7 @@ const ( tcolStatus // Job 
status tcolJobID // Job ID tcolParent // Parent job reference + tcolFindings // Parent review's finding counts (H3 M2 L5) tcolQueued // Enqueue timestamp tcolElapsed // Elapsed time tcolBranch // Branch name @@ -28,13 +29,14 @@ const ( // taskToggleableColumns is the ordered list of task columns the user can show/hide/reorder. // tcolSel is always visible and not included here. -var taskToggleableColumns = []int{tcolStatus, tcolJobID, tcolParent, tcolQueued, tcolElapsed, tcolBranch, tcolRepo, tcolRefSubject} +var taskToggleableColumns = []int{tcolStatus, tcolJobID, tcolParent, tcolFindings, tcolQueued, tcolElapsed, tcolBranch, tcolRepo, tcolRefSubject} // taskColumnNames maps task column constants to display names. var taskColumnNames = map[int]string{ tcolStatus: "Status", tcolJobID: "Job", tcolParent: "Parent", + tcolFindings: "Findings", tcolQueued: "Queued", tcolElapsed: "Elapsed", tcolBranch: "Branch", @@ -47,6 +49,7 @@ var taskColumnConfigNames = map[int]string{ tcolStatus: "status", tcolJobID: "job", tcolParent: "parent", + tcolFindings: "findings", tcolQueued: "queued", tcolElapsed: "elapsed", tcolBranch: "branch", @@ -78,8 +81,9 @@ func (m model) visibleTaskColumns() []int { } // taskCells returns plain text cell values for a fix job row. -// Order: status, jobID, parent, queued, elapsed, branch, repo, refSubject -// (tcolStatus through tcolRefSubject, 8 values). +// Order: status, jobID, parent, findings, queued, elapsed, branch, repo, refSubject +// (tcolStatus through tcolRefSubject, 9 values). The findings column shows the +// parent review's counts since fix jobs themselves don't produce findings. 
func (m model) taskCells(job storage.ReviewJob) []string { var statusLabel string switch job.Status { @@ -139,7 +143,16 @@ func (m model) taskCells(job storage.ReviewJob) []string { refSubject += job.CommitSubject } - return []string{statusLabel, jobID, parentRef, queued, elapsed, branch, repo, refSubject} + findings := "" + if job.ParentHighFindings != nil { + findings = renderSeverityBadge( + *job.ParentHighFindings, + *job.ParentMediumFindings, + *job.ParentLowFindings, + ) + } + + return []string{statusLabel, jobID, parentRef, findings, queued, elapsed, branch, repo, refSubject} } func (m model) renderTasksView() string { @@ -178,7 +191,7 @@ func (m model) renderTasksView() string { visCols := m.visibleTaskColumns() // Compute per-column max content widths, using cache when data hasn't changed. - allHeaders := [tcolCount]string{tcolSel: "", tcolStatus: "Status", tcolJobID: "Job", tcolParent: "Parent", tcolQueued: "Queued", tcolElapsed: "Elapsed", tcolBranch: "Branch", tcolRepo: "Repo", tcolRefSubject: "Ref/Subject"} + allHeaders := [tcolCount]string{tcolSel: "", tcolStatus: "Status", tcolJobID: "Job", tcolParent: "Parent", tcolFindings: "Findings", tcolQueued: "Queued", tcolElapsed: "Elapsed", tcolBranch: "Branch", tcolRepo: "Repo", tcolRefSubject: "Ref/Subject"} allFullRows := make([][]string, len(m.fixJobs)) for i, job := range m.fixJobs { cells := m.taskCells(job) @@ -222,12 +235,13 @@ func (m model) renderTasksView() string { } fixedWidth := map[int]int{ - tcolSel: 2, - tcolStatus: 8, - tcolJobID: 5, - tcolParent: 11, - tcolQueued: 12, - tcolElapsed: 8, + tcolSel: 2, + tcolStatus: 8, + tcolJobID: 5, + tcolParent: 11, + tcolFindings: 8, + tcolQueued: 12, + tcolElapsed: 8, } flexCols := []int{tcolBranch, tcolRepo, tcolRefSubject} From 0910038a511408a33b5de0a4347ac6a72ecc1fb2 Mon Sep 17 00:00:00 2001 From: Phillip Cloud <417981+cpcloud@users.noreply.github.com> Date: Sun, 26 Apr 2026 07:05:56 -0400 Subject: [PATCH 13/17] fix(tui): guard partial nil-pointer 
population in finding badges --- cmd/roborev/tui/render_queue.go | 16 +++++++++------- cmd/roborev/tui/render_review.go | 16 +++++++--------- cmd/roborev/tui/render_tasks.go | 8 ++++---- cmd/roborev/tui/severity_badge.go | 10 ++++++++++ 4 files changed, 30 insertions(+), 20 deletions(-) diff --git a/cmd/roborev/tui/render_queue.go b/cmd/roborev/tui/render_queue.go index 1b566cdd..47481cd6 100644 --- a/cmd/roborev/tui/render_queue.go +++ b/cmd/roborev/tui/render_queue.go @@ -174,11 +174,9 @@ func (m model) renderQueueView() string { } } } - if job.HighFindings != nil { - aggH += *job.HighFindings - aggM += *job.MediumFindings - aggL += *job.LowFindings - } + aggH += derefOrZero(job.HighFindings) + aggM += derefOrZero(job.MediumFindings) + aggL += derefOrZero(job.LowFindings) } } else { done = m.jobStats.Done @@ -679,8 +677,12 @@ func (m model) jobCells(job storage.ReviewJob) []string { requestedProvider := stripControlChars(job.RequestedProvider) findings := "" - if job.HighFindings != nil { - findings = renderSeverityBadge(*job.HighFindings, *job.MediumFindings, *job.LowFindings) + if job.HighFindings != nil || job.MediumFindings != nil || job.LowFindings != nil { + findings = renderSeverityBadge( + derefOrZero(job.HighFindings), + derefOrZero(job.MediumFindings), + derefOrZero(job.LowFindings), + ) } return []string{ref, branch, repo, agentName, enqueued, elapsed, status, verdict, findings, handled, sessionID, requestedModel, requestedProvider} diff --git a/cmd/roborev/tui/render_review.go b/cmd/roborev/tui/render_review.go index e0f42537..12f1de2d 100644 --- a/cmd/roborev/tui/render_review.go +++ b/cmd/roborev/tui/render_review.go @@ -59,8 +59,10 @@ func (m model) renderReviewView() string { // Show verdict, findings counts, closed status, and token usage on next line (skip verdict for fix jobs) hasVerdict := review.Job.Verdict != nil && *review.Job.Verdict != "" && !review.Job.IsFixJob() - hasFindings := review.Job.HighFindings != nil && - 
(*review.Job.HighFindings+*review.Job.MediumFindings+*review.Job.LowFindings > 0) + findHigh := derefOrZero(review.Job.HighFindings) + findMed := derefOrZero(review.Job.MediumFindings) + findLow := derefOrZero(review.Job.LowFindings) + hasFindings := findHigh+findMed+findLow > 0 tokenSummary := "" if tu := tokens.ParseJSON(review.Job.TokenUsage); tu != nil { tokenSummary = tu.FormatSummary() @@ -80,11 +82,7 @@ func (m model) renderReviewView() string { b.WriteString(" ") } b.WriteString(statusStyle.Render("Findings: ")) - b.WriteString(renderSeverityBadge( - *review.Job.HighFindings, - *review.Job.MediumFindings, - *review.Job.LowFindings, - )) + b.WriteString(renderSeverityBadge(findHigh, findMed, findLow)) } // Show [CLOSED] with distinct color (after verdict/findings if present) if review.Closed { @@ -164,8 +162,8 @@ func (m model) renderReviewView() string { // Reserve title, location, footer status, help, and optional verdict. headerHeight := titleLines + locationLines + 1 + helpLines hasVerdict := review.Job != nil && review.Job.Verdict != nil && *review.Job.Verdict != "" && !review.Job.IsFixJob() - hasFindings := review.Job != nil && review.Job.HighFindings != nil && - (*review.Job.HighFindings+*review.Job.MediumFindings+*review.Job.LowFindings > 0) + hasFindings := review.Job != nil && + (derefOrZero(review.Job.HighFindings)+derefOrZero(review.Job.MediumFindings)+derefOrZero(review.Job.LowFindings) > 0) hasTokens := review.Job != nil && tokens.ParseJSON(review.Job.TokenUsage) != nil if hasVerdict || hasFindings || review.Closed || hasTokens { headerHeight++ // Add 1 for verdict/findings/closed/tokens line diff --git a/cmd/roborev/tui/render_tasks.go b/cmd/roborev/tui/render_tasks.go index f7739a96..abfaaa90 100644 --- a/cmd/roborev/tui/render_tasks.go +++ b/cmd/roborev/tui/render_tasks.go @@ -144,11 +144,11 @@ func (m model) taskCells(job storage.ReviewJob) []string { } findings := "" - if job.ParentHighFindings != nil { + if job.ParentHighFindings != nil 
|| job.ParentMediumFindings != nil || job.ParentLowFindings != nil { findings = renderSeverityBadge( - *job.ParentHighFindings, - *job.ParentMediumFindings, - *job.ParentLowFindings, + derefOrZero(job.ParentHighFindings), + derefOrZero(job.ParentMediumFindings), + derefOrZero(job.ParentLowFindings), ) } diff --git a/cmd/roborev/tui/severity_badge.go b/cmd/roborev/tui/severity_badge.go index 8c38ea41..f14c9048 100644 --- a/cmd/roborev/tui/severity_badge.go +++ b/cmd/roborev/tui/severity_badge.go @@ -5,6 +5,16 @@ import ( "strings" ) +// derefOrZero returns the dereferenced int, or 0 if the pointer is nil. +// Used to safely combine the three finding-count pointers that JSON +// deserialization or ad-hoc producers may populate inconsistently. +func derefOrZero(p *int) int { + if p == nil { + return 0 + } + return *p +} + // renderSeverityBadge formats finding counts as "H3 M2 L5" with severity- // colored letters/numbers. Zero counts render in dim grey so columns stay // visually stable but de-emphasised. Plain-text width is always at least From 2dc0044f28b34496f7b28fbd24470abff52c8c76 Mon Sep 17 00:00:00 2001 From: Phillip Cloud <417981+cpcloud@users.noreply.github.com> Date: Sun, 26 Apr 2026 08:56:30 -0400 Subject: [PATCH 14/17] fix: address roborev-ci review findings and CI failures - HIGH from roborev-ci: make finding count columns nullable in both SQLite and Postgres v13 schemas. NULL means "not yet parsed", 0 means "parsed and zero findings". The backfill now filters on IS NULL and writes {h, m, l} unconditionally, so clean parsed reviews stop matching the predicate after one pass instead of being re-scanned on every daemon startup. SELECT sites that read into int wrap the columns in COALESCE(..., 0) so legacy NULLs don't fail Scan. - Lint (testifylint): use assert.Len and assert.Contains in severity_badge_test.go. 
- Windows test failure in TestCountJobStats_FindingAggregation: pass rA.RootPath instead of "/tmp/repo-a" so the filter matches the absolute path that GetOrCreateRepo stored after filepath.Abs + ToSlash on Windows. --- cmd/roborev/tui/severity_badge_test.go | 5 ++--- internal/storage/db.go | 20 ++++++++++--------- internal/storage/db_filter_test.go | 2 +- internal/storage/postgres.go | 8 ++++++-- internal/storage/reviews.go | 12 ++++++------ internal/storage/schemas/postgres_v13.sql | 6 +++--- internal/storage/summary.go | 14 +++++-------- internal/storage/summary_test.go | 24 +++++++++++++---------- internal/storage/sync.go | 2 +- 9 files changed, 49 insertions(+), 44 deletions(-) diff --git a/cmd/roborev/tui/severity_badge_test.go b/cmd/roborev/tui/severity_badge_test.go index c46cf6d5..1442807c 100644 --- a/cmd/roborev/tui/severity_badge_test.go +++ b/cmd/roborev/tui/severity_badge_test.go @@ -1,7 +1,6 @@ package tui import ( - "strings" "testing" "github.com/stretchr/testify/assert" @@ -36,9 +35,9 @@ func TestRenderSeverityBadge(t *testing.T) { t.Run(tt.name, func(t *testing.T) { out := renderSeverityBadge(tt.h, tt.m, tt.l) plain := stripANSI(out) - assert.Equal(t, tt.wantPlainTextLen, len(plain), "plain text length: got %q", plain) + assert.Len(t, plain, tt.wantPlainTextLen, "plain text length: got %q", plain) for _, want := range tt.wantContains { - assert.True(t, strings.Contains(plain, want), + assert.Contains(t, plain, want, "want %q in %q", want, plain) } }) diff --git a/internal/storage/db.go b/internal/storage/db.go index ed90bf21..a7f8c731 100644 --- a/internal/storage/db.go +++ b/internal/storage/db.go @@ -69,9 +69,9 @@ CREATE TABLE IF NOT EXISTS reviews ( output TEXT NOT NULL, created_at TEXT NOT NULL DEFAULT (datetime('now')), closed INTEGER NOT NULL DEFAULT 0, - high_count INTEGER NOT NULL DEFAULT 0, - medium_count INTEGER NOT NULL DEFAULT 0, - low_count INTEGER NOT NULL DEFAULT 0 + high_count INTEGER, + medium_count INTEGER, + low_count INTEGER ); 
CREATE TABLE IF NOT EXISTS responses ( @@ -832,8 +832,10 @@ func (db *DB) migrate() error { return fmt.Errorf("ensure ci_pr_batch_jobs unique index: %w", err) } - // Migration: add high_count, medium_count, low_count columns to reviews if missing. - // Backfill is performed by BackfillFindingCounts (called below). + // Migration: add nullable high_count, medium_count, low_count columns to + // reviews if missing. NULL means "not yet parsed"; 0 means "parsed and + // no findings". The backfill below converts every NULL to a concrete + // value, after which it has nothing to do on subsequent startups. for _, col := range []string{"high_count", "medium_count", "low_count"} { err = db.QueryRow( `SELECT COUNT(*) FROM pragma_table_info('reviews') WHERE name = ?`, col, @@ -843,16 +845,16 @@ func (db *DB) migrate() error { } if count == 0 { _, err = db.Exec( - fmt.Sprintf(`ALTER TABLE reviews ADD COLUMN %s INTEGER NOT NULL DEFAULT 0`, col), + fmt.Sprintf(`ALTER TABLE reviews ADD COLUMN %s INTEGER`, col), ) if err != nil { return fmt.Errorf("add %s column: %w", col, err) } } } - // Backfill counts for any rows where all three columns are still zero - // AND the output is non-empty. Idempotent: re-running on a fully populated - // DB processes nothing. + // Backfill counts for rows whose columns are NULL (not yet parsed). + // Idempotent: once every row has been processed, the predicate matches + // nothing and the call is a no-op. 
if _, err := db.BackfillFindingCounts(); err != nil { return fmt.Errorf("backfill finding counts: %w", err) } diff --git a/internal/storage/db_filter_test.go b/internal/storage/db_filter_test.go index 85ac2a54..7eab8007 100644 --- a/internal/storage/db_filter_test.go +++ b/internal/storage/db_filter_test.go @@ -1043,7 +1043,7 @@ func TestCountJobStats_FindingAggregation(t *testing.T) { insertReview(rB.ID, "b1", 7, 8, 9) // should NOT count when filtering rA t.Run("aggregates only the filtered repo", func(t *testing.T) { - stats, err := db.CountJobStats("/tmp/repo-a") + stats, err := db.CountJobStats(rA.RootPath) require.NoError(t, err) assert.Equal(t, 3, stats.Done) assert.Equal(t, 3, stats.HighFindings) // 2+1+0 diff --git a/internal/storage/postgres.go b/internal/storage/postgres.go index 81f756b2..08488d43 100644 --- a/internal/storage/postgres.go +++ b/internal/storage/postgres.go @@ -338,8 +338,12 @@ func (p *PgPool) EnsureSchema(ctx context.Context) error { } } if currentVersion < 13 { + // Nullable so the SQLite side can distinguish "not yet + // parsed" (NULL) from "parsed and zero findings" (0) + // during backfill — see BackfillFindingCounts. New rows + // are written with explicit ints, never NULL. 
for _, col := range []string{"high_count", "medium_count", "low_count"} { - stmt := fmt.Sprintf(`ALTER TABLE roborev.reviews ADD COLUMN IF NOT EXISTS %s INTEGER NOT NULL DEFAULT 0`, col) + stmt := fmt.Sprintf(`ALTER TABLE roborev.reviews ADD COLUMN IF NOT EXISTS %s INTEGER`, col) if _, err = p.pool.Exec(ctx, stmt); err != nil { return fmt.Errorf("migrate to v13 (add %s column to reviews): %w", col, err) } @@ -846,7 +850,7 @@ func (p *PgPool) PullReviews(ctx context.Context, excludeMachineID string, known SELECT r.uuid, r.job_uuid, r.agent, r.prompt, r.output, r.closed, r.updated_by_machine_id, r.created_at, r.updated_at, r.id, - r.high_count, r.medium_count, r.low_count + COALESCE(r.high_count, 0), COALESCE(r.medium_count, 0), COALESCE(r.low_count, 0) FROM reviews r WHERE (r.updated_by_machine_id IS NULL OR r.updated_by_machine_id != $1) AND r.job_uuid = ANY($2) diff --git a/internal/storage/reviews.go b/internal/storage/reviews.go index b81db4d7..333ca6d0 100644 --- a/internal/storage/reviews.go +++ b/internal/storage/reviews.go @@ -18,7 +18,7 @@ func (db *DB) GetReviewByJobID(jobID int64) (*Review, error) { var jobFields reviewJobScanFields err := db.QueryRow(` SELECT rv.id, rv.job_id, rv.agent, rv.prompt, rv.output, rv.created_at, rv.closed, rv.uuid, rv.verdict_bool, - rv.high_count, rv.medium_count, rv.low_count, + COALESCE(rv.high_count, 0), COALESCE(rv.medium_count, 0), COALESCE(rv.low_count, 0), j.id, j.repo_id, j.commit_id, j.git_ref, j.branch, j.session_id, j.agent, j.reasoning, j.status, j.enqueued_at, j.started_at, j.finished_at, j.worker_id, j.error, j.model, j.provider, j.requested_model, j.requested_provider, j.job_type, j.review_type, j.patch_id, rp.root_path, rp.name, c.subject, j.token_usage, COALESCE(j.min_severity, '') @@ -58,7 +58,7 @@ func (db *DB) GetReviewByCommitSHA(sha string) (*Review, error) { var jobFields reviewJobScanFields err := db.QueryRow(` SELECT rv.id, rv.job_id, rv.agent, rv.prompt, rv.output, rv.created_at, rv.closed, rv.uuid, 
rv.verdict_bool, - rv.high_count, rv.medium_count, rv.low_count, + COALESCE(rv.high_count, 0), COALESCE(rv.medium_count, 0), COALESCE(rv.low_count, 0), j.id, j.repo_id, j.commit_id, j.git_ref, j.branch, j.session_id, j.agent, j.reasoning, j.status, j.enqueued_at, j.started_at, j.finished_at, j.worker_id, j.error, j.model, j.provider, j.requested_model, j.requested_provider, j.job_type, j.review_type, j.patch_id, rp.root_path, rp.name, c.subject, j.token_usage, COALESCE(j.min_severity, '') @@ -96,7 +96,7 @@ func (db *DB) GetReviewByCommitSHA(sha string) (*Review, error) { func (db *DB) GetAllReviewsForGitRef(gitRef string) ([]Review, error) { rows, err := db.Query(` SELECT rv.id, rv.job_id, rv.agent, rv.prompt, rv.output, rv.created_at, rv.closed, - rv.high_count, rv.medium_count, rv.low_count + COALESCE(rv.high_count, 0), COALESCE(rv.medium_count, 0), COALESCE(rv.low_count, 0) FROM reviews rv JOIN review_jobs j ON j.id = rv.job_id WHERE j.git_ref = ? @@ -126,7 +126,7 @@ func (db *DB) GetAllReviewsForGitRef(gitRef string) ([]Review, error) { func (db *DB) GetRecentReviewsForRepo(repoID int64, limit int) ([]Review, error) { rows, err := db.Query(` SELECT rv.id, rv.job_id, rv.agent, rv.prompt, rv.output, rv.created_at, rv.closed, - rv.high_count, rv.medium_count, rv.low_count + COALESCE(rv.high_count, 0), COALESCE(rv.medium_count, 0), COALESCE(rv.low_count, 0) FROM reviews rv JOIN review_jobs j ON j.id = rv.job_id WHERE j.repo_id = ? 
@@ -361,7 +361,7 @@ func (db *DB) GetJobsWithReviewsByIDs(jobIDs []int64) (map[int64]JobWithReview, // Fetch reviews for these jobs reviewQuery := fmt.Sprintf(` SELECT rv.id, rv.job_id, rv.agent, rv.prompt, rv.output, rv.created_at, rv.closed, rv.verdict_bool, - rv.high_count, rv.medium_count, rv.low_count + COALESCE(rv.high_count, 0), COALESCE(rv.medium_count, 0), COALESCE(rv.low_count, 0) FROM reviews rv WHERE rv.job_id IN (%s) `, inClause) @@ -407,7 +407,7 @@ func (db *DB) GetReviewByID(reviewID int64) (*Review, error) { err := db.QueryRow(` SELECT id, job_id, agent, prompt, output, created_at, closed, - high_count, medium_count, low_count + COALESCE(high_count, 0), COALESCE(medium_count, 0), COALESCE(low_count, 0) FROM reviews WHERE id = ? `, reviewID).Scan(&r.ID, &r.JobID, &r.Agent, &r.Prompt, &r.Output, &fields.CreatedAt, &fields.Closed, &fields.HighCount, &fields.MediumCount, &fields.LowCount) diff --git a/internal/storage/schemas/postgres_v13.sql b/internal/storage/schemas/postgres_v13.sql index 67be6c44..3a78167e 100644 --- a/internal/storage/schemas/postgres_v13.sql +++ b/internal/storage/schemas/postgres_v13.sql @@ -80,9 +80,9 @@ CREATE TABLE IF NOT EXISTS roborev.reviews ( updated_by_machine_id UUID NOT NULL, created_at TIMESTAMP WITH TIME ZONE DEFAULT NOW(), updated_at TIMESTAMP WITH TIME ZONE DEFAULT NOW(), - high_count INTEGER NOT NULL DEFAULT 0, - medium_count INTEGER NOT NULL DEFAULT 0, - low_count INTEGER NOT NULL DEFAULT 0 + high_count INTEGER, + medium_count INTEGER, + low_count INTEGER ); CREATE TABLE IF NOT EXISTS roborev.responses ( diff --git a/internal/storage/summary.go b/internal/storage/summary.go index 661bfd8c..d6aeae0a 100644 --- a/internal/storage/summary.go +++ b/internal/storage/summary.go @@ -538,14 +538,16 @@ func (db *DB) BackfillVerdictBool() (int, error) { } // BackfillFindingCounts populates high_count, medium_count, and low_count for -// reviews where all three are zero AND the output is non-empty (i.e. 
any review -// that was inserted before the columns existed and may have findings to count). +// reviews where any column is still NULL (i.e. rows inserted before the columns +// existed). Every matching row is written unconditionally so that NULL becomes +// a concrete value — including 0 for parsed-clean reviews — preventing the +// predicate from matching the same rows on subsequent startups. // Returns the number of rows updated. func (db *DB) BackfillFindingCounts() (int, error) { rows, err := db.Query(` SELECT id, output FROM reviews WHERE output != '' - AND high_count = 0 AND medium_count = 0 AND low_count = 0 + AND (high_count IS NULL OR medium_count IS NULL OR low_count IS NULL) `) if err != nil { return 0, err @@ -564,12 +566,6 @@ func (db *DB) BackfillFindingCounts() (int, error) { return 0, err } h, m, l := CountFindings(output) - // Only enqueue updates for rows that actually have findings; - // rows with all-zero counts don't need a write and would otherwise - // be re-processed on every startup (the SELECT predicate matches them). - if h+m+l == 0 { - continue - } updates = append(updates, pending{id: id, h: h, m: m, l: l}) } if err := rows.Err(); err != nil { diff --git a/internal/storage/summary_test.go b/internal/storage/summary_test.go index 03c8361e..3470421a 100644 --- a/internal/storage/summary_test.go +++ b/internal/storage/summary_test.go @@ -465,18 +465,20 @@ func TestBackfillFindingCounts(t *testing.T) { - Medium: missing error check - Low - typo in comment` - // Insert review row with zero counts (simulates legacy data) + // Insert review row with NULL counts (simulates a row from before the + // columns existed — what the migration ALTER TABLE ADD COLUMN leaves + // behind for pre-existing rows). 
_, err = db.Exec( `INSERT INTO reviews (job_id, agent, prompt, output, high_count, medium_count, low_count) - VALUES (?, ?, ?, ?, 0, 0, 0)`, + VALUES (?, ?, ?, ?, NULL, NULL, NULL)`, job.ID, "test", "p", output, ) require.NoError(t, err) - // Insert a "clean" review (non-empty output but no severity markers). - // This row will match the SELECT predicate but CountFindings returns - // (0,0,0), so it must be skipped — otherwise we'd re-process it on - // every daemon startup forever. + // Insert a "clean" pre-existing review (non-empty output, no findings). + // CountFindings returns (0,0,0), and we still write that back so the + // columns become non-NULL — preventing this row from re-matching the + // IS NULL predicate on every daemon startup. cleanCommit, _ := db.GetOrCreateCommit(repo.ID, "def456", "bob", "tidy", time.Now()) cleanJob, err := db.EnqueueJob(EnqueueOpts{ RepoID: repo.ID, CommitID: cleanCommit.ID, GitRef: "def456", Agent: "test", @@ -484,14 +486,14 @@ func TestBackfillFindingCounts(t *testing.T) { require.NoError(t, err) _, err = db.Exec( `INSERT INTO reviews (job_id, agent, prompt, output, high_count, medium_count, low_count) - VALUES (?, ?, ?, ?, 0, 0, 0)`, + VALUES (?, ?, ?, ?, NULL, NULL, NULL)`, cleanJob.ID, "test", "p", "LGTM, no issues found.", ) require.NoError(t, err) updated, err := db.BackfillFindingCounts() require.NoError(t, err) - assert.Equal(t, 1, updated, "should backfill only the row with findings") + assert.Equal(t, 2, updated, "should backfill both NULL rows (one with findings, one clean)") var h, m, l int require.NoError(t, db.QueryRow( @@ -501,7 +503,8 @@ func TestBackfillFindingCounts(t *testing.T) { assert.Equal(t, 1, m) assert.Equal(t, 1, l) - // Clean review keeps (0,0,0) — was skipped, not corrupted. + // Clean review now has explicit (0,0,0) instead of NULL — non-NULL means + // "parsed and confirmed empty", and the predicate no longer matches it. 
require.NoError(t, db.QueryRow( `SELECT high_count, medium_count, low_count FROM reviews WHERE job_id = ?`, cleanJob.ID, ).Scan(&h, &m, &l)) @@ -509,7 +512,8 @@ func TestBackfillFindingCounts(t *testing.T) { assert.Equal(t, 0, m) assert.Equal(t, 0, l) - // Idempotent: a second run touches no rows (clean row stays skipped). + // Idempotent: a second run touches no rows because every row's columns + // are now non-NULL. updated, err = db.BackfillFindingCounts() require.NoError(t, err) assert.Equal(t, 0, updated, "second run should be no-op") diff --git a/internal/storage/sync.go b/internal/storage/sync.go index af863f50..628294d0 100644 --- a/internal/storage/sync.go +++ b/internal/storage/sync.go @@ -467,7 +467,7 @@ func (db *DB) GetReviewsToSync(machineID string, limit int) ([]SyncableReview, e r.id, r.uuid, r.job_id, j.uuid, r.agent, r.prompt, r.output, r.closed, r.updated_by_machine_id, r.created_at, r.updated_at, - r.high_count, r.medium_count, r.low_count + COALESCE(r.high_count, 0), COALESCE(r.medium_count, 0), COALESCE(r.low_count, 0) FROM reviews r JOIN review_jobs j ON r.job_id = j.id WHERE r.updated_by_machine_id = ? From 98512c9da379b07ebe29f0b4fedd30370f8b1ce3 Mon Sep 17 00:00:00 2001 From: Phillip Cloud <417981+cpcloud@users.noreply.github.com> Date: Sun, 26 Apr 2026 12:59:30 -0400 Subject: [PATCH 15/17] test(tui): cover derefOrZero nil and findings task-column backfill Adds targeted unit tests to lift coverage on PR #671: - derefOrZero(nil) returns 0 (the previously uncovered branch). - migrateColumnConfig backfills 'findings' into a custom TaskColumnOrder (after 'parent' or appended), and leaves an already-migrated TaskColumnOrder alone. 
--- cmd/roborev/tui/queue_test.go | 38 ++++++++++++++++++++------ cmd/roborev/tui/severity_badge_test.go | 8 ++++++ 2 files changed, 38 insertions(+), 8 deletions(-) diff --git a/cmd/roborev/tui/queue_test.go b/cmd/roborev/tui/queue_test.go index ac337e75..07a34aae 100644 --- a/cmd/roborev/tui/queue_test.go +++ b/cmd/roborev/tui/queue_test.go @@ -2162,14 +2162,16 @@ func TestClosedKeyShortcut(t *testing.T) { func TestMigrateColumnConfig(t *testing.T) { tests := []struct { - name string - columnOrder []string - hiddenCols []string - version int - wantDirty bool - wantColOrder []string - wantHidden []string - wantVersion int + name string + columnOrder []string + taskColumnOrder []string + hiddenCols []string + version int + wantDirty bool + wantColOrder []string + wantTaskColOrder []string + wantHidden []string + wantVersion int }{ { name: "nil config unchanged", @@ -2248,18 +2250,38 @@ func TestMigrateColumnConfig(t *testing.T) { wantHidden: []string{"branch"}, wantVersion: 1, }, + { + name: "task column order has findings inserted after parent", + taskColumnOrder: []string{"id", "ref", "agent", "status", "parent", "created"}, + wantDirty: true, + wantTaskColOrder: []string{"id", "ref", "agent", "status", "parent", "findings", "created"}, + }, + { + name: "task column order without parent appends findings", + taskColumnOrder: []string{"id", "ref", "agent", "status", "created"}, + wantDirty: true, + wantTaskColOrder: []string{"id", "ref", "agent", "status", "created", "findings"}, + }, + { + name: "task column order already has findings is preserved", + taskColumnOrder: []string{"id", "ref", "parent", "findings", "agent"}, + wantDirty: false, + wantTaskColOrder: []string{"id", "ref", "parent", "findings", "agent"}, + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { cfg := &config.Config{ ColumnOrder: slices.Clone(tt.columnOrder), + TaskColumnOrder: slices.Clone(tt.taskColumnOrder), HiddenColumns: slices.Clone(tt.hiddenCols), ColumnConfigVersion: 
tt.version, } dirty := migrateColumnConfig(cfg) assert.Equal(t, tt.wantDirty, dirty) assert.True(t, slices.Equal(cfg.ColumnOrder, tt.wantColOrder)) + assert.True(t, slices.Equal(cfg.TaskColumnOrder, tt.wantTaskColOrder)) assert.True(t, slices.Equal(cfg.HiddenColumns, tt.wantHidden)) assert.Equal(t, tt.wantVersion, cfg.ColumnConfigVersion) }) diff --git a/cmd/roborev/tui/severity_badge_test.go b/cmd/roborev/tui/severity_badge_test.go index 1442807c..f3a00c6a 100644 --- a/cmd/roborev/tui/severity_badge_test.go +++ b/cmd/roborev/tui/severity_badge_test.go @@ -6,6 +6,14 @@ import ( "github.com/stretchr/testify/assert" ) +func TestDerefOrZero(t *testing.T) { + assert.Equal(t, 0, derefOrZero(nil), "nil pointer should yield 0") + v := 42 + assert.Equal(t, 42, derefOrZero(&v), "non-nil pointer should yield underlying value") + zero := 0 + assert.Equal(t, 0, derefOrZero(&zero), "explicit 0 pointer should yield 0") +} + func TestRenderSeverityBadge(t *testing.T) { tests := []struct { name string From 61b8d7085334483ac275dd4fddbd3c4ffa540a80 Mon Sep 17 00:00:00 2001 From: Phillip Cloud <417981+cpcloud@users.noreply.github.com> Date: Sun, 26 Apr 2026 13:13:07 -0400 Subject: [PATCH 16/17] feat(tui): tighten finding badge to '3/2/5' format MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Drop the H/M/L letters from the in-row badge — they were doubling up with the column header and the colour already encodes severity. Slashes render in the same dim grey as zero counts so non-zero values pop in red/yellow/blue. Plain-text width drops from 8 to 5 for typical single-digit counts; the column header 'Findings' (still 8 chars) keeps the column from getting too narrow. 
--- cmd/roborev/tui/render_queue.go | 2 +- cmd/roborev/tui/severity_badge.go | 23 +++++++++--------- cmd/roborev/tui/severity_badge_test.go | 32 ++++++++++---------------- 3 files changed, 25 insertions(+), 32 deletions(-) diff --git a/cmd/roborev/tui/render_queue.go b/cmd/roborev/tui/render_queue.go index 47481cd6..0c09e274 100644 --- a/cmd/roborev/tui/render_queue.go +++ b/cmd/roborev/tui/render_queue.go @@ -330,7 +330,7 @@ func (m model) renderQueueView() string { colQueued: 12, colElapsed: 8, colPF: 3, // "P/F" header = 3 - colFindings: 8, // "Findings" header = 8, badge "H3 M2 L5" = 8 + colFindings: 8, // "Findings" header = 8 (badge "3/2/5" = 5; header dominates) colHandled: max(contentWidth[colHandled], 6), // "Closed" header = 6 colAgent: min(max(contentWidth[colAgent], 5), 12), // "Agent" header = 5, cap at 12 colSessionID: min(max(contentWidth[colSessionID], 7), 12), // "Session" header = 7, cap at 12 diff --git a/cmd/roborev/tui/severity_badge.go b/cmd/roborev/tui/severity_badge.go index f14c9048..f71ebf59 100644 --- a/cmd/roborev/tui/severity_badge.go +++ b/cmd/roborev/tui/severity_badge.go @@ -1,7 +1,7 @@ package tui import ( - "fmt" + "strconv" "strings" ) @@ -15,17 +15,18 @@ func derefOrZero(p *int) int { return *p } -// renderSeverityBadge formats finding counts as "H3 M2 L5" with severity- -// colored letters/numbers. Zero counts render in dim grey so columns stay -// visually stable but de-emphasised. Plain-text width is always at least -// 8 characters (single digits) and grows by one per extra digit per slot. +// renderSeverityBadge formats finding counts as "3/2/5" with severity- +// colored numbers (red high, yellow medium, blue low) and dim slashes. +// Zero counts render in the same dim grey as the slashes so non-zero +// values pop visually. Plain-text width is 5 for all-single-digit +// counts (e.g. "3/2/5") and grows by one per extra digit per slot. 
func renderSeverityBadge(h, m, l int) string { parts := []string{ - formatSeveritySlot("H", h, severityHighStyle), - formatSeveritySlot("M", m, severityMediumStyle), - formatSeveritySlot("L", l, severityLowStyle), + formatSeveritySlot(h, severityHighStyle), + formatSeveritySlot(m, severityMediumStyle), + formatSeveritySlot(l, severityLowStyle), } - return strings.Join(parts, " ") + return strings.Join(parts, severityZeroStyle.Render("/")) } // lipglossStyleRenderer is the minimal interface from lipgloss.Style that @@ -34,8 +35,8 @@ type lipglossStyleRenderer interface { Render(strs ...string) string } -func formatSeveritySlot(letter string, count int, active lipglossStyleRenderer) string { - text := fmt.Sprintf("%s%d", letter, count) +func formatSeveritySlot(count int, active lipglossStyleRenderer) string { + text := strconv.Itoa(count) if count == 0 { return severityZeroStyle.Render(text) } diff --git a/cmd/roborev/tui/severity_badge_test.go b/cmd/roborev/tui/severity_badge_test.go index f3a00c6a..a2097574 100644 --- a/cmd/roborev/tui/severity_badge_test.go +++ b/cmd/roborev/tui/severity_badge_test.go @@ -16,38 +16,30 @@ func TestDerefOrZero(t *testing.T) { func TestRenderSeverityBadge(t *testing.T) { tests := []struct { - name string - h, m, l int - wantContains []string - wantPlainTextLen int + name string + h, m, l int + wantPlain string }{ { - name: "all zero", - wantContains: []string{"H0", "M0", "L0"}, - wantPlainTextLen: 8, // "H0 M0 L0" + name: "all zero", + wantPlain: "0/0/0", }, { - name: "single digits", - h: 3, m: 2, l: 5, - wantContains: []string{"H3", "M2", "L5"}, - wantPlainTextLen: 8, + name: "single digits", + h: 3, m: 2, l: 5, + wantPlain: "3/2/5", }, { - name: "double digits", - h: 12, m: 3, l: 8, - wantContains: []string{"H12", "M3", "L8"}, - wantPlainTextLen: 9, + name: "double digits", + h: 12, m: 3, l: 8, + wantPlain: "12/3/8", }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { out := renderSeverityBadge(tt.h, tt.m, tt.l) 
plain := stripANSI(out) - assert.Len(t, plain, tt.wantPlainTextLen, "plain text length: got %q", plain) - for _, want := range tt.wantContains { - assert.Contains(t, plain, want, - "want %q in %q", want, plain) - } + assert.Equal(t, tt.wantPlain, plain) }) } } From 042d9211396b8c874b31693081850b94fb214c55 Mon Sep 17 00:00:00 2001 From: Phillip Cloud <417981+cpcloud@users.noreply.github.com> Date: Sun, 26 Apr 2026 13:30:43 -0400 Subject: [PATCH 17/17] test(storage): add v13 postgres migration test + backfill edge cases Pushes patch coverage on PR #671 by exercising paths that the prior suite skipped: - TestPostgresMigration_FindingCounts (postgres-tagged): bootstraps a v12 schema, seeds a review, runs EnsureSchema to migrate to v13, asserts pre-existing rows have NULL counts, and verifies UpsertReview + PullReviews handle the new columns end-to-end (including the COALESCE on legacy NULLs). - Embeds postgres_v12.sql in the migration test helper so future v12 -> N migrations can start there. - Pins the existing TestPostgresMigration_SkipReasonAndClassify to v11 explicitly; the old pgSchemaVersion-1 form silently broke when the schema bumped to v13 because no v12 schema was embedded. - TestBackfillFindingCounts_PartialNull, _SkipsEmptyOutput, and _ClearsSyncedAt: cover the OR branch of the IS NULL predicate, the output != '' guard, and the synced_at = NULL re-queue. 
--- internal/storage/postgres_migration_test.go | 107 ++++++++++++++++++- internal/storage/summary_test.go | 111 ++++++++++++++++++++ 2 files changed, 215 insertions(+), 3 deletions(-) diff --git a/internal/storage/postgres_migration_test.go b/internal/storage/postgres_migration_test.go index 0a25f921..3e0af5f0 100644 --- a/internal/storage/postgres_migration_test.go +++ b/internal/storage/postgres_migration_test.go @@ -7,6 +7,7 @@ import ( _ "embed" "strings" "testing" + "time" "github.com/google/uuid" "github.com/jackc/pgx/v5/pgxpool" @@ -16,6 +17,9 @@ import ( //go:embed schemas/postgres_v11.sql var postgresV11Schema string +//go:embed schemas/postgres_v12.sql +var postgresV12Schema string + // openTestPgPoolRawAtVersion bootstraps a fresh Postgres test pool at the // given older schema version by running only the corresponding embedded // schema file and seeding schema_version. It deliberately does NOT call @@ -38,6 +42,8 @@ func openTestPgPoolRawAtVersion(t *testing.T, version int) *PgPool { switch version { case 11: schemaSQL = postgresV11Schema + case 12: + schemaSQL = postgresV12Schema default: t.Fatalf("openTestPgPoolRawAtVersion: no embedded schema for version %d", version) } @@ -73,9 +79,11 @@ func openTestPgPoolRawAtVersion(t *testing.T, version int) *PgPool { func pgxPool(p *PgPool) *pgxpool.Pool { return p.pool } func TestPostgresMigration_SkipReasonAndClassify(t *testing.T) { - prevVersion := pgSchemaVersion - 1 - - oldPool := openTestPgPoolRawAtVersion(t, prevVersion) + // Pinned to v11: validates the v11 → v12 migration step (skip_reason + // column, classify support, auto_design dedup indexes). Cannot use + // pgSchemaVersion - 1, because once newer migrations exist the helper + // would start at the wrong version. 
+ oldPool := openTestPgPoolRawAtVersion(t, 11) ctx := context.Background() var repoID int @@ -106,3 +114,96 @@ func TestPostgresMigration_SkipReasonAndClassify(t *testing.T) { `, uuid.New().String(), repoID, machineID) require.NoError(t, err) } + +// TestPostgresMigration_FindingCounts validates the v12 → v13 migration: +// pre-existing reviews get the three nullable count columns added with NULL +// values, new UpsertReview calls persist explicit ints, and PullReviews +// returns 0 (via COALESCE) for the still-NULL legacy rows. +func TestPostgresMigration_FindingCounts(t *testing.T) { + oldPool := openTestPgPoolRawAtVersion(t, 12) + ctx := context.Background() + + var repoID int + require.NoError(t, pgxPool(oldPool).QueryRow(ctx, + `INSERT INTO roborev.repos (identity) VALUES ($1) RETURNING id`, + "git@example.com:owner/findings-repo.git").Scan(&repoID)) + machineID := uuid.New().String() + jobUUID := uuid.New().String() + legacyReviewUUID := uuid.New().String() + + _, err := pgxPool(oldPool).Exec(ctx, ` + INSERT INTO roborev.review_jobs + (uuid, repo_id, git_ref, agent, status, enqueued_at, source_machine_id) + VALUES ($1, $2, 'legacy-sha', 'test', 'done', NOW(), $3) + `, jobUUID, repoID, machineID) + require.NoError(t, err) + _, err = pgxPool(oldPool).Exec(ctx, ` + INSERT INTO roborev.reviews + (uuid, job_uuid, agent, prompt, output, updated_by_machine_id) + VALUES ($1, $2, 'test', 'p', 'legacy output (no count cols)', $3) + `, legacyReviewUUID, jobUUID, machineID) + require.NoError(t, err) + + // Run EnsureSchema by opening a fresh pool, which advances v12 → v13. + pg := openTestPgPool(t) + defer pg.Close() + + // Legacy row: columns exist now, but they're NULL because the v13 + // migration adds them as nullable (NULL == "not yet parsed"). 
+ var hi, me, lo *int + require.NoError(t, pgxPool(pg).QueryRow(ctx, + `SELECT high_count, medium_count, low_count + FROM roborev.reviews WHERE uuid = $1`, legacyReviewUUID, + ).Scan(&hi, &me, &lo)) + require.Nil(t, hi, "legacy review should have NULL high_count after migration") + require.Nil(t, me, "legacy review should have NULL medium_count") + require.Nil(t, lo, "legacy review should have NULL low_count") + + // New write goes in with explicit non-NULL counts. + freshUUID := uuid.New().String() + require.NoError(t, pg.UpsertReview(ctx, SyncableReview{ + UUID: freshUUID, + JobUUID: jobUUID, + Agent: "test", + Prompt: "p", + Output: "fresh review", + Closed: false, + UpdatedByMachineID: machineID, + CreatedAt: mustNow(), + HighCount: 3, + MediumCount: 2, + LowCount: 5, + })) + var hh, mm, ll int + require.NoError(t, pgxPool(pg).QueryRow(ctx, + `SELECT high_count, medium_count, low_count + FROM roborev.reviews WHERE uuid = $1`, freshUUID, + ).Scan(&hh, &mm, &ll)) + require.Equal(t, 3, hh) + require.Equal(t, 2, mm) + require.Equal(t, 5, ll) + + // PullReviews should COALESCE the legacy NULLs to 0 and return the + // fresh row's explicit values. excludeMachineID must be a valid UUID + // (the type comparison happens server-side); use a fresh one not + // equal to machineID so both rows are returned. 
+ pulled, _, err := pg.PullReviews(ctx, uuid.New().String(), + []string{jobUUID}, "", 100) + require.NoError(t, err) + byUUID := map[string]PulledReview{} + for _, r := range pulled { + byUUID[r.UUID] = r + } + legacy, ok := byUUID[legacyReviewUUID] + require.True(t, ok, "legacy review missing from PullReviews") + require.Equal(t, 0, legacy.HighCount) + require.Equal(t, 0, legacy.MediumCount) + require.Equal(t, 0, legacy.LowCount) + fresh, ok := byUUID[freshUUID] + require.True(t, ok, "fresh review missing from PullReviews") + require.Equal(t, 3, fresh.HighCount) + require.Equal(t, 2, fresh.MediumCount) + require.Equal(t, 5, fresh.LowCount) +} + +func mustNow() time.Time { return time.Now().UTC() } diff --git a/internal/storage/summary_test.go b/internal/storage/summary_test.go index 3470421a..2569b682 100644 --- a/internal/storage/summary_test.go +++ b/internal/storage/summary_test.go @@ -1,6 +1,7 @@ package storage import ( + "database/sql" "testing" "time" @@ -519,6 +520,116 @@ func TestBackfillFindingCounts(t *testing.T) { assert.Equal(t, 0, updated, "second run should be no-op") } +// TestBackfillFindingCounts_PartialNull covers the case where only some of +// the three columns are NULL. The predicate uses OR, so any column being +// NULL flags the row for re-parsing — the write then sets all three to +// concrete values from CountFindings. +func TestBackfillFindingCounts_PartialNull(t *testing.T) { + db := openTestDB(t) + defer db.Close() + + repo, _ := db.GetOrCreateRepo("/tmp/partialrepo", "partialrepo") + commit, _ := db.GetOrCreateCommit(repo.ID, "p1", "alice", "msg", time.Now()) + job, err := db.EnqueueJob(EnqueueOpts{ + RepoID: repo.ID, CommitID: commit.ID, GitRef: "p1", Agent: "test", + }) + require.NoError(t, err) + + output := "- Medium: leaky goroutine\n- Low: typo" + + // Only high_count is NULL — medium and low were already populated + // (e.g. 
by a partial backfill run that was interrupted, or by a + // future migration that added columns piecemeal). The predicate + // still picks this up via the OR branch. + _, err = db.Exec( + `INSERT INTO reviews (job_id, agent, prompt, output, high_count, medium_count, low_count) + VALUES (?, 'test', 'p', ?, NULL, 99, 99)`, + job.ID, output, + ) + require.NoError(t, err) + + updated, err := db.BackfillFindingCounts() + require.NoError(t, err) + assert.Equal(t, 1, updated) + + // All three columns now reflect the parsed counts (medium=1, low=1), + // overwriting the stale 99s. + var h, m, l int + require.NoError(t, db.QueryRow( + `SELECT high_count, medium_count, low_count FROM reviews WHERE job_id = ?`, + job.ID).Scan(&h, &m, &l)) + assert.Equal(t, 0, h) + assert.Equal(t, 1, m) + assert.Equal(t, 1, l) +} + +// TestBackfillFindingCounts_SkipsEmptyOutput confirms the predicate's +// `output != ''` guard. A NULL-counts row whose output is empty stays +// untouched — there's nothing to parse, and writing 0/0/0 would obscure +// the fact that the review never ran. +func TestBackfillFindingCounts_SkipsEmptyOutput(t *testing.T) { + db := openTestDB(t) + defer db.Close() + + repo, _ := db.GetOrCreateRepo("/tmp/emptyrepo", "emptyrepo") + commit, _ := db.GetOrCreateCommit(repo.ID, "e1", "alice", "msg", time.Now()) + job, err := db.EnqueueJob(EnqueueOpts{ + RepoID: repo.ID, CommitID: commit.ID, GitRef: "e1", Agent: "test", + }) + require.NoError(t, err) + + _, err = db.Exec( + `INSERT INTO reviews (job_id, agent, prompt, output, high_count, medium_count, low_count) + VALUES (?, 'test', 'p', '', NULL, NULL, NULL)`, + job.ID, + ) + require.NoError(t, err) + + updated, err := db.BackfillFindingCounts() + require.NoError(t, err) + assert.Equal(t, 0, updated, "empty-output rows should not be backfilled") + + // Columns remain NULL. 
+ var h, m, l sql.NullInt64 + require.NoError(t, db.QueryRow( + `SELECT high_count, medium_count, low_count FROM reviews WHERE job_id = ?`, + job.ID).Scan(&h, &m, &l)) + assert.False(t, h.Valid) + assert.False(t, m.Valid) + assert.False(t, l.Valid) +} + +// TestBackfillFindingCounts_ClearsSyncedAt confirms backfilled rows are +// re-queued for sync. Without this, a remote PostgreSQL with stale 0/0/0 +// rows (or NULL rows) would never receive the corrected counts. +func TestBackfillFindingCounts_ClearsSyncedAt(t *testing.T) { + db := openTestDB(t) + defer db.Close() + + repo, _ := db.GetOrCreateRepo("/tmp/syncrepo", "syncrepo") + commit, _ := db.GetOrCreateCommit(repo.ID, "s1", "alice", "msg", time.Now()) + job, err := db.EnqueueJob(EnqueueOpts{ + RepoID: repo.ID, CommitID: commit.ID, GitRef: "s1", Agent: "test", + }) + require.NoError(t, err) + + _, err = db.Exec( + `INSERT INTO reviews (job_id, agent, prompt, output, high_count, medium_count, low_count, synced_at) + VALUES (?, 'test', 'p', '- High: bug', NULL, NULL, NULL, '2026-04-01T00:00:00Z')`, + job.ID, + ) + require.NoError(t, err) + + _, err = db.BackfillFindingCounts() + require.NoError(t, err) + + var syncedAt sql.NullString + require.NoError(t, db.QueryRow( + `SELECT synced_at FROM reviews WHERE job_id = ?`, job.ID, + ).Scan(&syncedAt)) + assert.False(t, syncedAt.Valid, "synced_at must be cleared so the row re-syncs") +} + func TestPercentile(t *testing.T) { tests := []struct { name string