From 4d799b0e0af5aaf654a8b89edf26c25bcdeac6b3 Mon Sep 17 00:00:00 2001 From: patrick szymkowiak <52030887+pszymkowiak@users.noreply.github.com> Date: Wed, 18 Mar 2026 10:41:44 +0100 Subject: [PATCH 01/30] fix: remove all decorative emojis from CLI output (#687) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * fix: remove decorative emojis from CLI output (#511) Replace decorative emojis with plain text to reduce token waste. Keep functional symbols (⚠️ ✓ ❌ ✅ ℹ️) that convey meaning in fewer tokens. Signed-off-by: Patrick Szymkowiak Signed-off-by: Patrick szymkowiak * fix: remove remaining decorative emojis from find_cmd and formatter Missed in initial emoji cleanup pass: 📁 in find_cmd.rs and parser/formatter.rs Signed-off-by: Patrick szymkowiak * fix: remove all decorative emojis from CLI output (#511) Replace emojis with plain text tokens across all production files for better LLM compatibility. Test fixtures and external tool detection patterns (e.g. Black's "All done!") are preserved. Signed-off-by: Patrick Szymkowiak Signed-off-by: Patrick szymkowiak * fix: remove last decorative emoji from next_cmd.rs Remove ⚡ from Next.js Build header, missed in previous passes. Signed-off-by: Patrick szymkowiak * fix: remove remaining emojis from gh_cmd.rs and init.rs Replace production emojis: - gh_cmd.rs: 🟣→[merged], ⚪→[unknown]/[pending], ⭐→removed, 🔱→removed - init.rs: ⚪→[--] for "not found" status indicators Signed-off-by: Patrick szymkowiak * fix: remove all checkmark emojis from CLI output Replace ✓ (U+2713) with plain text across 19 files: - "ok ✓" → "ok" (git add/commit/push/pull) - "✓ cargo test: ..." → "cargo test: ..." (all tool summaries) - Preserved ✓ in input detection patterns and test fixtures LLMs cannot interpret emoji semantics; plain text is clearer. 
Signed-off-by: Patrick szymkowiak --------- Signed-off-by: Patrick Szymkowiak Signed-off-by: Patrick szymkowiak Signed-off-by: Patrick Szymkowiak --- src/cargo_cmd.rs | 57 ++++++++++++------------ src/cc_economics.rs | 10 ++--- src/ccusage.rs | 6 +-- src/container.rs | 60 +++++++++++++------------ src/deps.rs | 10 ++--- src/diff_cmd.rs | 8 ++-- src/display_helpers.rs | 14 +++--- src/env_cmd.rs | 12 ++--- src/find_cmd.rs | 2 +- src/format_cmd.rs | 11 ++--- src/gain.rs | 6 +-- src/gh_cmd.rs | 97 +++++++++++++++++++---------------------- src/git.rs | 64 +++++++++++++-------------- src/go_cmd.rs | 18 ++++---- src/golangci_cmd.rs | 4 +- src/grep_cmd.rs | 6 +-- src/init.rs | 79 +++++++++++++++++---------------- src/lint_cmd.rs | 12 ++--- src/log_cmd.rs | 12 ++--- src/ls.rs | 4 +- src/main.rs | 8 ++-- src/next_cmd.rs | 10 ++--- src/npm_cmd.rs | 4 +- src/parser/README.md | 2 +- src/parser/formatter.rs | 20 ++++----- src/pip_cmd.rs | 5 +-- src/pnpm_cmd.rs | 4 +- src/prettier_cmd.rs | 10 ++--- src/prisma_cmd.rs | 16 +++---- src/pytest_cmd.rs | 8 ++-- src/ruff_cmd.rs | 16 +++---- src/runner.rs | 10 ++--- src/summary.rs | 36 +++++++-------- src/trust.rs | 6 +-- src/tsc_cmd.rs | 2 +- src/vitest_cmd.rs | 2 +- src/wget_cmd.rs | 32 +++++--------- 37 files changed, 334 insertions(+), 349 deletions(-) diff --git a/src/cargo_cmd.rs b/src/cargo_cmd.rs index 3e963698..63eea4b7 100644 --- a/src/cargo_cmd.rs +++ b/src/cargo_cmd.rs @@ -264,7 +264,7 @@ fn filter_cargo_install(output: &str) -> String { // Already installed / up to date if already_installed { let info = ignored_line.split('`').nth(1).unwrap_or(&ignored_line); - return format!("✓ cargo install: {} already installed", info); + return format!("cargo install: {} already installed", info); } // Errors @@ -313,10 +313,7 @@ fn filter_cargo_install(output: &str) -> String { // Success let crate_info = format_crate_info(&installed_crate, &installed_version, "package"); - let mut result = format!( - "✓ cargo install ({}, {} deps 
compiled)", - crate_info, compiled - ); + let mut result = format!("cargo install ({}, {} deps compiled)", crate_info, compiled); for line in &replaced_lines { result.push_str(&format!("\n {}", line)); @@ -502,7 +499,7 @@ fn filter_cargo_nextest(output: &str) -> String { } else { format!("{}, {}s", binary_text, duration) }; - return format!("✓ cargo nextest: {} ({})", parts.join(", "), meta); + return format!("cargo nextest: {} ({})", parts.join(", "), meta); } // With failures - show failure details then summary @@ -625,7 +622,7 @@ fn filter_cargo_build(output: &str) -> String { } if error_count == 0 && warnings == 0 { - return format!("✓ cargo build ({} crates compiled)", compiled); + return format!("cargo build ({} crates compiled)", compiled); } let mut result = String::new(); @@ -739,11 +736,11 @@ impl AggregatedTestResult { if self.has_duration { format!( - "✓ cargo test: {} ({}, {:.2}s)", + "cargo test: {} ({}, {:.2}s)", counts, suite_text, self.duration_secs ) } else { - format!("✓ cargo test: {} ({})", counts, suite_text) + format!("cargo test: {} ({})", counts, suite_text) } } } @@ -831,7 +828,7 @@ fn filter_cargo_test(output: &str) -> String { // Fallback: use original behavior if regex failed for line in &summary_lines { - result.push_str(&format!("✓ {}\n", line)); + result.push_str(&format!("{}\n", line)); } return result.trim().to_string(); } @@ -931,7 +928,7 @@ fn filter_cargo_clippy(output: &str) -> String { } if error_count == 0 && warning_count == 0 { - return "✓ cargo clippy: No issues found".to_string(); + return "cargo clippy: No issues found".to_string(); } let mut result = String::new(); @@ -1103,7 +1100,7 @@ mod tests { Finished dev [unoptimized + debuginfo] target(s) in 15.23s "#; let result = filter_cargo_build(output); - assert!(result.contains("✓ cargo build")); + assert!(result.contains("cargo build")); assert!(result.contains("3 crates compiled")); } @@ -1139,7 +1136,7 @@ test result: ok. 
15 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out; fin "#; let result = filter_cargo_test(output); assert!( - result.contains("✓ cargo test: 15 passed (1 suite, 0.01s)"), + result.contains("cargo test: 15 passed (1 suite, 0.01s)"), "Expected compact format, got: {}", result ); @@ -1196,7 +1193,7 @@ test result: ok. 32 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out; fin "#; let result = filter_cargo_test(output); assert!( - result.contains("✓ cargo test: 137 passed (4 suites, 1.45s)"), + result.contains("cargo test: 137 passed (4 suites, 1.45s)"), "Expected aggregated format, got: {}", result ); @@ -1260,7 +1257,7 @@ test result: ok. 0 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out; fini "#; let result = filter_cargo_test(output); assert!( - result.contains("✓ cargo test: 0 passed (3 suites, 0.00s)"), + result.contains("cargo test: 0 passed (3 suites, 0.00s)"), "Expected compact format for zero tests, got: {}", result ); @@ -1280,7 +1277,7 @@ test result: ok. 18 passed; 0 failed; 2 ignored; 0 measured; 0 filtered out; fin "#; let result = filter_cargo_test(output); assert!( - result.contains("✓ cargo test: 63 passed, 5 ignored, 2 filtered out (2 suites, 0.70s)"), + result.contains("cargo test: 63 passed, 5 ignored, 2 filtered out (2 suites, 0.70s)"), "Expected compact format with ignored and filtered, got: {}", result ); @@ -1295,7 +1292,7 @@ test result: ok. 
15 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out; fin "#; let result = filter_cargo_test(output); assert!( - result.contains("✓ cargo test: 15 passed (1 suite, 0.01s)"), + result.contains("cargo test: 15 passed (1 suite, 0.01s)"), "Expected singular 'suite', got: {}", result ); @@ -1309,9 +1306,9 @@ running 15 tests test result: MALFORMED LINE WITHOUT PROPER FORMAT "#; let result = filter_cargo_test(output); - // Should fallback to original behavior (show line with checkmark) + // Should fallback to original behavior (show line without checkmark) assert!( - result.contains("✓ test result: MALFORMED"), + result.contains("test result: MALFORMED"), "Expected fallback format, got: {}", result ); @@ -1323,7 +1320,7 @@ test result: MALFORMED LINE WITHOUT PROPER FORMAT Finished dev [unoptimized + debuginfo] target(s) in 1.53s "#; let result = filter_cargo_clippy(output); - assert!(result.contains("✓ cargo clippy: No issues found")); + assert!(result.contains("cargo clippy: No issues found")); } #[test] @@ -1366,7 +1363,7 @@ warning: `rtk` (bin) generated 2 warnings Replaced package `rtk v0.9.4` with `rtk v0.11.0` (/Users/user/.cargo/bin/rtk) "#; let result = filter_cargo_install(output); - assert!(result.contains("✓ cargo install"), "got: {}", result); + assert!(result.contains("cargo install"), "got: {}", result); assert!(result.contains("rtk v0.11.0"), "got: {}", result); assert!(result.contains("5 deps compiled"), "got: {}", result); assert!(result.contains("Replaced"), "got: {}", result); @@ -1383,7 +1380,7 @@ warning: `rtk` (bin) generated 2 warnings Replaced package `rtk v0.9.4` with `rtk v0.11.0` (/Users/user/.cargo/bin/rtk) "#; let result = filter_cargo_install(output); - assert!(result.contains("✓ cargo install"), "got: {}", result); + assert!(result.contains("cargo install"), "got: {}", result); assert!(result.contains("Replacing"), "got: {}", result); assert!(result.contains("Replaced"), "got: {}", result); } @@ -1428,7 +1425,7 @@ error: aborting due 
to 1 previous error #[test] fn test_filter_cargo_install_empty_output() { let result = filter_cargo_install(""); - assert!(result.contains("✓ cargo install"), "got: {}", result); + assert!(result.contains("cargo install"), "got: {}", result); assert!(result.contains("0 deps compiled"), "got: {}", result); } @@ -1442,7 +1439,7 @@ error: aborting due to 1 previous error warning: be sure to add `/Users/user/.cargo/bin` to your PATH "#; let result = filter_cargo_install(output); - assert!(result.contains("✓ cargo install"), "got: {}", result); + assert!(result.contains("cargo install"), "got: {}", result); assert!( result.contains("be sure to add"), "PATH warning should be kept: {}", @@ -1492,7 +1489,7 @@ error: aborting due to 2 previous errors Installing rtk v0.11.0 "#; let result = filter_cargo_install(output); - assert!(result.contains("✓ cargo install"), "got: {}", result); + assert!(result.contains("cargo install"), "got: {}", result); assert!(!result.contains("Locking"), "got: {}", result); assert!(!result.contains("Blocking"), "got: {}", result); assert!(!result.contains("Downloading"), "got: {}", result); @@ -1506,7 +1503,7 @@ error: aborting due to 2 previous errors "#; let result = filter_cargo_install(output); // Path-based install: crate info not extracted from path - assert!(result.contains("✓ cargo install"), "got: {}", result); + assert!(result.contains("cargo install"), "got: {}", result); assert!(result.contains("1 deps compiled"), "got: {}", result); } @@ -1532,7 +1529,7 @@ error: aborting due to 2 previous errors "#; let result = filter_cargo_nextest(output); assert_eq!( - result, "✓ cargo nextest: 301 passed (1 binary, 0.192s)", + result, "cargo nextest: 301 passed (1 binary, 0.192s)", "got: {}", result ); @@ -1617,7 +1614,7 @@ error: test run failed "#; let result = filter_cargo_nextest(output); assert_eq!( - result, "✓ cargo nextest: 50 passed, 3 skipped (2 binaries, 0.500s)", + result, "cargo nextest: 50 passed, 3 skipped (2 binaries, 0.500s)", 
"got: {}", result ); @@ -1668,7 +1665,7 @@ error: test run failed "#; let result = filter_cargo_nextest(output); assert_eq!( - result, "✓ cargo nextest: 100 passed (5 binaries, 1.234s)", + result, "cargo nextest: 100 passed (5 binaries, 1.234s)", "got: {}", result ); @@ -1703,7 +1700,7 @@ error: test run failed result ); assert!( - result.contains("✓ cargo nextest: 10 passed"), + result.contains("cargo nextest: 10 passed"), "got: {}", result ); diff --git a/src/cc_economics.rs b/src/cc_economics.rs index cf135ac3..6f50f677 100644 --- a/src/cc_economics.rs +++ b/src/cc_economics.rs @@ -250,7 +250,7 @@ fn merge_weekly(cc: Option>, rtk: Vec) -> Vec m, None => { - eprintln!("⚠️ Invalid week_start format: {}", entry.week_start); + eprintln!("[warn] Invalid week_start format: {}", entry.week_start); continue; } }; @@ -442,7 +442,7 @@ fn display_summary(tracker: &Tracker, verbose: u8) -> Result<()> { let totals = compute_totals(&periods); - println!("💰 Claude Code Economics"); + println!("[cost] Claude Code Economics"); println!("════════════════════════════════════════════════════"); println!(); @@ -550,7 +550,7 @@ fn display_daily(tracker: &Tracker, verbose: u8) -> Result<()> { .context("Failed to load daily token savings from database")?; let periods = merge_daily(cc_daily, rtk_daily); - println!("📅 Daily Economics"); + println!("Daily Economics"); println!("════════════════════════════════════════════════════"); print_period_table(&periods, verbose); Ok(()) @@ -564,7 +564,7 @@ fn display_weekly(tracker: &Tracker, verbose: u8) -> Result<()> { .context("Failed to load weekly token savings from database")?; let periods = merge_weekly(cc_weekly, rtk_weekly); - println!("📅 Weekly Economics"); + println!("Weekly Economics"); println!("════════════════════════════════════════════════════"); print_period_table(&periods, verbose); Ok(()) @@ -578,7 +578,7 @@ fn display_monthly(tracker: &Tracker, verbose: u8) -> Result<()> { .context("Failed to load monthly token savings from 
database")?; let periods = merge_monthly(cc_monthly, rtk_monthly); - println!("📅 Monthly Economics"); + println!("Monthly Economics"); println!("════════════════════════════════════════════════════"); print_period_table(&periods, verbose); Ok(()) diff --git a/src/ccusage.rs b/src/ccusage.rs index d9ca8668..b49e483d 100644 --- a/src/ccusage.rs +++ b/src/ccusage.rs @@ -126,7 +126,7 @@ pub fn fetch(granularity: Granularity) -> Result>> { let mut cmd = match build_command() { Some(cmd) => cmd, None => { - eprintln!("⚠️ ccusage not found. Install: npm i -g ccusage (or use npx ccusage)"); + eprintln!("[warn] ccusage not found. Install: npm i -g ccusage (or use npx ccusage)"); return Ok(None); } }; @@ -146,7 +146,7 @@ pub fn fetch(granularity: Granularity) -> Result>> { let output = match output { Err(e) => { - eprintln!("⚠️ ccusage execution failed: {}", e); + eprintln!("[warn] ccusage execution failed: {}", e); return Ok(None); } Ok(o) => o, @@ -155,7 +155,7 @@ pub fn fetch(granularity: Granularity) -> Result>> { if !output.status.success() { let stderr = String::from_utf8_lossy(&output.stderr); eprintln!( - "⚠️ ccusage exited with {}: {}", + "[warn] ccusage exited with {}: {}", output.status, stderr.trim() ); diff --git a/src/container.rs b/src/container.rs index ee8d4268..e609de0c 100644 --- a/src/container.rs +++ b/src/container.rs @@ -53,14 +53,14 @@ fn docker_ps(_verbose: u8) -> Result<()> { let mut rtk = String::new(); if stdout.trim().is_empty() { - rtk.push_str("🐳 0 containers"); + rtk.push_str("[docker] 0 containers"); println!("{}", rtk); timer.track("docker ps", "rtk docker ps", &raw, &rtk); return Ok(()); } let count = stdout.lines().count(); - rtk.push_str(&format!("🐳 {} containers:\n", count)); + rtk.push_str(&format!("[docker] {} containers:\n", count)); for line in stdout.lines().take(15) { let parts: Vec<&str> = line.split('\t').collect(); @@ -119,7 +119,7 @@ fn docker_images(_verbose: u8) -> Result<()> { let mut rtk = String::new(); if lines.is_empty() 
{ - rtk.push_str("🐳 0 images"); + rtk.push_str("[docker] 0 images"); println!("{}", rtk); timer.track("docker images", "rtk docker images", &raw, &rtk); return Ok(()); @@ -146,7 +146,11 @@ fn docker_images(_verbose: u8) -> Result<()> { } else { format!("{:.0}MB", total_size_mb) }; - rtk.push_str(&format!("🐳 {} images ({})\n", lines.len(), total_display)); + rtk.push_str(&format!( + "[docker] {} images ({})\n", + lines.len(), + total_display + )); for line in lines.iter().take(15) { let parts: Vec<&str> = line.split('\t').collect(); @@ -202,7 +206,7 @@ fn docker_logs(args: &[String], _verbose: u8) -> Result<()> { } let analyzed = crate::log_cmd::run_stdin_str(&raw); - let rtk = format!("🐳 Logs for {}:\n{}", container, analyzed); + let rtk = format!("[docker] Logs for {}:\n{}", container, analyzed); println!("{}", rtk); timer.track( &format!("docker logs {}", container), @@ -238,7 +242,7 @@ fn kubectl_pods(args: &[String], _verbose: u8) -> Result<()> { let json: serde_json::Value = match serde_json::from_str(&raw) { Ok(v) => v, Err(_) => { - rtk.push_str("☸️ No pods found"); + rtk.push_str("No pods found"); println!("{}", rtk); timer.track("kubectl get pods", "rtk kubectl pods", &raw, &rtk); return Ok(()); @@ -246,7 +250,7 @@ fn kubectl_pods(args: &[String], _verbose: u8) -> Result<()> { }; let Some(pods) = json["items"].as_array().filter(|a| !a.is_empty()) else { - rtk.push_str("☸️ No pods found"); + rtk.push_str("No pods found"); println!("{}", rtk); timer.track("kubectl get pods", "rtk kubectl pods", &raw, &rtk); return Ok(()); @@ -292,21 +296,21 @@ fn kubectl_pods(args: &[String], _verbose: u8) -> Result<()> { let mut parts = Vec::new(); if running > 0 { - parts.push(format!("{} ✓", running)); + parts.push(format!("{}", running)); } if pending > 0 { parts.push(format!("{} pending", pending)); } if failed > 0 { - parts.push(format!("{} ✗", failed)); + parts.push(format!("{} [x]", failed)); } if restarts_total > 0 { parts.push(format!("{} restarts", 
restarts_total)); } - rtk.push_str(&format!("☸️ {} pods: {}\n", pods.len(), parts.join(", "))); + rtk.push_str(&format!("{} pods: {}\n", pods.len(), parts.join(", "))); if !issues.is_empty() { - rtk.push_str("⚠️ Issues:\n"); + rtk.push_str("[warn] Issues:\n"); for issue in issues.iter().take(10) { rtk.push_str(&format!(" {}\n", issue)); } @@ -345,7 +349,7 @@ fn kubectl_services(args: &[String], _verbose: u8) -> Result<()> { let json: serde_json::Value = match serde_json::from_str(&raw) { Ok(v) => v, Err(_) => { - rtk.push_str("☸️ No services found"); + rtk.push_str("No services found"); println!("{}", rtk); timer.track("kubectl get svc", "rtk kubectl svc", &raw, &rtk); return Ok(()); @@ -353,12 +357,12 @@ fn kubectl_services(args: &[String], _verbose: u8) -> Result<()> { }; let Some(services) = json["items"].as_array().filter(|a| !a.is_empty()) else { - rtk.push_str("☸️ No services found"); + rtk.push_str("No services found"); println!("{}", rtk); timer.track("kubectl get svc", "rtk kubectl svc", &raw, &rtk); return Ok(()); }; - rtk.push_str(&format!("☸️ {} services:\n", services.len())); + rtk.push_str(&format!("{} services:\n", services.len())); for svc in services.iter().take(15) { let ns = svc["metadata"]["namespace"].as_str().unwrap_or("-"); @@ -433,7 +437,7 @@ fn kubectl_logs(args: &[String], _verbose: u8) -> Result<()> { } let analyzed = crate::log_cmd::run_stdin_str(&raw); - let rtk = format!("☸️ Logs for {}:\n{}", pod, analyzed); + let rtk = format!("Logs for {}:\n{}", pod, analyzed); println!("{}", rtk); timer.track( &format!("kubectl logs {}", pod), @@ -451,10 +455,10 @@ pub fn format_compose_ps(raw: &str) -> String { let lines: Vec<&str> = raw.lines().filter(|l| !l.trim().is_empty()).collect(); if lines.is_empty() { - return "🐳 0 compose services".to_string(); + return "[compose] 0 services".to_string(); } - let mut result = format!("🐳 {} compose services:\n", lines.len()); + let mut result = format!("[compose] {} services:\n", lines.len()); for line in 
lines.iter().take(20) { let parts: Vec<&str> = line.split('\t').collect(); @@ -493,19 +497,19 @@ pub fn format_compose_ps(raw: &str) -> String { /// Format `docker compose logs` output into compact form pub fn format_compose_logs(raw: &str) -> String { if raw.trim().is_empty() { - return "🐳 No logs".to_string(); + return "[compose] No logs".to_string(); } // docker compose logs prefixes each line with "service-N | " // Use the existing log deduplication engine let analyzed = crate::log_cmd::run_stdin_str(raw); - format!("🐳 Compose logs:\n{}", analyzed) + format!("[compose] Logs:\n{}", analyzed) } /// Format `docker compose build` output into compact summary pub fn format_compose_build(raw: &str) -> String { if raw.trim().is_empty() { - return "🐳 Build: no output".to_string(); + return "[compose] Build: no output".to_string(); } let mut result = String::new(); @@ -513,7 +517,7 @@ pub fn format_compose_build(raw: &str) -> String { // Extract the summary line: "[+] Building 12.3s (8/8) FINISHED" for line in raw.lines() { if line.contains("Building") && line.contains("FINISHED") { - result.push_str(&format!("🐳 {}\n", line.trim())); + result.push_str(&format!("[compose] {}\n", line.trim())); break; } } @@ -521,9 +525,9 @@ pub fn format_compose_build(raw: &str) -> String { if result.is_empty() { // No FINISHED line found — might still be building or errored if let Some(line) = raw.lines().find(|l| l.contains("Building")) { - result.push_str(&format!("🐳 {}\n", line.trim())); + result.push_str(&format!("[compose] {}\n", line.trim())); } else { - result.push_str("🐳 Build:\n"); + result.push_str("[compose] Build:\n"); } } @@ -822,8 +826,11 @@ mod tests { let raw = "redis-1\tredis:7\tUp 5 hours\t"; let out = format_compose_ps(raw); assert!(out.contains("redis"), "should show service name"); + // Should not show port info when no ports (but [compose] prefix is OK) + let lines: Vec<&str> = out.lines().collect(); + let redis_line = lines.iter().find(|l| 
l.contains("redis")).unwrap(); assert!( - !out.contains("["), + !redis_line.contains("] ["), "should not show port brackets when empty" ); } @@ -852,10 +859,7 @@ web-1 | 192.168.1.1 - GET /favicon.ico 404 api-1 | Server listening on port 3000 api-1 | Connected to database"; let out = format_compose_logs(raw); - assert!( - out.contains("Compose logs"), - "should have compose logs header" - ); + assert!(out.contains("Logs"), "should have compose logs header"); } #[test] diff --git a/src/deps.rs b/src/deps.rs index 29ea21e0..27902984 100644 --- a/src/deps.rs +++ b/src/deps.rs @@ -26,7 +26,7 @@ pub fn run(path: &Path, verbose: u8) -> Result<()> { if cargo_path.exists() { found = true; raw.push_str(&fs::read_to_string(&cargo_path).unwrap_or_default()); - rtk.push_str("📦 Rust (Cargo.toml):\n"); + rtk.push_str("Rust (Cargo.toml):\n"); rtk.push_str(&summarize_cargo_str(&cargo_path)?); } @@ -34,7 +34,7 @@ pub fn run(path: &Path, verbose: u8) -> Result<()> { if package_path.exists() { found = true; raw.push_str(&fs::read_to_string(&package_path).unwrap_or_default()); - rtk.push_str("📦 Node.js (package.json):\n"); + rtk.push_str("Node.js (package.json):\n"); rtk.push_str(&summarize_package_json_str(&package_path)?); } @@ -42,7 +42,7 @@ pub fn run(path: &Path, verbose: u8) -> Result<()> { if requirements_path.exists() { found = true; raw.push_str(&fs::read_to_string(&requirements_path).unwrap_or_default()); - rtk.push_str("📦 Python (requirements.txt):\n"); + rtk.push_str("Python (requirements.txt):\n"); rtk.push_str(&summarize_requirements_str(&requirements_path)?); } @@ -50,7 +50,7 @@ pub fn run(path: &Path, verbose: u8) -> Result<()> { if pyproject_path.exists() { found = true; raw.push_str(&fs::read_to_string(&pyproject_path).unwrap_or_default()); - rtk.push_str("📦 Python (pyproject.toml):\n"); + rtk.push_str("Python (pyproject.toml):\n"); rtk.push_str(&summarize_pyproject_str(&pyproject_path)?); } @@ -58,7 +58,7 @@ pub fn run(path: &Path, verbose: u8) -> Result<()> { if 
gomod_path.exists() { found = true; raw.push_str(&fs::read_to_string(&gomod_path).unwrap_or_default()); - rtk.push_str("📦 Go (go.mod):\n"); + rtk.push_str("Go (go.mod):\n"); rtk.push_str(&summarize_gomod_str(&gomod_path)?); } diff --git a/src/diff_cmd.rs b/src/diff_cmd.rs index 13608254..d9299eb5 100644 --- a/src/diff_cmd.rs +++ b/src/diff_cmd.rs @@ -22,7 +22,7 @@ pub fn run(file1: &Path, file2: &Path, verbose: u8) -> Result<()> { let mut rtk = String::new(); if diff.added == 0 && diff.removed == 0 { - rtk.push_str("✅ Files are identical"); + rtk.push_str("[ok] Files are identical"); println!("{}", rtk); timer.track( &format!("diff {} {}", file1.display(), file2.display()), @@ -33,7 +33,7 @@ pub fn run(file1: &Path, file2: &Path, verbose: u8) -> Result<()> { return Ok(()); } - rtk.push_str(&format!("📊 {} → {}\n", file1.display(), file2.display())); + rtk.push_str(&format!("{} → {}\n", file1.display(), file2.display())); rtk.push_str(&format!( " +{} added, -{} removed, ~{} modified\n\n", diff.added, diff.removed, diff.modified @@ -168,7 +168,7 @@ fn condense_unified_diff(diff: &str) -> String { // File header if line.starts_with("+++ ") { if !current_file.is_empty() && (added > 0 || removed > 0) { - result.push(format!("📄 {} (+{} -{})", current_file, added, removed)); + result.push(format!("[file] {} (+{} -{})", current_file, added, removed)); for c in changes.iter().take(10) { result.push(format!(" {}", c)); } @@ -199,7 +199,7 @@ fn condense_unified_diff(diff: &str) -> String { // Last file if !current_file.is_empty() && (added > 0 || removed > 0) { - result.push(format!("📄 {} (+{} -{})", current_file, added, removed)); + result.push(format!("[file] {} (+{} -{})", current_file, added, removed)); for c in changes.iter().take(10) { result.push(format!(" {}", c)); } diff --git a/src/display_helpers.rs b/src/display_helpers.rs index a102c397..60354c7c 100644 --- a/src/display_helpers.rs +++ b/src/display_helpers.rs @@ -21,7 +21,7 @@ pub fn format_duration(ms: u64) -> 
String { /// Trait for period-based statistics that can be displayed in tables pub trait PeriodStats { - /// Icon for this period type (e.g., "📅", "📊", "📆") + /// Icon for this period type (e.g., "D", "W", "M") fn icon() -> &'static str; /// Label for this period type (e.g., "Daily", "Weekly", "Monthly") @@ -143,7 +143,7 @@ pub fn print_period_table(data: &[T]) { impl PeriodStats for DayStats { fn icon() -> &'static str { - "📅" + "D" } fn label() -> &'static str { @@ -193,7 +193,7 @@ impl PeriodStats for DayStats { impl PeriodStats for WeekStats { fn icon() -> &'static str { - "📊" + "W" } fn label() -> &'static str { @@ -253,7 +253,7 @@ impl PeriodStats for WeekStats { impl PeriodStats for MonthStats { fn icon() -> &'static str { - "📆" + "M" } fn label() -> &'static str { @@ -322,7 +322,7 @@ mod tests { assert_eq!(day.commands(), 10); assert_eq!(day.saved_tokens(), 200); assert_eq!(day.avg_time_ms(), 150); - assert_eq!(DayStats::icon(), "📅"); + assert_eq!(DayStats::icon(), "D"); assert_eq!(DayStats::label(), "Daily"); } @@ -342,7 +342,7 @@ mod tests { assert_eq!(week.period(), "01-20 → 01-26"); assert_eq!(week.avg_time_ms(), 100); - assert_eq!(WeekStats::icon(), "📊"); + assert_eq!(WeekStats::icon(), "W"); assert_eq!(WeekStats::label(), "Weekly"); } @@ -361,7 +361,7 @@ mod tests { assert_eq!(month.period(), "2026-01"); assert_eq!(month.avg_time_ms(), 100); - assert_eq!(MonthStats::icon(), "📆"); + assert_eq!(MonthStats::icon(), "M"); assert_eq!(MonthStats::label(), "Monthly"); } diff --git a/src/env_cmd.rs b/src/env_cmd.rs index 4a2437c4..d4b9b6a3 100644 --- a/src/env_cmd.rs +++ b/src/env_cmd.rs @@ -62,7 +62,7 @@ pub fn run(filter: Option<&str>, show_all: bool, verbose: u8) -> Result<()> { // Print categorized if !path_vars.is_empty() { - println!("📂 PATH Variables:"); + println!("PATH Variables:"); for (k, v) in &path_vars { if k == "PATH" { // Split PATH for readability @@ -81,28 +81,28 @@ pub fn run(filter: Option<&str>, show_all: bool, verbose: u8) -> Result<()> 
{ } if !lang_vars.is_empty() { - println!("\n🔧 Language/Runtime:"); + println!("\nLanguage/Runtime:"); for (k, v) in &lang_vars { println!(" {}={}", k, v); } } if !cloud_vars.is_empty() { - println!("\n☁️ Cloud/Services:"); + println!("\nCloud/Services:"); for (k, v) in &cloud_vars { println!(" {}={}", k, v); } } if !tool_vars.is_empty() { - println!("\n🛠️ Tools:"); + println!("\nTools:"); for (k, v) in &tool_vars { println!(" {}={}", k, v); } } if !other_vars.is_empty() { - println!("\n📋 Other:"); + println!("\nOther:"); for (k, v) in other_vars.iter().take(20) { println!(" {}={}", k, v); } @@ -118,7 +118,7 @@ pub fn run(filter: Option<&str>, show_all: bool, verbose: u8) -> Result<()> { + tool_vars.len() + other_vars.len().min(20); if filter.is_none() { - println!("\n📊 Total: {} vars (showing {} relevant)", total, shown); + println!("\nTotal: {} vars (showing {} relevant)", total, shown); } let raw: String = vars.iter().map(|(k, v)| format!("{}={}\n", k, v)).collect(); diff --git a/src/find_cmd.rs b/src/find_cmd.rs index 25da54e2..df1e41b2 100644 --- a/src/find_cmd.rs +++ b/src/find_cmd.rs @@ -305,7 +305,7 @@ pub fn run( let dirs_count = dirs.len(); let total_files = files.len(); - println!("📁 {}F {}D:", total_files, dirs_count); + println!("{}F {}D:", total_files, dirs_count); println!(); // Display with proper --max limiting (count individual files) diff --git a/src/format_cmd.rs b/src/format_cmd.rs index fe6ce13f..23c01a2b 100644 --- a/src/format_cmd.rs +++ b/src/format_cmd.rs @@ -226,7 +226,7 @@ fn filter_black_output(output: &str) -> String { if !needs_formatting && (all_done || files_unchanged > 0) { // All files formatted correctly - result.push_str("✓ Format (black): All files formatted"); + result.push_str("Format (black): All files formatted"); if files_unchanged > 0 { result.push_str(&format!(" ({} files checked)", files_unchanged)); } @@ -258,13 +258,10 @@ fn filter_black_output(output: &str) -> String { } if files_unchanged > 0 { - 
result.push_str(&format!( - "\n✓ {} files already formatted\n", - files_unchanged - )); + result.push_str(&format!("\n{} files already formatted\n", files_unchanged)); } - result.push_str("\n💡 Run `black .` to format these files\n"); + result.push_str("\n[hint] Run `black .` to format these files\n"); } else { // Fallback: show raw output result.push_str(output.trim()); @@ -349,7 +346,7 @@ mod tests { fn test_filter_black_all_formatted() { let output = "All done! ✨ 🍰 ✨\n5 files left unchanged."; let result = filter_black_output(output); - assert!(result.contains("✓ Format (black)")); + assert!(result.contains("Format (black)")); assert!(result.contains("All files formatted")); assert!(result.contains("5 files checked")); } diff --git a/src/gain.rs b/src/gain.rs index dfba7e08..bafdc001 100644 --- a/src/gain.rs +++ b/src/gain.rs @@ -109,7 +109,7 @@ pub fn run( hook_check::HookStatus::Missing => { eprintln!( "{}", - "⚠️ No hook installed — run `rtk init -g` for automatic token savings" + "[warn] No hook installed — run `rtk init -g` for automatic token savings" .yellow() ); eprintln!(); @@ -117,7 +117,7 @@ pub fn run( hook_check::HookStatus::Outdated => { eprintln!( "{}", - "⚠️ Hook outdated — run `rtk init -g` to update".yellow() + "[warn] Hook outdated — run `rtk init -g` to update".yellow() ); eprintln!(); } @@ -659,7 +659,7 @@ fn check_rtk_disabled_bypass() -> Option { let pct = (bypassed as f64 / total_bash as f64) * 100.0; if pct > 10.0 { Some(format!( - "⚠️ {} commands ({:.0}%) used RTK_DISABLED=1 unnecessarily — run `rtk discover` for details", + "[warn] {} commands ({:.0}%) used RTK_DISABLED=1 unnecessarily — run `rtk discover` for details", bypassed, pct )) } else { diff --git a/src/gh_cmd.rs b/src/gh_cmd.rs index cf17fabd..9073c7e0 100644 --- a/src/gh_cmd.rs +++ b/src/gh_cmd.rs @@ -235,8 +235,8 @@ fn list_prs(args: &[String], _verbose: u8, ultra_compact: bool) -> Result<()> { filtered.push_str("PRs\n"); println!("PRs"); } else { - filtered.push_str("📋 Pull 
Requests\n"); - println!("📋 Pull Requests"); + filtered.push_str("Pull Requests\n"); + println!("Pull Requests"); } for pr in prs.iter().take(20) { @@ -254,10 +254,10 @@ fn list_prs(args: &[String], _verbose: u8, ultra_compact: bool) -> Result<()> { } } else { match state { - "OPEN" => "🟢", - "MERGED" => "🟣", - "CLOSED" => "🔴", - _ => "⚪", + "OPEN" => "[open]", + "MERGED" => "[merged]", + "CLOSED" => "[closed]", + _ => "[unknown]", } }; @@ -352,10 +352,10 @@ fn view_pr(args: &[String], _verbose: u8, ultra_compact: bool) -> Result<()> { } } else { match state { - "OPEN" => "🟢", - "MERGED" => "🟣", - "CLOSED" => "🔴", - _ => "⚪", + "OPEN" => "[open]", + "MERGED" => "[merged]", + "CLOSED" => "[closed]", + _ => "[unknown]", } }; @@ -368,8 +368,8 @@ fn view_pr(args: &[String], _verbose: u8, ultra_compact: bool) -> Result<()> { print!("{}", line); let mergeable_str = match mergeable { - "MERGEABLE" => "✓", - "CONFLICTING" => "✗", + "MERGEABLE" => "[ok]", + "CONFLICTING" => "[x]", _ => "?", }; let line = format!(" {} | {}\n", state, mergeable_str); @@ -417,11 +417,11 @@ fn view_pr(args: &[String], _verbose: u8, ultra_compact: bool) -> Result<()> { if ultra_compact { if failed > 0 { - let line = format!(" ✗{}/{} {} fail\n", passed, total, failed); + let line = format!(" [x]{}/{} {} fail\n", passed, total, failed); filtered.push_str(&line); print!("{}", line); } else { - let line = format!(" ✓{}/{}\n", passed, total); + let line = format!(" {}/{}\n", passed, total); filtered.push_str(&line); print!("{}", line); } @@ -430,7 +430,7 @@ fn view_pr(args: &[String], _verbose: u8, ultra_compact: bool) -> Result<()> { filtered.push_str(&line); print!("{}", line); if failed > 0 { - let line = format!(" ⚠️ {} checks failed\n", failed); + let line = format!(" [warn] {} checks failed\n", failed); filtered.push_str(&line); print!("{}", line); } @@ -504,9 +504,9 @@ fn pr_checks(args: &[String], _verbose: u8, _ultra_compact: bool) -> Result<()> let mut failed_checks = Vec::new(); for line 
in stdout.lines() { - if line.contains('✓') || line.contains("pass") { + if line.contains("[ok]") || line.contains("pass") { passed += 1; - } else if line.contains('✗') || line.contains("fail") { + } else if line.contains("[x]") || line.contains("fail") { failed += 1; failed_checks.push(line.trim().to_string()); } else if line.contains('*') || line.contains("pending") { @@ -516,20 +516,20 @@ fn pr_checks(args: &[String], _verbose: u8, _ultra_compact: bool) -> Result<()> let mut filtered = String::new(); - let line = "🔍 CI Checks Summary:\n"; + let line = "CI Checks Summary:\n"; filtered.push_str(line); print!("{}", line); - let line = format!(" ✅ Passed: {}\n", passed); + let line = format!(" [ok] Passed: {}\n", passed); filtered.push_str(&line); print!("{}", line); - let line = format!(" ❌ Failed: {}\n", failed); + let line = format!(" [FAIL] Failed: {}\n", failed); filtered.push_str(&line); print!("{}", line); if pending > 0 { - let line = format!(" ⏳ Pending: {}\n", pending); + let line = format!(" [pending] Pending: {}\n", pending); filtered.push_str(&line); print!("{}", line); } @@ -581,7 +581,7 @@ fn pr_status(_verbose: u8, _ultra_compact: bool) -> Result<()> { let mut filtered = String::new(); if let Some(created_by) = json["createdBy"].as_array() { - let line = format!("📝 Your PRs ({}):\n", created_by.len()); + let line = format!("Your PRs ({}):\n", created_by.len()); filtered.push_str(&line); print!("{}", line); for pr in created_by.iter().take(5) { @@ -636,13 +636,8 @@ fn list_issues(args: &[String], _verbose: u8, ultra_compact: bool) -> Result<()> let mut filtered = String::new(); if let Some(issues) = json.as_array() { - if ultra_compact { - filtered.push_str("Issues\n"); - println!("Issues"); - } else { - filtered.push_str("🐛 Issues\n"); - println!("🐛 Issues"); - } + filtered.push_str("Issues\n"); + println!("Issues"); for issue in issues.iter().take(20) { let number = issue["number"].as_i64().unwrap_or(0); let title = 
issue["title"].as_str().unwrap_or("???"); @@ -656,9 +651,9 @@ fn list_issues(args: &[String], _verbose: u8, ultra_compact: bool) -> Result<()> } } else { if state == "OPEN" { - "🟢" + "[open]" } else { - "🔴" + "[closed]" } }; let line = format!(" {} #{} {}\n", icon, number, truncate(title, 60)); @@ -721,7 +716,11 @@ fn view_issue(args: &[String], _verbose: u8) -> Result<()> { let author = json["author"]["login"].as_str().unwrap_or("???"); let url = json["url"].as_str().unwrap_or(""); - let icon = if state == "OPEN" { "🟢" } else { "🔴" }; + let icon = if state == "OPEN" { + "[open]" + } else { + "[closed]" + }; let mut filtered = String::new(); @@ -814,8 +813,8 @@ fn list_runs(args: &[String], _verbose: u8, ultra_compact: bool) -> Result<()> { filtered.push_str("Runs\n"); println!("Runs"); } else { - filtered.push_str("🏃 Workflow Runs\n"); - println!("🏃 Workflow Runs"); + filtered.push_str("Workflow Runs\n"); + println!("Workflow Runs"); } for run in runs { let id = run["databaseId"].as_i64().unwrap_or(0); @@ -825,8 +824,8 @@ fn list_runs(args: &[String], _verbose: u8, ultra_compact: bool) -> Result<()> { let icon = if ultra_compact { match conclusion { - "success" => "✓", - "failure" => "✗", + "success" => "[ok]", + "failure" => "[x]", "cancelled" => "X", _ => { if status == "in_progress" { @@ -838,14 +837,14 @@ fn list_runs(args: &[String], _verbose: u8, ultra_compact: bool) -> Result<()> { } } else { match conclusion { - "success" => "✅", - "failure" => "❌", - "cancelled" => "🚫", + "success" => "[ok]", + "failure" => "[FAIL]", + "cancelled" => "[X]", _ => { if status == "in_progress" { - "⏳" + "[time]" } else { - "⚪" + "[pending]" } } } @@ -910,7 +909,7 @@ fn view_run(args: &[String], _verbose: u8) -> Result<()> { let mut filtered = String::new(); - let line = format!("🏃 Workflow Run #{}\n", run_id); + let line = format!("Workflow Run #{}\n", run_id); filtered.push_str(&line); print!("{}", line); @@ -924,8 +923,8 @@ fn view_run(args: &[String], _verbose: u8) -> 
Result<()> { // Skip successful jobs in compact mode continue; } - if line.contains('✗') || line.contains("fail") { - let formatted = format!(" ❌ {}\n", line.trim()); + if line.contains("[x]") || line.contains("fail") { + let formatted = format!(" [FAIL] {}\n", line.trim()); filtered.push_str(&formatted); print!("{}", formatted); } @@ -992,15 +991,11 @@ fn run_repo(args: &[String], _verbose: u8, _ultra_compact: bool) -> Result<()> { let forks = json["forkCount"].as_i64().unwrap_or(0); let private = json["isPrivate"].as_bool().unwrap_or(false); - let visibility = if private { - "🔒 Private" - } else { - "🌐 Public" - }; + let visibility = if private { "[private]" } else { "[public]" }; let mut filtered = String::new(); - let line = format!("📦 {}/{}\n", owner, name); + let line = format!("{}/{}\n", owner, name); filtered.push_str(&line); print!("{}", line); @@ -1014,7 +1009,7 @@ fn run_repo(args: &[String], _verbose: u8, _ultra_compact: bool) -> Result<()> { print!("{}", line); } - let line = format!(" ⭐ {} stars | 🔱 {} forks\n", stars, forks); + let line = format!(" {} stars | {} forks\n", stars, forks); filtered.push_str(&line); print!("{}", line); diff --git a/src/git.rs b/src/git.rs index 0f4d137a..3d49fdd6 100644 --- a/src/git.rs +++ b/src/git.rs @@ -573,7 +573,7 @@ fn format_status_output(porcelain: &str) -> String { if let Some(branch_line) = lines.first() { if branch_line.starts_with("##") { let branch = branch_line.trim_start_matches("## "); - output.push_str(&format!("branch: {}\n", branch)); + output.push_str(&format!("* {}\n", branch)); } } @@ -623,7 +623,7 @@ fn format_status_output(porcelain: &str) -> String { let max_untracked = limits.status_max_untracked; if staged > 0 { - output.push_str(&format!("staged: {} files\n", staged)); + output.push_str(&format!("+ Staged: {} files\n", staged)); for f in staged_files.iter().take(max_files) { output.push_str(&format!(" {}\n", f)); } @@ -636,7 +636,7 @@ fn format_status_output(porcelain: &str) -> String { } if 
modified > 0 { - output.push_str(&format!("modified: {} files\n", modified)); + output.push_str(&format!("~ Modified: {} files\n", modified)); for f in modified_files.iter().take(max_files) { output.push_str(&format!(" {}\n", f)); } @@ -649,7 +649,7 @@ fn format_status_output(porcelain: &str) -> String { } if untracked > 0 { - output.push_str(&format!("untracked: {} files\n", untracked)); + output.push_str(&format!("? Untracked: {} files\n", untracked)); for f in untracked_files.iter().take(max_untracked) { output.push_str(&format!(" {}\n", f)); } @@ -704,7 +704,7 @@ fn filter_status_with_args(output: &str) -> String { } if result.is_empty() { - "ok ✓".to_string() + "ok".to_string() } else { result.join("\n") } @@ -830,9 +830,9 @@ fn run_add(args: &[String], verbose: u8, global_args: &[String]) -> Result<()> { // Parse "1 file changed, 5 insertions(+)" format let short = stat.lines().last().unwrap_or("").trim(); if short.is_empty() { - "ok ✓".to_string() + "ok".to_string() } else { - format!("ok ✓ {}", short) + format!("ok {}", short) } }; @@ -893,15 +893,15 @@ fn run_commit(args: &[String], verbose: u8, global_args: &[String]) -> Result<() if let Some(hash_start) = line.find(' ') { let hash = line[1..hash_start].split(' ').next_back().unwrap_or(""); if !hash.is_empty() && hash.len() >= 7 { - format!("ok ✓ {}", &hash[..7.min(hash.len())]) + format!("ok {}", &hash[..7.min(hash.len())]) } else { - "ok ✓".to_string() + "ok".to_string() } } else { - "ok ✓".to_string() + "ok".to_string() } } else { - "ok ✓".to_string() + "ok".to_string() }; println!("{}", compact); @@ -959,7 +959,7 @@ fn run_push(args: &[String], verbose: u8, global_args: &[String]) -> Result<()> if line.contains("->") { let parts: Vec<&str> = line.split_whitespace().collect(); if parts.len() >= 3 { - result = format!("ok ✓ {}", parts[parts.len() - 1]); + result = format!("ok {}", parts[parts.len() - 1]); break; } } @@ -967,7 +967,7 @@ fn run_push(args: &[String], verbose: u8, global_args: &[String]) -> 
Result<()> if !result.is_empty() { result } else { - "ok ✓".to_string() + "ok".to_string() } }; @@ -1051,9 +1051,9 @@ fn run_pull(args: &[String], verbose: u8, global_args: &[String]) -> Result<()> } if files > 0 { - format!("ok ✓ {} files +{} -{}", files, insertions, deletions) + format!("ok {} files +{} -{}", files, insertions, deletions) } else { - "ok ✓".to_string() + "ok".to_string() } }; @@ -1171,7 +1171,7 @@ fn run_branch(args: &[String], verbose: u8, global_args: &[String]) -> Result<() let combined = format!("{}{}", stdout, stderr); let msg = if output.status.success() { - "ok ✓" + "ok" } else { &combined }; @@ -1184,7 +1184,7 @@ fn run_branch(args: &[String], verbose: u8, global_args: &[String]) -> Result<() ); if output.status.success() { - println!("ok ✓"); + println!("ok"); } else { eprintln!("FAILED: git branch {}", args.join(" ")); if !stderr.trim().is_empty() { @@ -1548,7 +1548,7 @@ fn run_worktree(args: &[String], verbose: u8, global_args: &[String]) -> Result< let combined = format!("{}{}", stdout, stderr); let msg = if output.status.success() { - "ok ✓" + "ok" } else { &combined }; @@ -1561,7 +1561,7 @@ fn run_worktree(args: &[String], verbose: u8, global_args: &[String]) -> Result< ); if output.status.success() { - println!("ok ✓"); + println!("ok"); } else { eprintln!("FAILED: git worktree {}", args.join(" ")); if !stderr.trim().is_empty() { @@ -1808,8 +1808,8 @@ mod tests { fn test_format_status_output_modified_files() { let porcelain = "## main...origin/main\n M src/main.rs\n M src/lib.rs\n"; let result = format_status_output(porcelain); - assert!(result.contains("branch: main...origin/main")); - assert!(result.contains("modified: 2 files")); + assert!(result.contains("* main...origin/main")); + assert!(result.contains("~ Modified: 2 files")); assert!(result.contains("src/main.rs")); assert!(result.contains("src/lib.rs")); assert!(!result.contains("Staged")); @@ -1820,8 +1820,8 @@ mod tests { fn test_format_status_output_untracked_files() { 
let porcelain = "## feature/new\n?? temp.txt\n?? debug.log\n?? test.sh\n"; let result = format_status_output(porcelain); - assert!(result.contains("branch: feature/new")); - assert!(result.contains("untracked: 3 files")); + assert!(result.contains("* feature/new")); + assert!(result.contains("? Untracked: 3 files")); assert!(result.contains("temp.txt")); assert!(result.contains("debug.log")); assert!(result.contains("test.sh")); @@ -1837,13 +1837,13 @@ A added.rs ?? untracked.txt "#; let result = format_status_output(porcelain); - assert!(result.contains("branch: main")); - assert!(result.contains("staged: 2 files")); + assert!(result.contains("* main")); + assert!(result.contains("+ Staged: 2 files")); assert!(result.contains("staged.rs")); assert!(result.contains("added.rs")); - assert!(result.contains("modified: 1 files")); + assert!(result.contains("~ Modified: 1 files")); assert!(result.contains("modified.rs")); - assert!(result.contains("untracked: 1 files")); + assert!(result.contains("? Untracked: 1 files")); assert!(result.contains("untracked.txt")); } @@ -1855,7 +1855,7 @@ A added.rs porcelain.push_str(&format!("M file{}.rs\n", i)); } let result = format_status_output(&porcelain); - assert!(result.contains("staged: 20 files")); + assert!(result.contains("+ Staged: 20 files")); assert!(result.contains("file1.rs")); assert!(result.contains("file15.rs")); assert!(result.contains("... +5 more")); @@ -1871,7 +1871,7 @@ A added.rs porcelain.push_str(&format!(" M file{}.rs\n", i)); } let result = format_status_output(&porcelain); - assert!(result.contains("modified: 20 files")); + assert!(result.contains("~ Modified: 20 files")); assert!(result.contains("file1.rs")); assert!(result.contains("file15.rs")); assert!(result.contains("... +5 more")); @@ -1886,7 +1886,7 @@ A added.rs porcelain.push_str(&format!("?? file{}.rs\n", i)); } let result = format_status_output(&porcelain); - assert!(result.contains("untracked: 15 files")); + assert!(result.contains("? 
Untracked: 15 files")); assert!(result.contains("file1.rs")); assert!(result.contains("file10.rs")); assert!(result.contains("... +5 more")); @@ -2100,7 +2100,7 @@ no changes added to commit (use "git add" and/or "git commit -a") let porcelain = "## main\n M สวัสดี.txt\n?? ทดสอบ.rs\n"; let result = format_status_output(porcelain); // Should not panic - assert!(result.contains("branch: main")); + assert!(result.contains("* main")); assert!(result.contains("สวัสดี.txt")); assert!(result.contains("ทดสอบ.rs")); } @@ -2109,7 +2109,7 @@ no changes added to commit (use "git add" and/or "git commit -a") fn test_format_status_output_emoji_filename() { let porcelain = "## main\nA 🎉-party.txt\n M 日本語ファイル.rs\n"; let result = format_status_output(porcelain); - assert!(result.contains("branch: main")); + assert!(result.contains("* main")); } /// Regression test: --oneline and other user format flags must preserve all commits. diff --git a/src/go_cmd.rs b/src/go_cmd.rs index 06ee8b54..d250c427 100644 --- a/src/go_cmd.rs +++ b/src/go_cmd.rs @@ -348,7 +348,7 @@ fn filter_go_test_json(output: &str) -> String { if !has_failures { return format!( - "✓ Go test: {} passed in {} packages", + "Go test: {} passed in {} packages", total_pass, total_packages ); } @@ -372,7 +372,7 @@ fn filter_go_test_json(output: &str) -> String { } result.push_str(&format!( - "\n📦 {} [build failed]\n", + "\n{} [build failed]\n", compact_package_name(package) )); @@ -392,14 +392,14 @@ fn filter_go_test_json(output: &str) -> String { } result.push_str(&format!( - "\n📦 {} ({} passed, {} failed)\n", + "\n{} ({} passed, {} failed)\n", compact_package_name(package), pkg_result.pass, pkg_result.fail )); for (test, outputs) in &pkg_result.failed_tests { - result.push_str(&format!(" ❌ {}\n", test)); + result.push_str(&format!(" [FAIL] {}\n", test)); // Show failure output (limit to key lines) let relevant_lines: Vec<&String> = outputs @@ -452,7 +452,7 @@ fn filter_go_build(output: &str) -> String { } if 
errors.is_empty() { - return "✓ Go build: Success".to_string(); + return "Go build: Success".to_string(); } let mut result = String::new(); @@ -484,7 +484,7 @@ fn filter_go_vet(output: &str) -> String { } if issues.is_empty() { - return "✓ Go vet: No issues found".to_string(); + return "Go vet: No issues found".to_string(); } let mut result = String::new(); @@ -524,7 +524,7 @@ mod tests { {"Time":"2024-01-01T10:00:02Z","Action":"pass","Package":"example.com/foo","Elapsed":0.5}"#; let result = filter_go_test_json(output); - assert!(result.contains("✓ Go test")); + assert!(result.contains("Go test")); assert!(result.contains("1 passed")); assert!(result.contains("1 packages")); } @@ -547,7 +547,7 @@ mod tests { fn test_filter_go_build_success() { let output = ""; let result = filter_go_build(output); - assert!(result.contains("✓ Go build")); + assert!(result.contains("Go build")); assert!(result.contains("Success")); } @@ -567,7 +567,7 @@ main.go:15:2: cannot use x (type int) as type string"#; fn test_filter_go_vet_no_issues() { let output = ""; let result = filter_go_vet(output); - assert!(result.contains("✓ Go vet")); + assert!(result.contains("Go vet")); assert!(result.contains("No issues found")); } diff --git a/src/golangci_cmd.rs b/src/golangci_cmd.rs index ab0f74f3..f6a3166c 100644 --- a/src/golangci_cmd.rs +++ b/src/golangci_cmd.rs @@ -118,7 +118,7 @@ fn filter_golangci_json(output: &str) -> String { let issues = golangci_output.issues; if issues.is_empty() { - return "✓ golangci-lint: No issues found".to_string(); + return "golangci-lint: No issues found".to_string(); } let total_issues = issues.len(); @@ -215,7 +215,7 @@ mod tests { fn test_filter_golangci_no_issues() { let output = r#"{"Issues":[]}"#; let result = filter_golangci_json(output); - assert!(result.contains("✓ golangci-lint")); + assert!(result.contains("golangci-lint")); assert!(result.contains("No issues found")); } diff --git a/src/grep_cmd.rs b/src/grep_cmd.rs index 50ee4ad6..c1819dde 
100644 --- a/src/grep_cmd.rs +++ b/src/grep_cmd.rs @@ -62,7 +62,7 @@ pub fn run( eprintln!("{}", stderr.trim()); } } - let msg = format!("🔍 0 for '{}'", pattern); + let msg = format!("0 matches for '{}'", pattern); println!("{}", msg); timer.track( &format!("grep -rn '{}' {}", pattern, path), @@ -105,7 +105,7 @@ pub fn run( } let mut rtk_output = String::new(); - rtk_output.push_str(&format!("🔍 {} in {}F:\n\n", total, by_file.len())); + rtk_output.push_str(&format!("{} matches in {}F:\n\n", total, by_file.len())); let mut shown = 0; let mut files: Vec<_> = by_file.iter().collect(); @@ -117,7 +117,7 @@ pub fn run( } let file_display = compact_path(file); - rtk_output.push_str(&format!("📄 {} ({}):\n", file_display, matches.len())); + rtk_output.push_str(&format!("[file] {} ({}):\n", file_display, matches.len())); let per_file = config::limits().grep_max_per_file; for (line_num, content) in matches.iter().take(per_file) { diff --git a/src/init.rs b/src/init.rs index e50b79f5..565d1719 100644 --- a/src/init.rs +++ b/src/init.rs @@ -722,7 +722,7 @@ fn run_default_mode( _verbose: u8, _install_opencode: bool, ) -> Result<()> { - eprintln!("⚠️ Hook-based mode requires Unix (macOS/Linux)."); + eprintln!("[warn] Hook-based mode requires Unix (macOS/Linux)."); eprintln!(" Windows: use --claude-md mode for full injection."); eprintln!(" Falling back to --claude-md mode."); run_claude_md_mode(_global, _verbose, _install_opencode) @@ -779,7 +779,7 @@ fn run_default_mode( println!(" CLAUDE.md: @RTK.md reference added"); if migrated { - println!("\n ✅ Migrated: removed 137-line RTK block from CLAUDE.md"); + println!("\n [ok] Migrated: removed 137-line RTK block from CLAUDE.md"); println!(" replaced with @RTK.md (10 lines)"); } @@ -880,7 +880,7 @@ fn run_hook_only_mode( install_opencode: bool, ) -> Result<()> { if !global { - eprintln!("⚠️ Warning: --hook-only only makes sense with --global"); + eprintln!("[warn] Warning: --hook-only only makes sense with --global"); eprintln!(" 
For local projects, use default mode or --claude-md"); return Ok(()); } @@ -963,22 +963,22 @@ fn run_claude_md_mode(global: bool, verbose: u8, install_opencode: bool) -> Resu match action { RtkBlockUpsert::Added => { fs::write(&path, new_content)?; - println!("✅ Added rtk instructions to existing {}", path.display()); + println!("[ok] Added rtk instructions to existing {}", path.display()); } RtkBlockUpsert::Updated => { fs::write(&path, new_content)?; - println!("✅ Updated rtk instructions in {}", path.display()); + println!("[ok] Updated rtk instructions in {}", path.display()); } RtkBlockUpsert::Unchanged => { println!( - "✅ {} already contains up-to-date rtk instructions", + "[ok] {} already contains up-to-date rtk instructions", path.display() ); return Ok(()); } RtkBlockUpsert::Malformed => { eprintln!( - "⚠️ Warning: Found '\nold\n\n", + ) + .unwrap(); + + let added = patch_agents_md(&agents_md, 0).unwrap(); + + assert!(added); + let content = fs::read_to_string(&agents_md).unwrap(); + assert!(!content.contains("old")); + assert_eq!(content.matches("@RTK.md").count(), 1); + } + + #[test] + fn test_uninstall_codex_at_is_idempotent() { + let temp = TempDir::new().unwrap(); + let codex_dir = temp.path(); + let agents_md = codex_dir.join("AGENTS.md"); + let rtk_md = codex_dir.join("RTK.md"); + + fs::write(&agents_md, "# Team rules\n\n@RTK.md\n").unwrap(); + fs::write(&rtk_md, "codex config").unwrap(); + + let removed_first = uninstall_codex_at(codex_dir, 0).unwrap(); + let removed_second = uninstall_codex_at(codex_dir, 0).unwrap(); + + assert_eq!(removed_first.len(), 2); + assert!(removed_second.is_empty()); + assert!(!rtk_md.exists()); + + let content = fs::read_to_string(&agents_md).unwrap(); + assert!(!content.contains("@RTK.md")); + assert!(content.contains("# Team rules")); + } + #[test] fn test_local_init_unchanged() { // Local init should use claude-md mode diff --git a/src/main.rs b/src/main.rs index 81350dea..1429b1da 100644 --- a/src/main.rs +++ 
b/src/main.rs @@ -323,9 +323,9 @@ enum Commands { extra_args: Vec, }, - /// Initialize rtk instructions in CLAUDE.md + /// Initialize rtk instructions for assistant CLI usage Init { - /// Add to global ~/.claude/CLAUDE.md instead of local + /// Add to global assistant config directory instead of local project file #[arg(short, long)] global: bool, @@ -357,9 +357,13 @@ enum Commands { #[arg(long = "no-patch", group = "patch")] no_patch: bool, - /// Remove all RTK artifacts (hook, RTK.md, CLAUDE.md reference, settings.json entry) + /// Remove RTK artifacts for the selected assistant mode #[arg(long)] uninstall: bool, + + /// Target Codex CLI (uses AGENTS.md + RTK.md, no Claude hook patching) + #[arg(long)] + codex: bool, }, /// Download with compact output (strips progress bars) @@ -1632,11 +1636,12 @@ fn main() -> Result<()> { auto_patch, no_patch, uninstall, + codex, } => { if show { - init::show_config()?; + init::show_config(codex)?; } else if uninstall { - init::uninstall(global, gemini, cli.verbose)?; + init::uninstall(global, gemini, codex, cli.verbose)?; } else if gemini { let patch_mode = if auto_patch { init::PatchMode::Auto @@ -1663,6 +1668,7 @@ fn main() -> Result<()> { install_opencode, claude_md, hook_only, + codex, patch_mode, cli.verbose, )?; From 0800bbecef3c1744336aaab36f6888066a258c2e Mon Sep 17 00:00:00 2001 From: Jeziel Lopes Date: Wed, 18 Mar 2026 12:21:07 -0300 Subject: [PATCH 05/30] feat(copilot): add Copilot hook support (VS Code + CLI) (#605) Add `rtk hook copilot` command that handles both VS Code Copilot Chat (updatedInput rewrite) and GitHub Copilot CLI (deny-with-suggestion). 
- Auto-detects format: snake_case (VS Code) vs camelCase (Copilot CLI) - Delegates to `rtk rewrite` (single source of truth) - 14 hook tests (format detection, rewrite gating, output shape) - .github/hooks/rtk-rewrite.json for repo-scoped hook config - .github/copilot-instructions.md for RTK awareness - Test script: hooks/test-copilot-rtk-rewrite.sh Rebased on develop (includes Gemini #573, Codex #377, OpenClaw #358). Original work by @jeziellopes, cleaned up and rebased by maintainer. Signed-off-by: Patrick szymkowiak Co-authored-by: Patrick szymkowiak --- .github/copilot-instructions.md | 127 +++++++++++++ .github/hooks/rtk-rewrite.json | 12 ++ hooks/copilot-rtk-awareness.md | 60 ++++++ hooks/test-copilot-rtk-rewrite.sh | 293 ++++++++++++++++++++++++++++++ src/hook_cmd.rs | 208 ++++++++++++++++++++- src/main.rs | 7 +- 6 files changed, 705 insertions(+), 2 deletions(-) create mode 100644 .github/copilot-instructions.md create mode 100644 .github/hooks/rtk-rewrite.json create mode 100644 hooks/copilot-rtk-awareness.md create mode 100755 hooks/test-copilot-rtk-rewrite.sh diff --git a/.github/copilot-instructions.md b/.github/copilot-instructions.md new file mode 100644 index 00000000..7651df48 --- /dev/null +++ b/.github/copilot-instructions.md @@ -0,0 +1,127 @@ +# Copilot Instructions for rtk + +**rtk (Rust Token Killer)** is a CLI proxy that filters and compresses command outputs before they reach an LLM context, saving 60–90% of tokens. It wraps common tools (`git`, `cargo`, `grep`, `pnpm`, `go`, etc.) and outputs condensed summaries instead of raw output. + +## Using rtk in this session + +**Always prefix commands with `rtk` when running shell commands** — this is the entire point of the project and reduces token consumption for every operation you perform. 
+ +```bash +# Instead of: Use: +git status rtk git status +git log -10 rtk git log -10 +cargo test rtk cargo test +cargo clippy --all-targets rtk cargo clippy --all-targets +grep -r "pattern" src/ rtk grep -r "pattern" src/ +``` + +**rtk meta-commands** (always use these directly, no prefix needed): +```bash +rtk gain # Show token savings analytics for this session +rtk gain --history # Full command history with per-command savings +rtk discover # Scan session history for missed rtk opportunities +rtk proxy # Run a command raw (no filtering) but still track it +``` + +**Verify rtk is installed before starting:** +```bash +rtk --version # Should print: rtk X.Y.Z +rtk gain # Should show a dashboard (not "command not found") +``` + +> ⚠️ **Name collision**: `rtk gain` failing means you have `reachingforthejack/rtk` (Rust Type Kit) installed instead of this project. Run `which rtk` and check the binary source. + +## Build, Test & Lint + +```bash +# Development build +cargo build + +# Run all tests +cargo test + +# Run a single test by name +cargo test test_filter_git_log + +# Run all tests in a module +cargo test git::tests:: + +# Run tests with stdout +cargo test -- --nocapture + +# Pre-commit gate (must all pass before any PR) +cargo fmt --all --check && cargo clippy --all-targets && cargo test + +# Smoke tests (requires installed binary) +bash scripts/test-all.sh +``` + +PRs target the **`develop`** branch, not `main`. All commits require a DCO sign-off (`git commit -s`). + +## Architecture + +``` +main.rs ← Clap Commands enum → specialized module (git.rs, *_cmd.rs, etc.) + ↓ + execute subprocess + ↓ + filter/compress output + ↓ + tracking::TimedExecution → SQLite (~/.local/share/rtk/tracking.db) +``` + +Key modules: +- **`main.rs`** — Clap `Commands` enum routes every subcommand to its module. Each arm calls `tracking::TimedExecution::start()` before running, then `.track(...)` after. 
+- **`filter.rs`** — Language-aware filtering with `FilterLevel` (`none` / `minimal` / `aggressive`) and `Language` enum. Used by `read` and `smart` commands. +- **`tracking.rs`** — SQLite persistence for token savings, scoped per project path. Powers `rtk gain`. +- **`tee.rs`** — On filter failure, saves raw output to `~/.local/share/rtk/tee/` and prints a one-line hint so the LLM can re-read without re-running the command. +- **`utils.rs`** — Shared helpers: `truncate`, `strip_ansi`, `execute_command`, package-manager auto-detection (pnpm/yarn/npm/npx). + +New commands follow this structure: one file `src/_cmd.rs` with a `pub fn run(...)` entry point, registered in the `Commands` enum in `main.rs`. + +## Key Conventions + +### Error handling +- Use `anyhow::Result` throughout (this is a binary, not a library). +- Always attach context: `operation.context("description")?` — never bare `?` without context. +- No `unwrap()` in production code; `expect("reason")` is acceptable only in tests. +- Every filter must fall back to raw command execution on error — never break the user's workflow. + +### Regex +- Compile once with `lazy_static!`, never inside a function body: + ```rust + lazy_static! { + static ref RE: Regex = Regex::new(r"pattern").unwrap(); + } + ``` + +### Testing +- Unit tests live **inside the module file** in `#[cfg(test)] mod tests { ... }` — not in `tests/`. +- Fixtures are real captured command output in `tests/fixtures/_raw.txt`, loaded with `include_str!("../tests/fixtures/...")`. +- Each test module defines its own local `fn count_tokens(text: &str) -> usize` (word-split approximation) — there is no shared utility for this. +- Token savings assertions use `assert!(savings >= 60.0, ...)`. +- Snapshot tests use `assert_snapshot!()` from the `insta` crate; review with `cargo insta review`. + +### Adding a new command +1. Create `src/_cmd.rs` with `pub fn run(...)`. +2. Add `mod _cmd;` at the top of `main.rs`. +3. 
Add a variant to the `Commands` enum with `#[arg(trailing_var_arg = true, allow_hyphen_values = true)]` for pass-through flags. +4. Route the variant in the `match` block, wrapping execution with `tracking::TimedExecution`. +5. Write a fixture from real output, then unit tests in the module file. +6. Update `README.md` (command list + savings %) and `CHANGELOG.md`. + +### Exit codes +Preserve the underlying command's exit code. Use `std::process::exit(code)` when the child process exits non-zero. + +### Performance constraints +- Startup must stay under 10ms — no async runtime (no `tokio`/`async-std`). +- No blocking I/O at startup; config is loaded on-demand. +- Binary size target: <5 MB stripped. + +### Branch naming +``` +fix(scope): short-description +feat(scope): short-description +chore(scope): short-description +``` +`scope` is the affected component (e.g. `git`, `filter`, `tracking`). diff --git a/.github/hooks/rtk-rewrite.json b/.github/hooks/rtk-rewrite.json new file mode 100644 index 00000000..c488d434 --- /dev/null +++ b/.github/hooks/rtk-rewrite.json @@ -0,0 +1,12 @@ +{ + "hooks": { + "PreToolUse": [ + { + "type": "command", + "command": "rtk hook", + "cwd": ".", + "timeout": 5 + } + ] + } +} diff --git a/hooks/copilot-rtk-awareness.md b/hooks/copilot-rtk-awareness.md new file mode 100644 index 00000000..185f460c --- /dev/null +++ b/hooks/copilot-rtk-awareness.md @@ -0,0 +1,60 @@ +# RTK — Copilot Integration (VS Code Copilot Chat + Copilot CLI) + +**Usage**: Token-optimized CLI proxy (60-90% savings on dev operations) + +## What's automatic + +The `.github/copilot-instructions.md` file is loaded at session start by both Copilot CLI and VS Code Copilot Chat. +It instructs Copilot to prefix commands with `rtk` automatically. + +The `.github/hooks/rtk-rewrite.json` hook adds a `PreToolUse` safety net via `rtk hook` — +a cross-platform Rust binary that intercepts raw bash tool calls and rewrites them. 
+No shell scripts, no `jq` dependency, works on Windows natively. + +## Meta commands (always use directly) + +```bash +rtk gain # Token savings dashboard for this session +rtk gain --history # Per-command history with savings % +rtk discover # Scan session history for missed rtk opportunities +rtk proxy # Run raw (no filtering) but still track it +``` + +## Installation verification + +```bash +rtk --version # Should print: rtk X.Y.Z +rtk gain # Should show a dashboard (not "command not found") +which rtk # Verify correct binary path +``` + +> ⚠️ **Name collision**: If `rtk gain` fails, you may have `reachingforthejack/rtk` +> (Rust Type Kit) installed instead. Check `which rtk` and reinstall from rtk-ai/rtk. + +## How the hook works + +`rtk hook` reads `PreToolUse` JSON from stdin, detects the agent format, and responds appropriately: + +**VS Code Copilot Chat** (supports `updatedInput` — transparent rewrite, no denial): +1. Agent runs `git status` → `rtk hook` intercepts via `PreToolUse` +2. `rtk hook` detects VS Code format (`tool_name`/`tool_input` keys) +3. Returns `hookSpecificOutput.updatedInput.command = "rtk git status"` +4. Agent runs the rewritten command silently — no denial, no retry + +**GitHub Copilot CLI** (deny-with-suggestion — CLI ignores `updatedInput` today, see [issue #2013](https://github.com/github/copilot-cli/issues/2013)): +1. Agent runs `git status` → `rtk hook` intercepts via `PreToolUse` +2. `rtk hook` detects Copilot CLI format (`toolName`/`toolArgs` keys) +3. Returns `permissionDecision: deny` with reason: `"Token savings: use 'rtk git status' instead"` +4. Copilot reads the reason and re-runs `rtk git status` + +When Copilot CLI adds `updatedInput` support, only `rtk hook` needs updating — no config changes. 
+ +## Integration comparison + +| Tool | Mechanism | Hook output | File | +|-----------------------|-----------------------------------------|--------------------------|------------------------------------| +| Claude Code | `PreToolUse` hook with `updatedInput` | Transparent rewrite | `hooks/rtk-rewrite.sh` | +| VS Code Copilot Chat | `PreToolUse` hook with `updatedInput` | Transparent rewrite | `.github/hooks/rtk-rewrite.json` | +| GitHub Copilot CLI | `PreToolUse` deny-with-suggestion | Denial + retry | `.github/hooks/rtk-rewrite.json` | +| OpenCode | Plugin `tool.execute.before` | Transparent rewrite | `hooks/opencode-rtk.ts` | +| (any) | Custom instructions | Prompt-level guidance | `.github/copilot-instructions.md` | diff --git a/hooks/test-copilot-rtk-rewrite.sh b/hooks/test-copilot-rtk-rewrite.sh new file mode 100755 index 00000000..f1cca949 --- /dev/null +++ b/hooks/test-copilot-rtk-rewrite.sh @@ -0,0 +1,293 @@ +#!/usr/bin/env bash +# Test suite for rtk hook (cross-platform preToolUse handler). +# Feeds mock preToolUse JSON through `rtk hook` and verifies allow/deny decisions. 
+# +# Usage: bash hooks/test-copilot-rtk-rewrite.sh +# +# Copilot CLI input format: +# {"toolName":"bash","toolArgs":"{\"command\":\"...\"}"} +# Output on intercept: {"permissionDecision":"deny","permissionDecisionReason":"..."} +# +# VS Code Copilot Chat input format: +# {"tool_name":"Bash","tool_input":{"command":"..."}} +# Output on intercept: {"hookSpecificOutput":{"permissionDecision":"allow","updatedInput":{...}}} +# +# Output on pass-through: empty (exit 0) + +RTK="${RTK:-rtk}" +PASS=0 +FAIL=0 +TOTAL=0 + +# Colors +GREEN='\033[32m' +RED='\033[31m' +DIM='\033[2m' +RESET='\033[0m' + +# Build a Copilot CLI preToolUse input JSON +copilot_bash_input() { + local cmd="$1" + local tool_args + tool_args=$(jq -cn --arg cmd "$cmd" '{"command":$cmd}') + jq -cn --arg ta "$tool_args" '{"toolName":"bash","toolArgs":$ta}' +} + +# Build a VS Code Copilot Chat preToolUse input JSON +vscode_bash_input() { + local cmd="$1" + jq -cn --arg cmd "$cmd" '{"tool_name":"Bash","tool_input":{"command":$cmd}}' +} + +# Build a non-bash tool input +tool_input() { + local tool_name="$1" + jq -cn --arg t "$tool_name" '{"toolName":$t,"toolArgs":"{}"}' +} + +# Assert Copilot CLI: hook denies and reason contains the expected rtk command +test_deny() { + local description="$1" + local input_cmd="$2" + local expected_rtk="$3" + TOTAL=$((TOTAL + 1)) + + local output + output=$(copilot_bash_input "$input_cmd" | "$RTK" hook 2>/dev/null) || true + + local decision reason + decision=$(echo "$output" | jq -r '.permissionDecision // empty' 2>/dev/null) + reason=$(echo "$output" | jq -r '.permissionDecisionReason // empty' 2>/dev/null) + + if [ "$decision" = "deny" ] && echo "$reason" | grep -qF "$expected_rtk"; then + printf " ${GREEN}DENY${RESET} %s ${DIM}→ %s${RESET}\n" "$description" "$expected_rtk" + PASS=$((PASS + 1)) + else + printf " ${RED}FAIL${RESET} %s\n" "$description" + printf " expected decision: deny, reason containing: %s\n" "$expected_rtk" + printf " actual decision: %s\n" "$decision" + 
printf " actual reason: %s\n" "$reason" + FAIL=$((FAIL + 1)) + fi +} + +# Assert VS Code Copilot Chat: hook returns updatedInput (allow) with rewritten command +test_vscode_rewrite() { + local description="$1" + local input_cmd="$2" + local expected_rtk="$3" + TOTAL=$((TOTAL + 1)) + + local output + output=$(vscode_bash_input "$input_cmd" | "$RTK" hook 2>/dev/null) || true + + local decision updated_cmd + decision=$(echo "$output" | jq -r '.hookSpecificOutput.permissionDecision // empty' 2>/dev/null) + updated_cmd=$(echo "$output" | jq -r '.hookSpecificOutput.updatedInput.command // empty' 2>/dev/null) + + if [ "$decision" = "allow" ] && echo "$updated_cmd" | grep -qF "$expected_rtk"; then + printf " ${GREEN}REWRITE${RESET} %s ${DIM}→ %s${RESET}\n" "$description" "$updated_cmd" + PASS=$((PASS + 1)) + else + printf " ${RED}FAIL${RESET} %s\n" "$description" + printf " expected decision: allow, updatedInput containing: %s\n" "$expected_rtk" + printf " actual decision: %s\n" "$decision" + printf " actual updatedInput: %s\n" "$updated_cmd" + FAIL=$((FAIL + 1)) + fi +} + +# Assert the hook emits no output (pass-through) +test_allow() { + local description="$1" + local input="$2" + TOTAL=$((TOTAL + 1)) + + local output + output=$(echo "$input" | "$RTK" hook 2>/dev/null) || true + + if [ -z "$output" ]; then + printf " ${GREEN}PASS${RESET} %s ${DIM}→ (allow)${RESET}\n" "$description" + PASS=$((PASS + 1)) + else + local decision + decision=$(echo "$output" | jq -r '.permissionDecision // .hookSpecificOutput.permissionDecision // empty' 2>/dev/null) + printf " ${RED}FAIL${RESET} %s\n" "$description" + printf " expected: (no output)\n" + printf " actual: permissionDecision=%s\n" "$decision" + FAIL=$((FAIL + 1)) + fi +} + +echo "============================================" +echo " RTK Hook Test Suite (rtk hook)" +echo "============================================" +echo "" + +# ---- SECTION 1: Copilot CLI — commands that should be denied ---- +echo "--- Copilot CLI: 
intercepted (deny with rtk suggestion) ---" + +test_deny "git status" \ + "git status" \ + "rtk git status" + +test_deny "git log --oneline -10" \ + "git log --oneline -10" \ + "rtk git log" + +test_deny "git diff HEAD" \ + "git diff HEAD" \ + "rtk git diff" + +test_deny "cargo test" \ + "cargo test" \ + "rtk cargo test" + +test_deny "cargo clippy --all-targets" \ + "cargo clippy --all-targets" \ + "rtk cargo clippy" + +test_deny "cargo build" \ + "cargo build" \ + "rtk cargo build" + +test_deny "grep -rn pattern src/" \ + "grep -rn pattern src/" \ + "rtk grep" + +test_deny "gh pr list" \ + "gh pr list" \ + "rtk gh" + +echo "" + +# ---- SECTION 2: VS Code Copilot Chat — commands that should be rewritten via updatedInput ---- +echo "--- VS Code Copilot Chat: intercepted (updatedInput rewrite) ---" + +test_vscode_rewrite "git status" \ + "git status" \ + "rtk git status" + +test_vscode_rewrite "cargo test" \ + "cargo test" \ + "rtk cargo test" + +test_vscode_rewrite "gh pr list" \ + "gh pr list" \ + "rtk gh" + +echo "" + +# ---- SECTION 3: Pass-through cases ---- +echo "--- Pass-through (allow silently) ---" + +test_allow "Copilot CLI: already rtk: rtk git status" \ + "$(copilot_bash_input "rtk git status")" + +test_allow "Copilot CLI: already rtk: rtk cargo test" \ + "$(copilot_bash_input "rtk cargo test")" + +test_allow "Copilot CLI: heredoc" \ + "$(copilot_bash_input "cat <<'EOF' +hello +EOF")" + +test_allow "Copilot CLI: unknown command: htop" \ + "$(copilot_bash_input "htop")" + +test_allow "Copilot CLI: unknown command: echo" \ + "$(copilot_bash_input "echo hello world")" + +test_allow "Copilot CLI: non-bash tool: view" \ + "$(tool_input "view")" + +test_allow "Copilot CLI: non-bash tool: edit" \ + "$(tool_input "edit")" + +test_allow "VS Code: already rtk" \ + "$(vscode_bash_input "rtk git status")" + +test_allow "VS Code: non-bash tool: editFiles" \ + "$(jq -cn '{"tool_name":"editFiles"}')" + +echo "" + +# ---- SECTION 4: Output format assertions ---- +echo 
"--- Output format ---" + +# Copilot CLI output format +TOTAL=$((TOTAL + 1)) +raw_output=$(copilot_bash_input "git status" | "$RTK" hook 2>/dev/null) + +if echo "$raw_output" | jq . >/dev/null 2>&1; then + printf " ${GREEN}PASS${RESET} Copilot CLI: output is valid JSON\n" + PASS=$((PASS + 1)) +else + printf " ${RED}FAIL${RESET} Copilot CLI: output is not valid JSON: %s\n" "$raw_output" + FAIL=$((FAIL + 1)) +fi + +TOTAL=$((TOTAL + 1)) +decision=$(echo "$raw_output" | jq -r '.permissionDecision') +if [ "$decision" = "deny" ]; then + printf " ${GREEN}PASS${RESET} Copilot CLI: permissionDecision == \"deny\"\n" + PASS=$((PASS + 1)) +else + printf " ${RED}FAIL${RESET} Copilot CLI: expected \"deny\", got \"%s\"\n" "$decision" + FAIL=$((FAIL + 1)) +fi + +TOTAL=$((TOTAL + 1)) +reason=$(echo "$raw_output" | jq -r '.permissionDecisionReason') +if echo "$reason" | grep -qE '`rtk [^`]+`'; then + printf " ${GREEN}PASS${RESET} Copilot CLI: reason contains backtick-quoted rtk command ${DIM}→ %s${RESET}\n" "$reason" + PASS=$((PASS + 1)) +else + printf " ${RED}FAIL${RESET} Copilot CLI: reason missing backtick-quoted command: %s\n" "$reason" + FAIL=$((FAIL + 1)) +fi + +# VS Code output format +TOTAL=$((TOTAL + 1)) +vscode_output=$(vscode_bash_input "git status" | "$RTK" hook 2>/dev/null) + +if echo "$vscode_output" | jq . 
>/dev/null 2>&1; then + printf " ${GREEN}PASS${RESET} VS Code: output is valid JSON\n" + PASS=$((PASS + 1)) +else + printf " ${RED}FAIL${RESET} VS Code: output is not valid JSON: %s\n" "$vscode_output" + FAIL=$((FAIL + 1)) +fi + +TOTAL=$((TOTAL + 1)) +vscode_decision=$(echo "$vscode_output" | jq -r '.hookSpecificOutput.permissionDecision') +if [ "$vscode_decision" = "allow" ]; then + printf " ${GREEN}PASS${RESET} VS Code: hookSpecificOutput.permissionDecision == \"allow\"\n" + PASS=$((PASS + 1)) +else + printf " ${RED}FAIL${RESET} VS Code: expected \"allow\", got \"%s\"\n" "$vscode_decision" + FAIL=$((FAIL + 1)) +fi + +TOTAL=$((TOTAL + 1)) +vscode_updated=$(echo "$vscode_output" | jq -r '.hookSpecificOutput.updatedInput.command') +if echo "$vscode_updated" | grep -q "^rtk "; then + printf " ${GREEN}PASS${RESET} VS Code: updatedInput.command starts with rtk ${DIM}→ %s${RESET}\n" "$vscode_updated" + PASS=$((PASS + 1)) +else + printf " ${RED}FAIL${RESET} VS Code: updatedInput.command should start with rtk: %s\n" "$vscode_updated" + FAIL=$((FAIL + 1)) +fi + +echo "" + +# ---- SUMMARY ---- +echo "============================================" +if [ $FAIL -eq 0 ]; then + printf " ${GREEN}ALL $TOTAL TESTS PASSED${RESET}\n" +else + printf " ${RED}$FAIL FAILED${RESET} / $TOTAL total ($PASS passed)\n" +fi +echo "============================================" + +exit $FAIL diff --git a/src/hook_cmd.rs b/src/hook_cmd.rs index b85551f3..29a7365d 100644 --- a/src/hook_cmd.rs +++ b/src/hook_cmd.rs @@ -1,9 +1,144 @@ use anyhow::{Context, Result}; -use serde_json::Value; +use serde_json::{json, Value}; use std::io::{self, Read}; use crate::discover::registry::rewrite_command; +// ── Copilot hook (VS Code + Copilot CLI) ────────────────────── + +/// Format detected from the preToolUse JSON input. +enum HookFormat { + /// VS Code Copilot Chat / Claude Code: `tool_name` + `tool_input.command`, supports `updatedInput`. 
+    VsCode { command: String },
+    /// GitHub Copilot CLI: camelCase `toolName` + `toolArgs` (JSON string), deny-with-suggestion only.
+    CopilotCli { command: String },
+    /// Non-bash tool, already uses rtk, or unknown format — pass through silently.
+    PassThrough,
+}
+
+/// Run the Copilot preToolUse hook.
+/// Auto-detects VS Code Copilot Chat vs Copilot CLI format.
+pub fn run_copilot() -> Result<()> {
+    let mut input = String::new();
+    io::stdin()
+        .read_to_string(&mut input)
+        .context("Failed to read stdin")?;
+
+    let input = input.trim();
+    if input.is_empty() {
+        return Ok(());
+    }
+
+    let v: Value = match serde_json::from_str(input) {
+        Ok(v) => v,
+        Err(e) => {
+            eprintln!("[rtk hook] Failed to parse JSON input: {e}");
+            return Ok(());
+        }
+    };
+
+    match detect_format(&v) {
+        HookFormat::VsCode { command } => handle_vscode(&command),
+        HookFormat::CopilotCli { command } => handle_copilot_cli(&command),
+        HookFormat::PassThrough => Ok(()),
+    }
+}
+
+fn detect_format(v: &Value) -> HookFormat {
+    // VS Code Copilot Chat / Claude Code: snake_case keys
+    if let Some(tool_name) = v.get("tool_name").and_then(|t| t.as_str()) {
+        if matches!(tool_name, "runTerminalCommand" | "Bash" | "bash") {
+            if let Some(cmd) = v
+                .pointer("/tool_input/command")
+                .and_then(|c| c.as_str())
+                .filter(|c| !c.is_empty())
+            {
+                return HookFormat::VsCode {
+                    command: cmd.to_string(),
+                };
+            }
+        }
+        return HookFormat::PassThrough;
+    }
+
+    // Copilot CLI: camelCase keys, toolArgs is a JSON-encoded string
+    if let Some(tool_name) = v.get("toolName").and_then(|t| t.as_str()) {
+        if tool_name == "bash" {
+            if let Some(tool_args_str) = v.get("toolArgs").and_then(|t| t.as_str()) {
+                if let Ok(tool_args) = serde_json::from_str::<Value>(tool_args_str) {
+                    if let Some(cmd) = tool_args
+                        .get("command")
+                        .and_then(|c| c.as_str())
+                        .filter(|c| !c.is_empty())
+                    {
+                        return HookFormat::CopilotCli {
+                            command: cmd.to_string(),
+                        };
+                    }
+                }
+            }
+        }
+        return HookFormat::PassThrough;
+    }
+
+    
HookFormat::PassThrough
+}
+
+fn get_rewritten(cmd: &str) -> Option<String> {
+    if cmd.contains("<<") {
+        return None;
+    }
+
+    let excluded = crate::config::Config::load()
+        .map(|c| c.hooks.exclude_commands)
+        .unwrap_or_default();
+
+    let rewritten = rewrite_command(cmd, &excluded)?;
+
+    if rewritten == cmd {
+        return None;
+    }
+
+    Some(rewritten)
+}
+
+fn handle_vscode(cmd: &str) -> Result<()> {
+    let rewritten = match get_rewritten(cmd) {
+        Some(r) => r,
+        None => return Ok(()),
+    };
+
+    let output = json!({
+        "hookSpecificOutput": {
+            "hookEventName": "PreToolUse",
+            "permissionDecision": "allow",
+            "permissionDecisionReason": "RTK auto-rewrite",
+            "updatedInput": { "command": rewritten }
+        }
+    });
+    println!("{output}");
+    Ok(())
+}
+
+fn handle_copilot_cli(cmd: &str) -> Result<()> {
+    let rewritten = match get_rewritten(cmd) {
+        Some(r) => r,
+        None => return Ok(()),
+    };
+
+    let output = json!({
+        "permissionDecision": "deny",
+        "permissionDecisionReason": format!(
+            "Token savings: use `{}` instead (rtk saves 60-90% tokens)",
+            rewritten
+        )
+    });
+    println!("{output}");
+    Ok(())
+}
+
+// ── Gemini hook ───────────────────────────────────────────────
+
 /// Run the Gemini CLI BeforeTool hook.
 /// Reads JSON from stdin, rewrites shell commands to rtk equivalents,
 /// outputs JSON to stdout in Gemini CLI format.
@@ -61,6 +196,77 @@ fn print_rewrite(cmd: &str) {
 mod tests {
     use super::*;
 
+    // --- Copilot format detection ---
+
+    fn vscode_input(tool: &str, cmd: &str) -> Value {
+        json!({
+            "tool_name": tool,
+            "tool_input": { "command": cmd }
+        })
+    }
+
+    fn copilot_cli_input(cmd: &str) -> Value {
+        let args = serde_json::to_string(&json!({ "command": cmd })).unwrap();
+        json!({ "toolName": "bash", "toolArgs": args })
+    }
+
+    #[test]
+    fn test_detect_vscode_bash() {
+        assert!(matches!(
+            detect_format(&vscode_input("Bash", "git status")),
+            HookFormat::VsCode { ..
} + )); + } + + #[test] + fn test_detect_vscode_run_terminal_command() { + assert!(matches!( + detect_format(&vscode_input("runTerminalCommand", "cargo test")), + HookFormat::VsCode { .. } + )); + } + + #[test] + fn test_detect_copilot_cli_bash() { + assert!(matches!( + detect_format(&copilot_cli_input("git status")), + HookFormat::CopilotCli { .. } + )); + } + + #[test] + fn test_detect_non_bash_is_passthrough() { + let v = json!({ "tool_name": "editFiles" }); + assert!(matches!(detect_format(&v), HookFormat::PassThrough)); + } + + #[test] + fn test_detect_unknown_is_passthrough() { + assert!(matches!(detect_format(&json!({})), HookFormat::PassThrough)); + } + + #[test] + fn test_get_rewritten_supported() { + assert!(get_rewritten("git status").is_some()); + } + + #[test] + fn test_get_rewritten_unsupported() { + assert!(get_rewritten("htop").is_none()); + } + + #[test] + fn test_get_rewritten_already_rtk() { + assert!(get_rewritten("rtk git status").is_none()); + } + + #[test] + fn test_get_rewritten_heredoc() { + assert!(get_rewritten("cat <<'EOF'\nhello\nEOF").is_none()); + } + + // --- Gemini format --- + #[test] fn test_print_allow_format() { // Verify the allow JSON format matches Gemini CLI expectations diff --git a/src/main.rs b/src/main.rs index 1429b1da..1279a454 100644 --- a/src/main.rs +++ b/src/main.rs @@ -673,7 +673,7 @@ enum Commands { args: Vec, }, - /// Hook processors for LLM CLI tools (Gemini CLI, etc.) + /// Hook processors for LLM CLI tools (Gemini CLI, Copilot, etc.) 
Hook { #[command(subcommand)] command: HookCommands, @@ -684,6 +684,8 @@ enum Commands { enum HookCommands { /// Process Gemini CLI BeforeTool hook (reads JSON from stdin) Gemini, + /// Process Copilot preToolUse hook (VS Code + Copilot CLI, reads JSON from stdin) + Copilot, } #[derive(Subcommand)] @@ -2014,6 +2016,9 @@ fn main() -> Result<()> { HookCommands::Gemini => { hook_cmd::run_gemini()?; } + HookCommands::Copilot => { + hook_cmd::run_copilot()?; + } }, Commands::Rewrite { args } => { From c3917e4de2a21f9507abf73b09921a6be36a9aed Mon Sep 17 00:00:00 2001 From: Moisei Rabinovich Date: Wed, 18 Mar 2026 17:39:03 +0200 Subject: [PATCH 06/30] feat: add Cursor Agent support via --agent flag (#595) Add `rtk init -g --agent cursor` to install RTK hooks for Cursor Agent. Cursor's preToolUse hook supports command rewriting via updated_input, functionally identical to Claude Code's PreToolUse. Works with both the Cursor editor and cursor-cli (they share ~/.cursor/hooks.json). Changes: - New `--agent ` flag (claude|cursor) on `rtk init`, extensible for future agents. Default is claude (backward compatible). - Cursor hook script (hooks/cursor-rtk-rewrite.sh) outputs Cursor's JSON format: {permission, updated_input} vs Claude's hookSpecificOutput. - `rtk init --show` reports Cursor hook and hooks.json status. - `rtk init -g --uninstall` removes Cursor artifacts. - `rtk discover` notes that Cursor sessions are tracked via `rtk gain` (Cursor transcripts lack structured tool_use/tool_result blocks). - Unit tests for Cursor hooks.json patching, detection, and removal. 
Made-with: Cursor Signed-off-by: Patrick szymkowiak Co-authored-by: Moisei <1199723+moisei@users.noreply.github.com> --- hooks/cursor-rtk-rewrite.sh | 54 ++++ src/discover/provider.rs | 6 +- src/discover/report.rs | 8 + src/init.rs | 509 +++++++++++++++++++++++++++++++++--- src/main.rs | 22 +- 5 files changed, 564 insertions(+), 35 deletions(-) create mode 100755 hooks/cursor-rtk-rewrite.sh diff --git a/hooks/cursor-rtk-rewrite.sh b/hooks/cursor-rtk-rewrite.sh new file mode 100755 index 00000000..4b80b260 --- /dev/null +++ b/hooks/cursor-rtk-rewrite.sh @@ -0,0 +1,54 @@ +#!/usr/bin/env bash +# rtk-hook-version: 1 +# RTK Cursor Agent hook — rewrites shell commands to use rtk for token savings. +# Works with both Cursor editor and cursor-cli (they share ~/.cursor/hooks.json). +# Cursor preToolUse hook format: receives JSON on stdin, returns JSON on stdout. +# Requires: rtk >= 0.23.0, jq +# +# This is a thin delegating hook: all rewrite logic lives in `rtk rewrite`, +# which is the single source of truth (src/discover/registry.rs). +# To add or change rewrite rules, edit the Rust registry — not this file. + +if ! command -v jq &>/dev/null; then + echo "[rtk] WARNING: jq is not installed. Hook cannot rewrite commands. Install jq: https://jqlang.github.io/jq/download/" >&2 + exit 0 +fi + +if ! command -v rtk &>/dev/null; then + echo "[rtk] WARNING: rtk is not installed or not in PATH. Hook cannot rewrite commands. Install: https://github.com/rtk-ai/rtk#installation" >&2 + exit 0 +fi + +# Version guard: rtk rewrite was added in 0.23.0. +RTK_VERSION=$(rtk --version 2>/dev/null | grep -oE '[0-9]+\.[0-9]+\.[0-9]+' | head -1) +if [ -n "$RTK_VERSION" ]; then + MAJOR=$(echo "$RTK_VERSION" | cut -d. -f1) + MINOR=$(echo "$RTK_VERSION" | cut -d. -f2) + if [ "$MAJOR" -eq 0 ] && [ "$MINOR" -lt 23 ]; then + echo "[rtk] WARNING: rtk $RTK_VERSION is too old (need >= 0.23.0). 
Upgrade: cargo install rtk" >&2 + exit 0 + fi +fi + +INPUT=$(cat) +CMD=$(echo "$INPUT" | jq -r '.tool_input.command // empty') + +if [ -z "$CMD" ]; then + echo '{}' + exit 0 +fi + +# Delegate all rewrite logic to the Rust binary. +# rtk rewrite exits 1 when there's no rewrite — hook passes through silently. +REWRITTEN=$(rtk rewrite "$CMD" 2>/dev/null) || { echo '{}'; exit 0; } + +# No change — nothing to do. +if [ "$CMD" = "$REWRITTEN" ]; then + echo '{}' + exit 0 +fi + +jq -n --arg cmd "$REWRITTEN" '{ + "permission": "allow", + "updated_input": { "command": $cmd } +}' diff --git a/src/discover/provider.rs b/src/discover/provider.rs index ae0852d2..b4105a9d 100644 --- a/src/discover/provider.rs +++ b/src/discover/provider.rs @@ -22,7 +22,11 @@ pub struct ExtractedCommand { pub sequence_index: usize, } -/// Trait for session providers (Claude Code, future: Cursor, Windsurf). +/// Trait for session providers (Claude Code, OpenCode, etc.). +/// +/// Note: Cursor Agent transcripts use a text-only format without structured +/// tool_use/tool_result blocks, so command extraction is not possible. +/// Use `rtk gain` to track savings for Cursor sessions instead. 
pub trait SessionProvider { fn discover_sessions( &self, diff --git a/src/discover/report.rs b/src/discover/report.rs index 5d05f150..5b1fe801 100644 --- a/src/discover/report.rs +++ b/src/discover/report.rs @@ -165,6 +165,14 @@ pub fn format_text(report: &DiscoverReport, limit: usize, verbose: bool) -> Stri out.push_str("\n~estimated from tool_result output sizes\n"); + // Cursor note: check if Cursor hooks are installed + if let Some(home) = dirs::home_dir() { + let cursor_hook = home.join(".cursor").join("hooks").join("rtk-rewrite.sh"); + if cursor_hook.exists() { + out.push_str("\nNote: Cursor sessions are tracked via `rtk gain` (discover scans Claude Code only)\n"); + } + } + if verbose && report.parse_errors > 0 { out.push_str(&format!("Parse errors skipped: {}\n", report.parse_errors)); } diff --git a/src/init.rs b/src/init.rs index eaf104f8..d0759aae 100644 --- a/src/init.rs +++ b/src/init.rs @@ -9,6 +9,9 @@ use crate::integrity; // Embedded hook script (guards before set -euo pipefail) const REWRITE_HOOK: &str = include_str!("../hooks/rtk-rewrite.sh"); +// Embedded Cursor hook script (preToolUse format) +const CURSOR_REWRITE_HOOK: &str = include_str!("../hooks/cursor-rtk-rewrite.sh"); + // Embedded OpenCode plugin (auto-rewrite) const OPENCODE_PLUGIN: &str = include_str!("../hooks/opencode-rtk.ts"); @@ -205,46 +208,67 @@ pub fn run( global: bool, install_claude: bool, install_opencode: bool, + install_cursor: bool, claude_md: bool, hook_only: bool, codex: bool, patch_mode: PatchMode, verbose: u8, ) -> Result<()> { - match ( - codex, - install_claude, - install_opencode, - global, - claude_md, - hook_only, - patch_mode, - ) { - (true, _, true, _, _, _, _) => anyhow::bail!("--codex cannot be combined with --opencode"), - (true, _, _, _, true, _, _) => anyhow::bail!("--codex cannot be combined with --claude-md"), - (true, _, _, _, _, true, _) => anyhow::bail!("--codex cannot be combined with --hook-only"), - (true, _, _, _, _, _, PatchMode::Auto) => { - 
anyhow::bail!("--codex cannot be combined with --auto-patch") + // Validation: Codex mode conflicts + if codex { + if install_opencode { + anyhow::bail!("--codex cannot be combined with --opencode"); } - (true, _, _, _, _, _, PatchMode::Skip) => { - anyhow::bail!("--codex cannot be combined with --no-patch") + if claude_md { + anyhow::bail!("--codex cannot be combined with --claude-md"); } - (true, _, _, _, _, _, PatchMode::Ask) => run_codex_mode(global, verbose), - (false, _, true, false, _, _, _) => { - anyhow::bail!("OpenCode plugin is global-only. Use: rtk init -g --opencode") + if hook_only { + anyhow::bail!("--codex cannot be combined with --hook-only"); } - (false, false, true, _, _, _, _) => run_opencode_only_mode(verbose), - (false, true, opencode, _, true, _, _) => run_claude_md_mode(global, verbose, opencode), - (false, true, opencode, _, false, true, _) => { - run_hook_only_mode(global, patch_mode, verbose, opencode) + if matches!(patch_mode, PatchMode::Auto) { + anyhow::bail!("--codex cannot be combined with --auto-patch"); + } + if matches!(patch_mode, PatchMode::Skip) { + anyhow::bail!("--codex cannot be combined with --no-patch"); + } + return run_codex_mode(global, verbose); + } + + // Validation: Global-only features + if install_opencode && !global { + anyhow::bail!("OpenCode plugin is global-only. Use: rtk init -g --opencode"); + } + + if install_cursor && !global { + anyhow::bail!("Cursor hooks are global-only. Use: rtk init -g --agent cursor"); + } + + // Mode selection (Claude Code / OpenCode) + match (install_claude, install_opencode, claude_md, hook_only) { + (false, true, _, _) => run_opencode_only_mode(verbose)?, + (true, opencode, true, _) => run_claude_md_mode(global, verbose, opencode)?, + (true, opencode, false, true) => { + run_hook_only_mode(global, patch_mode, verbose, opencode)? 
} - (false, true, opencode, _, false, false, _) => { - run_default_mode(global, patch_mode, verbose, opencode) + (true, opencode, false, false) => { + run_default_mode(global, patch_mode, verbose, opencode)? } - (false, false, false, _, _, _, _) => { - anyhow::bail!("at least one of install_claude or install_opencode must be true") + (false, false, _, _) => { + if !install_cursor { + anyhow::bail!( + "at least one of install_claude or install_opencode must be true" + ) + } } } + + // Cursor hooks (additive, installed alongside Claude Code) + if install_cursor { + install_cursor_hooks(verbose)?; + } + + Ok(()) } /// Prepare hook directory and return paths (hook_dir, hook_path) @@ -480,11 +504,30 @@ fn remove_hook_from_settings(verbose: u8) -> Result { Ok(removed) } -/// Full uninstall for Claude, Gemini, or Codex artifacts. -pub fn uninstall(global: bool, gemini: bool, codex: bool, verbose: u8) -> Result<()> { +/// Full uninstall for Claude, Gemini, Codex, or Cursor artifacts. +pub fn uninstall(global: bool, gemini: bool, codex: bool, cursor: bool, verbose: u8) -> Result<()> { if codex { return uninstall_codex(global, verbose); } + + if cursor { + if !global { + anyhow::bail!("Cursor uninstall only works with --global flag"); + } + let cursor_removed = remove_cursor_hooks(verbose) + .context("Failed to remove Cursor hooks")?; + if !cursor_removed.is_empty() { + println!("RTK uninstalled (Cursor):"); + for item in &cursor_removed { + println!(" - {}", item); + } + println!("\nRestart Cursor to apply changes."); + } else { + println!("RTK Cursor support was not installed (nothing to remove)"); + } + return Ok(()); + } + if !global { anyhow::bail!("Uninstall only works with --global flag. For local projects, manually remove RTK from CLAUDE.md"); } @@ -563,6 +606,10 @@ pub fn uninstall(global: bool, gemini: bool, codex: bool, verbose: u8) -> Result removed.push(format!("OpenCode plugin: {}", path.display())); } + // 6. 
Remove Cursor hooks
+    let cursor_removed = remove_cursor_hooks(verbose)?;
+    removed.extend(cursor_removed);
+
     // Report results
     if removed.is_empty() {
         println!("RTK was not installed (nothing to remove)");
@@ -571,7 +618,7 @@ pub fn uninstall(global: bool, gemini: bool, codex: bool, verbose: u8) -> Result
         for item in removed {
             println!(" - {}", item);
         }
-        println!("\nRestart Claude Code and OpenCode (if used) to apply changes.");
+        println!("\nRestart Claude Code, OpenCode, and Cursor (if used) to apply changes.");
     }
 
     Ok(())
@@ -1441,6 +1488,215 @@ fn remove_opencode_plugin(verbose: u8) -> Result<Vec<String>> {
     Ok(removed)
 }
 
+
+// ─── Cursor Agent support ─────────────────────────────────────────────
+
+/// Resolve ~/.cursor directory
+fn resolve_cursor_dir() -> Result<PathBuf> {
+    dirs::home_dir()
+        .map(|h| h.join(".cursor"))
+        .context("Cannot determine home directory. Is $HOME set?")
+}
+
+/// Install Cursor hooks: hook script + hooks.json
+fn install_cursor_hooks(verbose: u8) -> Result<()> {
+    let cursor_dir = resolve_cursor_dir()?;
+    let hooks_dir = cursor_dir.join("hooks");
+    fs::create_dir_all(&hooks_dir)
+        .with_context(|| format!("Failed to create Cursor hooks directory: {}", hooks_dir.display()))?;
+
+    // 1. Write hook script
+    let hook_path = hooks_dir.join("rtk-rewrite.sh");
+    let hook_changed = write_if_changed(&hook_path, CURSOR_REWRITE_HOOK, "Cursor hook", verbose)?;
+
+    #[cfg(unix)]
+    {
+        use std::os::unix::fs::PermissionsExt;
+        fs::set_permissions(&hook_path, fs::Permissions::from_mode(0o755))
+            .with_context(|| format!("Failed to set Cursor hook permissions: {}", hook_path.display()))?;
+    }
+
+    // 2.
Create or patch hooks.json
+    let hooks_json_path = cursor_dir.join("hooks.json");
+    let patched = patch_cursor_hooks_json(&hooks_json_path, verbose)?;
+
+    // Report
+    let hook_status = if hook_changed { "installed/updated" } else { "already up to date" };
+    println!("\nCursor hook {} (global).\n", hook_status);
+    println!(" Hook: {}", hook_path.display());
+    println!(" hooks.json: {}", hooks_json_path.display());
+
+    if patched {
+        println!(" hooks.json: RTK preToolUse entry added");
+    } else {
+        println!(" hooks.json: RTK preToolUse entry already present");
+    }
+
+    println!(" Cursor reloads hooks.json automatically. Test with: git status\n");
+
+    Ok(())
+}
+
+/// Patch ~/.cursor/hooks.json to add RTK preToolUse hook.
+/// Returns true if the file was modified.
+fn patch_cursor_hooks_json(path: &Path, verbose: u8) -> Result<bool> {
+    let mut root = if path.exists() {
+        let content = fs::read_to_string(path)
+            .with_context(|| format!("Failed to read {}", path.display()))?;
+        if content.trim().is_empty() {
+            serde_json::json!({ "version": 1 })
+        } else {
+            serde_json::from_str(&content)
+                .with_context(|| format!("Failed to parse {} as JSON", path.display()))?
+ } + } else { + serde_json::json!({ "version": 1 }) + }; + + // Check idempotency + if cursor_hook_already_present(&root) { + if verbose > 0 { + eprintln!("Cursor hooks.json: RTK hook already present"); + } + return Ok(false); + } + + // Insert the RTK preToolUse entry + insert_cursor_hook_entry(&mut root); + + // Backup if exists + if path.exists() { + let backup_path = path.with_extension("json.bak"); + fs::copy(path, &backup_path) + .with_context(|| format!("Failed to backup to {}", backup_path.display()))?; + if verbose > 0 { + eprintln!("Backup: {}", backup_path.display()); + } + } + + // Atomic write + let serialized = + serde_json::to_string_pretty(&root).context("Failed to serialize hooks.json")?; + atomic_write(path, &serialized)?; + + Ok(true) +} + +/// Check if RTK preToolUse hook is already present in Cursor hooks.json +fn cursor_hook_already_present(root: &serde_json::Value) -> bool { + let hooks = match root.get("hooks").and_then(|h| h.get("preToolUse")).and_then(|p| p.as_array()) + { + Some(arr) => arr, + None => return false, + }; + + hooks.iter().any(|entry| { + entry + .get("command") + .and_then(|c| c.as_str()) + .map_or(false, |cmd| cmd.contains("rtk-rewrite.sh")) + }) +} + +/// Insert RTK preToolUse entry into Cursor hooks.json +fn insert_cursor_hook_entry(root: &mut serde_json::Value) { + let root_obj = match root.as_object_mut() { + Some(obj) => obj, + None => { + *root = serde_json::json!({ "version": 1 }); + root.as_object_mut() + .expect("Just created object, must succeed") + } + }; + + // Ensure version key + root_obj + .entry("version") + .or_insert(serde_json::json!(1)); + + let hooks = root_obj + .entry("hooks") + .or_insert_with(|| serde_json::json!({})) + .as_object_mut() + .expect("hooks must be an object"); + + let pre_tool_use = hooks + .entry("preToolUse") + .or_insert_with(|| serde_json::json!([])) + .as_array_mut() + .expect("preToolUse must be an array"); + + pre_tool_use.push(serde_json::json!({ + "command": 
"./hooks/rtk-rewrite.sh",
+        "matcher": "Shell"
+    }));
+}
+
+/// Remove Cursor RTK artifacts: hook script + hooks.json entry
+fn remove_cursor_hooks(verbose: u8) -> Result<Vec<String>> {
+    let cursor_dir = resolve_cursor_dir()?;
+    let mut removed = Vec::new();
+
+    // 1. Remove hook script
+    let hook_path = cursor_dir.join("hooks").join("rtk-rewrite.sh");
+    if hook_path.exists() {
+        fs::remove_file(&hook_path)
+            .with_context(|| format!("Failed to remove Cursor hook: {}", hook_path.display()))?;
+        removed.push(format!("Cursor hook: {}", hook_path.display()));
+    }
+
+    // 2. Remove RTK entry from hooks.json
+    let hooks_json_path = cursor_dir.join("hooks.json");
+    if hooks_json_path.exists() {
+        let content = fs::read_to_string(&hooks_json_path)
+            .with_context(|| format!("Failed to read {}", hooks_json_path.display()))?;
+
+        if !content.trim().is_empty() {
+            if let Ok(mut root) = serde_json::from_str::<serde_json::Value>(&content) {
+                if remove_cursor_hook_from_json(&mut root) {
+                    let backup_path = hooks_json_path.with_extension("json.bak");
+                    fs::copy(&hooks_json_path, &backup_path).ok();
+
+                    let serialized = serde_json::to_string_pretty(&root)
+                        .context("Failed to serialize hooks.json")?;
+                    atomic_write(&hooks_json_path, &serialized)?;
+
+                    removed.push("Cursor hooks.json: removed RTK entry".to_string());
+
+                    if verbose > 0 {
+                        eprintln!("Removed RTK hook from Cursor hooks.json");
+                    }
+                }
+            }
+        }
+    }
+
+    Ok(removed)
+}
+
+/// Remove RTK preToolUse entry from Cursor hooks.json
+/// Returns true if entry was found and removed
+fn remove_cursor_hook_from_json(root: &mut serde_json::Value) -> bool {
+    let pre_tool_use = match root
+        .get_mut("hooks")
+        .and_then(|h| h.get_mut("preToolUse"))
+        .and_then(|p| p.as_array_mut())
+    {
+        Some(arr) => arr,
+        None => return false,
+    };
+
+    let original_len = pre_tool_use.len();
+    pre_tool_use.retain(|entry| {
+        !entry
+            .get("command")
+            .and_then(|c| c.as_str())
+            .map_or(false, |cmd| cmd.contains("rtk-rewrite.sh"))
+    });
+
+    pre_tool_use.len() < original_len
+}
+
 /// Show current rtk configuration
 pub fn show_config(codex: bool) -> Result<()> {
     if codex {
@@ -1599,6 +1855,66 @@ fn show_claude_config() -> Result<()> {
         println!("[--] OpenCode: config dir not found");
     }
 
+    // Check Cursor hooks
+    if let Ok(cursor_dir) = resolve_cursor_dir() {
+        let cursor_hook = cursor_dir.join("hooks").join("rtk-rewrite.sh");
+        let cursor_hooks_json = cursor_dir.join("hooks.json");
+
+        if cursor_hook.exists() {
+            #[cfg(unix)]
+            {
+                use std::os::unix::fs::PermissionsExt;
+                let meta = fs::metadata(&cursor_hook)?;
+                let is_executable = meta.permissions().mode() & 0o111 != 0;
+                let content = fs::read_to_string(&cursor_hook)?;
+                let is_thin = content.contains("rtk rewrite");
+
+                if !is_executable {
+                    println!(
+                        "⚠️ Cursor hook: {} (NOT executable - run: chmod +x)",
+                        cursor_hook.display()
+                    );
+                } else if is_thin {
+                    println!("✅ Cursor hook: {} (thin delegator)", cursor_hook.display());
+                } else {
+                    println!(
+                        "⚠️ Cursor hook: {} (outdated — missing rtk rewrite delegation)",
+                        cursor_hook.display()
+                    );
+                }
+            }
+
+            #[cfg(not(unix))]
+            {
+                println!("✅ Cursor hook: {} (exists)", cursor_hook.display());
+            }
+        } else {
+            println!("⚪ Cursor hook: not found");
+        }
+
+        if cursor_hooks_json.exists() {
+            let content = fs::read_to_string(&cursor_hooks_json)?;
+            if !content.trim().is_empty() {
+                if let Ok(root) = serde_json::from_str::<serde_json::Value>(&content) {
+                    if cursor_hook_already_present(&root) {
+                        println!("✅ Cursor hooks.json: RTK preToolUse configured");
+                    } else {
+                        println!("⚠️ Cursor hooks.json: exists but RTK not configured");
+                        println!(" Run: rtk init -g --agent cursor");
+                    }
+                } else {
+                    println!("⚠️ Cursor hooks.json: exists but invalid JSON");
+                }
+            } else {
+                println!("⚪ Cursor hooks.json: empty");
+            }
+        } else {
+            println!("⚪ Cursor hooks.json: not found");
+        }
+    } else {
+        println!("⚪ Cursor: home dir not found");
+    }
+
     println!("\nUsage:");
     println!(" rtk init # Full injection into local CLAUDE.md");
     println!(" rtk init -g # Hook + RTK.md + 
@RTK.md + settings.json (recommended)"); @@ -1610,6 +1926,7 @@ fn show_claude_config() -> Result<()> { println!(" rtk init --codex # Configure local AGENTS.md + RTK.md"); println!(" rtk init -g --codex # Configure ~/.codex/AGENTS.md + ~/.codex/RTK.md"); println!(" rtk init -g --opencode # OpenCode plugin only"); + println!(" rtk init -g --agent cursor # Install Cursor Agent hooks"); Ok(()) } @@ -2110,7 +2427,7 @@ More notes #[test] fn test_codex_mode_rejects_auto_patch() { - let err = run(false, false, false, false, false, true, PatchMode::Auto, 0).unwrap_err(); + let err = run(false, false, false, false, false, false, true, PatchMode::Auto, 0).unwrap_err(); assert_eq!( err.to_string(), "--codex cannot be combined with --auto-patch" @@ -2119,7 +2436,7 @@ More notes #[test] fn test_codex_mode_rejects_no_patch() { - let err = run(false, false, false, false, false, true, PatchMode::Skip, 0).unwrap_err(); + let err = run(false, false, false, false, false, false, true, PatchMode::Skip, 0).unwrap_err(); assert_eq!( err.to_string(), "--codex cannot be combined with --no-patch" @@ -2429,4 +2746,132 @@ More notes let removed = remove_hook_from_json(&mut json_content); assert!(!removed); } + + // ─── Cursor hooks.json tests ─── + + #[test] + fn test_cursor_hook_already_present_true() { + let json_content = serde_json::json!({ + "version": 1, + "hooks": { + "preToolUse": [{ + "command": "./hooks/rtk-rewrite.sh", + "matcher": "Shell" + }] + } + }); + assert!(cursor_hook_already_present(&json_content)); + } + + #[test] + fn test_cursor_hook_already_present_false_empty() { + let json_content = serde_json::json!({ "version": 1 }); + assert!(!cursor_hook_already_present(&json_content)); + } + + #[test] + fn test_cursor_hook_already_present_false_other_hooks() { + let json_content = serde_json::json!({ + "version": 1, + "hooks": { + "preToolUse": [{ + "command": "./hooks/some-other-hook.sh", + "matcher": "Shell" + }] + } + }); + assert!(!cursor_hook_already_present(&json_content)); 
+ } + + #[test] + fn test_insert_cursor_hook_entry_empty() { + let mut json_content = serde_json::json!({ "version": 1 }); + insert_cursor_hook_entry(&mut json_content); + + let hooks = json_content["hooks"]["preToolUse"].as_array().unwrap(); + assert_eq!(hooks.len(), 1); + assert_eq!(hooks[0]["command"], "./hooks/rtk-rewrite.sh"); + assert_eq!(hooks[0]["matcher"], "Shell"); + assert_eq!(json_content["version"], 1); + } + + #[test] + fn test_insert_cursor_hook_preserves_existing() { + let mut json_content = serde_json::json!({ + "version": 1, + "hooks": { + "preToolUse": [{ + "command": "./hooks/other.sh", + "matcher": "Shell" + }], + "afterFileEdit": [{ + "command": "./hooks/format.sh" + }] + } + }); + + insert_cursor_hook_entry(&mut json_content); + + let pre_tool_use = json_content["hooks"]["preToolUse"].as_array().unwrap(); + assert_eq!(pre_tool_use.len(), 2); + assert_eq!(pre_tool_use[0]["command"], "./hooks/other.sh"); + assert_eq!(pre_tool_use[1]["command"], "./hooks/rtk-rewrite.sh"); + + // afterFileEdit should be preserved + assert!(json_content["hooks"]["afterFileEdit"].is_array()); + } + + #[test] + fn test_remove_cursor_hook_from_json() { + let mut json_content = serde_json::json!({ + "version": 1, + "hooks": { + "preToolUse": [ + { "command": "./hooks/other.sh", "matcher": "Shell" }, + { "command": "./hooks/rtk-rewrite.sh", "matcher": "Shell" } + ] + } + }); + + let removed = remove_cursor_hook_from_json(&mut json_content); + assert!(removed); + + let hooks = json_content["hooks"]["preToolUse"].as_array().unwrap(); + assert_eq!(hooks.len(), 1); + assert_eq!(hooks[0]["command"], "./hooks/other.sh"); + } + + #[test] + fn test_remove_cursor_hook_not_present() { + let mut json_content = serde_json::json!({ + "version": 1, + "hooks": { + "preToolUse": [ + { "command": "./hooks/other.sh", "matcher": "Shell" } + ] + } + }); + + let removed = remove_cursor_hook_from_json(&mut json_content); + assert!(!removed); + } + + #[test] + fn 
test_cursor_hook_script_has_guards() { + assert!(CURSOR_REWRITE_HOOK.contains("command -v rtk")); + assert!(CURSOR_REWRITE_HOOK.contains("command -v jq")); + let jq_pos = CURSOR_REWRITE_HOOK.find("command -v jq").unwrap(); + let rtk_delegate_pos = CURSOR_REWRITE_HOOK.find("rtk rewrite \"$CMD\"").unwrap(); + assert!( + jq_pos < rtk_delegate_pos, + "Guards must appear before rtk rewrite delegation" + ); + } + + #[test] + fn test_cursor_hook_outputs_cursor_format() { + assert!(CURSOR_REWRITE_HOOK.contains("\"permission\": \"allow\"")); + assert!(CURSOR_REWRITE_HOOK.contains("\"updated_input\"")); + assert!(!CURSOR_REWRITE_HOOK.contains("hookSpecificOutput")); + } } diff --git a/src/main.rs b/src/main.rs index 1279a454..6dcc036c 100644 --- a/src/main.rs +++ b/src/main.rs @@ -67,10 +67,19 @@ mod wget_cmd; use anyhow::{Context, Result}; use clap::error::ErrorKind; -use clap::{Parser, Subcommand}; +use clap::{Parser, Subcommand, ValueEnum}; use std::ffi::OsString; use std::path::{Path, PathBuf}; +/// Target agent for hook installation. 
+#[derive(Debug, Clone, Copy, PartialEq, ValueEnum)] +pub enum AgentTarget { + /// Claude Code (default) + Claude, + /// Cursor Agent (editor and CLI) + Cursor, +} + #[derive(Parser)] #[command( name = "rtk", @@ -337,6 +346,10 @@ enum Commands { #[arg(long)] gemini: bool, + /// Target agent to install hooks for (default: claude) + #[arg(long, value_enum)] + agent: Option, + /// Show current configuration #[arg(long)] show: bool, @@ -1632,6 +1645,7 @@ fn main() -> Result<()> { global, opencode, gemini, + agent, show, claude_md, hook_only, @@ -1643,7 +1657,8 @@ fn main() -> Result<()> { if show { init::show_config(codex)?; } else if uninstall { - init::uninstall(global, gemini, codex, cli.verbose)?; + let cursor = agent.map_or(false, |a| a == AgentTarget::Cursor); + init::uninstall(global, gemini, codex, cursor, cli.verbose)?; } else if gemini { let patch_mode = if auto_patch { init::PatchMode::Auto @@ -1656,6 +1671,8 @@ fn main() -> Result<()> { } else { let install_opencode = opencode; let install_claude = !opencode; + let install_cursor = + agent.map_or(false, |a| a == AgentTarget::Cursor); let patch_mode = if auto_patch { init::PatchMode::Auto @@ -1668,6 +1685,7 @@ fn main() -> Result<()> { global, install_claude, install_opencode, + install_cursor, claude_md, hook_only, codex, From 86d50698c18279c0c5e448ebeaaa5d6f62836cba Mon Sep 17 00:00:00 2001 From: patrick szymkowiak <52030887+pszymkowiak@users.noreply.github.com> Date: Wed, 18 Mar 2026 18:34:56 +0100 Subject: [PATCH 07/30] feat: add Windsurf support via rtk init --agent windsurf (#695) (#697) Install RTK rules in .windsurfrules (project-scoped) so Cascade prefixes shell commands with rtk for token savings. Windsurf hooks don't support command rewriting (only blocking), so RTK uses the rules-based approach (like Codex with AGENTS.md). Tested: Windsurf Cascade correctly uses rtk git status after install. 
Signed-off-by: Patrick szymkowiak --- hooks/windsurf-rtk-rules.md | 32 +++++++++ src/init.rs | 127 +++++++++++++++++++++++++++++------- src/main.rs | 7 +- 3 files changed, 142 insertions(+), 24 deletions(-) create mode 100644 hooks/windsurf-rtk-rules.md diff --git a/hooks/windsurf-rtk-rules.md b/hooks/windsurf-rtk-rules.md new file mode 100644 index 00000000..8491f5a7 --- /dev/null +++ b/hooks/windsurf-rtk-rules.md @@ -0,0 +1,32 @@ +# RTK - Rust Token Killer (Windsurf) + +**Usage**: Token-optimized CLI proxy for shell commands. + +## Rule + +Always prefix shell commands with `rtk` to minimize token consumption. + +Examples: + +```bash +rtk git status +rtk cargo test +rtk ls src/ +rtk grep "pattern" src/ +rtk find "*.rs" . +rtk docker ps +rtk gh pr list +``` + +## Meta Commands + +```bash +rtk gain # Show token savings +rtk gain --history # Command history with savings +rtk discover # Find missed RTK opportunities +rtk proxy # Run raw (no filtering, for debugging) +``` + +## Why + +RTK filters and compresses command output before it reaches the LLM context, saving 60-90% tokens on common operations. Always use `rtk ` instead of raw commands. diff --git a/src/init.rs b/src/init.rs index d0759aae..407be9bb 100644 --- a/src/init.rs +++ b/src/init.rs @@ -209,6 +209,7 @@ pub fn run( install_claude: bool, install_opencode: bool, install_cursor: bool, + install_windsurf: bool, claude_md: bool, hook_only: bool, codex: bool, @@ -244,21 +245,24 @@ pub fn run( anyhow::bail!("Cursor hooks are global-only. Use: rtk init -g --agent cursor"); } + if install_windsurf && !global { + anyhow::bail!("Windsurf support is global-only. 
Use: rtk init -g --agent windsurf"); + } + + // Windsurf-only mode + if install_windsurf { + return run_windsurf_mode(verbose); + } + // Mode selection (Claude Code / OpenCode) match (install_claude, install_opencode, claude_md, hook_only) { (false, true, _, _) => run_opencode_only_mode(verbose)?, (true, opencode, true, _) => run_claude_md_mode(global, verbose, opencode)?, - (true, opencode, false, true) => { - run_hook_only_mode(global, patch_mode, verbose, opencode)? - } - (true, opencode, false, false) => { - run_default_mode(global, patch_mode, verbose, opencode)? - } + (true, opencode, false, true) => run_hook_only_mode(global, patch_mode, verbose, opencode)?, + (true, opencode, false, false) => run_default_mode(global, patch_mode, verbose, opencode)?, (false, false, _, _) => { if !install_cursor { - anyhow::bail!( - "at least one of install_claude or install_opencode must be true" - ) + anyhow::bail!("at least one of install_claude or install_opencode must be true") } } } @@ -514,8 +518,8 @@ pub fn uninstall(global: bool, gemini: bool, codex: bool, cursor: bool, verbose: if !global { anyhow::bail!("Cursor uninstall only works with --global flag"); } - let cursor_removed = remove_cursor_hooks(verbose) - .context("Failed to remove Cursor hooks")?; + let cursor_removed = + remove_cursor_hooks(verbose).context("Failed to remove Cursor hooks")?; if !cursor_removed.is_empty() { println!("RTK uninstalled (Cursor):"); for item in &cursor_removed { @@ -1153,6 +1157,48 @@ fn run_claude_md_mode(global: bool, verbose: u8, install_opencode: bool) -> Resu } /// Codex mode: slim RTK.md + @RTK.md reference in AGENTS.md +// ─── Windsurf support ───────────────────────────────────────── + +/// Embedded Windsurf RTK rules +const WINDSURF_RULES: &str = include_str!("../hooks/windsurf-rtk-rules.md"); + +/// Resolve Windsurf user config directory (~/.codeium/windsurf) +fn resolve_windsurf_dir() -> Result { + dirs::home_dir() + .map(|h| h.join(".codeium").join("windsurf")) + 
.context("Cannot determine home directory") +} + +fn run_windsurf_mode(verbose: u8) -> Result<()> { + // Windsurf reads .windsurfrules from the project root (workspace-scoped). + // Global rules (~/.codeium/windsurf/memories/global_rules.md) are unreliable. + let rules_path = PathBuf::from(".windsurfrules"); + + let existing = fs::read_to_string(&rules_path).unwrap_or_default(); + if existing.contains("RTK") || existing.contains("rtk") { + println!("\nRTK already configured for Windsurf in this project.\n"); + println!(" Rules: .windsurfrules (already present)"); + } else { + let new_content = if existing.trim().is_empty() { + WINDSURF_RULES.to_string() + } else { + format!("{}\n\n{}", existing.trim(), WINDSURF_RULES) + }; + fs::write(&rules_path, &new_content).context("Failed to write .windsurfrules")?; + + if verbose > 0 { + eprintln!("Wrote .windsurfrules"); + } + + println!("\nRTK configured for Windsurf Cascade.\n"); + println!(" Rules: .windsurfrules (installed)"); + } + println!(" Cascade will now use rtk commands for token savings."); + println!(" Restart Windsurf. Test with: git status\n"); + + Ok(()) +} + fn run_codex_mode(global: bool, verbose: u8) -> Result<()> { let (agents_md_path, rtk_md_path) = if global { let codex_dir = resolve_codex_dir()?; @@ -1502,8 +1548,12 @@ fn resolve_cursor_dir() -> Result { fn install_cursor_hooks(verbose: u8) -> Result<()> { let cursor_dir = resolve_cursor_dir()?; let hooks_dir = cursor_dir.join("hooks"); - fs::create_dir_all(&hooks_dir) - .with_context(|| format!("Failed to create Cursor hooks directory: {}", hooks_dir.display()))?; + fs::create_dir_all(&hooks_dir).with_context(|| { + format!( + "Failed to create Cursor hooks directory: {}", + hooks_dir.display() + ) + })?; // 1. 
Write hook script let hook_path = hooks_dir.join("rtk-rewrite.sh"); @@ -1512,8 +1562,12 @@ fn install_cursor_hooks(verbose: u8) -> Result<()> { #[cfg(unix)] { use std::os::unix::fs::PermissionsExt; - fs::set_permissions(&hook_path, fs::Permissions::from_mode(0o755)) - .with_context(|| format!("Failed to set Cursor hook permissions: {}", hook_path.display()))?; + fs::set_permissions(&hook_path, fs::Permissions::from_mode(0o755)).with_context(|| { + format!( + "Failed to set Cursor hook permissions: {}", + hook_path.display() + ) + })?; } // 2. Create or patch hooks.json @@ -1521,7 +1575,11 @@ fn install_cursor_hooks(verbose: u8) -> Result<()> { let patched = patch_cursor_hooks_json(&hooks_json_path, verbose)?; // Report - let hook_status = if hook_changed { "installed/updated" } else { "already up to date" }; + let hook_status = if hook_changed { + "installed/updated" + } else { + "already up to date" + }; println!("\nCursor hook {} (global).\n", hook_status); println!(" Hook: {}", hook_path.display()); println!(" hooks.json: {}", hooks_json_path.display()); @@ -1584,7 +1642,10 @@ fn patch_cursor_hooks_json(path: &Path, verbose: u8) -> Result { /// Check if RTK preToolUse hook is already present in Cursor hooks.json fn cursor_hook_already_present(root: &serde_json::Value) -> bool { - let hooks = match root.get("hooks").and_then(|h| h.get("preToolUse")).and_then(|p| p.as_array()) + let hooks = match root + .get("hooks") + .and_then(|h| h.get("preToolUse")) + .and_then(|p| p.as_array()) { Some(arr) => arr, None => return false, @@ -1610,9 +1671,7 @@ fn insert_cursor_hook_entry(root: &mut serde_json::Value) { }; // Ensure version key - root_obj - .entry("version") - .or_insert(serde_json::json!(1)); + root_obj.entry("version").or_insert(serde_json::json!(1)); let hooks = root_obj .entry("hooks") @@ -2427,7 +2486,19 @@ More notes #[test] fn test_codex_mode_rejects_auto_patch() { - let err = run(false, false, false, false, false, false, true, PatchMode::Auto, 
0).unwrap_err(); + let err = run( + false, + false, + false, + false, + false, + false, + false, + true, + PatchMode::Auto, + 0, + ) + .unwrap_err(); assert_eq!( err.to_string(), "--codex cannot be combined with --auto-patch" @@ -2436,7 +2507,19 @@ More notes #[test] fn test_codex_mode_rejects_no_patch() { - let err = run(false, false, false, false, false, false, true, PatchMode::Skip, 0).unwrap_err(); + let err = run( + false, + false, + false, + false, + false, + false, + false, + true, + PatchMode::Skip, + 0, + ) + .unwrap_err(); assert_eq!( err.to_string(), "--codex cannot be combined with --no-patch" diff --git a/src/main.rs b/src/main.rs index 6dcc036c..d78da66c 100644 --- a/src/main.rs +++ b/src/main.rs @@ -78,6 +78,8 @@ pub enum AgentTarget { Claude, /// Cursor Agent (editor and CLI) Cursor, + /// Windsurf IDE (Cascade) + Windsurf, } #[derive(Parser)] @@ -1671,8 +1673,8 @@ fn main() -> Result<()> { } else { let install_opencode = opencode; let install_claude = !opencode; - let install_cursor = - agent.map_or(false, |a| a == AgentTarget::Cursor); + let install_cursor = agent.map_or(false, |a| a == AgentTarget::Cursor); + let install_windsurf = agent.map_or(false, |a| a == AgentTarget::Windsurf); let patch_mode = if auto_patch { init::PatchMode::Auto @@ -1686,6 +1688,7 @@ fn main() -> Result<()> { install_claude, install_opencode, install_cursor, + install_windsurf, claude_md, hook_only, codex, From d921cc4fa0324843c82523085cca2014b075a12e Mon Sep 17 00:00:00 2001 From: patrick szymkowiak <52030887+pszymkowiak@users.noreply.github.com> Date: Wed, 18 Mar 2026 18:58:43 +0100 Subject: [PATCH 08/30] feat: add Cline/Roo Code support via rtk init --agent cline (#701) (#702) Install RTK rules in .clinerules (project-scoped) so Cline prefixes shell commands with rtk for token savings. Same rules-based approach as Windsurf and Codex. 
Signed-off-by: Patrick szymkowiak --- hooks/cline-rtk-rules.md | 32 +++++++++++++++++++++++ src/init.rs | 55 ++++++++++++++++++++++++++++++++-------- src/main.rs | 10 +++++--- 3 files changed, 84 insertions(+), 13 deletions(-) create mode 100644 hooks/cline-rtk-rules.md diff --git a/hooks/cline-rtk-rules.md b/hooks/cline-rtk-rules.md new file mode 100644 index 00000000..7d6c8aff --- /dev/null +++ b/hooks/cline-rtk-rules.md @@ -0,0 +1,32 @@ +# RTK - Rust Token Killer (Cline) + +**Usage**: Token-optimized CLI proxy for shell commands. + +## Rule + +Always prefix shell commands with `rtk` to minimize token consumption. + +Examples: + +```bash +rtk git status +rtk cargo test +rtk ls src/ +rtk grep "pattern" src/ +rtk find "*.rs" . +rtk docker ps +rtk gh pr list +``` + +## Meta Commands + +```bash +rtk gain # Show token savings +rtk gain --history # Command history with savings +rtk discover # Find missed RTK opportunities +rtk proxy # Run raw (no filtering, for debugging) +``` + +## Why + +RTK filters and compresses command output before it reaches the LLM context, saving 60-90% tokens on common operations. Always use `rtk ` instead of raw commands. diff --git a/src/init.rs b/src/init.rs index 407be9bb..98be225e 100644 --- a/src/init.rs +++ b/src/init.rs @@ -204,12 +204,14 @@ Overall average: **60-90% token reduction** on common development operations. 
"##; /// Main entry point for `rtk init` +#[allow(clippy::too_many_arguments)] pub fn run( global: bool, install_claude: bool, install_opencode: bool, install_cursor: bool, install_windsurf: bool, + install_cline: bool, claude_md: bool, hook_only: bool, codex: bool, @@ -254,6 +256,11 @@ pub fn run( return run_windsurf_mode(verbose); } + // Cline-only mode + if install_cline { + return run_cline_mode(verbose); + } + // Mode selection (Claude Code / OpenCode) match (install_claude, install_opencode, claude_md, hook_only) { (false, true, _, _) => run_opencode_only_mode(verbose)?, @@ -1156,17 +1163,43 @@ fn run_claude_md_mode(global: bool, verbose: u8, install_opencode: bool) -> Resu Ok(()) } -/// Codex mode: slim RTK.md + @RTK.md reference in AGENTS.md // ─── Windsurf support ───────────────────────────────────────── /// Embedded Windsurf RTK rules const WINDSURF_RULES: &str = include_str!("../hooks/windsurf-rtk-rules.md"); -/// Resolve Windsurf user config directory (~/.codeium/windsurf) -fn resolve_windsurf_dir() -> Result { - dirs::home_dir() - .map(|h| h.join(".codeium").join("windsurf")) - .context("Cannot determine home directory") +/// Embedded Cline RTK rules +const CLINE_RULES: &str = include_str!("../hooks/cline-rtk-rules.md"); + +// ─── Cline / Roo Code support ───────────────────────────────── + +fn run_cline_mode(verbose: u8) -> Result<()> { + // Cline reads .clinerules from the project root (workspace-scoped) + let rules_path = PathBuf::from(".clinerules"); + + let existing = fs::read_to_string(&rules_path).unwrap_or_default(); + if existing.contains("RTK") || existing.contains("rtk") { + println!("\nRTK already configured for Cline in this project.\n"); + println!(" Rules: .clinerules (already present)"); + } else { + let new_content = if existing.trim().is_empty() { + CLINE_RULES.to_string() + } else { + format!("{}\n\n{}", existing.trim(), CLINE_RULES) + }; + fs::write(&rules_path, &new_content).context("Failed to write .clinerules")?; + + if verbose 
> 0 { + eprintln!("Wrote .clinerules"); + } + + println!("\nRTK configured for Cline.\n"); + println!(" Rules: .clinerules (installed)"); + } + println!(" Cline will now use rtk commands for token savings."); + println!(" Test with: git status\n"); + + Ok(()) } fn run_windsurf_mode(verbose: u8) -> Result<()> { @@ -1655,7 +1688,7 @@ fn cursor_hook_already_present(root: &serde_json::Value) -> bool { entry .get("command") .and_then(|c| c.as_str()) - .map_or(false, |cmd| cmd.contains("rtk-rewrite.sh")) + .is_some_and(|cmd| cmd.contains("rtk-rewrite.sh")) }) } @@ -1750,7 +1783,7 @@ fn remove_cursor_hook_from_json(root: &mut serde_json::Value) -> bool { !entry .get("command") .and_then(|c| c.as_str()) - .map_or(false, |cmd| cmd.contains("rtk-rewrite.sh")) + .is_some_and(|cmd| cmd.contains("rtk-rewrite.sh")) }); pre_tool_use.len() < original_len @@ -2139,7 +2172,7 @@ fn patch_gemini_settings( if arr.iter().any(|h| { h.pointer("/hooks/0/command") .and_then(|v| v.as_str()) - .map_or(false, |c| c.contains("rtk")) + .is_some_and(|c| c.contains("rtk")) }) { if verbose > 0 { eprintln!("Gemini settings.json already has RTK hook"); @@ -2248,7 +2281,7 @@ fn uninstall_gemini(verbose: u8) -> Result> { arr.retain(|h| { !h.pointer("/hooks/0/command") .and_then(|v| v.as_str()) - .map_or(false, |c| c.contains("rtk")) + .is_some_and(|c| c.contains("rtk")) }); if arr.len() < before { let new_content = serde_json::to_string_pretty(&settings)?; @@ -2494,6 +2527,7 @@ More notes false, false, false, + false, true, PatchMode::Auto, 0, @@ -2515,6 +2549,7 @@ More notes false, false, false, + false, true, PatchMode::Skip, 0, diff --git a/src/main.rs b/src/main.rs index d78da66c..2bbc4bb2 100644 --- a/src/main.rs +++ b/src/main.rs @@ -80,6 +80,8 @@ pub enum AgentTarget { Cursor, /// Windsurf IDE (Cascade) Windsurf, + /// Cline / Roo Code (VS Code) + Cline, } #[derive(Parser)] @@ -1659,7 +1661,7 @@ fn main() -> Result<()> { if show { init::show_config(codex)?; } else if uninstall { - let cursor = 
agent.map_or(false, |a| a == AgentTarget::Cursor); + let cursor = agent == Some(AgentTarget::Cursor); init::uninstall(global, gemini, codex, cursor, cli.verbose)?; } else if gemini { let patch_mode = if auto_patch { @@ -1673,8 +1675,9 @@ fn main() -> Result<()> { } else { let install_opencode = opencode; let install_claude = !opencode; - let install_cursor = agent.map_or(false, |a| a == AgentTarget::Cursor); - let install_windsurf = agent.map_or(false, |a| a == AgentTarget::Windsurf); + let install_cursor = agent == Some(AgentTarget::Cursor); + let install_windsurf = agent == Some(AgentTarget::Windsurf); + let install_cline = agent == Some(AgentTarget::Cline); let patch_mode = if auto_patch { init::PatchMode::Auto @@ -1689,6 +1692,7 @@ fn main() -> Result<()> { install_opencode, install_cursor, install_windsurf, + install_cline, claude_md, hook_only, codex, From 5e1fc20cb0da68b73bf87c92ac74248a0df0ce30 Mon Sep 17 00:00:00 2001 From: Florian BRUNIAUX Date: Thu, 19 Mar 2026 10:11:36 +0100 Subject: [PATCH 09/30] fix(skill/rtk-triage): increase PR/issue limit to 200 with pagination hint (#717) * fix(skill/rtk-triage): increase PR/issue limit to 200 with pagination hint Raise gh pr list limit from 60 to 200 to match gh's max per call. Add inline comment explaining how to paginate for repos with >200 open PRs. Update threshold warning from >60 to >200 PRs/issues. Co-Authored-By: Claude Sonnet 4.6 Signed-off-by: Florian BRUNIAUX * docs(architecture): update module count to 67 (hook_cmd added in #573) hook_cmd.rs was added in feat: add Gemini CLI support (#573) but ARCHITECTURE.md was not updated. Fixes pre-push validation failure. 
Co-Authored-By: Claude Sonnet 4.6 Signed-off-by: Florian BRUNIAUX --------- Signed-off-by: Florian BRUNIAUX Co-authored-by: Claude Sonnet 4.6 --- .claude/skills/rtk-triage/SKILL.md | 8 ++++++-- ARCHITECTURE.md | 4 ++-- 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/.claude/skills/rtk-triage/SKILL.md b/.claude/skills/rtk-triage/SKILL.md index 34b9d0fc..9aed21a2 100644 --- a/.claude/skills/rtk-triage/SKILL.md +++ b/.claude/skills/rtk-triage/SKILL.md @@ -57,9 +57,13 @@ gh api "repos/{owner}/{repo}/collaborators" --jq '.[].login' **PRs** : ```bash -gh pr list --state open --limit 60 \ +# Fetcher toutes les PRs ouvertes — paginer si nécessaire (gh limite à 200 par appel) +gh pr list --state open --limit 200 \ --json number,title,author,createdAt,updatedAt,additions,deletions,changedFiles,isDraft,mergeable,reviewDecision,statusCheckRollup,body +# Si le repo a >200 PRs ouvertes, relancer avec --search pour paginer : +# gh pr list --state open --limit 200 --search "is:pr is:open sort:updated-desc" ... + # Pour chaque PR, récupérer les fichiers modifiés (nécessaire pour overlap detection) # Prioriser les PRs candidates (même domaine, même auteur) gh pr view {num} --json files --jq '[.files[].path] | join(",")' @@ -232,6 +236,6 @@ Croisement issues × PRs. {N} PRs ouvertes, {N} issues ouvertes. - Langue : argument `en`/`fr`. Défaut : `fr`. Les commentaires GitHub restent toujours en anglais. - Ne jamais poster de commentaires GitHub sans validation utilisateur (AskUserQuestion). -- Si >150 issues ou >60 PRs : prévenir l'utilisateur, proposer de filtrer par label ou date. +- Si >200 issues ou >200 PRs : prévenir l'utilisateur et paginer (relancer avec `--search` ou `gh api` avec pagination). - L'analyse croisée (Phase 3) est toujours exécutée — c'est la valeur ajoutée de ce skill. - Le fichier claudedocs est sauvegardé automatiquement sauf si l'utilisateur dit "no save". 
diff --git a/ARCHITECTURE.md b/ARCHITECTURE.md index ee375f25..5e4d2578 100644 --- a/ARCHITECTURE.md +++ b/ARCHITECTURE.md @@ -293,11 +293,11 @@ SHARED utils.rs Helpers N/A ✓ tee.rs Full output recovery N/A ✓ ``` -**Total: 66 modules** (44 command modules + 22 infrastructure modules) +**Total: 67 modules** (45 command modules + 22 infrastructure modules) ### Module Count Breakdown -- **Command Modules**: 44 (directly exposed to users) +- **Command Modules**: 45 (directly exposed to users) - **Infrastructure Modules**: 22 (utils, filter, tracking, tee, config, init, gain, toml_filter, verify_cmd, trust, etc.) - **Git Commands**: 7 operations (status, diff, log, add, commit, push, branch/checkout) - **JS/TS Tooling**: 8 modules (modern frontend/fullstack development) From 3480ce5e4ee588033b295a8d136aee1bef153468 Mon Sep 17 00:00:00 2001 From: Adam Powis Date: Thu, 19 Mar 2026 10:53:25 +0000 Subject: [PATCH 10/30] fix(golangci-lint): add v2 compatibility with runtime version detection golangci-lint v2 removed --out-format=json in favour of --output.json.path stdout. Detect the installed major version at runtime via golangci-lint --version and branch on the correct flag and JSON extraction strategy. 
- Use --output.json.path stdout for v2, --out-format=json for v1 - Extract JSON from first line only on v2 (v2 appends trailing metadata after JSON line) - Deserialise new v2 fields: SourceLines, Severity, Offset (serde default for v1 compat) - Show first source line per linter-file group on v2 for richer context - Always forward stderr to caller (was silently dropped unless --verbose) - Falls back to v1 behaviour on any version detection failure Signed-off-by: Adam Powis --- src/golangci_cmd.rs | 449 +++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 425 insertions(+), 24 deletions(-) diff --git a/src/golangci_cmd.rs b/src/golangci_cmd.rs index f6a3166c..b0edb677 100644 --- a/src/golangci_cmd.rs +++ b/src/golangci_cmd.rs @@ -4,6 +4,7 @@ use crate::utils::{resolved_command, truncate}; use anyhow::{Context, Result}; use serde::Deserialize; use std::collections::HashMap; +use std::process::Command; #[derive(Debug, Deserialize)] struct Position { @@ -15,6 +16,9 @@ struct Position { #[serde(rename = "Column")] #[allow(dead_code)] column: usize, + #[serde(rename = "Offset", default)] + #[allow(dead_code)] + offset: usize, } #[derive(Debug, Deserialize)] @@ -26,6 +30,11 @@ struct Issue { text: String, #[serde(rename = "Pos")] pos: Position, + #[serde(rename = "SourceLines", default)] + source_lines: Vec, + #[serde(rename = "Severity", default)] + #[allow(dead_code)] + severity: String, } #[derive(Debug, Deserialize)] @@ -34,18 +43,63 @@ struct GolangciOutput { issues: Vec, } +/// Parse major version number from `golangci-lint --version` output. +/// Returns 1 on any failure (safe fallback — v1 behaviour). +fn parse_major_version(version_output: &str) -> u32 { + // Handles: + // "golangci-lint version 1.59.1" + // "golangci-lint has version 2.10.0 built with ..." 
+ for word in version_output.split_whitespace() { + if let Some(major) = word.split('.').next().and_then(|s| s.parse::().ok()) { + if word.contains('.') { + return major; + } + } + } + 1 +} + +/// Run `golangci-lint --version` and return the major version number. +/// Returns 1 on any failure. +fn detect_major_version() -> u32 { + let output = Command::new("golangci-lint").arg("--version").output(); + + match output { + Ok(o) => { + let stdout = String::from_utf8_lossy(&o.stdout); + let stderr = String::from_utf8_lossy(&o.stderr); + let version_text = if stdout.trim().is_empty() { + &*stderr + } else { + &*stdout + }; + parse_major_version(version_text) + } + Err(_) => 1, + } +} + pub fn run(args: &[String], verbose: u8) -> Result<()> { let timer = tracking::TimedExecution::start(); + let version = detect_major_version(); + let mut cmd = resolved_command("golangci-lint"); - // Force JSON output - let has_format = args - .iter() - .any(|a| a == "--out-format" || a.starts_with("--out-format=")); + // Force JSON output (only if user hasn't specified it) + let has_format = args.iter().any(|a| { + a == "--out-format" + || a.starts_with("--out-format=") + || a == "--output.json.path" + || a.starts_with("--output.json.path=") + }); if !has_format { - cmd.arg("run").arg("--out-format=json"); + if version >= 2 { + cmd.arg("run").arg("--output.json.path").arg("stdout"); + } else { + cmd.arg("run").arg("--out-format=json"); + } } else { cmd.arg("run"); } @@ -55,7 +109,11 @@ pub fn run(args: &[String], verbose: u8) -> Result<()> { } if verbose > 0 { - eprintln!("Running: golangci-lint run --out-format=json"); + if version >= 2 { + eprintln!("Running: golangci-lint run --output.json.path stdout"); + } else { + eprintln!("Running: golangci-lint run --out-format=json"); + } } let output = cmd.output().context( @@ -66,12 +124,19 @@ pub fn run(args: &[String], verbose: u8) -> Result<()> { let stderr = String::from_utf8_lossy(&output.stderr); let raw = format!("{}\n{}", stdout, 
stderr); - let filtered = filter_golangci_json(&stdout); + // v2 outputs JSON on first line + trailing text; v1 outputs just JSON + let json_output = if version >= 2 { + stdout.lines().next().unwrap_or("") + } else { + &*stdout + }; + + let filtered = filter_golangci_json(json_output, version); println!("{}", filtered); - // Include stderr if present (config errors, etc.) - if !stderr.trim().is_empty() && verbose > 0 { + // Always forward stderr (config errors, missing linters, etc.) + if !stderr.trim().is_empty() { eprintln!("{}", stderr.trim()); } @@ -87,9 +152,6 @@ pub fn run(args: &[String], verbose: u8) -> Result<()> { match output.status.code() { Some(0) | Some(1) => Ok(()), Some(code) => { - if !stderr.trim().is_empty() { - eprintln!("{}", stderr.trim()); - } std::process::exit(code); } None => { @@ -100,13 +162,12 @@ pub fn run(args: &[String], verbose: u8) -> Result<()> { } /// Filter golangci-lint JSON output - group by linter and file -fn filter_golangci_json(output: &str) -> String { +fn filter_golangci_json(output: &str, version: u32) -> String { let result: Result = serde_json::from_str(output); let golangci_output = match result { Ok(o) => o, Err(e) => { - // Fallback if JSON parsing fails return format!( "golangci-lint (JSON parse failed: {})\n{}", e, @@ -137,7 +198,7 @@ fn filter_golangci_json(output: &str) -> String { // Group by file let mut by_file: HashMap<&str, usize> = HashMap::new(); for issue in &issues { - *by_file.entry(&issue.pos.filename).or_insert(0) += 1; + *by_file.entry(issue.pos.filename.as_str()).or_insert(0) += 1; } let mut file_counts: Vec<_> = by_file.iter().collect(); @@ -170,16 +231,33 @@ fn filter_golangci_json(output: &str) -> String { result.push_str(&format!(" {} ({} issues)\n", short_path, count)); // Show top 3 linters in this file - let mut file_linters: HashMap = HashMap::new(); - for issue in issues.iter().filter(|i| &i.pos.filename == *file) { - *file_linters.entry(issue.from_linter.clone()).or_insert(0) += 1; + let 
mut file_linters: HashMap> = HashMap::new(); + for issue in issues.iter().filter(|i| i.pos.filename.as_str() == **file) { + file_linters + .entry(issue.from_linter.clone()) + .or_default() + .push(issue); } let mut file_linter_counts: Vec<_> = file_linters.iter().collect(); - file_linter_counts.sort_by(|a, b| b.1.cmp(a.1)); - - for (linter, count) in file_linter_counts.iter().take(3) { - result.push_str(&format!(" {} ({})\n", linter, count)); + file_linter_counts.sort_by(|a, b| b.1.len().cmp(&a.1.len())); + + for (linter, linter_issues) in file_linter_counts.iter().take(3) { + result.push_str(&format!(" {} ({})\n", linter, linter_issues.len())); + + // v2 only: show first source line for this linter-file group + if version >= 2 { + if let Some(first_issue) = linter_issues.first() { + if let Some(source_line) = first_issue.source_lines.first() { + let trimmed = source_line.trim(); + let display = match trimmed.char_indices().nth(80) { + Some((i, _)) => &trimmed[..i], + None => trimmed, + }; + result.push_str(&format!(" → {}\n", display)); + } + } + } } } @@ -214,7 +292,7 @@ mod tests { #[test] fn test_filter_golangci_no_issues() { let output = r#"{"Issues":[]}"#; - let result = filter_golangci_json(output); + let result = filter_golangci_json(output, 1); assert!(result.contains("golangci-lint")); assert!(result.contains("No issues found")); } @@ -241,7 +319,7 @@ mod tests { ] }"#; - let result = filter_golangci_json(output); + let result = filter_golangci_json(output, 1); assert!(result.contains("3 issues")); assert!(result.contains("2 files")); assert!(result.contains("errcheck")); @@ -266,4 +344,327 @@ mod tests { ); assert_eq!(compact_path("relative/file.go"), "file.go"); } + + #[test] + fn test_parse_version_v1_format() { + assert_eq!(parse_major_version("golangci-lint version 1.59.1"), 1); + } + + #[test] + fn test_parse_version_v2_format() { + assert_eq!( + parse_major_version("golangci-lint has version 2.10.0 built with go1.26.0 from 95dcb68a on 
2026-02-17T13:05:51Z"), + 2 + ); + } + + #[test] + fn test_parse_version_empty_returns_1() { + assert_eq!(parse_major_version(""), 1); + } + + #[test] + fn test_parse_version_malformed_returns_1() { + assert_eq!(parse_major_version("not a version string"), 1); + } + + #[test] + fn test_filter_golangci_v2_fields_parse_cleanly() { + // v2 JSON includes Severity, SourceLines, Offset — must not panic + let output = r#"{ + "Issues": [ + { + "FromLinter": "errcheck", + "Text": "Error return value not checked", + "Severity": "error", + "SourceLines": [" if err := foo(); err != nil {"], + "Pos": {"Filename": "main.go", "Line": 42, "Column": 5, "Offset": 1024} + } + ] +}"#; + let result = filter_golangci_json(output, 2); + assert!(result.contains("errcheck")); + assert!(result.contains("main.go")); + } + + #[test] + fn test_filter_v2_shows_source_lines() { + let output = r#"{ + "Issues": [ + { + "FromLinter": "errcheck", + "Text": "Error return value not checked", + "Severity": "error", + "SourceLines": [" if err := foo(); err != nil {"], + "Pos": {"Filename": "main.go", "Line": 42, "Column": 5, "Offset": 0} + } + ] +}"#; + let result = filter_golangci_json(output, 2); + assert!( + result.contains("→"), + "v2 should show source line with → prefix" + ); + assert!(result.contains("if err := foo()")); + } + + #[test] + fn test_filter_v1_does_not_show_source_lines() { + let output = r#"{ + "Issues": [ + { + "FromLinter": "errcheck", + "Text": "Error return value not checked", + "Severity": "error", + "SourceLines": [" if err := foo(); err != nil {"], + "Pos": {"Filename": "main.go", "Line": 42, "Column": 5, "Offset": 0} + } + ] +}"#; + let result = filter_golangci_json(output, 1); + assert!(!result.contains("→"), "v1 should not show source lines"); + } + + #[test] + fn test_filter_v2_empty_source_lines_graceful() { + let output = r#"{ + "Issues": [ + { + "FromLinter": "errcheck", + "Text": "Error return value not checked", + "Severity": "", + "SourceLines": [], + "Pos": 
{"Filename": "main.go", "Line": 42, "Column": 5, "Offset": 0} + } + ] +}"#; + let result = filter_golangci_json(output, 2); + assert!(result.contains("errcheck")); + assert!( + !result.contains("→"), + "no source line to show, should degrade gracefully" + ); + } + + #[test] + fn test_filter_v2_source_line_truncated_to_80_chars() { + let long_line = "x".repeat(120); + let output = format!( + r#"{{ + "Issues": [ + {{ + "FromLinter": "lll", + "Text": "line too long", + "Severity": "", + "SourceLines": ["{}"], + "Pos": {{"Filename": "main.go", "Line": 1, "Column": 1, "Offset": 0}} + }} + ] +}}"#, + long_line + ); + let result = filter_golangci_json(&output, 2); + // Content truncated at 80 chars; prefix " → " = 10 bytes (6 spaces + 3-byte arrow + space) + // Total line max = 80 + 10 = 90 bytes + for line in result.lines() { + if line.trim_start().starts_with('→') { + assert!(line.len() <= 90, "source line too long: {}", line.len()); + } + } + } + + #[test] + fn test_filter_v2_source_line_truncated_non_ascii() { + // Japanese characters are 3 bytes each; 30 chars = 90 bytes > 80 bytes naive slice would panic + let long_line = "日".repeat(30); // 30 chars, 90 bytes + let output = format!( + r#"{{ + "Issues": [ + {{ + "FromLinter": "lll", + "Text": "line too long", + "Severity": "", + "SourceLines": ["{}"], + "Pos": {{"Filename": "main.go", "Line": 1, "Column": 1, "Offset": 0}} + }} + ] +}}"#, + long_line + ); + // Should not panic and output should be ≤ 80 chars + let result = filter_golangci_json(&output, 2); + for line in result.lines() { + if line.trim_start().starts_with('→') { + let content = line.trim_start().trim_start_matches('→').trim(); + assert!( + content.chars().count() <= 80, + "content chars: {}", + content.chars().count() + ); + } + } + } + + fn count_tokens(text: &str) -> usize { + text.split_whitespace().count() + } + + #[test] + fn test_golangci_v2_token_savings() { + // Simulate a realistic v2 JSON output with multiple issues + let raw = r#"{ + 
"Issues": [ + { + "FromLinter": "errcheck", + "Text": "Error return value of `foo` is not checked", + "Severity": "error", + "SourceLines": [ + " if err := foo(); err != nil {", + " return err", + " }" + ], + "Pos": { + "Filename": "pkg/handler/server.go", + "Line": 42, + "Column": 5, + "Offset": 1024 + }, + "Replacement": null, + "ExpectNoLint": false, + "ExpectedNoLintLinter": "" + }, + { + "FromLinter": "errcheck", + "Text": "Error return value of `bar` is not checked", + "Severity": "error", + "SourceLines": [ + " bar()", + " return nil", + "}" + ], + "Pos": { + "Filename": "pkg/handler/server.go", + "Line": 55, + "Column": 2, + "Offset": 2048 + }, + "Replacement": null, + "ExpectNoLint": false, + "ExpectedNoLintLinter": "" + }, + { + "FromLinter": "gosimple", + "Text": "S1003: should replace strings.Index with strings.Contains", + "Severity": "warning", + "SourceLines": [ + " if strings.Index(s, sub) >= 0 {", + " return true", + " }" + ], + "Pos": { + "Filename": "pkg/utils/strings.go", + "Line": 15, + "Column": 2, + "Offset": 512 + }, + "Replacement": null, + "ExpectNoLint": false, + "ExpectedNoLintLinter": "" + }, + { + "FromLinter": "govet", + "Text": "printf: Sprintf format %s has arg of wrong type int", + "Severity": "error", + "SourceLines": [ + " fmt.Sprintf(\"%s\", 42)" + ], + "Pos": { + "Filename": "cmd/main/main.go", + "Line": 10, + "Column": 3, + "Offset": 256 + }, + "Replacement": null, + "ExpectNoLint": false, + "ExpectedNoLintLinter": "" + }, + { + "FromLinter": "unused", + "Text": "func `unusedHelper` is unused", + "Severity": "warning", + "SourceLines": [ + "func unusedHelper() {", + " // implementation", + "}" + ], + "Pos": { + "Filename": "internal/helpers.go", + "Line": 100, + "Column": 1, + "Offset": 4096 + }, + "Replacement": null, + "ExpectNoLint": false, + "ExpectedNoLintLinter": "" + }, + { + "FromLinter": "errcheck", + "Text": "Error return value of `close` is not checked", + "Severity": "error", + "SourceLines": [ + " defer 
file.Close()" + ], + "Pos": { + "Filename": "pkg/handler/server.go", + "Line": 120, + "Column": 10, + "Offset": 3072 + }, + "Replacement": null, + "ExpectNoLint": false, + "ExpectedNoLintLinter": "" + }, + { + "FromLinter": "gosimple", + "Text": "S1005: should omit nil check", + "Severity": "warning", + "SourceLines": [ + " if m != nil {", + " for k, v := range m {", + " process(k, v)", + " }", + " }" + ], + "Pos": { + "Filename": "pkg/utils/strings.go", + "Line": 45, + "Column": 1, + "Offset": 1536 + }, + "Replacement": null, + "ExpectNoLint": false, + "ExpectedNoLintLinter": "" + } + ], + "Report": { + "Warnings": [], + "Linters": [ + {"Name": "errcheck", "Enabled": true, "EnabledByDefault": true}, + {"Name": "gosimple", "Enabled": true, "EnabledByDefault": true}, + {"Name": "govet", "Enabled": true, "EnabledByDefault": true}, + {"Name": "unused", "Enabled": true, "EnabledByDefault": true} + ] + } +}"#; + + let filtered = filter_golangci_json(raw, 2); + let savings = 100.0 - (count_tokens(&filtered) as f64 / count_tokens(raw) as f64 * 100.0); + + assert!( + savings >= 60.0, + "Expected ≥60% token savings, got {:.1}%\nFiltered output:\n{}", + savings, + filtered + ); + } } From 15bc0f8d6e135371688d5fd42decc6d8a99454f0 Mon Sep 17 00:00:00 2001 From: Navid EMAD Date: Thu, 19 Mar 2026 15:04:59 +0100 Subject: [PATCH 11/30] feat(ruby): add Ruby on Rails support (rspec, rubocop, rake, bundle) (#724) * feat(ruby): add Ruby on Rails support (rspec, rubocop, rake, bundle) Unifies 5 competing PRs (#198, #292, #379, #534, #643) into a single coherent implementation. New commands: - rtk rspec: JSON parsing with text fallback (60%+ savings) - rtk rubocop: JSON parsing, group by cop/severity (60%+ savings) - rtk rake test: Minitest state machine parser (85-90% savings) - rtk bundle install: TOML filter, strip Using lines (90%+ savings) Shared infrastructure: ruby_exec(), fallback_tail(), exit_code_from_output(), count_tokens() in utils.rs. 
Discover/rewrite rules for rspec, rubocop, rake, rails, bundle including bundle exec and bin/ variants. E2E smoke tests (scripts/test-ruby.sh) covering all 4 commands. 56 new unit tests + 4 inline TOML tests. All 1035 tests passing. Co-Authored-By: Claude Opus 4.6 (1M context) Signed-off-by: Navid EMAD * fix(ruby): use TEST= env var for rake single-file test in smoke tests Rails' `rake test` ignores positional file args; use `TEST=path` syntax. Co-Authored-By: Claude Opus 4.6 (1M context) Signed-off-by: Navid EMAD * docs(ruby): add Ruby module architecture and update attribution Integrate ARCHITECTURE.md Ruby Module Architecture section and CLAUDE.md module table/fork-features from PR #643. Update PR description attribution. Co-Authored-By: Claude Opus 4.6 (1M context) Signed-off-by: Navid EMAD * chore: remove PULL_REQUEST_DESCRIPTION.md from repo PR description lives on GitHub, no need to track in the codebase. Co-Authored-By: Claude Opus 4.6 (1M context) Signed-off-by: Navid EMAD --------- Signed-off-by: Navid EMAD Co-authored-by: Claude Opus 4.6 (1M context) --- ARCHITECTURE.md | 36 ++ CHANGELOG.md | 11 + CLAUDE.md | 14 +- README.md | 8 + scripts/test-all.sh | 28 +- scripts/test-ruby.sh | 463 ++++++++++++++ src/discover/rules.rs | 44 ++ src/filters/bundle-install.toml | 61 ++ src/main.rs | 39 ++ src/rake_cmd.rs | 441 +++++++++++++ src/rspec_cmd.rs | 1046 +++++++++++++++++++++++++++++++ src/rubocop_cmd.rs | 659 +++++++++++++++++++ src/toml_filter.rs | 10 +- src/utils.rs | 52 ++ 14 files changed, 2903 insertions(+), 9 deletions(-) create mode 100755 scripts/test-ruby.sh create mode 100644 src/filters/bundle-install.toml create mode 100644 src/rake_cmd.rs create mode 100644 src/rspec_cmd.rs create mode 100644 src/rubocop_cmd.rs diff --git a/ARCHITECTURE.md b/ARCHITECTURE.md index 5e4d2578..0ae617e1 100644 --- a/ARCHITECTURE.md +++ b/ARCHITECTURE.md @@ -272,6 +272,10 @@ PYTHON ruff_cmd.rs ruff check/format 80%+ ✓ GO go_cmd.rs go test/build/vet 75-90% ✓ 
golangci_cmd.rs golangci-lint 85% ✓ +RUBY rake_cmd.rs rake/rails test 85-90% ✓ + rspec_cmd.rs rspec 60%+ ✓ + rubocop_cmd.rs rubocop 60%+ ✓ + NETWORK wget_cmd.rs wget 85-95% ✓ curl_cmd.rs curl 70% ✓ @@ -303,6 +307,7 @@ SHARED utils.rs Helpers N/A ✓ - **JS/TS Tooling**: 8 modules (modern frontend/fullstack development) - **Python Tooling**: 3 modules (ruff, pytest, pip) - **Go Tooling**: 2 modules (go test/build/vet, golangci-lint) +- **Ruby Tooling**: 3 modules (rake/minitest, rspec, rubocop) + 1 TOML filter (bundle install) --- @@ -605,6 +610,37 @@ pub fn run(command: &GoCommand, verbose: u8) -> Result<()> { - Different output format (JSON API vs text) - Distinct use case (comprehensive linting vs single-tool diagnostics) +### Ruby Module Architecture + +**Added**: 2026-03-15 +**Motivation**: Ruby on Rails development support (minitest, RSpec, RuboCop, Bundler) + +Ruby modules follow the standalone command pattern (like Python) with a shared `ruby_exec()` utility for auto-detecting `bundle exec`. + +``` +Module Strategy Output Format Savings +───────────────────────────────────────────────────────────────────────── +rake_cmd.rs STATE MACHINE Text parser 85-90% + Minitest output (rake test / rails test) + → State machine: Header → Running → Failures → Summary + → All pass: "ok rake test: 8 runs, 0 failures" + → Failures: summary + numbered failure details + +rspec_cmd.rs JSON/TEXT DUAL JSON → 60%+ 60%+ + Injects --format json, parses structured results + → Fallback to text state machine when JSON unavailable + → Strips Spring, SimpleCov, DEPRECATION, Capybara noise + +rubocop_cmd.rs JSON PARSING JSON API 60%+ + Injects --format json, groups by cop/severity + → Skips JSON injection in autocorrect mode (-a, -A) + +bundle-install.toml TOML FILTER Text rules 90%+ + → Strips "Using" lines, short-circuits to "ok bundle: complete" +``` + +**Shared**: `ruby_exec(tool)` in utils.rs auto-detects `bundle exec` when `Gemfile` exists. Used by rake_cmd, rspec_cmd, rubocop_cmd. 
+ ### Format Strategy Decision Tree ``` diff --git a/CHANGELOG.md b/CHANGELOG.md index ebc3d790..b7053624 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,17 @@ All notable changes to rtk (Rust Token Killer) will be documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). +## [Unreleased] + +### Features + +* **ruby:** add RSpec test runner filter with JSON parsing and text fallback (60%+ reduction) +* **ruby:** add RuboCop linter filter with JSON parsing, grouped by cop/severity (60%+ reduction) +* **ruby:** add Minitest filter for `rake test` / `rails test` with state machine parser (85-90% reduction) +* **ruby:** add TOML filter for `bundle install/update` — strip `Using` lines (90%+ reduction) +* **ruby:** add `ruby_exec()` shared utility for auto-detecting `bundle exec` when Gemfile exists +* **ruby:** add discover/rewrite rules for rake, rails, rspec, rubocop, and bundle commands + ## [0.30.1](https://github.com/rtk-ai/rtk/compare/v0.30.0...v0.30.1) (2026-03-18) diff --git a/CLAUDE.md b/CLAUDE.md index ab512961..35ff19ed 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -230,8 +230,11 @@ rtk gain --history | grep proxy | pip_cmd.rs | pip/uv package manager | JSON parsing, auto-detect uv (70-85% reduction) | | go_cmd.rs | Go commands | NDJSON for test, text for build/vet (80-90% reduction) | | golangci_cmd.rs | golangci-lint | JSON parsing, group by rule (85% reduction) | +| rake_cmd.rs | Minitest via rake/rails test | State machine text parser, failures only (85-90% reduction) | +| rspec_cmd.rs | RSpec test runner | JSON injection + text fallback, failures only (60%+ reduction) | +| rubocop_cmd.rs | RuboCop linter | JSON injection, group by cop/severity (60%+ reduction) | | tee.rs | Full output recovery | Save raw output to file on failure, print hint for LLM re-read | -| utils.rs | Shared utilities | Package manager 
detection, common formatting | +| utils.rs | Shared utilities | Package manager detection, ruby_exec, common formatting | | discover/ | Claude Code history analysis | Scan JSONL sessions, classify commands, report missed savings | ## Performance Constraints @@ -392,6 +395,15 @@ pub fn execute_with_filter(cmd: &str, args: &[&str]) -> Result<()> { - **Architecture**: Standalone Python commands (mirror lint/prettier), Go sub-enum (mirror git/cargo) - **Patterns**: JSON for structured output (ruff check, golangci-lint, pip), NDJSON streaming (go test), text state machine (pytest), text filters (go build/vet, ruff format) +### Ruby on Rails Support (2026-03-15) +- **Ruby Commands**: 3 modules for Ruby/Rails development + - `rtk rspec`: RSpec test runner with JSON injection (`--format json`), text fallback (60%+ reduction) + - `rtk rubocop`: RuboCop linter with JSON injection, group by cop/severity (60%+ reduction) + - `rtk rake test`: Minitest filter via rake/rails test, state machine parser (85-90% reduction) +- **TOML Filter**: `bundle-install.toml` for bundle install/update — strips `Using` lines (90%+ reduction) +- **Shared Infrastructure**: `ruby_exec()` in utils.rs auto-detects `bundle exec` when Gemfile exists +- **Hook Integration**: Rewrites `rspec`, `rubocop`, `rake test`, `rails test`, `bundle exec` variants + ## Testing Strategy ### TDD Workflow (mandatory) diff --git a/README.md b/README.md index d818e2af..7401256d 100644 --- a/README.md +++ b/README.md @@ -171,6 +171,8 @@ rtk playwright test # E2E results (failures only) rtk pytest # Python tests (-90%) rtk go test # Go tests (NDJSON, -90%) rtk cargo test # Cargo tests (-90%) +rtk rake test # Ruby minitest (-90%) +rtk rspec # RSpec tests (JSON, -60%+) ``` ### Build & Lint @@ -184,6 +186,7 @@ rtk cargo build # Cargo build (-80%) rtk cargo clippy # Cargo clippy (-80%) rtk ruff check # Python linting (JSON, -80%) rtk golangci-lint run # Go linting (JSON, -85%) +rtk rubocop # Ruby linting (JSON, -60%+) ``` ### 
Package Managers @@ -191,6 +194,7 @@ rtk golangci-lint run # Go linting (JSON, -85%) rtk pnpm list # Compact dependency tree rtk pip list # Python packages (auto-detect uv) rtk pip outdated # Outdated packages +rtk bundle install # Ruby gems (strip Using lines) rtk prisma generate # Schema generation (no ASCII art) ``` @@ -351,6 +355,10 @@ cp hooks/opencode-rtk.ts ~/.config/opencode/plugins/rtk.ts | `pip list/install` | `rtk pip ...` | | `go test/build/vet` | `rtk go ...` | | `golangci-lint` | `rtk golangci-lint` | +| `rake test` / `rails test` | `rtk rake test` | +| `rspec` / `bundle exec rspec` | `rtk rspec` | +| `rubocop` / `bundle exec rubocop` | `rtk rubocop` | +| `bundle install/update` | `rtk bundle ...` | | `docker ps/images/logs` | `rtk docker ...` | | `kubectl get/logs` | `rtk kubectl ...` | | `curl` | `rtk curl` | diff --git a/scripts/test-all.sh b/scripts/test-all.sh index 4cbbef02..f0e2c06b 100755 --- a/scripts/test-all.sh +++ b/scripts/test-all.sh @@ -437,20 +437,42 @@ else skip_test "rtk gt" "gt not installed" fi -# ── 30. Global flags ──────────────────────────────── +# ── 30. Ruby (conditional) ────────────────────────── + +section "Ruby (conditional)" + +if command -v rspec &>/dev/null; then + assert_help "rtk rspec" rtk rspec --help +else + skip_test "rtk rspec" "rspec not installed" +fi + +if command -v rubocop &>/dev/null; then + assert_help "rtk rubocop" rtk rubocop --help +else + skip_test "rtk rubocop" "rubocop not installed" +fi + +if command -v rake &>/dev/null; then + assert_help "rtk rake" rtk rake --help +else + skip_test "rtk rake" "rake not installed" +fi + +# ── 31. Global flags ──────────────────────────────── section "Global flags" assert_ok "rtk -u ls ." rtk -u ls . assert_ok "rtk --skip-env npm --help" rtk --skip-env npm --help -# ── 31. CcEconomics ───────────────────────────────── +# ── 32. CcEconomics ───────────────────────────────── section "CcEconomics" assert_ok "rtk cc-economics" rtk cc-economics -# ── 32. 
Learn ─────────────────────────────────────── +# ── 33. Learn ─────────────────────────────────────── section "Learn" diff --git a/scripts/test-ruby.sh b/scripts/test-ruby.sh new file mode 100755 index 00000000..3b3008b9 --- /dev/null +++ b/scripts/test-ruby.sh @@ -0,0 +1,463 @@ +#!/usr/bin/env bash +# +# RTK Smoke Tests — Ruby (RSpec, RuboCop, Minitest, Bundle) +# Creates a minimal Rails app, exercises all Ruby RTK filters, then cleans up. +# Usage: bash scripts/test-ruby.sh +# +# Prerequisites: rtk (installed), ruby, bundler, rails gem +# Duration: ~60-120s (rails new + bundle install dominate) +# +set -euo pipefail + +PASS=0 +FAIL=0 +SKIP=0 +FAILURES=() + +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[0;33m' +CYAN='\033[0;36m' +BOLD='\033[1m' +NC='\033[0m' + +# ── Helpers ────────────────────────────────────────── + +assert_ok() { + local name="$1"; shift + local output + if output=$("$@" 2>&1); then + PASS=$((PASS + 1)) + printf " ${GREEN}PASS${NC} %s\n" "$name" + else + FAIL=$((FAIL + 1)) + FAILURES+=("$name") + printf " ${RED}FAIL${NC} %s\n" "$name" + printf " cmd: %s\n" "$*" + printf " out: %s\n" "$(echo "$output" | head -3)" + fi +} + +assert_contains() { + local name="$1"; local needle="$2"; shift 2 + local output + if output=$("$@" 2>&1) && echo "$output" | grep -q "$needle"; then + PASS=$((PASS + 1)) + printf " ${GREEN}PASS${NC} %s\n" "$name" + else + FAIL=$((FAIL + 1)) + FAILURES+=("$name") + printf " ${RED}FAIL${NC} %s\n" "$name" + printf " expected: '%s'\n" "$needle" + printf " got: %s\n" "$(echo "$output" | head -3)" + fi +} + +# Allow non-zero exit but check output +assert_output() { + local name="$1"; local needle="$2"; shift 2 + local output + output=$("$@" 2>&1) || true + if echo "$output" | grep -qi "$needle"; then + PASS=$((PASS + 1)) + printf " ${GREEN}PASS${NC} %s\n" "$name" + else + FAIL=$((FAIL + 1)) + FAILURES+=("$name") + printf " ${RED}FAIL${NC} %s\n" "$name" + printf " expected: '%s'\n" "$needle" + printf " got: %s\n" "$(echo 
"$output" | head -3)" + fi +} + +skip_test() { + local name="$1"; local reason="$2" + SKIP=$((SKIP + 1)) + printf " ${YELLOW}SKIP${NC} %s (%s)\n" "$name" "$reason" +} + +# Assert command exits with non-zero and output matches needle +assert_exit_nonzero() { + local name="$1"; local needle="$2"; shift 2 + local output + local rc=0 + output=$("$@" 2>&1) || rc=$? + if [[ $rc -ne 0 ]] && echo "$output" | grep -qi "$needle"; then + PASS=$((PASS + 1)) + printf " ${GREEN}PASS${NC} %s (exit=%d)\n" "$name" "$rc" + else + FAIL=$((FAIL + 1)) + FAILURES+=("$name") + printf " ${RED}FAIL${NC} %s (exit=%d)\n" "$name" "$rc" + if [[ $rc -eq 0 ]]; then + printf " expected non-zero exit, got 0\n" + else + printf " expected: '%s'\n" "$needle" + fi + printf " out: %s\n" "$(echo "$output" | head -3)" + fi +} + +section() { + printf "\n${BOLD}${CYAN}── %s ──${NC}\n" "$1" +} + +# ── Prerequisite checks ───────────────────────────── + +RTK=$(command -v rtk || echo "") +if [[ -z "$RTK" ]]; then + echo "rtk not found in PATH. Run: cargo install --path ." + exit 1 +fi + +if ! command -v ruby >/dev/null 2>&1; then + echo "ruby not found in PATH. Install Ruby first." + exit 1 +fi + +if ! command -v bundle >/dev/null 2>&1; then + echo "bundler not found in PATH. Run: gem install bundler" + exit 1 +fi + +if ! command -v rails >/dev/null 2>&1; then + echo "rails not found in PATH. 
Run: gem install rails" + exit 1 +fi + +# ── Preamble ───────────────────────────────────────── + +printf "${BOLD}RTK Smoke Tests — Ruby (RSpec, RuboCop, Minitest, Bundle)${NC}\n" +printf "Binary: %s (%s)\n" "$RTK" "$(rtk --version)" +printf "Ruby: %s\n" "$(ruby --version)" +printf "Rails: %s\n" "$(rails --version)" +printf "Bundler: %s\n" "$(bundle --version)" +printf "Date: %s\n\n" "$(date '+%Y-%m-%d %H:%M')" + +# ── Temp dir + cleanup trap ────────────────────────── + +TMPDIR=$(mktemp -d /tmp/rtk-ruby-smoke-XXXXXX) +trap 'rm -rf "$TMPDIR"' EXIT + +printf "${BOLD}Setting up temporary Rails app in %s ...${NC}\n" "$TMPDIR" + +# ── Setup phase (not counted in assertions) ────────── + +cd "$TMPDIR" + +# 1. Create minimal Rails app +printf " → rails new (--minimal --skip-git --skip-docker) ...\n" +rails new rtk_smoke_app --minimal --skip-git --skip-docker --quiet 2>&1 | tail -1 || true +cd rtk_smoke_app + +# 2. Add rspec-rails and rubocop to Gemfile +cat >> Gemfile <<'GEMFILE' + +group :development, :test do + gem 'rspec-rails' + gem 'rubocop', require: false +end +GEMFILE + +# 3. Bundle install +printf " → bundle install ...\n" +bundle install --quiet 2>&1 | tail -1 || true + +# 4. Generate scaffold (creates model + minitest files) +printf " → rails generate scaffold Post ...\n" +rails generate scaffold Post title:string body:text published:boolean --quiet 2>&1 | tail -1 || true + +# 5. Install RSpec + create manual spec file +printf " → rails generate rspec:install ...\n" +rails generate rspec:install --quiet 2>&1 | tail -1 || true + +mkdir -p spec/models +cat > spec/models/post_spec.rb <<'SPEC' +require 'rails_helper' + +RSpec.describe Post, type: :model do + it "is valid with valid attributes" do + post = Post.new(title: "Test", body: "Body", published: false) + expect(post).to be_valid + end +end +SPEC + +# 6. 
Create + migrate database +printf " → rails db:create && db:migrate ...\n" +rails db:create --quiet 2>&1 | tail -1 || true +rails db:migrate --quiet 2>&1 | tail -1 || true + +# 7. Create a file with intentional RuboCop offenses +printf " → creating rubocop_bait.rb with intentional offenses ...\n" +cat > app/models/rubocop_bait.rb <<'BAIT' +class RubocopBait < ApplicationRecord + def messy_method() + x = 1 + y = 2 + if x == 1 + puts "hello world" + end + return nil + end +end +BAIT + +# 8. Create a failing RSpec spec +printf " → creating failing rspec spec ...\n" +cat > spec/models/post_fail_spec.rb <<'FAILSPEC' +require 'rails_helper' + +RSpec.describe Post, type: :model do + it "intentionally fails validation check" do + post = Post.new(title: "Hello", body: "World", published: false) + expect(post.title).to eq("Wrong Title On Purpose") + end +end +FAILSPEC + +# 9. Create an RSpec spec with pending example +printf " → creating rspec spec with pending example ...\n" +cat > spec/models/post_pending_spec.rb <<'PENDSPEC' +require 'rails_helper' + +RSpec.describe Post, type: :model do + it "is valid with title" do + post = Post.new(title: "OK", body: "Body", published: false) + expect(post).to be_valid + end + + it "will support markdown later" do + pending "Not yet implemented" + expect(Post.new.render_markdown).to eq("

hello

") + end +end +PENDSPEC + +# 10. Create a failing minitest test +printf " → creating failing minitest test ...\n" +cat > test/models/post_fail_test.rb <<'FAILTEST' +require "test_helper" + +class PostFailTest < ActiveSupport::TestCase + test "intentionally fails" do + assert_equal "wrong", Post.new(title: "right").title + end +end +FAILTEST + +# 11. Create a passing minitest test +printf " → creating passing minitest test ...\n" +cat > test/models/post_pass_test.rb <<'PASSTEST' +require "test_helper" + +class PostPassTest < ActiveSupport::TestCase + test "post is valid" do + post = Post.new(title: "OK", body: "Body", published: false) + assert post.valid? + end +end +PASSTEST + +printf "\n${BOLD}Setup complete. Running tests...${NC}\n" + +# ══════════════════════════════════════════════════════ +# Test sections +# ══════════════════════════════════════════════════════ + +# ── 1. RSpec ───────────────────────────────────────── + +section "RSpec" + +assert_output "rtk rspec (with failure)" \ + "failed" \ + rtk rspec + +assert_output "rtk rspec spec/models/post_spec.rb (pass)" \ + "RSpec.*passed" \ + rtk rspec spec/models/post_spec.rb + +assert_output "rtk rspec spec/models/post_fail_spec.rb (fail)" \ + "failed\|❌" \ + rtk rspec spec/models/post_fail_spec.rb + +# ── 2. RuboCop ─────────────────────────────────────── + +section "RuboCop" + +assert_output "rtk rubocop (with offenses)" \ + "offense" \ + rtk rubocop + +assert_output "rtk rubocop app/ (with offenses)" \ + "rubocop_bait\|offense" \ + rtk rubocop app/ + +# ── 3. Minitest (rake test) ────────────────────────── + +section "Minitest (rake test)" + +assert_output "rtk rake test (with failure)" \ + "failure\|error\|FAIL" \ + rtk rake test + +assert_output "rtk rake test single passing file" \ + "ok rake test\|0 failures" \ + rtk rake test TEST=test/models/post_pass_test.rb + +assert_exit_nonzero "rtk rake test single failing file" \ + "failure\|FAIL" \ + rtk rake test test/models/post_fail_test.rb + +# ── 4. 
Bundle install ──────────────────────────────── + +section "Bundle install" + +assert_output "rtk bundle install (idempotent)" \ + "bundle\|ok\|complete\|install" \ + rtk bundle install + +# ── 5. Exit code preservation ──────────────────────── + +section "Exit code preservation" + +assert_exit_nonzero "rtk rspec exits non-zero on failure" \ + "failed\|failure" \ + rtk rspec spec/models/post_fail_spec.rb + +assert_exit_nonzero "rtk rubocop exits non-zero on offenses" \ + "offense" \ + rtk rubocop app/models/rubocop_bait.rb + +assert_exit_nonzero "rtk rake test exits non-zero on failure" \ + "failure\|FAIL" \ + rtk rake test test/models/post_fail_test.rb + +# ── 6. bundle exec variants ───────────────────────── + +section "bundle exec variants" + +assert_output "bundle exec rspec spec/models/post_spec.rb" \ + "passed\|example" \ + rtk bundle exec rspec spec/models/post_spec.rb + +assert_output "bundle exec rubocop app/" \ + "offense" \ + rtk bundle exec rubocop app/ + +# ── 7. RuboCop autocorrect ─────────────────────────── + +section "RuboCop autocorrect" + +# Copy bait file so autocorrect has something to fix +cp app/models/rubocop_bait.rb app/models/rubocop_bait_ac.rb +sed -i.bak 's/RubocopBait/RubocopBaitAc/' app/models/rubocop_bait_ac.rb + +assert_output "rtk rubocop -A (autocorrect)" \ + "autocorrected\|rubocop\|ok\|offense\|inspected" \ + rtk rubocop -A app/models/rubocop_bait_ac.rb + +# Clean up autocorrect test file +rm -f app/models/rubocop_bait_ac.rb app/models/rubocop_bait_ac.rb.bak + +# ── 8. RSpec pending ───────────────────────────────── + +section "RSpec pending" + +assert_output "rtk rspec with pending example" \ + "pending" \ + rtk rspec spec/models/post_pending_spec.rb + +# ── 9. RSpec text fallback ─────────────────────────── + +section "RSpec text fallback" + +assert_output "rtk rspec --format documentation (text path)" \ + "valid\|example\|post" \ + rtk rspec --format documentation spec/models/post_spec.rb + +# ── 10. 
RSpec empty suite ──────────────────────────── + +section "RSpec empty suite" + +assert_output "rtk rspec nonexistent tag" \ + "0 examples\|No examples" \ + rtk rspec --tag nonexistent spec/models/post_spec.rb + +# ── 11. Token savings ──────────────────────────────── + +section "Token savings" + +# rspec (passing spec) +raw_len=$( (bundle exec rspec spec/models/post_spec.rb 2>&1 || true) | wc -c | tr -d ' ') +rtk_len=$( (rtk rspec spec/models/post_spec.rb 2>&1 || true) | wc -c | tr -d ' ') +if [[ "$rtk_len" -lt "$raw_len" ]]; then + PASS=$((PASS + 1)) + printf " ${GREEN}PASS${NC} rspec: rtk (%s bytes) < raw (%s bytes)\n" "$rtk_len" "$raw_len" +else + FAIL=$((FAIL + 1)) + FAILURES+=("token savings: rspec") + printf " ${RED}FAIL${NC} rspec: rtk (%s bytes) >= raw (%s bytes)\n" "$rtk_len" "$raw_len" +fi + +# rubocop (exits non-zero on offenses, so || true) +raw_len=$( (bundle exec rubocop app/ 2>&1 || true) | wc -c | tr -d ' ') +rtk_len=$( (rtk rubocop app/ 2>&1 || true) | wc -c | tr -d ' ') +if [[ "$rtk_len" -lt "$raw_len" ]]; then + PASS=$((PASS + 1)) + printf " ${GREEN}PASS${NC} rubocop: rtk (%s bytes) < raw (%s bytes)\n" "$rtk_len" "$raw_len" +else + FAIL=$((FAIL + 1)) + FAILURES+=("token savings: rubocop") + printf " ${RED}FAIL${NC} rubocop: rtk (%s bytes) >= raw (%s bytes)\n" "$rtk_len" "$raw_len" +fi + +# rake test (passing file) +raw_len=$( (bundle exec rake test TEST=test/models/post_pass_test.rb 2>&1 || true) | wc -c | tr -d ' ') +rtk_len=$( (rtk rake test test/models/post_pass_test.rb 2>&1 || true) | wc -c | tr -d ' ') +if [[ "$rtk_len" -lt "$raw_len" ]]; then + PASS=$((PASS + 1)) + printf " ${GREEN}PASS${NC} rake test: rtk (%s bytes) < raw (%s bytes)\n" "$rtk_len" "$raw_len" +else + FAIL=$((FAIL + 1)) + FAILURES+=("token savings: rake test") + printf " ${RED}FAIL${NC} rake test: rtk (%s bytes) >= raw (%s bytes)\n" "$rtk_len" "$raw_len" +fi + +# bundle install (idempotent) +raw_len=$( (bundle install 2>&1 || true) | wc -c | tr -d ' ') +rtk_len=$( (rtk 
bundle install 2>&1 || true) | wc -c | tr -d ' ') +if [[ "$rtk_len" -lt "$raw_len" ]]; then + PASS=$((PASS + 1)) + printf " ${GREEN}PASS${NC} bundle install: rtk (%s bytes) < raw (%s bytes)\n" "$rtk_len" "$raw_len" +else + FAIL=$((FAIL + 1)) + FAILURES+=("token savings: bundle install") + printf " ${RED}FAIL${NC} bundle install: rtk (%s bytes) >= raw (%s bytes)\n" "$rtk_len" "$raw_len" +fi + +# ── 12. Verbose flag ───────────────────────────────── + +section "Verbose flag (-v)" + +assert_output "rtk -v rspec (verbose)" \ + "RSpec\|passed\|Running\|example" \ + rtk -v rspec spec/models/post_spec.rb + +# ══════════════════════════════════════════════════════ +# Report +# ══════════════════════════════════════════════════════ + +printf "\n${BOLD}══════════════════════════════════════${NC}\n" +printf "${BOLD}Results: ${GREEN}%d passed${NC}, ${RED}%d failed${NC}, ${YELLOW}%d skipped${NC}\n" "$PASS" "$FAIL" "$SKIP" + +if [[ ${#FAILURES[@]} -gt 0 ]]; then + printf "\n${RED}Failures:${NC}\n" + for f in "${FAILURES[@]}"; do + printf " - %s\n" "$f" + done +fi + +printf "${BOLD}══════════════════════════════════════${NC}\n" + +exit "$FAIL" diff --git a/src/discover/rules.rs b/src/discover/rules.rs index 00c79301..44f19d60 100644 --- a/src/discover/rules.rs +++ b/src/discover/rules.rs @@ -44,6 +44,11 @@ pub const PATTERNS: &[&str] = &[ // Go tooling r"^go\s+(test|build|vet)", r"^golangci-lint(\s|$)", + // Ruby tooling + r"^bundle\s+(install|update)\b", + r"^(?:bundle\s+exec\s+)?(?:bin/)?(?:rake|rails)\s+test", + r"^(?:bundle\s+exec\s+)?rspec(?:\s|$)", + r"^(?:bundle\s+exec\s+)?rubocop(?:\s|$)", // AWS CLI r"^aws\s+", // PostgreSQL @@ -332,6 +337,45 @@ pub const RULES: &[RtkRule] = &[ subcmd_savings: &[], subcmd_status: &[], }, + // Ruby tooling + RtkRule { + rtk_cmd: "rtk bundle", + rewrite_prefixes: &["bundle"], + category: "Ruby", + savings_pct: 70.0, + subcmd_savings: &[], + subcmd_status: &[], + }, + RtkRule { + rtk_cmd: "rtk rake", + rewrite_prefixes: &[ + "bundle exec 
rails", + "bundle exec rake", + "bin/rails", + "rails", + "rake", + ], + category: "Ruby", + savings_pct: 85.0, + subcmd_savings: &[("test", 90.0)], + subcmd_status: &[], + }, + RtkRule { + rtk_cmd: "rtk rspec", + rewrite_prefixes: &["bundle exec rspec", "bin/rspec", "rspec"], + category: "Tests", + savings_pct: 65.0, + subcmd_savings: &[], + subcmd_status: &[], + }, + RtkRule { + rtk_cmd: "rtk rubocop", + rewrite_prefixes: &["bundle exec rubocop", "rubocop"], + category: "Build", + savings_pct: 65.0, + subcmd_savings: &[], + subcmd_status: &[], + }, // AWS CLI RtkRule { rtk_cmd: "rtk aws", diff --git a/src/filters/bundle-install.toml b/src/filters/bundle-install.toml new file mode 100644 index 00000000..80e07486 --- /dev/null +++ b/src/filters/bundle-install.toml @@ -0,0 +1,61 @@ +[filters.bundle-install] +description = "Compact bundle install/update — strip 'Using' lines, keep installs and errors" +match_command = "^bundle\\s+(install|update)\\b" +strip_ansi = true +strip_lines_matching = [ + "^Using ", + "^\\s*$", + "^Fetching gem metadata", + "^Resolving dependencies", +] +match_output = [ + { pattern = "Bundle complete!", message = "ok bundle: complete" }, + { pattern = "Bundle updated!", message = "ok bundle: updated" }, +] +max_lines = 30 + +[[tests.bundle-install]] +name = "all cached short-circuits" +input = """ +Using bundler 2.5.6 +Using rake 13.1.0 +Using ast 2.4.2 +Using base64 0.2.0 +Using minitest 5.22.2 +Bundle complete! 85 Gemfile dependencies, 200 gems now installed. +Use `bundle info [gemname]` to see where a bundled gem is installed. +""" +expected = "ok bundle: complete" + +[[tests.bundle-install]] +name = "mixed install keeps Fetching and Installing lines" +input = """ +Fetching gem metadata from https://rubygems.org/......... +Resolving dependencies... +Using rake 13.1.0 +Using ast 2.4.2 +Fetching rspec 3.13.0 +Installing rspec 3.13.0 +Using rubocop 1.62.0 +Fetching simplecov 0.22.0 +Installing simplecov 0.22.0 +Bundle complete! 
85 Gemfile dependencies, 202 gems now installed. +""" +expected = "ok bundle: complete" + +[[tests.bundle-install]] +name = "update output" +input = """ +Fetching gem metadata from https://rubygems.org/......... +Resolving dependencies... +Using rake 13.1.0 +Fetching rspec 3.14.0 (was 3.13.0) +Installing rspec 3.14.0 (was 3.13.0) +Bundle updated! +""" +expected = "ok bundle: updated" + +[[tests.bundle-install]] +name = "empty output" +input = "" +expected = "" diff --git a/src/main.rs b/src/main.rs index 2bbc4bb2..0b1b76dc 100644 --- a/src/main.rs +++ b/src/main.rs @@ -46,8 +46,11 @@ mod prettier_cmd; mod prisma_cmd; mod psql_cmd; mod pytest_cmd; +mod rake_cmd; mod read; mod rewrite_cmd; +mod rspec_cmd; +mod rubocop_cmd; mod ruff_cmd; mod runner; mod session_cmd; @@ -641,6 +644,27 @@ enum Commands { args: Vec<String>, }, + /// Rake/Rails test with compact Minitest output (Ruby) + Rake { + /// Rake arguments (e.g., test, test TEST=path/to/test.rb) + #[arg(trailing_var_arg = true, allow_hyphen_values = true)] + args: Vec<String>, + }, + + /// RuboCop linter with compact output (Ruby) + Rubocop { + /// RuboCop arguments (e.g., --auto-correct, -A) + #[arg(trailing_var_arg = true, allow_hyphen_values = true)] + args: Vec<String>, + }, + + /// RSpec test runner with compact output (Rails/Ruby) + Rspec { + /// RSpec arguments (e.g., spec/models, --tag focus) + #[arg(trailing_var_arg = true, allow_hyphen_values = true)] + args: Vec<String>, + }, + /// Pip package manager with compact output (auto-detects uv) Pip { /// Pip arguments (e.g., list, outdated, install) @@ -1986,6 +2010,18 @@ fn main() -> Result<()> { mypy_cmd::run(&args, cli.verbose)?; } + Commands::Rake { args } => { + rake_cmd::run(&args, cli.verbose)?; + } + + Commands::Rubocop { args } => { + rubocop_cmd::run(&args, cli.verbose)?; + } + + Commands::Rspec { args } => { + rspec_cmd::run(&args, cli.verbose)?; + } + Commands::Pip { args } => { pip_cmd::run(&args, cli.verbose)?; } @@ -2245,6 +2281,9 @@ fn is_operational_command(cmd: &Commands) 
-> bool { | Commands::Curl { .. } | Commands::Ruff { .. } | Commands::Pytest { .. } + | Commands::Rake { .. } + | Commands::Rubocop { .. } + | Commands::Rspec { .. } | Commands::Pip { .. } | Commands::Go { .. } | Commands::GolangciLint { .. } diff --git a/src/rake_cmd.rs b/src/rake_cmd.rs new file mode 100644 index 00000000..f6ab62f5 --- /dev/null +++ b/src/rake_cmd.rs @@ -0,0 +1,441 @@ +//! Minitest output filter for `rake test` and `rails test`. +//! +//! Parses the standard Minitest output format produced by both `rake test` and +//! `rails test`, filtering down to failures/errors and the summary line. +//! Uses `ruby_exec("rake")` to auto-detect `bundle exec`. + +use crate::tracking; +use crate::utils::{exit_code_from_output, ruby_exec, strip_ansi}; +use anyhow::{Context, Result}; + +pub fn run(args: &[String], verbose: u8) -> Result<()> { + let timer = tracking::TimedExecution::start(); + + let mut cmd = ruby_exec("rake"); + for arg in args { + cmd.arg(arg); + } + + if verbose > 0 { + eprintln!( + "Running: {} {}", + cmd.get_program().to_string_lossy(), + args.join(" ") + ); + } + + let output = cmd + .output() + .context("Failed to run rake. Is it installed? 
Try: gem install rake")?; + + let stdout = String::from_utf8_lossy(&output.stdout); + let stderr = String::from_utf8_lossy(&output.stderr); + let raw = format!("{}\n{}", stdout, stderr); + + let filtered = filter_minitest_output(&raw); + + let exit_code = exit_code_from_output(&output, "rake"); + if let Some(hint) = crate::tee::tee_and_hint(&raw, "rake", exit_code) { + println!("{}\n{}", filtered, hint); + } else { + println!("{}", filtered); + } + + if !stderr.trim().is_empty() && verbose > 0 { + eprintln!("{}", stderr.trim()); + } + + timer.track( + &format!("rake {}", args.join(" ")), + &format!("rtk rake {}", args.join(" ")), + &raw, + &filtered, + ); + + if !output.status.success() { + std::process::exit(exit_code); + } + + Ok(()) +} + +#[derive(Debug, PartialEq)] +enum ParseState { + Header, + Running, + Failures, + #[allow(dead_code)] + Summary, +} + +/// Parse Minitest output using a state machine. +/// +/// Minitest produces output like: +/// ```text +/// Run options: --seed 12345 +/// +/// # Running: +/// +/// ..F..E.. +/// +/// Finished in 0.123456s, 64.8 runs/s +/// +/// 1) Failure: +/// TestSomething#test_that_fails [/path/to/test.rb:15]: +/// Expected: true +/// Actual: false +/// +/// 8 runs, 7 assertions, 1 failures, 1 errors, 0 skips +/// ``` +fn filter_minitest_output(output: &str) -> String { + let clean = strip_ansi(output); + let mut state = ParseState::Header; + let mut failures: Vec<String> = Vec::new(); + let mut current_failure: Vec<String> = Vec::new(); + let mut summary_line = String::new(); + + for line in clean.lines() { + let trimmed = line.trim(); + + // Detect summary line anywhere (it's always last meaningful line) + // Handles both "N runs, N assertions, ..." and "N tests, N assertions, ..." 
+ if (trimmed.contains(" runs,") || trimmed.contains(" tests,")) + && trimmed.contains(" assertions,") + { + summary_line = trimmed.to_string(); + continue; + } + + // State transitions — handle both standard Minitest and minitest-reporters + if trimmed == "# Running:" || trimmed.starts_with("Started with run options") { + state = ParseState::Running; + continue; + } + + if trimmed.starts_with("Finished in ") { + state = ParseState::Failures; + continue; + } + + match state { + ParseState::Header | ParseState::Running => { + // Skip seed line, blank lines, progress dots + continue; + } + ParseState::Failures => { + if is_failure_header(trimmed) { + if !current_failure.is_empty() { + failures.push(current_failure.join("\n")); + current_failure.clear(); + } + current_failure.push(trimmed.to_string()); + } else if trimmed.is_empty() && !current_failure.is_empty() { + failures.push(current_failure.join("\n")); + current_failure.clear(); + } else if !trimmed.is_empty() { + current_failure.push(line.to_string()); + } + } + ParseState::Summary => {} + } + } + + // Save last failure if any + if !current_failure.is_empty() { + failures.push(current_failure.join("\n")); + } + + build_minitest_summary(&summary_line, &failures) +} + +fn is_failure_header(line: &str) -> bool { + lazy_static::lazy_static! 
{ + static ref RE_FAILURE: regex::Regex = + regex::Regex::new(r"^\d+\)\s+(Failure|Error):$").unwrap(); + } + RE_FAILURE.is_match(line) +} + +fn build_minitest_summary(summary: &str, failures: &[String]) -> String { + let (runs, _assertions, fail_count, error_count, skips) = parse_minitest_summary(summary); + + if runs == 0 && summary.is_empty() { + return "rake test: no tests ran".to_string(); + } + + if fail_count == 0 && error_count == 0 { + let mut msg = format!("ok rake test: {} runs, 0 failures", runs); + if skips > 0 { + msg.push_str(&format!(", {} skips", skips)); + } + return msg; + } + + let mut result = String::new(); + result.push_str(&format!( + "rake test: {} runs, {} failures, {} errors", + runs, fail_count, error_count + )); + if skips > 0 { + result.push_str(&format!(", {} skips", skips)); + } + result.push('\n'); + + if failures.is_empty() { + return result.trim().to_string(); + } + + result.push('\n'); + + for (i, failure) in failures.iter().take(10).enumerate() { + let lines: Vec<&str> = failure.lines().collect(); + // First line is like " 1) Failure:" or " 1) Error:" + if let Some(header) = lines.first() { + result.push_str(&format!("{}. {}\n", i + 1, header.trim())); + } + // Remaining lines contain test name, file:line, assertion message + for line in lines.iter().skip(1).take(4) { + let trimmed = line.trim(); + if !trimmed.is_empty() { + result.push_str(&format!(" {}\n", crate::utils::truncate(trimmed, 120))); + } + } + if i < failures.len().min(10) - 1 { + result.push('\n'); + } + } + + if failures.len() > 10 { + result.push_str(&format!("\n... 
+{} more failures\n", failures.len() - 10)); + } + + result.trim().to_string() +} + +fn parse_minitest_summary(summary: &str) -> (usize, usize, usize, usize, usize) { + let mut runs = 0; + let mut assertions = 0; + let mut failures = 0; + let mut errors = 0; + let mut skips = 0; + + for part in summary.split(',') { + let part = part.trim(); + let words: Vec<&str> = part.split_whitespace().collect(); + if words.len() >= 2 { + if let Ok(n) = words[0].parse::<usize>() { + match words[1].trim_end_matches(',') { + "runs" | "run" | "tests" | "test" => runs = n, + "assertions" | "assertion" => assertions = n, + "failures" | "failure" => failures = n, + "errors" | "error" => errors = n, + "skips" | "skip" => skips = n, + _ => {} + } + } + } + } + + (runs, assertions, failures, errors, skips) +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::utils::count_tokens; + + #[test] + fn test_filter_minitest_all_pass() { + let output = r#"Run options: --seed 12345 + +# Running: + +........ + +Finished in 0.123456s, 64.8 runs/s, 72.9 assertions/s. + +8 runs, 9 assertions, 0 failures, 0 errors, 0 skips"#; + + let result = filter_minitest_output(output); + assert!(result.contains("ok rake test")); + assert!(result.contains("8 runs")); + assert!(result.contains("0 failures")); + } + + #[test] + fn test_filter_minitest_with_failures() { + let output = r#"Run options: --seed 54321 + +# Running: + +..F.... + +Finished in 0.234567s, 29.8 runs/s + + 1) Failure: +TestSomething#test_that_fails [/path/to/test.rb:15]: +Expected: true + Actual: false + +7 runs, 7 assertions, 1 failures, 0 errors, 0 skips"#; + + let result = filter_minitest_output(output); + assert!(result.contains("1 failures")); + assert!(result.contains("test_that_fails")); + assert!(result.contains("Expected: true")); + } + + #[test] + fn test_filter_minitest_with_errors() { + let output = r#"Run options: --seed 99999 + +# Running: + +.E.... 
+ +Finished in 0.345678s, 17.4 runs/s + + 1) Error: +TestOther#test_boom [/path/to/test.rb:42]: +RuntimeError: something went wrong + /path/to/test.rb:42:in `test_boom' + +6 runs, 5 assertions, 0 failures, 1 errors, 0 skips"#; + + let result = filter_minitest_output(output); + assert!(result.contains("1 errors")); + assert!(result.contains("test_boom")); + assert!(result.contains("RuntimeError")); + } + + #[test] + fn test_filter_minitest_empty() { + let result = filter_minitest_output(""); + assert!(result.contains("no tests ran")); + } + + #[test] + fn test_filter_minitest_skip() { + let output = r#"Run options: --seed 11111 + +# Running: + +..S.. + +Finished in 0.100000s, 50.0 runs/s + +5 runs, 4 assertions, 0 failures, 0 errors, 1 skips"#; + + let result = filter_minitest_output(output); + assert!(result.contains("ok rake test")); + assert!(result.contains("1 skips")); + } + + #[test] + fn test_token_savings() { + let mut dots = String::new(); + for _ in 0..20 { + dots.push_str( + "......................................................................\n", + ); + } + let output = format!( + "Run options: --seed 12345\n\n\ + # Running:\n\n\ + {}\n\ + Finished in 2.345678s, 213.4 runs/s, 428.7 assertions/s.\n\n\ + 500 runs, 1003 assertions, 0 failures, 0 errors, 0 skips", + dots + ); + + let input_tokens = count_tokens(&output); + let result = filter_minitest_output(&output); + let output_tokens = count_tokens(&result); + + let savings = 100.0 - (output_tokens as f64 / input_tokens as f64 * 100.0); + assert!( + savings >= 80.0, + "Expected >= 80% savings, got {:.1}% (input: {}, output: {})", + savings, + input_tokens, + output_tokens + ); + } + + #[test] + fn test_parse_minitest_summary() { + assert_eq!( + parse_minitest_summary("8 runs, 9 assertions, 0 failures, 0 errors, 0 skips"), + (8, 9, 0, 0, 0) + ); + assert_eq!( + parse_minitest_summary("5 runs, 4 assertions, 1 failures, 1 errors, 2 skips"), + (5, 4, 1, 1, 2) + ); + // minitest-reporters uses "tests" 
instead of "runs" + assert_eq!( + parse_minitest_summary("57 tests, 378 assertions, 0 failures, 0 errors, 0 skips"), + (57, 378, 0, 0, 0) + ); + } + + #[test] + fn test_filter_minitest_multiple_failures() { + let output = r#"Run options: --seed 77777 + +# Running: + +.FF.E. + +Finished in 0.500000s, 12.0 runs/s + + 1) Failure: +TestFoo#test_alpha [/test.rb:10]: +Expected: 1 + Actual: 2 + + 2) Failure: +TestFoo#test_beta [/test.rb:20]: +Expected: "hello" + Actual: "world" + + 3) Error: +TestBar#test_gamma [/test.rb:30]: +NoMethodError: undefined method `blah' + +6 runs, 5 assertions, 2 failures, 1 errors, 0 skips"#; + + let result = filter_minitest_output(output); + assert!(result.contains("2 failures")); + assert!(result.contains("1 errors")); + assert!(result.contains("test_alpha")); + assert!(result.contains("test_beta")); + assert!(result.contains("test_gamma")); + } + + #[test] + fn test_filter_minitest_reporters_format() { + let output = "Started with run options --seed 37764\n\n\ + Progress: |========================================|\n\n\ + Finished in 5.79938s\n\ + 57 tests, 378 assertions, 0 failures, 0 errors, 0 skips"; + + let result = filter_minitest_output(output); + assert!(result.contains("ok rake test")); + assert!(result.contains("57 runs")); + assert!(result.contains("0 failures")); + } + + #[test] + fn test_filter_minitest_with_ansi() { + let output = "\x1b[32mRun options: --seed 12345\x1b[0m\n\n\ + # Running:\n\n\ + \x1b[32m....\x1b[0m\n\n\ + Finished in 0.1s, 40.0 runs/s\n\n\ + 4 runs, 4 assertions, 0 failures, 0 errors, 0 skips"; + + let result = filter_minitest_output(output); + assert!(result.contains("ok rake test")); + assert!(result.contains("4 runs")); + } +} diff --git a/src/rspec_cmd.rs b/src/rspec_cmd.rs new file mode 100644 index 00000000..3d8bf2c4 --- /dev/null +++ b/src/rspec_cmd.rs @@ -0,0 +1,1046 @@ +//! RSpec test runner filter. +//! +//! Injects `--format json` to get structured output, parses it to show only +//! failures. 
Falls back to a state-machine text parser when JSON is unavailable +//! (e.g., user specified `--format documentation`) or when injected JSON output +//! fails to parse. + +use crate::tracking; +use crate::utils::{exit_code_from_output, fallback_tail, ruby_exec, truncate}; +use anyhow::{Context, Result}; +use lazy_static::lazy_static; +use regex::Regex; +use serde::Deserialize; + +// ── Noise-stripping regex patterns ────────────────────────────────────────── + +lazy_static! { + static ref RE_SPRING: Regex = Regex::new(r"(?i)running via spring preloader").unwrap(); + static ref RE_SIMPLECOV: Regex = + Regex::new(r"(?i)(coverage report|simplecov|coverage/|\.simplecov|All Files.*Lines)") + .unwrap(); + static ref RE_DEPRECATION: Regex = Regex::new(r"^DEPRECATION WARNING:").unwrap(); + static ref RE_FINISHED_IN: Regex = Regex::new(r"^Finished in \d").unwrap(); + static ref RE_SCREENSHOT: Regex = Regex::new(r"saved screenshot to (.+)").unwrap(); + static ref RE_RSPEC_SUMMARY: Regex = Regex::new(r"(\d+) examples?, (\d+) failures?").unwrap(); +} + +// ── JSON structures matching RSpec's --format json output ─────────────────── + +#[derive(Deserialize)] +struct RspecOutput { + examples: Vec<RspecExample>, + summary: RspecSummary, +} + +#[derive(Deserialize)] +struct RspecExample { + full_description: String, + status: String, + file_path: String, + line_number: u32, + exception: Option<RspecException>, +} + +#[derive(Deserialize)] +struct RspecException { + class: String, + message: String, + #[serde(default)] + backtrace: Vec<String>, +} + +#[derive(Deserialize)] +struct RspecSummary { + duration: f64, + example_count: usize, + failure_count: usize, + pending_count: usize, + #[serde(default)] + errors_outside_of_examples_count: usize, +} + +// ── Public entry point ─────────────────────────────────────────────────────── + +pub fn run(args: &[String], verbose: u8) -> Result<()> { + let timer = tracking::TimedExecution::start(); + + let mut cmd = ruby_exec("rspec"); + + // Inject --format json unless the user 
already specified a format. + // Handles: --format, -f, --format=..., -fj, -fjson, -fdocumentation (from PR #534) + let has_format = args.iter().any(|a| { + a == "--format" + || a == "-f" + || a.starts_with("--format=") + || (a.starts_with("-f") && a.len() > 2 && !a.starts_with("--")) + }); + + if !has_format { + cmd.arg("--format").arg("json"); + } + + cmd.args(args); + + if verbose > 0 { + let injected = if has_format { "" } else { " --format json" }; + eprintln!("Running: rspec{} {}", injected, args.join(" ")); + } + + let output = cmd.output().context( + "Failed to run rspec. Is it installed? Try: gem install rspec or add it to your Gemfile", + )?; + + let stdout = String::from_utf8_lossy(&output.stdout); + let stderr = String::from_utf8_lossy(&output.stderr); + let raw = format!("{}\n{}", stdout, stderr); + + let exit_code = exit_code_from_output(&output, "rspec"); + + let filtered = if stdout.trim().is_empty() && !output.status.success() { + "RSpec: FAILED (no stdout, see stderr below)".to_string() + } else if has_format { + // User specified format — use text fallback on stripped output + let stripped = strip_noise(&stdout); + filter_rspec_text(&stripped) + } else { + filter_rspec_output(&stdout) + }; + + if let Some(hint) = crate::tee::tee_and_hint(&raw, "rspec", exit_code) { + println!("{}\n{}", filtered, hint); + } else { + println!("{}", filtered); + } + + if !stderr.trim().is_empty() && (!output.status.success() || verbose > 0) { + eprintln!("{}", stderr.trim()); + } + + timer.track( + &format!("rspec {}", args.join(" ")), + &format!("rtk rspec {}", args.join(" ")), + &raw, + &filtered, + ); + + if !output.status.success() { + std::process::exit(exit_code); + } + + Ok(()) +} + +// ── Noise stripping ───────────────────────────────────────────────────────── + +/// Remove noise lines: Spring preloader, SimpleCov, DEPRECATION warnings, +/// "Finished in" timing line, and Capybara screenshot details (keep path only). 
+fn strip_noise(output: &str) -> String { + let mut result = Vec::new(); + let mut in_simplecov_block = false; + + for line in output.lines() { + let trimmed = line.trim(); + + // Skip Spring preloader messages + if RE_SPRING.is_match(trimmed) { + continue; + } + + // Skip lines starting with "DEPRECATION WARNING:" (single-line only) + if RE_DEPRECATION.is_match(trimmed) { + continue; + } + + // Skip "Finished in N seconds" line + if RE_FINISHED_IN.is_match(trimmed) { + continue; + } + + // SimpleCov block detection: once we see it, skip until blank line + if RE_SIMPLECOV.is_match(trimmed) { + in_simplecov_block = true; + continue; + } + if in_simplecov_block { + if trimmed.is_empty() { + in_simplecov_block = false; + } + continue; + } + + // Capybara screenshots: keep only the path + if let Some(caps) = RE_SCREENSHOT.captures(trimmed) { + if let Some(path) = caps.get(1) { + result.push(format!("[screenshot: {}]", path.as_str().trim())); + continue; + } + } + + result.push(line.to_string()); + } + + result.join("\n") +} + +// ── Output filtering ───────────────────────────────────────────────────────── + +fn filter_rspec_output(output: &str) -> String { + if output.trim().is_empty() { + return "RSpec: No output".to_string(); + } + + // Try parsing as JSON first (happy path when --format json is injected) + if let Ok(rspec) = serde_json::from_str::<RspecOutput>(output) { + return build_rspec_summary(&rspec); + } + + // Strip noise (Spring, SimpleCov, etc.) 
and retry JSON parse + let stripped = strip_noise(output); + match serde_json::from_str::<RspecOutput>(&stripped) { + Ok(rspec) => return build_rspec_summary(&rspec), + Err(e) => { + eprintln!( + "[rtk] rspec: JSON parse failed ({}), using text fallback", + e + ); + } + } + + filter_rspec_text(&stripped) +} + +fn build_rspec_summary(rspec: &RspecOutput) -> String { + let s = &rspec.summary; + + if s.example_count == 0 && s.errors_outside_of_examples_count == 0 { + return "RSpec: No examples found".to_string(); + } + + if s.example_count == 0 && s.errors_outside_of_examples_count > 0 { + return format!( + "RSpec: {} errors outside of examples ({:.2}s)", + s.errors_outside_of_examples_count, s.duration + ); + } + + if s.failure_count == 0 && s.errors_outside_of_examples_count == 0 { + let passed = s.example_count.saturating_sub(s.pending_count); + let mut result = format!("✓ RSpec: {} passed", passed); + if s.pending_count > 0 { + result.push_str(&format!(", {} pending", s.pending_count)); + } + result.push_str(&format!(" ({:.2}s)", s.duration)); + return result; + } + + let passed = s + .example_count + .saturating_sub(s.failure_count + s.pending_count); + let mut result = format!("RSpec: {} passed, {} failed", passed, s.failure_count); + if s.pending_count > 0 { + result.push_str(&format!(", {} pending", s.pending_count)); + } + result.push_str(&format!(" ({:.2}s)\n", s.duration)); + result.push_str("═══════════════════════════════════════\n"); + + let failures: Vec<&RspecExample> = rspec + .examples + .iter() + .filter(|e| e.status == "failed") + .collect(); + + if failures.is_empty() { + return result.trim().to_string(); + } + + result.push_str("\nFailures:\n"); + + for (i, example) in failures.iter().take(5).enumerate() { + result.push_str(&format!( + "{}. 
❌ {}\n {}:{}\n", + i + 1, + example.full_description, + example.file_path, + example.line_number + )); + + if let Some(exc) = &example.exception { + let short_class = exc.class.split("::").last().unwrap_or(&exc.class); + let first_msg = exc.message.lines().next().unwrap_or(""); + result.push_str(&format!( + " {}: {}\n", + short_class, + truncate(first_msg, 120) + )); + + // First backtrace line not from gems/rspec internals + for bt in &exc.backtrace { + if !bt.contains("/gems/") && !bt.contains("lib/rspec") { + result.push_str(&format!(" {}\n", truncate(bt, 120))); + break; + } + } + } + + if i < failures.len().min(5) - 1 { + result.push('\n'); + } + } + + if failures.len() > 5 { + result.push_str(&format!("\n... +{} more failures\n", failures.len() - 5)); + } + + result.trim().to_string() +} + +/// State machine text fallback parser for when JSON is unavailable. +fn filter_rspec_text(output: &str) -> String { + #[derive(PartialEq)] + enum State { + Header, + Failures, + FailedExamples, + Summary, + } + + let mut state = State::Header; + let mut failures: Vec<String> = Vec::new(); + let mut current_failure = String::new(); + let mut summary_line = String::new(); + + for line in output.lines() { + let trimmed = line.trim(); + + match state { + State::Header => { + if trimmed == "Failures:" { + state = State::Failures; + } else if trimmed == "Failed examples:" { + state = State::FailedExamples; + } else if RE_RSPEC_SUMMARY.is_match(trimmed) { + summary_line = trimmed.to_string(); + state = State::Summary; + } + } + State::Failures => { + // New failure block starts with numbered pattern like " 1) ..." 
+ if is_numbered_failure(trimmed) { + if !current_failure.trim().is_empty() { + failures.push(compact_failure_block(¤t_failure)); + } + current_failure = trimmed.to_string(); + current_failure.push('\n'); + } else if trimmed == "Failed examples:" { + if !current_failure.trim().is_empty() { + failures.push(compact_failure_block(¤t_failure)); + } + current_failure.clear(); + state = State::FailedExamples; + } else if RE_RSPEC_SUMMARY.is_match(trimmed) { + if !current_failure.trim().is_empty() { + failures.push(compact_failure_block(¤t_failure)); + } + current_failure.clear(); + summary_line = trimmed.to_string(); + state = State::Summary; + } else if !trimmed.is_empty() { + // Skip gem-internal backtrace lines + if is_gem_backtrace(trimmed) { + continue; + } + current_failure.push_str(trimmed); + current_failure.push('\n'); + } + } + State::FailedExamples => { + if RE_RSPEC_SUMMARY.is_match(trimmed) { + summary_line = trimmed.to_string(); + state = State::Summary; + } + // Skip "Failed examples:" section (just rspec commands to re-run) + } + State::Summary => { + break; + } + } + } + + // Capture remaining failure + if !current_failure.trim().is_empty() && state == State::Failures { + failures.push(compact_failure_block(¤t_failure)); + } + + // If we found a summary line, build result + if !summary_line.is_empty() { + if failures.is_empty() { + return format!("RSpec: {}", summary_line); + } + let mut result = format!("RSpec: {}\n", summary_line); + result.push_str("═══════════════════════════════════════\n\n"); + for (i, failure) in failures.iter().take(5).enumerate() { + result.push_str(&format!("{}. ❌ {}\n", i + 1, failure)); + if i < failures.len().min(5) - 1 { + result.push('\n'); + } + } + if failures.len() > 5 { + result.push_str(&format!("\n... 
+{} more failures\n", failures.len() - 5)); + } + return result.trim().to_string(); + } + + // Fallback: look for summary anywhere + for line in output.lines().rev() { + let t = line.trim(); + if t.contains("example") && (t.contains("failure") || t.contains("pending")) { + return format!("RSpec: {}", t); + } + } + + // Last resort: last 5 lines + fallback_tail(output, "rspec", 5) +} + +/// Check if a line is a numbered failure like "1) User#full_name..." +fn is_numbered_failure(line: &str) -> bool { + let trimmed = line.trim(); + if let Some(pos) = trimmed.find(')') { + let prefix = &trimmed[..pos]; + prefix.chars().all(|c| c.is_ascii_digit()) && !prefix.is_empty() + } else { + false + } +} + +/// Check if a backtrace line is from gems/rspec internals. +fn is_gem_backtrace(line: &str) -> bool { + line.contains("/gems/") + || line.contains("lib/rspec") + || line.contains("lib/ruby/") + || line.contains("vendor/bundle") +} + +/// Compact a failure block: extract key info, strip verbose backtrace. 
+fn compact_failure_block(block: &str) -> String { + let mut lines: Vec<&str> = block.lines().collect(); + + // Remove empty lines + lines.retain(|l| !l.trim().is_empty()); + + // Extract spec file:line (lines starting with # ./spec/ or # ./test/) + let mut spec_file = String::new(); + let mut kept_lines: Vec<String> = Vec::new(); + + for line in &lines { + let t = line.trim(); + if t.starts_with("# ./spec/") || t.starts_with("# ./test/") { + spec_file = t.trim_start_matches("# ").to_string(); + } else if t.starts_with('#') && (t.contains("/gems/") || t.contains("lib/rspec")) { + // Skip gem backtrace + continue; + } else { + kept_lines.push(t.to_string()); + } + } + + let mut result = kept_lines.join("\n "); + if !spec_file.is_empty() { + result.push_str(&format!("\n {}", spec_file)); + } + result +} + +// ── Tests ──────────────────────────────────────────────────────────────────── + +#[cfg(test)] +mod tests { + use super::*; + use crate::utils::count_tokens; + + fn all_pass_json() -> &'static str { + r#"{ + "version": "3.12.0", + "examples": [ + { + "id": "./spec/models/user_spec.rb[1:1]", + "description": "is valid with valid attributes", + "full_description": "User is valid with valid attributes", + "status": "passed", + "file_path": "./spec/models/user_spec.rb", + "line_number": 5, + "run_time": 0.001234, + "pending_message": null, + "exception": null + }, + { + "id": "./spec/models/user_spec.rb[1:2]", + "description": "validates email format", + "full_description": "User validates email format", + "status": "passed", + "file_path": "./spec/models/user_spec.rb", + "line_number": 12, + "run_time": 0.0008, + "pending_message": null, + "exception": null + } + ], + "summary": { + "duration": 0.015, + "example_count": 2, + "failure_count": 0, + "pending_count": 0, + "errors_outside_of_examples_count": 0 + }, + "summary_line": "2 examples, 0 failures" + }"# + } + + fn with_failures_json() -> &'static str { + r#"{ + "version": "3.12.0", + "examples": [ + { + "id": 
"./spec/models/user_spec.rb[1:1]", + "description": "is valid", + "full_description": "User is valid", + "status": "passed", + "file_path": "./spec/models/user_spec.rb", + "line_number": 5, + "run_time": 0.001, + "pending_message": null, + "exception": null + }, + { + "id": "./spec/models/user_spec.rb[1:2]", + "description": "saves to database", + "full_description": "User saves to database", + "status": "failed", + "file_path": "./spec/models/user_spec.rb", + "line_number": 10, + "run_time": 0.002, + "pending_message": null, + "exception": { + "class": "RSpec::Expectations::ExpectationNotMetError", + "message": "expected true but got false", + "backtrace": [ + "/usr/local/lib/ruby/gems/3.2.0/gems/rspec-expectations-3.12.0/lib/rspec/expectations/fail_with.rb:37:in `fail_with'", + "./spec/models/user_spec.rb:11:in `block (2 levels) in '" + ] + } + } + ], + "summary": { + "duration": 0.123, + "example_count": 2, + "failure_count": 1, + "pending_count": 0, + "errors_outside_of_examples_count": 0 + }, + "summary_line": "2 examples, 1 failure" + }"# + } + + fn with_pending_json() -> &'static str { + r#"{ + "version": "3.12.0", + "examples": [ + { + "id": "./spec/models/post_spec.rb[1:1]", + "description": "creates a post", + "full_description": "Post creates a post", + "status": "passed", + "file_path": "./spec/models/post_spec.rb", + "line_number": 4, + "run_time": 0.002, + "pending_message": null, + "exception": null + }, + { + "id": "./spec/models/post_spec.rb[1:2]", + "description": "validates title", + "full_description": "Post validates title", + "status": "pending", + "file_path": "./spec/models/post_spec.rb", + "line_number": 8, + "run_time": 0.0, + "pending_message": "Not yet implemented", + "exception": null + } + ], + "summary": { + "duration": 0.05, + "example_count": 2, + "failure_count": 0, + "pending_count": 1, + "errors_outside_of_examples_count": 0 + }, + "summary_line": "2 examples, 0 failures, 1 pending" + }"# + } + + fn large_suite_json() -> &'static 
str { + r#"{ + "version": "3.12.0", + "examples": [ + {"id":"1","description":"test1","full_description":"Suite test1","status":"passed","file_path":"./spec/a_spec.rb","line_number":1,"run_time":0.01,"pending_message":null,"exception":null}, + {"id":"2","description":"test2","full_description":"Suite test2","status":"passed","file_path":"./spec/a_spec.rb","line_number":2,"run_time":0.01,"pending_message":null,"exception":null}, + {"id":"3","description":"test3","full_description":"Suite test3","status":"passed","file_path":"./spec/a_spec.rb","line_number":3,"run_time":0.01,"pending_message":null,"exception":null}, + {"id":"4","description":"test4","full_description":"Suite test4","status":"passed","file_path":"./spec/a_spec.rb","line_number":4,"run_time":0.01,"pending_message":null,"exception":null}, + {"id":"5","description":"test5","full_description":"Suite test5","status":"passed","file_path":"./spec/a_spec.rb","line_number":5,"run_time":0.01,"pending_message":null,"exception":null}, + {"id":"6","description":"test6","full_description":"Suite test6","status":"passed","file_path":"./spec/a_spec.rb","line_number":6,"run_time":0.01,"pending_message":null,"exception":null}, + {"id":"7","description":"test7","full_description":"Suite test7","status":"passed","file_path":"./spec/a_spec.rb","line_number":7,"run_time":0.01,"pending_message":null,"exception":null}, + {"id":"8","description":"test8","full_description":"Suite test8","status":"passed","file_path":"./spec/a_spec.rb","line_number":8,"run_time":0.01,"pending_message":null,"exception":null}, + {"id":"9","description":"test9","full_description":"Suite test9","status":"passed","file_path":"./spec/a_spec.rb","line_number":9,"run_time":0.01,"pending_message":null,"exception":null}, + {"id":"10","description":"test10","full_description":"Suite test10","status":"passed","file_path":"./spec/a_spec.rb","line_number":10,"run_time":0.01,"pending_message":null,"exception":null} + ], + "summary": { + "duration": 1.234, + 
"example_count": 10, + "failure_count": 0, + "pending_count": 0, + "errors_outside_of_examples_count": 0 + }, + "summary_line": "10 examples, 0 failures" + }"# + } + + #[test] + fn test_filter_rspec_all_pass() { + let result = filter_rspec_output(all_pass_json()); + assert!(result.starts_with("✓ RSpec:")); + assert!(result.contains("2 passed")); + assert!(result.contains("0.01s") || result.contains("0.02s")); + } + + #[test] + fn test_filter_rspec_with_failures() { + let result = filter_rspec_output(with_failures_json()); + assert!(result.contains("1 passed, 1 failed")); + assert!(result.contains("❌ User saves to database")); + assert!(result.contains("user_spec.rb:10")); + assert!(result.contains("ExpectationNotMetError")); + assert!(result.contains("expected true but got false")); + } + + #[test] + fn test_filter_rspec_with_pending() { + let result = filter_rspec_output(with_pending_json()); + assert!(result.starts_with("✓ RSpec:")); + assert!(result.contains("1 passed")); + assert!(result.contains("1 pending")); + } + + #[test] + fn test_filter_rspec_empty_output() { + let result = filter_rspec_output(""); + assert_eq!(result, "RSpec: No output"); + } + + #[test] + fn test_filter_rspec_no_examples() { + let json = r#"{ + "version": "3.12.0", + "examples": [], + "summary": { + "duration": 0.001, + "example_count": 0, + "failure_count": 0, + "pending_count": 0, + "errors_outside_of_examples_count": 0 + } + }"#; + let result = filter_rspec_output(json); + assert_eq!(result, "RSpec: No examples found"); + } + + #[test] + fn test_filter_rspec_errors_outside_examples() { + let json = r#"{ + "version": "3.12.0", + "examples": [], + "summary": { + "duration": 0.01, + "example_count": 0, + "failure_count": 0, + "pending_count": 0, + "errors_outside_of_examples_count": 1 + } + }"#; + let result = filter_rspec_output(json); + // Should NOT say "No examples found" — there was an error outside examples + assert!( + !result.contains("No examples found"), + "errors outside 
examples should not be treated as 'no examples': {}", + result + ); + } + + #[test] + fn test_filter_rspec_text_fallback() { + let text = r#" +..F. + +Failures: + + 1) User is valid + Failure/Error: expect(user).to be_valid + expected true got false + # ./spec/models/user_spec.rb:5 + +4 examples, 1 failure +"#; + let result = filter_rspec_output(text); + assert!(result.contains("RSpec:")); + assert!(result.contains("4 examples, 1 failure")); + assert!(result.contains("❌"), "should show failure marker"); + } + + #[test] + fn test_filter_rspec_text_fallback_extracts_failures() { + let text = r#"Randomized with seed 12345 +..F...E.. + +Failures: + + 1) User#full_name returns first and last name + Failure/Error: expect(user.full_name).to eq("John Doe") + expected: "John Doe" + got: "John D." + # /usr/local/lib/ruby/gems/3.2.0/gems/rspec-expectations-3.12.0/lib/rspec/expectations/fail_with.rb:37 + # ./spec/models/user_spec.rb:15 + + 2) Api::Controller#index fails + Failure/Error: get :index + expected 200 got 500 + # ./spec/controllers/api_spec.rb:42 + +9 examples, 2 failures +"#; + let result = filter_rspec_text(text); + assert!(result.contains("2 failures")); + assert!(result.contains("❌")); + // Should show spec file path, not gem backtrace + assert!(result.contains("spec/models/user_spec.rb:15")); + } + + #[test] + fn test_filter_rspec_backtrace_filters_gems() { + let result = filter_rspec_output(with_failures_json()); + // Should show the spec file backtrace, not the gem one + assert!(result.contains("user_spec.rb:11")); + assert!(!result.contains("gems/rspec-expectations")); + } + + #[test] + fn test_filter_rspec_exception_class_shortened() { + let result = filter_rspec_output(with_failures_json()); + // Should show "ExpectationNotMetError" not "RSpec::Expectations::ExpectationNotMetError" + assert!(result.contains("ExpectationNotMetError")); + assert!(!result.contains("RSpec::Expectations::ExpectationNotMetError")); + } + + #[test] + fn 
test_filter_rspec_many_failures_caps_at_five() { + let json = r#"{ + "version": "3.12.0", + "examples": [ + {"id":"1","description":"test 1","full_description":"A test 1","status":"failed","file_path":"./spec/a_spec.rb","line_number":5,"run_time":0.001,"pending_message":null,"exception":{"class":"RuntimeError","message":"boom 1","backtrace":["./spec/a_spec.rb:6:in `block'"]}}, + {"id":"2","description":"test 2","full_description":"A test 2","status":"failed","file_path":"./spec/a_spec.rb","line_number":10,"run_time":0.001,"pending_message":null,"exception":{"class":"RuntimeError","message":"boom 2","backtrace":["./spec/a_spec.rb:11:in `block'"]}}, + {"id":"3","description":"test 3","full_description":"A test 3","status":"failed","file_path":"./spec/a_spec.rb","line_number":15,"run_time":0.001,"pending_message":null,"exception":{"class":"RuntimeError","message":"boom 3","backtrace":["./spec/a_spec.rb:16:in `block'"]}}, + {"id":"4","description":"test 4","full_description":"A test 4","status":"failed","file_path":"./spec/a_spec.rb","line_number":20,"run_time":0.001,"pending_message":null,"exception":{"class":"RuntimeError","message":"boom 4","backtrace":["./spec/a_spec.rb:21:in `block'"]}}, + {"id":"5","description":"test 5","full_description":"A test 5","status":"failed","file_path":"./spec/a_spec.rb","line_number":25,"run_time":0.001,"pending_message":null,"exception":{"class":"RuntimeError","message":"boom 5","backtrace":["./spec/a_spec.rb:26:in `block'"]}}, + {"id":"6","description":"test 6","full_description":"A test 6","status":"failed","file_path":"./spec/a_spec.rb","line_number":30,"run_time":0.001,"pending_message":null,"exception":{"class":"RuntimeError","message":"boom 6","backtrace":["./spec/a_spec.rb:31:in `block'"]}} + ], + "summary": { + "duration": 0.05, + "example_count": 6, + "failure_count": 6, + "pending_count": 0, + "errors_outside_of_examples_count": 0 + }, + "summary_line": "6 examples, 6 failures" + }"#; + let result = 
filter_rspec_output(json); + assert!(result.contains("1. ❌"), "should show first failure"); + assert!(result.contains("5. ❌"), "should show fifth failure"); + assert!(!result.contains("6. ❌"), "should not show sixth inline"); + assert!( + result.contains("+1 more"), + "should show overflow count: {}", + result + ); + } + + #[test] + fn test_filter_rspec_text_fallback_no_summary() { + // If no summary line, returns last 5 lines (does not panic) + let text = "some output\nwithout a summary line"; + let result = filter_rspec_output(text); + assert!(!result.is_empty()); + } + + #[test] + fn test_filter_rspec_invalid_json_falls_back() { + let garbage = "not json at all { broken"; + let result = filter_rspec_output(garbage); + assert!(!result.is_empty(), "should not panic on invalid JSON"); + } + + // ── Noise stripping tests ──────────────────────────────────────────────── + + #[test] + fn test_strip_noise_spring() { + let input = "Running via Spring preloader in process 12345\n...\n3 examples, 0 failures"; + let result = strip_noise(input); + assert!(!result.contains("Spring")); + assert!(result.contains("3 examples")); + } + + #[test] + fn test_strip_noise_simplecov() { + let input = "...\n\nCoverage report generated for RSpec to /app/coverage.\n142 / 200 LOC (71.0%) covered.\n\n3 examples, 0 failures"; + let result = strip_noise(input); + assert!(!result.contains("Coverage report")); + assert!(!result.contains("LOC")); + assert!(result.contains("3 examples")); + } + + #[test] + fn test_strip_noise_deprecation() { + let input = "DEPRECATION WARNING: Using `return` in before callbacks is deprecated.\n...\n3 examples, 0 failures"; + let result = strip_noise(input); + assert!(!result.contains("DEPRECATION")); + assert!(result.contains("3 examples")); + } + + #[test] + fn test_strip_noise_finished_in() { + let input = "...\nFinished in 12.34 seconds (files took 3.21 seconds to load)\n3 examples, 0 failures"; + let result = strip_noise(input); + 
assert!(!result.contains("Finished in 12.34")); + assert!(result.contains("3 examples")); + } + + #[test] + fn test_strip_noise_capybara_screenshot() { + let input = "...\n saved screenshot to /tmp/capybara/screenshots/2026_failed.png\n3 examples, 1 failure"; + let result = strip_noise(input); + assert!(result.contains("[screenshot:")); + assert!(result.contains("failed.png")); + assert!(!result.contains("saved screenshot to")); + } + + // ── Token savings tests ────────────────────────────────────────────────── + + #[test] + fn test_token_savings_all_pass() { + let input = large_suite_json(); + let output = filter_rspec_output(input); + + let input_tokens = count_tokens(input); + let output_tokens = count_tokens(&output); + let savings = 100.0 - (output_tokens as f64 / input_tokens as f64 * 100.0); + + assert!( + savings >= 60.0, + "RSpec all-pass: expected ≥60% savings, got {:.1}% (in={}, out={})", + savings, + input_tokens, + output_tokens + ); + } + + #[test] + fn test_token_savings_with_failures() { + let input = with_failures_json(); + let output = filter_rspec_output(input); + + let input_tokens = count_tokens(input); + let output_tokens = count_tokens(&output); + let savings = 100.0 - (output_tokens as f64 / input_tokens as f64 * 100.0); + + assert!( + savings >= 60.0, + "RSpec failures: expected ≥60% savings, got {:.1}% (in={}, out={})", + savings, + input_tokens, + output_tokens + ); + } + + #[test] + fn test_token_savings_text_fallback() { + let input = r#"Running via Spring preloader in process 12345 +Randomized with seed 54321 +..F...E..F.. + +Failures: + + 1) User#full_name returns first and last name + Failure/Error: expect(user.full_name).to eq("John Doe") + expected: "John Doe" + got: "John D." 
+ # /usr/local/lib/ruby/gems/3.2.0/gems/rspec-expectations-3.12.0/lib/rspec/expectations/fail_with.rb:37 + # ./spec/models/user_spec.rb:15 + # /usr/local/lib/ruby/gems/3.2.0/gems/rspec-core-3.12.0/lib/rspec/core/example.rb:258 + + 2) Api::Controller#index returns success + Failure/Error: get :index + expected 200 got 500 + # /usr/local/lib/ruby/gems/3.2.0/gems/rspec-expectations-3.12.0/lib/rspec/expectations/fail_with.rb:37 + # ./spec/controllers/api_spec.rb:42 + # /usr/local/lib/ruby/gems/3.2.0/gems/rspec-core-3.12.0/lib/rspec/core/example.rb:258 + +Failed examples: + +rspec ./spec/models/user_spec.rb:15 # User#full_name returns first and last name +rspec ./spec/controllers/api_spec.rb:42 # Api::Controller#index returns success + +12 examples, 2 failures + +Coverage report generated for RSpec to /app/coverage. +142 / 200 LOC (71.0%) covered. +"#; + let output = filter_rspec_text(input); + + let input_tokens = count_tokens(input); + let output_tokens = count_tokens(&output); + let savings = 100.0 - (output_tokens as f64 / input_tokens as f64 * 100.0); + + assert!( + savings >= 30.0, + "RSpec text fallback: expected ≥30% savings, got {:.1}% (in={}, out={})", + savings, + input_tokens, + output_tokens + ); + } + + // ── ANSI handling tests ──────────────────────────────────────────────── + + #[test] + fn test_filter_rspec_ansi_wrapped_json() { + // ANSI codes around JSON should fall back to text, not panic + let input = "\x1b[32m{\"version\":\"3.12.0\"\x1b[0m broken json"; + let result = filter_rspec_output(input); + assert!(!result.is_empty(), "should not panic on ANSI-wrapped JSON"); + } + + // ── Text fallback >5 failures truncation (Issue 9) ───────────────────── + + #[test] + fn test_filter_rspec_text_many_failures_caps_at_five() { + let text = r#"Randomized with seed 12345 +.......FFFFFFF + +Failures: + + 1) User#full_name fails + Failure/Error: expect(true).to eq(false) + # ./spec/models/user_spec.rb:5 + + 2) Post#title fails + Failure/Error: expect(true).to 
eq(false) + # ./spec/models/post_spec.rb:10 + + 3) Comment#body fails + Failure/Error: expect(true).to eq(false) + # ./spec/models/comment_spec.rb:15 + + 4) Session#token fails + Failure/Error: expect(true).to eq(false) + # ./spec/models/session_spec.rb:20 + + 5) Profile#avatar fails + Failure/Error: expect(true).to eq(false) + # ./spec/models/profile_spec.rb:25 + + 6) Team#members fails + Failure/Error: expect(true).to eq(false) + # ./spec/models/team_spec.rb:30 + + 7) Role#permissions fails + Failure/Error: expect(true).to eq(false) + # ./spec/models/role_spec.rb:35 + +14 examples, 7 failures +"#; + let result = filter_rspec_text(text); + assert!(result.contains("1. ❌"), "should show first failure"); + assert!(result.contains("5. ❌"), "should show fifth failure"); + assert!(!result.contains("6. ❌"), "should not show sixth inline"); + assert!( + result.contains("+2 more"), + "should show overflow count: {}", + result + ); + } + + // ── Header -> FailedExamples transition (Issue 13) ────────────────────── + + #[test] + fn test_filter_rspec_text_header_to_failed_examples() { + // Input that has "Failed examples:" directly (no "Failures:" block), + // followed by a summary line + let text = r#"..F.. 
+ +Failed examples: + +rspec ./spec/models/user_spec.rb:5 # User is valid + +5 examples, 1 failure +"#; + let result = filter_rspec_text(text); + assert!( + result.contains("5 examples, 1 failure"), + "should contain summary: {}", + result + ); + assert!( + result.contains("RSpec:"), + "should have RSpec prefix: {}", + result + ); + } + + // ── Format flag detection tests (from PR #534) ─────────────────────── + + #[test] + fn test_has_format_flag_none() { + let args: &[String] = &[]; + assert!(!args.iter().any(|a| { + a == "--format" + || a == "-f" + || a.starts_with("--format=") + || (a.starts_with("-f") && a.len() > 2 && !a.starts_with("--")) + })); + } + + #[test] + fn test_has_format_flag_long() { + let args = ["--format".to_string(), "documentation".to_string()]; + assert!(args.iter().any(|a| a == "--format")); + } + + #[test] + fn test_has_format_flag_short_combined() { + // -fjson, -fj, -fdocumentation + for flag in &["-fjson", "-fj", "-fdocumentation"] { + let args = [flag.to_string()]; + assert!( + args.iter() + .any(|a| a.starts_with("-f") && a.len() > 2 && !a.starts_with("--")), + "should detect {}", + flag + ); + } + } + + #[test] + fn test_has_format_flag_equals() { + let args = ["--format=json".to_string()]; + assert!(args.iter().any(|a| a.starts_with("--format="))); + } +} diff --git a/src/rubocop_cmd.rs b/src/rubocop_cmd.rs new file mode 100644 index 00000000..db2d0ac4 --- /dev/null +++ b/src/rubocop_cmd.rs @@ -0,0 +1,659 @@ +//! RuboCop linter filter. +//! +//! Injects `--format json` for structured output, parses offenses grouped by +//! file and sorted by severity. Falls back to text parsing for autocorrect mode, +//! when the user specifies a custom format, or when injected JSON output fails +//! to parse. 
+ +use crate::tracking; +use crate::utils::{exit_code_from_output, ruby_exec}; +use anyhow::{Context, Result}; +use serde::Deserialize; + +// ── JSON structures matching RuboCop's --format json output ───────────────── + +#[derive(Deserialize)] +struct RubocopOutput { + files: Vec, + summary: RubocopSummary, +} + +#[derive(Deserialize)] +struct RubocopFile { + path: String, + offenses: Vec, +} + +#[derive(Deserialize)] +struct RubocopOffense { + cop_name: String, + severity: String, + message: String, + correctable: bool, + location: RubocopLocation, +} + +#[derive(Deserialize)] +struct RubocopLocation { + start_line: usize, +} + +#[derive(Deserialize)] +struct RubocopSummary { + offense_count: usize, + #[allow(dead_code)] + target_file_count: usize, + inspected_file_count: usize, + #[serde(default)] + correctable_offense_count: usize, +} + +// ── Public entry point ─────────────────────────────────────────────────────── + +pub fn run(args: &[String], verbose: u8) -> Result<()> { + let timer = tracking::TimedExecution::start(); + + let mut cmd = ruby_exec("rubocop"); + + // Detect autocorrect mode + let is_autocorrect = args + .iter() + .any(|a| a == "-a" || a == "-A" || a == "--auto-correct" || a == "--auto-correct-all"); + + // Inject --format json unless the user already specified a format + let has_format = args + .iter() + .any(|a| a.starts_with("--format") || a.starts_with("-f")); + + if !has_format && !is_autocorrect { + cmd.arg("--format").arg("json"); + } + + cmd.args(args); + + if verbose > 0 { + eprintln!("Running: rubocop {}", args.join(" ")); + } + + let output = cmd.output().context( + "Failed to run rubocop. Is it installed? 
Try: gem install rubocop or add it to your Gemfile", + )?; + + let stdout = String::from_utf8_lossy(&output.stdout); + let stderr = String::from_utf8_lossy(&output.stderr); + let raw = format!("{}\n{}", stdout, stderr); + + let exit_code = exit_code_from_output(&output, "rubocop"); + + let filtered = if stdout.trim().is_empty() && !output.status.success() { + "RuboCop: FAILED (no stdout, see stderr below)".to_string() + } else if has_format || is_autocorrect { + filter_rubocop_text(&stdout) + } else { + filter_rubocop_json(&stdout) + }; + + if let Some(hint) = crate::tee::tee_and_hint(&raw, "rubocop", exit_code) { + println!("{}\n{}", filtered, hint); + } else { + println!("{}", filtered); + } + + if !stderr.trim().is_empty() && (!output.status.success() || verbose > 0) { + eprintln!("{}", stderr.trim()); + } + + timer.track( + &format!("rubocop {}", args.join(" ")), + &format!("rtk rubocop {}", args.join(" ")), + &raw, + &filtered, + ); + + if !output.status.success() { + std::process::exit(exit_code); + } + + Ok(()) +} + +// ── JSON filtering ─────────────────────────────────────────────────────────── + +/// Rank severity for ordering: lower = more severe. +fn severity_rank(severity: &str) -> u8 { + match severity { + "fatal" | "error" => 0, + "warning" => 1, + "convention" | "refactor" | "info" => 2, + _ => 3, + } +} + +fn filter_rubocop_json(output: &str) -> String { + if output.trim().is_empty() { + return "RuboCop: No output".to_string(); + } + + let parsed: Result = serde_json::from_str(output); + let rubocop = match parsed { + Ok(r) => r, + Err(e) => { + eprintln!("[rtk] rubocop: JSON parse failed ({})", e); + return crate::utils::fallback_tail(output, "rubocop (JSON parse error)", 5); + } + }; + + let s = &rubocop.summary; + + if s.offense_count == 0 { + return format!("ok ✓ rubocop ({} files)", s.inspected_file_count); + } + + // When correctable_offense_count is 0, it could mean the field was absent + // (older RuboCop) or genuinely zero. 
Manual count as consistent fallback. + let correctable_count = if s.correctable_offense_count > 0 { + s.correctable_offense_count + } else { + rubocop + .files + .iter() + .flat_map(|f| &f.offenses) + .filter(|o| o.correctable) + .count() + }; + + let mut result = format!( + "rubocop: {} offenses ({} files)\n", + s.offense_count, s.inspected_file_count + ); + + // Build list of files with offenses, sorted by worst severity then file path + let mut files_with_offenses: Vec<&RubocopFile> = rubocop + .files + .iter() + .filter(|f| !f.offenses.is_empty()) + .collect(); + + // Sort files: worst severity first, then alphabetically + files_with_offenses.sort_by(|a, b| { + let a_worst = a + .offenses + .iter() + .map(|o| severity_rank(&o.severity)) + .min() + .unwrap_or(3); + let b_worst = b + .offenses + .iter() + .map(|o| severity_rank(&o.severity)) + .min() + .unwrap_or(3); + a_worst.cmp(&b_worst).then(a.path.cmp(&b.path)) + }); + + let max_files = 10; + let max_offenses_per_file = 5; + + for file in files_with_offenses.iter().take(max_files) { + let short = compact_ruby_path(&file.path); + result.push_str(&format!("\n{}\n", short)); + + // Sort offenses within file: by severity rank, then by line number + let mut sorted_offenses: Vec<&RubocopOffense> = file.offenses.iter().collect(); + sorted_offenses.sort_by(|a, b| { + severity_rank(&a.severity) + .cmp(&severity_rank(&b.severity)) + .then(a.location.start_line.cmp(&b.location.start_line)) + }); + + for offense in sorted_offenses.iter().take(max_offenses_per_file) { + let first_msg_line = offense.message.lines().next().unwrap_or(""); + result.push_str(&format!( + " :{} {} — {}\n", + offense.location.start_line, offense.cop_name, first_msg_line + )); + } + if sorted_offenses.len() > max_offenses_per_file { + result.push_str(&format!( + " ... +{} more\n", + sorted_offenses.len() - max_offenses_per_file + )); + } + } + + if files_with_offenses.len() > max_files { + result.push_str(&format!( + "\n... 
+{} more files\n", + files_with_offenses.len() - max_files + )); + } + + if correctable_count > 0 { + result.push_str(&format!( + "\n({} correctable, run `rubocop -A`)", + correctable_count + )); + } + + result.trim().to_string() +} + +// ── Text fallback ──────────────────────────────────────────────────────────── + +fn filter_rubocop_text(output: &str) -> String { + // Check for Ruby/Bundler errors first -- show error, truncated to avoid excessive tokens + for line in output.lines() { + let t = line.trim(); + if t.contains("cannot load such file") + || t.contains("Bundler::GemNotFound") + || t.contains("Gem::MissingSpecError") + || t.starts_with("rubocop: command not found") + || t.starts_with("rubocop: No such file") + { + let error_lines: Vec<&str> = output.trim().lines().take(20).collect(); + let truncated = error_lines.join("\n"); + let total_lines = output.trim().lines().count(); + if total_lines > 20 { + return format!( + "RuboCop error:\n{}\n... ({} more lines)", + truncated, + total_lines - 20 + ); + } + return format!("RuboCop error:\n{}", truncated); + } + } + + // Detect autocorrect summary: "N files inspected, M offenses detected, K offenses autocorrected" + for line in output.lines().rev() { + let t = line.trim(); + if t.contains("inspected") && t.contains("autocorrected") { + // Extract counts for compact autocorrect message + let files = extract_leading_number(t); + let corrected = extract_autocorrect_count(t); + if files > 0 && corrected > 0 { + return format!( + "ok ✓ rubocop -A ({} files, {} autocorrected)", + files, corrected + ); + } + return format!("RuboCop: {}", t); + } + if t.contains("inspected") && (t.contains("offense") || t.contains("no offenses")) { + if t.contains("no offenses") { + let files = extract_leading_number(t); + if files > 0 { + return format!("ok ✓ rubocop ({} files)", files); + } + return "ok ✓ rubocop (no offenses)".to_string(); + } + return format!("RuboCop: {}", t); + } + } + // Last resort: last 5 lines + 
crate::utils::fallback_tail(output, "rubocop", 5) +} + +/// Extract leading number from a string like "15 files inspected". +fn extract_leading_number(s: &str) -> usize { + s.split_whitespace() + .next() + .and_then(|w| w.parse().ok()) + .unwrap_or(0) +} + +/// Extract autocorrect count from summary like "... 3 offenses autocorrected". +fn extract_autocorrect_count(s: &str) -> usize { + // Look for "N offenses autocorrected" near end + let parts: Vec<&str> = s.split(',').collect(); + for part in parts.iter().rev() { + let t = part.trim(); + if t.contains("autocorrected") { + return extract_leading_number(t); + } + } + 0 +} + +/// Compact Ruby file path by finding the nearest Rails convention directory +/// and stripping the absolute path prefix. +fn compact_ruby_path(path: &str) -> String { + let path = path.replace('\\', "/"); + + for prefix in &[ + "app/models/", + "app/controllers/", + "app/views/", + "app/helpers/", + "app/services/", + "app/jobs/", + "app/mailers/", + "lib/", + "spec/", + "test/", + "config/", + ] { + if let Some(pos) = path.find(prefix) { + return path[pos..].to_string(); + } + } + + // Generic: strip up to last known directory marker + if let Some(pos) = path.rfind("/app/") { + return path[pos + 1..].to_string(); + } + if let Some(pos) = path.rfind('/') { + return path[pos + 1..].to_string(); + } + path +} + +// ── Tests ──────────────────────────────────────────────────────────────────── + +#[cfg(test)] +mod tests { + use super::*; + use crate::utils::count_tokens; + + fn no_offenses_json() -> &'static str { + r#"{ + "metadata": {"rubocop_version": "1.60.0"}, + "files": [], + "summary": { + "offense_count": 0, + "target_file_count": 0, + "inspected_file_count": 15 + } + }"# + } + + fn with_offenses_json() -> &'static str { + r#"{ + "metadata": {"rubocop_version": "1.60.0"}, + "files": [ + { + "path": "app/models/user.rb", + "offenses": [ + { + "severity": "convention", + "message": "Trailing whitespace detected.", + "cop_name": 
"Layout/TrailingWhitespace", + "correctable": true, + "location": {"start_line": 10, "start_column": 5, "last_line": 10, "last_column": 8, "length": 3, "line": 10, "column": 5} + }, + { + "severity": "convention", + "message": "Missing frozen string literal comment.", + "cop_name": "Style/FrozenStringLiteralComment", + "correctable": true, + "location": {"start_line": 1, "start_column": 1, "last_line": 1, "last_column": 1, "length": 1, "line": 1, "column": 1} + }, + { + "severity": "warning", + "message": "Useless assignment to variable - `x`.", + "cop_name": "Lint/UselessAssignment", + "correctable": false, + "location": {"start_line": 25, "start_column": 5, "last_line": 25, "last_column": 6, "length": 1, "line": 25, "column": 5} + } + ] + }, + { + "path": "app/controllers/users_controller.rb", + "offenses": [ + { + "severity": "convention", + "message": "Trailing whitespace detected.", + "cop_name": "Layout/TrailingWhitespace", + "correctable": true, + "location": {"start_line": 5, "start_column": 20, "last_line": 5, "last_column": 22, "length": 2, "line": 5, "column": 20} + }, + { + "severity": "error", + "message": "Syntax error, unexpected end-of-input.", + "cop_name": "Lint/Syntax", + "correctable": false, + "location": {"start_line": 30, "start_column": 1, "last_line": 30, "last_column": 1, "length": 1, "line": 30, "column": 1} + } + ] + } + ], + "summary": { + "offense_count": 5, + "target_file_count": 2, + "inspected_file_count": 20 + } + }"# + } + + #[test] + fn test_filter_rubocop_no_offenses() { + let result = filter_rubocop_json(no_offenses_json()); + assert_eq!(result, "ok ✓ rubocop (15 files)"); + } + + #[test] + fn test_filter_rubocop_with_offenses_per_file() { + let result = filter_rubocop_json(with_offenses_json()); + // Should show per-file offenses + assert!(result.contains("5 offenses (20 files)")); + // controllers file has error severity, should appear first + assert!(result.contains("app/controllers/users_controller.rb")); + 
assert!(result.contains("app/models/user.rb")); + // Per-file offense format: :line CopName — message + assert!(result.contains(":30 Lint/Syntax — Syntax error")); + assert!(result.contains(":10 Layout/TrailingWhitespace — Trailing whitespace")); + assert!(result.contains(":25 Lint/UselessAssignment — Useless assignment")); + } + + #[test] + fn test_filter_rubocop_severity_ordering() { + let result = filter_rubocop_json(with_offenses_json()); + // File with error should come before file with only convention/warning + let ctrl_pos = result.find("users_controller.rb").unwrap(); + let model_pos = result.find("app/models/user.rb").unwrap(); + assert!( + ctrl_pos < model_pos, + "Error-file should appear before convention-file" + ); + + // Within users_controller.rb, error should come before convention + let error_pos = result.find(":30 Lint/Syntax").unwrap(); + let conv_pos = result.find(":5 Layout/TrailingWhitespace").unwrap(); + assert!( + error_pos < conv_pos, + "Error offense should appear before convention" + ); + } + + #[test] + fn test_filter_rubocop_within_file_line_ordering() { + let result = filter_rubocop_json(with_offenses_json()); + // Within user.rb, warning (line 25) should come before conventions (line 1, 10) + let warning_pos = result.find(":25 Lint/UselessAssignment").unwrap(); + let conv1_pos = result.find(":1 Style/FrozenStringLiteralComment").unwrap(); + assert!( + warning_pos < conv1_pos, + "Warning should come before convention within same file" + ); + } + + #[test] + fn test_filter_rubocop_correctable_hint() { + let result = filter_rubocop_json(with_offenses_json()); + assert!(result.contains("3 correctable")); + assert!(result.contains("rubocop -A")); + } + + #[test] + fn test_filter_rubocop_text_fallback() { + let text = r#"Inspecting 10 files +.......... 
+ +10 files inspected, no offenses detected"#; + let result = filter_rubocop_text(text); + assert_eq!(result, "ok ✓ rubocop (10 files)"); + } + + #[test] + fn test_filter_rubocop_text_autocorrect() { + let text = r#"Inspecting 15 files +...C..CC....... + +15 files inspected, 3 offenses detected, 3 offenses autocorrected"#; + let result = filter_rubocop_text(text); + assert_eq!(result, "ok ✓ rubocop -A (15 files, 3 autocorrected)"); + } + + #[test] + fn test_filter_rubocop_empty_output() { + let result = filter_rubocop_json(""); + assert_eq!(result, "RuboCop: No output"); + } + + #[test] + fn test_filter_rubocop_invalid_json_falls_back() { + let garbage = "some ruby warning\n{broken json"; + let result = filter_rubocop_json(garbage); + assert!(!result.is_empty(), "should not panic on invalid JSON"); + } + + #[test] + fn test_compact_ruby_path() { + assert_eq!( + compact_ruby_path("/home/user/project/app/models/user.rb"), + "app/models/user.rb" + ); + assert_eq!( + compact_ruby_path("app/controllers/users_controller.rb"), + "app/controllers/users_controller.rb" + ); + assert_eq!( + compact_ruby_path("/project/spec/models/user_spec.rb"), + "spec/models/user_spec.rb" + ); + assert_eq!( + compact_ruby_path("lib/tasks/deploy.rake"), + "lib/tasks/deploy.rake" + ); + } + + #[test] + fn test_filter_rubocop_caps_offenses_per_file() { + // File with 7 offenses should show 5 + overflow + let json = r#"{ + "metadata": {"rubocop_version": "1.60.0"}, + "files": [ + { + "path": "app/models/big.rb", + "offenses": [ + {"severity": "convention", "message": "msg1", "cop_name": "Cop/A", "correctable": false, "location": {"start_line": 1, "start_column": 1}}, + {"severity": "convention", "message": "msg2", "cop_name": "Cop/B", "correctable": false, "location": {"start_line": 2, "start_column": 1}}, + {"severity": "convention", "message": "msg3", "cop_name": "Cop/C", "correctable": false, "location": {"start_line": 3, "start_column": 1}}, + {"severity": "convention", "message": "msg4", 
"cop_name": "Cop/D", "correctable": false, "location": {"start_line": 4, "start_column": 1}}, + {"severity": "convention", "message": "msg5", "cop_name": "Cop/E", "correctable": false, "location": {"start_line": 5, "start_column": 1}}, + {"severity": "convention", "message": "msg6", "cop_name": "Cop/F", "correctable": false, "location": {"start_line": 6, "start_column": 1}}, + {"severity": "convention", "message": "msg7", "cop_name": "Cop/G", "correctable": false, "location": {"start_line": 7, "start_column": 1}} + ] + } + ], + "summary": {"offense_count": 7, "target_file_count": 1, "inspected_file_count": 5} + }"#; + let result = filter_rubocop_json(json); + assert!(result.contains(":5 Cop/E"), "should show 5th offense"); + assert!(!result.contains(":6 Cop/F"), "should not show 6th inline"); + assert!(result.contains("+2 more"), "should show overflow"); + } + + #[test] + fn test_filter_rubocop_text_bundler_error() { + let text = "Bundler::GemNotFound: Could not find gem 'rubocop' in any sources."; + let result = filter_rubocop_text(text); + assert!( + result.starts_with("RuboCop error:"), + "should detect Bundler error: {}", + result + ); + assert!(result.contains("GemNotFound")); + } + + #[test] + fn test_filter_rubocop_text_load_error() { + let text = + "/usr/lib/ruby/3.2.0/rubygems.rb:250: cannot load such file -- rubocop (LoadError)"; + let result = filter_rubocop_text(text); + assert!( + result.starts_with("RuboCop error:"), + "should detect load error: {}", + result + ); + } + + #[test] + fn test_filter_rubocop_text_with_offenses() { + let text = r#"Inspecting 5 files +..C.. 
+ +5 files inspected, 1 offense detected"#; + let result = filter_rubocop_text(text); + assert_eq!(result, "RuboCop: 5 files inspected, 1 offense detected"); + } + + #[test] + fn test_severity_rank() { + assert!(severity_rank("error") < severity_rank("warning")); + assert!(severity_rank("warning") < severity_rank("convention")); + assert!(severity_rank("fatal") < severity_rank("warning")); + } + + #[test] + fn test_token_savings() { + let input = with_offenses_json(); + let output = filter_rubocop_json(input); + + let input_tokens = count_tokens(input); + let output_tokens = count_tokens(&output); + let savings = 100.0 - (output_tokens as f64 / input_tokens as f64 * 100.0); + + assert!( + savings >= 60.0, + "RuboCop: expected ≥60% savings, got {:.1}% (in={}, out={})", + savings, + input_tokens, + output_tokens + ); + } + + // ── ANSI handling test ────────────────────────────────────────────────── + + #[test] + fn test_filter_rubocop_json_with_ansi_prefix() { + // ANSI codes before JSON should trigger fallback, not panic + let input = "\x1b[33mWarning: something\x1b[0m\n{\"broken\": true}"; + let result = filter_rubocop_json(input); + assert!(!result.is_empty(), "should not panic on ANSI-prefixed JSON"); + } + + // ── 10-file cap test (Issue 12) ───────────────────────────────────────── + + #[test] + fn test_filter_rubocop_caps_at_ten_files() { + // Build JSON with 12 files, each having 1 offense + let mut files_json = Vec::new(); + for i in 1..=12 { + files_json.push(format!( + r#"{{"path": "app/models/model_{}.rb", "offenses": [{{"severity": "convention", "message": "msg{}", "cop_name": "Cop/X{}", "correctable": false, "location": {{"start_line": 1, "start_column": 1}}}}]}}"#, + i, i, i + )); + } + let json = format!( + r#"{{"metadata": {{"rubocop_version": "1.60.0"}}, "files": [{}], "summary": {{"offense_count": 12, "target_file_count": 12, "inspected_file_count": 12}}}}"#, + files_json.join(",") + ); + let result = filter_rubocop_json(&json); + assert!( + 
result.contains("+2 more files"), + "should show +2 more files overflow: {}", + result + ); + } +} diff --git a/src/toml_filter.rs b/src/toml_filter.rs index 69db33bf..0f571626 100644 --- a/src/toml_filter.rs +++ b/src/toml_filter.rs @@ -1610,8 +1610,8 @@ match_command = "^make\\b" let filters = make_filters(BUILTIN_TOML); assert_eq!( filters.len(), - 57, - "Expected exactly 57 built-in filters, got {}. \ + 58, + "Expected exactly 58 built-in filters, got {}. \ Update this count when adding/removing filters in src/filters/.", filters.len() ); @@ -1668,11 +1668,11 @@ expected = "output line 1\noutput line 2" let combined = format!("{}\n\n{}", BUILTIN_TOML, new_filter); let filters = make_filters(&combined); - // All 57 existing filters still present + 1 new = 58 + // All 58 existing filters still present + 1 new = 59 assert_eq!( filters.len(), - 58, - "Expected 58 filters after concat (57 built-in + 1 new)" + 59, + "Expected 59 filters after concat (58 built-in + 1 new)" ); // New filter is discoverable diff --git a/src/utils.rs b/src/utils.rs index ff84961c..c1882fa8 100644 --- a/src/utils.rs +++ b/src/utils.rs @@ -207,6 +207,58 @@ pub fn ok_confirmation(action: &str, detail: &str) -> String { } } +/// Extract exit code from a process output. Returns the actual exit code, or +/// `128 + signal` per Unix convention when terminated by a signal (no exit code +/// available). Falls back to 1 on non-Unix platforms. +pub fn exit_code_from_output(output: &std::process::Output, label: &str) -> i32 { + match output.status.code() { + Some(code) => code, + None => { + #[cfg(unix)] + { + use std::os::unix::process::ExitStatusExt; + if let Some(sig) = output.status.signal() { + eprintln!("[rtk] {}: process terminated by signal {}", label, sig); + return 128 + sig; + } + } + eprintln!("[rtk] {}: process terminated by signal", label); + 1 + } + } +} + +/// Return the last `n` lines of output with a label, for use as a fallback +/// when filter parsing fails. 
Logs a diagnostic to stderr. +pub fn fallback_tail(output: &str, label: &str, n: usize) -> String { + eprintln!( + "[rtk] {}: output format not recognized, showing last {} lines", + label, n + ); + let lines: Vec<&str> = output.lines().collect(); + let start = lines.len().saturating_sub(n); + lines[start..].join("\n") +} + +/// Build a Command for Ruby tools, auto-detecting bundle exec. +/// Uses `bundle exec ` when a Gemfile exists (transitive deps like rake +/// won't appear in the Gemfile but still need bundler for version isolation). +pub fn ruby_exec(tool: &str) -> Command { + if std::path::Path::new("Gemfile").exists() { + let mut c = Command::new("bundle"); + c.arg("exec").arg(tool); + return c; + } + Command::new(tool) +} + +/// Count whitespace-delimited tokens in text. Used by filter tests to verify +/// token savings claims. +#[cfg(test)] +pub fn count_tokens(text: &str) -> usize { + text.split_whitespace().count() +} + /// Detect the package manager used in the current directory. /// Returns "pnpm", "yarn", or "npm" based on lockfile presence. /// From 4edc3fc0838e25ee6d1754c7e987b5507742f600 Mon Sep 17 00:00:00 2001 From: patrick szymkowiak <52030887+pszymkowiak@users.noreply.github.com> Date: Thu, 19 Mar 2026 15:15:35 +0100 Subject: [PATCH 12/30] fix: increase signal in git diff, git log, and json filters (#621) (#708) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - git diff: raise max_hunk_lines from 30 to 100 (LLMs need full hunks) - git log: show 3 body lines instead of 1 (preserves BREAKING CHANGE, migration notes) - json: show values by default (LLMs need values for config debugging), add --schema for types-only Tested with phi4:14b on local LLM — all 3 fixes improve comprehension. 
Signed-off-by: Patrick szymkowiak --- src/filters/stat.toml | 32 +++++++---- src/git.rs | 30 +++++++---- src/json_cmd.rs | 120 ++++++++++++++++++++++++++++++++++++++---- src/main.rs | 34 ++++++++---- 4 files changed, 173 insertions(+), 43 deletions(-) diff --git a/src/filters/stat.toml b/src/filters/stat.toml index 24d9d946..8c240c05 100644 --- a/src/filters/stat.toml +++ b/src/filters/stat.toml @@ -1,21 +1,17 @@ [filters.stat] -description = "Compact stat output — strip blank lines" +description = "Compact stat output — strip device/inode/birth noise" match_command = "^stat\\b" strip_ansi = true strip_lines_matching = [ "^\\s*$", + "^\\s*Device:", + "^\\s*Birth:", ] -max_lines = 30 +truncate_lines_at = 120 +max_lines = 20 [[tests.stat]] -name = "macOS stat output kept" -input = """ -16777234 8690244974 -rw-r--r-- 1 patrick staff 0 12345 "Mar 10 12:00:00 2026" "Mar 10 11:00:00 2026" "Mar 10 11:00:00 2026" "Mar 9 10:00:00 2026" 4096 24 0 file.txt -""" -expected = "16777234 8690244974 -rw-r--r-- 1 patrick staff 0 12345 \"Mar 10 12:00:00 2026\" \"Mar 10 11:00:00 2026\" \"Mar 10 11:00:00 2026\" \"Mar 9 10:00:00 2026\" 4096 24 0 file.txt" - -[[tests.stat]] -name = "linux stat output kept" +name = "linux stat output strips device and birth" input = """ File: main.rs Size: 12345 Blocks: 24 IO Block: 4096 regular file @@ -26,7 +22,21 @@ Modify: 2026-03-10 11:00:00.000000000 +0100 Change: 2026-03-10 11:00:00.000000000 +0100 Birth: 2026-03-09 10:00:00.000000000 +0100 """ -expected = " File: main.rs\n Size: 12345 Blocks: 24 IO Block: 4096 regular file\nDevice: 801h/2049d Inode: 1234567 Links: 1\nAccess: (0644/-rw-r--r--) Uid: ( 1000/ patrick) Gid: ( 1000/ patrick)\nAccess: 2026-03-10 12:00:00.000000000 +0100\nModify: 2026-03-10 11:00:00.000000000 +0100\nChange: 2026-03-10 11:00:00.000000000 +0100\n Birth: 2026-03-09 10:00:00.000000000 +0100" +expected = " File: main.rs\n Size: 12345 Blocks: 24 IO Block: 4096 regular file\nAccess: (0644/-rw-r--r--) Uid: ( 1000/ patrick) Gid: ( 
1000/ patrick)\nAccess: 2026-03-10 12:00:00.000000000 +0100\nModify: 2026-03-10 11:00:00.000000000 +0100\nChange: 2026-03-10 11:00:00.000000000 +0100" + +[[tests.stat]] +name = "macOS stat -x strips device and birth" +input = """ + File: "main.rs" + Size: 82848 FileType: Regular File + Mode: (0644/-rw-r--r--) Uid: ( 501/ patrick) Gid: ( 20/ staff) +Device: 1,15 Inode: 66302332 Links: 1 +Access: Wed Mar 18 21:21:15 2026 +Modify: Wed Mar 18 20:56:11 2026 +Change: Wed Mar 18 20:56:11 2026 + Birth: Wed Mar 18 20:56:11 2026 +""" +expected = " File: \"main.rs\"\n Size: 82848 FileType: Regular File\n Mode: (0644/-rw-r--r--) Uid: ( 501/ patrick) Gid: ( 20/ staff)\nAccess: Wed Mar 18 21:21:15 2026\nModify: Wed Mar 18 20:56:11 2026\nChange: Wed Mar 18 20:56:11 2026" [[tests.stat]] name = "empty input passes through" diff --git a/src/git.rs b/src/git.rs index 3d49fdd6..4bb7f674 100644 --- a/src/git.rs +++ b/src/git.rs @@ -297,7 +297,7 @@ pub(crate) fn compact_diff(diff: &str, max_lines: usize) -> String { let mut removed = 0; let mut in_hunk = false; let mut hunk_lines = 0; - let max_hunk_lines = 30; + let max_hunk_lines = 100; let mut was_truncated = false; for line in diff.lines() { @@ -532,17 +532,25 @@ fn filter_log_output( Some(h) => truncate_line(h.trim(), truncate_width), None => continue, }; - // Remaining lines are the body — keep first non-empty line only - let body_line = lines.map(|l| l.trim()).find(|l| { - !l.is_empty() && !l.starts_with("Signed-off-by:") && !l.starts_with("Co-authored-by:") - }); - - match body_line { - Some(body) => { - let truncated_body = truncate_line(body, truncate_width); - result.push(format!("{}\n {}", header, truncated_body)); + // Remaining lines are the body — keep up to 3 non-empty, non-trailer lines + let body_lines: Vec<&str> = lines + .map(|l| l.trim()) + .filter(|l| { + !l.is_empty() + && !l.starts_with("Signed-off-by:") + && !l.starts_with("Co-authored-by:") + }) + .take(3) + .collect(); + + if body_lines.is_empty() { + 
result.push(header); + } else { + let mut entry = header; + for body in &body_lines { + entry.push_str(&format!("\n {}", truncate_line(body, truncate_width))); } - None => result.push(header), + result.push(entry); } } diff --git a/src/json_cmd.rs b/src/json_cmd.rs index 76bae3ae..685c8f62 100644 --- a/src/json_cmd.rs +++ b/src/json_cmd.rs @@ -33,8 +33,8 @@ fn validate_json_extension(file: &Path) -> Result<()> { Ok(()) } -/// Show JSON structure without values -pub fn run(file: &Path, max_depth: usize, verbose: u8) -> Result<()> { +/// Show JSON (compact with values, or schema-only with --schema) +pub fn run(file: &Path, max_depth: usize, schema_only: bool, verbose: u8) -> Result<()> { validate_json_extension(file)?; let timer = tracking::TimedExecution::start(); @@ -45,19 +45,23 @@ pub fn run(file: &Path, max_depth: usize, verbose: u8) -> Result<()> { let content = fs::read_to_string(file) .with_context(|| format!("Failed to read file: {}", file.display()))?; - let schema = filter_json_string(&content, max_depth)?; - println!("{}", schema); + let output = if schema_only { + filter_json_string(&content, max_depth)? + } else { + filter_json_compact(&content, max_depth)? + }; + println!("{}", output); timer.track( &format!("cat {}", file.display()), "rtk json", &content, - &schema, + &output, ); Ok(()) } -/// Show JSON structure from stdin -pub fn run_stdin(max_depth: usize, verbose: u8) -> Result<()> { +/// Show JSON from stdin +pub fn run_stdin(max_depth: usize, schema_only: bool, verbose: u8) -> Result<()> { let timer = tracking::TimedExecution::start(); if verbose > 0 { @@ -70,13 +74,107 @@ pub fn run_stdin(max_depth: usize, verbose: u8) -> Result<()> { .read_to_string(&mut content) .context("Failed to read from stdin")?; - let schema = filter_json_string(&content, max_depth)?; - println!("{}", schema); - timer.track("cat - (stdin)", "rtk json -", &content, &schema); + let output = if schema_only { + filter_json_string(&content, max_depth)? 
+ } else { + filter_json_compact(&content, max_depth)? + }; + println!("{}", output); + timer.track("cat - (stdin)", "rtk json -", &content, &output); Ok(()) } -/// Parse a JSON string and return its schema representation. +/// Parse a JSON string and return compact representation with values preserved. +/// Long strings are truncated, arrays are summarized. +pub fn filter_json_compact(json_str: &str, max_depth: usize) -> Result { + let value: Value = serde_json::from_str(json_str).context("Failed to parse JSON")?; + Ok(compact_json(&value, 0, max_depth)) +} + +fn compact_json(value: &Value, depth: usize, max_depth: usize) -> String { + let indent = " ".repeat(depth); + + if depth > max_depth { + return format!("{}...", indent); + } + + match value { + Value::Null => format!("{}null", indent), + Value::Bool(b) => format!("{}{}", indent, b), + Value::Number(n) => format!("{}{}", indent, n), + Value::String(s) => { + if s.len() > 80 { + format!("{}\"{}...\"", indent, &s[..77]) + } else { + format!("{}\"{}\"", indent, s) + } + } + Value::Array(arr) => { + if arr.is_empty() { + format!("{}[]", indent) + } else if arr.len() > 5 { + let first = compact_json(&arr[0], depth + 1, max_depth); + format!("{}[{}, ... 
+{} more]", indent, first.trim(), arr.len() - 1) + } else { + let items: Vec = arr + .iter() + .map(|v| compact_json(v, depth + 1, max_depth)) + .collect(); + let all_simple = arr.iter().all(|v| { + matches!( + v, + Value::Null | Value::Bool(_) | Value::Number(_) | Value::String(_) + ) + }); + if all_simple { + let inline: Vec<&str> = items.iter().map(|s| s.trim()).collect(); + format!("{}[{}]", indent, inline.join(", ")) + } else { + let mut lines = vec![format!("{}[", indent)]; + for item in &items { + lines.push(format!("{},", item)); + } + lines.push(format!("{}]", indent)); + lines.join("\n") + } + } + } + Value::Object(map) => { + if map.is_empty() { + format!("{}{{}}", indent) + } else { + let mut lines = vec![format!("{}{{", indent)]; + let mut keys: Vec<_> = map.keys().collect(); + keys.sort(); + + for (i, key) in keys.iter().enumerate() { + let val = &map[*key]; + let is_simple = matches!( + val, + Value::Null | Value::Bool(_) | Value::Number(_) | Value::String(_) + ); + + if is_simple { + let val_str = compact_json(val, 0, max_depth); + lines.push(format!("{} {}: {}", indent, key, val_str.trim())); + } else { + lines.push(format!("{} {}:", indent, key)); + lines.push(compact_json(val, depth + 1, max_depth)); + } + + if i >= 20 { + lines.push(format!("{} ... +{} more keys", indent, keys.len() - i - 1)); + break; + } + } + lines.push(format!("{}}}", indent)); + lines.join("\n") + } + } + } +} + +/// Parse a JSON string and return its schema representation (types only, no values). /// Useful for piping JSON from other commands (e.g., `gh api`, `curl`). 
pub fn filter_json_string(json_str: &str, max_depth: usize) -> Result { let value: Value = serde_json::from_str(json_str).context("Failed to parse JSON")?; diff --git a/src/main.rs b/src/main.rs index 0b1b76dc..0ff5124c 100644 --- a/src/main.rs +++ b/src/main.rs @@ -240,13 +240,16 @@ enum Commands { command: Vec, }, - /// Show JSON structure without values + /// Show JSON (compact values, or schema-only with --schema) Json { /// JSON file file: PathBuf, /// Max depth #[arg(short, long, default_value = "5")] depth: usize, + /// Show structure only (strip all values) + #[arg(long)] + schema: bool, }, /// Summarize project dependencies @@ -390,9 +393,9 @@ enum Commands { Wget { /// URL to download url: String, - /// Output to stdout instead of file - #[arg(short = 'O', long)] - stdout: bool, + /// Output file (-O - for stdout) + #[arg(short = 'O', long = "output-document", allow_hyphen_values = true)] + output: Option, /// Additional wget arguments #[arg(trailing_var_arg = true, allow_hyphen_values = true)] args: Vec, @@ -1525,11 +1528,15 @@ fn main() -> Result<()> { runner::run_test(&cmd, cli.verbose)?; } - Commands::Json { file, depth } => { + Commands::Json { + file, + depth, + schema, + } => { if file == Path::new("-") { - json_cmd::run_stdin(depth, cli.verbose)?; + json_cmd::run_stdin(depth, schema, cli.verbose)?; } else { - json_cmd::run(&file, depth, cli.verbose)?; + json_cmd::run(&file, depth, schema, cli.verbose)?; } } @@ -1726,11 +1733,18 @@ fn main() -> Result<()> { } } - Commands::Wget { url, stdout, args } => { - if stdout { + Commands::Wget { url, output, args } => { + if output.as_deref() == Some("-") { wget_cmd::run_stdout(&url, &args, cli.verbose)?; } else { - wget_cmd::run(&url, &args, cli.verbose)?; + // Pass -O through to wget via args + let mut all_args = Vec::new(); + if let Some(out_file) = &output { + all_args.push("-O".to_string()); + all_args.push(out_file.clone()); + } + all_args.extend(args); + wget_cmd::run(&url, &all_args, cli.verbose)?; 
} } From 15d5beb9f70caf1f84e9b506faaf840c70c1cf4e Mon Sep 17 00:00:00 2001 From: YoubAmj <11021965+youbamj@users.noreply.github.com> Date: Thu, 19 Mar 2026 18:19:18 +0000 Subject: [PATCH 13/30] fix: preserve cargo test compile diagnostics Signed-off-by: YoubAmj <11021965+youbamj@users.noreply.github.com> --- CHANGELOG.md | 4 ++++ src/cargo_cmd.rs | 35 +++++++++++++++++++++++++++++++++++ 2 files changed, 39 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index b7053624..c3f1c7ef 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -16,6 +16,10 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 * **ruby:** add `ruby_exec()` shared utility for auto-detecting `bundle exec` when Gemfile exists * **ruby:** add discover/rewrite rules for rake, rails, rspec, rubocop, and bundle commands +### Bug Fixes + +* **cargo:** preserve compile diagnostics when `cargo test` fails before any test suites run + ## [0.30.1](https://github.com/rtk-ai/rtk/compare/v0.30.0...v0.30.1) (2026-03-18) diff --git a/src/cargo_cmd.rs b/src/cargo_cmd.rs index 63eea4b7..eabf8a37 100644 --- a/src/cargo_cmd.rs +++ b/src/cargo_cmd.rs @@ -850,6 +850,18 @@ fn filter_cargo_test(output: &str) -> String { } if result.trim().is_empty() { + let has_compile_errors = output.lines().any(|line| { + let trimmed = line.trim_start(); + trimmed.starts_with("error[") || trimmed.starts_with("error:") + }); + + if has_compile_errors { + let build_filtered = filter_cargo_build(output); + if build_filtered.starts_with("cargo build:") { + return build_filtered.replacen("cargo build:", "cargo test:", 1); + } + } + // Fallback: show last meaningful lines let meaningful: Vec<&str> = output .lines() @@ -1314,6 +1326,29 @@ test result: MALFORMED LINE WITHOUT PROPER FORMAT ); } + #[test] + fn test_filter_cargo_test_compile_error_preserves_error_header() { + let output = r#" Compiling rtk v0.31.0 (/workspace/projects/rtk) +error[E0425]: cannot find value `missing_symbol` in this scope + --> 
tests/repro_compile_fail.rs:3:13 + | +3 | let _ = missing_symbol; + | ^^^^^^^^^^^^^^ not found in this scope + +For more information about this error, try `rustc --explain E0425`. +error: could not compile `rtk` (test "repro_compile_fail") due to 1 previous error +"#; + let result = filter_cargo_test(output); + assert!(result.contains("cargo test: 1 errors, 0 warnings (1 crates)")); + assert!(result.contains("error[E0425]"), "got: {}", result); + assert!( + result.contains("--> tests/repro_compile_fail.rs:3:13"), + "got: {}", + result + ); + assert!(!result.starts_with('|'), "got: {}", result); + } + #[test] fn test_filter_cargo_clippy_clean() { let output = r#" Checking rtk v0.5.0 From 138e91411b4802e445a97429056cca73282d09e1 Mon Sep 17 00:00:00 2001 From: Nicholas Lee Date: Thu, 19 Mar 2026 16:48:44 -0700 Subject: [PATCH 14/30] fix(ruby): use rails test for positional file args in rtk rake rake test ignores positional file arguments and only supports TEST=path for single-file runs. When users pass positional test files (e.g., `rtk rake test file1.rb file2.rb` or `rtk rake test file.rb:15`), select_runner() now switches to `rails test` which handles single files, multiple files, and line-number syntax natively. 
Co-Authored-By: Claude Opus 4.6 Signed-off-by: Nicholas Lee --- CHANGELOG.md | 4 ++ src/rake_cmd.rs | 117 ++++++++++++++++++++++++++++++++++++++++++++++-- 2 files changed, 118 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index b7053624..963463b7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,10 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ## [Unreleased] +### Bug Fixes + +* **ruby:** use `rails test` instead of `rake test` when positional file args are passed — `rake test` ignores positional files and only supports `TEST=path` + ### Features * **ruby:** add RSpec test runner filter with JSON parsing and text fallback (60%+ reduction) diff --git a/src/rake_cmd.rs b/src/rake_cmd.rs index f6ab62f5..e3fba68f 100644 --- a/src/rake_cmd.rs +++ b/src/rake_cmd.rs @@ -8,11 +8,50 @@ use crate::tracking; use crate::utils::{exit_code_from_output, ruby_exec, strip_ansi}; use anyhow::{Context, Result}; +/// Decide whether to use `rake test` or `rails test` based on args. +/// +/// `rake test` only supports a single file via `TEST=path` and ignores positional +/// file args. When any positional test file paths are detected, we switch to +/// `rails test` which handles single files, multiple files, and line-number +/// syntax (`file.rb:15`) natively. 
+fn select_runner(args: &[String]) -> (&'static str, Vec) { + let has_test_subcommand = args.first().map_or(false, |a| a == "test"); + if !has_test_subcommand { + return ("rake", args.to_vec()); + } + + let after_test: Vec<&String> = args[1..].iter().collect(); + + let positional_files: Vec<&&String> = after_test + .iter() + .filter(|a| !a.contains('=') && !a.starts_with('-')) + .filter(|a| looks_like_test_path(a)) + .collect(); + + let needs_rails = !positional_files.is_empty(); + + if needs_rails { + ("rails", args.to_vec()) + } else { + ("rake", args.to_vec()) + } +} + +fn looks_like_test_path(arg: &str) -> bool { + let path = arg.split(':').next().unwrap_or(arg); + path.ends_with(".rb") + || path.starts_with("test/") + || path.starts_with("spec/") + || path.contains("_test.rb") + || path.contains("_spec.rb") +} + pub fn run(args: &[String], verbose: u8) -> Result<()> { let timer = tracking::TimedExecution::start(); - let mut cmd = ruby_exec("rake"); - for arg in args { + let (tool, effective_args) = select_runner(args); + let mut cmd = ruby_exec(tool); + for arg in &effective_args { cmd.arg(arg); } @@ -20,7 +59,7 @@ pub fn run(args: &[String], verbose: u8) -> Result<()> { eprintln!( "Running: {} {}", cmd.get_program().to_string_lossy(), - args.join(" ") + effective_args.join(" ") ); } @@ -438,4 +477,76 @@ NoMethodError: undefined method `blah' assert!(result.contains("ok rake test")); assert!(result.contains("4 runs")); } + + // ── select_runner tests ───────────────────────────── + + fn args(s: &str) -> Vec { + s.split_whitespace().map(String::from).collect() + } + + #[test] + fn test_select_runner_single_file_uses_rake() { + let (tool, _) = select_runner(&args("test TEST=test/models/post_test.rb")); + assert_eq!(tool, "rake"); + } + + #[test] + fn test_select_runner_no_files_uses_rake() { + let (tool, _) = select_runner(&args("test")); + assert_eq!(tool, "rake"); + } + + #[test] + fn test_select_runner_multiple_files_uses_rails() { + let (tool, a) = 
select_runner(&args( + "test test/models/post_test.rb test/models/user_test.rb", + )); + assert_eq!(tool, "rails"); + assert_eq!( + a, + args("test test/models/post_test.rb test/models/user_test.rb") + ); + } + + #[test] + fn test_select_runner_line_number_uses_rails() { + let (tool, _) = select_runner(&args("test test/models/post_test.rb:15")); + assert_eq!(tool, "rails"); + } + + #[test] + fn test_select_runner_multiple_with_line_numbers() { + let (tool, _) = select_runner(&args( + "test test/models/post_test.rb:15 test/models/user_test.rb:30", + )); + assert_eq!(tool, "rails"); + } + + #[test] + fn test_select_runner_non_test_subcommand_uses_rake() { + let (tool, _) = select_runner(&args("db:migrate")); + assert_eq!(tool, "rake"); + } + + #[test] + fn test_select_runner_single_positional_file_uses_rails() { + let (tool, _) = select_runner(&args("test test/models/post_test.rb")); + assert_eq!(tool, "rails"); + } + + #[test] + fn test_select_runner_flags_not_counted_as_files() { + let (tool, _) = select_runner(&args("test --verbose --seed 12345")); + assert_eq!(tool, "rake"); + } + + #[test] + fn test_looks_like_test_path() { + assert!(looks_like_test_path("test/models/post_test.rb")); + assert!(looks_like_test_path("test/models/post_test.rb:15")); + assert!(looks_like_test_path("spec/models/post_spec.rb")); + assert!(looks_like_test_path("my_file.rb")); + assert!(!looks_like_test_path("--verbose")); + assert!(!looks_like_test_path("12345")); + } } From 53bc81e9e6d3d0876fb1a23dbf6f08bc074b68be Mon Sep 17 00:00:00 2001 From: aesoft <43991222+aeppling@users.noreply.github.com> Date: Thu, 19 Mar 2026 20:09:50 +0100 Subject: [PATCH 15/30] fix(cicd): get release-like tag for pre-release added script to act like release please (release please flag was unclear) added workflow dispatch event + dev-like for pre-release debug guards for workflow_dispatch (limit to push master for release events) Signed-off-by: aesoft <43991222+aeppling@users.noreply.github.com> ---
.github/workflows/cd.yml | 57 +++++++++++++++++++++++++++++++++------- 1 file changed, 48 insertions(+), 9 deletions(-) diff --git a/.github/workflows/cd.yml b/.github/workflows/cd.yml index 49d52bff..5b01ac30 100644 --- a/.github/workflows/cd.yml +++ b/.github/workflows/cd.yml @@ -1,6 +1,7 @@ name: CD on: + workflow_dispatch: push: branches: [develop, master] @@ -18,7 +19,9 @@ jobs: # ═══════════════════════════════════════════════ pre-release: - if: github.ref == 'refs/heads/develop' + if: >- + github.ref == 'refs/heads/develop' + || (github.event_name == 'workflow_dispatch' && github.ref != 'refs/heads/master') runs-on: ubuntu-latest outputs: tag: ${{ steps.tag.outputs.tag }} @@ -27,16 +30,53 @@ jobs: with: fetch-depth: 0 - - name: Compute pre-release tag + - name: Compute version from commits like release please id: tag run: | - VERSION=$(grep '^version = ' Cargo.toml | head -1 | cut -d'"' -f2) - TAG="v${VERSION}-rc.${{ github.run_number }}" + # ── Find latest stable tag reachable from HEAD ── + LATEST_TAG="" + for t in $(git tag -l 'v[0-9]*.[0-9]*.[0-9]*' --sort=-version:refname | grep -v '-'); do + if git merge-base --is-ancestor "$t" HEAD 2>/dev/null; then + LATEST_TAG="$t"; break + fi + done + if [ -z "$LATEST_TAG" ]; then + echo "::error::No stable release tag found in branch history" + exit 1 + fi + LATEST_VERSION="${LATEST_TAG#v}" + echo "Latest ancestor release: $LATEST_TAG" + + # ── Analyse conventional commits since that tag ── + COMMITS=$(git log "${LATEST_TAG}..HEAD" --format="%s") + HAS_BREAKING=$(echo "$COMMITS" | grep -cE '^[a-z]+(\(.+\))?!:' || true) + HAS_FEAT=$(echo "$COMMITS" | grep -cE '^feat(\(.+\))?:' || true) + HAS_FIX=$(echo "$COMMITS" | grep -cE '^fix(\(.+\))?:' || true) + echo "Commits since ${LATEST_TAG} — breaking=$HAS_BREAKING feat=$HAS_FEAT fix=$HAS_FIX" - # Safety: warn if this base version is already released - if git ls-remote --tags origin "refs/tags/v${VERSION}" | grep -q .; then - echo "::warning::v${VERSION} already 
released. Consider bumping Cargo.toml on develop." + # ── Compute next version (matches release-please observed behaviour) ── + # Pre-1.0 with bump-minor-pre-major: breaking → minor, feat → minor, fix → patch + IFS='.' read -r MAJOR MINOR PATCH <<< "$LATEST_VERSION" + if [ "$MAJOR" -eq 0 ]; then + if [ "$HAS_BREAKING" -gt 0 ] || [ "$HAS_FEAT" -gt 0 ]; then + MINOR=$((MINOR + 1)); PATCH=0 # breaking or feat → minor + else + PATCH=$((PATCH + 1)) # fix only → patch + fi + else + if [ "$HAS_BREAKING" -gt 0 ]; then + MAJOR=$((MAJOR + 1)); MINOR=0; PATCH=0 # breaking → major + elif [ "$HAS_FEAT" -gt 0 ]; then + MINOR=$((MINOR + 1)); PATCH=0 # feat → minor + else + PATCH=$((PATCH + 1)) # fix → patch + fi fi + VERSION="${MAJOR}.${MINOR}.${PATCH}" + TAG="v${VERSION}-rc.${{ github.run_number }}" + + echo "Next version: $VERSION (from $LATEST_VERSION)" + echo "Pre-release tag: $TAG" # Safety: fail if this exact tag already exists if git ls-remote --tags origin "refs/tags/${TAG}" | grep -q .; then @@ -45,7 +85,6 @@ jobs: fi echo "tag=$TAG" >> $GITHUB_OUTPUT - echo "Pre-release tag: $TAG" build-prerelease: name: Build pre-release @@ -64,7 +103,7 @@ jobs: # ═══════════════════════════════════════════════ release-please: - if: github.ref == 'refs/heads/master' + if: github.ref == 'refs/heads/master' && github.event_name == 'push' runs-on: ubuntu-latest outputs: release_created: ${{ steps.release.outputs.release_created }} From 865749438e67f6da7f719d054bf377d857925ad3 Mon Sep 17 00:00:00 2001 From: aesoft <43991222+aeppling@users.noreply.github.com> Date: Thu, 19 Mar 2026 20:20:48 +0100 Subject: [PATCH 16/30] fix(cicd): missing doc Signed-off-by: aesoft <43991222+aeppling@users.noreply.github.com> --- .github/workflows/CICD.md | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/.github/workflows/CICD.md b/.github/workflows/CICD.md index 071d234a..53776a00 100644 --- a/.github/workflows/CICD.md +++ b/.github/workflows/CICD.md @@ -39,18 +39,19 @@ Trigger: 
pull_request to develop or master ## Merge to develop — pre-release (cd.yml) -Trigger: push to develop | Concurrency: cancel-in-progress +Trigger: push to develop | workflow_dispatch (not master) | Concurrency: cancel-in-progress ``` ┌──────────────────┐ │ push to develop │ + │ OR dispatch │ └────────┬─────────┘ │ ┌────────▼──────────────────┐ │ pre-release │ - │ read Cargo.toml version │ - │ tag = v{ver}-rc.{run} │ - │ safety: fail if exists │ + │ compute next version │ + │ from conventional commits │ + │ tag = v{next}-rc.{run} │ └────────┬──────────────────┘ │ ┌────────▼──────────────────┐ @@ -74,7 +75,7 @@ Trigger: push to develop | Concurrency: cancel-in-progress ## Merge to master — stable release (cd.yml) -Trigger: push to master | Concurrency: never cancelled +Trigger: push to master (only) | Concurrency: never cancelled ``` ┌──────────────────┐ From 6aa5e90dc466f87c88a2401b4eb2aa0f323379f4 Mon Sep 17 00:00:00 2001 From: Adam Powis Date: Fri, 20 Mar 2026 10:38:10 +0000 Subject: [PATCH 17/30] fix(golangci): use resolved_command for version detection, move test fixture to file Signed-off-by: Adam Powis --- src/golangci_cmd.rs | 149 +--------------------------- tests/fixtures/golangci_v2_json.txt | 144 +++++++++++++++++++++++++++ 2 files changed, 146 insertions(+), 147 deletions(-) create mode 100644 tests/fixtures/golangci_v2_json.txt diff --git a/src/golangci_cmd.rs b/src/golangci_cmd.rs index b0edb677..b2fdcd28 100644 --- a/src/golangci_cmd.rs +++ b/src/golangci_cmd.rs @@ -4,7 +4,6 @@ use crate::utils::{resolved_command, truncate}; use anyhow::{Context, Result}; use serde::Deserialize; use std::collections::HashMap; -use std::process::Command; #[derive(Debug, Deserialize)] struct Position { @@ -62,7 +61,7 @@ fn parse_major_version(version_output: &str) -> u32 { /// Run `golangci-lint --version` and return the major version number. /// Returns 1 on any failure. 
fn detect_major_version() -> u32 { - let output = Command::new("golangci-lint").arg("--version").output(); + let output = resolved_command("golangci-lint").arg("--version").output(); match output { Ok(o) => { @@ -511,151 +510,7 @@ mod tests { #[test] fn test_golangci_v2_token_savings() { - // Simulate a realistic v2 JSON output with multiple issues - let raw = r#"{ - "Issues": [ - { - "FromLinter": "errcheck", - "Text": "Error return value of `foo` is not checked", - "Severity": "error", - "SourceLines": [ - " if err := foo(); err != nil {", - " return err", - " }" - ], - "Pos": { - "Filename": "pkg/handler/server.go", - "Line": 42, - "Column": 5, - "Offset": 1024 - }, - "Replacement": null, - "ExpectNoLint": false, - "ExpectedNoLintLinter": "" - }, - { - "FromLinter": "errcheck", - "Text": "Error return value of `bar` is not checked", - "Severity": "error", - "SourceLines": [ - " bar()", - " return nil", - "}" - ], - "Pos": { - "Filename": "pkg/handler/server.go", - "Line": 55, - "Column": 2, - "Offset": 2048 - }, - "Replacement": null, - "ExpectNoLint": false, - "ExpectedNoLintLinter": "" - }, - { - "FromLinter": "gosimple", - "Text": "S1003: should replace strings.Index with strings.Contains", - "Severity": "warning", - "SourceLines": [ - " if strings.Index(s, sub) >= 0 {", - " return true", - " }" - ], - "Pos": { - "Filename": "pkg/utils/strings.go", - "Line": 15, - "Column": 2, - "Offset": 512 - }, - "Replacement": null, - "ExpectNoLint": false, - "ExpectedNoLintLinter": "" - }, - { - "FromLinter": "govet", - "Text": "printf: Sprintf format %s has arg of wrong type int", - "Severity": "error", - "SourceLines": [ - " fmt.Sprintf(\"%s\", 42)" - ], - "Pos": { - "Filename": "cmd/main/main.go", - "Line": 10, - "Column": 3, - "Offset": 256 - }, - "Replacement": null, - "ExpectNoLint": false, - "ExpectedNoLintLinter": "" - }, - { - "FromLinter": "unused", - "Text": "func `unusedHelper` is unused", - "Severity": "warning", - "SourceLines": [ - "func unusedHelper() {", 
- " // implementation", - "}" - ], - "Pos": { - "Filename": "internal/helpers.go", - "Line": 100, - "Column": 1, - "Offset": 4096 - }, - "Replacement": null, - "ExpectNoLint": false, - "ExpectedNoLintLinter": "" - }, - { - "FromLinter": "errcheck", - "Text": "Error return value of `close` is not checked", - "Severity": "error", - "SourceLines": [ - " defer file.Close()" - ], - "Pos": { - "Filename": "pkg/handler/server.go", - "Line": 120, - "Column": 10, - "Offset": 3072 - }, - "Replacement": null, - "ExpectNoLint": false, - "ExpectedNoLintLinter": "" - }, - { - "FromLinter": "gosimple", - "Text": "S1005: should omit nil check", - "Severity": "warning", - "SourceLines": [ - " if m != nil {", - " for k, v := range m {", - " process(k, v)", - " }", - " }" - ], - "Pos": { - "Filename": "pkg/utils/strings.go", - "Line": 45, - "Column": 1, - "Offset": 1536 - }, - "Replacement": null, - "ExpectNoLint": false, - "ExpectedNoLintLinter": "" - } - ], - "Report": { - "Warnings": [], - "Linters": [ - {"Name": "errcheck", "Enabled": true, "EnabledByDefault": true}, - {"Name": "gosimple", "Enabled": true, "EnabledByDefault": true}, - {"Name": "govet", "Enabled": true, "EnabledByDefault": true}, - {"Name": "unused", "Enabled": true, "EnabledByDefault": true} - ] - } -}"#; + let raw = include_str!("../tests/fixtures/golangci_v2_json.txt"); let filtered = filter_golangci_json(raw, 2); let savings = 100.0 - (count_tokens(&filtered) as f64 / count_tokens(raw) as f64 * 100.0); diff --git a/tests/fixtures/golangci_v2_json.txt b/tests/fixtures/golangci_v2_json.txt new file mode 100644 index 00000000..959b27f4 --- /dev/null +++ b/tests/fixtures/golangci_v2_json.txt @@ -0,0 +1,144 @@ +{ + "Issues": [ + { + "FromLinter": "errcheck", + "Text": "Error return value of `foo` is not checked", + "Severity": "error", + "SourceLines": [ + " if err := foo(); err != nil {", + " return err", + " }" + ], + "Pos": { + "Filename": "pkg/handler/server.go", + "Line": 42, + "Column": 5, + "Offset": 1024 + 
}, + "Replacement": null, + "ExpectNoLint": false, + "ExpectedNoLintLinter": "" + }, + { + "FromLinter": "errcheck", + "Text": "Error return value of `bar` is not checked", + "Severity": "error", + "SourceLines": [ + " bar()", + " return nil", + "}" + ], + "Pos": { + "Filename": "pkg/handler/server.go", + "Line": 55, + "Column": 2, + "Offset": 2048 + }, + "Replacement": null, + "ExpectNoLint": false, + "ExpectedNoLintLinter": "" + }, + { + "FromLinter": "gosimple", + "Text": "S1003: should replace strings.Index with strings.Contains", + "Severity": "warning", + "SourceLines": [ + " if strings.Index(s, sub) >= 0 {", + " return true", + " }" + ], + "Pos": { + "Filename": "pkg/utils/strings.go", + "Line": 15, + "Column": 2, + "Offset": 512 + }, + "Replacement": null, + "ExpectNoLint": false, + "ExpectedNoLintLinter": "" + }, + { + "FromLinter": "govet", + "Text": "printf: Sprintf format %s has arg of wrong type int", + "Severity": "error", + "SourceLines": [ + " fmt.Sprintf(\"%s\", 42)" + ], + "Pos": { + "Filename": "cmd/main/main.go", + "Line": 10, + "Column": 3, + "Offset": 256 + }, + "Replacement": null, + "ExpectNoLint": false, + "ExpectedNoLintLinter": "" + }, + { + "FromLinter": "unused", + "Text": "func `unusedHelper` is unused", + "Severity": "warning", + "SourceLines": [ + "func unusedHelper() {", + " // implementation", + "}" + ], + "Pos": { + "Filename": "internal/helpers.go", + "Line": 100, + "Column": 1, + "Offset": 4096 + }, + "Replacement": null, + "ExpectNoLint": false, + "ExpectedNoLintLinter": "" + }, + { + "FromLinter": "errcheck", + "Text": "Error return value of `close` is not checked", + "Severity": "error", + "SourceLines": [ + " defer file.Close()" + ], + "Pos": { + "Filename": "pkg/handler/server.go", + "Line": 120, + "Column": 10, + "Offset": 3072 + }, + "Replacement": null, + "ExpectNoLint": false, + "ExpectedNoLintLinter": "" + }, + { + "FromLinter": "gosimple", + "Text": "S1005: should omit nil check", + "Severity": "warning", + 
"SourceLines": [ + " if m != nil {", + " for k, v := range m {", + " process(k, v)", + " }", + " }" + ], + "Pos": { + "Filename": "pkg/utils/strings.go", + "Line": 45, + "Column": 1, + "Offset": 1536 + }, + "Replacement": null, + "ExpectNoLint": false, + "ExpectedNoLintLinter": "" + } + ], + "Report": { + "Warnings": [], + "Linters": [ + {"Name": "errcheck", "Enabled": true, "EnabledByDefault": true}, + {"Name": "gosimple", "Enabled": true, "EnabledByDefault": true}, + {"Name": "govet", "Enabled": true, "EnabledByDefault": true}, + {"Name": "unused", "Enabled": true, "EnabledByDefault": true} + ] + } +} From cadbb231af370cd36d74a8ebc5aef732e9c3bce3 Mon Sep 17 00:00:00 2001 From: Daniel Marbach Date: Fri, 20 Mar 2026 11:16:19 +0100 Subject: [PATCH 18/30] Fix trx injection for dotnet tests running with various Microsoft Testing Platfform modes Signed-off-by: Daniel Marbach --- src/dotnet_cmd.rs | 575 +++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 564 insertions(+), 11 deletions(-) diff --git a/src/dotnet_cmd.rs b/src/dotnet_cmd.rs index 07bc0d3a..dde3bba5 100644 --- a/src/dotnet_cmd.rs +++ b/src/dotnet_cmd.rs @@ -4,6 +4,9 @@ use crate::dotnet_trx; use crate::tracking; use crate::utils::{resolved_command, truncate}; use anyhow::{Context, Result}; +use quick_xml::events::Event; +use quick_xml::Reader; +use serde_json::Value; use std::ffi::OsString; use std::path::{Path, PathBuf}; use std::sync::atomic::{AtomicU64, Ordering}; @@ -492,25 +495,56 @@ fn build_effective_dotnet_args( effective.push("-v:minimal".to_string()); } - if !has_nologo_arg(args) { + let runner_mode = if subcommand == "test" { + detect_test_runner_mode(args) + } else { + TestRunnerMode::Classic + }; + + // --nologo: skip for MtpNative — args pass directly to the MTP runtime which + // does not understand MSBuild/VSTest flags. 
+ if runner_mode != TestRunnerMode::MtpNative && !has_nologo_arg(args) { effective.push("-nologo".to_string()); } if subcommand == "test" { - if !has_trx_logger_arg(args) { - effective.push("--logger".to_string()); - effective.push("trx".to_string()); - } - - if !has_results_directory_arg(args) { - if let Some(results_dir) = trx_results_dir { - effective.push("--results-directory".to_string()); - effective.push(results_dir.display().to_string()); + match runner_mode { + TestRunnerMode::Classic => { + if !has_trx_logger_arg(args) { + effective.push("--logger".to_string()); + effective.push("trx".to_string()); + } + if !has_results_directory_arg(args) { + if let Some(results_dir) = trx_results_dir { + effective.push("--results-directory".to_string()); + effective.push(results_dir.display().to_string()); + } + } + effective.extend(args.iter().cloned()); + } + TestRunnerMode::MtpNative => { + // In .NET 10 native MTP mode, --report-trx is a direct dotnet test flag. + // Modern MTP frameworks (TUnit 1.19.74+, MSTest, xUnit with MTP runner) + // include Microsoft.Testing.Extensions.TrxReport natively. + if !has_report_trx_arg(args) { + effective.push("--report-trx".to_string()); + } + effective.extend(args.iter().cloned()); + } + TestRunnerMode::MtpVsTestBridge => { + // In VsTestBridge mode (supported on .NET 9 SDK and earlier), --report-trx + // goes after the -- separator so it reaches the MTP runtime. + if !has_report_trx_arg(args) { + effective.extend(inject_report_trx_into_args(args)); + } else { + effective.extend(args.iter().cloned()); + } } } + } else { + effective.extend(args.iter().cloned()); } - effective.extend(args.iter().cloned()); effective } @@ -533,6 +567,176 @@ fn has_verbosity_arg(args: &[String]) -> bool { }) } +/// How the targeted test project(s) run tests — determines which TRX injection strategy to use. +#[derive(Debug, PartialEq)] +enum TestRunnerMode { + /// Classic VSTest runner. Inject `--logger trx --results-directory`. 
+ Classic, + /// Native MTP runner (`UseMicrosoftTestingPlatformRunner`, `UseTestingPlatformRunner`, or + /// global.json MTP mode). `--logger trx` breaks the run; inject `--report-trx` directly. + MtpNative, + /// VSTest bridge for MTP (`TestingPlatformDotnetTestSupport=true`). `--logger trx` is + /// silently ignored; MTP args must come after `--`. Inject `-- --report-trx`. + MtpVsTestBridge, +} + +/// Which MTP-related property a single MSBuild file declares. +#[derive(Debug, PartialEq)] +enum MtpProjectKind { + None, + VsTestBridge, // UseMicrosoftTestingPlatformRunner | UseTestingPlatformRunner | TestingPlatformDotnetTestSupport +} + +/// Scans a single MSBuild file (.csproj / .fsproj / .vbproj / Directory.Build.props) for +/// MTP-related properties and returns which kind it is. +fn scan_mtp_kind_in_file(path: &Path) -> MtpProjectKind { + let content = match std::fs::read_to_string(path) { + Ok(c) => c, + Err(_) => return MtpProjectKind::None, + }; + + let mut reader = Reader::from_str(&content); + reader.config_mut().trim_text(true); + let mut buf = Vec::new(); + let mut inside_mtp_element = false; + + loop { + match reader.read_event_into(&mut buf) { + Ok(Event::Start(e)) => { + let name_lower = e.local_name().as_ref().to_ascii_lowercase(); + // All project-file MTP properties run in VSTest bridge mode and require + // MTP-specific args to come after `--`. Only global.json MTP mode is native. 
+ inside_mtp_element = matches!( + name_lower.as_slice(), + b"usemicrosofttestingplatformrunner" + | b"usetestingplatformrunner" + | b"testingplatformdotnettestsupport" + ); + } + Ok(Event::Text(e)) => { + if inside_mtp_element { + if let Ok(text) = e.unescape() { + if text.trim().eq_ignore_ascii_case("true") { + return MtpProjectKind::VsTestBridge; + } + } + } + } + Ok(Event::End(_)) => inside_mtp_element = false, + Ok(Event::Eof) => break, + Err(_) => break, + _ => {} + } + buf.clear(); + } + + MtpProjectKind::None +} + +fn parse_global_json_mtp_mode(path: &Path) -> bool { + let Ok(content) = std::fs::read_to_string(path) else { + return false; + }; + let Ok(json) = serde_json::from_str::(&content) else { + return false; + }; + json.get("test") + .and_then(|t| t.get("runner")) + .and_then(|r| r.as_str()) + .is_some_and(|r| r.eq_ignore_ascii_case("Microsoft.Testing.Platform")) +} + +/// Checks whether the `global.json` closest to the current directory enables the .NET 10 +/// native MTP mode (`"test": { "runner": "Microsoft.Testing.Platform" }`). +fn is_global_json_mtp_mode() -> bool { + let Ok(mut dir) = std::env::current_dir() else { + return false; + }; + loop { + let path = dir.join("global.json"); + if path.exists() { + let is_mtp = parse_global_json_mtp_mode(&path); + return is_mtp; // stop at first global.json found, regardless of result + } + if !dir.pop() { + break; + } + } + false +} + +/// Detects which test runner mode the targeted project(s) use. +/// +/// Priority order: global.json (MtpNative) > project-file/Directory.Build.props (MtpVsTestBridge) > Classic. +/// `global.json` MTP mode is checked first because it overrides all project-level properties. +fn detect_test_runner_mode(args: &[String]) -> TestRunnerMode { + // global.json MTP mode takes overall precedence — when set, dotnet test runs MTP + // natively regardless of project file properties. 
+ if is_global_json_mtp_mode() { + return TestRunnerMode::MtpNative; + } + + let project_extensions = ["csproj", "fsproj", "vbproj"]; + + let explicit_projects: Vec<&str> = args + .iter() + .map(String::as_str) + .filter(|a| { + let lower = a.to_ascii_lowercase(); + project_extensions + .iter() + .any(|ext| lower.ends_with(&format!(".{ext}"))) + }) + .collect(); + + let mut found = MtpProjectKind::None; + + if !explicit_projects.is_empty() { + for p in &explicit_projects { + if scan_mtp_kind_in_file(Path::new(p)) == MtpProjectKind::VsTestBridge { + found = MtpProjectKind::VsTestBridge; + } + } + } else { + // No explicit project — scan current directory. + if let Ok(entries) = std::fs::read_dir(".") { + for entry in entries.flatten() { + let name = entry.file_name(); + let name_str = name.to_string_lossy().to_ascii_lowercase(); + if project_extensions + .iter() + .any(|ext| name_str.ends_with(&format!(".{ext}"))) + && scan_mtp_kind_in_file(&entry.path()) == MtpProjectKind::VsTestBridge + { + found = MtpProjectKind::VsTestBridge; + } + } + } + } + + if found == MtpProjectKind::VsTestBridge { + return TestRunnerMode::MtpVsTestBridge; + } + + // Walk up from current directory looking for Directory.Build.props. + if let Ok(mut dir) = std::env::current_dir() { + loop { + let props = dir.join("Directory.Build.props"); + if props.exists() { + if scan_mtp_kind_in_file(&props) == MtpProjectKind::VsTestBridge { + return TestRunnerMode::MtpVsTestBridge; + } + break; // only read the first (closest) Directory.Build.props + } + if !dir.pop() { + break; + } + } + } + + TestRunnerMode::Classic +} + fn has_nologo_arg(args: &[String]) -> bool { args.iter() .any(|arg| matches!(arg.to_ascii_lowercase().as_str(), "-nologo" | "/nologo")) @@ -578,6 +782,25 @@ fn has_report_arg(args: &[String]) -> bool { }) } +fn has_report_trx_arg(args: &[String]) -> bool { + args.iter().any(|a| a.eq_ignore_ascii_case("--report-trx")) +} + +/// Injects `--report-trx` after the `--` separator in `args`. 
+/// If no `--` separator exists, appends `-- --report-trx` at the end. +fn inject_report_trx_into_args(args: &[String]) -> Vec { + if let Some(sep) = args.iter().position(|a| a == "--") { + let mut result = args.to_vec(); + result.insert(sep + 1, "--report-trx".to_string()); + result + } else { + let mut result = args.to_vec(); + result.push("--".to_string()); + result.push("--report-trx".to_string()); + result + } +} + fn extract_report_arg(args: &[String]) -> Option { let mut iter = args.iter().peekable(); while let Some(arg) = iter.next() { @@ -1474,6 +1697,336 @@ mod tests { .any(|w| w[0] == "--results-directory" && w[1] == "/custom/results")); } + #[test] + fn test_scan_mtp_kind_detects_use_microsoft_testing_platform_runner() { + let temp_dir = tempfile::tempdir().expect("create temp dir"); + let csproj = temp_dir.path().join("MyProject.csproj"); + fs::write( + &csproj, + r#" + + true + +"#, + ) + .expect("write csproj"); + + assert_eq!(scan_mtp_kind_in_file(&csproj), MtpProjectKind::VsTestBridge); + } + + #[test] + fn test_scan_mtp_kind_detects_use_testing_platform_runner() { + let temp_dir = tempfile::tempdir().expect("create temp dir"); + let csproj = temp_dir.path().join("MyProject.csproj"); + fs::write( + &csproj, + r#" + + true + +"#, + ) + .expect("write csproj"); + + assert_eq!(scan_mtp_kind_in_file(&csproj), MtpProjectKind::VsTestBridge); + } + + #[test] + fn test_is_mtp_project_file_returns_false_for_classic_vstest() { + let temp_dir = tempfile::tempdir().expect("create temp dir"); + let csproj = temp_dir.path().join("MyProject.csproj"); + fs::write( + &csproj, + r#" + + net9.0 + + + + +"#, + ) + .expect("write csproj"); + + assert_eq!(scan_mtp_kind_in_file(&csproj), MtpProjectKind::None); + } + + #[test] + fn test_scan_mtp_kind_returns_none_when_value_is_false() { + let temp_dir = tempfile::tempdir().expect("create temp dir"); + let csproj = temp_dir.path().join("MyProject.csproj"); + fs::write( + &csproj, + r#" + + false + +"#, + ) + 
.expect("write csproj"); + + assert_eq!(scan_mtp_kind_in_file(&csproj), MtpProjectKind::None); + } + + #[test] + fn test_scan_mtp_kind_detects_vstest_bridge() { + let temp_dir = tempfile::tempdir().expect("create temp dir"); + let csproj = temp_dir.path().join("MSTest.Tests.csproj"); + fs::write( + &csproj, + r#" + + true + +"#, + ) + .expect("write csproj"); + + assert_eq!(scan_mtp_kind_in_file(&csproj), MtpProjectKind::VsTestBridge); + } + + #[test] + fn test_both_mtp_properties_in_same_file_still_vstest_bridge() { + let temp_dir = tempfile::tempdir().expect("create temp dir"); + let csproj = temp_dir.path().join("Hybrid.Tests.csproj"); + fs::write( + &csproj, + r#" + + true + true + +"#, + ) + .expect("write csproj"); + + // All project-file properties → VsTestBridge; only global.json gives MtpNative + assert_eq!(scan_mtp_kind_in_file(&csproj), MtpProjectKind::VsTestBridge); + } + + #[test] + fn test_detect_mode_mtp_csproj_is_vstest_bridge_injects_report_trx() { + let temp_dir = tempfile::tempdir().expect("create temp dir"); + let csproj = temp_dir.path().join("MTP.Tests.csproj"); + fs::write( + &csproj, + r#" + + true + +"#, + ) + .expect("write csproj"); + + let args = vec![csproj.display().to_string()]; + assert_eq!( + detect_test_runner_mode(&args), + TestRunnerMode::MtpVsTestBridge + ); + + let binlog_path = Path::new("/tmp/test.binlog"); + let injected = build_effective_dotnet_args("test", &args, binlog_path, None); + + // MTP VsTestBridge → --report-trx injected after --, no VSTest --logger trx + assert!(!injected.contains(&"--logger".to_string())); + assert!(injected.contains(&"--report-trx".to_string())); + assert!(injected.contains(&"--".to_string())); + } + + #[test] + fn test_detect_mode_vstest_bridge_injects_report_trx() { + let temp_dir = tempfile::tempdir().expect("create temp dir"); + let csproj = temp_dir.path().join("MSTest.Tests.csproj"); + fs::write( + &csproj, + r#" + + true + +"#, + ) + .expect("write csproj"); + + let args = 
vec![csproj.display().to_string()]; + assert_eq!( + detect_test_runner_mode(&args), + TestRunnerMode::MtpVsTestBridge + ); + + let binlog_path = Path::new("/tmp/test.binlog"); + let injected = build_effective_dotnet_args("test", &args, binlog_path, None); + + // --report-trx injected after --, --nologo supported in bridge mode + assert!(!injected.contains(&"--logger".to_string())); + assert!(injected.contains(&"--report-trx".to_string())); + assert!(injected.contains(&"--".to_string())); + assert!(injected.contains(&"-nologo".to_string())); + } + + #[test] + fn test_parse_global_json_mtp_mode_detects_mtp_native() { + let temp_dir = tempfile::tempdir().expect("create temp dir"); + let global_json = temp_dir.path().join("global.json"); + fs::write( + &global_json, + r#"{"sdk":{"version":"10.0.100"},"test":{"runner":"Microsoft.Testing.Platform"}}"#, + ) + .expect("write global.json"); + + assert!(parse_global_json_mtp_mode(&global_json)); + } + + #[test] + fn test_vstest_bridge_injects_report_trx_after_separator() { + let temp_dir = tempfile::tempdir().expect("create temp dir"); + let csproj = temp_dir.path().join("MTP.Tests.csproj"); + fs::write( + &csproj, + r#" + + true + +"#, + ) + .expect("write csproj"); + + let args = vec![csproj.display().to_string()]; + assert_eq!( + detect_test_runner_mode(&args), + TestRunnerMode::MtpVsTestBridge + ); + + let binlog_path = Path::new("/tmp/test.binlog"); + let injected = build_effective_dotnet_args("test", &args, binlog_path, None); + + // VsTestBridge → inject -- --report-trx after user args + assert!(injected.contains(&"--".to_string())); + assert!(injected.contains(&"--report-trx".to_string())); + let sep_pos = injected.iter().position(|a| a == "--").unwrap(); + let trx_pos = injected.iter().position(|a| a == "--report-trx").unwrap(); + assert!(sep_pos < trx_pos); + // No VSTest logger + assert!(!injected.contains(&"--logger".to_string())); + } + + #[test] + fn 
test_vstest_bridge_existing_separator_inserts_report_trx_after_it() { + let temp_dir = tempfile::tempdir().expect("create temp dir"); + let csproj = temp_dir.path().join("MTP.Tests.csproj"); + fs::write( + &csproj, + r#" + + true + +"#, + ) + .expect("write csproj"); + + let args = vec![ + csproj.display().to_string(), + "--".to_string(), + "--parallel".to_string(), + ]; + let binlog_path = Path::new("/tmp/test.binlog"); + let injected = build_effective_dotnet_args("test", &args, binlog_path, None); + + // --report-trx inserted right after existing -- + let sep_pos = injected.iter().position(|a| a == "--").unwrap(); + assert_eq!(injected[sep_pos + 1], "--report-trx"); + assert!(injected.contains(&"--parallel".to_string())); + } + + #[test] + fn test_vstest_bridge_respects_existing_report_trx() { + let temp_dir = tempfile::tempdir().expect("create temp dir"); + let csproj = temp_dir.path().join("MTP.Tests.csproj"); + fs::write( + &csproj, + r#" + + true + +"#, + ) + .expect("write csproj"); + + let args = vec![ + csproj.display().to_string(), + "--".to_string(), + "--report-trx".to_string(), + ]; + let binlog_path = Path::new("/tmp/test.binlog"); + let injected = build_effective_dotnet_args("test", &args, binlog_path, None); + + // Should not double-inject + assert_eq!(injected.iter().filter(|a| *a == "--report-trx").count(), 1); + } + + #[test] + fn test_detect_mode_classic_csproj_injects_trx() { + let temp_dir = tempfile::tempdir().expect("create temp dir"); + let csproj = temp_dir.path().join("Classic.Tests.csproj"); + fs::write( + &csproj, + r#" + + net9.0 + +"#, + ) + .expect("write csproj"); + + let args = vec![csproj.display().to_string()]; + assert_eq!(detect_test_runner_mode(&args), TestRunnerMode::Classic); + + let binlog_path = Path::new("/tmp/test.binlog"); + let trx_dir = Path::new("/tmp/test_results"); + let injected = build_effective_dotnet_args("test", &args, binlog_path, Some(trx_dir)); + assert!(injected.contains(&"--logger".to_string())); + 
assert!(injected.contains(&"trx".to_string())); + } + + #[test] + fn test_detect_mode_directory_build_props_vstest_bridge() { + let temp_dir = tempfile::tempdir().expect("create temp dir"); + let props = temp_dir.path().join("Directory.Build.props"); + fs::write( + &props, + r#" + + true + +"#, + ) + .expect("write Directory.Build.props"); + + assert_eq!(scan_mtp_kind_in_file(&props), MtpProjectKind::VsTestBridge); + } + + #[test] + fn test_is_global_json_mtp_mode_detects_mtp_runner() { + let temp_dir = tempfile::tempdir().expect("create temp dir"); + let global_json = temp_dir.path().join("global.json"); + fs::write( + &global_json, + r#"{ "sdk": { "version": "10.0.100" }, "test": { "runner": "Microsoft.Testing.Platform" } }"#, + ) + .expect("write global.json"); + + assert!(parse_global_json_mtp_mode(&global_json)); + } + + #[test] + fn test_is_global_json_mtp_mode_returns_false_for_vstest_runner() { + let temp_dir = tempfile::tempdir().expect("create temp dir"); + let global_json = temp_dir.path().join("global.json"); + fs::write(&global_json, r#"{ "sdk": { "version": "9.0.100" } }"#) + .expect("write global.json"); + + assert!(!parse_global_json_mtp_mode(&global_json)); + } + #[test] fn test_merge_test_summary_from_trx_uses_primary_and_cleans_file() { let temp_dir = tempfile::tempdir().expect("create temp dir"); From f7b09fc86a693acf2b52954215ff0c4e6c5d03f9 Mon Sep 17 00:00:00 2001 From: Ousama Ben Younes Date: Thu, 19 Mar 2026 13:50:57 +0000 Subject: [PATCH 19/30] fix(formatter): show full error message for test failures (#690) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit format_compact() was truncating error messages to 2 lines via .take(2). Playwright (and vitest, pytest, cargo test) errors contain the critical expected/received diff and call log starting at line 3+, so agents saw only the error type with no actionable debug information. 
Fix: iterate all lines of error_message for each failure instead of taking the first 2. Summary line stays compact; only the per-failure detail is preserved in full. Affects all test runners using TestResult (playwright, vitest, cargo, pytest, dotnet) — any test failure is now fully visible. Closes #690 Signed-off-by: Ousama Ben Younes --- src/parser/formatter.rs | 95 ++++++++++++++++++++++++++++++++++++++--- 1 file changed, 88 insertions(+), 7 deletions(-) diff --git a/src/parser/formatter.rs b/src/parser/formatter.rs index bf9f693e..b41280e2 100644 --- a/src/parser/formatter.rs +++ b/src/parser/formatter.rs @@ -51,13 +51,9 @@ impl TokenFormatter for TestResult { lines.push(String::new()); for (idx, failure) in self.failures.iter().enumerate().take(5) { lines.push(format!("{}. {}", idx + 1, failure.test_name)); - let error_preview: String = failure - .error_message - .lines() - .take(2) - .collect::>() - .join(" "); - lines.push(format!(" {}", error_preview)); + for line in failure.error_message.lines() { + lines.push(format!(" {}", line)); + } } if self.failures.len() > 5 { @@ -334,3 +330,88 @@ impl TokenFormatter for BuildOutput { ) } } + +#[cfg(test)] +mod tests { + use super::*; + use crate::parser::types::{TestFailure, TestResult}; + + fn make_failure(name: &str, error: &str) -> TestFailure { + TestFailure { + test_name: name.to_string(), + file_path: "tests/e2e.spec.ts".to_string(), + error_message: error.to_string(), + stack_trace: None, + } + } + + fn make_result(passed: usize, failures: Vec) -> TestResult { + TestResult { + total: passed + failures.len(), + passed, + failed: failures.len(), + skipped: 0, + duration_ms: Some(1500), + failures, + } + } + + // RED: format_compact must show the full error message, not just 2 lines. + // Playwright errors contain the expected/received diff and call log starting + // at line 3+. Truncating to 2 lines leaves the agent with no debug info. 
+ #[test] + fn test_compact_shows_full_error_message() { + let error = "Error: expect(locator).toHaveText(expected)\n\nExpected: 'Submit'\nReceived: 'Loading'\n\nCall log:\n - waiting for getByRole('button', { name: 'Submit' })"; + let result = make_result(5, vec![make_failure("should click submit", error)]); + + let output = result.format_compact(); + + assert!( + output.contains("Expected: 'Submit'"), + "format_compact must preserve expected/received diff\nGot:\n{output}" + ); + assert!( + output.contains("Received: 'Loading'"), + "format_compact must preserve received value\nGot:\n{output}" + ); + assert!( + output.contains("Call log:"), + "format_compact must preserve call log\nGot:\n{output}" + ); + } + + // RED: summary line stays compact regardless of failure detail + #[test] + fn test_compact_summary_line_is_concise() { + let result = make_result(28, vec![make_failure("test", "some error")]); + let output = result.format_compact(); + let first_line = output.lines().next().unwrap_or(""); + assert!( + first_line.contains("28") && first_line.contains("1"), + "First line must show pass/fail counts, got: {first_line}" + ); + } + + // RED: all-pass output stays compact (no failure detail bloat) + #[test] + fn test_compact_all_pass_is_one_line() { + let result = make_result(10, vec![]); + let output = result.format_compact(); + assert!( + output.lines().count() <= 3, + "All-pass output should be compact, got {} lines:\n{output}", + output.lines().count() + ); + } + + // RED: error_message with only 1 line still works (no trailing noise) + #[test] + fn test_compact_single_line_error_no_trailing_noise() { + let result = make_result(0, vec![make_failure("should work", "Timeout exceeded")]); + let output = result.format_compact(); + assert!( + output.contains("Timeout exceeded"), + "Single-line error must appear\nGot:\n{output}" + ); + } +} From b4ccf046f59ce6ed1396e4d8c46f8a35152d6d09 Mon Sep 17 00:00:00 2001 From: Ousama Ben Younes Date: Sat, 21 Mar 2026 15:53:06 
+0000 Subject: [PATCH 20/30] fix(playwright): add tee_and_hint pass-through on failure (#690) Add crate::tee::tee_and_hint call to playwright_cmd::run(), matching the pattern used by all other test runners (vitest, pytest, cargo, go, rspec, rake). When playwright fails, the full raw output is saved to ~/.local/share/rtk/tee/ and a one-line hint is appended so the LLM can read the complete output without re-running. Co-Authored-By: Claude Sonnet 4.6 Signed-off-by: Ousama Ben Younes --- src/playwright_cmd.rs | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/src/playwright_cmd.rs b/src/playwright_cmd.rs index c553bcc2..ce6f0fe7 100644 --- a/src/playwright_cmd.rs +++ b/src/playwright_cmd.rs @@ -314,7 +314,12 @@ pub fn run(args: &[String], verbose: u8) -> Result<()> { } }; - println!("{}", filtered); + let exit_code = output.status.code().unwrap_or(1); + if let Some(hint) = crate::tee::tee_and_hint(&raw, "playwright", exit_code) { + println!("{}\n{}", filtered, hint); + } else { + println!("{}", filtered); + } timer.track( &format!("playwright {}", args.join(" ")), @@ -325,7 +330,7 @@ pub fn run(args: &[String], verbose: u8) -> Result<()> { // Preserve exit code for CI/CD if !output.status.success() { - std::process::exit(output.status.code().unwrap_or(1)); + std::process::exit(exit_code); } Ok(()) From 4a79e48ee5b6f3aec8aa9391f21245a270ae11bf Mon Sep 17 00:00:00 2001 From: aesoft <43991222+aeppling@users.noreply.github.com> Date: Sat, 21 Mar 2026 17:12:49 +0100 Subject: [PATCH 21/30] Add files via upload --- LICENSE | 211 ++++++++++++++++++++++++++++++++++++++++++++++++++------ 1 file changed, 190 insertions(+), 21 deletions(-) diff --git a/LICENSE b/LICENSE index 5c5efcd4..0afaf4b9 100644 --- a/LICENSE +++ b/LICENSE @@ -1,21 +1,190 @@ -MIT License - -Copyright (c) 2024 Patrick Szymkowiak - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal 
-in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2024 rtk-ai and rtk-ai Labs + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ See the License for the specific language governing permissions and + limitations under the License. From da999c8364252995b7553354207ee466c04890ec Mon Sep 17 00:00:00 2001 From: aesoft <43991222+aeppling@users.noreply.github.com> Date: Sat, 21 Mar 2026 17:16:33 +0100 Subject: [PATCH 22/30] Update ci.yml --- .github/workflows/ci.yml | 8 -------- 1 file changed, 8 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index eae6866c..bad4b5d6 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -201,14 +201,6 @@ jobs: - name: Run benchmark run: ./scripts/benchmark.sh - # ─── DCO: develop PRs only ─── - - check: - name: check - if: github.base_ref == 'develop' - runs-on: ubuntu-latest - steps: - - uses: KineticCafe/actions-dco@v1 # ─── AI Doc Review: develop PRs only ─── From 4b2179e587171b50dafab7c32e9fa7d9aad77dd6 Mon Sep 17 00:00:00 2001 From: aesoft <43991222+aeppling@users.noreply.github.com> Date: Sat, 21 Mar 2026 17:17:55 +0100 Subject: [PATCH 23/30] Update CONTRIBUTING.md --- CONTRIBUTING.md | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 0ecb18c8..3221a21b 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -89,15 +89,16 @@ Every change **must** include tests. See [Testing](#testing) below. Every change **must** include documentation updates. See [Documentation](#documentation) below. -### Developer Certificate of Origin (DCO) +### Contributor License Agreement (CLA) -All contributions must be signed off (git commit -s) to certify -you have the right to submit the code under the project's license. +All contributions require signing our [Contributor License Agreement (CLA)](CLA.md) before being merged. -Expected format: Signed-off-by: Your Name your@email.com -https://developercertificate.org/ +By signing, you certify that: +- You have authored 100% of the contribution, or have the necessary rights to submit it. 
+- You grant **rtk-ai** and **rtk-ai Labs** a perpetual, worldwide, royalty-free license to use your contribution — including in commercial products such as **rtk Pro** — under the [Apache License 2.0](LICENSE). +- If your employer has rights over your work, you have obtained their permission. -By signing off, you agree to the DCO. +**This is automatic.** When you open a Pull Request, [CLA Assistant](https://cla-assistant.io) will post a comment asking you to sign. Click the link in that comment to sign with your GitHub account. You only need to sign once. ### 5. Merge into `develop` From fdeb09fb93564e795711e9a531d2e2e20187c3a7 Mon Sep 17 00:00:00 2001 From: Matt Van Horn <455140+mvanhorn@users.noreply.github.com> Date: Sat, 21 Mar 2026 13:54:17 -0700 Subject: [PATCH 24/30] fix(gh): passthrough --comments flag in issue/pr view Add --comments to the passthrough trigger list in should_passthrough_pr_view(). The --comments flag changes gh output to include comments, but the rtk filter hardcodes a JSON field list without 'comments', silently discarding all comment data. This misleads AI agents into concluding issues have no responses. Add should_passthrough_issue_view() for view_issue(), which previously had no passthrough check at all. Both functions now passthrough on --comments, --json, --jq, and --web. 
Fixes #720 Signed-off-by: Matt Van Horn <455140+mvanhorn@users.noreply.github.com> --- src/gh_cmd.rs | 52 ++++++++++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 49 insertions(+), 3 deletions(-) diff --git a/src/gh_cmd.rs b/src/gh_cmd.rs index 9073c7e0..325adf5b 100644 --- a/src/gh_cmd.rs +++ b/src/gh_cmd.rs @@ -286,7 +286,13 @@ fn list_prs(args: &[String], _verbose: u8, ultra_compact: bool) -> Result<()> { fn should_passthrough_pr_view(extra_args: &[String]) -> bool { extra_args .iter() - .any(|a| a == "--json" || a == "--jq" || a == "--web") + .any(|a| a == "--json" || a == "--jq" || a == "--web" || a == "--comments") +} + +fn should_passthrough_issue_view(extra_args: &[String]) -> bool { + extra_args + .iter() + .any(|a| a == "--json" || a == "--jq" || a == "--web" || a == "--comments") } fn view_pr(args: &[String], _verbose: u8, ultra_compact: bool) -> Result<()> { @@ -680,6 +686,13 @@ fn view_issue(args: &[String], _verbose: u8) -> Result<()> { None => return Err(anyhow::anyhow!("Issue number required")), }; + // Passthrough when --comments, --json, --jq, or --web is present. + // --comments changes the output to include comments which our JSON + // field list doesn't request, causing silent data loss. 
+ if should_passthrough_issue_view(&extra_args) { + return run_passthrough_with_extra("gh", &["issue", "view", &issue_number], &extra_args); + } + let mut cmd = resolved_command("gh"); cmd.args([ "issue", @@ -1488,8 +1501,41 @@ mod tests { } #[test] - fn test_should_passthrough_pr_view_other_flags() { - assert!(!should_passthrough_pr_view(&["--comments".into()])); + fn test_should_passthrough_pr_view_comments() { + assert!(should_passthrough_pr_view(&["--comments".into()])); + } + + // --- should_passthrough_issue_view tests --- + + #[test] + fn test_should_passthrough_issue_view_comments() { + assert!(should_passthrough_issue_view(&["--comments".into()])); + } + + #[test] + fn test_should_passthrough_issue_view_json() { + assert!(should_passthrough_issue_view(&[ + "--json".into(), + "body,comments".into() + ])); + } + + #[test] + fn test_should_passthrough_issue_view_jq() { + assert!(should_passthrough_issue_view(&[ + "--jq".into(), + ".body".into() + ])); + } + + #[test] + fn test_should_passthrough_issue_view_web() { + assert!(should_passthrough_issue_view(&["--web".into()])); + } + + #[test] + fn test_should_passthrough_issue_view_default() { + assert!(!should_passthrough_issue_view(&[])); } // --- filter_markdown_body tests --- From 15366678adeece701f38e91204128b070c0e3fc4 Mon Sep 17 00:00:00 2001 From: aesoft <43991222+aeppling@users.noreply.github.com> Date: Sun, 22 Mar 2026 00:29:53 +0100 Subject: [PATCH 25/30] fix(cicd): pre-release correct tag Was using develop ancestor -> wrong behavior because version happen on master branch --- .github/workflows/cd.yml | 12 +++--------- 1 file changed, 3 insertions(+), 9 deletions(-) diff --git a/.github/workflows/cd.yml b/.github/workflows/cd.yml index 5b01ac30..7311b591 100644 --- a/.github/workflows/cd.yml +++ b/.github/workflows/cd.yml @@ -33,19 +33,13 @@ jobs: - name: Compute version from commits like release please id: tag run: | - # ── Find latest stable tag reachable from HEAD ── - LATEST_TAG="" - for t in 
$(git tag -l 'v[0-9]*.[0-9]*.[0-9]*' --sort=-version:refname | grep -v '-'); do - if git merge-base --is-ancestor "$t" HEAD 2>/dev/null; then - LATEST_TAG="$t"; break - fi - done + LATEST_TAG=$(git tag -l 'v[0-9]*.[0-9]*.[0-9]*' --sort=-version:refname | grep -v '-' | head -1) if [ -z "$LATEST_TAG" ]; then - echo "::error::No stable release tag found in branch history" + echo "::error::No stable release tag found" exit 1 fi LATEST_VERSION="${LATEST_TAG#v}" - echo "Latest ancestor release: $LATEST_TAG" + echo "Latest release: $LATEST_TAG" # ── Analyse conventional commits since that tag ── COMMITS=$(git log "${LATEST_TAG}..HEAD" --format="%s") From 3b94b602ed24b9ecec597ce001e59f325caaadd4 Mon Sep 17 00:00:00 2001 From: aesoft <43991222+aeppling@users.noreply.github.com> Date: Sun, 22 Mar 2026 00:30:37 +0100 Subject: [PATCH 26/30] fix(cicd): explicit fetch tag --- .github/workflows/cd.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/cd.yml b/.github/workflows/cd.yml index 7311b591..1d29a855 100644 --- a/.github/workflows/cd.yml +++ b/.github/workflows/cd.yml @@ -29,6 +29,7 @@ jobs: - uses: actions/checkout@v4 with: fetch-depth: 0 + fetch-tags: true - name: Compute version from commits like release please id: tag From 2ef0690767eb733c705e4de56d02c64696a4acc6 Mon Sep 17 00:00:00 2001 From: Matt Van Horn <455140+mvanhorn@users.noreply.github.com> Date: Sat, 21 Mar 2026 13:51:32 -0700 Subject: [PATCH 27/30] fix(gh): skip compact_diff for --name-only/--stat flags in pr diff When `gh pr diff` is called with output-format-changing flags like --name-only, --stat, --name-status, --numstat, or --shortstat, the output is a plain filename/stat list rather than a unified diff. compact_diff() expects diff headers and hunks, so it produces empty output from these formats. Skip filtering and passthrough directly when any of these flags are present. The output is already compact and doesn't benefit from diff compaction. 
Fixes #730 Signed-off-by: Matt Van Horn <455140+mvanhorn@users.noreply.github.com> --- src/gh_cmd.rs | 56 ++++++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 55 insertions(+), 1 deletion(-) diff --git a/src/gh_cmd.rs b/src/gh_cmd.rs index 325adf5b..2477bbd6 100644 --- a/src/gh_cmd.rs +++ b/src/gh_cmd.rs @@ -1118,6 +1118,18 @@ fn pr_merge(args: &[String], _verbose: u8) -> Result<()> { Ok(()) } +/// Flags that change `gh pr diff` output from unified diff to a different format. +/// When present, compact_diff would produce empty output since it expects diff headers. +fn has_non_diff_format_flag(args: &[String]) -> bool { + args.iter().any(|a| { + a == "--name-only" + || a == "--name-status" + || a == "--stat" + || a == "--numstat" + || a == "--shortstat" + }) +} + fn pr_diff(args: &[String], _verbose: u8) -> Result<()> { // --no-compact: pass full diff through (gh CLI doesn't know this flag, strip it) let no_compact = args.iter().any(|a| a == "--no-compact"); @@ -1127,7 +1139,9 @@ fn pr_diff(args: &[String], _verbose: u8) -> Result<()> { .cloned() .collect(); - if no_compact { + // Passthrough when --no-compact or when a format flag changes output away from + // unified diff (e.g. --name-only produces a filename list, not diff hunks). 
+ if no_compact || has_non_diff_format_flag(&gh_args) { return run_passthrough_with_extra("gh", &["pr", "diff"], &gh_args); } @@ -1538,6 +1552,46 @@ mod tests { assert!(!should_passthrough_issue_view(&[])); } + // --- has_non_diff_format_flag tests --- + + #[test] + fn test_non_diff_format_flag_name_only() { + assert!(has_non_diff_format_flag(&["--name-only".into()])); + } + + #[test] + fn test_non_diff_format_flag_stat() { + assert!(has_non_diff_format_flag(&["--stat".into()])); + } + + #[test] + fn test_non_diff_format_flag_name_status() { + assert!(has_non_diff_format_flag(&["--name-status".into()])); + } + + #[test] + fn test_non_diff_format_flag_numstat() { + assert!(has_non_diff_format_flag(&["--numstat".into()])); + } + + #[test] + fn test_non_diff_format_flag_shortstat() { + assert!(has_non_diff_format_flag(&["--shortstat".into()])); + } + + #[test] + fn test_non_diff_format_flag_absent() { + assert!(!has_non_diff_format_flag(&[])); + } + + #[test] + fn test_non_diff_format_flag_regular_args() { + assert!(!has_non_diff_format_flag(&[ + "123".into(), + "--color=always".into() + ])); + } + // --- filter_markdown_body tests --- #[test] From af56573ae2b234123e4685fd945980e644f40fa3 Mon Sep 17 00:00:00 2001 From: patrick szymkowiak <52030887+pszymkowiak@users.noreply.github.com> Date: Mon, 23 Mar 2026 10:13:44 +0100 Subject: [PATCH 28/30] fix: update Discord invite link (#711) (#786) Replace expired invite links across all README translations. Signed-off-by: Patrick szymkowiak --- README.md | 6 +++--- README_es.md | 6 +++--- README_fr.md | 6 +++--- README_ja.md | 6 +++--- README_ko.md | 6 +++--- README_zh.md | 6 +++--- 6 files changed, 18 insertions(+), 18 deletions(-) diff --git a/README.md b/README.md index 7401256d..7e010b14 100644 --- a/README.md +++ b/README.md @@ -10,7 +10,7 @@ CI Release License: MIT - Discord + Discord Homebrew

@@ -19,7 +19,7 @@ InstallTroubleshootingArchitecture • - Discord + Discord

@@ -414,7 +414,7 @@ brew uninstall rtk # If installed via Homebrew Contributions welcome! Please open an issue or PR on [GitHub](https://github.com/rtk-ai/rtk). -Join the community on [Discord](https://discord.gg/pvHdzAec). +Join the community on [Discord](https://discord.gg/RySmvNF5kF). ## License diff --git a/README_es.md b/README_es.md index c05da936..c099d664 100644 --- a/README_es.md +++ b/README_es.md @@ -10,7 +10,7 @@ CI Release License: MIT - Discord + Discord Homebrew

@@ -19,7 +19,7 @@ InstalarSolucion de problemasArquitectura • - Discord + Discord

@@ -152,7 +152,7 @@ rtk discover # Descubrir ahorros perdidos Las contribuciones son bienvenidas. Abre un issue o PR en [GitHub](https://github.com/rtk-ai/rtk). -Unete a la comunidad en [Discord](https://discord.gg/pvHdzAec). +Unete a la comunidad en [Discord](https://discord.gg/RySmvNF5kF). ## Licencia diff --git a/README_fr.md b/README_fr.md index b8c71734..4c5e749d 100644 --- a/README_fr.md +++ b/README_fr.md @@ -10,7 +10,7 @@ CI Release License: MIT - Discord + Discord Homebrew

@@ -19,7 +19,7 @@ InstallerDepannageArchitecture • - Discord + Discord

@@ -190,7 +190,7 @@ mode = "failures" Les contributions sont les bienvenues ! Ouvrez une issue ou une PR sur [GitHub](https://github.com/rtk-ai/rtk). -Rejoignez la communaute sur [Discord](https://discord.gg/pvHdzAec). +Rejoignez la communaute sur [Discord](https://discord.gg/RySmvNF5kF). ## Licence diff --git a/README_ja.md b/README_ja.md index a6e7dc22..6c690aff 100644 --- a/README_ja.md +++ b/README_ja.md @@ -10,7 +10,7 @@ CI Release License: MIT - Discord + Discord Homebrew

@@ -19,7 +19,7 @@ インストールトラブルシューティングアーキテクチャ • - Discord + Discord

@@ -152,7 +152,7 @@ rtk discover # 見逃した節約機会を発見 コントリビューション歓迎![GitHub](https://github.com/rtk-ai/rtk) で issue または PR を作成してください。 -[Discord](https://discord.gg/pvHdzAec) コミュニティに参加。 +[Discord](https://discord.gg/RySmvNF5kF) コミュニティに参加。 ## ライセンス diff --git a/README_ko.md b/README_ko.md index b9eca724..5d3b1a0b 100644 --- a/README_ko.md +++ b/README_ko.md @@ -10,7 +10,7 @@ CI Release License: MIT - Discord + Discord Homebrew

@@ -19,7 +19,7 @@ 설치문제 해결아키텍처 • - Discord + Discord

@@ -152,7 +152,7 @@ rtk discover # 놓친 절약 기회 발견 기여를 환영합니다! [GitHub](https://github.com/rtk-ai/rtk)에서 issue 또는 PR을 생성해 주세요. -[Discord](https://discord.gg/pvHdzAec) 커뮤니티에 참여하세요. +[Discord](https://discord.gg/RySmvNF5kF) 커뮤니티에 참여하세요. ## 라이선스 diff --git a/README_zh.md b/README_zh.md index bd7fce8d..00b9c001 100644 --- a/README_zh.md +++ b/README_zh.md @@ -10,7 +10,7 @@ CI Release License: MIT - Discord + Discord Homebrew

@@ -19,7 +19,7 @@ 安装故障排除架构 • - Discord + Discord

@@ -160,7 +160,7 @@ rtk discover # 发现遗漏的节省机会 欢迎贡献!请在 [GitHub](https://github.com/rtk-ai/rtk) 上提交 issue 或 PR。 -加入 [Discord](https://discord.gg/pvHdzAec) 社区。 +加入 [Discord](https://discord.gg/RySmvNF5kF) 社区。 ## 许可证 From 0eecee5bf35ffd8b13f36a59ec39bd52626948d3 Mon Sep 17 00:00:00 2001 From: patrick szymkowiak <52030887+pszymkowiak@users.noreply.github.com> Date: Mon, 23 Mar 2026 11:52:26 +0100 Subject: [PATCH 29/30] fix: add telemetry documentation and init notice (#640) (#788) - Add "Privacy & Telemetry" section to README explaining what is collected, what is NOT, and how to opt-out - Show telemetry notice during rtk init so users are informed Signed-off-by: Patrick szymkowiak --- README.md | 22 ++++++++++++++++++++++ src/init.rs | 5 +++++ 2 files changed, 27 insertions(+) diff --git a/README.md b/README.md index 7e010b14..cc2a4e8b 100644 --- a/README.md +++ b/README.md @@ -410,6 +410,28 @@ brew uninstall rtk # If installed via Homebrew - **[SECURITY.md](SECURITY.md)** - Security policy and PR review process - **[AUDIT_GUIDE.md](docs/AUDIT_GUIDE.md)** - Token savings analytics guide +## Privacy & Telemetry + +RTK collects **anonymous, aggregate usage metrics** once per day to help prioritize development. This is standard practice for open-source CLI tools. + +**What is collected:** +- Device hash (SHA-256 of hostname+username, not reversible) +- RTK version, OS, architecture +- Command count (last 24h) and top command names (e.g. "git", "cargo" — no arguments, no file paths) +- Token savings percentage + +**What is NOT collected:** source code, file paths, command arguments, secrets, environment variables, or any personally identifiable information. + +**Opt-out** (any of these): +```bash +# Environment variable +export RTK_TELEMETRY_DISABLED=1 + +# Or in config file (~/.config/rtk/config.toml) +[telemetry] +enabled = false +``` + ## Contributing Contributions welcome! Please open an issue or PR on [GitHub](https://github.com/rtk-ai/rtk). 
diff --git a/src/init.rs b/src/init.rs index 241a7ef5..494bef34 100644 --- a/src/init.rs +++ b/src/init.rs @@ -279,6 +279,11 @@ pub fn run( install_cursor_hooks(verbose)?; } + // Telemetry notice (shown once during init) + println!(); + println!(" [info] Anonymous telemetry is enabled (opt-out: RTK_TELEMETRY_DISABLED=1)"); + println!(" [info] See: https://github.com/rtk-ai/rtk#privacy--telemetry"); + Ok(()) } From e666d3ebd1f40d05d5c88034a384b852daa76fa5 Mon Sep 17 00:00:00 2001 From: Patrick szymkowiak Date: Mon, 23 Mar 2026 16:36:34 +0100 Subject: [PATCH 30/30] docs: add all 9 AI tool integrations to README Document Copilot, Cursor, Codex, Windsurf, Cline, OpenClaw setup. Unified table with install commands for each tool. Updated Quick Start with all agent options. Signed-off-by: Patrick szymkowiak --- README.md | 102 ++++++++++++++++++++++++++++++++++++++++-------------- 1 file changed, 76 insertions(+), 26 deletions(-) diff --git a/README.md b/README.md index cc2a4e8b..6073d5eb 100644 --- a/README.md +++ b/README.md @@ -99,12 +99,15 @@ rtk gain # Should show token savings stats ## Quick Start ```bash -# 1. Install hook for Claude Code (recommended) -rtk init --global -# Follow instructions to register in ~/.claude/settings.json -# Claude Code only by default (use --opencode for OpenCode, --gemini for Gemini CLI) - -# 2. Restart Claude Code, then test +# 1. Install for your AI tool +rtk init -g # Claude Code / Copilot (default) +rtk init -g --gemini # Gemini CLI +rtk init -g --codex # Codex (OpenAI) +rtk init -g --agent cursor # Cursor +rtk init --agent windsurf # Windsurf +rtk init --agent cline # Cline / Roo Code + +# 2. Restart your AI tool, then test git status # Automatically rewritten to rtk git status ``` @@ -291,49 +294,96 @@ rtk init --show # Verify installation After install, **restart Claude Code**. -## Gemini CLI Support (Global) +## Supported AI Tools + +RTK supports 9 AI coding tools. 
Each integration transparently rewrites shell commands to `rtk` equivalents for 60-90% token savings. -RTK supports Gemini CLI via a native Rust hook processor. The hook intercepts `run_shell_command` tool calls and rewrites them to `rtk` equivalents using the same rewrite engine as Claude Code. +| Tool | Install | Method | +|------|---------|--------| +| **Claude Code** | `rtk init -g` | PreToolUse hook (bash) | +| **GitHub Copilot** | `rtk init -g` | PreToolUse hook (`rtk hook copilot`) | +| **Cursor** | `rtk init -g --agent cursor` | preToolUse hook (hooks.json) | +| **Gemini CLI** | `rtk init -g --gemini` | BeforeTool hook (`rtk hook gemini`) | +| **Codex** | `rtk init -g --codex` | AGENTS.md + RTK.md instructions | +| **Windsurf** | `rtk init --agent windsurf` | .windsurfrules (project-scoped) | +| **Cline / Roo Code** | `rtk init --agent cline` | .clinerules (project-scoped) | +| **OpenCode** | `rtk init -g --opencode` | Plugin TS (tool.execute.before) | +| **OpenClaw** | `openclaw plugins install ./openclaw` | Plugin TS (before_tool_call) | + +### Claude Code (default) -**Install Gemini hook:** ```bash -rtk init -g --gemini +rtk init -g # Install hook + RTK.md +rtk init -g --auto-patch # Non-interactive (CI/CD) +rtk init --show # Verify installation +rtk init -g --uninstall # Remove +``` + +### GitHub Copilot (VS Code + CLI) + +```bash +rtk init -g # Same hook as Claude Code +``` + +The hook auto-detects Copilot format (VS Code `runTerminalCommand` or CLI `toolName: bash`) and rewrites commands. Works with both Copilot Chat in VS Code and `copilot` CLI. + +### Cursor + +```bash +rtk init -g --agent cursor ``` -**What it creates:** -- `~/.gemini/hooks/rtk-hook-gemini.sh` (thin wrapper calling `rtk hook gemini`) -- `~/.gemini/GEMINI.md` (RTK awareness instructions) -- Patches `~/.gemini/settings.json` with BeforeTool hook +Creates `~/.cursor/hooks/rtk-rewrite.sh` + patches `~/.cursor/hooks.json` with preToolUse matcher. 
Works with both Cursor editor and `cursor-agent` CLI. + +### Gemini CLI -**Uninstall:** ```bash +rtk init -g --gemini rtk init -g --gemini --uninstall ``` -**Restart Required**: Restart Gemini CLI, then test with `git status` in a session. +Creates `~/.gemini/hooks/rtk-hook-gemini.sh` + patches `~/.gemini/settings.json` with BeforeTool hook. + +### Codex (OpenAI) -## OpenCode Plugin (Global) +```bash +rtk init -g --codex +``` -OpenCode supports plugins that can intercept tool execution. RTK provides a global plugin that mirrors the Claude auto-rewrite behavior by rewriting Bash tool commands to `rtk ...` before they execute. This plugin is **not** installed by default. +Creates `~/.codex/RTK.md` + `~/.codex/AGENTS.md` with `@RTK.md` reference. Codex reads these as global instructions. -> **Note**: This plugin uses OpenCode's `tool.execute.before` hook. Known limitation: plugin hooks do not intercept subagent tool calls ([upstream issue](https://github.com/sst/opencode/issues/5894)). See [OpenCode plugin docs](https://open-code.ai/en/docs/plugins) for API details. +### Windsurf + +```bash +rtk init --agent windsurf +``` + +Creates `.windsurfrules` in the current project. Cascade reads rules and prefixes commands with `rtk`. + +### Cline / Roo Code + +```bash +rtk init --agent cline +``` + +Creates `.clinerules` in the current project. Cline reads rules and prefixes commands with `rtk`. + +### OpenCode -**Install OpenCode plugin:** ```bash rtk init -g --opencode ``` -**What it creates:** -- `~/.config/opencode/plugins/rtk.ts` +Creates `~/.config/opencode/plugins/rtk.ts`. Uses `tool.execute.before` hook. -**Restart Required**: Restart OpenCode, then test with `git status` in a session. +### OpenClaw -**Manual install (fallback):** ```bash -mkdir -p ~/.config/opencode/plugins -cp hooks/opencode-rtk.ts ~/.config/opencode/plugins/rtk.ts +openclaw plugins install ./openclaw ``` +Plugin in `openclaw/` directory. Uses `before_tool_call` hook, delegates to `rtk rewrite`. 
+ ### Commands Rewritten | Raw Command | Rewritten To |