diff --git a/.claude/skills/cli-toolbox_publish/SKILL.md b/.claude/skills/cli-toolbox_publish/SKILL.md index 9702d52..03a1bcf 100644 --- a/.claude/skills/cli-toolbox_publish/SKILL.md +++ b/.claude/skills/cli-toolbox_publish/SKILL.md @@ -23,7 +23,7 @@ Parse `$ARGUMENTS` to determine: - **`--install`**: after releases complete, install binaries locally via `scripts/install.sh` - **`--with-skill`**: when installing, also install Claude Code skills (passed through to install.sh) -Valid tool names: `tb-prod`, `tb-sem`, `tb-bug`, `tb-lf` +Valid tool names: `tb-prod`, `tb-sem`, `tb-bug`, `tb-lf`, `tb-devctl` If `--all` is used and no version is specified, read each crate's current version from `crates//Cargo.toml` and suggest a patch bump for each. Ask the user to confirm. diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 5c10692..8be0ddd 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -8,7 +8,7 @@ on: crate: description: "Crate to build (e.g. tb-prod)" type: choice - options: [tb-prod, tb-sem, tb-bug, tb-lf] + options: [tb-prod, tb-sem, tb-bug, tb-lf, tb-devctl] required: true dry_run: description: "Dry run (build but don't create release)" diff --git a/.gitignore b/.gitignore index 7f7a5ed..7a39ecd 100644 --- a/.gitignore +++ b/.gitignore @@ -2,3 +2,4 @@ secrets.toml benchmark-results/ docs/ +output/ diff --git a/CLAUDE.md b/CLAUDE.md index 413b404..e0b6b0e 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -2,7 +2,7 @@ ## Git Workflow -- Commit directly to `main` — no branches or PRs needed for now. +- Create a feature branch and open a draft PR for review. Do not commit directly to `main`. - **Before publishing a tool**, always check for uncommitted changes in its crate directory and commit them first. The bump script only commits the version change — any pending code changes will be left out of the tagged release. 
## Bugs and Issues diff --git a/Cargo.lock b/Cargo.lock index 382bbc4..d56405b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1949,6 +1949,22 @@ dependencies = [ "urlencoding", ] +[[package]] +name = "tb-devctl" +version = "0.1.0" +dependencies = [ + "chrono", + "clap", + "colored", + "dirs", + "reqwest", + "serde", + "serde_json", + "thiserror 2.0.18", + "toml", + "toolbox-core", +] + [[package]] name = "tb-lf" version = "0.2.2" diff --git a/Cargo.toml b/Cargo.toml index 90001e1..96673fd 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -44,6 +44,7 @@ tb-lf = { path = "crates/tb-lf" } tb-sem = { path = "crates/tb-sem" } tb-prod = { path = "crates/tb-prod" } tb-bug = { path = "crates/tb-bug" } +tb-devctl = { path = "crates/tb-devctl" } # Dev/test (also in workspace.dependencies so crates can inherit them) assert_cmd = "2" diff --git a/crates/tb-devctl/Cargo.toml b/crates/tb-devctl/Cargo.toml new file mode 100644 index 0000000..959b0c2 --- /dev/null +++ b/crates/tb-devctl/Cargo.toml @@ -0,0 +1,27 @@ +[package] +name = "tb-devctl" +version = "0.1.0" +edition = "2024" +description = "Local dev environment orchestrator for Productive services" +authors.workspace = true +license.workspace = true +repository.workspace = true + +[[bin]] +name = "tb-devctl" +path = "src/main.rs" + +[lib] +doctest = false + +[dependencies] +toolbox-core.workspace = true +clap.workspace = true +reqwest.workspace = true +serde.workspace = true +serde_json.workspace = true +toml.workspace = true +chrono.workspace = true +colored.workspace = true +dirs = "6" +thiserror.workspace = true diff --git a/crates/tb-devctl/src/commands/doctor.rs b/crates/tb-devctl/src/commands/doctor.rs new file mode 100644 index 0000000..e18daf8 --- /dev/null +++ b/crates/tb-devctl/src/commands/doctor.rs @@ -0,0 +1,231 @@ +use std::path::Path; + +use colored::Colorize; + +use crate::config::Config; +use crate::error::Result; +use crate::health; + +struct ServiceResult { + name: String, + companion_of: Option, + docker_ok: 
bool, + local_ok: bool, + issues: Vec, +} + +pub fn run(config: &Config, project_root: &Path) -> Result<()> { + let mut issues = 0; + + // --- System checks --- + println!("{}", "System".bold()); + + let docker_ok = health::docker_is_running(); + if docker_ok { + println!(" {} Docker", "✓".green()); + } else { + println!(" {} Docker — not running", "✗".red()); + issues += 1; + } + + let caddy_ok = health::caddy_is_running(); + if caddy_ok { + println!(" {} Caddy (localhost:2019)", "✓".green()); + } else { + println!(" {} Caddy — not responding on localhost:2019", "✗".red()); + println!(" Run: ./scripts/setup-caddy.sh"); + issues += 1; + } + + match health::aws_sso_status() { + health::AwsSsoStatus::Valid(Some(remaining)) => { + let time_str = health::format_duration(&remaining); + if remaining.as_secs() < 1800 { + println!(" {} AWS SSO ({} remaining)", "!".yellow(), time_str); + } else { + println!(" {} AWS SSO ({} remaining)", "✓".green(), time_str); + } + } + health::AwsSsoStatus::Valid(None) => { + println!(" {} AWS SSO (valid, expiry unknown)", "✓".green()); + } + health::AwsSsoStatus::Expired => { + println!(" {} AWS SSO — expired or invalid", "!".yellow()); + println!(" Run: aws sso login"); + } + health::AwsSsoStatus::NotInstalled => { + println!(" {} AWS CLI not installed", "!".yellow()); + } + } + + // --- Infrastructure --- + println!(); + println!("{}", "Infrastructure".bold()); + + let infra_running = health::infra_is_running(config, project_root); + + for (name, svc) in &config.infra.services { + if infra_running && health::port_is_open(svc.port) { + println!(" {} {} (port {})", "✓".green(), name, svc.port); + } else { + println!(" {} {} (port {}) — not running", "✗".red(), name, svc.port); + issues += 1; + } + } + + // --- Services: collect results --- + let repos_dir = project_root.join("repos"); + let companions = config.companion_map(); + let mut results: Vec = Vec::new(); + + for (name, svc) in &config.services { + // Companion services — don't 
check independently + if let Some(parent) = companions.get(name.as_str()) { + results.push(ServiceResult { + name: name.clone(), + companion_of: Some(parent.clone()), + docker_ok: true, + local_ok: true, + issues: Vec::new(), + }); + continue; + } + + let repo_path = svc.repo.as_ref().map(|r| repos_dir.join(r)); + let repo_exists = repo_path.as_ref().is_some_and(|p| p.exists()); + + // Repo not cloned — fail both, single issue, skip rest + if repo_path.is_some() && !repo_exists { + results.push(ServiceResult { + name: name.clone(), + companion_of: None, + docker_ok: false, + local_ok: false, + issues: vec!["repo not cloned".into()], + }); + continue; + } + + let mut svc_issues: Vec = Vec::new(); + let mut docker_issues = false; + let mut local_issues = false; + + // Secrets check (affects both docker and local) + if let Some(ref path) = repo_path { + for secret in &svc.secrets { + if !path.join(secret).exists() { + svc_issues.push(format!("missing {}", secret)); + docker_issues = true; + local_issues = true; + } + } + } + + // Local requirements check (affects local only) + for req in &svc.requires { + let check_path = if repo_exists { + repo_path.as_deref() + } else { + None + }; + let status = health::check_requirement(req, check_path); + if !status.ok { + let msg = format!( + "{} — {}", + req, + status.detail.unwrap_or_else(|| "not found".into()) + ); + svc_issues.push(msg); + local_issues = true; + } + } + + results.push(ServiceResult { + name: name.clone(), + companion_of: None, + docker_ok: !docker_issues, + local_ok: !local_issues, + issues: svc_issues, + }); + } + + // --- Services: render table --- + println!(); + println!("{}", "Services".bold()); + + let max_name_len = results + .iter() + .map(|r| r.name.len()) + .max() + .unwrap_or(7) + .max(7); // minimum "SERVICE" width + + // Header + println!( + " {: = results.iter().filter(|r| !r.issues.is_empty()).collect(); + + if !failing.is_empty() { + println!(); + println!("{}", "Issues".bold()); + + for (i, 
result) in failing.iter().enumerate() { + if i > 0 { + println!(); + } + println!(" {}", result.name); + for issue in &result.issues { + println!(" {} {}", "✗".red(), issue); + } + issues += 1; + } + } + + // --- Summary --- + println!(); + if issues == 0 { + println!("{}", "Everything looks good!".green().bold()); + } else { + println!("{} {} issue(s) found.", "!".yellow().bold(), issues); + } + + Ok(()) +} diff --git a/crates/tb-devctl/src/commands/infra.rs b/crates/tb-devctl/src/commands/infra.rs new file mode 100644 index 0000000..0119a16 --- /dev/null +++ b/crates/tb-devctl/src/commands/infra.rs @@ -0,0 +1,109 @@ +use std::path::Path; +use std::process::Command; + +use colored::Colorize; + +use crate::config::Config; +use crate::error::{Error, Result}; +use crate::health; + +pub fn up(config: &Config, project_root: &Path) -> Result<()> { + if !health::docker_is_running() { + return Err(Error::Other( + "Docker is not running. Start Docker Desktop first.".into(), + )); + } + + let compose_file = project_root.join(&config.infra.compose_file); + + // Auto-create volumes + for svc in config.infra.services.values() { + if let Some(vol) = &svc.volume { + let exists = Command::new("docker") + .args(["volume", "inspect", vol]) + .stdout(std::process::Stdio::null()) + .stderr(std::process::Stdio::null()) + .status() + .is_ok_and(|s| s.success()); + + if !exists { + println!(" Creating volume: {}", vol.bold()); + let status = Command::new("docker") + .args(["volume", "create", vol]) + .stdout(std::process::Stdio::null()) + .status()?; + if !status.success() { + return Err(Error::Other(format!("Failed to create volume: {}", vol))); + } + } + } + } + + println!("{}", "Starting infrastructure...".blue()); + let status = Command::new("docker") + .args([ + "compose", + "-p", + &config.infra.compose_project, + "-f", + &compose_file.to_string_lossy(), + "up", + "-d", + ]) + .status()?; + + if !status.success() { + return Err(Error::Other("docker compose up failed".into())); + } 
+ + println!("{}", "Infrastructure started.".green()); + for (name, svc) in &config.infra.services { + println!(" {} → port {}", name.bold(), svc.port); + } + Ok(()) +} + +pub fn down(config: &Config, project_root: &Path) -> Result<()> { + let compose_file = project_root.join(&config.infra.compose_file); + + println!("{}", "Stopping infrastructure...".yellow()); + let status = Command::new("docker") + .args([ + "compose", + "-p", + &config.infra.compose_project, + "-f", + &compose_file.to_string_lossy(), + "down", + ]) + .status()?; + + if !status.success() { + return Err(Error::Other("docker compose down failed".into())); + } + + println!("{}", "Infrastructure stopped.".green()); + Ok(()) +} + +pub fn status(config: &Config, project_root: &Path) -> Result<()> { + if health::infra_is_running(config, project_root) { + println!("{}", "Infrastructure is running.".green()); + } else { + println!("{}", "Infrastructure is not running.".red()); + println!(" Start with: tb-devctl infra up"); + return Ok(()); + } + + // Show per-service port status + for (name, svc) in &config.infra.services { + let port_status = if health::port_is_open(svc.port) { + "●".green() + } else { + "○".red() + }; + println!(" {} {} (port {})", port_status, name, svc.port); + } + + Ok(()) +} diff --git a/crates/tb-devctl/src/commands/init.rs b/crates/tb-devctl/src/commands/init.rs new file mode 100644 index 0000000..c6e4e9d --- /dev/null +++ b/crates/tb-devctl/src/commands/init.rs @@ -0,0 +1,93 @@ +use std::path::Path; +use std::process::Command; + +use colored::Colorize; + +use crate::config::Config; +use crate::docker; +use crate::error::{Error, Result}; + +pub fn run(config: &Config, project_root: &Path, service: &str) -> Result<()> { + let svc = config + .services + .get(service) + .ok_or_else(|| Error::Config(format!("Unknown service: '{}'", service)))?; + + if svc.init.is_empty() { + println!("No init steps defined for '{}'.", service); + return Ok(()); + } + + let repo = svc + .repo + 
.as_deref() + .ok_or_else(|| Error::Config(format!("Service '{}' has no repo defined", service)))?; + + // Determine execution context + let container_up = docker::container_is_running(config); + + // Check AWS SSO if any init step needs it + let needs_aws = svc.init.iter().any(|s| s.contains("secrets-manager")); + if needs_aws && !crate::health::aws_sso_is_valid() { + return Err(Error::Other( + "AWS SSO session expired or invalid. Run: aws sso login".into(), + )); + } + + println!("{} {}", "Initializing".blue(), service.bold()); + println!(" Steps: {}", svc.init.len()); + println!(); + + for (i, step) in svc.init.iter().enumerate() { + println!(" [{}/{}] {}", i + 1, svc.init.len(), step.bold()); + + if container_up { + // Run inside Docker container as root (needed for gem/package installs). + // Set HOME=/home/dev so AWS SDK finds the mounted ~/.aws credentials. + let status = Command::new("docker") + .args([ + "exec", + "-e", + "HOME=/home/dev", + "-w", + &format!("/workspace/{}", repo), + &config.docker.container, + "bash", + "-lc", + step, + ]) + .status()?; + + if !status.success() { + return Err(Error::Other(format!( + "Init step failed: {} (exit {})", + step, + status.code().unwrap_or(-1) + ))); + } + } else { + // Run on host in repos/ + let repo_dir = project_root.join("repos").join(repo); + if !repo_dir.exists() { + return Err(Error::Config(format!("Repo not found: repos/{}", repo))); + } + + let status = Command::new("bash") + .args(["-lc", step]) + .current_dir(&repo_dir) + .status()?; + + if !status.success() { + return Err(Error::Other(format!( + "Init step failed: {} (exit {})", + step, + status.code().unwrap_or(-1) + ))); + } + } + } + + println!(); + println!("{} {} initialized.", "✓".green(), service); + Ok(()) +} diff --git a/crates/tb-devctl/src/commands/local.rs b/crates/tb-devctl/src/commands/local.rs new file mode 100644 index 0000000..d4447bd --- /dev/null +++ b/crates/tb-devctl/src/commands/local.rs @@ -0,0 +1,286 @@ +use std::path::{Path, 
PathBuf}; +use std::process::Command; + +use colored::Colorize; + +use crate::config::Config; +use crate::error::{Error, Result}; +use crate::health; +use crate::state::{ServiceState, State}; + +/// Build a shell command that initializes runtime version managers and +/// cd's into the service directory before running the actual command. +/// This ensures rbenv/nvm detect .ruby-version/.node-version/.nvmrc. +fn shell_cmd( + svc_dir: &Path, + cmd: &str, + env: &std::collections::BTreeMap, +) -> String { + let mut parts = Vec::new(); + + // Suppress interactive prompts (pnpm, corepack, etc.) + parts.push("export CI=true".to_string()); + + // Per-service env vars from tb-devctl.toml (shared + local-specific) + for (key, val) in env { + parts.push(format!("export {}={}", key, shell_escape(val))); + } + + // rbenv init puts shims first in PATH (needed for .ruby-version detection) + if svc_dir.join(".ruby-version").exists() { + parts.push("eval \"$(rbenv init - bash)\" 2>/dev/null".to_string()); + } + + // nvm init for .node-version/.nvmrc detection + if svc_dir.join(".node-version").exists() || svc_dir.join(".nvmrc").exists() { + parts.push("export NVM_DIR=\"${NVM_DIR:-$HOME/.nvm}\"; [ -s \"$NVM_DIR/nvm.sh\" ] && . 
\"$NVM_DIR/nvm.sh\" 2>/dev/null; true".to_string()); + } + + parts.push(format!("cd {}", svc_dir.display())); + parts.push(cmd.to_string()); + parts.join(" && ") +} + +fn shell_escape(s: &str) -> String { + if s.contains(' ') || s.contains('"') || s.contains('$') { + format!("'{}'", s.replace('\'', "'\\''")) + } else { + s.to_string() + } +} + +pub fn start( + config: &Config, + project_root: &Path, + service: &str, + dir_override: Option<&str>, + background: bool, +) -> Result<()> { + let svc = config + .services + .get(service) + .ok_or_else(|| Error::Config(format!("Unknown service: '{}'", service)))?; + + let cmd = svc + .cmd + .as_deref() + .ok_or_else(|| Error::Config(format!("Service '{}' has no cmd defined", service)))?; + + let repo = svc + .repo + .as_deref() + .ok_or_else(|| Error::Config(format!("Service '{}' has no repo defined", service)))?; + + // Determine service directory (--dir paths resolved relative to project root) + let svc_dir: PathBuf = if let Some(dir) = dir_override { + let p = PathBuf::from(dir); + if p.is_absolute() { + p + } else { + project_root.join(p) + } + } else { + project_root.join("repos").join(repo) + }; + + if !svc_dir.exists() { + return Err(Error::Config(format!( + "Service directory not found: {}", + svc_dir.display() + ))); + } + + // Check port conflicts + if let Some(port) = svc.port + && health::port_is_open(port) + { + let owner = health::port_owner(port) + .map(|(pid, cmd)| format!("{} (PID {})", cmd, pid)) + .unwrap_or_else(|| "unknown".into()); + return Err(Error::Other(format!( + "Port {} is already in use by {}", + port, owner + ))); + } + + // Check secrets + for secret in &svc.secrets { + if !svc_dir.join(secret).exists() { + return Err(Error::Config(format!( + "Missing secret: {}/{}. 
Run `tb-devctl init {}` first.", + svc_dir.display(), + secret, + service + ))); + } + } + + // Auto-start infra if needed + if !svc.infra.is_empty() && !health::infra_is_running(config, project_root) { + println!("{}", "Starting infrastructure...".blue()); + crate::commands::infra::up(config, project_root)?; + } + + // Merge shared + local-specific env vars (local overrides shared) + let mut merged_env = svc.env.clone(); + merged_env.extend(svc.env_local.iter().map(|(k, v)| (k.clone(), v.clone()))); + + // Run start steps (git pull, deps, migrate) + if !svc.start.is_empty() { + println!("{}", "Running setup steps...".blue()); + for step in &svc.start { + // git pull: skip if working tree is dirty + if step.starts_with("git pull") { + let output = Command::new("git") + .args(["status", "--porcelain"]) + .current_dir(&svc_dir) + .output()?; + if !output.stdout.is_empty() { + println!(" {} git pull (dirty working tree, skipping)", "!".yellow()); + continue; + } + } + + // git restore: clean up generated files after migrations + if step.starts_with("git restore") { + let cmd = shell_cmd(&svc_dir, step, &merged_env); + let status = Command::new("bash").args(["-lc", &cmd]).status()?; + if !status.success() { + println!(" {} {} (non-fatal)", "!".yellow(), step); + } + continue; + } + + println!(" {}", step); + // Explicit cd so rbenv/nvm detect .ruby-version/.node-version + let cmd = shell_cmd(&svc_dir, step, &merged_env); + let status = Command::new("bash").args(["-lc", &cmd]).status()?; + + if !status.success() { + return Err(Error::Other(format!("Setup step failed: {}", step))); + } + } + } + + // Clean stale PID files + let pid_file = svc_dir.join("tmp/pids/server.pid"); + if pid_file.exists() { + std::fs::remove_file(&pid_file)?; + println!(" Cleaned stale PID file"); + } + + // Start the service + let now = chrono::Utc::now().to_rfc3339(); + + if background { + // Background mode: redirect output to log file + let log_dir = project_root.join(".tb-devctl/logs"); + 
std::fs::create_dir_all(&log_dir)?; + let log_file = log_dir.join(format!("{}.log", service)); + let log = std::fs::File::create(&log_file)?; + + println!( + "{} {} (background, logs: {})", + "Starting".blue(), + service.bold(), + log_file.display() + ); + + let full_cmd = shell_cmd(&svc_dir, cmd, &merged_env); + let child = Command::new("bash") + .args(["-lc", &full_cmd]) + .stdout(log.try_clone()?) + .stderr(log) + .spawn()?; + + // Update state with PID + let mut state = State::load(project_root)?; + state.services.insert( + service.to_string(), + ServiceState { + mode: "local".into(), + started_at: now, + dir: Some(svc_dir.to_string_lossy().into()), + pid: Some(child.id()), + }, + ); + state.save(project_root)?; + + println!("{} {} started (PID {})", "✓".green(), service, child.id()); + if let Some(hostname) = &svc.hostname { + println!(" https://{}", hostname); + } + } else { + // Foreground mode: inherit terminal + println!( + "{} {} (foreground, Ctrl+C to stop)", + "Starting".blue(), + service.bold() + ); + + // Update state before starting (no PID for foreground) + let mut state = State::load(project_root)?; + state.services.insert( + service.to_string(), + ServiceState { + mode: "local".into(), + started_at: now, + dir: Some(svc_dir.to_string_lossy().into()), + pid: None, + }, + ); + state.save(project_root)?; + + let full_cmd = shell_cmd(&svc_dir, cmd, &merged_env); + let status = Command::new("bash").args(["-lc", &full_cmd]).status()?; + + // Clean up state after exit + let mut state = State::load(project_root)?; + state.services.remove(service); + state.save(project_root)?; + + if !status.success() { + return Err(Error::Other(format!( + "{} exited with code {}", + service, + status.code().unwrap_or(-1) + ))); + } + } + + Ok(()) +} + +/// Stop a locally running service by PID. 
+pub fn stop(project_root: &Path, service: &str) -> Result<()> { + let mut state = State::load(project_root)?; + + let svc_state = state + .services + .get(service) + .ok_or_else(|| Error::Other(format!("Service '{}' is not tracked in state", service)))?; + + if svc_state.mode != "local" { + return Err(Error::Other(format!( + "Service '{}' is in {} mode, not local", + service, svc_state.mode + ))); + } + + if let Some(pid) = svc_state.pid { + println!("Stopping {} (PID {})...", service, pid); + let _ = Command::new("kill").arg(pid.to_string()).status(); + std::thread::sleep(std::time::Duration::from_secs(2)); + println!("{} stopped.", service.green()); + } else { + println!( + "{} {} was running in foreground (no PID tracked)", + "!".yellow(), + service + ); + } + + state.services.remove(service); + state.save(project_root)?; + Ok(()) +} diff --git a/crates/tb-devctl/src/commands/logs.rs b/crates/tb-devctl/src/commands/logs.rs new file mode 100644 index 0000000..a5cbe7c --- /dev/null +++ b/crates/tb-devctl/src/commands/logs.rs @@ -0,0 +1,86 @@ +use std::path::Path; +use std::process::Command; + +use crate::config::Config; +use crate::docker; +use crate::error::{Error, Result}; + +pub fn run(config: &Config, project_root: &Path, service: &str) -> Result<()> { + // Infra services → docker compose logs + if config.infra.services.contains_key(service) { + let compose_file = project_root.join(&config.infra.compose_file); + let status = Command::new("docker") + .args([ + "compose", + "-p", + &config.infra.compose_project, + "-f", + &compose_file.to_string_lossy(), + "logs", + "-f", + "--tail", + "100", + service, + ]) + .status()?; + + if !status.success() { + return Err(Error::Other(format!("Failed to get logs for {}", service))); + } + return Ok(()); + } + + // App services → overmind tmux pane capture (non-interactive) + if !docker::container_is_running(config) { + return Err(Error::Other("Dev container is not running.".into())); + } + + // Find the overmind tmux 
socket + let output = Command::new("docker") + .args([ + "exec", + "-u", + "dev", + &config.docker.container, + "bash", + "-c", + "basename $(ls -d /tmp/overmind-workspace-*/ 2>/dev/null | head -1) 2>/dev/null || echo ''", + ]) + .output()?; + + let socket = String::from_utf8_lossy(&output.stdout).trim().to_string(); + if socket.is_empty() { + return Err(Error::Other("Overmind not running in container.".into())); + } + + // Capture last 100 lines from tmux pane + let output = Command::new("docker") + .args([ + "exec", + "-u", + "dev", + &config.docker.container, + "tmux", + "-L", + &socket, + "capture-pane", + "-t", + &format!("workspace:{}", service), + "-p", + "-S", + "-100", + ]) + .output()?; + + if !output.status.success() { + let stderr = String::from_utf8_lossy(&output.stderr); + return Err(Error::Other(format!( + "Failed to capture logs for '{}': {}", + service, + stderr.trim() + ))); + } + + print!("{}", String::from_utf8_lossy(&output.stdout)); + Ok(()) +} diff --git a/crates/tb-devctl/src/commands/mod.rs b/crates/tb-devctl/src/commands/mod.rs new file mode 100644 index 0000000..d519536 --- /dev/null +++ b/crates/tb-devctl/src/commands/mod.rs @@ -0,0 +1,9 @@ +pub mod doctor; +pub mod infra; +pub mod init; +pub mod local; +pub mod logs; +pub mod preset; +pub mod start; +pub mod status; +pub mod stop; diff --git a/crates/tb-devctl/src/commands/preset.rs b/crates/tb-devctl/src/commands/preset.rs new file mode 100644 index 0000000..fd1456b --- /dev/null +++ b/crates/tb-devctl/src/commands/preset.rs @@ -0,0 +1,65 @@ +use std::path::Path; + +use colored::Colorize; + +use crate::config::Config; +use crate::error::{Error, Result}; + +pub fn run(config: &Config, project_root: &Path, preset_name: &str) -> Result<()> { + let preset = config.presets.get(preset_name).ok_or_else(|| { + let available: Vec<&str> = config.presets.keys().map(|k| k.as_str()).collect(); + Error::Config(format!( + "Unknown preset: '{}'. 
Available: {}", + preset_name, + available.join(", ") + )) + })?; + + if let Some(desc) = &preset.description { + println!("{} {}", "Preset:".blue(), desc); + } + + let mode = preset.mode.as_deref().unwrap_or("local"); + + // Set preset env vars in the current process (inherited by child commands) + for (key, val) in &preset.env { + println!(" {} {}={}", "env".dimmed(), key, val); + // SAFETY: single-threaded CLI, no concurrent env access + unsafe { std::env::set_var(key, val) }; + } + + match mode { + "docker" => crate::commands::start::docker(config, project_root, &preset.services), + "local" => { + // Start each service locally in background + for (i, service) in preset.services.iter().enumerate() { + let is_last = i == preset.services.len() - 1; + if is_last { + // Last service runs in foreground (so Ctrl+C stops everything) + println!(); + crate::commands::local::start( + config, + project_root, + service, + None, + false, // foreground + )?; + } else { + // Other services run in background + crate::commands::local::start( + config, + project_root, + service, + None, + true, // background + )?; + } + } + Ok(()) + } + other => Err(Error::Config(format!( + "Unknown preset mode: '{}'. Use 'docker' or 'local'.", + other + ))), + } +} diff --git a/crates/tb-devctl/src/commands/start.rs b/crates/tb-devctl/src/commands/start.rs new file mode 100644 index 0000000..5f071ce --- /dev/null +++ b/crates/tb-devctl/src/commands/start.rs @@ -0,0 +1,247 @@ +use std::path::Path; + +use colored::Colorize; + +use crate::config::Config; +use crate::docker; +use crate::error::{Error, Result}; +use crate::health; +use crate::state::{ServiceState, State}; + +pub fn docker(config: &Config, project_root: &Path, services: &[String]) -> Result<()> { + // --- Prerequisite: Docker running --- + if !health::docker_is_running() { + return Err(Error::Other( + "Docker is not running. 
Start Docker Desktop first.".into(), + )); + } + + // --- Validate services --- + for svc in services { + if !config.services.contains_key(svc) { + return Err(Error::Config(format!( + "Unknown service: '{}'. Check tb-devctl.toml.", + svc + ))); + } + } + + // --- Stop existing container if running (declarative: new list replaces old) --- + if docker::container_is_running(config) { + println!("{}", "Replacing existing container...".yellow()); + docker::stop_container(config, project_root)?; + } + + // --- Check port conflicts (after stopping our container, before starting new) --- + println!("{}", "Checking ports...".blue()); + let mut conflicts = Vec::new(); + for svc_name in services { + let svc = &config.services[svc_name]; + if let Some(port) = svc.port + && health::port_is_open(port) + { + let owner = health::port_owner(port) + .map(|(pid, cmd)| format!("{} (PID {})", cmd, pid)) + .unwrap_or_else(|| "unknown".into()); + conflicts.push(format!( + " {} (port {}) — occupied by {}", + svc_name, port, owner + )); + } + } + // Also check companion ports + for svc_name in services { + if let Some(companion) = &config.services[svc_name].companion + && let Some(comp_svc) = config.services.get(companion) + && let Some(port) = comp_svc.port + && health::port_is_open(port) + { + let owner = health::port_owner(port) + .map(|(pid, cmd)| format!("{} (PID {})", cmd, pid)) + .unwrap_or_else(|| "unknown".into()); + conflicts.push(format!( + " {} (port {}) — occupied by {}", + companion, port, owner + )); + } + } + if !conflicts.is_empty() { + eprintln!("{}", "Port conflicts detected:".red()); + for c in &conflicts { + eprintln!("{}", c); + } + return Err(Error::Other( + "Stop conflicting processes before starting.".into(), + )); + } + + // --- Ensure repos are cloned --- + println!("{}", "Checking repos...".blue()); + let repos_dir = project_root.join("repos"); + for svc_name in services { + let svc = &config.services[svc_name]; + if let Some(repo) = &svc.repo + && 
!repos_dir.join(repo).exists() + { + return Err(Error::Config(format!( + "Repo not cloned: repos/{}. Run: git clone https://github.com/productiveio/{}.git repos/{}", + repo, repo, repo + ))); + } + } + + // --- Check secrets --- + println!("{}", "Checking secrets...".blue()); + let mut missing = Vec::new(); + for svc_name in services { + let svc = &config.services[svc_name]; + if let Some(repo) = &svc.repo { + for secret in &svc.secrets { + let secret_path = repos_dir.join(repo).join(secret); + if !secret_path.exists() { + missing.push(format!(" {}: {} (missing)", svc_name, secret)); + } + } + } + } + if !missing.is_empty() { + eprintln!("{}", "Missing secrets:".red()); + for m in &missing { + eprintln!("{}", m); + } + return Err(Error::Other( + "Pull secrets before starting. See tb-devctl.toml init steps.".into(), + )); + } + + // --- Auto-start infra if needed --- + let infra_needed = services + .iter() + .any(|svc_name| !config.services[svc_name].infra.is_empty()); + + if infra_needed { + if !health::infra_is_running(config, project_root) { + println!("{}", "Starting infrastructure...".blue()); + crate::commands::infra::up(config, project_root)?; + } else { + println!(" Infrastructure already running."); + } + } + + // --- Capture env vars --- + println!("{}", "Capturing environment...".blue()); + capture_env(project_root)?; + + // --- Generate Procfile --- + println!("{}", "Generating Procfile...".blue()); + docker::generate_procfile(config, services, project_root)?; + + // --- Start container --- + println!("{}", "Starting container...".blue()); + docker::start_container(config, project_root, services)?; + + // --- Update state immediately (so status works during boot) --- + let now = chrono::Utc::now().to_rfc3339(); + let mut state = State::load(project_root)?; + // Clear previous docker services + state.services.retain(|_, s| s.mode != "docker"); + for svc_name in services { + state.services.insert( + svc_name.clone(), + ServiceState { + mode: 
"docker".into(), + started_at: now.clone(), + dir: config.services[svc_name] + .repo + .as_ref() + .map(|r| format!("repos/{}", r)), + pid: None, + }, + ); + // Track companions too + if let Some(companion) = &config.services[svc_name].companion { + state.services.insert( + companion.clone(), + ServiceState { + mode: "docker".into(), + started_at: now.clone(), + dir: config + .services + .get(companion) + .and_then(|s| s.repo.as_ref()) + .map(|r| format!("repos/{}", r)), + pid: None, + }, + ); + } + } + state.save(project_root)?; + + // --- Wait for healthy --- + print!("{}", "Waiting for container to be ready".blue()); + docker::wait_for_healthy(config)?; + println!(" {}", "ready!".green()); + + // --- Report --- + println!(); + println!("{}", "Services started:".green()); + for svc_name in services { + let svc = &config.services[svc_name]; + if let Some(hostname) = &svc.hostname { + println!(" https://{} → port {}", hostname, svc.port.unwrap_or(0)); + } + } + println!(); + println!("Branch switch: cd repos/ && git checkout "); + println!("Then: tb-devctl stop && tb-devctl start --docker"); + + Ok(()) +} + +/// Capture host environment variables to .env.session file. 
+fn capture_env(project_root: &Path) -> Result<()> { + let env_dir = project_root.join(".docker-sessions/.dev"); + std::fs::create_dir_all(&env_dir)?; + let env_file = env_dir.join(".env.session"); + + let mut lines = vec!["# Auto-captured from host environment".to_string()]; + + let vars = [ + "ANTHROPIC_API_KEY", + "PRODUCTIVE_AUTH_TOKEN", + "GITHUB_PERSONAL_ACCESS_TOKEN", + "BUGSNAG_AUTH_TOKEN", + "SEMAPHORE_API_TOKEN", + "GRAFANA_SERVICE_ACCOUNT_TOKEN", + ]; + + for var in &vars { + let val = std::env::var(var).unwrap_or_default(); + lines.push(format!("{}={}", var, val)); + } + + // GH_TOKEN fallback + let gh_token = std::env::var("GH_TOKEN") + .or_else(|_| std::env::var("GITHUB_PERSONAL_ACCESS_TOKEN")) + .unwrap_or_default(); + lines.push(format!("GH_TOKEN={}", gh_token)); + + // AWS — only forward explicit credentials, never override region + // (region comes from ~/.aws/config which is mounted into the container) + for var in &[ + "AWS_DEFAULT_REGION", + "AWS_ACCESS_KEY_ID", + "AWS_SECRET_ACCESS_KEY", + "AWS_SESSION_TOKEN", + "AWS_PROFILE", + ] { + if let Ok(val) = std::env::var(var) + && !val.is_empty() + { + lines.push(format!("{}={}", var, val)); + } + } + + std::fs::write(&env_file, lines.join("\n") + "\n")?; + Ok(()) +} diff --git a/crates/tb-devctl/src/commands/status.rs b/crates/tb-devctl/src/commands/status.rs new file mode 100644 index 0000000..4c87768 --- /dev/null +++ b/crates/tb-devctl/src/commands/status.rs @@ -0,0 +1,151 @@ +use std::path::Path; + +use colored::Colorize; + +use crate::config::Config; +use crate::docker; +use crate::error::Result; +use crate::health; +use crate::state::State; + +pub fn run(config: &Config, project_root: &Path) -> Result<()> { + let state = State::load(project_root)?; + + // Prerequisite checks + let docker_ok = health::docker_is_running(); + let caddy_ok = health::caddy_is_running(); + + if !docker_ok { + println!("{} Docker is not running", "✗".red()); + } + if !caddy_ok { + println!("{} Caddy is not running 
(localhost:2019)", "!".yellow()); + } + + match health::aws_sso_status() { + health::AwsSsoStatus::Valid(Some(remaining)) if remaining.as_secs() < 1800 => { + println!( + "{} AWS SSO expires in {}", + "!".yellow(), + health::format_duration(&remaining) + ); + } + health::AwsSsoStatus::Expired => { + println!("{} AWS SSO expired (run: aws sso login)", "!".yellow()); + } + _ => {} // Valid with plenty of time, or not installed — don't clutter + } + + // If container is running, get overmind status for accurate service state + let container_up = docker::container_is_running(config); + let overmind = if container_up { + docker::overmind_status(config) + } else { + Default::default() + }; + + // Collect rows first, then format with correct alignment + let mut rows: Vec<(String, String, String, String, String)> = Vec::new(); + + for (name, svc) in &config.services { + let mode = if let Some(svc_state) = state.services.get(name) { + svc_state.mode.clone() + } else { + "-".to_string() + }; + + let (state_text, state_color) = + determine_service_state(name, svc.port, &mode, &overmind, container_up); + let url = svc.hostname.as_deref().unwrap_or("-").to_string(); + + rows.push((name.clone(), mode, state_text, state_color, url)); + } + + // Print service table with padding applied before colorization + println!(); + println!(" {:<20} {:<10} {:<10} URL", "SERVICE", "MODE", "STATE"); + println!(" {:<20} {:<10} {:<10} ───", "───────", "────", "─────"); + + for (name, mode, state_text, state_color, url) in &rows { + let padded_state = format!("{:<10}", state_text); + let colored_state = match state_color.as_str() { + "green" => padded_state.green().to_string(), + "red" => padded_state.red().to_string(), + "yellow" => padded_state.yellow().to_string(), + _ => padded_state.dimmed().to_string(), + }; + println!(" {:<20} {:<10} {} {}", name, mode, colored_state, url); + } + + // Infra status + let infra_running = health::infra_is_running(config, project_root); + let infra_compose = 
project_root.join(&config.infra.compose_file); + + let infra_containers = if infra_running { + health::compose_container_states( + &config.infra.compose_project, + &infra_compose.to_string_lossy(), + ) + } else { + Default::default() + }; + + println!(); + println!(" {:<20} {:<10}", "INFRA", "STATE"); + println!(" {:<20} {:<10}", "─────", "─────"); + + for name in config.infra.services.keys() { + let is_up = infra_containers + .get(name.as_str()) + .is_some_and(|s| s.starts_with("Up")); + let padded = format!("{:<10}", if is_up { "running" } else { "stopped" }); + let colored = if is_up { + padded.green().to_string() + } else { + padded.red().to_string() + }; + println!(" {:<20} {}", name, colored); + } + + println!(); + Ok(()) +} + +/// Returns (display_text, color_name) for a service state. +fn determine_service_state( + name: &str, + port: Option, + mode: &str, + overmind: &std::collections::BTreeMap, + container_up: bool, +) -> (String, String) { + // Docker mode: use overmind as source of truth + if mode == "docker" && container_up { + return match overmind.get(name).map(|s| s.as_str()) { + Some("running") => ("running".into(), "green".into()), + Some("dead") => ("crashed".into(), "red".into()), + Some(other) => (other.into(), "yellow".into()), + None => ("no proc".into(), "dim".into()), + }; + } + + // No mode set + if mode == "-" { + if let Some(port) = port + && !container_up + && health::port_is_open(port) + { + return ("external".into(), "yellow".into()); + } + return ("-".into(), "dim".into()); + } + + // Local mode (future): probe port + if let Some(port) = port + && health::port_is_open(port) + { + return ("running".into(), "green".into()); + } + + ("stopped".into(), "red".into()) +} diff --git a/crates/tb-devctl/src/commands/stop.rs b/crates/tb-devctl/src/commands/stop.rs new file mode 100644 index 0000000..613d7b5 --- /dev/null +++ b/crates/tb-devctl/src/commands/stop.rs @@ -0,0 +1,53 @@ +use std::path::Path; + +use colored::Colorize; + +use 
crate::config::Config; +use crate::docker; +use crate::error::{Error, Result}; +use crate::state::State; + +pub fn run(config: &Config, project_root: &Path) -> Result<()> { + if !docker::container_is_running(config) { + println!("{}", "Dev container is not running.".yellow()); + return Ok(()); + } + + println!("{}", "Stopping dev container...".yellow()); + docker::stop_container(config, project_root)?; + + // Clear docker services from state + let mut state = State::load(project_root)?; + state.services.retain(|_, s| s.mode != "docker"); + state.save(project_root)?; + + println!("{}", "Dev container stopped.".green()); + Ok(()) +} + +/// Restart a specific service inside the running container via overmind. +pub fn restart_service(config: &Config, service: &str) -> Result<()> { + if !docker::container_is_running(config) { + return Err(Error::Other( + "Dev container is not running. Start with: tb-devctl start --docker".into(), + )); + } + + println!("Restarting {}...", service.bold()); + let status = std::process::Command::new("docker") + .args([ + "exec", + &config.docker.container, + "overmind", + "restart", + service, + ]) + .status()?; + + if !status.success() { + return Err(Error::Other(format!("Failed to restart {}", service))); + } + + println!("{} restarted.", service.green()); + Ok(()) +} diff --git a/crates/tb-devctl/src/config.rs b/crates/tb-devctl/src/config.rs new file mode 100644 index 0000000..de08d92 --- /dev/null +++ b/crates/tb-devctl/src/config.rs @@ -0,0 +1,152 @@ +use std::collections::BTreeMap; +use std::path::{Path, PathBuf}; + +use serde::Deserialize; + +use crate::error::{Error, Result}; + +#[derive(Debug, Deserialize)] +pub struct Config { + pub infra: InfraConfig, + pub docker: DockerConfig, + #[serde(default)] + pub presets: BTreeMap, + #[serde(default)] + pub services: BTreeMap, +} + +#[derive(Debug, Deserialize)] +pub struct PresetConfig { + #[serde(default)] + pub description: Option, + pub services: Vec, + #[serde(default)] + pub mode: 
Option, + #[serde(default)] + pub env: BTreeMap, +} + +#[derive(Debug, Deserialize)] +pub struct InfraConfig { + pub compose_file: String, + pub compose_project: String, + #[serde(default)] + pub services: BTreeMap, +} + +#[derive(Debug, Deserialize)] +pub struct InfraServiceConfig { + pub port: u16, + #[serde(default)] + pub volume: Option, +} + +#[derive(Debug, Deserialize)] +pub struct DockerConfig { + pub compose_file: String, + pub compose_project: String, + pub container: String, +} + +#[derive(Debug, Deserialize)] +pub struct ServiceConfig { + #[serde(default)] + pub port: Option, + #[serde(default)] + pub hostname: Option, + #[serde(default)] + pub repo: Option, + #[serde(default)] + pub cmd: Option, + #[serde(default)] + pub infra: Vec, + #[serde(default)] + pub secrets: Vec, + #[serde(default)] + pub companion: Option, + #[serde(default)] + pub requires: Vec, + #[serde(default)] + pub init: Vec, + #[serde(default)] + pub start: Vec, + #[serde(default)] + pub env: std::collections::BTreeMap, + #[serde(default)] + pub env_docker: std::collections::BTreeMap, + #[serde(default)] + pub env_local: std::collections::BTreeMap, +} + +impl Config { + /// Build a map of companion service name → parent service name. + pub fn companion_map(&self) -> BTreeMap { + let mut map = BTreeMap::new(); + for (name, svc) in &self.services { + if let Some(companion) = &svc.companion { + map.insert(companion.clone(), name.clone()); + } + } + map + } +} + +/// Walk up from `start` looking for `tb-devctl.toml`. +/// Returns (config, project_root) on success. +pub fn find_and_load(start: &Path) -> Result<(Config, PathBuf)> { + let config_path = find_config_file(start)?; + let project_root = config_path + .parent() + .ok_or_else(|| Error::Config("tb-devctl.toml has no parent directory".into()))? 
+ .to_path_buf(); + + let content = std::fs::read_to_string(&config_path)?; + let config: Config = toml::from_str(&content)?; + Ok((config, project_root)) +} + +/// Walk up the directory tree to find `tb-devctl.toml`. +fn find_config_file(start: &Path) -> Result { + let mut dir = start.to_path_buf(); + loop { + let candidate = dir.join("tb-devctl.toml"); + if candidate.exists() { + return Ok(candidate); + } + if !dir.pop() { + return Err(Error::Config( + "tb-devctl.toml not found (searched up from current directory)".into(), + )); + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn parse_minimal_config() { + let toml_str = r#" +[infra] +compose_file = "docker/infra-compose.yml" +compose_project = "productive-infra" + +[docker] +compose_file = "docker/dev-compose.yml" +compose_project = "productive-dev" +container = "productive-dev-workspace" + +[services.api] +port = 3000 +hostname = "api.productive.io.localhost" +repo = "api" +cmd = "bundle exec rails server -b 0.0.0.0 -p 3000" +infra = ["mysql", "redis"] +"#; + let config: Config = toml::from_str(toml_str).unwrap(); + assert_eq!(config.services.len(), 1); + assert_eq!(config.services["api"].port, Some(3000)); + assert_eq!(config.services["api"].infra, vec!["mysql", "redis"]); + } +} diff --git a/crates/tb-devctl/src/docker.rs b/crates/tb-devctl/src/docker.rs new file mode 100644 index 0000000..63c854a --- /dev/null +++ b/crates/tb-devctl/src/docker.rs @@ -0,0 +1,408 @@ +use std::path::Path; +use std::process::Command; + +use crate::config::{Config, ServiceConfig}; +use crate::error::{Error, Result}; + +/// Default runtime versions — must match Dockerfile.base ARGs. +const DEFAULT_RUBY: &str = "3.4.7"; +const DEFAULT_NODE: &str = "22.16.0"; + +/// Generate a Procfile for overmind from the selected services. +/// Writes to `.docker-sessions/.dev/Procfile.dev`. 
+pub fn generate_procfile(config: &Config, services: &[String], project_root: &Path) -> Result<()> { + let procfile_dir = project_root.join(".docker-sessions/.dev"); + std::fs::create_dir_all(&procfile_dir)?; + let procfile_path = procfile_dir.join("Procfile.dev"); + + let mut lines = Vec::new(); + + for svc_name in services { + let svc = config + .services + .get(svc_name) + .ok_or_else(|| Error::Config(format!("Unknown service: {}", svc_name)))?; + + if let Some(entry) = procfile_entry(svc_name, svc, project_root) { + lines.push(entry); + } + + // Add companion (e.g., sidekiq for api) + if let Some(companion) = &svc.companion + && let Some(comp_svc) = config.services.get(companion) + && let Some(entry) = procfile_entry(companion, comp_svc, project_root) + { + lines.push(entry); + } + } + + std::fs::write(&procfile_path, lines.join("\n") + "\n")?; + Ok(()) +} + +/// Build a single Procfile entry, with runtime version wrappers if needed. +fn procfile_entry(name: &str, svc: &ServiceConfig, project_root: &Path) -> Option { + let repo = svc.repo.as_deref()?; + let cmd = svc.cmd.as_deref()?; + + let repos_dir = project_root.join("repos"); + let mut wrapper = String::new(); + + // Check if repo needs a different Ruby version + let ruby_version_file = repos_dir.join(repo).join(".ruby-version"); + if ruby_version_file.exists() + && let Ok(version) = std::fs::read_to_string(&ruby_version_file) + { + let version = version.trim(); + if version != DEFAULT_RUBY { + wrapper.push_str(&format!("rvm use {} && ", version)); + } + } + + // Check if repo needs a different Node version + let node_version = read_node_version(&repos_dir.join(repo)); + if let Some(version) = node_version + && version != DEFAULT_NODE + { + wrapper.push_str(&format!( + ". 
/usr/local/nvm/nvm.sh && nvm use {} && ", + version + )); + } + + let full_cmd = if wrapper.is_empty() { + format!("{}: cd /workspace/{} && {}", name, repo, cmd) + } else { + format!( + "{}: bash -lc '{} cd /workspace/{} && {}'", + name, wrapper, repo, cmd + ) + }; + + Some(full_cmd) +} + +/// Read Node version from .node-version or .nvmrc +fn read_node_version(repo_path: &Path) -> Option { + for filename in &[".node-version", ".nvmrc"] { + let path = repo_path.join(filename); + if path.exists() + && let Ok(version) = std::fs::read_to_string(&path) + { + return Some(version.trim().to_string()); + } + } + None +} + +/// Query overmind inside the container to get running service names and their status. +/// Returns a map of service_name → "running" | "stopped" | "dead". +pub fn overmind_status(config: &Config) -> std::collections::BTreeMap { + let mut result = std::collections::BTreeMap::new(); + + let output = Command::new("docker") + .args(["exec", &config.docker.container, "overmind", "status"]) + .output(); + + let Ok(output) = output else { + return result; + }; + + // overmind status output: + // PROCESS PID STATUS + // api 5796 running + // sidekiq 5797 running + let stdout = String::from_utf8_lossy(&output.stdout); + for line in stdout.lines().skip(1) { + // Skip header + let parts: Vec<&str> = line.split_whitespace().collect(); + if parts.len() >= 3 { + let name = parts[0].to_string(); + let status = parts[2].to_string(); + result.insert(name, status); + } + } + + result +} + +/// Generate a docker-compose.yml with only the ports and mounts needed +/// for the requested services. Written to `.docker-sessions/.dev/docker-compose.yml`. 
+pub fn generate_compose( + config: &Config, + services: &[String], + project_root: &Path, +) -> Result { + let compose_dir = project_root.join(".docker-sessions/.dev"); + std::fs::create_dir_all(&compose_dir)?; + let compose_path = compose_dir.join("docker-compose.yml"); + + // Collect ports and repo names for selected services (+ companions) + let mut ports = Vec::new(); + let mut selected_repos = Vec::new(); + + for svc_name in services { + if let Some(svc) = config.services.get(svc_name) { + if let Some(port) = svc.port { + ports.push(port); + } + if let Some(repo) = &svc.repo + && !selected_repos.contains(repo) + { + selected_repos.push(repo.clone()); + } + // Include companion + if let Some(companion) = &svc.companion + && let Some(comp) = config.services.get(companion) + { + if let Some(port) = comp.port { + ports.push(port); + } + if let Some(repo) = &comp.repo + && !selected_repos.contains(repo) + { + selected_repos.push(repo.clone()); + } + } + } + } + + // Service discovery env vars — always include all services so inter-service + // communication works regardless of which are running + let mut service_urls = Vec::new(); + for (name, svc) in &config.services { + if let (Some(hostname), Some(port)) = (&svc.hostname, svc.port) { + let env_key = format!("{}_SERVICE_URL", name.to_uppercase().replace('-', "_")); + service_urls.push(format!(" - {}=http://{}:{}", env_key, hostname, port)); + } + } + + // Collect per-service env vars from selected services (+ companions) + let mut service_env_lines = Vec::new(); + for svc_name in services { + if let Some(svc) = config.services.get(svc_name) { + // Shared env, then docker-specific (docker overrides shared) + for (key, val) in svc.env.iter().chain(svc.env_docker.iter()) { + service_env_lines.push(format!(" - {}={}", key, val)); + } + if let Some(companion) = &svc.companion + && let Some(comp) = config.services.get(companion) + { + for (key, val) in comp.env.iter().chain(comp.env_docker.iter()) { + 
service_env_lines.push(format!(" - {}={}", key, val)); + } + } + } + } + + // Build ports section + let ports_yaml: Vec = ports + .iter() + .map(|p| format!(" - \"{}:{}\"", p, p)) + .collect(); + + // Build volume mounts — only for repos we need, plus always mount all + // (Docker creates empty dirs for unmounted repos, harmless) + let docker_dir = project_root.join("docker"); + let home = std::env::var("HOME").unwrap_or_else(|_| "/tmp".into()); + + let mut content = format!( + r#"# Generated by tb-devctl — do not edit manually +services: + workspace: + image: productive-dev:base + container_name: {container} + hostname: productive-dev + env_file: + - .env.session + environment: + - SESSION_NAME=productive-dev + - SESSION_MODE=dev + - SELECTED_REPOS={selected_repos} + - MYSQL_HOST=productive-dev-mysql + - MYSQL_PORT=3306 + - MYSQL_USER=root + - MYSQL_PASSWORD= + - REDIS_URL=redis://productive-dev-redis:6379/0 + - redis_host=productive-dev-redis + - MEILISEARCH_URL=http://productive-dev-meilisearch:7700 + - MEMCACHE_SERVERS=productive-dev-memcached:11211 + - cache_url=productive-dev-memcached:11211 + - db_host=productive-dev-mysql + - RAISE_ON_MISSING_FLAGS=false + - RAISE_ON_MISSING_FEATURES=false + - RAILS_ENV=development + - NODE_ENV=development + - COREPACK_ENABLE_DOWNLOAD_PROMPT=0 + - COREPACK_ENABLE_AUTO_PIN=0 + - CI=true + - PUPPETEER_EXECUTABLE_PATH=/opt/chromium/chrome-linux/chrome + - PRODUCTIVE_API_BASE_URL=http://api.productive.io.localhost:3000 +{service_urls} +{service_env} + ports: +{ports} + volumes: +"#, + container = config.docker.container, + selected_repos = selected_repos.join(","), + service_urls = service_urls.join("\n"), + service_env = service_env_lines.join("\n"), + ports = ports_yaml.join("\n"), + ); + + // Mount all repos (static, same as dev-compose.yml) + for svc in config.services.values() { + if let Some(repo) = &svc.repo { + content.push_str(&format!( + " - {}/repos/{}:/workspace/{}\n", + project_root.display(), + repo, + repo + )); 
+ } + } + + // Procfile, entrypoint, config, AWS + content.push_str(&format!( + r#" - {compose_dir}/Procfile.dev:/workspace/Procfile.dev + - {docker_dir}/entrypoint.sh:/entrypoint.sh:ro + - {docker_dir}/config:/docker-config:ro + - {home}/.aws:/home/dev/.aws:ro + security_opt: + - no-new-privileges:true + cap_drop: + - ALL + cap_add: + - CHOWN + - DAC_OVERRIDE + - FOWNER + - SETGID + - SETUID + - NET_BIND_SERVICE + deploy: + resources: + limits: + memory: 8G + cpus: "4" + pids: 2048 + healthcheck: + test: ["CMD", "test", "-f", "/tmp/.session-ready"] + interval: 10s + timeout: 5s + retries: 60 + start_period: 120s + stdin_open: true + tty: true + networks: + - productive-dev-net + +networks: + productive-dev-net: + external: true +"#, + compose_dir = compose_dir.display(), + docker_dir = docker_dir.display(), + home = home, + )); + + std::fs::write(&compose_path, content)?; + Ok(compose_path) +} + +/// Check if the dev container is currently running. +pub fn container_is_running(config: &Config) -> bool { + // Use ^name$ anchor for exact match (docker filter does substring by default) + Command::new("docker") + .args([ + "ps", + "--filter", + &format!("name=^{}$", config.docker.container), + "--filter", + "status=running", + "--format", + "{{.Names}}", + ]) + .output() + .is_ok_and(|o| !o.stdout.is_empty()) +} + +/// Stop the dev container. 
+pub fn stop_container(config: &Config, project_root: &Path) -> Result<()> { + let compose_file = generated_compose_path(project_root); + // Fall back to static compose if generated doesn't exist + let compose_file = if compose_file.exists() { + compose_file + } else { + project_root.join(&config.docker.compose_file) + }; + + let status = Command::new("docker") + .args([ + "compose", + "-p", + &config.docker.compose_project, + "-f", + &compose_file.to_string_lossy(), + "down", + ]) + .status()?; + + if !status.success() { + return Err(Error::Other("Failed to stop dev container".into())); + } + Ok(()) +} + +fn generated_compose_path(project_root: &Path) -> std::path::PathBuf { + project_root.join(".docker-sessions/.dev/docker-compose.yml") +} + +/// Start the dev container using the generated compose file. +pub fn start_container(config: &Config, project_root: &Path, services: &[String]) -> Result<()> { + // Generate compose with only the needed ports + let compose_file = generate_compose(config, services, project_root)?; + + let status = Command::new("docker") + .args([ + "compose", + "-p", + &config.docker.compose_project, + "-f", + &compose_file.to_string_lossy(), + "up", + "-d", + ]) + .status()?; + if !status.success() { + return Err(Error::Other("Failed to start dev container".into())); + } + Ok(()) +} + +/// Wait for the container healthcheck to pass. +/// Timeout: 10 minutes (first-time setup may compile Ruby/Node from source). 
+pub fn wait_for_healthy(config: &Config) -> Result<()> { + let container = &config.docker.container; + for i in 0..300 { + let output = Command::new("docker") + .args(["inspect", "--format", "{{.State.Health.Status}}", container]) + .output()?; + + let status = String::from_utf8_lossy(&output.stdout).trim().to_string(); + if status == "healthy" { + return Ok(()); + } + + if i % 15 == 0 && i > 0 { + eprint!(" {}s", i * 2); + } else if i % 5 == 0 { + eprint!("."); + } + std::thread::sleep(std::time::Duration::from_secs(2)); + } + + Err(Error::Other( + "Container did not become healthy within 10 minutes".into(), + )) +} diff --git a/crates/tb-devctl/src/error.rs b/crates/tb-devctl/src/error.rs new file mode 100644 index 0000000..b3b881b --- /dev/null +++ b/crates/tb-devctl/src/error.rs @@ -0,0 +1 @@ +toolbox_core::define_error!(Error); diff --git a/crates/tb-devctl/src/health.rs b/crates/tb-devctl/src/health.rs new file mode 100644 index 0000000..1a34110 --- /dev/null +++ b/crates/tb-devctl/src/health.rs @@ -0,0 +1,547 @@ +use std::net::TcpStream; +use std::process::Command; +use std::time::Duration; + +/// Check if a TCP port is listening on localhost. +pub fn port_is_open(port: u16) -> bool { + TcpStream::connect_timeout( + &format!("127.0.0.1:{}", port).parse().unwrap(), + Duration::from_millis(200), + ) + .is_ok() +} + +/// Check if Docker daemon is running. +pub fn docker_is_running() -> bool { + Command::new("docker") + .args(["info"]) + .stdout(std::process::Stdio::null()) + .stderr(std::process::Stdio::null()) + .status() + .is_ok_and(|s| s.success()) +} + +/// Check if Caddy admin API is responding (localhost:2019). +pub fn caddy_is_running() -> bool { + port_is_open(2019) +} + +/// Check if the shared infrastructure is running. 
+pub fn infra_is_running(config: &crate::config::Config, project_root: &std::path::Path) -> bool { + let compose_file = project_root.join(&config.infra.compose_file); + compose_is_running( + &config.infra.compose_project, + &compose_file.to_string_lossy(), + ) +} + +/// Check if a Docker compose project has running containers. +pub fn compose_is_running(project: &str, compose_file: &str) -> bool { + Command::new("docker") + .args([ + "compose", + "-p", + project, + "-f", + compose_file, + "ps", + "--quiet", + ]) + .output() + .is_ok_and(|o| !o.stdout.is_empty()) +} + +/// Get container states from a docker compose project. +/// Returns a map of service_name → status string (e.g., "Up 7 hours (healthy)"). +pub fn compose_container_states( + project: &str, + compose_file: &str, +) -> std::collections::BTreeMap { + let mut result = std::collections::BTreeMap::new(); + + let output = Command::new("docker") + .args([ + "compose", + "-p", + project, + "-f", + compose_file, + "ps", + "--format", + "{{.Service}}\t{{.Status}}", + ]) + .output(); + + if let Ok(output) = output { + let stdout = String::from_utf8_lossy(&output.stdout); + for line in stdout.lines() { + if let Some((service, status)) = line.split_once('\t') { + result.insert(service.to_string(), status.to_string()); + } + } + } + + result +} + +/// AWS SSO session status. +pub enum AwsSsoStatus { + /// Valid session with optional time remaining + Valid(Option), + /// Session expired or not authenticated + Expired, + /// AWS CLI not installed + NotInstalled, +} + +/// Check AWS SSO session validity and remaining time. 
+pub fn aws_sso_status() -> AwsSsoStatus { + // First check if aws CLI works + let ok = Command::new("aws") + .args(["sts", "get-caller-identity", "--no-cli-pager"]) + .stdout(std::process::Stdio::null()) + .stderr(std::process::Stdio::null()) + .status(); + + match ok { + Err(_) => return AwsSsoStatus::NotInstalled, + Ok(s) if !s.success() => return AwsSsoStatus::Expired, + _ => {} + } + + // Session is valid — try to find expiry from SSO cache + let remaining = sso_session_remaining(); + AwsSsoStatus::Valid(remaining) +} + +/// Convenience check for simple valid/invalid. +pub fn aws_sso_is_valid() -> bool { + matches!(aws_sso_status(), AwsSsoStatus::Valid(_)) +} + +/// Read SSO session expiry from ~/.aws/sso/cache/*.json. +/// Returns remaining duration if found. +fn sso_session_remaining() -> Option { + let cache_dir = dirs::home_dir()?.join(".aws/sso/cache"); + if !cache_dir.exists() { + return None; + } + + let mut newest_expiry: Option> = None; + let mut newest_mtime = std::time::SystemTime::UNIX_EPOCH; + + let Ok(entries) = std::fs::read_dir(&cache_dir) else { + return None; + }; + for entry in entries { + let Ok(entry) = entry else { continue }; + let path = entry.path(); + if path.extension().is_some_and(|e| e == "json") { + let Ok(content) = std::fs::read_to_string(&path) else { + continue; + }; + // Only consider files with an accessToken (SSO session files) + if !content.contains("accessToken") { + continue; + } + let Some(mtime) = entry.metadata().ok().and_then(|m| m.modified().ok()) else { + continue; + }; + if mtime > newest_mtime { + newest_mtime = mtime; + // Parse expiresAt from JSON + if let Ok(json) = serde_json::from_str::(&content) + && let Some(expires_at) = json.get("expiresAt").and_then(|v| v.as_str()) + && let Ok(dt) = expires_at.parse::>() + { + newest_expiry = Some(dt); + } + } + } + } + + let expiry = newest_expiry?; + let now = chrono::Utc::now(); + if expiry > now { + Some((expiry - now).to_std().ok()?) 
+ } else { + None // Already expired + } +} + +/// Format a duration as human-readable time remaining. +pub fn format_duration(d: &std::time::Duration) -> String { + let total_secs = d.as_secs(); + let hours = total_secs / 3600; + let mins = (total_secs % 3600) / 60; + if hours > 0 { + format!("{}h {}m", hours, mins) + } else { + format!("{}m", mins) + } +} + +/// Result of checking a single requirement. +pub struct RequirementStatus { + pub ok: bool, + /// Human-readable detail for the issue line (shown on failure). + pub detail: Option, +} + +/// Check whether a local requirement is satisfied. +pub fn check_requirement(req: &str, repo_path: Option<&std::path::Path>) -> RequirementStatus { + match req { + "ruby" => check_ruby(repo_path), + "node" => check_node(repo_path), + "python3" => check_python(repo_path), + "chromium" => check_chromium(), + _ => check_command(req), + } +} + +// --------------------------------------------------------------------------- +// Ruby +// --------------------------------------------------------------------------- + +fn check_ruby(repo_path: Option<&std::path::Path>) -> RequirementStatus { + let home = dirs::home_dir().unwrap_or_default(); + + let manager = if home.join(".rvm").exists() { + Some("rvm") + } else if command_exists("rbenv") { + Some("rbenv") + } else if command_exists("asdf") && asdf_has_plugin("ruby") { + Some("asdf") + } else { + None + }; + + if manager.is_none() && !command_exists("ruby") { + return fail("no version manager found (install rvm or rbenv)"); + } + + let version_check = + repo_path.and_then(|p| check_runtime_version(p, ".ruby-version", manager, "ruby")); + runtime_result( + version_check, + manager, + manager.is_some() || command_exists("ruby"), + ) +} + +// --------------------------------------------------------------------------- +// Node +// --------------------------------------------------------------------------- + +fn check_node(repo_path: Option<&std::path::Path>) -> RequirementStatus { + let 
home = dirs::home_dir().unwrap_or_default(); + + // Detect version manager — n is NOT supported + let manager = if home.join(".nvm").exists() || std::env::var("NVM_DIR").is_ok() { + Some("nvm") + } else if command_exists("fnm") { + Some("fnm") + } else if command_exists("volta") { + Some("volta") + } else if command_exists("asdf") && asdf_has_plugin("nodejs") { + Some("asdf") + } else { + None + }; + + // n detected as the only tool → hard fail + if manager.is_none() && command_exists("n") { + return fail("n is not supported (install nvm or fnm for multi-version)"); + } + + if manager.is_none() && !command_exists("node") { + return fail("no version manager found (install nvm or fnm)"); + } + + let version_check = repo_path.and_then(|p| { + check_runtime_version(p, ".node-version", manager, "node") + .or_else(|| check_runtime_version(p, ".nvmrc", manager, "node")) + }); + runtime_result( + version_check, + manager, + manager.is_some() || command_exists("node"), + ) +} + +// --------------------------------------------------------------------------- +// Python +// --------------------------------------------------------------------------- + +fn check_python(repo_path: Option<&std::path::Path>) -> RequirementStatus { + let manager = if command_exists("pyenv") { + Some("pyenv") + } else if command_exists("asdf") && asdf_has_plugin("python") { + Some("asdf") + } else { + None + }; + + if manager.is_none() && !command_exists("python3") { + return fail("not found"); + } + + let version_check = + repo_path.and_then(|p| check_runtime_version(p, ".python-version", manager, "python3")); + runtime_result(version_check, manager, true) +} + +// --------------------------------------------------------------------------- +// Chromium +// --------------------------------------------------------------------------- + +fn check_chromium() -> RequirementStatus { + let home = dirs::home_dir().unwrap_or_default(); + let chrome_dir = home.join(".cache/puppeteer/chrome"); + + // Check for 
at least one Chrome binary in the Puppeteer cache + if chrome_dir.is_dir() + && let Ok(entries) = std::fs::read_dir(&chrome_dir) + { + for entry in entries.flatten() { + let sub = entry.path(); + if sub.is_dir() && has_chrome_binary(&sub) { + return RequirementStatus { + ok: true, + detail: None, + }; + } + } + } + + // Fallback: system chromium + if command_exists("chromium") { + return RequirementStatus { + ok: true, + detail: None, + }; + } + + fail("not found (run: npx puppeteer install chrome)") +} + +// --------------------------------------------------------------------------- +// Shared runtime result builder +// --------------------------------------------------------------------------- + +/// Build a RequirementStatus from a version check result. +/// Used by all three runtime checks (ruby, node, python). +fn runtime_result( + version_check: Option<(String, bool)>, + manager: Option<&str>, + fallback_ok: bool, +) -> RequirementStatus { + match version_check { + Some((_version, true)) => RequirementStatus { + ok: true, + detail: None, + }, + Some((version, false)) => RequirementStatus { + ok: false, + detail: Some(format!( + "{} not installed ({})", + version, + manager.unwrap_or("no version manager") + )), + }, + None => RequirementStatus { + ok: fallback_ok, + detail: None, + }, + } +} + +fn fail(detail: &str) -> RequirementStatus { + RequirementStatus { + ok: false, + detail: Some(detail.into()), + } +} + +/// Check if a Puppeteer chrome version directory contains an actual Chrome binary. +fn has_chrome_binary(version_dir: &std::path::Path) -> bool { + // Structure: /chrome-mac-arm64/Google Chrome for Testing.app/... 
+ // or: /chrome-linux64/chrome + if let Ok(entries) = std::fs::read_dir(version_dir) { + for entry in entries.flatten() { + let p = entry.path(); + if p.is_dir() { + // macOS: look for .app bundle + if let Ok(inner) = std::fs::read_dir(&p) { + for inner_entry in inner.flatten() { + let name = inner_entry.file_name(); + let name_str = name.to_string_lossy(); + if name_str.ends_with(".app") || name_str == "chrome" { + return true; + } + } + } + } + } + } + false +} + +// --------------------------------------------------------------------------- +// Generic command check +// --------------------------------------------------------------------------- + +fn check_command(cmd: &str) -> RequirementStatus { + if command_exists(cmd) { + RequirementStatus { + ok: true, + detail: None, + } + } else { + fail("not found") + } +} + +// --------------------------------------------------------------------------- +// Shared helpers +// --------------------------------------------------------------------------- + +fn command_exists(cmd: &str) -> bool { + Command::new("which") + .arg(cmd) + .stdout(std::process::Stdio::null()) + .stderr(std::process::Stdio::null()) + .status() + .is_ok_and(|s| s.success()) +} + +fn asdf_has_plugin(plugin: &str) -> bool { + Command::new("asdf") + .args(["list", plugin]) + .stdout(std::process::Stdio::null()) + .stderr(std::process::Stdio::null()) + .status() + .is_ok_and(|s| s.success()) +} + +/// Read a version file from the repo and check if the version is installed. +/// Returns (wanted_version, is_installed). 
+fn check_runtime_version(
    repo_path: &std::path::Path,
    version_filename: &str,
    manager: Option<&str>,
    runtime: &str,
) -> Option<(String, bool)> {
    // No readable, non-empty version file -> nothing to check; caller falls
    // back to its PATH-level check.
    let wanted = read_version_file(repo_path, version_filename)?;

    // For each known (runtime, manager) pair, look for the wanted version
    // directly in that manager's install directory instead of shelling out.
    let installed = match (runtime, manager) {
        // Ruby
        ("ruby", Some("rvm")) => {
            let home = dirs::home_dir()?;
            // rvm names installs "ruby-<version>".
            home.join(".rvm/rubies")
                .join(format!("ruby-{}", wanted))
                .exists()
        }
        ("ruby", Some("rbenv")) => {
            let home = dirs::home_dir()?;
            home.join(".rbenv/versions").join(&wanted).exists()
        }
        ("ruby", Some("asdf")) => {
            let home = dirs::home_dir()?;
            home.join(".asdf/installs/ruby").join(&wanted).exists()
        }

        // Node
        ("node", Some("nvm")) => {
            // Honor NVM_DIR when set, default to ~/.nvm otherwise.
            let nvm_dir = std::env::var("NVM_DIR")
                .map(std::path::PathBuf::from)
                .unwrap_or_else(|_| dirs::home_dir().unwrap_or_default().join(".nvm"));
            // nvm directories always carry a leading 'v'.
            let v = if wanted.starts_with('v') {
                wanted.clone()
            } else {
                format!("v{}", wanted)
            };
            nvm_dir.join("versions/node").join(&v).exists()
        }
        ("node", Some("fnm")) => {
            let home = dirs::home_dir().unwrap_or_default();
            let v = if wanted.starts_with('v') {
                wanted.clone()
            } else {
                format!("v{}", wanted)
            };
            // fnm has used both locations across releases; accept either.
            home.join(".local/share/fnm/node-versions")
                .join(&v)
                .exists()
                || home.join(".fnm/node-versions").join(&v).exists()
        }
        ("node", Some("volta")) => {
            let home = dirs::home_dir().unwrap_or_default();
            // volta stores versions without the 'v' prefix.
            let v = wanted.strip_prefix('v').unwrap_or(&wanted);
            home.join(".volta/tools/image/node").join(v).exists()
        }
        ("node", Some("asdf")) => {
            let home = dirs::home_dir().unwrap_or_default();
            home.join(".asdf/installs/nodejs").join(&wanted).exists()
        }

        // Python
        ("python3", Some("pyenv")) => {
            let home = dirs::home_dir()?;
            home.join(".pyenv/versions").join(&wanted).exists()
        }
        ("python3", Some("asdf")) => {
            let home = dirs::home_dir()?;
            home.join(".asdf/installs/python").join(&wanted).exists()
        }

        // Fallback: compare active version
        _ => check_current_version(runtime,
&wanted),
    };

    Some((wanted, installed))
}

/// Read a version file, trim whitespace.
/// Returns None when the file is missing, unreadable, or empty after trimming.
// NOTE(review): the return type parameter was lost in extraction; restored as
// Option<String> — the body returns Some(trimmed) where `trimmed` is a String.
fn read_version_file(repo_path: &std::path::Path, filename: &str) -> Option<String> {
    let content = std::fs::read_to_string(repo_path.join(filename)).ok()?;
    let trimmed = content.trim().to_string();
    if trimmed.is_empty() {
        None
    } else {
        Some(trimmed)
    }
}

/// Check if the currently active version of a command matches the wanted version.
/// Runs `<cmd> --version` and substring-matches `wanted` (leading 'v'
/// stripped) against stdout, so "18.1" matches "v18.1.0" output.
fn check_current_version(cmd: &str, wanted: &str) -> bool {
    let output = Command::new(cmd).arg("--version").output().ok();
    if let Some(output) = output {
        let version_str = String::from_utf8_lossy(&output.stdout);
        let clean = wanted.strip_prefix('v').unwrap_or(wanted);
        version_str.contains(clean)
    } else {
        false
    }
}

/// Get the PID and command of the process listening on a port.
/// Returns None if no process is found.
/// Shells out to `lsof -i :<port> -sTCP:LISTEN -n -P`; when nothing listens,
/// lsof emits no data rows and the `.nth(1)` lookup yields None.
pub fn port_owner(port: u16) -> Option<(u32, String)> {
    let output = Command::new("lsof")
        .args(["-i", &format!(":{}", port), "-sTCP:LISTEN", "-n", "-P"])
        .output()
        .ok()?;

    let stdout = String::from_utf8_lossy(&output.stdout);
    // Skip header line, parse first result
    let line = stdout.lines().nth(1)?;
    // lsof default columns: COMMAND PID USER ...
    let parts: Vec<&str> = line.split_whitespace().collect();
    if parts.len() >= 2 {
        let pid: u32 = parts[1].parse().ok()?;
        let cmd = parts[0].to_string();
        Some((pid, cmd))
    } else {
        None
    }
}
diff --git a/crates/tb-devctl/src/lib.rs b/crates/tb-devctl/src/lib.rs
new file mode 100644
index 0000000..f515c53
--- /dev/null
+++ b/crates/tb-devctl/src/lib.rs
@@ -0,0 +1,6 @@
+pub mod commands;
+pub mod config;
+pub mod docker;
+pub mod error;
+pub mod health;
+pub mod state;
diff --git a/crates/tb-devctl/src/main.rs b/crates/tb-devctl/src/main.rs
new file mode 100644
index 0000000..47fd7ea
--- /dev/null
+++ b/crates/tb-devctl/src/main.rs
@@ -0,0 +1,167 @@
+use std::env;
+
+use clap::Parser;
+use colored::Colorize;
+
+use tb_devctl::commands;
+use tb_devctl::config;
+
+// CLI surface. The doc comments on variants/fields below are clap help text
+// and are therefore user-visible output — do not reword casually.
+#[derive(Parser)]
+#[command(
+    name = "tb-devctl",
+    version,
+    about = "Local dev environment orchestrator for Productive services"
+)]
+struct Cli {
+    #[command(subcommand)]
+    command: Commands,
+}
+
+// NOTE(review): the Option/collection type parameters in this enum were lost
+// in extraction; restored as String (plain clap string options) — `dir` in
+// particular is consumed via `dir.as_deref()` in main, consistent with
+// Option<String>; confirm against commands::local::start's signature.
+#[derive(clap::Subcommand)]
+enum Commands {
+    /// Show status of all services and infrastructure
+    Status,
+
+    /// Start services
+    Start {
+        /// Comma-separated list of services, or omit when using --preset
+        services: Option<String>,
+
+        /// Use a named preset from tb-devctl.toml
+        #[arg(long)]
+        preset: Option<String>,
+
+        /// Run in Docker container
+        #[arg(long, conflicts_with_all = ["local"])]
+        docker: bool,
+
+        /// Run locally from repos/ (or --dir)
+        #[arg(long, conflicts_with_all = ["docker"])]
+        local: bool,
+
+        /// Service directory override (local mode only)
+        #[arg(long, requires = "local")]
+        dir: Option<String>,
+
+        /// Run in background (local mode only)
+        #[arg(long, requires = "local")]
+        bg: bool,
+    },
+
+    /// Stop services
+    Stop {
+        /// Service name (local mode). Omit to stop Docker container.
+        service: Option<String>,
+    },
+
+    /// Restart a service inside the running container
+    Restart {
+        /// Service name
+        service: String,
+    },
+
+    /// View logs for a service
+    Logs {
+        /// Service name
+        service: String,
+    },
+
+    /// First-time setup for a service (secrets, schema, seeding)
+    Init {
+        /// Service name
+        service: String,
+    },
+
+    /// Manage shared infrastructure (MySQL, Redis, etc.)
+ Infra { + #[command(subcommand)] + action: InfraAction, + }, + + /// Diagnose environment health + Doctor, +} + +#[derive(clap::Subcommand)] +enum InfraAction { + /// Start shared infrastructure + Up, + /// Stop shared infrastructure + Down, + /// Check infrastructure status + Status, +} + +fn main() { + let cli = Cli::parse(); + + let cwd = env::current_dir().unwrap_or_else(|e| { + eprintln!( + "{} Cannot determine current directory: {}", + "Error:".red().bold(), + e + ); + std::process::exit(1); + }); + + let (cfg, root) = match config::find_and_load(&cwd) { + Ok(result) => result, + Err(e) => { + eprintln!("{} {}", "Error:".red().bold(), e); + std::process::exit(1); + } + }; + + let result = match cli.command { + Commands::Status => commands::status::run(&cfg, &root), + Commands::Start { + services, + preset, + docker, + local, + dir, + bg, + } => { + if let Some(preset_name) = preset { + commands::preset::run(&cfg, &root, &preset_name) + } else if let Some(services) = services { + if docker { + let svc_list: Vec = + services.split(',').map(|s| s.trim().to_string()).collect(); + commands::start::docker(&cfg, &root, &svc_list) + } else if local { + commands::local::start(&cfg, &root, &services, dir.as_deref(), bg) + } else { + Err(tb_devctl::error::Error::Other( + "Specify --docker or --local mode.".into(), + )) + } + } else { + Err(tb_devctl::error::Error::Other( + "Specify services or --preset.".into(), + )) + } + } + Commands::Stop { service } => { + if let Some(svc) = service { + commands::local::stop(&root, &svc) + } else { + commands::stop::run(&cfg, &root) + } + } + Commands::Restart { service } => commands::stop::restart_service(&cfg, &service), + Commands::Logs { service } => commands::logs::run(&cfg, &root, &service), + Commands::Init { service } => commands::init::run(&cfg, &root, &service), + Commands::Infra { action } => match action { + InfraAction::Up => commands::infra::up(&cfg, &root), + InfraAction::Down => commands::infra::down(&cfg, &root), 
+            InfraAction::Status => commands::infra::status(&cfg, &root),
+        },
+        Commands::Doctor => commands::doctor::run(&cfg, &root),
+    };
+
+    // Uniform error reporting for every subcommand.
+    if let Err(e) = result {
+        eprintln!("{} {}", "Error:".red().bold(), e);
+        std::process::exit(1);
+    }
+}
diff --git a/crates/tb-devctl/src/state.rs b/crates/tb-devctl/src/state.rs
new file mode 100644
index 0000000..80545d2
--- /dev/null
+++ b/crates/tb-devctl/src/state.rs
@@ -0,0 +1,51 @@
+use std::collections::BTreeMap;
+use std::path::{Path, PathBuf};
+
+use serde::{Deserialize, Serialize};
+
+use crate::error::Result;
+
+/// Persistent record of services tb-devctl has started, keyed by service
+/// name. BTreeMap keeps the serialized JSON stably ordered across saves.
+// NOTE(review): type parameters in this file were lost in extraction;
+// restored as BTreeMap<String, ServiceState>, Option<PathBuf>, Option<u32>
+// and Result<Self> — PathBuf is imported above, and pid matches the u32 PID
+// returned by health::port_owner. Confirm against the start commands.
+#[derive(Debug, Default, Serialize, Deserialize)]
+pub struct State {
+    #[serde(default)]
+    pub services: BTreeMap<String, ServiceState>,
+}
+
+/// Per-service bookkeeping recorded at start time.
+#[derive(Debug, Serialize, Deserialize)]
+pub struct ServiceState {
+    // Launch mode — presumably "docker"/"local"; exact values are set by the
+    // start commands, not visible here.
+    pub mode: String,
+    pub started_at: String,
+    // Directory override used for local mode, if any.
+    #[serde(default)]
+    pub dir: Option<PathBuf>,
+    // PID of a backgrounded local process, if any.
+    #[serde(default)]
+    pub pid: Option<u32>,
+}
+
+impl State {
+    /// Load state from `.tb-devctl/state.json` under the project root.
+    /// Returns empty state if file doesn't exist.
+    pub fn load(project_root: &Path) -> Result<Self> {
+        let path = state_path(project_root);
+        if !path.exists() {
+            return Ok(Self::default());
+        }
+        let content = std::fs::read_to_string(&path)?;
+        let state: Self = serde_json::from_str(&content)?;
+        Ok(state)
+    }
+
+    /// Save state to `.tb-devctl/state.json` under the project root.
+    pub fn save(&self, project_root: &Path) -> Result<()> {
+        let path = state_path(project_root);
+        // Create `.tb-devctl/` on first save.
+        if let Some(parent) = path.parent() {
+            std::fs::create_dir_all(parent)?;
+        }
+        let content = serde_json::to_string_pretty(self)?;
+        std::fs::write(&path, content)?;
+        Ok(())
+    }
+}
+
+/// `<project_root>/.tb-devctl/state.json`.
+fn state_path(project_root: &Path) -> PathBuf {
+    project_root.join(".tb-devctl").join("state.json")
+}
diff --git a/scripts/bump.sh b/scripts/bump.sh
index 89fcbdf..57116f0 100755
--- a/scripts/bump.sh
+++ b/scripts/bump.sh
@@ -1,7 +1,7 @@
 #!/usr/bin/env bash
 set -euo pipefail
 
-VALID_TOOLS="tb-prod tb-sem tb-bug tb-lf"
+VALID_TOOLS="tb-prod tb-sem tb-bug tb-lf tb-devctl"
 
 usage() {
   echo "Usage: $0 "
diff --git a/scripts/install.sh b/scripts/install.sh
index ee0e0c4..1132b61 100755
--- a/scripts/install.sh
+++ b/scripts/install.sh
@@ -2,7 +2,7 @@
 set -euo pipefail
 
 REPO="productiveio/cli-toolbox"
-ALL_TOOLS="tb-prod tb-sem tb-bug tb-lf"
+ALL_TOOLS="tb-prod tb-sem tb-bug tb-lf tb-devctl"
 INSTALL_DIR="$HOME/.local/bin"
 
 # --- Flags ---