diff --git a/.gitignore b/.gitignore index f9e6b78c..8a5f20e5 100644 --- a/.gitignore +++ b/.gitignore @@ -181,4 +181,11 @@ bazel-* # Generated from uv.lock, not committed requirements_lock.txt -chipcompiler/tools/ecc_dreamplace/dreamplace \ No newline at end of file +chipcompiler/tools/ecc_dreamplace/dreamplace + +.humanize/ +humanize-* +docs/superpowers/ +findings.md +progress.md +task_plan.md diff --git a/README.cn.md b/README.cn.md index e026012d..bde0ad92 100644 --- a/README.cn.md +++ b/README.cn.md @@ -24,7 +24,7 @@ ECOS Chip Compiler 是一个**开源芯片设计自动化解决方案**,集成 GUI(ECOS Studio)已迁移至 [ecos-studio](https://github.com/0xharry/ecos-studio) 仓库。 **使用方式:** -- **CLI (`cli`)** - 命令行流程执行 +- **CLI (`ecc`)** - 面向项目的命令行流程执行 - **Python API** - 将 `chipcompiler` 作为库使用 @@ -32,22 +32,40 @@ GUI(ECOS Studio)已迁移至 [ecos-studio](https://github.com/0xharry/ecos-s ### CLI 流程运行 -可以使用 `nix run .#cli -- ...` 直接创建 workspace 并执行完整 RTL2GDS 流程。 +可以使用 `nix run .#cli -- ...` 创建 ECC 项目,校验 `ecc.toml`,并执行完整 RTL2GDS 流程。 ```bash -nix run .#cli -- --workspace ./ws \ - --rtl ./rtl/top.v \ - --design top \ - --top top \ - --clock clk \ - --pdk-root /path/to/ics55 -nix run .#cli -- --workspace ./ws \ - --rtl ./rtl/filelist.f \ - --design top \ - --top top \ - --clock clk \ - --pdk-root /path/to/ics55 \ - --freq 200 +nix run .#cli -- init gcd +cp ./rtl/gcd.v gcd/rtl/gcd.v +``` + +编辑 `gcd/ecc.toml`: + +```toml +[design] +name = "gcd" +top = "gcd" +rtl = ["rtl/gcd.v"] +clock_port = "clk" +frequency_mhz = 100.0 + +[pdk] +name = "ics55" +root = "/path/to/ics55" + +[flow] +preset = "rtl2gds" +run = "default" +``` + +然后校验并运行: + +```bash +nix run .#cli -- check --project gcd +nix run .#cli -- run --project gcd +nix run .#cli -- status --project gcd +nix run .#cli -- metrics --project gcd +nix run .#cli -- log --project gcd ``` ## 功能特性 diff --git a/README.md b/README.md index 63024725..363580ce 100644 --- a/README.md +++ b/README.md @@ -24,7 +24,7 @@ ECOS Chip Compiler is an **open-source chip design automation 
solution** that in The GUI (ECOS Studio) has been moved to the [ecos-studio](https://github.com/0xharry/ecos-studio) repo. **How to use:** -- **CLI (`cli`)** - Command-line flow execution +- **CLI (`ecc`)** - Project-oriented command-line flow execution - **Python API** - Use `chipcompiler` as a library @@ -32,22 +32,41 @@ The GUI (ECOS Studio) has been moved to the [ecos-studio](https://github.com/0xh ### CLI Flow Runner -Use `nix run .#cli -- ...` to create a workspace and run the full RTL2GDS flow directly. +Use `nix run .#cli -- ...` to create an ECC project, validate its `ecc.toml`, +and run the full RTL2GDS flow. ```bash -nix run .#cli -- --workspace ./ws \ - --rtl ./rtl/top.v \ - --design top \ - --top top \ - --clock clk \ - --pdk-root /path/to/ics55 -nix run .#cli -- --workspace ./ws \ - --rtl ./rtl/filelist.f \ - --design top \ - --top top \ - --clock clk \ - --pdk-root /path/to/ics55 \ - --freq 200 +nix run .#cli -- init gcd +cp ./rtl/gcd.v gcd/rtl/gcd.v +``` + +Edit `gcd/ecc.toml`: + +```toml +[design] +name = "gcd" +top = "gcd" +rtl = ["rtl/gcd.v"] +clock_port = "clk" +frequency_mhz = 100.0 + +[pdk] +name = "ics55" +root = "/path/to/ics55" + +[flow] +preset = "rtl2gds" +run = "default" +``` + +Then validate and run: + +```bash +nix run .#cli -- check --project gcd +nix run .#cli -- run --project gcd +nix run .#cli -- status --project gcd +nix run .#cli -- metrics --project gcd +nix run .#cli -- log --project gcd ``` ## Features diff --git a/chipcompiler/BUILD.bazel b/chipcompiler/BUILD.bazel index cfc01dc5..7805f8e7 100644 --- a/chipcompiler/BUILD.bazel +++ b/chipcompiler/BUILD.bazel @@ -76,6 +76,12 @@ py_library( ], ) +py_library( + name = "chipcompiler_cli_lib", + srcs = glob(["cli/**/*.py"]), + deps = [":chipcompiler_core"], +) + py_binary( name = "chipcompiler_cli", srcs = ["cli/main.py"], @@ -87,5 +93,7 @@ py_binary( env = { "CHIPCOMPILER_ICS55_PDK_ROOT": "$(location @icsprout55_pdk//:README.md)/..", }, - deps = [":chipcompiler_core"], + deps = [ 
+ ":chipcompiler_cli_lib", + ], ) diff --git a/chipcompiler/cli/artifacts.py b/chipcompiler/cli/artifacts.py new file mode 100644 index 00000000..8268c6db --- /dev/null +++ b/chipcompiler/cli/artifacts.py @@ -0,0 +1,63 @@ +import os + +from chipcompiler.cli.output import disclosure_cmd + +KNOWN_ROLES = {"config", "input", "output", "data", "feature", "report", "log", "script", "analysis"} + + +def _role_from_dirname(dirname: str) -> str: + return dirname if dirname in KNOWN_ROLES else "unknown" + + +def discover_artifacts(run_dir: str, step_token: str | None = None, + project: str | None = None, + run_id: str | None = None, + project_dir: str | None = None) -> tuple[list[dict], int]: + from chipcompiler.cli.inspect import discover_step_dirs, read_flow_json, _safe_steps + from chipcompiler.cli.output import normalize_step_name + + base_dir = project_dir or os.path.dirname(os.path.dirname(run_dir)) + step_dirs = discover_step_dirs(run_dir) + + flow_data = read_flow_json(run_dir) + flow_tokens = set() + if flow_data is not None and not isinstance(flow_data, str): + for s in _safe_steps(flow_data): + flow_tokens.add(normalize_step_name(s.get("name", ""))) + + if step_token is not None: + if step_token not in step_dirs and step_token not in flow_tokens: + return [{"kind": "error", "step": step_token, + "status": "unknown_step"}], 1 + if step_token not in step_dirs: + return [], 0 + tokens = [step_token] + else: + tokens = sorted(step_dirs.keys()) + + artifacts = [] + for token in tokens: + step_path = step_dirs[token] + for entry in sorted(os.listdir(step_path)): + subdir = os.path.join(step_path, entry) + if not os.path.isdir(subdir): + continue + role = _role_from_dirname(entry) + for root, _, files in os.walk(subdir): + for fname in sorted(files): + fpath = os.path.join(root, fname) + if os.path.isfile(fpath): + artifacts.append({ + "kind": "artifact", + "step": token, + "role": role, + "run": run_id or "default", + "path": os.path.relpath(fpath, base_dir), + 
"exists": True, + "inspect_cmd": disclosure_cmd(f"ecc artifacts {token} --json", project, run_id), + }) + + if not artifacts: + return [], 0 + + return artifacts, 0 diff --git a/chipcompiler/cli/commands.py b/chipcompiler/cli/commands.py new file mode 100644 index 00000000..7b489594 --- /dev/null +++ b/chipcompiler/cli/commands.py @@ -0,0 +1,38 @@ +from chipcompiler.cli.config import resolve_project_dir +from chipcompiler.cli.inspect import resolve_run_dir +from chipcompiler.cli.types import CommandContext, CommandResult, OutputMode + + +def build_context(args) -> CommandContext: + project = getattr(args, "project", None) + project_dir = resolve_project_dir(project) + + run_id = getattr(args, "run_id", None) + run_dir, run_id = resolve_run_dir(project_dir, run_id) + + if getattr(args, "jsonl", False): + mode = OutputMode.JSONL + elif getattr(args, "json", False): + mode = OutputMode.JSON + elif getattr(args, "plain", False): + mode = OutputMode.PLAIN + else: + mode = OutputMode.TEXT + + return CommandContext( + project_dir=project_dir, + project=project, + run_dir=run_dir, + run_id=run_id, + output_mode=mode, + ) + + +def dispatch(args, ctx: CommandContext) -> CommandResult: + from chipcompiler.cli import handlers + if args.command == "param": + return handlers.param(args, ctx) + handler = getattr(handlers, args.command, None) + if handler is None: + return CommandResult.err([], exit_code=1) + return handler(args, ctx) diff --git a/chipcompiler/cli/config.py b/chipcompiler/cli/config.py new file mode 100644 index 00000000..d81957ab --- /dev/null +++ b/chipcompiler/cli/config.py @@ -0,0 +1,247 @@ +import os +import tomllib +from dataclasses import dataclass, field + +SUPPORTED_PDK_NAMES = {"ics55"} +SUPPORTED_FLOW_PRESETS = {"rtl2gds"} +SUPPORTED_FLOW_RUNS = {"default"} +FILELIST_SUFFIXES = {".f", ".fl", ".filelist"} +RTL_SUFFIXES = {".v", ".sv", ".svh", ".vh"} + + +@dataclass +class ProjectConfig: + design_name: str = "" + design_top: str = "" + design_rtl: 
list[str] = field(default_factory=list) + design_clock_port: str = "" + design_frequency_mhz: float = 0.0 + + pdk_name: str = "" + pdk_root: str = "" + + flow_preset: str = "" + flow_run: str = "" + + config_path: str = "" + project_dir: str = "" + + params_overrides: dict[str, object] = field(default_factory=dict) + + +def load_project_config(config_path: str) -> ProjectConfig: + try: + with open(config_path, "rb") as f: + data = tomllib.load(f) + except tomllib.TOMLDecodeError as exc: + cfg = ProjectConfig(config_path=config_path) + cfg._toml_error = str(exc) + return cfg + return _parse_config(data, config_path) + + +def _parse_config(data: dict, config_path: str) -> ProjectConfig: + design = data.get("design", {}) + pdk = data.get("pdk", {}) + flow = data.get("flow", {}) + + if not isinstance(design, dict): + design = {} + if not isinstance(pdk, dict): + pdk = {} + if not isinstance(flow, dict): + flow = {} + + project_dir = os.path.dirname(os.path.abspath(config_path)) + + try: + freq = float(design.get("frequency_mhz", 0)) + except (TypeError, ValueError): + freq = 0.0 + + rtl_raw = design.get("rtl", []) + if not isinstance(rtl_raw, list): + rtl_raw = [] + design_rtl = [v for v in rtl_raw if isinstance(v, str)] + + def _str(val, default=""): + return val if isinstance(val, str) else default + + cfg = ProjectConfig( + design_name=_str(design.get("name", "")), + design_top=_str(design.get("top", "")), + design_rtl=design_rtl, + design_clock_port=_str(design.get("clock_port", "")), + design_frequency_mhz=freq, + pdk_name=_str(pdk.get("name", "")), + pdk_root=_str(pdk.get("root", "")), + flow_preset=_str(flow.get("preset", "")), + flow_run=_str(flow.get("run", "default"), "default"), + config_path=config_path, + project_dir=project_dir, + ) + + params_raw = data.get("params") + if isinstance(params_raw, dict): + from chipcompiler.cli.params import parse_toml_params + flat, param_errors = parse_toml_params(params_raw) + cfg.params_overrides = flat + if 
param_errors: + cfg._param_errors = param_errors + + return cfg + + +def resolve_project_dir(project: str | None) -> str: + if project: + return os.path.abspath(project) + return os.getcwd() + + +def find_config_path(project_dir: str) -> str | None: + path = os.path.join(project_dir, "ecc.toml") + return path if os.path.isfile(path) else None + + +def validate_project_config(cfg: ProjectConfig) -> list[str]: + toml_error = getattr(cfg, "_toml_error", None) + if toml_error: + return [f"malformed ecc.toml: {toml_error}"] + + errors = [] + + param_errors = getattr(cfg, "_param_errors", None) + if param_errors: + for pe in param_errors: + errors.append(f"invalid params: {pe}") + + if not cfg.design_name: + errors.append("design.name is required") + if not cfg.design_top: + errors.append("design.top is required") + if not cfg.design_clock_port: + errors.append("design.clock_port is required") + if cfg.design_frequency_mhz <= 0: + errors.append("design.frequency_mhz must be greater than 0") + if not cfg.design_rtl: + errors.append("design.rtl must have at least one entry") + elif len(cfg.design_rtl) > 1: + errors.append("design.rtl must have exactly one entry; use a filelist for multiple sources") + + if not cfg.pdk_name: + errors.append("pdk.name is required") + elif cfg.pdk_name not in SUPPORTED_PDK_NAMES: + errors.append(f"unsupported pdk.name: {cfg.pdk_name}") + + pdk_root = _resolve_pdk_root(cfg) + if pdk_root: + if not os.path.isdir(pdk_root): + errors.append(f"pdk.root is not a directory: {cfg.pdk_root or '$(env)'}") + else: + pdk_err = _validate_pdk_contents(cfg.pdk_name, pdk_root) + if pdk_err: + errors.append(pdk_err) + else: + errors.append("pdk.root is required") + + if not cfg.flow_preset: + errors.append("flow.preset is required") + elif cfg.flow_preset not in SUPPORTED_FLOW_PRESETS: + errors.append(f"unsupported flow.preset: {cfg.flow_preset}") + + if cfg.flow_run and cfg.flow_run not in SUPPORTED_FLOW_RUNS: + errors.append(f"unsupported flow.run: 
{cfg.flow_run}") + + if len(cfg.design_rtl) == 1: + rtl_path = _resolve_path(cfg.project_dir, cfg.design_rtl[0]) + if not os.path.exists(rtl_path): + errors.append(f"rtl path does not exist: {cfg.design_rtl[0]}") + elif os.path.isdir(rtl_path): + errors.append(f"rtl path must be a file, not a directory: {cfg.design_rtl[0]}") + else: + suffix = os.path.splitext(rtl_path)[1].lower() + if suffix in FILELIST_SUFFIXES: + from chipcompiler.utility.filelist import validate_filelist + try: + _, missing = validate_filelist(rtl_path) + if missing: + errors.append(f"filelist references missing files: {', '.join(missing)}") + except (ValueError, OSError) as e: + errors.append(f"invalid filelist {cfg.design_rtl[0]}: {e}") + + return errors + + +def to_parameters(cfg: ProjectConfig) -> dict: + return { + "PDK": cfg.pdk_name, + "Design": cfg.design_name, + "Top module": cfg.design_top, + "Clock": cfg.design_clock_port, + "Frequency max [MHz]": cfg.design_frequency_mhz, + } + + +def resolve_rtl(cfg: ProjectConfig) -> tuple[str, str, str]: + if not cfg.design_rtl: + return ("", "", "") + + rtl_path = _resolve_path(cfg.project_dir, cfg.design_rtl[0]) + suffix = os.path.splitext(rtl_path)[1].lower() + + if suffix in FILELIST_SUFFIXES: + return ("filelist", "", rtl_path) + if suffix in RTL_SUFFIXES: + return ("rtl", rtl_path, "") + + if os.path.isfile(rtl_path): + try: + from chipcompiler.utility.filelist import parse_filelist, validate_filelist + + parse_filelist(rtl_path) + _, missing = validate_filelist(rtl_path) + if not missing: + return ("filelist", "", rtl_path) + except Exception: + pass + + return ("rtl", rtl_path, "") + + +def _resolve_path(project_dir: str, path: str) -> str: + path = os.path.expandvars(os.path.expanduser(path)) + if os.path.isabs(path): + return path + return os.path.join(project_dir, path) + + +def resolve_pdk_root(cfg: ProjectConfig) -> str: + return _resolve_pdk_root(cfg) + + +def _resolve_pdk_root(cfg: ProjectConfig) -> str: + if not cfg.pdk_root: + 
return _pdk_root_from_env() + return _resolve_path(cfg.project_dir, cfg.pdk_root) + + +def _validate_pdk_contents(pdk_name: str, pdk_root: str) -> str | None: + if not pdk_root: + return None + try: + from chipcompiler.data.pdk import get_pdk + get_pdk(pdk_name, pdk_root) + return None + except ValueError as exc: + return str(exc) + + +def _pdk_root_from_env() -> str: + for key in ("CHIPCOMPILER_ICS55_PDK_ROOT", "ICS55_PDK_ROOT"): + val = os.environ.get(key, "").strip() + if not val: + continue + val = os.path.normpath(val) + if os.path.isdir(val): + return val + return "" diff --git a/chipcompiler/cli/config_view.py b/chipcompiler/cli/config_view.py new file mode 100644 index 00000000..0786c17f --- /dev/null +++ b/chipcompiler/cli/config_view.py @@ -0,0 +1,180 @@ +import os + +from chipcompiler.cli.output import disclosure_cmd + + +def build_project_config_items(project_dir: str, run_dir: str, + project: str | None = None, + run_id: str | None = None) -> tuple[list[dict], int]: + from chipcompiler.cli.config import ( + _resolve_path, + find_config_path, + load_project_config, + resolve_pdk_root, + validate_project_config, + ) + + config_path = find_config_path(project_dir) + if config_path is None: + return [{"kind": "error", "status": "missing_config"}], 1 + + cfg = load_project_config(config_path) + if getattr(cfg, "_toml_error", None): + return [{"kind": "error", "status": "invalid_config"}], 1 + + errors = validate_project_config(cfg) + if errors: + return [{"kind": "error", "status": "invalid_config"}], 1 + + pdk_root = resolve_pdk_root(cfg) + + items = [] + entries = [ + ("design.name", cfg.design_name, cfg.design_name, "ecc.toml"), + ("design.top", cfg.design_top, cfg.design_top, "ecc.toml"), + ("design.clock_port", cfg.design_clock_port, cfg.design_clock_port, "ecc.toml"), + ("design.frequency_mhz", cfg.design_frequency_mhz, cfg.design_frequency_mhz, "ecc.toml"), + ("pdk.name", cfg.pdk_name, cfg.pdk_name, "ecc.toml"), + ("flow.preset", cfg.flow_preset, 
cfg.flow_preset, "ecc.toml"), + ("flow.run", cfg.flow_run, cfg.flow_run, "ecc.toml"), + ] + + inspect = disclosure_cmd("ecc config --resolved --json", project, run_id) + + for key, value, resolved, source in entries: + items.append({ + "kind": "config", + "scope": "project", + "key": key, + "value": value, + "resolved": resolved, + "source": source, + "inspect_cmd": inspect, + }) + + # RTL entries + for i, rtl in enumerate(cfg.design_rtl): + rtl_resolved = os.path.normpath(_resolve_path(project_dir, rtl)) + items.append({ + "kind": "config", + "scope": "project", + "key": f"design.rtl.{i}", + "value": rtl, + "resolved": rtl_resolved, + "source": "ecc.toml", + "inspect_cmd": inspect, + }) + + # PDK root with resolution + pdk_source = "ecc.toml" if cfg.pdk_root else "env" + items.append({ + "kind": "config", + "scope": "project", + "key": "pdk.root", + "value": cfg.pdk_root or "", + "resolved": pdk_root, + "source": pdk_source, + "inspect_cmd": inspect, + }) + + # Run directory + try: + run_dir_rel = os.path.relpath(run_dir, project_dir) + except ValueError: + run_dir_rel = run_dir + run_dir_value = run_dir if run_dir_rel.startswith("..") else run_dir_rel + items.append({ + "kind": "config", + "scope": "project", + "key": "run_dir", + "value": run_dir_value, + "resolved": os.path.abspath(run_dir), + "source": "resolved", + "inspect_cmd": disclosure_cmd("ecc status", project, run_id), + }) + + # Parameter records with source information + from chipcompiler.cli.params import resolve_parameters + cli_provenance, prov_error = _load_cli_provenance(run_dir) + if prov_error: + return [{"kind": "error", "status": "invalid_config", "reason": prov_error}], 1 + toml_overrides = dict(cfg.params_overrides) + if "design.frequency_mhz" not in toml_overrides and cfg.design_frequency_mhz > 0: + toml_overrides["design.frequency_mhz"] = cfg.design_frequency_mhz + resolved_params, _ = resolve_parameters( + toml_overrides=toml_overrides, + cli_overrides=cli_provenance, + ) + from 
chipcompiler.cli.param_handler import _maps_to_str + for rp in resolved_params: + items.append({ + "kind": "param", + "scope": "project", + "key": rp.param, + "value": rp.value, + "default": rp.default, + "source": rp.source, + "maps_to": _maps_to_str(rp.schema.maps_to), + "inspect_cmd": disclosure_cmd(f"ecc param show {rp.param}", project), + }) + + return items, 0 + + +def _load_cli_provenance(run_dir: str) -> tuple[dict[str, object], str | None]: + import json + provenance_path = os.path.join(run_dir, "home", "cli-param-overrides.json") + if not os.path.isfile(provenance_path): + return {}, None + try: + with open(provenance_path) as f: + data = json.load(f) + except (json.JSONDecodeError, OSError) as exc: + return {}, f"invalid CLI parameter provenance: {exc}" + if not isinstance(data, dict): + return {}, "invalid CLI parameter provenance: expected object" + from chipcompiler.cli.params import parse_cli_overrides + items = [f"{k}={v}" for k, v in data.items()] + validated, errors = parse_cli_overrides(items) + if errors: + return {}, f"invalid CLI parameter provenance: {errors[0]}" + return validated, None + + +def build_step_config_items(run_dir: str, step_token: str | None, + project: str | None = None, + run_id: str | None = None, + project_dir: str | None = None) -> tuple[list[dict], int]: + from chipcompiler.cli.inspect import discover_step_dirs + + base_dir = project_dir or os.path.dirname(os.path.dirname(run_dir)) + step_dirs = discover_step_dirs(run_dir) + + if step_token not in step_dirs: + return [{"kind": "error", "status": "unknown_step", "step": step_token}], 1 + + config_dir = os.path.join(step_dirs[step_token], "config") + items = [] + display_run = run_id or "default" + + if os.path.isdir(config_dir): + for fname in sorted(os.listdir(config_dir)): + fpath = os.path.join(config_dir, fname) + if os.path.isfile(fpath): + items.append({ + "kind": "config", + "scope": "step", + "step": step_token, + "role": "config", + "run": display_run, + "path": 
os.path.relpath(fpath, base_dir), + "source": "step_config", + "inspect_cmd": disclosure_cmd(f"ecc artifacts {step_token} --json", project, run_id), + }) + + if not items: + return [{"kind": "config", "scope": "step", "step": step_token, + "config_status": "none", + "artifacts": disclosure_cmd(f"ecc artifacts {step_token}", project, run_id)}], 0 + + return items, 0 diff --git a/chipcompiler/cli/diagnose.py b/chipcompiler/cli/diagnose.py new file mode 100644 index 00000000..879bb2c1 --- /dev/null +++ b/chipcompiler/cli/diagnose.py @@ -0,0 +1,185 @@ +import os + +from chipcompiler.cli.output import disclosure_cmd + + +def _has_investigation_files(step_path: str) -> bool: + for role in ("output", "report", "analysis"): + role_dir = os.path.join(step_path, role) + if os.path.isdir(role_dir): + if any(os.path.isfile(os.path.join(role_dir, f)) for f in os.listdir(role_dir)): + return True + return False + + +def _count_log_errors(run_dir: str, step_token: str) -> int: + from chipcompiler.cli.inspect import discover_logs, filter_errors, read_log_file + logs = discover_logs(run_dir, step_token) + count = 0 + for lf in logs: + raw = read_log_file(lf) + count += len(filter_errors(raw)) + return count + + +def _has_metrics(run_dir: str, step_token: str) -> bool: + from chipcompiler.cli.inspect import discover_metrics + return bool(discover_metrics(run_dir, step_token)) + + +def _has_config_files(step_path: str) -> bool: + config_dir = os.path.join(step_path, "config") + if not os.path.isdir(config_dir): + return False + return any(os.path.isfile(os.path.join(config_dir, f)) for f in os.listdir(config_dir)) + + +def _make_issue(issue: str, severity: str, run: str, + step: str | None = None, + status: str | None = None, + count: int | None = None, + project: str | None = None, + run_id: str | None = None) -> dict: + obj = { + "kind": "issue", + "issue": issue, + "severity": severity, + "run": run, + } + if step: + obj["step"] = step + if status: + obj["status"] = status + if 
count is not None: + obj["count"] = count + + cmd_kwargs = {"project": project, "run_id": run_id} + if issue in ("missing_run", "invalid_flow_json"): + obj["evidence"] = disclosure_cmd("ecc status", **cmd_kwargs) + obj["start_cmd"] = disclosure_cmd("ecc run", project=project) + elif issue == "log_errors": + obj["evidence"] = disclosure_cmd(f"ecc log {step}", **cmd_kwargs) + obj["artifacts"] = disclosure_cmd(f"ecc artifacts {step}", **cmd_kwargs) + elif issue == "missing_metrics": + obj["evidence"] = disclosure_cmd(f"ecc metrics {step} --json", **cmd_kwargs) + obj["log"] = disclosure_cmd(f"ecc log {step}", **cmd_kwargs) + elif issue == "missing_artifacts": + obj["evidence"] = disclosure_cmd(f"ecc artifacts {step}", **cmd_kwargs) + obj["config"] = disclosure_cmd(f"ecc config {step} --resolved", **cmd_kwargs) + elif issue == "config_unavailable": + obj["evidence"] = disclosure_cmd(f"ecc config {step} --resolved", **cmd_kwargs) + obj["artifacts"] = disclosure_cmd(f"ecc artifacts {step}", **cmd_kwargs) + elif step: + obj["evidence"] = disclosure_cmd("ecc status", **cmd_kwargs) + obj["log"] = disclosure_cmd(f"ecc log {step}", **cmd_kwargs) + obj["artifacts"] = disclosure_cmd(f"ecc artifacts {step}", **cmd_kwargs) + obj["config"] = disclosure_cmd(f"ecc config {step} --resolved", **cmd_kwargs) + + return obj + + +def _check_step_artifacts( + issues: list[dict], run_dir: str, token: str, step_path: str, + display_run: str, project: str | None, run_id: str | None, +) -> None: + error_count = _count_log_errors(run_dir, token) + if error_count > 0: + issues.append(_make_issue("log_errors", "error", display_run, + step=token, count=error_count, + project=project, run_id=run_id)) + if not _has_metrics(run_dir, token): + issues.append(_make_issue("missing_metrics", "warning", display_run, + step=token, project=project, run_id=run_id)) + if not _has_investigation_files(step_path): + issues.append(_make_issue("missing_artifacts", "warning", display_run, + step=token, 
project=project, run_id=run_id)) + if not _has_config_files(step_path): + issues.append(_make_issue("config_unavailable", "info", display_run, + step=token, project=project, run_id=run_id)) + + +def build_diagnose_issues(run_dir: str, step_token: str | None = None, + project: str | None = None, + run_id: str | None = None) -> tuple[list[dict], int]: + from chipcompiler.cli.inspect import ( + CORRUPT_FLOW_JSON, + discover_step_dirs, + read_flow_json, + _safe_steps, + ) + from chipcompiler.cli.output import normalize_state, normalize_step_name + + display_run = run_id or "default" + issues = [] + + flow_data = read_flow_json(run_dir) + + if flow_data is None: + issues.append(_make_issue("missing_run", "error", display_run, + project=project, run_id=run_id)) + return issues, 1 + + if flow_data is CORRUPT_FLOW_JSON: + issues.append(_make_issue("invalid_flow_json", "error", display_run, + project=project, run_id=run_id)) + return issues, 1 + + steps = _safe_steps(flow_data) + step_dirs = discover_step_dirs(run_dir) + + flow_tokens = {normalize_step_name(s.get("name", "")) for s in steps} + known_tokens = flow_tokens | set(step_dirs.keys()) + + if step_token is not None: + if step_token not in known_tokens: + issues.append(_make_issue("unknown_step", "error", display_run, + step=step_token, project=project, run_id=run_id)) + return issues, 1 + + for s in steps: + token = normalize_step_name(s.get("name", "")) + if step_token is not None and token != step_token: + continue + state = normalize_state(s.get("state", "")) + + if state in ("incomplete", "invalid"): + issues.append(_make_issue("failed_step", "error", display_run, + step=token, status=state, + project=project, run_id=run_id)) + elif state == "pending": + issues.append(_make_issue("pending_step", "warning", display_run, + step=token, status=state, + project=project, run_id=run_id)) + elif state == "ongoing": + issues.append(_make_issue("ongoing_step", "warning", display_run, + step=token, status=state, + 
project=project, run_id=run_id)) + elif state == "unstart": + issues.append(_make_issue("unstarted_step", "info", display_run, + step=token, status=state, + project=project, run_id=run_id)) + + if token in step_dirs: + _check_step_artifacts( + issues, run_dir, token, step_dirs[token], + display_run, project, run_id, + ) + else: + issues.append(_make_issue("missing_metrics", "warning", display_run, + step=token, project=project, run_id=run_id)) + issues.append(_make_issue("missing_artifacts", "warning", display_run, + step=token, project=project, run_id=run_id)) + issues.append(_make_issue("config_unavailable", "info", display_run, + step=token, project=project, run_id=run_id)) + + dir_only_tokens = set(step_dirs.keys()) - flow_tokens + if step_token is not None: + dir_only_tokens &= {step_token} + for token in sorted(dir_only_tokens): + _check_step_artifacts( + issues, run_dir, token, step_dirs[token], + display_run, project, run_id, + ) + + has_error = any(i.get("severity") == "error" for i in issues) + return issues, 1 if has_error else 0 diff --git a/chipcompiler/cli/handlers.py b/chipcompiler/cli/handlers.py new file mode 100644 index 00000000..1e57012a --- /dev/null +++ b/chipcompiler/cli/handlers.py @@ -0,0 +1,701 @@ +import os +import shutil +import sys + +from chipcompiler.cli.types import CommandContext, CommandResult +from chipcompiler.cli.records import error_record +from chipcompiler.cli.output import ( + disclosure_cmd, + normalize_metric_key, + normalize_state, + normalize_step_name, +) + + +def param(args, ctx: CommandContext) -> CommandResult: + from chipcompiler.cli.param_handler import ( + param_diff, + param_list, + param_set, + param_show, + param_unset, + ) + + subcmd = getattr(args, "param_command", None) + handlers = { + "list": param_list, + "show": param_show, + "set": param_set, + "unset": param_unset, + "diff": param_diff, + } + handler = handlers.get(subcmd) + if handler is None: + return 
CommandResult.err([error_record("missing_subcommand")], exit_code=1) + return handler(args, ctx) + + +def status(args, ctx: CommandContext) -> CommandResult: + from chipcompiler.cli.inspect import ( + CORRUPT_FLOW_JSON, + _safe_steps, + get_run_status, + read_flow_json, + ) + + flow_data = read_flow_json(ctx.run_dir) + display_run = ctx.run_id or "default" + project = ctx.project + + if flow_data is None: + return CommandResult.err([{ + "run": display_run, + "status": "missing", + "workspace": ctx.run_dir, + "start_cmd": disclosure_cmd("ecc run", project), + }]) + + if flow_data is CORRUPT_FLOW_JSON: + return CommandResult.err([{ + "run": display_run, + "status": "corrupt", + "workspace": ctx.run_dir, + "inspect_cmd": disclosure_cmd("ecc status", project, ctx.run_id), + "log_cmd": disclosure_cmd("ecc log", project, ctx.run_id), + }]) + + run_status = get_run_status(flow_data) + records = [{ + "run": display_run, + "status": run_status, + "workspace": ctx.run_dir, + "inspect_cmd": disclosure_cmd("ecc status", project, ctx.run_id), + "metrics_cmd": disclosure_cmd("ecc metrics", project, ctx.run_id), + "log_cmd": disclosure_cmd("ecc log", project, ctx.run_id), + }] + + for step in _safe_steps(flow_data): + step_token = normalize_step_name(step.get("name", "")) + records.append({ + "step": step_token, + "tool": step.get("tool", ""), + "status": normalize_state(step.get("state", "")), + "runtime": step.get("runtime", "") or None, + "metrics_cmd": disclosure_cmd(f"ecc metrics {step_token}", project, ctx.run_id), + "log_cmd": disclosure_cmd(f"ecc log {step_token}", project, ctx.run_id), + }) + + return CommandResult.ok(records) + + +def log(args, ctx: CommandContext) -> CommandResult: + from chipcompiler.cli.inspect import ( + discover_logs, + discover_step_dirs, + get_flow_step_names, + listing_step_order, + ) + from chipcompiler.cli.log_view import build_log_records + + step_token = args.step + project = ctx.project + + if step_token is None: + records = [] + + for lf 
in discover_logs(ctx.run_dir): + records.append({ + "log": os.path.relpath(lf, ctx.run_dir), + "inspect_cmd": disclosure_cmd("ecc log", project, ctx.run_id), + }) + + for token in listing_step_order(ctx.run_dir): + for lf in discover_logs(ctx.run_dir, token): + records.append({ + "step": token, + "source": os.path.relpath(lf, ctx.run_dir), + "inspect_cmd": disclosure_cmd(f"ecc log {token}", project, ctx.run_id), + }) + + if not records: + return CommandResult.ok([{ + "log_status": "no_logs", + "workspace": ctx.run_dir, + "run": disclosure_cmd("ecc run", project), + }]) + return CommandResult.ok(records) + + step_dirs = discover_step_dirs(ctx.run_dir) + if step_token not in step_dirs: + flow_steps = get_flow_step_names(ctx.run_dir) + if step_token in flow_steps: + return CommandResult.err([{ + "step": step_token, + "log_status": "missing", + "inspect_cmd": disclosure_cmd(f"ecc log {step_token}", project, ctx.run_id), + }]) + return CommandResult.err([{ + "step": step_token, + "status": "unknown_step", + "inspect_cmd": disclosure_cmd("ecc status", project, ctx.run_id), + }]) + + log_files = discover_logs(ctx.run_dir, step_token) + if not log_files: + return CommandResult.err([{ + "step": step_token, + "log_status": "missing", + "source": os.path.relpath( + os.path.join(step_dirs[step_token], "log"), ctx.run_dir, + ), + "inspect_cmd": disclosure_cmd(f"ecc log {step_token}", project, ctx.run_id), + }]) + + inspect_cmd = disclosure_cmd(f"ecc log {step_token}", project, ctx.run_id) + + all_records = [] + for lf in log_files: + source = os.path.relpath(lf, ctx.run_dir) + try: + with open(lf, errors="replace") as f: + raw = f.read().splitlines() + except OSError as exc: + return CommandResult.err([{ + "step": step_token, + "log_status": "unreadable", + "source": source, + "error": str(exc), + "inspect_cmd": inspect_cmd, + }]) + if not raw: + continue + all_records.extend(build_log_records(step_token, source, raw, inspect_cmd)) + + if not all_records: + return 
CommandResult.ok([{ + "step": step_token, + "log_status": "empty", + "inspect_cmd": inspect_cmd, + }]) + + return CommandResult.ok(all_records) + + +def metrics(args, ctx: CommandContext) -> CommandResult: + from chipcompiler.cli.inspect import ( + _internal_from_token, + discover_metrics, + discover_step_dirs, + get_flow_step_names, + read_metrics, + ) + + step_token = args.step + project = ctx.project + + metrics_files = discover_metrics(ctx.run_dir, step_token) + if not metrics_files: + if step_token is not None: + step_dirs = discover_step_dirs(ctx.run_dir) + flow_steps = get_flow_step_names(ctx.run_dir) + if step_token in step_dirs: + return CommandResult.err([{ + "metric_step": step_token, + "status": "missing", + "path": os.path.relpath( + os.path.join(step_dirs[step_token], "analysis", + f"{_internal_from_token(step_token)}_metrics.json"), + ctx.run_dir, + ), + "log": disclosure_cmd(f"ecc log {step_token}", project, ctx.run_id), + }]) + if step_token in flow_steps: + return CommandResult.err([{ + "metric_step": step_token, + "status": "missing", + "log": disclosure_cmd(f"ecc log {step_token}", project, ctx.run_id), + }]) + return CommandResult.err([{ + "step": step_token, + "status": "unknown_step", + "inspect": disclosure_cmd("ecc status", project, ctx.run_id), + }]) + return CommandResult.ok([{ + "metrics_status": "none", + "workspace": ctx.run_dir, + "inspect_cmd": disclosure_cmd("ecc status", project, ctx.run_id), + }]) + + records = [] + has_corrupt = False + for token, path in sorted(metrics_files.items()): + data = read_metrics(path) + if data is None: + has_corrupt = True + records.append({ + "metric_step": token, + "status": "corrupt", + "path": os.path.relpath(path, ctx.run_dir), + "log_cmd": disclosure_cmd(f"ecc log {token}", project, ctx.run_id), + }) + continue + for raw_key, value in data.items(): + norm_key = normalize_metric_key(raw_key) + records.append({ + "metric": norm_key, + "step": token, + "value": value, + "source": 
os.path.relpath(path, ctx.run_dir), + "inspect": disclosure_cmd(f"ecc metrics {token} --json", project, ctx.run_id), + }) + + if has_corrupt: + return CommandResult.err(records) + return CommandResult.ok(records) + + +def artifacts(args, ctx: CommandContext) -> CommandResult: + from chipcompiler.cli.artifacts import discover_artifacts + + step_token = args.step + project = ctx.project + + artifact_records, rc = discover_artifacts( + ctx.run_dir, step_token, project, ctx.run_id, ctx.project_dir, + ) + + if rc != 0: + if artifact_records and artifact_records[0].get("status") == "unknown_step": + return CommandResult.err([{ + "step": artifact_records[0]["step"], + "status": "unknown_step", + "inspect_cmd": disclosure_cmd("ecc status", project, ctx.run_id), + }]) + return CommandResult.err(artifact_records) + + if not artifact_records: + if step_token is not None: + return CommandResult.ok([{ + "step": step_token, + "artifacts_status": "none", + "inspect_cmd": disclosure_cmd("ecc status", project, ctx.run_id), + "log": disclosure_cmd(f"ecc log {step_token}", project, ctx.run_id), + }]) + return CommandResult.ok([{ + "artifacts_status": "none", + "workspace": ctx.run_dir, + "inspect_cmd": disclosure_cmd("ecc status", project, ctx.run_id), + }]) + + records = [] + for a in artifact_records: + line_fields = { + "artifact": os.path.basename(a["path"]), + "step": a["step"], + "role": a["role"], + "path": a["path"], + "inspect": disclosure_cmd(f"ecc artifacts {a['step']} --json", project, ctx.run_id), + } + if a["role"] == "analysis": + line_fields["metrics"] = disclosure_cmd(f"ecc metrics {a['step']}", project, ctx.run_id) + if a["role"] == "log": + line_fields["inspect"] = disclosure_cmd(f"ecc log {a['step']}", project, ctx.run_id) + if a["role"] in ("output", "report", "analysis", "log"): + line_fields["config"] = disclosure_cmd(f"ecc config {a['step']} --resolved", project, ctx.run_id) + records.append(line_fields) + return CommandResult.ok(records) + + +def config(args, 
ctx: CommandContext) -> CommandResult: + from chipcompiler.cli.config_view import build_project_config_items, build_step_config_items + + step_token = args.step + project = ctx.project + + if step_token is not None: + items, rc = build_step_config_items( + ctx.run_dir, step_token, project, ctx.run_id, ctx.project_dir, + ) + else: + items, rc = build_project_config_items( + ctx.project_dir, ctx.run_dir, project, ctx.run_id, + ) + + if rc != 0: + first = items[0] if items else {} + status = first.get("status") + if status == "unknown_step": + return CommandResult.err([{ + "step": first.get("step", ""), + "status": "unknown_step", + "inspect": disclosure_cmd("ecc status", project, ctx.run_id), + }]) + if status == "missing_config": + return CommandResult.err([error_record( + "missing_config", + inspect=disclosure_cmd("ecc check", project), + )]) + if status == "invalid_config": + reason = first.get("reason") + rec = error_record( + "invalid_config", + inspect=disclosure_cmd("ecc check", project), + ) + if reason: + rec["reason"] = reason + return CommandResult.err([rec]) + return CommandResult.err(items) + + if not items: + return CommandResult.ok([{"config_status": "none"}]) + + first = items[0] + if first.get("config_status") == "none": + return CommandResult.ok([{ + "step": first["step"], + "config_status": "none", + "artifacts": first.get("artifacts"), + }]) + + records = [] + for item in items: + if item.get("kind") == "param": + records.append({ + "kind": "param", + "config": item["key"], + "key": item["key"], + "scope": "project", + "value": item["value"], + "default": item.get("default"), + "source": item["source"], + "maps_to": item.get("maps_to"), + "inspect": item.get("inspect_cmd"), + }) + elif item.get("scope") == "project": + records.append({ + "config": item["key"], + "scope": "project", + "value": item["value"], + "resolved": item.get("resolved"), + "source": item["source"], + "inspect": item.get("inspect_cmd"), + }) + else: + records.append({ + 
"config": os.path.basename(item["path"]), + "scope": "step", + "step": item["step"], + "role": item["role"], + "run": item.get("run", "default"), + "path": item["path"], + "source": item["source"], + "inspect": item.get("inspect_cmd"), + }) + return CommandResult.ok(records) + + +def diagnose(args, ctx: CommandContext) -> CommandResult: + from chipcompiler.cli.diagnose import build_diagnose_issues + + step_token = args.step + project = ctx.project + display_run = ctx.run_id or "default" + + issues, _ = build_diagnose_issues(ctx.run_dir, step_token, project, ctx.run_id) + + if not issues: + return CommandResult.ok([{ + "status": "clean", + "run": display_run, + "inspect_cmd": disclosure_cmd("ecc status", project, ctx.run_id), + "artifacts": disclosure_cmd("ecc artifacts", project, ctx.run_id), + "config": disclosure_cmd("ecc config --resolved", project, ctx.run_id), + }]) + + has_error = any(i.get("severity") == "error" for i in issues) + text_keys = ( + "issue", "severity", "run", "step", "status", "count", + "evidence", "log", "artifacts", "config", "start_cmd", + ) + records = [] + for issue in issues: + records.append({k: issue[k] for k in text_keys if k in issue}) + + if has_error: + return CommandResult.err(records) + return CommandResult.ok(records) + + +def init(args, ctx: CommandContext) -> CommandResult: + name = args.name + if not name or not name.strip(): + return CommandResult.err([{"kind": "error", "error": "project name is required"}]) + + project_dir = os.path.abspath(name) + config_path = os.path.join(project_dir, "ecc.toml") + design_name = os.path.basename(project_dir) + + if os.path.isfile(project_dir): + return CommandResult.err([{ + "kind": "error", + "error": "path_is_file", + "path": project_dir, + }]) + + if os.path.exists(config_path): + return CommandResult.err([{ + "kind": "error", + "error": "already_exists", + "path": config_path, + }]) + + os.makedirs(project_dir, exist_ok=True) + os.makedirs(os.path.join(project_dir, "rtl"), 
exist_ok=True) + os.makedirs(os.path.join(project_dir, "constraints"), exist_ok=True) + os.makedirs(os.path.join(project_dir, "runs"), exist_ok=True) + + DEFAULT_TOML = '''[design] +name = "{name}" +top = "{name}" +rtl = ["rtl/{name}.v"] +clock_port = "clk" +frequency_mhz = 100.0 + +[pdk] +name = "ics55" +root = "" + +[flow] +preset = "rtl2gds" +run = "default" +''' + + with open(config_path, "w") as f: + f.write(DEFAULT_TOML.format(name=design_name)) + + project_arg = ctx.project or name + return CommandResult.ok([{ + "project": name, + "status": "created", + "path": name, + "check": disclosure_cmd("ecc check", project_arg), + "run": disclosure_cmd("ecc run", project_arg), + }]) + + +def check(args, ctx: CommandContext) -> CommandResult: + from chipcompiler.cli.config import ( + find_config_path, + load_project_config, + validate_project_config, + ) + + project = ctx.project + + config_path = find_config_path(ctx.project_dir) + if config_path is None: + return CommandResult.err([error_record( + "missing_config", + path=os.path.join(ctx.project_dir, "ecc.toml"), + inspect=disclosure_cmd("ecc check", project), + )]) + + cfg = load_project_config(config_path) + errors = validate_project_config(cfg) + + if errors: + return CommandResult.err([{ + "check": "config", + "status": "fail", + "reason": err, + "source": "ecc.toml", + "inspect": disclosure_cmd("ecc check --json", project), + } for err in errors]) + + records = [{ + "project": cfg.design_name, + "status": "checked", + "config": "ecc.toml", + "run_dir": "runs/default", + "run": disclosure_cmd("ecc run", project), + "inspect_cmd": disclosure_cmd("ecc status", project), + }] + + if cfg.design_rtl: + records.append({ + "check": "rtl", + "status": "pass", + "path": cfg.design_rtl[0], + "inspect": disclosure_cmd("ecc check --json", project), + }) + + return CommandResult.ok(records) + + +def run(args, ctx: CommandContext) -> CommandResult: + from chipcompiler.cli.config import ( + find_config_path, + 
load_project_config, + resolve_pdk_root, + resolve_rtl, + to_parameters, + validate_project_config, + ) + from chipcompiler.data import create_workspace + from chipcompiler.engine import EngineFlow + from chipcompiler.rtl2gds import build_rtl2gds_flow + + project = ctx.project + project_dir = ctx.project_dir + + config_path = find_config_path(project_dir) + if config_path is None: + return CommandResult.err([{ + "kind": "error", + "error": "missing_config", + "path": os.path.join(project_dir, "ecc.toml"), + }]) + + cfg = load_project_config(config_path) + errors = validate_project_config(cfg) + if errors: + return CommandResult.err([{ + "kind": "error", + "error": "config_error", + "reason": err, + } for err in errors]) + + # Parse and validate --set overrides before workspace creation + cli_overrides = {} + raw_sets = getattr(args, "param_set", []) + if raw_sets: + from chipcompiler.cli.params import parse_cli_overrides + cli_overrides, set_errors = parse_cli_overrides(raw_sets) + if set_errors: + return CommandResult.err([{ + "kind": "error", + "error": "invalid_parameter", + "reason": err, + } for err in set_errors]) + + run_dir = os.path.join(project_dir, "runs", "default") + flow_json = os.path.join(run_dir, "home", "flow.json") + + if os.path.exists(flow_json) and not args.overwrite: + return CommandResult.err([{ + "kind": "error", + "error": "run_exists", + "run": "default", + "workspace": run_dir, + "overwrite": disclosure_cmd("ecc run --overwrite", project), + }]) + + if args.overwrite and os.path.exists(run_dir): + for root, dirs, files in os.walk(run_dir): + for d in dirs: + dp = os.path.join(root, d) + if not os.path.islink(dp): + os.chmod(dp, 0o755) + for f in files: + fp = os.path.join(root, f) + if not os.path.islink(fp): + os.chmod(fp, 0o644) + os.chmod(run_dir, 0o755) + shutil.rmtree(run_dir) + + _, origin_verilog, input_filelist = resolve_rtl(cfg) + parameters = to_parameters(cfg) + pdk_root = resolve_pdk_root(cfg) + + # Merge resolved parameter 
overrides into workspace parameters + if cfg.params_overrides or cli_overrides: + from chipcompiler.cli.params import ( + build_backend_overrides, + resolve_parameters, + ) + resolved, _ = resolve_parameters( + toml_overrides=cfg.params_overrides, + cli_overrides=cli_overrides, + ) + backend_overrides = build_backend_overrides(resolved) + from chipcompiler.data.parameter import update_parameters + update_parameters(backend_overrides, parameters) + + try: + workspace = create_workspace( + directory=run_dir, + origin_def="", + origin_verilog=origin_verilog, + pdk=cfg.pdk_name, + parameters=parameters, + input_filelist=input_filelist, + pdk_root=pdk_root, + ) + except Exception as exc: + return CommandResult.err([{ + "kind": "error", + "error": "workspace_failed", + "run": "default", + "workspace": run_dir, + "reason": str(exc), + }]) + + if workspace is None: + return CommandResult.err([{ + "kind": "error", + "error": "workspace_failed", + "run": "default", + "workspace": run_dir, + }]) + + # Persist CLI parameter provenance for config --resolved inspection + if cli_overrides: + import json + provenance_path = os.path.join(run_dir, "home", "cli-param-overrides.json") + os.makedirs(os.path.dirname(provenance_path), exist_ok=True) + with open(provenance_path, "w") as _f: + json.dump(cli_overrides, _f) + + try: + engine_flow = EngineFlow(workspace=workspace) + if not engine_flow.has_init(): + for step, tool, state in build_rtl2gds_flow(): + engine_flow.add_step(step=step, tool=tool, state=state) + + engine_flow.create_step_workspaces() + + from chipcompiler.cli.progress import ( + run_flow_with_progress, + should_enable_run_progress, + ) + + if should_enable_run_progress(ctx, sys.stderr): + flow_ok = run_flow_with_progress(engine_flow, ctx, project, sys.stderr) + else: + flow_ok = engine_flow.run_steps() + + if not flow_ok: + return CommandResult.err([{ + "run": "default", + "status": "failed", + "workspace": run_dir, + "inspect_cmd": disclosure_cmd("ecc status", 
project), + "log": disclosure_cmd("ecc log", project), + }]) + except Exception as exc: + return CommandResult.err([{ + "kind": "error", + "error": "flow_failed", + "run": "default", + "workspace": run_dir, + "reason": str(exc), + }]) + + return CommandResult.ok([{ + "run": "default", + "status": "success", + "workspace": run_dir, + "inspect_cmd": disclosure_cmd("ecc status", project), + "metrics_cmd": disclosure_cmd("ecc metrics", project), + "log_cmd": disclosure_cmd("ecc log", project), + }]) diff --git a/chipcompiler/cli/inspect.py b/chipcompiler/cli/inspect.py new file mode 100644 index 00000000..9836e6c8 --- /dev/null +++ b/chipcompiler/cli/inspect.py @@ -0,0 +1,191 @@ +import json +import os +import re + +from chipcompiler.cli.output import ( + normalize_step_name, + normalize_state, +) + + +def resolve_run_dir(project_dir: str, run_id: str | None = None) -> tuple[str, str | None]: + if not run_id: + return os.path.join(project_dir, "runs", "default"), None + + if run_id == "default": + return os.path.join(project_dir, "runs", "default"), "default" + + if os.path.isabs(run_id): + return run_id, run_id + + if os.sep in run_id or "/" in run_id: + return os.path.join(project_dir, run_id), run_id + + return os.path.join(project_dir, "runs", run_id), run_id + + +CORRUPT_FLOW_JSON = "CORRUPT" + + +def read_flow_json(run_dir: str) -> dict | str | None: + path = os.path.join(run_dir, "home", "flow.json") + if not os.path.isfile(path): + return None + try: + with open(path) as f: + data = json.load(f) + return data if isinstance(data, dict) else CORRUPT_FLOW_JSON + except (json.JSONDecodeError, OSError): + return CORRUPT_FLOW_JSON + + +def _safe_steps(flow_data: dict) -> list[dict]: + steps = flow_data.get("steps", []) + if not isinstance(steps, list): + return [] + return [s for s in steps if isinstance(s, dict)] + + +def get_run_status(flow_data: dict) -> str: + steps = _safe_steps(flow_data) + if not steps: + return "unstart" + states = 
{normalize_state(s.get("state", "")) for s in steps} + if states & {"ongoing", "pending"}: + return "ongoing" + if states & {"incomplete", "invalid"}: + return "failed" + if states == {"success"}: + return "success" + if states == {"unstart"}: + return "unstart" + return "failed" + + +ERROR_PATTERNS = re.compile(r"(error|failed|traceback)", re.IGNORECASE) +_CLEAN_SUMMARY = re.compile(r"^\s*0\s+(error|failed|warning)|^no\s+(error|failed|warning)", re.IGNORECASE) + + +def filter_errors(lines: list[str]) -> list[str]: + result = [] + for line in lines: + if ERROR_PATTERNS.search(line) and not _CLEAN_SUMMARY.search(line): + result.append(line) + return result + + +def discover_step_dirs(run_dir: str) -> dict[str, str]: + result = {} + if not os.path.isdir(run_dir): + return result + for entry in os.listdir(run_dir): + full = os.path.join(run_dir, entry) + if os.path.isdir(full) and "_" in entry: + name, _, tool = entry.partition("_") + token = normalize_step_name(name) + result[token] = full + return result + + +def get_flow_step_names(run_dir: str) -> set[str]: + flow_data = read_flow_json(run_dir) + if not isinstance(flow_data, dict): + return set() + return {normalize_step_name(s.get("name", "")) for s in _safe_steps(flow_data) if s.get("name")} + + +def _list_files(directory: str) -> list[str]: + if not os.path.isdir(directory): + return [] + return sorted( + os.path.join(directory, f) + for f in os.listdir(directory) + if os.path.isfile(os.path.join(directory, f)) + ) + + +def discover_logs(run_dir: str, step_token: str | None = None) -> list[str]: + if step_token is None: + return _list_files(os.path.join(run_dir, "log")) + + step_dirs = discover_step_dirs(run_dir) + if step_token not in step_dirs: + return [] + + return _list_files(os.path.join(step_dirs[step_token], "log")) + + +def read_log_file(path: str) -> list[str]: + try: + with open(path, errors="replace") as f: + return f.read().splitlines() + except OSError: + return [] + + +def 
discover_metrics(run_dir: str, step_token: str | None = None) -> dict[str, str]: + step_dirs = discover_step_dirs(run_dir) + result = {} + + if step_token is not None: + if step_token not in step_dirs: + return {} + tokens = [step_token] + else: + tokens = list(step_dirs.keys()) + + for token in tokens: + analysis_dir = os.path.join(step_dirs[token], "analysis") + if not os.path.isdir(analysis_dir): + continue + for f in os.listdir(analysis_dir): + if f.endswith("_metrics.json"): + result[token] = os.path.join(analysis_dir, f) + break + + return result + + +def read_metrics(path: str) -> dict | None: + try: + with open(path) as f: + data = json.load(f) + return data if isinstance(data, dict) else None + except (json.JSONDecodeError, OSError): + return None + + +def _internal_from_token(token: str) -> str: + reverse = { + "synthesis": "Synthesis", + "floorplan": "Floorplan", + "fixfanout": "fixFanout", + "placement": "place", + "cts": "CTS", + "legalization": "legalization", + "routing": "route", + "drc": "drc", + "filler": "filler", + } + return reverse.get(token, token) + + +def listing_step_order(run_dir: str) -> list[str]: + """Return step tokens in flow.json order, with undiscovered extras alphabetically after.""" + step_dirs = discover_step_dirs(run_dir) + if not step_dirs: + return [] + + flow_data = read_flow_json(run_dir) + if isinstance(flow_data, dict): + flow_tokens = [ + normalize_step_name(s.get("name", "")) + for s in _safe_steps(flow_data) + if s.get("name") + ] + flow_set = set(flow_tokens) + result = [t for t in flow_tokens if t in step_dirs] + result.extend(sorted(t for t in step_dirs if t not in flow_set)) + return result + + return sorted(step_dirs) diff --git a/chipcompiler/cli/log_view.py b/chipcompiler/cli/log_view.py new file mode 100644 index 00000000..1243a62c --- /dev/null +++ b/chipcompiler/cli/log_view.py @@ -0,0 +1,282 @@ +import enum +import re +import sys + +from chipcompiler.cli.pretty import BOLD, DIM, RED, YELLOW, BLUE, CYAN, 
RESET, style +from chipcompiler.cli.render import _plain_value + + +class LineKind(enum.Enum): + ERROR = "error" + WARNING = "warning" + INFO = "info" + TRACEBACK = "traceback" + SECTION = "section" + PLAIN = "plain" + + +_TRACEBACK_HEADER = "Traceback (most recent call last):" + +_ERROR_RE = re.compile(r"error", re.IGNORECASE) +_WARNING_RE = re.compile(r"warn(?:ing)?", re.IGNORECASE) +_INFO_RE = re.compile(r"^(?:INFO(?:\s*:|\s*\]|:root:)|\[INFO\s*\])") +_SECTION_RE = re.compile(r"^[-=]{3,}$") +_EXCEPTION_RE = re.compile( + r"^[A-Za-z_][\w.]*:\s" + r"|^[A-Za-z_]\w*(?:Error|Exception|Warning|Interrupt|Exit|Iteration)$" + r"|^(?:KeyboardInterrupt|SystemExit|StopIteration|GeneratorExit)$" +) + + +def classify_line(line: str, in_traceback: bool = False) -> LineKind: + if line.strip() == _TRACEBACK_HEADER: + return LineKind.TRACEBACK + if in_traceback: + stripped = line.strip() + if not stripped: + return LineKind.PLAIN + if line.startswith(" ") or line.startswith("\t"): + return LineKind.TRACEBACK + if _EXCEPTION_RE.match(stripped): + return LineKind.ERROR + if _ERROR_RE.search(stripped): + return LineKind.ERROR + return LineKind.PLAIN + if _SECTION_RE.match(line.strip()): + return LineKind.SECTION + if _INFO_RE.match(line): + return LineKind.INFO + if _WARNING_RE.search(line): + return LineKind.WARNING + if _ERROR_RE.search(line): + return LineKind.ERROR + return LineKind.PLAIN + + +class LogLine: + __slots__ = ("line_no", "kind", "text") + + def __init__(self, line_no: int, kind: LineKind, text: str): + self.line_no = line_no + self.kind = kind + self.text = text + + def __eq__(self, other): + if not isinstance(other, LogLine): + return NotImplemented + return (self.line_no, self.kind, self.text) == (other.line_no, other.kind, other.text) + + def __repr__(self): + return f"LogLine({self.line_no!r}, {self.kind!r}, {self.text!r})" + + +def annotate_log_lines(lines: list[str]) -> list[LogLine]: + result = [] + in_traceback = False + for i, text in enumerate(lines): + 
kind = classify_line(text, in_traceback) + if kind == LineKind.TRACEBACK and text.strip() == _TRACEBACK_HEADER: + in_traceback = True + elif in_traceback and kind == LineKind.ERROR: + in_traceback = False + elif in_traceback and kind == LineKind.PLAIN and not text.startswith(" ") and not text.startswith("\t") and text.strip(): + in_traceback = False + result.append(LogLine(line_no=i + 1, kind=kind, text=text)) + return result + + +def build_log_records( + step: str, + source: str, + lines: list[str], + inspect_cmd: str, +) -> list[dict]: + annotated = annotate_log_lines(lines) + records = [] + for ll in annotated: + records.append({ + "step": step, + "source": source, + "line_no": ll.line_no, + "kind": ll.kind.value, + "line": ll.text, + "inspect_cmd": inspect_cmd, + }) + return records + + +# --- Pretty rendering --- + +_KIND_LABEL = { + LineKind.ERROR: "error", + LineKind.WARNING: "warn ", + LineKind.INFO: "info ", + LineKind.TRACEBACK: "trace", + LineKind.SECTION: "-----", + LineKind.PLAIN: " ", +} + +_KIND_COLOR = { + LineKind.ERROR: RED, + LineKind.WARNING: YELLOW, + LineKind.TRACEBACK: YELLOW, + LineKind.INFO: BLUE, + LineKind.SECTION: CYAN, +} + + +def render_log_pretty( + step: str, + source: str, + lines: list[str], + inspect_cmd: str, + file=None, + color: bool = True, +) -> None: + target = file or sys.stdout + annotated = annotate_log_lines(lines) + + log_tag = style("[log]", BOLD, color) + source_label = f" {style('source:', DIM, color)}" if color else " source:" + target.write(f"{log_tag} step={step}\n") + target.write(f"{source_label} {source}\n") + + for ll in annotated: + label = _KIND_LABEL[ll.kind] + if color and ll.kind in _KIND_COLOR: + code = _KIND_COLOR[ll.kind] + if ll.kind == LineKind.ERROR: + target.write(f" {code}{label} {ll.text}{RESET}\n") + else: + target.write(f" {code}{label}{RESET} {ll.text}\n") + else: + target.write(f" {label} {ll.text}\n") + + inspect_label = f" {style('inspect:', DIM, color)}" if color else " inspect:" + 
target.write(f"{inspect_label} {inspect_cmd}\n") + + +def _render_plain_record(rec, target): + parts = [] + for key in ("step", "source", "line_no", "kind", "line", "inspect_cmd"): + parts.append(f"{key}={_plain_value(rec.get(key, ''))}") + target.write(" ".join(parts) + "\n") + + +def render_log_plain( + step: str, + source: str, + lines: list[str], + inspect_cmd: str, + file=None, +) -> None: + target = file or sys.stdout + records = build_log_records(step, source, lines, inspect_cmd) + for rec in records: + _render_plain_record(rec, target) + + +def render_log_records_plain(records, file=None) -> None: + target = file or sys.stdout + for rec in records: + _render_plain_record(rec, target) + + +def tail_lines_for_log(path: str, max_lines: int = 10) -> list[str]: + """Return up to max_lines non-empty sanitized lines from the end of a log file.""" + try: + with open(path, errors="replace") as f: + raw = f.read().splitlines() + except OSError: + return [] + + from chipcompiler.cli.progress import sanitize_log_line + sanitized = [sanitize_log_line(line) for line in raw] + non_empty = [line for line in sanitized if line] + return non_empty[-max_lines:] + + +def render_log_listing_pretty( + records: list[dict], + file=None, + color: bool = True, + tail_map: dict | None = None, +) -> None: + target = file or sys.stdout + + log_tag = style("[logs]", BOLD, color) + target.write(f"{log_tag}\n") + + for rec in records: + step = rec.get("step", "") + source = rec.get("source") or rec.get("log", "") + inspect = rec.get("inspect_cmd") or rec.get("inspect", "") + + if step: + label = f" {style(step, CYAN, color)}" if color else f" {step}" + else: + label = f" {style('run', CYAN, color)}" if color else " run" + + target.write(f"{label} {source}\n") + + if tail_map and source in tail_map: + tail_lines = tail_map[source] + if tail_lines: + target.write(f" {style('tail:', DIM, color)}\n" if color else " tail:\n") + for tl in tail_lines: + target.write(f" {tl}\n") + + inspect_label 
= f" {style('inspect:', DIM, color)}" if color else " inspect:" + target.write(f"{inspect_label} {inspect}\n") + + +# --- Context extraction --- + + +def extract_error_context(lines: list[str], max_lines: int = 50) -> list: + """Extract at most max_lines log lines around the failure anchor. + + Anchor priority: last error > last traceback > last \"failed\" > last non-empty. + """ + if not lines: + return [] + + annotated = annotate_log_lines(lines) + total = len(annotated) + + anchor_idx = _find_context_anchor(annotated) + + if total <= max_lines: + return annotated + + half = max_lines // 2 + start = max(0, anchor_idx - half) + end = min(total, start + max_lines) + if end - start < max_lines: + start = max(0, end - max_lines) + + return annotated[start:end] + + +def _find_context_anchor(annotated): + # Priority 1: last error line + for i in range(len(annotated) - 1, -1, -1): + if annotated[i].kind == LineKind.ERROR: + return i + + # Priority 2: last traceback line + for i in range(len(annotated) - 1, -1, -1): + if annotated[i].kind == LineKind.TRACEBACK: + return i + + # Priority 3: last line containing "failed" + for i in range(len(annotated) - 1, -1, -1): + if "failed" in annotated[i].text.lower(): + return i + + # Priority 4: last non-empty line + for i in range(len(annotated) - 1, -1, -1): + if annotated[i].text.strip(): + return i + + return len(annotated) - 1 diff --git a/chipcompiler/cli/main.py b/chipcompiler/cli/main.py index be3248bd..c1d4f1c3 100644 --- a/chipcompiler/cli/main.py +++ b/chipcompiler/cli/main.py @@ -1,75 +1,326 @@ -#!/usr/bin/env python - import argparse import os import sys from collections.abc import Sequence -from chipcompiler.data import create_workspace, get_parameters -from chipcompiler.engine import EngineFlow -from chipcompiler.rtl2gds import build_rtl2gds_flow -from chipcompiler.utility.filelist import parse_filelist, validate_filelist - -FILELIST_SUFFIXES = {".f", ".fl", ".filelist"} -RTL_SUFFIXES = {".v", ".sv", ".svh", ".vh"} 
+from chipcompiler.cli.commands import build_context, dispatch +from chipcompiler.cli.render import render_result +from chipcompiler.cli.types import OutputMode def build_parser() -> argparse.ArgumentParser: parser = argparse.ArgumentParser( - prog="cli", - description="Create ChipCompiler workspace and run RTL2GDS flow", - ) - parser.add_argument("--workspace", required=True, help="Workspace directory path") - parser.add_argument("--rtl", required=True, help="RTL file or filelist path") - parser.add_argument("--design", required=True, help="Design name") - parser.add_argument("--top", required=True, help="Top module name") - parser.add_argument("--clock", required=True, help="Clock port name") - parser.add_argument("--pdk-root", required=True, help="ICS55 PDK root directory") - parser.add_argument( - "--freq", - type=float, - default=100.0, - help="Clock frequency in MHz (default: 100)", + prog="ecc", + description="ECC - EDA toolchain for RTL-to-GDS flows", ) + subparsers = parser.add_subparsers(dest="command") + + # ecc init + init_parser = subparsers.add_parser("init", help="Create a new project skeleton") + init_parser.add_argument("name", help="Project name") + init_parser.add_argument("--plain", action="store_true", help="Plain key-value output") + + # ecc check + check_parser = subparsers.add_parser("check", help="Validate project configuration") + _add_project_arg(check_parser) + check_parser.add_argument("--json", action="store_true", help="JSON output") + check_parser.add_argument("--plain", action="store_true", help="Plain key-value output") + + # ecc run + run_parser = subparsers.add_parser("run", help="Execute the complete flow") + _add_project_arg(run_parser) + run_parser.add_argument("--overwrite", action="store_true", + help="Remove existing runs/default before running") + run_parser.add_argument("--json", action="store_true", help="JSON output") + run_parser.add_argument("--jsonl", action="store_true", help="JSONL output") + + # ecc status + 
status_parser = subparsers.add_parser("status", help="Show run and step status") + _add_project_arg(status_parser) + status_parser.add_argument("--json", action="store_true", help="JSON output") + status_parser.add_argument("--jsonl", action="store_true", help="JSONL output") + status_parser.add_argument("--plain", action="store_true", help="Plain key-value output") + status_parser.add_argument("--run-id", default=None, dest="run_id", + help="Run workspace selector") + + # ecc log + log_parser = subparsers.add_parser("log", help="Inspect step logs") + _add_project_arg(log_parser) + log_parser.add_argument("step", nargs="?", default=None, help="Step name") + log_parser.add_argument("--errors", action="store_true", + help=argparse.SUPPRESS) + log_parser.add_argument("--json", action="store_true", help="JSON output") + log_parser.add_argument("--plain", action="store_true", help="Plain key-value output") + log_parser.add_argument("--jsonl", action="store_true", help="JSONL output") + log_parser.add_argument("--run-id", default=None, dest="run_id", + help="Run workspace selector") + + # ecc metrics + metrics_parser = subparsers.add_parser("metrics", help="Show step metrics") + _add_project_arg(metrics_parser) + metrics_parser.add_argument("step", nargs="?", default=None, help="Step name") + metrics_parser.add_argument("--json", action="store_true", help="JSON output") + metrics_parser.add_argument("--jsonl", action="store_true", help="JSONL output") + metrics_parser.add_argument("--plain", action="store_true", help="Plain key-value output") + metrics_parser.add_argument("--run-id", default=None, dest="run_id", + help="Run workspace selector") + + # ecc artifacts + artifacts_parser = subparsers.add_parser("artifacts", help="List generated files") + _add_project_arg(artifacts_parser) + artifacts_parser.add_argument("step", nargs="?", default=None, help="Step name") + artifacts_parser.add_argument("--json", action="store_true", help="JSON output") + 
artifacts_parser.add_argument("--jsonl", action="store_true", help="JSONL output") + artifacts_parser.add_argument("--plain", action="store_true", help="Plain key-value output") + artifacts_parser.add_argument("--run-id", default=None, dest="run_id", + help="Run workspace selector") + + # ecc config + config_parser = subparsers.add_parser("config", help="Show configuration") + _add_project_arg(config_parser) + config_parser.add_argument("step", nargs="?", default=None, help="Step name") + config_parser.add_argument("--resolved", action="store_true", required=True, + help="Show resolved configuration") + config_parser.add_argument("--json", action="store_true", help="JSON output") + config_parser.add_argument("--jsonl", action="store_true", help="JSONL output") + config_parser.add_argument("--plain", action="store_true", help="Plain key-value output") + config_parser.add_argument("--run-id", default=None, dest="run_id", + help="Run workspace selector") + + # ecc diagnose + diagnose_parser = subparsers.add_parser("diagnose", help="Show run diagnostics") + _add_project_arg(diagnose_parser) + diagnose_parser.add_argument("step", nargs="?", default=None, help="Step name") + diagnose_parser.add_argument("--json", action="store_true", help="JSON output") + diagnose_parser.add_argument("--jsonl", action="store_true", help="JSONL output") + diagnose_parser.add_argument("--plain", action="store_true", help="Plain key-value output") + diagnose_parser.add_argument("--run-id", default=None, dest="run_id", + help="Run workspace selector") + + # ecc param + param_parser = subparsers.add_parser("param", help="Manage EDA parameters") + param_sub = param_parser.add_subparsers(dest="param_command") + + def _add_param_flags(p): + _add_project_arg(p) + p.add_argument("--json", action="store_true", help="JSON output") + p.add_argument("--jsonl", action="store_true", help="JSONL output") + p.add_argument("--plain", action="store_true", help="Plain key-value output") + + # ecc param list 
+ param_list = param_sub.add_parser("list", help="List all parameters") + _add_param_flags(param_list) + + # ecc param show + param_show = param_sub.add_parser("show", help="Show parameter details") + _add_param_flags(param_show) + param_show.add_argument("key", help="Parameter key (e.g. place.target_density)") + + # ecc param set + param_set = param_sub.add_parser("set", help="Set a persistent parameter override") + _add_param_flags(param_set) + param_set.add_argument("key", help="Parameter key") + param_set.add_argument("value", help="Parameter value") + + # ecc param unset + param_unset = param_sub.add_parser("unset", help="Remove a persistent override") + _add_param_flags(param_unset) + param_unset.add_argument("key", help="Parameter key") + + # ecc param diff + param_diff = param_sub.add_parser("diff", help="Show overrides that differ from defaults") + _add_param_flags(param_diff) + + # ecc run --set + run_parser.add_argument("--set", action="append", default=[], dest="param_set", + help="Set parameter override (repeatable, e.g. 
--set place.target_density=0.65)") + run_parser.add_argument("--plain", action="store_true", help="Plain key-value output") + return parser -def resolve_rtl_input(rtl_path: str) -> tuple[str, str, str]: - normalized_path = os.path.abspath(os.path.expanduser(rtl_path)) - suffix = os.path.splitext(normalized_path)[1].lower() +def _add_project_arg(parser: argparse.ArgumentParser) -> None: + parser.add_argument("--project", default=None, + help="Project directory (default: current directory)") - if suffix in FILELIST_SUFFIXES: - return ("filelist", "", normalized_path) - if suffix in RTL_SUFFIXES: - return ("rtl", normalized_path, "") +def _render_param_text(args, result, color=True) -> None: + from chipcompiler.cli.param_handler import ( + render_param_diff_text, + render_param_list_text, + render_param_set_text, + render_param_show_text, + ) + from chipcompiler.cli.pretty import render_error + + if result.exit_code != 0: + render_error(result.records, color=color) + return + + renderers = { + "list": render_param_list_text, + "show": render_param_show_text, + "set": render_param_set_text, + "unset": render_param_set_text, + "diff": render_param_diff_text, + } + subcmd = getattr(args, "param_command", None) + renderer = renderers.get(subcmd) + if renderer: + renderer(result.records) + else: + render_result(result, OutputMode.PLAIN) + + +def _should_colorize(): + from chipcompiler.cli.pretty import supports_color + return supports_color(file=sys.stdout) + + +def _render_log_text(args, result, color=True, run_dir=None) -> None: + from chipcompiler.cli.log_view import ( + render_log_listing_pretty, + render_log_pretty, + tail_lines_for_log, + ) + from chipcompiler.cli.pretty import render_error, render_generic_block + + if getattr(args, "errors", False): + print("warning: --errors is deprecated and no longer filters output", file=sys.stderr) + + if result.exit_code != 0: + render_error(result.records, color=color) + return + + records = result.records + if not records: + 
return + + first = records[0] + + # Status/sentinel records (no_logs, empty, etc.) + if "log_status" in first or "status" in first: + render_generic_block(records, color=color, tag="log") + return + + # Step mode: records have line_no and kind + if "line_no" in first: + inspect_cmd = first.get("inspect_cmd", "") + current_source = None + current_lines = [] + current_step = first["step"] + for rec in records: + src = rec["source"] + if src != current_source: + if current_source is not None: + render_log_pretty( + current_step, current_source, current_lines, + inspect_cmd, color=color, + ) + current_source = src + current_lines = [] + current_lines.append(rec["line"]) + if current_source is not None: + render_log_pretty( + current_step, current_source, current_lines, + inspect_cmd, color=color, + ) + return + + # Listing mode: compute tail previews only for pretty text output + tail_map = None + if run_dir: + tail_map = {} + for rec in records: + source = rec.get("source") or rec.get("log", "") + if not source: + continue + full_path = os.path.join(run_dir, source) + lines = tail_lines_for_log(full_path) + if lines: + tail_map[source] = lines + + render_log_listing_pretty(list(records), color=color, tail_map=tail_map) + + +def _render_log_plain(result) -> None: + from chipcompiler.cli.log_view import render_log_records_plain + + records = result.records + if not records: + return + + if "line_no" in records[0]: + render_log_records_plain(records) + return + + render_result(result, OutputMode.PLAIN) - try: - parse_filelist(normalized_path) - _, missing_files = validate_filelist(normalized_path) - if len(missing_files) == 0: - return ("filelist", "", normalized_path) - except Exception: - pass - return ("rtl", normalized_path, "") +def run(argv: Sequence[str] | None = None) -> int: + raw = list(argv) if argv is not None else sys.argv[1:] + if _is_legacy_args(raw): + return _run_legacy(raw) + + parser = build_parser() + args = parser.parse_args(raw) + + if args.command 
is None: + parser.print_help() + return 1 + + ctx = build_context(args) + result = dispatch(args, ctx) + + color = _should_colorize() + + if args.command == "param" and ctx.output_mode == OutputMode.TEXT: + _render_param_text(args, result, color=color) + elif args.command == "log" and ctx.output_mode == OutputMode.TEXT: + _render_log_text(args, result, color=color, run_dir=ctx.run_dir) + elif args.command == "log" and ctx.output_mode == OutputMode.PLAIN: + _render_log_plain(result) + else: + render_result(result, ctx.output_mode, command=args.command, color=color) + + return result.exit_code + + +_LEGACY_FLAGS = {"--workspace", "--rtl", "--design", "--top", "--clock", "--pdk-root", "--freq"} + + +def _is_legacy_args(args: list[str]) -> bool: + for a in args: + if a in _LEGACY_FLAGS: + return True + if "=" in a: + flag = a.split("=", 1)[0] + if flag in _LEGACY_FLAGS: + return True + return False -def build_parameters(args: argparse.Namespace) -> dict: - parameters = get_parameters("ics55") - parameters.data.update( - { - "PDK": "ics55", - "Design": args.design, - "Top module": args.top, - "Clock": args.clock, - "Frequency max [MHz]": args.freq, - } - ) - return parameters.data +def _resolve_rtl_input(rtl_path: str) -> tuple[str, str]: + from chipcompiler.utility.filelist import parse_filelist, validate_filelist -def _validate_args(args: argparse.Namespace) -> str | None: + normalized = os.path.abspath(os.path.expanduser(rtl_path)) + suffix = os.path.splitext(normalized)[1].lower() + if suffix in {".f", ".fl", ".filelist"}: + return ("", normalized) + if suffix in {".v", ".sv", ".svh", ".vh"}: + return (normalized, "") + try: + parse_filelist(normalized) + _, missing = validate_filelist(normalized) + if not missing: + return ("", normalized) + except Exception: + pass + return (normalized, "") + + +def _validate_legacy_args(args) -> str | None: if not str(args.workspace).strip(): return "--workspace must not be empty" if not str(args.design).strip(): @@ -78,38 
+329,58 @@ def _validate_args(args: argparse.Namespace) -> str | None: return "--top must not be empty" if not str(args.clock).strip(): return "--clock must not be empty" - rtl_path = os.path.abspath(os.path.expanduser(args.rtl)) if not os.path.exists(rtl_path): return f"--rtl path does not exist: {rtl_path}" if not os.path.isfile(rtl_path): return f"--rtl must point to a file: {rtl_path}" - pdk_root = os.path.abspath(os.path.expanduser(args.pdk_root)) if not os.path.exists(pdk_root): return f"--pdk-root path does not exist: {pdk_root}" if not os.path.isdir(pdk_root): return f"--pdk-root must point to a directory: {pdk_root}" - if args.freq <= 0: return "--freq must be greater than 0" - return None -def run(argv: Sequence[str] | None = None) -> int: - parser = build_parser() - args = parser.parse_args(list(argv) if argv is not None else None) +def _run_legacy(argv: list[str]) -> int: + import argparse as _argparse + + from chipcompiler.data import create_workspace, get_parameters + from chipcompiler.engine import EngineFlow + from chipcompiler.rtl2gds import build_rtl2gds_flow - validation_error = _validate_args(args) - if validation_error: - print(f"Error: {validation_error}", file=sys.stderr) + parser = _argparse.ArgumentParser( + prog="cli", + description="Legacy parameter-only invocation (use 'ecc run' for project-based flows)", + ) + parser.add_argument("--workspace", required=True) + parser.add_argument("--rtl", required=True) + parser.add_argument("--design", required=True) + parser.add_argument("--top", required=True) + parser.add_argument("--clock", required=True) + parser.add_argument("--pdk-root", required=True) + parser.add_argument("--freq", type=float, default=100.0) + args = parser.parse_args(argv) + + err = _validate_legacy_args(args) + if err: + print(f"Error: {err}", file=sys.stderr) return 1 - try: - _, origin_verilog, input_filelist = resolve_rtl_input(args.rtl) - parameters = build_parameters(args) + parameters = get_parameters("ics55") + 
parameters.data.update({ + "PDK": "ics55", + "Design": args.design, + "Top module": args.top, + "Clock": args.clock, + "Frequency max [MHz]": args.freq, + }) + origin_verilog, input_filelist = _resolve_rtl_input(args.rtl) + + try: workspace = create_workspace( directory=args.workspace, origin_def="", @@ -119,26 +390,27 @@ def run(argv: Sequence[str] | None = None) -> int: input_filelist=input_filelist, pdk_root=args.pdk_root, ) - if workspace is None: - print("Error: failed to create workspace", file=sys.stderr) - return 1 + except Exception as exc: + print(f"Error: {exc}", file=sys.stderr) + return 1 - engine_flow = EngineFlow(workspace=workspace) - if not engine_flow.has_init(): - for step, tool, state in build_rtl2gds_flow(): - engine_flow.add_step(step=step, tool=tool, state=state) + if workspace is None: + print("Error: failed to create workspace", file=sys.stderr) + return 1 - engine_flow.create_step_workspaces() + engine_flow = EngineFlow(workspace=workspace) + if not engine_flow.has_init(): + for step, tool, state in build_rtl2gds_flow(): + engine_flow.add_step(step=step, tool=tool, state=state) - if not engine_flow.run_steps(): - print("Error: flow execution failed", file=sys.stderr) - return 1 + engine_flow.create_step_workspaces() - return 0 - except Exception as exc: - print(f"Error: {exc}", file=sys.stderr) + if not engine_flow.run_steps(): + print("Error: flow execution failed", file=sys.stderr) return 1 + return 0 + def main() -> None: sys.exit(run()) diff --git a/chipcompiler/cli/output.py b/chipcompiler/cli/output.py new file mode 100644 index 00000000..78eb36e9 --- /dev/null +++ b/chipcompiler/cli/output.py @@ -0,0 +1,84 @@ +import re +import shlex + + +def disclosure_cmd(command: str, project: str | None = None, + run_id: str | None = None) -> str: + parts = [command] + if project: + parts.append(f"--project {shlex.quote(project)}") + if run_id: + parts.append(f"--run-id {shlex.quote(run_id)}") + return " ".join(parts) + + +def 
normalize_step_name(internal: str) -> str: + mapping = { + "Synthesis": "synthesis", + "Floorplan": "floorplan", + "fixFanout": "fixfanout", + "place": "placement", + "CTS": "cts", + "legalization": "legalization", + "route": "routing", + "drc": "drc", + "filler": "filler", + } + return mapping.get(internal, internal.lower()) + + +def normalize_state(internal: str) -> str: + mapping = { + "Success": "success", + "Incomplete": "incomplete", + "Unstart": "unstart", + "Ongoing": "ongoing", + "Pending": "pending", + "Invalid": "invalid", + } + return mapping.get(internal, internal.lower()) + + +def normalize_metric_key(raw_key: str) -> str: + known = { + "Cell number": "cell_number", + "Cell area": "cell_area", + "Wire number": "wire_number", + "Port number": "port_number", + "Frequency [MHz]": "frequency_mhz", + "Die area [μm^2]": "die_area_um2", + "Die width [um]": "die_width_um", + "Die height [um]": "die_height_um", + "Die util": "die_util", + "Core util": "core_util", + "Total io pins": "total_io_pins", + "Total instances": "total_instances", + "Total nets": "total_nets", + "max_WNS": "max_wns", + "max_TNS": "max_tns", + "min_WNS": "min_wns", + "min_TNS": "min_tns", + "GP HPWL": "gp_hpwl", + "DP HPWL": "dp_hpwl", + "overflow": "overflow", + "overflow_number": "overflow_number", + "bin_number": "bin_number", + "buffer_num": "buffer_num", + "buffer_area": "buffer_area", + "clock_path_max_buffer": "clock_path_max_buffer", + "clock_path_min_buffer": "clock_path_min_buffer", + "total_clock_wirelength": "total_clock_wirelength", + "wire_len": "wire_len", + "num_via": "num_via", + "total_movement": "total_movement", + "drc_num": "drc_num", + "Max fanout": "max_fanout", + "Tool": "tool", + } + if raw_key in known: + return known[raw_key] + s = raw_key.lower() + s = re.sub(r'[\s\[\]μ^]+', '_', s) + s = re.sub(r'_+', '_', s) + s = s.strip('_') + return s diff --git a/chipcompiler/cli/param_handler.py b/chipcompiler/cli/param_handler.py new file mode 100644 index 
00000000..ca5edbd3 --- /dev/null +++ b/chipcompiler/cli/param_handler.py @@ -0,0 +1,475 @@ +from __future__ import annotations + +import os +import re +import sys + +from chipcompiler.cli.output import disclosure_cmd +from chipcompiler.cli.params import ( + ResolvedParam, + build_backend_overrides, + is_known_key, + list_groups, + list_schemas, + lookup_schema, + parse_cli_overrides, + parse_value, + resolve_parameters, + validate_value, +) +from chipcompiler.cli.records import error_record +from chipcompiler.cli.types import CommandContext, CommandResult, OutputMode + + +def param_list(args, ctx: CommandContext) -> CommandResult: + toml_overrides, param_errors = _load_toml_overrides(ctx.project_dir) + if param_errors: + return CommandResult.err([error_record("invalid_param_config", reason=e) for e in param_errors]) + resolved, _ = resolve_parameters(toml_overrides=toml_overrides) + project = ctx.project + + records = [] + for rp in resolved: + s = rp.schema + record = { + "param": s.param, + "group": s.group, + "name": s.name, + "value": rp.value, + "default": s.default, + "source": rp.source, + "type": s.type, + "applies": s.applies, + "maps_to": _maps_to_str(s.maps_to), + "description": s.description, + "inspect": disclosure_cmd(f"ecc param show {s.param}", project), + } + if s.range is not None: + record["range"] = f"[{s.range[0]}, {s.range[1]}]" + if s.choices is not None: + record["choices"] = ", ".join(s.choices) + if s.unit is not None: + record["unit"] = s.unit + records.append(record) + + return CommandResult.ok(records) + + +def param_show(args, ctx: CommandContext) -> CommandResult: + key = args.key + schema = lookup_schema(key) + if schema is None: + return CommandResult.err([error_record( + "unknown_parameter", + param=key, + )], exit_code=1) + + toml_overrides, param_errors = _load_toml_overrides(ctx.project_dir) + if param_errors: + return CommandResult.err([error_record("invalid_param_config", reason=e) for e in param_errors]) + resolved, _ = 
resolve_parameters(toml_overrides=toml_overrides) + rp = next(r for r in resolved if r.param == key) + + record = { + "param": rp.param, + "value": rp.value, + "default": rp.default, + "source": rp.source, + "type": schema.type, + "applies": schema.applies, + "maps_to": _maps_to_str(schema.maps_to), + "description": schema.description, + "inspect": disclosure_cmd(f"ecc param show {rp.param}", ctx.project), + "set": disclosure_cmd(f"ecc param set {rp.param}", ctx.project), + "run": disclosure_cmd(f"ecc run --set {rp.param}=", ctx.project), + } + if schema.range is not None: + record["range"] = f"[{schema.range[0]}, {schema.range[1]}]" + if schema.choices is not None: + record["choices"] = ", ".join(schema.choices) + if schema.unit is not None: + record["unit"] = schema.unit + + return CommandResult.ok([record]) + + +def param_set(args, ctx: CommandContext) -> CommandResult: + key = args.key + raw_value = args.value + + schema = lookup_schema(key) + if schema is None: + return CommandResult.err([error_record( + "unknown_parameter", + param=key, + )], exit_code=1) + + try: + value = parse_value(raw_value, schema) + except ValueError as exc: + return CommandResult.err([error_record( + "invalid_value", + param=key, + reason=str(exc), + )], exit_code=1) + + val_errors = validate_value(value, schema) + if val_errors: + return CommandResult.err([error_record( + "invalid_value", + param=key, + reason=val_errors[0], + )], exit_code=1) + + config_path = _find_config_path(ctx.project_dir) + if config_path is None: + return CommandResult.err([error_record( + "missing_config", + )], exit_code=1) + + _write_param_to_toml(config_path, key, value) + + return CommandResult.ok([{ + "param": key, + "value": value, + "status": "set", + "source": "ecc.toml", + }]) + + +def param_unset(args, ctx: CommandContext) -> CommandResult: + key = args.key + + schema = lookup_schema(key) + if schema is None: + return CommandResult.err([error_record( + "unknown_parameter", + param=key, + )], 
exit_code=1) + + config_path = _find_config_path(ctx.project_dir) + if config_path is None: + return CommandResult.ok([{ + "param": key, + "status": "no_override", + "source": "default", + }]) + + removed = _remove_param_from_toml(config_path, key) + + if removed: + return CommandResult.ok([{ + "param": key, + "status": "unset", + "value": schema.default, + "source": "default", + }]) + return CommandResult.ok([{ + "param": key, + "status": "no_override", + "source": "default", + }]) + + +def param_diff(args, ctx: CommandContext) -> CommandResult: + toml_overrides, param_errors = _load_toml_overrides(ctx.project_dir) + if param_errors: + return CommandResult.err([error_record("invalid_param_config", reason=e) for e in param_errors]) + resolved, _ = resolve_parameters(toml_overrides=toml_overrides) + + records = [] + for rp in resolved: + if rp.value != rp.default: + records.append({ + "param": rp.param, + "value": rp.value, + "default": rp.default, + "source": rp.source, + }) + + if not records: + return CommandResult.ok([{"diff_status": "clean"}]) + + return CommandResult.ok(records) + + +# --------------------------------------------------------------------------- +# Pretty rendering for param commands +# --------------------------------------------------------------------------- + +def render_param_result(result, mode: OutputMode, file=None) -> bool: + """Render param-specific output. 
Returns True if handled, False otherwise.""" + target = file or sys.stdout + + if mode == OutputMode.JSON: + from chipcompiler.cli.render import render_json + render_json(result, file=target) + return True + if mode == OutputMode.JSONL: + from chipcompiler.cli.render import render_jsonl + render_jsonl(result, file=target) + return True + if mode == OutputMode.PLAIN: + from chipcompiler.cli.render import render_plain + render_plain(result.records, file=target) + return True + + return False + + +def render_param_list_text(records, file=None): + target = file or sys.stdout + groups: dict[str, list] = {} + for r in records: + g = r.get("group", "") + groups.setdefault(g, []).append(r) + + for group_name, group_records in groups.items(): + print(f" {group_name}", file=target) + for r in group_records: + val = r.get("value") + src = r.get("source", "default") + line = f" {r['param']:30s} {val}" + if src != "default": + line += f" ({src})" + print(line, file=target) + + +def render_param_show_text(records, file=None): + target = file or sys.stdout + r = records[0] + + print(f" {r['param']}", file=target) + for field in ("value", "default", "source", "type", "applies", + "maps_to", "description", "range", "choices", "unit", + "inspect", "set", "run"): + val = r.get(field) + if val is not None: + label = field.replace("_", " ") + print(f" {label:14s} {val}", file=target) + + +def render_param_set_text(records, file=None): + target = file or sys.stdout + r = records[0] + status = r.get("status", "") + if status == "set": + print(f" set {r['param']} = {r['value']} (ecc.toml)", file=target) + elif status == "no_override": + print(f" {r['param']}: no override to remove", file=target) + elif status == "unset": + print(f" unset {r['param']} (now default: {r['value']})", file=target) + else: + from chipcompiler.cli.render import render_text + render_text(records, file=target) + + +def render_param_diff_text(records, file=None): + target = file or sys.stdout + if len(records) == 1 
and records[0].get("diff_status") == "clean": + print(" No overrides.", file=target) + return + for r in records: + print(f" {r['param']:30s} {r['value']} (was {r['default']}, {r['source']})", file=target) + + +# --------------------------------------------------------------------------- +# Internal helpers +# --------------------------------------------------------------------------- + +def _maps_to_str(maps_to): + if isinstance(maps_to, str): + return maps_to + parts = [f"{k}.{v}" for k, v in maps_to.items()] + return ", ".join(parts) + + +def _find_config_path(project_dir: str) -> str | None: + path = os.path.join(project_dir, "ecc.toml") + return path if os.path.isfile(path) else None + + +def _load_toml_overrides(project_dir: str) -> tuple[dict[str, object], list[str]]: + from chipcompiler.cli.config import load_project_config + + config_path = _find_config_path(project_dir) + if config_path is None: + return {}, [] + + cfg = load_project_config(config_path) + errors = list(getattr(cfg, "_param_errors", [])) + toml_error = getattr(cfg, "_toml_error", None) + if toml_error: + errors.insert(0, f"malformed ecc.toml: {toml_error}") + overrides = dict(cfg.params_overrides) + if "design.frequency_mhz" not in overrides and cfg.design_frequency_mhz > 0: + overrides["design.frequency_mhz"] = cfg.design_frequency_mhz + return overrides, errors + + +def _write_param_to_toml(config_path: str, key: str, value: object) -> None: + group, _, name = key.partition(".") + + with open(config_path, "r") as f: + original = f.read() + + new_text = _apply_scoped_param_edit(original, group, name, value) + + with open(config_path, "w") as f: + f.write(new_text) + + +def _remove_param_from_toml(config_path: str, key: str) -> bool: + group, _, name = key.partition(".") + + with open(config_path, "r") as f: + original = f.read() + + result = _remove_scoped_param_key(original, group, name) + if result is None: + return False + + with open(config_path, "w") as f: + f.write(result) + return 
True + + +_TABLE_HEADER_RE = re.compile(r"^[ \t]*\[([^\]]+)\][ \t]*(?:#.*)?$", re.MULTILINE) + + +def _find_table_span(text: str, table_name: str) -> tuple[int, int] | None: + """Return (body_start, body_end) for a TOML table, or None.""" + for m in _TABLE_HEADER_RE.finditer(text): + if m.group(1).strip() == table_name: + header_end = m.end() + nl = text.find("\n", header_end) + if nl == -1: + body_start = len(text) + else: + body_start = nl + 1 + next_header = _TABLE_HEADER_RE.search(text, body_start) + body_end = next_header.start() if next_header else len(text) + return body_start, body_end + return None + + +def _extend_multiline_value(text: str, match_end: int) -> int: + """Extend match end past continuation lines for multiline TOML values. + + After matching `key = ...` on one line, consume subsequent lines if the + value has unclosed brackets (arrays or inline tables). + """ + line_start = text.rfind("\n", 0, match_end) + 1 + matched_line = text[line_start:match_end] + + depth = 0 + eq_pos = matched_line.find("=") + if eq_pos >= 0: + for ch in matched_line[eq_pos + 1:]: + if ch in ("[", "{"): + depth += 1 + elif ch in ("]", "}"): + depth -= 1 + + if depth <= 0: + return match_end + + pos = match_end + while pos < len(text) and depth > 0: + ch = text[pos] + if ch in ("[", "{"): + depth += 1 + elif ch in ("]", "}"): + depth -= 1 + pos += 1 + + while pos < len(text) and text[pos] in (" ", "\t"): + pos += 1 + if pos < len(text) and text[pos] == "\n": + pos += 1 + + return pos + + +def _apply_scoped_param_edit(text: str, group: str, name: str, value: object) -> str: + value_str = _format_toml_value(value) + target_table = f"params.{group}" + + span = _find_table_span(text, target_table) + if span is None: + params_span = _find_table_span(text, "params") + if params_span is None: + return text.rstrip() + f"\n\n[{target_table}]\n{name} = {value_str}\n" + body_start, body_end = params_span + insert = f"\n\n[{target_table}]\n{name} = {value_str}" + next_header = 
_TABLE_HEADER_RE.search(text, body_start) + if next_header: + pos = next_header.start() + return text[:pos] + insert + "\n" + text[pos:] + return text + insert + "\n" + + body_start, body_end = span + section_body = text[body_start:body_end] + key_pattern = re.compile(rf"^(\s*){re.escape(name)}\s*=[^\n]*$", re.MULTILINE) + key_match = key_pattern.search(section_body) + + if key_match: + indent = key_match.group(1) + end = _extend_multiline_value(section_body, key_match.end()) + new_line = f"{indent}{name} = {value_str}" + if end > key_match.end(): + new_line += "\n" + new_body = section_body[:key_match.start()] + new_line + section_body[end:] + return text[:body_start] + new_body + text[body_end:] + else: + insert = f"{name} = {value_str}\n" + return text[:body_start] + insert + text[body_start:] + + +def _remove_scoped_param_key(text: str, group: str, name: str) -> str | None: + target_table = f"params.{group}" + + span = _find_table_span(text, target_table) + if span is None: + return None + + body_start, body_end = span + section_body = text[body_start:body_end] + key_pattern = re.compile(rf"^\s*{re.escape(name)}\s*=[^\n]*\n?", re.MULTILINE) + key_match = key_pattern.search(section_body) + if not key_match: + return None + + end = _extend_multiline_value(section_body, key_match.end()) + # Consume trailing newline after multiline value + if section_body[end:end + 1] == "\n": + end += 1 + new_body = section_body[:key_match.start()] + section_body[end:] + remaining_keys = [l for l in new_body.strip().split("\n") if l.strip()] + if not remaining_keys: + header_match = None + for m in _TABLE_HEADER_RE.finditer(text): + if m.group(1).strip() == target_table: + header_match = m + break + if header_match is None: + return None + header_start = header_match.start() + result = text[:header_start].rstrip("\n") + "\n" + text[body_end:].lstrip("\n") + return result if result.strip() else None + else: + return text[:body_start] + new_body + text[body_end:] + + +def 
_format_toml_value(val: object) -> str: + if isinstance(val, bool): + return "true" if val else "false" + if isinstance(val, (int, float)): + return str(val) + if isinstance(val, str): + escaped = val.replace("\\", "\\\\").replace('"', '\\"') + return f'"{escaped}"' + if isinstance(val, (list, tuple)): + items = ", ".join(_format_toml_value(v) for v in val) + return f"[{items}]" + return str(val) diff --git a/chipcompiler/cli/params.py b/chipcompiler/cli/params.py new file mode 100644 index 00000000..b044c809 --- /dev/null +++ b/chipcompiler/cli/params.py @@ -0,0 +1,469 @@ +from __future__ import annotations + +from dataclasses import dataclass + + +@dataclass(frozen=True) +class ParamSchema: + param: str + group: str + name: str + type: str + default: object + applies: str + maps_to: str | dict + description: str + range: tuple[float, float] | None = None + choices: tuple[str, ...] | None = None + unit: str | None = None + example: str | None = None + + +PARAM_REGISTRY: tuple[ParamSchema, ...] 
= ( + ParamSchema( + param="design.frequency_mhz", + group="design", + name="frequency_mhz", + type="float", + default=100.0, + applies="synthesis", + maps_to="Frequency max [MHz]", + description="Target clock frequency in MHz", + range=(1e-6, 10000.0), + unit="MHz", + example="200.0", + ), + ParamSchema( + param="floorplan.core_util", + group="floorplan", + name="core_util", + type="float", + default=0.4, + applies="floorplan", + maps_to={"Core": "Utilitization"}, + description="Core utilization ratio", + range=(0.01, 1.0), + example="0.45", + ), + ParamSchema( + param="floorplan.core_margin", + group="floorplan", + name="core_margin", + type="list[int]", + default=[2, 2], + applies="floorplan", + maps_to={"Core": "Margin"}, + description="Core margin in micrometers [horizontal, vertical]", + example="[2, 2]", + ), + ParamSchema( + param="floorplan.aspect_ratio", + group="floorplan", + name="aspect_ratio", + type="float", + default=1.0, + applies="floorplan", + maps_to={"Core": "Aspect ratio"}, + description="Core aspect ratio (width/height)", + range=(0.1, 10.0), + example="1.0", + ), + ParamSchema( + param="synth.max_fanout", + group="synth", + name="max_fanout", + type="int", + default=20, + applies="fixfanout", + maps_to="Max fanout", + description="Maximum fanout for netlist optimization", + range=(1, 200), + example="16", + ), + ParamSchema( + param="place.target_density", + group="place", + name="target_density", + type="float", + default=0.2, + applies="placement", + maps_to={"DreamPlace": "target_density"}, + description="Target placement density", + range=(0.1, 0.95), + example="0.65", + ), + ParamSchema( + param="place.target_overflow", + group="place", + name="target_overflow", + type="float", + default=0.1, + applies="placement", + maps_to={"DreamPlace": "stop_overflow"}, + description="Target overflow for global placement", + range=(0.0, 1.0), + example="0.08", + ), + ParamSchema( + param="place.global_right_padding", + group="place", + 
name="global_right_padding", + type="int", + default=0, + applies="placement", + maps_to="Global right padding", + description="Global right padding for placement sites", + range=(0, 100), + example="8", + ), + ParamSchema( + param="place.cell_padding_x", + group="place", + name="cell_padding_x", + type="int", + default=600, + applies="placement", + maps_to={"DreamPlace": "cell_padding_x"}, + description="Cell padding in x-direction in database units", + range=(0, 10000), + example="400", + ), + ParamSchema( + param="place.routability_opt", + group="place", + name="routability_opt", + type="int", + default=1, + applies="placement", + maps_to={"DreamPlace": "routability_opt_flag"}, + description="Enable routability-driven placement optimization", + choices=("0", "1"), + example="1", + ), + ParamSchema( + param="route.bottom_layer", + group="route", + name="bottom_layer", + type="str", + default="MET2", + applies="routing", + maps_to="Bottom layer", + description="Bottom routing layer", + choices=("MET1", "MET2", "MET3", "MET4", "MET5"), + example="MET2", + ), + ParamSchema( + param="route.top_layer", + group="route", + name="top_layer", + type="str", + default="MET5", + applies="routing", + maps_to="Top layer", + description="Top routing layer", + choices=("MET2", "MET3", "MET4", "MET5", "MET6"), + example="MET5", + ), +) + +_REGISTRY_INDEX: dict[str, ParamSchema] = {s.param: s for s in PARAM_REGISTRY} +_REQUIRED_FIELDS = ("param", "group", "name", "type", "default", "applies", "maps_to", "description") + + +def lookup_schema(key: str) -> ParamSchema | None: + return _REGISTRY_INDEX.get(key) + + +def list_schemas() -> tuple[ParamSchema, ...]: + return PARAM_REGISTRY + + +def list_groups() -> list[str]: + seen: list[str] = [] + for s in PARAM_REGISTRY: + if s.group not in seen: + seen.append(s.group) + return seen + + +def is_known_key(key: str) -> bool: + return key in _REGISTRY_INDEX + + +def validate_schema_record(schema: ParamSchema) -> list[str]: + return 
[f"missing required field: {f}" for f in _REQUIRED_FIELDS if not getattr(schema, f, None)] + + +# --------------------------------------------------------------------------- +# Value parsing +# --------------------------------------------------------------------------- + +def parse_value(raw: str, schema: ParamSchema) -> object: + ptype = schema.type + + if ptype == "int": + try: + return int(raw) + except ValueError: + raise ValueError(f"expected int for {schema.param}, got '{raw}'") + + if ptype == "float": + try: + return float(raw) + except ValueError: + raise ValueError(f"expected float for {schema.param}, got '{raw}'") + + if ptype == "bool": + low = raw.lower() + if low in ("true", "1", "yes"): + return True + if low in ("false", "0", "no"): + return False + raise ValueError(f"expected bool for {schema.param}, got '{raw}'") + + if ptype == "str": + return raw + + if ptype in ("list[int]", "list[float]", "list[str]"): + stripped = raw.strip("[]() ") + if not stripped: + return [] + parts = [p.strip() for p in stripped.split(",")] + if ptype == "list[int]": + try: + return [int(p) for p in parts if p] + except ValueError: + raise ValueError(f"expected list[int] for {schema.param}, got '{raw}'") + if ptype == "list[float]": + try: + return [float(p) for p in parts if p] + except ValueError: + raise ValueError(f"expected list[float] for {schema.param}, got '{raw}'") + return [p for p in parts if p] + + raise ValueError(f"unsupported type '{ptype}' for {schema.param}") + + +def validate_value(value: object, schema: ParamSchema) -> list[str]: + errors: list[str] = [] + + if schema.range is not None: + lo, hi = schema.range + if isinstance(value, (int, float)): + if value < lo or value > hi: + errors.append(f"value {value} out of range [{lo}, {hi}] for {schema.param}") + + if schema.choices is not None: + str_val = str(value) + if str_val not in schema.choices: + errors.append(f"value '{str_val}' not in allowed choices {schema.choices} for {schema.param}") + + 
return errors + + +# --------------------------------------------------------------------------- +# Source-aware resolution +# --------------------------------------------------------------------------- + +@dataclass +class ResolvedParam: + param: str + value: object + default: object + source: str + schema: ParamSchema + + +def _validate_toml_type(value: object, schema: ParamSchema) -> tuple[object, str | None]: + ptype = schema.type + key = schema.param + + if ptype == "int": + if isinstance(value, bool) or not isinstance(value, int): + return value, f"expected int for {key}, got {type(value).__name__}" + return value, None + + if ptype == "float": + if isinstance(value, bool): + return value, f"expected float for {key}, got bool" + if isinstance(value, (int, float)): + return float(value), None + return value, f"expected float for {key}, got {type(value).__name__}" + + if ptype == "bool": + if isinstance(value, bool): + return value, None + if isinstance(value, str): + low = value.lower() + if low in ("true", "1", "yes"): + return True, None + if low in ("false", "0", "no"): + return False, None + return value, f"expected bool for {key}, got {type(value).__name__}" + + if ptype == "str": + if isinstance(value, str): + return value, None + return value, f"expected str for {key}, got {type(value).__name__}" + + if ptype == "list[int]": + if not isinstance(value, list): + return value, f"expected list for {key}, got {type(value).__name__}" + for i, v in enumerate(value): + if isinstance(v, bool) or not isinstance(v, int): + return value, f"expected list[int] for {key}, element {i} is {type(v).__name__}" + return value, None + + if ptype == "list[float]": + if not isinstance(value, list): + return value, f"expected list for {key}, got {type(value).__name__}" + for i, v in enumerate(value): + if isinstance(v, bool): + return value, f"expected list[float] for {key}, element {i} is bool" + if not isinstance(v, (int, float)): + return value, f"expected list[float] for 
{key}, element {i} is {type(v).__name__}" + return [float(v) for v in value], None + + if ptype == "list[str]": + if not isinstance(value, list): + return value, f"expected list for {key}, got {type(value).__name__}" + for i, v in enumerate(value): + if not isinstance(v, str): + return value, f"expected list[str] for {key}, element {i} is {type(v).__name__}" + return value, None + + return value, None + + +def resolve_parameters( + toml_overrides: dict[str, object] | None = None, + cli_overrides: dict[str, object] | None = None, +) -> tuple[list[ResolvedParam], list[str]]: + toml_overrides = toml_overrides or {} + cli_overrides = cli_overrides or {} + resolved: list[ResolvedParam] = [] + errors: list[str] = [] + + for schema in PARAM_REGISTRY: + key = schema.param + if key in cli_overrides: + value = cli_overrides[key] + val_errors = validate_value(value, schema) + if val_errors: + errors.extend(val_errors) + resolved.append(ResolvedParam( + param=key, value=value, default=schema.default, + source="cli", schema=schema, + )) + elif key in toml_overrides: + value = toml_overrides[key] + value, coerce_err = _validate_toml_type(value, schema) + if coerce_err: + errors.append(coerce_err) + val_errors = validate_value(value, schema) + if val_errors: + errors.extend(val_errors) + resolved.append(ResolvedParam( + param=key, value=value, default=schema.default, + source="ecc.toml", schema=schema, + )) + else: + resolved.append(ResolvedParam( + param=key, value=schema.default, default=schema.default, + source="default", schema=schema, + )) + + return resolved, errors + + +# --------------------------------------------------------------------------- +# Semantic-to-backend mapping +# --------------------------------------------------------------------------- + +def build_backend_overrides(resolved: list[ResolvedParam]) -> dict: + overrides: dict = {} + for rp in resolved: + if rp.value == rp.default and rp.source == "default": + continue + maps_to = rp.schema.maps_to + value = 
rp.value + if isinstance(maps_to, str): + overrides[maps_to] = value + elif isinstance(maps_to, dict): + for parent_key, child_key in maps_to.items(): + if parent_key not in overrides: + overrides[parent_key] = {} + overrides[parent_key][child_key] = value + return overrides + + +def parse_cli_overrides(pairs: list[str]) -> tuple[dict[str, object], list[str]]: + result: dict[str, object] = {} + errors: list[str] = [] + + for pair in pairs: + if "=" not in pair: + errors.append(f"malformed override: '{pair}' (expected key=value)") + continue + + key, _, raw_value = pair.partition("=") + key = key.strip() + raw_value = raw_value.strip() + + schema = lookup_schema(key) + if schema is None: + errors.append(f"unknown parameter: '{key}'") + continue + + try: + value = parse_value(raw_value, schema) + except ValueError as exc: + errors.append(str(exc)) + continue + + val_errors = validate_value(value, schema) + if val_errors: + errors.extend(val_errors) + continue + + result[key] = value + + return result, errors + + +def parse_toml_params(params_table: dict) -> tuple[dict[str, object], list[str]]: + flat: dict[str, object] = {} + errors: list[str] = [] + + for group_key, group_val in params_table.items(): + if not isinstance(group_val, dict): + errors.append(f"[params.{group_key}] must be a table, got {type(group_val).__name__}") + continue + + for name_key, value in group_val.items(): + param_key = f"{group_key}.{name_key}" + schema = lookup_schema(param_key) + if schema is None: + errors.append(f"unknown parameter in ecc.toml: '{param_key}'") + continue + + try: + if isinstance(value, str): + parsed = parse_value(value, schema) + else: + parsed, type_err = _validate_toml_type(value, schema) + if type_err: + errors.append(type_err) + continue + except ValueError as exc: + errors.append(str(exc)) + continue + + val_errors = validate_value(parsed, schema) + if val_errors: + errors.extend(val_errors) + continue + + flat[param_key] = parsed + + return flat, errors diff 
--git a/chipcompiler/cli/pretty.py b/chipcompiler/cli/pretty.py new file mode 100644 index 00000000..33a9b9c4 --- /dev/null +++ b/chipcompiler/cli/pretty.py @@ -0,0 +1,457 @@ +import os +import sys + +from chipcompiler.cli.types import OutputMode + +# --- ANSI constants --- + +BOLD = "\x1b[1m" +DIM = "\x1b[2m" +RED = "\x1b[31m" +GREEN = "\x1b[32m" +YELLOW = "\x1b[33m" +BLUE = "\x1b[34m" +CYAN = "\x1b[36m" +RESET = "\x1b[0m" + +# --- Color gating --- + + +def supports_color(file=None, env=None, mode=None): + if env is None: + env = os.environ + if mode is not None and mode != OutputMode.TEXT: + return False + target = file or sys.stdout + if not hasattr(target, "isatty") or not target.isatty(): + return False + if env.get("NO_COLOR") is not None: + return False + if env.get("TERM", "") == "dumb": + return False + return True + + +def style(text, code, enabled=True): + if not enabled: + return text + return f"{code}{text}{RESET}" + + +# --- Display key normalization --- + + +def display_key(key): + k = key[:-4] if key.endswith("_cmd") else key + return k.replace("_", " ") + + +# --- Value formatting --- + + +# --- Pretty block rendering --- + + +def render_header(tag, color=True): + return style(f"[{tag}]", BOLD, color) + + +def render_field(label, value, color=True, dim_label=False): + if dim_label: + return f" {style(label + ':', DIM, color)} {value}" + return f" {label}: {value}" + + +def render_generic_block(records, file=None, color=True, tag=None): + """Render records as a generic pretty block.""" + target = file or sys.stdout + first = records[0] if records else {} + + header_tag = tag or _infer_tag(first) + target.write(f"{render_header(header_tag, color)}\n") + + for record in records: + for key, value in record.items(): + if value is None: + continue + dk = display_key(key) + target.write(f" {dk}: {value}\n") + + target.write("\n") + + +def _infer_tag(record): + for key in ("status", "run", "project", "kind"): + if key in record: + return key + return 
"result" + + +# --- Status-specific color helpers --- + +_STATUS_COLORS = { + "success": GREEN, + "clean": GREEN, + "checked": GREEN, + "created": GREEN, + "pass": GREEN, + "set": GREEN, + "failed": RED, + "fail": RED, + "missing": RED, + "corrupt": RED, + "error": RED, + "unknown_step": RED, + "invalid": RED, + "warning": YELLOW, + "incomplete": YELLOW, + "ongoing": YELLOW, + "pending": YELLOW, +} + + +def status_style(status_text, color=True): + code = _STATUS_COLORS.get(status_text) + if code and color: + return style(status_text, code, True) + return status_text + + +# --- Command-specific pretty renderers --- + + +def render_init(records, file=None, color=True): + target = file or sys.stdout + r = records[0] + target.write(f"{render_header('init', color)}\n") + target.write(f" project: {r.get('project', '')}\n") + target.write(f" status: {status_style(r.get('status', ''), color)}\n") + target.write(f" path: {r.get('path', '')}\n") + _render_disclosure_fields(target, r, color) + target.write("\n") + + +def render_check(records, file=None, color=True): + target = file or sys.stdout + first = records[0] + + if first.get("kind") == "error" or first.get("status") == "fail": + target.write(f"{render_header('check', color)}\n") + for r in records: + reason = r.get("reason", r.get("error", "")) + target.write(f" {status_style('fail', color)} {reason}\n") + if r.get("inspect"): + target.write(render_field("inspect", r["inspect"], color, dim_label=True) + "\n") + target.write("\n") + return + + target.write(f"{render_header('check', color)}\n") + r = records[0] + target.write(f" project: {r.get('project', '')}\n") + target.write(f" status: {status_style(r.get('status', ''), color)}\n") + target.write(f" config: {r.get('config', '')}\n") + _render_disclosure_fields(target, r, color) + + for r in records[1:]: + label = r.get("check", "") + st = r.get("status", "") + target.write(f" {label}: {status_style(st, color)}\n") + if r.get("path"): + target.write(f" path: 
{r['path']}\n") + if r.get("inspect"): + target.write(render_field("inspect", r["inspect"], color, dim_label=True) + "\n") + + target.write("\n") + + +def render_run_summary(records, file=None, color=True): + target = file or sys.stdout + r = records[0] + st = r.get("status", "") + tag = "run" + target.write(f"{render_header(tag, color)}\n") + target.write(f" run: {r.get('run', '')}\n") + target.write(f" status: {status_style(st, color)}\n") + target.write(f" workspace: {r.get('workspace', '')}\n") + _render_disclosure_fields(target, r, color) + target.write("\n") + + +def render_status(records, file=None, color=True): + target = file or sys.stdout + first = records[0] + + if first.get("kind") == "error": + render_generic_block(records, file=file, color=color, tag="status") + return + + st = first.get("status", "") + target.write(f"{render_header('status', color)}\n") + target.write(f" run: {first.get('run', '')}\n") + target.write(f" status: {status_style(st, color)}\n") + if first.get("workspace"): + target.write(f" workspace: {first['workspace']}\n") + _render_disclosure_fields(target, first, color) + + step_records = [r for r in records if "step" in r] + if step_records: + target.write("\n") + target.write(f" {style('steps', CYAN if color else None, color)}:\n" if color else " steps:\n") + for r in step_records: + step = r.get("step", "") + tool = r.get("tool", "") + st = r.get("status", "") + runtime = r.get("runtime", "") or "" + step_label = style(step, CYAN, color) if color else step + status_label = status_style(st, color) + line = f" {step_label} ({tool}) {status_label}" + if runtime: + line += f" {runtime}" + target.write(f"{line}\n") + _render_step_disclosure(target, r, color) + + target.write("\n") + + +def render_metrics(records, file=None, color=True): + target = file or sys.stdout + first = records[0] + + if first.get("kind") == "error" or first.get("status") in ("missing", "unknown_step", "corrupt"): + render_generic_block(records, file=file, 
color=color, tag="metrics") + return + + if first.get("metrics_status") == "none": + target.write(f"{render_header('metrics', color)}\n") + target.write(f" No metrics available.\n") + if first.get("inspect_cmd"): + target.write(render_field("inspect", first["inspect_cmd"], color, dim_label=True) + "\n") + target.write("\n") + return + + target.write(f"{render_header('metrics', color)}\n") + + current_step = None + for r in records: + step = r.get("step", r.get("metric_step", "")) + if step != current_step: + if current_step is not None: + target.write("\n") + current_step = step + target.write(f" {style(step, CYAN, color) if color else step}:\n") + + metric = r.get("metric", "") + value = r.get("value", "") + if metric: + target.write(f" {metric}: {value}\n") + elif r.get("status"): + target.write(f" {status_style(r['status'], color)}\n") + if r.get("source"): + target.write(render_field("source", r["source"], color, dim_label=True) + "\n") + if r.get("inspect"): + target.write(render_field("inspect", r["inspect"], color, dim_label=True) + "\n") + + target.write("\n") + + +def render_artifacts(records, file=None, color=True): + target = file or sys.stdout + first = records[0] + + if first.get("kind") == "error" or first.get("status") in ("unknown_step",): + render_generic_block(records, file=file, color=color, tag="artifacts") + return + + if first.get("artifacts_status") == "none": + target.write(f"{render_header('artifacts', color)}\n") + target.write(f" No artifacts found.\n") + if first.get("inspect_cmd"): + target.write(render_field("inspect", first["inspect_cmd"], color, dim_label=True) + "\n") + target.write("\n") + return + + target.write(f"{render_header('artifacts', color)}\n") + + current_step = None + for r in records: + step = r.get("step", "") + if step != current_step: + if current_step is not None: + target.write("\n") + current_step = step + target.write(f" {style(step, CYAN, color) if color else step}:\n") + + artifact = r.get("artifact", "") + 
role = r.get("role", "") + path = r.get("path", "") + target.write(f" {artifact} ({role})\n") + if path: + target.write(render_field("path", path, color, dim_label=True) + "\n") + if r.get("inspect"): + target.write(render_field("inspect", r["inspect"], color, dim_label=True) + "\n") + if r.get("metrics"): + target.write(render_field("metrics", r["metrics"], color, dim_label=True) + "\n") + if r.get("config"): + target.write(render_field("config", r["config"], color, dim_label=True) + "\n") + + target.write("\n") + + +def render_config(records, file=None, color=True): + target = file or sys.stdout + first = records[0] + + if first.get("kind") == "error": + render_generic_block(records, file=file, color=color, tag="config") + return + + if first.get("config_status") == "none": + target.write(f"{render_header('config', color)}\n") + msg = f" No configuration for step {first.get('step', '')}.\n" if first.get("step") else " No configuration found.\n" + target.write(msg) + if first.get("artifacts"): + target.write(render_field("artifacts", first["artifacts"], color, dim_label=True) + "\n") + target.write("\n") + return + + target.write(f"{render_header('config', color)}\n") + + current_scope = None + for r in records: + scope = r.get("scope", "") + if scope != current_scope: + if current_scope is not None: + target.write("\n") + current_scope = scope + scope_label = style(scope, CYAN, color) if color else scope + target.write(f" {scope_label}:\n") + + config = r.get("config", r.get("key", "")) + value = r.get("value", "") + source = r.get("source", "") + step = r.get("step", "") + + if r.get("kind") == "param": + target.write(f" {config}: {value}") + if source and source != "default": + target.write(f" ({source})") + target.write("\n") + elif scope == "step": + target.write(f" {config} ({r.get('role', '')})\n") + target.write(f" path: {r.get('path', '')}\n") + else: + target.write(f" {config}: {value}") + if source: + target.write(f" ({source})") + target.write("\n") + 
+ if r.get("inspect"): + target.write(render_field("inspect", r["inspect"], color, dim_label=True) + "\n") + + target.write("\n") + + +def render_diagnose(records, file=None, color=True): + target = file or sys.stdout + first = records[0] + + if first.get("kind") == "error": + render_generic_block(records, file=file, color=color, tag="diagnose") + return + + if first.get("status") == "clean": + target.write(f"{render_header('diagnose', color)}\n") + target.write(f" {status_style('clean', color)} No issues found.\n") + _render_disclosure_fields(target, first, color) + target.write("\n") + return + + target.write(f"{render_header('diagnose', color)}\n") + + by_severity = {} + for r in records: + sev = r.get("severity", "info") + by_severity.setdefault(sev, []).append(r) + + for severity in ("error", "warning", "info"): + issues = by_severity.get(severity, []) + if not issues: + continue + sev_label = status_style(severity, color) + target.write(f" {sev_label}:\n") + for r in issues: + issue = r.get("issue", "") + target.write(f" {issue}\n") + if r.get("evidence"): + target.write(f" evidence: {r['evidence']}\n") + if r.get("step"): + target.write(f" step: {r['step']}\n") + if r.get("count"): + target.write(f" count: {r['count']}\n") + _render_step_disclosure(target, r, color, indent=" ") + + target.write("\n") + + +def render_error(records, file=None, color=True): + target = file or sys.stdout + target.write(f"{render_header('error', color)}\n") + for record in records: + error = record.get("error", record.get("kind", "error")) + reason = record.get("reason", "") + target.write(f" {style(error, RED, color)}") + if reason: + target.write(f" {reason}") + target.write("\n") + for key, value in record.items(): + if key in ("kind", "error", "reason"): + continue + if value is None: + continue + dk = display_key(key) + target.write(render_field(dk, value, color, dim_label=True) + "\n") + target.write("\n") + + +# --- Internal helpers --- + + +def 
_render_disclosure_fields(target, record, color): + for key in sorted(record.keys()): + if not key.endswith("_cmd") and key not in ("inspect", "check", "run", + "start_cmd", "log", "config", + "artifacts", "metrics"): + continue + value = record.get(key) + if not value: + continue + label = display_key(key) + target.write(render_field(label, value, color, dim_label=True) + "\n") + + +def _render_step_disclosure(target, record, color, indent=" "): + for key in ("metrics_cmd", "log_cmd", "log", "artifacts", "config", + "start_cmd", "inspect"): + value = record.get(key) + if not value: + continue + label = display_key(key) + dim_label = style(f"{label}:", DIM, color) if color else f"{label}:" + target.write(f"{indent}{dim_label} {value}\n") + + +# --- Renderer registry --- + + +def get_pretty_renderer(command): + registry = { + "init": render_init, + "check": render_check, + "run": render_run_summary, + "status": render_status, + "metrics": render_metrics, + "artifacts": render_artifacts, + "config": render_config, + "diagnose": render_diagnose, + } + return registry.get(command) diff --git a/chipcompiler/cli/progress.py b/chipcompiler/cli/progress.py new file mode 100644 index 00000000..9052bf4a --- /dev/null +++ b/chipcompiler/cli/progress.py @@ -0,0 +1,270 @@ +import os +import re +import shutil +import threading +import time + +from chipcompiler.cli.log_view import LineKind, _KIND_COLOR, _KIND_LABEL, extract_error_context +from chipcompiler.cli.output import disclosure_cmd, normalize_step_name, normalize_state +from chipcompiler.cli.pretty import BOLD, DIM, CYAN, GREEN, RED, RESET, style as _style +from chipcompiler.cli.types import OutputMode +from chipcompiler.data import StateEnum, log_flow + + +def supports_color(stream, mode, env=None): + from chipcompiler.cli.pretty import supports_color as _supports_color + return _supports_color(file=stream, mode=mode, env=env) + + +def style(text, code, enabled): + return _style(text, code, enabled) + + +def 
should_enable_run_progress(ctx, stderr): + if ctx.output_mode != OutputMode.TEXT: + return False + return hasattr(stderr, "isatty") and stderr.isatty() + + +_ANSI_RE = re.compile(r"\x1b\[[0-9;]*[a-zA-Z]") +_OSC_RE = re.compile(r"\x1b\].*?(?:\x07|\x1b\\)") +_DCS_RE = re.compile(r"\x1bP.*?(?:\x1b\\)") +_CONTROL_RE = re.compile(r"[\r\n\t]+") +_C0_RE = re.compile(r"[\x00-\x08\x0b\x0c\x0e-\x1f\x7f]") +_MULTI_SPACE_RE = re.compile(r" {2,}") + + +def sanitize_log_line(line): + stripped = _OSC_RE.sub("", line) + stripped = _DCS_RE.sub("", stripped) + stripped = _ANSI_RE.sub("", stripped) + stripped = _CONTROL_RE.sub(" ", stripped) + stripped = _C0_RE.sub("", stripped) + stripped = _MULTI_SPACE_RE.sub(" ", stripped) + return stripped.strip() + + +def truncate_to_width(text, width): + if width <= 0: + return "" + if len(text) <= width: + return text + if width <= 3: + return text[:width] + return text[: width - 3] + "..." + + +# --- Failure context block formatting --- + +_KIND_LABEL_COMPACT = {k: v.upper() for k, v in _KIND_LABEL.items()} + + +def format_error_context(log_path, context_lines, log_cmd, color=True): + """Format a failure context block for interactive progress output. + + Args: + log_path: Relative path to the failed step's log file. + context_lines: List of LogLine objects from extract_error_context(). + log_cmd: Full disclosure command (e.g. 'ecc log synth --project p'). + color: Whether to emit ANSI color codes. 
+ """ + lines = [] + lines.append(f"error: {log_path}") + + if context_lines: + max_no = max(ll.line_no for ll in context_lines) + width = max(len(str(max_no)), 4) + else: + width = 4 + + for ll in context_lines: + no = str(ll.line_no).rjust(width) + label = _KIND_LABEL_COMPACT[ll.kind] + + if color and ll.kind in _KIND_COLOR: + code = _KIND_COLOR[ll.kind] + if ll.kind == LineKind.ERROR: + lines.append(f" {no} {code}{label} {ll.text}{RESET}") + else: + lines.append(f" {no} {code}{label}{RESET} {ll.text}") + else: + lines.append(f" {no} {label} {ll.text}") + + lines.append(f"For more log info: {log_cmd}") + lines.append(f'command="{log_cmd}"') + return "\n".join(lines) + "\n" + + +def latest_log_line(path): + if not path or not os.path.isfile(path): + return None + try: + with open(path, "r", errors="replace") as f: + lines = f.readlines() + except OSError: + return None + for line in reversed(lines): + sanitized = sanitize_log_line(line) + if sanitized: + return sanitized + return None + + +def terminal_width(fallback=80): + cols, _ = shutil.get_terminal_size(fallback=(fallback, 24)) + return max(cols, 1) + + +class RunProgressRenderer: + def __init__(self, stream, width_fn=None, color=False): + self._stream = stream + self._width_fn = width_fn or terminal_width + self._color = color + self._has_transient = False + self._step_started = False + + def running(self, text): + width = self._width_fn() + visible = truncate_to_width(f" log: {text}", width) + if self._color and visible.startswith(" log:"): + visible = f" {DIM}log:{RESET}{visible[6:]}" + self._stream.write(f"\r\x1b[K{visible}") + self._stream.flush() + self._has_transient = True + + def clear(self): + if self._has_transient: + self._stream.write("\r\x1b[K\n") + self._stream.flush() + self._has_transient = False + + def start_run(self, name, workspace): + self.clear() + run_label = style("[run]", BOLD, self._color) + self._stream.write(f"{run_label} {name} workspace={workspace}\n") + self._stream.flush() + + 
def start_step(self, step, tool): + self.clear() + if self._step_started: + self._stream.write("\n") + header = style(f"> {step} ({tool})", CYAN, self._color) + self._stream.write(f"{header}\n") + self._stream.flush() + self._step_started = True + + def finish_step(self, step, tool, status, runtime, log_path, inspect_cmd, success): + self.clear() + if success: + line = style(f"✓ {step} ({tool}) {runtime}", GREEN, self._color) + else: + sym = style("✗", RED, self._color) + status_styled = style(status, RED, self._color) + line = f"{sym} {step} ({tool}) {status_styled} {runtime}" + self._stream.write(f"{line}\n") + log_label = style(" log:", DIM, self._color) + self._stream.write(f"{log_label} {log_path}\n") + inspect_label = style(" inspect:", DIM, self._color) + self._stream.write(f"{inspect_label} {inspect_cmd}\n") + self._stream.flush() + + def render_failure_context(self, block): + """Write a pre-formatted failure context block to the progress stream.""" + self._stream.write(block) + self._stream.flush() + + +def _poll_log(renderer, log_path, stop_event, interval=0.5): + while not stop_event.is_set(): + line = latest_log_line(log_path) + renderer.running(line or "waiting for log...") + stop_event.wait(interval) + + +def run_flow_with_progress(engine_flow, ctx, project, stderr): + color = supports_color(stderr, ctx.output_mode) + renderer = RunProgressRenderer(stderr, color=color) + engine_flow.workspace.home.reset() + + run_dir = engine_flow.workspace.directory + run_name = os.path.basename(run_dir) or "default" + renderer.start_run(run_name, run_dir) + + for workspace_step in engine_flow.workspace_steps: + step_token = normalize_step_name(workspace_step.name) + tool = workspace_step.tool + log_path = workspace_step.log.get("file", "") + + engine_flow.workspace.logger.log_section( + f"{workspace_step.tool} - begin step - {workspace_step.name}" + ) + + renderer.start_step(step_token, tool) + + stop_event = threading.Event() + monitor = threading.Thread( + 
target=_poll_log, + args=(renderer, log_path, stop_event), + daemon=True, + ) + monitor.start() + + start = time.time() + + try: + state = engine_flow.run_step(workspace_step) + finally: + stop_event.set() + monitor.join(timeout=2.0) + renderer.clear() + + log_flow(workspace=engine_flow.workspace) + engine_flow.workspace.logger.log_section( + f"{workspace_step.tool} - end step - {workspace_step.name}" + ) + + elapsed = time.time() - start + hours = int(elapsed // 3600) + minutes = int((elapsed % 3600) // 60) + seconds = int(elapsed % 60) + runtime = f"{hours}:{minutes:02d}:{seconds:02d}" + + status = normalize_state(state.value) + + rel_log = "" + if log_path: + try: + rel_log = os.path.relpath(log_path, engine_flow.workspace.directory) + except ValueError: + rel_log = log_path + + inspect = disclosure_cmd(f"ecc log {step_token}", project) + + is_success = state == StateEnum.Success + renderer.finish_step(step_token, tool, status, runtime, rel_log, inspect, is_success) + + if not is_success: + _maybe_render_failure_context(renderer, log_path, rel_log, step_token, + project, ctx.run_id, color) + return False + + return True + + +def _maybe_render_failure_context(renderer, log_path, rel_log, step_token, + project, run_id, color): + if not log_path or not os.path.isfile(log_path): + return + try: + with open(log_path, "r", errors="replace") as f: + raw = f.read() + except OSError: + return + log_lines = raw.splitlines() + if not log_lines: + return + + ctx_lines = extract_error_context(log_lines) + full_cmd = disclosure_cmd(f"ecc log {step_token}", project, run_id) + block = format_error_context(rel_log, ctx_lines, full_cmd, color=color) + renderer.render_failure_context(block) diff --git a/chipcompiler/cli/records.py b/chipcompiler/cli/records.py new file mode 100644 index 00000000..c081dd77 --- /dev/null +++ b/chipcompiler/cli/records.py @@ -0,0 +1,4 @@ +def error_record(error: str, **fields) -> dict: + record = {"kind": "error", "error": error} + 
record.update(fields) + return record diff --git a/chipcompiler/cli/render.py b/chipcompiler/cli/render.py new file mode 100644 index 00000000..b13607da --- /dev/null +++ b/chipcompiler/cli/render.py @@ -0,0 +1,88 @@ +import json +import sys + +from chipcompiler.cli.types import CommandResult, OutputMode + + +def render_text(records: tuple[dict, ...], file=None) -> None: + target = file or sys.stdout + for record in records: + parts = [] + for key, value in record.items(): + if value is None: + continue + display_key = key[:-4] if key.endswith("_cmd") else key + if isinstance(value, str) and any(c.isspace() for c in value): + escaped = value.replace('\\', '\\\\').replace('"', '\\"') + parts.append(f'{display_key}="{escaped}"') + else: + parts.append(f"{display_key}={value}") + print(" ".join(parts), file=target) + + +def render_json(result: CommandResult, file=None) -> None: + target = file or sys.stdout + print(json.dumps({"records": list(result.records)}, ensure_ascii=False), file=target) + + +def render_jsonl(result: CommandResult, file=None) -> None: + target = file or sys.stdout + for record in result.records: + print(json.dumps(record, ensure_ascii=False), file=target) + + +def render_plain(records: tuple[dict, ...], file=None) -> None: + target = file or sys.stdout + for record in records: + parts = [] + for key, value in record.items(): + if value is None: + continue + parts.append(f"{key}={_plain_value(value)}") + print(" ".join(parts), file=target) + + +def _plain_value(value) -> str: + s = str(value) + if any(c.isspace() for c in s) or "\\" in s or '"' in s or "=" in s: + escaped = s.replace("\\", "\\\\").replace('"', '\\"') + return f'"{escaped}"' + return s + + +def render_result(result: CommandResult, mode: OutputMode, file=None, + command=None, color=True) -> None: + if mode == OutputMode.JSON: + render_json(result, file=file) + elif mode == OutputMode.JSONL: + render_jsonl(result, file=file) + elif mode == OutputMode.PLAIN: + 
render_plain(result.records, file=file) + elif mode == OutputMode.TEXT: + _render_pretty(result, file=file, command=command, color=color) + else: + render_text(result.records, file=file) + + +def _render_pretty(result: CommandResult, file=None, command=None, color=True) -> None: + from chipcompiler.cli.pretty import ( + get_pretty_renderer, + render_error, + render_generic_block, + ) + + records = result.records + if not records: + return + + first = records[0] + + if result.exit_code != 0 and first.get("kind") == "error": + render_error(records, file=file, color=color) + return + + renderer = get_pretty_renderer(command) if command else None + if renderer: + renderer(records, file=file, color=color) + else: + render_generic_block(records, file=file, color=color) diff --git a/chipcompiler/cli/types.py b/chipcompiler/cli/types.py new file mode 100644 index 00000000..0ad02cce --- /dev/null +++ b/chipcompiler/cli/types.py @@ -0,0 +1,32 @@ +from dataclasses import dataclass, field +from enum import Enum + + +class OutputMode(Enum): + TEXT = "text" + PLAIN = "plain" + JSON = "json" + JSONL = "jsonl" + + +@dataclass(frozen=True) +class CommandContext: + project_dir: str + project: str | None + run_dir: str + run_id: str | None + output_mode: OutputMode + + +@dataclass(frozen=True) +class CommandResult: + records: tuple[dict, ...] 
= field(default_factory=tuple) + exit_code: int = 0 + + @staticmethod + def ok(records: list[dict]) -> "CommandResult": + return CommandResult(records=tuple(records), exit_code=0) + + @staticmethod + def err(records: list[dict], exit_code: int = 1) -> "CommandResult": + return CommandResult(records=tuple(records), exit_code=exit_code) diff --git a/chipcompiler/data/parameter.py b/chipcompiler/data/parameter.py index 45cc8c77..31a60123 100644 --- a/chipcompiler/data/parameter.py +++ b/chipcompiler/data/parameter.py @@ -22,7 +22,7 @@ "Aspect ratio" : 1 }, "Max fanout" : 20, - "Target density" : 0.3, + "Target density" : 0.2, "Target overflow" : 0.1, "Global right padding": 0, "Cell padding x": 600, diff --git a/chipcompiler/thirdparty/ecc-dreamplace b/chipcompiler/thirdparty/ecc-dreamplace index b8606d35..884af86c 160000 --- a/chipcompiler/thirdparty/ecc-dreamplace +++ b/chipcompiler/thirdparty/ecc-dreamplace @@ -1 +1 @@ -Subproject commit b8606d35455b3a6aae7cd0a5584f4ea389cc223a +Subproject commit 884af86c66aa6f6e9c2644d7af6b0ee021f8e48c diff --git a/chipcompiler/thirdparty/ecc-tools b/chipcompiler/thirdparty/ecc-tools index 749185eb..e7dc4d3c 160000 --- a/chipcompiler/thirdparty/ecc-tools +++ b/chipcompiler/thirdparty/ecc-tools @@ -1 +1 @@ -Subproject commit 749185eb923125e7478baee3206ca72892be7f0e +Subproject commit e7dc4d3c3f59474cbe00e435010e2ebb0525976e diff --git a/chipcompiler/tools/ecc/builder.py b/chipcompiler/tools/ecc/builder.py index 6f34c676..babe1251 100644 --- a/chipcompiler/tools/ecc/builder.py +++ b/chipcompiler/tools/ecc/builder.py @@ -2,6 +2,7 @@ # -*- encoding: utf-8 -*- import os import stat +from contextlib import suppress from chipcompiler.data import WorkspaceStep, Workspace, Parameters, StepEnum, StateEnum def build_step(workspace: Workspace, @@ -215,13 +216,24 @@ def build_step_config(workspace: Workspace, def _ensure_writable(path: str): """Make files writable after copying from read-only sources.""" + def _chmod_owner_writable(target: 
str, is_dir: bool = False): + mode = os.stat(target).st_mode | stat.S_IWUSR + if is_dir: + mode |= stat.S_IXUSR + os.chmod(target, mode) + + with suppress(OSError): + _chmod_owner_writable(path, is_dir=True) + for root, dirs, files in os.walk(path): - for name in dirs + files: + for name in dirs: + target = os.path.join(root, name) + with suppress(OSError): + _chmod_owner_writable(target, is_dir=True) + for name in files: target = os.path.join(root, name) - try: - os.chmod(target, os.stat(target).st_mode | stat.S_IWUSR) - except OSError: - pass + with suppress(OSError): + _chmod_owner_writable(target) def _update_flow(): # read config diff --git a/chipcompiler/tools/ecc_dreamplace/builder.py b/chipcompiler/tools/ecc_dreamplace/builder.py index edc6a55b..3bbda72a 100644 --- a/chipcompiler/tools/ecc_dreamplace/builder.py +++ b/chipcompiler/tools/ecc_dreamplace/builder.py @@ -2,7 +2,10 @@ from __future__ import annotations +import os import shutil +import stat +from contextlib import suppress from copy import deepcopy from pathlib import Path @@ -77,6 +80,11 @@ def build_step_config(workspace: Workspace, step: WorkspaceStep) -> None: # then copy it to the destination specified by step.config["dreamplace"] param_src = Path(__file__).resolve().parent / "configs" / "dreamplace.json" shutil.copy2(param_src, step.config["dreamplace"]) + with suppress(OSError): + os.chmod( + step.config["dreamplace"], + os.stat(step.config["dreamplace"]).st_mode | stat.S_IWUSR, + ) params = json_read(step.config["dreamplace"]) diff --git a/docs/development.md b/docs/development.md index 32f82442..32d669d6 100644 --- a/docs/development.md +++ b/docs/development.md @@ -337,24 +337,40 @@ Create `chipcompiler/tools//` with `__init__.py`, `builder.py`, `runner.py For command-line automation and scripting, run CLI via Nix: ```bash -# Run directly from project root -nix run .#cli -- --workspace ./ws \ - --rtl ./rtl/top.v \ - --design top \ - --top top \ - --clock clk \ - --pdk-root /path/to/ics55 
- -# Filelist mode -nix run .#cli -- --workspace ./ws \ - --rtl ./rtl/filelist.f \ - --design top \ - --top top \ - --clock clk \ - --pdk-root /path/to/ics55 \ - --freq 200 +# Create a project skeleton with ecc.toml, rtl/, constraints/, and runs/ +nix run .#cli -- init gcd + +# After editing gcd/ecc.toml and adding RTL files +nix run .#cli -- check --project gcd +nix run .#cli -- run --project gcd +nix run .#cli -- status --project gcd +nix run .#cli -- metrics --project gcd +nix run .#cli -- log --project gcd ``` +The project config is the CLI input surface: + +```toml +[design] +name = "gcd" +top = "gcd" +rtl = ["rtl/gcd.v"] +clock_port = "clk" +frequency_mhz = 100.0 + +[pdk] +name = "ics55" +root = "/path/to/ics55" + +[flow] +preset = "rtl2gds" +run = "default" +``` + +For filelist mode, set `design.rtl` to a single filelist path, for example +`rtl = ["rtl/filelist.f"]`. Multiple RTL sources should be listed in the +filelist rather than as multiple `design.rtl` entries. + If you need an interactive environment for development, use `nix develop`. REST API reference: Examples: **[examples/gcd](examples/gcd/README.md)** diff --git a/docs/index.md b/docs/index.md index 8c8ff622..195bacb6 100644 --- a/docs/index.md +++ b/docs/index.md @@ -26,6 +26,13 @@ ChipCompiler supports various EDA file formats. Technical specifications for par - Supports file paths, +incdir directives, comments, quoted paths - Parser implementation: `chipcompiler/utility/filelist.py` +### CLI Specifications + +- **[CLI Design](specification/cli-design.md)** - Progressive-disclosure CLI design and roadmap + - Grep-friendly summary lines with disclosure commands + - Project, run, step, metric, artifact, issue, and config object model + - Phased roadmap for project setup, debug, traceability, and exploration + ## Quick Navigation ### I want to... 
diff --git a/docs/specification/cli-design.md b/docs/specification/cli-design.md new file mode 100644 index 00000000..f22e337c --- /dev/null +++ b/docs/specification/cli-design.md @@ -0,0 +1,426 @@ +# CLI Design Specification + +This document defines the design principles and staged roadmap for the ECC +command line interface. + +The CLI should be useful to both human flow developers and agent frameworks. It +must expose a short default path for common flows, while every summary line must +also provide explicit commands for deeper inspection. + +## Goals + +- Provide a project-oriented interface for RTL-to-GDS workflows. +- Make step-level reruns, inspection, and debugging first-class operations. +- Keep default output concise and stable. +- Make output easy to parse with simple tools such as `rg`, `awk`, and shell + scripts. +- Provide structured output for agents through `--json` and `--jsonl`. +- Preserve the existing Python API for advanced integration. +- Build CLI behavior as a wrapper around the current Python APIs. + +## Non-Goals + +- Full OpenLane or LibreLane configuration import. +- A conversational assistant as the primary CLI interface. +- Tool-specific command exposure as the default user model. +- Pretty terminal UI as the canonical output format. + +## Design Principles + +### Progressive Disclosure + +The default command output should answer only: + +- What happened? +- Did it succeed? +- What command should inspect the next level of detail? + +Detailed information must be available through explicit follow-up commands. +The disclosure path is: + +```text +summary -> diagnosis -> evidence -> raw data +``` + +Examples: + +```bash +ecc status +ecc diagnose step cts +ecc log step cts --errors +ecc artifacts step cts +ecc config step cts --resolved +``` + +### Disclosure Commands On Summary Lines + +Every summary line must include at least one disclosure command on the same +line. 
This is required so agents can grep the output and continue inspection +without interpreting natural language paragraphs. + +Use stable `key="command"` fields: + +```text +step=cts status=failed elapsed=37s wns=-0.083 hold_vios=12 diagnose="ecc diagnose step cts" log="ecc log step cts --errors" config="ecc config step cts --resolved" +``` + +Do not rely on prose such as: + +```text +Run ecc diagnose step cts for more details. +``` + +The command field names should be stable across releases: + +| Field | Purpose | +| --- | --- | +| `inspect` | Show detailed object state | +| `diagnose` | Explain failures or quality issues | +| `log` | Show filtered or raw logs | +| `artifacts` | List output artifacts | +| `config` | Show resolved configuration | +| `metrics` | Show metrics | +| `open` | Open a viewer or report | + +### Stable Text Output + +The default output should be line-oriented and grep-friendly. Avoid box drawing, +multi-line table cells, and terminal-width-dependent formatting in the default +mode. + +Recommended style: + +```text +run=baseline status=failed failed_step=routing elapsed=554s diagnose="ecc diagnose run baseline" metrics="ecc metrics run baseline" artifacts="ecc artifacts run baseline" +step=synthesis status=success elapsed=18s cells=312 area=1840.2 inspect="ecc show step synthesis" log="ecc log step synthesis --errors" +step=floorplan status=success elapsed=4s util=45.0 die=100x100 inspect="ecc show step floorplan" config="ecc config step floorplan --resolved" +step=placement status=success elapsed=72s hpwl=18423 overflow=0.02 inspect="ecc show step placement" metrics="ecc metrics step placement" +step=cts status=failed elapsed=37s wns=-0.083 hold_vios=12 diagnose="ecc diagnose step cts" log="ecc log step cts --errors" +``` + +Pretty output may be provided through a separate option: + +```bash +ecc status --pretty +``` + +Pretty output is for humans only and must not be treated as the stable parsing +interface. 
+ +### Structured Output + +Every inspection command should support: + +```bash +--json +--jsonl +``` + +Use `--json` for object-level output and `--jsonl` for stream or list output. + +Example: + +```jsonl +{"kind":"step","step":"synthesis","status":"success","elapsed_s":18,"inspect_cmd":"ecc show step synthesis","log_cmd":"ecc log step synthesis --errors"} +{"kind":"step","step":"cts","status":"failed","elapsed_s":37,"wns":-0.083,"hold_vios":12,"diagnose_cmd":"ecc diagnose step cts","log_cmd":"ecc log step cts --errors"} +``` + +Text output and JSON output should describe the same objects. The text output is +the human and shell interface; JSON is the strict machine interface. + +### Object-Oriented CLI Model + +Commands should be organized around flow objects instead of internal tools: + +| Object | Description | +| --- | --- | +| Project | User design directory and `ecc.toml` | +| Run | One execution instance with a stable run id or tag | +| Step | A flow step such as synthesis, placement, CTS, routing | +| Artifact | DEF, GDS, Verilog, SPEF, reports, logs, scripts | +| Metric | QoR values such as WNS, TNS, area, HPWL, DRC count | +| Issue | Failure or QoR problem with evidence | +| Config | User config and resolved step config | + +Users should not need to understand the internal Yosys, ECC-Tools, or +DreamPlace directory layout to perform common actions. + +### Python API Wrapper Boundary + +The CLI must be implemented as a thin orchestration layer over the existing +Python APIs. CLI commands should compose and wrap APIs such as workspace +creation, flow construction, step execution, state inspection, metrics parsing, +and artifact discovery. + +The CLI must not require invasive changes to the current flow-related APIs. In +particular, CLI implementation should avoid changing the semantics of +`EngineFlow`, `Workspace`, `WorkspaceStep`, tool plugin interfaces, or RTL-to-GDS +flow builders only to satisfy command-line concerns. 
+ +If the CLI needs behavior that is not exposed today, prefer one of these +approaches: + +- Add a small, general-purpose Python API that is useful outside the CLI. +- Add a CLI-local adapter that translates current API data into CLI output + objects. +- Add read-only inspection helpers around existing state files, reports, and + artifacts. + +Avoid embedding CLI output formatting, argument parsing, terminal behavior, or +agent-specific disclosure fields inside core flow APIs. + +## Command Shape + +### Core Commands + +The first stable CLI surface should stay small: + +```bash +ecc init +ecc check +ecc run +ecc status +ecc diagnose +ecc metrics +ecc log +ecc artifacts +ecc config +ecc open +``` + +Responsibilities: + +| Command | Responsibility | +| --- | --- | +| `ecc init` | Create a project skeleton and `ecc.toml` | +| `ecc check` | Validate RTL, constraints, PDK, tools, and config | +| `ecc run` | Execute a full flow or selected step range | +| `ecc status` | Summarize run and step state | +| `ecc diagnose` | Explain failures or QoR problems with evidence | +| `ecc metrics` | Show run-level or step-level metrics | +| `ecc log` | Show filtered or raw logs | +| `ecc artifacts` | List generated files and viewer commands | +| `ecc config` | Show user or resolved configuration | +| `ecc open` | Open KLayout, reports, or other viewers | + +### Project-Oriented Entry + +The preferred user entry should be configuration driven: + +```bash +ecc init gcd +ecc check +ecc run +``` + +The project should contain: + +```text +gcd/ +├── ecc.toml +├── rtl/ +├── constraints/ +├── runs/ +└── reports/ +``` + +Command-line arguments may override configuration values, but `ecc.toml` should +be the primary user-facing interface. + +### Step-Level Execution + +Back-end flow work is iterative. 
Step-level execution must be first-class: + +```bash +ecc run --from placement +ecc run --to routing +ecc run --only cts +ecc run --after floorplan +ecc run --resume +ecc run --force --step placement +``` + +Each run should have a stable run id and may have a user tag: + +```bash +ecc run --tag baseline +ecc run --tag dense_place +ecc diff baseline dense_place +``` + +## Output Contracts + +### Summary Line Format + +Default text output should follow this general shape: + +```text +kind= key=value ... disclosure_key="ecc command ..." +``` + +Examples: + +```text +run=baseline status=success elapsed=914s metrics="ecc metrics run baseline" artifacts="ecc artifacts run baseline" +step=routing status=failed elapsed=222s shorts=84 opens=3 drc=87 diagnose="ecc diagnose step routing" log="ecc log step routing --errors" open="ecc open step routing --markers drc" +metric=wns value=-0.083 unit=ns status=fail source=cts/reports/timing_hold.rpt inspect="ecc show metric wns --step cts" +artifact=def step=placement path=runs/baseline/placement/output/design.def open="ecc open step placement --artifact def" +``` + +Rules: + +- Keep one object per line. +- Do not wrap summary lines. +- Use stable lowercase keys. +- Use stable lowercase tokens for step names and metric names. +- Quote command values with double quotes. +- Commands in disclosure fields must be directly executable from the project + root. +- Include at least one disclosure command per summary line. +- Prefer relative paths rooted at the project directory. +- Avoid terminal color as the only status indicator. + +### Error Output + +Errors should also follow progressive disclosure. 
A failing command should print +a concise summary and actionable disclosure commands: + +```text +error=E2103 status=failed step=routing reason=drc_violations shorts=84 opens=3 diagnose="ecc diagnose step routing" log="ecc log step routing --errors" open="ecc open step routing --markers drc" +``` + +For human readability, a short paragraph may follow, but agents should be able +to use the first line alone. + +### Diagnosis Output + +Diagnosis must include evidence, not only suggestions: + +```text +issue=cts_hold status=fail severity=error wns=-0.083 hold_vios=12 evidence="ecc show issue cts_hold --evidence" log="ecc log step cts --errors" +evidence=timing_hold_report path=runs/baseline/cts/reports/timing_hold.rpt value=-0.083 inspect="ecc show artifact timing_hold_report" +action=enable_hold_repair confidence=medium config="ecc config step cts --resolved" +``` + +Suggestions should be traceable to metrics, reports, or logs. + +## Configuration Direction + +The CLI should move toward a single project configuration file: + +```toml +[design] +name = "gcd" +top = "gcd" +rtl = ["rtl/gcd.v"] +clock_port = "clk" +clock_period = "10ns" + +[pdk] +name = "ics55" +root = "$PDK_ROOT" + +[floorplan] +die_area = [0, 0, 100, 100] +core_util = 45 +aspect_ratio = 1.0 + +[flow] +preset = "rtl2gds" +from = "synthesis" +to = "gds" +``` + +The resolved configuration used by each step should be inspectable: + +```bash +ecc config --resolved +ecc config step placement --resolved +``` + +## AI-Native Behavior + +The CLI should not start with a general chat command. It should first produce +stable structured context that agents can inspect. 
+ +Preferred data files: + +```text +run.json +steps.json +metrics.json +issues.json +artifacts.json +resolved_config.json +events.jsonl +``` + +Agent-oriented commands can then be layered on top: + +```bash +ecc diagnose +ecc explain step routing +ecc suggest --goal "fix hold" +ecc summarize run latest +``` + +These commands must still return evidence-backed results and disclosure +commands. + +## Roadmap + +### Phase 1: Project And Run Basics + +- [ ] `ecc init` +- [ ] `ecc check` +- [ ] `ecc run` +- [ ] `ecc status` +- [ ] `ecc log` +- [ ] `ecc metrics` +- [ ] Default grep-friendly summary output +- [ ] `--json` and `--jsonl` for status and metrics + +Success criteria: + +- [ ] A user can create a project, run the default RTL-to-GDS flow, inspect status, + inspect logs, and read metrics without writing Python. +- [ ] Every summary line includes at least one disclosure command. + +### Phase 2: Debug And Traceability + +- [ ] `ecc diagnose` +- [ ] `ecc artifacts` +- [ ] `ecc config --resolved` +- [ ] Run tags and run comparison basics +- [ ] Structured issue and artifact metadata + +Success criteria: + +- [ ] A failed step can be investigated through `ecc status -> ecc diagnose -> ecc + log/artifacts/config`. +- [ ] Agent frameworks can follow disclosure commands without parsing prose. + +### Phase 3: Exploration And Assistance + +- [ ] `ecc diff` +- [ ] `ecc sweep` +- [ ] `ecc explain` +- [ ] `ecc suggest` +- [ ] QoR dashboards or report export + +Success criteria: + +- [ ] A user can compare runs, sweep key flow parameters, and receive + evidence-backed next actions for common timing, placement, routing, and DRC + failures. + +## Compatibility Notes + +The current CLI accepts explicit arguments such as `--workspace`, `--rtl`, +`--design`, `--top`, `--clock`, `--pdk-root`, and `--freq`. The new CLI should +preserve a migration path for scripted users, but the long-term default should +be project-oriented and configuration-driven. 
+ +The CLI should remain API-compatible with existing Python users. Changes needed +for the CLI should be additive and should not force current Python flow scripts +to change unless the underlying API already requires a broader cleanup. diff --git a/flake.lock b/flake.lock index 60451134..986b11b8 100644 --- a/flake.lock +++ b/flake.lock @@ -53,11 +53,11 @@ }, "nixpkgs-lib_2": { "locked": { - "lastModified": 1772328832, - "narHash": "sha256-e+/T/pmEkLP6BHhYjx6GmwP5ivonQQn0bJdH9YrRB+Q=", + "lastModified": 1777168982, + "narHash": "sha256-GOkGPcboWE9BmGCRMLX3worL4EMnsnG8MyKmXNeYuhQ=", "owner": "nix-community", "repo": "nixpkgs.lib", - "rev": "c185c7a5e5dd8f9add5b2f8ebeff00888b070742", + "rev": "f5901329dade4a6ea039af1433fb087bd9c1fe14", "type": "github" }, "original": { @@ -68,11 +68,11 @@ }, "nixpkgs_2": { "locked": { - "lastModified": 1773201692, - "narHash": "sha256-NXrKzNMniu4Oam2kAFvqJ3GB2kAvlAFIriTAheaY8hw=", + "lastModified": 1777946660, + "narHash": "sha256-iw3XDIG6xxk+AZTcawCLHf6i9i4tXRzLZEoV9xhRToQ=", "owner": "NixOS", "repo": "nixpkgs", - "rev": "b6067cc0127d4db9c26c79e4de0513e58d0c40c9", + "rev": "bc57abace07689cfd34203aa5fb4027514895987", "type": "github" }, "original": { @@ -105,11 +105,11 @@ "nixpkgs-lib": "nixpkgs-lib_2" }, "locked": { - "lastModified": 1772408722, - "narHash": "sha256-rHuJtdcOjK7rAHpHphUb1iCvgkU3GpfvicLMwwnfMT0=", + "lastModified": 1777988971, + "narHash": "sha256-qIoWPDs+0/8JecyYgE3gpKQxW/4bLW/gp45vow9ioCQ=", "owner": "hercules-ci", "repo": "flake-parts", - "rev": "f20dc5d9b8027381c474144ecabc9034d6a839a3", + "rev": "0678d8986be1661af6bb555f3489f2fdfc31f6ff", "type": "github" }, "original": { @@ -154,11 +154,11 @@ ] }, "locked": { - "lastModified": 1773297127, - "narHash": "sha256-6E/yhXP7Oy/NbXtf1ktzmU8SdVqJQ09HC/48ebEGBpk=", + "lastModified": 1775636079, + "narHash": "sha256-pc20NRoMdiar8oPQceQT47UUZMBTiMdUuWrYu2obUP0=", "owner": "numtide", "repo": "treefmt-nix", - "rev": "71b125cd05fbfd78cab3e070b73544abe24c5016", + "rev": 
"790751ff7fd3801feeaf96d7dc416a8d581265ba", "type": "github" }, "original": { diff --git a/flake.nix b/flake.nix index 943fcde5..57368116 100644 --- a/flake.nix +++ b/flake.nix @@ -32,7 +32,6 @@ ]; systems = [ "x86_64-linux" - "aarch64-linux" ]; flake.overlays.default = overlay; perSystem = @@ -57,9 +56,10 @@ }; packages = { inherit (pkgs) - ecc-tools chipcompiler cli + ecc-tools-python + ecc-dreamplace-python ; }; }; diff --git a/nix/chipcompiler/default.nix b/nix/chipcompiler/default.nix index 23d311ad..cc3a3725 100644 --- a/nix/chipcompiler/default.nix +++ b/nix/chipcompiler/default.nix @@ -1,13 +1,14 @@ { lib, python3Packages, - ecc-tools, + ecc-dreamplace-python, + ecc-tools-python, makeWrapper, }: python3Packages.buildPythonPackage { pname = "chipcompiler"; - version = "0.1.0"; + version = "0.1.0.0-alpha.2"; pyproject = true; src = @@ -22,12 +23,6 @@ python3Packages.buildPythonPackage { ]; }; - postPatch = '' - mkdir -p thirdparty/ecc-tools/bin - install -m 755 ${ecc-tools}/bin/*.cpython-*.so thirdparty/ecc-tools/bin/ - install -m 755 ${ecc-tools}/bin/*.cpython-*.so chipcompiler/tools/ecc/bin/ - ''; - postInstall = '' site_packages="$out/${python3Packages.python.sitePackages}" @@ -45,7 +40,10 @@ python3Packages.buildPythonPackage { build-system = with python3Packages; [ uv-build ]; - dependencies = with python3Packages; [ + dependencies = [ + ecc-dreamplace-python + ecc-tools-python + ] ++ (with python3Packages; [ fastapi klayout matplotlib @@ -55,10 +53,11 @@ python3Packages.buildPythonPackage { pyjson5 pyyaml scipy + torch tqdm uvicorn pip - ]; + ]); nativeBuildInputs = [ makeWrapper ]; diff --git a/nix/cli/default.nix b/nix/cli/default.nix index 071499c9..23f5c5e2 100644 --- a/nix/cli/default.nix +++ b/nix/cli/default.nix @@ -1,14 +1,15 @@ { lib, python3Packages, - ecc-tools, + ecc-dreamplace-python, + ecc-tools-python, yosysWithSlang, makeWrapper, }: python3Packages.buildPythonPackage { pname = "chipcompiler-cli"; - version = "0.1.0"; + version = 
"0.1.0.0-alpha.2"; pyproject = true; src = @@ -23,12 +24,6 @@ python3Packages.buildPythonPackage { ]; }; - postPatch = '' - mkdir -p thirdparty/ecc-tools/bin - install -m 755 ${ecc-tools}/bin/*.cpython-*.so thirdparty/ecc-tools/bin/ - install -m 755 ${ecc-tools}/bin/*.cpython-*.so chipcompiler/tools/ecc/bin/ - ''; - postInstall = '' site_packages="$out/${python3Packages.python.sitePackages}" @@ -43,19 +38,27 @@ python3Packages.buildPythonPackage { fi done - # This package should expose only the dedicated `cli` entrypoint. + # This package should expose only the dedicated `ecc` entrypoint. rm -f "$out/bin/chipcompiler" ''; postFixup = '' - wrapProgram "$out/bin/cli" \ + wrapProgram "$out/bin/ecc" \ --set CHIPCOMPILER_OSS_CAD_DIR "${yosysWithSlang}" \ --prefix PATH : "${yosysWithSlang}/bin" + if [ -e "$out/bin/cli" ]; then + wrapProgram "$out/bin/cli" \ + --set CHIPCOMPILER_OSS_CAD_DIR "${yosysWithSlang}" \ + --prefix PATH : "${yosysWithSlang}/bin" + fi ''; build-system = with python3Packages; [ uv-build ]; - dependencies = with python3Packages; [ + dependencies = [ + ecc-dreamplace-python + ecc-tools-python + ] ++ (with python3Packages; [ fastapi klayout matplotlib @@ -65,9 +68,11 @@ python3Packages.buildPythonPackage { pyjson5 pyyaml scipy + torch tqdm uvicorn - ]; + pip + ]); nativeBuildInputs = [ makeWrapper ]; @@ -76,7 +81,6 @@ python3Packages.buildPythonPackage { pythonImportsCheck = [ "chipcompiler" - "chipcompiler.server" "chipcompiler.engine" "chipcompiler.tools" "chipcompiler.cli" @@ -88,6 +92,6 @@ python3Packages.buildPythonPackage { license = lib.licenses.mulan-psl2; platforms = lib.platforms.linux; maintainers = [ ]; - mainProgram = "cli"; + mainProgram = "ecc"; }; } diff --git a/nix/ecc-tools/default.nix b/nix/ecc-tools/default.nix deleted file mode 100644 index 5a4741b7..00000000 --- a/nix/ecc-tools/default.nix +++ /dev/null @@ -1,134 +0,0 @@ -{ - lib, - fetchpatch, - callPackages, - stdenv, - cmake, - ninja, - flex, - bison, - zlib, - tcl, - boost, - 
eigen, - yaml-cpp, - libunwind, - glog, - gtest, - gflags, - metis, - gmp, - python3, - onnxruntime, - gperftools, - pkg-config, - curl, - tbb_2022, -}: - -let - rootSrc = stdenv.mkDerivation { - pname = "ecc-tools"; - version = "0-unstable-2026-01-23"; - src = fetchGit { - url = "git@github.com:openecos-projects/ecc-tools.git"; - rev = "07b6d4133f848ba6e54c0889c3b777a2b544d06b"; - }; - - patches = [ - # This patch is to fix the build system to properly find and link against rust libraries. - # Due to the way they organized the source code, it's hard to upstream this patch. - # So we have to maintain this patch locally. - (fetchpatch { - url = "https://github.com/Emin017/iEDA/commit/e5f3ce024965df5e1d400b6a1d7f8b5b307a4bf3.patch"; - hash = "sha256-YJnY+r9A887WT0a/H/Zf++r1PpD7t567NpkesDmIsD0="; - }) - ./fix.patch - ]; - - dontBuild = true; - dontFixup = true; - installPhase = '' - cp -r . $out - ''; - - }; - - rustpkgs = callPackages ./rustpkgs.nix { inherit rootSrc; }; -in -stdenv.mkDerivation { - inherit (rootSrc) pname version; - - src = rootSrc; - - nativeBuildInputs = [ - cmake - ninja - flex - bison - python3 - tcl - pkg-config - ]; - - cmakeBuildType = "Release"; - - cmakeFlags = [ - (lib.cmakeBool "CMD_BUILD" true) - (lib.cmakeBool "SANITIZER" false) - (lib.cmakeBool "BUILD_STATIC_LIB" false) - (lib.cmakeBool "USE_PROFILER" false) - (lib.cmakeBool "BUILD_PYTHON" true) - (lib.cmakeBool "BUILD_ECOS" true) - ]; - - # Only build the Python bindings target - buildTargets = [ "ecc_py" ]; - - preConfigure = '' - cmakeFlags+=" -DCMAKE_RUNTIME_OUTPUT_DIRECTORY:FILEPATH=$out/bin -DCMAKE_LIBRARY_OUTPUT_DIRECTORY:FILEPATH=$out/lib" - ''; - - postPatch = '' - sed -i '1i find_package(Boost REQUIRED)' src/operation/iPA/test/CMakeLists.txt - sed -i 's/boost_system/Boost::headers/g' src/operation/iPA/test/CMakeLists.txt - ''; - - buildInputs = [ - rustpkgs.iir-rust - rustpkgs.sdf-parse - rustpkgs.spef-parser - rustpkgs.vcd-parser - rustpkgs.verilog-parser - 
rustpkgs.liberty-parser - gtest - glog - gflags - boost - onnxruntime - eigen - yaml-cpp - libunwind - metis - gmp - tcl - zlib - gperftools - curl - tbb_2022 - ]; - - postInstall = '' - # Tests rely on hardcoded path, so they should not be included - rm $out/bin/*test $out/bin/*Test $out/bin/test_* $out/bin/*_app - ''; - - enableParallelBuild = true; - - meta = { - description = "Open-source EDA for ASIC design"; - homepage = "https://github.com/openecos-projects/ecc-tools"; - license = lib.licenses.mulan-psl2; - platforms = lib.platforms.linux; - }; -} diff --git a/nix/ecc-tools/fix.patch b/nix/ecc-tools/fix.patch deleted file mode 100644 index c01c4e8d..00000000 --- a/nix/ecc-tools/fix.patch +++ /dev/null @@ -1,12 +0,0 @@ -diff --git a/src/interface/python/CMakeLists.txt b/src/interface/python/CMakeLists.txt -index a0c8c9f4a..efeda99a8 100644 ---- a/src/interface/python/CMakeLists.txt -+++ b/src/interface/python/CMakeLists.txt -@@ -54,3 +54,7 @@ target_link_libraries(ecc_py - py_vec - py_ipnp - ) -+ -+set_target_properties(ecc_py PROPERTIES -+ LIBRARY_OUTPUT_DIRECTORY ${CMAKE_RUNTIME_OUTPUT_DIRECTORY} -+) diff --git a/nix/modules/devShells.nix b/nix/modules/devShells.nix index 8b2aa706..f9ecbe16 100644 --- a/nix/modules/devShells.nix +++ b/nix/modules/devShells.nix @@ -4,7 +4,6 @@ default = pkgs.mkShell { inputsFrom = [ inputs'.infra.packages.iedaUnstable - pkgs.ecc-tools pkgs.chipcompiler ]; nativeBuildInputs = with pkgs; [ uv bazel_8 bazel-buildtools ]; diff --git a/nix/overlay.nix b/nix/overlay.nix index c0458e0d..d90cbce7 100644 --- a/nix/overlay.nix +++ b/nix/overlay.nix @@ -1,5 +1,10 @@ final: prev: { - ecc-tools = prev.callPackage ./ecc-tools { }; + ecc-tools-python = prev.python3Packages.callPackage ./python/ecc-tools { + callPackages = prev.callPackages; + gflags = prev.gflags; + onnxruntime = prev.onnxruntime; + }; + ecc-dreamplace-python = prev.python3Packages.callPackage ./python/ecc-dreamplace { }; chipcompiler = prev.callPackage ./chipcompiler { }; 
cli = prev.callPackage ./cli { }; } diff --git a/nix/python/ecc-dreamplace/default.nix b/nix/python/ecc-dreamplace/default.nix new file mode 100644 index 00000000..cea3ced3 --- /dev/null +++ b/nix/python/ecc-dreamplace/default.nix @@ -0,0 +1,158 @@ +{ + lib, + stdenv, + buildPythonPackage, + fetchFromGitHub, + cmake, + ninja, + flex, + bison, + python, + pkg-config, + zlib, + boost, + cairo, + cairocffi, + distutils, + matplotlib, + numpy, + patool, + pkgconfig, + scipy, + setuptools, + shapely, + torch, + uv-build, + wheel, +}: + +let + version = "0.1.0.0-alpha.2"; + + rootSrc = fetchFromGitHub { + owner = "openecos-projects"; + repo = "ecc-dreamplace"; + rev = "b8606d35455b3a6aae7cd0a5584f4ea389cc223a"; + hash = "sha256-+eFHxOyt6BwUYZ5MN1DHGu35f7NoL6f4PiAATj9nDrc="; + fetchSubmodules = true; + }; + + nativeInputs = [ + cmake + ninja + flex + bison + python + pkg-config + ]; + + runtimeInputs = [ + zlib + boost + cairo + torch + ]; + + runtime = stdenv.mkDerivation { + pname = "ecc-dreamplace-runtime"; + inherit version; + src = rootSrc; + + nativeBuildInputs = nativeInputs; + buildInputs = runtimeInputs; + + cmakeFlags = [ + (lib.cmakeFeature "CMAKE_POLICY_VERSION_MINIMUM" "3.5") + (lib.cmakeFeature "CMAKE_CXX_ABI" "1") + (lib.cmakeFeature "PYTHON_EXECUTABLE" python.interpreter) + (lib.cmakeFeature "Python_EXECUTABLE" python.interpreter) + (lib.cmakeFeature "TORCH_INSTALL_PREFIX" "${torch}/${python.sitePackages}/torch") + (lib.cmakeFeature "TORCH_ENABLE_CUDA" "0") + (lib.cmakeFeature "TORCH_VERSION" torch.version) + ]; + + postPatch = '' + sed -i 's/^[[:space:]]*CMAKE_POLICY(SET CMP0048 OLD)/CMAKE_POLICY(SET CMP0048 NEW)/' thirdparty/Limbo/limbo/thirdparty/lemon/CMakeLists.txt + sed -i 's/static void thread_hold();/static void thread_hold(int sig);/; s/static void thread_hold ()/static void thread_hold(int sig)/' thirdparty/Limbo/limbo/thirdparty/CThreadPool/thpool.c + sed -i 's/i1\.center() < i2\.center()/(i1.low() + i1.high()) < (i2.low() + i2.high())/' 
dreamplace/ops/place_io/src/Interval.h + sed -i '/import stat/d; /nctugr_bin = "%s\/NCTUgr"/,+2d' dreamplace/ops/nctugr_binary/nctugr_binary.py + ''; + + installPhase = '' + runHook preInstall + cmake --install . --prefix "$out" + runHook postInstall + ''; + + enableParallelBuild = true; + }; +in +buildPythonPackage { + pname = "ecc-dreamplace"; + inherit version; + pyproject = true; + + src = rootSrc; + + build-system = [ uv-build ]; + + buildInputs = runtimeInputs; + + postPatch = '' + substituteInPlace pyproject.toml \ + --replace-fail 'uv_build>=0.10.9,<0.12' 'uv_build>=0.10.0,<0.12' + ''; + + preBuild = '' + cp -r ${runtime}/dreamplace/. dreamplace/ + rm -rf thirdparty + cp -r ${runtime}/thirdparty thirdparty + chmod +x thirdparty/NCTUgr.ICCAD2012/NCTUgr + ''; + + postInstall = '' + site_packages="$out/${python.sitePackages}" + rm -rf "$site_packages/thirdparty" + cp -r ${runtime}/thirdparty "$site_packages/thirdparty" + chmod +x "$site_packages/thirdparty/NCTUgr.ICCAD2012/NCTUgr" + ''; + + dependencies = [ + cairocffi + distutils + matplotlib + numpy + patool + pkgconfig + scipy + setuptools + shapely + torch + wheel + ]; + + pythonRemoveDeps = [ + "configspace" + "pydoe2" + "pygmo" + "pyro4" + "pyunpack" + "shap" + "statsmodels" + "xgboost" + ]; + + doCheck = false; + + pythonImportsCheck = [ + "dreamplace" + "dreamplace.Params" + ]; + + meta = { + description = "ECC DreamPlace Python wheel"; + homepage = "https://github.com/openecos-projects/ecc-dreamplace"; + license = lib.licenses.asl20; + platforms = [ "x86_64-linux" ]; + }; +} diff --git a/nix/python/ecc-tools/default.nix b/nix/python/ecc-tools/default.nix new file mode 100644 index 00000000..5cb4b8ee --- /dev/null +++ b/nix/python/ecc-tools/default.nix @@ -0,0 +1,198 @@ +{ + lib, + stdenv, + buildPythonPackage, + fetchFromGitHub, + callPackages, + cmake, + ninja, + flex, + bison, + python, + patchelf, + pkg-config, + zlib, + tcl, + boost, + eigen, + yaml-cpp, + libunwind, + glog, + gtest, + gflags, + 
metis, + gmp, + curl, + onnxruntime, + tbb_2022, + uv-build, +}: + +let + version = "0.1.0.0-alpha.2"; + + rootSrc = fetchFromGitHub { + owner = "openecos-projects"; + repo = "ecc-tools"; + rev = "36160db0b30ccd627f2c2a06d9fa517d4cce4d49"; + hash = "sha256-/09acQVPB9l4EyWtKy3DGkIFsjsJkao2PW3VS2gmLLI="; + }; + + patchedSrc = stdenv.mkDerivation { + pname = "ecc-tools-src"; + inherit version; + src = rootSrc; + + patches = [ + ./use-nix-built-rust-libraries.patch + ./fix-ino-output-summary-init.patch + ]; + + postPatch = '' + substituteInPlace src/operation/iIR/source/iir-rust/CMakeLists.txt \ + --replace-fail 'ADD_EXTERNAL_PROJ(iir)' "" \ + --replace-fail 'target_link_libraries(iIR-Rust PRIVATE ''${RUST_LIB_PATH} dl)' 'target_link_libraries(iIR-Rust PRIVATE iir dl)' + + substituteInPlace src/operation/iSTA/CMakeLists.txt \ + --replace-fail 'link_directories(''${HOME_THIRDPARTY}/onnxruntime/)' 'link_libraries(${onnxruntime}/lib/libonnxruntime.so)' + ''; + + dontBuild = true; + dontFixup = true; + + installPhase = '' + runHook preInstall + cp -r . 
"$out" + runHook postInstall + ''; + }; + + rustpkgs = callPackages ./rustpkgs.nix { rootSrc = patchedSrc; }; + + nativeInputs = [ + cmake + ninja + flex + bison + python + patchelf + pkg-config + ]; + + runtimeInputs = [ + rustpkgs.iir-rust + rustpkgs.sdf_parse + rustpkgs.spef-parser + rustpkgs.vcd_parser + rustpkgs.verilog-parser + rustpkgs.liberty-parser + stdenv.cc.cc.lib + zlib + tcl + boost + eigen + yaml-cpp + libunwind + glog + gtest + gflags + metis + gmp + curl + onnxruntime + tbb_2022 + ]; + + runtime = stdenv.mkDerivation { + pname = "ecc-tools-runtime"; + inherit version; + src = patchedSrc; + + nativeBuildInputs = nativeInputs; + buildInputs = runtimeInputs; + cmakeGenerator = "Ninja"; + + cmakeFlags = [ + (lib.cmakeBool "BUILD_ECOS" true) + (lib.cmakeBool "BUILD_PYTHON" true) + (lib.cmakeBool "BUILD_STATIC_LIB" false) + (lib.cmakeBool "COMPATIBILITY_MODE" true) + (lib.cmakeFeature "Python3_EXECUTABLE" python.interpreter) + (lib.cmakeFeature "Python3_ROOT_DIR" "${python}") + ]; + + buildPhase = '' + runHook preBuild + cmake --build . --target ecc_py + runHook postBuild + ''; + + installPhase = '' + runHook preInstall + + install -d "$out/ecc_tools_bin" + for dir in . 
../bin; do + if [ -d "$dir" ]; then + find "$dir" -type f -name '*.so*' -exec cp -f {} "$out/ecc_tools_bin/" \; + fi + done + + ecc_py_so="$(find "$out/ecc_tools_bin" -type f -name 'ecc_py*.so' -print -quit)" + if [ -z "$ecc_py_so" ]; then + echo "ERROR: ecc_py extension was not built" >&2 + exit 1 + fi + + for so in "$out"/ecc_tools_bin/*.so*; do + [ -e "$so" ] || continue + patchelf --set-rpath "\$ORIGIN:${lib.makeLibraryPath runtimeInputs}" "$so" || true + done + + runHook postInstall + ''; + + enableParallelBuilding = true; + }; +in +buildPythonPackage { + pname = "ecc-tools"; + inherit version; + pyproject = true; + + src = patchedSrc; + + buildInputs = runtimeInputs; + + build-system = [ uv-build ]; + nativeBuildInputs = [ patchelf ]; + + preBuild = '' + install -d ecc_tools_bin + cp -f ${runtime}/ecc_tools_bin/*.so* ecc_tools_bin/ + ''; + + postInstall = '' + site_packages="$out/${python.sitePackages}" + install -d "$site_packages/ecc_tools_bin" + cp -f ${patchedSrc}/ecc_tools_bin/__init__.py "$site_packages/ecc_tools_bin/" + cp -f ${runtime}/ecc_tools_bin/*.so* "$site_packages/ecc_tools_bin/" + ''; + + postFixup = '' + for so in "$out/${python.sitePackages}"/ecc_tools_bin/*.so*; do + [ -e "$so" ] || continue + patchelf --set-rpath "\$ORIGIN:${lib.makeLibraryPath runtimeInputs}" "$so" + done + ''; + + doCheck = false; + + pythonImportsCheck = [ "ecc_tools_bin.ecc_py" ]; + + meta = { + description = "ECC tools Python wheel"; + homepage = "https://github.com/openecos-projects/ecc-tools"; + license = lib.licenses.mulan-psl2; + platforms = [ "x86_64-linux" ]; + }; +} diff --git a/nix/python/ecc-tools/fix-ino-output-summary-init.patch b/nix/python/ecc-tools/fix-ino-output-summary-init.patch new file mode 100644 index 00000000..56748563 --- /dev/null +++ b/nix/python/ecc-tools/fix-ino-output-summary-init.patch @@ -0,0 +1,14 @@ +diff --git a/src/operation/iNO/api/NoApi.cpp b/src/operation/iNO/api/NoApi.cpp +index 45a676320..5f69b1be2 100644 +--- 
a/src/operation/iNO/api/NoApi.cpp ++++ b/src/operation/iNO/api/NoApi.cpp +@@ -225,8 +225,7 @@ ieda_feature::NetOptSummary NoApi::outputSummary() { + clock_timing.hold_wns = eval_data.hold_wns; + clock_timing.suggest_freq = eval_data.freq; + +- ieda_feature::NOClockTimingCmp clock_cmp; +- memset(&clock_cmp, 0, sizeof(ieda_feature::NOClockTimingCmp)); ++ ieda_feature::NOClockTimingCmp clock_cmp{}; + clock_cmp.origin = clock_timing; + summary_map[clock_name] = clock_cmp; + } diff --git a/nix/ecc-tools/rustpkgs.nix b/nix/python/ecc-tools/rustpkgs.nix similarity index 93% rename from nix/ecc-tools/rustpkgs.nix rename to nix/python/ecc-tools/rustpkgs.nix index 4ac37149..dbc678cf 100644 --- a/nix/ecc-tools/rustpkgs.nix +++ b/nix/python/ecc-tools/rustpkgs.nix @@ -1,74 +1,63 @@ { rustPlatform, rootSrc }: let - mkRustpkgs = _: p: rustPlatform.buildRustPackage p; + mkRustPackage = _: args: rustPlatform.buildRustPackage args; in -(builtins.mapAttrs mkRustpkgs { +builtins.mapAttrs mkRustPackage { iir-rust = rec { pname = "iir-rust"; version = "0.1.3"; src = rootSrc; sourceRoot = "${src.name}/src/operation/iIR/source/iir-rust/iir"; - cargoHash = "sha256-CV1e/f3oCKW5mTbQnFBnp7E2d9nFyDwY3qclP2HwdPM="; - doCheck = false; - nativeBuildInputs = [ rustPlatform.bindgenHook ]; }; + liberty-parser = rec { pname = "liberty-parser"; version = "0.1.0"; src = rootSrc; sourceRoot = "${src.name}/src/database/manager/parser/liberty/lib-rust/liberty-parser"; - cargoHash = "sha256-nRIOuSz5ImENvKeMAnthmBo+2/Jy5xbM66xkcfVCTMI="; - doCheck = false; - nativeBuildInputs = [ rustPlatform.bindgenHook ]; }; - sdf-parse = rec { + + sdf_parse = rec { pname = "sdf_parse"; version = "0.1.0"; src = rootSrc; sourceRoot = "${src.name}/src/database/manager/parser/sdf/sdf_parse"; - cargoHash = "sha256-PORA/9DDIax4lOn/pzmi7Y8mCCBUphMTzbBsb64sDl0="; - nativeBuildInputs = [ rustPlatform.bindgenHook ]; }; + spef-parser = rec { pname = "spef-parser"; version = "0.2.4"; src = rootSrc; sourceRoot = 
"${src.name}/src/database/manager/parser/spef/spef-parser"; - cargoHash = "sha256-Qr/oXTqn2gaxyAyLsRjaXNniNzIYVzPGefXTdkULmYk="; - nativeBuildInputs = [ rustPlatform.bindgenHook ]; }; - vcd-parser = rec { + + vcd_parser = rec { pname = "vcd_parser"; version = "0.1.0"; src = rootSrc; sourceRoot = "${src.name}/src/database/manager/parser/vcd/vcd_parser"; - cargoHash = "sha256-xcfVzDrnW4w3fU7qo6xzSQeIH8sEbEyzPF92F5tDcAk="; - doCheck = false; - nativeBuildInputs = [ rustPlatform.bindgenHook ]; }; + verilog-parser = rec { pname = "verilog-parser"; version = "0.1.0"; src = rootSrc; sourceRoot = "${src.name}/src/database/manager/parser/verilog/verilog-rust/verilog-parser"; - cargoHash = "sha256-ooxY8Q8bfD+klBGfpTDD3YyWptEOGGHDoyamhjlSNTM="; - doCheck = false; - nativeBuildInputs = [ rustPlatform.bindgenHook ]; }; -}) +} diff --git a/nix/python/ecc-tools/use-nix-built-rust-libraries.patch b/nix/python/ecc-tools/use-nix-built-rust-libraries.patch new file mode 100644 index 00000000..6ddb4292 --- /dev/null +++ b/nix/python/ecc-tools/use-nix-built-rust-libraries.patch @@ -0,0 +1,60 @@ +diff --git a/src/database/manager/parser/liberty/CMakeLists.txt b/src/database/manager/parser/liberty/CMakeLists.txt +index f6511c3..43f18b5 100644 +--- a/src/database/manager/parser/liberty/CMakeLists.txt ++++ b/src/database/manager/parser/liberty/CMakeLists.txt +@@ -19,9 +19,7 @@ endif() + + message(STATUS "liberty parser rust lib path ${RUST_LIB_PATH}") + +-ADD_EXTERNAL_PROJ(liberty) +- +-target_link_libraries(liberty str sta-solver log ${RUST_LIB_PATH} dl) ++target_link_libraries(liberty str sta-solver log liberty_parser dl) + + target_include_directories(liberty PUBLIC + ${CMAKE_CURRENT_SOURCE_DIR} +diff --git a/src/database/manager/parser/spef/CMakeLists.txt b/src/database/manager/parser/spef/CMakeLists.txt +index 322eb15..3ab9801 100644 +--- a/src/database/manager/parser/spef/CMakeLists.txt ++++ b/src/database/manager/parser/spef/CMakeLists.txt +@@ -19,9 +19,7 @@ endif() + + 
message(STATUS "spef parser rust lib path ${RUST_LIB_PATH}") + +-ADD_EXTERNAL_PROJ(spef) +- +-target_link_libraries(spef log ${RUST_LIB_PATH} dl) ++target_link_libraries(spef log spef_parser dl) + target_include_directories(spef PUBLIC + ${CMAKE_CURRENT_SOURCE_DIR}) + +diff --git a/src/database/manager/parser/vcd/CMakeLists.txt b/src/database/manager/parser/vcd/CMakeLists.txt +index 0aa2635..ab944c2 100644 +--- a/src/database/manager/parser/vcd/CMakeLists.txt ++++ b/src/database/manager/parser/vcd/CMakeLists.txt +@@ -15,9 +15,7 @@ SETUP_RUST_PROJECT() + + message(STATUS "vcd parser rust lib path ${RUST_LIB_PATH}") + +-ADD_EXTERNAL_PROJ(vcd) +- +-target_link_libraries(vcd ${RUST_LIB_PATH} dl) ++target_link_libraries(vcd vcd_parser dl) + target_include_directories(vcd PUBLIC + ${CMAKE_CURRENT_SOURCE_DIR}) + +diff --git a/src/database/manager/parser/verilog/CMakeLists.txt b/src/database/manager/parser/verilog/CMakeLists.txt +index 1ceaee3..a318da4 100644 +--- a/src/database/manager/parser/verilog/CMakeLists.txt ++++ b/src/database/manager/parser/verilog/CMakeLists.txt +@@ -40,9 +40,7 @@ endif() + + message(STATUS "verilog parser rust lib path ${RUST_LIB_PATH}") + +-ADD_EXTERNAL_PROJ(verilog) +- +-target_link_libraries(verilog str log ${ZLIB_LIBRARIES} ${RUST_LIB_PATH} dl) ++target_link_libraries(verilog str log ${ZLIB_LIBRARIES} verilog_parser dl) + + target_include_directories(verilog + PUBLIC diff --git a/pyproject.toml b/pyproject.toml index b5f03056..94cba48a 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -20,7 +20,7 @@ classifiers = [ "Programming Language :: Python :: 3.14", ] dependencies = [ - "ecc-dreamplace==0.1.0a1", + "ecc-dreamplace==0.1.0a2", "ecc-tools==0.1.0a2", "fastapi>=0.109", "klayout>=0.30.2", @@ -36,7 +36,7 @@ dependencies = [ "tqdm>=4.67.1", "uvicorn>=0.27", ] -scripts.cli = "chipcompiler.cli.main:main" +scripts.ecc = "chipcompiler.cli.main:main" [dependency-groups] dev = [ @@ -76,7 +76,7 @@ url = "https://download.pytorch.org/whl/cpu" 
explicit = true [tool.uv.sources] -ecc-dreamplace = { url = "https://github.com/openecos-projects/ecc-dreamplace/releases/download/v0.1.0-alpha.1/ecc_dreamplace-0.1.0a1-py3-none-manylinux_2_34_x86_64.whl" } +ecc-dreamplace = { url = "https://github.com/openecos-projects/ecc-dreamplace/releases/download/v0.1.0-alpha.2/ecc_dreamplace-0.1.0a2-py3-none-manylinux_2_34_x86_64.whl" } ecc-tools = { url = "https://github.com/openecos-projects/ecc-tools/releases/download/v0.1.0-alpha.2/ecc_tools-0.1.0a2-py3-none-manylinux_2_34_x86_64.whl" } torch = { index = "pytorch-cpu" } diff --git a/test/cli/test_cli_inspect.py b/test/cli/test_cli_inspect.py new file mode 100644 index 00000000..6041927b --- /dev/null +++ b/test/cli/test_cli_inspect.py @@ -0,0 +1,1495 @@ +import json +import os + +import pytest + +from chipcompiler.cli import main as cli_main + + +# --------------------------------------------------------------------------- +# Helpers +# --------------------------------------------------------------------------- + + +def _create_valid_project(tmp_path, name="gcd", pdk_root=None): + project_dir = tmp_path / name + project_dir.mkdir(exist_ok=True) + (project_dir / "rtl").mkdir(exist_ok=True) + (project_dir / "constraints").mkdir(exist_ok=True) + (project_dir / "runs").mkdir(exist_ok=True) + + rtl_file = project_dir / "rtl" / "gcd.v" + rtl_file.write_text("module gcd(input clk); endmodule\n") + + if pdk_root is None: + pdk_root = tmp_path / "ics55" + pdk_root.mkdir(exist_ok=True) + + toml = f'''[design] +name = "{name}" +top = "{name}" +rtl = ["rtl/gcd.v"] +clock_port = "clk" +frequency_mhz = 100.0 + +[pdk] +name = "ics55" +root = "{pdk_root}" + +[flow] +preset = "rtl2gds" +run = "default" +''' + (project_dir / "ecc.toml").write_text(toml) + return str(project_dir) + + +def _create_flow_json(run_dir, steps=None): + home = os.path.join(run_dir, "home") + os.makedirs(home, exist_ok=True) + if steps is None: + steps = [ + {"name": "Synthesis", "tool": "yosys", "state": 
"Success", "runtime": "0:00:05"}, + {"name": "Floorplan", "tool": "ecc", "state": "Success", "runtime": "0:00:03"}, + {"name": "CTS", "tool": "ecc", "state": "Success", "runtime": "0:00:04"}, + ] + with open(os.path.join(home, "flow.json"), "w") as f: + json.dump({"steps": steps}, f) + + +def _create_step_dir(run_dir, step_name, tool, subdirs=None, files=None): + step_dir = os.path.join(run_dir, f"{step_name}_{tool}") + os.makedirs(step_dir, exist_ok=True) + if subdirs: + for sd in subdirs: + d = os.path.join(step_dir, sd) + os.makedirs(d, exist_ok=True) + if files: + for relpath, content in files.items(): + fpath = os.path.join(step_dir, relpath) + os.makedirs(os.path.dirname(fpath), exist_ok=True) + with open(fpath, "w") as f: + f.write(content) + return step_dir + + +def _has_disclosure(line: str) -> bool: + return bool( + '"ecc ' in line + or "=ecc " in line + or " ecc check" in line + or " ecc run" in line + or " ecc status" in line + or " ecc log" in line + or " ecc metrics" in line + or " ecc artifacts" in line + or " ecc config" in line + or " ecc diagnose" in line + or " ecc param" in line + ) + + +def _mock_pdk_validation(monkeypatch): + monkeypatch.setattr( + "chipcompiler.cli.config._validate_pdk_contents", + lambda name, root: None, + ) + + +# =========================================================================== +# AC-1: Run-id resolution +# =========================================================================== + + +class TestRunIdResolution: + def test_status_default_run_id(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + _create_flow_json(run_dir) + + rc = cli_main.run(["status", "--run-id", "default", "--project", project_dir]) + assert rc == 0 + out = capsys.readouterr().out + assert "default" in out + + def test_status_simple_token_run_id(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", 
"run_004") + os.makedirs(os.path.join(run_dir, "home"), exist_ok=True) + _create_flow_json(run_dir) + + rc = cli_main.run(["status", "--run-id", "run_004", "--project", project_dir]) + assert rc == 0 + out = capsys.readouterr().out + assert "run_004" in out + + def test_status_relative_path_run_id(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "sweeps", "sweep_001", "run_004") + _create_flow_json(run_dir) + + rc = cli_main.run( + ["status", "--run-id", "sweeps/sweep_001/run_004", "--project", project_dir] + ) + assert rc == 0 + out = capsys.readouterr().out + assert "sweeps/sweep_001/run_004" in out + + def test_status_absolute_path_run_id(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = tmp_path / "ecc-run-004" + _create_flow_json(str(run_dir)) + + rc = cli_main.run( + ["status", "--run-id", str(run_dir), "--project", project_dir] + ) + assert rc == 0 + out = capsys.readouterr().out + assert "run:" in out + + def test_status_missing_run_id(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + + rc = cli_main.run(["status", "--run-id", "nonexistent", "--project", project_dir]) + assert rc == 1 + out = capsys.readouterr().out + assert "missing" in out + + def test_log_preserves_run_id(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "run_005") + _create_flow_json(run_dir) + _create_step_dir(run_dir, "Synthesis", "yosys", subdirs=["log"], + files={"log/synthesis.log": "Error: something failed\n"}) + + rc = cli_main.run( + ["log", "synthesis", "--errors", "--run-id", "run_005", "--project", project_dir] + ) + assert rc == 0 + out = capsys.readouterr().out + assert "--run-id run_005" in out + + def test_metrics_preserves_run_id(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "run_006") + 
_create_flow_json(run_dir, [ + {"name": "CTS", "tool": "ecc", "state": "Success", "runtime": "0:00:04"}, + ]) + _create_step_dir(run_dir, "CTS", "ecc", subdirs=["analysis"], + files={"analysis/CTS_metrics.json": json.dumps({"Frequency [MHz]": 450.0})}) + + rc = cli_main.run( + ["metrics", "cts", "--run-id", "run_006", "--project", project_dir] + ) + assert rc == 0 + out = capsys.readouterr().out + assert "--run-id run_006" in out + + +# =========================================================================== +# AC-2: ecc artifacts +# =========================================================================== + + +class TestArtifacts: + def test_artifacts_all_steps(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + _create_flow_json(run_dir) + _create_step_dir(run_dir, "CTS", "ecc", subdirs=["output", "log"], + files={"output/design.def": "def content", + "log/cts.log": "log content"}) + + rc = cli_main.run(["artifacts", "--project", project_dir]) + assert rc == 0 + out = capsys.readouterr().out + assert "cts" in out + assert "(output)" in out + assert "(log)" in out + + def test_artifacts_single_step(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + _create_flow_json(run_dir) + _create_step_dir(run_dir, "CTS", "ecc", subdirs=["output"], + files={"output/design.def": "def content"}) + + rc = cli_main.run(["artifacts", "cts", "--project", project_dir]) + assert rc == 0 + out = capsys.readouterr().out + assert "cts" in out + assert "(output)" in out + + def test_artifacts_unknown_step(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + os.makedirs(run_dir, exist_ok=True) + + rc = cli_main.run(["artifacts", "nonexistent", "--project", project_dir]) + assert rc == 1 + out = capsys.readouterr().out + assert "unknown_step" in out 
+ + def test_artifacts_empty_known_step(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + _create_flow_json(run_dir) + _create_step_dir(run_dir, "CTS", "ecc", subdirs=["output"]) + + rc = cli_main.run(["artifacts", "cts", "--project", project_dir]) + assert rc == 0 + out = capsys.readouterr().out + assert "No artifacts found" in out + + def test_artifacts_json(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + _create_flow_json(run_dir) + _create_step_dir(run_dir, "CTS", "ecc", subdirs=["output"], + files={"output/design.def": "def content"}) + + rc = cli_main.run(["artifacts", "--json", "--project", project_dir]) + assert rc == 0 + data = json.loads(capsys.readouterr().out) + assert "records" in data + assert len(data["records"]) > 0 + assert data["records"][0]["artifact"] == "design.def" + + def test_artifacts_jsonl(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + _create_flow_json(run_dir) + _create_step_dir(run_dir, "CTS", "ecc", subdirs=["output", "log"], + files={"output/design.def": "def content", + "log/cts.log": "log content"}) + + rc = cli_main.run(["artifacts", "--jsonl", "--project", project_dir]) + assert rc == 0 + objects = [json.loads(ln) for ln in capsys.readouterr().out.strip().split("\n")] + assert len(objects) == 2 + assert all("artifact" in o for o in objects) + + def test_artifacts_with_run_id(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "sweeps", "sweep_001", "run_004") + _create_flow_json(run_dir) + _create_step_dir(run_dir, "CTS", "ecc", subdirs=["output"], + files={"output/design.def": "def content"}) + + rc = cli_main.run( + ["artifacts", "--run-id", "sweeps/sweep_001/run_004", "--project", project_dir] + ) + assert rc == 0 + out = 
capsys.readouterr().out + assert "cts" in out + + def test_artifacts_derives_roles_from_dirs(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + _create_flow_json(run_dir) + _create_step_dir(run_dir, "CTS", "ecc", + subdirs=["config", "output", "report", "log", "analysis"], + files={"config/cts_config.json": "{}", + "output/design.def": "def", + "report/timing.rpt": "rpt", + "log/cts.log": "log", + "analysis/CTS_metrics.json": "{}"}) + + rc = cli_main.run(["artifacts", "cts", "--json", "--project", project_dir]) + assert rc == 0 + data = json.loads(capsys.readouterr().out) + roles = {a["role"] for a in data["records"]} + assert roles == {"config", "output", "report", "log", "analysis"} + + +# =========================================================================== +# AC-3: ecc config --resolved (project level) +# =========================================================================== + + +class TestConfigResolved: + def test_config_resolved_project(self, tmp_path, capsys, monkeypatch): + _mock_pdk_validation(monkeypatch) + project_dir = _create_valid_project(tmp_path) + + rc = cli_main.run(["config", "--resolved", "--project", project_dir]) + assert rc == 0 + out = capsys.readouterr().out + assert "design.name" in out + assert "project:" in out + assert "pdk.name" in out + assert "run_dir" in out + + def test_config_resolved_json(self, tmp_path, capsys, monkeypatch): + _mock_pdk_validation(monkeypatch) + project_dir = _create_valid_project(tmp_path) + + rc = cli_main.run(["config", "--resolved", "--json", "--project", project_dir]) + assert rc == 0 + data = json.loads(capsys.readouterr().out) + assert "records" in data + keys = [item["config"] for item in data["records"]] + assert "design.name" in keys + assert "pdk.name" in keys + assert "run_dir" in keys + + def test_config_resolved_default_run_dir_value(self, tmp_path, capsys, monkeypatch): + _mock_pdk_validation(monkeypatch) 
+ project_dir = _create_valid_project(tmp_path) + + rc = cli_main.run(["config", "--resolved", "--json", "--project", project_dir]) + assert rc == 0 + data = json.loads(capsys.readouterr().out) + run_item = next(i for i in data["records"] if i["config"] == "run_dir") + assert run_item["value"] == "runs/default" + + def test_config_resolved_jsonl(self, tmp_path, capsys, monkeypatch): + _mock_pdk_validation(monkeypatch) + project_dir = _create_valid_project(tmp_path) + + rc = cli_main.run(["config", "--resolved", "--jsonl", "--project", project_dir]) + assert rc == 0 + objects = [json.loads(ln) for ln in capsys.readouterr().out.strip().split("\n")] + keys = [o["config"] for o in objects] + assert "design.name" in keys + + def test_config_resolved_pdk_root_from_env(self, tmp_path, capsys, monkeypatch): + _mock_pdk_validation(monkeypatch) + pdk_root = tmp_path / "ics55_env" + pdk_root.mkdir() + monkeypatch.setenv("CHIPCOMPILER_ICS55_PDK_ROOT", str(pdk_root)) + + project_dir = _create_valid_project(tmp_path, pdk_root="") + + rc = cli_main.run(["config", "--resolved", "--json", "--project", project_dir]) + assert rc == 0 + data = json.loads(capsys.readouterr().out) + pdk_item = next(i for i in data["records"] if i["config"] == "pdk.root") + assert pdk_item["source"] == "env" + + def test_config_resolved_run_id(self, tmp_path, capsys, monkeypatch): + _mock_pdk_validation(monkeypatch) + project_dir = _create_valid_project(tmp_path) + + rc = cli_main.run( + ["config", "--resolved", "--run-id", "sweeps/sweep_001/run_004", + "--json", "--project", project_dir] + ) + assert rc == 0 + data = json.loads(capsys.readouterr().out) + run_item = next(i for i in data["records"] if i["config"] == "run_dir") + assert run_item["value"] == "sweeps/sweep_001/run_004" + + def test_config_missing_config(self, tmp_path, capsys): + project_dir = tmp_path / "empty_project" + project_dir.mkdir() + + rc = cli_main.run(["config", "--resolved", "--project", str(project_dir)]) + assert rc == 1 + + 
def test_config_missing_config_json_has_kind_error(self, tmp_path, capsys): + project_dir = tmp_path / "empty_project" + project_dir.mkdir() + + rc = cli_main.run(["config", "--resolved", "--project", str(project_dir), "--json"]) + assert rc == 1 + data = json.loads(capsys.readouterr().out) + record = data["records"][0] + assert record["kind"] == "error" + assert record["error"] == "missing_config" + + def test_config_missing_config_jsonl_has_kind_error(self, tmp_path, capsys): + project_dir = tmp_path / "empty_project" + project_dir.mkdir() + + rc = cli_main.run(["config", "--resolved", "--project", str(project_dir), "--jsonl"]) + assert rc == 1 + record = json.loads(capsys.readouterr().out.strip()) + assert record["kind"] == "error" + assert record["error"] == "missing_config" + + def test_config_missing_config_text_has_kind_error(self, tmp_path, capsys): + project_dir = tmp_path / "empty_project" + project_dir.mkdir() + + rc = cli_main.run(["config", "--resolved", "--project", str(project_dir)]) + assert rc == 1 + out = capsys.readouterr().out + assert "[error]" in out + assert "missing_config" in out + assert "ecc check" in out + assert str(project_dir) in out + + def test_config_requires_resolved(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + + with pytest.raises(SystemExit): + cli_main.run(["config", "--project", project_dir]) + + +# =========================================================================== +# AC-4: ecc config --resolved +# =========================================================================== + + +class TestConfigStepResolved: + def test_config_step_lists_files(self, tmp_path, capsys, monkeypatch): + _mock_pdk_validation(monkeypatch) + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + _create_flow_json(run_dir) + _create_step_dir(run_dir, "CTS", "ecc", subdirs=["config"], + files={"config/cts_default_config.json": "{}", + "config/run.tcl": "echo hi"}) + 
+ rc = cli_main.run(["config", "cts", "--resolved", "--project", project_dir]) + assert rc == 0 + out = capsys.readouterr().out + assert "step:" in out or "cts" in out + assert "step:" in out + assert "cts_default_config.json" in out + + def test_config_step_json(self, tmp_path, capsys, monkeypatch): + _mock_pdk_validation(monkeypatch) + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + _create_flow_json(run_dir) + _create_step_dir(run_dir, "CTS", "ecc", subdirs=["config"], + files={"config/cts_config.json": "{}"}) + + rc = cli_main.run(["config", "cts", "--resolved", "--json", "--project", project_dir]) + assert rc == 0 + data = json.loads(capsys.readouterr().out) + assert "records" in data + assert all(item["scope"] == "step" for item in data["records"]) + + def test_config_step_unknown_step(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + os.makedirs(run_dir, exist_ok=True) + + rc = cli_main.run(["config", "nonexistent", "--resolved", "--project", project_dir]) + assert rc == 1 + + def test_config_step_no_config_files(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + _create_flow_json(run_dir) + _create_step_dir(run_dir, "CTS", "ecc", subdirs=["output"]) + + rc = cli_main.run(["config", "cts", "--resolved", "--project", project_dir]) + assert rc == 0 + + +# =========================================================================== +# AC-5: ecc diagnose +# =========================================================================== + + +class TestDiagnose: + def test_diagnose_missing_run(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + + rc = cli_main.run(["diagnose", "--project", project_dir]) + assert rc == 1 + out = capsys.readouterr().out + assert "missing_run" in out + assert "error:" in out + + def 
test_diagnose_invalid_flow_json(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + home = os.path.join(run_dir, "home") + os.makedirs(home, exist_ok=True) + with open(os.path.join(home, "flow.json"), "w") as f: + f.write("NOT VALID JSON{{{") + + rc = cli_main.run(["diagnose", "--project", project_dir]) + assert rc == 1 + out = capsys.readouterr().out + assert "invalid_flow_json" in out + + def test_diagnose_failed_step(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + _create_flow_json(run_dir, [ + {"name": "CTS", "tool": "ecc", "state": "Incomplete", "runtime": "0:00:04"}, + ]) + _create_step_dir(run_dir, "CTS", "ecc", subdirs=["log", "analysis"], + files={"log/cts.log": "Error: failed\n", + "analysis/CTS_metrics.json": "{}"}) + + rc = cli_main.run(["diagnose", "--project", project_dir]) + assert rc == 1 + out = capsys.readouterr().out + assert "failed_step" in out + assert "error:" in out + + def test_diagnose_ongoing_step_warning(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + _create_flow_json(run_dir, [ + {"name": "CTS", "tool": "ecc", "state": "Ongoing", "runtime": ""}, + ]) + _create_step_dir(run_dir, "CTS", "ecc", subdirs=["log", "output", "analysis", "config"], + files={"log/cts.log": "running\n", + "output/design.def": "def", + "analysis/CTS_metrics.json": "{}", + "config/cts_config.json": "{}"}) + + rc = cli_main.run(["diagnose", "--project", project_dir]) + assert rc == 0 + out = capsys.readouterr().out + assert "ongoing_step" in out + assert "warning:" in out + + def test_diagnose_unstarted_step_info(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + _create_flow_json(run_dir, [ + {"name": "CTS", "tool": "ecc", "state": "Unstart", 
"runtime": ""}, + ]) + _create_step_dir(run_dir, "CTS", "ecc", subdirs=["log", "output", "analysis", "config"], + files={"log/cts.log": "", + "output/design.def": "def", + "analysis/CTS_metrics.json": "{}", + "config/cts_config.json": "{}"}) + + rc = cli_main.run(["diagnose", "--project", project_dir]) + assert rc == 0 + out = capsys.readouterr().out + assert "unstarted_step" in out + assert "info:" in out + + def test_diagnose_log_errors_count(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + _create_flow_json(run_dir, [ + {"name": "CTS", "tool": "ecc", "state": "Success", "runtime": "0:00:04"}, + ]) + _create_step_dir(run_dir, "CTS", "ecc", subdirs=["log", "output", "analysis", "config"], + files={"log/cts.log": "Error: bad thing\nError: other bad\nok line\n", + "output/design.def": "def", + "analysis/CTS_metrics.json": "{}", + "config/cts_config.json": "{}"}) + + rc = cli_main.run(["diagnose", "--project", project_dir]) + assert rc == 1 + out = capsys.readouterr().out + assert "log_errors" in out + assert "count: 2" in out + + def test_diagnose_missing_metrics_warning(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + _create_flow_json(run_dir, [ + {"name": "CTS", "tool": "ecc", "state": "Success", "runtime": "0:00:04"}, + ]) + _create_step_dir(run_dir, "CTS", "ecc", subdirs=["log", "output", "config"], + files={"log/cts.log": "ok\n", + "output/design.def": "def", + "config/cts_config.json": "{}"}) + + rc = cli_main.run(["diagnose", "--project", project_dir]) + assert rc == 0 + out = capsys.readouterr().out + assert "missing_metrics" in out + assert "warning:" in out + + def test_diagnose_missing_artifacts_warning(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + _create_flow_json(run_dir, [ + {"name": "CTS", "tool": 
"ecc", "state": "Success", "runtime": "0:00:04"}, + ]) + _create_step_dir(run_dir, "CTS", "ecc", subdirs=["log", "analysis", "config"], + files={"log/cts.log": "ok\n", + "analysis/CTS_metrics.json": "{}", + "config/cts_config.json": "{}"}) + # Remove investigation role dirs to trigger missing_artifacts + import shutil + shutil.rmtree(os.path.join(run_dir, "CTS_ecc", "analysis")) + + rc = cli_main.run(["diagnose", "--project", project_dir]) + assert rc == 0 + out = capsys.readouterr().out + assert "missing_artifacts" in out + assert "warning:" in out + + def test_diagnose_config_unavailable_info(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + _create_flow_json(run_dir, [ + {"name": "CTS", "tool": "ecc", "state": "Success", "runtime": "0:00:04"}, + ]) + _create_step_dir(run_dir, "CTS", "ecc", subdirs=["log", "output", "analysis"], + files={"log/cts.log": "ok\n", + "output/design.def": "def", + "analysis/CTS_metrics.json": "{}"}) + + rc = cli_main.run(["diagnose", "--project", project_dir]) + assert rc == 0 + out = capsys.readouterr().out + assert "config_unavailable" in out + assert "info:" in out + + def test_diagnose_clean_run(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + _create_flow_json(run_dir, [ + {"name": "CTS", "tool": "ecc", "state": "Success", "runtime": "0:00:04"}, + ]) + _create_step_dir(run_dir, "CTS", "ecc", + subdirs=["log", "output", "analysis", "config"], + files={"log/cts.log": "ok\n", + "output/design.def": "def", + "analysis/CTS_metrics.json": json.dumps({"Frequency [MHz]": 450.0}), + "config/cts_config.json": "{}"}) + + rc = cli_main.run(["diagnose", "--project", project_dir]) + assert rc == 0 + out = capsys.readouterr().out + assert "clean" in out + + def test_diagnose_step_filter(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = 
os.path.join(project_dir, "runs", "default") + _create_flow_json(run_dir, [ + {"name": "Synthesis", "tool": "yosys", "state": "Success", "runtime": "0:00:05"}, + {"name": "CTS", "tool": "ecc", "state": "Incomplete", "runtime": "0:00:04"}, + ]) + _create_step_dir(run_dir, "Synthesis", "yosys", subdirs=["output", "log", "analysis", "config"], + files={"output/synth.v": "verilog", + "log/synthesis.log": "ok\n", + "analysis/Synthesis_metrics.json": "{}", + "config/config.json": "{}"}) + _create_step_dir(run_dir, "CTS", "ecc", subdirs=["log"], + files={"log/cts.log": "Error: failed\n"}) + + rc = cli_main.run(["diagnose", "cts", "--project", project_dir]) + assert rc == 1 + out = capsys.readouterr().out + assert "failed_step" in out + assert "cts" in out + assert "synthesis" not in out + + def test_diagnose_unknown_step(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + _create_flow_json(run_dir) + + rc = cli_main.run(["diagnose", "nonexistent", "--project", project_dir]) + assert rc == 1 + out = capsys.readouterr().out + assert "unknown_step" in out + + def test_diagnose_no_repair_suggestions(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + _create_flow_json(run_dir, [ + {"name": "CTS", "tool": "ecc", "state": "Incomplete", "runtime": "0:00:04"}, + ]) + _create_step_dir(run_dir, "CTS", "ecc", subdirs=["log"], + files={"log/cts.log": "Error: failed\n"}) + + rc = cli_main.run(["diagnose", "--project", project_dir]) + assert rc == 1 + out = capsys.readouterr().out + assert "suggest" not in out.lower() + assert "fix" not in out.lower() + assert "recommend" not in out.lower() + + def test_diagnose_json(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + _create_flow_json(run_dir, [ + {"name": "CTS", "tool": "ecc", "state": 
"Incomplete", "runtime": "0:00:04"}, + ]) + _create_step_dir(run_dir, "CTS", "ecc", subdirs=["log"], + files={"log/cts.log": "Error: failed\n"}) + + rc = cli_main.run(["diagnose", "--json", "--project", project_dir]) + assert rc == 1 + data = json.loads(capsys.readouterr().out) + assert "records" in data + assert any(i["issue"] == "failed_step" for i in data["records"]) + + def test_diagnose_jsonl(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + _create_flow_json(run_dir, [ + {"name": "CTS", "tool": "ecc", "state": "Incomplete", "runtime": "0:00:04"}, + ]) + _create_step_dir(run_dir, "CTS", "ecc", subdirs=["log"], + files={"log/cts.log": "Error: failed\n"}) + + rc = cli_main.run(["diagnose", "--jsonl", "--project", project_dir]) + assert rc == 1 + objects = [json.loads(ln) for ln in capsys.readouterr().out.strip().split("\n")] + assert any(o["issue"] == "failed_step" for o in objects) + + def test_diagnose_with_run_id(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "run_007") + _create_flow_json(run_dir, [ + {"name": "CTS", "tool": "ecc", "state": "Success", "runtime": "0:00:04"}, + ]) + _create_step_dir(run_dir, "CTS", "ecc", + subdirs=["log", "output", "analysis", "config"], + files={"log/cts.log": "ok\n", + "output/design.def": "def", + "analysis/CTS_metrics.json": "{}", + "config/cts_config.json": "{}"}) + + rc = cli_main.run( + ["diagnose", "--run-id", "run_007", "--project", project_dir] + ) + assert rc == 0 + out = capsys.readouterr().out + assert "clean" in out + + +# =========================================================================== +# AC-6: Diagnose exit codes +# =========================================================================== + + +class TestDiagnoseExitCodes: + def test_error_issue_returns_nonzero(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = 
os.path.join(project_dir, "runs", "default") + _create_flow_json(run_dir, [ + {"name": "CTS", "tool": "ecc", "state": "Incomplete", "runtime": "0:00:04"}, + ]) + _create_step_dir(run_dir, "CTS", "ecc", subdirs=["log"], + files={"log/cts.log": "Error: failed\n"}) + + rc = cli_main.run(["diagnose", "--project", project_dir]) + assert rc == 1 + + def test_warning_only_returns_zero(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + _create_flow_json(run_dir, [ + {"name": "CTS", "tool": "ecc", "state": "Ongoing", "runtime": ""}, + ]) + _create_step_dir(run_dir, "CTS", "ecc", + subdirs=["log", "output", "analysis", "config"], + files={"log/cts.log": "running\n", + "output/design.def": "def", + "analysis/CTS_metrics.json": "{}", + "config/cts_config.json": "{}"}) + + rc = cli_main.run(["diagnose", "--project", project_dir]) + assert rc == 0 + + def test_clean_run_returns_zero(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + _create_flow_json(run_dir, [ + {"name": "CTS", "tool": "ecc", "state": "Success", "runtime": "0:00:04"}, + ]) + _create_step_dir(run_dir, "CTS", "ecc", + subdirs=["log", "output", "analysis", "config"], + files={"log/cts.log": "ok\n", + "output/design.def": "def", + "analysis/CTS_metrics.json": "{}", + "config/cts_config.json": "{}"}) + + rc = cli_main.run(["diagnose", "--project", project_dir]) + assert rc == 0 + + def test_failed_step_not_zero(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + _create_flow_json(run_dir, [ + {"name": "CTS", "tool": "ecc", "state": "Incomplete", "runtime": "0:00:04"}, + ]) + _create_step_dir(run_dir, "CTS", "ecc") + + rc = cli_main.run(["diagnose", "--project", project_dir]) + assert rc != 0 + + +# =========================================================================== +# 
AC-7: Disclosure commands in Phase 2 output +# =========================================================================== + + +class TestDisclosure: + def test_artifacts_lines_have_disclosure(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + _create_flow_json(run_dir) + _create_step_dir(run_dir, "CTS", "ecc", subdirs=["output"], + files={"output/design.def": "def content"}) + + rc = cli_main.run(["artifacts", "cts", "--project", project_dir]) + assert rc == 0 + out = capsys.readouterr().out + assert _has_disclosure(out) + + def test_config_resolved_lines_have_disclosure(self, tmp_path, capsys, monkeypatch): + _mock_pdk_validation(monkeypatch) + project_dir = _create_valid_project(tmp_path) + + rc = cli_main.run(["config", "--resolved", "--project", project_dir]) + assert rc == 0 + out = capsys.readouterr().out + assert _has_disclosure(out) + + def test_diagnose_lines_have_disclosure(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + + rc = cli_main.run(["diagnose", "--project", project_dir]) + assert rc == 1 + out = capsys.readouterr().out + assert _has_disclosure(out) + + def test_phase2_disclosure_preserves_run_id(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "run_008") + _create_flow_json(run_dir, [ + {"name": "CTS", "tool": "ecc", "state": "Incomplete", "runtime": "0:00:04"}, + ]) + _create_step_dir(run_dir, "CTS", "ecc", subdirs=["log"], + files={"log/cts.log": "Error: fail\n"}) + + rc = cli_main.run( + ["diagnose", "--run-id", "run_008", "--project", project_dir] + ) + assert rc == 1 + out = capsys.readouterr().out + assert "--run-id run_008" in out + + def test_artifacts_disclosure_preserves_project(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + _create_flow_json(run_dir) + 
_create_step_dir(run_dir, "CTS", "ecc", subdirs=["output"], + files={"output/design.def": "def content"}) + + rc = cli_main.run(["artifacts", "cts", "--project", project_dir]) + assert rc == 0 + out = capsys.readouterr().out + assert f"--project {project_dir}" in out + + +# =========================================================================== +# AC-8: Read-only and CLI-local +# =========================================================================== + + +class TestReadOnly: + def test_artifacts_does_not_modify_files(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + _create_flow_json(run_dir) + _create_step_dir(run_dir, "CTS", "ecc", subdirs=["output"], + files={"output/design.def": "original"}) + + before_mtime = os.path.getmtime( + os.path.join(run_dir, "CTS_ecc", "output", "design.def") + ) + + rc = cli_main.run(["artifacts", "--project", project_dir]) + assert rc == 0 + + after_mtime = os.path.getmtime( + os.path.join(run_dir, "CTS_ecc", "output", "design.def") + ) + assert before_mtime == after_mtime + + def test_no_persistent_metadata_files(self, tmp_path, capsys, monkeypatch): + _mock_pdk_validation(monkeypatch) + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + _create_flow_json(run_dir) + _create_step_dir(run_dir, "CTS", "ecc", subdirs=["output"], + files={"output/design.def": "def content"}) + + cli_main.run(["artifacts", "--project", project_dir]) + cli_main.run(["config", "--resolved", "--project", project_dir]) + cli_main.run(["diagnose", "--project", project_dir]) + + assert not os.path.exists(os.path.join(project_dir, "issues.json")) + assert not os.path.exists(os.path.join(project_dir, "artifacts.json")) + assert not os.path.exists(os.path.join(project_dir, "resolved_config.json")) + assert not os.path.exists(os.path.join(run_dir, "issues.json")) + assert not os.path.exists(os.path.join(run_dir, 
"artifacts.json")) + + +# =========================================================================== +# Regression tests for Codex review findings (Round 1) +# =========================================================================== + + +class TestRunIdDisclosure: + def test_explicit_default_preserved_in_disclosure(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + _create_flow_json(run_dir) + + rc = cli_main.run(["status", "--run-id", "default", "--project", project_dir]) + assert rc == 0 + out = capsys.readouterr().out + assert "--run-id default" in out + + def test_project_relative_run_id_resolves(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "sweeps", "sweep_001", "run_004") + _create_flow_json(run_dir) + + rc = cli_main.run( + ["status", "--run-id", "sweeps/sweep_001/run_004", "--project", project_dir] + ) + assert rc == 0 + out = capsys.readouterr().out + assert "sweeps/sweep_001/run_004" in out + + +class TestArtifactPaths: + def test_nested_run_artifact_paths(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "sweeps", "sweep_001", "run_004") + _create_flow_json(run_dir) + _create_step_dir(run_dir, "CTS", "ecc", subdirs=["output"], + files={"output/design.def": "def content"}) + + rc = cli_main.run( + ["artifacts", "--run-id", "sweeps/sweep_001/run_004", + "--json", "--project", project_dir] + ) + assert rc == 0 + data = json.loads(capsys.readouterr().out) + assert len(data["records"]) == 1 + path = data["records"][0]["path"] + assert path.startswith("sweeps/") + + def test_nested_run_step_config_paths(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "sweeps", "sweep_001", "run_004") + _create_flow_json(run_dir) + _create_step_dir(run_dir, "CTS", "ecc", subdirs=["config"], + 
files={"config/cts_config.json": "{}"}) + + rc = cli_main.run( + ["config", "cts", "--resolved", "--run-id", "sweeps/sweep_001/run_004", + "--json", "--project", project_dir] + ) + assert rc == 0 + data = json.loads(capsys.readouterr().out) + assert "records" in data + path = data["records"][0]["path"] + + +class TestEmptyStepConfigSentinel: + def test_step_no_config_emits_sentinel_text(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + _create_flow_json(run_dir) + _create_step_dir(run_dir, "CTS", "ecc", subdirs=["output"]) + + rc = cli_main.run(["config", "cts", "--resolved", "--project", project_dir]) + assert rc == 0 + out = capsys.readouterr().out + assert "cts" in out + assert "No configuration" in out + assert "artifacts:" in out + + def test_step_no_config_emits_sentinel_json(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + _create_flow_json(run_dir) + _create_step_dir(run_dir, "CTS", "ecc", subdirs=["output"]) + + rc = cli_main.run(["config", "cts", "--resolved", "--json", "--project", project_dir]) + assert rc == 0 + data = json.loads(capsys.readouterr().out) + assert data["records"][0]["step"] == "cts" + assert data["records"][0]["config_status"] == "none" + + +class TestDiagnoseFlowOnlySteps: + def test_flow_step_without_directory_emits_issues(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + _create_flow_json(run_dir, [ + {"name": "CTS", "tool": "ecc", "state": "Incomplete", "runtime": "0:00:04"}, + ]) + + rc = cli_main.run(["diagnose", "cts", "--project", project_dir]) + assert rc == 1 + out = capsys.readouterr().out + assert "failed_step" in out + assert "cts" in out + assert "unknown_step" not in out + + def test_flow_step_without_dir_reports_missing_artifacts(self, tmp_path, capsys): + project_dir = 
_create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + _create_flow_json(run_dir, [ + {"name": "CTS", "tool": "ecc", "state": "Success", "runtime": "0:00:04"}, + ]) + + rc = cli_main.run(["diagnose", "cts", "--project", project_dir]) + assert rc == 0 + out = capsys.readouterr().out + assert "missing_artifacts" in out + assert "missing_metrics" in out + assert "config_unavailable" in out + + +class TestConfigRoleDisclosure: + def test_config_artifact_has_disclosure(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + _create_flow_json(run_dir) + _create_step_dir(run_dir, "CTS", "ecc", subdirs=["config"], + files={"config/cts_config.json": "{}"}) + + rc = cli_main.run(["artifacts", "cts", "--project", project_dir]) + assert rc == 0 + out = capsys.readouterr().out + assert _has_disclosure(out) + + +# =========================================================================== +# Regression tests for Codex Round 2 findings (Round 3) +# =========================================================================== + + +class TestAbsoluteRunIdConfig: + def test_absolute_run_id_preserves_run_dir_value(self, tmp_path, capsys, monkeypatch): + _mock_pdk_validation(monkeypatch) + project_dir = _create_valid_project(tmp_path) + external_run = tmp_path / "external_run" + _create_flow_json(str(external_run)) + + rc = cli_main.run( + ["config", "--resolved", "--run-id", str(external_run), + "--json", "--project", project_dir] + ) + assert rc == 0 + data = json.loads(capsys.readouterr().out) + run_item = next(i for i in data["records"] if i["config"] == "run_dir") + assert run_item["value"] == str(external_run) + + +class TestConfigTextUsesItemInspectCmd: + def test_run_dir_text_uses_status_command(self, tmp_path, capsys, monkeypatch): + _mock_pdk_validation(monkeypatch) + project_dir = _create_valid_project(tmp_path) + + rc = cli_main.run(["config", "--resolved", 
"--project", project_dir]) + assert rc == 0 + out = capsys.readouterr().out + assert "run_dir" in out + assert "ecc status" in out + + +class TestDiagnoseIssueSpecificEvidence: + def test_log_errors_uses_log_command(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + _create_flow_json(run_dir, [ + {"name": "CTS", "tool": "ecc", "state": "Success", "runtime": "0:00:04"}, + ]) + _create_step_dir(run_dir, "CTS", "ecc", + subdirs=["log", "output", "analysis", "config"], + files={"log/cts.log": "Error: bad thing\nError: other\nok\n", + "output/design.def": "def", + "analysis/CTS_metrics.json": "{}", + "config/cts_config.json": "{}"}) + + rc = cli_main.run(["diagnose", "cts", "--project", project_dir]) + assert rc == 1 + out = capsys.readouterr().out + assert "log_errors" in out + assert "ecc log cts" in out + + def test_missing_metrics_uses_metrics_command(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + _create_flow_json(run_dir, [ + {"name": "CTS", "tool": "ecc", "state": "Success", "runtime": "0:00:04"}, + ]) + _create_step_dir(run_dir, "CTS", "ecc", + subdirs=["log", "output", "config"], + files={"log/cts.log": "ok\n", + "output/design.def": "def", + "config/cts_config.json": "{}"}) + + rc = cli_main.run(["diagnose", "cts", "--project", project_dir]) + assert rc == 0 + out = capsys.readouterr().out + assert "missing_metrics" in out + assert "ecc metrics cts" in out + + def test_missing_artifacts_uses_artifacts_command(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + _create_flow_json(run_dir, [ + {"name": "CTS", "tool": "ecc", "state": "Success", "runtime": "0:00:04"}, + ]) + _create_step_dir(run_dir, "CTS", "ecc", + subdirs=["log", "config"], + files={"log/cts.log": "ok\n", + "config/cts_config.json": "{}"}) + + rc = 
cli_main.run(["diagnose", "cts", "--project", project_dir]) + assert rc == 0 + out = capsys.readouterr().out + assert "missing_artifacts" in out + assert "ecc artifacts cts" in out + + def test_config_unavailable_uses_config_command(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + _create_flow_json(run_dir, [ + {"name": "CTS", "tool": "ecc", "state": "Success", "runtime": "0:00:04"}, + ]) + _create_step_dir(run_dir, "CTS", "ecc", + subdirs=["log", "output", "analysis"], + files={"log/cts.log": "ok\n", + "output/design.def": "def", + "analysis/CTS_metrics.json": "{}"}) + + rc = cli_main.run(["diagnose", "cts", "--project", project_dir]) + assert rc == 0 + out = capsys.readouterr().out + assert "config_unavailable" in out + assert "ecc config cts --resolved" in out + + def test_invalid_flow_json_has_evidence(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + os.makedirs(os.path.join(run_dir, "home"), exist_ok=True) + with open(os.path.join(run_dir, "home", "flow.json"), "w") as f: + f.write("NOT VALID JSON{{{") + + rc = cli_main.run(["diagnose", "--project", project_dir]) + assert rc == 1 + out = capsys.readouterr().out + assert "invalid_flow_json" in out + assert "evidence:" in out + assert "ecc status" in out + + def test_invalid_flow_json_json_has_evidence(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + os.makedirs(os.path.join(run_dir, "home"), exist_ok=True) + with open(os.path.join(run_dir, "home", "flow.json"), "w") as f: + f.write("NOT VALID JSON{{{") + + rc = cli_main.run(["diagnose", "--json", "--project", project_dir]) + assert rc == 1 + out = capsys.readouterr().out + data = json.loads(out) + issue = data["records"][0] + assert issue["issue"] == "invalid_flow_json" + assert "evidence" in issue + assert 
"start_cmd" in issue + + +class TestCleanDiagnoseOutput: + def test_clean_has_status_and_disclosure_commands(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + _create_flow_json(run_dir, [ + {"name": "CTS", "tool": "ecc", "state": "Success", "runtime": "0:00:04"}, + ]) + _create_step_dir(run_dir, "CTS", "ecc", + subdirs=["log", "output", "analysis", "config"], + files={"log/cts.log": "ok\n", + "output/design.def": "def", + "analysis/CTS_metrics.json": "{}", + "config/cts_config.json": "{}"}) + + rc = cli_main.run(["diagnose", "--project", project_dir]) + assert rc == 0 + out = capsys.readouterr().out + assert "clean" in out + assert "inspect:" in out + assert "artifacts:" in out + assert "config:" in out + + def test_clean_json_has_disclosure_metadata(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + _create_flow_json(run_dir, [ + {"name": "CTS", "tool": "ecc", "state": "Success", "runtime": "0:00:04"}, + ]) + _create_step_dir(run_dir, "CTS", "ecc", + subdirs=["log", "output", "analysis", "config"], + files={"log/cts.log": "ok\n", + "output/design.def": "def", + "analysis/CTS_metrics.json": "{}", + "config/cts_config.json": "{}"}) + + rc = cli_main.run(["diagnose", "--json", "--project", project_dir]) + assert rc == 0 + data = json.loads(capsys.readouterr().out) + assert data["records"][0]["status"] == "clean" + assert "inspect_cmd" in data["records"][0] + assert "artifacts" in data["records"][0] + assert "config" in data["records"][0] + + +class TestConfigJsonDisclosure: + def test_project_config_json_has_inspect_cmd(self, tmp_path, capsys, monkeypatch): + _mock_pdk_validation(monkeypatch) + project_dir = _create_valid_project(tmp_path) + + rc = cli_main.run(["config", "--resolved", "--json", "--project", project_dir]) + assert rc == 0 + data = json.loads(capsys.readouterr().out) + for item in 
data["records"]: + assert "inspect" in item, f"Missing inspect in item: {item['config']}" + + +class TestIsolatedConfigValidation: + @staticmethod + def _valid_toml(tmp_path, **overrides): + pdk_dir = tmp_path / "pdk" + pdk_dir.mkdir(exist_ok=True) + rtl_dir = tmp_path / "rtl" + rtl_dir.mkdir(exist_ok=True) + (rtl_dir / "gcd.v").write_text("module gcd; endmodule") + defaults = { + "name": "gcd", "top": "gcd", "rtl": '["rtl/gcd.v"]', + "clock_port": "clk", "frequency_mhz": "100.0", + "pdk_name": "ics55", "pdk_root": str(pdk_dir), + "flow_preset": "rtl2gds", "flow_run": "default", + } + defaults.update(overrides) + return f'''[design] +name = "{defaults['name']}" +top = "{defaults['top']}" +rtl = {defaults['rtl']} +clock_port = "{defaults['clock_port']}" +frequency_mhz = {defaults['frequency_mhz']} + +[pdk] +name = "{defaults['pdk_name']}" +root = "{defaults['pdk_root']}" + +[flow] +preset = "{defaults['flow_preset']}" +run = "{defaults['flow_run']}" +''' + + def test_unsupported_flow_run_rejected(self, tmp_path, capsys, monkeypatch): + _mock_pdk_validation(monkeypatch) + project_dir = tmp_path / "bad_run" + project_dir.mkdir() + toml = self._valid_toml(tmp_path, flow_run="custom") + (project_dir / "ecc.toml").write_text(toml) + rc = cli_main.run(["config", "--resolved", "--project", str(project_dir)]) + assert rc == 1 + + def test_empty_clock_port_rejected(self, tmp_path, capsys, monkeypatch): + _mock_pdk_validation(monkeypatch) + project_dir = tmp_path / "bad_clock" + project_dir.mkdir() + toml = self._valid_toml(tmp_path, clock_port="") + (project_dir / "ecc.toml").write_text(toml) + rc = cli_main.run(["config", "--resolved", "--project", str(project_dir)]) + assert rc == 1 + + def test_zero_frequency_rejected(self, tmp_path, capsys, monkeypatch): + _mock_pdk_validation(monkeypatch) + project_dir = tmp_path / "bad_freq" + project_dir.mkdir() + toml = self._valid_toml(tmp_path, frequency_mhz="0") + (project_dir / "ecc.toml").write_text(toml) + rc = 
cli_main.run(["config", "--resolved", "--project", str(project_dir)]) + assert rc == 1 + + def test_empty_rtl_rejected(self, tmp_path, capsys, monkeypatch): + _mock_pdk_validation(monkeypatch) + project_dir = tmp_path / "bad_rtl" + project_dir.mkdir() + toml = self._valid_toml(tmp_path, rtl="[]") + (project_dir / "ecc.toml").write_text(toml) + rc = cli_main.run(["config", "--resolved", "--project", str(project_dir)]) + assert rc == 1 + + +# =========================================================================== +# Regression tests for Codex Round 4 code review (Round 5) +# =========================================================================== + + +class TestCorruptFlowJson: + def test_corrupt_flow_json_status_reports_corrupt(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + os.makedirs(os.path.join(run_dir, "home"), exist_ok=True) + with open(os.path.join(run_dir, "home", "flow.json"), "w") as f: + f.write("BROKEN{{{") + rc = cli_main.run(["status", "--project", project_dir]) + assert rc == 1 + out = capsys.readouterr().out + assert "corrupt" in out + + def test_missing_flow_json_status_reports_missing(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + os.makedirs(run_dir, exist_ok=True) + rc = cli_main.run(["status", "--project", project_dir]) + assert rc == 1 + out = capsys.readouterr().out + assert "missing" in out + + def test_corrupt_flow_json_json_reports_corrupt(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + os.makedirs(os.path.join(run_dir, "home"), exist_ok=True) + with open(os.path.join(run_dir, "home", "flow.json"), "w") as f: + f.write("BROKEN{{{") + rc = cli_main.run(["status", "--json", "--project", project_dir]) + assert rc == 1 + data = json.loads(capsys.readouterr().out) + assert 
data["records"][0]["status"] == "corrupt" + + +class TestCorruptMetricsJson: + def test_malformed_metrics_reports_corrupt_text(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + _create_flow_json(run_dir, [ + {"name": "CTS", "tool": "ecc", "state": "Success", "runtime": "0:00:04"}, + ]) + _create_step_dir(run_dir, "CTS", "ecc", + subdirs=["analysis"], + files={"analysis/CTS_metrics.json": "NOT JSON{{{"}) + rc = cli_main.run(["metrics", "cts", "--project", project_dir]) + assert rc == 1 + out = capsys.readouterr().out + assert "corrupt" in out + + def test_malformed_metrics_reports_corrupt_json(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + _create_flow_json(run_dir, [ + {"name": "CTS", "tool": "ecc", "state": "Success", "runtime": "0:00:04"}, + ]) + _create_step_dir(run_dir, "CTS", "ecc", + subdirs=["analysis"], + files={"analysis/CTS_metrics.json": "NOT JSON{{{"}) + rc = cli_main.run(["metrics", "cts", "--json", "--project", project_dir]) + assert rc == 1 + data = json.loads(capsys.readouterr().out) + assert data["records"][0]["status"] == "corrupt" + + +class TestRtlPathResolution: + def test_absolute_rtl_resolved_correctly(self, tmp_path, capsys, monkeypatch): + _mock_pdk_validation(monkeypatch) + project_dir = tmp_path / "proj" + project_dir.mkdir() + rtl_dir = tmp_path / "external_rtl" + rtl_dir.mkdir() + (rtl_dir / "gcd.v").write_text("module gcd; endmodule") + (project_dir / "ecc.toml").write_text(f'''[design] +name = "gcd" +top = "gcd" +rtl = ["{rtl_dir / "gcd.v"}"] +clock_port = "clk" +frequency_mhz = 100.0 + +[pdk] +name = "ics55" +root = "{tmp_path / "pdk"}" + +[flow] +preset = "rtl2gds" +run = "default" +''') + (tmp_path / "pdk").mkdir(exist_ok=True) + rc = cli_main.run(["config", "--resolved", "--json", "--project", str(project_dir)]) + assert rc == 0 + data = 
json.loads(capsys.readouterr().out) + rtl_item = next(i for i in data["records"] if i["config"] == "design.rtl.0") + assert rtl_item["resolved"] == str(rtl_dir / "gcd.v") + + +# =========================================================================== +# Regression tests for Codex Round 5 code review (Round 6) +# =========================================================================== + + +class TestPendingStepDiagnose: + def test_pending_step_creates_issue(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + _create_flow_json(run_dir, [ + {"name": "CTS", "tool": "ecc", "state": "Pending", "runtime": ""}, + ]) + _create_step_dir(run_dir, "CTS", "ecc", + subdirs=["log", "output", "analysis", "config"], + files={"log/cts.log": "ok\n", + "output/design.def": "def", + "analysis/CTS_metrics.json": '{"freq": 100}', + "config/cts_config.json": "{}"}) + + rc = cli_main.run(["diagnose", "cts", "--project", project_dir]) + assert rc == 0 + out = capsys.readouterr().out + assert "pending_step" in out + assert "pending" in out + + +class TestMissingRunJsonlKind: + def test_missing_run_jsonl_has_kind(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + os.makedirs(run_dir, exist_ok=True) + + rc = cli_main.run(["status", "--jsonl", "--project", project_dir]) + assert rc == 1 + out = capsys.readouterr().out + data = [json.loads(line) for line in out.strip().split("\n") if line.strip()] + assert data[0]["run"] == "default" + assert data[0]["status"] == "missing" + + +class TestLogErrorMatching: + def test_clean_summary_not_counted_as_error(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + _create_flow_json(run_dir, [ + {"name": "CTS", "tool": "ecc", "state": "Success", "runtime": "0:00:04"}, + ]) + _create_step_dir(run_dir, "CTS", "ecc", + 
subdirs=["log", "output", "analysis", "config"], + files={"log/cts.log": "CTS completed successfully\n0 errors\nNo errors found\n0 failed checks\n", + "output/design.def": "def", + "analysis/CTS_metrics.json": '{"freq": 100}', + "config/cts_config.json": "{}"}) + + rc = cli_main.run(["diagnose", "cts", "--project", project_dir]) + assert rc == 0 + out = capsys.readouterr().out + assert "log_errors" not in out + + def test_real_errors_still_detected(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + _create_flow_json(run_dir, [ + {"name": "CTS", "tool": "ecc", "state": "Success", "runtime": "0:00:04"}, + ]) + _create_step_dir(run_dir, "CTS", "ecc", + subdirs=["log", "output", "analysis", "config"], + files={"log/cts.log": "CTS completed\nError: bad thing\nTraceback (most recent call):\n0 errors\n", + "output/design.def": "def", + "analysis/CTS_metrics.json": '{"freq": 100}', + "config/cts_config.json": "{}"}) + + rc = cli_main.run(["diagnose", "cts", "--project", project_dir]) + assert rc == 1 + out = capsys.readouterr().out + assert "log_errors" in out + assert "count: 2" in out + diff --git a/test/cli/test_cli_main.py b/test/cli/test_cli_main.py index e12585f1..df5f680b 100644 --- a/test/cli/test_cli_main.py +++ b/test/cli/test_cli_main.py @@ -1,11 +1,16 @@ -#!/usr/bin/env python - +import json +import os +import re from types import SimpleNamespace import pytest from chipcompiler.cli import main as cli_main +# --------------------------------------------------------------------------- +# Helpers +# --------------------------------------------------------------------------- + class DummyFlow: has_init_value = False @@ -17,6 +22,7 @@ def __init__(self, workspace): self.added_steps = [] self.create_called = False self.run_called = False + self.workspace_steps = [] DummyFlow.instances.append(self) def has_init(self): @@ -32,28 +38,14 @@ def run_steps(self): self.run_called = True return 
self.run_steps_value + def run_step(self, workspace_step): + from chipcompiler.data import StateEnum + self.run_called = True + return StateEnum.Success if self.run_steps_value else StateEnum.Imcomplete -def _common_args(workspace, rtl, pdk_root): - return [ - "--workspace", - str(workspace), - "--rtl", - str(rtl), - "--design", - "top_design", - "--top", - "top", - "--clock", - "clk", - "--pdk-root", - str(pdk_root), - ] - - -def _install_cli_mocks(monkeypatch): - capture = { - "create_kwargs": None, - } + +def _install_flow_mocks(monkeypatch): + capture = {"create_kwargs": None} workspace_obj = SimpleNamespace(name="workspace") DummyFlow.instances = [] @@ -64,85 +56,1832 @@ def fake_create_workspace(**kwargs): capture["create_kwargs"] = kwargs return workspace_obj - monkeypatch.setattr(cli_main, "create_workspace", fake_create_workspace) - monkeypatch.setattr(cli_main, "EngineFlow", DummyFlow) - monkeypatch.setattr(cli_main, "build_rtl2gds_flow", lambda: [("Synthesis", "yosys", "Unstart")]) + monkeypatch.setattr("chipcompiler.data.create_workspace", fake_create_workspace) + monkeypatch.setattr("chipcompiler.engine.EngineFlow", DummyFlow) + monkeypatch.setattr( + "chipcompiler.rtl2gds.build_rtl2gds_flow", + lambda: [("Synthesis", "yosys", "Unstart")], + ) + monkeypatch.setattr( + "chipcompiler.cli.config._validate_pdk_contents", + lambda name, root: None, + ) return capture -def test_cli_rtl_mode_calls_create_workspace_correctly(tmp_path, monkeypatch): - rtl = tmp_path / "top.v" - rtl.write_text("module top(input clk); endmodule\n") - pdk_root = tmp_path / "ics55" - pdk_root.mkdir() - workspace_dir = tmp_path / "ws" - - capture = _install_cli_mocks(monkeypatch) - rc = cli_main.run(_common_args(workspace_dir, rtl, pdk_root)) - - assert rc == 0 - assert capture["create_kwargs"]["origin_verilog"] == str(rtl.resolve()) - assert capture["create_kwargs"]["input_filelist"] == "" - assert capture["create_kwargs"]["pdk"] == "ics55" - assert 
capture["create_kwargs"]["parameters"]["Design"] == "top_design" - assert capture["create_kwargs"]["parameters"]["Top module"] == "top" - assert capture["create_kwargs"]["parameters"]["Clock"] == "clk" - assert capture["create_kwargs"]["parameters"]["Frequency max [MHz]"] == 100.0 - assert DummyFlow.instances[0].create_called is True - assert DummyFlow.instances[0].run_called is True - - -def test_cli_filelist_mode_calls_create_workspace_correctly(tmp_path, monkeypatch): - rtl_source = tmp_path / "a.v" - rtl_source.write_text("module a(); endmodule\n") - filelist = tmp_path / "design.f" - filelist.write_text("a.v\n") - pdk_root = tmp_path / "ics55" - pdk_root.mkdir() - workspace_dir = tmp_path / "ws" - - capture = _install_cli_mocks(monkeypatch) - rc = cli_main.run(_common_args(workspace_dir, filelist, pdk_root)) - - assert rc == 0 - assert capture["create_kwargs"]["origin_verilog"] == "" - assert capture["create_kwargs"]["input_filelist"] == str(filelist.resolve()) - - -def test_cli_unknown_suffix_fallback_to_filelist(tmp_path, monkeypatch): - rtl_source = tmp_path / "b.v" - rtl_source.write_text("module b(); endmodule\n") - filelist_like = tmp_path / "design.listing" - filelist_like.write_text("b.v\n") - pdk_root = tmp_path / "ics55" - pdk_root.mkdir() - workspace_dir = tmp_path / "ws" - - capture = _install_cli_mocks(monkeypatch) - rc = cli_main.run(_common_args(workspace_dir, filelist_like, pdk_root)) - - assert rc == 0 - assert capture["create_kwargs"]["origin_verilog"] == "" - assert capture["create_kwargs"]["input_filelist"] == str(filelist_like.resolve()) - - -def test_cli_requires_mandatory_arguments(): - with pytest.raises(SystemExit) as exc_info: - cli_main.run([]) - assert exc_info.value.code == 2 - - -def test_cli_returns_nonzero_when_run_steps_failed(tmp_path, monkeypatch): - rtl = tmp_path / "top.v" - rtl.write_text("module top(input clk); endmodule\n") - pdk_root = tmp_path / "ics55" - pdk_root.mkdir() - workspace_dir = tmp_path / "ws" - - 
_install_cli_mocks(monkeypatch) - DummyFlow.run_steps_value = False - - rc = cli_main.run(_common_args(workspace_dir, rtl, pdk_root)) - assert rc == 1 +def _create_valid_project(tmp_path, name="gcd", pdk_root=None): + project_dir = tmp_path / name + project_dir.mkdir(exist_ok=True) + (project_dir / "rtl").mkdir(exist_ok=True) + (project_dir / "constraints").mkdir(exist_ok=True) + (project_dir / "runs").mkdir(exist_ok=True) + + rtl_file = project_dir / "rtl" / "gcd.v" + rtl_file.write_text("module gcd(input clk); endmodule\n") + + if pdk_root is None: + pdk_root = tmp_path / "ics55" + pdk_root.mkdir(exist_ok=True) + + toml = f'''[design] +name = "{name}" +top = "{name}" +rtl = ["rtl/gcd.v"] +clock_port = "clk" +frequency_mhz = 100.0 + +[pdk] +name = "ics55" +root = "{pdk_root}" + +[flow] +preset = "rtl2gds" +run = "default" +''' + (project_dir / "ecc.toml").write_text(toml) + return str(project_dir) + + +def _create_flow_json(run_dir, steps=None): + home = os.path.join(run_dir, "home") + os.makedirs(home, exist_ok=True) + if steps is None: + steps = [ + {"name": "Synthesis", "tool": "yosys", "state": "Success", "runtime": "0:00:18"}, + {"name": "Floorplan", "tool": "ecc", "state": "Success", "runtime": "0:00:04"}, + ] + with open(os.path.join(home, "flow.json"), "w") as f: + json.dump({"steps": steps}, f) + + +def _has_disclosure(line): + return bool(re.search(r'ecc (?:check|run|status|log|metrics|artifacts|config|diagnose|param)\b', line)) + + +def _is_structural_line(line): + s = line.strip() + if not s: + return True + if re.match(r'^\[.+\]$', s): + return True + if s.startswith('steps:'): + return True + if re.match(r'^\s+\w+:$', s): + return True + return False + + +# =========================================================================== +# AC-1: ecc init +# =========================================================================== + + +class TestInit: + def test_init_creates_skeleton(self, tmp_path): + project_path = str(tmp_path / "gcd") + rc = 
cli_main.run(["init", project_path]) + assert rc == 0 + + assert (tmp_path / "gcd" / "ecc.toml").exists() + assert (tmp_path / "gcd" / "rtl").is_dir() + assert (tmp_path / "gcd" / "constraints").is_dir() + assert (tmp_path / "gcd" / "runs").is_dir() + + def test_init_output_has_disclosure_commands(self, tmp_path, capsys): + project_path = str(tmp_path / "myproj") + rc = cli_main.run(["init", project_path]) + assert rc == 0 + out = capsys.readouterr().out + assert "ecc check" in out + assert "ecc run" in out + + def test_init_fails_if_ecc_toml_exists(self, tmp_path): + project_dir = tmp_path / "gcd" + project_dir.mkdir() + (project_dir / "ecc.toml").write_text("[design]\n") + rc = cli_main.run(["init", str(project_dir)]) + assert rc == 1 + + def test_init_rejects_empty_name(self): + rc = cli_main.run(["init", ""]) + assert rc == 1 + + def test_init_uses_basename_for_design_name(self, tmp_path): + project_path = str(tmp_path / "subdir" / "mydesign") + rc = cli_main.run(["init", project_path]) + assert rc == 0 + toml = (tmp_path / "subdir" / "mydesign" / "ecc.toml").read_text() + assert 'name = "mydesign"' in toml + assert "rtl/mydesign.v" in toml + + +# =========================================================================== +# AC-2: ecc check +# =========================================================================== + + +class TestCheck: + def test_check_passes_valid_config(self, tmp_path, monkeypatch, capsys): + project_dir = _create_valid_project(tmp_path) + monkeypatch.setattr( + "chipcompiler.cli.config._validate_pdk_contents", + lambda name, root: None, + ) + rc = cli_main.run(["check", "--project", project_dir]) + assert rc == 0 + out = capsys.readouterr().out + assert "checked" in out + + def test_check_from_inside_project_dir(self, tmp_path, monkeypatch, capsys): + project_dir = _create_valid_project(tmp_path) + monkeypatch.setattr( + "chipcompiler.cli.config._validate_pdk_contents", + lambda name, root: None, + ) + monkeypatch.chdir(project_dir) + rc 
= cli_main.run(["check"]) + assert rc == 0 + out = capsys.readouterr().out + assert "checked" in out + + def test_check_fails_missing_ecc_toml(self, tmp_path): + rc = cli_main.run(["check", "--project", str(tmp_path)]) + assert rc == 1 + + def test_check_fails_malformed_toml(self, tmp_path, capsys): + project_dir = tmp_path / "bad" + project_dir.mkdir() + (project_dir / "ecc.toml").write_text("[design\ninvalid {{{") + rc = cli_main.run(["check", "--project", str(project_dir)]) + assert rc == 1 + + def test_check_fails_missing_rtl(self, tmp_path): + project_dir = _create_valid_project(tmp_path) + toml_path = os.path.join(project_dir, "ecc.toml") + with open(toml_path, "w") as f: + f.write( + '[design]\nname="gcd"\ntop="gcd"\nrtl=["rtl/missing.v"]\n' + 'clock_port="clk"\nfrequency_mhz=100\n' + '[pdk]\nname="ics55"\nroot=""\n' + '[flow]\npreset="rtl2gds"\nrun="default"\n', + ) + rc = cli_main.run(["check", "--project", project_dir]) + assert rc == 1 + + def test_check_fails_empty_pdk_root(self, tmp_path): + project_dir = _create_valid_project(tmp_path, pdk_root="") + rc = cli_main.run(["check", "--project", project_dir]) + assert rc == 1 + + def test_check_fails_non_directory_pdk_root(self, tmp_path): + pdk_root = tmp_path / "ics55.txt" + pdk_root.write_text("not a dir") + project_dir = _create_valid_project(tmp_path, pdk_root=str(pdk_root)) + rc = cli_main.run(["check", "--project", project_dir]) + assert rc == 1 + + def test_check_fails_unsupported_pdk(self, tmp_path): + project_dir = _create_valid_project(tmp_path) + toml_path = os.path.join(project_dir, "ecc.toml") + with open(toml_path) as f: + content = f.read() + content = content.replace('name = "ics55"', 'name = "unsupported"') + with open(toml_path, "w") as f: + f.write(content) + rc = cli_main.run(["check", "--project", project_dir]) + assert rc == 1 + + def test_check_fails_unsupported_preset(self, tmp_path): + project_dir = _create_valid_project(tmp_path) + toml_path = os.path.join(project_dir, 
"ecc.toml") + with open(toml_path) as f: + content = f.read() + content = content.replace('preset = "rtl2gds"', 'preset = "unknown"') + with open(toml_path, "w") as f: + f.write(content) + rc = cli_main.run(["check", "--project", project_dir]) + assert rc == 1 + + def test_check_fails_non_positive_frequency(self, tmp_path): + project_dir = _create_valid_project(tmp_path) + toml_path = os.path.join(project_dir, "ecc.toml") + with open(toml_path) as f: + content = f.read() + content = content.replace("frequency_mhz = 100.0", "frequency_mhz = -10") + with open(toml_path, "w") as f: + f.write(content) + rc = cli_main.run(["check", "--project", project_dir]) + assert rc == 1 + + def test_check_fails_multiple_rtl(self, tmp_path): + project_dir = _create_valid_project(tmp_path) + toml_path = os.path.join(project_dir, "ecc.toml") + with open(toml_path) as f: + content = f.read() + content = content.replace( + 'rtl = ["rtl/gcd.v"]', + 'rtl = ["rtl/a.v", "rtl/b.v"]', + ) + with open(toml_path, "w") as f: + f.write(content) + rc = cli_main.run(["check", "--project", project_dir]) + assert rc == 1 + + def test_check_fails_non_numeric_frequency(self, tmp_path): + project_dir = _create_valid_project(tmp_path) + toml_path = os.path.join(project_dir, "ecc.toml") + with open(toml_path) as f: + content = f.read() + content = content.replace("frequency_mhz = 100.0", 'frequency_mhz = "fast"') + with open(toml_path, "w") as f: + f.write(content) + rc = cli_main.run(["check", "--project", project_dir]) + assert rc == 1 + + def test_check_json_output(self, tmp_path, monkeypatch, capsys): + project_dir = _create_valid_project(tmp_path) + monkeypatch.setattr( + "chipcompiler.cli.config._validate_pdk_contents", + lambda name, root: None, + ) + rc = cli_main.run(["check", "--project", project_dir, "--json"]) + assert rc == 0 + out = capsys.readouterr().out + data = json.loads(out) + assert "records" in data + assert data["records"][0]["status"] == "checked" + assert 
data["records"][0]["project"] == "gcd" + + +# =========================================================================== +# AC-3: ecc run +# =========================================================================== + + +class TestRun: + def test_run_calls_create_workspace(self, tmp_path, monkeypatch): + project_dir = _create_valid_project(tmp_path) + capture = _install_flow_mocks(monkeypatch) + + rc = cli_main.run(["run", "--project", project_dir]) + assert rc == 0 + assert capture["create_kwargs"]["directory"] == os.path.join( + project_dir, "runs", "default" + ) + + def test_run_adds_flow_steps_when_no_init(self, tmp_path, monkeypatch): + project_dir = _create_valid_project(tmp_path) + _install_flow_mocks(monkeypatch) + + rc = cli_main.run(["run", "--project", project_dir]) + assert rc == 0 + assert len(DummyFlow.instances[0].added_steps) > 0 + + def test_run_calls_create_and_run(self, tmp_path, monkeypatch): + project_dir = _create_valid_project(tmp_path) + _install_flow_mocks(monkeypatch) + + rc = cli_main.run(["run", "--project", project_dir]) + assert rc == 0 + assert DummyFlow.instances[0].create_called + assert DummyFlow.instances[0].run_called + + def test_run_overwrite_removes_existing(self, tmp_path, monkeypatch): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + _create_flow_json(run_dir) + _install_flow_mocks(monkeypatch) + + rc = cli_main.run(["run", "--project", project_dir, "--overwrite"]) + assert rc == 0 + + def test_run_fails_if_flow_json_exists(self, tmp_path): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + _create_flow_json(run_dir) + + rc = cli_main.run(["run", "--project", project_dir]) + assert rc == 1 + + def test_run_fails_on_config_error(self, tmp_path): + project_dir = tmp_path / "bad" + project_dir.mkdir() + (project_dir / "ecc.toml").write_text("[design]\n") + rc = cli_main.run(["run", "--project", str(project_dir)]) 
+ assert rc == 1 + + def test_run_fails_when_create_workspace_returns_none(self, tmp_path, monkeypatch): + project_dir = _create_valid_project(tmp_path) + _install_flow_mocks(monkeypatch) + + def fake_create(**kwargs): + return None + + monkeypatch.setattr("chipcompiler.data.create_workspace", fake_create) + rc = cli_main.run(["run", "--project", project_dir]) + assert rc == 1 + + def test_run_fails_when_run_steps_false(self, tmp_path, monkeypatch): + project_dir = _create_valid_project(tmp_path) + _install_flow_mocks(monkeypatch) + DummyFlow.run_steps_value = False + + rc = cli_main.run(["run", "--project", project_dir]) + assert rc == 1 + + def test_run_json_uses_non_progress_path(self, tmp_path, monkeypatch, capsys): + project_dir = _create_valid_project(tmp_path) + _install_flow_mocks(monkeypatch) + + rc = cli_main.run(["run", "--project", project_dir, "--json"]) + assert rc == 0 + out = capsys.readouterr().out + data = json.loads(out) + assert "records" in data + assert data["records"][0]["status"] == "success" + assert DummyFlow.instances[0].run_called + + def test_run_jsonl_uses_non_progress_path(self, tmp_path, monkeypatch, capsys): + project_dir = _create_valid_project(tmp_path) + _install_flow_mocks(monkeypatch) + + rc = cli_main.run(["run", "--project", project_dir, "--jsonl"]) + assert rc == 0 + out = capsys.readouterr().out + objects = [json.loads(ln) for ln in out.strip().split("\n")] + assert any("status" in obj for obj in objects) + assert DummyFlow.instances[0].run_called + + def test_run_json_no_progress_on_stderr(self, tmp_path, monkeypatch, capsys): + project_dir = _create_valid_project(tmp_path) + _install_flow_mocks(monkeypatch) + + rc = cli_main.run(["run", "--project", project_dir, "--json"]) + assert rc == 0 + err = capsys.readouterr().err + assert "step=" not in err + + def test_run_preserves_final_records(self, tmp_path, monkeypatch, capsys): + project_dir = _create_valid_project(tmp_path) + _install_flow_mocks(monkeypatch) + + rc = 
cli_main.run(["run", "--project", project_dir, "--json"]) + assert rc == 0 + out = capsys.readouterr().out + data = json.loads(out) + record = data["records"][0] + assert record["run"] == "default" + assert record["status"] == "success" + assert "inspect_cmd" in record + assert "metrics_cmd" in record + assert "log_cmd" in record + + +# =========================================================================== +# AC-4: ecc status +# =========================================================================== + + +class TestStatus: + def test_status_reads_flow_json(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + _create_flow_json(run_dir) + + rc = cli_main.run(["status", "--project", project_dir]) + assert rc == 0 + out = capsys.readouterr().out + assert "[status]" in out + assert "synthesis" in out + assert "floorplan" in out + + def test_status_json(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + _create_flow_json(run_dir) + + rc = cli_main.run(["status", "--project", project_dir, "--json"]) + assert rc == 0 + data = json.loads(capsys.readouterr().out) + assert "records" in data + records = data["records"] + assert records[0]["run"] == "default" + assert records[0]["status"] == "success" + step_records = [r for r in records if "step" in r] + assert len(step_records) == 2 + + def test_status_jsonl(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + _create_flow_json(run_dir) + + rc = cli_main.run(["status", "--project", project_dir, "--jsonl"]) + assert rc == 0 + lines = capsys.readouterr().out.strip().split("\n") + objects = [json.loads(ln) for ln in lines] + assert "run" in objects[0] + assert "step" in objects[1] + + def test_status_normalizes_step_names(self, tmp_path, capsys): + project_dir = 
_create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + _create_flow_json(run_dir, [ + {"name": "Synthesis", "tool": "yosys", "state": "Success", "runtime": "0:00:18"}, + {"name": "place", "tool": "dreamplace", "state": "Success", "runtime": "0:01:12"}, + ]) + + rc = cli_main.run(["status", "--project", project_dir]) + assert rc == 0 + out = capsys.readouterr().out + assert "synthesis" in out + assert "placement" in out + + def test_status_missing_run(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + + rc = cli_main.run(["status", "--project", project_dir]) + assert rc == 1 + out = capsys.readouterr().out + assert "missing" in out + assert "ecc run" in out + + def test_status_invalid_flow_json(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + home = os.path.join(run_dir, "home") + os.makedirs(home, exist_ok=True) + with open(os.path.join(home, "flow.json"), "w") as f: + f.write("not valid json{{{") + + rc = cli_main.run(["status", "--project", project_dir]) + assert rc == 1 + + +# =========================================================================== +# AC-5: ecc log +# =========================================================================== + + +class TestLog: + def test_log_step_errors(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + + step_dir = os.path.join(run_dir, "Synthesis_yosys", "log") + os.makedirs(step_dir, exist_ok=True) + with open(os.path.join(step_dir, "synthesis.log"), "w") as f: + f.write("Info: running\nError: bad thing\nWarning: meh\nTraceback: crash\n") + + rc = cli_main.run(["log", "synthesis", "--project", project_dir]) + assert rc == 0 + out = capsys.readouterr().out + assert "Error: bad thing" in out + assert "Traceback: crash" in out + assert "Warning: meh" in out + assert "Info: running" in out + + def 
test_log_step_errors_jsonl(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + + step_dir = os.path.join(run_dir, "Synthesis_yosys", "log") + os.makedirs(step_dir, exist_ok=True) + with open(os.path.join(step_dir, "synthesis.log"), "w") as f: + f.write("Info: running\nError: bad thing\n") + + rc = cli_main.run( + ["log", "synthesis", "--errors", "--jsonl", "--project", project_dir] + ) + assert rc == 0 + objects = [json.loads(ln) for ln in capsys.readouterr().out.strip().split("\n")] + assert any("Error" in obj["line"] for obj in objects) + + def test_log_no_step_shows_locations(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + log_dir = os.path.join(run_dir, "log") + os.makedirs(log_dir, exist_ok=True) + with open(os.path.join(log_dir, "flow.log"), "w") as f: + f.write("log content\n") + + rc = cli_main.run(["log", "--project", project_dir]) + assert rc == 0 + out = capsys.readouterr().out + assert 'ecc log' in out + + def test_log_no_step_discovers_step_logs(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + + step_dir = os.path.join(run_dir, "Synthesis_yosys", "log") + os.makedirs(step_dir, exist_ok=True) + with open(os.path.join(step_dir, "synthesis.log"), "w") as f: + f.write("Error: bad\n") + + rc = cli_main.run(["log", "--project", project_dir]) + assert rc == 0 + out = capsys.readouterr().out + assert "synthesis" in out + assert "Synthesis_yosys/log/synthesis.log" in out + assert "ecc log synthesis" in out + + def test_log_no_step_global_logs_have_disclosure(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + log_dir = os.path.join(run_dir, "log") + os.makedirs(log_dir, exist_ok=True) + with open(os.path.join(log_dir, "flow.log"), "w") as 
f: + f.write("content\n") + + rc = cli_main.run(["log", "--project", project_dir]) + assert rc == 0 + out = capsys.readouterr().out + assert "ecc log" in out + + def test_log_unknown_step(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + os.makedirs(os.path.join(project_dir, "runs", "default"), exist_ok=True) + + rc = cli_main.run(["log", "nonexistent", "--project", project_dir]) + assert rc == 1 + + def test_log_missing_step_logs(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + os.makedirs(os.path.join(run_dir, "Synthesis_yosys"), exist_ok=True) + + rc = cli_main.run(["log", "synthesis", "--project", project_dir]) + assert rc == 1 + + +# =========================================================================== +# AC-6: ecc metrics +# =========================================================================== + + +class TestMetrics: + def test_metrics_reads_step_metrics(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + + analysis_dir = os.path.join(run_dir, "Synthesis_yosys", "analysis") + os.makedirs(analysis_dir, exist_ok=True) + with open(os.path.join(analysis_dir, "Synthesis_metrics.json"), "w") as f: + json.dump({"Cell number": 312, "Cell area": 1840.2}, f) + + rc = cli_main.run(["metrics", "synthesis", "--project", project_dir]) + assert rc == 0 + out = capsys.readouterr().out + assert "cell_number: 312" in out + + def test_metrics_all_steps(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + + for step_dir_name in ["Synthesis_yosys", "Floorplan_ecc"]: + analysis = os.path.join(run_dir, step_dir_name, "analysis") + os.makedirs(analysis, exist_ok=True) + metrics_name = step_dir_name.split("_")[0] + "_metrics.json" + with open(os.path.join(analysis, metrics_name), "w") as f: + json.dump({"Cell 
number": 100}, f) + + rc = cli_main.run(["metrics", "--project", project_dir]) + assert rc == 0 + out = capsys.readouterr().out + assert "synthesis" in out + assert "floorplan" in out + + def test_metrics_json(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + + analysis_dir = os.path.join(run_dir, "Synthesis_yosys", "analysis") + os.makedirs(analysis_dir, exist_ok=True) + with open(os.path.join(analysis_dir, "Synthesis_metrics.json"), "w") as f: + json.dump({"Cell number": 312}, f) + + rc = cli_main.run( + ["metrics", "synthesis", "--json", "--project", project_dir] + ) + assert rc == 0 + data = json.loads(capsys.readouterr().out) + assert "records" in data + assert len(data["records"]) == 1 + assert data["records"][0]["metric"] == "cell_number" + + def test_metrics_jsonl(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + + analysis_dir = os.path.join(run_dir, "Synthesis_yosys", "analysis") + os.makedirs(analysis_dir, exist_ok=True) + with open(os.path.join(analysis_dir, "Synthesis_metrics.json"), "w") as f: + json.dump({"Cell number": 312, "Cell area": 1840.2}, f) + + rc = cli_main.run( + ["metrics", "synthesis", "--jsonl", "--project", project_dir] + ) + assert rc == 0 + objects = [json.loads(ln) for ln in capsys.readouterr().out.strip().split("\n")] + assert len(objects) == 2 + + def test_metrics_normalizes_known_keys(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + + analysis_dir = os.path.join(run_dir, "CTS_ecc", "analysis") + os.makedirs(analysis_dir, exist_ok=True) + with open(os.path.join(analysis_dir, "CTS_metrics.json"), "w") as f: + json.dump({"Frequency [MHz]": 450.0, "Die area [μm^2]": "10000.000"}, f) + + rc = cli_main.run(["metrics", "cts", "--project", project_dir]) + assert rc == 0 + out = 
capsys.readouterr().out + assert "frequency_mhz: 450.0" in out + assert "die_area_um2" in out + + def test_metrics_unknown_step(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + os.makedirs(os.path.join(project_dir, "runs", "default"), exist_ok=True) + + rc = cli_main.run(["metrics", "nonexistent", "--project", project_dir]) + assert rc == 1 + + def test_metrics_missing_file(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + os.makedirs(os.path.join(run_dir, "CTS_ecc", "analysis"), exist_ok=True) + + rc = cli_main.run(["metrics", "cts", "--project", project_dir]) + assert rc == 1 + out = capsys.readouterr().out + assert "missing" in out + assert "ecc log cts" in out + + def test_metrics_json_unknown_step(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + os.makedirs(os.path.join(project_dir, "runs", "default"), exist_ok=True) + + rc = cli_main.run(["metrics", "nonexistent", "--json", "--project", project_dir]) + assert rc == 1 + data = json.loads(capsys.readouterr().out) + assert data["records"][0]["status"] == "unknown_step" + assert data["records"][0]["step"] == "nonexistent" + + def test_metrics_json_missing_file(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + os.makedirs(os.path.join(run_dir, "CTS_ecc", "analysis"), exist_ok=True) + + rc = cli_main.run(["metrics", "cts", "--json", "--project", project_dir]) + assert rc == 1 + data = json.loads(capsys.readouterr().out) + assert data["records"][0]["status"] == "missing" + + def test_metrics_jsonl_unknown_step(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + os.makedirs(os.path.join(project_dir, "runs", "default"), exist_ok=True) + + rc = cli_main.run(["metrics", "nonexistent", "--jsonl", "--project", project_dir]) + assert rc == 1 + objects = [json.loads(ln) for ln in 
capsys.readouterr().out.strip().split("\n")] + assert objects[0]["status"] == "unknown_step" + + +# =========================================================================== +# AC-7: Disclosure commands on all output +# =========================================================================== + + +class TestDisclosureCommands: + def test_init_lines_have_disclosure(self, tmp_path, capsys): + project_path = str(tmp_path / "disctest") + rc = cli_main.run(["init", project_path]) + assert rc == 0 + out = capsys.readouterr().out + assert _has_disclosure(out) + + def test_check_lines_have_disclosure(self, tmp_path, monkeypatch, capsys): + project_dir = _create_valid_project(tmp_path) + monkeypatch.setattr( + "chipcompiler.cli.config._validate_pdk_contents", + lambda name, root: None, + ) + rc = cli_main.run(["check", "--project", project_dir]) + assert rc == 0 + out = capsys.readouterr().out + assert _has_disclosure(out) + + def test_status_lines_have_disclosure(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + _create_flow_json(run_dir) + + rc = cli_main.run(["status", "--project", project_dir]) + assert rc == 0 + out = capsys.readouterr().out + assert _has_disclosure(out) + + def test_metrics_lines_have_disclosure(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + + analysis_dir = os.path.join(run_dir, "Synthesis_yosys", "analysis") + os.makedirs(analysis_dir, exist_ok=True) + with open(os.path.join(analysis_dir, "Synthesis_metrics.json"), "w") as f: + json.dump({"Cell number": 312}, f) + + rc = cli_main.run(["metrics", "synthesis", "--project", project_dir]) + assert rc == 0 + out = capsys.readouterr().out + assert _has_disclosure(out) + + def test_log_error_lines_have_disclosure(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", 
"default") + + step_dir = os.path.join(run_dir, "Synthesis_yosys", "log") + os.makedirs(step_dir, exist_ok=True) + with open(os.path.join(step_dir, "synthesis.log"), "w") as f: + f.write("Error: something failed\n") + + rc = cli_main.run( + ["log", "synthesis", "--project", project_dir] + ) + assert rc == 0 + out = capsys.readouterr().out + assert "ecc log synthesis" in out + + def test_project_arg_propagated_to_disclosure(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + _create_flow_json(run_dir) + + rc = cli_main.run(["status", "--project", project_dir]) + assert rc == 0 + out = capsys.readouterr().out + assert f"--project {project_dir}" in out + + def test_output_lowercase_tokens(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + _create_flow_json(run_dir, [ + {"name": "Synthesis", "tool": "yosys", "state": "Success", "runtime": "0:00:01"}, + ]) + + rc = cli_main.run(["status", "--project", project_dir]) + assert rc == 0 + out = capsys.readouterr().out + assert "synthesis" in out + assert "success" in out + + +# =========================================================================== +# AC-8: Packaging +# =========================================================================== + + +class TestPackaging: + def test_ecc_console_script_in_pyproject(self): + import tomllib + + project_root = os.path.dirname( + os.path.dirname(os.path.dirname(__file__)) + ) + pyproject = os.path.join(project_root, "pyproject.toml") + with open(pyproject, "rb") as f: + data = tomllib.load(f) + assert data["project"]["scripts"]["ecc"] == "chipcompiler.cli.main:main" + + +# =========================================================================== +# Edge cases +# =========================================================================== + + +class TestEdgeCases: + def test_no_command_returns_nonzero(self, capsys): + 
rc = cli_main.run([]) + assert rc == 1 + + +class TestCheckFilelistValidation: + def test_check_fails_filelist_with_missing_sources(self, tmp_path, monkeypatch): + from chipcompiler.cli.config import _validate_pdk_contents + monkeypatch.setattr(_validate_pdk_contents, "__wrapped__", + lambda *a, **k: None, raising=False) + monkeypatch.setattr("chipcompiler.cli.config._validate_pdk_contents", + lambda *a, **k: None) + + project_dir = tmp_path / "flproj" + project_dir.mkdir() + (project_dir / "rtl").mkdir() + (project_dir / "rtl" / "gcd.v").write_text("module gcd; endmodule") + + filelist = project_dir / "rtl" / "files.f" + filelist.write_text("gcd.v\nmissing.v\nother_missing.v\n") + + pdk_root = tmp_path / "ics55" + pdk_root.mkdir() + + toml = f'''[design] +name = "gcd" +top = "gcd" +rtl = ["rtl/files.f"] +clock_port = "clk" +frequency_mhz = 100.0 + +[pdk] +name = "ics55" +root = "{pdk_root}" + +[flow] +preset = "rtl2gds" +run = "default" +''' + (project_dir / "ecc.toml").write_text(toml) + rc = cli_main.run(["check", "--project", str(project_dir)]) + assert rc == 1 + + def test_check_fails_invalid_filelist_directive(self, tmp_path, monkeypatch): + from chipcompiler.cli.config import _validate_pdk_contents + monkeypatch.setattr("chipcompiler.cli.config._validate_pdk_contents", + lambda *a, **k: None) + + project_dir = tmp_path / "flproj2" + project_dir.mkdir() + (project_dir / "rtl").mkdir() + + filelist = project_dir / "rtl" / "files.f" + filelist.write_text("gcd.v\n-f other.f\n") + + pdk_root = tmp_path / "ics55" + pdk_root.mkdir() + + toml = f'''[design] +name = "gcd" +top = "gcd" +rtl = ["rtl/files.f"] +clock_port = "clk" +frequency_mhz = 100.0 + +[pdk] +name = "ics55" +root = "{pdk_root}" + +[flow] +preset = "rtl2gds" +run = "default" +''' + (project_dir / "ecc.toml").write_text(toml) + rc = cli_main.run(["check", "--project", str(project_dir)]) + assert rc == 1 + + +class TestRendererCmdStripping: + def test_text_strips_cmd_suffix(self): + from 
chipcompiler.cli.render import render_text + from io import StringIO + buf = StringIO() + render_text(({"inspect_cmd": "ecc status", "log_cmd": "ecc log"},), file=buf) + line = buf.getvalue().strip() + assert "inspect=" in line + assert "log=" in line + assert "inspect_cmd=" not in line + assert "log_cmd=" not in line + + def test_json_preserves_cmd_keys(self): + from chipcompiler.cli.render import render_json + from chipcompiler.cli.types import CommandResult + from io import StringIO + buf = StringIO() + result = CommandResult(records=({"inspect_cmd": "ecc status", "log_cmd": "ecc log"},)) + render_json(result, file=buf) + data = json.loads(buf.getvalue()) + assert "inspect_cmd" in data["records"][0] + assert "log_cmd" in data["records"][0] + + def test_jsonl_preserves_cmd_keys(self): + from chipcompiler.cli.render import render_jsonl + from chipcompiler.cli.types import CommandResult + from io import StringIO + buf = StringIO() + result = CommandResult(records=({"inspect_cmd": "ecc status", "log_cmd": "ecc log"},)) + render_jsonl(result, file=buf) + record = json.loads(buf.getvalue().strip()) + assert "inspect_cmd" in record + assert "log_cmd" in record + + +class TestMissingConfigErrorRecord: + def test_check_missing_config_has_kind_error_json(self, tmp_path, capsys): + rc = cli_main.run(["check", "--project", str(tmp_path), "--json"]) + assert rc == 1 + data = json.loads(capsys.readouterr().out) + record = data["records"][0] + assert record["kind"] == "error" + assert record["error"] == "missing_config" + + def test_check_missing_config_has_kind_error_text(self, tmp_path, capsys): + rc = cli_main.run(["check", "--project", str(tmp_path)]) + assert rc == 1 + out = capsys.readouterr().out + assert "[error]" in out + assert "missing_config" in out + + def test_check_missing_config_has_disclosure_command(self, tmp_path, capsys): + rc = cli_main.run(["check", "--project", str(tmp_path), "--json"]) + assert rc == 1 + data = json.loads(capsys.readouterr().out) + 
record = data["records"][0] + assert "inspect" in record or "inspect_cmd" in record + + +# =========================================================================== +# Log output refactoring integration tests +# =========================================================================== + + +class TestLogDefaultShowsAllContent: + """AC-1: Default ecc log renders complete log content.""" + + def test_default_shows_all_lines(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + step_dir = os.path.join(run_dir, "Synthesis_yosys", "log") + os.makedirs(step_dir, exist_ok=True) + with open(os.path.join(step_dir, "synthesis.log"), "w") as f: + f.write("INFO: starting\nsome output\nError: bad\nWarning: meh\n") + + rc = cli_main.run(["log", "synthesis", "--project", project_dir]) + assert rc == 0 + out = capsys.readouterr().out + assert "INFO: starting" in out + assert "some output" in out + assert "Error: bad" in out + assert "Warning: meh" in out + + def test_default_includes_header(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + step_dir = os.path.join(run_dir, "Synthesis_yosys", "log") + os.makedirs(step_dir, exist_ok=True) + with open(os.path.join(step_dir, "synthesis.log"), "w") as f: + f.write("ok\n") + + rc = cli_main.run(["log", "synthesis", "--project", project_dir]) + assert rc == 0 + out = capsys.readouterr().out + assert "[log]" in out + assert "step=synthesis" in out + assert "source:" in out + + def test_blank_lines_preserved(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + step_dir = os.path.join(run_dir, "Synthesis_yosys", "log") + os.makedirs(step_dir, exist_ok=True) + with open(os.path.join(step_dir, "synthesis.log"), "w") as f: + f.write("line1\n\nline3\n") + + rc = cli_main.run(["log", "synthesis", "--project", 
project_dir]) + assert rc == 0 + out = capsys.readouterr().out + assert "line1" in out + assert "line3" in out + + +class TestLogTracebackComplete: + """AC-2: Python traceback blocks remain complete and contiguous.""" + + def test_traceback_complete_in_default_output(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + step_dir = os.path.join(run_dir, "Synthesis_yosys", "log") + os.makedirs(step_dir, exist_ok=True) + with open(os.path.join(step_dir, "synthesis.log"), "w") as f: + f.write( + "INFO: before\n" + "Traceback (most recent call last):\n" + ' File "app.py", line 42, in run\n' + " result = compute()\n" + " ^^^^^^^^^\n" + "ValueError: invalid value\n" + "INFO: after\n" + ) + + rc = cli_main.run(["log", "synthesis", "--project", project_dir]) + assert rc == 0 + out = capsys.readouterr().out + assert "Traceback (most recent call last):" in out + assert 'File "app.py", line 42' in out + assert "result = compute()" in out + assert "^^^^^^^^^" in out + assert "ValueError: invalid value" in out + + def test_traceback_complete_in_jsonl(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + step_dir = os.path.join(run_dir, "Synthesis_yosys", "log") + os.makedirs(step_dir, exist_ok=True) + with open(os.path.join(step_dir, "synthesis.log"), "w") as f: + f.write( + "Traceback (most recent call last):\n" + ' File "a.py", line 1\n' + "ValueError: fail\n" + ) + + rc = cli_main.run(["log", "synthesis", "--jsonl", "--project", project_dir]) + assert rc == 0 + objects = [json.loads(ln) for ln in capsys.readouterr().out.strip().split("\n")] + assert objects[0]["kind"] == "traceback" + assert objects[1]["kind"] == "traceback" + assert objects[2]["kind"] == "error" + + def test_keyboard_interrupt_jsonl_classified_as_error(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = 
os.path.join(project_dir, "runs", "default") + step_dir = os.path.join(run_dir, "Synthesis_yosys", "log") + os.makedirs(step_dir, exist_ok=True) + with open(os.path.join(step_dir, "synthesis.log"), "w") as f: + f.write( + "Traceback (most recent call last):\n" + ' File "a.py", line 1\n' + "KeyboardInterrupt\n" + ) + + rc = cli_main.run(["log", "synthesis", "--jsonl", "--project", project_dir]) + assert rc == 0 + objects = [json.loads(ln) for ln in capsys.readouterr().out.strip().split("\n")] + assert objects[0]["kind"] == "traceback" + assert objects[1]["kind"] == "traceback" + assert objects[2]["kind"] == "error" + assert objects[2]["line"] == "KeyboardInterrupt" + + +class TestLogPlainMode: + """AC-5: --plain emits full-content stable line records.""" + + def test_plain_has_all_fields(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + step_dir = os.path.join(run_dir, "Synthesis_yosys", "log") + os.makedirs(step_dir, exist_ok=True) + with open(os.path.join(step_dir, "synthesis.log"), "w") as f: + f.write("Error: bad\nINFO: ok\n") + + rc = cli_main.run(["log", "synthesis", "--plain", "--project", project_dir]) + assert rc == 0 + out = capsys.readouterr().out + lines = [l for l in out.strip().split("\n") if l.strip()] + assert len(lines) == 2 + assert "step=synthesis" in lines[0] + assert "line_no=1" in lines[0] + assert "kind=error" in lines[0] + assert "line_no=2" in lines[1] + assert "kind=info" in lines[1] + + def test_plain_no_ansi(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + step_dir = os.path.join(run_dir, "Synthesis_yosys", "log") + os.makedirs(step_dir, exist_ok=True) + with open(os.path.join(step_dir, "synthesis.log"), "w") as f: + f.write("Error: bad\n") + + rc = cli_main.run(["log", "synthesis", "--plain", "--project", project_dir]) + assert rc == 0 + out = capsys.readouterr().out + assert 
"\x1b[" not in out + + def test_plain_stable_quoting_for_special_chars(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + step_dir = os.path.join(run_dir, "Synthesis_yosys", "log") + os.makedirs(step_dir, exist_ok=True) + with open(os.path.join(step_dir, "synthesis.log"), "w") as f: + f.write('key=value path\\to\\file "quoted text"\n') + + rc = cli_main.run(["log", "synthesis", "--plain", "--project", project_dir]) + assert rc == 0 + out = capsys.readouterr().out + assert "\x1b[" not in out + lines = [l for l in out.strip().split("\n") if l.strip()] + assert len(lines) == 1 + assert 'line="key=value' in lines[0] + assert 'inspect_cmd=' in lines[0] + + +class TestLogJsonlMode: + """AC-6: --jsonl emits full-content structured log objects.""" + + def test_jsonl_per_line_objects(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + step_dir = os.path.join(run_dir, "Synthesis_yosys", "log") + os.makedirs(step_dir, exist_ok=True) + with open(os.path.join(step_dir, "synthesis.log"), "w") as f: + f.write("Error: bad\nINFO: ok\nplain\n") + + rc = cli_main.run(["log", "synthesis", "--jsonl", "--project", project_dir]) + assert rc == 0 + objects = [json.loads(ln) for ln in capsys.readouterr().out.strip().split("\n")] + assert len(objects) == 3 + for obj in objects: + assert "step" in obj + assert "source" in obj + assert "line_no" in obj + assert "kind" in obj + assert "line" in obj + assert "inspect_cmd" in obj + + def test_jsonl_no_ansi(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + step_dir = os.path.join(run_dir, "Synthesis_yosys", "log") + os.makedirs(step_dir, exist_ok=True) + with open(os.path.join(step_dir, "synthesis.log"), "w") as f: + f.write("Error: bad\n") + + rc = cli_main.run(["log", "synthesis", "--jsonl", "--project", 
project_dir]) + assert rc == 0 + out = capsys.readouterr().out + assert "\x1b[" not in out + + +class TestLogJsonMode: + """ecc log --json must produce JSON envelope output.""" + + def test_json_step_output(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + step_dir = os.path.join(run_dir, "Synthesis_yosys", "log") + os.makedirs(step_dir, exist_ok=True) + with open(os.path.join(step_dir, "synthesis.log"), "w") as f: + f.write("Error: bad\nINFO: ok\n") + + rc = cli_main.run(["log", "synthesis", "--json", "--project", project_dir]) + assert rc == 0 + data = json.loads(capsys.readouterr().out) + assert "records" in data + assert len(data["records"]) == 2 + + def test_json_listing_output(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + step_dir = os.path.join(run_dir, "Synthesis_yosys", "log") + os.makedirs(step_dir, exist_ok=True) + with open(os.path.join(step_dir, "synthesis.log"), "w") as f: + f.write("content\n") + + rc = cli_main.run(["log", "--json", "--project", project_dir]) + assert rc == 0 + data = json.loads(capsys.readouterr().out) + assert "records" in data + + +class TestLogListingMode: + """AC-7: ecc log without step lists available logs.""" + + def test_listing_shows_logs(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + step_dir = os.path.join(run_dir, "Synthesis_yosys", "log") + os.makedirs(step_dir, exist_ok=True) + with open(os.path.join(step_dir, "synthesis.log"), "w") as f: + f.write("content\n") + + rc = cli_main.run(["log", "--project", project_dir]) + assert rc == 0 + out = capsys.readouterr().out + assert "synthesis" in out + assert "ecc log synthesis" in out + + def test_listing_no_logs_returns_no_log_status(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = 
os.path.join(project_dir, "runs", "default") + os.makedirs(run_dir, exist_ok=True) + + rc = cli_main.run(["log", "--project", project_dir]) + assert rc == 0 + out = capsys.readouterr().out + assert "no_logs" in out + + def test_listing_jsonl_records(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + step_dir = os.path.join(run_dir, "Synthesis_yosys", "log") + os.makedirs(step_dir, exist_ok=True) + with open(os.path.join(step_dir, "synthesis.log"), "w") as f: + f.write("content\n") + + rc = cli_main.run(["log", "--jsonl", "--project", project_dir]) + assert rc == 0 + objects = [json.loads(ln) for ln in capsys.readouterr().out.strip().split("\n") if ln.strip()] + assert any("step" in o for o in objects) + + def test_listing_plain_step_logs(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + step_dir = os.path.join(run_dir, "Synthesis_yosys", "log") + os.makedirs(step_dir, exist_ok=True) + with open(os.path.join(step_dir, "synthesis.log"), "w") as f: + f.write("content\n") + + rc = cli_main.run(["log", "--plain", "--project", project_dir]) + assert rc == 0 + out = capsys.readouterr().out + assert "step=synthesis" in out + assert "source=" in out + assert "inspect_cmd=" in out + assert "line_no=" not in out + + def test_listing_plain_run_level_logs(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + log_dir = os.path.join(run_dir, "log") + os.makedirs(log_dir, exist_ok=True) + with open(os.path.join(log_dir, "flow.log"), "w") as f: + f.write("log content\n") + + rc = cli_main.run(["log", "--plain", "--project", project_dir]) + assert rc == 0 + out = capsys.readouterr().out + assert "log=" in out + assert "inspect_cmd=" in out + assert "line_no=" not in out + assert "kind=" not in out + + +class TestLogErrorCases: + """AC-9: Error 
cases are structured and readable.""" + + def test_unknown_step_returns_nonzero(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + os.makedirs(run_dir, exist_ok=True) + + rc = cli_main.run(["log", "nonexistent", "--project", project_dir]) + assert rc == 1 + out = capsys.readouterr().out + assert "unknown_step" in out + + def test_unknown_step_json(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + os.makedirs(run_dir, exist_ok=True) + + rc = cli_main.run(["log", "nonexistent", "--jsonl", "--project", project_dir]) + assert rc == 1 + record = json.loads(capsys.readouterr().out.strip()) + assert record["status"] == "unknown_step" + + def test_known_step_no_logs_returns_nonzero(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + os.makedirs(os.path.join(run_dir, "Synthesis_yosys"), exist_ok=True) + + rc = cli_main.run(["log", "synthesis", "--project", project_dir]) + assert rc == 1 + out = capsys.readouterr().out + assert "missing" in out + + def test_known_step_no_logs_json(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + os.makedirs(os.path.join(run_dir, "Synthesis_yosys"), exist_ok=True) + + rc = cli_main.run(["log", "synthesis", "--jsonl", "--project", project_dir]) + assert rc == 1 + record = json.loads(capsys.readouterr().out.strip()) + assert record["log_status"] == "missing" + + def test_empty_log_returns_zero(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + step_dir = os.path.join(run_dir, "Synthesis_yosys", "log") + os.makedirs(step_dir, exist_ok=True) + with open(os.path.join(step_dir, "synthesis.log"), "w") as f: + f.write("") + + rc = cli_main.run(["log", 
"synthesis", "--project", project_dir]) + assert rc == 0 + out = capsys.readouterr().out + assert "empty" in out + + +class TestLogNoErrorsInDisclosure: + """AC-8: Disclosure commands do not include --errors.""" + + def test_listing_disclosure_no_errors(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + step_dir = os.path.join(run_dir, "Synthesis_yosys", "log") + os.makedirs(step_dir, exist_ok=True) + with open(os.path.join(step_dir, "synthesis.log"), "w") as f: + f.write("ok\n") + + rc = cli_main.run(["log", "--jsonl", "--project", project_dir]) + assert rc == 0 + out = capsys.readouterr().out + assert "--errors" not in out + + def test_step_log_inspect_no_errors(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + step_dir = os.path.join(run_dir, "Synthesis_yosys", "log") + os.makedirs(step_dir, exist_ok=True) + with open(os.path.join(step_dir, "synthesis.log"), "w") as f: + f.write("ok\n") + + rc = cli_main.run(["log", "synthesis", "--jsonl", "--project", project_dir]) + assert rc == 0 + out = capsys.readouterr().out + assert "--errors" not in out + + def test_status_disclosure_no_errors(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + _create_flow_json(run_dir) + + rc = cli_main.run(["status", "--project", project_dir]) + assert rc == 0 + out = capsys.readouterr().out + assert "--errors" not in out + + def test_metrics_disclosure_no_errors(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + step_dir = os.path.join(run_dir, "Synthesis_yosys", "log") + os.makedirs(step_dir, exist_ok=True) + analysis_dir = os.path.join(run_dir, "Synthesis_yosys", "analysis") + os.makedirs(analysis_dir, exist_ok=True) + with open(os.path.join(analysis_dir, 
"Synthesis_metrics.json"), "w") as f: + json.dump({"Cell number": 100}, f) + + rc = cli_main.run(["metrics", "synthesis", "--project", project_dir]) + assert rc == 0 + out = capsys.readouterr().out + assert "--errors" not in out + + def test_artifacts_log_disclosure_no_errors(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + _create_flow_json(run_dir) + log_dir = os.path.join(run_dir, "CTS_ecc", "log") + os.makedirs(log_dir, exist_ok=True) + with open(os.path.join(log_dir, "cts.log"), "w") as f: + f.write("log content\n") + + rc = cli_main.run(["artifacts", "cts", "--project", project_dir]) + assert rc == 0 + out = capsys.readouterr().out + assert "--errors" not in out + + +class TestLogUnreadableFile: + """AC-9: Unreadable log files return non-zero with OS error.""" + + def test_unreadable_log_returns_nonzero(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + step_dir = os.path.join(run_dir, "Synthesis_yosys", "log") + os.makedirs(step_dir, exist_ok=True) + log_path = os.path.join(step_dir, "synthesis.log") + with open(log_path, "w") as f: + f.write("content\n") + os.chmod(log_path, 0o000) + + try: + rc = cli_main.run(["log", "synthesis", "--project", project_dir]) + assert rc == 1 + out = capsys.readouterr().out + assert "unreadable" in out + finally: + os.chmod(log_path, 0o644) + + def test_unreadable_log_jsonl(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + step_dir = os.path.join(run_dir, "Synthesis_yosys", "log") + os.makedirs(step_dir, exist_ok=True) + log_path = os.path.join(step_dir, "synthesis.log") + with open(log_path, "w") as f: + f.write("content\n") + os.chmod(log_path, 0o000) + + try: + rc = cli_main.run(["log", "synthesis", "--jsonl", "--project", project_dir]) + assert rc == 1 + record = 
json.loads(capsys.readouterr().out.strip()) + assert record["log_status"] == "unreadable" + assert "source" in record + assert "error" in record + finally: + os.chmod(log_path, 0o644) + + +class TestLogMultiSource: + """AC-1: Multiple log files per step shown with separate source headers.""" + + def test_multi_source_pretty(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + step_dir = os.path.join(run_dir, "Synthesis_yosys", "log") + os.makedirs(step_dir, exist_ok=True) + with open(os.path.join(step_dir, "a.log"), "w") as f: + f.write("from A\n") + with open(os.path.join(step_dir, "b.log"), "w") as f: + f.write("from B\n") + + rc = cli_main.run(["log", "synthesis", "--project", project_dir]) + assert rc == 0 + out = capsys.readouterr().out + assert "a.log" in out + assert "b.log" in out + assert "from A" in out + assert "from B" in out + + +class TestLogErrorsDeprecation: + """AC-8: --errors is deprecated with visible notice.""" + + def test_errors_hidden_from_help(self, tmp_path, capsys): + with pytest.raises(SystemExit): + cli_main.run(["log", "--help"]) + + def test_errors_emits_deprecation_warning(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + step_dir = os.path.join(run_dir, "Synthesis_yosys", "log") + os.makedirs(step_dir, exist_ok=True) + with open(os.path.join(step_dir, "synthesis.log"), "w") as f: + f.write("ok\n") + + rc = cli_main.run(["log", "synthesis", "--errors", "--project", project_dir]) + assert rc == 0 + err = capsys.readouterr().err + assert "deprecated" in err + + def test_errors_jsonl_still_full_records(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + step_dir = os.path.join(run_dir, "Synthesis_yosys", "log") + os.makedirs(step_dir, exist_ok=True) + with open(os.path.join(step_dir, "synthesis.log"), 
"w") as f: + f.write("INFO: running\nError: bad\n") + + rc = cli_main.run( + ["log", "synthesis", "--errors", "--jsonl", "--project", project_dir] + ) + assert rc == 0 + objects = [json.loads(ln) for ln in capsys.readouterr().out.strip().split("\n")] + assert len(objects) == 2 + assert objects[0]["kind"] == "info" + assert objects[1]["kind"] == "error" + assert "\x1b[" not in capsys.readouterr().out + + +class TestCorruptFlowJson: + """Non-dict flow.json must be reported as corrupt, not missing.""" + + def test_array_flow_json_is_corrupt(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + home = os.path.join(run_dir, "home") + os.makedirs(home, exist_ok=True) + with open(os.path.join(home, "flow.json"), "w") as f: + json.dump([], f) + + rc = cli_main.run(["status", "--json", "--project", project_dir]) + assert rc == 1 + data = json.loads(capsys.readouterr().out) + assert data["records"][0].get("status") == "corrupt" + + def test_string_flow_json_is_corrupt(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + home = os.path.join(run_dir, "home") + os.makedirs(home, exist_ok=True) + with open(os.path.join(home, "flow.json"), "w") as f: + json.dump("bad", f) + + rc = cli_main.run(["status", "--json", "--project", project_dir]) + assert rc == 1 + data = json.loads(capsys.readouterr().out) + assert data["records"][0].get("status") == "corrupt" + + +class TestFlowOnlyStepMetrics: + """Step in flow.json but no step directory should report missing, not unknown.""" + + def test_metrics_flow_only_step_is_missing(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + home = os.path.join(run_dir, "home") + os.makedirs(home, exist_ok=True) + with open(os.path.join(home, "flow.json"), "w") as f: + json.dump({"steps": [{"name": "CTS", "state": 
"unstart"}]}, f) + + rc = cli_main.run(["metrics", "cts", "--json", "--project", project_dir]) + assert rc == 1 + data = json.loads(capsys.readouterr().out) + assert data["records"][0].get("status") == "missing" + assert data["records"][0].get("status") != "unknown_step" + + +class TestLogListingFlowOrder: + """Listing step logs follow flow.json order, not alphabetical.""" + + def _setup_steps_with_flow(self, tmp_path, step_names, extra_dirs=None): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + _create_flow_json(run_dir, steps=[ + {"name": n, "tool": "ecc", "state": "Success"} for n in step_names + ]) + all_dirs = list(step_names) + (extra_dirs or []) + tool_map = { + "Synthesis": "yosys", "Floorplan": "ecc", "fixFanout": "ecc", + "place": "ecc", "CTS": "ecc", "legalization": "ecc", + "route": "ecc", "drc": "ecc", "filler": "ecc", + } + for name in all_dirs: + tool = tool_map.get(name, "ecc") + step_dir = os.path.join(run_dir, f"{name}_{tool}", "log") + os.makedirs(step_dir, exist_ok=True) + with open(os.path.join(step_dir, f"{name.lower()}.log"), "w") as f: + f.write(f"log from {name}\n") + return project_dir + + def test_steps_follow_flow_json_order(self, tmp_path, capsys): + project_dir = self._setup_steps_with_flow( + tmp_path, + ["Synthesis", "Floorplan", "CTS"], + ) + rc = cli_main.run(["log", "--jsonl", "--project", project_dir]) + assert rc == 0 + records = [json.loads(ln) for ln in capsys.readouterr().out.strip().split("\n") if ln.strip()] + steps = [r.get("step") for r in records if "step" in r] + assert steps == ["synthesis", "floorplan", "cts"] + + def test_run_level_logs_before_step_logs(self, tmp_path, capsys): + project_dir = self._setup_steps_with_flow( + tmp_path, + ["Synthesis", "CTS"], + ) + run_dir = os.path.join(project_dir, "runs", "default") + log_dir = os.path.join(run_dir, "log") + os.makedirs(log_dir, exist_ok=True) + with open(os.path.join(log_dir, "flow.log"), "w") as f: + 
f.write("run-level log\n") + rc = cli_main.run(["log", "--jsonl", "--project", project_dir]) + assert rc == 0 + records = [json.loads(ln) for ln in capsys.readouterr().out.strip().split("\n") if ln.strip()] + run_indices = [i for i, r in enumerate(records) if "log" in r and "step" not in r] + step_indices = [i for i, r in enumerate(records) if "step" in r] + assert run_indices, "expected at least one run-level record" + assert step_indices, "expected at least one step record" + assert max(run_indices) < min(step_indices) + + def test_extra_steps_after_flow_steps(self, tmp_path, capsys): + project_dir = self._setup_steps_with_flow( + tmp_path, + ["Synthesis", "CTS"], + extra_dirs=["Floorplan"], + ) + rc = cli_main.run(["log", "--jsonl", "--project", project_dir]) + assert rc == 0 + records = [json.loads(ln) for ln in capsys.readouterr().out.strip().split("\n") if ln.strip()] + steps = [r.get("step") for r in records if "step" in r] + synth_idx = steps.index("synthesis") + cts_idx = steps.index("cts") + fp_idx = steps.index("floorplan") + assert synth_idx < cts_idx + assert cts_idx < fp_idx + + def test_extra_steps_sorted_alphabetically(self, tmp_path, capsys): + project_dir = self._setup_steps_with_flow( + tmp_path, + ["Synthesis"], + extra_dirs=["Floorplan", "CTS"], + ) + rc = cli_main.run(["log", "--jsonl", "--project", project_dir]) + assert rc == 0 + records = [json.loads(ln) for ln in capsys.readouterr().out.strip().split("\n") if ln.strip()] + steps = [r.get("step") for r in records if "step" in r] + extras = [s for s in steps if s != "synthesis"] + assert extras == sorted(extras) + + def test_missing_flow_json_falls_back_to_alphabetical(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + os.makedirs(run_dir, exist_ok=True) + for name in ["CTS_ecc", "Floorplan_ecc", "Synthesis_yosys"]: + step_dir = os.path.join(run_dir, name, "log") + os.makedirs(step_dir, exist_ok=True) + with 
open(os.path.join(step_dir, "test.log"), "w") as f: + f.write("content\n") + rc = cli_main.run(["log", "--jsonl", "--project", project_dir]) + assert rc == 0 + records = [json.loads(ln) for ln in capsys.readouterr().out.strip().split("\n") if ln.strip()] + steps = [r.get("step") for r in records if "step" in r] + assert steps == sorted(steps) + + def test_corrupt_flow_json_falls_back_to_alphabetical(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + home = os.path.join(run_dir, "home") + os.makedirs(home, exist_ok=True) + with open(os.path.join(home, "flow.json"), "w") as f: + f.write("not valid json{{{") + for name in ["CTS_ecc", "Floorplan_ecc", "Synthesis_yosys"]: + step_dir = os.path.join(run_dir, name, "log") + os.makedirs(step_dir, exist_ok=True) + with open(os.path.join(step_dir, "test.log"), "w") as f: + f.write("content\n") + rc = cli_main.run(["log", "--jsonl", "--project", project_dir]) + assert rc == 0 + records = [json.loads(ln) for ln in capsys.readouterr().out.strip().split("\n") if ln.strip()] + steps = [r.get("step") for r in records if "step" in r] + assert steps == sorted(steps) + + +class TestLogListingTailPreview: + """Tail preview shows up to 10 lines in default pretty text mode.""" + + def test_listing_shows_tail_lines(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + step_dir = os.path.join(run_dir, "Synthesis_yosys", "log") + os.makedirs(step_dir, exist_ok=True) + log_path = os.path.join(step_dir, "synthesis.log") + lines = [f"log line {i}" for i in range(15)] + with open(log_path, "w") as f: + f.write("\n".join(lines) + "\n") + rc = cli_main.run(["log", "--project", project_dir]) + assert rc == 0 + out = capsys.readouterr().out + assert "log line 14" in out + assert "tail:" in out + + def test_listing_tail_max_10_lines(self, tmp_path, capsys): + project_dir = 
_create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + step_dir = os.path.join(run_dir, "Synthesis_yosys", "log") + os.makedirs(step_dir, exist_ok=True) + log_path = os.path.join(step_dir, "synthesis.log") + lines = [f"line {i}" for i in range(20)] + with open(log_path, "w") as f: + f.write("\n".join(lines) + "\n") + rc = cli_main.run(["log", "--project", project_dir]) + assert rc == 0 + out = capsys.readouterr().out + output_lines = out.split("\n") + tail_header_idx = next(i for i, l in enumerate(output_lines) if l.strip() == "tail:") + tail_content = [l for l in output_lines[tail_header_idx + 1:] if l.startswith(" ") and "inspect:" not in l] + assert len(tail_content) == 10 + + def test_empty_log_no_tail_block(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + step_dir = os.path.join(run_dir, "Synthesis_yosys", "log") + os.makedirs(step_dir, exist_ok=True) + log_path = os.path.join(step_dir, "synthesis.log") + with open(log_path, "w") as f: + f.write("") + rc = cli_main.run(["log", "--project", project_dir]) + assert rc == 0 + out = capsys.readouterr().out + assert "tail:" not in out + assert "inspect:" in out + + def test_inspect_visible_below_tail(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + step_dir = os.path.join(run_dir, "Synthesis_yosys", "log") + os.makedirs(step_dir, exist_ok=True) + log_path = os.path.join(step_dir, "synthesis.log") + with open(log_path, "w") as f: + f.write("content line\n") + rc = cli_main.run(["log", "--project", project_dir]) + assert rc == 0 + out = capsys.readouterr().out + tail_pos = out.find("tail:") + inspect_pos = out.find("inspect:") + assert tail_pos < inspect_pos + + +class TestLogListingMachineModeNoTail: + """Machine modes must not include tail data.""" + + def test_plain_no_tail(self, tmp_path, capsys): + project_dir = 
_create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + step_dir = os.path.join(run_dir, "Synthesis_yosys", "log") + os.makedirs(step_dir, exist_ok=True) + with open(os.path.join(step_dir, "synthesis.log"), "w") as f: + f.write("line 1\nline 2\nline 3\n") + rc = cli_main.run(["log", "--plain", "--project", project_dir]) + assert rc == 0 + out = capsys.readouterr().out + assert "tail=" not in out + + def test_json_no_tail(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + step_dir = os.path.join(run_dir, "Synthesis_yosys", "log") + os.makedirs(step_dir, exist_ok=True) + with open(os.path.join(step_dir, "synthesis.log"), "w") as f: + f.write("line 1\nline 2\n") + rc = cli_main.run(["log", "--json", "--project", project_dir]) + assert rc == 0 + data = json.loads(capsys.readouterr().out) + for rec in data["records"]: + assert "tail" not in rec + + def test_jsonl_no_tail(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + step_dir = os.path.join(run_dir, "Synthesis_yosys", "log") + os.makedirs(step_dir, exist_ok=True) + with open(os.path.join(step_dir, "synthesis.log"), "w") as f: + f.write("line 1\nline 2\n") + rc = cli_main.run(["log", "--jsonl", "--project", project_dir]) + assert rc == 0 + records = [json.loads(ln) for ln in capsys.readouterr().out.strip().split("\n") if ln.strip()] + for rec in records: + assert "tail" not in rec + + +class TestLogStepUnchanged: + """ecc log full output must remain unchanged.""" + + def test_step_shows_all_lines_not_tail(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + step_dir = os.path.join(run_dir, "Synthesis_yosys", "log") + os.makedirs(step_dir, exist_ok=True) + lines = [f"line {i}" for i in range(20)] + with open(os.path.join(step_dir, 
"synthesis.log"), "w") as f: + f.write("\n".join(lines) + "\n") + rc = cli_main.run(["log", "synthesis", "--project", project_dir]) + assert rc == 0 + out = capsys.readouterr().out + assert "line 0" in out + assert "line 19" in out + + def test_step_plain_unchanged(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + step_dir = os.path.join(run_dir, "Synthesis_yosys", "log") + os.makedirs(step_dir, exist_ok=True) + with open(os.path.join(step_dir, "synthesis.log"), "w") as f: + f.write("a\nb\nc\n") + rc = cli_main.run(["log", "synthesis", "--plain", "--project", project_dir]) + assert rc == 0 + out = capsys.readouterr().out + assert "line_no=1" in out + assert "line_no=2" in out + assert "line_no=3" in out + assert "tail" not in out + + def test_step_jsonl_unchanged(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + step_dir = os.path.join(run_dir, "Synthesis_yosys", "log") + os.makedirs(step_dir, exist_ok=True) + with open(os.path.join(step_dir, "synthesis.log"), "w") as f: + f.write("a\nb\n") + rc = cli_main.run(["log", "synthesis", "--jsonl", "--project", project_dir]) + assert rc == 0 + records = [json.loads(ln) for ln in capsys.readouterr().out.strip().split("\n") if ln.strip()] + assert len(records) == 2 + for rec in records: + assert "tail" not in rec + + +class TestLogListingUnreadable: + """Unreadable logs in listing mode must omit tail, keep path+inspect, no traceback.""" + + def test_unreadable_step_log_in_listing(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + step_dir = os.path.join(run_dir, "Synthesis_yosys", "log") + os.makedirs(step_dir, exist_ok=True) + log_path = os.path.join(step_dir, "synthesis.log") + with open(log_path, "w") as f: + f.write("content\n") + os.chmod(log_path, 0o000) + + try: + rc = 
cli_main.run(["log", "--project", project_dir]) + assert rc == 0 + out = capsys.readouterr().out + assert "tail:" not in out + assert "Synthesis_yosys" in out + assert "inspect:" in out + assert "Traceback" not in out + finally: + os.chmod(log_path, 0o644) diff --git a/test/cli/test_cli_params.py b/test/cli/test_cli_params.py new file mode 100644 index 00000000..48954ad8 --- /dev/null +++ b/test/cli/test_cli_params.py @@ -0,0 +1,1230 @@ +import json +import os + +from chipcompiler.cli import main as cli_main + + +def _create_valid_project(tmp_path, name="gcd", pdk_root=None, freq=100.0): + project_dir = tmp_path / name + project_dir.mkdir(exist_ok=True) + (project_dir / "rtl").mkdir(exist_ok=True) + (project_dir / "constraints").mkdir(exist_ok=True) + (project_dir / "runs").mkdir(exist_ok=True) + + rtl_file = project_dir / "rtl" / "gcd.v" + rtl_file.write_text("module gcd(input clk); endmodule\n") + + if pdk_root is None: + pdk_root = tmp_path / "ics55" + pdk_root.mkdir(exist_ok=True) + + toml = f'''[design] +name = "{name}" +top = "{name}" +rtl = ["rtl/gcd.v"] +clock_port = "clk" +frequency_mhz = {freq} + +[pdk] +name = "ics55" +root = "{pdk_root}" + +[flow] +preset = "rtl2gds" +run = "default" +''' + (project_dir / "ecc.toml").write_text(toml) + return str(project_dir) + + +class TestParamList: + def test_param_list_text_output(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + rc = cli_main.run(["param", "list", "--project", project_dir]) + assert rc == 0 + out = capsys.readouterr().out + assert "place.target_density" in out + + def test_param_list_json(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + rc = cli_main.run(["param", "list", "--project", project_dir, "--json"]) + assert rc == 0 + data = json.loads(capsys.readouterr().out) + assert "records" in data + params = [r["param"] for r in data["records"]] + assert "place.target_density" in params + + def test_param_list_jsonl(self, tmp_path, capsys): + 
project_dir = _create_valid_project(tmp_path) + rc = cli_main.run(["param", "list", "--project", project_dir, "--jsonl"]) + assert rc == 0 + lines = capsys.readouterr().out.strip().split("\n") + objects = [json.loads(ln) for ln in lines] + assert len(objects) == 12 + + def test_param_list_plain(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + rc = cli_main.run(["param", "list", "--project", project_dir, "--plain"]) + assert rc == 0 + out = capsys.readouterr().out + assert "\033[" not in out + assert "place.target_density" in out + + +class TestParamShow: + def test_param_show_known_key(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + rc = cli_main.run(["param", "show", "place.target_density", "--project", project_dir]) + assert rc == 0 + out = capsys.readouterr().out + assert "place.target_density" in out + + def test_param_show_json(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + rc = cli_main.run(["param", "show", "place.target_density", "--project", project_dir, "--json"]) + assert rc == 0 + data = json.loads(capsys.readouterr().out) + record = data["records"][0] + assert record["param"] == "place.target_density" + assert record["default"] == 0.2 + assert "source" in record + assert "maps_to" in record + + def test_param_show_unknown_key(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + rc = cli_main.run(["param", "show", "unknown.key", "--project", project_dir]) + assert rc == 1 + + +class TestParamSet: + def test_param_set_writes_toml(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + rc = cli_main.run(["param", "set", "place.target_density", "0.65", "--project", project_dir]) + assert rc == 0 + + toml_path = os.path.join(project_dir, "ecc.toml") + with open(toml_path) as f: + content = f.read() + assert "target_density" in content + assert "0.65" in content + + def test_param_set_then_show(self, tmp_path, capsys): + project_dir = 
_create_valid_project(tmp_path) + cli_main.run(["param", "set", "place.target_density", "0.65", "--project", project_dir]) + capsys.readouterr() # flush set output + + rc = cli_main.run(["param", "show", "place.target_density", "--project", project_dir, "--json"]) + assert rc == 0 + data = json.loads(capsys.readouterr().out) + record = data["records"][0] + assert record["value"] == 0.65 + assert record["source"] == "ecc.toml" + + def test_param_set_rejects_unknown_key(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + rc = cli_main.run(["param", "set", "bogus.key", "5", "--project", project_dir]) + assert rc == 1 + + def test_param_set_rejects_invalid_value(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + rc = cli_main.run(["param", "set", "place.target_density", "1.5", "--project", project_dir]) + assert rc == 1 + + def test_param_set_preserves_other_sections(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + cli_main.run(["param", "set", "synth.max_fanout", "16", "--project", project_dir]) + + toml_path = os.path.join(project_dir, "ecc.toml") + with open(toml_path) as f: + content = f.read() + assert "[design]" in content + assert "[pdk]" in content + assert "[flow]" in content + + +class TestParamUnset: + def test_param_unset_removes_override(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + cli_main.run(["param", "set", "place.target_density", "0.65", "--project", project_dir]) + capsys.readouterr() # flush set output + + rc = cli_main.run(["param", "unset", "place.target_density", "--project", project_dir]) + assert rc == 0 + capsys.readouterr() # flush unset output + + rc = cli_main.run(["param", "show", "place.target_density", "--project", project_dir, "--json"]) + assert rc == 0 + data = json.loads(capsys.readouterr().out) + record = data["records"][0] + assert record["source"] == "default" + + def test_param_unset_noop_when_absent(self, tmp_path, capsys): + 
project_dir = _create_valid_project(tmp_path) + rc = cli_main.run(["param", "unset", "place.target_density", "--project", project_dir]) + assert rc == 0 + out = capsys.readouterr().out + assert "no override" in out + + +class TestParamDiff: + def test_param_diff_shows_overrides(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + cli_main.run(["param", "set", "place.target_density", "0.65", "--project", project_dir]) + capsys.readouterr() # flush set output + + rc = cli_main.run(["param", "diff", "--project", project_dir, "--json"]) + assert rc == 0 + data = json.loads(capsys.readouterr().out) + records = data["records"] + assert len(records) == 1 + assert records[0]["param"] == "place.target_density" + + def test_param_diff_clean_when_no_overrides(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + rc = cli_main.run(["param", "diff", "--project", project_dir, "--json"]) + assert rc == 0 + data = json.loads(capsys.readouterr().out) + assert data["records"][0].get("diff_status") == "clean" + + +class TestRunSet: + def test_run_set_override(self, tmp_path, monkeypatch, capsys): + from types import SimpleNamespace + + project_dir = _create_valid_project(tmp_path) + workspace_obj = SimpleNamespace(name="workspace") + capture = {"kwargs": None} + + def fake_create(**kwargs): + capture["kwargs"] = kwargs + return workspace_obj + + monkeypatch.setattr("chipcompiler.data.create_workspace", fake_create) + monkeypatch.setattr("chipcompiler.engine.EngineFlow", type( + "DummyFlow", (), { + "__init__": lambda self, workspace: None, + "has_init": lambda self: False, + "add_step": lambda self, **kw: None, + "create_step_workspaces": lambda self: None, + "run_steps": lambda self: True, + }, + )) + monkeypatch.setattr( + "chipcompiler.rtl2gds.build_rtl2gds_flow", + lambda: [("Synthesis", "yosys", "Unstart")], + ) + monkeypatch.setattr( + "chipcompiler.cli.config._validate_pdk_contents", + lambda name, root: None, + ) + 
monkeypatch.setattr( + "chipcompiler.cli.progress.should_enable_run_progress", + lambda *a, **kw: False, + ) + + rc = cli_main.run([ + "run", "--project", project_dir, + "--set", "place.target_density=0.65", + ]) + assert rc == 0 + + params = capture["kwargs"]["parameters"] + assert params.get("DreamPlace", {}).get("target_density") == 0.65 + + def test_run_set_rejects_unknown_key(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + rc = cli_main.run([ + "run", "--project", project_dir, + "--set", "bogus.key=5", + ]) + assert rc == 1 + + def test_run_set_rejects_invalid_value(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + rc = cli_main.run([ + "run", "--project", project_dir, + "--set", "place.target_density=1.5", + ]) + assert rc == 1 + + def test_run_set_does_not_modify_toml(self, tmp_path, monkeypatch, capsys): + from types import SimpleNamespace + + project_dir = _create_valid_project(tmp_path) + toml_path = os.path.join(project_dir, "ecc.toml") + with open(toml_path) as f: + original_toml = f.read() + + workspace_obj = SimpleNamespace(name="workspace") + + def fake_create(**kwargs): + return workspace_obj + + monkeypatch.setattr("chipcompiler.data.create_workspace", fake_create) + monkeypatch.setattr("chipcompiler.engine.EngineFlow", type( + "DummyFlow", (), { + "__init__": lambda self, workspace: None, + "has_init": lambda self: False, + "add_step": lambda self, **kw: None, + "create_step_workspaces": lambda self: None, + "run_steps": lambda self: True, + }, + )) + monkeypatch.setattr( + "chipcompiler.rtl2gds.build_rtl2gds_flow", + lambda: [("Synthesis", "yosys", "Unstart")], + ) + monkeypatch.setattr( + "chipcompiler.cli.config._validate_pdk_contents", + lambda name, root: None, + ) + monkeypatch.setattr( + "chipcompiler.cli.progress.should_enable_run_progress", + lambda *a, **kw: False, + ) + + cli_main.run([ + "run", "--project", project_dir, + "--set", "place.target_density=0.65", + ]) + + with 
open(toml_path) as f: + current_toml = f.read() + assert current_toml == original_toml + + +class TestOutputContracts: + def test_plain_no_ansi(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + rc = cli_main.run(["param", "list", "--project", project_dir, "--plain"]) + assert rc == 0 + out = capsys.readouterr().out + assert "\033[" not in out + + def test_json_no_ansi(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + rc = cli_main.run(["param", "list", "--project", project_dir, "--json"]) + assert rc == 0 + out = capsys.readouterr().out + assert "\033[" not in out + + def test_jsonl_no_ansi(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + rc = cli_main.run(["param", "list", "--project", project_dir, "--jsonl"]) + assert rc == 0 + out = capsys.readouterr().out + assert "\033[" not in out + + def test_json_uses_records_envelope(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + rc = cli_main.run(["param", "list", "--project", project_dir, "--json"]) + assert rc == 0 + data = json.loads(capsys.readouterr().out) + assert "records" in data + assert isinstance(data["records"], list) + + def test_plain_is_line_oriented(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + rc = cli_main.run(["param", "list", "--project", project_dir, "--plain"]) + assert rc == 0 + out = capsys.readouterr().out + lines = [l for l in out.strip().split("\n") if l.strip()] + assert len(lines) == 12 + + +class TestConfigResolved: + def test_config_resolved_includes_param_records(self, tmp_path, monkeypatch, capsys): + project_dir = _create_valid_project(tmp_path) + monkeypatch.setattr( + "chipcompiler.cli.config._validate_pdk_contents", + lambda name, root: None, + ) + run_dir = os.path.join(project_dir, "runs", "default") + home = os.path.join(run_dir, "home") + os.makedirs(home, exist_ok=True) + with open(os.path.join(home, "flow.json"), "w") as f: + json.dump({"steps": 
[]}, f) + + rc = cli_main.run(["config", "--resolved", "--project", project_dir, "--json"]) + assert rc == 0 + data = json.loads(capsys.readouterr().out) + records = data["records"] + param_records = [r for r in records if r.get("kind") == "param"] + assert len(param_records) == 12 + first_param = param_records[0] + assert "source" in first_param + assert "maps_to" in first_param + + def test_config_resolved_shows_toml_source(self, tmp_path, monkeypatch, capsys): + project_dir = _create_valid_project(tmp_path) + monkeypatch.setattr( + "chipcompiler.cli.config._validate_pdk_contents", + lambda name, root: None, + ) + cli_main.run(["param", "set", "place.target_density", "0.65", "--project", project_dir]) + capsys.readouterr() # flush set output + + run_dir = os.path.join(project_dir, "runs", "default") + home = os.path.join(run_dir, "home") + os.makedirs(home, exist_ok=True) + with open(os.path.join(home, "flow.json"), "w") as f: + json.dump({"steps": []}, f) + + rc = cli_main.run(["config", "--resolved", "--project", project_dir, "--json"]) + assert rc == 0 + data = json.loads(capsys.readouterr().out) + param_records = [r for r in data["records"] if r.get("kind") == "param"] + density = next(r for r in param_records if r["key"] == "place.target_density") + assert density["value"] == 0.65 + assert density["source"] == "ecc.toml" + + def test_config_resolved_seeds_design_frequency(self, tmp_path, monkeypatch, capsys): + project_dir = _create_valid_project(tmp_path, freq=200.0) + monkeypatch.setattr( + "chipcompiler.cli.config._validate_pdk_contents", + lambda name, root: None, + ) + run_dir = os.path.join(project_dir, "runs", "default") + home = os.path.join(run_dir, "home") + os.makedirs(home, exist_ok=True) + with open(os.path.join(home, "flow.json"), "w") as f: + json.dump({"steps": []}, f) + + rc = cli_main.run(["config", "--resolved", "--project", project_dir, "--json"]) + assert rc == 0 + data = json.loads(capsys.readouterr().out) + param_records = [r for r in 
data["records"] if r.get("kind") == "param"] + freq = next(r for r in param_records if r["key"] == "design.frequency_mhz") + assert freq["value"] == 200.0 + + +class TestTomlValidationErrors: + def _create_project_with_invalid_param(self, tmp_path): + project_dir = _create_valid_project(tmp_path) + toml_path = os.path.join(project_dir, "ecc.toml") + with open(toml_path) as f: + content = f.read() + content += '\n[params.synth]\nmax_fanout = "not_an_int"\n' + with open(toml_path, "w") as f: + f.write(content) + return project_dir + + def test_check_fails_invalid_param_type(self, tmp_path, capsys): + project_dir = self._create_project_with_invalid_param(tmp_path) + rc = cli_main.run(["check", "--project", project_dir, "--json"]) + assert rc == 1 + data = json.loads(capsys.readouterr().out) + reasons = [r.get("reason", "") for r in data["records"]] + assert any("params" in r for r in reasons) + + def test_check_fails_unknown_param_key(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + toml_path = os.path.join(project_dir, "ecc.toml") + with open(toml_path) as f: + content = f.read() + content += '\n[params.bogus]\nkey = 5\n' + with open(toml_path, "w") as f: + f.write(content) + rc = cli_main.run(["check", "--project", project_dir, "--json"]) + assert rc == 1 + + def test_run_fails_invalid_param_type(self, tmp_path): + project_dir = self._create_project_with_invalid_param(tmp_path) + rc = cli_main.run(["run", "--project", project_dir]) + assert rc == 1 + + +class TestPrettyOutput: + def test_param_list_default_is_grouped_text(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + rc = cli_main.run(["param", "list", "--project", project_dir]) + assert rc == 0 + out = capsys.readouterr().out + assert "place" in out + assert "place.target_density" in out + + def test_param_list_plain_is_one_line_per_record(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + rc = cli_main.run(["param", "list", "--project", 
project_dir, "--plain"]) + assert rc == 0 + out = capsys.readouterr().out + lines = [l for l in out.strip().split("\n") if l.strip()] + assert len(lines) == 12 + assert "\033[" not in out + + def test_param_show_default_is_pretty(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + rc = cli_main.run(["param", "show", "place.target_density", "--project", project_dir]) + assert rc == 0 + out = capsys.readouterr().out + assert "place.target_density" in out + assert "source" in out + assert "default" in out + + def test_param_set_default_is_pretty(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + rc = cli_main.run(["param", "set", "place.target_density", "0.65", "--project", project_dir]) + assert rc == 0 + out = capsys.readouterr().out + assert "0.65" in out + + def test_param_diff_default_is_pretty(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + cli_main.run(["param", "set", "place.target_density", "0.65", "--project", project_dir]) + capsys.readouterr() + rc = cli_main.run(["param", "diff", "--project", project_dir]) + assert rc == 0 + out = capsys.readouterr().out + assert "place.target_density" in out + + +class TestResolvedListValues: + def test_param_list_json_has_value_and_source(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + cli_main.run(["param", "set", "place.target_density", "0.65", "--project", project_dir]) + capsys.readouterr() + + rc = cli_main.run(["param", "list", "--project", project_dir, "--json"]) + assert rc == 0 + data = json.loads(capsys.readouterr().out) + records = data["records"] + density = next(r for r in records if r["param"] == "place.target_density") + assert density["value"] == 0.65 + assert density["source"] == "ecc.toml" + assert "default" in density + assert "maps_to" in density + assert "inspect" in density + + def test_param_list_default_source_when_no_overrides(self, tmp_path, capsys): + project_dir = 
_create_valid_project(tmp_path) + rc = cli_main.run(["param", "list", "--project", project_dir, "--json"]) + assert rc == 0 + data = json.loads(capsys.readouterr().out) + for r in data["records"]: + if r["param"] == "design.frequency_mhz": + assert r["source"] == "ecc.toml" + else: + assert r["source"] == "default" + + +class TestDiffFiltering: + def test_diff_only_shows_values_that_differ(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + cli_main.run(["param", "set", "place.target_density", "0.65", "--project", project_dir]) + capsys.readouterr() + + rc = cli_main.run(["param", "diff", "--project", project_dir, "--json"]) + assert rc == 0 + data = json.loads(capsys.readouterr().out) + records = data["records"] + assert len(records) == 1 + assert records[0]["param"] == "place.target_density" + assert records[0]["value"] == 0.65 + assert records[0]["default"] != 0.65 + + def test_diff_clean_when_set_to_default(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + schema_default = 0.2 + cli_main.run(["param", "set", "place.target_density", str(schema_default), "--project", project_dir]) + capsys.readouterr() + + rc = cli_main.run(["param", "diff", "--project", project_dir, "--json"]) + assert rc == 0 + data = json.loads(capsys.readouterr().out) + assert data["records"][0].get("diff_status") == "clean" + + +class TestScopedTomlEdit: + def test_set_preserves_unrelated_sections(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + toml_path = os.path.join(project_dir, "ecc.toml") + with open(toml_path) as f: + original = f.read() + + cli_main.run(["param", "set", "synth.max_fanout", "16", "--project", project_dir]) + capsys.readouterr() + + with open(toml_path) as f: + after = f.read() + design_section = original[original.index("[design]"):original.index("[pdk]")] + assert design_section in after + + def test_set_preserves_comments(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) 
+ toml_path = os.path.join(project_dir, "ecc.toml") + with open(toml_path) as f: + content = f.read() + content = content.replace("[design]", "[design]\n# my design") + with open(toml_path, "w") as f: + f.write(content) + + cli_main.run(["param", "set", "synth.max_fanout", "16", "--project", project_dir]) + capsys.readouterr() + + with open(toml_path) as f: + after = f.read() + assert "# my design" in after + + def test_set_same_key_twice_has_one_assignment(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + toml_path = os.path.join(project_dir, "ecc.toml") + + cli_main.run(["param", "set", "place.target_density", "0.65", "--project", project_dir]) + capsys.readouterr() + + cli_main.run(["param", "set", "place.target_density", "0.7", "--project", project_dir]) + capsys.readouterr() + + with open(toml_path) as f: + content = f.read() + assert content.count("target_density") == 1 + assert "0.7" in content + assert "0.65" not in content + + def test_set_then_show_still_works(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + + cli_main.run(["param", "set", "place.target_density", "0.65", "--project", project_dir]) + capsys.readouterr() + + cli_main.run(["param", "set", "place.target_density", "0.7", "--project", project_dir]) + capsys.readouterr() + + rc = cli_main.run(["param", "show", "place.target_density", "--project", project_dir, "--json"]) + assert rc == 0 + data = json.loads(capsys.readouterr().out) + assert data["records"][0]["value"] == 0.7 + + +class TestNativeTomlTypeValidation: + def test_check_rejects_float_for_int(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + toml_path = os.path.join(project_dir, "ecc.toml") + with open(toml_path) as f: + content = f.read() + content += '\n[params.synth]\nmax_fanout = 16.5\n' + with open(toml_path, "w") as f: + f.write(content) + rc = cli_main.run(["check", "--project", project_dir, "--json"]) + assert rc == 1 + + def 
test_check_rejects_bool_for_int(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + toml_path = os.path.join(project_dir, "ecc.toml") + with open(toml_path) as f: + content = f.read() + content += '\n[params.synth]\nmax_fanout = true\n' + with open(toml_path, "w") as f: + f.write(content) + rc = cli_main.run(["check", "--project", project_dir, "--json"]) + assert rc == 1 + + def test_check_rejects_float_in_list_int(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + toml_path = os.path.join(project_dir, "ecc.toml") + with open(toml_path) as f: + content = f.read() + content += '\n[params.floorplan]\ncore_margin = [2.5, 3]\n' + with open(toml_path, "w") as f: + f.write(content) + rc = cli_main.run(["check", "--project", project_dir, "--json"]) + assert rc == 1 + + def test_check_accepts_valid_int(self, tmp_path, capsys, monkeypatch): + project_dir = _create_valid_project(tmp_path) + toml_path = os.path.join(project_dir, "ecc.toml") + with open(toml_path) as f: + content = f.read() + content += '\n[params.synth]\nmax_fanout = 16\n' + with open(toml_path, "w") as f: + f.write(content) + monkeypatch.setattr( + "chipcompiler.cli.config._validate_pdk_contents", + lambda pdk_root, pdk_name: [], + ) + rc = cli_main.run(["check", "--project", project_dir, "--json"]) + assert rc == 0 + + +class TestCliProvenance: + def test_run_set_reports_cli_source_in_config(self, tmp_path, monkeypatch, capsys): + from types import SimpleNamespace + + project_dir = _create_valid_project(tmp_path) + workspace_obj = SimpleNamespace(name="workspace") + + def fake_create(**kwargs): + run_dir = kwargs["directory"] + os.makedirs(os.path.join(run_dir, "home"), exist_ok=True) + return workspace_obj + + monkeypatch.setattr("chipcompiler.data.create_workspace", fake_create) + monkeypatch.setattr("chipcompiler.engine.EngineFlow", type( + "DummyFlow", (), { + "__init__": lambda self, workspace: None, + "has_init": lambda self: False, + "add_step": lambda 
self, **kw: None, + "create_step_workspaces": lambda self: None, + "run_steps": lambda self: True, + }, + )) + monkeypatch.setattr( + "chipcompiler.rtl2gds.build_rtl2gds_flow", + lambda: [("Synthesis", "yosys", "Unstart")], + ) + monkeypatch.setattr( + "chipcompiler.cli.config._validate_pdk_contents", + lambda name, root: None, + ) + monkeypatch.setattr( + "chipcompiler.cli.progress.should_enable_run_progress", + lambda *a, **kw: False, + ) + + rc = cli_main.run([ + "run", "--project", project_dir, + "--set", "synth.max_fanout=16", + ]) + assert rc == 0 + capsys.readouterr() + + # Verify provenance file was written + provenance = os.path.join( + project_dir, "runs", "default", "home", "cli-param-overrides.json" + ) + assert os.path.isfile(provenance) + with open(provenance) as f: + data = json.load(f) + assert data["synth.max_fanout"] == 16 + + def test_config_resolved_shows_cli_source(self, tmp_path, monkeypatch, capsys): + from types import SimpleNamespace + + project_dir = _create_valid_project(tmp_path) + workspace_obj = SimpleNamespace(name="workspace") + + def fake_create(**kwargs): + run_dir = kwargs["directory"] + os.makedirs(os.path.join(run_dir, "home"), exist_ok=True) + return workspace_obj + + monkeypatch.setattr("chipcompiler.data.create_workspace", fake_create) + monkeypatch.setattr("chipcompiler.engine.EngineFlow", type( + "DummyFlow", (), { + "__init__": lambda self, workspace: None, + "has_init": lambda self: False, + "add_step": lambda self, **kw: None, + "create_step_workspaces": lambda self: None, + "run_steps": lambda self: True, + }, + )) + monkeypatch.setattr( + "chipcompiler.rtl2gds.build_rtl2gds_flow", + lambda: [("Synthesis", "yosys", "Unstart")], + ) + monkeypatch.setattr( + "chipcompiler.cli.config._validate_pdk_contents", + lambda name, root: None, + ) + monkeypatch.setattr( + "chipcompiler.cli.progress.should_enable_run_progress", + lambda *a, **kw: False, + ) + + # Run with --set + rc = cli_main.run([ + "run", "--project", 
project_dir, + "--set", "synth.max_fanout=16", + ]) + assert rc == 0 + capsys.readouterr() + + # Now inspect config --resolved + rc = cli_main.run(["config", "--resolved", "--project", project_dir, "--json"]) + assert rc == 0 + data = json.loads(capsys.readouterr().out) + param_records = [r for r in data["records"] if r.get("kind") == "param"] + fanout = next(r for r in param_records if r["key"] == "synth.max_fanout") + assert fanout["value"] == 16 + assert fanout["source"] == "cli" + + def test_config_resolved_toml_plus_cli_precedence(self, tmp_path, monkeypatch, capsys): + from types import SimpleNamespace + + project_dir = _create_valid_project(tmp_path) + workspace_obj = SimpleNamespace(name="workspace") + + # Set a TOML override first + cli_main.run(["param", "set", "synth.max_fanout", "16", "--project", project_dir]) + capsys.readouterr() + + def fake_create(**kwargs): + run_dir = kwargs["directory"] + os.makedirs(os.path.join(run_dir, "home"), exist_ok=True) + return workspace_obj + + monkeypatch.setattr("chipcompiler.data.create_workspace", fake_create) + monkeypatch.setattr("chipcompiler.engine.EngineFlow", type( + "DummyFlow", (), { + "__init__": lambda self, workspace: None, + "has_init": lambda self: False, + "add_step": lambda self, **kw: None, + "create_step_workspaces": lambda self: None, + "run_steps": lambda self: True, + }, + )) + monkeypatch.setattr( + "chipcompiler.rtl2gds.build_rtl2gds_flow", + lambda: [("Synthesis", "yosys", "Unstart")], + ) + monkeypatch.setattr( + "chipcompiler.cli.config._validate_pdk_contents", + lambda name, root: None, + ) + monkeypatch.setattr( + "chipcompiler.cli.progress.should_enable_run_progress", + lambda *a, **kw: False, + ) + + # Run with different CLI override + rc = cli_main.run([ + "run", "--project", project_dir, + "--set", "synth.max_fanout=32", + ]) + assert rc == 0 + capsys.readouterr() + + rc = cli_main.run(["config", "--resolved", "--project", project_dir, "--json"]) + assert rc == 0 + data = 
json.loads(capsys.readouterr().out) + param_records = [r for r in data["records"] if r.get("kind") == "param"] + fanout = next(r for r in param_records if r["key"] == "synth.max_fanout") + assert fanout["value"] == 32 + assert fanout["source"] == "cli" + + +class TestParamHandlersRejectInvalidToml: + """Param list/show/diff must return errors when ecc.toml has invalid [params.*].""" + + def _write_invalid_toml(self, project_dir): + toml_path = os.path.join(project_dir, "ecc.toml") + with open(toml_path) as f: + content = f.read() + content += '\n[params.synth]\nmax_fanout = 16.5\n' + with open(toml_path, "w") as f: + f.write(content) + + def test_param_list_rejects_invalid_toml(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + self._write_invalid_toml(project_dir) + rc = cli_main.run(["param", "list", "--project", project_dir, "--json"]) + assert rc == 1 + data = json.loads(capsys.readouterr().out) + assert data["records"][0]["error"] == "invalid_param_config" + + def test_param_show_rejects_invalid_toml(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + self._write_invalid_toml(project_dir) + rc = cli_main.run(["param", "show", "synth.max_fanout", "--project", project_dir, "--json"]) + assert rc == 1 + data = json.loads(capsys.readouterr().out) + assert data["records"][0]["error"] == "invalid_param_config" + + def test_param_diff_rejects_invalid_toml(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + self._write_invalid_toml(project_dir) + rc = cli_main.run(["param", "diff", "--project", project_dir, "--json"]) + assert rc == 1 + data = json.loads(capsys.readouterr().out) + assert data["records"][0]["error"] == "invalid_param_config" + + +class TestIndentedTomlKeys: + """Scoped TOML edit must handle indented assignment lines.""" + + def test_set_replaces_indented_key(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + toml_path = os.path.join(project_dir, "ecc.toml") + 
with open(toml_path) as f: + content = f.read() + content += '\n[params.place]\n target_density = 0.65\n' + with open(toml_path, "w") as f: + f.write(content) + + cli_main.run(["param", "set", "place.target_density", "0.7", "--project", project_dir]) + capsys.readouterr() + + with open(toml_path) as f: + after = f.read() + assert after.count("target_density") == 1 + assert "0.7" in after + + def test_set_then_show_indented(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + toml_path = os.path.join(project_dir, "ecc.toml") + with open(toml_path) as f: + content = f.read() + content += '\n[params.place]\n target_density = 0.65\n' + with open(toml_path, "w") as f: + f.write(content) + + cli_main.run(["param", "set", "place.target_density", "0.7", "--project", project_dir]) + capsys.readouterr() + + rc = cli_main.run(["param", "show", "place.target_density", "--project", project_dir, "--json"]) + assert rc == 0 + data = json.loads(capsys.readouterr().out) + assert data["records"][0]["value"] == 0.7 + + def test_unset_removes_indented_key(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + toml_path = os.path.join(project_dir, "ecc.toml") + with open(toml_path) as f: + content = f.read() + content += '\n[params.place]\n target_density = 0.65\n' + with open(toml_path, "w") as f: + f.write(content) + + cli_main.run(["param", "unset", "place.target_density", "--project", project_dir]) + capsys.readouterr() + + with open(toml_path) as f: + after = f.read() + assert "target_density" not in after + + def test_set_indented_preserves_other_sections(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + toml_path = os.path.join(project_dir, "ecc.toml") + with open(toml_path) as f: + content = f.read() + content += '\n[params.place]\n target_density = 0.65\n\n[flow]\npreset = "rtl2gds"\n' + with open(toml_path, "w") as f: + f.write(content) + + cli_main.run(["param", "set", "place.target_density", "0.7", 
"--project", project_dir]) + capsys.readouterr() + + with open(toml_path) as f: + after = f.read() + assert 'preset = "rtl2gds"' in after + assert after.count("target_density") == 1 + + +class TestMultilineTomlValues: + """Scoped TOML edit must handle multiline array values.""" + + def test_set_replaces_multiline_array(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + toml_path = os.path.join(project_dir, "ecc.toml") + with open(toml_path) as f: + content = f.read() + content += '\n[params.floorplan]\ncore_margin = [\n 2,\n 2,\n]\n' + with open(toml_path, "w") as f: + f.write(content) + + cli_main.run(["param", "set", "floorplan.core_margin", "[4, 4]", "--project", project_dir]) + capsys.readouterr() + + with open(toml_path) as f: + after = f.read() + assert "2," not in after + assert after.count("core_margin") == 1 + assert "[4, 4]" in after + + def test_unset_removes_multiline_array(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + toml_path = os.path.join(project_dir, "ecc.toml") + with open(toml_path) as f: + content = f.read() + content += '\n[params.floorplan]\ncore_margin = [\n 2,\n 2,\n]\n' + with open(toml_path, "w") as f: + f.write(content) + + cli_main.run(["param", "unset", "floorplan.core_margin", "--project", project_dir]) + capsys.readouterr() + + with open(toml_path) as f: + after = f.read() + assert "core_margin" not in after + + def test_set_multiline_then_show(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + toml_path = os.path.join(project_dir, "ecc.toml") + with open(toml_path) as f: + content = f.read() + content += '\n[params.floorplan]\ncore_margin = [\n 2,\n 2,\n]\n' + with open(toml_path, "w") as f: + f.write(content) + + cli_main.run(["param", "set", "floorplan.core_margin", "[4, 4]", "--project", project_dir]) + capsys.readouterr() + + rc = cli_main.run(["param", "show", "floorplan.core_margin", "--project", project_dir, "--json"]) + assert rc == 0 + data = 
json.loads(capsys.readouterr().out) + assert data["records"][0]["value"] == [4, 4] + + def test_set_preserves_adjacent_key_after_multiline(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + toml_path = os.path.join(project_dir, "ecc.toml") + with open(toml_path) as f: + content = f.read() + content += '\n[params.floorplan]\ncore_margin = [\n 2,\n 2,\n]\n core_util = 0.5\n' + with open(toml_path, "w") as f: + f.write(content) + + cli_main.run(["param", "set", "floorplan.core_margin", "[4, 4]", "--project", project_dir]) + capsys.readouterr() + + with open(toml_path) as f: + after = f.read() + assert "core_util = 0.5" in after + assert after.count("core_margin") == 1 + for line in after.splitlines(): + assert "core_margin" not in line or "core_util" not in line, ( + f"multiline replacement concatenated keys on one line: {line!r}" + ) + +class TestInvalidCliProvenance:  # config --resolved must error on malformed/invalid CLI provenance. + + def _setup_run_dir(self, project_dir): + run_dir = os.path.join(project_dir, "runs", "default") + os.makedirs(os.path.join(run_dir, "home"), exist_ok=True) + return run_dir + + def test_malformed_json_provenance_fails(self, tmp_path, capsys, monkeypatch): + project_dir = _create_valid_project(tmp_path) + run_dir = self._setup_run_dir(project_dir) + with open(os.path.join(run_dir, "home", "cli-param-overrides.json"), "w") as f: + f.write("not valid json{") + monkeypatch.setattr( + "chipcompiler.cli.config._validate_pdk_contents", + lambda pdk_root, pdk_name: [], + ) + rc = cli_main.run(["config", "--resolved", "--project", project_dir, "--json"]) + assert rc == 1 + data = json.loads(capsys.readouterr().out) + assert data["records"][0]["error"] == "invalid_config" + + def test_non_dict_provenance_fails(self, tmp_path, capsys, monkeypatch): + project_dir = _create_valid_project(tmp_path) + run_dir = self._setup_run_dir(project_dir) + with open(os.path.join(run_dir, "home", "cli-param-overrides.json"), "w") as f: + json.dump([1, 2, 3], f) + 
monkeypatch.setattr( + "chipcompiler.cli.config._validate_pdk_contents", + lambda pdk_root, pdk_name: [], + ) + rc = cli_main.run(["config", "--resolved", "--project", project_dir, "--json"]) + assert rc == 1 + + def test_unknown_key_in_provenance_fails(self, tmp_path, capsys, monkeypatch): + project_dir = _create_valid_project(tmp_path) + run_dir = self._setup_run_dir(project_dir) + with open(os.path.join(run_dir, "home", "cli-param-overrides.json"), "w") as f: + json.dump({"nonexistent.param": 42}, f) + monkeypatch.setattr( + "chipcompiler.cli.config._validate_pdk_contents", + lambda pdk_root, pdk_name: [], + ) + rc = cli_main.run(["config", "--resolved", "--project", project_dir, "--json"]) + assert rc == 1 + data = json.loads(capsys.readouterr().out) + assert data["records"][0]["error"] == "invalid_config" + + +class TestParamShowDisclosureCommands: + """param show must include disclosure command fields.""" + + def test_show_json_has_disclosure_commands(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + rc = cli_main.run(["param", "show", "place.target_density", "--project", project_dir, "--json"]) + assert rc == 0 + data = json.loads(capsys.readouterr().out) + record = data["records"][0] + assert "inspect" in record + assert "set" in record + assert "run" in record + assert "ecc param show place.target_density" in record["inspect"] + + def test_show_text_has_disclosure_commands(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + rc = cli_main.run(["param", "show", "place.target_density", "--project", project_dir]) + assert rc == 0 + out = capsys.readouterr().out + assert "ecc param show place.target_density" in out + assert "ecc param set place.target_density" in out + assert "ecc run --set place.target_density" in out + + +class TestSafeTomlSectionParsing: + """Scoped TOML edits must handle comments and indented headers safely.""" + + def test_set_ignores_commented_section_header(self, tmp_path, capsys): + 
project_dir = _create_valid_project(tmp_path) + toml_path = os.path.join(project_dir, "ecc.toml") + with open(toml_path) as f: + content = f.read() + content += '\n# [params.place]\n# target_density = 0.65\n' + with open(toml_path, "w") as f: + f.write(content) + + cli_main.run(["param", "set", "place.target_density", "0.7", "--project", project_dir]) + capsys.readouterr() + + with open(toml_path) as f: + after = f.read() + assert "[params.place]" in after + assert "target_density = 0.7" in after + + def test_set_ignores_indented_next_section_header(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + toml_path = os.path.join(project_dir, "ecc.toml") + with open(toml_path) as f: + content = f.read() + content += '\n[params.place]\ntarget_density = 0.65\n\n [flow]\npreset = "rtl2gds"\n' + with open(toml_path, "w") as f: + f.write(content) + + cli_main.run(["param", "set", "place.target_density", "0.7", "--project", project_dir]) + capsys.readouterr() + + with open(toml_path) as f: + after = f.read() + assert after.count("target_density") == 1 + assert "0.7" in after + assert 'preset = "rtl2gds"' in after + + def test_set_then_show_after_commented_header(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + toml_path = os.path.join(project_dir, "ecc.toml") + with open(toml_path) as f: + content = f.read() + content += '\n# [params.place]\n# target_density = 0.65\n' + with open(toml_path, "w") as f: + f.write(content) + + cli_main.run(["param", "set", "place.target_density", "0.7", "--project", project_dir]) + capsys.readouterr() + + rc = cli_main.run(["param", "show", "place.target_density", "--project", project_dir, "--json"]) + assert rc == 0 + data = json.loads(capsys.readouterr().out) + assert data["records"][0]["value"] == 0.7 + + def test_unset_ignores_commented_section_header(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + toml_path = os.path.join(project_dir, "ecc.toml") + with 
open(toml_path) as f: + content = f.read() + content += '\n# [params.place]\n# target_density = 0.65\n' + with open(toml_path, "w") as f: + f.write(content) + + rc = cli_main.run(["param", "unset", "place.target_density", "--project", project_dir]) + assert rc == 0 + capsys.readouterr() + with open(toml_path) as f: + after = f.read() + assert "target_density" in after + + +class TestListDefaultDiffFiltering: + """param diff must not report list values equal to defaults.""" + + def test_list_default_not_in_diff(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + cli_main.run(["param", "set", "floorplan.core_margin", "[2,2]", "--project", project_dir]) + capsys.readouterr() + + rc = cli_main.run(["param", "diff", "--project", project_dir, "--json"]) + assert rc == 0 + data = json.loads(capsys.readouterr().out) + assert data["records"][0].get("diff_status") == "clean" + + def test_list_changed_value_in_diff(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + cli_main.run(["param", "set", "floorplan.core_margin", "[4,4]", "--project", project_dir]) + capsys.readouterr() + + rc = cli_main.run(["param", "diff", "--project", project_dir, "--json"]) + assert rc == 0 + data = json.loads(capsys.readouterr().out) + assert len(data["records"]) >= 1 + margin = next((r for r in data["records"] if r.get("param") == "floorplan.core_margin"), None) + assert margin is not None + assert margin["value"] == [4, 4] + + +class TestZeroFrequencyRejected: + """ecc param set design.frequency_mhz 0 must be rejected.""" + + def test_set_zero_rejected(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + rc = cli_main.run(["param", "set", "design.frequency_mhz", "0", "--project", project_dir]) + assert rc == 1 + + def test_cli_set_zero_rejected(self, tmp_path): + project_dir = _create_valid_project(tmp_path) + rc = cli_main.run([ + "run", "--project", project_dir, + "--set", "design.frequency_mhz=0", + ]) + assert rc == 1 + + 
+class TestDesignFrequencySeeded: + """ecc param list/show must reflect [design] frequency_mhz.""" + + def test_list_shows_design_frequency(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path, freq=200.0) + rc = cli_main.run(["param", "list", "--project", project_dir, "--json"]) + assert rc == 0 + data = json.loads(capsys.readouterr().out) + freq = next(r for r in data["records"] if r["param"] == "design.frequency_mhz") + assert freq["value"] == 200.0 + + def test_show_shows_design_frequency(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path, freq=200.0) + rc = cli_main.run(["param", "show", "design.frequency_mhz", "--project", project_dir, "--json"]) + assert rc == 0 + data = json.loads(capsys.readouterr().out) + assert data["records"][0]["value"] == 200.0 + + def test_param_override_beats_design_frequency(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path, freq=200.0) + cli_main.run(["param", "set", "design.frequency_mhz", "300", "--project", project_dir]) + capsys.readouterr() + rc = cli_main.run(["param", "show", "design.frequency_mhz", "--project", project_dir, "--json"]) + assert rc == 0 + data = json.loads(capsys.readouterr().out) + assert data["records"][0]["value"] == 300.0 + assert data["records"][0]["source"] == "ecc.toml" + + +class TestMalformedTomlRejected: + """ecc param list/show/diff must reject syntactically malformed ecc.toml.""" + + def _write_malformed_toml(self, project_dir): + toml_path = os.path.join(project_dir, "ecc.toml") + with open(toml_path, "w") as f: + f.write('[design\nname = "gcd"\n') + + def test_param_list_rejects_malformed(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + self._write_malformed_toml(project_dir) + rc = cli_main.run(["param", "list", "--project", project_dir, "--json"]) + assert rc == 1 + data = json.loads(capsys.readouterr().out) + assert data["records"][0]["error"] == "invalid_param_config" + + def 
test_param_show_rejects_malformed(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + self._write_malformed_toml(project_dir) + rc = cli_main.run(["param", "show", "design.frequency_mhz", "--project", project_dir, "--json"]) + assert rc == 1 + + def test_param_diff_rejects_malformed(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + self._write_malformed_toml(project_dir) + rc = cli_main.run(["param", "diff", "--project", project_dir, "--json"]) + assert rc == 1 diff --git a/test/cli/test_log_view.py b/test/cli/test_log_view.py new file mode 100644 index 00000000..a880238c --- /dev/null +++ b/test/cli/test_log_view.py @@ -0,0 +1,855 @@ +import os + +import pytest + +from chipcompiler.cli.log_view import ( + LineKind, + annotate_log_lines, + build_log_records, + classify_line, + extract_error_context, + render_log_listing_pretty, + render_log_plain, + render_log_pretty, +) + + +class TestClassifyLine: + def test_error_keyword(self): + assert classify_line("Error: something failed") == LineKind.ERROR + + def test_error_case_insensitive(self): + assert classify_line("ERROR: critical") == LineKind.ERROR + + def test_warning_keyword(self): + assert classify_line("Warning: check this") == LineKind.WARNING + + def test_warn_keyword(self): + assert classify_line("WARN: deprecated") == LineKind.WARNING + + def test_info_prefix(self): + assert classify_line("INFO: running step") == LineKind.INFO + + def test_info_bracket(self): + assert classify_line("[INFO ] running step") == LineKind.INFO + + def test_info_root(self): + assert classify_line("INFO:root: message") == LineKind.INFO + + def test_traceback_header(self): + assert classify_line("Traceback (most recent call last):") == LineKind.TRACEBACK + + def test_section_separator(self): + assert classify_line("---") == LineKind.SECTION + + def test_section_equals(self): + assert classify_line("==========") == LineKind.SECTION + + def test_plain_line(self): + assert 
classify_line("some ordinary output") == LineKind.PLAIN + + def test_plain_empty(self): + assert classify_line("") == LineKind.PLAIN + + def test_plain_whitespace(self): + assert classify_line(" ") == LineKind.PLAIN + + def test_traceback_header_indented(self): + assert classify_line(" Traceback (most recent call last):") == LineKind.TRACEBACK + + def test_error_inside_traceback_stops_traceback(self): + assert classify_line("ValueError: bad", in_traceback=True) == LineKind.ERROR + + def test_indented_line_in_traceback(self): + assert classify_line(' File "test.py", line 1', in_traceback=True) == LineKind.TRACEBACK + + def test_tab_indented_line_in_traceback(self): + assert classify_line("\tFile \"test.py\", line 1", in_traceback=True) == LineKind.TRACEBACK + + +class TestClassifyDoesNotFilter: + """Classification must never remove or hide lines.""" + + def test_every_line_gets_a_kind(self): + lines = [ + "Error: bad", + "Warning: meh", + "INFO: ok", + "---", + "plain text", + "", + "Traceback (most recent call last):", + ' File "a.py", line 1', + "ValueError: fail", + ] + annotated = annotate_log_lines(lines) + assert len(annotated) == len(lines) + + def test_classification_preserves_text(self): + text = "Error: something went wrong" + assert classify_line(text).value # just returns a kind, text is separate + + +class TestTracebackAnnotation: + def test_complete_traceback_block(self): + lines = [ + "Traceback (most recent call last):", + ' File "app.py", line 42, in run', + " result = compute()", + " ^^^^^^^^^", + "ValueError: invalid value", + ] + annotated = annotate_log_lines(lines) + assert annotated[0].kind == LineKind.TRACEBACK + assert annotated[1].kind == LineKind.TRACEBACK + assert annotated[2].kind == LineKind.TRACEBACK + assert annotated[3].kind == LineKind.TRACEBACK + assert annotated[4].kind == LineKind.ERROR + + def test_traceback_with_blank_source_line(self): + lines = [ + "Traceback (most recent call last):", + ' File "app.py", line 10, in ', + "", 
+ "ValueError: oops", + ] + annotated = annotate_log_lines(lines) + assert annotated[0].kind == LineKind.TRACEBACK + assert annotated[1].kind == LineKind.TRACEBACK + assert annotated[2].kind == LineKind.PLAIN + assert annotated[3].kind == LineKind.ERROR + + def test_traceback_exits_on_non_indented_non_error(self): + lines = [ + "Traceback (most recent call last):", + ' File "a.py", line 1', + "ValueError: bad", + "next log line", + ] + annotated = annotate_log_lines(lines) + assert annotated[0].kind == LineKind.TRACEBACK + assert annotated[1].kind == LineKind.TRACEBACK + assert annotated[2].kind == LineKind.ERROR + assert annotated[3].kind == LineKind.PLAIN + + def test_traceback_order_preserved(self): + lines = [ + "Traceback (most recent call last):", + ' File "a.py", line 1', + ' File "b.py", line 2', + ' File "c.py", line 3', + "RuntimeError: end", + ] + annotated = annotate_log_lines(lines) + kinds = [a.kind for a in annotated] + assert kinds == [ + LineKind.TRACEBACK, + LineKind.TRACEBACK, + LineKind.TRACEBACK, + LineKind.TRACEBACK, + LineKind.ERROR, + ] + + def test_pre_traceback_info_preserved(self): + lines = [ + "INFO: starting step", + "some output", + "Traceback (most recent call last):", + ' File "a.py", line 1', + "ValueError: fail", + ] + annotated = annotate_log_lines(lines) + assert annotated[0].kind == LineKind.INFO + assert annotated[1].kind == LineKind.PLAIN + assert annotated[2].kind == LineKind.TRACEBACK + + def test_exception_classified_as_error(self): + lines = [ + "Traceback (most recent call last):", + ' File "a.py", line 1', + "Exception: something went wrong", + ] + annotated = annotate_log_lines(lines) + assert annotated[2].kind == LineKind.ERROR + + def test_keyboard_interrupt_classified_as_error(self): + lines = [ + "Traceback (most recent call last):", + ' File "a.py", line 1', + "KeyboardInterrupt", + ] + annotated = annotate_log_lines(lines) + assert annotated[2].kind == LineKind.ERROR + + def 
test_system_exit_classified_as_error(self): + lines = [ + "Traceback (most recent call last):", + ' File "a.py", line 1', + "SystemExit: 1", + ] + annotated = annotate_log_lines(lines) + assert annotated[2].kind == LineKind.ERROR + + def test_stop_iteration_classified_as_error(self): + lines = [ + "Traceback (most recent call last):", + ' File "a.py", line 1', + "StopIteration", + ] + annotated = annotate_log_lines(lines) + assert annotated[2].kind == LineKind.ERROR + + +class TestAnnotateLineNumbers: + def test_line_numbers_start_at_one(self): + lines = ["first", "second", "third"] + annotated = annotate_log_lines(lines) + assert [a.line_no for a in annotated] == [1, 2, 3] + + def test_empty_input(self): + assert annotate_log_lines([]) == [] + + +# --- Renderer tests --- + + +class TestBuildLogRecords: + def test_builds_records_with_all_fields(self): + lines = ["Error: bad", "INFO: ok"] + records = build_log_records("synthesis", "log/synthesis.log", lines, "ecc log synthesis") + assert len(records) == 2 + assert records[0]["step"] == "synthesis" + assert records[0]["source"] == "log/synthesis.log" + assert records[0]["line_no"] == 1 + assert records[0]["kind"] == "error" + assert records[0]["line"] == "Error: bad" + assert records[0]["inspect_cmd"] == "ecc log synthesis" + + def test_traceback_frames_in_records(self): + lines = [ + "Traceback (most recent call last):", + ' File "a.py", line 1', + "ValueError: fail", + ] + records = build_log_records("cts", "log/cts.log", lines, "ecc log cts") + assert records[0]["kind"] == "traceback" + assert records[1]["kind"] == "traceback" + assert records[2]["kind"] == "error" + + +class TestPrettyRenderer: + def test_header_includes_step_and_source(self): + from io import StringIO + buf = StringIO() + render_log_pretty("cts", "log/cts.log", ["ok"], "ecc log cts", file=buf, color=False) + out = buf.getvalue() + assert "[log] step=cts" in out + assert "source: log/cts.log" in out + + def test_all_lines_appear_in_output(self): 
+ from io import StringIO + lines = ["Error: bad", "INFO: ok", "plain line", "---", "Warning: meh"] + buf = StringIO() + render_log_pretty("cts", "log/cts.log", lines, "ecc log cts", file=buf, color=False) + out = buf.getvalue() + for line in lines: + assert line in out + + def test_traceback_complete_in_output(self): + from io import StringIO + lines = [ + "INFO: before", + "Traceback (most recent call last):", + ' File "a.py", line 1', + " x = bad()", + " ^^^^^", + "ValueError: oops", + "INFO: after", + ] + buf = StringIO() + render_log_pretty("cts", "log/cts.log", lines, "ecc log cts", file=buf, color=False) + out = buf.getvalue() + for line in lines: + assert line in out + + def test_inspect_footer(self): + from io import StringIO + buf = StringIO() + render_log_pretty("cts", "log/cts.log", ["ok"], "ecc log cts", file=buf, color=False) + out = buf.getvalue() + assert "inspect: ecc log cts" in out + + def test_no_ansi_when_color_disabled(self): + from io import StringIO + buf = StringIO() + render_log_pretty("cts", "log/cts.log", ["Error: bad"], "ecc log cts", file=buf, color=False) + assert "\x1b[" not in buf.getvalue() + + def test_ansi_when_color_enabled(self): + from io import StringIO + buf = StringIO() + render_log_pretty("cts", "log/cts.log", ["Error: bad"], "ecc log cts", file=buf, color=True) + assert "\x1b[" in buf.getvalue() + + def test_error_colored_red(self): + from io import StringIO + buf = StringIO() + render_log_pretty("cts", "log/cts.log", ["Error: bad"], "ecc log cts", file=buf, color=True) + out = buf.getvalue() + assert "\x1b[31m" in out + + def test_warning_colored_yellow(self): + from io import StringIO + buf = StringIO() + render_log_pretty("cts", "log/cts.log", ["Warning: meh"], "ecc log cts", file=buf, color=True) + out = buf.getvalue() + assert "\x1b[33m" in out + + def test_section_colored_cyan(self): + from io import StringIO + buf = StringIO() + render_log_pretty("cts", "log/cts.log", ["---"], "ecc log cts", file=buf, color=True) + 
out = buf.getvalue() + assert "\x1b[36m" in out + + def test_info_colored_blue(self): + from io import StringIO + buf = StringIO() + render_log_pretty("cts", "log/cts.log", ["INFO: ok"], "ecc log cts", file=buf, color=True) + out = buf.getvalue() + assert "\x1b[34m" in out + + def test_traceback_colored_yellow(self): + from io import StringIO + lines = [ + "Traceback (most recent call last):", + ' File "a.py", line 1', + "ValueError: bad", + ] + buf = StringIO() + render_log_pretty("cts", "log/cts.log", lines, "ecc log cts", file=buf, color=True) + out = buf.getvalue() + assert "\x1b[33m" in out + + +# --------------------------------------------------------------------------- +# Full error line coloring (AC-2) +# --------------------------------------------------------------------------- + + +class TestErrorLineFullColoring: + def test_error_label_and_message_both_red(self): + from io import StringIO + buf = StringIO() + render_log_pretty("cts", "log/cts.log", ["Error: something failed"], "ecc log cts", file=buf, color=True) + out = buf.getvalue() + + red_idx = out.find("\x1b[31m") + assert red_idx >= 0 + reset_idx = out.find("\x1b[0m", red_idx) + assert reset_idx > red_idx + between = out[red_idx:reset_idx] + assert "error" in between + assert "something failed" in between + + def test_error_message_content_not_default_after_label(self): + from io import StringIO + buf = StringIO() + render_log_pretty("cts", "log/cts.log", ["Error: critical failure"], "ecc log cts", file=buf, color=True) + out = buf.getvalue() + idx = out.find("error") + after_label = out[idx:] + assert "critical failure" in after_label + + def test_warning_line_keeps_label_only_color(self): + from io import StringIO + buf = StringIO() + render_log_pretty("cts", "log/cts.log", ["Warning: check this"], "ecc log cts", file=buf, color=True) + out = buf.getvalue() + assert "\x1b[33m" in out + assert "Warning: check this" in out + + def test_info_plain_section_unchanged(self): + from io import 
StringIO + buf = StringIO() + lines = ["INFO: running", "some plain text", "---"] + render_log_pretty("cts", "log/cts.log", lines, "ecc log cts", file=buf, color=True) + out = buf.getvalue() + assert "\x1b[34m" in out + assert "some plain text" in out + assert "---" in out + + def test_error_line_no_ansi_when_color_disabled(self): + from io import StringIO + buf = StringIO() + render_log_pretty("cts", "log/cts.log", ["Error: bad"], "ecc log cts", file=buf, color=False) + out = buf.getvalue() + assert "\x1b[" not in out + + def test_non_error_lines_not_colored_red(self): + from io import StringIO + lines = ["Warning: meh", "INFO: ok", "plain", "---"] + buf = StringIO() + render_log_pretty("cts", "log/cts.log", lines, "ecc log cts", file=buf, color=True) + out = buf.getvalue() + red_count = out.count("\x1b[31m") + assert red_count == 0 + + +# --------------------------------------------------------------------------- +# Context extraction (AC-3, AC-4) +# --------------------------------------------------------------------------- + + +class TestExtractErrorContextAnchor: + def test_last_error_wins(self): + lines = ["INFO: start", "Error: first", "plain", "Error: last", "INFO: end"] + result = extract_error_context(lines, max_lines=50) + kinds = [ll.kind for ll in result] + assert LineKind.ERROR in kinds + anchor_texts = [ll.text for ll in result if ll.kind == LineKind.ERROR] + assert "Error: last" in anchor_texts + + def test_traceback_when_no_error(self): + lines = [ + "INFO: start", + "Traceback (most recent call last):", + ' File "a.py", line 1', + "RuntimeError: boom", + ] + result = extract_error_context(lines, max_lines=50) + kinds = [ll.kind for ll in result] + assert LineKind.TRACEBACK in kinds + + def test_failed_keyword_when_no_error_or_traceback(self): + lines = ["INFO: start", "step failed: timeout", "plain after"] + result = extract_error_context(lines, max_lines=50) + texts = [ll.text for ll in result] + assert any("failed" in t.lower() for t in texts) + 
+ def test_last_nonempty_when_no_failure(self): + lines = ["INFO: start", "some output", "final output"] + result = extract_error_context(lines, max_lines=50) + assert result[-1].text == "final output" + + def test_empty_input(self): + assert extract_error_context([], max_lines=50) == [] + + +class TestExtractErrorContextWindow: + def test_max_50_lines(self): + lines = [f"line {i}" for i in range(100)] + lines[80] = "Error: failure at 80" + result = extract_error_context(lines, max_lines=50) + assert len(result) <= 50 + + def test_preserves_line_numbers(self): + lines = [f"line {i}" for i in range(100)] + lines[30] = "Error: mid" + result = extract_error_context(lines, max_lines=50) + line_nos = [ll.line_no for ll in result] + assert line_nos == sorted(line_nos) + for ll in result: + assert ll.line_no >= 1 + assert ll.text == lines[ll.line_no - 1] + + def test_preserves_order(self): + lines = [f"line {i}" for i in range(10)] + lines[5] = "Error: mid" + result = extract_error_context(lines, max_lines=50) + line_nos = [ll.line_no for ll in result] + assert line_nos == sorted(line_nos) + + def test_fewer_than_max_returns_all(self): + lines = ["one", "Error: two", "three"] + result = extract_error_context(lines, max_lines=50) + assert len(result) == 3 + + def test_anchor_last_error_not_first(self): + lines = ["Error: first", "plain", "Error: last", "plain"] + result = extract_error_context(lines, max_lines=50) + error_lines = [ll for ll in result if ll.kind == LineKind.ERROR] + assert len(error_lines) >= 1 + + +class TestExtractErrorContextTraceback: + def test_traceback_includes_stack_frames(self): + lines = [ + "INFO: before", + "Traceback (most recent call last):", + ' File "a.py", line 10, in f', + ' File "b.py", line 20, in g', + "ValueError: bad value", + "INFO: after", + ] + result = extract_error_context(lines, max_lines=50) + kinds = [ll.kind for ll in result] + assert LineKind.TRACEBACK in kinds + traceback_texts = [ll.text for ll in result if ll.kind == 
LineKind.TRACEBACK] + assert any("File" in t for t in traceback_texts) + + def test_final_exception_visible_in_window(self): + lines = ["line " + str(i) for i in range(60)] + lines[52] = "Traceback (most recent call last):" + lines[53] = ' File "a.py", line 1, in run' + lines[54] = "ValueError: final exception" + lines[55] = "line 55" + result = extract_error_context(lines, max_lines=50) + texts = [ll.text for ll in result] + assert "ValueError: final exception" in texts + + def test_traceback_context_not_exceed_max(self): + lines = ["line " + str(i) for i in range(100)] + lines[60] = "Traceback (most recent call last):" + for i in range(61, 75): + lines[i] = f' File "mod{i}.py", line {i}' + lines[75] = "RuntimeError: deep traceback" + result = extract_error_context(lines, max_lines=50) + assert len(result) <= 50 + + def test_traceback_lines_in_order(self): + lines = [ + "Traceback (most recent call last):", + ' File "a.py", line 1', + ' File "b.py", line 2', + "ValueError: boom", + ] + result = extract_error_context(lines, max_lines=50) + line_nos = [ll.line_no for ll in result] + assert line_nos == sorted(line_nos) + + +class TestPlainRenderer: + def test_emits_one_record_per_line(self): + from io import StringIO + lines = ["Error: bad", "INFO: ok", "plain"] + buf = StringIO() + render_log_plain("cts", "log/cts.log", lines, "ecc log cts", file=buf) + out_lines = [l for l in buf.getvalue().strip().split("\n") if l.strip()] + assert len(out_lines) == 3 + + def test_record_has_required_fields(self): + from io import StringIO + buf = StringIO() + render_log_plain("cts", "log/cts.log", ["ok"], "ecc log cts", file=buf) + line = buf.getvalue().strip() + assert "step=cts" in line + assert "source=log/cts.log" in line + assert "line_no=1" in line + assert "kind=plain" in line + assert "line=ok" in line + assert "inspect_cmd=" in line + + def test_values_with_spaces_are_quoted(self): + from io import StringIO + buf = StringIO() + render_log_plain("cts", "log/cts.log", 
["line with spaces"], "ecc log cts --project /tmp/a b", file=buf) + line = buf.getvalue().strip() + assert 'line="line with spaces"' in line + assert 'inspect_cmd="ecc log cts --project /tmp/a b"' in line + + def test_values_with_backslashes_escaped(self): + from io import StringIO + buf = StringIO() + render_log_plain("cts", "log/cts.log", ['path\\to\\file'], "ecc log cts", file=buf) + line = buf.getvalue().strip() + assert 'line="path\\\\to\\\\file"' in line + + def test_values_with_equals_quoted(self): + from io import StringIO + buf = StringIO() + render_log_plain("cts", "log/cts.log", ["key=value"], "ecc log cts", file=buf) + line = buf.getvalue().strip() + assert 'line="key=value"' in line + + def test_no_ansi_in_plain(self): + from io import StringIO + buf = StringIO() + render_log_plain("cts", "log/cts.log", ["Error: bad"], "ecc log cts", file=buf) + assert "\x1b[" not in buf.getvalue() + + def test_traceback_frames_in_plain(self): + from io import StringIO + lines = [ + "Traceback (most recent call last):", + ' File "a.py", line 1', + "ValueError: fail", + ] + buf = StringIO() + render_log_plain("cts", "log/cts.log", lines, "ecc log cts", file=buf) + out_lines = [l for l in buf.getvalue().strip().split("\n") if l.strip()] + assert len(out_lines) == 3 + assert "kind=traceback" in out_lines[0] + assert "kind=traceback" in out_lines[1] + assert "kind=error" in out_lines[2] + + +class TestColorGuards: + def test_no_color_when_not_tty(self): + import io + from unittest.mock import patch + + class FakeNonTTY: + def isatty(self): + return False + + with patch("sys.stdout", FakeNonTTY()): + buf = io.StringIO() + render_log_pretty("cts", "log/cts.log", ["Error: bad"], "ecc log cts", file=buf, color=False) + assert "\x1b[" not in buf.getvalue() + + def test_no_color_when_no_color_env(self): + import os + import io + from unittest.mock import patch + + with patch.dict(os.environ, {"NO_COLOR": "1"}): + buf = io.StringIO() + render_log_pretty("cts", "log/cts.log", 
["Error: bad"], "ecc log cts", file=buf, color=False) + assert "\x1b[" not in buf.getvalue() + + def test_no_color_when_term_dumb(self): + import os + import io + from unittest.mock import patch + + with patch.dict(os.environ, {"TERM": "dumb"}): + buf = io.StringIO() + render_log_pretty("cts", "log/cts.log", ["Error: bad"], "ecc log cts", file=buf, color=False) + assert "\x1b[" not in buf.getvalue() + + +class TestListingPrettyRenderer: + def test_listing_header(self): + from io import StringIO + records = [ + {"step": "synthesis", "source": "Synthesis_yosys/log/synthesis.log", "inspect_cmd": "ecc log synthesis"}, + ] + buf = StringIO() + render_log_listing_pretty(records, file=buf, color=False) + assert "[logs]" in buf.getvalue() + + def test_listing_shows_step_and_source(self): + from io import StringIO + records = [ + {"step": "synthesis", "source": "Synthesis_yosys/log/synthesis.log", "inspect_cmd": "ecc log synthesis"}, + ] + buf = StringIO() + render_log_listing_pretty(records, file=buf, color=False) + out = buf.getvalue() + assert "synthesis" in out + assert "Synthesis_yosys/log/synthesis.log" in out + + def test_listing_inspect_cmd(self): + from io import StringIO + records = [ + {"step": "synthesis", "source": "Synthesis_yosys/log/synthesis.log", "inspect_cmd": "ecc log synthesis"}, + ] + buf = StringIO() + render_log_listing_pretty(records, file=buf, color=False) + assert "ecc log synthesis" in buf.getvalue() + + def test_listing_color_enabled_no_crash(self): + from io import StringIO + records = [ + {"step": "synthesis", "source": "Synthesis_yosys/log/synthesis.log", "inspect_cmd": "ecc log synthesis"}, + ] + buf = StringIO() + render_log_listing_pretty(records, file=buf, color=True) + out = buf.getvalue() + assert "[logs]" in out + assert "synthesis" in out + assert "Synthesis_yosys/log/synthesis.log" in out + assert "ecc log synthesis" in out + + def test_listing_color_enabled_has_ansi(self): + from io import StringIO + records = [ + {"step": 
"synthesis", "source": "Synthesis_yosys/log/synthesis.log", "inspect_cmd": "ecc log synthesis"}, + ] + buf = StringIO() + render_log_listing_pretty(records, file=buf, color=True) + assert "\x1b[" in buf.getvalue() + + def test_listing_color_disabled_no_ansi(self): + from io import StringIO + records = [ + {"step": "synthesis", "source": "Synthesis_yosys/log/synthesis.log", "inspect_cmd": "ecc log synthesis"}, + ] + buf = StringIO() + render_log_listing_pretty(records, file=buf, color=False) + assert "\x1b[" not in buf.getvalue() + + +class TestTailLinesForLog: + def test_returns_last_10_non_empty(self, tmp_path): + from chipcompiler.cli.log_view import tail_lines_for_log + log_file = tmp_path / "test.log" + lines = [f"line {i}" for i in range(15)] + log_file.write_text("\n".join(lines)) + result = tail_lines_for_log(str(log_file)) + assert len(result) == 10 + assert result[0] == "line 5" + assert result[-1] == "line 14" + + def test_fewer_than_10_returns_all(self, tmp_path): + from chipcompiler.cli.log_view import tail_lines_for_log + log_file = tmp_path / "test.log" + log_file.write_text("a\nb\nc\n") + result = tail_lines_for_log(str(log_file)) + assert result == ["a", "b", "c"] + + def test_empty_lines_omitted(self, tmp_path): + from chipcompiler.cli.log_view import tail_lines_for_log + log_file = tmp_path / "test.log" + log_file.write_text("a\n\n\nb\n\n\nc\n") + result = tail_lines_for_log(str(log_file)) + assert result == ["a", "b", "c"] + + def test_preserves_order(self, tmp_path): + from chipcompiler.cli.log_view import tail_lines_for_log + log_file = tmp_path / "test.log" + log_file.write_text("first\nmiddle\nlast\n") + result = tail_lines_for_log(str(log_file)) + assert result == ["first", "middle", "last"] + + def test_ansi_stripped(self, tmp_path): + from chipcompiler.cli.log_view import tail_lines_for_log + log_file = tmp_path / "test.log" + log_file.write_text("\x1b[31mred text\x1b[0m\nnormal\n") + result = tail_lines_for_log(str(log_file)) + assert 
result == ["red text", "normal"] + + def test_missing_file_returns_empty(self, tmp_path): + from chipcompiler.cli.log_view import tail_lines_for_log + result = tail_lines_for_log(str(tmp_path / "nonexistent.log")) + assert result == [] + + def test_empty_file_returns_empty(self, tmp_path): + from chipcompiler.cli.log_view import tail_lines_for_log + log_file = tmp_path / "test.log" + log_file.write_text("") + result = tail_lines_for_log(str(log_file)) + assert result == [] + + def test_blank_only_file_returns_empty(self, tmp_path): + from chipcompiler.cli.log_view import tail_lines_for_log + log_file = tmp_path / "test.log" + log_file.write_text(" \n\n\t\n \n") + result = tail_lines_for_log(str(log_file)) + assert result == [] + + def test_ansi_control_sequences_stripped(self, tmp_path): + from chipcompiler.cli.log_view import tail_lines_for_log + log_file = tmp_path / "test.log" + log_file.write_text("\x1b[31mred\x1b[0m\n\x1b[2Kclear\nvalid\n") + result = tail_lines_for_log(str(log_file)) + assert "\x1b[" not in " ".join(result) + assert "valid" in result + + def test_osc_sequences_stripped(self, tmp_path): + from chipcompiler.cli.log_view import tail_lines_for_log + log_file = tmp_path / "test.log" + log_file.write_text("\x1b]0;window title\x07message\n") + result = tail_lines_for_log(str(log_file)) + assert result == ["message"] + + def test_dcs_sequences_stripped(self, tmp_path): + from chipcompiler.cli.log_view import tail_lines_for_log + log_file = tmp_path / "test.log" + log_file.write_text("\x1bP$data\x1b\\visible\n") + result = tail_lines_for_log(str(log_file)) + assert result == ["visible"] + + def test_bel_and_backspace_stripped(self, tmp_path): + from chipcompiler.cli.log_view import tail_lines_for_log + log_file = tmp_path / "test.log" + log_file.write_text("a\x07b\x08c\ndone\n") + result = tail_lines_for_log(str(log_file)) + assert result == ["abc", "done"] + + def test_unreadable_file_returns_empty(self, tmp_path): + from chipcompiler.cli.log_view 
import tail_lines_for_log + log_file = tmp_path / "test.log" + log_file.write_text("content\n") + os.chmod(str(log_file), 0o000) + try: + result = tail_lines_for_log(str(log_file)) + assert result == [] + finally: + os.chmod(str(log_file), 0o644) + + +class TestListingTailRendering: + def test_tail_block_header_with_indented_lines(self): + from io import StringIO + records = [ + {"step": "synthesis", "source": "synth.log", "inspect_cmd": "ecc log synthesis"}, + ] + tail_map = {"synth.log": ["line 1", "line 2"]} + buf = StringIO() + render_log_listing_pretty(records, file=buf, color=False, tail_map=tail_map) + out = buf.getvalue() + lines = out.split("\n") + tail_idx = next(i for i, l in enumerate(lines) if l.strip() == "tail:") + assert "line 1" in lines[tail_idx + 1] + assert "line 2" in lines[tail_idx + 2] + assert lines[tail_idx + 1].startswith(" ") + + def test_inspect_remains_below_tail(self): + from io import StringIO + records = [ + {"step": "synthesis", "source": "synth.log", "inspect_cmd": "ecc log synthesis"}, + ] + tail_map = {"synth.log": ["preview"]} + buf = StringIO() + render_log_listing_pretty(records, file=buf, color=False, tail_map=tail_map) + out = buf.getvalue() + tail_pos = out.find("tail:") + inspect_pos = out.find("inspect:") + assert tail_pos < inspect_pos + + def test_no_tail_block_when_empty(self): + from io import StringIO + records = [ + {"step": "synthesis", "source": "synth.log", "inspect_cmd": "ecc log synthesis"}, + ] + tail_map = {"synth.log": []} + buf = StringIO() + render_log_listing_pretty(records, file=buf, color=False, tail_map=tail_map) + out = buf.getvalue() + assert "tail:" not in out + assert "inspect:" in out + + def test_no_tail_block_when_source_not_in_map(self): + from io import StringIO + records = [ + {"step": "synthesis", "source": "synth.log", "inspect_cmd": "ecc log synthesis"}, + ] + buf = StringIO() + render_log_listing_pretty(records, file=buf, color=False, tail_map={}) + out = buf.getvalue() + assert "tail:" 
not in out + assert "inspect:" in out + + def test_no_tail_block_when_tail_map_is_none(self): + from io import StringIO + records = [ + {"step": "synthesis", "source": "synth.log", "inspect_cmd": "ecc log synthesis"}, + ] + buf = StringIO() + render_log_listing_pretty(records, file=buf, color=False, tail_map=None) + out = buf.getvalue() + assert "tail:" not in out + assert "inspect:" in out + + def test_run_level_entry_labeled_run(self): + from io import StringIO + records = [ + {"log": "log/flow.log", "inspect_cmd": "ecc log"}, + ] + buf = StringIO() + render_log_listing_pretty(records, file=buf, color=False) + out = buf.getvalue() + assert " run log/flow.log" in out + assert "inspect:" in out diff --git a/test/cli/test_params.py b/test/cli/test_params.py new file mode 100644 index 00000000..901f5f1b --- /dev/null +++ b/test/cli/test_params.py @@ -0,0 +1,338 @@ +import pytest + +from chipcompiler.cli.params import ( + PARAM_REGISTRY, + ParamSchema, + ResolvedParam, + build_backend_overrides, + is_known_key, + list_groups, + list_schemas, + lookup_schema, + parse_cli_overrides, + parse_toml_params, + parse_value, + resolve_parameters, + validate_schema_record, + validate_value, +) + +REQUIRED_KEYS = [ + "design.frequency_mhz", + "floorplan.core_util", + "floorplan.core_margin", + "floorplan.aspect_ratio", + "synth.max_fanout", + "place.target_density", + "place.target_overflow", + "place.global_right_padding", + "place.cell_padding_x", + "place.routability_opt", + "route.bottom_layer", + "route.top_layer", +] + + +class TestSchemaRegistry: + def test_registry_contains_all_required_keys(self): + params = {s.param for s in PARAM_REGISTRY} + for key in REQUIRED_KEYS: + assert key in params, f"Missing key: {key}" + + def test_every_record_has_required_metadata(self): + required = ("param", "group", "name", "type", "default", "applies", "maps_to", "description") + for schema in PARAM_REGISTRY: + for field_name in required: + val = getattr(schema, field_name, None) + 
assert val is not None and val != "", ( + f"{schema.param} missing required field: {field_name}" + ) + + def test_optional_fields_present_when_relevant(self): + for schema in PARAM_REGISTRY: + if schema.type in ("float", "int") and schema.choices is None: + assert schema.range is not None, ( + f"{schema.param}: numeric param without range or choices should have range" + ) + + def test_cli_keys_map_to_backend_names(self): + density = lookup_schema("place.target_density") + assert density.maps_to == {"DreamPlace": "target_density"} + + fanout = lookup_schema("synth.max_fanout") + assert fanout.maps_to == "Max fanout" + + util = lookup_schema("floorplan.core_util") + assert util.maps_to == {"Core": "Utilitization"} + + def test_internal_keys_not_accepted_as_cli_keys(self): + assert not is_known_key("Core.Utilitization") + assert not is_known_key("Target density") + assert not is_known_key("Max fanout") + assert not is_known_key("Frequency max [MHz]") + + def test_schema_record_missing_required_fields_rejected(self): + bad = ParamSchema( + param="", group="", name="", type="int", default=0, + applies="", maps_to="", description="", + ) + errors = validate_schema_record(bad) + assert len(errors) > 0 + + def test_lookup_schema_returns_none_for_unknown(self): + assert lookup_schema("nonexistent.key") is None + + def test_list_groups_returns_ordered_groups(self): + groups = list_groups() + assert "design" in groups + assert "floorplan" in groups + assert "synth" in groups + assert "place" in groups + assert "route" in groups + + +class TestValueParsing: + @pytest.mark.parametrize("raw,ptype,expected", [ + ("0.65", "float", 0.65), + ("42", "int", 42), + ("true", "bool", True), + ("false", "bool", False), + ("MET5", "str", "MET5"), + ("1.5,2.5", "list[float]", [1.5, 2.5]), + ("1,2,3", "list[int]", [1, 2, 3]), + ("a,b,c", "list[str]", ["a", "b", "c"]), + ]) + def test_parse_value_correct_types(self, raw, ptype, expected): + schema = lookup_schema("place.target_density") + 
schema = ParamSchema( + param="test", group="test", name="test", type=ptype, + default=None, applies="test", maps_to="test", description="test", + ) + result = parse_value(raw, schema) + assert result == expected + + def test_parse_int_rejects_alpha(self): + schema = ParamSchema( + param="test", group="test", name="test", type="int", + default=0, applies="test", maps_to="test", description="test", + ) + with pytest.raises(ValueError, match="expected int"): + parse_value("abc", schema) + + def test_parse_float_rejects_alpha(self): + schema = ParamSchema( + param="test", group="test", name="test", type="float", + default=0.0, applies="test", maps_to="test", description="test", + ) + with pytest.raises(ValueError, match="expected float"): + parse_value("not_a_number", schema) + + def test_range_validation_rejects_out_of_bounds(self): + schema = lookup_schema("place.target_density") + errors = validate_value(1.2, schema) + assert len(errors) > 0 + assert "out of range" in errors[0] + + def test_range_validation_accepts_in_bounds(self): + schema = lookup_schema("place.target_density") + errors = validate_value(0.5, schema) + assert errors == [] + + def test_choice_validation_rejects_invalid(self): + schema = lookup_schema("route.top_layer") + errors = validate_value("MET99", schema) + assert len(errors) > 0 + assert "not in allowed choices" in errors[0] + + def test_choice_validation_accepts_valid(self): + schema = lookup_schema("route.top_layer") + errors = validate_value("MET5", schema) + assert errors == [] + + def test_unknown_key_returns_error_in_cli_overrides(self): + result, errors = parse_cli_overrides(["unknown.key=5"]) + assert len(errors) > 0 + assert "unknown parameter" in errors[0] + + def test_malformed_key_value_rejected(self): + result, errors = parse_cli_overrides(["no_equals_sign"]) + assert len(errors) > 0 + assert "malformed" in errors[0] + + def test_out_of_range_value_rejected(self): + result, errors = 
parse_cli_overrides(["place.target_density=1.2"]) + assert len(errors) > 0 + + def test_type_mismatch_rejected(self): + result, errors = parse_cli_overrides(["synth.max_fanout=abc"]) + assert len(errors) > 0 + + +class TestSourceAwareResolution: + def test_default_source_when_no_overrides(self): + resolved, errors = resolve_parameters() + assert len(errors) == 0 + for rp in resolved: + assert rp.source == "default" + + def test_toml_override_source(self): + toml = {"place.target_density": 0.65} + resolved, errors = resolve_parameters(toml_overrides=toml) + assert errors == [] + density = next(r for r in resolved if r.param == "place.target_density") + assert density.value == 0.65 + assert density.source == "ecc.toml" + + def test_cli_override_source(self): + cli = {"place.target_density": 0.7} + resolved, errors = resolve_parameters(cli_overrides=cli) + assert errors == [] + density = next(r for r in resolved if r.param == "place.target_density") + assert density.value == 0.7 + assert density.source == "cli" + + def test_cli_beats_toml(self): + toml = {"place.target_density": 0.65} + cli = {"place.target_density": 0.7} + resolved, errors = resolve_parameters(toml_overrides=toml, cli_overrides=cli) + assert errors == [] + density = next(r for r in resolved if r.param == "place.target_density") + assert density.value == 0.7 + assert density.source == "cli" + + def test_invalid_toml_type_produces_error(self): + toml = {"synth.max_fanout": "not_int"} + resolved, errors = resolve_parameters(toml_overrides=toml) + assert len(errors) > 0 + + def test_float_rejected_for_int_schema(self): + toml = {"synth.max_fanout": 16.5} + resolved, errors = resolve_parameters(toml_overrides=toml) + assert len(errors) > 0 + + def test_bool_rejected_for_int_schema(self): + toml = {"synth.max_fanout": True} + resolved, errors = resolve_parameters(toml_overrides=toml) + assert len(errors) > 0 + + def test_int_accepted_for_float_schema(self): + toml = {"place.target_density": 1} + resolved, 
errors = resolve_parameters(toml_overrides=toml) + # 1 converts to 1.0 which is out of range for target_density + assert len(errors) > 0 # range validation catches it + + def test_int_in_range_accepted_for_float_schema(self): + toml = {"floorplan.core_util": 1} + resolved, errors = resolve_parameters(toml_overrides=toml) + assert errors == [] + util = next(r for r in resolved if r.param == "floorplan.core_util") + assert util.value == 1.0 + + def test_float_in_list_int_rejected(self): + toml = {"floorplan.core_margin": [2.5, 3]} + resolved, errors = resolve_parameters(toml_overrides=toml) + assert len(errors) > 0 + + def test_str_rejected_for_int_schema(self): + toml = {"synth.max_fanout": "abc"} + resolved, errors = resolve_parameters(toml_overrides=toml) + assert len(errors) > 0 + + +class TestBackendMapping: + def test_flat_key_mapping(self): + schema = lookup_schema("place.target_density") + rp = ResolvedParam( + param="place.target_density", value=0.65, default=0.8, + source="cli", schema=schema, + ) + result = build_backend_overrides([rp]) + assert result == {"DreamPlace": {"target_density": 0.65}} + + def test_nested_key_mapping(self): + schema = lookup_schema("floorplan.core_util") + rp = ResolvedParam( + param="floorplan.core_util", value=0.45, default=0.4, + source="cli", schema=schema, + ) + result = build_backend_overrides([rp]) + assert result == {"Core": {"Utilitization": 0.45}} + + def test_nested_list_mapping(self): + schema = lookup_schema("floorplan.core_margin") + rp = ResolvedParam( + param="floorplan.core_margin", value=(3, 3), default=(2, 2), + source="cli", schema=schema, + ) + result = build_backend_overrides([rp]) + assert result == {"Core": {"Margin": (3, 3)}} + + def test_string_key_mapping(self): + schema = lookup_schema("route.top_layer") + rp = ResolvedParam( + param="route.top_layer", value="MET4", default="MET5", + source="cli", schema=schema, + ) + result = build_backend_overrides([rp]) + assert result == {"Top layer": "MET4"} + + 
def test_default_values_excluded(self): + resolved, _ = resolve_parameters() + result = build_backend_overrides(resolved) + assert result == {} + + def test_mapping_does_not_mutate_schema_defaults(self): + schema = lookup_schema("place.target_density") + original_default = schema.default + rp = ResolvedParam( + param="place.target_density", value=0.65, default=original_default, + source="cli", schema=schema, + ) + build_backend_overrides([rp]) + assert schema.default == original_default + + +class TestCliOverrides: + def test_repeatable_set(self): + result, errors = parse_cli_overrides([ + "place.target_density=0.65", + "synth.max_fanout=16", + ]) + assert errors == [] + assert result == {"place.target_density": 0.65, "synth.max_fanout": 16} + + def test_malformed_rejected(self): + result, errors = parse_cli_overrides(["noequals"]) + assert len(errors) > 0 + + def test_unknown_key_rejected(self): + result, errors = parse_cli_overrides(["bogus.key=5"]) + assert len(errors) > 0 + + def test_raw_backend_key_rejected(self): + result, errors = parse_cli_overrides(["Target density=0.5"]) + assert len(errors) > 0 + + def test_invalid_value_does_not_produce_override(self): + result, errors = parse_cli_overrides(["place.target_density=1.5"]) + assert "place.target_density" not in result + assert len(errors) > 0 + + +class TestTomlParams: + def test_flat_toml_parsing(self): + table = {"place": {"target_density": 0.65}} + flat, errors = parse_toml_params(table) + assert errors == [] + assert flat == {"place.target_density": 0.65} + + def test_unknown_toml_key_rejected(self): + table = {"bogus": {"key": 5}} + flat, errors = parse_toml_params(table) + assert len(errors) > 0 + assert "unknown parameter" in errors[0] + + def test_non_table_toml_section_rejected(self): + table = {"place": "not_a_table"} + flat, errors = parse_toml_params(table) + assert len(errors) > 0 diff --git a/test/cli/test_pretty.py b/test/cli/test_pretty.py new file mode 100644 index 00000000..3aac8b06 --- 
/dev/null +++ b/test/cli/test_pretty.py @@ -0,0 +1,557 @@ +import io +import json +import os + +from chipcompiler.cli import main as cli_main +from chipcompiler.cli.pretty import ( + BOLD, + CYAN, + DIM, + GREEN, + RED, + RESET, + YELLOW, + display_key, + render_header, + status_style, + style, + supports_color, +) +from chipcompiler.cli.render import _plain_value, render_plain +from io import StringIO +from chipcompiler.cli.types import CommandResult + + +# --------------------------------------------------------------------------- +# Helpers +# --------------------------------------------------------------------------- + + +def _create_valid_project(tmp_path, name="gcd", pdk_root=None): + project_dir = tmp_path / name + project_dir.mkdir(exist_ok=True) + (project_dir / "rtl").mkdir(exist_ok=True) + (project_dir / "constraints").mkdir(exist_ok=True) + (project_dir / "runs").mkdir(exist_ok=True) + + rtl_file = project_dir / "rtl" / "gcd.v" + rtl_file.write_text("module gcd(input clk); endmodule\n") + + if pdk_root is None: + pdk_root = tmp_path / "ics55" + pdk_root.mkdir(exist_ok=True) + + toml = f'''[design] +name = "{name}" +top = "{name}" +rtl = ["rtl/gcd.v"] +clock_port = "clk" +frequency_mhz = 100.0 + +[pdk] +name = "ics55" +root = "{pdk_root}" + +[flow] +preset = "rtl2gds" +run = "default" +''' + (project_dir / "ecc.toml").write_text(toml) + return str(project_dir) + + +def _create_flow_json(run_dir, steps=None): + import json as j + home = os.path.join(run_dir, "home") + os.makedirs(home, exist_ok=True) + if steps is None: + steps = [ + {"name": "Synthesis", "tool": "yosys", "state": "Success", "runtime": "0:00:05"}, + ] + with open(os.path.join(home, "flow.json"), "w") as f: + j.dump({"steps": steps}, f) + + +# --------------------------------------------------------------------------- +# Plain key-value stability tests +# --------------------------------------------------------------------------- + + +class TestPlainQuoting: + def 
test_plain_value_no_quoting_for_simple(self): + assert _plain_value("hello") == "hello" + + def test_plain_value_quotes_spaces(self): + assert _plain_value("hello world") == '"hello world"' + + def test_plain_value_quotes_equals(self): + assert _plain_value("a=b") == '"a=b"' + + def test_plain_value_escapes_backslashes(self): + assert _plain_value("path\\to\\file") == '"path\\\\to\\\\file"' + + def test_plain_value_escapes_quotes(self): + assert _plain_value('say "hi"') == '"say \\"hi\\""' + + def test_plain_value_numeric(self): + assert _plain_value(42) == "42" + + def test_render_plain_one_record_per_line(self): + records = ( + {"a": "1", "b": "two words"}, + {"c": "3"}, + ) + buf = StringIO() + render_plain(records, file=buf) + lines = [l for l in buf.getvalue().strip().split("\n") if l.strip()] + assert len(lines) == 2 + assert "a=1" in lines[0] + assert 'b="two words"' in lines[0] + + def test_render_plain_no_ansi(self): + records = ({"status": "success", "path": "/tmp/x"},) + buf = StringIO() + render_plain(records, file=buf) + assert "\x1b[" not in buf.getvalue() + + +# --------------------------------------------------------------------------- +# Display key normalization +# --------------------------------------------------------------------------- + + +class TestDisplayKey: + def test_strips_cmd_suffix(self): + assert display_key("inspect_cmd") == "inspect" + + def test_preserves_non_cmd(self): + assert display_key("status") == "status" + + def test_replaces_underscores(self): + assert display_key("run_dir") == "run dir" + + +# --------------------------------------------------------------------------- +# Color gating +# --------------------------------------------------------------------------- + + +class TestColorGating: + def test_supports_color_non_tty(self): + assert not supports_color(file=StringIO()) + + def test_style_disabled(self): + assert style("text", RED, enabled=False) == "text" + + def test_style_enabled(self): + styled = style("text", 
RED, enabled=True) + assert RED in styled + assert RESET in styled + + def test_status_style_known_states(self): + assert GREEN in status_style("success", color=True) + assert RED in status_style("failed", color=True) + assert YELLOW in status_style("pending", color=True) + + def test_status_style_unknown_passthrough(self): + assert status_style("unknown_state", color=True) == "unknown_state" + + def test_status_style_no_color(self): + assert status_style("success", color=False) == "success" + + +# --------------------------------------------------------------------------- +# Pretty header rendering +# --------------------------------------------------------------------------- + + +class TestPrettyHeader: + def test_header_with_color(self): + h = render_header("status", color=True) + assert BOLD in h + assert RESET in h + assert "[status]" in h + + def test_header_without_color(self): + h = render_header("status", color=False) + assert "\x1b[" not in h + assert "[status]" in h + + +# --------------------------------------------------------------------------- +# CLI --plain acceptance tests +# --------------------------------------------------------------------------- + + +class TestPlainFlagAcceptance: + def test_init_plain(self, tmp_path, capsys): + rc = cli_main.run(["init", str(tmp_path / "p"), "--plain"]) + assert rc == 0 + out = capsys.readouterr().out + assert "\x1b[" not in out + assert "=" in out + + def test_check_plain(self, tmp_path, monkeypatch, capsys): + project_dir = _create_valid_project(tmp_path) + monkeypatch.setattr( + "chipcompiler.cli.config._validate_pdk_contents", + lambda name, root: None, + ) + rc = cli_main.run(["check", "--project", project_dir, "--plain"]) + assert rc == 0 + out = capsys.readouterr().out + assert "\x1b[" not in out + assert "=" in out + + def test_status_plain(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + _create_flow_json(os.path.join(project_dir, "runs", "default")) + rc = 
cli_main.run(["status", "--project", project_dir, "--plain"]) + assert rc == 0 + out = capsys.readouterr().out + assert "\x1b[" not in out + assert "status=" in out + + def test_metrics_plain(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + _create_flow_json(run_dir) + analysis_dir = os.path.join(run_dir, "Synthesis_yosys", "analysis") + os.makedirs(analysis_dir, exist_ok=True) + with open(os.path.join(analysis_dir, "Synthesis_metrics.json"), "w") as f: + json.dump({"Cell number": 312}, f) + rc = cli_main.run(["metrics", "synthesis", "--plain", "--project", project_dir]) + assert rc == 0 + out = capsys.readouterr().out + assert "\x1b[" not in out + assert "metric=" in out + + def test_artifacts_plain(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + _create_flow_json(run_dir) + step_dir = os.path.join(run_dir, "Synthesis_yosys", "log") + os.makedirs(step_dir, exist_ok=True) + with open(os.path.join(step_dir, "synthesis.log"), "w") as f: + f.write("ok\n") + rc = cli_main.run(["artifacts", "--plain", "--project", project_dir]) + assert rc == 0 + out = capsys.readouterr().out + assert "\x1b[" not in out + + def test_diagnose_plain(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + rc = cli_main.run(["diagnose", "--plain", "--project", project_dir]) + out = capsys.readouterr().out + assert "\x1b[" not in out + + def test_config_plain(self, tmp_path, monkeypatch, capsys): + project_dir = _create_valid_project(tmp_path) + monkeypatch.setattr( + "chipcompiler.cli.config._validate_pdk_contents", + lambda name, root: None, + ) + rc = cli_main.run(["config", "--resolved", "--plain", "--project", project_dir]) + assert rc == 0 + out = capsys.readouterr().out + assert "\x1b[" not in out + + +# --------------------------------------------------------------------------- +# Pretty default 
output structure tests +# --------------------------------------------------------------------------- + + +class TestPrettyDefaultOutput: + def test_init_has_header(self, tmp_path, capsys): + rc = cli_main.run(["init", str(tmp_path / "p")]) + assert rc == 0 + out = capsys.readouterr().out + assert "[init]" in out + + def test_check_has_header(self, tmp_path, monkeypatch, capsys): + project_dir = _create_valid_project(tmp_path) + monkeypatch.setattr( + "chipcompiler.cli.config._validate_pdk_contents", + lambda name, root: None, + ) + rc = cli_main.run(["check", "--project", project_dir]) + assert rc == 0 + out = capsys.readouterr().out + assert "[check]" in out + + def test_status_has_header(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + _create_flow_json(os.path.join(project_dir, "runs", "default")) + rc = cli_main.run(["status", "--project", project_dir]) + assert rc == 0 + out = capsys.readouterr().out + assert "[status]" in out + + def test_status_groups_steps(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + _create_flow_json(os.path.join(project_dir, "runs", "default"), [ + {"name": "Synthesis", "tool": "yosys", "state": "Success", "runtime": "0:00:05"}, + {"name": "CTS", "tool": "ecc", "state": "Success", "runtime": "0:00:04"}, + ]) + rc = cli_main.run(["status", "--project", project_dir]) + assert rc == 0 + out = capsys.readouterr().out + assert "synthesis (yosys)" in out + assert "cts (ecc)" in out + + def test_metrics_groups_by_step(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + for step_dir_name in ["Synthesis_yosys", "CTS_ecc"]: + analysis = os.path.join(run_dir, step_dir_name, "analysis") + os.makedirs(analysis, exist_ok=True) + metrics_name = step_dir_name.split("_")[0] + "_metrics.json" + with open(os.path.join(analysis, metrics_name), "w") as f: + json.dump({"Cell number": 100}, f) + rc = cli_main.run(["metrics", 
"--project", project_dir]) + assert rc == 0 + out = capsys.readouterr().out + assert "[metrics]" in out + assert "synthesis:" in out + assert "cts:" in out + + def test_diagnose_clean_has_header(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + _create_flow_json(run_dir, [ + {"name": "CTS", "tool": "ecc", "state": "Success", "runtime": "0:00:04"}, + ]) + step_dir = os.path.join(run_dir, "CTS_ecc", "log") + os.makedirs(step_dir, exist_ok=True) + with open(os.path.join(step_dir, "cts.log"), "w") as f: + f.write("ok\n") + rc = cli_main.run(["diagnose", "--project", project_dir]) + assert rc == 0 + out = capsys.readouterr().out + assert "[diagnose]" in out + assert "clean" in out + + def test_error_output_has_error_header(self, tmp_path, capsys): + rc = cli_main.run(["check", "--project", str(tmp_path)]) + assert rc == 1 + out = capsys.readouterr().out + assert "[error]" in out + + def test_run_summary_has_header(self, tmp_path, monkeypatch, capsys): + project_dir = _create_valid_project(tmp_path) + from types import SimpleNamespace + + DummyFlow_instances = [] + class DummyFlow: + instances = DummyFlow_instances + has_init_value = False + run_steps_value = True + def __init__(self, workspace): + self.workspace = workspace + self.added_steps = [] + self.create_called = False + self.run_called = False + self.workspace_steps = [] + DummyFlow.instances.append(self) + def has_init(self): + return False + def add_step(self, step, tool, state): + self.added_steps.append((step, tool, state)) + def create_step_workspaces(self): + self.create_called = True + def run_steps(self): + self.run_called = True + return True + + monkeypatch.setattr("chipcompiler.data.create_workspace", + lambda **kw: SimpleNamespace(name="ws")) + monkeypatch.setattr("chipcompiler.engine.EngineFlow", DummyFlow) + monkeypatch.setattr( + "chipcompiler.rtl2gds.build_rtl2gds_flow", + lambda: [("Synthesis", "yosys", 
"Unstart")], + ) + monkeypatch.setattr( + "chipcompiler.cli.config._validate_pdk_contents", + lambda name, root: None, + ) + + rc = cli_main.run(["run", "--project", project_dir]) + assert rc == 0 + out = capsys.readouterr().out + assert "[run]" in out + assert "success" in out + + +# --------------------------------------------------------------------------- +# JSON/JSONL unaffected by pretty changes +# --------------------------------------------------------------------------- + + +class TestJsonUnchanged: + def test_status_json_unchanged(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + _create_flow_json(os.path.join(project_dir, "runs", "default")) + rc = cli_main.run(["status", "--project", project_dir, "--json"]) + assert rc == 0 + data = json.loads(capsys.readouterr().out) + assert "records" in data + assert data["records"][0]["run"] == "default" + + def test_metrics_jsonl_unchanged(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + _create_flow_json(run_dir) + analysis_dir = os.path.join(run_dir, "Synthesis_yosys", "analysis") + os.makedirs(analysis_dir, exist_ok=True) + with open(os.path.join(analysis_dir, "Synthesis_metrics.json"), "w") as f: + json.dump({"Cell number": 312}, f) + rc = cli_main.run(["metrics", "synthesis", "--jsonl", "--project", project_dir]) + assert rc == 0 + out = capsys.readouterr().out + assert "\x1b[" not in out + objects = [json.loads(l) for l in out.strip().split("\n")] + assert any("metric" in o for o in objects) + + +# --------------------------------------------------------------------------- +# Regression: multi-record error rendering (Codex Round 1 finding) +# --------------------------------------------------------------------------- + + +class TestMultiRecordError: + def test_render_error_two_records(self): + from chipcompiler.cli.pretty import render_error + + buf = io.StringIO() + records = [ + {"error": "missing", 
"reason": "file not found"}, + {"error": "corrupt", "reason": "bad format"}, + ] + render_error(records, file=buf, color=False) + out = buf.getvalue() + assert "[error]" in out + assert "missing" in out + assert "file not found" in out + assert "corrupt" in out + assert "bad format" in out + + def test_render_error_three_records_all_shown(self): + from chipcompiler.cli.pretty import render_error + + buf = io.StringIO() + records = [ + {"kind": "error", "reason": "a"}, + {"kind": "error", "reason": "b"}, + {"kind": "error", "reason": "c"}, + ] + render_error(records, file=buf, color=False) + out = buf.getvalue() + assert out.count("error") >= 3 + for reason in ("a", "b", "c"): + assert reason in out + + +# --------------------------------------------------------------------------- +# Error code coloring (AC-1) +# --------------------------------------------------------------------------- + + +class TestErrorCodeColoring: + def test_arbitrary_error_code_colored_red(self): + from chipcompiler.cli.pretty import render_error + + buf = io.StringIO() + render_error([{"error": "missing_config", "reason": "no config found"}], file=buf, color=True) + out = buf.getvalue() + assert RED in out + assert "missing_config" in out + + def test_multiple_arbitrary_codes_colored_red(self): + from chipcompiler.cli.pretty import render_error + + buf = io.StringIO() + records = [ + {"error": "workspace_failed", "reason": "bad state"}, + {"error": "config_error", "reason": "invalid toml"}, + {"error": "invalid_parameter", "reason": "bad value"}, + ] + render_error(records, file=buf, color=True) + out = buf.getvalue() + for code in ("workspace_failed", "config_error", "invalid_parameter"): + assert code in out + assert RED in out + + def test_error_preserves_secondary_fields(self): + from chipcompiler.cli.pretty import render_error + + buf = io.StringIO() + render_error([{"error": "missing_config", "path": "/tmp/x", "reason": "gone"}], file=buf, color=True) + out = buf.getvalue() + assert 
"path:" in out + assert "/tmp/x" in out + assert "gone" in out + + def test_error_no_ansi_when_color_disabled(self): + from chipcompiler.cli.pretty import render_error + + buf = io.StringIO() + render_error([{"error": "missing_config", "reason": "bad"}], file=buf, color=False) + out = buf.getvalue() + assert "\x1b[" not in out + assert "missing_config" in out + + def test_unknown_error_code_not_white_by_default(self): + """Unknown error codes should still be red, not white or default.""" + from chipcompiler.cli.pretty import render_error + + buf = io.StringIO() + render_error([{"error": "unknown_code_xyz"}], file=buf, color=True) + out = buf.getvalue() + assert RED in out + + +# --------------------------------------------------------------------------- +# Shared color policy tests (Codex Round 1 finding) +# --------------------------------------------------------------------------- + + +class TestSharedColorPolicy: + def test_pretty_supports_color_no_color_env(self): + from chipcompiler.cli.pretty import supports_color + + env = {"NO_COLOR": "1"} + assert not supports_color(env=env) + + def test_pretty_supports_color_dumb_term(self): + from chipcompiler.cli.pretty import supports_color + + env = {"TERM": "dumb"} + assert not supports_color(env=env) + + def test_pretty_supports_color_non_tty(self): + from chipcompiler.cli.pretty import supports_color + + assert not supports_color(file=io.StringIO()) + + def test_pretty_supports_color_machine_mode(self): + from chipcompiler.cli.pretty import supports_color + from chipcompiler.cli.types import OutputMode + + assert not supports_color(mode=OutputMode.JSON) + assert not supports_color(mode=OutputMode.PLAIN) + + def test_progress_supports_color_delegates(self): + from chipcompiler.cli.progress import supports_color + + assert not supports_color(io.StringIO(), None, env={"NO_COLOR": "1"}) + assert not supports_color(io.StringIO(), None, env={"TERM": "dumb"}) + + def test_log_view_uses_shared_constants(self): + from 
chipcompiler.cli import log_view + from chipcompiler.cli import pretty + + assert log_view.BOLD is pretty.BOLD + assert log_view.RED is pretty.RED + assert log_view.CYAN is pretty.CYAN + assert log_view.RESET is pretty.RESET diff --git a/test/cli/test_progress.py b/test/cli/test_progress.py new file mode 100644 index 00000000..6ef73cc4 --- /dev/null +++ b/test/cli/test_progress.py @@ -0,0 +1,839 @@ +import io +import re +import time + +import pytest + +_ANSI_RE = re.compile(r"\x1b\[[0-9;]*[a-zA-Z]") + + +def _strip_ansi(text): + return _ANSI_RE.sub("", text) + +from chipcompiler.cli.pretty import BOLD, CYAN, DIM, GREEN, RED, RESET +from chipcompiler.cli.progress import ( + RunProgressRenderer, + format_error_context, + latest_log_line, + run_flow_with_progress, + sanitize_log_line, + should_enable_run_progress, + style, + supports_color, + truncate_to_width, +) +from chipcompiler.cli.log_view import LineKind, LogLine +from chipcompiler.cli.types import CommandContext, OutputMode +from chipcompiler.data import StateEnum + + +class FakeTTYStderr: + def __init__(self, isatty_value=True): + self._isatty = isatty_value + self.written = [] + + def isatty(self): + return self._isatty + + def write(self, s): + self.written.append(s) + + def flush(self): + pass + + +def _make_ctx(mode=OutputMode.TEXT): + return CommandContext( + project_dir="/tmp/project", + project=None, + run_dir="/tmp/project/runs/default", + run_id=None, + output_mode=mode, + ) + + +# -- supports_color -- + + +class TestSupportsColor: + def test_enabled_text_tty(self): + assert supports_color(FakeTTYStderr(True), OutputMode.TEXT, {"TERM": "xterm-256color"}) is True + + def test_disabled_non_tty(self): + assert supports_color(FakeTTYStderr(False), OutputMode.TEXT) is False + + def test_disabled_no_isattr(self): + assert supports_color(io.StringIO(), OutputMode.TEXT) is False + + def test_disabled_no_color(self): + assert supports_color(FakeTTYStderr(True), OutputMode.TEXT, {"NO_COLOR": "1"}) is False + + 
def test_disabled_term_dumb(self): + assert supports_color(FakeTTYStderr(True), OutputMode.TEXT, {"TERM": "dumb"}) is False + + def test_disabled_json(self): + assert supports_color(FakeTTYStderr(True), OutputMode.JSON) is False + + def test_disabled_jsonl(self): + assert supports_color(FakeTTYStderr(True), OutputMode.JSONL) is False + + def test_enabled_with_clean_env(self): + assert supports_color(FakeTTYStderr(True), OutputMode.TEXT, {"TERM": "xterm-256color"}) is True + + +# -- style -- + + +class TestStyle: + def test_applies_code_when_enabled(self): + result = style("hello", GREEN, True) + assert result == f"{GREEN}hello{RESET}" + + def test_passthrough_when_disabled(self): + assert style("hello", GREEN, False) == "hello" + + +# -- should_enable_run_progress -- + + +class TestShouldEnableRunProgress: + def test_enabled_text_tty(self): + ctx = _make_ctx(OutputMode.TEXT) + assert should_enable_run_progress(ctx, FakeTTYStderr(True)) is True + + def test_disabled_json(self): + ctx = _make_ctx(OutputMode.JSON) + assert should_enable_run_progress(ctx, FakeTTYStderr(True)) is False + + def test_disabled_jsonl(self): + ctx = _make_ctx(OutputMode.JSONL) + assert should_enable_run_progress(ctx, FakeTTYStderr(True)) is False + + def test_disabled_plain(self): + ctx = _make_ctx(OutputMode.PLAIN) + assert should_enable_run_progress(ctx, FakeTTYStderr(True)) is False + + def test_disabled_no_tty(self): + ctx = _make_ctx(OutputMode.TEXT) + assert should_enable_run_progress(ctx, FakeTTYStderr(False)) is False + + def test_disabled_no_isattr(self): + ctx = _make_ctx(OutputMode.TEXT) + assert should_enable_run_progress(ctx, io.StringIO()) is False + + +# -- sanitize_log_line -- + + +class TestSanitizeLogLine: + def test_strips_ansi(self): + assert sanitize_log_line("\x1b[32mOK\x1b[0m") == "OK" + + def test_replaces_control_chars(self): + assert sanitize_log_line("a\r\nb\tc") == "a b c" + + def test_collapses_spaces(self): + assert sanitize_log_line("a b") == "a b" + + def 
test_strips_whitespace(self): + assert sanitize_log_line(" hello ") == "hello" + + def test_empty_string(self): + assert sanitize_log_line("") == "" + + def test_preserves_normal_text(self): + assert sanitize_log_line("Synthesis completed") == "Synthesis completed" + + +# -- truncate_to_width -- + + +class TestTruncateToWidth: + def test_short_text_passes(self): + assert truncate_to_width("hi", 80) == "hi" + + def test_long_text_truncated(self): + text = "x" * 100 + result = truncate_to_width(text, 20) + assert len(result) == 20 + assert result.endswith("...") + + def test_exact_width(self): + text = "x" * 10 + assert truncate_to_width(text, 10) == text + + def test_zero_width(self): + assert truncate_to_width("hello", 0) == "" + + def test_small_width(self): + assert truncate_to_width("hello", 2) == "he" + + +# -- latest_log_line -- + + +class TestLatestLogLine: + def test_returns_last_nonempty_line(self, tmp_path): + log = tmp_path / "test.log" + log.write_text("line one\nline two\n\n") + assert latest_log_line(str(log)) == "line two" + + def test_returns_none_for_missing_file(self): + assert latest_log_line("/nonexistent/file.log") is None + + def test_returns_none_for_empty_file(self, tmp_path): + log = tmp_path / "empty.log" + log.write_text("") + assert latest_log_line(str(log)) is None + + def test_returns_none_for_none_path(self): + assert latest_log_line(None) is None + + def test_sanitizes_ansi_in_line(self, tmp_path): + log = tmp_path / "ansi.log" + log.write_text("\x1b[32mprogress\x1b[0m\n") + assert latest_log_line(str(log)) == "progress" + + def test_trailing_newlines_only(self, tmp_path): + log = tmp_path / "nl.log" + log.write_text("\n\n\n") + assert latest_log_line(str(log)) is None + + +# -- RunProgressRenderer -- + + +class TestRunProgressRenderer: + def test_running_writes_log_prefix(self): + buf = FakeTTYStderr(True) + r = RunProgressRenderer(buf, width_fn=lambda: 80) + r.running("working...") + output = "".join(buf.written) + assert 
output.startswith("\r\x1b[K") + assert " log: working..." in output + + def test_clear_noop_without_transient(self): + buf = FakeTTYStderr(True) + r = RunProgressRenderer(buf, width_fn=lambda: 80) + r.clear() + assert buf.written == [] + + def test_truncates_long_running_text(self): + buf = FakeTTYStderr(True) + r = RunProgressRenderer(buf, width_fn=lambda: 20) + r.running("x" * 100) + output = "".join(buf.written) + display = output.replace("\r\x1b[K", "") + assert len(display) <= 20 + + def test_start_step_emits_header(self): + buf = FakeTTYStderr(True) + r = RunProgressRenderer(buf, width_fn=lambda: 80) + r.start_step("synthesis", "yosys") + output = "".join(buf.written) + assert "> synthesis (yosys)\n" in output + + def test_start_step_separator_after_first(self): + buf = FakeTTYStderr(True) + r = RunProgressRenderer(buf, width_fn=lambda: 80) + r.start_step("synthesis", "yosys") + r.start_step("floorplan", "ecc") + output = "".join(buf.written) + assert "\n> floorplan (ecc)\n" in output + + def test_start_step_no_separator_before_first(self): + buf = FakeTTYStderr(True) + r = RunProgressRenderer(buf, width_fn=lambda: 80) + r.start_step("synthesis", "yosys") + output = "".join(buf.written) + assert not output.startswith("\n") + + def test_start_run_emits_header(self): + buf = FakeTTYStderr(True) + r = RunProgressRenderer(buf, width_fn=lambda: 80) + r.start_run("default", "/tmp/runs/default") + output = "".join(buf.written) + assert "[run] default workspace=/tmp/runs/default\n" in output + + def test_finish_step_success(self): + buf = FakeTTYStderr(True) + r = RunProgressRenderer(buf, width_fn=lambda: 80) + r.finish_step("synthesis", "yosys", "success", "0:00:06", "output/synth.log", "ecc log synthesis --errors", True) + output = "".join(buf.written) + assert "✓ synthesis (yosys) 0:00:06\n" in output + assert " log: output/synth.log\n" in output + assert " inspect: ecc log synthesis --errors\n" in output + + def test_finish_step_non_success(self): + buf = 
FakeTTYStderr(True) + r = RunProgressRenderer(buf, width_fn=lambda: 80) + r.finish_step("placement", "dreamplace", "incomplete", "0:00:00", "", "ecc log placement --errors", False) + output = "".join(buf.written) + assert "✗ placement (dreamplace) incomplete 0:00:00\n" in output + assert " log: \n" in output + assert " inspect: ecc log placement --errors\n" in output + + def test_finish_step_clears_transient_to_clean_line(self): + buf = FakeTTYStderr(True) + r = RunProgressRenderer(buf, width_fn=lambda: 80) + r.running("transient log") + r.finish_step("synthesis", "yosys", "success", "0:00:06", "log", "cmd", True) + output = "".join(buf.written) + # The final clear before the summary must move to a clean line + assert "\r\x1b[K\n✓ synthesis" in output + + def test_finish_step_non_success_clears_transient_to_clean_line(self): + buf = FakeTTYStderr(True) + r = RunProgressRenderer(buf, width_fn=lambda: 80) + r.running("transient log") + r.finish_step("placement", "dreamplace", "incomplete", "0:00:00", "", "cmd", False) + output = "".join(buf.written) + assert "\r\x1b[K\n✗ placement" in output + + def test_running_with_color(self): + buf = FakeTTYStderr(True) + r = RunProgressRenderer(buf, width_fn=lambda: 80, color=True) + r.running("working...") + output = "".join(buf.written) + assert DIM in output + assert "log:" in output + + def test_running_without_color(self): + buf = FakeTTYStderr(True) + r = RunProgressRenderer(buf, width_fn=lambda: 80, color=False) + r.running("working...") + output = "".join(buf.written) + assert DIM not in output + + def test_no_color_codes_when_disabled(self): + buf = FakeTTYStderr(True) + r = RunProgressRenderer(buf, width_fn=lambda: 80, color=False) + r.start_run("default", "/tmp") + r.start_step("synthesis", "yosys") + r.finish_step("synthesis", "yosys", "success", "0:00:06", "log", "cmd", True) + output = "".join(buf.written) + for code in (BOLD, DIM, CYAN, GREEN, RED): + assert code not in output + + def 
test_start_step_with_color(self): + buf = FakeTTYStderr(True) + r = RunProgressRenderer(buf, width_fn=lambda: 80, color=True) + r.start_step("synthesis", "yosys") + output = "".join(buf.written) + assert CYAN in output + # Cyan sequence must appear before the `>` marker in raw output + cyan_pos = output.find(CYAN) + marker_pos = output.find(">") + assert cyan_pos < marker_pos + + def test_start_run_with_color(self): + buf = FakeTTYStderr(True) + r = RunProgressRenderer(buf, width_fn=lambda: 80, color=True) + r.start_run("default", "/tmp") + output = "".join(buf.written) + assert BOLD in output + + def test_finish_step_success_with_color(self): + buf = FakeTTYStderr(True) + r = RunProgressRenderer(buf, width_fn=lambda: 80, color=True) + r.finish_step("synthesis", "yosys", "success", "0:00:06", "log", "cmd", True) + output = "".join(buf.written) + assert GREEN in output + + def test_finish_step_non_success_with_color(self): + buf = FakeTTYStderr(True) + r = RunProgressRenderer(buf, width_fn=lambda: 80, color=True) + r.finish_step("placement", "dreamplace", "incomplete", "0:00:00", "", "cmd", False) + output = "".join(buf.written) + assert RED in output + + +# -- run_flow_with_progress -- + + +def _make_ws(directory="/tmp", log_section_fn=None): + section_fn = log_section_fn or (lambda self, msg: None) + return type("WS", (), { + "home": type("Home", (), {"reset": lambda self: None})(), + "logger": type("L", (), { + "info": lambda *a, **k: None, + "log_section": section_fn, + "log_separator": lambda *a, **k: None, + })(), + "flow": type("F", (), {"data": {"steps": []}, "path": ""})(), + "directory": directory, + })() + + +def _make_step(name, tool, log_file=""): + return type("WSS", (), {"name": name, "tool": tool, "log": {"file": log_file}})() + + +def _make_flow(ws, steps, run_step_fn): + return type("EF", (), { + "workspace": ws, + "workspace_steps": steps, + "run_step": run_step_fn, + })() + + +class TestRunFlowWithProgress: + def test_success_summary_format(self, 
tmp_path): + flow = _make_flow( + _make_ws(str(tmp_path)), + [_make_step("Synthesis", "yosys", str(tmp_path / "synth.log"))], + lambda self, s: StateEnum.Success, + ) + + buf = FakeTTYStderr(True) + result = run_flow_with_progress(flow, _make_ctx(), None, buf) + assert result is True + output = "".join(buf.written) + assert "✓ synthesis (yosys)" in output + assert "status=success" not in output + + def test_stops_on_failure(self): + call_count = [0] + + def fake_run_step(self, s): + call_count[0] += 1 + if s.name == "Synthesis": + return StateEnum.Success + return StateEnum.Imcomplete + + flow = _make_flow( + _make_ws(), + [_make_step("Synthesis", "yosys"), _make_step("Floorplan", "ecc")], + fake_run_step, + ) + + buf = FakeTTYStderr(True) + result = run_flow_with_progress(flow, _make_ctx(), None, buf) + assert result is False + assert call_count[0] == 2 + + def test_summary_includes_inspect_detail_line(self): + flow = _make_flow( + _make_ws(), + [_make_step("Synthesis", "yosys", "/tmp/synth.log")], + lambda self, s: StateEnum.Success, + ) + + buf = FakeTTYStderr(True) + run_flow_with_progress(flow, _make_ctx(), "myproject", buf) + plain = _strip_ansi("".join(buf.written)) + assert " inspect: ecc log synthesis --project myproject\n" in plain + + def test_summary_includes_log_detail_line(self, tmp_path): + log_file = tmp_path / "synth.log" + log_file.write_text("content\n") + + flow = _make_flow( + _make_ws(str(tmp_path)), + [_make_step("Synthesis", "yosys", str(log_file))], + lambda self, s: StateEnum.Success, + ) + + buf = FakeTTYStderr(True) + run_flow_with_progress(flow, _make_ctx(), None, buf) + output = "".join(buf.written) + assert " log:" in output + + def test_step_headers_emitted(self): + flow = _make_flow( + _make_ws(), + [ + _make_step("Synthesis", "yosys"), + _make_step("Floorplan", "ecc"), + ], + lambda self, s: StateEnum.Success, + ) + + buf = FakeTTYStderr(True) + result = run_flow_with_progress(flow, _make_ctx(), None, buf) + assert result is True + 
plain = _strip_ansi("".join(buf.written)) + assert "> synthesis (yosys)\n" in plain + assert "> floorplan (ecc)\n" in plain + + def test_run_header_emitted(self, tmp_path): + flow = _make_flow( + _make_ws(str(tmp_path)), + [_make_step("Synthesis", "yosys")], + lambda self, s: StateEnum.Success, + ) + + buf = FakeTTYStderr(True) + run_flow_with_progress(flow, _make_ctx(), None, buf) + output = "".join(buf.written) + assert "[run]" in output + assert "workspace=" in output + + def test_block_separator_between_steps(self): + flow = _make_flow( + _make_ws(), + [ + _make_step("Synthesis", "yosys"), + _make_step("Floorplan", "ecc"), + ], + lambda self, s: StateEnum.Success, + ) + + buf = FakeTTYStderr(True) + result = run_flow_with_progress(flow, _make_ctx(), None, buf) + assert result is True + output = "".join(buf.written) + synth_summary = output.find("✓ synthesis") + fp_header = output.find("> floorplan") + between = output[synth_summary:fp_header] + assert "\n\n" in between + + def test_failure_summary_includes_status(self): + def fake_run_step(self, s): + if s.name == "Synthesis": + return StateEnum.Success + return StateEnum.Imcomplete + + flow = _make_flow( + _make_ws(), + [_make_step("Synthesis", "yosys"), _make_step("Floorplan", "ecc")], + fake_run_step, + ) + + buf = FakeTTYStderr(True) + result = run_flow_with_progress(flow, _make_ctx(), None, buf) + assert result is False + plain = _strip_ansi("".join(buf.written)) + assert "✗ floorplan (ecc)" in plain + assert "incomplete" in plain + + def test_transient_line_shows_log_content(self, tmp_path): + log_file = tmp_path / "synth.log" + + def fake_run_step(self, s): + log_file.write_text("Synthesizing module top\n") + time.sleep(1.0) + return StateEnum.Success + + flow = _make_flow( + _make_ws(str(tmp_path)), + [_make_step("Synthesis", "yosys", str(log_file))], + fake_run_step, + ) + + buf = FakeTTYStderr(True) + result = run_flow_with_progress(flow, _make_ctx(), None, buf) + assert result is True + plain = 
_strip_ansi("".join(buf.written)) + assert "Synthesizing module top" in plain + + log_pos = plain.find("Synthesizing module top") + summary_pos = plain.find("✓ synthesis") + assert log_pos >= 0 + assert summary_pos >= 0 + assert log_pos < summary_pos + + def test_transient_shows_waiting_when_no_log(self): + def fake_run_step(self, s): + time.sleep(1.0) + return StateEnum.Success + + flow = _make_flow( + _make_ws(), + [_make_step("Synthesis", "yosys", "/tmp/nonexistent_synth.log")], + fake_run_step, + ) + + buf = FakeTTYStderr(True) + result = run_flow_with_progress(flow, _make_ctx(), None, buf) + assert result is True + plain = _strip_ansi("".join(buf.written)) + assert " log: waiting for log..." in plain + + def test_log_section_markers_emitted(self, tmp_path): + sections = [] + flow = _make_flow( + _make_ws(str(tmp_path), log_section_fn=lambda self, msg: sections.append(msg)), + [_make_step("Synthesis", "yosys")], + lambda self, s: StateEnum.Success, + ) + + buf = FakeTTYStderr(True) + run_flow_with_progress(flow, _make_ctx(), None, buf) + + assert "yosys - begin step - Synthesis" in sections + assert "yosys - end step - Synthesis" in sections + assert sections.index("yosys - begin step - Synthesis") < sections.index("yosys - end step - Synthesis") + + def test_log_section_markers_around_run_step(self, tmp_path): + call_order = [] + + def fake_run_step(self, s): + call_order.append(("run_step", s.name)) + return StateEnum.Success + + flow = _make_flow( + _make_ws(str(tmp_path), log_section_fn=lambda self, msg: call_order.append(("section", msg))), + [_make_step("Floorplan", "ecc")], + fake_run_step, + ) + + buf = FakeTTYStderr(True) + run_flow_with_progress(flow, _make_ctx(), None, buf) + + begin_idx = call_order.index(("section", "ecc - begin step - Floorplan")) + run_idx = call_order.index(("run_step", "Floorplan")) + end_idx = call_order.index(("section", "ecc - end step - Floorplan")) + assert begin_idx < run_idx < end_idx + + def 
test_monitor_cleanup_on_run_step_exception(self, tmp_path): + def raising_run_step(self, s): + raise RuntimeError("tool crashed") + + flow = _make_flow( + _make_ws(str(tmp_path)), + [_make_step("Synthesis", "yosys")], + raising_run_step, + ) + + buf = FakeTTYStderr(True) + with pytest.raises(RuntimeError, match="tool crashed"): + run_flow_with_progress(flow, _make_ctx(), None, buf) + + output = "".join(buf.written) + assert "\r\x1b[K" in output + + def test_color_enabled_for_tty_text(self, monkeypatch): + monkeypatch.delenv("NO_COLOR", raising=False) + monkeypatch.setenv("TERM", "xterm-256color") + + flow = _make_flow( + _make_ws(), + [_make_step("Synthesis", "yosys")], + lambda self, s: StateEnum.Success, + ) + + buf = FakeTTYStderr(True) + run_flow_with_progress(flow, _make_ctx(), None, buf) + output = "".join(buf.written) + assert "\x1b[36m" in output # cyan for step header + + def test_color_disabled_for_non_tty(self): + flow = _make_flow( + _make_ws(), + [_make_step("Synthesis", "yosys")], + lambda self, s: StateEnum.Success, + ) + + buf = FakeTTYStderr(False) + run_flow_with_progress(flow, _make_ctx(), None, buf) + output = "".join(buf.written) + for code in (BOLD, CYAN, GREEN, RED, DIM): + assert code not in output + + +# --------------------------------------------------------------------------- +# Failure context block formatting (AC-5) +# --------------------------------------------------------------------------- + + +class TestFormatErrorContext: + def test_first_line_is_error_log_path(self): + ctx_lines = [LogLine(10, LineKind.ERROR, "Error: something")] + out = format_error_context("log/synthesis.log", ctx_lines, "ecc log synthesis", color=False) + assert out.startswith("error: log/synthesis.log") + + def test_includes_numbered_context_lines(self): + ctx_lines = [ + LogLine(8, LineKind.INFO, "INFO: before"), + LogLine(9, LineKind.WARNING, "Warning: careful"), + LogLine(10, LineKind.ERROR, "Error: failed"), + ] + out = 
format_error_context("log/synthesis.log", ctx_lines, "ecc log synthesis", color=False) + for ll in ctx_lines: + assert str(ll.line_no) in out + assert ll.text in out + + def test_compact_kind_labels(self): + ctx_lines = [ + LogLine(5, LineKind.ERROR, "bad"), + LogLine(6, LineKind.WARNING, "meh"), + LogLine(7, LineKind.TRACEBACK, " File ..."), + LogLine(8, LineKind.INFO, "ok"), + ] + out = format_error_context("log/p.log", ctx_lines, "ecc log step", color=False) + assert "ERROR" in out + assert "WARN" in out + assert "TRACE" in out + assert "INFO" in out + + def test_footer_includes_for_more_log_info(self): + ctx_lines = [LogLine(1, LineKind.ERROR, "failed")] + out = format_error_context("log/p.log", ctx_lines, "ecc log synthesis --project myproj", color=False) + assert "For more log info:" in out + assert "ecc log synthesis --project myproj" in out + + def test_footer_includes_command_grep_field(self): + ctx_lines = [LogLine(1, LineKind.ERROR, "failed")] + log_cmd = "ecc log synthesis --project myproj --run-id abc123" + out = format_error_context("log/p.log", ctx_lines, log_cmd, color=False) + assert 'command="ecc log synthesis --project myproj --run-id abc123"' in out + + def test_project_and_run_id_preserved_in_footer(self): + ctx_lines = [LogLine(1, LineKind.ERROR, "failed")] + log_cmd = "ecc log synthesis --project /path/to/proj --run-id run42" + out = format_error_context("log/p.log", ctx_lines, log_cmd, color=False) + assert "--project /path/to/proj" in out + assert "--run-id run42" in out + + def test_color_gating_no_ansi_when_disabled(self): + ctx_lines = [LogLine(10, LineKind.ERROR, "Error: bad")] + out = format_error_context("log/p.log", ctx_lines, "ecc log step", color=False) + assert "\x1b[" not in out + + def test_color_gating_ansi_when_enabled(self): + ctx_lines = [LogLine(10, LineKind.ERROR, "Error: bad")] + out = format_error_context("log/p.log", ctx_lines, "ecc log step", color=True) + assert "\x1b[" in out + + def 
test_line_number_padding_consistent(self): + ctx_lines = [ + LogLine(1, LineKind.PLAIN, "first"), + LogLine(10, LineKind.ERROR, "error"), + LogLine(100, LineKind.PLAIN, "hundred"), + ] + out = format_error_context("log/p.log", ctx_lines, "ecc log step", color=False) + lines = out.strip().split("\n") + context_lines = [l for l in lines if l.strip() and not l.startswith("error:") and not l.startswith("For") and not l.startswith("command=")] + for line in context_lines: + assert line.startswith(" ") + + def test_empty_context(self): + out = format_error_context("log/p.log", [], "ecc log step", color=False) + assert "error: log/p.log" in out + assert "For more log info:" in out + + +# --------------------------------------------------------------------------- +# Failure context progress integration (AC-6) +# --------------------------------------------------------------------------- + + +class TestFailureContextIntegration: + def test_failed_step_prints_context_block(self, tmp_path): + log_file = tmp_path / "synth.log" + log_file.write_text("line 1\nline 2\nError: something failed\nline 4\n") + + def fail_step(self, s): + return StateEnum.Imcomplete + + flow = _make_flow( + _make_ws(str(tmp_path)), + [_make_step("Synthesis", "yosys", str(log_file))], + fail_step, + ) + + buf = FakeTTYStderr(True) + result = run_flow_with_progress(flow, _make_ctx(), "myproj", buf) + assert result is False + plain = _strip_ansi("".join(buf.written)) + assert "error:" in plain + assert "For more log info:" in plain + assert 'command="' in plain + + def test_successful_step_no_context_block(self, tmp_path): + log_file = tmp_path / "synth.log" + log_file.write_text("line 1\nline 2\nall good\n") + + flow = _make_flow( + _make_ws(str(tmp_path)), + [_make_step("Synthesis", "yosys", str(log_file))], + lambda self, s: StateEnum.Success, + ) + + buf = FakeTTYStderr(True) + result = run_flow_with_progress(flow, _make_ctx(), "myproj", buf) + assert result is True + plain = 
_strip_ansi("".join(buf.written)) + assert "error:" not in plain + assert "For more log info:" not in plain + + def test_missing_log_no_context_block(self): + flow = _make_flow( + _make_ws(), + [_make_step("Synthesis", "yosys", "/nonexistent/synth.log")], + lambda self, s: StateEnum.Imcomplete, + ) + + buf = FakeTTYStderr(True) + result = run_flow_with_progress(flow, _make_ctx(), "myproj", buf) + assert result is False + plain = _strip_ansi("".join(buf.written)) + assert "error:" not in plain + assert "For more log info:" not in plain + assert "log:" in plain + assert "inspect:" in plain + + def test_empty_log_no_context_block(self, tmp_path): + log_file = tmp_path / "empty.log" + log_file.write_text("") + + flow = _make_flow( + _make_ws(str(tmp_path)), + [_make_step("Synthesis", "yosys", str(log_file))], + lambda self, s: StateEnum.Imcomplete, + ) + + buf = FakeTTYStderr(True) + result = run_flow_with_progress(flow, _make_ctx(), "myproj", buf) + assert result is False + plain = _strip_ansi("".join(buf.written)) + assert "For more log info:" not in plain + + def test_existing_log_and_inspect_lines_remain(self, tmp_path): + log_file = tmp_path / "synth.log" + log_file.write_text("line 1\nError: fail\nline 3\n") + + flow = _make_flow( + _make_ws(str(tmp_path)), + [_make_step("Synthesis", "yosys", str(log_file))], + lambda self, s: StateEnum.Imcomplete, + ) + + buf = FakeTTYStderr(True) + result = run_flow_with_progress(flow, _make_ctx(), "myproj", buf) + assert result is False + plain = _strip_ansi("".join(buf.written)) + assert "log:" in plain + assert "inspect:" in plain + + def test_context_block_no_blank_lines_between_rows(self, tmp_path): + log_file = tmp_path / "synth.log" + log_file.write_text("line one\nline two\nError: boom\nline four\n") + + flow = _make_flow( + _make_ws(str(tmp_path)), + [_make_step("Synthesis", "yosys", str(log_file))], + lambda self, s: StateEnum.Imcomplete, + ) + + buf = FakeTTYStderr(True) + result = run_flow_with_progress(flow, 
_make_ctx(), "myproj", buf) + assert result is False + raw = "".join(buf.written) + + header_pos = raw.find("error:") + footer_pos = raw.find("For more log info:", header_pos) + assert header_pos >= 0 + assert footer_pos > header_pos + + block = raw[header_pos:footer_pos] + plain_block = _strip_ansi(block) + all_lines = plain_block.rstrip("\n").split("\n") + + body_lines = [l for l in all_lines if not l.startswith("error:")] + assert len(body_lines) > 0 + + for i, line in enumerate(body_lines): + assert line.strip() != "", f"blank line at index {i} in context block: {body_lines!r}" + assert line.startswith(" "), f"context row not indented at index {i}: {line!r}" diff --git a/test/formal/test_param_propagation.py b/test/formal/test_param_propagation.py index ec9b1af0..234a6a79 100644 --- a/test/formal/test_param_propagation.py +++ b/test/formal/test_param_propagation.py @@ -69,7 +69,7 @@ def _key_exists_in_dict(data: dict[str, Any], key: str) -> bool: # Known parameter -> config mappings with both defaults. 
# (param_key, param_default, config_default, description) PARAM_CONFIG_DEFAULTS: list[tuple[str, float, float, str]] = [ - ("Target density", 0.3, 0.8, "dreamplace.target_density"), + ("Target density", 0.2, 0.8, "dreamplace.target_density"), ("Target overflow", 0.1, 0.1, "dreamplace.stop_overflow"), ("Cell padding x", 600, 600, "dreamplace.cell_padding_x"), ("Routability opt flag", 1, 0, "dreamplace.routability_opt_flag"), diff --git a/test/test_ecc_dreamplace_config_permissions.py b/test/test_ecc_dreamplace_config_permissions.py new file mode 100644 index 00000000..7447705e --- /dev/null +++ b/test/test_ecc_dreamplace_config_permissions.py @@ -0,0 +1,109 @@ +import shutil +import stat + +from chipcompiler.data import PDK, OriginDesign, Parameters, StepEnum, Workspace +from chipcompiler.tools.ecc import builder as ecc_builder +from chipcompiler.tools.ecc_dreamplace import builder as dreamplace_builder +from chipcompiler.utility import json_read, json_write + + +def test_ecc_config_generation_leaves_config_root_writable_after_read_only_copy( + tmp_path, + monkeypatch, +): + parameters_path = tmp_path / "parameters.json" + json_write(str(parameters_path), {}) + workspace = Workspace( + directory=str(tmp_path / "workspace"), + design=OriginDesign(name="gcd"), + pdk=PDK(tech="tech.lef", lefs=["std.lef"], buffers=[], fillers=[]), + parameters=Parameters(path=str(parameters_path), data={}), + ) + step = ecc_builder.build_step( + workspace=workspace, + step_name=StepEnum.PLACEMENT.value, + input_def="input.def", + input_verilog="input.v", + ) + config_dir = tmp_path / "workspace" / "place_ecc" / "config" + readonly_source = tmp_path / "readonly_configs" + + monkeypatch.setattr(ecc_builder, "build_sub_flow", lambda **_: None) + monkeypatch.setattr(ecc_builder, "build_checklist", lambda **_: None) + + real_copytree = shutil.copytree + + def copy_readonly_config_source(_src, dst, dirs_exist_ok=False): + real_copytree(_src, readonly_source, dirs_exist_ok=True) + 
readonly_source.chmod(stat.S_IREAD | stat.S_IEXEC) + try: + return real_copytree(readonly_source, dst, dirs_exist_ok=dirs_exist_ok) + finally: + readonly_source.chmod( + stat.S_IREAD | stat.S_IWRITE | stat.S_IEXEC, + ) + + monkeypatch.setattr(shutil, "copytree", copy_readonly_config_source) + + ecc_builder.build_step_config(workspace, step) + + config_mode = config_dir.stat().st_mode + copied_config = config_dir / "flow_config.json" + copied_mode = copied_config.stat().st_mode + assert config_mode & stat.S_IWUSR + assert config_mode & stat.S_IXUSR + assert copied_mode & stat.S_IWUSR + + extra_config = config_dir / "created_after_build.json" + extra_config.write_text("{}", encoding="utf-8") + assert extra_config.exists() + + +def test_dreamplace_config_generation_writes_generated_fields_to_copied_config( + tmp_path, + monkeypatch, +): + workspace = Workspace( + directory=str(tmp_path / "workspace"), + design=OriginDesign(name="gcd"), + pdk=PDK(tech="tech.lef", lefs=["std.lef"]), + parameters=Parameters(data={}), + ) + step = dreamplace_builder.build_step( + workspace=workspace, + step_name=StepEnum.PLACEMENT.value, + input_def="input.def", + input_verilog="input.v", + ) + config_dir = tmp_path / "workspace" / "place_dreamplace" / "config" + + def fake_ecc_build_step_config(_workspace, _step): + config_dir.mkdir(parents=True, exist_ok=True) + + monkeypatch.setattr( + dreamplace_builder.ecc_builder, + "build_step_config", + fake_ecc_build_step_config, + ) + + real_copy2 = shutil.copy2 + + def copy_readonly_config_file(src, dst): + result = real_copy2(src, dst) + tmp_path.joinpath(dst).chmod(stat.S_IREAD) + return result + + monkeypatch.setattr(shutil, "copy2", copy_readonly_config_file) + + dreamplace_builder.build_step_config(workspace, step) + + dreamplace_config = config_dir / "dreamplace.json" + mode = dreamplace_config.stat().st_mode + data = json_read(str(dreamplace_config)) + + assert mode & stat.S_IWUSR + assert data["lef_input"] == ["tech.lef", "std.lef"] + 
assert data["def_input"] == "input.def" + assert data["verilog_input"] == "input.v" + assert data["result_dir"] == step.data[step.name] + assert data["base_design_name"] == "gcd" diff --git a/uv.lock b/uv.lock index e1916b34..3bdad99d 100644 --- a/uv.lock +++ b/uv.lock @@ -470,7 +470,7 @@ dev = [ [package.metadata] requires-dist = [ - { name = "ecc-dreamplace", url = "https://github.com/openecos-projects/ecc-dreamplace/releases/download/v0.1.0-alpha.1/ecc_dreamplace-0.1.0a1-py3-none-manylinux_2_34_x86_64.whl" }, + { name = "ecc-dreamplace", url = "https://github.com/openecos-projects/ecc-dreamplace/releases/download/v0.1.0-alpha.2/ecc_dreamplace-0.1.0a2-py3-none-manylinux_2_34_x86_64.whl" }, { name = "ecc-tools", url = "https://github.com/openecos-projects/ecc-tools/releases/download/v0.1.0-alpha.2/ecc_tools-0.1.0a2-py3-none-manylinux_2_34_x86_64.whl" }, { name = "fastapi", specifier = ">=0.109" }, { name = "klayout", specifier = ">=0.30.2" }, @@ -506,8 +506,8 @@ dev = [ [[package]] name = "ecc-dreamplace" -version = "0.1.0a1" -source = { url = "https://github.com/openecos-projects/ecc-dreamplace/releases/download/v0.1.0-alpha.1/ecc_dreamplace-0.1.0a1-py3-none-manylinux_2_34_x86_64.whl" } +version = "0.1.0a2" +source = { url = "https://github.com/openecos-projects/ecc-dreamplace/releases/download/v0.1.0-alpha.2/ecc_dreamplace-0.1.0a2-py3-none-manylinux_2_34_x86_64.whl" } dependencies = [ { name = "cairocffi" }, { name = "configspace" }, @@ -530,7 +530,7 @@ dependencies = [ { name = "xgboost" }, ] wheels = [ - { url = "https://github.com/openecos-projects/ecc-dreamplace/releases/download/v0.1.0-alpha.1/ecc_dreamplace-0.1.0a1-py3-none-manylinux_2_34_x86_64.whl", hash = "sha256:212139c43f825498968eda10309959b99cd93aec0744d182a17bb5d34b53145e" }, + { url = "https://github.com/openecos-projects/ecc-dreamplace/releases/download/v0.1.0-alpha.2/ecc_dreamplace-0.1.0a2-py3-none-manylinux_2_34_x86_64.whl", hash = 
"sha256:fad0e489bfba62f79c193e2e0ec5051a492768e2a3d6099aa5e604c08abb191f" }, ] [package.metadata]