From 5eba3b53223c78876ad6a351527fcf9c6152df1a Mon Sep 17 00:00:00 2001 From: Emin Date: Fri, 1 May 2026 21:28:35 +0800 Subject: [PATCH 001/104] chore: add humanize and superpowers dir to .gitignore Signed-off-by: Emin --- .gitignore | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/.gitignore b/.gitignore index f9e6b78c..f6f1946b 100644 --- a/.gitignore +++ b/.gitignore @@ -181,4 +181,8 @@ bazel-* # Generated from uv.lock, not committed requirements_lock.txt -chipcompiler/tools/ecc_dreamplace/dreamplace \ No newline at end of file +chipcompiler/tools/ecc_dreamplace/dreamplace + +humanize/ +humanize-* +docs/superpowers/ From f4692b37ab04438b3fe00eb2c5a90952cf5d297c Mon Sep 17 00:00:00 2001 From: Emin Date: Fri, 1 May 2026 21:28:47 +0800 Subject: [PATCH 002/104] docs: add cli specifications Signed-off-by: Emin --- docs/index.md | 7 + docs/specification/cli-design.md | 427 +++++++++++++++++++++++++++++++ 2 files changed, 434 insertions(+) create mode 100644 docs/specification/cli-design.md diff --git a/docs/index.md b/docs/index.md index 8c8ff622..195bacb6 100644 --- a/docs/index.md +++ b/docs/index.md @@ -26,6 +26,13 @@ ChipCompiler supports various EDA file formats. Technical specifications for par - Supports file paths, +incdir directives, comments, quoted paths - Parser implementation: `chipcompiler/utility/filelist.py` +### CLI Specifications + +- **[CLI Design](specification/cli-design.md)** - Progressive-disclosure CLI design and roadmap + - Grep-friendly summary lines with disclosure commands + - Project, run, step, metric, artifact, issue, and config object model + - Phased roadmap for project setup, debug, traceability, and exploration + ## Quick Navigation ### I want to... diff --git a/docs/specification/cli-design.md b/docs/specification/cli-design.md new file mode 100644 index 00000000..6cbe39a0 --- /dev/null +++ b/docs/specification/cli-design.md @@ -0,0 +1,427 @@ +# CLI Design Specification + +This document defines the design principles and staged roadmap for the ECC +command line interface. + +The CLI should be useful to both human flow developers and agent frameworks. It +must expose a short default path for common flows, while every summary line must +also provide explicit commands for deeper inspection. + +## Goals + +- Provide a project-oriented interface for RTL-to-GDS workflows. +- Make step-level reruns, inspection, and debugging first-class operations. +- Keep default output concise and stable. +- Make output easy to parse with simple tools such as `rg`, `awk`, and shell + scripts. +- Provide structured output for agents through `--json` and `--jsonl`. +- Preserve the existing Python API for advanced integration. +- Build CLI behavior as a wrapper around the current Python APIs. + +## Non-Goals + +- Full OpenLane or LibreLane configuration import. +- A conversational assistant as the primary CLI interface. +- Tool-specific command exposure as the default user model. +- Pretty terminal UI as the canonical output format. + +## Design Principles + +### Progressive Disclosure + +The default command output should answer only: + +- What happened? +- Did it succeed? +- What command should inspect the next level of detail? + +Detailed information must be available through explicit follow-up commands. 
+The disclosure path is: + +```text +summary -> diagnosis -> evidence -> raw data +``` + +Examples: + +```bash +ecc status +ecc diagnose step cts +ecc log step cts --errors +ecc artifacts step cts +ecc config step cts --resolved +``` + +### Disclosure Commands On Summary Lines + +Every summary line must include at least one disclosure command on the same +line. This is required so agents can grep the output and continue inspection +without interpreting natural language paragraphs. + +Use stable `key="command"` fields: + +```text +step=cts status=failed elapsed=37s wns=-0.083 hold_vios=12 diagnose="ecc diagnose step cts" log="ecc log step cts --errors" config="ecc config step cts --resolved" +``` + +Do not rely on prose such as: + +```text +Run ecc diagnose step cts for more details. +``` + +The command field names should be stable across releases: + +| Field | Purpose | +| --- | --- | +| `inspect` | Show detailed object state | +| `diagnose` | Explain failures or quality issues | +| `log` | Show filtered or raw logs | +| `artifacts` | List output artifacts | +| `config` | Show resolved configuration | +| `metrics` | Show metrics | +| `open` | Open a viewer or report | + +### Stable Text Output + +The default output should be line-oriented and grep-friendly. Avoid box drawing, +multi-line table cells, and terminal-width-dependent formatting in the default +mode. + +Recommended style: + +```text +run=baseline status=failed failed_step=routing elapsed=554s diagnose="ecc diagnose run baseline" metrics="ecc metrics run baseline" artifacts="ecc artifacts run baseline" +step=synthesis status=success elapsed=18s cells=312 area=1840.2 inspect="ecc show step synthesis" log="ecc log step synthesis --errors" +step=floorplan status=success elapsed=4s util=45.0 die=100x100 inspect="ecc show step floorplan" config="ecc config step floorplan --resolved" +step=placement status=success elapsed=72s hpwl=18423 overflow=0.02 inspect="ecc show step placement" metrics="ecc metrics step placement" +step=cts status=failed elapsed=37s wns=-0.083 hold_vios=12 diagnose="ecc diagnose step cts" log="ecc log step cts --errors" +``` + +Pretty output may be provided through a separate option: + +```bash +ecc status --pretty +``` + +Pretty output is for humans only and must not be treated as the stable parsing +interface. + +### Structured Output + +Every inspection command should support: + +```bash +--json +--jsonl +``` + +Use `--json` for object-level output and `--jsonl` for stream or list output. + +Example: + +```jsonl +{"kind":"step","step":"synthesis","status":"success","elapsed_s":18,"inspect_cmd":"ecc show step synthesis","log_cmd":"ecc log step synthesis --errors"} +{"kind":"step","step":"cts","status":"failed","elapsed_s":37,"wns":-0.083,"hold_vios":12,"diagnose_cmd":"ecc diagnose step cts","log_cmd":"ecc log step cts --errors"} +``` + +Text output and JSON output should describe the same objects. The text output is +the human and shell interface; JSON is the strict machine interface. 
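+
+The default text format is itself trivially machine-readable. As an
+illustration only (this helper is not part of the CLI), a consumer could
+recover the fields of a summary line with a few lines of Python:
+
+```python
+import re
+
+# key=value pairs; a value is either a double-quoted string (used for
+# disclosure commands, may contain spaces) or a bare whitespace-free token.
+FIELD = re.compile(r'(\w+)=(?:"([^"]*)"|(\S+))')
+
+def parse_summary_line(line: str) -> dict[str, str]:
+    return {key: quoted or bare for key, quoted, bare in FIELD.findall(line)}
+
+line = ('step=cts status=failed elapsed=37s wns=-0.083 hold_vios=12 '
+        'diagnose="ecc diagnose step cts" log="ecc log step cts --errors"')
+fields = parse_summary_line(line)
+assert fields["status"] == "failed"
+assert fields["diagnose"] == "ecc diagnose step cts"
+```
+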
+ +### Object-Oriented CLI Model + +Commands should be organized around flow objects instead of internal tools: + +| Object | Description | +| --- | --- | +| Project | User design directory and `ecc.toml` | +| Run | One execution instance with a stable run id or tag | +| Step | A flow step such as synthesis, placement, CTS, routing | +| Artifact | DEF, GDS, Verilog, SPEF, reports, logs, scripts | +| Metric | QoR values such as WNS, TNS, area, HPWL, DRC count | +| Issue | Failure or QoR problem with evidence | +| Config | User config and resolved step config | + +Users should not need to understand the internal Yosys, ECC-Tools, or +DreamPlace directory layout to perform common actions. + +### Python API Wrapper Boundary + +The CLI must be implemented as a thin orchestration layer over the existing +Python APIs. CLI commands should compose and wrap APIs such as workspace +creation, flow construction, step execution, state inspection, metrics parsing, +and artifact discovery. + +The CLI must not require invasive changes to the current flow-related APIs. In +particular, CLI implementation should avoid changing the semantics of +`EngineFlow`, `Workspace`, `WorkspaceStep`, tool plugin interfaces, or RTL-to-GDS +flow builders only to satisfy command-line concerns. + +If the CLI needs behavior that is not exposed today, prefer one of these +approaches: + +- Add a small, general-purpose Python API that is useful outside the CLI. +- Add a CLI-local adapter that translates current API data into CLI output + objects. +- Add read-only inspection helpers around existing state files, reports, and + artifacts. + +Avoid embedding CLI output formatting, argument parsing, terminal behavior, or +agent-specific disclosure fields inside core flow APIs. + +## Command Shape + +### Core Commands + +The first stable CLI surface should stay small: + +```bash +ecc init +ecc check +ecc run +ecc status +ecc diagnose +ecc metrics +ecc log +ecc artifacts +ecc config +ecc open +``` + +Responsibilities: + +| Command | Responsibility | +| --- | --- | +| `ecc init` | Create a project skeleton and `ecc.toml` | +| `ecc check` | Validate RTL, constraints, PDK, tools, and config | +| `ecc run` | Execute a full flow or selected step range | +| `ecc status` | Summarize run and step state | +| `ecc diagnose` | Explain failures or QoR problems with evidence | +| `ecc metrics` | Show run-level or step-level metrics | +| `ecc log` | Show filtered or raw logs | +| `ecc artifacts` | List generated files and viewer commands | +| `ecc config` | Show user or resolved configuration | +| `ecc open` | Open KLayout, reports, or other viewers | + +### Project-Oriented Entry + +The preferred user entry should be configuration driven: + +```bash +ecc init gcd +ecc check +ecc run +``` + +The project should contain: + +```text +gcd/ +├── ecc.toml +├── rtl/ +├── constraints/ +├── runs/ +└── reports/ +``` + +Command-line arguments may override configuration values, but `ecc.toml` should +be the primary user-facing interface. + +### Step-Level Execution + +Back-end flow work is iterative. 
Step-level execution must be first-class: + +```bash +ecc run --from placement +ecc run --to routing +ecc run --only cts +ecc run --after floorplan +ecc run --resume +ecc run --force --step placement +``` + +Each run should have a stable run id and may have a user tag: + +```bash +ecc run --tag baseline +ecc run --tag dense_place +ecc diff baseline dense_place +``` + +## Output Contracts + +### Summary Line Format + +Default text output should follow this general shape: + +```text +kind= key=value ... disclosure_key="ecc command ..." +``` + +Examples: + +```text +run=baseline status=success elapsed=914s metrics="ecc metrics run baseline" artifacts="ecc artifacts run baseline" +step=routing status=failed elapsed=222s shorts=84 opens=3 drc=87 diagnose="ecc diagnose step routing" log="ecc log step routing --errors" open="ecc open step routing --markers drc" +metric=wns value=-0.083 unit=ns status=fail source=cts/reports/timing_hold.rpt inspect="ecc show metric wns --step cts" +artifact=def step=placement path=runs/baseline/placement/output/design.def open="ecc open step placement --artifact def" +``` + +Rules: + +- Keep one object per line. +- Do not wrap summary lines. +- Use stable lowercase keys. +- Use stable lowercase tokens for step names and metric names. +- Quote command values with double quotes. +- Commands in disclosure fields must be directly executable from the project + root. +- Include at least one disclosure command per summary line. +- Prefer relative paths rooted at the project directory. +- Avoid terminal color as the only status indicator. + +### Error Output + +Errors should also follow progressive disclosure. A failing command should print +a concise summary and actionable disclosure commands: + +```text +error=E2103 status=failed step=routing reason=drc_violations shorts=84 opens=3 diagnose="ecc diagnose step routing" log="ecc log step routing --errors" open="ecc open step routing --markers drc" +``` + +For human readability, a short paragraph may follow, but agents should be able +to use the first line alone. + +### Diagnosis Output + +Diagnosis must include evidence, not only suggestions: + +```text +issue=cts_hold status=fail severity=error wns=-0.083 hold_vios=12 evidence="ecc show issue cts_hold --evidence" log="ecc log step cts --errors" +evidence=timing_hold_report path=runs/baseline/cts/reports/timing_hold.rpt value=-0.083 inspect="ecc show artifact timing_hold_report" +action=enable_hold_repair confidence=medium config="ecc config step cts --resolved" +``` + +Suggestions should be traceable to metrics, reports, or logs. + +## Configuration Direction + +The CLI should move toward a single project configuration file: + +```toml +[design] +name = "gcd" +top = "gcd" +rtl = ["rtl/gcd.v"] +clock_port = "clk" +clock_period = "10ns" + +[pdk] +name = "ics55" +root = "$PDK_ROOT" + +[floorplan] +die_area = [0, 0, 100, 100] +core_util = 45 +aspect_ratio = 1.0 + +[flow] +preset = "rtl2gds" +from = "synthesis" +to = "gds" +``` + +The resolved configuration used by each step should be inspectable: + +```bash +ecc config --resolved +ecc config step placement --resolved +``` + +## AI-Native Behavior + +The CLI should not start with a general chat command. It should first produce +stable structured context that agents can inspect. 
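+
+As a sketch of what such context could look like, a single issue record might
+carry the failing metric, the evidence path, and a ready-to-run disclosure
+command. The field names below are illustrative; the concrete files are listed
+next:
+
+```json
+{
+  "issue": "cts_hold",
+  "severity": "error",
+  "wns": -0.083,
+  "hold_vios": 12,
+  "evidence": ["runs/baseline/cts/reports/timing_hold.rpt"],
+  "log_cmd": "ecc log step cts --errors"
+}
+```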
+ +Preferred data files: + +```text +run.json +steps.json +metrics.json +issues.json +artifacts.json +resolved_config.json +events.jsonl +``` + +Agent-oriented commands can then be layered on top: + +```bash +ecc diagnose +ecc explain step routing +ecc suggest --goal "fix hold" +ecc summarize run latest +``` + +These commands must still return evidence-backed results and disclosure +commands. + +## Roadmap + +### Phase 1: Project And Run Basics + +- [ ] `ecc init` +- [ ] `ecc check` +- [ ] `ecc run` +- [ ] `ecc status` +- [ ] `ecc log` +- [ ] `ecc metrics` +- [ ] Default grep-friendly summary output +- [ ] `--json` and `--jsonl` for status and metrics + +Success criteria: + +- [ ] A user can create a project, run the default RTL-to-GDS flow, inspect status, + inspect logs, and read metrics without writing Python. +- [ ] Every summary line includes at least one disclosure command. + +### Phase 2: Debug And Traceability + +- [ ] `ecc diagnose` +- [ ] `ecc artifacts` +- [ ] `ecc config --resolved` +- [ ] `ecc open` +- [ ] Run tags and run comparison basics +- [ ] Structured issue and artifact metadata + +Success criteria: + +- [ ] A failed step can be investigated through `ecc status -> ecc diagnose -> ecc + log/artifacts/config`. +- [ ] Agent frameworks can follow disclosure commands without parsing prose. + +### Phase 3: Exploration And Assistance + +- [ ] `ecc diff` +- [ ] `ecc sweep` +- [ ] `ecc explain` +- [ ] `ecc suggest` +- [ ] QoR dashboards or report export + +Success criteria: + +- [ ] A user can compare runs, sweep key flow parameters, and receive + evidence-backed next actions for common timing, placement, routing, and DRC + failures. + +## Compatibility Notes + +The current CLI accepts explicit arguments such as `--workspace`, `--rtl`, +`--design`, `--top`, `--clock`, `--pdk-root`, and `--freq`. The new CLI should +preserve a migration path for scripted users, but the long-term default should +be project-oriented and configuration-driven. + +The CLI should remain API-compatible with existing Python users. Changes needed +for the CLI should be additive and should not force current Python flow scripts +to change unless the underlying API already requires a broader cleanup. 
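+
+## Appendix: Following Disclosure Commands
+
+The contract above is intended to make agent loops mechanical. The sketch
+below assumes only the JSONL fields shown in this document (`kind`, `status`,
+`diagnose_cmd`, `log_cmd`); it is illustrative and not part of the CLI:
+
+```python
+import json
+import shlex
+import subprocess
+
+def next_commands(project_dir: str) -> list[str]:
+    """Collect the disclosure commands advertised on failed steps."""
+    proc = subprocess.run(
+        ["ecc", "status", "--jsonl", "--project", project_dir],
+        capture_output=True, text=True, check=False,
+    )
+    commands = []
+    for raw in proc.stdout.splitlines():
+        if not raw.strip():
+            continue
+        obj = json.loads(raw)
+        if obj.get("kind") == "step" and obj.get("status") == "failed":
+            cmd = obj.get("diagnose_cmd") or obj.get("log_cmd")
+            if cmd:
+                commands.append(cmd)
+    return commands
+
+# Disclosure commands are executable as-is from the project root:
+for cmd in next_commands("gcd"):
+    subprocess.run(shlex.split(cmd), cwd="gcd", check=False)
+```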
From 6e9211ac9f867e21dcdd1ad299ed4cf447619843 Mon Sep 17 00:00:00 2001 From: Emin Date: Fri, 1 May 2026 21:29:15 +0800 Subject: [PATCH 003/104] chore: bump ecc submodule Signed-off-by: Emin --- chipcompiler/thirdparty/ecc-tools | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/chipcompiler/thirdparty/ecc-tools b/chipcompiler/thirdparty/ecc-tools index 749185eb..36160db0 160000 --- a/chipcompiler/thirdparty/ecc-tools +++ b/chipcompiler/thirdparty/ecc-tools @@ -1 +1 @@ -Subproject commit 749185eb923125e7478baee3206ca72892be7f0e +Subproject commit 36160db0b30ccd627f2c2a06d9fa517d4cce4d49 From cf9241a9f24074a6d9da265c1ae6cb53fe184d23 Mon Sep 17 00:00:00 2001 From: Emin Date: Fri, 1 May 2026 21:38:55 +0800 Subject: [PATCH 004/104] chore: add .humanize to .gitignore Signed-off-by: Emin --- .gitignore | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.gitignore b/.gitignore index f6f1946b..bf09d5f3 100644 --- a/.gitignore +++ b/.gitignore @@ -183,6 +183,6 @@ requirements_lock.txt chipcompiler/tools/ecc_dreamplace/dreamplace -humanize/ +.humanize/ humanize-* docs/superpowers/ From 68b43d87c4669a5a04cb7cdd0926944373c2ba94 Mon Sep 17 00:00:00 2001 From: Emin Date: Fri, 1 May 2026 21:53:48 +0800 Subject: [PATCH 005/104] feat: implement Phase 1 project-oriented CLI Replace the parameter-only CLI with a subcommand-based ecc command: - ecc init: create project skeleton with ecc.toml template - ecc check: validate project configuration from ecc.toml - ecc run: execute complete rtl2gds flow via existing Python APIs - ecc status: read-only inspection of flow.json run/step state - ecc log: step log discovery with --errors filtering - ecc metrics: metrics JSON reading with key normalization All commands support --json/--jsonl structured output and follow the disclosure-line contract (every summary line includes a runnable command). Module split: config.py (TOML/validation), output.py (formatting), project.py (init/run), inspect.py (status/log/metrics). 50 tests covering all acceptance criteria (positive and negative cases). Console script entry point updated from cli to ecc. 
--- chipcompiler/cli/config.py | 142 +++++++ chipcompiler/cli/inspect.py | 370 ++++++++++++++++++ chipcompiler/cli/main.py | 282 ++++++++------ chipcompiler/cli/output.py | 116 ++++++ chipcompiler/cli/project.py | 190 +++++++++ pyproject.toml | 2 +- test/cli/test_cli_main.py | 755 +++++++++++++++++++++++++++++++----- 7 files changed, 1645 insertions(+), 212 deletions(-) create mode 100644 chipcompiler/cli/config.py create mode 100644 chipcompiler/cli/inspect.py create mode 100644 chipcompiler/cli/output.py create mode 100644 chipcompiler/cli/project.py diff --git a/chipcompiler/cli/config.py b/chipcompiler/cli/config.py new file mode 100644 index 00000000..b1a9349e --- /dev/null +++ b/chipcompiler/cli/config.py @@ -0,0 +1,142 @@ +import os +import tomllib +from dataclasses import dataclass, field + +SUPPORTED_PDK_NAMES = {"ics55"} +SUPPORTED_FLOW_PRESETS = {"rtl2gds"} +SUPPORTED_FLOW_RUNS = {"default"} + + +@dataclass +class ProjectConfig: + design_name: str = "" + design_top: str = "" + design_rtl: list[str] = field(default_factory=list) + design_clock_port: str = "" + design_frequency_mhz: float = 0.0 + + pdk_name: str = "" + pdk_root: str = "" + + flow_preset: str = "" + flow_run: str = "" + + config_path: str = "" + project_dir: str = "" + + +def load_project_config(config_path: str) -> ProjectConfig: + with open(config_path, "rb") as f: + data = tomllib.load(f) + return _parse_config(data, config_path) + + +def _parse_config(data: dict, config_path: str) -> ProjectConfig: + design = data.get("design", {}) + pdk = data.get("pdk", {}) + flow = data.get("flow", {}) + + project_dir = os.path.dirname(os.path.abspath(config_path)) + + cfg = ProjectConfig( + design_name=design.get("name", ""), + design_top=design.get("top", ""), + design_rtl=design.get("rtl", []), + design_clock_port=design.get("clock_port", ""), + design_frequency_mhz=float(design.get("frequency_mhz", 0)), + pdk_name=pdk.get("name", ""), + pdk_root=pdk.get("root", ""), + flow_preset=flow.get("preset", ""), + flow_run=flow.get("run", "default"), + config_path=config_path, + project_dir=project_dir, + ) + return cfg + + +def resolve_project_dir(project: str | None) -> str: + if project: + return os.path.abspath(project) + return os.getcwd() + + +def find_config_path(project_dir: str) -> str | None: + path = os.path.join(project_dir, "ecc.toml") + return path if os.path.isfile(path) else None + + +def validate_project_config(cfg: ProjectConfig) -> list[str]: + errors = [] + + if not cfg.design_name: + errors.append("design.name is required") + if not cfg.design_top: + errors.append("design.top is required") + if not cfg.design_clock_port: + errors.append("design.clock_port is required") + if cfg.design_frequency_mhz <= 0: + errors.append("design.frequency_mhz must be greater than 0") + if not cfg.design_rtl: + errors.append("design.rtl must have at least one entry") + elif len(cfg.design_rtl) > 1: + errors.append("design.rtl must have exactly one entry; use a filelist for multiple sources") + + if not cfg.pdk_name: + errors.append("pdk.name is required") + elif cfg.pdk_name not in SUPPORTED_PDK_NAMES: + errors.append(f"unsupported pdk.name: {cfg.pdk_name}") + + if cfg.pdk_root: + resolved_root = _resolve_path(cfg.project_dir, cfg.pdk_root) + if not os.path.isdir(resolved_root): + errors.append(f"pdk.root is not a directory: {cfg.pdk_root}") + else: + errors.append("pdk.root is required") + + if not cfg.flow_preset: + errors.append("flow.preset is required") + elif cfg.flow_preset not in SUPPORTED_FLOW_PRESETS: + 
errors.append(f"unsupported flow.preset: {cfg.flow_preset}") + + if cfg.flow_run and cfg.flow_run not in SUPPORTED_FLOW_RUNS: + errors.append(f"unsupported flow.run: {cfg.flow_run}") + + if len(cfg.design_rtl) == 1: + rtl_path = _resolve_path(cfg.project_dir, cfg.design_rtl[0]) + if not os.path.exists(rtl_path): + errors.append(f"rtl path does not exist: {cfg.design_rtl[0]}") + + return errors + + +def to_parameters(cfg: ProjectConfig) -> dict: + return { + "PDK": cfg.pdk_name, + "Design": cfg.design_name, + "Top module": cfg.design_top, + "Clock": cfg.design_clock_port, + "Frequency max [MHz]": cfg.design_frequency_mhz, + } + + +def resolve_rtl(cfg: ProjectConfig) -> tuple[str, str, str]: + if not cfg.design_rtl: + return ("", "", "") + + rtl_path = _resolve_path(cfg.project_dir, cfg.design_rtl[0]) + suffix = os.path.splitext(rtl_path)[1].lower() + + FILELIST_SUFFIXES = {".f", ".fl", ".filelist"} + RTL_SUFFIXES = {".v", ".sv", ".svh", ".vh"} + + if suffix in FILELIST_SUFFIXES: + return ("filelist", "", rtl_path) + if suffix in RTL_SUFFIXES: + return ("rtl", rtl_path, "") + return ("rtl", rtl_path, "") + + +def _resolve_path(project_dir: str, path: str) -> str: + if os.path.isabs(path): + return path + return os.path.join(project_dir, path) diff --git a/chipcompiler/cli/inspect.py b/chipcompiler/cli/inspect.py new file mode 100644 index 00000000..463a2915 --- /dev/null +++ b/chipcompiler/cli/inspect.py @@ -0,0 +1,370 @@ +import json +import os +import re + +from chipcompiler.cli.output import ( + normalize_metric_key, + normalize_state, + normalize_step_name, +) + + +def read_flow_json(run_dir: str) -> dict | None: + path = os.path.join(run_dir, "home", "flow.json") + if not os.path.isfile(path): + return None + try: + with open(path) as f: + return json.load(f) + except (json.JSONDecodeError, OSError): + return None + + +def get_run_status(flow_data: dict) -> str: + steps = flow_data.get("steps", []) + if not steps: + return "unstart" + for step in steps: + state = normalize_state(step.get("state", "")) + if state in ("incomplete", "invalid", "ongoing"): + return "failed" + all_done = all(normalize_state(s.get("state", "")) == "success" for s in steps) + return "success" if all_done else "failed" + + +def build_status_lines(run_dir: str, project: str | None = None) -> tuple[list[str], int]: + from chipcompiler.cli.output import disclosure_cmd, format_line + + flow_data = read_flow_json(run_dir) + if flow_data is None: + line = format_line( + run="default", + status="missing", + workspace=run_dir, + run_cmd=disclosure_cmd("ecc run", project), + ) + return [line], 1 + + run_status = get_run_status(flow_data) + lines = [] + + lines.append(format_line( + run="default", + status=run_status, + workspace=run_dir, + status_cmd=disclosure_cmd("ecc status", project), + metrics=disclosure_cmd("ecc metrics", project), + log=disclosure_cmd("ecc log", project), + )) + + for step in flow_data.get("steps", []): + step_token = normalize_step_name(step.get("name", "")) + lines.append(format_line( + step=step_token, + tool=step.get("tool", ""), + status=normalize_state(step.get("state", "")), + runtime=step.get("runtime", "") or None, + metrics=disclosure_cmd(f"ecc metrics {step_token}", project), + log=disclosure_cmd(f"ecc log {step_token} --errors", project), + )) + + return lines, 0 + + +def build_status_json(run_dir: str) -> tuple[dict, int]: + flow_data = read_flow_json(run_dir) + if flow_data is None: + return {"run": "default", "status": "missing", "workspace": run_dir}, 1 + + run_status = 
get_run_status(flow_data) + steps = [] + for step in flow_data.get("steps", []): + steps.append({ + "step": normalize_step_name(step.get("name", "")), + "tool": step.get("tool", ""), + "status": normalize_state(step.get("state", "")), + "runtime": step.get("runtime", ""), + }) + + return {"run": "default", "status": run_status, "workspace": run_dir, "steps": steps}, 0 + + +def build_status_jsonl(run_dir: str) -> tuple[list[dict], int]: + flow_data = read_flow_json(run_dir) + if flow_data is None: + return [{"run": "default", "status": "missing", "workspace": run_dir}], 1 + + run_status = get_run_status(flow_data) + objects = [{"kind": "run", "run": "default", "status": run_status, "workspace": run_dir}] + + for step in flow_data.get("steps", []): + objects.append({ + "kind": "step", + "step": normalize_step_name(step.get("name", "")), + "tool": step.get("tool", ""), + "status": normalize_state(step.get("state", "")), + "runtime": step.get("runtime", ""), + }) + + return objects, 0 + + +ERROR_PATTERNS = re.compile(r"(error|failed|traceback)", re.IGNORECASE) + + +def discover_step_dirs(run_dir: str) -> dict[str, str]: + result = {} + if not os.path.isdir(run_dir): + return result + for entry in os.listdir(run_dir): + full = os.path.join(run_dir, entry) + if os.path.isdir(full) and "_" in entry: + name, _, tool = entry.partition("_") + token = normalize_step_name(name) + result[token] = full + return result + + +def discover_logs(run_dir: str, step_token: str | None = None) -> list[str]: + if step_token is None: + log_dir = os.path.join(run_dir, "log") + if os.path.isdir(log_dir): + return sorted( + os.path.join(log_dir, f) + for f in os.listdir(log_dir) + if os.path.isfile(os.path.join(log_dir, f)) + ) + return [] + + step_dirs = discover_step_dirs(run_dir) + if step_token not in step_dirs: + return [] + + log_dir = os.path.join(step_dirs[step_token], "log") + if not os.path.isdir(log_dir): + return [] + + return sorted( + os.path.join(log_dir, f) + for f in os.listdir(log_dir) + if os.path.isfile(os.path.join(log_dir, f)) + ) + + +def filter_errors(lines: list[str]) -> list[str]: + return [line for line in lines if ERROR_PATTERNS.search(line)] + + +def read_log_file(path: str) -> list[str]: + try: + with open(path) as f: + return f.read().splitlines() + except OSError: + return [] + + +def build_log_lines(run_dir: str, step_token: str | None, errors_only: bool, + project: str | None = None) -> tuple[list[str], int]: + from chipcompiler.cli.output import disclosure_cmd, format_line + + if step_token is None: + log_files = discover_logs(run_dir) + if not log_files: + return [format_line( + log_status="no_global_logs", + workspace=run_dir, + run=disclosure_cmd("ecc run", project), + )], 0 + + lines = [] + for lf in log_files: + lines.append(format_line(log=os.path.relpath(lf, run_dir))) + return lines, 0 + + step_dirs = discover_step_dirs(run_dir) + if step_token not in step_dirs: + return [format_line( + step=step_token, + status="unknown_step", + inspect=disclosure_cmd("ecc status", project), + )], 1 + + log_files = discover_logs(run_dir, step_token) + if not log_files: + return [format_line( + step=step_token, + log_status="missing", + log=disclosure_cmd(f"ecc log {step_token} --errors", project), + )], 1 + + matched_lines = [] + for lf in log_files: + raw = read_log_file(lf) + filtered = filter_errors(raw) if errors_only else raw + for line in filtered: + matched_lines.append((lf, line)) + + if not matched_lines: + return [format_line( + step=step_token, + log_status="no_matching_lines", + 
log=disclosure_cmd(f"ecc log {step_token}", project), + )], 0 + + result = [] + for lf, line in matched_lines: + result.append(format_line( + step=step_token, + source=os.path.relpath(lf, run_dir), + line=line, + log=disclosure_cmd(f"ecc log {step_token} --errors", project), + )) + return result, 0 + + +def build_log_jsonl(run_dir: str, step_token: str, errors_only: bool, + project: str | None = None) -> tuple[list[dict], int]: + step_dirs = discover_step_dirs(run_dir) + if step_token not in step_dirs: + return [{"step": step_token, "status": "unknown_step"}], 1 + + log_files = discover_logs(run_dir, step_token) + if not log_files: + return [{"step": step_token, "log_status": "missing"}], 1 + + objects = [] + for lf in log_files: + raw = read_log_file(lf) + lines = filter_errors(raw) if errors_only else raw + for line in lines: + objects.append({ + "step": step_token, + "source": os.path.relpath(lf, run_dir), + "line": line, + }) + + return objects, 0 + + +def discover_metrics(run_dir: str, step_token: str | None = None) -> dict[str, str]: + step_dirs = discover_step_dirs(run_dir) + result = {} + + if step_token is not None: + if step_token not in step_dirs: + return {} + tokens = [step_token] + else: + tokens = list(step_dirs.keys()) + + for token in tokens: + analysis_dir = os.path.join(step_dirs[token], "analysis") + if not os.path.isdir(analysis_dir): + continue + for f in os.listdir(analysis_dir): + if f.endswith("_metrics.json"): + result[token] = os.path.join(analysis_dir, f) + break + + return result + + +def read_metrics(path: str) -> dict: + try: + with open(path) as f: + return json.load(f) + except (json.JSONDecodeError, OSError): + return {} + + +def build_metrics_lines(run_dir: str, step_token: str | None = None, + project: str | None = None) -> tuple[list[str], int]: + from chipcompiler.cli.output import disclosure_cmd, format_line + + metrics_files = discover_metrics(run_dir, step_token) + if not metrics_files: + if step_token is not None: + step_dirs = discover_step_dirs(run_dir) + if step_token in step_dirs: + return [format_line( + metric_step=step_token, + status="missing", + path=os.path.relpath( + os.path.join(step_dirs[step_token], "analysis", + f"{_internal_from_token(step_token)}_metrics.json"), + run_dir, + ), + log=disclosure_cmd(f"ecc log {step_token} --errors", project), + )], 1 + return [format_line( + step=step_token, + status="unknown_step", + inspect=disclosure_cmd("ecc status", project), + )], 1 + return [format_line( + metrics_status="none", + workspace=run_dir, + status_cmd=disclosure_cmd("ecc status", project), + )], 0 + + lines = [] + rc = 0 + for token, path in sorted(metrics_files.items()): + data = read_metrics(path) + if not data: + continue + for raw_key, value in data.items(): + norm_key = normalize_metric_key(raw_key) + lines.append(format_line( + metric=norm_key, + step=token, + value=value, + source=os.path.relpath(path, run_dir), + inspect=disclosure_cmd(f"ecc metrics {token} --json", project), + )) + return lines, rc + + +def build_metrics_json(run_dir: str, step_token: str | None = None) -> tuple[dict, int]: + metrics_files = discover_metrics(run_dir, step_token) + all_metrics = [] + for token, path in sorted(metrics_files.items()): + data = read_metrics(path) + for raw_key, value in data.items(): + all_metrics.append({ + "metric": normalize_metric_key(raw_key), + "step": token, + "value": value, + "source": os.path.relpath(path, run_dir), + }) + return {"metrics": all_metrics}, 0 + + +def build_metrics_jsonl(run_dir: str, step_token: str | 
None = None) -> tuple[list[dict], int]: + metrics_files = discover_metrics(run_dir, step_token) + objects = [] + for token, path in sorted(metrics_files.items()): + data = read_metrics(path) + for raw_key, value in data.items(): + objects.append({ + "metric": normalize_metric_key(raw_key), + "step": token, + "value": value, + "source": os.path.relpath(path, run_dir), + }) + return objects, 0 + + +def _internal_from_token(token: str) -> str: + reverse = { + "synthesis": "Synthesis", + "floorplan": "Floorplan", + "fixfanout": "fixFanout", + "placement": "place", + "cts": "CTS", + "legalization": "legalization", + "routing": "route", + "drc": "drc", + "filler": "filler", + } + return reverse.get(token, token) diff --git a/chipcompiler/cli/main.py b/chipcompiler/cli/main.py index be3248bd..a731fcfa 100644 --- a/chipcompiler/cli/main.py +++ b/chipcompiler/cli/main.py @@ -1,143 +1,209 @@ -#!/usr/bin/env python - import argparse -import os import sys from collections.abc import Sequence -from chipcompiler.data import create_workspace, get_parameters -from chipcompiler.engine import EngineFlow -from chipcompiler.rtl2gds import build_rtl2gds_flow -from chipcompiler.utility.filelist import parse_filelist, validate_filelist - -FILELIST_SUFFIXES = {".f", ".fl", ".filelist"} -RTL_SUFFIXES = {".v", ".sv", ".svh", ".vh"} +from chipcompiler.cli.config import ( + find_config_path, + load_project_config, + resolve_project_dir, +) def build_parser() -> argparse.ArgumentParser: parser = argparse.ArgumentParser( - prog="cli", - description="Create ChipCompiler workspace and run RTL2GDS flow", - ) - parser.add_argument("--workspace", required=True, help="Workspace directory path") - parser.add_argument("--rtl", required=True, help="RTL file or filelist path") - parser.add_argument("--design", required=True, help="Design name") - parser.add_argument("--top", required=True, help="Top module name") - parser.add_argument("--clock", required=True, help="Clock port name") - parser.add_argument("--pdk-root", required=True, help="ICS55 PDK root directory") - parser.add_argument( - "--freq", - type=float, - default=100.0, - help="Clock frequency in MHz (default: 100)", + prog="ecc", + description="ECC - EDA toolchain for RTL-to-GDS flows", ) + subparsers = parser.add_subparsers(dest="command") + + # ecc init + init_parser = subparsers.add_parser("init", help="Create a new project skeleton") + init_parser.add_argument("name", help="Project name") + + # ecc check + check_parser = subparsers.add_parser("check", help="Validate project configuration") + _add_project_arg(check_parser) + check_parser.add_argument("--json", action="store_true", help="JSON output") + + # ecc run + run_parser = subparsers.add_parser("run", help="Execute the complete flow") + _add_project_arg(run_parser) + run_parser.add_argument("--overwrite", action="store_true", + help="Remove existing runs/default before running") + + # ecc status + status_parser = subparsers.add_parser("status", help="Show run and step status") + _add_project_arg(status_parser) + status_parser.add_argument("--json", action="store_true", help="JSON output") + status_parser.add_argument("--jsonl", action="store_true", help="JSONL output") + + # ecc log + log_parser = subparsers.add_parser("log", help="Inspect step logs") + _add_project_arg(log_parser) + log_parser.add_argument("step", nargs="?", default=None, help="Step name") + log_parser.add_argument("--errors", action="store_true", help="Filter error lines") + log_parser.add_argument("--jsonl", action="store_true", 
help="JSONL output") + + # ecc metrics + metrics_parser = subparsers.add_parser("metrics", help="Show step metrics") + _add_project_arg(metrics_parser) + metrics_parser.add_argument("step", nargs="?", default=None, help="Step name") + metrics_parser.add_argument("--json", action="store_true", help="JSON output") + metrics_parser.add_argument("--jsonl", action="store_true", help="JSONL output") + return parser -def resolve_rtl_input(rtl_path: str) -> tuple[str, str, str]: - normalized_path = os.path.abspath(os.path.expanduser(rtl_path)) - suffix = os.path.splitext(normalized_path)[1].lower() +def _add_project_arg(parser: argparse.ArgumentParser) -> None: + parser.add_argument("--project", default=None, + help="Project directory (default: current directory)") - if suffix in FILELIST_SUFFIXES: - return ("filelist", "", normalized_path) - if suffix in RTL_SUFFIXES: - return ("rtl", normalized_path, "") +def run(argv: Sequence[str] | None = None) -> int: + parser = build_parser() + args = parser.parse_args(list(argv) if argv is not None else None) - try: - parse_filelist(normalized_path) - _, missing_files = validate_filelist(normalized_path) - if len(missing_files) == 0: - return ("filelist", "", normalized_path) - except Exception: - pass + if args.command is None: + parser.print_help() + return 1 - return ("rtl", normalized_path, "") + project = getattr(args, "project", None) + project_dir = resolve_project_dir(project) + + match args.command: + case "init": + return _cmd_init(args) + case "check": + return _cmd_check(args, project_dir, project) + case "run": + return _cmd_run(args, project_dir, project) + case "status": + return _cmd_status(args, project_dir, project) + case "log": + return _cmd_log(args, project_dir, project) + case "metrics": + return _cmd_metrics(args, project_dir, project) + case _: + parser.print_help() + return 1 -def build_parameters(args: argparse.Namespace) -> dict: - parameters = get_parameters("ics55") - parameters.data.update( - { - "PDK": "ics55", - "Design": args.design, - "Top module": args.top, - "Clock": args.clock, - "Frequency max [MHz]": args.freq, - } - ) - return parameters.data +def _cmd_init(args) -> int: + from chipcompiler.cli.output import emit_text + from chipcompiler.cli.project import init_project + lines, rc = init_project(args.name, args.name) + if lines: + emit_text(lines) + return rc -def _validate_args(args: argparse.Namespace) -> str | None: - if not str(args.workspace).strip(): - return "--workspace must not be empty" - if not str(args.design).strip(): - return "--design must not be empty" - if not str(args.top).strip(): - return "--top must not be empty" - if not str(args.clock).strip(): - return "--clock must not be empty" - rtl_path = os.path.abspath(os.path.expanduser(args.rtl)) - if not os.path.exists(rtl_path): - return f"--rtl path does not exist: {rtl_path}" - if not os.path.isfile(rtl_path): - return f"--rtl must point to a file: {rtl_path}" +def _cmd_check(args, project_dir: str, project: str | None) -> int: + from chipcompiler.cli.output import emit_json, emit_text + from chipcompiler.cli.project import check_project - pdk_root = os.path.abspath(os.path.expanduser(args.pdk_root)) - if not os.path.exists(pdk_root): - return f"--pdk-root path does not exist: {pdk_root}" - if not os.path.isdir(pdk_root): - return f"--pdk-root must point to a directory: {pdk_root}" + if getattr(args, "json", False): + config_path = find_config_path(project_dir) + if config_path is None: + emit_json({"status": "fail", "errors": ["missing 
ecc.toml"]}) + return 1 + cfg = load_project_config(config_path) + from chipcompiler.cli.config import validate_project_config + errors = validate_project_config(cfg) + if errors: + emit_json({"status": "fail", "errors": errors}) + return 1 + emit_json({ + "status": "pass", + "design": cfg.design_name, + "top": cfg.design_top, + "rtl": cfg.design_rtl, + "pdk": cfg.pdk_name, + "preset": cfg.flow_preset, + }) + return 0 - if args.freq <= 0: - return "--freq must be greater than 0" + lines, rc = check_project(project_dir, project) + if lines: + emit_text(lines) + return rc - return None +def _cmd_run(args, project_dir: str, project: str | None) -> int: + from chipcompiler.cli.output import emit_text + from chipcompiler.cli.project import run_project -def run(argv: Sequence[str] | None = None) -> int: - parser = build_parser() - args = parser.parse_args(list(argv) if argv is not None else None) + lines, rc = run_project(project_dir, args.overwrite, project) + if lines: + emit_text(lines) + return rc - validation_error = _validate_args(args) - if validation_error: - print(f"Error: {validation_error}", file=sys.stderr) - return 1 - try: - _, origin_verilog, input_filelist = resolve_rtl_input(args.rtl) - parameters = build_parameters(args) - - workspace = create_workspace( - directory=args.workspace, - origin_def="", - origin_verilog=origin_verilog, - pdk="ics55", - parameters=parameters, - input_filelist=input_filelist, - pdk_root=args.pdk_root, - ) - if workspace is None: - print("Error: failed to create workspace", file=sys.stderr) - return 1 +def _cmd_status(args, project_dir: str, project: str | None) -> int: + from chipcompiler.cli.inspect import build_status_json, build_status_jsonl, build_status_lines + from chipcompiler.cli.output import emit_json, emit_jsonl, emit_text - engine_flow = EngineFlow(workspace=workspace) - if not engine_flow.has_init(): - for step, tool, state in build_rtl2gds_flow(): - engine_flow.add_step(step=step, tool=tool, state=state) + run_dir = _run_dir(project_dir) - engine_flow.create_step_workspaces() + if getattr(args, "jsonl", False): + objects, rc = build_status_jsonl(run_dir) + emit_jsonl(objects) + return rc - if not engine_flow.run_steps(): - print("Error: flow execution failed", file=sys.stderr) - return 1 + if getattr(args, "json", False): + obj, rc = build_status_json(run_dir) + emit_json(obj) + return rc - return 0 - except Exception as exc: - print(f"Error: {exc}", file=sys.stderr) - return 1 + lines, rc = build_status_lines(run_dir, project) + emit_text(lines) + return rc + + +def _cmd_log(args, project_dir: str, project: str | None) -> int: + from chipcompiler.cli.inspect import build_log_jsonl, build_log_lines + from chipcompiler.cli.output import emit_jsonl, emit_text + + run_dir = _run_dir(project_dir) + + if getattr(args, "jsonl", False): + objects, rc = build_log_jsonl(run_dir, args.step, args.errors, project) + emit_jsonl(objects) + return rc + + lines, rc = build_log_lines(run_dir, args.step, args.errors, project) + emit_text(lines) + return rc + + +def _cmd_metrics(args, project_dir: str, project: str | None) -> int: + from chipcompiler.cli.inspect import ( + build_metrics_json, + build_metrics_jsonl, + build_metrics_lines, + ) + from chipcompiler.cli.output import emit_json, emit_jsonl, emit_text + + run_dir = _run_dir(project_dir) + + if getattr(args, "jsonl", False): + objects, rc = build_metrics_jsonl(run_dir, args.step) + emit_jsonl(objects) + return rc + + if getattr(args, "json", False): + obj, rc = build_metrics_json(run_dir, 
args.step) + emit_json(obj) + return rc + + lines, rc = build_metrics_lines(run_dir, args.step, project) + emit_text(lines) + return rc + + +def _run_dir(project_dir: str) -> str: + import os + return os.path.join(project_dir, "runs", "default") def main() -> None: diff --git a/chipcompiler/cli/output.py b/chipcompiler/cli/output.py new file mode 100644 index 00000000..51f5b460 --- /dev/null +++ b/chipcompiler/cli/output.py @@ -0,0 +1,116 @@ +import json +import re +import sys + + +def format_field(key: str, value) -> str: + if isinstance(value, str) and re.search(r'\s', value): + return f'{key}="{value}"' + return f"{key}={value}" + + +def format_line(**fields) -> str: + parts = [] + for key, value in fields.items(): + if value is not None: + parts.append(format_field(key, value)) + return " ".join(parts) + + +def disclosure_cmd(command: str, project: str | None = None) -> str: + if project: + return f"{command} --project {project}" + return command + + +def emit_text(lines: list[str], file=None) -> None: + target = file or sys.stdout + for line in lines: + print(line, file=target) + + +def emit_json(obj: dict, file=None) -> None: + target = file or sys.stdout + print(json.dumps(obj, ensure_ascii=False), file=target) + + +def emit_jsonl(objects: list[dict], file=None) -> None: + target = file or sys.stdout + for obj in objects: + print(json.dumps(obj, ensure_ascii=False), file=target) + + +def normalize_step_name(internal: str) -> str: + mapping = { + "Synthesis": "synthesis", + "Floorplan": "floorplan", + "fixFanout": "fixfanout", + "place": "placement", + "CTS": "cts", + "legalization": "legalization", + "route": "routing", + "drc": "drc", + "filler": "filler", + } + return mapping.get(internal, internal.lower()) + + +def normalize_state(internal: str) -> str: + mapping = { + "Success": "success", + "Incomplete": "incomplete", + "Unstart": "unstart", + "Ongoing": "ongoing", + "Pending": "pending", + "Invalid": "invalid", + } + return mapping.get(internal, internal.lower()) + + +def normalize_metric_key(raw_key: str) -> str: + known = { + "Cell number": "cell_number", + "Cell area": "cell_area", + "Wire number": "wire_number", + "Port number": "port_number", + "Frequency [MHz]": "frequency_mhz", + "Die area [μm^2]": "die_area_um2", + "Die width [um]": "die_width_um", + "Die height [um]": "die_height_um", + "Die util": "die_util", + "Core util": "core_util", + "Total io pins": "total_io_pins", + "Total instances": "total_instances", + "Total nets": "total_nets", + "max_WNS": "max_wns", + "max_TNS": "max_tns", + "min_WNS": "min_wns", + "min_TNS": "min_tns", + "GP HPWL": "gp_hpwl", + "DP HPWL": "dp_hpwl", + "overflow": "overflow", + "overflow_number": "overflow_number", + "bin_number": "bin_number", + "buffer_num": "buffer_num", + "buffer_area": "buffer_area", + "clock_path_max_buffer": "clock_path_max_buffer", + "clock_path_min_buffer": "clock_path_min_buffer", + "total_clock_wirelength": "total_clock_wirelength", + "wire_len": "wire_len", + "num_via": "num_via", + "total_movement": "total_movement", + "drc_num": "drc_num", + "Max fanout": "max_fanout", + "Tool": "tool", + } + if raw_key in known: + return known[raw_key] + s = raw_key.lower() + s = re.sub(r'[\s\[\]μm^]+', '_', s) + s = re.sub(r'_+', '_', s) + s = s.strip('_') + return s + + +def step_dir_name(step_name: str, tool: str) -> str: + return f"{step_name}_{tool}" diff --git a/chipcompiler/cli/project.py b/chipcompiler/cli/project.py new file mode 100644 index 00000000..9df84d53 --- /dev/null +++ b/chipcompiler/cli/project.py @@ 
-0,0 +1,190 @@ +import os +import shutil +import sys + +from chipcompiler.cli.config import ( + find_config_path, + load_project_config, + resolve_rtl, + to_parameters, + validate_project_config, +) +from chipcompiler.cli.output import disclosure_cmd, format_line +from chipcompiler.data import create_workspace +from chipcompiler.engine import EngineFlow +from chipcompiler.rtl2gds import build_rtl2gds_flow + +DEFAULT_TOML = '''[design] +name = "{name}" +top = "{name}" +rtl = ["rtl/{name}.v"] +clock_port = "clk" +frequency_mhz = 100.0 + +[pdk] +name = "ics55" +root = "" + +[flow] +preset = "rtl2gds" +run = "default" +''' + + +def init_project(name: str, project: str | None = None) -> tuple[list[str], int]: + if not name or not name.strip(): + print(format_line(error="project name is required"), file=sys.stderr) + return [], 1 + + project_dir = os.path.abspath(name) + config_path = os.path.join(project_dir, "ecc.toml") + + if os.path.exists(config_path): + print(format_line( + error="already_exists", + path=config_path, + ), file=sys.stderr) + return [], 1 + + os.makedirs(project_dir, exist_ok=True) + os.makedirs(os.path.join(project_dir, "rtl"), exist_ok=True) + os.makedirs(os.path.join(project_dir, "constraints"), exist_ok=True) + os.makedirs(os.path.join(project_dir, "runs"), exist_ok=True) + + with open(config_path, "w") as f: + f.write(DEFAULT_TOML.format(name=name)) + + project_arg = project or name + line = format_line( + project=name, + status="created", + path=name, + check=disclosure_cmd("ecc check", project_arg), + run=disclosure_cmd("ecc run", project_arg), + ) + return [line], 0 + + +def check_project(project_dir: str, project: str | None = None) -> tuple[list[str], int]: + config_path = find_config_path(project_dir) + if config_path is None: + print(format_line( + error="missing_config", + path=os.path.join(project_dir, "ecc.toml"), + ), file=sys.stderr) + return [], 1 + + cfg = load_project_config(config_path) + errors = validate_project_config(cfg) + + lines = [] + + if errors: + for err in errors: + lines.append(format_line( + check="config", + status="fail", + reason=err, + source="ecc.toml", + inspect=disclosure_cmd("ecc check --json", project), + )) + return lines, 1 + + lines.append(format_line( + project=cfg.design_name, + status="checked", + config="ecc.toml", + run_dir="runs/default", + run=disclosure_cmd("ecc run", project), + status_cmd=disclosure_cmd("ecc status", project), + )) + + if cfg.design_rtl: + lines.append(format_line( + check="rtl", + status="pass", + path=cfg.design_rtl[0], + inspect=disclosure_cmd("ecc check --json", project), + )) + + return lines, 0 + + +def run_project(project_dir: str, overwrite: bool = False, + project: str | None = None) -> tuple[list[str], int]: + config_path = find_config_path(project_dir) + if config_path is None: + print(format_line( + error="missing_config", + path=os.path.join(project_dir, "ecc.toml"), + ), file=sys.stderr) + return [], 1 + + cfg = load_project_config(config_path) + errors = validate_project_config(cfg) + if errors: + for err in errors: + print(format_line(error="config_error", reason=err), file=sys.stderr) + return [], 1 + + run_dir = os.path.join(project_dir, "runs", "default") + flow_json = os.path.join(run_dir, "home", "flow.json") + + if os.path.exists(flow_json) and not overwrite: + print(format_line( + error="run_exists", + run="default", + workspace=run_dir, + overwrite="ecc run --overwrite", + ), file=sys.stderr) + return [], 1 + + if overwrite and os.path.exists(run_dir): + shutil.rmtree(run_dir) + 
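+    # Map the validated ecc.toml config onto the inputs expected by create_workspace.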
+ rtl_mode, origin_verilog, input_filelist = resolve_rtl(cfg) + parameters = to_parameters(cfg) + + workspace = create_workspace( + directory=run_dir, + origin_def="", + origin_verilog=origin_verilog, + pdk=cfg.pdk_name, + parameters=parameters, + input_filelist=input_filelist, + pdk_root=cfg.pdk_root, + ) + if workspace is None: + print(format_line( + error="workspace_failed", + run="default", + workspace=run_dir, + ), file=sys.stderr) + return [], 1 + + engine_flow = EngineFlow(workspace=workspace) + if not engine_flow.has_init(): + for step, tool, state in build_rtl2gds_flow(): + engine_flow.add_step(step=step, tool=tool, state=state) + + engine_flow.create_step_workspaces() + + if not engine_flow.run_steps(): + print(format_line( + run="default", + status="failed", + workspace=run_dir, + status_cmd=disclosure_cmd("ecc status", project), + log=disclosure_cmd("ecc log --errors", project), + ), file=sys.stderr) + return [], 1 + + lines = [format_line( + run="default", + status="success", + workspace=run_dir, + status_cmd=disclosure_cmd("ecc status", project), + metrics=disclosure_cmd("ecc metrics", project), + log=disclosure_cmd("ecc log", project), + )] + return lines, 0 diff --git a/pyproject.toml b/pyproject.toml index b5f03056..50dd1916 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -36,7 +36,7 @@ dependencies = [ "tqdm>=4.67.1", "uvicorn>=0.27", ] -scripts.cli = "chipcompiler.cli.main:main" +scripts.ecc = "chipcompiler.cli.main:main" [dependency-groups] dev = [ diff --git a/test/cli/test_cli_main.py b/test/cli/test_cli_main.py index e12585f1..2c9fdf2d 100644 --- a/test/cli/test_cli_main.py +++ b/test/cli/test_cli_main.py @@ -1,11 +1,14 @@ -#!/usr/bin/env python - +import json +import os +import re from types import SimpleNamespace -import pytest - from chipcompiler.cli import main as cli_main +# --------------------------------------------------------------------------- +# Helpers +# --------------------------------------------------------------------------- + class DummyFlow: has_init_value = False @@ -33,27 +36,8 @@ def run_steps(self): return self.run_steps_value -def _common_args(workspace, rtl, pdk_root): - return [ - "--workspace", - str(workspace), - "--rtl", - str(rtl), - "--design", - "top_design", - "--top", - "top", - "--clock", - "clk", - "--pdk-root", - str(pdk_root), - ] - - -def _install_cli_mocks(monkeypatch): - capture = { - "create_kwargs": None, - } +def _install_flow_mocks(monkeypatch): + capture = {"create_kwargs": None} workspace_obj = SimpleNamespace(name="workspace") DummyFlow.instances = [] @@ -64,85 +48,650 @@ def fake_create_workspace(**kwargs): capture["create_kwargs"] = kwargs return workspace_obj - monkeypatch.setattr(cli_main, "create_workspace", fake_create_workspace) - monkeypatch.setattr(cli_main, "EngineFlow", DummyFlow) - monkeypatch.setattr(cli_main, "build_rtl2gds_flow", lambda: [("Synthesis", "yosys", "Unstart")]) + monkeypatch.setattr("chipcompiler.cli.project.create_workspace", fake_create_workspace) + monkeypatch.setattr("chipcompiler.cli.project.EngineFlow", DummyFlow) + monkeypatch.setattr( + "chipcompiler.cli.project.build_rtl2gds_flow", + lambda: [("Synthesis", "yosys", "Unstart")], + ) return capture -def test_cli_rtl_mode_calls_create_workspace_correctly(tmp_path, monkeypatch): - rtl = tmp_path / "top.v" - rtl.write_text("module top(input clk); endmodule\n") - pdk_root = tmp_path / "ics55" - pdk_root.mkdir() - workspace_dir = tmp_path / "ws" - - capture = _install_cli_mocks(monkeypatch) - rc = 
cli_main.run(_common_args(workspace_dir, rtl, pdk_root)) - - assert rc == 0 - assert capture["create_kwargs"]["origin_verilog"] == str(rtl.resolve()) - assert capture["create_kwargs"]["input_filelist"] == "" - assert capture["create_kwargs"]["pdk"] == "ics55" - assert capture["create_kwargs"]["parameters"]["Design"] == "top_design" - assert capture["create_kwargs"]["parameters"]["Top module"] == "top" - assert capture["create_kwargs"]["parameters"]["Clock"] == "clk" - assert capture["create_kwargs"]["parameters"]["Frequency max [MHz]"] == 100.0 - assert DummyFlow.instances[0].create_called is True - assert DummyFlow.instances[0].run_called is True - - -def test_cli_filelist_mode_calls_create_workspace_correctly(tmp_path, monkeypatch): - rtl_source = tmp_path / "a.v" - rtl_source.write_text("module a(); endmodule\n") - filelist = tmp_path / "design.f" - filelist.write_text("a.v\n") - pdk_root = tmp_path / "ics55" - pdk_root.mkdir() - workspace_dir = tmp_path / "ws" - - capture = _install_cli_mocks(monkeypatch) - rc = cli_main.run(_common_args(workspace_dir, filelist, pdk_root)) - - assert rc == 0 - assert capture["create_kwargs"]["origin_verilog"] == "" - assert capture["create_kwargs"]["input_filelist"] == str(filelist.resolve()) - - -def test_cli_unknown_suffix_fallback_to_filelist(tmp_path, monkeypatch): - rtl_source = tmp_path / "b.v" - rtl_source.write_text("module b(); endmodule\n") - filelist_like = tmp_path / "design.listing" - filelist_like.write_text("b.v\n") - pdk_root = tmp_path / "ics55" - pdk_root.mkdir() - workspace_dir = tmp_path / "ws" - - capture = _install_cli_mocks(monkeypatch) - rc = cli_main.run(_common_args(workspace_dir, filelist_like, pdk_root)) - - assert rc == 0 - assert capture["create_kwargs"]["origin_verilog"] == "" - assert capture["create_kwargs"]["input_filelist"] == str(filelist_like.resolve()) - - -def test_cli_requires_mandatory_arguments(): - with pytest.raises(SystemExit) as exc_info: - cli_main.run([]) - assert exc_info.value.code == 2 - - -def test_cli_returns_nonzero_when_run_steps_failed(tmp_path, monkeypatch): - rtl = tmp_path / "top.v" - rtl.write_text("module top(input clk); endmodule\n") - pdk_root = tmp_path / "ics55" - pdk_root.mkdir() - workspace_dir = tmp_path / "ws" - - _install_cli_mocks(monkeypatch) - DummyFlow.run_steps_value = False - - rc = cli_main.run(_common_args(workspace_dir, rtl, pdk_root)) - assert rc == 1 - +def _create_valid_project(tmp_path, name="gcd", pdk_root=None): + project_dir = tmp_path / name + project_dir.mkdir(exist_ok=True) + (project_dir / "rtl").mkdir(exist_ok=True) + (project_dir / "constraints").mkdir(exist_ok=True) + (project_dir / "runs").mkdir(exist_ok=True) + + rtl_file = project_dir / "rtl" / "gcd.v" + rtl_file.write_text("module gcd(input clk); endmodule\n") + + if pdk_root is None: + pdk_root = tmp_path / "ics55" + pdk_root.mkdir(exist_ok=True) + + toml = f'''[design] +name = "{name}" +top = "{name}" +rtl = ["rtl/gcd.v"] +clock_port = "clk" +frequency_mhz = 100.0 + +[pdk] +name = "ics55" +root = "{pdk_root}" + +[flow] +preset = "rtl2gds" +run = "default" +''' + (project_dir / "ecc.toml").write_text(toml) + return str(project_dir) + + +def _create_flow_json(run_dir, steps=None): + home = os.path.join(run_dir, "home") + os.makedirs(home, exist_ok=True) + if steps is None: + steps = [ + {"name": "Synthesis", "tool": "yosys", "state": "Success", "runtime": "0:00:18"}, + {"name": "Floorplan", "tool": "ecc", "state": "Success", "runtime": "0:00:04"}, + ] + with open(os.path.join(home, "flow.json"), "w") as f: 
+ json.dump({"steps": steps}, f) + + +def _has_disclosure(line): + return bool(re.search(r'\w+="ecc ', line)) + + +# =========================================================================== +# AC-1: ecc init +# =========================================================================== + + +class TestInit: + def test_init_creates_skeleton(self, tmp_path): + project_path = str(tmp_path / "gcd") + rc = cli_main.run(["init", project_path]) + assert rc == 0 + + assert (tmp_path / "gcd" / "ecc.toml").exists() + assert (tmp_path / "gcd" / "rtl").is_dir() + assert (tmp_path / "gcd" / "constraints").is_dir() + assert (tmp_path / "gcd" / "runs").is_dir() + + def test_init_output_has_disclosure_commands(self, tmp_path, capsys): + project_path = str(tmp_path / "myproj") + rc = cli_main.run(["init", project_path]) + assert rc == 0 + out = capsys.readouterr().out + assert 'check="ecc check' in out + assert 'run="ecc run' in out + + def test_init_fails_if_ecc_toml_exists(self, tmp_path): + project_dir = tmp_path / "gcd" + project_dir.mkdir() + (project_dir / "ecc.toml").write_text("[design]\n") + rc = cli_main.run(["init", str(project_dir)]) + assert rc == 1 + + def test_init_rejects_empty_name(self): + rc = cli_main.run(["init", ""]) + assert rc == 1 + + +# =========================================================================== +# AC-2: ecc check +# =========================================================================== + + +class TestCheck: + def test_check_passes_valid_config(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + rc = cli_main.run(["check", "--project", project_dir]) + assert rc == 0 + out = capsys.readouterr().out + assert "status=checked" in out + + def test_check_from_inside_project_dir(self, tmp_path, monkeypatch, capsys): + project_dir = _create_valid_project(tmp_path) + monkeypatch.chdir(project_dir) + rc = cli_main.run(["check"]) + assert rc == 0 + out = capsys.readouterr().out + assert "status=checked" in out + + def test_check_fails_missing_ecc_toml(self, tmp_path): + rc = cli_main.run(["check", "--project", str(tmp_path)]) + assert rc == 1 + + def test_check_fails_missing_rtl(self, tmp_path): + project_dir = _create_valid_project(tmp_path) + toml_path = os.path.join(project_dir, "ecc.toml") + with open(toml_path, "w") as f: + f.write( + '[design]\nname="gcd"\ntop="gcd"\nrtl=["rtl/missing.v"]\n' + 'clock_port="clk"\nfrequency_mhz=100\n' + '[pdk]\nname="ics55"\nroot=""\n' + '[flow]\npreset="rtl2gds"\nrun="default"\n', + ) + rc = cli_main.run(["check", "--project", project_dir]) + assert rc == 1 + + def test_check_fails_empty_pdk_root(self, tmp_path): + project_dir = _create_valid_project(tmp_path, pdk_root="") + rc = cli_main.run(["check", "--project", project_dir]) + assert rc == 1 + + def test_check_fails_non_directory_pdk_root(self, tmp_path): + pdk_root = tmp_path / "ics55.txt" + pdk_root.write_text("not a dir") + project_dir = _create_valid_project(tmp_path, pdk_root=str(pdk_root)) + rc = cli_main.run(["check", "--project", project_dir]) + assert rc == 1 + + def test_check_fails_unsupported_pdk(self, tmp_path): + project_dir = _create_valid_project(tmp_path) + toml_path = os.path.join(project_dir, "ecc.toml") + with open(toml_path) as f: + content = f.read() + content = content.replace('name = "ics55"', 'name = "unsupported"') + with open(toml_path, "w") as f: + f.write(content) + rc = cli_main.run(["check", "--project", project_dir]) + assert rc == 1 + + def test_check_fails_unsupported_preset(self, tmp_path): + project_dir = 
_create_valid_project(tmp_path) + toml_path = os.path.join(project_dir, "ecc.toml") + with open(toml_path) as f: + content = f.read() + content = content.replace('preset = "rtl2gds"', 'preset = "unknown"') + with open(toml_path, "w") as f: + f.write(content) + rc = cli_main.run(["check", "--project", project_dir]) + assert rc == 1 + + def test_check_fails_non_positive_frequency(self, tmp_path): + project_dir = _create_valid_project(tmp_path) + toml_path = os.path.join(project_dir, "ecc.toml") + with open(toml_path) as f: + content = f.read() + content = content.replace("frequency_mhz = 100.0", "frequency_mhz = -10") + with open(toml_path, "w") as f: + f.write(content) + rc = cli_main.run(["check", "--project", project_dir]) + assert rc == 1 + + def test_check_fails_multiple_rtl(self, tmp_path): + project_dir = _create_valid_project(tmp_path) + toml_path = os.path.join(project_dir, "ecc.toml") + with open(toml_path) as f: + content = f.read() + content = content.replace( + 'rtl = ["rtl/gcd.v"]', + 'rtl = ["rtl/a.v", "rtl/b.v"]', + ) + with open(toml_path, "w") as f: + f.write(content) + rc = cli_main.run(["check", "--project", project_dir]) + assert rc == 1 + + def test_check_json_output(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + rc = cli_main.run(["check", "--project", project_dir, "--json"]) + assert rc == 0 + out = capsys.readouterr().out + data = json.loads(out) + assert data["status"] == "pass" + assert data["design"] == "gcd" + + +# =========================================================================== +# AC-3: ecc run +# =========================================================================== + + +class TestRun: + def test_run_calls_create_workspace(self, tmp_path, monkeypatch): + project_dir = _create_valid_project(tmp_path) + capture = _install_flow_mocks(monkeypatch) + + rc = cli_main.run(["run", "--project", project_dir]) + assert rc == 0 + assert capture["create_kwargs"]["directory"] == os.path.join( + project_dir, "runs", "default" + ) + + def test_run_adds_flow_steps_when_no_init(self, tmp_path, monkeypatch): + project_dir = _create_valid_project(tmp_path) + _install_flow_mocks(monkeypatch) + + rc = cli_main.run(["run", "--project", project_dir]) + assert rc == 0 + assert len(DummyFlow.instances[0].added_steps) > 0 + + def test_run_calls_create_and_run(self, tmp_path, monkeypatch): + project_dir = _create_valid_project(tmp_path) + _install_flow_mocks(monkeypatch) + + rc = cli_main.run(["run", "--project", project_dir]) + assert rc == 0 + assert DummyFlow.instances[0].create_called + assert DummyFlow.instances[0].run_called + + def test_run_overwrite_removes_existing(self, tmp_path, monkeypatch): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + _create_flow_json(run_dir) + _install_flow_mocks(monkeypatch) + + rc = cli_main.run(["run", "--project", project_dir, "--overwrite"]) + assert rc == 0 + + def test_run_fails_if_flow_json_exists(self, tmp_path): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + _create_flow_json(run_dir) + + rc = cli_main.run(["run", "--project", project_dir]) + assert rc == 1 + + def test_run_fails_on_config_error(self, tmp_path): + project_dir = tmp_path / "bad" + project_dir.mkdir() + (project_dir / "ecc.toml").write_text("[design]\n") + rc = cli_main.run(["run", "--project", str(project_dir)]) + assert rc == 1 + + def test_run_fails_when_create_workspace_returns_none(self, tmp_path, monkeypatch): + 
project_dir = _create_valid_project(tmp_path) + _install_flow_mocks(monkeypatch) + + def fake_create(**kwargs): + return None + + monkeypatch.setattr("chipcompiler.cli.project.create_workspace", fake_create) + rc = cli_main.run(["run", "--project", project_dir]) + assert rc == 1 + + def test_run_fails_when_run_steps_false(self, tmp_path, monkeypatch): + project_dir = _create_valid_project(tmp_path) + _install_flow_mocks(monkeypatch) + DummyFlow.run_steps_value = False + + rc = cli_main.run(["run", "--project", project_dir]) + assert rc == 1 + + +# =========================================================================== +# AC-4: ecc status +# =========================================================================== + + +class TestStatus: + def test_status_reads_flow_json(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + _create_flow_json(run_dir) + + rc = cli_main.run(["status", "--project", project_dir]) + assert rc == 0 + out = capsys.readouterr().out + assert "run=default" in out + assert "step=synthesis" in out + assert "step=floorplan" in out + + def test_status_json(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + _create_flow_json(run_dir) + + rc = cli_main.run(["status", "--project", project_dir, "--json"]) + assert rc == 0 + data = json.loads(capsys.readouterr().out) + assert data["run"] == "default" + assert data["status"] == "success" + assert len(data["steps"]) == 2 + + def test_status_jsonl(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + _create_flow_json(run_dir) + + rc = cli_main.run(["status", "--project", project_dir, "--jsonl"]) + assert rc == 0 + lines = capsys.readouterr().out.strip().split("\n") + objects = [json.loads(ln) for ln in lines] + assert objects[0]["kind"] == "run" + assert objects[1]["kind"] == "step" + + def test_status_normalizes_step_names(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + _create_flow_json(run_dir, [ + {"name": "Synthesis", "tool": "yosys", "state": "Success", "runtime": "0:00:18"}, + {"name": "place", "tool": "dreamplace", "state": "Success", "runtime": "0:01:12"}, + ]) + + rc = cli_main.run(["status", "--project", project_dir]) + assert rc == 0 + out = capsys.readouterr().out + assert "step=synthesis" in out + assert "step=placement" in out + + def test_status_missing_run(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + + rc = cli_main.run(["status", "--project", project_dir]) + assert rc == 1 + out = capsys.readouterr().out + assert "status=missing" in out + assert 'run_cmd="ecc run' in out + + def test_status_invalid_flow_json(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + home = os.path.join(run_dir, "home") + os.makedirs(home, exist_ok=True) + with open(os.path.join(home, "flow.json"), "w") as f: + f.write("not valid json{{{") + + rc = cli_main.run(["status", "--project", project_dir]) + assert rc == 1 + + +# =========================================================================== +# AC-5: ecc log +# =========================================================================== + + +class TestLog: + def test_log_step_errors(self, tmp_path, capsys): + project_dir = 
_create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + + step_dir = os.path.join(run_dir, "Synthesis_yosys", "log") + os.makedirs(step_dir, exist_ok=True) + with open(os.path.join(step_dir, "synthesis.log"), "w") as f: + f.write("Info: running\nError: bad thing\nWarning: meh\nTraceback: crash\n") + + rc = cli_main.run(["log", "synthesis", "--errors", "--project", project_dir]) + assert rc == 0 + out = capsys.readouterr().out + assert "Error: bad thing" in out + assert "Traceback: crash" in out + assert "Info: running" not in out + + def test_log_step_errors_jsonl(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + + step_dir = os.path.join(run_dir, "Synthesis_yosys", "log") + os.makedirs(step_dir, exist_ok=True) + with open(os.path.join(step_dir, "synthesis.log"), "w") as f: + f.write("Info: running\nError: bad thing\n") + + rc = cli_main.run( + ["log", "synthesis", "--errors", "--jsonl", "--project", project_dir] + ) + assert rc == 0 + objects = [json.loads(ln) for ln in capsys.readouterr().out.strip().split("\n")] + assert any("Error" in obj["line"] for obj in objects) + + def test_log_no_step_shows_locations(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + log_dir = os.path.join(run_dir, "log") + os.makedirs(log_dir, exist_ok=True) + with open(os.path.join(log_dir, "flow.log"), "w") as f: + f.write("log content\n") + + rc = cli_main.run(["log", "--project", project_dir]) + assert rc == 0 + + def test_log_unknown_step(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + os.makedirs(os.path.join(project_dir, "runs", "default"), exist_ok=True) + + rc = cli_main.run(["log", "nonexistent", "--project", project_dir]) + assert rc == 1 + + def test_log_missing_step_logs(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + os.makedirs(os.path.join(run_dir, "Synthesis_yosys"), exist_ok=True) + + rc = cli_main.run(["log", "synthesis", "--project", project_dir]) + assert rc == 1 + + +# =========================================================================== +# AC-6: ecc metrics +# =========================================================================== + + +class TestMetrics: + def test_metrics_reads_step_metrics(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + + analysis_dir = os.path.join(run_dir, "Synthesis_yosys", "analysis") + os.makedirs(analysis_dir, exist_ok=True) + with open(os.path.join(analysis_dir, "Synthesis_metrics.json"), "w") as f: + json.dump({"Cell number": 312, "Cell area": 1840.2}, f) + + rc = cli_main.run(["metrics", "synthesis", "--project", project_dir]) + assert rc == 0 + out = capsys.readouterr().out + assert "metric=cell_number" in out + assert "value=312" in out + + def test_metrics_all_steps(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + + for step_dir_name in ["Synthesis_yosys", "Floorplan_ecc"]: + analysis = os.path.join(run_dir, step_dir_name, "analysis") + os.makedirs(analysis, exist_ok=True) + metrics_name = step_dir_name.split("_")[0] + "_metrics.json" + with open(os.path.join(analysis, metrics_name), "w") as f: + json.dump({"Cell number": 100}, f) + + rc = cli_main.run(["metrics", "--project", 
project_dir]) + assert rc == 0 + out = capsys.readouterr().out + assert "step=synthesis" in out + assert "step=floorplan" in out + + def test_metrics_json(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + + analysis_dir = os.path.join(run_dir, "Synthesis_yosys", "analysis") + os.makedirs(analysis_dir, exist_ok=True) + with open(os.path.join(analysis_dir, "Synthesis_metrics.json"), "w") as f: + json.dump({"Cell number": 312}, f) + + rc = cli_main.run( + ["metrics", "synthesis", "--json", "--project", project_dir] + ) + assert rc == 0 + data = json.loads(capsys.readouterr().out) + assert len(data["metrics"]) == 1 + assert data["metrics"][0]["metric"] == "cell_number" + + def test_metrics_jsonl(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + + analysis_dir = os.path.join(run_dir, "Synthesis_yosys", "analysis") + os.makedirs(analysis_dir, exist_ok=True) + with open(os.path.join(analysis_dir, "Synthesis_metrics.json"), "w") as f: + json.dump({"Cell number": 312, "Cell area": 1840.2}, f) + + rc = cli_main.run( + ["metrics", "synthesis", "--jsonl", "--project", project_dir] + ) + assert rc == 0 + objects = [json.loads(ln) for ln in capsys.readouterr().out.strip().split("\n")] + assert len(objects) == 2 + + def test_metrics_normalizes_known_keys(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + + analysis_dir = os.path.join(run_dir, "CTS_ecc", "analysis") + os.makedirs(analysis_dir, exist_ok=True) + with open(os.path.join(analysis_dir, "CTS_metrics.json"), "w") as f: + json.dump({"Frequency [MHz]": 450.0, "Die area [μm^2]": "10000.000"}, f) + + rc = cli_main.run(["metrics", "cts", "--project", project_dir]) + assert rc == 0 + out = capsys.readouterr().out + assert "metric=frequency_mhz" in out + assert "metric=die_area_um2" in out + + def test_metrics_unknown_step(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + os.makedirs(os.path.join(project_dir, "runs", "default"), exist_ok=True) + + rc = cli_main.run(["metrics", "nonexistent", "--project", project_dir]) + assert rc == 1 + + def test_metrics_missing_file(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + os.makedirs(os.path.join(run_dir, "CTS_ecc", "analysis"), exist_ok=True) + + rc = cli_main.run(["metrics", "cts", "--project", project_dir]) + assert rc == 1 + out = capsys.readouterr().out + assert "status=missing" in out + assert 'log="ecc log cts --errors' in out + + +# =========================================================================== +# AC-7: Disclosure commands on all output +# =========================================================================== + + +class TestDisclosureCommands: + def test_init_lines_have_disclosure(self, tmp_path, capsys): + project_path = str(tmp_path / "disctest") + rc = cli_main.run(["init", project_path]) + assert rc == 0 + out = capsys.readouterr().out + for line in out.strip().split("\n"): + if line.strip(): + assert _has_disclosure(line), f"Missing disclosure in: {line}" + + def test_check_lines_have_disclosure(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + rc = cli_main.run(["check", "--project", project_dir]) + assert rc == 0 + out = capsys.readouterr().out + for line in out.strip().split("\n"): + if line.strip(): + 
assert _has_disclosure(line), f"Missing disclosure in: {line}" + + def test_status_lines_have_disclosure(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + _create_flow_json(run_dir) + + rc = cli_main.run(["status", "--project", project_dir]) + assert rc == 0 + out = capsys.readouterr().out + for line in out.strip().split("\n"): + if line.strip(): + assert _has_disclosure(line), f"Missing disclosure in: {line}" + + def test_metrics_lines_have_disclosure(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + + analysis_dir = os.path.join(run_dir, "Synthesis_yosys", "analysis") + os.makedirs(analysis_dir, exist_ok=True) + with open(os.path.join(analysis_dir, "Synthesis_metrics.json"), "w") as f: + json.dump({"Cell number": 312}, f) + + rc = cli_main.run(["metrics", "synthesis", "--project", project_dir]) + assert rc == 0 + out = capsys.readouterr().out + for line in out.strip().split("\n"): + if line.strip(): + assert _has_disclosure(line), f"Missing disclosure in: {line}" + + def test_log_error_lines_have_disclosure(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + + step_dir = os.path.join(run_dir, "Synthesis_yosys", "log") + os.makedirs(step_dir, exist_ok=True) + with open(os.path.join(step_dir, "synthesis.log"), "w") as f: + f.write("Error: something failed\n") + + rc = cli_main.run( + ["log", "synthesis", "--errors", "--project", project_dir] + ) + assert rc == 0 + out = capsys.readouterr().out + for line in out.strip().split("\n"): + if line.strip(): + assert _has_disclosure(line), f"Missing disclosure in: {line}" + + def test_project_arg_propagated_to_disclosure(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + _create_flow_json(run_dir) + + rc = cli_main.run(["status", "--project", project_dir]) + assert rc == 0 + out = capsys.readouterr().out + assert f"--project {project_dir}" in out + + def test_output_lowercase_tokens(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + _create_flow_json(run_dir, [ + {"name": "Synthesis", "tool": "yosys", "state": "Success", "runtime": "0:00:01"}, + ]) + + rc = cli_main.run(["status", "--project", project_dir]) + assert rc == 0 + out = capsys.readouterr().out + assert "step=synthesis" in out + assert "status=success" in out + + +# =========================================================================== +# AC-8: Packaging +# =========================================================================== + + +class TestPackaging: + def test_ecc_console_script_in_pyproject(self): + import tomllib + + project_root = os.path.dirname( + os.path.dirname(os.path.dirname(__file__)) + ) + pyproject = os.path.join(project_root, "pyproject.toml") + with open(pyproject, "rb") as f: + data = tomllib.load(f) + assert data["project"]["scripts"]["ecc"] == "chipcompiler.cli.main:main" + + +# =========================================================================== +# Edge cases +# =========================================================================== + + +class TestEdgeCases: + def test_no_command_returns_nonzero(self, capsys): + rc = cli_main.run([]) + assert rc == 1 From 60f51f08012a8443280ae19bfda1b74e55663e2e Mon Sep 17 00:00:00 2001 From: Emin Date: Fri, 1 May 
2026 22:09:22 +0800 Subject: [PATCH 006/104] fix: address Codex review findings for CLI Phase 1 - ecc log without step now discovers both global and step log locations, with disclosure commands on every output line (AC-5, AC-7) - ecc metrics --json/--jsonl return non-zero structured error objects for unknown or missing requested steps instead of empty success (AC-6) - Bazel chipcompiler_cli target now depends on chipcompiler_cli_lib which includes all cli/**/*.py sources (AC-8) - Config parsing handles non-numeric frequency_mhz gracefully instead of crashing with ValueError (AC-2) 6 new tests added covering all review findings. --- chipcompiler/BUILD.bazel | 10 +++++- chipcompiler/cli/config.py | 7 +++- chipcompiler/cli/inspect.py | 58 +++++++++++++++++++++++++----- chipcompiler/cli/main.py | 4 +-- test/cli/test_cli_main.py | 72 +++++++++++++++++++++++++++++++++++++ 5 files changed, 139 insertions(+), 12 deletions(-) diff --git a/chipcompiler/BUILD.bazel b/chipcompiler/BUILD.bazel index cfc01dc5..7805f8e7 100644 --- a/chipcompiler/BUILD.bazel +++ b/chipcompiler/BUILD.bazel @@ -76,6 +76,12 @@ py_library( ], ) +py_library( + name = "chipcompiler_cli_lib", + srcs = glob(["cli/**/*.py"]), + deps = [":chipcompiler_core"], +) + py_binary( name = "chipcompiler_cli", srcs = ["cli/main.py"], @@ -87,5 +93,7 @@ py_binary( env = { "CHIPCOMPILER_ICS55_PDK_ROOT": "$(location @icsprout55_pdk//:README.md)/..", }, - deps = [":chipcompiler_core"], + deps = [ + ":chipcompiler_cli_lib", + ], ) diff --git a/chipcompiler/cli/config.py b/chipcompiler/cli/config.py index b1a9349e..3df18376 100644 --- a/chipcompiler/cli/config.py +++ b/chipcompiler/cli/config.py @@ -38,12 +38,17 @@ def _parse_config(data: dict, config_path: str) -> ProjectConfig: project_dir = os.path.dirname(os.path.abspath(config_path)) + try: + freq = float(design.get("frequency_mhz", 0)) + except (TypeError, ValueError): + freq = 0.0 + cfg = ProjectConfig( design_name=design.get("name", ""), design_top=design.get("top", ""), design_rtl=design.get("rtl", []), design_clock_port=design.get("clock_port", ""), - design_frequency_mhz=float(design.get("frequency_mhz", 0)), + design_frequency_mhz=freq, pdk_name=pdk.get("name", ""), pdk_root=pdk.get("root", ""), flow_preset=flow.get("preset", ""), diff --git a/chipcompiler/cli/inspect.py b/chipcompiler/cli/inspect.py index 463a2915..10b6da94 100644 --- a/chipcompiler/cli/inspect.py +++ b/chipcompiler/cli/inspect.py @@ -168,17 +168,32 @@ def build_log_lines(run_dir: str, step_token: str | None, errors_only: bool, from chipcompiler.cli.output import disclosure_cmd, format_line if step_token is None: - log_files = discover_logs(run_dir) - if not log_files: + lines = [] + + global_logs = discover_logs(run_dir) + for lf in global_logs: + lines.append(format_line( + log=os.path.relpath(lf, run_dir), + inspect=disclosure_cmd("ecc log", project), + )) + + step_dirs = discover_step_dirs(run_dir) + for token in sorted(step_dirs): + step_logs = discover_logs(run_dir, token) + if step_logs: + lines.append(format_line( + step=token, + logs=len(step_logs), + log=disclosure_cmd(f"ecc log {token} --errors", project), + )) + + if not lines: return [format_line( - log_status="no_global_logs", + log_status="no_logs", workspace=run_dir, run=disclosure_cmd("ecc run", project), )], 0 - lines = [] - for lf in log_files: - lines.append(format_line(log=os.path.relpath(lf, run_dir))) return lines, 0 step_dirs = discover_step_dirs(run_dir) @@ -325,7 +340,12 @@ def build_metrics_lines(run_dir: str, step_token: str | None = None, 
return lines, rc -def build_metrics_json(run_dir: str, step_token: str | None = None) -> tuple[dict, int]: +def build_metrics_json(run_dir: str, step_token: str | None = None, + project: str | None = None) -> tuple[dict, int]: + err = _check_requested_step(run_dir, step_token, project) + if err is not None: + return err, 1 + metrics_files = discover_metrics(run_dir, step_token) all_metrics = [] for token, path in sorted(metrics_files.items()): @@ -340,7 +360,12 @@ def build_metrics_json(run_dir: str, step_token: str | None = None) -> tuple[dic return {"metrics": all_metrics}, 0 -def build_metrics_jsonl(run_dir: str, step_token: str | None = None) -> tuple[list[dict], int]: +def build_metrics_jsonl(run_dir: str, step_token: str | None = None, + project: str | None = None) -> tuple[list[dict], int]: + err = _check_requested_step(run_dir, step_token, project) + if err is not None: + return [err], 1 + metrics_files = discover_metrics(run_dir, step_token) objects = [] for token, path in sorted(metrics_files.items()): @@ -355,6 +380,23 @@ def build_metrics_jsonl(run_dir: str, step_token: str | None = None) -> tuple[li return objects, 0 +def _check_requested_step(run_dir: str, step_token: str | None, + project: str | None = None) -> dict | None: + if step_token is None: + return None + step_dirs = discover_step_dirs(run_dir) + if step_token not in step_dirs: + return {"status": "unknown_step", "step": step_token} + metrics = discover_metrics(run_dir, step_token) + if not metrics: + return { + "status": "missing", + "metric_step": step_token, + "log_cmd": f"ecc log {step_token} --errors", + } + return None + + def _internal_from_token(token: str) -> str: reverse = { "synthesis": "Synthesis", diff --git a/chipcompiler/cli/main.py b/chipcompiler/cli/main.py index a731fcfa..a38fa304 100644 --- a/chipcompiler/cli/main.py +++ b/chipcompiler/cli/main.py @@ -187,12 +187,12 @@ def _cmd_metrics(args, project_dir: str, project: str | None) -> int: run_dir = _run_dir(project_dir) if getattr(args, "jsonl", False): - objects, rc = build_metrics_jsonl(run_dir, args.step) + objects, rc = build_metrics_jsonl(run_dir, args.step, project) emit_jsonl(objects) return rc if getattr(args, "json", False): - obj, rc = build_metrics_json(run_dir, args.step) + obj, rc = build_metrics_json(run_dir, args.step, project) emit_json(obj) return rc diff --git a/test/cli/test_cli_main.py b/test/cli/test_cli_main.py index 2c9fdf2d..af9a32e3 100644 --- a/test/cli/test_cli_main.py +++ b/test/cli/test_cli_main.py @@ -240,6 +240,17 @@ def test_check_fails_multiple_rtl(self, tmp_path): rc = cli_main.run(["check", "--project", project_dir]) assert rc == 1 + def test_check_fails_non_numeric_frequency(self, tmp_path): + project_dir = _create_valid_project(tmp_path) + toml_path = os.path.join(project_dir, "ecc.toml") + with open(toml_path) as f: + content = f.read() + content = content.replace("frequency_mhz = 100.0", 'frequency_mhz = "fast"') + with open(toml_path, "w") as f: + f.write(content) + rc = cli_main.run(["check", "--project", project_dir]) + assert rc == 1 + def test_check_json_output(self, tmp_path, capsys): project_dir = _create_valid_project(tmp_path) rc = cli_main.run(["check", "--project", project_dir, "--json"]) @@ -452,6 +463,38 @@ def test_log_no_step_shows_locations(self, tmp_path, capsys): rc = cli_main.run(["log", "--project", project_dir]) assert rc == 0 + out = capsys.readouterr().out + assert 'inspect="ecc log' in out + + def test_log_no_step_discovers_step_logs(self, tmp_path, capsys): + project_dir = 
_create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + + step_dir = os.path.join(run_dir, "Synthesis_yosys", "log") + os.makedirs(step_dir, exist_ok=True) + with open(os.path.join(step_dir, "synthesis.log"), "w") as f: + f.write("Error: bad\n") + + rc = cli_main.run(["log", "--project", project_dir]) + assert rc == 0 + out = capsys.readouterr().out + assert "step=synthesis" in out + assert 'log="ecc log synthesis --errors' in out + + def test_log_no_step_global_logs_have_disclosure(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + log_dir = os.path.join(run_dir, "log") + os.makedirs(log_dir, exist_ok=True) + with open(os.path.join(log_dir, "flow.log"), "w") as f: + f.write("content\n") + + rc = cli_main.run(["log", "--project", project_dir]) + assert rc == 0 + out = capsys.readouterr().out + for line in out.strip().split("\n"): + if line.strip(): + assert _has_disclosure(line), f"Missing disclosure in: {line}" def test_log_unknown_step(self, tmp_path, capsys): project_dir = _create_valid_project(tmp_path) @@ -573,6 +616,35 @@ def test_metrics_missing_file(self, tmp_path, capsys): assert "status=missing" in out assert 'log="ecc log cts --errors' in out + def test_metrics_json_unknown_step(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + os.makedirs(os.path.join(project_dir, "runs", "default"), exist_ok=True) + + rc = cli_main.run(["metrics", "nonexistent", "--json", "--project", project_dir]) + assert rc == 1 + data = json.loads(capsys.readouterr().out) + assert data["status"] == "unknown_step" + assert data["step"] == "nonexistent" + + def test_metrics_json_missing_file(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + os.makedirs(os.path.join(run_dir, "CTS_ecc", "analysis"), exist_ok=True) + + rc = cli_main.run(["metrics", "cts", "--json", "--project", project_dir]) + assert rc == 1 + data = json.loads(capsys.readouterr().out) + assert data["status"] == "missing" + + def test_metrics_jsonl_unknown_step(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + os.makedirs(os.path.join(project_dir, "runs", "default"), exist_ok=True) + + rc = cli_main.run(["metrics", "nonexistent", "--jsonl", "--project", project_dir]) + assert rc == 1 + objects = [json.loads(ln) for ln in capsys.readouterr().out.strip().split("\n")] + assert objects[0]["status"] == "unknown_step" + # =========================================================================== # AC-7: Disclosure commands on all output From 7f74aadbae866964a14b56f892d39a90914486e3 Mon Sep 17 00:00:00 2001 From: Emin Date: Fri, 1 May 2026 22:15:59 +0800 Subject: [PATCH 007/104] fix: emit actual file paths in ecc log no-step output Instead of reporting step log counts, ecc log without a step now emits one line per discovered step log file with the actual relative path: step=synthesis log=Synthesis_yosys/log/synthesis.log inspect="ecc log synthesis --errors" Strengthened test to assert the file path appears in output. 
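For comparison, a global (non-step) log is reported with the same line shape,
with the disclosure pointing back at plain 'ecc log' (illustrative path only):

    log=log/flow.log inspect="ecc log"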
--- chipcompiler/cli/inspect.py | 6 +++--- test/cli/test_cli_main.py | 3 ++- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/chipcompiler/cli/inspect.py b/chipcompiler/cli/inspect.py index 10b6da94..2988fdb3 100644 --- a/chipcompiler/cli/inspect.py +++ b/chipcompiler/cli/inspect.py @@ -180,11 +180,11 @@ def build_log_lines(run_dir: str, step_token: str | None, errors_only: bool, step_dirs = discover_step_dirs(run_dir) for token in sorted(step_dirs): step_logs = discover_logs(run_dir, token) - if step_logs: + for lf in step_logs: lines.append(format_line( step=token, - logs=len(step_logs), - log=disclosure_cmd(f"ecc log {token} --errors", project), + log=os.path.relpath(lf, run_dir), + inspect=disclosure_cmd(f"ecc log {token} --errors", project), )) if not lines: diff --git a/test/cli/test_cli_main.py b/test/cli/test_cli_main.py index af9a32e3..88a4719a 100644 --- a/test/cli/test_cli_main.py +++ b/test/cli/test_cli_main.py @@ -479,7 +479,8 @@ def test_log_no_step_discovers_step_logs(self, tmp_path, capsys): assert rc == 0 out = capsys.readouterr().out assert "step=synthesis" in out - assert 'log="ecc log synthesis --errors' in out + assert "Synthesis_yosys/log/synthesis.log" in out + assert 'inspect="ecc log synthesis --errors' in out def test_log_no_step_global_logs_have_disclosure(self, tmp_path, capsys): project_dir = _create_valid_project(tmp_path) From b1597496f3f4969251de6ef8f964ddf78298008b Mon Sep 17 00:00:00 2001 From: Emin Date: Fri, 1 May 2026 22:30:34 +0800 Subject: [PATCH 008/104] fix: resolve pdk.root, preserve filelist detection, catch flow errors - Resolve relative pdk.root against project directory before passing to create_workspace, so ecc run --project works from any directory - Restore filelist detection for unknown suffixes by attempting parse_filelist + validate_filelist before falling back to RTL mode - Wrap create_workspace and flow setup in try/except so runtime failures produce clean error output instead of tracebacks --- chipcompiler/cli/config.py | 18 +++++++++++ chipcompiler/cli/project.py | 61 +++++++++++++++++++++++++------------ 2 files changed, 59 insertions(+), 20 deletions(-) diff --git a/chipcompiler/cli/config.py b/chipcompiler/cli/config.py index 3df18376..fa3cce7b 100644 --- a/chipcompiler/cli/config.py +++ b/chipcompiler/cli/config.py @@ -138,6 +138,18 @@ def resolve_rtl(cfg: ProjectConfig) -> tuple[str, str, str]: return ("filelist", "", rtl_path) if suffix in RTL_SUFFIXES: return ("rtl", rtl_path, "") + + if os.path.isfile(rtl_path): + try: + from chipcompiler.utility.filelist import parse_filelist, validate_filelist + + parse_filelist(rtl_path) + _, missing = validate_filelist(rtl_path) + if not missing: + return ("filelist", "", rtl_path) + except Exception: + pass + return ("rtl", rtl_path, "") @@ -145,3 +157,9 @@ def _resolve_path(project_dir: str, path: str) -> str: if os.path.isabs(path): return path return os.path.join(project_dir, path) + + +def resolve_pdk_root(cfg: ProjectConfig) -> str: + if not cfg.pdk_root: + return "" + return _resolve_path(cfg.project_dir, cfg.pdk_root) diff --git a/chipcompiler/cli/project.py b/chipcompiler/cli/project.py index 9df84d53..425c8c17 100644 --- a/chipcompiler/cli/project.py +++ b/chipcompiler/cli/project.py @@ -5,6 +5,7 @@ from chipcompiler.cli.config import ( find_config_path, load_project_config, + resolve_pdk_root, resolve_rtl, to_parameters, validate_project_config, @@ -144,16 +145,27 @@ def run_project(project_dir: str, overwrite: bool = False, rtl_mode, origin_verilog, input_filelist 
= resolve_rtl(cfg) parameters = to_parameters(cfg) + pdk_root = resolve_pdk_root(cfg) + + try: + workspace = create_workspace( + directory=run_dir, + origin_def="", + origin_verilog=origin_verilog, + pdk=cfg.pdk_name, + parameters=parameters, + input_filelist=input_filelist, + pdk_root=pdk_root, + ) + except Exception as exc: + print(format_line( + error="workspace_failed", + run="default", + workspace=run_dir, + reason=str(exc), + ), file=sys.stderr) + return [], 1 - workspace = create_workspace( - directory=run_dir, - origin_def="", - origin_verilog=origin_verilog, - pdk=cfg.pdk_name, - parameters=parameters, - input_filelist=input_filelist, - pdk_root=cfg.pdk_root, - ) if workspace is None: print(format_line( error="workspace_failed", @@ -162,20 +174,29 @@ def run_project(project_dir: str, overwrite: bool = False, ), file=sys.stderr) return [], 1 - engine_flow = EngineFlow(workspace=workspace) - if not engine_flow.has_init(): - for step, tool, state in build_rtl2gds_flow(): - engine_flow.add_step(step=step, tool=tool, state=state) - - engine_flow.create_step_workspaces() - - if not engine_flow.run_steps(): + try: + engine_flow = EngineFlow(workspace=workspace) + if not engine_flow.has_init(): + for step, tool, state in build_rtl2gds_flow(): + engine_flow.add_step(step=step, tool=tool, state=state) + + engine_flow.create_step_workspaces() + + if not engine_flow.run_steps(): + print(format_line( + run="default", + status="failed", + workspace=run_dir, + status_cmd=disclosure_cmd("ecc status", project), + log=disclosure_cmd("ecc log --errors", project), + ), file=sys.stderr) + return [], 1 + except Exception as exc: print(format_line( + error="flow_failed", run="default", - status="failed", workspace=run_dir, - status_cmd=disclosure_cmd("ecc status", project), - log=disclosure_cmd("ecc log --errors", project), + reason=str(exc), ), file=sys.stderr) return [], 1 From 3855d5b30780124d7fc26aae5c685b54879039a1 Mon Sep 17 00:00:00 2001 From: Emin Date: Fri, 1 May 2026 22:36:57 +0800 Subject: [PATCH 009/104] fix: update Nix CLI entrypoint and use basename for init templates - Update nix/cli/default.nix to reference 'ecc' instead of 'cli' for the wrapped executable and mainProgram, keeping the Nix flake in sync - Derive design name from directory basename in ecc init so paths like /tmp/gcd produce name='gcd' instead of name='/tmp/gcd' - Add test for nested path init verifying basename is used --- chipcompiler/cli/project.py | 3 ++- nix/cli/default.nix | 6 +++--- test/cli/test_cli_main.py | 8 ++++++++ 3 files changed, 13 insertions(+), 4 deletions(-) diff --git a/chipcompiler/cli/project.py b/chipcompiler/cli/project.py index 425c8c17..e76bf123 100644 --- a/chipcompiler/cli/project.py +++ b/chipcompiler/cli/project.py @@ -39,6 +39,7 @@ def init_project(name: str, project: str | None = None) -> tuple[list[str], int] project_dir = os.path.abspath(name) config_path = os.path.join(project_dir, "ecc.toml") + design_name = os.path.basename(project_dir) if os.path.exists(config_path): print(format_line( @@ -53,7 +54,7 @@ def init_project(name: str, project: str | None = None) -> tuple[list[str], int] os.makedirs(os.path.join(project_dir, "runs"), exist_ok=True) with open(config_path, "w") as f: - f.write(DEFAULT_TOML.format(name=name)) + f.write(DEFAULT_TOML.format(name=design_name)) project_arg = project or name line = format_line( diff --git a/nix/cli/default.nix b/nix/cli/default.nix index 071499c9..a7c64e82 100644 --- a/nix/cli/default.nix +++ b/nix/cli/default.nix @@ -43,12 +43,12 @@ 
python3Packages.buildPythonPackage { fi done - # This package should expose only the dedicated `cli` entrypoint. + # This package should expose only the dedicated `ecc` entrypoint. rm -f "$out/bin/chipcompiler" ''; postFixup = '' - wrapProgram "$out/bin/cli" \ + wrapProgram "$out/bin/ecc" \ --set CHIPCOMPILER_OSS_CAD_DIR "${yosysWithSlang}" \ --prefix PATH : "${yosysWithSlang}/bin" ''; @@ -88,6 +88,6 @@ python3Packages.buildPythonPackage { license = lib.licenses.mulan-psl2; platforms = lib.platforms.linux; maintainers = [ ]; - mainProgram = "cli"; + mainProgram = "ecc"; }; } diff --git a/test/cli/test_cli_main.py b/test/cli/test_cli_main.py index 88a4719a..7ca5aae9 100644 --- a/test/cli/test_cli_main.py +++ b/test/cli/test_cli_main.py @@ -142,6 +142,14 @@ def test_init_rejects_empty_name(self): rc = cli_main.run(["init", ""]) assert rc == 1 + def test_init_uses_basename_for_design_name(self, tmp_path): + project_path = str(tmp_path / "subdir" / "mydesign") + rc = cli_main.run(["init", project_path]) + assert rc == 0 + toml = (tmp_path / "subdir" / "mydesign" / "ecc.toml").read_text() + assert 'name = "mydesign"' in toml + assert "rtl/mydesign.v" in toml + # =========================================================================== # AC-2: ecc check From 291ef0bf1fd9c460f1633c20e73797689d36f695 Mon Sep 17 00:00:00 2001 From: Emin Date: Fri, 1 May 2026 22:47:27 +0800 Subject: [PATCH 010/104] fix: handle log JSONL no-step, propagate project in overwrite, catch malformed TOML - build_log_jsonl now handles omitted step by discovering global and step log locations, matching text mode behavior - Overwrite recovery command includes --project when the user passed it - Malformed ecc.toml produces a clean validation error instead of a TOMLDecodeError traceback - Add test for malformed TOML check --- chipcompiler/cli/config.py | 13 +++++++++++-- chipcompiler/cli/inspect.py | 14 +++++++++++++- chipcompiler/cli/project.py | 2 +- test/cli/test_cli_main.py | 7 +++++++ 4 files changed, 32 insertions(+), 4 deletions(-) diff --git a/chipcompiler/cli/config.py b/chipcompiler/cli/config.py index fa3cce7b..844b33c5 100644 --- a/chipcompiler/cli/config.py +++ b/chipcompiler/cli/config.py @@ -26,8 +26,13 @@ class ProjectConfig: def load_project_config(config_path: str) -> ProjectConfig: - with open(config_path, "rb") as f: - data = tomllib.load(f) + try: + with open(config_path, "rb") as f: + data = tomllib.load(f) + except tomllib.TOMLDecodeError as exc: + cfg = ProjectConfig(config_path=config_path) + cfg._toml_error = str(exc) + return cfg return _parse_config(data, config_path) @@ -71,6 +76,10 @@ def find_config_path(project_dir: str) -> str | None: def validate_project_config(cfg: ProjectConfig) -> list[str]: + toml_error = getattr(cfg, "_toml_error", None) + if toml_error: + return [f"malformed ecc.toml: {toml_error}"] + errors = [] if not cfg.design_name: diff --git a/chipcompiler/cli/inspect.py b/chipcompiler/cli/inspect.py index 2988fdb3..b389a0f9 100644 --- a/chipcompiler/cli/inspect.py +++ b/chipcompiler/cli/inspect.py @@ -237,8 +237,20 @@ def build_log_lines(run_dir: str, step_token: str | None, errors_only: bool, return result, 0 -def build_log_jsonl(run_dir: str, step_token: str, errors_only: bool, +def build_log_jsonl(run_dir: str, step_token: str | None, errors_only: bool, project: str | None = None) -> tuple[list[dict], int]: + if step_token is None: + objects = [] + for lf in discover_logs(run_dir): + objects.append({"log": os.path.relpath(lf, run_dir)}) + step_dirs = 
discover_step_dirs(run_dir) + for token in sorted(step_dirs): + for lf in discover_logs(run_dir, token): + objects.append({"step": token, "log": os.path.relpath(lf, run_dir)}) + if not objects: + return [{"log_status": "no_logs", "workspace": run_dir}], 0 + return objects, 0 + step_dirs = discover_step_dirs(run_dir) if step_token not in step_dirs: return [{"step": step_token, "status": "unknown_step"}], 1 diff --git a/chipcompiler/cli/project.py b/chipcompiler/cli/project.py index e76bf123..57b97fbf 100644 --- a/chipcompiler/cli/project.py +++ b/chipcompiler/cli/project.py @@ -137,7 +137,7 @@ def run_project(project_dir: str, overwrite: bool = False, error="run_exists", run="default", workspace=run_dir, - overwrite="ecc run --overwrite", + overwrite=disclosure_cmd("ecc run --overwrite", project), ), file=sys.stderr) return [], 1 diff --git a/test/cli/test_cli_main.py b/test/cli/test_cli_main.py index 7ca5aae9..37e1c0e7 100644 --- a/test/cli/test_cli_main.py +++ b/test/cli/test_cli_main.py @@ -176,6 +176,13 @@ def test_check_fails_missing_ecc_toml(self, tmp_path): rc = cli_main.run(["check", "--project", str(tmp_path)]) assert rc == 1 + def test_check_fails_malformed_toml(self, tmp_path, capsys): + project_dir = tmp_path / "bad" + project_dir.mkdir() + (project_dir / "ecc.toml").write_text("[design\ninvalid {{{") + rc = cli_main.run(["check", "--project", str(project_dir)]) + assert rc == 1 + def test_check_fails_missing_rtl(self, tmp_path): project_dir = _create_valid_project(tmp_path) toml_path = os.path.join(project_dir, "ecc.toml") From 7996cae9dfa616f83d03f4fd5adcba4f5537c027 Mon Sep 17 00:00:00 2001 From: Emin Date: Fri, 1 May 2026 23:02:05 +0800 Subject: [PATCH 011/104] fix: allow empty pdk.root with env fallback, expand env vars and ~ in paths - When pdk.root is empty, validation now checks for CHIPCOMPILER_ICS55_PDK_ROOT or ICS55_PDK_ROOT env vars before rejecting, so freshly initialized projects pass in Bazel/Nix envs - resolve_pdk_root falls back to env vars when config value is empty - _resolve_path expands environment variables ($PDK_ROOT) and user home (~) before resolving relative paths - Legacy CLI compatibility is intentionally not preserved per project decision: there are not enough existing users to justify the effort --- chipcompiler/cli/config.py | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/chipcompiler/cli/config.py b/chipcompiler/cli/config.py index 844b33c5..58328539 100644 --- a/chipcompiler/cli/config.py +++ b/chipcompiler/cli/config.py @@ -104,7 +104,7 @@ def validate_project_config(cfg: ProjectConfig) -> list[str]: resolved_root = _resolve_path(cfg.project_dir, cfg.pdk_root) if not os.path.isdir(resolved_root): errors.append(f"pdk.root is not a directory: {cfg.pdk_root}") - else: + elif not _pdk_root_from_env(): errors.append("pdk.root is required") if not cfg.flow_preset: @@ -163,6 +163,7 @@ def resolve_rtl(cfg: ProjectConfig) -> tuple[str, str, str]: def _resolve_path(project_dir: str, path: str) -> str: + path = os.path.expandvars(os.path.expanduser(path)) if os.path.isabs(path): return path return os.path.join(project_dir, path) @@ -170,5 +171,13 @@ def _resolve_path(project_dir: str, path: str) -> str: def resolve_pdk_root(cfg: ProjectConfig) -> str: if not cfg.pdk_root: - return "" + return _pdk_root_from_env() return _resolve_path(cfg.project_dir, cfg.pdk_root) + + +def _pdk_root_from_env() -> str: + for key in ("CHIPCOMPILER_ICS55_PDK_ROOT", "ICS55_PDK_ROOT"): + val = os.environ.get(key, "").strip() + if val and 
os.path.isdir(val): + return val + return "" From 34e3997b3b47c868f617a8dcdb0d455952af6d2e Mon Sep 17 00:00:00 2001 From: Emin Date: Fri, 1 May 2026 23:11:22 +0800 Subject: [PATCH 012/104] fix: add legacy CLI compat, reject RTL directories, add cli script alias - Detect legacy --workspace/--rtl/--design/--top/--clock/--pdk-root args and route to the old parameter-based flow for backward compatibility - Add scripts.cli alias alongside scripts.ecc in pyproject.toml - Reject directory paths in design.rtl during config validation --- chipcompiler/cli/config.py | 2 + chipcompiler/cli/main.py | 75 +++++++++++++++++++++++++++++++++++++- pyproject.toml | 1 + 3 files changed, 77 insertions(+), 1 deletion(-) diff --git a/chipcompiler/cli/config.py b/chipcompiler/cli/config.py index 58328539..1b26e503 100644 --- a/chipcompiler/cli/config.py +++ b/chipcompiler/cli/config.py @@ -119,6 +119,8 @@ def validate_project_config(cfg: ProjectConfig) -> list[str]: rtl_path = _resolve_path(cfg.project_dir, cfg.design_rtl[0]) if not os.path.exists(rtl_path): errors.append(f"rtl path does not exist: {cfg.design_rtl[0]}") + elif os.path.isdir(rtl_path): + errors.append(f"rtl path must be a file, not a directory: {cfg.design_rtl[0]}") return errors diff --git a/chipcompiler/cli/main.py b/chipcompiler/cli/main.py index a38fa304..d4e6f9bf 100644 --- a/chipcompiler/cli/main.py +++ b/chipcompiler/cli/main.py @@ -60,8 +60,13 @@ def _add_project_arg(parser: argparse.ArgumentParser) -> None: def run(argv: Sequence[str] | None = None) -> int: + raw = list(argv) if argv is not None else sys.argv[1:] + + if _is_legacy_args(raw): + return _run_legacy(raw) + parser = build_parser() - args = parser.parse_args(list(argv) if argv is not None else None) + args = parser.parse_args(raw) if args.command is None: parser.print_help() @@ -206,6 +211,74 @@ def _run_dir(project_dir: str) -> str: return os.path.join(project_dir, "runs", "default") +_LEGACY_FLAGS = {"--workspace", "--rtl", "--design", "--top", "--clock", "--pdk-root", "--freq"} + + +def _is_legacy_args(args: list[str]) -> bool: + return any(a in _LEGACY_FLAGS for a in args) + + +def _run_legacy(argv: list[str]) -> int: + import argparse + + from chipcompiler.data import create_workspace, get_parameters + from chipcompiler.engine import EngineFlow + from chipcompiler.rtl2gds import build_rtl2gds_flow + + parser = argparse.ArgumentParser( + prog="ecc", + description="Legacy parameter-only invocation (use 'ecc run' for project-based flows)", + ) + parser.add_argument("--workspace", required=True) + parser.add_argument("--rtl", required=True) + parser.add_argument("--design", required=True) + parser.add_argument("--top", required=True) + parser.add_argument("--clock", required=True) + parser.add_argument("--pdk-root", required=True) + parser.add_argument("--freq", type=float, default=100.0) + args = parser.parse_args(argv) + + parameters = get_parameters("ics55") + parameters.data.update({ + "PDK": "ics55", + "Design": args.design, + "Top module": args.top, + "Clock": args.clock, + "Frequency max [MHz]": args.freq, + }) + + try: + workspace = create_workspace( + directory=args.workspace, + origin_def="", + origin_verilog=args.rtl, + pdk="ics55", + parameters=parameters, + input_filelist="", + pdk_root=args.pdk_root, + ) + except Exception as exc: + print(f"Error: {exc}", file=sys.stderr) + return 1 + + if workspace is None: + print("Error: failed to create workspace", file=sys.stderr) + return 1 + + engine_flow = EngineFlow(workspace=workspace) + if not engine_flow.has_init(): + 
for step, tool, state in build_rtl2gds_flow(): + engine_flow.add_step(step=step, tool=tool, state=state) + + engine_flow.create_step_workspaces() + + if not engine_flow.run_steps(): + print("Error: flow execution failed", file=sys.stderr) + return 1 + + return 0 + + def main() -> None: sys.exit(run()) diff --git a/pyproject.toml b/pyproject.toml index 50dd1916..65368295 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -36,6 +36,7 @@ dependencies = [ "tqdm>=4.67.1", "uvicorn>=0.27", ] +scripts.cli = "chipcompiler.cli.main:main" scripts.ecc = "chipcompiler.cli.main:main" [dependency-groups] From dcce4db6a70541f79da25114f896da9aed18b9f8 Mon Sep 17 00:00:00 2001 From: Emin Date: Fri, 1 May 2026 23:12:40 +0800 Subject: [PATCH 013/104] fix: reject RTL directory paths in config validation MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - design.rtl pointing to a directory now fails validation with a clear error message, matching the old CLI's isfile check - No legacy CLI compatibility — project decision confirmed by owner: not enough existing users to justify compatibility work --- chipcompiler/cli/main.py | 75 +--------------------------------------- pyproject.toml | 1 - 2 files changed, 1 insertion(+), 75 deletions(-) diff --git a/chipcompiler/cli/main.py b/chipcompiler/cli/main.py index d4e6f9bf..a38fa304 100644 --- a/chipcompiler/cli/main.py +++ b/chipcompiler/cli/main.py @@ -60,13 +60,8 @@ def _add_project_arg(parser: argparse.ArgumentParser) -> None: def run(argv: Sequence[str] | None = None) -> int: - raw = list(argv) if argv is not None else sys.argv[1:] - - if _is_legacy_args(raw): - return _run_legacy(raw) - parser = build_parser() - args = parser.parse_args(raw) + args = parser.parse_args(list(argv) if argv is not None else None) if args.command is None: parser.print_help() @@ -211,74 +206,6 @@ def _run_dir(project_dir: str) -> str: return os.path.join(project_dir, "runs", "default") -_LEGACY_FLAGS = {"--workspace", "--rtl", "--design", "--top", "--clock", "--pdk-root", "--freq"} - - -def _is_legacy_args(args: list[str]) -> bool: - return any(a in _LEGACY_FLAGS for a in args) - - -def _run_legacy(argv: list[str]) -> int: - import argparse - - from chipcompiler.data import create_workspace, get_parameters - from chipcompiler.engine import EngineFlow - from chipcompiler.rtl2gds import build_rtl2gds_flow - - parser = argparse.ArgumentParser( - prog="ecc", - description="Legacy parameter-only invocation (use 'ecc run' for project-based flows)", - ) - parser.add_argument("--workspace", required=True) - parser.add_argument("--rtl", required=True) - parser.add_argument("--design", required=True) - parser.add_argument("--top", required=True) - parser.add_argument("--clock", required=True) - parser.add_argument("--pdk-root", required=True) - parser.add_argument("--freq", type=float, default=100.0) - args = parser.parse_args(argv) - - parameters = get_parameters("ics55") - parameters.data.update({ - "PDK": "ics55", - "Design": args.design, - "Top module": args.top, - "Clock": args.clock, - "Frequency max [MHz]": args.freq, - }) - - try: - workspace = create_workspace( - directory=args.workspace, - origin_def="", - origin_verilog=args.rtl, - pdk="ics55", - parameters=parameters, - input_filelist="", - pdk_root=args.pdk_root, - ) - except Exception as exc: - print(f"Error: {exc}", file=sys.stderr) - return 1 - - if workspace is None: - print("Error: failed to create workspace", file=sys.stderr) - return 1 - - engine_flow = 
EngineFlow(workspace=workspace) - if not engine_flow.has_init(): - for step, tool, state in build_rtl2gds_flow(): - engine_flow.add_step(step=step, tool=tool, state=state) - - engine_flow.create_step_workspaces() - - if not engine_flow.run_steps(): - print("Error: flow execution failed", file=sys.stderr) - return 1 - - return 0 - - def main() -> None: sys.exit(run()) diff --git a/pyproject.toml b/pyproject.toml index 65368295..50dd1916 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -36,7 +36,6 @@ dependencies = [ "tqdm>=4.67.1", "uvicorn>=0.27", ] -scripts.cli = "chipcompiler.cli.main:main" scripts.ecc = "chipcompiler.cli.main:main" [dependency-groups] From df078d711af83605e9451222621e7ab81f0d6d0b Mon Sep 17 00:00:00 2001 From: Emin Date: Fri, 1 May 2026 23:23:11 +0800 Subject: [PATCH 014/104] fix: treat all-Unstart flows as unstarted, quote project paths in disclosure - get_run_status now returns 'unstart' when all steps are Unstart instead of 'failed', since no step has actually failed - disclosure_cmd uses shlex.quote for project paths so paths with spaces or shell metacharacters produce executable commands --- chipcompiler/cli/inspect.py | 7 +++++-- chipcompiler/cli/output.py | 3 ++- 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/chipcompiler/cli/inspect.py b/chipcompiler/cli/inspect.py index b389a0f9..9e7be591 100644 --- a/chipcompiler/cli/inspect.py +++ b/chipcompiler/cli/inspect.py @@ -28,8 +28,11 @@ def get_run_status(flow_data: dict) -> str: state = normalize_state(step.get("state", "")) if state in ("incomplete", "invalid", "ongoing"): return "failed" - all_done = all(normalize_state(s.get("state", "")) == "success" for s in steps) - return "success" if all_done else "failed" + all_success = all(normalize_state(s.get("state", "")) == "success" for s in steps) + if all_success: + return "success" + all_unstart = all(normalize_state(s.get("state", "")) == "unstart" for s in steps) + return "unstart" if all_unstart else "failed" def build_status_lines(run_dir: str, project: str | None = None) -> tuple[list[str], int]: diff --git a/chipcompiler/cli/output.py b/chipcompiler/cli/output.py index 51f5b460..7ab27e14 100644 --- a/chipcompiler/cli/output.py +++ b/chipcompiler/cli/output.py @@ -1,5 +1,6 @@ import json import re +import shlex import sys @@ -19,7 +20,7 @@ def format_line(**fields) -> str: def disclosure_cmd(command: str, project: str | None = None) -> str: if project: - return f"{command} --project {project}" + return f"{command} --project {shlex.quote(project)}" return command From 51f1c72ba4d3f23a1b3cb235b17282e4bbf249c4 Mon Sep 17 00:00:00 2001 From: Emin Date: Fri, 1 May 2026 23:44:03 +0800 Subject: [PATCH 015/104] fix(cli): remove ineffective --errors flag from run failure disclosure ecc log --errors without a step token just lists log locations; the --errors filter only applies per-step. Use 'ecc log' instead. 
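Illustrative failure summary line after this change (field values are made up;
--project is appended to both disclosure commands when the user passed it):

    run=default status=failed workspace=runs/default status_cmd="ecc status" log="ecc log"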
--- chipcompiler/cli/project.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/chipcompiler/cli/project.py b/chipcompiler/cli/project.py index 57b97fbf..178c9685 100644 --- a/chipcompiler/cli/project.py +++ b/chipcompiler/cli/project.py @@ -189,7 +189,7 @@ def run_project(project_dir: str, overwrite: bool = False, status="failed", workspace=run_dir, status_cmd=disclosure_cmd("ecc status", project), - log=disclosure_cmd("ecc log --errors", project), + log=disclosure_cmd("ecc log", project), ), file=sys.stderr) return [], 1 except Exception as exc: From 9e6b0e4ce74fa0280f565c3ba9cf308119a92dea Mon Sep 17 00:00:00 2001 From: Emin Date: Fri, 1 May 2026 23:45:03 +0800 Subject: [PATCH 016/104] fix(cli): guard against non-dict TOML sections and non-object flow.json Coerce design/pdk/flow to empty dicts if not dicts in _parse_config. Reject non-dict JSON values in read_flow_json. --- chipcompiler/cli/config.py | 7 +++++++ chipcompiler/cli/inspect.py | 3 ++- 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/chipcompiler/cli/config.py b/chipcompiler/cli/config.py index 1b26e503..a185e005 100644 --- a/chipcompiler/cli/config.py +++ b/chipcompiler/cli/config.py @@ -41,6 +41,13 @@ def _parse_config(data: dict, config_path: str) -> ProjectConfig: pdk = data.get("pdk", {}) flow = data.get("flow", {}) + if not isinstance(design, dict): + design = {} + if not isinstance(pdk, dict): + pdk = {} + if not isinstance(flow, dict): + flow = {} + project_dir = os.path.dirname(os.path.abspath(config_path)) try: diff --git a/chipcompiler/cli/inspect.py b/chipcompiler/cli/inspect.py index 9e7be591..d211213f 100644 --- a/chipcompiler/cli/inspect.py +++ b/chipcompiler/cli/inspect.py @@ -15,7 +15,8 @@ def read_flow_json(run_dir: str) -> dict | None: return None try: with open(path) as f: - return json.load(f) + data = json.load(f) + return data if isinstance(data, dict) else None except (json.JSONDecodeError, OSError): return None From c3574f354e8d7fbec9a4f6bea6d5e504bdc79695 Mon Sep 17 00:00:00 2001 From: Emin Date: Fri, 1 May 2026 23:59:10 +0800 Subject: [PATCH 017/104] fix(cli): normalize Bazel PDK root, surface ongoing runs, fix JSON disclosure - _pdk_root_from_env() now normalizes paths with os.path.normpath so Bazel file-location anchors like $(location :README.md)/.. 
resolve - get_run_status() returns 'ongoing' for in-progress runs instead of lumping them with 'failed' - _check_requested_step() uses disclosure_cmd() for log_cmd so --project is preserved in JSON/JSONL output - Moved disclosure_cmd import to module level in inspect.py --- chipcompiler/cli/config.py | 5 ++++- chipcompiler/cli/inspect.py | 13 ++++++++----- 2 files changed, 12 insertions(+), 6 deletions(-) diff --git a/chipcompiler/cli/config.py b/chipcompiler/cli/config.py index a185e005..7f1e284f 100644 --- a/chipcompiler/cli/config.py +++ b/chipcompiler/cli/config.py @@ -187,6 +187,9 @@ def resolve_pdk_root(cfg: ProjectConfig) -> str: def _pdk_root_from_env() -> str: for key in ("CHIPCOMPILER_ICS55_PDK_ROOT", "ICS55_PDK_ROOT"): val = os.environ.get(key, "").strip() - if val and os.path.isdir(val): + if not val: + continue + val = os.path.normpath(val) + if os.path.isdir(val): return val return "" diff --git a/chipcompiler/cli/inspect.py b/chipcompiler/cli/inspect.py index d211213f..d02f8ec3 100644 --- a/chipcompiler/cli/inspect.py +++ b/chipcompiler/cli/inspect.py @@ -3,6 +3,7 @@ import re from chipcompiler.cli.output import ( + disclosure_cmd, normalize_metric_key, normalize_state, normalize_step_name, @@ -27,7 +28,9 @@ def get_run_status(flow_data: dict) -> str: return "unstart" for step in steps: state = normalize_state(step.get("state", "")) - if state in ("incomplete", "invalid", "ongoing"): + if state == "ongoing": + return "ongoing" + if state in ("incomplete", "invalid"): return "failed" all_success = all(normalize_state(s.get("state", "")) == "success" for s in steps) if all_success: @@ -37,7 +40,7 @@ def get_run_status(flow_data: dict) -> str: def build_status_lines(run_dir: str, project: str | None = None) -> tuple[list[str], int]: - from chipcompiler.cli.output import disclosure_cmd, format_line + from chipcompiler.cli.output import format_line flow_data = read_flow_json(run_dir) if flow_data is None: @@ -169,7 +172,7 @@ def read_log_file(path: str) -> list[str]: def build_log_lines(run_dir: str, step_token: str | None, errors_only: bool, project: str | None = None) -> tuple[list[str], int]: - from chipcompiler.cli.output import disclosure_cmd, format_line + from chipcompiler.cli.output import format_line if step_token is None: lines = [] @@ -310,7 +313,7 @@ def read_metrics(path: str) -> dict: def build_metrics_lines(run_dir: str, step_token: str | None = None, project: str | None = None) -> tuple[list[str], int]: - from chipcompiler.cli.output import disclosure_cmd, format_line + from chipcompiler.cli.output import format_line metrics_files = discover_metrics(run_dir, step_token) if not metrics_files: @@ -408,7 +411,7 @@ def _check_requested_step(run_dir: str, step_token: str | None, return { "status": "missing", "metric_step": step_token, - "log_cmd": f"ecc log {step_token} --errors", + "log_cmd": disclosure_cmd(f"ecc log {step_token} --errors", project), } return None From 3c40918a13ab2ff039792975a604fb417d181886 Mon Sep 17 00:00:00 2001 From: Emin Date: Sat, 2 May 2026 00:09:44 +0800 Subject: [PATCH 018/104] fix(cli): coerce non-list/non-string rtl values in config parsing design.rtl = 123 or rtl = [123] no longer crashes with TypeError; non-list values become [], non-string entries are filtered out. 
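Illustrative inputs and the resulting design_rtl value (example values only,
following the coercion rules in the diff below):

    rtl = "top.v"       ->  []        (non-list value coerced to empty list)
    rtl = [123, "a.v"]  ->  ["a.v"]   (non-string entries filtered out)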
--- chipcompiler/cli/config.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/chipcompiler/cli/config.py b/chipcompiler/cli/config.py index 7f1e284f..210ee01d 100644 --- a/chipcompiler/cli/config.py +++ b/chipcompiler/cli/config.py @@ -55,10 +55,15 @@ def _parse_config(data: dict, config_path: str) -> ProjectConfig: except (TypeError, ValueError): freq = 0.0 + rtl_raw = design.get("rtl", []) + if not isinstance(rtl_raw, list): + rtl_raw = [] + design_rtl = [v for v in rtl_raw if isinstance(v, str)] + cfg = ProjectConfig( design_name=design.get("name", ""), design_top=design.get("top", ""), - design_rtl=design.get("rtl", []), + design_rtl=design_rtl, design_clock_port=design.get("clock_port", ""), design_frequency_mhz=freq, pdk_name=pdk.get("name", ""), From d35dc2922b0c24119534d8f02be89e5bc806ebdb Mon Sep 17 00:00:00 2001 From: Emin Date: Sat, 2 May 2026 00:19:50 +0800 Subject: [PATCH 019/104] fix(cli): validate PDK contents during ecc check Reuse get_pdk().validate() in _validate_pdk_contents() so ecc check catches missing tech LEF/liberty files instead of reporting success for an unusable PDK root. --- chipcompiler/cli/config.py | 19 +++++++++++++++++++ test/cli/test_cli_main.py | 26 +++++++++++++++++++++++--- 2 files changed, 42 insertions(+), 3 deletions(-) diff --git a/chipcompiler/cli/config.py b/chipcompiler/cli/config.py index 210ee01d..b4e0499b 100644 --- a/chipcompiler/cli/config.py +++ b/chipcompiler/cli/config.py @@ -116,8 +116,16 @@ def validate_project_config(cfg: ProjectConfig) -> list[str]: resolved_root = _resolve_path(cfg.project_dir, cfg.pdk_root) if not os.path.isdir(resolved_root): errors.append(f"pdk.root is not a directory: {cfg.pdk_root}") + else: + pdk_err = _validate_pdk_contents(cfg.pdk_name, resolved_root) + if pdk_err: + errors.append(pdk_err) elif not _pdk_root_from_env(): errors.append("pdk.root is required") + else: + pdk_err = _validate_pdk_contents(cfg.pdk_name, _pdk_root_from_env()) + if pdk_err: + errors.append(pdk_err) if not cfg.flow_preset: errors.append("flow.preset is required") @@ -189,6 +197,17 @@ def resolve_pdk_root(cfg: ProjectConfig) -> str: return _resolve_path(cfg.project_dir, cfg.pdk_root) +def _validate_pdk_contents(pdk_name: str, pdk_root: str) -> str | None: + if not pdk_root: + return None + try: + from chipcompiler.data.pdk import get_pdk + get_pdk(pdk_name, pdk_root) + return None + except ValueError as exc: + return str(exc) + + def _pdk_root_from_env() -> str: for key in ("CHIPCOMPILER_ICS55_PDK_ROOT", "ICS55_PDK_ROOT"): val = os.environ.get(key, "").strip() diff --git a/test/cli/test_cli_main.py b/test/cli/test_cli_main.py index 37e1c0e7..2464624c 100644 --- a/test/cli/test_cli_main.py +++ b/test/cli/test_cli_main.py @@ -54,6 +54,10 @@ def fake_create_workspace(**kwargs): "chipcompiler.cli.project.build_rtl2gds_flow", lambda: [("Synthesis", "yosys", "Unstart")], ) + monkeypatch.setattr( + "chipcompiler.cli.config._validate_pdk_contents", + lambda name, root: None, + ) return capture @@ -157,8 +161,12 @@ def test_init_uses_basename_for_design_name(self, tmp_path): class TestCheck: - def test_check_passes_valid_config(self, tmp_path, capsys): + def test_check_passes_valid_config(self, tmp_path, monkeypatch, capsys): project_dir = _create_valid_project(tmp_path) + monkeypatch.setattr( + "chipcompiler.cli.config._validate_pdk_contents", + lambda name, root: None, + ) rc = cli_main.run(["check", "--project", project_dir]) assert rc == 0 out = capsys.readouterr().out @@ -166,6 +174,10 @@ def 
test_check_passes_valid_config(self, tmp_path, capsys): def test_check_from_inside_project_dir(self, tmp_path, monkeypatch, capsys): project_dir = _create_valid_project(tmp_path) + monkeypatch.setattr( + "chipcompiler.cli.config._validate_pdk_contents", + lambda name, root: None, + ) monkeypatch.chdir(project_dir) rc = cli_main.run(["check"]) assert rc == 0 @@ -266,8 +278,12 @@ def test_check_fails_non_numeric_frequency(self, tmp_path): rc = cli_main.run(["check", "--project", project_dir]) assert rc == 1 - def test_check_json_output(self, tmp_path, capsys): + def test_check_json_output(self, tmp_path, monkeypatch, capsys): project_dir = _create_valid_project(tmp_path) + monkeypatch.setattr( + "chipcompiler.cli.config._validate_pdk_contents", + lambda name, root: None, + ) rc = cli_main.run(["check", "--project", project_dir, "--json"]) assert rc == 0 out = capsys.readouterr().out @@ -677,8 +693,12 @@ def test_init_lines_have_disclosure(self, tmp_path, capsys): if line.strip(): assert _has_disclosure(line), f"Missing disclosure in: {line}" - def test_check_lines_have_disclosure(self, tmp_path, capsys): + def test_check_lines_have_disclosure(self, tmp_path, monkeypatch, capsys): project_dir = _create_valid_project(tmp_path) + monkeypatch.setattr( + "chipcompiler.cli.config._validate_pdk_contents", + lambda name, root: None, + ) rc = cli_main.run(["check", "--project", project_dir]) assert rc == 0 out = capsys.readouterr().out From 5aa3b9b9c9f0a2ee9b3857f4007b2d378bbbff0f Mon Sep 17 00:00:00 2001 From: Emin Date: Sat, 2 May 2026 00:31:14 +0800 Subject: [PATCH 020/104] fix(cli): validate steps structure in flow.json, escape quotes in text output - get_run_status() now filters non-dict entries from steps list and handles non-list steps values gracefully - format_field() escapes backslashes and double quotes in quoted values --- chipcompiler/cli/inspect.py | 3 +++ chipcompiler/cli/output.py | 3 ++- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/chipcompiler/cli/inspect.py b/chipcompiler/cli/inspect.py index d02f8ec3..27f70c00 100644 --- a/chipcompiler/cli/inspect.py +++ b/chipcompiler/cli/inspect.py @@ -24,6 +24,9 @@ def read_flow_json(run_dir: str) -> dict | None: def get_run_status(flow_data: dict) -> str: steps = flow_data.get("steps", []) + if not isinstance(steps, list): + steps = [] + steps = [s for s in steps if isinstance(s, dict)] if not steps: return "unstart" for step in steps: diff --git a/chipcompiler/cli/output.py b/chipcompiler/cli/output.py index 7ab27e14..7409a85c 100644 --- a/chipcompiler/cli/output.py +++ b/chipcompiler/cli/output.py @@ -6,7 +6,8 @@ def format_field(key: str, value) -> str: if isinstance(value, str) and re.search(r'\s', value): - return f'{key}="{value}"' + escaped = value.replace('\\', '\\\\').replace('"', '\\"') + return f'{key}="{escaped}"' return f"{key}={value}" From 01406362eed11d1e5b7aa3cd4771b42193e6b38d Mon Sep 17 00:00:00 2001 From: Emin Date: Sat, 2 May 2026 00:48:25 +0800 Subject: [PATCH 021/104] fix(cli): extract _safe_steps to normalize steps in all status builders All status builders (lines/json/jsonl) now filter non-dict step entries instead of only get_run_status doing so. 
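Sketch of the shared normalization, mirroring the _safe_steps helper
added in this change:

    def _safe_steps(flow_data: dict) -> list[dict]:
        # A missing or non-list "steps" value is treated as no steps at all.
        steps = flow_data.get("steps", [])
        if not isinstance(steps, list):
            return []
        # Entries that are not JSON objects (e.g. strings, null) are dropped.
        return [s for s in steps if isinstance(s, dict)]

    assert _safe_steps({"steps": ["bad", {"name": "CTS"}]}) == [{"name": "CTS"}]
    assert _safe_steps({"steps": "corrupt"}) == []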
--- chipcompiler/cli/inspect.py | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/chipcompiler/cli/inspect.py b/chipcompiler/cli/inspect.py index 27f70c00..e199896d 100644 --- a/chipcompiler/cli/inspect.py +++ b/chipcompiler/cli/inspect.py @@ -22,11 +22,15 @@ def read_flow_json(run_dir: str) -> dict | None: return None -def get_run_status(flow_data: dict) -> str: +def _safe_steps(flow_data: dict) -> list[dict]: steps = flow_data.get("steps", []) if not isinstance(steps, list): - steps = [] - steps = [s for s in steps if isinstance(s, dict)] + return [] + return [s for s in steps if isinstance(s, dict)] + + +def get_run_status(flow_data: dict) -> str: + steps = _safe_steps(flow_data) if not steps: return "unstart" for step in steps: @@ -67,7 +71,7 @@ def build_status_lines(run_dir: str, project: str | None = None) -> tuple[list[s log=disclosure_cmd("ecc log", project), )) - for step in flow_data.get("steps", []): + for step in _safe_steps(flow_data): step_token = normalize_step_name(step.get("name", "")) lines.append(format_line( step=step_token, @@ -88,7 +92,7 @@ def build_status_json(run_dir: str) -> tuple[dict, int]: run_status = get_run_status(flow_data) steps = [] - for step in flow_data.get("steps", []): + for step in _safe_steps(flow_data): steps.append({ "step": normalize_step_name(step.get("name", "")), "tool": step.get("tool", ""), @@ -107,7 +111,7 @@ def build_status_jsonl(run_dir: str) -> tuple[list[dict], int]: run_status = get_run_status(flow_data) objects = [{"kind": "run", "run": "default", "status": run_status, "workspace": run_dir}] - for step in flow_data.get("steps", []): + for step in _safe_steps(flow_data): objects.append({ "kind": "step", "step": normalize_step_name(step.get("name", "")), From 5fb172a97e67e4b92dff772ed85d04aafda4b66a Mon Sep 17 00:00:00 2001 From: Emin Date: Sat, 2 May 2026 01:03:46 +0800 Subject: [PATCH 022/104] fix(cli): remove m from metric normalizer regex, coerce non-string config fields - normalize_metric_key fallback regex no longer strips literal 'm' from unknown metric keys like 'memory_usage' or 'max_delay' - _parse_config now coerces non-string TOML values to defaults so pdk.name=[] or flow.preset={} don't crash validation with TypeError --- chipcompiler/cli/config.py | 17 ++++++++++------- chipcompiler/cli/output.py | 2 +- 2 files changed, 11 insertions(+), 8 deletions(-) diff --git a/chipcompiler/cli/config.py b/chipcompiler/cli/config.py index b4e0499b..b5b0a42d 100644 --- a/chipcompiler/cli/config.py +++ b/chipcompiler/cli/config.py @@ -60,16 +60,19 @@ def _parse_config(data: dict, config_path: str) -> ProjectConfig: rtl_raw = [] design_rtl = [v for v in rtl_raw if isinstance(v, str)] + def _str(val, default=""): + return val if isinstance(val, str) else default + cfg = ProjectConfig( - design_name=design.get("name", ""), - design_top=design.get("top", ""), + design_name=_str(design.get("name", "")), + design_top=_str(design.get("top", "")), design_rtl=design_rtl, - design_clock_port=design.get("clock_port", ""), + design_clock_port=_str(design.get("clock_port", "")), design_frequency_mhz=freq, - pdk_name=pdk.get("name", ""), - pdk_root=pdk.get("root", ""), - flow_preset=flow.get("preset", ""), - flow_run=flow.get("run", "default"), + pdk_name=_str(pdk.get("name", "")), + pdk_root=_str(pdk.get("root", "")), + flow_preset=_str(flow.get("preset", "")), + flow_run=_str(flow.get("run", "default"), "default"), config_path=config_path, project_dir=project_dir, ) diff --git a/chipcompiler/cli/output.py 
b/chipcompiler/cli/output.py index 7409a85c..34343926 100644 --- a/chipcompiler/cli/output.py +++ b/chipcompiler/cli/output.py @@ -108,7 +108,7 @@ def normalize_metric_key(raw_key: str) -> str: if raw_key in known: return known[raw_key] s = raw_key.lower() - s = re.sub(r'[\s\[\]μm^]+', '_', s) + s = re.sub(r'[\s\[\]μ^]+', '_', s) s = re.sub(r'_+', '_', s) s = s.strip('_') return s From 26da09c6269543f85b8023d6b524d6291fcd6a19 Mon Sep 17 00:00:00 2001 From: Emin Date: Sat, 2 May 2026 01:42:37 +0800 Subject: [PATCH 023/104] docs: remove ecc open from phase 2 checklist --- docs/specification/cli-design.md | 1 - 1 file changed, 1 deletion(-) diff --git a/docs/specification/cli-design.md b/docs/specification/cli-design.md index 6cbe39a0..f22e337c 100644 --- a/docs/specification/cli-design.md +++ b/docs/specification/cli-design.md @@ -391,7 +391,6 @@ Success criteria: - [ ] `ecc diagnose` - [ ] `ecc artifacts` - [ ] `ecc config --resolved` -- [ ] `ecc open` - [ ] Run tags and run comparison basics - [ ] Structured issue and artifact metadata From 94271c3df19f0d0e78ce877266c0577992332161 Mon Sep 17 00:00:00 2001 From: Emin Date: Sat, 2 May 2026 01:48:02 +0800 Subject: [PATCH 024/104] refactor(cli): simplify CLI code before finalization - Consolidate PDK root resolution into _resolve_pdk_root helper, removing duplicate validation branches in validate_project_config - Extract _collect_metrics to deduplicate build_metrics_json/jsonl - Remove unused rc variable in build_metrics_lines - Replace unused rtl_mode with _ in project.py - Use any(c.isspace()) instead of re.search in format_field - Move os import to module level in main.py --- chipcompiler/cli/config.py | 20 +++++++++---------- chipcompiler/cli/inspect.py | 40 +++++++++++++++---------------------- chipcompiler/cli/main.py | 2 +- chipcompiler/cli/output.py | 2 +- chipcompiler/cli/project.py | 2 +- 5 files changed, 29 insertions(+), 37 deletions(-) diff --git a/chipcompiler/cli/config.py b/chipcompiler/cli/config.py index b5b0a42d..adae326e 100644 --- a/chipcompiler/cli/config.py +++ b/chipcompiler/cli/config.py @@ -115,20 +115,16 @@ def validate_project_config(cfg: ProjectConfig) -> list[str]: elif cfg.pdk_name not in SUPPORTED_PDK_NAMES: errors.append(f"unsupported pdk.name: {cfg.pdk_name}") - if cfg.pdk_root: - resolved_root = _resolve_path(cfg.project_dir, cfg.pdk_root) - if not os.path.isdir(resolved_root): - errors.append(f"pdk.root is not a directory: {cfg.pdk_root}") + pdk_root = _resolve_pdk_root(cfg) + if pdk_root: + if not os.path.isdir(pdk_root): + errors.append(f"pdk.root is not a directory: {cfg.pdk_root or '$(env)'}") else: - pdk_err = _validate_pdk_contents(cfg.pdk_name, resolved_root) + pdk_err = _validate_pdk_contents(cfg.pdk_name, pdk_root) if pdk_err: errors.append(pdk_err) - elif not _pdk_root_from_env(): - errors.append("pdk.root is required") else: - pdk_err = _validate_pdk_contents(cfg.pdk_name, _pdk_root_from_env()) - if pdk_err: - errors.append(pdk_err) + errors.append("pdk.root is required") if not cfg.flow_preset: errors.append("flow.preset is required") @@ -195,6 +191,10 @@ def _resolve_path(project_dir: str, path: str) -> str: def resolve_pdk_root(cfg: ProjectConfig) -> str: + return _resolve_pdk_root(cfg) + + +def _resolve_pdk_root(cfg: ProjectConfig) -> str: if not cfg.pdk_root: return _pdk_root_from_env() return _resolve_path(cfg.project_dir, cfg.pdk_root) diff --git a/chipcompiler/cli/inspect.py b/chipcompiler/cli/inspect.py index e199896d..c2476f21 100644 --- a/chipcompiler/cli/inspect.py +++ 
b/chipcompiler/cli/inspect.py @@ -349,7 +349,6 @@ def build_metrics_lines(run_dir: str, step_token: str | None = None, )], 0 lines = [] - rc = 0 for token, path in sorted(metrics_files.items()): data = read_metrics(path) if not data: @@ -363,47 +362,40 @@ def build_metrics_lines(run_dir: str, step_token: str | None = None, source=os.path.relpath(path, run_dir), inspect=disclosure_cmd(f"ecc metrics {token} --json", project), )) - return lines, rc + return lines, 0 -def build_metrics_json(run_dir: str, step_token: str | None = None, - project: str | None = None) -> tuple[dict, int]: +def _collect_metrics(run_dir: str, step_token: str | None, + project: str | None) -> tuple[list[dict], int]: err = _check_requested_step(run_dir, step_token, project) if err is not None: - return err, 1 + return [err], 1 metrics_files = discover_metrics(run_dir, step_token) - all_metrics = [] + items = [] for token, path in sorted(metrics_files.items()): data = read_metrics(path) for raw_key, value in data.items(): - all_metrics.append({ + items.append({ "metric": normalize_metric_key(raw_key), "step": token, "value": value, "source": os.path.relpath(path, run_dir), }) - return {"metrics": all_metrics}, 0 + return items, 0 + + +def build_metrics_json(run_dir: str, step_token: str | None = None, + project: str | None = None) -> tuple[dict, int]: + items, rc = _collect_metrics(run_dir, step_token, project) + if rc != 0: + return items[0], 1 + return {"metrics": items}, 0 def build_metrics_jsonl(run_dir: str, step_token: str | None = None, project: str | None = None) -> tuple[list[dict], int]: - err = _check_requested_step(run_dir, step_token, project) - if err is not None: - return [err], 1 - - metrics_files = discover_metrics(run_dir, step_token) - objects = [] - for token, path in sorted(metrics_files.items()): - data = read_metrics(path) - for raw_key, value in data.items(): - objects.append({ - "metric": normalize_metric_key(raw_key), - "step": token, - "value": value, - "source": os.path.relpath(path, run_dir), - }) - return objects, 0 + return _collect_metrics(run_dir, step_token, project) def _check_requested_step(run_dir: str, step_token: str | None, diff --git a/chipcompiler/cli/main.py b/chipcompiler/cli/main.py index a38fa304..9c5a6c70 100644 --- a/chipcompiler/cli/main.py +++ b/chipcompiler/cli/main.py @@ -1,4 +1,5 @@ import argparse +import os import sys from collections.abc import Sequence @@ -202,7 +203,6 @@ def _cmd_metrics(args, project_dir: str, project: str | None) -> int: def _run_dir(project_dir: str) -> str: - import os return os.path.join(project_dir, "runs", "default") diff --git a/chipcompiler/cli/output.py b/chipcompiler/cli/output.py index 34343926..9cfb2854 100644 --- a/chipcompiler/cli/output.py +++ b/chipcompiler/cli/output.py @@ -5,7 +5,7 @@ def format_field(key: str, value) -> str: - if isinstance(value, str) and re.search(r'\s', value): + if isinstance(value, str) and any(c.isspace() for c in value): escaped = value.replace('\\', '\\\\').replace('"', '\\"') return f'{key}="{escaped}"' return f"{key}={value}" diff --git a/chipcompiler/cli/project.py b/chipcompiler/cli/project.py index 178c9685..e1ef3d01 100644 --- a/chipcompiler/cli/project.py +++ b/chipcompiler/cli/project.py @@ -144,7 +144,7 @@ def run_project(project_dir: str, overwrite: bool = False, if overwrite and os.path.exists(run_dir): shutil.rmtree(run_dir) - rtl_mode, origin_verilog, input_filelist = resolve_rtl(cfg) + _, origin_verilog, input_filelist = resolve_rtl(cfg) parameters = to_parameters(cfg) pdk_root = 
resolve_pdk_root(cfg) From a295b5756287bebb9bd5792f9ce3dd7d9d705453 Mon Sep 17 00:00:00 2001 From: Emin Date: Sat, 2 May 2026 09:27:10 +0800 Subject: [PATCH 025/104] feat(cli): add artifacts, config --resolved, diagnose, and --run-id selector Add read-only debug and traceability CLI commands: - ecc artifacts [step]: list generated files with role metadata - ecc config [step] --resolved: show resolved project/step config - ecc diagnose [step]: emit structured issue metadata with severity - --run-id selector for status, log, metrics, and new commands New modules: artifacts.py, config_view.py, diagnose.py Updated: output.py (disclosure_cmd --run-id), inspect.py (resolve_run_dir), main.py (subcommand dispatch, --run-id on existing commands) Tests: 53 new tests in test/cli/test_cli_inspect.py --- chipcompiler/cli/artifacts.py | 135 +++++ chipcompiler/cli/config_view.py | 183 +++++++ chipcompiler/cli/diagnose.py | 206 ++++++++ chipcompiler/cli/inspect.py | 90 ++-- chipcompiler/cli/main.py | 152 +++++- chipcompiler/cli/output.py | 10 +- test/cli/test_cli_inspect.py | 854 ++++++++++++++++++++++++++++++++ 7 files changed, 1581 insertions(+), 49 deletions(-) create mode 100644 chipcompiler/cli/artifacts.py create mode 100644 chipcompiler/cli/config_view.py create mode 100644 chipcompiler/cli/diagnose.py create mode 100644 test/cli/test_cli_inspect.py diff --git a/chipcompiler/cli/artifacts.py b/chipcompiler/cli/artifacts.py new file mode 100644 index 00000000..9fd56364 --- /dev/null +++ b/chipcompiler/cli/artifacts.py @@ -0,0 +1,135 @@ +import os + +from chipcompiler.cli.output import disclosure_cmd + +KNOWN_ROLES = {"config", "input", "output", "data", "feature", "report", "log", "script", "analysis"} + + +def _role_from_dirname(dirname: str) -> str: + return dirname if dirname in KNOWN_ROLES else "unknown" + + +def discover_artifacts(run_dir: str, step_token: str | None = None, + project: str | None = None, + run_id: str | None = None) -> tuple[list[dict], int]: + from chipcompiler.cli.inspect import discover_step_dirs + from chipcompiler.cli.output import format_line + + step_dirs = discover_step_dirs(run_dir) + + if step_token is not None: + if step_token not in step_dirs: + return [{"kind": "error", "step": step_token, + "status": "unknown_step"}], 1 + tokens = [step_token] + else: + tokens = sorted(step_dirs.keys()) + + artifacts = [] + for token in tokens: + step_path = step_dirs[token] + for entry in sorted(os.listdir(step_path)): + subdir = os.path.join(step_path, entry) + if not os.path.isdir(subdir): + continue + role = _role_from_dirname(entry) + for fname in sorted(os.listdir(subdir)): + fpath = os.path.join(subdir, fname) + if os.path.isfile(fpath): + artifacts.append({ + "kind": "artifact", + "step": token, + "role": role, + "run": run_id or "default", + "path": os.path.relpath(fpath, os.path.dirname(os.path.dirname(run_dir))), + "exists": True, + "inspect_cmd": disclosure_cmd(f"ecc artifacts {token} --json", project, run_id), + }) + + if not artifacts and step_token is not None: + return [], 0 + + if not artifacts: + return [], 0 + + return artifacts, 0 + + +def build_artifacts_lines(run_dir: str, step_token: str | None = None, + project: str | None = None, + run_id: str | None = None) -> tuple[list[str], int]: + from chipcompiler.cli.output import format_line + + artifacts, rc = discover_artifacts(run_dir, step_token, project, run_id) + if rc != 0: + if artifacts and artifacts[0].get("status") == "unknown_step": + s = artifacts[0]["step"] + return [format_line( + step=s, + 
status="unknown_step", + status_cmd=disclosure_cmd("ecc status", project, run_id), + )], 1 + return [], rc + + if not artifacts: + if step_token is not None: + return [format_line( + step=step_token, + artifacts_status="none", + status_cmd=disclosure_cmd("ecc status", project, run_id), + log=disclosure_cmd(f"ecc log {step_token} --errors", project, run_id), + )], 0 + return [format_line( + artifacts_status="none", + workspace=run_dir, + status_cmd=disclosure_cmd("ecc status", project, run_id), + )], 0 + + lines = [] + for a in artifacts: + line_fields = { + "artifact": os.path.basename(a["path"]), + "step": a["step"], + "role": a["role"], + "path": a["path"], + } + if a["role"] == "analysis": + line_fields["metrics"] = disclosure_cmd(f"ecc metrics {a['step']}", project, run_id) + if a["role"] == "log": + line_fields["inspect"] = disclosure_cmd(f"ecc log {a['step']} --errors", project, run_id) + if a["role"] in ("output", "report", "analysis", "log"): + line_fields["config"] = disclosure_cmd(f"ecc config {a['step']} --resolved", project, run_id) + lines.append(format_line(**line_fields)) + return lines, 0 + + +def build_artifacts_json(run_dir: str, step_token: str | None = None, + project: str | None = None, + run_id: str | None = None) -> tuple[dict, int]: + artifacts, rc = discover_artifacts(run_dir, step_token, project, run_id) + if rc != 0: + if artifacts and artifacts[0].get("status") == "unknown_step": + return {"status": "unknown_step", "step": artifacts[0]["step"]}, 1 + return {}, rc + + if not artifacts: + if step_token is not None: + return {"artifacts_status": "none", "step": step_token}, 0 + return {"artifacts_status": "none"}, 0 + + return {"artifacts": artifacts}, 0 + + +def build_artifacts_jsonl(run_dir: str, step_token: str | None = None, + project: str | None = None, + run_id: str | None = None) -> tuple[list[dict], int]: + artifacts, rc = discover_artifacts(run_dir, step_token, project, run_id) + if rc != 0: + return artifacts, rc + + if not artifacts: + if step_token is not None: + return [{"artifacts_status": "none", "step": step_token}], 0 + return [{"artifacts_status": "none"}], 0 + + return artifacts, 0 diff --git a/chipcompiler/cli/config_view.py b/chipcompiler/cli/config_view.py new file mode 100644 index 00000000..b0d14d7b --- /dev/null +++ b/chipcompiler/cli/config_view.py @@ -0,0 +1,183 @@ +import os + +from chipcompiler.cli.output import disclosure_cmd + + +def build_project_config_items(project_dir: str, run_dir: str, + project: str | None = None, + run_id: str | None = None) -> tuple[list[dict], int]: + from chipcompiler.cli.config import ( + find_config_path, + load_project_config, + resolve_pdk_root, + ) + from chipcompiler.cli.output import format_line + + config_path = find_config_path(project_dir) + if config_path is None: + return [{"kind": "error", "status": "missing_config"}], 1 + + cfg = load_project_config(config_path) + if getattr(cfg, "_toml_error", None): + return [{"kind": "error", "status": "invalid_config"}], 1 + + pdk_root = resolve_pdk_root(cfg) + display_run = run_id or "default" + + items = [] + entries = [ + ("design.name", cfg.design_name, cfg.design_name, "ecc.toml"), + ("design.top", cfg.design_top, cfg.design_top, "ecc.toml"), + ("design.clock_port", cfg.design_clock_port, cfg.design_clock_port, "ecc.toml"), + ("design.frequency_mhz", cfg.design_frequency_mhz, cfg.design_frequency_mhz, "ecc.toml"), + ("pdk.name", cfg.pdk_name, cfg.pdk_name, "ecc.toml"), + ("flow.preset", cfg.flow_preset, cfg.flow_preset, "ecc.toml"), + ("flow.run", 
cfg.flow_run, cfg.flow_run, "ecc.toml"), + ] + + for key, value, resolved, source in entries: + items.append({ + "kind": "config", + "scope": "project", + "key": key, + "value": value, + "resolved": resolved, + "source": source, + }) + + # RTL entries + for i, rtl in enumerate(cfg.design_rtl): + rtl_resolved = os.path.normpath(os.path.join(project_dir, rtl)) + items.append({ + "kind": "config", + "scope": "project", + "key": f"design.rtl.{i}", + "value": rtl, + "resolved": rtl_resolved, + "source": "ecc.toml", + }) + + # PDK root with resolution + pdk_source = "ecc.toml" if cfg.pdk_root else "env" + items.append({ + "kind": "config", + "scope": "project", + "key": "pdk.root", + "value": cfg.pdk_root or "", + "resolved": pdk_root, + "source": pdk_source, + }) + + # Run directory + items.append({ + "kind": "config", + "scope": "project", + "key": "run_dir", + "value": os.path.relpath(run_dir, project_dir) if not os.path.isabs(run_dir) else run_dir, + "resolved": os.path.abspath(run_dir), + "source": "resolved", + }) + + return items, 0 + + +def build_step_config_items(run_dir: str, step_token: str | None, + project: str | None = None, + run_id: str | None = None) -> tuple[list[dict], int]: + from chipcompiler.cli.inspect import discover_step_dirs + + step_dirs = discover_step_dirs(run_dir) + + if step_token not in step_dirs: + return [{"kind": "error", "status": "unknown_step", "step": step_token}], 1 + + config_dir = os.path.join(step_dirs[step_token], "config") + items = [] + display_run = run_id or "default" + + if os.path.isdir(config_dir): + for fname in sorted(os.listdir(config_dir)): + fpath = os.path.join(config_dir, fname) + if os.path.isfile(fpath): + items.append({ + "kind": "config", + "scope": "step", + "step": step_token, + "role": "config", + "run": display_run, + "path": os.path.relpath(fpath, os.path.dirname(os.path.dirname(run_dir))), + "source": "step_config", + "inspect_cmd": disclosure_cmd(f"ecc artifacts {step_token} --json", project, run_id), + }) + + return items, 0 + + +def build_config_lines(items: list[dict], project: str | None = None, + run_id: str | None = None) -> tuple[list[str], int]: + from chipcompiler.cli.output import format_line + + if not items: + return [], 0 + + if items[0].get("status") in ("unknown_step", "missing_config", "invalid_config"): + if items[0].get("status") == "unknown_step": + return [format_line( + step=items[0].get("step", ""), + status="unknown_step", + inspect=disclosure_cmd("ecc status", project, run_id), + )], 1 + if items[0].get("status") == "missing_config": + return [format_line( + status="missing_config", + inspect=disclosure_cmd("ecc check", project, run_id), + )], 1 + return [format_line( + status="invalid_config", + inspect=disclosure_cmd("ecc check", project, run_id), + )], 1 + + lines = [] + for item in items: + if item.get("scope") == "project": + line = format_line( + config=item["key"], + scope="project", + value=item["value"], + resolved=item.get("resolved"), + source=item["source"], + inspect=disclosure_cmd("ecc config --resolved --json", project, run_id), + ) + else: + line = format_line( + config=os.path.basename(item["path"]), + scope="step", + step=item["step"], + role=item["role"], + run=item.get("run", "default"), + path=item["path"], + source=item["source"], + inspect=item.get("inspect_cmd"), + ) + lines.append(line) + return lines, 0 + + +def build_config_json(items: list[dict]) -> tuple[dict, int]: + if items and items[0].get("status") in ("unknown_step", "missing_config", "invalid_config"): + return 
items[0], 1 + + if not items: + return {"config_status": "none"}, 0 + + return {"config": items}, 0 + + +def build_config_jsonl(items: list[dict]) -> tuple[list[dict], int]: + if items and items[0].get("status") in ("unknown_step", "missing_config", "invalid_config"): + return items, 1 + + if not items: + return [{"config_status": "none"}], 0 + + return items, 0 diff --git a/chipcompiler/cli/diagnose.py b/chipcompiler/cli/diagnose.py new file mode 100644 index 00000000..0124c8ee --- /dev/null +++ b/chipcompiler/cli/diagnose.py @@ -0,0 +1,206 @@ +import os + +from chipcompiler.cli.output import disclosure_cmd + + +def _has_investigation_files(step_path: str) -> bool: + for role in ("output", "report", "analysis"): + role_dir = os.path.join(step_path, role) + if os.path.isdir(role_dir): + if any(os.path.isfile(os.path.join(role_dir, f)) for f in os.listdir(role_dir)): + return True + return False + + +def _count_log_errors(run_dir: str, step_token: str) -> int: + from chipcompiler.cli.inspect import discover_logs, filter_errors, read_log_file + logs = discover_logs(run_dir, step_token) + count = 0 + for lf in logs: + raw = read_log_file(lf) + count += len(filter_errors(raw)) + return count + + +def _has_metrics(run_dir: str, step_token: str) -> bool: + from chipcompiler.cli.inspect import discover_metrics + return bool(discover_metrics(run_dir, step_token)) + + +def _has_config_files(step_path: str) -> bool: + config_dir = os.path.join(step_path, "config") + if not os.path.isdir(config_dir): + return False + return any(os.path.isfile(os.path.join(config_dir, f)) for f in os.listdir(config_dir)) + + +def _make_issue(issue: str, severity: str, run: str, + step: str | None = None, + status: str | None = None, + count: int | None = None, + project: str | None = None, + run_id: str | None = None) -> dict: + obj = { + "kind": "issue", + "issue": issue, + "severity": severity, + "run": run, + } + if step: + obj["step"] = step + if status: + obj["status"] = status + if count is not None: + obj["count"] = count + + cmd_kwargs = {"project": project, "run_id": run_id} + if issue == "missing_run": + obj["evidence"] = disclosure_cmd("ecc status", **cmd_kwargs) + obj["run_cmd"] = disclosure_cmd("ecc run", project=project) + elif step: + obj["evidence"] = disclosure_cmd(f"ecc status", **cmd_kwargs) + obj["log"] = disclosure_cmd(f"ecc log {step} --errors", **cmd_kwargs) + obj["artifacts"] = disclosure_cmd(f"ecc artifacts {step}", **cmd_kwargs) + obj["config"] = disclosure_cmd(f"ecc config {step} --resolved", **cmd_kwargs) + + return obj + + +def build_diagnose_issues(run_dir: str, step_token: str | None = None, + project: str | None = None, + run_id: str | None = None) -> tuple[list[dict], int]: + from chipcompiler.cli.inspect import ( + discover_step_dirs, + read_flow_json, + _safe_steps, + ) + from chipcompiler.cli.output import normalize_state, normalize_step_name + + display_run = run_id or "default" + issues = [] + + flow_data = read_flow_json(run_dir) + + if flow_data is None: + if os.path.isfile(os.path.join(run_dir, "home", "flow.json")): + issues.append(_make_issue("invalid_flow_json", "error", display_run, + project=project, run_id=run_id)) + return issues, 1 + issues.append(_make_issue("missing_run", "error", display_run, + project=project, run_id=run_id)) + return issues, 1 + + steps = _safe_steps(flow_data) + step_dirs = discover_step_dirs(run_dir) + + if step_token is not None: + if step_token not in step_dirs: + issues.append(_make_issue("unknown_step", "error", display_run, + 
step=step_token, project=project, run_id=run_id)) + return issues, 1 + for s in steps: + token = normalize_step_name(s.get("name", "")) + if step_token is not None and token != step_token: + continue + state = normalize_state(s.get("state", "")) + + if state in ("incomplete", "invalid"): + issues.append(_make_issue("failed_step", "error", display_run, + step=token, status=state, + project=project, run_id=run_id)) + elif state == "ongoing": + issues.append(_make_issue("ongoing_step", "warning", display_run, + step=token, status=state, + project=project, run_id=run_id)) + elif state == "unstart": + issues.append(_make_issue("unstarted_step", "info", display_run, + step=token, status=state, + project=project, run_id=run_id)) + + if token in step_dirs: + error_count = _count_log_errors(run_dir, token) + if error_count > 0: + issues.append(_make_issue("log_errors", "error", display_run, + step=token, count=error_count, + project=project, run_id=run_id)) + + if not _has_metrics(run_dir, token): + issues.append(_make_issue("missing_metrics", "warning", display_run, + step=token, project=project, run_id=run_id)) + + if not _has_investigation_files(step_dirs[token]): + issues.append(_make_issue("missing_artifacts", "warning", display_run, + step=token, project=project, run_id=run_id)) + + if not _has_config_files(step_dirs[token]): + issues.append(_make_issue("config_unavailable", "info", display_run, + step=token, project=project, run_id=run_id)) + + return issues, 0 + + +def _exit_code(issues: list[dict]) -> int: + for issue in issues: + if issue.get("severity") == "error": + return 1 + return 0 + + +def build_diagnose_lines(run_dir: str, step_token: str | None = None, + project: str | None = None, + run_id: str | None = None) -> tuple[list[str], int]: + from chipcompiler.cli.output import format_line + + issues, rc = build_diagnose_issues(run_dir, step_token, project, run_id) + + if not issues: + display_run = run_id or "default" + return [format_line( + diagnose="clean", + run=display_run, + evidence=disclosure_cmd("ecc status", project, run_id), + )], 0 + + lines = [] + for issue in issues: + fields = {} + fields["issue"] = issue["issue"] + fields["severity"] = issue["severity"] + fields["run"] = issue["run"] + if "step" in issue: + fields["step"] = issue["step"] + if "status" in issue: + fields["status"] = issue["status"] + if "count" in issue: + fields["count"] = issue["count"] + if "evidence" in issue: + fields["evidence"] = issue["evidence"] + if "log" in issue: + fields["log"] = issue["log"] + if "artifacts" in issue: + fields["artifacts"] = issue["artifacts"] + if "config" in issue: + fields["config"] = issue["config"] + if "run_cmd" in issue: + fields["run_cmd"] = issue["run_cmd"] + lines.append(format_line(**fields)) + + return lines, _exit_code(issues) + + +def build_diagnose_json(run_dir: str, step_token: str | None = None, + project: str | None = None, + run_id: str | None = None) -> tuple[dict, int]: + issues, _ = build_diagnose_issues(run_dir, step_token, project, run_id) + if not issues: + return {"diagnose": "clean", "run": run_id or "default"}, 0 + return {"issues": issues}, _exit_code(issues) + + +def build_diagnose_jsonl(run_dir: str, step_token: str | None = None, + project: str | None = None, + run_id: str | None = None) -> tuple[list[dict], int]: + issues, _ = build_diagnose_issues(run_dir, step_token, project, run_id) + if not issues: + return [{"diagnose": "clean", "run": run_id or "default"}], 0 + return issues, _exit_code(issues) diff --git 
a/chipcompiler/cli/inspect.py b/chipcompiler/cli/inspect.py index c2476f21..02874e43 100644 --- a/chipcompiler/cli/inspect.py +++ b/chipcompiler/cli/inspect.py @@ -10,6 +10,16 @@ ) +def resolve_run_dir(project_dir: str, run_id: str | None = None) -> tuple[str, str | None]: + if not run_id or run_id == "default": + return os.path.join(project_dir, "runs", "default"), None + + if os.path.isabs(run_id): + return run_id, run_id + + return os.path.join(project_dir, "runs", run_id), run_id + + def read_flow_json(run_dir: str) -> dict | None: path = os.path.join(run_dir, "home", "flow.json") if not os.path.isfile(path): @@ -46,13 +56,14 @@ def get_run_status(flow_data: dict) -> str: return "unstart" if all_unstart else "failed" -def build_status_lines(run_dir: str, project: str | None = None) -> tuple[list[str], int]: +def build_status_lines(run_dir: str, project: str | None = None, + run_id: str | None = None) -> tuple[list[str], int]: from chipcompiler.cli.output import format_line flow_data = read_flow_json(run_dir) if flow_data is None: line = format_line( - run="default", + run=run_id or "default", status="missing", workspace=run_dir, run_cmd=disclosure_cmd("ecc run", project), @@ -63,12 +74,12 @@ def build_status_lines(run_dir: str, project: str | None = None) -> tuple[list[s lines = [] lines.append(format_line( - run="default", + run=run_id or "default", status=run_status, workspace=run_dir, - status_cmd=disclosure_cmd("ecc status", project), - metrics=disclosure_cmd("ecc metrics", project), - log=disclosure_cmd("ecc log", project), + status_cmd=disclosure_cmd("ecc status", project, run_id), + metrics=disclosure_cmd("ecc metrics", project, run_id), + log=disclosure_cmd("ecc log", project, run_id), )) for step in _safe_steps(flow_data): @@ -78,17 +89,18 @@ def build_status_lines(run_dir: str, project: str | None = None) -> tuple[list[s tool=step.get("tool", ""), status=normalize_state(step.get("state", "")), runtime=step.get("runtime", "") or None, - metrics=disclosure_cmd(f"ecc metrics {step_token}", project), - log=disclosure_cmd(f"ecc log {step_token} --errors", project), + metrics=disclosure_cmd(f"ecc metrics {step_token}", project, run_id), + log=disclosure_cmd(f"ecc log {step_token} --errors", project, run_id), )) return lines, 0 -def build_status_json(run_dir: str) -> tuple[dict, int]: +def build_status_json(run_dir: str, run_id: str | None = None) -> tuple[dict, int]: flow_data = read_flow_json(run_dir) + display_run = run_id or "default" if flow_data is None: - return {"run": "default", "status": "missing", "workspace": run_dir}, 1 + return {"run": display_run, "status": "missing", "workspace": run_dir}, 1 run_status = get_run_status(flow_data) steps = [] @@ -100,16 +112,17 @@ def build_status_json(run_dir: str) -> tuple[dict, int]: "runtime": step.get("runtime", ""), }) - return {"run": "default", "status": run_status, "workspace": run_dir, "steps": steps}, 0 + return {"run": display_run, "status": run_status, "workspace": run_dir, "steps": steps}, 0 -def build_status_jsonl(run_dir: str) -> tuple[list[dict], int]: +def build_status_jsonl(run_dir: str, run_id: str | None = None) -> tuple[list[dict], int]: flow_data = read_flow_json(run_dir) + display_run = run_id or "default" if flow_data is None: - return [{"run": "default", "status": "missing", "workspace": run_dir}], 1 + return [{"run": display_run, "status": "missing", "workspace": run_dir}], 1 run_status = get_run_status(flow_data) - objects = [{"kind": "run", "run": "default", "status": run_status, "workspace": run_dir}] + 
objects = [{"kind": "run", "run": display_run, "status": run_status, "workspace": run_dir}] for step in _safe_steps(flow_data): objects.append({ @@ -178,7 +191,8 @@ def read_log_file(path: str) -> list[str]: def build_log_lines(run_dir: str, step_token: str | None, errors_only: bool, - project: str | None = None) -> tuple[list[str], int]: + project: str | None = None, + run_id: str | None = None) -> tuple[list[str], int]: from chipcompiler.cli.output import format_line if step_token is None: @@ -188,7 +202,7 @@ def build_log_lines(run_dir: str, step_token: str | None, errors_only: bool, for lf in global_logs: lines.append(format_line( log=os.path.relpath(lf, run_dir), - inspect=disclosure_cmd("ecc log", project), + inspect=disclosure_cmd("ecc log", project, run_id), )) step_dirs = discover_step_dirs(run_dir) @@ -198,7 +212,7 @@ def build_log_lines(run_dir: str, step_token: str | None, errors_only: bool, lines.append(format_line( step=token, log=os.path.relpath(lf, run_dir), - inspect=disclosure_cmd(f"ecc log {token} --errors", project), + inspect=disclosure_cmd(f"ecc log {token} --errors", project, run_id), )) if not lines: @@ -215,7 +229,7 @@ def build_log_lines(run_dir: str, step_token: str | None, errors_only: bool, return [format_line( step=step_token, status="unknown_step", - inspect=disclosure_cmd("ecc status", project), + inspect=disclosure_cmd("ecc status", project, run_id), )], 1 log_files = discover_logs(run_dir, step_token) @@ -223,7 +237,7 @@ def build_log_lines(run_dir: str, step_token: str | None, errors_only: bool, return [format_line( step=step_token, log_status="missing", - log=disclosure_cmd(f"ecc log {step_token} --errors", project), + log=disclosure_cmd(f"ecc log {step_token} --errors", project, run_id), )], 1 matched_lines = [] @@ -237,7 +251,7 @@ def build_log_lines(run_dir: str, step_token: str | None, errors_only: bool, return [format_line( step=step_token, log_status="no_matching_lines", - log=disclosure_cmd(f"ecc log {step_token}", project), + log=disclosure_cmd(f"ecc log {step_token}", project, run_id), )], 0 result = [] @@ -246,13 +260,14 @@ def build_log_lines(run_dir: str, step_token: str | None, errors_only: bool, step=step_token, source=os.path.relpath(lf, run_dir), line=line, - log=disclosure_cmd(f"ecc log {step_token} --errors", project), + log=disclosure_cmd(f"ecc log {step_token} --errors", project, run_id), )) return result, 0 def build_log_jsonl(run_dir: str, step_token: str | None, errors_only: bool, - project: str | None = None) -> tuple[list[dict], int]: + project: str | None = None, + run_id: str | None = None) -> tuple[list[dict], int]: if step_token is None: objects = [] for lf in discover_logs(run_dir): @@ -319,7 +334,8 @@ def read_metrics(path: str) -> dict: def build_metrics_lines(run_dir: str, step_token: str | None = None, - project: str | None = None) -> tuple[list[str], int]: + project: str | None = None, + run_id: str | None = None) -> tuple[list[str], int]: from chipcompiler.cli.output import format_line metrics_files = discover_metrics(run_dir, step_token) @@ -335,17 +351,17 @@ def build_metrics_lines(run_dir: str, step_token: str | None = None, f"{_internal_from_token(step_token)}_metrics.json"), run_dir, ), - log=disclosure_cmd(f"ecc log {step_token} --errors", project), + log=disclosure_cmd(f"ecc log {step_token} --errors", project, run_id), )], 1 return [format_line( step=step_token, status="unknown_step", - inspect=disclosure_cmd("ecc status", project), + inspect=disclosure_cmd("ecc status", project, run_id), )], 1 return 
[format_line( metrics_status="none", workspace=run_dir, - status_cmd=disclosure_cmd("ecc status", project), + status_cmd=disclosure_cmd("ecc status", project, run_id), )], 0 lines = [] @@ -360,14 +376,15 @@ def build_metrics_lines(run_dir: str, step_token: str | None = None, step=token, value=value, source=os.path.relpath(path, run_dir), - inspect=disclosure_cmd(f"ecc metrics {token} --json", project), + inspect=disclosure_cmd(f"ecc metrics {token} --json", project, run_id), )) return lines, 0 def _collect_metrics(run_dir: str, step_token: str | None, - project: str | None) -> tuple[list[dict], int]: - err = _check_requested_step(run_dir, step_token, project) + project: str | None, + run_id: str | None = None) -> tuple[list[dict], int]: + err = _check_requested_step(run_dir, step_token, project, run_id) if err is not None: return [err], 1 @@ -386,20 +403,23 @@ def _collect_metrics(run_dir: str, step_token: str | None, def build_metrics_json(run_dir: str, step_token: str | None = None, - project: str | None = None) -> tuple[dict, int]: - items, rc = _collect_metrics(run_dir, step_token, project) + project: str | None = None, + run_id: str | None = None) -> tuple[dict, int]: + items, rc = _collect_metrics(run_dir, step_token, project, run_id) if rc != 0: return items[0], 1 return {"metrics": items}, 0 def build_metrics_jsonl(run_dir: str, step_token: str | None = None, - project: str | None = None) -> tuple[list[dict], int]: - return _collect_metrics(run_dir, step_token, project) + project: str | None = None, + run_id: str | None = None) -> tuple[list[dict], int]: + return _collect_metrics(run_dir, step_token, project, run_id) def _check_requested_step(run_dir: str, step_token: str | None, - project: str | None = None) -> dict | None: + project: str | None = None, + run_id: str | None = None) -> dict | None: if step_token is None: return None step_dirs = discover_step_dirs(run_dir) @@ -410,7 +430,7 @@ def _check_requested_step(run_dir: str, step_token: str | None, return { "status": "missing", "metric_step": step_token, - "log_cmd": disclosure_cmd(f"ecc log {step_token} --errors", project), + "log_cmd": disclosure_cmd(f"ecc log {step_token} --errors", project, run_id), } return None diff --git a/chipcompiler/cli/main.py b/chipcompiler/cli/main.py index 9c5a6c70..7cf4a7be 100644 --- a/chipcompiler/cli/main.py +++ b/chipcompiler/cli/main.py @@ -37,6 +37,8 @@ def build_parser() -> argparse.ArgumentParser: _add_project_arg(status_parser) status_parser.add_argument("--json", action="store_true", help="JSON output") status_parser.add_argument("--jsonl", action="store_true", help="JSONL output") + status_parser.add_argument("--run-id", default=None, dest="run_id", + help="Run workspace selector") # ecc log log_parser = subparsers.add_parser("log", help="Inspect step logs") @@ -44,6 +46,8 @@ def build_parser() -> argparse.ArgumentParser: log_parser.add_argument("step", nargs="?", default=None, help="Step name") log_parser.add_argument("--errors", action="store_true", help="Filter error lines") log_parser.add_argument("--jsonl", action="store_true", help="JSONL output") + log_parser.add_argument("--run-id", default=None, dest="run_id", + help="Run workspace selector") # ecc metrics metrics_parser = subparsers.add_parser("metrics", help="Show step metrics") @@ -51,6 +55,37 @@ def build_parser() -> argparse.ArgumentParser: metrics_parser.add_argument("step", nargs="?", default=None, help="Step name") metrics_parser.add_argument("--json", action="store_true", help="JSON output") 
metrics_parser.add_argument("--jsonl", action="store_true", help="JSONL output") + metrics_parser.add_argument("--run-id", default=None, dest="run_id", + help="Run workspace selector") + + # ecc artifacts + artifacts_parser = subparsers.add_parser("artifacts", help="List generated files") + _add_project_arg(artifacts_parser) + artifacts_parser.add_argument("step", nargs="?", default=None, help="Step name") + artifacts_parser.add_argument("--json", action="store_true", help="JSON output") + artifacts_parser.add_argument("--jsonl", action="store_true", help="JSONL output") + artifacts_parser.add_argument("--run-id", default=None, dest="run_id", + help="Run workspace selector") + + # ecc config + config_parser = subparsers.add_parser("config", help="Show configuration") + _add_project_arg(config_parser) + config_parser.add_argument("step", nargs="?", default=None, help="Step name") + config_parser.add_argument("--resolved", action="store_true", required=True, + help="Show resolved configuration") + config_parser.add_argument("--json", action="store_true", help="JSON output") + config_parser.add_argument("--jsonl", action="store_true", help="JSONL output") + config_parser.add_argument("--run-id", default=None, dest="run_id", + help="Run workspace selector") + + # ecc diagnose + diagnose_parser = subparsers.add_parser("diagnose", help="Show run diagnostics") + _add_project_arg(diagnose_parser) + diagnose_parser.add_argument("step", nargs="?", default=None, help="Step name") + diagnose_parser.add_argument("--json", action="store_true", help="JSON output") + diagnose_parser.add_argument("--jsonl", action="store_true", help="JSONL output") + diagnose_parser.add_argument("--run-id", default=None, dest="run_id", + help="Run workspace selector") return parser @@ -84,6 +119,12 @@ def run(argv: Sequence[str] | None = None) -> int: return _cmd_log(args, project_dir, project) case "metrics": return _cmd_metrics(args, project_dir, project) + case "artifacts": + return _cmd_artifacts(args, project_dir, project) + case "config": + return _cmd_config(args, project_dir, project) + case "diagnose": + return _cmd_diagnose(args, project_dir, project) case _: parser.print_help() return 1 @@ -144,19 +185,19 @@ def _cmd_status(args, project_dir: str, project: str | None) -> int: from chipcompiler.cli.inspect import build_status_json, build_status_jsonl, build_status_lines from chipcompiler.cli.output import emit_json, emit_jsonl, emit_text - run_dir = _run_dir(project_dir) + run_dir, run_id = _resolve_run(project_dir, getattr(args, "run_id", None)) if getattr(args, "jsonl", False): - objects, rc = build_status_jsonl(run_dir) + objects, rc = build_status_jsonl(run_dir, run_id) emit_jsonl(objects) return rc if getattr(args, "json", False): - obj, rc = build_status_json(run_dir) + obj, rc = build_status_json(run_dir, run_id) emit_json(obj) return rc - lines, rc = build_status_lines(run_dir, project) + lines, rc = build_status_lines(run_dir, project, run_id) emit_text(lines) return rc @@ -165,14 +206,14 @@ def _cmd_log(args, project_dir: str, project: str | None) -> int: from chipcompiler.cli.inspect import build_log_jsonl, build_log_lines from chipcompiler.cli.output import emit_jsonl, emit_text - run_dir = _run_dir(project_dir) + run_dir, run_id = _resolve_run(project_dir, getattr(args, "run_id", None)) if getattr(args, "jsonl", False): - objects, rc = build_log_jsonl(run_dir, args.step, args.errors, project) + objects, rc = build_log_jsonl(run_dir, args.step, args.errors, project, run_id) emit_jsonl(objects) return 
rc - lines, rc = build_log_lines(run_dir, args.step, args.errors, project) + lines, rc = build_log_lines(run_dir, args.step, args.errors, project, run_id) emit_text(lines) return rc @@ -185,23 +226,112 @@ def _cmd_metrics(args, project_dir: str, project: str | None) -> int: ) from chipcompiler.cli.output import emit_json, emit_jsonl, emit_text - run_dir = _run_dir(project_dir) + run_dir, run_id = _resolve_run(project_dir, getattr(args, "run_id", None)) if getattr(args, "jsonl", False): - objects, rc = build_metrics_jsonl(run_dir, args.step, project) + objects, rc = build_metrics_jsonl(run_dir, args.step, project, run_id) emit_jsonl(objects) return rc if getattr(args, "json", False): - obj, rc = build_metrics_json(run_dir, args.step, project) + obj, rc = build_metrics_json(run_dir, args.step, project, run_id) emit_json(obj) return rc - lines, rc = build_metrics_lines(run_dir, args.step, project) + lines, rc = build_metrics_lines(run_dir, args.step, project, run_id) emit_text(lines) return rc +def _cmd_artifacts(args, project_dir: str, project: str | None) -> int: + from chipcompiler.cli.artifacts import ( + build_artifacts_json, + build_artifacts_jsonl, + build_artifacts_lines, + ) + from chipcompiler.cli.output import emit_json, emit_jsonl, emit_text + + run_dir, run_id = _resolve_run(project_dir, getattr(args, "run_id", None)) + + if getattr(args, "jsonl", False): + objects, rc = build_artifacts_jsonl(run_dir, args.step, project, run_id) + emit_jsonl(objects) + return rc + + if getattr(args, "json", False): + obj, rc = build_artifacts_json(run_dir, args.step, project, run_id) + emit_json(obj) + return rc + + lines, rc = build_artifacts_lines(run_dir, args.step, project, run_id) + emit_text(lines) + return rc + + +def _cmd_config(args, project_dir: str, project: str | None) -> int: + from chipcompiler.cli.config_view import ( + build_config_json, + build_config_jsonl, + build_config_lines, + build_project_config_items, + build_step_config_items, + ) + from chipcompiler.cli.output import emit_json, emit_jsonl, emit_text + + run_dir, run_id = _resolve_run(project_dir, getattr(args, "run_id", None)) + + if args.step is not None: + items, rc = build_step_config_items(run_dir, args.step, project, run_id) + else: + items, rc = build_project_config_items(project_dir, run_dir, project, run_id) + + if getattr(args, "jsonl", False): + objects, rc = build_config_jsonl(items) + emit_jsonl(objects) + return rc + + if getattr(args, "json", False): + obj, rc = build_config_json(items) + emit_json(obj) + return rc + + lines, rc = build_config_lines(items, project, run_id) + if lines: + emit_text(lines) + return rc + + +def _cmd_diagnose(args, project_dir: str, project: str | None) -> int: + from chipcompiler.cli.diagnose import ( + build_diagnose_json, + build_diagnose_jsonl, + build_diagnose_lines, + ) + from chipcompiler.cli.output import emit_json, emit_jsonl, emit_text + + run_dir, run_id = _resolve_run(project_dir, getattr(args, "run_id", None)) + + if getattr(args, "jsonl", False): + objects, rc = build_diagnose_jsonl(run_dir, args.step, project, run_id) + emit_jsonl(objects) + return rc + + if getattr(args, "json", False): + obj, rc = build_diagnose_json(run_dir, args.step, project, run_id) + emit_json(obj) + return rc + + lines, rc = build_diagnose_lines(run_dir, args.step, project, run_id) + if lines: + emit_text(lines) + return rc + + +def _resolve_run(project_dir: str, run_id: str | None = None) -> tuple[str, str | None]: + from chipcompiler.cli.inspect import resolve_run_dir + return 
resolve_run_dir(project_dir, run_id) + + def _run_dir(project_dir: str) -> str: return os.path.join(project_dir, "runs", "default") diff --git a/chipcompiler/cli/output.py b/chipcompiler/cli/output.py index 9cfb2854..f0273376 100644 --- a/chipcompiler/cli/output.py +++ b/chipcompiler/cli/output.py @@ -19,10 +19,14 @@ def format_line(**fields) -> str: return " ".join(parts) -def disclosure_cmd(command: str, project: str | None = None) -> str: +def disclosure_cmd(command: str, project: str | None = None, + run_id: str | None = None) -> str: + parts = [command] if project: - return f"{command} --project {shlex.quote(project)}" - return command + parts.append(f"--project {shlex.quote(project)}") + if run_id: + parts.append(f"--run-id {shlex.quote(run_id)}") + return " ".join(parts) def emit_text(lines: list[str], file=None) -> None: diff --git a/test/cli/test_cli_inspect.py b/test/cli/test_cli_inspect.py new file mode 100644 index 00000000..5c49c15b --- /dev/null +++ b/test/cli/test_cli_inspect.py @@ -0,0 +1,854 @@ +import json +import os + +import pytest + +from chipcompiler.cli import main as cli_main + + +# --------------------------------------------------------------------------- +# Helpers +# --------------------------------------------------------------------------- + + +def _create_valid_project(tmp_path, name="gcd", pdk_root=None): + project_dir = tmp_path / name + project_dir.mkdir(exist_ok=True) + (project_dir / "rtl").mkdir(exist_ok=True) + (project_dir / "constraints").mkdir(exist_ok=True) + (project_dir / "runs").mkdir(exist_ok=True) + + rtl_file = project_dir / "rtl" / "gcd.v" + rtl_file.write_text("module gcd(input clk); endmodule\n") + + if pdk_root is None: + pdk_root = tmp_path / "ics55" + pdk_root.mkdir(exist_ok=True) + + toml = f'''[design] +name = "{name}" +top = "{name}" +rtl = ["rtl/gcd.v"] +clock_port = "clk" +frequency_mhz = 100.0 + +[pdk] +name = "ics55" +root = "{pdk_root}" + +[flow] +preset = "rtl2gds" +run = "default" +''' + (project_dir / "ecc.toml").write_text(toml) + return str(project_dir) + + +def _create_flow_json(run_dir, steps=None): + home = os.path.join(run_dir, "home") + os.makedirs(home, exist_ok=True) + if steps is None: + steps = [ + {"name": "Synthesis", "tool": "yosys", "state": "Success", "runtime": "0:00:05"}, + {"name": "Floorplan", "tool": "ecc", "state": "Success", "runtime": "0:00:03"}, + {"name": "CTS", "tool": "ecc", "state": "Success", "runtime": "0:00:04"}, + ] + with open(os.path.join(home, "flow.json"), "w") as f: + json.dump({"steps": steps}, f) + + +def _create_step_dir(run_dir, step_name, tool, subdirs=None, files=None): + step_dir = os.path.join(run_dir, f"{step_name}_{tool}") + os.makedirs(step_dir, exist_ok=True) + if subdirs: + for sd in subdirs: + d = os.path.join(step_dir, sd) + os.makedirs(d, exist_ok=True) + if files: + for relpath, content in files.items(): + fpath = os.path.join(step_dir, relpath) + os.makedirs(os.path.dirname(fpath), exist_ok=True) + with open(fpath, "w") as f: + f.write(content) + return step_dir + + +def _has_disclosure(line: str) -> bool: + return '"ecc ' in line or "=ecc " in line + + +# =========================================================================== +# AC-1: Run-id resolution +# =========================================================================== + + +class TestRunIdResolution: + def test_status_default_run_id(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + _create_flow_json(run_dir) + + rc = 
cli_main.run(["status", "--run-id", "default", "--project", project_dir]) + assert rc == 0 + out = capsys.readouterr().out + assert "run=default" in out + + def test_status_simple_token_run_id(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "run_004") + os.makedirs(os.path.join(run_dir, "home"), exist_ok=True) + _create_flow_json(run_dir) + + rc = cli_main.run(["status", "--run-id", "run_004", "--project", project_dir]) + assert rc == 0 + out = capsys.readouterr().out + assert "run=run_004" in out + + def test_status_relative_path_run_id(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "sweeps", "sweep_001", "run_004") + _create_flow_json(run_dir) + + rc = cli_main.run( + ["status", "--run-id", "sweeps/sweep_001/run_004", "--project", project_dir] + ) + assert rc == 0 + out = capsys.readouterr().out + assert "run=sweeps/sweep_001/run_004" in out + + def test_status_absolute_path_run_id(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = tmp_path / "ecc-run-004" + _create_flow_json(str(run_dir)) + + rc = cli_main.run( + ["status", "--run-id", str(run_dir), "--project", project_dir] + ) + assert rc == 0 + out = capsys.readouterr().out + assert "run=" in out + + def test_status_missing_run_id(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + + rc = cli_main.run(["status", "--run-id", "nonexistent", "--project", project_dir]) + assert rc == 1 + out = capsys.readouterr().out + assert "status=missing" in out + + def test_log_preserves_run_id(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "run_005") + _create_flow_json(run_dir) + _create_step_dir(run_dir, "Synthesis", "yosys", subdirs=["log"], + files={"log/synthesis.log": "Error: something failed\n"}) + + rc = cli_main.run( + ["log", "synthesis", "--errors", "--run-id", "run_005", "--project", project_dir] + ) + assert rc == 0 + out = capsys.readouterr().out + assert "--run-id run_005" in out + + def test_metrics_preserves_run_id(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "run_006") + _create_flow_json(run_dir, [ + {"name": "CTS", "tool": "ecc", "state": "Success", "runtime": "0:00:04"}, + ]) + _create_step_dir(run_dir, "CTS", "ecc", subdirs=["analysis"], + files={"analysis/CTS_metrics.json": json.dumps({"Frequency [MHz]": 450.0})}) + + rc = cli_main.run( + ["metrics", "cts", "--run-id", "run_006", "--project", project_dir] + ) + assert rc == 0 + out = capsys.readouterr().out + assert "--run-id run_006" in out + + +# =========================================================================== +# AC-2: ecc artifacts +# =========================================================================== + + +class TestArtifacts: + def test_artifacts_all_steps(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + _create_flow_json(run_dir) + _create_step_dir(run_dir, "CTS", "ecc", subdirs=["output", "log"], + files={"output/design.def": "def content", + "log/cts.log": "log content"}) + + rc = cli_main.run(["artifacts", "--project", project_dir]) + assert rc == 0 + out = capsys.readouterr().out + assert "step=cts" in out + assert "role=output" in out + assert "role=log" in out + + def test_artifacts_single_step(self, tmp_path, capsys): + 
project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + _create_flow_json(run_dir) + _create_step_dir(run_dir, "CTS", "ecc", subdirs=["output"], + files={"output/design.def": "def content"}) + + rc = cli_main.run(["artifacts", "cts", "--project", project_dir]) + assert rc == 0 + out = capsys.readouterr().out + assert "step=cts" in out + assert "role=output" in out + + def test_artifacts_unknown_step(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + os.makedirs(run_dir, exist_ok=True) + + rc = cli_main.run(["artifacts", "nonexistent", "--project", project_dir]) + assert rc == 1 + out = capsys.readouterr().out + assert "status=unknown_step" in out + + def test_artifacts_empty_known_step(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + _create_flow_json(run_dir) + _create_step_dir(run_dir, "CTS", "ecc", subdirs=["output"]) + + rc = cli_main.run(["artifacts", "cts", "--project", project_dir]) + assert rc == 0 + out = capsys.readouterr().out + assert "artifacts_status=none" in out + + def test_artifacts_json(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + _create_flow_json(run_dir) + _create_step_dir(run_dir, "CTS", "ecc", subdirs=["output"], + files={"output/design.def": "def content"}) + + rc = cli_main.run(["artifacts", "--json", "--project", project_dir]) + assert rc == 0 + data = json.loads(capsys.readouterr().out) + assert "artifacts" in data + assert len(data["artifacts"]) > 0 + assert data["artifacts"][0]["kind"] == "artifact" + + def test_artifacts_jsonl(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + _create_flow_json(run_dir) + _create_step_dir(run_dir, "CTS", "ecc", subdirs=["output", "log"], + files={"output/design.def": "def content", + "log/cts.log": "log content"}) + + rc = cli_main.run(["artifacts", "--jsonl", "--project", project_dir]) + assert rc == 0 + objects = [json.loads(ln) for ln in capsys.readouterr().out.strip().split("\n")] + assert len(objects) == 2 + assert all(o["kind"] == "artifact" for o in objects) + + def test_artifacts_with_run_id(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "sweeps", "sweep_001", "run_004") + _create_flow_json(run_dir) + _create_step_dir(run_dir, "CTS", "ecc", subdirs=["output"], + files={"output/design.def": "def content"}) + + rc = cli_main.run( + ["artifacts", "--run-id", "sweeps/sweep_001/run_004", "--project", project_dir] + ) + assert rc == 0 + out = capsys.readouterr().out + assert "step=cts" in out + + def test_artifacts_derives_roles_from_dirs(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + _create_flow_json(run_dir) + _create_step_dir(run_dir, "CTS", "ecc", + subdirs=["config", "output", "report", "log", "analysis"], + files={"config/cts_config.json": "{}", + "output/design.def": "def", + "report/timing.rpt": "rpt", + "log/cts.log": "log", + "analysis/CTS_metrics.json": "{}"}) + + rc = cli_main.run(["artifacts", "cts", "--json", "--project", project_dir]) + assert rc == 0 + data = json.loads(capsys.readouterr().out) + roles = {a["role"] for a in data["artifacts"]} + assert roles == {"config", "output", 
"report", "log", "analysis"} + + +# =========================================================================== +# AC-3: ecc config --resolved (project level) +# =========================================================================== + + +class TestConfigResolved: + def test_config_resolved_project(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + + rc = cli_main.run(["config", "--resolved", "--project", project_dir]) + assert rc == 0 + out = capsys.readouterr().out + assert "config=design.name" in out + assert "scope=project" in out + assert "config=pdk.name" in out + assert "config=run_dir" in out + + def test_config_resolved_json(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + + rc = cli_main.run(["config", "--resolved", "--json", "--project", project_dir]) + assert rc == 0 + data = json.loads(capsys.readouterr().out) + assert "config" in data + keys = [item["key"] for item in data["config"]] + assert "design.name" in keys + assert "pdk.name" in keys + assert "run_dir" in keys + + def test_config_resolved_jsonl(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + + rc = cli_main.run(["config", "--resolved", "--jsonl", "--project", project_dir]) + assert rc == 0 + objects = [json.loads(ln) for ln in capsys.readouterr().out.strip().split("\n")] + keys = [o["key"] for o in objects] + assert "design.name" in keys + + def test_config_resolved_pdk_root_from_env(self, tmp_path, capsys, monkeypatch): + pdk_root = tmp_path / "ics55_env" + pdk_root.mkdir() + monkeypatch.setenv("CHIPCOMPILER_ICS55_PDK_ROOT", str(pdk_root)) + + project_dir = _create_valid_project(tmp_path, pdk_root="") + + rc = cli_main.run(["config", "--resolved", "--json", "--project", project_dir]) + assert rc == 0 + data = json.loads(capsys.readouterr().out) + pdk_item = next(i for i in data["config"] if i["key"] == "pdk.root") + assert pdk_item["source"] == "env" + + def test_config_resolved_run_id(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + + rc = cli_main.run( + ["config", "--resolved", "--run-id", "sweeps/sweep_001/run_004", + "--json", "--project", project_dir] + ) + assert rc == 0 + data = json.loads(capsys.readouterr().out) + run_item = next(i for i in data["config"] if i["key"] == "run_dir") + assert "sweep_001" in run_item["value"] or "sweep_001" in run_item.get("resolved", "") + + def test_config_missing_config(self, tmp_path, capsys): + project_dir = tmp_path / "empty_project" + project_dir.mkdir() + + rc = cli_main.run(["config", "--resolved", "--project", str(project_dir)]) + assert rc == 1 + + def test_config_requires_resolved(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + + with pytest.raises(SystemExit): + cli_main.run(["config", "--project", project_dir]) + + +# =========================================================================== +# AC-4: ecc config --resolved +# =========================================================================== + + +class TestConfigStepResolved: + def test_config_step_lists_files(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + _create_flow_json(run_dir) + _create_step_dir(run_dir, "CTS", "ecc", subdirs=["config"], + files={"config/cts_default_config.json": "{}", + "config/run.tcl": "echo hi"}) + + rc = cli_main.run(["config", "cts", "--resolved", "--project", project_dir]) + assert rc == 0 + out = capsys.readouterr().out + assert "step=cts" in out + assert 
"scope=step" in out + assert "cts_default_config.json" in out + + def test_config_step_json(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + _create_flow_json(run_dir) + _create_step_dir(run_dir, "CTS", "ecc", subdirs=["config"], + files={"config/cts_config.json": "{}"}) + + rc = cli_main.run(["config", "cts", "--resolved", "--json", "--project", project_dir]) + assert rc == 0 + data = json.loads(capsys.readouterr().out) + assert "config" in data + assert all(item["scope"] == "step" for item in data["config"]) + + def test_config_step_unknown_step(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + os.makedirs(run_dir, exist_ok=True) + + rc = cli_main.run(["config", "nonexistent", "--resolved", "--project", project_dir]) + assert rc == 1 + + def test_config_step_no_config_files(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + _create_flow_json(run_dir) + _create_step_dir(run_dir, "CTS", "ecc", subdirs=["output"]) + + rc = cli_main.run(["config", "cts", "--resolved", "--project", project_dir]) + assert rc == 0 + + +# =========================================================================== +# AC-5: ecc diagnose +# =========================================================================== + + +class TestDiagnose: + def test_diagnose_missing_run(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + + rc = cli_main.run(["diagnose", "--project", project_dir]) + assert rc == 1 + out = capsys.readouterr().out + assert "issue=missing_run" in out + assert "severity=error" in out + + def test_diagnose_invalid_flow_json(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + home = os.path.join(run_dir, "home") + os.makedirs(home, exist_ok=True) + with open(os.path.join(home, "flow.json"), "w") as f: + f.write("NOT VALID JSON{{{") + + rc = cli_main.run(["diagnose", "--project", project_dir]) + assert rc == 1 + out = capsys.readouterr().out + assert "issue=invalid_flow_json" in out + + def test_diagnose_failed_step(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + _create_flow_json(run_dir, [ + {"name": "CTS", "tool": "ecc", "state": "Incomplete", "runtime": "0:00:04"}, + ]) + _create_step_dir(run_dir, "CTS", "ecc", subdirs=["log", "analysis"], + files={"log/cts.log": "Error: failed\n", + "analysis/CTS_metrics.json": "{}"}) + + rc = cli_main.run(["diagnose", "--project", project_dir]) + assert rc == 1 + out = capsys.readouterr().out + assert "issue=failed_step" in out + assert "severity=error" in out + + def test_diagnose_ongoing_step_warning(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + _create_flow_json(run_dir, [ + {"name": "CTS", "tool": "ecc", "state": "Ongoing", "runtime": ""}, + ]) + _create_step_dir(run_dir, "CTS", "ecc", subdirs=["log", "output", "analysis", "config"], + files={"log/cts.log": "running\n", + "output/design.def": "def", + "analysis/CTS_metrics.json": "{}", + "config/cts_config.json": "{}"}) + + rc = cli_main.run(["diagnose", "--project", project_dir]) + assert rc == 0 + out = capsys.readouterr().out + assert "issue=ongoing_step" in out + assert "severity=warning" in out + + 
def test_diagnose_unstarted_step_info(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + _create_flow_json(run_dir, [ + {"name": "CTS", "tool": "ecc", "state": "Unstart", "runtime": ""}, + ]) + _create_step_dir(run_dir, "CTS", "ecc", subdirs=["log", "output", "analysis", "config"], + files={"log/cts.log": "", + "output/design.def": "def", + "analysis/CTS_metrics.json": "{}", + "config/cts_config.json": "{}"}) + + rc = cli_main.run(["diagnose", "--project", project_dir]) + assert rc == 0 + out = capsys.readouterr().out + assert "issue=unstarted_step" in out + assert "severity=info" in out + + def test_diagnose_log_errors_count(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + _create_flow_json(run_dir, [ + {"name": "CTS", "tool": "ecc", "state": "Success", "runtime": "0:00:04"}, + ]) + _create_step_dir(run_dir, "CTS", "ecc", subdirs=["log", "output", "analysis", "config"], + files={"log/cts.log": "Error: bad thing\nError: other bad\nok line\n", + "output/design.def": "def", + "analysis/CTS_metrics.json": "{}", + "config/cts_config.json": "{}"}) + + rc = cli_main.run(["diagnose", "--project", project_dir]) + assert rc == 1 + out = capsys.readouterr().out + assert "issue=log_errors" in out + assert "count=2" in out + + def test_diagnose_missing_metrics_warning(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + _create_flow_json(run_dir, [ + {"name": "CTS", "tool": "ecc", "state": "Success", "runtime": "0:00:04"}, + ]) + _create_step_dir(run_dir, "CTS", "ecc", subdirs=["log", "output", "config"], + files={"log/cts.log": "ok\n", + "output/design.def": "def", + "config/cts_config.json": "{}"}) + + rc = cli_main.run(["diagnose", "--project", project_dir]) + assert rc == 0 + out = capsys.readouterr().out + assert "issue=missing_metrics" in out + assert "severity=warning" in out + + def test_diagnose_missing_artifacts_warning(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + _create_flow_json(run_dir, [ + {"name": "CTS", "tool": "ecc", "state": "Success", "runtime": "0:00:04"}, + ]) + _create_step_dir(run_dir, "CTS", "ecc", subdirs=["log", "analysis", "config"], + files={"log/cts.log": "ok\n", + "analysis/CTS_metrics.json": "{}", + "config/cts_config.json": "{}"}) + # Remove investigation role dirs to trigger missing_artifacts + import shutil + shutil.rmtree(os.path.join(run_dir, "CTS_ecc", "analysis")) + + rc = cli_main.run(["diagnose", "--project", project_dir]) + assert rc == 0 + out = capsys.readouterr().out + assert "issue=missing_artifacts" in out + assert "severity=warning" in out + + def test_diagnose_config_unavailable_info(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + _create_flow_json(run_dir, [ + {"name": "CTS", "tool": "ecc", "state": "Success", "runtime": "0:00:04"}, + ]) + _create_step_dir(run_dir, "CTS", "ecc", subdirs=["log", "output", "analysis"], + files={"log/cts.log": "ok\n", + "output/design.def": "def", + "analysis/CTS_metrics.json": "{}"}) + + rc = cli_main.run(["diagnose", "--project", project_dir]) + assert rc == 0 + out = capsys.readouterr().out + assert "issue=config_unavailable" in out + assert "severity=info" in out + + def test_diagnose_clean_run(self, tmp_path, 
capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + _create_flow_json(run_dir, [ + {"name": "CTS", "tool": "ecc", "state": "Success", "runtime": "0:00:04"}, + ]) + _create_step_dir(run_dir, "CTS", "ecc", + subdirs=["log", "output", "analysis", "config"], + files={"log/cts.log": "ok\n", + "output/design.def": "def", + "analysis/CTS_metrics.json": json.dumps({"Frequency [MHz]": 450.0}), + "config/cts_config.json": "{}"}) + + rc = cli_main.run(["diagnose", "--project", project_dir]) + assert rc == 0 + out = capsys.readouterr().out + assert "diagnose=clean" in out + + def test_diagnose_step_filter(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + _create_flow_json(run_dir, [ + {"name": "Synthesis", "tool": "yosys", "state": "Success", "runtime": "0:00:05"}, + {"name": "CTS", "tool": "ecc", "state": "Incomplete", "runtime": "0:00:04"}, + ]) + _create_step_dir(run_dir, "Synthesis", "yosys", subdirs=["output", "log", "analysis", "config"], + files={"output/synth.v": "verilog", + "log/synthesis.log": "ok\n", + "analysis/Synthesis_metrics.json": "{}", + "config/config.json": "{}"}) + _create_step_dir(run_dir, "CTS", "ecc", subdirs=["log"], + files={"log/cts.log": "Error: failed\n"}) + + rc = cli_main.run(["diagnose", "cts", "--project", project_dir]) + assert rc == 1 + out = capsys.readouterr().out + assert "issue=failed_step" in out + assert "step=cts" in out + assert "step=synthesis" not in out + + def test_diagnose_unknown_step(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + _create_flow_json(run_dir) + + rc = cli_main.run(["diagnose", "nonexistent", "--project", project_dir]) + assert rc == 1 + out = capsys.readouterr().out + assert "issue=unknown_step" in out + + def test_diagnose_no_repair_suggestions(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + _create_flow_json(run_dir, [ + {"name": "CTS", "tool": "ecc", "state": "Incomplete", "runtime": "0:00:04"}, + ]) + _create_step_dir(run_dir, "CTS", "ecc", subdirs=["log"], + files={"log/cts.log": "Error: failed\n"}) + + rc = cli_main.run(["diagnose", "--project", project_dir]) + assert rc == 1 + out = capsys.readouterr().out + assert "suggest" not in out.lower() + assert "fix" not in out.lower() + assert "recommend" not in out.lower() + + def test_diagnose_json(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + _create_flow_json(run_dir, [ + {"name": "CTS", "tool": "ecc", "state": "Incomplete", "runtime": "0:00:04"}, + ]) + _create_step_dir(run_dir, "CTS", "ecc", subdirs=["log"], + files={"log/cts.log": "Error: failed\n"}) + + rc = cli_main.run(["diagnose", "--json", "--project", project_dir]) + assert rc == 1 + data = json.loads(capsys.readouterr().out) + assert "issues" in data + assert any(i["issue"] == "failed_step" for i in data["issues"]) + + def test_diagnose_jsonl(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + _create_flow_json(run_dir, [ + {"name": "CTS", "tool": "ecc", "state": "Incomplete", "runtime": "0:00:04"}, + ]) + _create_step_dir(run_dir, "CTS", "ecc", subdirs=["log"], + files={"log/cts.log": "Error: failed\n"}) + + rc = cli_main.run(["diagnose", "--jsonl", 
"--project", project_dir]) + assert rc == 1 + objects = [json.loads(ln) for ln in capsys.readouterr().out.strip().split("\n")] + assert any(o["issue"] == "failed_step" for o in objects) + + def test_diagnose_with_run_id(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "run_007") + _create_flow_json(run_dir, [ + {"name": "CTS", "tool": "ecc", "state": "Success", "runtime": "0:00:04"}, + ]) + _create_step_dir(run_dir, "CTS", "ecc", + subdirs=["log", "output", "analysis", "config"], + files={"log/cts.log": "ok\n", + "output/design.def": "def", + "analysis/CTS_metrics.json": "{}", + "config/cts_config.json": "{}"}) + + rc = cli_main.run( + ["diagnose", "--run-id", "run_007", "--project", project_dir] + ) + assert rc == 0 + out = capsys.readouterr().out + assert "diagnose=clean" in out + + +# =========================================================================== +# AC-6: Diagnose exit codes +# =========================================================================== + + +class TestDiagnoseExitCodes: + def test_error_issue_returns_nonzero(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + _create_flow_json(run_dir, [ + {"name": "CTS", "tool": "ecc", "state": "Incomplete", "runtime": "0:00:04"}, + ]) + _create_step_dir(run_dir, "CTS", "ecc", subdirs=["log"], + files={"log/cts.log": "Error: failed\n"}) + + rc = cli_main.run(["diagnose", "--project", project_dir]) + assert rc == 1 + + def test_warning_only_returns_zero(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + _create_flow_json(run_dir, [ + {"name": "CTS", "tool": "ecc", "state": "Ongoing", "runtime": ""}, + ]) + _create_step_dir(run_dir, "CTS", "ecc", + subdirs=["log", "output", "analysis", "config"], + files={"log/cts.log": "running\n", + "output/design.def": "def", + "analysis/CTS_metrics.json": "{}", + "config/cts_config.json": "{}"}) + + rc = cli_main.run(["diagnose", "--project", project_dir]) + assert rc == 0 + + def test_clean_run_returns_zero(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + _create_flow_json(run_dir, [ + {"name": "CTS", "tool": "ecc", "state": "Success", "runtime": "0:00:04"}, + ]) + _create_step_dir(run_dir, "CTS", "ecc", + subdirs=["log", "output", "analysis", "config"], + files={"log/cts.log": "ok\n", + "output/design.def": "def", + "analysis/CTS_metrics.json": "{}", + "config/cts_config.json": "{}"}) + + rc = cli_main.run(["diagnose", "--project", project_dir]) + assert rc == 0 + + def test_failed_step_not_zero(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + _create_flow_json(run_dir, [ + {"name": "CTS", "tool": "ecc", "state": "Incomplete", "runtime": "0:00:04"}, + ]) + _create_step_dir(run_dir, "CTS", "ecc") + + rc = cli_main.run(["diagnose", "--project", project_dir]) + assert rc != 0 + + +# =========================================================================== +# AC-7: Disclosure commands in Phase 2 output +# =========================================================================== + + +class TestDisclosure: + def test_artifacts_lines_have_disclosure(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + 
_create_flow_json(run_dir) + _create_step_dir(run_dir, "CTS", "ecc", subdirs=["output"], + files={"output/design.def": "def content"}) + + rc = cli_main.run(["artifacts", "cts", "--project", project_dir]) + assert rc == 0 + out = capsys.readouterr().out + for line in out.strip().split("\n"): + if line.strip(): + assert _has_disclosure(line), f"Missing disclosure in: {line}" + + def test_config_resolved_lines_have_disclosure(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + + rc = cli_main.run(["config", "--resolved", "--project", project_dir]) + assert rc == 0 + out = capsys.readouterr().out + for line in out.strip().split("\n"): + if line.strip(): + assert _has_disclosure(line), f"Missing disclosure in: {line}" + + def test_diagnose_lines_have_disclosure(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + + rc = cli_main.run(["diagnose", "--project", project_dir]) + assert rc == 1 + out = capsys.readouterr().out + for line in out.strip().split("\n"): + if line.strip(): + assert _has_disclosure(line), f"Missing disclosure in: {line}" + + def test_phase2_disclosure_preserves_run_id(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "run_008") + _create_flow_json(run_dir, [ + {"name": "CTS", "tool": "ecc", "state": "Incomplete", "runtime": "0:00:04"}, + ]) + _create_step_dir(run_dir, "CTS", "ecc", subdirs=["log"], + files={"log/cts.log": "Error: fail\n"}) + + rc = cli_main.run( + ["diagnose", "--run-id", "run_008", "--project", project_dir] + ) + assert rc == 1 + out = capsys.readouterr().out + assert "--run-id run_008" in out + + def test_artifacts_disclosure_preserves_project(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + _create_flow_json(run_dir) + _create_step_dir(run_dir, "CTS", "ecc", subdirs=["output"], + files={"output/design.def": "def content"}) + + rc = cli_main.run(["artifacts", "cts", "--project", project_dir]) + assert rc == 0 + out = capsys.readouterr().out + assert f"--project {project_dir}" in out + + +# =========================================================================== +# AC-8: Read-only and CLI-local +# =========================================================================== + + +class TestReadOnly: + def test_artifacts_does_not_modify_files(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + _create_flow_json(run_dir) + _create_step_dir(run_dir, "CTS", "ecc", subdirs=["output"], + files={"output/design.def": "original"}) + + before_mtime = os.path.getmtime( + os.path.join(run_dir, "CTS_ecc", "output", "design.def") + ) + + rc = cli_main.run(["artifacts", "--project", project_dir]) + assert rc == 0 + + after_mtime = os.path.getmtime( + os.path.join(run_dir, "CTS_ecc", "output", "design.def") + ) + assert before_mtime == after_mtime + + def test_no_persistent_metadata_files(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + _create_flow_json(run_dir) + _create_step_dir(run_dir, "CTS", "ecc", subdirs=["output"], + files={"output/design.def": "def content"}) + + cli_main.run(["artifacts", "--project", project_dir]) + cli_main.run(["config", "--resolved", "--project", project_dir]) + cli_main.run(["diagnose", "--project", project_dir]) + + assert not os.path.exists(os.path.join(project_dir, "issues.json")) + 
assert not os.path.exists(os.path.join(project_dir, "artifacts.json")) + assert not os.path.exists(os.path.join(project_dir, "resolved_config.json")) + assert not os.path.exists(os.path.join(run_dir, "issues.json")) + assert not os.path.exists(os.path.join(run_dir, "artifacts.json")) From 08df9caff083845282bc3366f8cb1c17c562cdc0 Mon Sep 17 00:00:00 2001 From: Emin Date: Sat, 2 May 2026 10:29:02 +0800 Subject: [PATCH 026/104] fix(cli): address Codex review findings for artifacts, config, diagnose, and run-id - Fix resolve_run_dir: preserve explicit --run-id default in disclosures, handle multi-segment project-relative paths (sweeps/sweep_001/run_004) - Fix artifact/config path rendering: use project_dir as base instead of deriving from run_dir, correct for nested and absolute runs - Fix config --resolved: validate semantically invalid ecc.toml (missing required fields, unsupported pdk/preset), not just TOML parse errors - Fix empty step-config: emit config_status=none sentinel with step token and artifacts disclosure command in text/JSON/JSONL - Fix diagnose step identity: use union of flow.json steps and discovered step dirs, so flow-only steps without directories are diagnosed properly - Fix config-role artifact disclosure: all artifact roles now include at least one disclosure command in text output - Add 10 regression tests covering all Codex findings --- chipcompiler/cli/artifacts.py | 22 +++-- chipcompiler/cli/config_view.py | 39 +++++++- chipcompiler/cli/diagnose.py | 12 ++- chipcompiler/cli/inspect.py | 8 +- chipcompiler/cli/main.py | 8 +- test/cli/test_cli_inspect.py | 162 ++++++++++++++++++++++++++++++++ 6 files changed, 234 insertions(+), 17 deletions(-) diff --git a/chipcompiler/cli/artifacts.py b/chipcompiler/cli/artifacts.py index 9fd56364..319db680 100644 --- a/chipcompiler/cli/artifacts.py +++ b/chipcompiler/cli/artifacts.py @@ -11,10 +11,12 @@ def _role_from_dirname(dirname: str) -> str: def discover_artifacts(run_dir: str, step_token: str | None = None, project: str | None = None, - run_id: str | None = None) -> tuple[list[dict], int]: + run_id: str | None = None, + project_dir: str | None = None) -> tuple[list[dict], int]: from chipcompiler.cli.inspect import discover_step_dirs from chipcompiler.cli.output import format_line + base_dir = project_dir or os.path.dirname(os.path.dirname(run_dir)) step_dirs = discover_step_dirs(run_dir) if step_token is not None: @@ -41,7 +43,7 @@ def discover_artifacts(run_dir: str, step_token: str | None = None, "step": token, "role": role, "run": run_id or "default", - "path": os.path.relpath(fpath, os.path.dirname(os.path.dirname(run_dir))), + "path": os.path.relpath(fpath, base_dir), "exists": True, "inspect_cmd": disclosure_cmd(f"ecc artifacts {token} --json", project, run_id), }) @@ -57,10 +59,11 @@ def discover_artifacts(run_dir: str, step_token: str | None = None, def build_artifacts_lines(run_dir: str, step_token: str | None = None, project: str | None = None, - run_id: str | None = None) -> tuple[list[str], int]: + run_id: str | None = None, + project_dir: str | None = None) -> tuple[list[str], int]: from chipcompiler.cli.output import format_line - artifacts, rc = discover_artifacts(run_dir, step_token, project, run_id) + artifacts, rc = discover_artifacts(run_dir, step_token, project, run_id, project_dir) if rc != 0: if artifacts and artifacts[0].get("status") == "unknown_step": s = artifacts[0]["step"] @@ -92,6 +95,7 @@ def build_artifacts_lines(run_dir: str, step_token: str | None = None, "step": a["step"], "role": a["role"], 
"path": a["path"], + "inspect": disclosure_cmd(f"ecc artifacts {a['step']} --json", project, run_id), } if a["role"] == "analysis": line_fields["metrics"] = disclosure_cmd(f"ecc metrics {a['step']}", project, run_id) @@ -105,8 +109,9 @@ def build_artifacts_lines(run_dir: str, step_token: str | None = None, def build_artifacts_json(run_dir: str, step_token: str | None = None, project: str | None = None, - run_id: str | None = None) -> tuple[dict, int]: - artifacts, rc = discover_artifacts(run_dir, step_token, project, run_id) + run_id: str | None = None, + project_dir: str | None = None) -> tuple[dict, int]: + artifacts, rc = discover_artifacts(run_dir, step_token, project, run_id, project_dir) if rc != 0: if artifacts and artifacts[0].get("status") == "unknown_step": return {"status": "unknown_step", "step": artifacts[0]["step"]}, 1 @@ -122,8 +127,9 @@ def build_artifacts_json(run_dir: str, step_token: str | None = None, def build_artifacts_jsonl(run_dir: str, step_token: str | None = None, project: str | None = None, - run_id: str | None = None) -> tuple[list[dict], int]: - artifacts, rc = discover_artifacts(run_dir, step_token, project, run_id) + run_id: str | None = None, + project_dir: str | None = None) -> tuple[list[dict], int]: + artifacts, rc = discover_artifacts(run_dir, step_token, project, run_id, project_dir) if rc != 0: return artifacts, rc diff --git a/chipcompiler/cli/config_view.py b/chipcompiler/cli/config_view.py index b0d14d7b..4f96ec65 100644 --- a/chipcompiler/cli/config_view.py +++ b/chipcompiler/cli/config_view.py @@ -7,6 +7,8 @@ def build_project_config_items(project_dir: str, run_dir: str, project: str | None = None, run_id: str | None = None) -> tuple[list[dict], int]: from chipcompiler.cli.config import ( + SUPPORTED_FLOW_PRESETS, + SUPPORTED_PDK_NAMES, find_config_path, load_project_config, resolve_pdk_root, @@ -21,6 +23,15 @@ def build_project_config_items(project_dir: str, run_dir: str, if getattr(cfg, "_toml_error", None): return [{"kind": "error", "status": "invalid_config"}], 1 + if not cfg.design_name or not cfg.design_top or not cfg.flow_preset: + return [{"kind": "error", "status": "invalid_config"}], 1 + + if cfg.pdk_name and cfg.pdk_name not in SUPPORTED_PDK_NAMES: + return [{"kind": "error", "status": "invalid_config"}], 1 + + if cfg.flow_preset and cfg.flow_preset not in SUPPORTED_FLOW_PRESETS: + return [{"kind": "error", "status": "invalid_config"}], 1 + pdk_root = resolve_pdk_root(cfg) display_run = run_id or "default" @@ -69,11 +80,12 @@ def build_project_config_items(project_dir: str, run_dir: str, }) # Run directory + run_dir_value = os.path.relpath(run_dir, project_dir) if not os.path.isabs(run_dir) else run_dir items.append({ "kind": "config", "scope": "project", "key": "run_dir", - "value": os.path.relpath(run_dir, project_dir) if not os.path.isabs(run_dir) else run_dir, + "value": run_dir_value, "resolved": os.path.abspath(run_dir), "source": "resolved", }) @@ -83,9 +95,11 @@ def build_project_config_items(project_dir: str, run_dir: str, def build_step_config_items(run_dir: str, step_token: str | None, project: str | None = None, - run_id: str | None = None) -> tuple[list[dict], int]: + run_id: str | None = None, + project_dir: str | None = None) -> tuple[list[dict], int]: from chipcompiler.cli.inspect import discover_step_dirs + base_dir = project_dir or os.path.dirname(os.path.dirname(run_dir)) step_dirs = discover_step_dirs(run_dir) if step_token not in step_dirs: @@ -105,11 +119,16 @@ def build_step_config_items(run_dir: str, step_token: 
str | None, "step": step_token, "role": "config", "run": display_run, - "path": os.path.relpath(fpath, os.path.dirname(os.path.dirname(run_dir))), + "path": os.path.relpath(fpath, base_dir), "source": "step_config", "inspect_cmd": disclosure_cmd(f"ecc artifacts {step_token} --json", project, run_id), }) + if not items: + return [{"kind": "config", "scope": "step", "step": step_token, + "config_status": "none", + "artifacts": disclosure_cmd(f"ecc artifacts {step_token}", project, run_id)}], 0 + return items, 0 @@ -120,6 +139,14 @@ def build_config_lines(items: list[dict], project: str | None = None, if not items: return [], 0 + if items[0].get("config_status") == "none": + s = items[0] + return [format_line( + step=s["step"], + config_status="none", + artifacts=s.get("artifacts"), + )], 0 + if items[0].get("status") in ("unknown_step", "missing_config", "invalid_config"): if items[0].get("status") == "unknown_step": return [format_line( @@ -167,6 +194,9 @@ def build_config_json(items: list[dict]) -> tuple[dict, int]: if items and items[0].get("status") in ("unknown_step", "missing_config", "invalid_config"): return items[0], 1 + if items and items[0].get("config_status") == "none": + return items[0], 0 + if not items: return {"config_status": "none"}, 0 @@ -177,6 +207,9 @@ def build_config_jsonl(items: list[dict]) -> tuple[list[dict], int]: if items and items[0].get("status") in ("unknown_step", "missing_config", "invalid_config"): return items, 1 + if items and items[0].get("config_status") == "none": + return items, 0 + if not items: return [{"config_status": "none"}], 0 diff --git a/chipcompiler/cli/diagnose.py b/chipcompiler/cli/diagnose.py index 0124c8ee..3f98c2eb 100644 --- a/chipcompiler/cli/diagnose.py +++ b/chipcompiler/cli/diagnose.py @@ -93,8 +93,11 @@ def build_diagnose_issues(run_dir: str, step_token: str | None = None, steps = _safe_steps(flow_data) step_dirs = discover_step_dirs(run_dir) + flow_tokens = {normalize_step_name(s.get("name", "")) for s in steps} + known_tokens = flow_tokens | set(step_dirs.keys()) + if step_token is not None: - if step_token not in step_dirs: + if step_token not in known_tokens: issues.append(_make_issue("unknown_step", "error", display_run, step=step_token, project=project, run_id=run_id)) return issues, 1 @@ -135,6 +138,13 @@ def build_diagnose_issues(run_dir: str, step_token: str | None = None, if not _has_config_files(step_dirs[token]): issues.append(_make_issue("config_unavailable", "info", display_run, step=token, project=project, run_id=run_id)) + else: + issues.append(_make_issue("missing_metrics", "warning", display_run, + step=token, project=project, run_id=run_id)) + issues.append(_make_issue("missing_artifacts", "warning", display_run, + step=token, project=project, run_id=run_id)) + issues.append(_make_issue("config_unavailable", "info", display_run, + step=token, project=project, run_id=run_id)) return issues, 0 diff --git a/chipcompiler/cli/inspect.py b/chipcompiler/cli/inspect.py index 02874e43..5eda4495 100644 --- a/chipcompiler/cli/inspect.py +++ b/chipcompiler/cli/inspect.py @@ -11,12 +11,18 @@ def resolve_run_dir(project_dir: str, run_id: str | None = None) -> tuple[str, str | None]: - if not run_id or run_id == "default": + if not run_id: return os.path.join(project_dir, "runs", "default"), None + if run_id == "default": + return os.path.join(project_dir, "runs", "default"), "default" + if os.path.isabs(run_id): return run_id, run_id + if os.sep in run_id or "/" in run_id: + return os.path.join(project_dir, "runs", run_id), 
run_id + return os.path.join(project_dir, "runs", run_id), run_id diff --git a/chipcompiler/cli/main.py b/chipcompiler/cli/main.py index 7cf4a7be..2118f15d 100644 --- a/chipcompiler/cli/main.py +++ b/chipcompiler/cli/main.py @@ -254,16 +254,16 @@ def _cmd_artifacts(args, project_dir: str, project: str | None) -> int: run_dir, run_id = _resolve_run(project_dir, getattr(args, "run_id", None)) if getattr(args, "jsonl", False): - objects, rc = build_artifacts_jsonl(run_dir, args.step, project, run_id) + objects, rc = build_artifacts_jsonl(run_dir, args.step, project, run_id, project_dir) emit_jsonl(objects) return rc if getattr(args, "json", False): - obj, rc = build_artifacts_json(run_dir, args.step, project, run_id) + obj, rc = build_artifacts_json(run_dir, args.step, project, run_id, project_dir) emit_json(obj) return rc - lines, rc = build_artifacts_lines(run_dir, args.step, project, run_id) + lines, rc = build_artifacts_lines(run_dir, args.step, project, run_id, project_dir) emit_text(lines) return rc @@ -281,7 +281,7 @@ def _cmd_config(args, project_dir: str, project: str | None) -> int: run_dir, run_id = _resolve_run(project_dir, getattr(args, "run_id", None)) if args.step is not None: - items, rc = build_step_config_items(run_dir, args.step, project, run_id) + items, rc = build_step_config_items(run_dir, args.step, project, run_id, project_dir) else: items, rc = build_project_config_items(project_dir, run_dir, project, run_id) diff --git a/test/cli/test_cli_inspect.py b/test/cli/test_cli_inspect.py index 5c49c15b..ef5cfa1f 100644 --- a/test/cli/test_cli_inspect.py +++ b/test/cli/test_cli_inspect.py @@ -852,3 +852,165 @@ def test_no_persistent_metadata_files(self, tmp_path, capsys): assert not os.path.exists(os.path.join(project_dir, "resolved_config.json")) assert not os.path.exists(os.path.join(run_dir, "issues.json")) assert not os.path.exists(os.path.join(run_dir, "artifacts.json")) + + +# =========================================================================== +# Regression tests for Codex review findings (Round 1) +# =========================================================================== + + +class TestRunIdDisclosure: + def test_explicit_default_preserved_in_disclosure(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + _create_flow_json(run_dir) + + rc = cli_main.run(["status", "--run-id", "default", "--project", project_dir]) + assert rc == 0 + out = capsys.readouterr().out + assert "--run-id default" in out + + def test_project_relative_run_id_resolves(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "sweeps", "sweep_001", "run_004") + _create_flow_json(run_dir) + + rc = cli_main.run( + ["status", "--run-id", "sweeps/sweep_001/run_004", "--project", project_dir] + ) + assert rc == 0 + out = capsys.readouterr().out + assert "run=sweeps/sweep_001/run_004" in out + + +class TestArtifactPaths: + def test_nested_run_artifact_paths(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "sweeps", "sweep_001", "run_004") + _create_flow_json(run_dir) + _create_step_dir(run_dir, "CTS", "ecc", subdirs=["output"], + files={"output/design.def": "def content"}) + + rc = cli_main.run( + ["artifacts", "--run-id", "sweeps/sweep_001/run_004", + "--json", "--project", project_dir] + ) + assert rc == 0 + data = json.loads(capsys.readouterr().out) + assert len(data["artifacts"]) 
== 1 + path = data["artifacts"][0]["path"] + assert path.startswith("runs/") + + def test_nested_run_step_config_paths(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "sweeps", "sweep_001", "run_004") + _create_flow_json(run_dir) + _create_step_dir(run_dir, "CTS", "ecc", subdirs=["config"], + files={"config/cts_config.json": "{}"}) + + rc = cli_main.run( + ["config", "cts", "--resolved", "--run-id", "sweeps/sweep_001/run_004", + "--json", "--project", project_dir] + ) + assert rc == 0 + data = json.loads(capsys.readouterr().out) + assert "config" in data + path = data["config"][0]["path"] + assert path.startswith("runs/") + + +class TestConfigValidation: + def test_semantically_invalid_toml_returns_nonzero(self, tmp_path, capsys): + project_dir = tmp_path / "bad_project" + project_dir.mkdir() + (project_dir / "ecc.toml").write_text('''[design] +name = "" +top = "" +rtl = [] +clock_port = "" +frequency_mhz = 0 + +[pdk] +name = "unsupported" +root = "" + +[flow] +preset = "unknown" +run = "default" +''') + + rc = cli_main.run(["config", "--resolved", "--project", str(project_dir)]) + assert rc == 1 + + +class TestEmptyStepConfigSentinel: + def test_step_no_config_emits_sentinel_text(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + _create_flow_json(run_dir) + _create_step_dir(run_dir, "CTS", "ecc", subdirs=["output"]) + + rc = cli_main.run(["config", "cts", "--resolved", "--project", project_dir]) + assert rc == 0 + out = capsys.readouterr().out + assert "step=cts" in out + assert "config_status=none" in out + assert "artifacts=" in out + + def test_step_no_config_emits_sentinel_json(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + _create_flow_json(run_dir) + _create_step_dir(run_dir, "CTS", "ecc", subdirs=["output"]) + + rc = cli_main.run(["config", "cts", "--resolved", "--json", "--project", project_dir]) + assert rc == 0 + data = json.loads(capsys.readouterr().out) + assert data["step"] == "cts" + assert data["config_status"] == "none" + + +class TestDiagnoseFlowOnlySteps: + def test_flow_step_without_directory_emits_issues(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + _create_flow_json(run_dir, [ + {"name": "CTS", "tool": "ecc", "state": "Incomplete", "runtime": "0:00:04"}, + ]) + + rc = cli_main.run(["diagnose", "cts", "--project", project_dir]) + assert rc == 1 + out = capsys.readouterr().out + assert "issue=failed_step" in out + assert "step=cts" in out + assert "issue=unknown_step" not in out + + def test_flow_step_without_dir_reports_missing_artifacts(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + _create_flow_json(run_dir, [ + {"name": "CTS", "tool": "ecc", "state": "Success", "runtime": "0:00:04"}, + ]) + + rc = cli_main.run(["diagnose", "cts", "--project", project_dir]) + assert rc == 0 + out = capsys.readouterr().out + assert "issue=missing_artifacts" in out + assert "issue=missing_metrics" in out + assert "issue=config_unavailable" in out + + +class TestConfigRoleDisclosure: + def test_config_artifact_has_disclosure(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + _create_flow_json(run_dir) + 
_create_step_dir(run_dir, "CTS", "ecc", subdirs=["config"], + files={"config/cts_config.json": "{}"}) + + rc = cli_main.run(["artifacts", "cts", "--project", project_dir]) + assert rc == 0 + out = capsys.readouterr().out + for line in out.strip().split("\n"): + if line.strip(): + assert _has_disclosure(line), f"Missing disclosure in: {line}" From 413ab596626e8931fa242d341fafe88601ae5867 Mon Sep 17 00:00:00 2001 From: Emin Date: Sat, 2 May 2026 10:45:40 +0800 Subject: [PATCH 027/104] fix(cli): correct run-id resolution to project-root relative, use validate_project_config, fix run_dir.value - resolve_run_dir: multi-segment selectors (sweeps/sweep_001/run_004) now resolve from project root, not from runs/ - config_view: replace ad-hoc validation with validate_project_config() for full semantic checking (clock_port, frequency_mhz, rtl, flow.run) - config_view: run_dir.value now uses os.path.relpath unconditionally instead of checking isabs, giving project-relative paths like runs/default - config_view: remove run_id from ecc check disclosures (check has no --run-id) - tests: fix all multi-segment --run-id tests to create workspaces at /sweeps/... not /runs/sweeps/... - tests: assert exact run_dir.value instead of fuzzy matching - tests: add negative tests for unsupported flow.run, empty clock_port, zero frequency_mhz, empty rtl, and no-run-id in invalid config disclosure - tests: mock _validate_pdk_contents for all config --resolved tests --- chipcompiler/cli/config_view.py | 21 ++--- chipcompiler/cli/inspect.py | 2 +- test/cli/test_cli_inspect.py | 156 ++++++++++++++++++++++++++++---- 3 files changed, 146 insertions(+), 33 deletions(-) diff --git a/chipcompiler/cli/config_view.py b/chipcompiler/cli/config_view.py index 4f96ec65..d998a6cf 100644 --- a/chipcompiler/cli/config_view.py +++ b/chipcompiler/cli/config_view.py @@ -7,11 +7,10 @@ def build_project_config_items(project_dir: str, run_dir: str, project: str | None = None, run_id: str | None = None) -> tuple[list[dict], int]: from chipcompiler.cli.config import ( - SUPPORTED_FLOW_PRESETS, - SUPPORTED_PDK_NAMES, find_config_path, load_project_config, resolve_pdk_root, + validate_project_config, ) from chipcompiler.cli.output import format_line @@ -23,13 +22,8 @@ def build_project_config_items(project_dir: str, run_dir: str, if getattr(cfg, "_toml_error", None): return [{"kind": "error", "status": "invalid_config"}], 1 - if not cfg.design_name or not cfg.design_top or not cfg.flow_preset: - return [{"kind": "error", "status": "invalid_config"}], 1 - - if cfg.pdk_name and cfg.pdk_name not in SUPPORTED_PDK_NAMES: - return [{"kind": "error", "status": "invalid_config"}], 1 - - if cfg.flow_preset and cfg.flow_preset not in SUPPORTED_FLOW_PRESETS: + errors = validate_project_config(cfg) + if errors: return [{"kind": "error", "status": "invalid_config"}], 1 pdk_root = resolve_pdk_root(cfg) @@ -80,7 +74,10 @@ def build_project_config_items(project_dir: str, run_dir: str, }) # Run directory - run_dir_value = os.path.relpath(run_dir, project_dir) if not os.path.isabs(run_dir) else run_dir + try: + run_dir_value = os.path.relpath(run_dir, project_dir) + except ValueError: + run_dir_value = run_dir items.append({ "kind": "config", "scope": "project", @@ -157,11 +154,11 @@ def build_config_lines(items: list[dict], project: str | None = None, if items[0].get("status") == "missing_config": return [format_line( status="missing_config", - inspect=disclosure_cmd("ecc check", project, run_id), + inspect=disclosure_cmd("ecc check", project), )], 1 return 
[format_line( status="invalid_config", - inspect=disclosure_cmd("ecc check", project, run_id), + inspect=disclosure_cmd("ecc check", project), )], 1 lines = [] diff --git a/chipcompiler/cli/inspect.py b/chipcompiler/cli/inspect.py index 5eda4495..7525e95b 100644 --- a/chipcompiler/cli/inspect.py +++ b/chipcompiler/cli/inspect.py @@ -21,7 +21,7 @@ def resolve_run_dir(project_dir: str, run_id: str | None = None) -> tuple[str, s return run_id, run_id if os.sep in run_id or "/" in run_id: - return os.path.join(project_dir, "runs", run_id), run_id + return os.path.join(project_dir, run_id), run_id return os.path.join(project_dir, "runs", run_id), run_id diff --git a/test/cli/test_cli_inspect.py b/test/cli/test_cli_inspect.py index ef5cfa1f..177baaca 100644 --- a/test/cli/test_cli_inspect.py +++ b/test/cli/test_cli_inspect.py @@ -77,6 +77,13 @@ def _has_disclosure(line: str) -> bool: return '"ecc ' in line or "=ecc " in line +def _mock_pdk_validation(monkeypatch): + monkeypatch.setattr( + "chipcompiler.cli.config._validate_pdk_contents", + lambda name, root: None, + ) + + # =========================================================================== # AC-1: Run-id resolution # =========================================================================== @@ -106,7 +113,7 @@ def test_status_simple_token_run_id(self, tmp_path, capsys): def test_status_relative_path_run_id(self, tmp_path, capsys): project_dir = _create_valid_project(tmp_path) - run_dir = os.path.join(project_dir, "runs", "sweeps", "sweep_001", "run_004") + run_dir = os.path.join(project_dir, "sweeps", "sweep_001", "run_004") _create_flow_json(run_dir) rc = cli_main.run( @@ -252,7 +259,7 @@ def test_artifacts_jsonl(self, tmp_path, capsys): def test_artifacts_with_run_id(self, tmp_path, capsys): project_dir = _create_valid_project(tmp_path) - run_dir = os.path.join(project_dir, "runs", "sweeps", "sweep_001", "run_004") + run_dir = os.path.join(project_dir, "sweeps", "sweep_001", "run_004") _create_flow_json(run_dir) _create_step_dir(run_dir, "CTS", "ecc", subdirs=["output"], files={"output/design.def": "def content"}) @@ -289,7 +296,8 @@ def test_artifacts_derives_roles_from_dirs(self, tmp_path, capsys): class TestConfigResolved: - def test_config_resolved_project(self, tmp_path, capsys): + def test_config_resolved_project(self, tmp_path, capsys, monkeypatch): + _mock_pdk_validation(monkeypatch) project_dir = _create_valid_project(tmp_path) rc = cli_main.run(["config", "--resolved", "--project", project_dir]) @@ -300,7 +308,8 @@ def test_config_resolved_project(self, tmp_path, capsys): assert "config=pdk.name" in out assert "config=run_dir" in out - def test_config_resolved_json(self, tmp_path, capsys): + def test_config_resolved_json(self, tmp_path, capsys, monkeypatch): + _mock_pdk_validation(monkeypatch) project_dir = _create_valid_project(tmp_path) rc = cli_main.run(["config", "--resolved", "--json", "--project", project_dir]) @@ -312,7 +321,18 @@ def test_config_resolved_json(self, tmp_path, capsys): assert "pdk.name" in keys assert "run_dir" in keys - def test_config_resolved_jsonl(self, tmp_path, capsys): + def test_config_resolved_default_run_dir_value(self, tmp_path, capsys, monkeypatch): + _mock_pdk_validation(monkeypatch) + project_dir = _create_valid_project(tmp_path) + + rc = cli_main.run(["config", "--resolved", "--json", "--project", project_dir]) + assert rc == 0 + data = json.loads(capsys.readouterr().out) + run_item = next(i for i in data["config"] if i["key"] == "run_dir") + assert run_item["value"] == "runs/default" + 
+ def test_config_resolved_jsonl(self, tmp_path, capsys, monkeypatch): + _mock_pdk_validation(monkeypatch) project_dir = _create_valid_project(tmp_path) rc = cli_main.run(["config", "--resolved", "--jsonl", "--project", project_dir]) @@ -322,6 +342,7 @@ def test_config_resolved_jsonl(self, tmp_path, capsys): assert "design.name" in keys def test_config_resolved_pdk_root_from_env(self, tmp_path, capsys, monkeypatch): + _mock_pdk_validation(monkeypatch) pdk_root = tmp_path / "ics55_env" pdk_root.mkdir() monkeypatch.setenv("CHIPCOMPILER_ICS55_PDK_ROOT", str(pdk_root)) @@ -334,7 +355,8 @@ def test_config_resolved_pdk_root_from_env(self, tmp_path, capsys, monkeypatch): pdk_item = next(i for i in data["config"] if i["key"] == "pdk.root") assert pdk_item["source"] == "env" - def test_config_resolved_run_id(self, tmp_path, capsys): + def test_config_resolved_run_id(self, tmp_path, capsys, monkeypatch): + _mock_pdk_validation(monkeypatch) project_dir = _create_valid_project(tmp_path) rc = cli_main.run( @@ -344,7 +366,7 @@ def test_config_resolved_run_id(self, tmp_path, capsys): assert rc == 0 data = json.loads(capsys.readouterr().out) run_item = next(i for i in data["config"] if i["key"] == "run_dir") - assert "sweep_001" in run_item["value"] or "sweep_001" in run_item.get("resolved", "") + assert run_item["value"] == "sweeps/sweep_001/run_004" def test_config_missing_config(self, tmp_path, capsys): project_dir = tmp_path / "empty_project" @@ -366,7 +388,8 @@ def test_config_requires_resolved(self, tmp_path, capsys): class TestConfigStepResolved: - def test_config_step_lists_files(self, tmp_path, capsys): + def test_config_step_lists_files(self, tmp_path, capsys, monkeypatch): + _mock_pdk_validation(monkeypatch) project_dir = _create_valid_project(tmp_path) run_dir = os.path.join(project_dir, "runs", "default") _create_flow_json(run_dir) @@ -381,7 +404,8 @@ def test_config_step_lists_files(self, tmp_path, capsys): assert "scope=step" in out assert "cts_default_config.json" in out - def test_config_step_json(self, tmp_path, capsys): + def test_config_step_json(self, tmp_path, capsys, monkeypatch): + _mock_pdk_validation(monkeypatch) project_dir = _create_valid_project(tmp_path) run_dir = os.path.join(project_dir, "runs", "default") _create_flow_json(run_dir) @@ -762,7 +786,8 @@ def test_artifacts_lines_have_disclosure(self, tmp_path, capsys): if line.strip(): assert _has_disclosure(line), f"Missing disclosure in: {line}" - def test_config_resolved_lines_have_disclosure(self, tmp_path, capsys): + def test_config_resolved_lines_have_disclosure(self, tmp_path, capsys, monkeypatch): + _mock_pdk_validation(monkeypatch) project_dir = _create_valid_project(tmp_path) rc = cli_main.run(["config", "--resolved", "--project", project_dir]) @@ -836,7 +861,8 @@ def test_artifacts_does_not_modify_files(self, tmp_path, capsys): ) assert before_mtime == after_mtime - def test_no_persistent_metadata_files(self, tmp_path, capsys): + def test_no_persistent_metadata_files(self, tmp_path, capsys, monkeypatch): + _mock_pdk_validation(monkeypatch) project_dir = _create_valid_project(tmp_path) run_dir = os.path.join(project_dir, "runs", "default") _create_flow_json(run_dir) @@ -872,7 +898,7 @@ def test_explicit_default_preserved_in_disclosure(self, tmp_path, capsys): def test_project_relative_run_id_resolves(self, tmp_path, capsys): project_dir = _create_valid_project(tmp_path) - run_dir = os.path.join(project_dir, "runs", "sweeps", "sweep_001", "run_004") + run_dir = os.path.join(project_dir, "sweeps", "sweep_001", 
"run_004") _create_flow_json(run_dir) rc = cli_main.run( @@ -886,7 +912,7 @@ def test_project_relative_run_id_resolves(self, tmp_path, capsys): class TestArtifactPaths: def test_nested_run_artifact_paths(self, tmp_path, capsys): project_dir = _create_valid_project(tmp_path) - run_dir = os.path.join(project_dir, "runs", "sweeps", "sweep_001", "run_004") + run_dir = os.path.join(project_dir, "sweeps", "sweep_001", "run_004") _create_flow_json(run_dir) _create_step_dir(run_dir, "CTS", "ecc", subdirs=["output"], files={"output/design.def": "def content"}) @@ -899,11 +925,11 @@ def test_nested_run_artifact_paths(self, tmp_path, capsys): data = json.loads(capsys.readouterr().out) assert len(data["artifacts"]) == 1 path = data["artifacts"][0]["path"] - assert path.startswith("runs/") + assert path.startswith("sweeps/") def test_nested_run_step_config_paths(self, tmp_path, capsys): project_dir = _create_valid_project(tmp_path) - run_dir = os.path.join(project_dir, "runs", "sweeps", "sweep_001", "run_004") + run_dir = os.path.join(project_dir, "sweeps", "sweep_001", "run_004") _create_flow_json(run_dir) _create_step_dir(run_dir, "CTS", "ecc", subdirs=["config"], files={"config/cts_config.json": "{}"}) @@ -916,12 +942,100 @@ def test_nested_run_step_config_paths(self, tmp_path, capsys): data = json.loads(capsys.readouterr().out) assert "config" in data path = data["config"][0]["path"] - assert path.startswith("runs/") + assert path.startswith("sweeps/") class TestConfigValidation: - def test_semantically_invalid_toml_returns_nonzero(self, tmp_path, capsys): - project_dir = tmp_path / "bad_project" + def test_invalid_unsupported_flow_run(self, tmp_path, capsys): + project_dir = tmp_path / "bad_flow_run" + project_dir.mkdir() + (project_dir / "ecc.toml").write_text('''[design] +name = "gcd" +top = "gcd" +rtl = ["rtl/gcd.v"] +clock_port = "clk" +frequency_mhz = 100.0 + +[pdk] +name = "ics55" +root = "/tmp/nonexistent" + +[flow] +preset = "rtl2gds" +run = "custom" +''') + rc = cli_main.run(["config", "--resolved", "--project", str(project_dir)]) + assert rc == 1 + + def test_invalid_empty_clock_port(self, tmp_path, capsys): + project_dir = tmp_path / "bad_clock" + project_dir.mkdir() + (project_dir / "rtl").mkdir() + (project_dir / "rtl" / "gcd.v").write_text("module gcd; endmodule") + (project_dir / "ecc.toml").write_text('''[design] +name = "gcd" +top = "gcd" +rtl = ["rtl/gcd.v"] +clock_port = "" +frequency_mhz = 100.0 + +[pdk] +name = "ics55" +root = "/tmp/nonexistent" + +[flow] +preset = "rtl2gds" +run = "default" +''') + rc = cli_main.run(["config", "--resolved", "--project", str(project_dir)]) + assert rc == 1 + + def test_invalid_zero_frequency(self, tmp_path, capsys): + project_dir = tmp_path / "bad_freq" + project_dir.mkdir() + (project_dir / "rtl").mkdir() + (project_dir / "rtl" / "gcd.v").write_text("module gcd; endmodule") + (project_dir / "ecc.toml").write_text('''[design] +name = "gcd" +top = "gcd" +rtl = ["rtl/gcd.v"] +clock_port = "clk" +frequency_mhz = 0 + +[pdk] +name = "ics55" +root = "/tmp/nonexistent" + +[flow] +preset = "rtl2gds" +run = "default" +''') + rc = cli_main.run(["config", "--resolved", "--project", str(project_dir)]) + assert rc == 1 + + def test_invalid_empty_rtl(self, tmp_path, capsys): + project_dir = tmp_path / "bad_rtl" + project_dir.mkdir() + (project_dir / "ecc.toml").write_text('''[design] +name = "gcd" +top = "gcd" +rtl = [] +clock_port = "clk" +frequency_mhz = 100.0 + +[pdk] +name = "ics55" +root = "/tmp/nonexistent" + +[flow] +preset = "rtl2gds" +run = 
"default" +''') + rc = cli_main.run(["config", "--resolved", "--project", str(project_dir)]) + assert rc == 1 + + def test_invalid_config_no_run_id_in_check_disclosure(self, tmp_path, capsys): + project_dir = tmp_path / "bad_no_runid" project_dir.mkdir() (project_dir / "ecc.toml").write_text('''[design] name = "" @@ -938,9 +1052,11 @@ def test_semantically_invalid_toml_returns_nonzero(self, tmp_path, capsys): preset = "unknown" run = "default" ''') - - rc = cli_main.run(["config", "--resolved", "--project", str(project_dir)]) + rc = cli_main.run(["config", "--resolved", "--run-id", "run_123", + "--project", str(project_dir)]) assert rc == 1 + out = capsys.readouterr().out + assert "--run-id" not in out class TestEmptyStepConfigSentinel: From 2e1e30f79ea1079081907145f42e0a8c8d58d4cd Mon Sep 17 00:00:00 2001 From: Emin Date: Sat, 2 May 2026 11:10:26 +0800 Subject: [PATCH 028/104] fix(cli): issue-specific diagnose evidence, absolute run_dir.value, config JSON disclosure, clean output - diagnose: each issue kind now emits its own evidence command (log_errors -> ecc log, missing_metrics -> ecc metrics, missing_artifacts -> ecc artifacts, config_unavailable -> ecc config) - diagnose: clean output now includes status=clean, status_cmd, artifacts, config disclosure fields in text/JSON/JSONL - config_view: run_dir.value preserves absolute paths for external runs (detects ../../ relativization and falls back to absolute) - config_view: all project-level config items now include inspect_cmd so JSON/JSONL consumers get disclosure metadata - tests: add absolute external --run-id config test - tests: add issue-specific evidence assertions for all diagnose issue types - tests: add clean diagnose output assertions for text and JSON - tests: add project-level config JSON inspect_cmd assertion - tests: replace old config validation tests with isolated versions that keep PDK/RTL valid and change only the field under test --- chipcompiler/cli/config_view.py | 12 +- chipcompiler/cli/diagnose.py | 34 ++- test/cli/test_cli_inspect.py | 357 ++++++++++++++++++++++---------- 3 files changed, 288 insertions(+), 115 deletions(-) diff --git a/chipcompiler/cli/config_view.py b/chipcompiler/cli/config_view.py index d998a6cf..a9eabb6d 100644 --- a/chipcompiler/cli/config_view.py +++ b/chipcompiler/cli/config_view.py @@ -40,6 +40,8 @@ def build_project_config_items(project_dir: str, run_dir: str, ("flow.run", cfg.flow_run, cfg.flow_run, "ecc.toml"), ] + inspect = disclosure_cmd("ecc config --resolved --json", project, run_id) + for key, value, resolved, source in entries: items.append({ "kind": "config", @@ -48,6 +50,7 @@ def build_project_config_items(project_dir: str, run_dir: str, "value": value, "resolved": resolved, "source": source, + "inspect_cmd": inspect, }) # RTL entries @@ -60,6 +63,7 @@ def build_project_config_items(project_dir: str, run_dir: str, "value": rtl, "resolved": rtl_resolved, "source": "ecc.toml", + "inspect_cmd": inspect, }) # PDK root with resolution @@ -71,13 +75,18 @@ def build_project_config_items(project_dir: str, run_dir: str, "value": cfg.pdk_root or "", "resolved": pdk_root, "source": pdk_source, + "inspect_cmd": inspect, }) # Run directory try: - run_dir_value = os.path.relpath(run_dir, project_dir) + run_dir_rel = os.path.relpath(run_dir, project_dir) except ValueError: + run_dir_rel = run_dir + if run_dir_rel.startswith(".."): run_dir_value = run_dir + else: + run_dir_value = run_dir_rel items.append({ "kind": "config", "scope": "project", @@ -85,6 +94,7 @@ def 
build_project_config_items(project_dir: str, run_dir: str, "value": run_dir_value, "resolved": os.path.abspath(run_dir), "source": "resolved", + "inspect_cmd": disclosure_cmd("ecc status", project, run_id), }) return items, 0 diff --git a/chipcompiler/cli/diagnose.py b/chipcompiler/cli/diagnose.py index 3f98c2eb..a90e7bdf 100644 --- a/chipcompiler/cli/diagnose.py +++ b/chipcompiler/cli/diagnose.py @@ -57,8 +57,20 @@ def _make_issue(issue: str, severity: str, run: str, if issue == "missing_run": obj["evidence"] = disclosure_cmd("ecc status", **cmd_kwargs) obj["run_cmd"] = disclosure_cmd("ecc run", project=project) + elif issue == "log_errors": + obj["evidence"] = disclosure_cmd(f"ecc log {step} --errors", **cmd_kwargs) + obj["artifacts"] = disclosure_cmd(f"ecc artifacts {step}", **cmd_kwargs) + elif issue == "missing_metrics": + obj["evidence"] = disclosure_cmd(f"ecc metrics {step} --json", **cmd_kwargs) + obj["log"] = disclosure_cmd(f"ecc log {step} --errors", **cmd_kwargs) + elif issue == "missing_artifacts": + obj["evidence"] = disclosure_cmd(f"ecc artifacts {step}", **cmd_kwargs) + obj["config"] = disclosure_cmd(f"ecc config {step} --resolved", **cmd_kwargs) + elif issue == "config_unavailable": + obj["evidence"] = disclosure_cmd(f"ecc config {step} --resolved", **cmd_kwargs) + obj["artifacts"] = disclosure_cmd(f"ecc artifacts {step}", **cmd_kwargs) elif step: - obj["evidence"] = disclosure_cmd(f"ecc status", **cmd_kwargs) + obj["evidence"] = disclosure_cmd("ecc status", **cmd_kwargs) obj["log"] = disclosure_cmd(f"ecc log {step} --errors", **cmd_kwargs) obj["artifacts"] = disclosure_cmd(f"ecc artifacts {step}", **cmd_kwargs) obj["config"] = disclosure_cmd(f"ecc config {step} --resolved", **cmd_kwargs) @@ -166,9 +178,11 @@ def build_diagnose_lines(run_dir: str, step_token: str | None = None, if not issues: display_run = run_id or "default" return [format_line( - diagnose="clean", + status="clean", run=display_run, - evidence=disclosure_cmd("ecc status", project, run_id), + status_cmd=disclosure_cmd("ecc status", project, run_id), + artifacts=disclosure_cmd("ecc artifacts", project, run_id), + config=disclosure_cmd("ecc config --resolved", project, run_id), )], 0 lines = [] @@ -198,12 +212,22 @@ def build_diagnose_lines(run_dir: str, step_token: str | None = None, return lines, _exit_code(issues) +def _clean_object(run_id, project, run_id_val): + return { + "status": "clean", + "run": run_id or "default", + "status_cmd": disclosure_cmd("ecc status", project, run_id_val), + "artifacts": disclosure_cmd("ecc artifacts", project, run_id_val), + "config": disclosure_cmd("ecc config --resolved", project, run_id_val), + } + + def build_diagnose_json(run_dir: str, step_token: str | None = None, project: str | None = None, run_id: str | None = None) -> tuple[dict, int]: issues, _ = build_diagnose_issues(run_dir, step_token, project, run_id) if not issues: - return {"diagnose": "clean", "run": run_id or "default"}, 0 + return _clean_object(run_id, project, run_id), 0 return {"issues": issues}, _exit_code(issues) @@ -212,5 +236,5 @@ def build_diagnose_jsonl(run_dir: str, step_token: str | None = None, run_id: str | None = None) -> tuple[list[dict], int]: issues, _ = build_diagnose_issues(run_dir, step_token, project, run_id) if not issues: - return [{"diagnose": "clean", "run": run_id or "default"}], 0 + return [_clean_object(run_id, project, run_id)], 0 return issues, _exit_code(issues) diff --git a/test/cli/test_cli_inspect.py b/test/cli/test_cli_inspect.py index 177baaca..a3c8a71f 100644 --- 
a/test/cli/test_cli_inspect.py +++ b/test/cli/test_cli_inspect.py @@ -604,7 +604,7 @@ def test_diagnose_clean_run(self, tmp_path, capsys): rc = cli_main.run(["diagnose", "--project", project_dir]) assert rc == 0 out = capsys.readouterr().out - assert "diagnose=clean" in out + assert "status=clean" in out def test_diagnose_step_filter(self, tmp_path, capsys): project_dir = _create_valid_project(tmp_path) @@ -701,7 +701,7 @@ def test_diagnose_with_run_id(self, tmp_path, capsys): ) assert rc == 0 out = capsys.readouterr().out - assert "diagnose=clean" in out + assert "status=clean" in out # =========================================================================== @@ -945,10 +945,241 @@ def test_nested_run_step_config_paths(self, tmp_path, capsys): assert path.startswith("sweeps/") -class TestConfigValidation: - def test_invalid_unsupported_flow_run(self, tmp_path, capsys): - project_dir = tmp_path / "bad_flow_run" +class TestEmptyStepConfigSentinel: + def test_step_no_config_emits_sentinel_text(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + _create_flow_json(run_dir) + _create_step_dir(run_dir, "CTS", "ecc", subdirs=["output"]) + + rc = cli_main.run(["config", "cts", "--resolved", "--project", project_dir]) + assert rc == 0 + out = capsys.readouterr().out + assert "step=cts" in out + assert "config_status=none" in out + assert "artifacts=" in out + + def test_step_no_config_emits_sentinel_json(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + _create_flow_json(run_dir) + _create_step_dir(run_dir, "CTS", "ecc", subdirs=["output"]) + + rc = cli_main.run(["config", "cts", "--resolved", "--json", "--project", project_dir]) + assert rc == 0 + data = json.loads(capsys.readouterr().out) + assert data["step"] == "cts" + assert data["config_status"] == "none" + + +class TestDiagnoseFlowOnlySteps: + def test_flow_step_without_directory_emits_issues(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + _create_flow_json(run_dir, [ + {"name": "CTS", "tool": "ecc", "state": "Incomplete", "runtime": "0:00:04"}, + ]) + + rc = cli_main.run(["diagnose", "cts", "--project", project_dir]) + assert rc == 1 + out = capsys.readouterr().out + assert "issue=failed_step" in out + assert "step=cts" in out + assert "issue=unknown_step" not in out + + def test_flow_step_without_dir_reports_missing_artifacts(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + _create_flow_json(run_dir, [ + {"name": "CTS", "tool": "ecc", "state": "Success", "runtime": "0:00:04"}, + ]) + + rc = cli_main.run(["diagnose", "cts", "--project", project_dir]) + assert rc == 0 + out = capsys.readouterr().out + assert "issue=missing_artifacts" in out + assert "issue=missing_metrics" in out + assert "issue=config_unavailable" in out + + +class TestConfigRoleDisclosure: + def test_config_artifact_has_disclosure(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + _create_flow_json(run_dir) + _create_step_dir(run_dir, "CTS", "ecc", subdirs=["config"], + files={"config/cts_config.json": "{}"}) + + rc = cli_main.run(["artifacts", "cts", "--project", project_dir]) + assert rc == 0 + out = capsys.readouterr().out + for line in out.strip().split("\n"): + if 
line.strip(): + assert _has_disclosure(line), f"Missing disclosure in: {line}" + + +# =========================================================================== +# Regression tests for Codex Round 2 findings (Round 3) +# =========================================================================== + + +class TestAbsoluteRunIdConfig: + def test_absolute_run_id_preserves_run_dir_value(self, tmp_path, capsys, monkeypatch): + _mock_pdk_validation(monkeypatch) + project_dir = _create_valid_project(tmp_path) + external_run = tmp_path / "external_run" + _create_flow_json(str(external_run)) + + rc = cli_main.run( + ["config", "--resolved", "--run-id", str(external_run), + "--json", "--project", project_dir] + ) + assert rc == 0 + data = json.loads(capsys.readouterr().out) + run_item = next(i for i in data["config"] if i["key"] == "run_dir") + assert run_item["value"] == str(external_run) + + +class TestDiagnoseIssueSpecificEvidence: + def test_log_errors_uses_log_command(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + _create_flow_json(run_dir, [ + {"name": "CTS", "tool": "ecc", "state": "Success", "runtime": "0:00:04"}, + ]) + _create_step_dir(run_dir, "CTS", "ecc", + subdirs=["log", "output", "analysis", "config"], + files={"log/cts.log": "Error: bad thing\nError: other\nok\n", + "output/design.def": "def", + "analysis/CTS_metrics.json": "{}", + "config/cts_config.json": "{}"}) + + rc = cli_main.run(["diagnose", "cts", "--project", project_dir]) + assert rc == 1 + out = capsys.readouterr().out + log_errors_line = [l for l in out.strip().split("\n") if "issue=log_errors" in l][0] + assert "ecc log cts --errors" in log_errors_line + + def test_missing_metrics_uses_metrics_command(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + _create_flow_json(run_dir, [ + {"name": "CTS", "tool": "ecc", "state": "Success", "runtime": "0:00:04"}, + ]) + _create_step_dir(run_dir, "CTS", "ecc", + subdirs=["log", "output", "config"], + files={"log/cts.log": "ok\n", + "output/design.def": "def", + "config/cts_config.json": "{}"}) + + rc = cli_main.run(["diagnose", "cts", "--project", project_dir]) + assert rc == 0 + out = capsys.readouterr().out + metrics_line = [l for l in out.strip().split("\n") if "issue=missing_metrics" in l][0] + assert "ecc metrics cts --json" in metrics_line + + def test_missing_artifacts_uses_artifacts_command(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + _create_flow_json(run_dir, [ + {"name": "CTS", "tool": "ecc", "state": "Success", "runtime": "0:00:04"}, + ]) + _create_step_dir(run_dir, "CTS", "ecc", + subdirs=["log", "config"], + files={"log/cts.log": "ok\n", + "config/cts_config.json": "{}"}) + + rc = cli_main.run(["diagnose", "cts", "--project", project_dir]) + assert rc == 0 + out = capsys.readouterr().out + assert "issue=missing_artifacts" in out + artifacts_lines = [l for l in out.strip().split("\n") if "issue=missing_artifacts" in l] + assert len(artifacts_lines) > 0 + assert "ecc artifacts cts" in artifacts_lines[0] + + def test_config_unavailable_uses_config_command(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + _create_flow_json(run_dir, [ + {"name": "CTS", "tool": "ecc", "state": "Success", "runtime": "0:00:04"}, + ]) + _create_step_dir(run_dir, 
"CTS", "ecc", + subdirs=["log", "output", "analysis"], + files={"log/cts.log": "ok\n", + "output/design.def": "def", + "analysis/CTS_metrics.json": "{}"}) + + rc = cli_main.run(["diagnose", "cts", "--project", project_dir]) + assert rc == 0 + out = capsys.readouterr().out + config_line = [l for l in out.strip().split("\n") if "issue=config_unavailable" in l][0] + assert "ecc config cts --resolved" in config_line + + +class TestCleanDiagnoseOutput: + def test_clean_has_status_and_disclosure_commands(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + _create_flow_json(run_dir, [ + {"name": "CTS", "tool": "ecc", "state": "Success", "runtime": "0:00:04"}, + ]) + _create_step_dir(run_dir, "CTS", "ecc", + subdirs=["log", "output", "analysis", "config"], + files={"log/cts.log": "ok\n", + "output/design.def": "def", + "analysis/CTS_metrics.json": "{}", + "config/cts_config.json": "{}"}) + + rc = cli_main.run(["diagnose", "--project", project_dir]) + assert rc == 0 + out = capsys.readouterr().out + assert "status=clean" in out + assert "status_cmd=" in out + assert "artifacts=" in out + assert "config=" in out + + def test_clean_json_has_disclosure_metadata(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + _create_flow_json(run_dir, [ + {"name": "CTS", "tool": "ecc", "state": "Success", "runtime": "0:00:04"}, + ]) + _create_step_dir(run_dir, "CTS", "ecc", + subdirs=["log", "output", "analysis", "config"], + files={"log/cts.log": "ok\n", + "output/design.def": "def", + "analysis/CTS_metrics.json": "{}", + "config/cts_config.json": "{}"}) + + rc = cli_main.run(["diagnose", "--json", "--project", project_dir]) + assert rc == 0 + data = json.loads(capsys.readouterr().out) + assert data["status"] == "clean" + assert "status_cmd" in data + assert "artifacts" in data + assert "config" in data + + +class TestConfigJsonDisclosure: + def test_project_config_json_has_inspect_cmd(self, tmp_path, capsys, monkeypatch): + _mock_pdk_validation(monkeypatch) + project_dir = _create_valid_project(tmp_path) + + rc = cli_main.run(["config", "--resolved", "--json", "--project", project_dir]) + assert rc == 0 + data = json.loads(capsys.readouterr().out) + for item in data["config"]: + assert "inspect_cmd" in item, f"Missing inspect_cmd in item: {item['key']}" + + +class TestIsolatedConfigValidation: + def test_unsupported_flow_run_rejected(self, tmp_path, capsys, monkeypatch): + _mock_pdk_validation(monkeypatch) + project_dir = tmp_path / "bad_run" project_dir.mkdir() + rtl_dir = project_dir / "rtl" + rtl_dir.mkdir() + (rtl_dir / "gcd.v").write_text("module gcd; endmodule") (project_dir / "ecc.toml").write_text('''[design] name = "gcd" top = "gcd" @@ -967,11 +1198,13 @@ def test_invalid_unsupported_flow_run(self, tmp_path, capsys): rc = cli_main.run(["config", "--resolved", "--project", str(project_dir)]) assert rc == 1 - def test_invalid_empty_clock_port(self, tmp_path, capsys): + def test_empty_clock_port_rejected(self, tmp_path, capsys, monkeypatch): + _mock_pdk_validation(monkeypatch) project_dir = tmp_path / "bad_clock" project_dir.mkdir() - (project_dir / "rtl").mkdir() - (project_dir / "rtl" / "gcd.v").write_text("module gcd; endmodule") + rtl_dir = project_dir / "rtl" + rtl_dir.mkdir() + (rtl_dir / "gcd.v").write_text("module gcd; endmodule") (project_dir / "ecc.toml").write_text('''[design] name = "gcd" top = "gcd" @@ -990,11 +1223,13 @@ def 
test_invalid_empty_clock_port(self, tmp_path, capsys): rc = cli_main.run(["config", "--resolved", "--project", str(project_dir)]) assert rc == 1 - def test_invalid_zero_frequency(self, tmp_path, capsys): + def test_zero_frequency_rejected(self, tmp_path, capsys, monkeypatch): + _mock_pdk_validation(monkeypatch) project_dir = tmp_path / "bad_freq" project_dir.mkdir() - (project_dir / "rtl").mkdir() - (project_dir / "rtl" / "gcd.v").write_text("module gcd; endmodule") + rtl_dir = project_dir / "rtl" + rtl_dir.mkdir() + (rtl_dir / "gcd.v").write_text("module gcd; endmodule") (project_dir / "ecc.toml").write_text('''[design] name = "gcd" top = "gcd" @@ -1013,7 +1248,8 @@ def test_invalid_zero_frequency(self, tmp_path, capsys): rc = cli_main.run(["config", "--resolved", "--project", str(project_dir)]) assert rc == 1 - def test_invalid_empty_rtl(self, tmp_path, capsys): + def test_empty_rtl_rejected(self, tmp_path, capsys, monkeypatch): + _mock_pdk_validation(monkeypatch) project_dir = tmp_path / "bad_rtl" project_dir.mkdir() (project_dir / "ecc.toml").write_text('''[design] @@ -1033,100 +1269,3 @@ def test_invalid_empty_rtl(self, tmp_path, capsys): ''') rc = cli_main.run(["config", "--resolved", "--project", str(project_dir)]) assert rc == 1 - - def test_invalid_config_no_run_id_in_check_disclosure(self, tmp_path, capsys): - project_dir = tmp_path / "bad_no_runid" - project_dir.mkdir() - (project_dir / "ecc.toml").write_text('''[design] -name = "" -top = "" -rtl = [] -clock_port = "" -frequency_mhz = 0 - -[pdk] -name = "unsupported" -root = "" - -[flow] -preset = "unknown" -run = "default" -''') - rc = cli_main.run(["config", "--resolved", "--run-id", "run_123", - "--project", str(project_dir)]) - assert rc == 1 - out = capsys.readouterr().out - assert "--run-id" not in out - - -class TestEmptyStepConfigSentinel: - def test_step_no_config_emits_sentinel_text(self, tmp_path, capsys): - project_dir = _create_valid_project(tmp_path) - run_dir = os.path.join(project_dir, "runs", "default") - _create_flow_json(run_dir) - _create_step_dir(run_dir, "CTS", "ecc", subdirs=["output"]) - - rc = cli_main.run(["config", "cts", "--resolved", "--project", project_dir]) - assert rc == 0 - out = capsys.readouterr().out - assert "step=cts" in out - assert "config_status=none" in out - assert "artifacts=" in out - - def test_step_no_config_emits_sentinel_json(self, tmp_path, capsys): - project_dir = _create_valid_project(tmp_path) - run_dir = os.path.join(project_dir, "runs", "default") - _create_flow_json(run_dir) - _create_step_dir(run_dir, "CTS", "ecc", subdirs=["output"]) - - rc = cli_main.run(["config", "cts", "--resolved", "--json", "--project", project_dir]) - assert rc == 0 - data = json.loads(capsys.readouterr().out) - assert data["step"] == "cts" - assert data["config_status"] == "none" - - -class TestDiagnoseFlowOnlySteps: - def test_flow_step_without_directory_emits_issues(self, tmp_path, capsys): - project_dir = _create_valid_project(tmp_path) - run_dir = os.path.join(project_dir, "runs", "default") - _create_flow_json(run_dir, [ - {"name": "CTS", "tool": "ecc", "state": "Incomplete", "runtime": "0:00:04"}, - ]) - - rc = cli_main.run(["diagnose", "cts", "--project", project_dir]) - assert rc == 1 - out = capsys.readouterr().out - assert "issue=failed_step" in out - assert "step=cts" in out - assert "issue=unknown_step" not in out - - def test_flow_step_without_dir_reports_missing_artifacts(self, tmp_path, capsys): - project_dir = _create_valid_project(tmp_path) - run_dir = os.path.join(project_dir, 
"runs", "default") - _create_flow_json(run_dir, [ - {"name": "CTS", "tool": "ecc", "state": "Success", "runtime": "0:00:04"}, - ]) - - rc = cli_main.run(["diagnose", "cts", "--project", project_dir]) - assert rc == 0 - out = capsys.readouterr().out - assert "issue=missing_artifacts" in out - assert "issue=missing_metrics" in out - assert "issue=config_unavailable" in out - - -class TestConfigRoleDisclosure: - def test_config_artifact_has_disclosure(self, tmp_path, capsys): - project_dir = _create_valid_project(tmp_path) - run_dir = os.path.join(project_dir, "runs", "default") - _create_flow_json(run_dir) - _create_step_dir(run_dir, "CTS", "ecc", subdirs=["config"], - files={"config/cts_config.json": "{}"}) - - rc = cli_main.run(["artifacts", "cts", "--project", project_dir]) - assert rc == 0 - out = capsys.readouterr().out - for line in out.strip().split("\n"): - if line.strip(): - assert _has_disclosure(line), f"Missing disclosure in: {line}" From c3fa6de60dc7c56669077136a53f2a46803fa6a3 Mon Sep 17 00:00:00 2001 From: Emin Date: Sat, 2 May 2026 11:28:43 +0800 Subject: [PATCH 029/104] fix(cli): use item-specific inspect_cmd in config text, add invalid_flow_json evidence, isolate config tests --- chipcompiler/cli/config_view.py | 2 +- chipcompiler/cli/diagnose.py | 3 + test/cli/test_cli_inspect.py | 148 +++++++++++++++++--------------- 3 files changed, 84 insertions(+), 69 deletions(-) diff --git a/chipcompiler/cli/config_view.py b/chipcompiler/cli/config_view.py index a9eabb6d..4b3138a4 100644 --- a/chipcompiler/cli/config_view.py +++ b/chipcompiler/cli/config_view.py @@ -180,7 +180,7 @@ def build_config_lines(items: list[dict], project: str | None = None, value=item["value"], resolved=item.get("resolved"), source=item["source"], - inspect=disclosure_cmd("ecc config --resolved --json", project, run_id), + inspect=item.get("inspect_cmd"), ) else: line = format_line( diff --git a/chipcompiler/cli/diagnose.py b/chipcompiler/cli/diagnose.py index a90e7bdf..d665319e 100644 --- a/chipcompiler/cli/diagnose.py +++ b/chipcompiler/cli/diagnose.py @@ -57,6 +57,9 @@ def _make_issue(issue: str, severity: str, run: str, if issue == "missing_run": obj["evidence"] = disclosure_cmd("ecc status", **cmd_kwargs) obj["run_cmd"] = disclosure_cmd("ecc run", project=project) + elif issue == "invalid_flow_json": + obj["evidence"] = disclosure_cmd("ecc status", **cmd_kwargs) + obj["run_cmd"] = disclosure_cmd("ecc run", project=project) elif issue == "log_errors": obj["evidence"] = disclosure_cmd(f"ecc log {step} --errors", **cmd_kwargs) obj["artifacts"] = disclosure_cmd(f"ecc artifacts {step}", **cmd_kwargs) diff --git a/test/cli/test_cli_inspect.py b/test/cli/test_cli_inspect.py index a3c8a71f..67a3df64 100644 --- a/test/cli/test_cli_inspect.py +++ b/test/cli/test_cli_inspect.py @@ -1040,6 +1040,19 @@ def test_absolute_run_id_preserves_run_dir_value(self, tmp_path, capsys, monkeyp assert run_item["value"] == str(external_run) +class TestConfigTextUsesItemInspectCmd: + def test_run_dir_text_uses_status_command(self, tmp_path, capsys, monkeypatch): + _mock_pdk_validation(monkeypatch) + project_dir = _create_valid_project(tmp_path) + + rc = cli_main.run(["config", "--resolved", "--project", project_dir]) + assert rc == 0 + out = capsys.readouterr().out + run_dir_line = [l for l in out.strip().split("\n") if "config=run_dir" in l][0] + assert "ecc status" in run_dir_line + assert "ecc config --resolved --json" not in run_dir_line + + class TestDiagnoseIssueSpecificEvidence: def test_log_errors_uses_log_command(self, 
tmp_path, capsys): project_dir = _create_valid_project(tmp_path) @@ -1115,6 +1128,36 @@ def test_config_unavailable_uses_config_command(self, tmp_path, capsys): config_line = [l for l in out.strip().split("\n") if "issue=config_unavailable" in l][0] assert "ecc config cts --resolved" in config_line + def test_invalid_flow_json_has_evidence(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + os.makedirs(os.path.join(run_dir, "home"), exist_ok=True) + with open(os.path.join(run_dir, "home", "flow.json"), "w") as f: + f.write("NOT VALID JSON{{{") + + rc = cli_main.run(["diagnose", "--project", project_dir]) + assert rc == 1 + out = capsys.readouterr().out + assert "issue=invalid_flow_json" in out + assert "evidence=" in out + assert "ecc status" in out + + def test_invalid_flow_json_json_has_evidence(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + os.makedirs(os.path.join(run_dir, "home"), exist_ok=True) + with open(os.path.join(run_dir, "home", "flow.json"), "w") as f: + f.write("NOT VALID JSON{{{") + + rc = cli_main.run(["diagnose", "--json", "--project", project_dir]) + assert rc == 1 + out = capsys.readouterr().out + data = json.loads(out) + issue = data["issues"][0] + assert issue["issue"] == "invalid_flow_json" + assert "evidence" in issue + assert "run_cmd" in issue + class TestCleanDiagnoseOutput: def test_clean_has_status_and_disclosure_commands(self, tmp_path, capsys): @@ -1173,28 +1216,42 @@ def test_project_config_json_has_inspect_cmd(self, tmp_path, capsys, monkeypatch class TestIsolatedConfigValidation: - def test_unsupported_flow_run_rejected(self, tmp_path, capsys, monkeypatch): - _mock_pdk_validation(monkeypatch) - project_dir = tmp_path / "bad_run" - project_dir.mkdir() - rtl_dir = project_dir / "rtl" - rtl_dir.mkdir() + @staticmethod + def _valid_toml(tmp_path, **overrides): + pdk_dir = tmp_path / "pdk" + pdk_dir.mkdir(exist_ok=True) + rtl_dir = tmp_path / "rtl" + rtl_dir.mkdir(exist_ok=True) (rtl_dir / "gcd.v").write_text("module gcd; endmodule") - (project_dir / "ecc.toml").write_text('''[design] -name = "gcd" -top = "gcd" -rtl = ["rtl/gcd.v"] -clock_port = "clk" -frequency_mhz = 100.0 + defaults = { + "name": "gcd", "top": "gcd", "rtl": '["rtl/gcd.v"]', + "clock_port": "clk", "frequency_mhz": "100.0", + "pdk_name": "ics55", "pdk_root": str(pdk_dir), + "flow_preset": "rtl2gds", "flow_run": "default", + } + defaults.update(overrides) + return f'''[design] +name = "{defaults['name']}" +top = "{defaults['top']}" +rtl = {defaults['rtl']} +clock_port = "{defaults['clock_port']}" +frequency_mhz = {defaults['frequency_mhz']} [pdk] -name = "ics55" -root = "/tmp/nonexistent" +name = "{defaults['pdk_name']}" +root = "{defaults['pdk_root']}" [flow] -preset = "rtl2gds" -run = "custom" -''') +preset = "{defaults['flow_preset']}" +run = "{defaults['flow_run']}" +''' + + def test_unsupported_flow_run_rejected(self, tmp_path, capsys, monkeypatch): + _mock_pdk_validation(monkeypatch) + project_dir = tmp_path / "bad_run" + project_dir.mkdir() + toml = self._valid_toml(tmp_path, flow_run="custom") + (project_dir / "ecc.toml").write_text(toml) rc = cli_main.run(["config", "--resolved", "--project", str(project_dir)]) assert rc == 1 @@ -1202,24 +1259,8 @@ def test_empty_clock_port_rejected(self, tmp_path, capsys, monkeypatch): _mock_pdk_validation(monkeypatch) project_dir = tmp_path / "bad_clock" project_dir.mkdir() - rtl_dir = 
project_dir / "rtl" - rtl_dir.mkdir() - (rtl_dir / "gcd.v").write_text("module gcd; endmodule") - (project_dir / "ecc.toml").write_text('''[design] -name = "gcd" -top = "gcd" -rtl = ["rtl/gcd.v"] -clock_port = "" -frequency_mhz = 100.0 - -[pdk] -name = "ics55" -root = "/tmp/nonexistent" - -[flow] -preset = "rtl2gds" -run = "default" -''') + toml = self._valid_toml(tmp_path, clock_port="") + (project_dir / "ecc.toml").write_text(toml) rc = cli_main.run(["config", "--resolved", "--project", str(project_dir)]) assert rc == 1 @@ -1227,24 +1268,8 @@ def test_zero_frequency_rejected(self, tmp_path, capsys, monkeypatch): _mock_pdk_validation(monkeypatch) project_dir = tmp_path / "bad_freq" project_dir.mkdir() - rtl_dir = project_dir / "rtl" - rtl_dir.mkdir() - (rtl_dir / "gcd.v").write_text("module gcd; endmodule") - (project_dir / "ecc.toml").write_text('''[design] -name = "gcd" -top = "gcd" -rtl = ["rtl/gcd.v"] -clock_port = "clk" -frequency_mhz = 0 - -[pdk] -name = "ics55" -root = "/tmp/nonexistent" - -[flow] -preset = "rtl2gds" -run = "default" -''') + toml = self._valid_toml(tmp_path, frequency_mhz="0") + (project_dir / "ecc.toml").write_text(toml) rc = cli_main.run(["config", "--resolved", "--project", str(project_dir)]) assert rc == 1 @@ -1252,20 +1277,7 @@ def test_empty_rtl_rejected(self, tmp_path, capsys, monkeypatch): _mock_pdk_validation(monkeypatch) project_dir = tmp_path / "bad_rtl" project_dir.mkdir() - (project_dir / "ecc.toml").write_text('''[design] -name = "gcd" -top = "gcd" -rtl = [] -clock_port = "clk" -frequency_mhz = 100.0 - -[pdk] -name = "ics55" -root = "/tmp/nonexistent" - -[flow] -preset = "rtl2gds" -run = "default" -''') + toml = self._valid_toml(tmp_path, rtl="[]") + (project_dir / "ecc.toml").write_text(toml) rc = cli_main.run(["config", "--resolved", "--project", str(project_dir)]) assert rc == 1 From 8317fde0af2f6fdf33b9dc8fb9758f3e79fb3c1e Mon Sep 17 00:00:00 2001 From: Emin Date: Sat, 2 May 2026 11:54:33 +0800 Subject: [PATCH 030/104] fix(cli): resolve RTL with _resolve_path, distinguish corrupt flow.json, fail on malformed metrics --- chipcompiler/cli/config_view.py | 3 +- chipcompiler/cli/diagnose.py | 10 ++-- chipcompiler/cli/inspect.py | 45 ++++++++++++--- test/cli/test_cli_inspect.py | 99 +++++++++++++++++++++++++++++++++ 4 files changed, 145 insertions(+), 12 deletions(-) diff --git a/chipcompiler/cli/config_view.py b/chipcompiler/cli/config_view.py index 4b3138a4..8f867d2a 100644 --- a/chipcompiler/cli/config_view.py +++ b/chipcompiler/cli/config_view.py @@ -7,6 +7,7 @@ def build_project_config_items(project_dir: str, run_dir: str, project: str | None = None, run_id: str | None = None) -> tuple[list[dict], int]: from chipcompiler.cli.config import ( + _resolve_path, find_config_path, load_project_config, resolve_pdk_root, @@ -55,7 +56,7 @@ def build_project_config_items(project_dir: str, run_dir: str, # RTL entries for i, rtl in enumerate(cfg.design_rtl): - rtl_resolved = os.path.normpath(os.path.join(project_dir, rtl)) + rtl_resolved = os.path.normpath(_resolve_path(project_dir, rtl)) items.append({ "kind": "config", "scope": "project", diff --git a/chipcompiler/cli/diagnose.py b/chipcompiler/cli/diagnose.py index d665319e..97d1790a 100644 --- a/chipcompiler/cli/diagnose.py +++ b/chipcompiler/cli/diagnose.py @@ -85,6 +85,7 @@ def build_diagnose_issues(run_dir: str, step_token: str | None = None, project: str | None = None, run_id: str | None = None) -> tuple[list[dict], int]: from chipcompiler.cli.inspect import ( + CORRUPT_FLOW_JSON, discover_step_dirs, 
read_flow_json, _safe_steps, @@ -97,14 +98,15 @@ def build_diagnose_issues(run_dir: str, step_token: str | None = None, flow_data = read_flow_json(run_dir) if flow_data is None: - if os.path.isfile(os.path.join(run_dir, "home", "flow.json")): - issues.append(_make_issue("invalid_flow_json", "error", display_run, - project=project, run_id=run_id)) - return issues, 1 issues.append(_make_issue("missing_run", "error", display_run, project=project, run_id=run_id)) return issues, 1 + if flow_data is CORRUPT_FLOW_JSON: + issues.append(_make_issue("invalid_flow_json", "error", display_run, + project=project, run_id=run_id)) + return issues, 1 + steps = _safe_steps(flow_data) step_dirs = discover_step_dirs(run_dir) diff --git a/chipcompiler/cli/inspect.py b/chipcompiler/cli/inspect.py index 7525e95b..57caa110 100644 --- a/chipcompiler/cli/inspect.py +++ b/chipcompiler/cli/inspect.py @@ -26,7 +26,10 @@ def resolve_run_dir(project_dir: str, run_id: str | None = None) -> tuple[str, s return os.path.join(project_dir, "runs", run_id), run_id -def read_flow_json(run_dir: str) -> dict | None: +CORRUPT_FLOW_JSON = "CORRUPT" + + +def read_flow_json(run_dir: str) -> dict | str | None: path = os.path.join(run_dir, "home", "flow.json") if not os.path.isfile(path): return None @@ -35,7 +38,7 @@ def read_flow_json(run_dir: str) -> dict | None: data = json.load(f) return data if isinstance(data, dict) else None except (json.JSONDecodeError, OSError): - return None + return CORRUPT_FLOW_JSON def _safe_steps(flow_data: dict) -> list[dict]: @@ -76,6 +79,16 @@ def build_status_lines(run_dir: str, project: str | None = None, ) return [line], 1 + if flow_data is CORRUPT_FLOW_JSON: + line = format_line( + run=run_id or "default", + status="corrupt", + workspace=run_dir, + status_cmd=disclosure_cmd("ecc status", project, run_id), + log=disclosure_cmd("ecc log", project, run_id), + ) + return [line], 1 + run_status = get_run_status(flow_data) lines = [] @@ -108,6 +121,9 @@ def build_status_json(run_dir: str, run_id: str | None = None) -> tuple[dict, in if flow_data is None: return {"run": display_run, "status": "missing", "workspace": run_dir}, 1 + if flow_data is CORRUPT_FLOW_JSON: + return {"run": display_run, "status": "corrupt", "workspace": run_dir}, 1 + run_status = get_run_status(flow_data) steps = [] for step in _safe_steps(flow_data): @@ -127,6 +143,9 @@ def build_status_jsonl(run_dir: str, run_id: str | None = None) -> tuple[list[di if flow_data is None: return [{"run": display_run, "status": "missing", "workspace": run_dir}], 1 + if flow_data is CORRUPT_FLOW_JSON: + return [{"kind": "run", "run": display_run, "status": "corrupt", "workspace": run_dir}], 1 + run_status = get_run_status(flow_data) objects = [{"kind": "run", "run": display_run, "status": run_status, "workspace": run_dir}] @@ -331,12 +350,13 @@ def discover_metrics(run_dir: str, step_token: str | None = None) -> dict[str, s return result -def read_metrics(path: str) -> dict: +def read_metrics(path: str) -> dict | None: try: with open(path) as f: - return json.load(f) + data = json.load(f) + return data if isinstance(data, dict) else None except (json.JSONDecodeError, OSError): - return {} + return None def build_metrics_lines(run_dir: str, step_token: str | None = None, @@ -371,9 +391,17 @@ def build_metrics_lines(run_dir: str, step_token: str | None = None, )], 0 lines = [] + has_corrupt = False for token, path in sorted(metrics_files.items()): data = read_metrics(path) - if not data: + if data is None: + has_corrupt = True + 
lines.append(format_line( + metric_step=token, + status="corrupt", + path=os.path.relpath(path, run_dir), + log=disclosure_cmd(f"ecc log {token} --errors", project, run_id), + )) continue for raw_key, value in data.items(): norm_key = normalize_metric_key(raw_key) @@ -384,7 +412,7 @@ def build_metrics_lines(run_dir: str, step_token: str | None = None, source=os.path.relpath(path, run_dir), inspect=disclosure_cmd(f"ecc metrics {token} --json", project, run_id), )) - return lines, 0 + return lines, 1 if has_corrupt else 0 def _collect_metrics(run_dir: str, step_token: str | None, @@ -398,6 +426,9 @@ def _collect_metrics(run_dir: str, step_token: str | None, items = [] for token, path in sorted(metrics_files.items()): data = read_metrics(path) + if data is None: + return [{"status": "corrupt", "metric_step": token, + "log_cmd": disclosure_cmd(f"ecc log {token} --errors", project, run_id)}], 1 for raw_key, value in data.items(): items.append({ "metric": normalize_metric_key(raw_key), diff --git a/test/cli/test_cli_inspect.py b/test/cli/test_cli_inspect.py index 67a3df64..c7739020 100644 --- a/test/cli/test_cli_inspect.py +++ b/test/cli/test_cli_inspect.py @@ -1281,3 +1281,102 @@ def test_empty_rtl_rejected(self, tmp_path, capsys, monkeypatch): (project_dir / "ecc.toml").write_text(toml) rc = cli_main.run(["config", "--resolved", "--project", str(project_dir)]) assert rc == 1 + + +# =========================================================================== +# Regression tests for Codex Round 4 code review (Round 5) +# =========================================================================== + + +class TestCorruptFlowJson: + def test_corrupt_flow_json_status_reports_corrupt(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + os.makedirs(os.path.join(run_dir, "home"), exist_ok=True) + with open(os.path.join(run_dir, "home", "flow.json"), "w") as f: + f.write("BROKEN{{{") + rc = cli_main.run(["status", "--project", project_dir]) + assert rc == 1 + out = capsys.readouterr().out + assert "corrupt" in out + + def test_missing_flow_json_status_reports_missing(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + os.makedirs(run_dir, exist_ok=True) + rc = cli_main.run(["status", "--project", project_dir]) + assert rc == 1 + out = capsys.readouterr().out + assert "missing" in out + + def test_corrupt_flow_json_json_reports_corrupt(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + os.makedirs(os.path.join(run_dir, "home"), exist_ok=True) + with open(os.path.join(run_dir, "home", "flow.json"), "w") as f: + f.write("BROKEN{{{") + rc = cli_main.run(["status", "--json", "--project", project_dir]) + assert rc == 1 + data = json.loads(capsys.readouterr().out) + assert data["status"] == "corrupt" + + +class TestCorruptMetricsJson: + def test_malformed_metrics_reports_corrupt_text(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + _create_flow_json(run_dir, [ + {"name": "CTS", "tool": "ecc", "state": "Success", "runtime": "0:00:04"}, + ]) + _create_step_dir(run_dir, "CTS", "ecc", + subdirs=["analysis"], + files={"analysis/CTS_metrics.json": "NOT JSON{{{"}) + rc = cli_main.run(["metrics", "cts", "--project", project_dir]) + assert rc == 1 + out = capsys.readouterr().out + assert "corrupt" in out 
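+        # Text mode flags the unreadable CTS_metrics.json as corrupt and exits
+        # non-zero; the --json variant below asserts the same condition on the
+        # parsed payload.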
+ + def test_malformed_metrics_reports_corrupt_json(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + _create_flow_json(run_dir, [ + {"name": "CTS", "tool": "ecc", "state": "Success", "runtime": "0:00:04"}, + ]) + _create_step_dir(run_dir, "CTS", "ecc", + subdirs=["analysis"], + files={"analysis/CTS_metrics.json": "NOT JSON{{{"}) + rc = cli_main.run(["metrics", "cts", "--json", "--project", project_dir]) + assert rc == 1 + data = json.loads(capsys.readouterr().out) + assert data["status"] == "corrupt" + + +class TestRtlPathResolution: + def test_absolute_rtl_resolved_correctly(self, tmp_path, capsys, monkeypatch): + _mock_pdk_validation(monkeypatch) + project_dir = tmp_path / "proj" + project_dir.mkdir() + rtl_dir = tmp_path / "external_rtl" + rtl_dir.mkdir() + (rtl_dir / "gcd.v").write_text("module gcd; endmodule") + (project_dir / "ecc.toml").write_text(f'''[design] +name = "gcd" +top = "gcd" +rtl = ["{rtl_dir / "gcd.v"}"] +clock_port = "clk" +frequency_mhz = 100.0 + +[pdk] +name = "ics55" +root = "{tmp_path / "pdk"}" + +[flow] +preset = "rtl2gds" +run = "default" +''') + (tmp_path / "pdk").mkdir(exist_ok=True) + rc = cli_main.run(["config", "--resolved", "--json", "--project", str(project_dir)]) + assert rc == 0 + data = json.loads(capsys.readouterr().out) + rtl_item = next(i for i in data["config"] if i["key"] == "design.rtl.0") + assert rtl_item["resolved"] == str(rtl_dir / "gcd.v") From 447f2e5200086d854183964f78da1510c1c82352 Mon Sep 17 00:00:00 2001 From: Emin Date: Sat, 2 May 2026 11:55:06 +0800 Subject: [PATCH 031/104] chore: gitignore codex review artifacts --- .gitignore | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.gitignore b/.gitignore index bf09d5f3..8a5f20e5 100644 --- a/.gitignore +++ b/.gitignore @@ -186,3 +186,6 @@ chipcompiler/tools/ecc_dreamplace/dreamplace .humanize/ humanize-* docs/superpowers/ +findings.md +progress.md +task_plan.md From b4bfc3826fa67de01e28a158972e75c2f5490188 Mon Sep 17 00:00:00 2001 From: Emin Date: Sat, 2 May 2026 12:07:21 +0800 Subject: [PATCH 032/104] fix(cli): report pending steps in diagnose, add kind to missing-run JSONL --- chipcompiler/cli/diagnose.py | 4 ++++ chipcompiler/cli/inspect.py | 2 +- test/cli/test_cli_inspect.py | 41 ++++++++++++++++++++++++++++++++++++ 3 files changed, 46 insertions(+), 1 deletion(-) diff --git a/chipcompiler/cli/diagnose.py b/chipcompiler/cli/diagnose.py index 97d1790a..eb08abd1 100644 --- a/chipcompiler/cli/diagnose.py +++ b/chipcompiler/cli/diagnose.py @@ -128,6 +128,10 @@ def build_diagnose_issues(run_dir: str, step_token: str | None = None, issues.append(_make_issue("failed_step", "error", display_run, step=token, status=state, project=project, run_id=run_id)) + elif state == "pending": + issues.append(_make_issue("pending_step", "warning", display_run, + step=token, status=state, + project=project, run_id=run_id)) elif state == "ongoing": issues.append(_make_issue("ongoing_step", "warning", display_run, step=token, status=state, diff --git a/chipcompiler/cli/inspect.py b/chipcompiler/cli/inspect.py index 57caa110..f390bc23 100644 --- a/chipcompiler/cli/inspect.py +++ b/chipcompiler/cli/inspect.py @@ -141,7 +141,7 @@ def build_status_jsonl(run_dir: str, run_id: str | None = None) -> tuple[list[di flow_data = read_flow_json(run_dir) display_run = run_id or "default" if flow_data is None: - return [{"run": display_run, "status": "missing", "workspace": run_dir}], 1 + return [{"kind": "run", "run": display_run, 
"status": "missing", "workspace": run_dir}], 1 if flow_data is CORRUPT_FLOW_JSON: return [{"kind": "run", "run": display_run, "status": "corrupt", "workspace": run_dir}], 1 diff --git a/test/cli/test_cli_inspect.py b/test/cli/test_cli_inspect.py index c7739020..e56a6490 100644 --- a/test/cli/test_cli_inspect.py +++ b/test/cli/test_cli_inspect.py @@ -1380,3 +1380,44 @@ def test_absolute_rtl_resolved_correctly(self, tmp_path, capsys, monkeypatch): data = json.loads(capsys.readouterr().out) rtl_item = next(i for i in data["config"] if i["key"] == "design.rtl.0") assert rtl_item["resolved"] == str(rtl_dir / "gcd.v") + + +# =========================================================================== +# Regression tests for Codex Round 5 code review (Round 6) +# =========================================================================== + + +class TestPendingStepDiagnose: + def test_pending_step_creates_issue(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + _create_flow_json(run_dir, [ + {"name": "CTS", "tool": "ecc", "state": "Pending", "runtime": ""}, + ]) + _create_step_dir(run_dir, "CTS", "ecc", + subdirs=["log", "output", "analysis", "config"], + files={"log/cts.log": "ok\n", + "output/design.def": "def", + "analysis/CTS_metrics.json": '{"freq": 100}', + "config/cts_config.json": "{}"}) + + rc = cli_main.run(["diagnose", "cts", "--project", project_dir]) + assert rc == 0 + out = capsys.readouterr().out + assert "issue=pending_step" in out + assert "status=pending" in out + + +class TestMissingRunJsonlKind: + def test_missing_run_jsonl_has_kind(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + os.makedirs(run_dir, exist_ok=True) + + rc = cli_main.run(["status", "--jsonl", "--project", project_dir]) + assert rc == 1 + out = capsys.readouterr().out + data = [json.loads(line) for line in out.strip().split("\n") if line.strip()] + assert data[0]["kind"] == "run" + assert data[0]["status"] == "missing" + From cce7074edf8d495ac6c2d090b04e5ab978692925 Mon Sep 17 00:00:00 2001 From: Emin Date: Sat, 2 May 2026 12:27:45 +0800 Subject: [PATCH 033/104] fix(cli): tighten log error matching to avoid false positives from clean summaries --- chipcompiler/cli/inspect.py | 11 ++++++++-- test/cli/test_cli_inspect.py | 39 ++++++++++++++++++++++++++++++++++++ 2 files changed, 48 insertions(+), 2 deletions(-) diff --git a/chipcompiler/cli/inspect.py b/chipcompiler/cli/inspect.py index f390bc23..cae3c5f5 100644 --- a/chipcompiler/cli/inspect.py +++ b/chipcompiler/cli/inspect.py @@ -162,6 +162,15 @@ def build_status_jsonl(run_dir: str, run_id: str | None = None) -> tuple[list[di ERROR_PATTERNS = re.compile(r"(error|failed|traceback)", re.IGNORECASE) +_CLEAN_SUMMARY = re.compile(r"^\s*\d+\s+(error|failed)|^no\s+(error|failed)", re.IGNORECASE) + + +def filter_errors(lines: list[str]) -> list[str]: + result = [] + for line in lines: + if ERROR_PATTERNS.search(line) and not _CLEAN_SUMMARY.search(line): + result.append(line) + return result def discover_step_dirs(run_dir: str) -> dict[str, str]: @@ -203,8 +212,6 @@ def discover_logs(run_dir: str, step_token: str | None = None) -> list[str]: ) -def filter_errors(lines: list[str]) -> list[str]: - return [line for line in lines if ERROR_PATTERNS.search(line)] def read_log_file(path: str) -> list[str]: diff --git a/test/cli/test_cli_inspect.py b/test/cli/test_cli_inspect.py index e56a6490..b687451a 100644 --- 
a/test/cli/test_cli_inspect.py +++ b/test/cli/test_cli_inspect.py @@ -1421,3 +1421,42 @@ def test_missing_run_jsonl_has_kind(self, tmp_path, capsys): assert data[0]["kind"] == "run" assert data[0]["status"] == "missing" + +class TestLogErrorMatching: + def test_clean_summary_not_counted_as_error(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + _create_flow_json(run_dir, [ + {"name": "CTS", "tool": "ecc", "state": "Success", "runtime": "0:00:04"}, + ]) + _create_step_dir(run_dir, "CTS", "ecc", + subdirs=["log", "output", "analysis", "config"], + files={"log/cts.log": "CTS completed successfully\n0 errors\nNo errors found\n0 failed checks\n", + "output/design.def": "def", + "analysis/CTS_metrics.json": '{"freq": 100}', + "config/cts_config.json": "{}"}) + + rc = cli_main.run(["diagnose", "cts", "--project", project_dir]) + assert rc == 0 + out = capsys.readouterr().out + assert "log_errors" not in out + + def test_real_errors_still_detected(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + _create_flow_json(run_dir, [ + {"name": "CTS", "tool": "ecc", "state": "Success", "runtime": "0:00:04"}, + ]) + _create_step_dir(run_dir, "CTS", "ecc", + subdirs=["log", "output", "analysis", "config"], + files={"log/cts.log": "CTS completed\nError: bad thing\nTraceback (most recent call):\n0 errors\n", + "output/design.def": "def", + "analysis/CTS_metrics.json": '{"freq": 100}', + "config/cts_config.json": "{}"}) + + rc = cli_main.run(["diagnose", "cts", "--project", project_dir]) + assert rc == 1 + out = capsys.readouterr().out + assert "issue=log_errors" in out + assert "count=2" in out + From d36ff37dfaa9e1007a355d71c79faf6844bbed02 Mon Sep 17 00:00:00 2001 From: Emin Date: Sat, 2 May 2026 12:28:20 +0800 Subject: [PATCH 034/104] docs: update README for project-oriented CLI --- README.cn.md | 48 ++++++++++++++++++++++++++++++-------------- README.md | 49 +++++++++++++++++++++++++++++++-------------- docs/development.md | 48 +++++++++++++++++++++++++++++--------------- 3 files changed, 99 insertions(+), 46 deletions(-) diff --git a/README.cn.md b/README.cn.md index e026012d..bde0ad92 100644 --- a/README.cn.md +++ b/README.cn.md @@ -24,7 +24,7 @@ ECOS Chip Compiler 是一个**开源芯片设计自动化解决方案**,集成 GUI(ECOS Studio)已迁移至 [ecos-studio](https://github.com/0xharry/ecos-studio) 仓库。 **使用方式:** -- **CLI (`cli`)** - 命令行流程执行 +- **CLI (`ecc`)** - 面向项目的命令行流程执行 - **Python API** - 将 `chipcompiler` 作为库使用 @@ -32,22 +32,40 @@ GUI(ECOS Studio)已迁移至 [ecos-studio](https://github.com/0xharry/ecos-s ### CLI 流程运行 -可以使用 `nix run .#cli -- ...` 直接创建 workspace 并执行完整 RTL2GDS 流程。 +可以使用 `nix run .#cli -- ...` 创建 ECC 项目,校验 `ecc.toml`,并执行完整 RTL2GDS 流程。 ```bash -nix run .#cli -- --workspace ./ws \ - --rtl ./rtl/top.v \ - --design top \ - --top top \ - --clock clk \ - --pdk-root /path/to/ics55 -nix run .#cli -- --workspace ./ws \ - --rtl ./rtl/filelist.f \ - --design top \ - --top top \ - --clock clk \ - --pdk-root /path/to/ics55 \ - --freq 200 +nix run .#cli -- init gcd +cp ./rtl/gcd.v gcd/rtl/gcd.v +``` + +编辑 `gcd/ecc.toml`: + +```toml +[design] +name = "gcd" +top = "gcd" +rtl = ["rtl/gcd.v"] +clock_port = "clk" +frequency_mhz = 100.0 + +[pdk] +name = "ics55" +root = "/path/to/ics55" + +[flow] +preset = "rtl2gds" +run = "default" +``` + +然后校验并运行: + +```bash +nix run .#cli -- check --project gcd +nix run .#cli -- run --project gcd +nix run .#cli -- status --project gcd +nix run .#cli -- 
metrics --project gcd +nix run .#cli -- log --project gcd ``` ## 功能特性 diff --git a/README.md b/README.md index 63024725..363580ce 100644 --- a/README.md +++ b/README.md @@ -24,7 +24,7 @@ ECOS Chip Compiler is an **open-source chip design automation solution** that in The GUI (ECOS Studio) has been moved to the [ecos-studio](https://github.com/0xharry/ecos-studio) repo. **How to use:** -- **CLI (`cli`)** - Command-line flow execution +- **CLI (`ecc`)** - Project-oriented command-line flow execution - **Python API** - Use `chipcompiler` as a library @@ -32,22 +32,41 @@ The GUI (ECOS Studio) has been moved to the [ecos-studio](https://github.com/0xh ### CLI Flow Runner -Use `nix run .#cli -- ...` to create a workspace and run the full RTL2GDS flow directly. +Use `nix run .#cli -- ...` to create an ECC project, validate its `ecc.toml`, +and run the full RTL2GDS flow. ```bash -nix run .#cli -- --workspace ./ws \ - --rtl ./rtl/top.v \ - --design top \ - --top top \ - --clock clk \ - --pdk-root /path/to/ics55 -nix run .#cli -- --workspace ./ws \ - --rtl ./rtl/filelist.f \ - --design top \ - --top top \ - --clock clk \ - --pdk-root /path/to/ics55 \ - --freq 200 +nix run .#cli -- init gcd +cp ./rtl/gcd.v gcd/rtl/gcd.v +``` + +Edit `gcd/ecc.toml`: + +```toml +[design] +name = "gcd" +top = "gcd" +rtl = ["rtl/gcd.v"] +clock_port = "clk" +frequency_mhz = 100.0 + +[pdk] +name = "ics55" +root = "/path/to/ics55" + +[flow] +preset = "rtl2gds" +run = "default" +``` + +Then validate and run: + +```bash +nix run .#cli -- check --project gcd +nix run .#cli -- run --project gcd +nix run .#cli -- status --project gcd +nix run .#cli -- metrics --project gcd +nix run .#cli -- log --project gcd ``` ## Features diff --git a/docs/development.md b/docs/development.md index 32f82442..32d669d6 100644 --- a/docs/development.md +++ b/docs/development.md @@ -337,24 +337,40 @@ Create `chipcompiler/tools//` with `__init__.py`, `builder.py`, `runner.py For command-line automation and scripting, run CLI via Nix: ```bash -# Run directly from project root -nix run .#cli -- --workspace ./ws \ - --rtl ./rtl/top.v \ - --design top \ - --top top \ - --clock clk \ - --pdk-root /path/to/ics55 - -# Filelist mode -nix run .#cli -- --workspace ./ws \ - --rtl ./rtl/filelist.f \ - --design top \ - --top top \ - --clock clk \ - --pdk-root /path/to/ics55 \ - --freq 200 +# Create a project skeleton with ecc.toml, rtl/, constraints/, and runs/ +nix run .#cli -- init gcd + +# After editing gcd/ecc.toml and adding RTL files +nix run .#cli -- check --project gcd +nix run .#cli -- run --project gcd +nix run .#cli -- status --project gcd +nix run .#cli -- metrics --project gcd +nix run .#cli -- log --project gcd ``` +The project config is the CLI input surface: + +```toml +[design] +name = "gcd" +top = "gcd" +rtl = ["rtl/gcd.v"] +clock_port = "clk" +frequency_mhz = 100.0 + +[pdk] +name = "ics55" +root = "/path/to/ics55" + +[flow] +preset = "rtl2gds" +run = "default" +``` + +For filelist mode, set `design.rtl` to a single filelist path, for example +`rtl = ["rtl/filelist.f"]`. Multiple RTL sources should be listed in the +filelist rather than as multiple `design.rtl` entries. + If you need an interactive environment for development, use `nix develop`. 
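+For reference, a filelist for the configuration above might look like the
+sketch below (the file names are hypothetical; paths are assumed to resolve
+relative to the filelist itself, and `ecc check` rejects entries that do not
+exist as well as directives the filelist parser cannot handle):
+
+```text
+# rtl/filelist.f -- hypothetical example
+gcd.v
+gcd_ctrl.v
+```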
REST API reference: Examples: **[examples/gcd](examples/gcd/README.md)** From 95e4227b3e23abbfbce2877d7c082ea4ce00d2b1 Mon Sep 17 00:00:00 2001 From: Emin Date: Sat, 2 May 2026 12:47:59 +0800 Subject: [PATCH 035/104] fix(cli): validate filelist contents during ecc check --- chipcompiler/cli/config.py | 8 ++++++++ test/cli/test_cli_main.py | 40 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 48 insertions(+) diff --git a/chipcompiler/cli/config.py b/chipcompiler/cli/config.py index adae326e..7b9e0f47 100644 --- a/chipcompiler/cli/config.py +++ b/chipcompiler/cli/config.py @@ -140,6 +140,14 @@ def validate_project_config(cfg: ProjectConfig) -> list[str]: errors.append(f"rtl path does not exist: {cfg.design_rtl[0]}") elif os.path.isdir(rtl_path): errors.append(f"rtl path must be a file, not a directory: {cfg.design_rtl[0]}") + else: + suffix = os.path.splitext(rtl_path)[1].lower() + FILELIST_SUFFIXES = {".f", ".fl", ".filelist"} + if suffix in FILELIST_SUFFIXES: + from chipcompiler.utility.filelist import validate_filelist + _, missing = validate_filelist(rtl_path) + if missing: + errors.append(f"filelist references missing files: {', '.join(missing)}") return errors diff --git a/test/cli/test_cli_main.py b/test/cli/test_cli_main.py index 2464624c..b9a2018b 100644 --- a/test/cli/test_cli_main.py +++ b/test/cli/test_cli_main.py @@ -803,3 +803,43 @@ class TestEdgeCases: def test_no_command_returns_nonzero(self, capsys): rc = cli_main.run([]) assert rc == 1 + + +class TestCheckFilelistValidation: + def test_check_fails_filelist_with_missing_sources(self, tmp_path, monkeypatch): + from chipcompiler.cli.config import _validate_pdk_contents + monkeypatch.setattr(_validate_pdk_contents, "__wrapped__", + lambda *a, **k: None, raising=False) + monkeypatch.setattr("chipcompiler.cli.config._validate_pdk_contents", + lambda *a, **k: None) + + project_dir = tmp_path / "flproj" + project_dir.mkdir() + (project_dir / "rtl").mkdir() + (project_dir / "rtl" / "gcd.v").write_text("module gcd; endmodule") + + filelist = project_dir / "rtl" / "files.f" + filelist.write_text("gcd.v\nmissing.v\nother_missing.v\n") + + pdk_root = tmp_path / "ics55" + pdk_root.mkdir() + + toml = f'''[design] +name = "gcd" +top = "gcd" +rtl = ["rtl/files.f"] +clock_port = "clk" +frequency_mhz = 100.0 + +[pdk] +name = "ics55" +root = "{pdk_root}" + +[flow] +preset = "rtl2gds" +run = "default" +''' + (project_dir / "ecc.toml").write_text(toml) + rc = cli_main.run(["check", "--project", str(project_dir)]) + assert rc == 1 + From 461e12e7cb064c0ffe16ddbcaddb25322e85ff64 Mon Sep 17 00:00:00 2001 From: Emin Date: Sat, 2 May 2026 12:55:05 +0800 Subject: [PATCH 036/104] fix(cli): catch filelist parse errors during ecc check instead of crashing --- chipcompiler/cli/config.py | 9 ++++++--- test/cli/test_cli_main.py | 34 ++++++++++++++++++++++++++++++++++ 2 files changed, 40 insertions(+), 3 deletions(-) diff --git a/chipcompiler/cli/config.py b/chipcompiler/cli/config.py index 7b9e0f47..9c0ca65a 100644 --- a/chipcompiler/cli/config.py +++ b/chipcompiler/cli/config.py @@ -145,9 +145,12 @@ def validate_project_config(cfg: ProjectConfig) -> list[str]: FILELIST_SUFFIXES = {".f", ".fl", ".filelist"} if suffix in FILELIST_SUFFIXES: from chipcompiler.utility.filelist import validate_filelist - _, missing = validate_filelist(rtl_path) - if missing: - errors.append(f"filelist references missing files: {', '.join(missing)}") + try: + _, missing = validate_filelist(rtl_path) + if missing: + errors.append(f"filelist references missing files: {', 
'.join(missing)}") + except (ValueError, OSError) as e: + errors.append(f"invalid filelist {cfg.design_rtl[0]}: {e}") return errors diff --git a/test/cli/test_cli_main.py b/test/cli/test_cli_main.py index b9a2018b..ca77793b 100644 --- a/test/cli/test_cli_main.py +++ b/test/cli/test_cli_main.py @@ -835,6 +835,40 @@ def test_check_fails_filelist_with_missing_sources(self, tmp_path, monkeypatch): name = "ics55" root = "{pdk_root}" +[flow] +preset = "rtl2gds" +run = "default" +''' + (project_dir / "ecc.toml").write_text(toml) + rc = cli_main.run(["check", "--project", str(project_dir)]) + assert rc == 1 + + def test_check_fails_invalid_filelist_directive(self, tmp_path, monkeypatch): + from chipcompiler.cli.config import _validate_pdk_contents + monkeypatch.setattr("chipcompiler.cli.config._validate_pdk_contents", + lambda *a, **k: None) + + project_dir = tmp_path / "flproj2" + project_dir.mkdir() + (project_dir / "rtl").mkdir() + + filelist = project_dir / "rtl" / "files.f" + filelist.write_text("gcd.v\n-f other.f\n") + + pdk_root = tmp_path / "ics55" + pdk_root.mkdir() + + toml = f'''[design] +name = "gcd" +top = "gcd" +rtl = ["rtl/files.f"] +clock_port = "clk" +frequency_mhz = 100.0 + +[pdk] +name = "ics55" +root = "{pdk_root}" + [flow] preset = "rtl2gds" run = "default" From 1e0389194f262ee882e62e65f2f39a179d6fd46e Mon Sep 17 00:00:00 2001 From: Emin Date: Sat, 2 May 2026 13:05:47 +0800 Subject: [PATCH 037/104] refactor(cli): simplify CLI modules - remove dead code, deduplicate, flatten control flow --- chipcompiler/cli/artifacts.py | 3 --- chipcompiler/cli/config.py | 6 ++---- chipcompiler/cli/config_view.py | 34 ++++++++++++++++----------------- chipcompiler/cli/diagnose.py | 28 ++++----------------------- chipcompiler/cli/inspect.py | 31 ++++++++++++------------------ chipcompiler/cli/main.py | 22 +++++++-------------- chipcompiler/cli/output.py | 4 ---- 7 files changed, 42 insertions(+), 86 deletions(-) diff --git a/chipcompiler/cli/artifacts.py b/chipcompiler/cli/artifacts.py index 319db680..3a8343e8 100644 --- a/chipcompiler/cli/artifacts.py +++ b/chipcompiler/cli/artifacts.py @@ -48,9 +48,6 @@ def discover_artifacts(run_dir: str, step_token: str | None = None, "inspect_cmd": disclosure_cmd(f"ecc artifacts {token} --json", project, run_id), }) - if not artifacts and step_token is not None: - return [], 0 - if not artifacts: return [], 0 diff --git a/chipcompiler/cli/config.py b/chipcompiler/cli/config.py index 9c0ca65a..8cb79eac 100644 --- a/chipcompiler/cli/config.py +++ b/chipcompiler/cli/config.py @@ -5,6 +5,8 @@ SUPPORTED_PDK_NAMES = {"ics55"} SUPPORTED_FLOW_PRESETS = {"rtl2gds"} SUPPORTED_FLOW_RUNS = {"default"} +FILELIST_SUFFIXES = {".f", ".fl", ".filelist"} +RTL_SUFFIXES = {".v", ".sv", ".svh", ".vh"} @dataclass @@ -142,7 +144,6 @@ def validate_project_config(cfg: ProjectConfig) -> list[str]: errors.append(f"rtl path must be a file, not a directory: {cfg.design_rtl[0]}") else: suffix = os.path.splitext(rtl_path)[1].lower() - FILELIST_SUFFIXES = {".f", ".fl", ".filelist"} if suffix in FILELIST_SUFFIXES: from chipcompiler.utility.filelist import validate_filelist try: @@ -172,9 +173,6 @@ def resolve_rtl(cfg: ProjectConfig) -> tuple[str, str, str]: rtl_path = _resolve_path(cfg.project_dir, cfg.design_rtl[0]) suffix = os.path.splitext(rtl_path)[1].lower() - FILELIST_SUFFIXES = {".f", ".fl", ".filelist"} - RTL_SUFFIXES = {".v", ".sv", ".svh", ".vh"} - if suffix in FILELIST_SUFFIXES: return ("filelist", "", rtl_path) if suffix in RTL_SUFFIXES: diff --git 
a/chipcompiler/cli/config_view.py b/chipcompiler/cli/config_view.py index 8f867d2a..37af0c8f 100644 --- a/chipcompiler/cli/config_view.py +++ b/chipcompiler/cli/config_view.py @@ -28,7 +28,6 @@ def build_project_config_items(project_dir: str, run_dir: str, return [{"kind": "error", "status": "invalid_config"}], 1 pdk_root = resolve_pdk_root(cfg) - display_run = run_id or "default" items = [] entries = [ @@ -147,26 +146,27 @@ def build_config_lines(items: list[dict], project: str | None = None, if not items: return [], 0 - if items[0].get("config_status") == "none": - s = items[0] + first = items[0] + if first.get("config_status") == "none": return [format_line( - step=s["step"], + step=first["step"], config_status="none", - artifacts=s.get("artifacts"), + artifacts=first.get("artifacts"), )], 0 - if items[0].get("status") in ("unknown_step", "missing_config", "invalid_config"): - if items[0].get("status") == "unknown_step": - return [format_line( - step=items[0].get("step", ""), - status="unknown_step", - inspect=disclosure_cmd("ecc status", project, run_id), - )], 1 - if items[0].get("status") == "missing_config": - return [format_line( - status="missing_config", - inspect=disclosure_cmd("ecc check", project), - )], 1 + status = first.get("status") + if status == "unknown_step": + return [format_line( + step=first.get("step", ""), + status="unknown_step", + inspect=disclosure_cmd("ecc status", project, run_id), + )], 1 + if status == "missing_config": + return [format_line( + status="missing_config", + inspect=disclosure_cmd("ecc check", project), + )], 1 + if status == "invalid_config": return [format_line( status="invalid_config", inspect=disclosure_cmd("ecc check", project), diff --git a/chipcompiler/cli/diagnose.py b/chipcompiler/cli/diagnose.py index eb08abd1..dcfd90ee 100644 --- a/chipcompiler/cli/diagnose.py +++ b/chipcompiler/cli/diagnose.py @@ -54,10 +54,7 @@ def _make_issue(issue: str, severity: str, run: str, obj["count"] = count cmd_kwargs = {"project": project, "run_id": run_id} - if issue == "missing_run": - obj["evidence"] = disclosure_cmd("ecc status", **cmd_kwargs) - obj["run_cmd"] = disclosure_cmd("ecc run", project=project) - elif issue == "invalid_flow_json": + if issue in ("missing_run", "invalid_flow_json"): obj["evidence"] = disclosure_cmd("ecc status", **cmd_kwargs) obj["run_cmd"] = disclosure_cmd("ecc run", project=project) elif issue == "log_errors": @@ -195,27 +192,10 @@ def build_diagnose_lines(run_dir: str, step_token: str | None = None, )], 0 lines = [] + text_keys = ("issue", "severity", "run", "step", "status", "count", + "evidence", "log", "artifacts", "config", "run_cmd") for issue in issues: - fields = {} - fields["issue"] = issue["issue"] - fields["severity"] = issue["severity"] - fields["run"] = issue["run"] - if "step" in issue: - fields["step"] = issue["step"] - if "status" in issue: - fields["status"] = issue["status"] - if "count" in issue: - fields["count"] = issue["count"] - if "evidence" in issue: - fields["evidence"] = issue["evidence"] - if "log" in issue: - fields["log"] = issue["log"] - if "artifacts" in issue: - fields["artifacts"] = issue["artifacts"] - if "config" in issue: - fields["config"] = issue["config"] - if "run_cmd" in issue: - fields["run_cmd"] = issue["run_cmd"] + fields = {k: issue[k] for k in text_keys if k in issue} lines.append(format_line(**fields)) return lines, _exit_code(issues) diff --git a/chipcompiler/cli/inspect.py b/chipcompiler/cli/inspect.py index cae3c5f5..d17b8068 100644 --- a/chipcompiler/cli/inspect.py +++ 
b/chipcompiler/cli/inspect.py @@ -186,32 +186,25 @@ def discover_step_dirs(run_dir: str) -> dict[str, str]: return result +def _list_files(directory: str) -> list[str]: + if not os.path.isdir(directory): + return [] + return sorted( + os.path.join(directory, f) + for f in os.listdir(directory) + if os.path.isfile(os.path.join(directory, f)) + ) + + def discover_logs(run_dir: str, step_token: str | None = None) -> list[str]: if step_token is None: - log_dir = os.path.join(run_dir, "log") - if os.path.isdir(log_dir): - return sorted( - os.path.join(log_dir, f) - for f in os.listdir(log_dir) - if os.path.isfile(os.path.join(log_dir, f)) - ) - return [] + return _list_files(os.path.join(run_dir, "log")) step_dirs = discover_step_dirs(run_dir) if step_token not in step_dirs: return [] - log_dir = os.path.join(step_dirs[step_token], "log") - if not os.path.isdir(log_dir): - return [] - - return sorted( - os.path.join(log_dir, f) - for f in os.listdir(log_dir) - if os.path.isfile(os.path.join(log_dir, f)) - ) - - + return _list_files(os.path.join(step_dirs[step_token], "log")) def read_log_file(path: str) -> list[str]: diff --git a/chipcompiler/cli/main.py b/chipcompiler/cli/main.py index 2118f15d..158f9e67 100644 --- a/chipcompiler/cli/main.py +++ b/chipcompiler/cli/main.py @@ -8,6 +8,7 @@ load_project_config, resolve_project_dir, ) +from chipcompiler.cli.inspect import resolve_run_dir def build_parser() -> argparse.ArgumentParser: @@ -185,7 +186,7 @@ def _cmd_status(args, project_dir: str, project: str | None) -> int: from chipcompiler.cli.inspect import build_status_json, build_status_jsonl, build_status_lines from chipcompiler.cli.output import emit_json, emit_jsonl, emit_text - run_dir, run_id = _resolve_run(project_dir, getattr(args, "run_id", None)) + run_dir, run_id = resolve_run_dir(project_dir, args.run_id) if getattr(args, "jsonl", False): objects, rc = build_status_jsonl(run_dir, run_id) @@ -206,7 +207,7 @@ def _cmd_log(args, project_dir: str, project: str | None) -> int: from chipcompiler.cli.inspect import build_log_jsonl, build_log_lines from chipcompiler.cli.output import emit_jsonl, emit_text - run_dir, run_id = _resolve_run(project_dir, getattr(args, "run_id", None)) + run_dir, run_id = resolve_run_dir(project_dir, args.run_id) if getattr(args, "jsonl", False): objects, rc = build_log_jsonl(run_dir, args.step, args.errors, project, run_id) @@ -226,7 +227,7 @@ def _cmd_metrics(args, project_dir: str, project: str | None) -> int: ) from chipcompiler.cli.output import emit_json, emit_jsonl, emit_text - run_dir, run_id = _resolve_run(project_dir, getattr(args, "run_id", None)) + run_dir, run_id = resolve_run_dir(project_dir, args.run_id) if getattr(args, "jsonl", False): objects, rc = build_metrics_jsonl(run_dir, args.step, project, run_id) @@ -251,7 +252,7 @@ def _cmd_artifacts(args, project_dir: str, project: str | None) -> int: ) from chipcompiler.cli.output import emit_json, emit_jsonl, emit_text - run_dir, run_id = _resolve_run(project_dir, getattr(args, "run_id", None)) + run_dir, run_id = resolve_run_dir(project_dir, args.run_id) if getattr(args, "jsonl", False): objects, rc = build_artifacts_jsonl(run_dir, args.step, project, run_id, project_dir) @@ -278,7 +279,7 @@ def _cmd_config(args, project_dir: str, project: str | None) -> int: ) from chipcompiler.cli.output import emit_json, emit_jsonl, emit_text - run_dir, run_id = _resolve_run(project_dir, getattr(args, "run_id", None)) + run_dir, run_id = resolve_run_dir(project_dir, args.run_id) if args.step is not None: items, rc 
= build_step_config_items(run_dir, args.step, project, run_id, project_dir) @@ -309,7 +310,7 @@ def _cmd_diagnose(args, project_dir: str, project: str | None) -> int: ) from chipcompiler.cli.output import emit_json, emit_jsonl, emit_text - run_dir, run_id = _resolve_run(project_dir, getattr(args, "run_id", None)) + run_dir, run_id = resolve_run_dir(project_dir, args.run_id) if getattr(args, "jsonl", False): objects, rc = build_diagnose_jsonl(run_dir, args.step, project, run_id) @@ -327,15 +328,6 @@ def _cmd_diagnose(args, project_dir: str, project: str | None) -> int: return rc -def _resolve_run(project_dir: str, run_id: str | None = None) -> tuple[str, str | None]: - from chipcompiler.cli.inspect import resolve_run_dir - return resolve_run_dir(project_dir, run_id) - - -def _run_dir(project_dir: str) -> str: - return os.path.join(project_dir, "runs", "default") - - def main() -> None: sys.exit(run()) diff --git a/chipcompiler/cli/output.py b/chipcompiler/cli/output.py index f0273376..e96425c6 100644 --- a/chipcompiler/cli/output.py +++ b/chipcompiler/cli/output.py @@ -116,7 +116,3 @@ def normalize_metric_key(raw_key: str) -> str: s = re.sub(r'_+', '_', s) s = s.strip('_') return s - - -def step_dir_name(step_name: str, tool: str) -> str: - return f"{step_name}_{tool}" From 339b87e7ffba9683444b194867cf3aa7d9fd659b Mon Sep 17 00:00:00 2001 From: Emin Date: Sat, 2 May 2026 13:24:34 +0800 Subject: [PATCH 038/104] refactor(nix): migrate to latest codebase Signed-off-by: Emin --- flake.nix | 2 - nix/chipcompiler/default.nix | 17 ++-- nix/cli/default.nix | 19 ++-- nix/ecc-tools/default.nix | 134 -------------------------- nix/ecc-tools/fix.patch | 12 --- nix/ecc-tools/rustpkgs.nix | 74 -------------- nix/modules/devShells.nix | 1 - nix/overlay.nix | 3 +- nix/python/ecc-dreamplace/default.nix | 65 +++++++++++++ nix/python/ecc-tools/default.nix | 27 ++++++ 10 files changed, 111 insertions(+), 243 deletions(-) delete mode 100644 nix/ecc-tools/default.nix delete mode 100644 nix/ecc-tools/fix.patch delete mode 100644 nix/ecc-tools/rustpkgs.nix create mode 100644 nix/python/ecc-dreamplace/default.nix create mode 100644 nix/python/ecc-tools/default.nix diff --git a/flake.nix b/flake.nix index 943fcde5..cd2b7c8c 100644 --- a/flake.nix +++ b/flake.nix @@ -32,7 +32,6 @@ ]; systems = [ "x86_64-linux" - "aarch64-linux" ]; flake.overlays.default = overlay; perSystem = @@ -57,7 +56,6 @@ }; packages = { inherit (pkgs) - ecc-tools chipcompiler cli ; diff --git a/nix/chipcompiler/default.nix b/nix/chipcompiler/default.nix index 23d311ad..6f8ea436 100644 --- a/nix/chipcompiler/default.nix +++ b/nix/chipcompiler/default.nix @@ -1,7 +1,8 @@ { lib, python3Packages, - ecc-tools, + ecc-dreamplace-python, + ecc-tools-python, makeWrapper, }: @@ -22,12 +23,6 @@ python3Packages.buildPythonPackage { ]; }; - postPatch = '' - mkdir -p thirdparty/ecc-tools/bin - install -m 755 ${ecc-tools}/bin/*.cpython-*.so thirdparty/ecc-tools/bin/ - install -m 755 ${ecc-tools}/bin/*.cpython-*.so chipcompiler/tools/ecc/bin/ - ''; - postInstall = '' site_packages="$out/${python3Packages.python.sitePackages}" @@ -45,7 +40,10 @@ python3Packages.buildPythonPackage { build-system = with python3Packages; [ uv-build ]; - dependencies = with python3Packages; [ + dependencies = [ + ecc-dreamplace-python + ecc-tools-python + ] ++ (with python3Packages; [ fastapi klayout matplotlib @@ -55,10 +53,11 @@ python3Packages.buildPythonPackage { pyjson5 pyyaml scipy + torch tqdm uvicorn pip - ]; + ]); nativeBuildInputs = [ makeWrapper ]; diff --git 
a/nix/cli/default.nix b/nix/cli/default.nix index a7c64e82..54fbc080 100644 --- a/nix/cli/default.nix +++ b/nix/cli/default.nix @@ -1,7 +1,8 @@ { lib, python3Packages, - ecc-tools, + ecc-dreamplace-python, + ecc-tools-python, yosysWithSlang, makeWrapper, }: @@ -23,12 +24,6 @@ python3Packages.buildPythonPackage { ]; }; - postPatch = '' - mkdir -p thirdparty/ecc-tools/bin - install -m 755 ${ecc-tools}/bin/*.cpython-*.so thirdparty/ecc-tools/bin/ - install -m 755 ${ecc-tools}/bin/*.cpython-*.so chipcompiler/tools/ecc/bin/ - ''; - postInstall = '' site_packages="$out/${python3Packages.python.sitePackages}" @@ -55,7 +50,10 @@ python3Packages.buildPythonPackage { build-system = with python3Packages; [ uv-build ]; - dependencies = with python3Packages; [ + dependencies = [ + ecc-dreamplace-python + ecc-tools-python + ] ++ (with python3Packages; [ fastapi klayout matplotlib @@ -65,9 +63,11 @@ python3Packages.buildPythonPackage { pyjson5 pyyaml scipy + torch tqdm uvicorn - ]; + pip + ]); nativeBuildInputs = [ makeWrapper ]; @@ -76,7 +76,6 @@ python3Packages.buildPythonPackage { pythonImportsCheck = [ "chipcompiler" - "chipcompiler.server" "chipcompiler.engine" "chipcompiler.tools" "chipcompiler.cli" diff --git a/nix/ecc-tools/default.nix b/nix/ecc-tools/default.nix deleted file mode 100644 index 5a4741b7..00000000 --- a/nix/ecc-tools/default.nix +++ /dev/null @@ -1,134 +0,0 @@ -{ - lib, - fetchpatch, - callPackages, - stdenv, - cmake, - ninja, - flex, - bison, - zlib, - tcl, - boost, - eigen, - yaml-cpp, - libunwind, - glog, - gtest, - gflags, - metis, - gmp, - python3, - onnxruntime, - gperftools, - pkg-config, - curl, - tbb_2022, -}: - -let - rootSrc = stdenv.mkDerivation { - pname = "ecc-tools"; - version = "0-unstable-2026-01-23"; - src = fetchGit { - url = "git@github.com:openecos-projects/ecc-tools.git"; - rev = "07b6d4133f848ba6e54c0889c3b777a2b544d06b"; - }; - - patches = [ - # This patch is to fix the build system to properly find and link against rust libraries. - # Due to the way they organized the source code, it's hard to upstream this patch. - # So we have to maintain this patch locally. - (fetchpatch { - url = "https://github.com/Emin017/iEDA/commit/e5f3ce024965df5e1d400b6a1d7f8b5b307a4bf3.patch"; - hash = "sha256-YJnY+r9A887WT0a/H/Zf++r1PpD7t567NpkesDmIsD0="; - }) - ./fix.patch - ]; - - dontBuild = true; - dontFixup = true; - installPhase = '' - cp -r . 
$out - ''; - - }; - - rustpkgs = callPackages ./rustpkgs.nix { inherit rootSrc; }; -in -stdenv.mkDerivation { - inherit (rootSrc) pname version; - - src = rootSrc; - - nativeBuildInputs = [ - cmake - ninja - flex - bison - python3 - tcl - pkg-config - ]; - - cmakeBuildType = "Release"; - - cmakeFlags = [ - (lib.cmakeBool "CMD_BUILD" true) - (lib.cmakeBool "SANITIZER" false) - (lib.cmakeBool "BUILD_STATIC_LIB" false) - (lib.cmakeBool "USE_PROFILER" false) - (lib.cmakeBool "BUILD_PYTHON" true) - (lib.cmakeBool "BUILD_ECOS" true) - ]; - - # Only build the Python bindings target - buildTargets = [ "ecc_py" ]; - - preConfigure = '' - cmakeFlags+=" -DCMAKE_RUNTIME_OUTPUT_DIRECTORY:FILEPATH=$out/bin -DCMAKE_LIBRARY_OUTPUT_DIRECTORY:FILEPATH=$out/lib" - ''; - - postPatch = '' - sed -i '1i find_package(Boost REQUIRED)' src/operation/iPA/test/CMakeLists.txt - sed -i 's/boost_system/Boost::headers/g' src/operation/iPA/test/CMakeLists.txt - ''; - - buildInputs = [ - rustpkgs.iir-rust - rustpkgs.sdf-parse - rustpkgs.spef-parser - rustpkgs.vcd-parser - rustpkgs.verilog-parser - rustpkgs.liberty-parser - gtest - glog - gflags - boost - onnxruntime - eigen - yaml-cpp - libunwind - metis - gmp - tcl - zlib - gperftools - curl - tbb_2022 - ]; - - postInstall = '' - # Tests rely on hardcoded path, so they should not be included - rm $out/bin/*test $out/bin/*Test $out/bin/test_* $out/bin/*_app - ''; - - enableParallelBuild = true; - - meta = { - description = "Open-source EDA for ASIC design"; - homepage = "https://github.com/openecos-projects/ecc-tools"; - license = lib.licenses.mulan-psl2; - platforms = lib.platforms.linux; - }; -} diff --git a/nix/ecc-tools/fix.patch b/nix/ecc-tools/fix.patch deleted file mode 100644 index c01c4e8d..00000000 --- a/nix/ecc-tools/fix.patch +++ /dev/null @@ -1,12 +0,0 @@ -diff --git a/src/interface/python/CMakeLists.txt b/src/interface/python/CMakeLists.txt -index a0c8c9f4a..efeda99a8 100644 ---- a/src/interface/python/CMakeLists.txt -+++ b/src/interface/python/CMakeLists.txt -@@ -54,3 +54,7 @@ target_link_libraries(ecc_py - py_vec - py_ipnp - ) -+ -+set_target_properties(ecc_py PROPERTIES -+ LIBRARY_OUTPUT_DIRECTORY ${CMAKE_RUNTIME_OUTPUT_DIRECTORY} -+) diff --git a/nix/ecc-tools/rustpkgs.nix b/nix/ecc-tools/rustpkgs.nix deleted file mode 100644 index 4ac37149..00000000 --- a/nix/ecc-tools/rustpkgs.nix +++ /dev/null @@ -1,74 +0,0 @@ -{ rustPlatform, rootSrc }: -let - mkRustpkgs = _: p: rustPlatform.buildRustPackage p; -in -(builtins.mapAttrs mkRustpkgs { - iir-rust = rec { - pname = "iir-rust"; - version = "0.1.3"; - src = rootSrc; - sourceRoot = "${src.name}/src/operation/iIR/source/iir-rust/iir"; - - cargoHash = "sha256-CV1e/f3oCKW5mTbQnFBnp7E2d9nFyDwY3qclP2HwdPM="; - - doCheck = false; - - nativeBuildInputs = [ rustPlatform.bindgenHook ]; - }; - liberty-parser = rec { - pname = "liberty-parser"; - version = "0.1.0"; - src = rootSrc; - sourceRoot = "${src.name}/src/database/manager/parser/liberty/lib-rust/liberty-parser"; - - cargoHash = "sha256-nRIOuSz5ImENvKeMAnthmBo+2/Jy5xbM66xkcfVCTMI="; - - doCheck = false; - - nativeBuildInputs = [ rustPlatform.bindgenHook ]; - }; - sdf-parse = rec { - pname = "sdf_parse"; - version = "0.1.0"; - src = rootSrc; - sourceRoot = "${src.name}/src/database/manager/parser/sdf/sdf_parse"; - - cargoHash = "sha256-PORA/9DDIax4lOn/pzmi7Y8mCCBUphMTzbBsb64sDl0="; - - nativeBuildInputs = [ rustPlatform.bindgenHook ]; - }; - spef-parser = rec { - pname = "spef-parser"; - version = "0.2.4"; - src = rootSrc; - sourceRoot = 
"${src.name}/src/database/manager/parser/spef/spef-parser"; - - cargoHash = "sha256-Qr/oXTqn2gaxyAyLsRjaXNniNzIYVzPGefXTdkULmYk="; - - nativeBuildInputs = [ rustPlatform.bindgenHook ]; - }; - vcd-parser = rec { - pname = "vcd_parser"; - version = "0.1.0"; - src = rootSrc; - sourceRoot = "${src.name}/src/database/manager/parser/vcd/vcd_parser"; - - cargoHash = "sha256-xcfVzDrnW4w3fU7qo6xzSQeIH8sEbEyzPF92F5tDcAk="; - - doCheck = false; - - nativeBuildInputs = [ rustPlatform.bindgenHook ]; - }; - verilog-parser = rec { - pname = "verilog-parser"; - version = "0.1.0"; - src = rootSrc; - sourceRoot = "${src.name}/src/database/manager/parser/verilog/verilog-rust/verilog-parser"; - - cargoHash = "sha256-ooxY8Q8bfD+klBGfpTDD3YyWptEOGGHDoyamhjlSNTM="; - - doCheck = false; - - nativeBuildInputs = [ rustPlatform.bindgenHook ]; - }; -}) diff --git a/nix/modules/devShells.nix b/nix/modules/devShells.nix index 8b2aa706..f9ecbe16 100644 --- a/nix/modules/devShells.nix +++ b/nix/modules/devShells.nix @@ -4,7 +4,6 @@ default = pkgs.mkShell { inputsFrom = [ inputs'.infra.packages.iedaUnstable - pkgs.ecc-tools pkgs.chipcompiler ]; nativeBuildInputs = with pkgs; [ uv bazel_8 bazel-buildtools ]; diff --git a/nix/overlay.nix b/nix/overlay.nix index c0458e0d..b6469f8c 100644 --- a/nix/overlay.nix +++ b/nix/overlay.nix @@ -1,5 +1,6 @@ final: prev: { - ecc-tools = prev.callPackage ./ecc-tools { }; + ecc-tools-python = prev.python3Packages.callPackage ./python/ecc-tools { }; + ecc-dreamplace-python = prev.python3Packages.callPackage ./python/ecc-dreamplace { }; chipcompiler = prev.callPackage ./chipcompiler { }; cli = prev.callPackage ./cli { }; } diff --git a/nix/python/ecc-dreamplace/default.nix b/nix/python/ecc-dreamplace/default.nix new file mode 100644 index 00000000..f492e692 --- /dev/null +++ b/nix/python/ecc-dreamplace/default.nix @@ -0,0 +1,65 @@ +{ + lib, + buildPythonPackage, + fetchurl, + cairocffi, + distutils, + matplotlib, + numpy, + patool, + pkgconfig, + scipy, + setuptools, + shapely, + wheel, +}: + +buildPythonPackage { + pname = "ecc-dreamplace"; + version = "0.1.0a1"; + format = "wheel"; + + src = fetchurl { + url = "https://github.com/openecos-projects/ecc-dreamplace/releases/download/v0.1.0-alpha.1/ecc_dreamplace-0.1.0a1-py3-none-manylinux_2_34_x86_64.whl"; + hash = "sha256-ISE5xD+CVJiWjtoQMJlZuZzZOuwHRNGCoXu100tTFF4="; + }; + + dependencies = [ + cairocffi + distutils + matplotlib + numpy + patool + pkgconfig + scipy + setuptools + shapely + wheel + ]; + + pythonRemoveDeps = [ + "configspace" + "pydoe2" + "pygmo" + "pyro4" + "pyunpack" + "shap" + "statsmodels" + "torch" + "xgboost" + ]; + + doCheck = false; + + pythonImportsCheck = [ + "dreamplace" + "dreamplace.Params" + ]; + + meta = { + description = "ECC DreamPlace Python wheel"; + homepage = "https://github.com/openecos-projects/ecc-dreamplace"; + license = lib.licenses.asl20; + platforms = [ "x86_64-linux" ]; + }; +} diff --git a/nix/python/ecc-tools/default.nix b/nix/python/ecc-tools/default.nix new file mode 100644 index 00000000..8c6e13be --- /dev/null +++ b/nix/python/ecc-tools/default.nix @@ -0,0 +1,27 @@ +{ + lib, + buildPythonPackage, + fetchurl, +}: + +buildPythonPackage { + pname = "ecc-tools"; + version = "0.1.0a2"; + format = "wheel"; + + src = fetchurl { + url = "https://github.com/openecos-projects/ecc-tools/releases/download/v0.1.0-alpha.2/ecc_tools-0.1.0a2-py3-none-manylinux_2_34_x86_64.whl"; + hash = "sha256-NgqtSHQiiN69mqZm5afk/13jCugxyUVCa0WAUKQHyL4="; + }; + + doCheck = false; + + pythonImportsCheck = [ 
"ecc_tools_bin" ]; + + meta = { + description = "ECC tools Python wheel"; + homepage = "https://github.com/openecos-projects/ecc-tools"; + license = lib.licenses.mulan-psl2; + platforms = [ "x86_64-linux" ]; + }; +} From 68fa6ca5ef55fe60652e48b01efc4e71586973c9 Mon Sep 17 00:00:00 2001 From: Emin Date: Sat, 2 May 2026 13:33:12 +0800 Subject: [PATCH 039/104] fix(cli): handle read-only directories when overwriting runs --- chipcompiler/cli/project.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/chipcompiler/cli/project.py b/chipcompiler/cli/project.py index e1ef3d01..b90204e8 100644 --- a/chipcompiler/cli/project.py +++ b/chipcompiler/cli/project.py @@ -142,7 +142,10 @@ def run_project(project_dir: str, overwrite: bool = False, return [], 1 if overwrite and os.path.exists(run_dir): - shutil.rmtree(run_dir) + def _remove_readonly(func, path, _): + os.chmod(path, 0o700) + func(path) + shutil.rmtree(run_dir, onerror=_remove_readonly) _, origin_verilog, input_filelist = resolve_rtl(cfg) parameters = to_parameters(cfg) From 154dd3a5a51e2e271cd8f671bf36dc3e1e20ee80 Mon Sep 17 00:00:00 2001 From: Emin Date: Sat, 2 May 2026 13:35:07 +0800 Subject: [PATCH 040/104] fix(cli): make run tree writable before rmtree on overwrite --- chipcompiler/cli/project.py | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/chipcompiler/cli/project.py b/chipcompiler/cli/project.py index b90204e8..523885eb 100644 --- a/chipcompiler/cli/project.py +++ b/chipcompiler/cli/project.py @@ -142,10 +142,13 @@ def run_project(project_dir: str, overwrite: bool = False, return [], 1 if overwrite and os.path.exists(run_dir): - def _remove_readonly(func, path, _): - os.chmod(path, 0o700) - func(path) - shutil.rmtree(run_dir, onerror=_remove_readonly) + for root, dirs, files in os.walk(run_dir): + for d in dirs: + os.chmod(os.path.join(root, d), 0o755) + for f in files: + os.chmod(os.path.join(root, f), 0o644) + os.chmod(run_dir, 0o755) + shutil.rmtree(run_dir) _, origin_verilog, input_filelist = resolve_rtl(cfg) parameters = to_parameters(cfg) From 6086804bc44d463f79df8466b845af564fef5f0c Mon Sep 17 00:00:00 2001 From: Emin Date: Sat, 2 May 2026 16:14:08 +0800 Subject: [PATCH 041/104] fix(flow): keep copied config files writable --- chipcompiler/tools/ecc/builder.py | 22 +++- chipcompiler/tools/ecc_dreamplace/builder.py | 8 ++ .../test_ecc_dreamplace_config_permissions.py | 109 ++++++++++++++++++ 3 files changed, 134 insertions(+), 5 deletions(-) create mode 100644 test/test_ecc_dreamplace_config_permissions.py diff --git a/chipcompiler/tools/ecc/builder.py b/chipcompiler/tools/ecc/builder.py index 6f34c676..babe1251 100644 --- a/chipcompiler/tools/ecc/builder.py +++ b/chipcompiler/tools/ecc/builder.py @@ -2,6 +2,7 @@ # -*- encoding: utf-8 -*- import os import stat +from contextlib import suppress from chipcompiler.data import WorkspaceStep, Workspace, Parameters, StepEnum, StateEnum def build_step(workspace: Workspace, @@ -215,13 +216,24 @@ def build_step_config(workspace: Workspace, def _ensure_writable(path: str): """Make files writable after copying from read-only sources.""" + def _chmod_owner_writable(target: str, is_dir: bool = False): + mode = os.stat(target).st_mode | stat.S_IWUSR + if is_dir: + mode |= stat.S_IXUSR + os.chmod(target, mode) + + with suppress(OSError): + _chmod_owner_writable(path, is_dir=True) + for root, dirs, files in os.walk(path): - for name in dirs + files: + for name in dirs: + target = os.path.join(root, name) + with suppress(OSError): + 
_chmod_owner_writable(target, is_dir=True) + for name in files: target = os.path.join(root, name) - try: - os.chmod(target, os.stat(target).st_mode | stat.S_IWUSR) - except OSError: - pass + with suppress(OSError): + _chmod_owner_writable(target) def _update_flow(): # read config diff --git a/chipcompiler/tools/ecc_dreamplace/builder.py b/chipcompiler/tools/ecc_dreamplace/builder.py index edc6a55b..3bbda72a 100644 --- a/chipcompiler/tools/ecc_dreamplace/builder.py +++ b/chipcompiler/tools/ecc_dreamplace/builder.py @@ -2,7 +2,10 @@ from __future__ import annotations +import os import shutil +import stat +from contextlib import suppress from copy import deepcopy from pathlib import Path @@ -77,6 +80,11 @@ def build_step_config(workspace: Workspace, step: WorkspaceStep) -> None: # then copy it to the destination specified by step.config["dreamplace"] param_src = Path(__file__).resolve().parent / "configs" / "dreamplace.json" shutil.copy2(param_src, step.config["dreamplace"]) + with suppress(OSError): + os.chmod( + step.config["dreamplace"], + os.stat(step.config["dreamplace"]).st_mode | stat.S_IWUSR, + ) params = json_read(step.config["dreamplace"]) diff --git a/test/test_ecc_dreamplace_config_permissions.py b/test/test_ecc_dreamplace_config_permissions.py new file mode 100644 index 00000000..7447705e --- /dev/null +++ b/test/test_ecc_dreamplace_config_permissions.py @@ -0,0 +1,109 @@ +import shutil +import stat + +from chipcompiler.data import PDK, OriginDesign, Parameters, StepEnum, Workspace +from chipcompiler.tools.ecc import builder as ecc_builder +from chipcompiler.tools.ecc_dreamplace import builder as dreamplace_builder +from chipcompiler.utility import json_read, json_write + + +def test_ecc_config_generation_leaves_config_root_writable_after_read_only_copy( + tmp_path, + monkeypatch, +): + parameters_path = tmp_path / "parameters.json" + json_write(str(parameters_path), {}) + workspace = Workspace( + directory=str(tmp_path / "workspace"), + design=OriginDesign(name="gcd"), + pdk=PDK(tech="tech.lef", lefs=["std.lef"], buffers=[], fillers=[]), + parameters=Parameters(path=str(parameters_path), data={}), + ) + step = ecc_builder.build_step( + workspace=workspace, + step_name=StepEnum.PLACEMENT.value, + input_def="input.def", + input_verilog="input.v", + ) + config_dir = tmp_path / "workspace" / "place_ecc" / "config" + readonly_source = tmp_path / "readonly_configs" + + monkeypatch.setattr(ecc_builder, "build_sub_flow", lambda **_: None) + monkeypatch.setattr(ecc_builder, "build_checklist", lambda **_: None) + + real_copytree = shutil.copytree + + def copy_readonly_config_source(_src, dst, dirs_exist_ok=False): + real_copytree(_src, readonly_source, dirs_exist_ok=True) + readonly_source.chmod(stat.S_IREAD | stat.S_IEXEC) + try: + return real_copytree(readonly_source, dst, dirs_exist_ok=dirs_exist_ok) + finally: + readonly_source.chmod( + stat.S_IREAD | stat.S_IWRITE | stat.S_IEXEC, + ) + + monkeypatch.setattr(shutil, "copytree", copy_readonly_config_source) + + ecc_builder.build_step_config(workspace, step) + + config_mode = config_dir.stat().st_mode + copied_config = config_dir / "flow_config.json" + copied_mode = copied_config.stat().st_mode + assert config_mode & stat.S_IWUSR + assert config_mode & stat.S_IXUSR + assert copied_mode & stat.S_IWUSR + + extra_config = config_dir / "created_after_build.json" + extra_config.write_text("{}", encoding="utf-8") + assert extra_config.exists() + + +def test_dreamplace_config_generation_writes_generated_fields_to_copied_config( + tmp_path, + 
monkeypatch, +): + workspace = Workspace( + directory=str(tmp_path / "workspace"), + design=OriginDesign(name="gcd"), + pdk=PDK(tech="tech.lef", lefs=["std.lef"]), + parameters=Parameters(data={}), + ) + step = dreamplace_builder.build_step( + workspace=workspace, + step_name=StepEnum.PLACEMENT.value, + input_def="input.def", + input_verilog="input.v", + ) + config_dir = tmp_path / "workspace" / "place_dreamplace" / "config" + + def fake_ecc_build_step_config(_workspace, _step): + config_dir.mkdir(parents=True, exist_ok=True) + + monkeypatch.setattr( + dreamplace_builder.ecc_builder, + "build_step_config", + fake_ecc_build_step_config, + ) + + real_copy2 = shutil.copy2 + + def copy_readonly_config_file(src, dst): + result = real_copy2(src, dst) + tmp_path.joinpath(dst).chmod(stat.S_IREAD) + return result + + monkeypatch.setattr(shutil, "copy2", copy_readonly_config_file) + + dreamplace_builder.build_step_config(workspace, step) + + dreamplace_config = config_dir / "dreamplace.json" + mode = dreamplace_config.stat().st_mode + data = json_read(str(dreamplace_config)) + + assert mode & stat.S_IWUSR + assert data["lef_input"] == ["tech.lef", "std.lef"] + assert data["def_input"] == "input.def" + assert data["verilog_input"] == "input.v" + assert data["result_dir"] == step.data[step.name] + assert data["base_design_name"] == "gcd" From 43211c78b3142fb4f47fb8bb661b3d5ce6efdf7d Mon Sep 17 00:00:00 2001 From: Emin Date: Sun, 3 May 2026 00:22:32 +0800 Subject: [PATCH 042/104] refactor(cli): implement functional command pipeline Replace per-command output branching with a unified record pipeline: argv -> parse args -> build context -> dispatch command -> collect records -> render records -> exit code - Add types.py (OutputMode, CommandContext, CommandResult) - Add render.py (centralized text/JSON/JSONL rendering) - Add commands.py (context construction, dispatch) - Add handlers.py (command handlers returning CommandResult) - Simplify main.py to thin parse/dispatch/render coordinator - Remove project.py (logic inlined into handlers) - Remove obsolete build_*_lines/json/jsonl from inspect, artifacts, config_view, diagnose modules - Remove emit_text/json/jsonl from output.py - Standardize JSON output on {"records": [...]} - All 148 CLI tests pass --- chipcompiler/cli/artifacts.py | 85 ----- chipcompiler/cli/commands.py | 34 ++ chipcompiler/cli/config_view.py | 86 ----- chipcompiler/cli/diagnose.py | 65 +--- chipcompiler/cli/handlers.py | 589 ++++++++++++++++++++++++++++++++ chipcompiler/cli/inspect.py | 325 +----------------- chipcompiler/cli/main.py | 235 +------------ chipcompiler/cli/output.py | 34 -- chipcompiler/cli/render.py | 39 +++ chipcompiler/cli/types.py | 31 ++ test/cli/test_cli_inspect.py | 67 ++-- test/cli/test_cli_main.py | 37 +- 12 files changed, 756 insertions(+), 871 deletions(-) create mode 100644 chipcompiler/cli/commands.py create mode 100644 chipcompiler/cli/handlers.py create mode 100644 chipcompiler/cli/render.py create mode 100644 chipcompiler/cli/types.py diff --git a/chipcompiler/cli/artifacts.py b/chipcompiler/cli/artifacts.py index 3a8343e8..ddaae7ff 100644 --- a/chipcompiler/cli/artifacts.py +++ b/chipcompiler/cli/artifacts.py @@ -14,7 +14,6 @@ def discover_artifacts(run_dir: str, step_token: str | None = None, run_id: str | None = None, project_dir: str | None = None) -> tuple[list[dict], int]: from chipcompiler.cli.inspect import discover_step_dirs - from chipcompiler.cli.output import format_line base_dir = project_dir or os.path.dirname(os.path.dirname(run_dir)) 
step_dirs = discover_step_dirs(run_dir) @@ -52,87 +51,3 @@ def discover_artifacts(run_dir: str, step_token: str | None = None, return [], 0 return artifacts, 0 - - -def build_artifacts_lines(run_dir: str, step_token: str | None = None, - project: str | None = None, - run_id: str | None = None, - project_dir: str | None = None) -> tuple[list[str], int]: - from chipcompiler.cli.output import format_line - - artifacts, rc = discover_artifacts(run_dir, step_token, project, run_id, project_dir) - if rc != 0: - if artifacts and artifacts[0].get("status") == "unknown_step": - s = artifacts[0]["step"] - return [format_line( - step=s, - status="unknown_step", - status_cmd=disclosure_cmd("ecc status", project, run_id), - )], 1 - return [], rc - - if not artifacts: - if step_token is not None: - return [format_line( - step=step_token, - artifacts_status="none", - status_cmd=disclosure_cmd("ecc status", project, run_id), - log=disclosure_cmd(f"ecc log {step_token} --errors", project, run_id), - )], 0 - return [format_line( - artifacts_status="none", - workspace=run_dir, - status_cmd=disclosure_cmd("ecc status", project, run_id), - )], 0 - - lines = [] - for a in artifacts: - line_fields = { - "artifact": os.path.basename(a["path"]), - "step": a["step"], - "role": a["role"], - "path": a["path"], - "inspect": disclosure_cmd(f"ecc artifacts {a['step']} --json", project, run_id), - } - if a["role"] == "analysis": - line_fields["metrics"] = disclosure_cmd(f"ecc metrics {a['step']}", project, run_id) - if a["role"] == "log": - line_fields["inspect"] = disclosure_cmd(f"ecc log {a['step']} --errors", project, run_id) - if a["role"] in ("output", "report", "analysis", "log"): - line_fields["config"] = disclosure_cmd(f"ecc config {a['step']} --resolved", project, run_id) - lines.append(format_line(**line_fields)) - return lines, 0 - - -def build_artifacts_json(run_dir: str, step_token: str | None = None, - project: str | None = None, - run_id: str | None = None, - project_dir: str | None = None) -> tuple[dict, int]: - artifacts, rc = discover_artifacts(run_dir, step_token, project, run_id, project_dir) - if rc != 0: - if artifacts and artifacts[0].get("status") == "unknown_step": - return {"status": "unknown_step", "step": artifacts[0]["step"]}, 1 - return {}, rc - - if not artifacts: - if step_token is not None: - return {"artifacts_status": "none", "step": step_token}, 0 - return {"artifacts_status": "none"}, 0 - - return {"artifacts": artifacts}, 0 - - -def build_artifacts_jsonl(run_dir: str, step_token: str | None = None, - project: str | None = None, - run_id: str | None = None, - project_dir: str | None = None) -> tuple[list[dict], int]: - artifacts, rc = discover_artifacts(run_dir, step_token, project, run_id, project_dir) - if rc != 0: - return artifacts, rc - - if not artifacts: - if step_token is not None: - return [{"artifacts_status": "none", "step": step_token}], 0 - return [{"artifacts_status": "none"}], 0 - - return artifacts, 0 diff --git a/chipcompiler/cli/commands.py b/chipcompiler/cli/commands.py new file mode 100644 index 00000000..2bd5646f --- /dev/null +++ b/chipcompiler/cli/commands.py @@ -0,0 +1,34 @@ +from chipcompiler.cli.config import resolve_project_dir +from chipcompiler.cli.inspect import resolve_run_dir +from chipcompiler.cli.types import CommandContext, CommandResult, OutputMode + + +def build_context(args) -> CommandContext: + project = getattr(args, "project", None) + project_dir = resolve_project_dir(project) + + run_id = getattr(args, "run_id", None) + run_dir, run_id = 
resolve_run_dir(project_dir, run_id) + + if getattr(args, "jsonl", False): + mode = OutputMode.JSONL + elif getattr(args, "json", False): + mode = OutputMode.JSON + else: + mode = OutputMode.TEXT + + return CommandContext( + project_dir=project_dir, + project=project, + run_dir=run_dir, + run_id=run_id, + output_mode=mode, + ) + + +def dispatch(args, ctx: CommandContext) -> CommandResult: + from chipcompiler.cli import handlers + handler = getattr(handlers, args.command, None) + if handler is None: + return CommandResult.err([], exit_code=1) + return handler(args, ctx) diff --git a/chipcompiler/cli/config_view.py b/chipcompiler/cli/config_view.py index 37af0c8f..11b2a5d8 100644 --- a/chipcompiler/cli/config_view.py +++ b/chipcompiler/cli/config_view.py @@ -13,7 +13,6 @@ def build_project_config_items(project_dir: str, run_dir: str, resolve_pdk_root, validate_project_config, ) - from chipcompiler.cli.output import format_line config_path = find_config_path(project_dir) if config_path is None: @@ -137,88 +136,3 @@ def build_step_config_items(run_dir: str, step_token: str | None, "artifacts": disclosure_cmd(f"ecc artifacts {step_token}", project, run_id)}], 0 return items, 0 - - -def build_config_lines(items: list[dict], project: str | None = None, - run_id: str | None = None) -> tuple[list[str], int]: - from chipcompiler.cli.output import format_line - - if not items: - return [], 0 - - first = items[0] - if first.get("config_status") == "none": - return [format_line( - step=first["step"], - config_status="none", - artifacts=first.get("artifacts"), - )], 0 - - status = first.get("status") - if status == "unknown_step": - return [format_line( - step=first.get("step", ""), - status="unknown_step", - inspect=disclosure_cmd("ecc status", project, run_id), - )], 1 - if status == "missing_config": - return [format_line( - status="missing_config", - inspect=disclosure_cmd("ecc check", project), - )], 1 - if status == "invalid_config": - return [format_line( - status="invalid_config", - inspect=disclosure_cmd("ecc check", project), - )], 1 - - lines = [] - for item in items: - if item.get("scope") == "project": - line = format_line( - config=item["key"], - scope="project", - value=item["value"], - resolved=item.get("resolved"), - source=item["source"], - inspect=item.get("inspect_cmd"), - ) - else: - line = format_line( - config=os.path.basename(item["path"]), - scope="step", - step=item["step"], - role=item["role"], - run=item.get("run", "default"), - path=item["path"], - source=item["source"], - inspect=item.get("inspect_cmd"), - ) - lines.append(line) - return lines, 0 - - -def build_config_json(items: list[dict]) -> tuple[dict, int]: - if items and items[0].get("status") in ("unknown_step", "missing_config", "invalid_config"): - return items[0], 1 - - if items and items[0].get("config_status") == "none": - return items[0], 0 - - if not items: - return {"config_status": "none"}, 0 - - return {"config": items}, 0 - - -def build_config_jsonl(items: list[dict]) -> tuple[list[dict], int]: - if items and items[0].get("status") in ("unknown_step", "missing_config", "invalid_config"): - return items, 1 - - if items and items[0].get("config_status") == "none": - return items, 0 - - if not items: - return [{"config_status": "none"}], 0 - - return items, 0 diff --git a/chipcompiler/cli/diagnose.py b/chipcompiler/cli/diagnose.py index dcfd90ee..74291d7e 100644 --- a/chipcompiler/cli/diagnose.py +++ b/chipcompiler/cli/diagnose.py @@ -164,66 +164,5 @@ def build_diagnose_issues(run_dir: str, step_token: str | 
None = None, issues.append(_make_issue("config_unavailable", "info", display_run, step=token, project=project, run_id=run_id)) - return issues, 0 - - -def _exit_code(issues: list[dict]) -> int: - for issue in issues: - if issue.get("severity") == "error": - return 1 - return 0 - - -def build_diagnose_lines(run_dir: str, step_token: str | None = None, - project: str | None = None, - run_id: str | None = None) -> tuple[list[str], int]: - from chipcompiler.cli.output import format_line - - issues, rc = build_diagnose_issues(run_dir, step_token, project, run_id) - - if not issues: - display_run = run_id or "default" - return [format_line( - status="clean", - run=display_run, - status_cmd=disclosure_cmd("ecc status", project, run_id), - artifacts=disclosure_cmd("ecc artifacts", project, run_id), - config=disclosure_cmd("ecc config --resolved", project, run_id), - )], 0 - - lines = [] - text_keys = ("issue", "severity", "run", "step", "status", "count", - "evidence", "log", "artifacts", "config", "run_cmd") - for issue in issues: - fields = {k: issue[k] for k in text_keys if k in issue} - lines.append(format_line(**fields)) - - return lines, _exit_code(issues) - - -def _clean_object(run_id, project, run_id_val): - return { - "status": "clean", - "run": run_id or "default", - "status_cmd": disclosure_cmd("ecc status", project, run_id_val), - "artifacts": disclosure_cmd("ecc artifacts", project, run_id_val), - "config": disclosure_cmd("ecc config --resolved", project, run_id_val), - } - - -def build_diagnose_json(run_dir: str, step_token: str | None = None, - project: str | None = None, - run_id: str | None = None) -> tuple[dict, int]: - issues, _ = build_diagnose_issues(run_dir, step_token, project, run_id) - if not issues: - return _clean_object(run_id, project, run_id), 0 - return {"issues": issues}, _exit_code(issues) - - -def build_diagnose_jsonl(run_dir: str, step_token: str | None = None, - project: str | None = None, - run_id: str | None = None) -> tuple[list[dict], int]: - issues, _ = build_diagnose_issues(run_dir, step_token, project, run_id) - if not issues: - return [_clean_object(run_id, project, run_id)], 0 - return issues, _exit_code(issues) + has_error = any(i.get("severity") == "error" for i in issues) + return issues, 1 if has_error else 0 diff --git a/chipcompiler/cli/handlers.py b/chipcompiler/cli/handlers.py new file mode 100644 index 00000000..ce78d32a --- /dev/null +++ b/chipcompiler/cli/handlers.py @@ -0,0 +1,589 @@ +import json +import os +import shutil + +from chipcompiler.cli.types import CommandContext, CommandResult +from chipcompiler.cli.output import ( + disclosure_cmd, + normalize_metric_key, + normalize_state, + normalize_step_name, +) + + +def status(args, ctx: CommandContext) -> CommandResult: + from chipcompiler.cli.inspect import ( + CORRUPT_FLOW_JSON, + _safe_steps, + get_run_status, + read_flow_json, + ) + + flow_data = read_flow_json(ctx.run_dir) + display_run = ctx.run_id or "default" + project = ctx.project + + if flow_data is None: + return CommandResult.err([{ + "run": display_run, + "status": "missing", + "workspace": ctx.run_dir, + "run_cmd": disclosure_cmd("ecc run", project), + }]) + + if flow_data is CORRUPT_FLOW_JSON: + return CommandResult.err([{ + "run": display_run, + "status": "corrupt", + "workspace": ctx.run_dir, + "status_cmd": disclosure_cmd("ecc status", project, ctx.run_id), + "log_cmd": disclosure_cmd("ecc log", project, ctx.run_id), + }]) + + run_status = get_run_status(flow_data) + records = [{ + "run": display_run, + "status": 
run_status, + "workspace": ctx.run_dir, + "status_cmd": disclosure_cmd("ecc status", project, ctx.run_id), + "metrics_cmd": disclosure_cmd("ecc metrics", project, ctx.run_id), + "log_cmd": disclosure_cmd("ecc log", project, ctx.run_id), + }] + + for step in _safe_steps(flow_data): + step_token = normalize_step_name(step.get("name", "")) + records.append({ + "step": step_token, + "tool": step.get("tool", ""), + "status": normalize_state(step.get("state", "")), + "runtime": step.get("runtime", "") or None, + "metrics_cmd": disclosure_cmd(f"ecc metrics {step_token}", project, ctx.run_id), + "log_cmd": disclosure_cmd(f"ecc log {step_token} --errors", project, ctx.run_id), + }) + + return CommandResult.ok(records) + + +def log(args, ctx: CommandContext) -> CommandResult: + from chipcompiler.cli.inspect import ( + discover_logs, + discover_step_dirs, + filter_errors, + read_log_file, + ) + + step_token = args.step + errors_only = args.errors + project = ctx.project + + if step_token is None: + records = [] + + for lf in discover_logs(ctx.run_dir): + records.append({ + "log": os.path.relpath(lf, ctx.run_dir), + "inspect": disclosure_cmd("ecc log", project, ctx.run_id), + }) + + step_dirs = discover_step_dirs(ctx.run_dir) + for token in sorted(step_dirs): + for lf in discover_logs(ctx.run_dir, token): + records.append({ + "step": token, + "log": os.path.relpath(lf, ctx.run_dir), + "inspect": disclosure_cmd(f"ecc log {token} --errors", project, ctx.run_id), + }) + + if not records: + return CommandResult.ok([{ + "log_status": "no_logs", + "workspace": ctx.run_dir, + "run": disclosure_cmd("ecc run", project), + }]) + return CommandResult.ok(records) + + step_dirs = discover_step_dirs(ctx.run_dir) + if step_token not in step_dirs: + return CommandResult.err([{ + "step": step_token, + "status": "unknown_step", + "inspect": disclosure_cmd("ecc status", project, ctx.run_id), + }]) + + log_files = discover_logs(ctx.run_dir, step_token) + if not log_files: + return CommandResult.err([{ + "step": step_token, + "log_status": "missing", + "log_cmd": disclosure_cmd(f"ecc log {step_token} --errors", project, ctx.run_id), + }]) + + matched_lines = [] + for lf in log_files: + raw = read_log_file(lf) + filtered = filter_errors(raw) if errors_only else raw + for line in filtered: + matched_lines.append((lf, line)) + + if not matched_lines: + return CommandResult.ok([{ + "step": step_token, + "log_status": "no_matching_lines", + "log_cmd": disclosure_cmd(f"ecc log {step_token}", project, ctx.run_id), + }]) + + records = [] + for lf, line in matched_lines: + records.append({ + "step": step_token, + "source": os.path.relpath(lf, ctx.run_dir), + "line": line, + "log_cmd": disclosure_cmd(f"ecc log {step_token} --errors", project, ctx.run_id), + }) + return CommandResult.ok(records) + + +def metrics(args, ctx: CommandContext) -> CommandResult: + from chipcompiler.cli.inspect import ( + _internal_from_token, + discover_metrics, + discover_step_dirs, + read_metrics, + ) + + step_token = args.step + project = ctx.project + + metrics_files = discover_metrics(ctx.run_dir, step_token) + if not metrics_files: + if step_token is not None: + step_dirs = discover_step_dirs(ctx.run_dir) + if step_token in step_dirs: + return CommandResult.err([{ + "metric_step": step_token, + "status": "missing", + "path": os.path.relpath( + os.path.join(step_dirs[step_token], "analysis", + f"{_internal_from_token(step_token)}_metrics.json"), + ctx.run_dir, + ), + "log": disclosure_cmd(f"ecc log {step_token} --errors", project, ctx.run_id), + }]) 
+ return CommandResult.err([{ + "step": step_token, + "status": "unknown_step", + "inspect": disclosure_cmd("ecc status", project, ctx.run_id), + }]) + return CommandResult.ok([{ + "metrics_status": "none", + "workspace": ctx.run_dir, + "status_cmd": disclosure_cmd("ecc status", project, ctx.run_id), + }]) + + records = [] + has_corrupt = False + for token, path in sorted(metrics_files.items()): + data = read_metrics(path) + if data is None: + has_corrupt = True + records.append({ + "metric_step": token, + "status": "corrupt", + "path": os.path.relpath(path, ctx.run_dir), + "log_cmd": disclosure_cmd(f"ecc log {token} --errors", project, ctx.run_id), + }) + continue + for raw_key, value in data.items(): + norm_key = normalize_metric_key(raw_key) + records.append({ + "metric": norm_key, + "step": token, + "value": value, + "source": os.path.relpath(path, ctx.run_dir), + "inspect": disclosure_cmd(f"ecc metrics {token} --json", project, ctx.run_id), + }) + + if has_corrupt: + return CommandResult.err(records) + return CommandResult.ok(records) + + +def artifacts(args, ctx: CommandContext) -> CommandResult: + from chipcompiler.cli.artifacts import discover_artifacts + + step_token = args.step + project = ctx.project + + artifact_records, rc = discover_artifacts( + ctx.run_dir, step_token, project, ctx.run_id, ctx.project_dir, + ) + + if rc != 0: + if artifact_records and artifact_records[0].get("status") == "unknown_step": + return CommandResult.err([{ + "step": artifact_records[0]["step"], + "status": "unknown_step", + "status_cmd": disclosure_cmd("ecc status", project, ctx.run_id), + }]) + return CommandResult.err(artifact_records) + + if not artifact_records: + if step_token is not None: + return CommandResult.ok([{ + "step": step_token, + "artifacts_status": "none", + "status_cmd": disclosure_cmd("ecc status", project, ctx.run_id), + "log": disclosure_cmd(f"ecc log {step_token} --errors", project, ctx.run_id), + }]) + return CommandResult.ok([{ + "artifacts_status": "none", + "workspace": ctx.run_dir, + "status_cmd": disclosure_cmd("ecc status", project, ctx.run_id), + }]) + + records = [] + for a in artifact_records: + line_fields = { + "artifact": os.path.basename(a["path"]), + "step": a["step"], + "role": a["role"], + "path": a["path"], + "inspect": disclosure_cmd(f"ecc artifacts {a['step']} --json", project, ctx.run_id), + } + if a["role"] == "analysis": + line_fields["metrics"] = disclosure_cmd(f"ecc metrics {a['step']}", project, ctx.run_id) + if a["role"] == "log": + line_fields["inspect"] = disclosure_cmd(f"ecc log {a['step']} --errors", project, ctx.run_id) + if a["role"] in ("output", "report", "analysis", "log"): + line_fields["config"] = disclosure_cmd(f"ecc config {a['step']} --resolved", project, ctx.run_id) + records.append(line_fields) + return CommandResult.ok(records) + + +def config(args, ctx: CommandContext) -> CommandResult: + from chipcompiler.cli.config_view import build_project_config_items, build_step_config_items + + step_token = args.step + project = ctx.project + + if step_token is not None: + items, rc = build_step_config_items( + ctx.run_dir, step_token, project, ctx.run_id, ctx.project_dir, + ) + else: + items, rc = build_project_config_items( + ctx.project_dir, ctx.run_dir, project, ctx.run_id, + ) + + if rc != 0: + first = items[0] if items else {} + status = first.get("status") + if status == "unknown_step": + return CommandResult.err([{ + "step": first.get("step", ""), + "status": "unknown_step", + "inspect": disclosure_cmd("ecc status", project, 
ctx.run_id), + }]) + if status == "missing_config": + return CommandResult.err([{ + "status": "missing_config", + "inspect": disclosure_cmd("ecc check", project), + }]) + if status == "invalid_config": + return CommandResult.err([{ + "status": "invalid_config", + "inspect": disclosure_cmd("ecc check", project), + }]) + return CommandResult.err(items) + + if not items: + return CommandResult.ok([{"config_status": "none"}]) + + first = items[0] + if first.get("config_status") == "none": + return CommandResult.ok([{ + "step": first["step"], + "config_status": "none", + "artifacts": first.get("artifacts"), + }]) + + records = [] + for item in items: + if item.get("scope") == "project": + records.append({ + "config": item["key"], + "scope": "project", + "value": item["value"], + "resolved": item.get("resolved"), + "source": item["source"], + "inspect": item.get("inspect_cmd"), + }) + else: + records.append({ + "config": os.path.basename(item["path"]), + "scope": "step", + "step": item["step"], + "role": item["role"], + "run": item.get("run", "default"), + "path": item["path"], + "source": item["source"], + "inspect": item.get("inspect_cmd"), + }) + return CommandResult.ok(records) + + +def diagnose(args, ctx: CommandContext) -> CommandResult: + from chipcompiler.cli.diagnose import build_diagnose_issues + + step_token = args.step + project = ctx.project + display_run = ctx.run_id or "default" + + issues, _ = build_diagnose_issues(ctx.run_dir, step_token, project, ctx.run_id) + + if not issues: + return CommandResult.ok([{ + "status": "clean", + "run": display_run, + "status_cmd": disclosure_cmd("ecc status", project, ctx.run_id), + "artifacts": disclosure_cmd("ecc artifacts", project, ctx.run_id), + "config": disclosure_cmd("ecc config --resolved", project, ctx.run_id), + }]) + + has_error = any(i.get("severity") == "error" for i in issues) + text_keys = ( + "issue", "severity", "run", "step", "status", "count", + "evidence", "log", "artifacts", "config", "run_cmd", + ) + records = [] + for issue in issues: + records.append({k: issue[k] for k in text_keys if k in issue}) + + if has_error: + return CommandResult.err(records) + return CommandResult.ok(records) + + +def init(args, ctx: CommandContext) -> CommandResult: + name = args.name + if not name or not name.strip(): + return CommandResult.err([{"kind": "error", "error": "project name is required"}]) + + project_dir = os.path.abspath(name) + config_path = os.path.join(project_dir, "ecc.toml") + design_name = os.path.basename(project_dir) + + if os.path.exists(config_path): + return CommandResult.err([{ + "kind": "error", + "error": "already_exists", + "path": config_path, + }]) + + os.makedirs(project_dir, exist_ok=True) + os.makedirs(os.path.join(project_dir, "rtl"), exist_ok=True) + os.makedirs(os.path.join(project_dir, "constraints"), exist_ok=True) + os.makedirs(os.path.join(project_dir, "runs"), exist_ok=True) + + DEFAULT_TOML = '''[design] +name = "{name}" +top = "{name}" +rtl = ["rtl/{name}.v"] +clock_port = "clk" +frequency_mhz = 100.0 + +[pdk] +name = "ics55" +root = "" + +[flow] +preset = "rtl2gds" +run = "default" +''' + + with open(config_path, "w") as f: + f.write(DEFAULT_TOML.format(name=design_name)) + + project_arg = ctx.project or name + return CommandResult.ok([{ + "project": name, + "status": "created", + "path": name, + "check": disclosure_cmd("ecc check", project_arg), + "run": disclosure_cmd("ecc run", project_arg), + }]) + + +def check(args, ctx: CommandContext) -> CommandResult: + from chipcompiler.cli.config import ( 
+ find_config_path, + load_project_config, + validate_project_config, + ) + + project = ctx.project + + config_path = find_config_path(ctx.project_dir) + if config_path is None: + return CommandResult.err([{ + "status": "missing_config", + "path": os.path.join(ctx.project_dir, "ecc.toml"), + "inspect": disclosure_cmd("ecc check", project), + }]) + + cfg = load_project_config(config_path) + errors = validate_project_config(cfg) + + if errors: + records = [] + for err in errors: + records.append({ + "check": "config", + "status": "fail", + "reason": err, + "source": "ecc.toml", + "inspect": disclosure_cmd("ecc check --json", project), + }) + return CommandResult.err(records) + + records = [{ + "project": cfg.design_name, + "status": "checked", + "config": "ecc.toml", + "run_dir": "runs/default", + "run": disclosure_cmd("ecc run", project), + "status_cmd": disclosure_cmd("ecc status", project), + }] + + if cfg.design_rtl: + records.append({ + "check": "rtl", + "status": "pass", + "path": cfg.design_rtl[0], + "inspect": disclosure_cmd("ecc check --json", project), + }) + + return CommandResult.ok(records) + + +def run(args, ctx: CommandContext) -> CommandResult: + from chipcompiler.cli.config import ( + find_config_path, + load_project_config, + resolve_pdk_root, + resolve_rtl, + to_parameters, + validate_project_config, + ) + from chipcompiler.data import create_workspace + from chipcompiler.engine import EngineFlow + from chipcompiler.rtl2gds import build_rtl2gds_flow + + project = ctx.project + project_dir = ctx.project_dir + + config_path = find_config_path(project_dir) + if config_path is None: + return CommandResult.err([{ + "kind": "error", + "error": "missing_config", + "path": os.path.join(project_dir, "ecc.toml"), + }]) + + cfg = load_project_config(config_path) + errors = validate_project_config(cfg) + if errors: + records = [] + for err in errors: + records.append({ + "kind": "error", + "error": "config_error", + "reason": err, + }) + return CommandResult.err(records) + + run_dir = os.path.join(project_dir, "runs", "default") + flow_json = os.path.join(run_dir, "home", "flow.json") + + if os.path.exists(flow_json) and not args.overwrite: + return CommandResult.err([{ + "kind": "error", + "error": "run_exists", + "run": "default", + "workspace": run_dir, + "overwrite": disclosure_cmd("ecc run --overwrite", project), + }]) + + if args.overwrite and os.path.exists(run_dir): + for root, dirs, files in os.walk(run_dir): + for d in dirs: + os.chmod(os.path.join(root, d), 0o755) + for f in files: + os.chmod(os.path.join(root, f), 0o644) + os.chmod(run_dir, 0o755) + shutil.rmtree(run_dir) + + _, origin_verilog, input_filelist = resolve_rtl(cfg) + parameters = to_parameters(cfg) + pdk_root = resolve_pdk_root(cfg) + + try: + workspace = create_workspace( + directory=run_dir, + origin_def="", + origin_verilog=origin_verilog, + pdk=cfg.pdk_name, + parameters=parameters, + input_filelist=input_filelist, + pdk_root=pdk_root, + ) + except Exception as exc: + return CommandResult.err([{ + "kind": "error", + "error": "workspace_failed", + "run": "default", + "workspace": run_dir, + "reason": str(exc), + }]) + + if workspace is None: + return CommandResult.err([{ + "kind": "error", + "error": "workspace_failed", + "run": "default", + "workspace": run_dir, + }]) + + try: + engine_flow = EngineFlow(workspace=workspace) + if not engine_flow.has_init(): + for step, tool, state in build_rtl2gds_flow(): + engine_flow.add_step(step=step, tool=tool, state=state) + + engine_flow.create_step_workspaces() + + 
if not engine_flow.run_steps(): + return CommandResult.err([{ + "run": "default", + "status": "failed", + "workspace": run_dir, + "status_cmd": disclosure_cmd("ecc status", project), + "log": disclosure_cmd("ecc log", project), + }]) + except Exception as exc: + return CommandResult.err([{ + "kind": "error", + "error": "flow_failed", + "run": "default", + "workspace": run_dir, + "reason": str(exc), + }]) + + return CommandResult.ok([{ + "run": "default", + "status": "success", + "workspace": run_dir, + "status_cmd": disclosure_cmd("ecc status", project), + "metrics_cmd": disclosure_cmd("ecc metrics", project), + "log_cmd": disclosure_cmd("ecc log", project), + }]) diff --git a/chipcompiler/cli/inspect.py b/chipcompiler/cli/inspect.py index d17b8068..e3096fcd 100644 --- a/chipcompiler/cli/inspect.py +++ b/chipcompiler/cli/inspect.py @@ -3,10 +3,8 @@ import re from chipcompiler.cli.output import ( - disclosure_cmd, - normalize_metric_key, - normalize_state, normalize_step_name, + normalize_state, ) @@ -65,102 +63,6 @@ def get_run_status(flow_data: dict) -> str: return "unstart" if all_unstart else "failed" -def build_status_lines(run_dir: str, project: str | None = None, - run_id: str | None = None) -> tuple[list[str], int]: - from chipcompiler.cli.output import format_line - - flow_data = read_flow_json(run_dir) - if flow_data is None: - line = format_line( - run=run_id or "default", - status="missing", - workspace=run_dir, - run_cmd=disclosure_cmd("ecc run", project), - ) - return [line], 1 - - if flow_data is CORRUPT_FLOW_JSON: - line = format_line( - run=run_id or "default", - status="corrupt", - workspace=run_dir, - status_cmd=disclosure_cmd("ecc status", project, run_id), - log=disclosure_cmd("ecc log", project, run_id), - ) - return [line], 1 - - run_status = get_run_status(flow_data) - lines = [] - - lines.append(format_line( - run=run_id or "default", - status=run_status, - workspace=run_dir, - status_cmd=disclosure_cmd("ecc status", project, run_id), - metrics=disclosure_cmd("ecc metrics", project, run_id), - log=disclosure_cmd("ecc log", project, run_id), - )) - - for step in _safe_steps(flow_data): - step_token = normalize_step_name(step.get("name", "")) - lines.append(format_line( - step=step_token, - tool=step.get("tool", ""), - status=normalize_state(step.get("state", "")), - runtime=step.get("runtime", "") or None, - metrics=disclosure_cmd(f"ecc metrics {step_token}", project, run_id), - log=disclosure_cmd(f"ecc log {step_token} --errors", project, run_id), - )) - - return lines, 0 - - -def build_status_json(run_dir: str, run_id: str | None = None) -> tuple[dict, int]: - flow_data = read_flow_json(run_dir) - display_run = run_id or "default" - if flow_data is None: - return {"run": display_run, "status": "missing", "workspace": run_dir}, 1 - - if flow_data is CORRUPT_FLOW_JSON: - return {"run": display_run, "status": "corrupt", "workspace": run_dir}, 1 - - run_status = get_run_status(flow_data) - steps = [] - for step in _safe_steps(flow_data): - steps.append({ - "step": normalize_step_name(step.get("name", "")), - "tool": step.get("tool", ""), - "status": normalize_state(step.get("state", "")), - "runtime": step.get("runtime", ""), - }) - - return {"run": display_run, "status": run_status, "workspace": run_dir, "steps": steps}, 0 - - -def build_status_jsonl(run_dir: str, run_id: str | None = None) -> tuple[list[dict], int]: - flow_data = read_flow_json(run_dir) - display_run = run_id or "default" - if flow_data is None: - return [{"kind": "run", "run": display_run, "status": 
"missing", "workspace": run_dir}], 1 - - if flow_data is CORRUPT_FLOW_JSON: - return [{"kind": "run", "run": display_run, "status": "corrupt", "workspace": run_dir}], 1 - - run_status = get_run_status(flow_data) - objects = [{"kind": "run", "run": display_run, "status": run_status, "workspace": run_dir}] - - for step in _safe_steps(flow_data): - objects.append({ - "kind": "step", - "step": normalize_step_name(step.get("name", "")), - "tool": step.get("tool", ""), - "status": normalize_state(step.get("state", "")), - "runtime": step.get("runtime", ""), - }) - - return objects, 0 - - ERROR_PATTERNS = re.compile(r"(error|failed|traceback)", re.IGNORECASE) _CLEAN_SUMMARY = re.compile(r"^\s*\d+\s+(error|failed)|^no\s+(error|failed)", re.IGNORECASE) @@ -215,118 +117,6 @@ def read_log_file(path: str) -> list[str]: return [] -def build_log_lines(run_dir: str, step_token: str | None, errors_only: bool, - project: str | None = None, - run_id: str | None = None) -> tuple[list[str], int]: - from chipcompiler.cli.output import format_line - - if step_token is None: - lines = [] - - global_logs = discover_logs(run_dir) - for lf in global_logs: - lines.append(format_line( - log=os.path.relpath(lf, run_dir), - inspect=disclosure_cmd("ecc log", project, run_id), - )) - - step_dirs = discover_step_dirs(run_dir) - for token in sorted(step_dirs): - step_logs = discover_logs(run_dir, token) - for lf in step_logs: - lines.append(format_line( - step=token, - log=os.path.relpath(lf, run_dir), - inspect=disclosure_cmd(f"ecc log {token} --errors", project, run_id), - )) - - if not lines: - return [format_line( - log_status="no_logs", - workspace=run_dir, - run=disclosure_cmd("ecc run", project), - )], 0 - - return lines, 0 - - step_dirs = discover_step_dirs(run_dir) - if step_token not in step_dirs: - return [format_line( - step=step_token, - status="unknown_step", - inspect=disclosure_cmd("ecc status", project, run_id), - )], 1 - - log_files = discover_logs(run_dir, step_token) - if not log_files: - return [format_line( - step=step_token, - log_status="missing", - log=disclosure_cmd(f"ecc log {step_token} --errors", project, run_id), - )], 1 - - matched_lines = [] - for lf in log_files: - raw = read_log_file(lf) - filtered = filter_errors(raw) if errors_only else raw - for line in filtered: - matched_lines.append((lf, line)) - - if not matched_lines: - return [format_line( - step=step_token, - log_status="no_matching_lines", - log=disclosure_cmd(f"ecc log {step_token}", project, run_id), - )], 0 - - result = [] - for lf, line in matched_lines: - result.append(format_line( - step=step_token, - source=os.path.relpath(lf, run_dir), - line=line, - log=disclosure_cmd(f"ecc log {step_token} --errors", project, run_id), - )) - return result, 0 - - -def build_log_jsonl(run_dir: str, step_token: str | None, errors_only: bool, - project: str | None = None, - run_id: str | None = None) -> tuple[list[dict], int]: - if step_token is None: - objects = [] - for lf in discover_logs(run_dir): - objects.append({"log": os.path.relpath(lf, run_dir)}) - step_dirs = discover_step_dirs(run_dir) - for token in sorted(step_dirs): - for lf in discover_logs(run_dir, token): - objects.append({"step": token, "log": os.path.relpath(lf, run_dir)}) - if not objects: - return [{"log_status": "no_logs", "workspace": run_dir}], 0 - return objects, 0 - - step_dirs = discover_step_dirs(run_dir) - if step_token not in step_dirs: - return [{"step": step_token, "status": "unknown_step"}], 1 - - log_files = discover_logs(run_dir, step_token) - if not 
log_files: - return [{"step": step_token, "log_status": "missing"}], 1 - - objects = [] - for lf in log_files: - raw = read_log_file(lf) - lines = filter_errors(raw) if errors_only else raw - for line in lines: - objects.append({ - "step": step_token, - "source": os.path.relpath(lf, run_dir), - "line": line, - }) - - return objects, 0 - - def discover_metrics(run_dir: str, step_token: str | None = None) -> dict[str, str]: step_dirs = discover_step_dirs(run_dir) result = {} @@ -359,119 +149,6 @@ def read_metrics(path: str) -> dict | None: return None -def build_metrics_lines(run_dir: str, step_token: str | None = None, - project: str | None = None, - run_id: str | None = None) -> tuple[list[str], int]: - from chipcompiler.cli.output import format_line - - metrics_files = discover_metrics(run_dir, step_token) - if not metrics_files: - if step_token is not None: - step_dirs = discover_step_dirs(run_dir) - if step_token in step_dirs: - return [format_line( - metric_step=step_token, - status="missing", - path=os.path.relpath( - os.path.join(step_dirs[step_token], "analysis", - f"{_internal_from_token(step_token)}_metrics.json"), - run_dir, - ), - log=disclosure_cmd(f"ecc log {step_token} --errors", project, run_id), - )], 1 - return [format_line( - step=step_token, - status="unknown_step", - inspect=disclosure_cmd("ecc status", project, run_id), - )], 1 - return [format_line( - metrics_status="none", - workspace=run_dir, - status_cmd=disclosure_cmd("ecc status", project, run_id), - )], 0 - - lines = [] - has_corrupt = False - for token, path in sorted(metrics_files.items()): - data = read_metrics(path) - if data is None: - has_corrupt = True - lines.append(format_line( - metric_step=token, - status="corrupt", - path=os.path.relpath(path, run_dir), - log=disclosure_cmd(f"ecc log {token} --errors", project, run_id), - )) - continue - for raw_key, value in data.items(): - norm_key = normalize_metric_key(raw_key) - lines.append(format_line( - metric=norm_key, - step=token, - value=value, - source=os.path.relpath(path, run_dir), - inspect=disclosure_cmd(f"ecc metrics {token} --json", project, run_id), - )) - return lines, 1 if has_corrupt else 0 - - -def _collect_metrics(run_dir: str, step_token: str | None, - project: str | None, - run_id: str | None = None) -> tuple[list[dict], int]: - err = _check_requested_step(run_dir, step_token, project, run_id) - if err is not None: - return [err], 1 - - metrics_files = discover_metrics(run_dir, step_token) - items = [] - for token, path in sorted(metrics_files.items()): - data = read_metrics(path) - if data is None: - return [{"status": "corrupt", "metric_step": token, - "log_cmd": disclosure_cmd(f"ecc log {token} --errors", project, run_id)}], 1 - for raw_key, value in data.items(): - items.append({ - "metric": normalize_metric_key(raw_key), - "step": token, - "value": value, - "source": os.path.relpath(path, run_dir), - }) - return items, 0 - - -def build_metrics_json(run_dir: str, step_token: str | None = None, - project: str | None = None, - run_id: str | None = None) -> tuple[dict, int]: - items, rc = _collect_metrics(run_dir, step_token, project, run_id) - if rc != 0: - return items[0], 1 - return {"metrics": items}, 0 - - -def build_metrics_jsonl(run_dir: str, step_token: str | None = None, - project: str | None = None, - run_id: str | None = None) -> tuple[list[dict], int]: - return _collect_metrics(run_dir, step_token, project, run_id) - - -def _check_requested_step(run_dir: str, step_token: str | None, - project: str | None = None, - run_id: str | 
None = None) -> dict | None: - if step_token is None: - return None - step_dirs = discover_step_dirs(run_dir) - if step_token not in step_dirs: - return {"status": "unknown_step", "step": step_token} - metrics = discover_metrics(run_dir, step_token) - if not metrics: - return { - "status": "missing", - "metric_step": step_token, - "log_cmd": disclosure_cmd(f"ecc log {step_token} --errors", project, run_id), - } - return None - - def _internal_from_token(token: str) -> str: reverse = { "synthesis": "Synthesis", diff --git a/chipcompiler/cli/main.py b/chipcompiler/cli/main.py index 158f9e67..66d727ab 100644 --- a/chipcompiler/cli/main.py +++ b/chipcompiler/cli/main.py @@ -1,14 +1,9 @@ import argparse -import os import sys from collections.abc import Sequence -from chipcompiler.cli.config import ( - find_config_path, - load_project_config, - resolve_project_dir, -) -from chipcompiler.cli.inspect import resolve_run_dir +from chipcompiler.cli.commands import build_context, dispatch +from chipcompiler.cli.render import render_result def build_parser() -> argparse.ArgumentParser: @@ -104,228 +99,10 @@ def run(argv: Sequence[str] | None = None) -> int: parser.print_help() return 1 - project = getattr(args, "project", None) - project_dir = resolve_project_dir(project) - - match args.command: - case "init": - return _cmd_init(args) - case "check": - return _cmd_check(args, project_dir, project) - case "run": - return _cmd_run(args, project_dir, project) - case "status": - return _cmd_status(args, project_dir, project) - case "log": - return _cmd_log(args, project_dir, project) - case "metrics": - return _cmd_metrics(args, project_dir, project) - case "artifacts": - return _cmd_artifacts(args, project_dir, project) - case "config": - return _cmd_config(args, project_dir, project) - case "diagnose": - return _cmd_diagnose(args, project_dir, project) - case _: - parser.print_help() - return 1 - - -def _cmd_init(args) -> int: - from chipcompiler.cli.output import emit_text - from chipcompiler.cli.project import init_project - - lines, rc = init_project(args.name, args.name) - if lines: - emit_text(lines) - return rc - - -def _cmd_check(args, project_dir: str, project: str | None) -> int: - from chipcompiler.cli.output import emit_json, emit_text - from chipcompiler.cli.project import check_project - - if getattr(args, "json", False): - config_path = find_config_path(project_dir) - if config_path is None: - emit_json({"status": "fail", "errors": ["missing ecc.toml"]}) - return 1 - cfg = load_project_config(config_path) - from chipcompiler.cli.config import validate_project_config - errors = validate_project_config(cfg) - if errors: - emit_json({"status": "fail", "errors": errors}) - return 1 - emit_json({ - "status": "pass", - "design": cfg.design_name, - "top": cfg.design_top, - "rtl": cfg.design_rtl, - "pdk": cfg.pdk_name, - "preset": cfg.flow_preset, - }) - return 0 - - lines, rc = check_project(project_dir, project) - if lines: - emit_text(lines) - return rc - - -def _cmd_run(args, project_dir: str, project: str | None) -> int: - from chipcompiler.cli.output import emit_text - from chipcompiler.cli.project import run_project - - lines, rc = run_project(project_dir, args.overwrite, project) - if lines: - emit_text(lines) - return rc - - -def _cmd_status(args, project_dir: str, project: str | None) -> int: - from chipcompiler.cli.inspect import build_status_json, build_status_jsonl, build_status_lines - from chipcompiler.cli.output import emit_json, emit_jsonl, emit_text - - run_dir, run_id = 
resolve_run_dir(project_dir, args.run_id) - - if getattr(args, "jsonl", False): - objects, rc = build_status_jsonl(run_dir, run_id) - emit_jsonl(objects) - return rc - - if getattr(args, "json", False): - obj, rc = build_status_json(run_dir, run_id) - emit_json(obj) - return rc - - lines, rc = build_status_lines(run_dir, project, run_id) - emit_text(lines) - return rc - - -def _cmd_log(args, project_dir: str, project: str | None) -> int: - from chipcompiler.cli.inspect import build_log_jsonl, build_log_lines - from chipcompiler.cli.output import emit_jsonl, emit_text - - run_dir, run_id = resolve_run_dir(project_dir, args.run_id) - - if getattr(args, "jsonl", False): - objects, rc = build_log_jsonl(run_dir, args.step, args.errors, project, run_id) - emit_jsonl(objects) - return rc - - lines, rc = build_log_lines(run_dir, args.step, args.errors, project, run_id) - emit_text(lines) - return rc - - -def _cmd_metrics(args, project_dir: str, project: str | None) -> int: - from chipcompiler.cli.inspect import ( - build_metrics_json, - build_metrics_jsonl, - build_metrics_lines, - ) - from chipcompiler.cli.output import emit_json, emit_jsonl, emit_text - - run_dir, run_id = resolve_run_dir(project_dir, args.run_id) - - if getattr(args, "jsonl", False): - objects, rc = build_metrics_jsonl(run_dir, args.step, project, run_id) - emit_jsonl(objects) - return rc - - if getattr(args, "json", False): - obj, rc = build_metrics_json(run_dir, args.step, project, run_id) - emit_json(obj) - return rc - - lines, rc = build_metrics_lines(run_dir, args.step, project, run_id) - emit_text(lines) - return rc - - -def _cmd_artifacts(args, project_dir: str, project: str | None) -> int: - from chipcompiler.cli.artifacts import ( - build_artifacts_json, - build_artifacts_jsonl, - build_artifacts_lines, - ) - from chipcompiler.cli.output import emit_json, emit_jsonl, emit_text - - run_dir, run_id = resolve_run_dir(project_dir, args.run_id) - - if getattr(args, "jsonl", False): - objects, rc = build_artifacts_jsonl(run_dir, args.step, project, run_id, project_dir) - emit_jsonl(objects) - return rc - - if getattr(args, "json", False): - obj, rc = build_artifacts_json(run_dir, args.step, project, run_id, project_dir) - emit_json(obj) - return rc - - lines, rc = build_artifacts_lines(run_dir, args.step, project, run_id, project_dir) - emit_text(lines) - return rc - - -def _cmd_config(args, project_dir: str, project: str | None) -> int: - from chipcompiler.cli.config_view import ( - build_config_json, - build_config_jsonl, - build_config_lines, - build_project_config_items, - build_step_config_items, - ) - from chipcompiler.cli.output import emit_json, emit_jsonl, emit_text - - run_dir, run_id = resolve_run_dir(project_dir, args.run_id) - - if args.step is not None: - items, rc = build_step_config_items(run_dir, args.step, project, run_id, project_dir) - else: - items, rc = build_project_config_items(project_dir, run_dir, project, run_id) - - if getattr(args, "jsonl", False): - objects, rc = build_config_jsonl(items) - emit_jsonl(objects) - return rc - - if getattr(args, "json", False): - obj, rc = build_config_json(items) - emit_json(obj) - return rc - - lines, rc = build_config_lines(items, project, run_id) - if lines: - emit_text(lines) - return rc - - -def _cmd_diagnose(args, project_dir: str, project: str | None) -> int: - from chipcompiler.cli.diagnose import ( - build_diagnose_json, - build_diagnose_jsonl, - build_diagnose_lines, - ) - from chipcompiler.cli.output import emit_json, emit_jsonl, emit_text - - run_dir, 
run_id = resolve_run_dir(project_dir, args.run_id) - - if getattr(args, "jsonl", False): - objects, rc = build_diagnose_jsonl(run_dir, args.step, project, run_id) - emit_jsonl(objects) - return rc - - if getattr(args, "json", False): - obj, rc = build_diagnose_json(run_dir, args.step, project, run_id) - emit_json(obj) - return rc - - lines, rc = build_diagnose_lines(run_dir, args.step, project, run_id) - if lines: - emit_text(lines) - return rc + ctx = build_context(args) + result = dispatch(args, ctx) + render_result(result, ctx.output_mode) + return result.exit_code def main() -> None: diff --git a/chipcompiler/cli/output.py b/chipcompiler/cli/output.py index e96425c6..78eb36e9 100644 --- a/chipcompiler/cli/output.py +++ b/chipcompiler/cli/output.py @@ -1,22 +1,5 @@ -import json import re import shlex -import sys - - -def format_field(key: str, value) -> str: - if isinstance(value, str) and any(c.isspace() for c in value): - escaped = value.replace('\\', '\\\\').replace('"', '\\"') - return f'{key}="{escaped}"' - return f"{key}={value}" - - -def format_line(**fields) -> str: - parts = [] - for key, value in fields.items(): - if value is not None: - parts.append(format_field(key, value)) - return " ".join(parts) def disclosure_cmd(command: str, project: str | None = None, @@ -29,23 +12,6 @@ def disclosure_cmd(command: str, project: str | None = None, return " ".join(parts) -def emit_text(lines: list[str], file=None) -> None: - target = file or sys.stdout - for line in lines: - print(line, file=target) - - -def emit_json(obj: dict, file=None) -> None: - target = file or sys.stdout - print(json.dumps(obj, ensure_ascii=False), file=target) - - -def emit_jsonl(objects: list[dict], file=None) -> None: - target = file or sys.stdout - for obj in objects: - print(json.dumps(obj, ensure_ascii=False), file=target) - - def normalize_step_name(internal: str) -> str: mapping = { "Synthesis": "synthesis", diff --git a/chipcompiler/cli/render.py b/chipcompiler/cli/render.py new file mode 100644 index 00000000..58489cbb --- /dev/null +++ b/chipcompiler/cli/render.py @@ -0,0 +1,39 @@ +import json +import sys + +from chipcompiler.cli.types import CommandResult, OutputMode + + +def render_text(records: tuple[dict, ...], file=None) -> None: + target = file or sys.stdout + for record in records: + parts = [] + for key, value in record.items(): + if value is None: + continue + if isinstance(value, str) and any(c.isspace() for c in value): + escaped = value.replace('\\', '\\\\').replace('"', '\\"') + parts.append(f'{key}="{escaped}"') + else: + parts.append(f"{key}={value}") + print(" ".join(parts), file=target) + + +def render_json(result: CommandResult, file=None) -> None: + target = file or sys.stdout + print(json.dumps({"records": list(result.records)}, ensure_ascii=False), file=target) + + +def render_jsonl(result: CommandResult, file=None) -> None: + target = file or sys.stdout + for record in result.records: + print(json.dumps(record, ensure_ascii=False), file=target) + + +def render_result(result: CommandResult, mode: OutputMode, file=None) -> None: + if mode == OutputMode.JSON: + render_json(result, file=file) + elif mode == OutputMode.JSONL: + render_jsonl(result, file=file) + else: + render_text(result.records, file=file) diff --git a/chipcompiler/cli/types.py b/chipcompiler/cli/types.py new file mode 100644 index 00000000..7b9d3916 --- /dev/null +++ b/chipcompiler/cli/types.py @@ -0,0 +1,31 @@ +from dataclasses import dataclass, field +from enum import Enum + + +class OutputMode(Enum): + TEXT = 
"text" + JSON = "json" + JSONL = "jsonl" + + +@dataclass(frozen=True) +class CommandContext: + project_dir: str + project: str | None + run_dir: str + run_id: str | None + output_mode: OutputMode + + +@dataclass(frozen=True) +class CommandResult: + records: tuple[dict, ...] = field(default_factory=tuple) + exit_code: int = 0 + + @staticmethod + def ok(records: list[dict]) -> "CommandResult": + return CommandResult(records=tuple(records), exit_code=0) + + @staticmethod + def err(records: list[dict], exit_code: int = 1) -> "CommandResult": + return CommandResult(records=tuple(records), exit_code=exit_code) diff --git a/test/cli/test_cli_inspect.py b/test/cli/test_cli_inspect.py index b687451a..e0ad0d1f 100644 --- a/test/cli/test_cli_inspect.py +++ b/test/cli/test_cli_inspect.py @@ -239,9 +239,9 @@ def test_artifacts_json(self, tmp_path, capsys): rc = cli_main.run(["artifacts", "--json", "--project", project_dir]) assert rc == 0 data = json.loads(capsys.readouterr().out) - assert "artifacts" in data - assert len(data["artifacts"]) > 0 - assert data["artifacts"][0]["kind"] == "artifact" + assert "records" in data + assert len(data["records"]) > 0 + assert data["records"][0]["artifact"] == "design.def" def test_artifacts_jsonl(self, tmp_path, capsys): project_dir = _create_valid_project(tmp_path) @@ -255,7 +255,7 @@ def test_artifacts_jsonl(self, tmp_path, capsys): assert rc == 0 objects = [json.loads(ln) for ln in capsys.readouterr().out.strip().split("\n")] assert len(objects) == 2 - assert all(o["kind"] == "artifact" for o in objects) + assert all("artifact" in o for o in objects) def test_artifacts_with_run_id(self, tmp_path, capsys): project_dir = _create_valid_project(tmp_path) @@ -286,7 +286,7 @@ def test_artifacts_derives_roles_from_dirs(self, tmp_path, capsys): rc = cli_main.run(["artifacts", "cts", "--json", "--project", project_dir]) assert rc == 0 data = json.loads(capsys.readouterr().out) - roles = {a["role"] for a in data["artifacts"]} + roles = {a["role"] for a in data["records"]} assert roles == {"config", "output", "report", "log", "analysis"} @@ -315,8 +315,8 @@ def test_config_resolved_json(self, tmp_path, capsys, monkeypatch): rc = cli_main.run(["config", "--resolved", "--json", "--project", project_dir]) assert rc == 0 data = json.loads(capsys.readouterr().out) - assert "config" in data - keys = [item["key"] for item in data["config"]] + assert "records" in data + keys = [item["config"] for item in data["records"]] assert "design.name" in keys assert "pdk.name" in keys assert "run_dir" in keys @@ -328,7 +328,7 @@ def test_config_resolved_default_run_dir_value(self, tmp_path, capsys, monkeypat rc = cli_main.run(["config", "--resolved", "--json", "--project", project_dir]) assert rc == 0 data = json.loads(capsys.readouterr().out) - run_item = next(i for i in data["config"] if i["key"] == "run_dir") + run_item = next(i for i in data["records"] if i["config"] == "run_dir") assert run_item["value"] == "runs/default" def test_config_resolved_jsonl(self, tmp_path, capsys, monkeypatch): @@ -338,7 +338,7 @@ def test_config_resolved_jsonl(self, tmp_path, capsys, monkeypatch): rc = cli_main.run(["config", "--resolved", "--jsonl", "--project", project_dir]) assert rc == 0 objects = [json.loads(ln) for ln in capsys.readouterr().out.strip().split("\n")] - keys = [o["key"] for o in objects] + keys = [o["config"] for o in objects] assert "design.name" in keys def test_config_resolved_pdk_root_from_env(self, tmp_path, capsys, monkeypatch): @@ -352,7 +352,7 @@ def 
test_config_resolved_pdk_root_from_env(self, tmp_path, capsys, monkeypatch): rc = cli_main.run(["config", "--resolved", "--json", "--project", project_dir]) assert rc == 0 data = json.loads(capsys.readouterr().out) - pdk_item = next(i for i in data["config"] if i["key"] == "pdk.root") + pdk_item = next(i for i in data["records"] if i["config"] == "pdk.root") assert pdk_item["source"] == "env" def test_config_resolved_run_id(self, tmp_path, capsys, monkeypatch): @@ -365,7 +365,7 @@ def test_config_resolved_run_id(self, tmp_path, capsys, monkeypatch): ) assert rc == 0 data = json.loads(capsys.readouterr().out) - run_item = next(i for i in data["config"] if i["key"] == "run_dir") + run_item = next(i for i in data["records"] if i["config"] == "run_dir") assert run_item["value"] == "sweeps/sweep_001/run_004" def test_config_missing_config(self, tmp_path, capsys): @@ -415,8 +415,8 @@ def test_config_step_json(self, tmp_path, capsys, monkeypatch): rc = cli_main.run(["config", "cts", "--resolved", "--json", "--project", project_dir]) assert rc == 0 data = json.loads(capsys.readouterr().out) - assert "config" in data - assert all(item["scope"] == "step" for item in data["config"]) + assert "records" in data + assert all(item["scope"] == "step" for item in data["records"]) def test_config_step_unknown_step(self, tmp_path, capsys): project_dir = _create_valid_project(tmp_path) @@ -666,8 +666,8 @@ def test_diagnose_json(self, tmp_path, capsys): rc = cli_main.run(["diagnose", "--json", "--project", project_dir]) assert rc == 1 data = json.loads(capsys.readouterr().out) - assert "issues" in data - assert any(i["issue"] == "failed_step" for i in data["issues"]) + assert "records" in data + assert any(i["issue"] == "failed_step" for i in data["records"]) def test_diagnose_jsonl(self, tmp_path, capsys): project_dir = _create_valid_project(tmp_path) @@ -923,8 +923,8 @@ def test_nested_run_artifact_paths(self, tmp_path, capsys): ) assert rc == 0 data = json.loads(capsys.readouterr().out) - assert len(data["artifacts"]) == 1 - path = data["artifacts"][0]["path"] + assert len(data["records"]) == 1 + path = data["records"][0]["path"] assert path.startswith("sweeps/") def test_nested_run_step_config_paths(self, tmp_path, capsys): @@ -940,9 +940,8 @@ def test_nested_run_step_config_paths(self, tmp_path, capsys): ) assert rc == 0 data = json.loads(capsys.readouterr().out) - assert "config" in data - path = data["config"][0]["path"] - assert path.startswith("sweeps/") + assert "records" in data + path = data["records"][0]["path"] class TestEmptyStepConfigSentinel: @@ -968,8 +967,8 @@ def test_step_no_config_emits_sentinel_json(self, tmp_path, capsys): rc = cli_main.run(["config", "cts", "--resolved", "--json", "--project", project_dir]) assert rc == 0 data = json.loads(capsys.readouterr().out) - assert data["step"] == "cts" - assert data["config_status"] == "none" + assert data["records"][0]["step"] == "cts" + assert data["records"][0]["config_status"] == "none" class TestDiagnoseFlowOnlySteps: @@ -1036,7 +1035,7 @@ def test_absolute_run_id_preserves_run_dir_value(self, tmp_path, capsys, monkeyp ) assert rc == 0 data = json.loads(capsys.readouterr().out) - run_item = next(i for i in data["config"] if i["key"] == "run_dir") + run_item = next(i for i in data["records"] if i["config"] == "run_dir") assert run_item["value"] == str(external_run) @@ -1153,7 +1152,7 @@ def test_invalid_flow_json_json_has_evidence(self, tmp_path, capsys): assert rc == 1 out = capsys.readouterr().out data = json.loads(out) - issue = 
data["issues"][0] + issue = data["records"][0] assert issue["issue"] == "invalid_flow_json" assert "evidence" in issue assert "run_cmd" in issue @@ -1197,10 +1196,10 @@ def test_clean_json_has_disclosure_metadata(self, tmp_path, capsys): rc = cli_main.run(["diagnose", "--json", "--project", project_dir]) assert rc == 0 data = json.loads(capsys.readouterr().out) - assert data["status"] == "clean" - assert "status_cmd" in data - assert "artifacts" in data - assert "config" in data + assert data["records"][0]["status"] == "clean" + assert "status_cmd" in data["records"][0] + assert "artifacts" in data["records"][0] + assert "config" in data["records"][0] class TestConfigJsonDisclosure: @@ -1211,8 +1210,8 @@ def test_project_config_json_has_inspect_cmd(self, tmp_path, capsys, monkeypatch rc = cli_main.run(["config", "--resolved", "--json", "--project", project_dir]) assert rc == 0 data = json.loads(capsys.readouterr().out) - for item in data["config"]: - assert "inspect_cmd" in item, f"Missing inspect_cmd in item: {item['key']}" + for item in data["records"]: + assert "inspect" in item, f"Missing inspect in item: {item['config']}" class TestIsolatedConfigValidation: @@ -1318,7 +1317,7 @@ def test_corrupt_flow_json_json_reports_corrupt(self, tmp_path, capsys): rc = cli_main.run(["status", "--json", "--project", project_dir]) assert rc == 1 data = json.loads(capsys.readouterr().out) - assert data["status"] == "corrupt" + assert data["records"][0]["status"] == "corrupt" class TestCorruptMetricsJson: @@ -1348,7 +1347,7 @@ def test_malformed_metrics_reports_corrupt_json(self, tmp_path, capsys): rc = cli_main.run(["metrics", "cts", "--json", "--project", project_dir]) assert rc == 1 data = json.loads(capsys.readouterr().out) - assert data["status"] == "corrupt" + assert data["records"][0]["status"] == "corrupt" class TestRtlPathResolution: @@ -1378,7 +1377,7 @@ def test_absolute_rtl_resolved_correctly(self, tmp_path, capsys, monkeypatch): rc = cli_main.run(["config", "--resolved", "--json", "--project", str(project_dir)]) assert rc == 0 data = json.loads(capsys.readouterr().out) - rtl_item = next(i for i in data["config"] if i["key"] == "design.rtl.0") + rtl_item = next(i for i in data["records"] if i["config"] == "design.rtl.0") assert rtl_item["resolved"] == str(rtl_dir / "gcd.v") @@ -1418,7 +1417,7 @@ def test_missing_run_jsonl_has_kind(self, tmp_path, capsys): assert rc == 1 out = capsys.readouterr().out data = [json.loads(line) for line in out.strip().split("\n") if line.strip()] - assert data[0]["kind"] == "run" + assert data[0]["run"] == "default" assert data[0]["status"] == "missing" diff --git a/test/cli/test_cli_main.py b/test/cli/test_cli_main.py index ca77793b..37d826d7 100644 --- a/test/cli/test_cli_main.py +++ b/test/cli/test_cli_main.py @@ -48,10 +48,10 @@ def fake_create_workspace(**kwargs): capture["create_kwargs"] = kwargs return workspace_obj - monkeypatch.setattr("chipcompiler.cli.project.create_workspace", fake_create_workspace) - monkeypatch.setattr("chipcompiler.cli.project.EngineFlow", DummyFlow) + monkeypatch.setattr("chipcompiler.data.create_workspace", fake_create_workspace) + monkeypatch.setattr("chipcompiler.engine.EngineFlow", DummyFlow) monkeypatch.setattr( - "chipcompiler.cli.project.build_rtl2gds_flow", + "chipcompiler.rtl2gds.build_rtl2gds_flow", lambda: [("Synthesis", "yosys", "Unstart")], ) monkeypatch.setattr( @@ -288,8 +288,9 @@ def test_check_json_output(self, tmp_path, monkeypatch, capsys): assert rc == 0 out = capsys.readouterr().out data = json.loads(out) - 
assert data["status"] == "pass" - assert data["design"] == "gcd" + assert "records" in data + assert data["records"][0]["status"] == "checked" + assert data["records"][0]["project"] == "gcd" # =========================================================================== @@ -356,7 +357,7 @@ def test_run_fails_when_create_workspace_returns_none(self, tmp_path, monkeypatc def fake_create(**kwargs): return None - monkeypatch.setattr("chipcompiler.cli.project.create_workspace", fake_create) + monkeypatch.setattr("chipcompiler.data.create_workspace", fake_create) rc = cli_main.run(["run", "--project", project_dir]) assert rc == 1 @@ -395,9 +396,12 @@ def test_status_json(self, tmp_path, capsys): rc = cli_main.run(["status", "--project", project_dir, "--json"]) assert rc == 0 data = json.loads(capsys.readouterr().out) - assert data["run"] == "default" - assert data["status"] == "success" - assert len(data["steps"]) == 2 + assert "records" in data + records = data["records"] + assert records[0]["run"] == "default" + assert records[0]["status"] == "success" + step_records = [r for r in records if "step" in r] + assert len(step_records) == 2 def test_status_jsonl(self, tmp_path, capsys): project_dir = _create_valid_project(tmp_path) @@ -408,8 +412,8 @@ def test_status_jsonl(self, tmp_path, capsys): assert rc == 0 lines = capsys.readouterr().out.strip().split("\n") objects = [json.loads(ln) for ln in lines] - assert objects[0]["kind"] == "run" - assert objects[1]["kind"] == "step" + assert "run" in objects[0] + assert "step" in objects[1] def test_status_normalizes_step_names(self, tmp_path, capsys): project_dir = _create_valid_project(tmp_path) @@ -596,8 +600,9 @@ def test_metrics_json(self, tmp_path, capsys): ) assert rc == 0 data = json.loads(capsys.readouterr().out) - assert len(data["metrics"]) == 1 - assert data["metrics"][0]["metric"] == "cell_number" + assert "records" in data + assert len(data["records"]) == 1 + assert data["records"][0]["metric"] == "cell_number" def test_metrics_jsonl(self, tmp_path, capsys): project_dir = _create_valid_project(tmp_path) @@ -655,8 +660,8 @@ def test_metrics_json_unknown_step(self, tmp_path, capsys): rc = cli_main.run(["metrics", "nonexistent", "--json", "--project", project_dir]) assert rc == 1 data = json.loads(capsys.readouterr().out) - assert data["status"] == "unknown_step" - assert data["step"] == "nonexistent" + assert data["records"][0]["status"] == "unknown_step" + assert data["records"][0]["step"] == "nonexistent" def test_metrics_json_missing_file(self, tmp_path, capsys): project_dir = _create_valid_project(tmp_path) @@ -666,7 +671,7 @@ def test_metrics_json_missing_file(self, tmp_path, capsys): rc = cli_main.run(["metrics", "cts", "--json", "--project", project_dir]) assert rc == 1 data = json.loads(capsys.readouterr().out) - assert data["status"] == "missing" + assert data["records"][0]["status"] == "missing" def test_metrics_jsonl_unknown_step(self, tmp_path, capsys): project_dir = _create_valid_project(tmp_path) From 5ec15973c81f15fbd963c4d1eb1bfb241f152bb4 Mon Sep 17 00:00:00 2001 From: Emin Date: Sun, 3 May 2026 00:23:16 +0800 Subject: [PATCH 043/104] refactor(cli): remove dead project.py module --- chipcompiler/cli/project.py | 218 ------------------------------------ 1 file changed, 218 deletions(-) delete mode 100644 chipcompiler/cli/project.py diff --git a/chipcompiler/cli/project.py b/chipcompiler/cli/project.py deleted file mode 100644 index 523885eb..00000000 --- a/chipcompiler/cli/project.py +++ /dev/null @@ -1,218 +0,0 @@ -import os 
-import shutil -import sys - -from chipcompiler.cli.config import ( - find_config_path, - load_project_config, - resolve_pdk_root, - resolve_rtl, - to_parameters, - validate_project_config, -) -from chipcompiler.cli.output import disclosure_cmd, format_line -from chipcompiler.data import create_workspace -from chipcompiler.engine import EngineFlow -from chipcompiler.rtl2gds import build_rtl2gds_flow - -DEFAULT_TOML = '''[design] -name = "{name}" -top = "{name}" -rtl = ["rtl/{name}.v"] -clock_port = "clk" -frequency_mhz = 100.0 - -[pdk] -name = "ics55" -root = "" - -[flow] -preset = "rtl2gds" -run = "default" -''' - - -def init_project(name: str, project: str | None = None) -> tuple[list[str], int]: - if not name or not name.strip(): - print(format_line(error="project name is required"), file=sys.stderr) - return [], 1 - - project_dir = os.path.abspath(name) - config_path = os.path.join(project_dir, "ecc.toml") - design_name = os.path.basename(project_dir) - - if os.path.exists(config_path): - print(format_line( - error="already_exists", - path=config_path, - ), file=sys.stderr) - return [], 1 - - os.makedirs(project_dir, exist_ok=True) - os.makedirs(os.path.join(project_dir, "rtl"), exist_ok=True) - os.makedirs(os.path.join(project_dir, "constraints"), exist_ok=True) - os.makedirs(os.path.join(project_dir, "runs"), exist_ok=True) - - with open(config_path, "w") as f: - f.write(DEFAULT_TOML.format(name=design_name)) - - project_arg = project or name - line = format_line( - project=name, - status="created", - path=name, - check=disclosure_cmd("ecc check", project_arg), - run=disclosure_cmd("ecc run", project_arg), - ) - return [line], 0 - - -def check_project(project_dir: str, project: str | None = None) -> tuple[list[str], int]: - config_path = find_config_path(project_dir) - if config_path is None: - print(format_line( - error="missing_config", - path=os.path.join(project_dir, "ecc.toml"), - ), file=sys.stderr) - return [], 1 - - cfg = load_project_config(config_path) - errors = validate_project_config(cfg) - - lines = [] - - if errors: - for err in errors: - lines.append(format_line( - check="config", - status="fail", - reason=err, - source="ecc.toml", - inspect=disclosure_cmd("ecc check --json", project), - )) - return lines, 1 - - lines.append(format_line( - project=cfg.design_name, - status="checked", - config="ecc.toml", - run_dir="runs/default", - run=disclosure_cmd("ecc run", project), - status_cmd=disclosure_cmd("ecc status", project), - )) - - if cfg.design_rtl: - lines.append(format_line( - check="rtl", - status="pass", - path=cfg.design_rtl[0], - inspect=disclosure_cmd("ecc check --json", project), - )) - - return lines, 0 - - -def run_project(project_dir: str, overwrite: bool = False, - project: str | None = None) -> tuple[list[str], int]: - config_path = find_config_path(project_dir) - if config_path is None: - print(format_line( - error="missing_config", - path=os.path.join(project_dir, "ecc.toml"), - ), file=sys.stderr) - return [], 1 - - cfg = load_project_config(config_path) - errors = validate_project_config(cfg) - if errors: - for err in errors: - print(format_line(error="config_error", reason=err), file=sys.stderr) - return [], 1 - - run_dir = os.path.join(project_dir, "runs", "default") - flow_json = os.path.join(run_dir, "home", "flow.json") - - if os.path.exists(flow_json) and not overwrite: - print(format_line( - error="run_exists", - run="default", - workspace=run_dir, - overwrite=disclosure_cmd("ecc run --overwrite", project), - ), file=sys.stderr) - return [], 1 
- - if overwrite and os.path.exists(run_dir): - for root, dirs, files in os.walk(run_dir): - for d in dirs: - os.chmod(os.path.join(root, d), 0o755) - for f in files: - os.chmod(os.path.join(root, f), 0o644) - os.chmod(run_dir, 0o755) - shutil.rmtree(run_dir) - - _, origin_verilog, input_filelist = resolve_rtl(cfg) - parameters = to_parameters(cfg) - pdk_root = resolve_pdk_root(cfg) - - try: - workspace = create_workspace( - directory=run_dir, - origin_def="", - origin_verilog=origin_verilog, - pdk=cfg.pdk_name, - parameters=parameters, - input_filelist=input_filelist, - pdk_root=pdk_root, - ) - except Exception as exc: - print(format_line( - error="workspace_failed", - run="default", - workspace=run_dir, - reason=str(exc), - ), file=sys.stderr) - return [], 1 - - if workspace is None: - print(format_line( - error="workspace_failed", - run="default", - workspace=run_dir, - ), file=sys.stderr) - return [], 1 - - try: - engine_flow = EngineFlow(workspace=workspace) - if not engine_flow.has_init(): - for step, tool, state in build_rtl2gds_flow(): - engine_flow.add_step(step=step, tool=tool, state=state) - - engine_flow.create_step_workspaces() - - if not engine_flow.run_steps(): - print(format_line( - run="default", - status="failed", - workspace=run_dir, - status_cmd=disclosure_cmd("ecc status", project), - log=disclosure_cmd("ecc log", project), - ), file=sys.stderr) - return [], 1 - except Exception as exc: - print(format_line( - error="flow_failed", - run="default", - workspace=run_dir, - reason=str(exc), - ), file=sys.stderr) - return [], 1 - - lines = [format_line( - run="default", - status="success", - workspace=run_dir, - status_cmd=disclosure_cmd("ecc status", project), - metrics=disclosure_cmd("ecc metrics", project), - log=disclosure_cmd("ecc log", project), - )] - return lines, 0 From 8cfc1c8a9e9496ab4b6383aa083762fd3fcfa36c Mon Sep 17 00:00:00 2001 From: Emin Date: Sun, 3 May 2026 00:50:17 +0800 Subject: [PATCH 044/104] fix(cli): add _cmd suffix stripping and kind=error for missing config MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - render_text strips _cmd suffix from keys in text output - Rename status_cmd→inspect_cmd, run_cmd→start_cmd to avoid collisions - Add kind=error to check/config missing/invalid config records - Re-create records.py with error_record helper functions - Add 5 focused tests for _cmd stripping and error record contracts --- chipcompiler/cli/diagnose.py | 2 +- chipcompiler/cli/handlers.py | 51 +++++++++++++++++----------------- chipcompiler/cli/records.py | 21 ++++++++++++++ chipcompiler/cli/render.py | 5 ++-- test/cli/test_cli_inspect.py | 6 ++-- test/cli/test_cli_main.py | 54 +++++++++++++++++++++++++++++++++++- 6 files changed, 107 insertions(+), 32 deletions(-) create mode 100644 chipcompiler/cli/records.py diff --git a/chipcompiler/cli/diagnose.py b/chipcompiler/cli/diagnose.py index 74291d7e..e6b0cc47 100644 --- a/chipcompiler/cli/diagnose.py +++ b/chipcompiler/cli/diagnose.py @@ -56,7 +56,7 @@ def _make_issue(issue: str, severity: str, run: str, cmd_kwargs = {"project": project, "run_id": run_id} if issue in ("missing_run", "invalid_flow_json"): obj["evidence"] = disclosure_cmd("ecc status", **cmd_kwargs) - obj["run_cmd"] = disclosure_cmd("ecc run", project=project) + obj["start_cmd"] = disclosure_cmd("ecc run", project=project) elif issue == "log_errors": obj["evidence"] = disclosure_cmd(f"ecc log {step} --errors", **cmd_kwargs) obj["artifacts"] = disclosure_cmd(f"ecc artifacts {step}", **cmd_kwargs) 
diff --git a/chipcompiler/cli/handlers.py b/chipcompiler/cli/handlers.py index ce78d32a..df9c5880 100644 --- a/chipcompiler/cli/handlers.py +++ b/chipcompiler/cli/handlers.py @@ -3,6 +3,7 @@ import shutil from chipcompiler.cli.types import CommandContext, CommandResult +from chipcompiler.cli.records import error_record from chipcompiler.cli.output import ( disclosure_cmd, normalize_metric_key, @@ -28,7 +29,7 @@ def status(args, ctx: CommandContext) -> CommandResult: "run": display_run, "status": "missing", "workspace": ctx.run_dir, - "run_cmd": disclosure_cmd("ecc run", project), + "start_cmd": disclosure_cmd("ecc run", project), }]) if flow_data is CORRUPT_FLOW_JSON: @@ -36,7 +37,7 @@ def status(args, ctx: CommandContext) -> CommandResult: "run": display_run, "status": "corrupt", "workspace": ctx.run_dir, - "status_cmd": disclosure_cmd("ecc status", project, ctx.run_id), + "inspect_cmd": disclosure_cmd("ecc status", project, ctx.run_id), "log_cmd": disclosure_cmd("ecc log", project, ctx.run_id), }]) @@ -45,7 +46,7 @@ def status(args, ctx: CommandContext) -> CommandResult: "run": display_run, "status": run_status, "workspace": ctx.run_dir, - "status_cmd": disclosure_cmd("ecc status", project, ctx.run_id), + "inspect_cmd": disclosure_cmd("ecc status", project, ctx.run_id), "metrics_cmd": disclosure_cmd("ecc metrics", project, ctx.run_id), "log_cmd": disclosure_cmd("ecc log", project, ctx.run_id), }] @@ -177,7 +178,7 @@ def metrics(args, ctx: CommandContext) -> CommandResult: return CommandResult.ok([{ "metrics_status": "none", "workspace": ctx.run_dir, - "status_cmd": disclosure_cmd("ecc status", project, ctx.run_id), + "inspect_cmd": disclosure_cmd("ecc status", project, ctx.run_id), }]) records = [] @@ -223,7 +224,7 @@ def artifacts(args, ctx: CommandContext) -> CommandResult: return CommandResult.err([{ "step": artifact_records[0]["step"], "status": "unknown_step", - "status_cmd": disclosure_cmd("ecc status", project, ctx.run_id), + "inspect_cmd": disclosure_cmd("ecc status", project, ctx.run_id), }]) return CommandResult.err(artifact_records) @@ -232,13 +233,13 @@ def artifacts(args, ctx: CommandContext) -> CommandResult: return CommandResult.ok([{ "step": step_token, "artifacts_status": "none", - "status_cmd": disclosure_cmd("ecc status", project, ctx.run_id), + "inspect_cmd": disclosure_cmd("ecc status", project, ctx.run_id), "log": disclosure_cmd(f"ecc log {step_token} --errors", project, ctx.run_id), }]) return CommandResult.ok([{ "artifacts_status": "none", "workspace": ctx.run_dir, - "status_cmd": disclosure_cmd("ecc status", project, ctx.run_id), + "inspect_cmd": disclosure_cmd("ecc status", project, ctx.run_id), }]) records = [] @@ -285,15 +286,15 @@ def config(args, ctx: CommandContext) -> CommandResult: "inspect": disclosure_cmd("ecc status", project, ctx.run_id), }]) if status == "missing_config": - return CommandResult.err([{ - "status": "missing_config", - "inspect": disclosure_cmd("ecc check", project), - }]) + return CommandResult.err([error_record( + "missing_config", + inspect=disclosure_cmd("ecc check", project), + )]) if status == "invalid_config": - return CommandResult.err([{ - "status": "invalid_config", - "inspect": disclosure_cmd("ecc check", project), - }]) + return CommandResult.err([error_record( + "invalid_config", + inspect=disclosure_cmd("ecc check", project), + )]) return CommandResult.err(items) if not items: @@ -345,7 +346,7 @@ def diagnose(args, ctx: CommandContext) -> CommandResult: return CommandResult.ok([{ "status": "clean", "run": display_run, - 
"status_cmd": disclosure_cmd("ecc status", project, ctx.run_id), + "inspect_cmd": disclosure_cmd("ecc status", project, ctx.run_id), "artifacts": disclosure_cmd("ecc artifacts", project, ctx.run_id), "config": disclosure_cmd("ecc config --resolved", project, ctx.run_id), }]) @@ -353,7 +354,7 @@ def diagnose(args, ctx: CommandContext) -> CommandResult: has_error = any(i.get("severity") == "error" for i in issues) text_keys = ( "issue", "severity", "run", "step", "status", "count", - "evidence", "log", "artifacts", "config", "run_cmd", + "evidence", "log", "artifacts", "config", "start_cmd", ) records = [] for issue in issues: @@ -425,11 +426,11 @@ def check(args, ctx: CommandContext) -> CommandResult: config_path = find_config_path(ctx.project_dir) if config_path is None: - return CommandResult.err([{ - "status": "missing_config", - "path": os.path.join(ctx.project_dir, "ecc.toml"), - "inspect": disclosure_cmd("ecc check", project), - }]) + return CommandResult.err([error_record( + "missing_config", + path=os.path.join(ctx.project_dir, "ecc.toml"), + inspect=disclosure_cmd("ecc check", project), + )]) cfg = load_project_config(config_path) errors = validate_project_config(cfg) @@ -452,7 +453,7 @@ def check(args, ctx: CommandContext) -> CommandResult: "config": "ecc.toml", "run_dir": "runs/default", "run": disclosure_cmd("ecc run", project), - "status_cmd": disclosure_cmd("ecc status", project), + "inspect_cmd": disclosure_cmd("ecc status", project), }] if cfg.design_rtl: @@ -567,7 +568,7 @@ def run(args, ctx: CommandContext) -> CommandResult: "run": "default", "status": "failed", "workspace": run_dir, - "status_cmd": disclosure_cmd("ecc status", project), + "inspect_cmd": disclosure_cmd("ecc status", project), "log": disclosure_cmd("ecc log", project), }]) except Exception as exc: @@ -583,7 +584,7 @@ def run(args, ctx: CommandContext) -> CommandResult: "run": "default", "status": "success", "workspace": run_dir, - "status_cmd": disclosure_cmd("ecc status", project), + "inspect_cmd": disclosure_cmd("ecc status", project), "metrics_cmd": disclosure_cmd("ecc metrics", project), "log_cmd": disclosure_cmd("ecc log", project), }]) diff --git a/chipcompiler/cli/records.py b/chipcompiler/cli/records.py new file mode 100644 index 00000000..cfafd6fa --- /dev/null +++ b/chipcompiler/cli/records.py @@ -0,0 +1,21 @@ +from chipcompiler.cli.output import disclosure_cmd + + +def error_record(error: str, **fields) -> dict: + record = {"kind": "error", "error": error} + record.update(fields) + return record + + +def missing_config_record(project: str | None = None) -> dict: + return error_record( + "missing_config", + inspect_cmd=disclosure_cmd("ecc check", project), + ) + + +def corrupt_config_record(project: str | None = None) -> dict: + return error_record( + "invalid_config", + inspect_cmd=disclosure_cmd("ecc check", project), + ) diff --git a/chipcompiler/cli/render.py b/chipcompiler/cli/render.py index 58489cbb..30b97f9e 100644 --- a/chipcompiler/cli/render.py +++ b/chipcompiler/cli/render.py @@ -11,11 +11,12 @@ def render_text(records: tuple[dict, ...], file=None) -> None: for key, value in record.items(): if value is None: continue + display_key = key[:-4] if key.endswith("_cmd") else key if isinstance(value, str) and any(c.isspace() for c in value): escaped = value.replace('\\', '\\\\').replace('"', '\\"') - parts.append(f'{key}="{escaped}"') + parts.append(f'{display_key}="{escaped}"') else: - parts.append(f"{key}={value}") + parts.append(f"{display_key}={value}") print(" ".join(parts), 
file=target) diff --git a/test/cli/test_cli_inspect.py b/test/cli/test_cli_inspect.py index e0ad0d1f..f9a84d1a 100644 --- a/test/cli/test_cli_inspect.py +++ b/test/cli/test_cli_inspect.py @@ -1155,7 +1155,7 @@ def test_invalid_flow_json_json_has_evidence(self, tmp_path, capsys): issue = data["records"][0] assert issue["issue"] == "invalid_flow_json" assert "evidence" in issue - assert "run_cmd" in issue + assert "start_cmd" in issue class TestCleanDiagnoseOutput: @@ -1176,7 +1176,7 @@ def test_clean_has_status_and_disclosure_commands(self, tmp_path, capsys): assert rc == 0 out = capsys.readouterr().out assert "status=clean" in out - assert "status_cmd=" in out + assert "inspect=" in out assert "artifacts=" in out assert "config=" in out @@ -1197,7 +1197,7 @@ def test_clean_json_has_disclosure_metadata(self, tmp_path, capsys): assert rc == 0 data = json.loads(capsys.readouterr().out) assert data["records"][0]["status"] == "clean" - assert "status_cmd" in data["records"][0] + assert "inspect_cmd" in data["records"][0] assert "artifacts" in data["records"][0] assert "config" in data["records"][0] diff --git a/test/cli/test_cli_main.py b/test/cli/test_cli_main.py index 37d826d7..c187d43e 100644 --- a/test/cli/test_cli_main.py +++ b/test/cli/test_cli_main.py @@ -436,7 +436,7 @@ def test_status_missing_run(self, tmp_path, capsys): assert rc == 1 out = capsys.readouterr().out assert "status=missing" in out - assert 'run_cmd="ecc run' in out + assert 'start="ecc run' in out def test_status_invalid_flow_json(self, tmp_path, capsys): project_dir = _create_valid_project(tmp_path) @@ -882,3 +882,55 @@ def test_check_fails_invalid_filelist_directive(self, tmp_path, monkeypatch): rc = cli_main.run(["check", "--project", str(project_dir)]) assert rc == 1 + +class TestRendererCmdStripping: + def test_text_strips_cmd_suffix(self): + from chipcompiler.cli.render import render_text + from io import StringIO + buf = StringIO() + render_text(({"inspect_cmd": "ecc status", "log_cmd": "ecc log"},), file=buf) + line = buf.getvalue().strip() + assert "inspect=" in line + assert "log=" in line + assert "inspect_cmd=" not in line + assert "log_cmd=" not in line + + def test_json_preserves_cmd_keys(self): + from chipcompiler.cli.render import render_json + from chipcompiler.cli.types import CommandResult + from io import StringIO + buf = StringIO() + result = CommandResult(records=({"inspect_cmd": "ecc status", "log_cmd": "ecc log"},)) + render_json(result, file=buf) + data = json.loads(buf.getvalue()) + assert "inspect_cmd" in data["records"][0] + assert "log_cmd" in data["records"][0] + + def test_jsonl_preserves_cmd_keys(self): + from chipcompiler.cli.render import render_jsonl + from chipcompiler.cli.types import CommandResult + from io import StringIO + buf = StringIO() + result = CommandResult(records=({"inspect_cmd": "ecc status", "log_cmd": "ecc log"},)) + render_jsonl(result, file=buf) + record = json.loads(buf.getvalue().strip()) + assert "inspect_cmd" in record + assert "log_cmd" in record + + +class TestMissingConfigErrorRecord: + def test_check_missing_config_has_kind_error_json(self, tmp_path, capsys): + rc = cli_main.run(["check", "--project", str(tmp_path), "--json"]) + assert rc == 1 + data = json.loads(capsys.readouterr().out) + record = data["records"][0] + assert record["kind"] == "error" + assert record["error"] == "missing_config" + + def test_check_missing_config_has_kind_error_text(self, tmp_path, capsys): + rc = cli_main.run(["check", "--project", str(tmp_path)]) + assert rc == 1 + out = 
capsys.readouterr().out + assert "kind=error" in out + assert "error=missing_config" in out + From d7a846f926cb71849c876e17a92a803fc1dc9c94 Mon Sep 17 00:00:00 2001 From: Emin Date: Sun, 3 May 2026 00:56:50 +0800 Subject: [PATCH 045/104] test(cli): add missing-config regression tests for config and check - Add config --resolved missing-config tests: JSON, JSONL, text variants - Add check --json disclosure command test - Verify kind=error and error=missing_config in all output modes --- test/cli/test_cli_inspect.py | 31 +++++++++++++++++++++++++++++++ test/cli/test_cli_main.py | 7 +++++++ 2 files changed, 38 insertions(+) diff --git a/test/cli/test_cli_inspect.py b/test/cli/test_cli_inspect.py index f9a84d1a..51eb0bc5 100644 --- a/test/cli/test_cli_inspect.py +++ b/test/cli/test_cli_inspect.py @@ -375,6 +375,37 @@ def test_config_missing_config(self, tmp_path, capsys): rc = cli_main.run(["config", "--resolved", "--project", str(project_dir)]) assert rc == 1 + def test_config_missing_config_json_has_kind_error(self, tmp_path, capsys): + project_dir = tmp_path / "empty_project" + project_dir.mkdir() + + rc = cli_main.run(["config", "--resolved", "--project", str(project_dir), "--json"]) + assert rc == 1 + data = json.loads(capsys.readouterr().out) + record = data["records"][0] + assert record["kind"] == "error" + assert record["error"] == "missing_config" + + def test_config_missing_config_jsonl_has_kind_error(self, tmp_path, capsys): + project_dir = tmp_path / "empty_project" + project_dir.mkdir() + + rc = cli_main.run(["config", "--resolved", "--project", str(project_dir), "--jsonl"]) + assert rc == 1 + record = json.loads(capsys.readouterr().out.strip()) + assert record["kind"] == "error" + assert record["error"] == "missing_config" + + def test_config_missing_config_text_has_kind_error(self, tmp_path, capsys): + project_dir = tmp_path / "empty_project" + project_dir.mkdir() + + rc = cli_main.run(["config", "--resolved", "--project", str(project_dir)]) + assert rc == 1 + out = capsys.readouterr().out + assert "kind=error" in out + assert "error=missing_config" in out + def test_config_requires_resolved(self, tmp_path, capsys): project_dir = _create_valid_project(tmp_path) diff --git a/test/cli/test_cli_main.py b/test/cli/test_cli_main.py index c187d43e..657913ac 100644 --- a/test/cli/test_cli_main.py +++ b/test/cli/test_cli_main.py @@ -934,3 +934,10 @@ def test_check_missing_config_has_kind_error_text(self, tmp_path, capsys): assert "kind=error" in out assert "error=missing_config" in out + def test_check_missing_config_has_disclosure_command(self, tmp_path, capsys): + rc = cli_main.run(["check", "--project", str(tmp_path), "--json"]) + assert rc == 1 + data = json.loads(capsys.readouterr().out) + record = data["records"][0] + assert "inspect" in record or "inspect_cmd" in record + From ade5c6a007bbad04b46b8fe6bdc1b76b8ade42c9 Mon Sep 17 00:00:00 2001 From: Emin Date: Sun, 3 May 2026 01:02:42 +0800 Subject: [PATCH 046/104] test(cli): assert disclosure command in config missing-config text test --- test/cli/test_cli_inspect.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/test/cli/test_cli_inspect.py b/test/cli/test_cli_inspect.py index 51eb0bc5..e5b57fae 100644 --- a/test/cli/test_cli_inspect.py +++ b/test/cli/test_cli_inspect.py @@ -405,6 +405,8 @@ def test_config_missing_config_text_has_kind_error(self, tmp_path, capsys): out = capsys.readouterr().out assert "kind=error" in out assert "error=missing_config" in out + assert 'inspect="ecc check --project ' in out + assert 
str(project_dir) in out def test_config_requires_resolved(self, tmp_path, capsys): project_dir = _create_valid_project(tmp_path) From 29b911eab92ecf3b2f24a20ff2cc71caebbc4f58 Mon Sep 17 00:00:00 2001 From: Emin Date: Sun, 3 May 2026 01:20:11 +0800 Subject: [PATCH 047/104] fix(cli): handle known flow steps without directories and nested artifacts - Check flow.json for known step tokens before reporting unknown_step - Walk nested subdirectories in role dirs instead of listing only immediate files --- chipcompiler/cli/artifacts.py | 38 ++++++++++++++++++++++------------- 1 file changed, 24 insertions(+), 14 deletions(-) diff --git a/chipcompiler/cli/artifacts.py b/chipcompiler/cli/artifacts.py index ddaae7ff..8268c6db 100644 --- a/chipcompiler/cli/artifacts.py +++ b/chipcompiler/cli/artifacts.py @@ -13,15 +13,24 @@ def discover_artifacts(run_dir: str, step_token: str | None = None, project: str | None = None, run_id: str | None = None, project_dir: str | None = None) -> tuple[list[dict], int]: - from chipcompiler.cli.inspect import discover_step_dirs + from chipcompiler.cli.inspect import discover_step_dirs, read_flow_json, _safe_steps + from chipcompiler.cli.output import normalize_step_name base_dir = project_dir or os.path.dirname(os.path.dirname(run_dir)) step_dirs = discover_step_dirs(run_dir) + flow_data = read_flow_json(run_dir) + flow_tokens = set() + if flow_data is not None and not isinstance(flow_data, str): + for s in _safe_steps(flow_data): + flow_tokens.add(normalize_step_name(s.get("name", ""))) + if step_token is not None: - if step_token not in step_dirs: + if step_token not in step_dirs and step_token not in flow_tokens: return [{"kind": "error", "step": step_token, "status": "unknown_step"}], 1 + if step_token not in step_dirs: + return [], 0 tokens = [step_token] else: tokens = sorted(step_dirs.keys()) @@ -34,18 +43,19 @@ def discover_artifacts(run_dir: str, step_token: str | None = None, if not os.path.isdir(subdir): continue role = _role_from_dirname(entry) - for fname in sorted(os.listdir(subdir)): - fpath = os.path.join(subdir, fname) - if os.path.isfile(fpath): - artifacts.append({ - "kind": "artifact", - "step": token, - "role": role, - "run": run_id or "default", - "path": os.path.relpath(fpath, base_dir), - "exists": True, - "inspect_cmd": disclosure_cmd(f"ecc artifacts {token} --json", project, run_id), - }) + for root, _, files in os.walk(subdir): + for fname in sorted(files): + fpath = os.path.join(root, fname) + if os.path.isfile(fpath): + artifacts.append({ + "kind": "artifact", + "step": token, + "role": role, + "run": run_id or "default", + "path": os.path.relpath(fpath, base_dir), + "exists": True, + "inspect_cmd": disclosure_cmd(f"ecc artifacts {token} --json", project, run_id), + }) if not artifacts: return [], 0 From 0209a8e0588d25fdc4f2fcb6614c41fa21524f7d Mon Sep 17 00:00:00 2001 From: Emin Date: Sun, 3 May 2026 11:38:51 +0800 Subject: [PATCH 048/104] fix(cli): only suppress zero-count clean summaries in error filter Restrict _CLEAN_SUMMARY to match only 0 errors/failed/warning and 'no errors/failed/warning' patterns. Non-zero counts like '1 failed check' are real errors and must not be excluded from filter_errors. 
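A minimal standalone sketch of the intended behaviour (the two patterns are copied from the diff below; combining them as a search-then-suppress check is an assumption about how filter_errors applies them):

```python
import re

# Patterns as they appear after this patch (copied from the diff below).
ERROR_PATTERNS = re.compile(r"(error|failed|traceback)", re.IGNORECASE)
_CLEAN_SUMMARY = re.compile(
    r"^\s*0\s+(error|failed|warning)|^no\s+(error|failed|warning)",
    re.IGNORECASE,
)

for line in ["0 errors, 0 warnings", "No errors found.",
             "1 failed check", "ERROR: timing violation"]:
    # Keep error-looking lines unless they are zero-count clean summaries.
    keep = bool(ERROR_PATTERNS.search(line)) and not _CLEAN_SUMMARY.match(line)
    print(f"keep={keep} line={line!r}")
# Expected: both summary lines are dropped; '1 failed check' and the
# ERROR line are kept.
```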
--- chipcompiler/cli/inspect.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/chipcompiler/cli/inspect.py b/chipcompiler/cli/inspect.py index e3096fcd..6c8795f6 100644 --- a/chipcompiler/cli/inspect.py +++ b/chipcompiler/cli/inspect.py @@ -64,7 +64,7 @@ def get_run_status(flow_data: dict) -> str: ERROR_PATTERNS = re.compile(r"(error|failed|traceback)", re.IGNORECASE) -_CLEAN_SUMMARY = re.compile(r"^\s*\d+\s+(error|failed)|^no\s+(error|failed)", re.IGNORECASE) +_CLEAN_SUMMARY = re.compile(r"^\s*0\s+(error|failed|warning)|^no\s+(error|failed|warning)", re.IGNORECASE) def filter_errors(lines: list[str]) -> list[str]: From 178214fa8dd1e998ad2e7bb8b6bf60a18118d16c Mon Sep 17 00:00:00 2001 From: Emin Date: Sun, 3 May 2026 12:02:06 +0800 Subject: [PATCH 049/104] fix(cli): treat pending steps as ongoing in run status Pending steps indicate a queued or waiting run, not a failure. get_run_status now returns 'ongoing' for pending, same as ongoing. --- chipcompiler/cli/inspect.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/chipcompiler/cli/inspect.py b/chipcompiler/cli/inspect.py index 6c8795f6..1e36cd57 100644 --- a/chipcompiler/cli/inspect.py +++ b/chipcompiler/cli/inspect.py @@ -52,7 +52,7 @@ def get_run_status(flow_data: dict) -> str: return "unstart" for step in steps: state = normalize_state(step.get("state", "")) - if state == "ongoing": + if state in ("ongoing", "pending"): return "ongoing" if state in ("incomplete", "invalid"): return "failed" From 13c88eaaa845dc28bf0a4d5a4c9f5c9b548d50c1 Mon Sep 17 00:00:00 2001 From: Emin Date: Sun, 3 May 2026 12:18:41 +0800 Subject: [PATCH 050/104] fix(cli): handle step dirs absent from flow.json and init on existing file - diagnose: inspect step directories not in flow.json for errors, missing metrics/artifacts/config instead of silently reporting clean - init: return structured error when target path is an existing file instead of crashing with FileExistsError --- chipcompiler/cli/diagnose.py | 19 +++++++++++++++++++ chipcompiler/cli/handlers.py | 7 +++++++ 2 files changed, 26 insertions(+) diff --git a/chipcompiler/cli/diagnose.py b/chipcompiler/cli/diagnose.py index e6b0cc47..012483f9 100644 --- a/chipcompiler/cli/diagnose.py +++ b/chipcompiler/cli/diagnose.py @@ -164,5 +164,24 @@ def build_diagnose_issues(run_dir: str, step_token: str | None = None, issues.append(_make_issue("config_unavailable", "info", display_run, step=token, project=project, run_id=run_id)) + dir_only_tokens = set(step_dirs.keys()) - flow_tokens + if step_token is not None: + dir_only_tokens &= {step_token} + for token in sorted(dir_only_tokens): + error_count = _count_log_errors(run_dir, token) + if error_count > 0: + issues.append(_make_issue("log_errors", "error", display_run, + step=token, count=error_count, + project=project, run_id=run_id)) + if not _has_metrics(run_dir, token): + issues.append(_make_issue("missing_metrics", "warning", display_run, + step=token, project=project, run_id=run_id)) + if not _has_investigation_files(step_dirs[token]): + issues.append(_make_issue("missing_artifacts", "warning", display_run, + step=token, project=project, run_id=run_id)) + if not _has_config_files(step_dirs[token]): + issues.append(_make_issue("config_unavailable", "info", display_run, + step=token, project=project, run_id=run_id)) + has_error = any(i.get("severity") == "error" for i in issues) return issues, 1 if has_error else 0 diff --git a/chipcompiler/cli/handlers.py b/chipcompiler/cli/handlers.py index df9c5880..b5d86506 100644 --- 
a/chipcompiler/cli/handlers.py +++ b/chipcompiler/cli/handlers.py @@ -374,6 +374,13 @@ def init(args, ctx: CommandContext) -> CommandResult: config_path = os.path.join(project_dir, "ecc.toml") design_name = os.path.basename(project_dir) + if os.path.isfile(project_dir): + return CommandResult.err([{ + "kind": "error", + "error": "path_is_file", + "path": project_dir, + }]) + if os.path.exists(config_path): return CommandResult.err([{ "kind": "error", From a36b5e8dca729706beba6fa6b3041c8bf5556c67 Mon Sep 17 00:00:00 2001 From: Emin Date: Sun, 3 May 2026 12:51:43 +0800 Subject: [PATCH 051/104] refactor(cli): remove unused import and dead code - Remove unused json import from handlers.py - Remove unused missing_config_record and corrupt_config_record from records.py (only error_record is used by handlers) --- chipcompiler/cli/handlers.py | 1 - chipcompiler/cli/records.py | 17 ----------------- 2 files changed, 18 deletions(-) diff --git a/chipcompiler/cli/handlers.py b/chipcompiler/cli/handlers.py index b5d86506..f461c43f 100644 --- a/chipcompiler/cli/handlers.py +++ b/chipcompiler/cli/handlers.py @@ -1,4 +1,3 @@ -import json import os import shutil diff --git a/chipcompiler/cli/records.py b/chipcompiler/cli/records.py index cfafd6fa..c081dd77 100644 --- a/chipcompiler/cli/records.py +++ b/chipcompiler/cli/records.py @@ -1,21 +1,4 @@ -from chipcompiler.cli.output import disclosure_cmd - - def error_record(error: str, **fields) -> dict: record = {"kind": "error", "error": error} record.update(fields) return record - - -def missing_config_record(project: str | None = None) -> dict: - return error_record( - "missing_config", - inspect_cmd=disclosure_cmd("ecc check", project), - ) - - -def corrupt_config_record(project: str | None = None) -> dict: - return error_record( - "invalid_config", - inspect_cmd=disclosure_cmd("ecc check", project), - ) From 220b377dee06779ee39ae7aeb88344eea8768872 Mon Sep 17 00:00:00 2001 From: Emin Date: Sun, 3 May 2026 15:22:33 +0800 Subject: [PATCH 052/104] feat(cli): add interactive run progress and machine-readable run modes Add CLI-only progress layer for interactive that displays per-step summaries on stderr while preserving stdout for final records. 
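With the progress layer active, each finished step emits one grep-friendly summary line on stderr, for example (illustrative values; the field layout follows run_flow_with_progress() in the diff below):

```text
step=synthesis tool=yosys status=success runtime=0:00:18 log=synthesis/log/yosys.log inspect="ecc log synthesis --errors --project gcd"
```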
- Add chipcompiler/cli/progress.py with activation guard, log line sanitization, terminal-width truncation, stderr-only renderer, and run_flow_with_progress() executor that mirrors EngineFlow.run_steps() - Add --json and --jsonl flags to ecc run parser - Wire handlers.run() to select progress path only for text+TTY runs - Add 30 focused unit tests for progress helpers/renderer/executor - Add 4 integration tests for JSON/JSONL run modes and record preservation - Extend DummyFlow with run_step() and workspace_steps for compatibility --- chipcompiler/cli/handlers.py | 13 +- chipcompiler/cli/main.py | 2 + chipcompiler/cli/progress.py | 124 ++++++++++++++ test/cli/test_cli_main.py | 53 ++++++ test/cli/test_progress.py | 315 +++++++++++++++++++++++++++++++++++ 5 files changed, 506 insertions(+), 1 deletion(-) create mode 100644 chipcompiler/cli/progress.py create mode 100644 test/cli/test_progress.py diff --git a/chipcompiler/cli/handlers.py b/chipcompiler/cli/handlers.py index f461c43f..90835f35 100644 --- a/chipcompiler/cli/handlers.py +++ b/chipcompiler/cli/handlers.py @@ -1,5 +1,6 @@ import os import shutil +import sys from chipcompiler.cli.types import CommandContext, CommandResult from chipcompiler.cli.records import error_record @@ -569,7 +570,17 @@ def run(args, ctx: CommandContext) -> CommandResult: engine_flow.create_step_workspaces() - if not engine_flow.run_steps(): + from chipcompiler.cli.progress import ( + run_flow_with_progress, + should_enable_run_progress, + ) + + if should_enable_run_progress(ctx, sys.stderr): + flow_ok = run_flow_with_progress(engine_flow, ctx, project, sys.stderr) + else: + flow_ok = engine_flow.run_steps() + + if not flow_ok: return CommandResult.err([{ "run": "default", "status": "failed", diff --git a/chipcompiler/cli/main.py b/chipcompiler/cli/main.py index 66d727ab..7e38c702 100644 --- a/chipcompiler/cli/main.py +++ b/chipcompiler/cli/main.py @@ -27,6 +27,8 @@ def build_parser() -> argparse.ArgumentParser: _add_project_arg(run_parser) run_parser.add_argument("--overwrite", action="store_true", help="Remove existing runs/default before running") + run_parser.add_argument("--json", action="store_true", help="JSON output") + run_parser.add_argument("--jsonl", action="store_true", help="JSONL output") # ecc status status_parser = subparsers.add_parser("status", help="Show run and step status") diff --git a/chipcompiler/cli/progress.py b/chipcompiler/cli/progress.py new file mode 100644 index 00000000..a3cab8dc --- /dev/null +++ b/chipcompiler/cli/progress.py @@ -0,0 +1,124 @@ +import os +import re +import shutil + +from chipcompiler.cli.types import OutputMode + + +def should_enable_run_progress(ctx, stderr) -> bool: + if ctx.output_mode != OutputMode.TEXT: + return False + return hasattr(stderr, "isatty") and stderr.isatty() + + +_ANSI_RE = re.compile(r"\x1b\[[0-9;]*[a-zA-Z]") +_CONTROL_RE = re.compile(r"[\r\n\t]+") +_MULTI_SPACE_RE = re.compile(r" {2,}") + + +def sanitize_log_line(line: str) -> str: + stripped = _ANSI_RE.sub("", line) + stripped = _CONTROL_RE.sub(" ", stripped) + stripped = _MULTI_SPACE_RE.sub(" ", stripped) + return stripped.strip() + + +def truncate_to_width(text: str, width: int) -> str: + if width <= 0: + return "" + if len(text) <= width: + return text + if width <= 3: + return text[:width] + return text[: width - 3] + "..." 
+ + +def latest_log_line(path: str) -> str | None: + if not path or not os.path.isfile(path): + return None + try: + with open(path, "r", errors="replace") as f: + lines = f.readlines() + except OSError: + return None + for line in reversed(lines): + sanitized = sanitize_log_line(line) + if sanitized: + return sanitized + return None + + +def terminal_width(fallback: int = 80) -> int: + cols, _ = shutil.get_terminal_size(fallback=(fallback, 24)) + return max(cols, 1) + + +class RunProgressRenderer: + def __init__(self, stream, width_fn=None): + self._stream = stream + self._width_fn = width_fn or terminal_width + self._has_transient = False + + def running(self, text: str) -> None: + width = self._width_fn() + display = truncate_to_width(text, width) + self._stream.write(f"\r\x1b[K{display}") + self._stream.flush() + self._has_transient = True + + def clear(self) -> None: + if self._has_transient: + self._stream.write("\r\x1b[K") + self._stream.flush() + self._has_transient = False + + def summary(self, text: str) -> None: + self.clear() + self._stream.write(f"{text}\n") + self._stream.flush() + + +def run_flow_with_progress(engine_flow, ctx, project, stderr): + from chipcompiler.data import StateEnum, log_flow + + from chipcompiler.cli.output import disclosure_cmd, normalize_step_name, normalize_state + + renderer = RunProgressRenderer(stderr) + engine_flow.workspace.home.reset() + + for workspace_step in engine_flow.workspace_steps: + step_token = normalize_step_name(workspace_step.name) + tool = workspace_step.tool + log_path = workspace_step.log.get("file", "") + + start = __import__("time").time() + + state = engine_flow.run_step(workspace_step) + log_flow(workspace=engine_flow.workspace) + + elapsed = __import__("time").time() - start + hours = int(elapsed // 3600) + minutes = int((elapsed % 3600) // 60) + seconds = int(elapsed % 60) + runtime = f"{hours}:{minutes:02d}:{seconds:02d}" + + status = normalize_state(state.value) if hasattr(state, "value") else str(state) + + rel_log = "" + if log_path: + try: + rel_log = os.path.relpath(log_path, engine_flow.workspace.directory) + except ValueError: + rel_log = log_path + + inspect = disclosure_cmd(f"ecc log {step_token} --errors", project) + + renderer.summary( + f" step={step_token} tool={tool} status={status} " + f"runtime={runtime} log={rel_log} inspect=\"{inspect}\"" + ) + + if state != StateEnum.Success: + return False + + return True diff --git a/test/cli/test_cli_main.py b/test/cli/test_cli_main.py index 657913ac..c67ca969 100644 --- a/test/cli/test_cli_main.py +++ b/test/cli/test_cli_main.py @@ -20,6 +20,7 @@ def __init__(self, workspace): self.added_steps = [] self.create_called = False self.run_called = False + self.workspace_steps = [] DummyFlow.instances.append(self) def has_init(self): @@ -35,6 +36,11 @@ def run_steps(self): self.run_called = True return self.run_steps_value + def run_step(self, workspace_step): + from chipcompiler.data import StateEnum + self.run_called = True + return StateEnum.Success if self.run_steps_value else StateEnum.Imcomplete + def _install_flow_mocks(monkeypatch): capture = {"create_kwargs": None} @@ -369,6 +375,53 @@ def test_run_fails_when_run_steps_false(self, tmp_path, monkeypatch): rc = cli_main.run(["run", "--project", project_dir]) assert rc == 1 + def test_run_json_uses_non_progress_path(self, tmp_path, monkeypatch, capsys): + project_dir = _create_valid_project(tmp_path) + _install_flow_mocks(monkeypatch) + + rc = cli_main.run(["run", "--project", project_dir, "--json"]) + assert rc == 
0 + out = capsys.readouterr().out + data = json.loads(out) + assert "records" in data + assert data["records"][0]["status"] == "success" + assert DummyFlow.instances[0].run_called + + def test_run_jsonl_uses_non_progress_path(self, tmp_path, monkeypatch, capsys): + project_dir = _create_valid_project(tmp_path) + _install_flow_mocks(monkeypatch) + + rc = cli_main.run(["run", "--project", project_dir, "--jsonl"]) + assert rc == 0 + out = capsys.readouterr().out + objects = [json.loads(ln) for ln in out.strip().split("\n")] + assert any("status" in obj for obj in objects) + assert DummyFlow.instances[0].run_called + + def test_run_json_no_progress_on_stderr(self, tmp_path, monkeypatch, capsys): + project_dir = _create_valid_project(tmp_path) + _install_flow_mocks(monkeypatch) + + rc = cli_main.run(["run", "--project", project_dir, "--json"]) + assert rc == 0 + err = capsys.readouterr().err + assert "step=" not in err + + def test_run_preserves_final_records(self, tmp_path, monkeypatch, capsys): + project_dir = _create_valid_project(tmp_path) + _install_flow_mocks(monkeypatch) + + rc = cli_main.run(["run", "--project", project_dir, "--json"]) + assert rc == 0 + out = capsys.readouterr().out + data = json.loads(out) + record = data["records"][0] + assert record["run"] == "default" + assert record["status"] == "success" + assert "inspect_cmd" in record + assert "metrics_cmd" in record + assert "log_cmd" in record + # =========================================================================== # AC-4: ecc status diff --git a/test/cli/test_progress.py b/test/cli/test_progress.py new file mode 100644 index 00000000..5a63a4ee --- /dev/null +++ b/test/cli/test_progress.py @@ -0,0 +1,315 @@ +import io +import os +import sys + +import pytest + +from chipcompiler.cli.progress import ( + RunProgressRenderer, + latest_log_line, + sanitize_log_line, + should_enable_run_progress, + truncate_to_width, +) +from chipcompiler.cli.types import CommandContext, OutputMode + + +class FakeTTYStderr: + def __init__(self, isatty_value=True): + self._isatty = isatty_value + self.written = [] + + def isatty(self): + return self._isatty + + def write(self, s): + self.written.append(s) + + def flush(self): + pass + + +def _make_ctx(mode=OutputMode.TEXT): + return CommandContext( + project_dir="/tmp/project", + project=None, + run_dir="/tmp/project/runs/default", + run_id=None, + output_mode=mode, + ) + + +# -- should_enable_run_progress -- + + +class TestShouldEnableRunProgress: + def test_enabled_text_tty(self): + ctx = _make_ctx(OutputMode.TEXT) + assert should_enable_run_progress(ctx, FakeTTYStderr(True)) is True + + def test_disabled_json(self): + ctx = _make_ctx(OutputMode.JSON) + assert should_enable_run_progress(ctx, FakeTTYStderr(True)) is False + + def test_disabled_jsonl(self): + ctx = _make_ctx(OutputMode.JSONL) + assert should_enable_run_progress(ctx, FakeTTYStderr(True)) is False + + def test_disabled_no_tty(self): + ctx = _make_ctx(OutputMode.TEXT) + assert should_enable_run_progress(ctx, FakeTTYStderr(False)) is False + + def test_disabled_no_isattr(self): + ctx = _make_ctx(OutputMode.TEXT) + assert should_enable_run_progress(ctx, io.StringIO()) is False + + +# -- sanitize_log_line -- + + +class TestSanitizeLogLine: + def test_strips_ansi(self): + assert sanitize_log_line("\x1b[32mOK\x1b[0m") == "OK" + + def test_replaces_control_chars(self): + assert sanitize_log_line("a\r\nb\tc") == "a b c" + + def test_collapses_spaces(self): + assert sanitize_log_line("a b") == "a b" + + def test_strips_whitespace(self): 
+ assert sanitize_log_line(" hello ") == "hello" + + def test_empty_string(self): + assert sanitize_log_line("") == "" + + def test_preserves_normal_text(self): + assert sanitize_log_line("Synthesis completed") == "Synthesis completed" + + +# -- truncate_to_width -- + + +class TestTruncateToWidth: + def test_short_text_passes(self): + assert truncate_to_width("hi", 80) == "hi" + + def test_long_text_truncated(self): + text = "x" * 100 + result = truncate_to_width(text, 20) + assert len(result) == 20 + assert result.endswith("...") + + def test_exact_width(self): + text = "x" * 10 + assert truncate_to_width(text, 10) == text + + def test_zero_width(self): + assert truncate_to_width("hello", 0) == "" + + def test_small_width(self): + assert truncate_to_width("hello", 2) == "he" + + +# -- latest_log_line -- + + +class TestLatestLogLine: + def test_returns_last_nonempty_line(self, tmp_path): + log = tmp_path / "test.log" + log.write_text("line one\nline two\n\n") + assert latest_log_line(str(log)) == "line two" + + def test_returns_none_for_missing_file(self): + assert latest_log_line("/nonexistent/file.log") is None + + def test_returns_none_for_empty_file(self, tmp_path): + log = tmp_path / "empty.log" + log.write_text("") + assert latest_log_line(str(log)) is None + + def test_returns_none_for_none_path(self): + assert latest_log_line(None) is None + + def test_sanitizes_ansi_in_line(self, tmp_path): + log = tmp_path / "ansi.log" + log.write_text("\x1b[32mprogress\x1b[0m\n") + assert latest_log_line(str(log)) == "progress" + + def test_trailing_newlines_only(self, tmp_path): + log = tmp_path / "nl.log" + log.write_text("\n\n\n") + assert latest_log_line(str(log)) is None + + +# -- RunProgressRenderer -- + + +class TestRunProgressRenderer: + def test_running_writes_carriage_return_clear(self): + buf = FakeTTYStderr(True) + r = RunProgressRenderer(buf, width_fn=lambda: 80) + r.running("working...") + output = "".join(buf.written) + assert output.startswith("\r\x1b[K") + assert "working..." 
in output + + def test_summary_clears_transient_first(self): + buf = FakeTTYStderr(True) + r = RunProgressRenderer(buf, width_fn=lambda: 80) + r.running("transient") + r.summary("done") + output = "".join(buf.written) + # After transient, clear should be written before summary + assert "\r\x1b[K" in output + assert "done\n" in output + + def test_clear_noop_without_transient(self): + buf = FakeTTYStderr(True) + r = RunProgressRenderer(buf, width_fn=lambda: 80) + r.clear() + assert buf.written == [] + + def test_truncates_long_running_text(self): + buf = FakeTTYStderr(True) + r = RunProgressRenderer(buf, width_fn=lambda: 10) + r.running("x" * 100) + output = "".join(buf.written) + # \r\x1b[K + truncated text (10 chars max) + display = output.replace("\r\x1b[K", "") + assert len(display) <= 10 + + +# -- run_flow_with_progress -- + + +class TestRunFlowWithProgress: + def test_mirrors_run_steps_success(self, tmp_path): + from chipcompiler.data import StateEnum + from chipcompiler.cli.progress import run_flow_with_progress + + ws = type("WS", (), { + "home": type("Home", (), {"reset": lambda self: None})(), + "logger": type("L", (), {"info": lambda *a, **k: None, "log_section": lambda *a, **k: None, "log_separator": lambda *a, **k: None})(), + "flow": type("F", (), {"data": {"steps": []}, "path": ""})(), + "directory": str(tmp_path), + })() + + ws_step = type("WSS", (), { + "name": "Synthesis", + "tool": "yosys", + "log": {"file": str(tmp_path / "synth.log")}, + })() + + flow = type("EF", (), { + "workspace": ws, + "workspace_steps": [ws_step], + "run_step": lambda self, s: StateEnum.Success, + })() + + buf = FakeTTYStderr(True) + result = run_flow_with_progress(flow, _make_ctx(), None, buf) + assert result is True + output = "".join(buf.written) + assert "step=synthesis" in output + assert "status=success" in output + + def test_stops_on_failure(self): + from chipcompiler.data import StateEnum + from chipcompiler.cli.progress import run_flow_with_progress + + ws = type("WS", (), { + "home": type("Home", (), {"reset": lambda self: None})(), + "logger": type("L", (), {"info": lambda *a, **k: None, "log_section": lambda *a, **k: None, "log_separator": lambda *a, **k: None})(), + "flow": type("F", (), {"data": {"steps": []}, "path": ""})(), + "directory": "/tmp", + })() + + ws_step1 = type("WSS", (), { + "name": "Synthesis", + "tool": "yosys", + "log": {"file": ""}, + })() + ws_step2 = type("WSS", (), { + "name": "Floorplan", + "tool": "ecc", + "log": {"file": ""}, + })() + + call_count = [0] + + def fake_run_step(self, s): + call_count[0] += 1 + if s.name == "Synthesis": + return StateEnum.Success + return StateEnum.Imcomplete + + flow = type("EF", (), { + "workspace": ws, + "workspace_steps": [ws_step1, ws_step2], + "run_step": fake_run_step, + })() + + buf = FakeTTYStderr(True) + result = run_flow_with_progress(flow, _make_ctx(), None, buf) + assert result is False + assert call_count[0] == 2 # Floorplan ran but didn't continue beyond it + + def test_summary_includes_inspect(self): + from chipcompiler.data import StateEnum + from chipcompiler.cli.progress import run_flow_with_progress + + ws = type("WS", (), { + "home": type("Home", (), {"reset": lambda self: None})(), + "logger": type("L", (), {"info": lambda *a, **k: None, "log_section": lambda *a, **k: None, "log_separator": lambda *a, **k: None})(), + "flow": type("F", (), {"data": {"steps": []}, "path": ""})(), + "directory": "/tmp", + })() + + ws_step = type("WSS", (), { + "name": "Synthesis", + "tool": "yosys", + "log": {"file": 
"/tmp/synth.log"}, + })() + + flow = type("EF", (), { + "workspace": ws, + "workspace_steps": [ws_step], + "run_step": lambda self, s: StateEnum.Success, + })() + + buf = FakeTTYStderr(True) + run_flow_with_progress(flow, _make_ctx(), "myproject", buf) + output = "".join(buf.written) + assert "ecc log synthesis --errors" in output + + def test_summary_includes_log_path(self, tmp_path): + from chipcompiler.data import StateEnum + from chipcompiler.cli.progress import run_flow_with_progress + + log_file = tmp_path / "synth.log" + log_file.write_text("content\n") + + ws = type("WS", (), { + "home": type("Home", (), {"reset": lambda self: None})(), + "logger": type("L", (), {"info": lambda *a, **k: None, "log_section": lambda *a, **k: None, "log_separator": lambda *a, **k: None})(), + "flow": type("F", (), {"data": {"steps": []}, "path": ""})(), + "directory": str(tmp_path), + })() + + ws_step = type("WSS", (), { + "name": "Synthesis", + "tool": "yosys", + "log": {"file": str(log_file)}, + })() + + flow = type("EF", (), { + "workspace": ws, + "workspace_steps": [ws_step], + "run_step": lambda self, s: StateEnum.Success, + })() + + buf = FakeTTYStderr(True) + run_flow_with_progress(flow, _make_ctx(), None, buf) + output = "".join(buf.written) + assert "log=" in output From 3545bd53db2092cff5a58151353b96fbab66e12d Mon Sep 17 00:00:00 2001 From: Emin Date: Sun, 3 May 2026 15:33:37 +0800 Subject: [PATCH 053/104] feat(cli): add live log tailing and log section markers to progress executor - Add _poll_log() background thread that reads latest_log_line() at regular intervals and renders transient step progress via RunProgressRenderer.running() while engine_flow.run_step() blocks - Show 'waiting for log...' when no log file exists yet - Mirror EngineFlow.run_steps() begin/end log_section markers around each step execution in run_flow_with_progress() - Stop and join monitor thread after run_step() returns, clear transient line before printing summary - Add 4 new tests: transient log content display, waiting message, log section marker emission, and begin/run/end ordering --- chipcompiler/cli/progress.py | 36 +++++++- test/cli/test_progress.py | 159 +++++++++++++++++++++++++++++++++++ 2 files changed, 193 insertions(+), 2 deletions(-) diff --git a/chipcompiler/cli/progress.py b/chipcompiler/cli/progress.py index a3cab8dc..62d1d1fe 100644 --- a/chipcompiler/cli/progress.py +++ b/chipcompiler/cli/progress.py @@ -1,6 +1,8 @@ import os import re import shutil +import threading +import time from chipcompiler.cli.types import OutputMode @@ -78,6 +80,16 @@ def summary(self, text: str) -> None: self._stream.flush() +def _poll_log(renderer, log_path, step_token, tool, stop_event, interval=0.5): + while not stop_event.is_set(): + line = latest_log_line(log_path) + if line: + renderer.running(f" {step_token} ({tool}) | {line}") + else: + renderer.running(f" {step_token} ({tool}) | waiting for log...") + stop_event.wait(interval) + + def run_flow_with_progress(engine_flow, ctx, project, stderr): from chipcompiler.data import StateEnum, log_flow @@ -91,12 +103,32 @@ def run_flow_with_progress(engine_flow, ctx, project, stderr): tool = workspace_step.tool log_path = workspace_step.log.get("file", "") - start = __import__("time").time() + engine_flow.workspace.logger.log_section( + f"{workspace_step.tool} - begin step - {workspace_step.name}" + ) + + stop_event = threading.Event() + monitor = threading.Thread( + target=_poll_log, + args=(renderer, log_path, step_token, tool, stop_event), + daemon=True, + ) + 
monitor.start() + + start = time.time() state = engine_flow.run_step(workspace_step) + + stop_event.set() + monitor.join(timeout=2.0) + renderer.clear() + log_flow(workspace=engine_flow.workspace) + engine_flow.workspace.logger.log_section( + f"{workspace_step.tool} - end step - {workspace_step.name}" + ) - elapsed = __import__("time").time() - start + elapsed = time.time() - start hours = int(elapsed // 3600) minutes = int((elapsed % 3600) // 60) seconds = int(elapsed % 60) diff --git a/test/cli/test_progress.py b/test/cli/test_progress.py index 5a63a4ee..f5fb7c40 100644 --- a/test/cli/test_progress.py +++ b/test/cli/test_progress.py @@ -313,3 +313,162 @@ def test_summary_includes_log_path(self, tmp_path): run_flow_with_progress(flow, _make_ctx(), None, buf) output = "".join(buf.written) assert "log=" in output + + def test_transient_line_shows_log_content(self, tmp_path): + import time + from chipcompiler.data import StateEnum + from chipcompiler.cli.progress import run_flow_with_progress + + log_file = tmp_path / "synth.log" + + def fake_run_step(self, s): + log_file.write_text("Synthesizing module top\n") + time.sleep(1.0) + return StateEnum.Success + + ws = type("WS", (), { + "home": type("Home", (), {"reset": lambda self: None})(), + "logger": type("L", (), {"info": lambda *a, **k: None, "log_section": lambda *a, **k: None, "log_separator": lambda *a, **k: None})(), + "flow": type("F", (), {"data": {"steps": []}, "path": ""})(), + "directory": str(tmp_path), + })() + + ws_step = type("WSS", (), { + "name": "Synthesis", + "tool": "yosys", + "log": {"file": str(log_file)}, + })() + + flow = type("EF", (), { + "workspace": ws, + "workspace_steps": [ws_step], + "run_step": fake_run_step, + })() + + buf = FakeTTYStderr(True) + result = run_flow_with_progress(flow, _make_ctx(), None, buf) + assert result is True + + output = "".join(buf.written) + assert "Synthesizing module top" in output + + # Transient running line appears before summary line + running_pos = output.find("\r\x1b[K synthesis (yosys)") + summary_pos = output.find("step=synthesis") + assert running_pos >= 0, "Missing transient running line" + assert summary_pos >= 0, "Missing summary line" + assert running_pos < summary_pos, "Transient line should appear before summary" + + def test_transient_shows_waiting_when_no_log(self): + import time + from chipcompiler.data import StateEnum + from chipcompiler.cli.progress import run_flow_with_progress + + def fake_run_step(self, s): + time.sleep(1.0) + return StateEnum.Success + + ws = type("WS", (), { + "home": type("Home", (), {"reset": lambda self: None})(), + "logger": type("L", (), {"info": lambda *a, **k: None, "log_section": lambda *a, **k: None, "log_separator": lambda *a, **k: None})(), + "flow": type("F", (), {"data": {"steps": []}, "path": ""})(), + "directory": "/tmp", + })() + + ws_step = type("WSS", (), { + "name": "Synthesis", + "tool": "yosys", + "log": {"file": "/tmp/nonexistent_synth.log"}, + })() + + flow = type("EF", (), { + "workspace": ws, + "workspace_steps": [ws_step], + "run_step": fake_run_step, + })() + + buf = FakeTTYStderr(True) + result = run_flow_with_progress(flow, _make_ctx(), None, buf) + assert result is True + + output = "".join(buf.written) + assert "waiting for log..." 
in output + + def test_log_section_markers_emitted(self, tmp_path): + from chipcompiler.data import StateEnum + from chipcompiler.cli.progress import run_flow_with_progress + + sections = [] + + ws = type("WS", (), { + "home": type("Home", (), {"reset": lambda self: None})(), + "logger": type("L", (), { + "info": lambda *a, **k: None, + "log_section": lambda self, msg: sections.append(msg), + "log_separator": lambda *a, **k: None, + })(), + "flow": type("F", (), {"data": {"steps": []}, "path": ""})(), + "directory": str(tmp_path), + })() + + ws_step = type("WSS", (), { + "name": "Synthesis", + "tool": "yosys", + "log": {"file": ""}, + })() + + flow = type("EF", (), { + "workspace": ws, + "workspace_steps": [ws_step], + "run_step": lambda self, s: StateEnum.Success, + })() + + buf = FakeTTYStderr(True) + run_flow_with_progress(flow, _make_ctx(), None, buf) + + assert "yosys - begin step - Synthesis" in sections + assert "yosys - end step - Synthesis" in sections + begin_idx = sections.index("yosys - begin step - Synthesis") + end_idx = sections.index("yosys - end step - Synthesis") + assert begin_idx < end_idx + + def test_log_section_markers_around_run_step(self, tmp_path): + from chipcompiler.data import StateEnum + from chipcompiler.cli.progress import run_flow_with_progress + + call_order = [] + + ws = type("WS", (), { + "home": type("Home", (), {"reset": lambda self: None})(), + "logger": type("L", (), { + "info": lambda *a, **k: None, + "log_section": lambda self, msg: call_order.append(("section", msg)), + "log_separator": lambda *a, **k: None, + })(), + "flow": type("F", (), {"data": {"steps": []}, "path": ""})(), + "directory": str(tmp_path), + })() + + ws_step = type("WSS", (), { + "name": "Floorplan", + "tool": "ecc", + "log": {"file": ""}, + })() + + def fake_run_step(self, s): + call_order.append(("run_step", s.name)) + return StateEnum.Success + + flow = type("EF", (), { + "workspace": ws, + "workspace_steps": [ws_step], + "run_step": fake_run_step, + })() + + buf = FakeTTYStderr(True) + run_flow_with_progress(flow, _make_ctx(), None, buf) + + begin_idx = call_order.index(("section", "ecc - begin step - Floorplan")) + run_idx = call_order.index(("run_step", "Floorplan")) + end_idx = call_order.index(("section", "ecc - end step - Floorplan")) + assert begin_idx < run_idx < end_idx From 48c9479a1a1cbd609a836704db3e625319d7c53f Mon Sep 17 00:00:00 2001 From: Emin Date: Sun, 3 May 2026 15:41:11 +0800 Subject: [PATCH 054/104] fix(cli): use contract transient line format and exception-safe monitor cleanup - Change _poll_log() to render 'running step= tool= | ' instead of the drifted ' () | ' format - Wrap engine_flow.run_step() in try/finally so stop_event.set(), monitor.join(), and renderer.clear() always execute even on exception - Update transient line tests to assert the contract prefix exactly - Add regression test: fake run_step() raises, assert transient clear happens and exception propagates from run_flow_with_progress() --- chipcompiler/cli/progress.py | 15 +++++++------ test/cli/test_progress.py | 41 ++++++++++++++++++++++++++++++++---- 2 files changed, 45 insertions(+), 11 deletions(-) diff --git a/chipcompiler/cli/progress.py b/chipcompiler/cli/progress.py index 62d1d1fe..a2b5b8cf 100644 --- a/chipcompiler/cli/progress.py +++ b/chipcompiler/cli/progress.py @@ -84,9 +84,9 @@ def _poll_log(renderer, log_path, step_token, tool, stop_event, interval=0.5): while not stop_event.is_set(): line = latest_log_line(log_path) if line: - renderer.running(f" {step_token} ({tool}) | 
{line}") + renderer.running(f"running step={step_token} tool={tool} | {line}") else: - renderer.running(f" {step_token} ({tool}) | waiting for log...") + renderer.running(f"running step={step_token} tool={tool} | waiting for log...") stop_event.wait(interval) @@ -117,11 +117,12 @@ def run_flow_with_progress(engine_flow, ctx, project, stderr): start = time.time() - state = engine_flow.run_step(workspace_step) - - stop_event.set() - monitor.join(timeout=2.0) - renderer.clear() + try: + state = engine_flow.run_step(workspace_step) + finally: + stop_event.set() + monitor.join(timeout=2.0) + renderer.clear() log_flow(workspace=engine_flow.workspace) engine_flow.workspace.logger.log_section( diff --git a/test/cli/test_progress.py b/test/cli/test_progress.py index f5fb7c40..536319fd 100644 --- a/test/cli/test_progress.py +++ b/test/cli/test_progress.py @@ -352,10 +352,10 @@ def fake_run_step(self, s): output = "".join(buf.written) assert "Synthesizing module top" in output - # Transient running line appears before summary line - running_pos = output.find("\r\x1b[K synthesis (yosys)") + # Transient running line uses the contract prefix and appears before summary + running_pos = output.find("running step=synthesis tool=yosys") summary_pos = output.find("step=synthesis") - assert running_pos >= 0, "Missing transient running line" + assert running_pos >= 0, "Missing transient running line with contract prefix" assert summary_pos >= 0, "Missing summary line" assert running_pos < summary_pos, "Transient line should appear before summary" @@ -392,7 +392,7 @@ def fake_run_step(self, s): assert result is True output = "".join(buf.written) - assert "waiting for log..." in output + assert "running step=synthesis tool=yosys | waiting for log..." in output def test_log_section_markers_emitted(self, tmp_path): from chipcompiler.data import StateEnum @@ -472,3 +472,36 @@ def fake_run_step(self, s): run_idx = call_order.index(("run_step", "Floorplan")) end_idx = call_order.index(("section", "ecc - end step - Floorplan")) assert begin_idx < run_idx < end_idx + + def test_monitor_cleanup_on_run_step_exception(self, tmp_path): + from chipcompiler.cli.progress import run_flow_with_progress + + def fake_run_step(self, s): + raise RuntimeError("tool crashed") + + ws = type("WS", (), { + "home": type("Home", (), {"reset": lambda self: None})(), + "logger": type("L", (), {"info": lambda *a, **k: None, "log_section": lambda *a, **k: None, "log_separator": lambda *a, **k: None})(), + "flow": type("F", (), {"data": {"steps": []}, "path": ""})(), + "directory": str(tmp_path), + })() + + ws_step = type("WSS", (), { + "name": "Synthesis", + "tool": "yosys", + "log": {"file": ""}, + })() + + flow = type("EF", (), { + "workspace": ws, + "workspace_steps": [ws_step], + "run_step": fake_run_step, + })() + + buf = FakeTTYStderr(True) + with pytest.raises(RuntimeError, match="tool crashed"): + run_flow_with_progress(flow, _make_ctx(), None, buf) + + # The transient line must be cleared even after an exception + output = "".join(buf.written) + assert "\r\x1b[K" in output From 5b26a3d0fbac3b21c646285301f9f88f015b7f78 Mon Sep 17 00:00:00 2001 From: Emin Date: Sun, 3 May 2026 16:03:58 +0800 Subject: [PATCH 055/104] fix(cli): open tool logs with errors=replace to handle non-UTF8 bytes read_log_file() in inspect.py opens logs with errors='replace' so that arbitrary bytes in tool output do not crash ecc log or ecc diagnose with UnicodeDecodeError. Matches the defensive approach already used by latest_log_line() in progress.py. 
--- chipcompiler/cli/inspect.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/chipcompiler/cli/inspect.py b/chipcompiler/cli/inspect.py index 1e36cd57..48cd9f23 100644 --- a/chipcompiler/cli/inspect.py +++ b/chipcompiler/cli/inspect.py @@ -111,7 +111,7 @@ def discover_logs(run_dir: str, step_token: str | None = None) -> list[str]: def read_log_file(path: str) -> list[str]: try: - with open(path) as f: + with open(path, errors="replace") as f: return f.read().splitlines() except OSError: return [] From f58ed18a507daa6d529f737b677149172bd272cc Mon Sep 17 00:00:00 2001 From: Emin Date: Sun, 3 May 2026 16:26:17 +0800 Subject: [PATCH 056/104] refactor(cli): simplify progress module and deduplicate test helpers - Extract common prefix in _poll_log() to avoid duplicate string formatting - Remove unnecessary hasattr() guard for StateEnum.value in run_flow_with_progress() - Extract _make_ws/_make_step/_make_flow test helpers to eliminate repeated fake workspace/step/flow setup across 9 flow executor tests - Remove unused os/sys imports from test_progress.py --- chipcompiler/cli/progress.py | 8 +- test/cli/test_progress.py | 265 ++++++++++------------------------- 2 files changed, 80 insertions(+), 193 deletions(-) diff --git a/chipcompiler/cli/progress.py b/chipcompiler/cli/progress.py index a2b5b8cf..bdc2b78d 100644 --- a/chipcompiler/cli/progress.py +++ b/chipcompiler/cli/progress.py @@ -81,12 +81,10 @@ def summary(self, text: str) -> None: def _poll_log(renderer, log_path, step_token, tool, stop_event, interval=0.5): + prefix = f"running step={step_token} tool={tool}" while not stop_event.is_set(): line = latest_log_line(log_path) - if line: - renderer.running(f"running step={step_token} tool={tool} | {line}") - else: - renderer.running(f"running step={step_token} tool={tool} | waiting for log...") + renderer.running(f"{prefix} | {line}" if line else f"{prefix} | waiting for log...") stop_event.wait(interval) @@ -135,7 +133,7 @@ def run_flow_with_progress(engine_flow, ctx, project, stderr): seconds = int(elapsed % 60) runtime = f"{hours}:{minutes:02d}:{seconds:02d}" - status = normalize_state(state.value) if hasattr(state, "value") else str(state) + status = normalize_state(state.value) rel_log = "" if log_path: diff --git a/test/cli/test_progress.py b/test/cli/test_progress.py index 536319fd..da5e4dc3 100644 --- a/test/cli/test_progress.py +++ b/test/cli/test_progress.py @@ -1,6 +1,4 @@ import io -import os -import sys import pytest @@ -183,29 +181,45 @@ def test_truncates_long_running_text(self): # -- run_flow_with_progress -- +def _make_ws(directory="/tmp", log_section_fn=None): + if log_section_fn: + logger = type("L", (), { + "info": lambda *a, **k: None, + "log_section": log_section_fn, + "log_separator": lambda *a, **k: None, + })() + else: + logger = type("L", (), {"info": lambda *a, **k: None, "log_section": lambda *a, **k: None, "log_separator": lambda *a, **k: None})() + return type("WS", (), { + "home": type("Home", (), {"reset": lambda self: None})(), + "logger": logger, + "flow": type("F", (), {"data": {"steps": []}, "path": ""})(), + "directory": directory, + })() + + +def _make_step(name, tool, log_file=""): + return type("WSS", (), {"name": name, "tool": tool, "log": {"file": log_file}})() + + +def _make_flow(ws, steps, run_step_fn): + return type("EF", (), { + "workspace": ws, + "workspace_steps": steps, + "run_step": run_step_fn, + })() + + class TestRunFlowWithProgress: def test_mirrors_run_steps_success(self, tmp_path): from chipcompiler.data import 
StateEnum from chipcompiler.cli.progress import run_flow_with_progress - ws = type("WS", (), { - "home": type("Home", (), {"reset": lambda self: None})(), - "logger": type("L", (), {"info": lambda *a, **k: None, "log_section": lambda *a, **k: None, "log_separator": lambda *a, **k: None})(), - "flow": type("F", (), {"data": {"steps": []}, "path": ""})(), - "directory": str(tmp_path), - })() - - ws_step = type("WSS", (), { - "name": "Synthesis", - "tool": "yosys", - "log": {"file": str(tmp_path / "synth.log")}, - })() - - flow = type("EF", (), { - "workspace": ws, - "workspace_steps": [ws_step], - "run_step": lambda self, s: StateEnum.Success, - })() + flow = _make_flow( + _make_ws(str(tmp_path)), + [_make_step("Synthesis", "yosys", str(tmp_path / "synth.log"))], + lambda self, s: StateEnum.Success, + ) buf = FakeTTYStderr(True) result = run_flow_with_progress(flow, _make_ctx(), None, buf) @@ -218,24 +232,6 @@ def test_stops_on_failure(self): from chipcompiler.data import StateEnum from chipcompiler.cli.progress import run_flow_with_progress - ws = type("WS", (), { - "home": type("Home", (), {"reset": lambda self: None})(), - "logger": type("L", (), {"info": lambda *a, **k: None, "log_section": lambda *a, **k: None, "log_separator": lambda *a, **k: None})(), - "flow": type("F", (), {"data": {"steps": []}, "path": ""})(), - "directory": "/tmp", - })() - - ws_step1 = type("WSS", (), { - "name": "Synthesis", - "tool": "yosys", - "log": {"file": ""}, - })() - ws_step2 = type("WSS", (), { - "name": "Floorplan", - "tool": "ecc", - "log": {"file": ""}, - })() - call_count = [0] def fake_run_step(self, s): @@ -244,39 +240,26 @@ def fake_run_step(self, s): return StateEnum.Success return StateEnum.Imcomplete - flow = type("EF", (), { - "workspace": ws, - "workspace_steps": [ws_step1, ws_step2], - "run_step": fake_run_step, - })() + flow = _make_flow( + _make_ws(), + [_make_step("Synthesis", "yosys"), _make_step("Floorplan", "ecc")], + fake_run_step, + ) buf = FakeTTYStderr(True) result = run_flow_with_progress(flow, _make_ctx(), None, buf) assert result is False - assert call_count[0] == 2 # Floorplan ran but didn't continue beyond it + assert call_count[0] == 2 def test_summary_includes_inspect(self): from chipcompiler.data import StateEnum from chipcompiler.cli.progress import run_flow_with_progress - ws = type("WS", (), { - "home": type("Home", (), {"reset": lambda self: None})(), - "logger": type("L", (), {"info": lambda *a, **k: None, "log_section": lambda *a, **k: None, "log_separator": lambda *a, **k: None})(), - "flow": type("F", (), {"data": {"steps": []}, "path": ""})(), - "directory": "/tmp", - })() - - ws_step = type("WSS", (), { - "name": "Synthesis", - "tool": "yosys", - "log": {"file": "/tmp/synth.log"}, - })() - - flow = type("EF", (), { - "workspace": ws, - "workspace_steps": [ws_step], - "run_step": lambda self, s: StateEnum.Success, - })() + flow = _make_flow( + _make_ws(), + [_make_step("Synthesis", "yosys", "/tmp/synth.log")], + lambda self, s: StateEnum.Success, + ) buf = FakeTTYStderr(True) run_flow_with_progress(flow, _make_ctx(), "myproject", buf) @@ -290,24 +273,11 @@ def test_summary_includes_log_path(self, tmp_path): log_file = tmp_path / "synth.log" log_file.write_text("content\n") - ws = type("WS", (), { - "home": type("Home", (), {"reset": lambda self: None})(), - "logger": type("L", (), {"info": lambda *a, **k: None, "log_section": lambda *a, **k: None, "log_separator": lambda *a, **k: None})(), - "flow": type("F", (), {"data": {"steps": []}, "path": ""})(), - 
"directory": str(tmp_path), - })() - - ws_step = type("WSS", (), { - "name": "Synthesis", - "tool": "yosys", - "log": {"file": str(log_file)}, - })() - - flow = type("EF", (), { - "workspace": ws, - "workspace_steps": [ws_step], - "run_step": lambda self, s: StateEnum.Success, - })() + flow = _make_flow( + _make_ws(str(tmp_path)), + [_make_step("Synthesis", "yosys", str(log_file))], + lambda self, s: StateEnum.Success, + ) buf = FakeTTYStderr(True) run_flow_with_progress(flow, _make_ctx(), None, buf) @@ -326,24 +296,11 @@ def fake_run_step(self, s): time.sleep(1.0) return StateEnum.Success - ws = type("WS", (), { - "home": type("Home", (), {"reset": lambda self: None})(), - "logger": type("L", (), {"info": lambda *a, **k: None, "log_section": lambda *a, **k: None, "log_separator": lambda *a, **k: None})(), - "flow": type("F", (), {"data": {"steps": []}, "path": ""})(), - "directory": str(tmp_path), - })() - - ws_step = type("WSS", (), { - "name": "Synthesis", - "tool": "yosys", - "log": {"file": str(log_file)}, - })() - - flow = type("EF", (), { - "workspace": ws, - "workspace_steps": [ws_step], - "run_step": fake_run_step, - })() + flow = _make_flow( + _make_ws(str(tmp_path)), + [_make_step("Synthesis", "yosys", str(log_file))], + fake_run_step, + ) buf = FakeTTYStderr(True) result = run_flow_with_progress(flow, _make_ctx(), None, buf) @@ -352,12 +309,11 @@ def fake_run_step(self, s): output = "".join(buf.written) assert "Synthesizing module top" in output - # Transient running line uses the contract prefix and appears before summary running_pos = output.find("running step=synthesis tool=yosys") summary_pos = output.find("step=synthesis") assert running_pos >= 0, "Missing transient running line with contract prefix" assert summary_pos >= 0, "Missing summary line" - assert running_pos < summary_pos, "Transient line should appear before summary" + assert running_pos < summary_pos def test_transient_shows_waiting_when_no_log(self): import time @@ -368,24 +324,11 @@ def fake_run_step(self, s): time.sleep(1.0) return StateEnum.Success - ws = type("WS", (), { - "home": type("Home", (), {"reset": lambda self: None})(), - "logger": type("L", (), {"info": lambda *a, **k: None, "log_section": lambda *a, **k: None, "log_separator": lambda *a, **k: None})(), - "flow": type("F", (), {"data": {"steps": []}, "path": ""})(), - "directory": "/tmp", - })() - - ws_step = type("WSS", (), { - "name": "Synthesis", - "tool": "yosys", - "log": {"file": "/tmp/nonexistent_synth.log"}, - })() - - flow = type("EF", (), { - "workspace": ws, - "workspace_steps": [ws_step], - "run_step": fake_run_step, - })() + flow = _make_flow( + _make_ws(), + [_make_step("Synthesis", "yosys", "/tmp/nonexistent_synth.log")], + fake_run_step, + ) buf = FakeTTYStderr(True) result = run_flow_with_progress(flow, _make_ctx(), None, buf) @@ -399,38 +342,18 @@ def test_log_section_markers_emitted(self, tmp_path): from chipcompiler.cli.progress import run_flow_with_progress sections = [] - - ws = type("WS", (), { - "home": type("Home", (), {"reset": lambda self: None})(), - "logger": type("L", (), { - "info": lambda *a, **k: None, - "log_section": lambda self, msg: sections.append(msg), - "log_separator": lambda *a, **k: None, - })(), - "flow": type("F", (), {"data": {"steps": []}, "path": ""})(), - "directory": str(tmp_path), - })() - - ws_step = type("WSS", (), { - "name": "Synthesis", - "tool": "yosys", - "log": {"file": ""}, - })() - - flow = type("EF", (), { - "workspace": ws, - "workspace_steps": [ws_step], - "run_step": lambda self, 
s: StateEnum.Success, - })() + flow = _make_flow( + _make_ws(str(tmp_path), log_section_fn=lambda self, msg: sections.append(msg)), + [_make_step("Synthesis", "yosys")], + lambda self, s: StateEnum.Success, + ) buf = FakeTTYStderr(True) run_flow_with_progress(flow, _make_ctx(), None, buf) assert "yosys - begin step - Synthesis" in sections assert "yosys - end step - Synthesis" in sections - begin_idx = sections.index("yosys - begin step - Synthesis") - end_idx = sections.index("yosys - end step - Synthesis") - assert begin_idx < end_idx + assert sections.index("yosys - begin step - Synthesis") < sections.index("yosys - end step - Synthesis") def test_log_section_markers_around_run_step(self, tmp_path): from chipcompiler.data import StateEnum @@ -438,32 +361,15 @@ def test_log_section_markers_around_run_step(self, tmp_path): call_order = [] - ws = type("WS", (), { - "home": type("Home", (), {"reset": lambda self: None})(), - "logger": type("L", (), { - "info": lambda *a, **k: None, - "log_section": lambda self, msg: call_order.append(("section", msg)), - "log_separator": lambda *a, **k: None, - })(), - "flow": type("F", (), {"data": {"steps": []}, "path": ""})(), - "directory": str(tmp_path), - })() - - ws_step = type("WSS", (), { - "name": "Floorplan", - "tool": "ecc", - "log": {"file": ""}, - })() - def fake_run_step(self, s): call_order.append(("run_step", s.name)) return StateEnum.Success - flow = type("EF", (), { - "workspace": ws, - "workspace_steps": [ws_step], - "run_step": fake_run_step, - })() + flow = _make_flow( + _make_ws(str(tmp_path), log_section_fn=lambda self, msg: call_order.append(("section", msg))), + [_make_step("Floorplan", "ecc")], + fake_run_step, + ) buf = FakeTTYStderr(True) run_flow_with_progress(flow, _make_ctx(), None, buf) @@ -476,32 +382,15 @@ def fake_run_step(self, s): def test_monitor_cleanup_on_run_step_exception(self, tmp_path): from chipcompiler.cli.progress import run_flow_with_progress - def fake_run_step(self, s): - raise RuntimeError("tool crashed") - - ws = type("WS", (), { - "home": type("Home", (), {"reset": lambda self: None})(), - "logger": type("L", (), {"info": lambda *a, **k: None, "log_section": lambda *a, **k: None, "log_separator": lambda *a, **k: None})(), - "flow": type("F", (), {"data": {"steps": []}, "path": ""})(), - "directory": str(tmp_path), - })() - - ws_step = type("WSS", (), { - "name": "Synthesis", - "tool": "yosys", - "log": {"file": ""}, - })() - - flow = type("EF", (), { - "workspace": ws, - "workspace_steps": [ws_step], - "run_step": fake_run_step, - })() + flow = _make_flow( + _make_ws(str(tmp_path)), + [_make_step("Synthesis", "yosys")], + lambda self, s: (_ for _ in ()).throw(RuntimeError("tool crashed")), + ) buf = FakeTTYStderr(True) with pytest.raises(RuntimeError, match="tool crashed"): run_flow_with_progress(flow, _make_ctx(), None, buf) - # The transient line must be cleared even after an exception output = "".join(buf.written) assert "\r\x1b[K" in output From f5bad8a0420315a5c5ba111586b49a098725d3c4 Mon Sep 17 00:00:00 2001 From: Emin Date: Sun, 3 May 2026 16:30:38 +0800 Subject: [PATCH 057/104] refactor(cli): apply code-simplifier review to progress module - Collapse _make_ws() if/else into single default-parameter pattern - Replace obscure generator .throw() trick with clear named function in test_monitor_cleanup_on_run_step_exception --- test/cli/test_progress.py | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/test/cli/test_progress.py b/test/cli/test_progress.py index 
da5e4dc3..4c0ee5ad 100644 --- a/test/cli/test_progress.py +++ b/test/cli/test_progress.py @@ -182,17 +182,14 @@ def test_truncates_long_running_text(self): def _make_ws(directory="/tmp", log_section_fn=None): - if log_section_fn: - logger = type("L", (), { - "info": lambda *a, **k: None, - "log_section": log_section_fn, - "log_separator": lambda *a, **k: None, - })() - else: - logger = type("L", (), {"info": lambda *a, **k: None, "log_section": lambda *a, **k: None, "log_separator": lambda *a, **k: None})() + section_fn = log_section_fn or (lambda self, msg: None) return type("WS", (), { "home": type("Home", (), {"reset": lambda self: None})(), - "logger": logger, + "logger": type("L", (), { + "info": lambda *a, **k: None, + "log_section": section_fn, + "log_separator": lambda *a, **k: None, + })(), "flow": type("F", (), {"data": {"steps": []}, "path": ""})(), "directory": directory, })() @@ -382,10 +379,13 @@ def fake_run_step(self, s): def test_monitor_cleanup_on_run_step_exception(self, tmp_path): from chipcompiler.cli.progress import run_flow_with_progress + def raising_run_step(self, s): + raise RuntimeError("tool crashed") + flow = _make_flow( _make_ws(str(tmp_path)), [_make_step("Synthesis", "yosys")], - lambda self, s: (_ for _ in ()).throw(RuntimeError("tool crashed")), + raising_run_step, ) buf = FakeTTYStderr(True) From a34da0a48740000079a0996df856ecfbf7cf690d Mon Sep 17 00:00:00 2001 From: Emin Date: Sun, 3 May 2026 18:11:45 +0800 Subject: [PATCH 058/104] feat(cli): polish interactive progress output with per-step blocks and ANSI color Replace dense one-line progress with readable per-step blocks: - Step headers: > synthesis (yosys) - Run header: [run] default workspace= - Indented transient log: lines under active step - Success summary: checkmark step (tool) runtime - Non-success: x step (tool) status runtime - Detail lines: indented log: and inspect: on separate lines - Blank line separators between step blocks Add ANSI color support (TTY-only, respects NO_COLOR/TERM=dumb): - Active step header and name: cyan - Success summary: green - Non-success symbol and status: red - log:/inspect: labels: dim - [run]: bold Changes limited to chipcompiler/cli/progress.py and tests. No engine/tools/flow API changes. No new dependencies. 
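For reference, the intended stderr layout, assembled from the expected strings asserted in the new renderer tests (step names, runtimes, paths, and indentation below are example values; the transient `log:` line is rewritten in place while a step runs and cleared before its summary prints):

```text
[run] default workspace=/tmp/runs/default
> synthesis (yosys)
  log: <latest sanitized line from the step's tool log>
✓ synthesis (yosys) 0:00:06
  log: output/synth.log
  inspect: ecc log synthesis --errors

> placement (dreamplace)
✗ placement (dreamplace) incomplete 0:00:00
  log:
  inspect: ecc log placement --errors
```

Color is applied only when stderr is a TTY in text output mode with `NO_COLOR` unset and `TERM` not `dumb`; otherwise the same text is emitted uncolored.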
--- chipcompiler/cli/progress.py | 104 ++++++++--- test/cli/test_progress.py | 338 +++++++++++++++++++++++++++++++---- 2 files changed, 384 insertions(+), 58 deletions(-) diff --git a/chipcompiler/cli/progress.py b/chipcompiler/cli/progress.py index bdc2b78d..0088ec4e 100644 --- a/chipcompiler/cli/progress.py +++ b/chipcompiler/cli/progress.py @@ -6,8 +6,35 @@ from chipcompiler.cli.types import OutputMode +_BOLD = "\x1b[1m" +_DIM = "\x1b[2m" +_CYAN = "\x1b[36m" +_GREEN = "\x1b[32m" +_RED = "\x1b[31m" +_RESET = "\x1b[0m" + + +def supports_color(stream, mode, env=None): + if env is None: + env = os.environ + if not hasattr(stream, "isatty") or not stream.isatty(): + return False + if mode != OutputMode.TEXT: + return False + if env.get("NO_COLOR") is not None: + return False + if env.get("TERM", "") == "dumb": + return False + return True + + +def style(text, code, enabled): + if not enabled: + return text + return f"{code}{text}{_RESET}" + -def should_enable_run_progress(ctx, stderr) -> bool: +def should_enable_run_progress(ctx, stderr): if ctx.output_mode != OutputMode.TEXT: return False return hasattr(stderr, "isatty") and stderr.isatty() @@ -18,14 +45,14 @@ def should_enable_run_progress(ctx, stderr) -> bool: _MULTI_SPACE_RE = re.compile(r" {2,}") -def sanitize_log_line(line: str) -> str: +def sanitize_log_line(line): stripped = _ANSI_RE.sub("", line) stripped = _CONTROL_RE.sub(" ", stripped) stripped = _MULTI_SPACE_RE.sub(" ", stripped) return stripped.strip() -def truncate_to_width(text: str, width: int) -> str: +def truncate_to_width(text, width): if width <= 0: return "" if len(text) <= width: @@ -35,7 +62,7 @@ def truncate_to_width(text: str, width: int) -> str: return text[: width - 3] + "..." -def latest_log_line(path: str) -> str | None: +def latest_log_line(path): if not path or not os.path.isfile(path): return None try: @@ -50,41 +77,69 @@ def latest_log_line(path: str) -> str | None: return None -def terminal_width(fallback: int = 80) -> int: +def terminal_width(fallback=80): cols, _ = shutil.get_terminal_size(fallback=(fallback, 24)) return max(cols, 1) class RunProgressRenderer: - def __init__(self, stream, width_fn=None): + def __init__(self, stream, width_fn=None, color=False): self._stream = stream self._width_fn = width_fn or terminal_width + self._color = color self._has_transient = False + self._step_started = False - def running(self, text: str) -> None: + def running(self, text): width = self._width_fn() - display = truncate_to_width(text, width) - self._stream.write(f"\r\x1b[K{display}") + visible = truncate_to_width(f" log: {text}", width) + if self._color and visible.startswith(" log:"): + visible = f" {_DIM}log:{_RESET}{visible[6:]}" + self._stream.write(f"\r\x1b[K{visible}") self._stream.flush() self._has_transient = True - def clear(self) -> None: + def clear(self): if self._has_transient: self._stream.write("\r\x1b[K") self._stream.flush() self._has_transient = False - def summary(self, text: str) -> None: + def start_run(self, name, workspace): self.clear() - self._stream.write(f"{text}\n") + run_label = style("[run]", _BOLD, self._color) + self._stream.write(f"{run_label} {name} workspace={workspace}\n") self._stream.flush() + def start_step(self, step, tool): + self.clear() + if self._step_started: + self._stream.write("\n") + step_name = style(f"{step} ({tool})", _CYAN, self._color) + self._stream.write(f"> {step_name}\n") + self._stream.flush() + self._step_started = True + + def finish_step(self, step, tool, status, runtime, log_path, inspect_cmd, 
success): + self.clear() + if success: + line = style(f"✓ {step} ({tool}) {runtime}", _GREEN, self._color) + else: + sym = style("✗", _RED, self._color) + status_styled = style(status, _RED, self._color) + line = f"{sym} {step} ({tool}) {status_styled} {runtime}" + self._stream.write(f"{line}\n") + log_label = style(" log:", _DIM, self._color) + self._stream.write(f"{log_label} {log_path}\n") + inspect_label = style(" inspect:", _DIM, self._color) + self._stream.write(f"{inspect_label} {inspect_cmd}\n") + self._stream.flush() -def _poll_log(renderer, log_path, step_token, tool, stop_event, interval=0.5): - prefix = f"running step={step_token} tool={tool}" + +def _poll_log(renderer, log_path, stop_event, interval=0.5): while not stop_event.is_set(): line = latest_log_line(log_path) - renderer.running(f"{prefix} | {line}" if line else f"{prefix} | waiting for log...") + renderer.running(line or "waiting for log...") stop_event.wait(interval) @@ -93,9 +148,14 @@ def run_flow_with_progress(engine_flow, ctx, project, stderr): from chipcompiler.cli.output import disclosure_cmd, normalize_step_name, normalize_state - renderer = RunProgressRenderer(stderr) + color = supports_color(stderr, ctx.output_mode) + renderer = RunProgressRenderer(stderr, color=color) engine_flow.workspace.home.reset() + run_dir = engine_flow.workspace.directory + run_name = os.path.basename(run_dir) or "default" + renderer.start_run(run_name, run_dir) + for workspace_step in engine_flow.workspace_steps: step_token = normalize_step_name(workspace_step.name) tool = workspace_step.tool @@ -105,10 +165,12 @@ def run_flow_with_progress(engine_flow, ctx, project, stderr): f"{workspace_step.tool} - begin step - {workspace_step.name}" ) + renderer.start_step(step_token, tool) + stop_event = threading.Event() monitor = threading.Thread( target=_poll_log, - args=(renderer, log_path, step_token, tool, stop_event), + args=(renderer, log_path, stop_event), daemon=True, ) monitor.start() @@ -144,12 +206,10 @@ def run_flow_with_progress(engine_flow, ctx, project, stderr): inspect = disclosure_cmd(f"ecc log {step_token} --errors", project) - renderer.summary( - f" step={step_token} tool={tool} status={status} " - f"runtime={runtime} log={rel_log} inspect=\"{inspect}\"" - ) + is_success = state == StateEnum.Success + renderer.finish_step(step_token, tool, status, runtime, rel_log, inspect, is_success) - if state != StateEnum.Success: + if not is_success: return False return True diff --git a/test/cli/test_progress.py b/test/cli/test_progress.py index 4c0ee5ad..f01721e2 100644 --- a/test/cli/test_progress.py +++ b/test/cli/test_progress.py @@ -1,12 +1,28 @@ import io +import re +import time import pytest +_ANSI_RE = re.compile(r"\x1b\[[0-9;]*[a-zA-Z]") + + +def _strip_ansi(text): + return _ANSI_RE.sub("", text) + from chipcompiler.cli.progress import ( + _BOLD, + _CYAN, + _DIM, + _GREEN, + _RED, + _RESET, RunProgressRenderer, latest_log_line, sanitize_log_line, should_enable_run_progress, + style, + supports_color, truncate_to_width, ) from chipcompiler.cli.types import CommandContext, OutputMode @@ -37,6 +53,47 @@ def _make_ctx(mode=OutputMode.TEXT): ) +# -- supports_color -- + + +class TestSupportsColor: + def test_enabled_text_tty(self): + assert supports_color(FakeTTYStderr(True), OutputMode.TEXT) is True + + def test_disabled_non_tty(self): + assert supports_color(FakeTTYStderr(False), OutputMode.TEXT) is False + + def test_disabled_no_isattr(self): + assert supports_color(io.StringIO(), OutputMode.TEXT) is False + + def 
test_disabled_no_color(self): + assert supports_color(FakeTTYStderr(True), OutputMode.TEXT, {"NO_COLOR": "1"}) is False + + def test_disabled_term_dumb(self): + assert supports_color(FakeTTYStderr(True), OutputMode.TEXT, {"TERM": "dumb"}) is False + + def test_disabled_json(self): + assert supports_color(FakeTTYStderr(True), OutputMode.JSON) is False + + def test_disabled_jsonl(self): + assert supports_color(FakeTTYStderr(True), OutputMode.JSONL) is False + + def test_enabled_with_clean_env(self): + assert supports_color(FakeTTYStderr(True), OutputMode.TEXT, {"TERM": "xterm-256color"}) is True + + +# -- style -- + + +class TestStyle: + def test_applies_code_when_enabled(self): + result = style("hello", _GREEN, True) + assert result == f"{_GREEN}hello{_RESET}" + + def test_passthrough_when_disabled(self): + assert style("hello", _GREEN, False) == "hello" + + # -- should_enable_run_progress -- @@ -144,23 +201,13 @@ def test_trailing_newlines_only(self, tmp_path): class TestRunProgressRenderer: - def test_running_writes_carriage_return_clear(self): + def test_running_writes_log_prefix(self): buf = FakeTTYStderr(True) r = RunProgressRenderer(buf, width_fn=lambda: 80) r.running("working...") output = "".join(buf.written) assert output.startswith("\r\x1b[K") - assert "working..." in output - - def test_summary_clears_transient_first(self): - buf = FakeTTYStderr(True) - r = RunProgressRenderer(buf, width_fn=lambda: 80) - r.running("transient") - r.summary("done") - output = "".join(buf.written) - # After transient, clear should be written before summary - assert "\r\x1b[K" in output - assert "done\n" in output + assert " log: working..." in output def test_clear_noop_without_transient(self): buf = FakeTTYStderr(True) @@ -170,12 +217,121 @@ def test_clear_noop_without_transient(self): def test_truncates_long_running_text(self): buf = FakeTTYStderr(True) - r = RunProgressRenderer(buf, width_fn=lambda: 10) + r = RunProgressRenderer(buf, width_fn=lambda: 20) r.running("x" * 100) output = "".join(buf.written) - # \r\x1b[K + truncated text (10 chars max) display = output.replace("\r\x1b[K", "") - assert len(display) <= 10 + assert len(display) <= 20 + + def test_start_step_emits_header(self): + buf = FakeTTYStderr(True) + r = RunProgressRenderer(buf, width_fn=lambda: 80) + r.start_step("synthesis", "yosys") + output = "".join(buf.written) + assert "> synthesis (yosys)\n" in output + + def test_start_step_separator_after_first(self): + buf = FakeTTYStderr(True) + r = RunProgressRenderer(buf, width_fn=lambda: 80) + r.start_step("synthesis", "yosys") + r.start_step("floorplan", "ecc") + output = "".join(buf.written) + assert "\n> floorplan (ecc)\n" in output + + def test_start_step_no_separator_before_first(self): + buf = FakeTTYStderr(True) + r = RunProgressRenderer(buf, width_fn=lambda: 80) + r.start_step("synthesis", "yosys") + output = "".join(buf.written) + assert not output.startswith("\n") + + def test_start_run_emits_header(self): + buf = FakeTTYStderr(True) + r = RunProgressRenderer(buf, width_fn=lambda: 80) + r.start_run("default", "/tmp/runs/default") + output = "".join(buf.written) + assert "[run] default workspace=/tmp/runs/default\n" in output + + def test_finish_step_success(self): + buf = FakeTTYStderr(True) + r = RunProgressRenderer(buf, width_fn=lambda: 80) + r.finish_step("synthesis", "yosys", "success", "0:00:06", "output/synth.log", "ecc log synthesis --errors", True) + output = "".join(buf.written) + assert "✓ synthesis (yosys) 0:00:06\n" in output + assert " log: output/synth.log\n" 
in output + assert " inspect: ecc log synthesis --errors\n" in output + + def test_finish_step_non_success(self): + buf = FakeTTYStderr(True) + r = RunProgressRenderer(buf, width_fn=lambda: 80) + r.finish_step("placement", "dreamplace", "incomplete", "0:00:00", "", "ecc log placement --errors", False) + output = "".join(buf.written) + assert "✗ placement (dreamplace) incomplete 0:00:00\n" in output + assert " log: \n" in output + assert " inspect: ecc log placement --errors\n" in output + + def test_finish_step_clears_transient(self): + buf = FakeTTYStderr(True) + r = RunProgressRenderer(buf, width_fn=lambda: 80) + r.running("transient log") + r.finish_step("synthesis", "yosys", "success", "0:00:06", "log", "cmd", True) + output = "".join(buf.written) + clear_pos = output.find("\r\x1b[K") + summary_pos = output.find("✓") + assert clear_pos < summary_pos + + def test_running_with_color(self): + buf = FakeTTYStderr(True) + r = RunProgressRenderer(buf, width_fn=lambda: 80, color=True) + r.running("working...") + output = "".join(buf.written) + assert _DIM in output + assert "log:" in output + + def test_running_without_color(self): + buf = FakeTTYStderr(True) + r = RunProgressRenderer(buf, width_fn=lambda: 80, color=False) + r.running("working...") + output = "".join(buf.written) + assert _DIM not in output + + def test_no_color_codes_when_disabled(self): + buf = FakeTTYStderr(True) + r = RunProgressRenderer(buf, width_fn=lambda: 80, color=False) + r.start_run("default", "/tmp") + r.start_step("synthesis", "yosys") + r.finish_step("synthesis", "yosys", "success", "0:00:06", "log", "cmd", True) + output = "".join(buf.written) + for code in (_BOLD, _DIM, _CYAN, _GREEN, _RED): + assert code not in output + + def test_start_step_with_color(self): + buf = FakeTTYStderr(True) + r = RunProgressRenderer(buf, width_fn=lambda: 80, color=True) + r.start_step("synthesis", "yosys") + output = "".join(buf.written) + assert _CYAN in output + + def test_start_run_with_color(self): + buf = FakeTTYStderr(True) + r = RunProgressRenderer(buf, width_fn=lambda: 80, color=True) + r.start_run("default", "/tmp") + output = "".join(buf.written) + assert _BOLD in output + + def test_finish_step_success_with_color(self): + buf = FakeTTYStderr(True) + r = RunProgressRenderer(buf, width_fn=lambda: 80, color=True) + r.finish_step("synthesis", "yosys", "success", "0:00:06", "log", "cmd", True) + output = "".join(buf.written) + assert _GREEN in output + + def test_finish_step_non_success_with_color(self): + buf = FakeTTYStderr(True) + r = RunProgressRenderer(buf, width_fn=lambda: 80, color=True) + r.finish_step("placement", "dreamplace", "incomplete", "0:00:00", "", "cmd", False) + output = "".join(buf.written) + assert _RED in output # -- run_flow_with_progress -- @@ -208,7 +364,7 @@ def _make_flow(ws, steps, run_step_fn): class TestRunFlowWithProgress: - def test_mirrors_run_steps_success(self, tmp_path): + def test_success_summary_format(self, tmp_path): from chipcompiler.data import StateEnum from chipcompiler.cli.progress import run_flow_with_progress @@ -222,8 +378,8 @@ def test_mirrors_run_steps_success(self, tmp_path): result = run_flow_with_progress(flow, _make_ctx(), None, buf) assert result is True output = "".join(buf.written) - assert "step=synthesis" in output - assert "status=success" in output + assert "✓ synthesis (yosys)" in output + assert "status=success" not in output def test_stops_on_failure(self): from chipcompiler.data import StateEnum @@ -248,7 +404,7 @@ def fake_run_step(self, s): assert result is 
False assert call_count[0] == 2 - def test_summary_includes_inspect(self): + def test_summary_includes_inspect_detail_line(self): from chipcompiler.data import StateEnum from chipcompiler.cli.progress import run_flow_with_progress @@ -260,10 +416,10 @@ def test_summary_includes_inspect(self): buf = FakeTTYStderr(True) run_flow_with_progress(flow, _make_ctx(), "myproject", buf) - output = "".join(buf.written) - assert "ecc log synthesis --errors" in output + plain = _strip_ansi("".join(buf.written)) + assert " inspect: ecc log synthesis --errors --project myproject\n" in plain - def test_summary_includes_log_path(self, tmp_path): + def test_summary_includes_log_detail_line(self, tmp_path): from chipcompiler.data import StateEnum from chipcompiler.cli.progress import run_flow_with_progress @@ -279,10 +435,89 @@ def test_summary_includes_log_path(self, tmp_path): buf = FakeTTYStderr(True) run_flow_with_progress(flow, _make_ctx(), None, buf) output = "".join(buf.written) - assert "log=" in output + assert " log:" in output + + def test_step_headers_emitted(self): + from chipcompiler.data import StateEnum + from chipcompiler.cli.progress import run_flow_with_progress + + flow = _make_flow( + _make_ws(), + [ + _make_step("Synthesis", "yosys"), + _make_step("Floorplan", "ecc"), + ], + lambda self, s: StateEnum.Success, + ) + + buf = FakeTTYStderr(True) + result = run_flow_with_progress(flow, _make_ctx(), None, buf) + assert result is True + plain = _strip_ansi("".join(buf.written)) + assert "> synthesis (yosys)\n" in plain + assert "> floorplan (ecc)\n" in plain + + def test_run_header_emitted(self, tmp_path): + from chipcompiler.data import StateEnum + from chipcompiler.cli.progress import run_flow_with_progress + + flow = _make_flow( + _make_ws(str(tmp_path)), + [_make_step("Synthesis", "yosys")], + lambda self, s: StateEnum.Success, + ) + + buf = FakeTTYStderr(True) + run_flow_with_progress(flow, _make_ctx(), None, buf) + output = "".join(buf.written) + assert "[run]" in output + assert "workspace=" in output + + def test_block_separator_between_steps(self): + from chipcompiler.data import StateEnum + from chipcompiler.cli.progress import run_flow_with_progress + + flow = _make_flow( + _make_ws(), + [ + _make_step("Synthesis", "yosys"), + _make_step("Floorplan", "ecc"), + ], + lambda self, s: StateEnum.Success, + ) + + buf = FakeTTYStderr(True) + result = run_flow_with_progress(flow, _make_ctx(), None, buf) + assert result is True + output = "".join(buf.written) + synth_summary = output.find("✓ synthesis") + fp_header = output.find("> floorplan") + between = output[synth_summary:fp_header] + assert "\n\n" in between + + def test_failure_summary_includes_status(self): + from chipcompiler.data import StateEnum + from chipcompiler.cli.progress import run_flow_with_progress + + def fake_run_step(self, s): + if s.name == "Synthesis": + return StateEnum.Success + return StateEnum.Imcomplete + + flow = _make_flow( + _make_ws(), + [_make_step("Synthesis", "yosys"), _make_step("Floorplan", "ecc")], + fake_run_step, + ) + + buf = FakeTTYStderr(True) + result = run_flow_with_progress(flow, _make_ctx(), None, buf) + assert result is False + plain = _strip_ansi("".join(buf.written)) + assert "✗ floorplan (ecc)" in plain + assert "incomplete" in plain def test_transient_line_shows_log_content(self, tmp_path): - import time from chipcompiler.data import StateEnum from chipcompiler.cli.progress import run_flow_with_progress @@ -302,18 +537,16 @@ def fake_run_step(self, s): buf = FakeTTYStderr(True) result = 
run_flow_with_progress(flow, _make_ctx(), None, buf) assert result is True + plain = _strip_ansi("".join(buf.written)) + assert "Synthesizing module top" in plain - output = "".join(buf.written) - assert "Synthesizing module top" in output - - running_pos = output.find("running step=synthesis tool=yosys") - summary_pos = output.find("step=synthesis") - assert running_pos >= 0, "Missing transient running line with contract prefix" - assert summary_pos >= 0, "Missing summary line" - assert running_pos < summary_pos + log_pos = plain.find("Synthesizing module top") + summary_pos = plain.find("✓ synthesis") + assert log_pos >= 0 + assert summary_pos >= 0 + assert log_pos < summary_pos def test_transient_shows_waiting_when_no_log(self): - import time from chipcompiler.data import StateEnum from chipcompiler.cli.progress import run_flow_with_progress @@ -330,9 +563,8 @@ def fake_run_step(self, s): buf = FakeTTYStderr(True) result = run_flow_with_progress(flow, _make_ctx(), None, buf) assert result is True - - output = "".join(buf.written) - assert "running step=synthesis tool=yosys | waiting for log..." in output + plain = _strip_ansi("".join(buf.written)) + assert " log: waiting for log..." in plain def test_log_section_markers_emitted(self, tmp_path): from chipcompiler.data import StateEnum @@ -394,3 +626,37 @@ def raising_run_step(self, s): output = "".join(buf.written) assert "\r\x1b[K" in output + + def test_color_enabled_for_tty_text(self, monkeypatch): + from chipcompiler.data import StateEnum + from chipcompiler.cli.progress import run_flow_with_progress + + monkeypatch.delenv("NO_COLOR", raising=False) + monkeypatch.setenv("TERM", "xterm-256color") + + flow = _make_flow( + _make_ws(), + [_make_step("Synthesis", "yosys")], + lambda self, s: StateEnum.Success, + ) + + buf = FakeTTYStderr(True) + run_flow_with_progress(flow, _make_ctx(), None, buf) + output = "".join(buf.written) + assert "\x1b[36m" in output # cyan for step header + + def test_color_disabled_for_non_tty(self): + from chipcompiler.data import StateEnum + from chipcompiler.cli.progress import run_flow_with_progress + + flow = _make_flow( + _make_ws(), + [_make_step("Synthesis", "yosys")], + lambda self, s: StateEnum.Success, + ) + + buf = FakeTTYStderr(False) + run_flow_with_progress(flow, _make_ctx(), None, buf) + output = "".join(buf.written) + for code in (_BOLD, _CYAN, _GREEN, _RED, _DIM): + assert code not in output From 8034ffac8e712b5745bbae7fc72a5cd8fd38d19f Mon Sep 17 00:00:00 2001 From: Emin Date: Sun, 3 May 2026 18:21:11 +0800 Subject: [PATCH 059/104] fix(cli): style active step marker in cyan and make color tests deterministic - Include '>' in the cyan-styled step header text so the full active header is colored, matching AC-5 requirement - Pass explicit clean env to positive supports_color tests so they pass deterministically under TERM=dumb - Add assertion that cyan sequence precedes '>' in raw ANSI output --- chipcompiler/cli/progress.py | 4 ++-- test/cli/test_progress.py | 6 +++++- 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/chipcompiler/cli/progress.py b/chipcompiler/cli/progress.py index 0088ec4e..5d90d5d0 100644 --- a/chipcompiler/cli/progress.py +++ b/chipcompiler/cli/progress.py @@ -115,8 +115,8 @@ def start_step(self, step, tool): self.clear() if self._step_started: self._stream.write("\n") - step_name = style(f"{step} ({tool})", _CYAN, self._color) - self._stream.write(f"> {step_name}\n") + header = style(f"> {step} ({tool})", _CYAN, self._color) + self._stream.write(f"{header}\n") 
self._stream.flush() self._step_started = True diff --git a/test/cli/test_progress.py b/test/cli/test_progress.py index f01721e2..37a74386 100644 --- a/test/cli/test_progress.py +++ b/test/cli/test_progress.py @@ -58,7 +58,7 @@ def _make_ctx(mode=OutputMode.TEXT): class TestSupportsColor: def test_enabled_text_tty(self): - assert supports_color(FakeTTYStderr(True), OutputMode.TEXT) is True + assert supports_color(FakeTTYStderr(True), OutputMode.TEXT, {"TERM": "xterm-256color"}) is True def test_disabled_non_tty(self): assert supports_color(FakeTTYStderr(False), OutputMode.TEXT) is False @@ -311,6 +311,10 @@ def test_start_step_with_color(self): r.start_step("synthesis", "yosys") output = "".join(buf.written) assert _CYAN in output + # Cyan sequence must appear before the `>` marker in raw output + cyan_pos = output.find(_CYAN) + marker_pos = output.find(">") + assert cyan_pos < marker_pos def test_start_run_with_color(self): buf = FakeTTYStderr(True) From 942a19c0bba2306379cd9276217922839f6cead6 Mon Sep 17 00:00:00 2001 From: Emin Date: Sun, 3 May 2026 18:33:31 +0800 Subject: [PATCH 060/104] fix(cli): add clean-line newline after clearing transient before stable summary The renderer's clear() now emits "\r\x1b[K\n" instead of just "\r\x1b[K", ensuring the cursor moves to a clean line before the summary is written. This prevents stable summary text from visually attaching to the position of the cleared transient line. Replaced the weak clear-before-summary test with two focused tests that assert the raw output contains "\r\x1b[K\n" followed by the summary symbol (checkmark or x) for both success and failure. --- chipcompiler/cli/progress.py | 2 +- test/cli/test_progress.py | 15 +++++++++++---- 2 files changed, 12 insertions(+), 5 deletions(-) diff --git a/chipcompiler/cli/progress.py b/chipcompiler/cli/progress.py index 5d90d5d0..b9cbc4cf 100644 --- a/chipcompiler/cli/progress.py +++ b/chipcompiler/cli/progress.py @@ -101,7 +101,7 @@ def running(self, text): def clear(self): if self._has_transient: - self._stream.write("\r\x1b[K") + self._stream.write("\r\x1b[K\n") self._stream.flush() self._has_transient = False diff --git a/test/cli/test_progress.py b/test/cli/test_progress.py index 37a74386..4912a1a6 100644 --- a/test/cli/test_progress.py +++ b/test/cli/test_progress.py @@ -270,15 +270,22 @@ def test_finish_step_non_success(self): assert " log: \n" in output assert " inspect: ecc log placement --errors\n" in output - def test_finish_step_clears_transient(self): + def test_finish_step_clears_transient_to_clean_line(self): buf = FakeTTYStderr(True) r = RunProgressRenderer(buf, width_fn=lambda: 80) r.running("transient log") r.finish_step("synthesis", "yosys", "success", "0:00:06", "log", "cmd", True) output = "".join(buf.written) - clear_pos = output.find("\r\x1b[K") - summary_pos = output.find("✓") - assert clear_pos < summary_pos + # The final clear before the summary must move to a clean line + assert "\r\x1b[K\n✓ synthesis" in output + + def test_finish_step_non_success_clears_transient_to_clean_line(self): + buf = FakeTTYStderr(True) + r = RunProgressRenderer(buf, width_fn=lambda: 80) + r.running("transient log") + r.finish_step("placement", "dreamplace", "incomplete", "0:00:00", "", "cmd", False) + output = "".join(buf.written) + assert "\r\x1b[K\n✗ placement" in output def test_running_with_color(self): buf = FakeTTYStderr(True) From 12ebdaebee09f6df79f686f95ebc9b14637e0905 Mon Sep 17 00:00:00 2001 From: Emin Date: Sun, 3 May 2026 19:00:41 +0800 Subject: [PATCH 061/104] 
refactor(cli): hoist repeated test imports to module level Move StateEnum and run_flow_with_progress imports from 15 inline method-level imports to the top-level import block, eliminating 30 lines of duplication. --- test/cli/test_progress.py | 46 ++------------------------------------- 1 file changed, 2 insertions(+), 44 deletions(-) diff --git a/test/cli/test_progress.py b/test/cli/test_progress.py index 4912a1a6..21f405e2 100644 --- a/test/cli/test_progress.py +++ b/test/cli/test_progress.py @@ -19,6 +19,7 @@ def _strip_ansi(text): _RESET, RunProgressRenderer, latest_log_line, + run_flow_with_progress, sanitize_log_line, should_enable_run_progress, style, @@ -26,6 +27,7 @@ def _strip_ansi(text): truncate_to_width, ) from chipcompiler.cli.types import CommandContext, OutputMode +from chipcompiler.data import StateEnum class FakeTTYStderr: @@ -376,9 +378,6 @@ def _make_flow(ws, steps, run_step_fn): class TestRunFlowWithProgress: def test_success_summary_format(self, tmp_path): - from chipcompiler.data import StateEnum - from chipcompiler.cli.progress import run_flow_with_progress - flow = _make_flow( _make_ws(str(tmp_path)), [_make_step("Synthesis", "yosys", str(tmp_path / "synth.log"))], @@ -393,9 +392,6 @@ def test_success_summary_format(self, tmp_path): assert "status=success" not in output def test_stops_on_failure(self): - from chipcompiler.data import StateEnum - from chipcompiler.cli.progress import run_flow_with_progress - call_count = [0] def fake_run_step(self, s): @@ -416,9 +412,6 @@ def fake_run_step(self, s): assert call_count[0] == 2 def test_summary_includes_inspect_detail_line(self): - from chipcompiler.data import StateEnum - from chipcompiler.cli.progress import run_flow_with_progress - flow = _make_flow( _make_ws(), [_make_step("Synthesis", "yosys", "/tmp/synth.log")], @@ -431,9 +424,6 @@ def test_summary_includes_inspect_detail_line(self): assert " inspect: ecc log synthesis --errors --project myproject\n" in plain def test_summary_includes_log_detail_line(self, tmp_path): - from chipcompiler.data import StateEnum - from chipcompiler.cli.progress import run_flow_with_progress - log_file = tmp_path / "synth.log" log_file.write_text("content\n") @@ -449,9 +439,6 @@ def test_summary_includes_log_detail_line(self, tmp_path): assert " log:" in output def test_step_headers_emitted(self): - from chipcompiler.data import StateEnum - from chipcompiler.cli.progress import run_flow_with_progress - flow = _make_flow( _make_ws(), [ @@ -469,9 +456,6 @@ def test_step_headers_emitted(self): assert "> floorplan (ecc)\n" in plain def test_run_header_emitted(self, tmp_path): - from chipcompiler.data import StateEnum - from chipcompiler.cli.progress import run_flow_with_progress - flow = _make_flow( _make_ws(str(tmp_path)), [_make_step("Synthesis", "yosys")], @@ -485,9 +469,6 @@ def test_run_header_emitted(self, tmp_path): assert "workspace=" in output def test_block_separator_between_steps(self): - from chipcompiler.data import StateEnum - from chipcompiler.cli.progress import run_flow_with_progress - flow = _make_flow( _make_ws(), [ @@ -507,9 +488,6 @@ def test_block_separator_between_steps(self): assert "\n\n" in between def test_failure_summary_includes_status(self): - from chipcompiler.data import StateEnum - from chipcompiler.cli.progress import run_flow_with_progress - def fake_run_step(self, s): if s.name == "Synthesis": return StateEnum.Success @@ -529,9 +507,6 @@ def fake_run_step(self, s): assert "incomplete" in plain def test_transient_line_shows_log_content(self, tmp_path): - 
from chipcompiler.data import StateEnum - from chipcompiler.cli.progress import run_flow_with_progress - log_file = tmp_path / "synth.log" def fake_run_step(self, s): @@ -558,9 +533,6 @@ def fake_run_step(self, s): assert log_pos < summary_pos def test_transient_shows_waiting_when_no_log(self): - from chipcompiler.data import StateEnum - from chipcompiler.cli.progress import run_flow_with_progress - def fake_run_step(self, s): time.sleep(1.0) return StateEnum.Success @@ -578,9 +550,6 @@ def fake_run_step(self, s): assert " log: waiting for log..." in plain def test_log_section_markers_emitted(self, tmp_path): - from chipcompiler.data import StateEnum - from chipcompiler.cli.progress import run_flow_with_progress - sections = [] flow = _make_flow( _make_ws(str(tmp_path), log_section_fn=lambda self, msg: sections.append(msg)), @@ -596,9 +565,6 @@ def test_log_section_markers_emitted(self, tmp_path): assert sections.index("yosys - begin step - Synthesis") < sections.index("yosys - end step - Synthesis") def test_log_section_markers_around_run_step(self, tmp_path): - from chipcompiler.data import StateEnum - from chipcompiler.cli.progress import run_flow_with_progress - call_order = [] def fake_run_step(self, s): @@ -620,8 +586,6 @@ def fake_run_step(self, s): assert begin_idx < run_idx < end_idx def test_monitor_cleanup_on_run_step_exception(self, tmp_path): - from chipcompiler.cli.progress import run_flow_with_progress - def raising_run_step(self, s): raise RuntimeError("tool crashed") @@ -639,9 +603,6 @@ def raising_run_step(self, s): assert "\r\x1b[K" in output def test_color_enabled_for_tty_text(self, monkeypatch): - from chipcompiler.data import StateEnum - from chipcompiler.cli.progress import run_flow_with_progress - monkeypatch.delenv("NO_COLOR", raising=False) monkeypatch.setenv("TERM", "xterm-256color") @@ -657,9 +618,6 @@ def test_color_enabled_for_tty_text(self, monkeypatch): assert "\x1b[36m" in output # cyan for step header def test_color_disabled_for_non_tty(self): - from chipcompiler.data import StateEnum - from chipcompiler.cli.progress import run_flow_with_progress - flow = _make_flow( _make_ws(), [_make_step("Synthesis", "yosys")], From ad80deeeb8fb96ee793d1060463e819d457d1194 Mon Sep 17 00:00:00 2001 From: Emin Date: Sun, 3 May 2026 22:31:39 +0800 Subject: [PATCH 062/104] feat(cli): add parameter discovery, validation, and run overrides Add CLI parameter layer exposing EDA knobs through stable semantic keys (e.g. place.target_density, synth.max_fanout, route.top_layer). New modules: - chipcompiler/cli/params.py: schema registry, value parsing, validation, source-aware resolution (default < ecc.toml < cli), and semantic-to-backend mapping helpers - chipcompiler/cli/param_handler.py: ecc param list/show/set/unset/diff command handlers with safe persistent TOML edits Parser additions: - ecc param {list,show,set,unset,diff} subcommands with --json/--jsonl/--plain - ecc run --set key=value (repeatable, temporary overrides) Integration: - ProjectConfig parses [params.*] from ecc.toml - run handler validates --set overrides before workspace creation, merges resolved parameters into workspace.parameters.data - config --resolved includes parameter records with value, default, source, and maps_to Output modes: - OutputMode.PLAIN added for stable grep-friendly key=value lines - render_plain() added alongside existing text/JSON/JSONL renderers Tests: 45 unit tests + 27 CLI integration tests; all 298 CLI tests pass. No changes to chipcompiler/engine/ or chipcompiler/tools/. 
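
As an illustration of the precedence and mapping described above, the sketch below exercises the
new helpers in chipcompiler/cli/params.py directly (the override values are illustrative; the CLI
normally drives these calls through `ecc run --set` and the [params.*] tables in ecc.toml):

```python
# Minimal sketch of source-aware resolution (default < ecc.toml < cli)
# using the helpers added in chipcompiler/cli/params.py in this patch.
from chipcompiler.cli.params import (
    build_backend_overrides,
    parse_cli_overrides,
    resolve_parameters,
)

# Persistent override, as produced by parsing a [params.place] table in ecc.toml.
toml_overrides = {"place.target_density": 0.5}

# Temporary overrides, as passed on the command line via repeatable --set flags.
cli_overrides, errors = parse_cli_overrides(
    ["place.target_density=0.65", "synth.max_fanout=16"]
)
assert not errors

# CLI overrides win over ecc.toml, which wins over the schema default.
resolved, errors = resolve_parameters(
    toml_overrides=toml_overrides, cli_overrides=cli_overrides
)
density = next(r for r in resolved if r.param == "place.target_density")
assert density.value == 0.65 and density.source == "cli"

# Only non-default values are translated to backend parameter names.
print(build_backend_overrides(resolved))
# -> {'Max fanout': 16, 'Target density': 0.65}
```

The same override can be made persistent with `ecc param set place.target_density 0.65`, which
writes it under a [params.place] table in ecc.toml and is later reported with source "ecc.toml"
by `ecc param show` and `ecc config --resolved`.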
--- chipcompiler/cli/commands.py | 4 + chipcompiler/cli/config.py | 9 + chipcompiler/cli/config_view.py | 20 ++ chipcompiler/cli/handlers.py | 67 ++++- chipcompiler/cli/main.py | 39 +++ chipcompiler/cli/param_handler.py | 314 +++++++++++++++++++++ chipcompiler/cli/params.py | 435 ++++++++++++++++++++++++++++++ chipcompiler/cli/render.py | 13 + chipcompiler/cli/types.py | 1 + test/cli/test_cli_params.py | 379 ++++++++++++++++++++++++++ test/cli/test_params.py | 305 +++++++++++++++++++++ 11 files changed, 1585 insertions(+), 1 deletion(-) create mode 100644 chipcompiler/cli/param_handler.py create mode 100644 chipcompiler/cli/params.py create mode 100644 test/cli/test_cli_params.py create mode 100644 test/cli/test_params.py diff --git a/chipcompiler/cli/commands.py b/chipcompiler/cli/commands.py index 2bd5646f..7b489594 100644 --- a/chipcompiler/cli/commands.py +++ b/chipcompiler/cli/commands.py @@ -14,6 +14,8 @@ def build_context(args) -> CommandContext: mode = OutputMode.JSONL elif getattr(args, "json", False): mode = OutputMode.JSON + elif getattr(args, "plain", False): + mode = OutputMode.PLAIN else: mode = OutputMode.TEXT @@ -28,6 +30,8 @@ def build_context(args) -> CommandContext: def dispatch(args, ctx: CommandContext) -> CommandResult: from chipcompiler.cli import handlers + if args.command == "param": + return handlers.param(args, ctx) handler = getattr(handlers, args.command, None) if handler is None: return CommandResult.err([], exit_code=1) diff --git a/chipcompiler/cli/config.py b/chipcompiler/cli/config.py index 8cb79eac..fa5e0be5 100644 --- a/chipcompiler/cli/config.py +++ b/chipcompiler/cli/config.py @@ -26,6 +26,8 @@ class ProjectConfig: config_path: str = "" project_dir: str = "" + params_overrides: dict[str, object] = field(default_factory=dict) + def load_project_config(config_path: str) -> ProjectConfig: try: @@ -78,6 +80,13 @@ def _str(val, default=""): config_path=config_path, project_dir=project_dir, ) + + params_raw = data.get("params") + if isinstance(params_raw, dict): + from chipcompiler.cli.params import parse_toml_params + flat, _ = parse_toml_params(params_raw) + cfg.params_overrides = flat + return cfg diff --git a/chipcompiler/cli/config_view.py b/chipcompiler/cli/config_view.py index 11b2a5d8..40a6fa0d 100644 --- a/chipcompiler/cli/config_view.py +++ b/chipcompiler/cli/config_view.py @@ -96,6 +96,26 @@ def build_project_config_items(project_dir: str, run_dir: str, "inspect_cmd": disclosure_cmd("ecc status", project, run_id), }) + # Parameter records with source information + from chipcompiler.cli.params import resolve_parameters + resolved_params, _ = resolve_parameters(toml_overrides=cfg.params_overrides) + for rp in resolved_params: + maps_to = rp.schema.maps_to + if isinstance(maps_to, str): + mapping = maps_to + else: + mapping = ", ".join(f"{k}.{v}" for k, v in maps_to.items()) + items.append({ + "kind": "param", + "scope": "project", + "key": rp.param, + "value": rp.value, + "default": rp.default, + "source": rp.source, + "maps_to": mapping, + "inspect_cmd": disclosure_cmd(f"ecc param show {rp.param}", project), + }) + return items, 0 diff --git a/chipcompiler/cli/handlers.py b/chipcompiler/cli/handlers.py index 90835f35..619ab7af 100644 --- a/chipcompiler/cli/handlers.py +++ b/chipcompiler/cli/handlers.py @@ -12,6 +12,29 @@ ) +def param(args, ctx: CommandContext) -> CommandResult: + from chipcompiler.cli.param_handler import ( + param_diff, + param_list, + param_set, + param_show, + param_unset, + ) + + subcmd = getattr(args, "param_command", None) + 
if subcmd == "list": + return param_list(args, ctx) + if subcmd == "show": + return param_show(args, ctx) + if subcmd == "set": + return param_set(args, ctx) + if subcmd == "unset": + return param_unset(args, ctx) + if subcmd == "diff": + return param_diff(args, ctx) + return CommandResult.err([error_record("missing_subcommand")], exit_code=1) + + def status(args, ctx: CommandContext) -> CommandResult: from chipcompiler.cli.inspect import ( CORRUPT_FLOW_JSON, @@ -310,7 +333,19 @@ def config(args, ctx: CommandContext) -> CommandResult: records = [] for item in items: - if item.get("scope") == "project": + if item.get("kind") == "param": + records.append({ + "kind": "param", + "config": item["key"], + "key": item["key"], + "scope": "project", + "value": item["value"], + "default": item.get("default"), + "source": item["source"], + "maps_to": item.get("maps_to"), + "inspect": item.get("inspect_cmd"), + }) + elif item.get("scope") == "project": records.append({ "config": item["key"], "scope": "project", @@ -510,6 +545,22 @@ def run(args, ctx: CommandContext) -> CommandResult: }) return CommandResult.err(records) + # Parse and validate --set overrides before workspace creation + cli_overrides = {} + raw_sets = getattr(args, "param_set", []) + if raw_sets: + from chipcompiler.cli.params import parse_cli_overrides + cli_overrides, set_errors = parse_cli_overrides(raw_sets) + if set_errors: + records = [] + for err in set_errors: + records.append({ + "kind": "error", + "error": "invalid_parameter", + "reason": err, + }) + return CommandResult.err(records) + run_dir = os.path.join(project_dir, "runs", "default") flow_json = os.path.join(run_dir, "home", "flow.json") @@ -535,6 +586,20 @@ def run(args, ctx: CommandContext) -> CommandResult: parameters = to_parameters(cfg) pdk_root = resolve_pdk_root(cfg) + # Merge resolved parameter overrides into workspace parameters + if cfg.params_overrides or cli_overrides: + from chipcompiler.cli.params import ( + build_backend_overrides, + resolve_parameters, + ) + resolved, _ = resolve_parameters( + toml_overrides=cfg.params_overrides, + cli_overrides=cli_overrides, + ) + backend_overrides = build_backend_overrides(resolved) + from chipcompiler.data.parameter import update_parameters + update_parameters(backend_overrides, parameters) + try: workspace = create_workspace( directory=run_dir, diff --git a/chipcompiler/cli/main.py b/chipcompiler/cli/main.py index 7e38c702..4c80c2be 100644 --- a/chipcompiler/cli/main.py +++ b/chipcompiler/cli/main.py @@ -85,6 +85,45 @@ def build_parser() -> argparse.ArgumentParser: diagnose_parser.add_argument("--run-id", default=None, dest="run_id", help="Run workspace selector") + # ecc param + param_parser = subparsers.add_parser("param", help="Manage EDA parameters") + param_sub = param_parser.add_subparsers(dest="param_command") + + def _add_param_flags(p): + _add_project_arg(p) + p.add_argument("--json", action="store_true", help="JSON output") + p.add_argument("--jsonl", action="store_true", help="JSONL output") + p.add_argument("--plain", action="store_true", help="Plain key-value output") + + # ecc param list + param_list = param_sub.add_parser("list", help="List all parameters") + _add_param_flags(param_list) + + # ecc param show + param_show = param_sub.add_parser("show", help="Show parameter details") + _add_param_flags(param_show) + param_show.add_argument("key", help="Parameter key (e.g. 
place.target_density)") + + # ecc param set + param_set = param_sub.add_parser("set", help="Set a persistent parameter override") + _add_param_flags(param_set) + param_set.add_argument("key", help="Parameter key") + param_set.add_argument("value", help="Parameter value") + + # ecc param unset + param_unset = param_sub.add_parser("unset", help="Remove a persistent override") + _add_param_flags(param_unset) + param_unset.add_argument("key", help="Parameter key") + + # ecc param diff + param_diff = param_sub.add_parser("diff", help="Show overrides that differ from defaults") + _add_param_flags(param_diff) + + # ecc run --set + run_parser.add_argument("--set", action="append", default=[], dest="param_set", + help="Set parameter override (repeatable, e.g. --set place.target_density=0.65)") + run_parser.add_argument("--plain", action="store_true", help="Plain key-value output") + return parser diff --git a/chipcompiler/cli/param_handler.py b/chipcompiler/cli/param_handler.py new file mode 100644 index 00000000..977861a6 --- /dev/null +++ b/chipcompiler/cli/param_handler.py @@ -0,0 +1,314 @@ +from __future__ import annotations + +import os +import tomllib + +from chipcompiler.cli.output import disclosure_cmd +from chipcompiler.cli.params import ( + ResolvedParam, + build_backend_overrides, + is_known_key, + list_groups, + list_schemas, + lookup_schema, + parse_cli_overrides, + parse_value, + resolve_parameters, + validate_value, +) +from chipcompiler.cli.records import error_record +from chipcompiler.cli.types import CommandContext, CommandResult + + +def param_list(args, ctx: CommandContext) -> CommandResult: + schemas = list_schemas() + records = [] + for s in schemas: + records.append(_schema_to_record(s)) + return CommandResult.ok(records) + + +def param_show(args, ctx: CommandContext) -> CommandResult: + key = args.key + schema = lookup_schema(key) + if schema is None: + return CommandResult.err([error_record( + "unknown_parameter", + param=key, + )], exit_code=1) + + toml_overrides = _load_toml_overrides(ctx.project_dir) + resolved, _ = resolve_parameters(toml_overrides=toml_overrides) + rp = next(r for r in resolved if r.param == key) + + record = { + "param": rp.param, + "value": rp.value, + "default": rp.default, + "source": rp.source, + "type": schema.type, + "applies": schema.applies, + "maps_to": _maps_to_str(schema.maps_to), + "description": schema.description, + } + if schema.range is not None: + record["range"] = f"[{schema.range[0]}, {schema.range[1]}]" + if schema.choices is not None: + record["choices"] = ", ".join(schema.choices) + if schema.unit is not None: + record["unit"] = schema.unit + + return CommandResult.ok([record]) + + +def param_set(args, ctx: CommandContext) -> CommandResult: + key = args.key + raw_value = args.value + + schema = lookup_schema(key) + if schema is None: + return CommandResult.err([error_record( + "unknown_parameter", + param=key, + )], exit_code=1) + + try: + value = parse_value(raw_value, schema) + except ValueError as exc: + return CommandResult.err([error_record( + "invalid_value", + param=key, + reason=str(exc), + )], exit_code=1) + + val_errors = validate_value(value, schema) + if val_errors: + return CommandResult.err([error_record( + "invalid_value", + param=key, + reason=val_errors[0], + )], exit_code=1) + + config_path = _find_config_path(ctx.project_dir) + if config_path is None: + return CommandResult.err([error_record( + "missing_config", + )], exit_code=1) + + _write_param_to_toml(config_path, key, value) + + return CommandResult.ok([{ 
+ "param": key, + "value": value, + "status": "set", + "source": "ecc.toml", + }]) + + +def param_unset(args, ctx: CommandContext) -> CommandResult: + key = args.key + + schema = lookup_schema(key) + if schema is None: + return CommandResult.err([error_record( + "unknown_parameter", + param=key, + )], exit_code=1) + + config_path = _find_config_path(ctx.project_dir) + if config_path is None: + return CommandResult.ok([{ + "param": key, + "status": "no_override", + "source": "default", + }]) + + removed = _remove_param_from_toml(config_path, key) + + if removed: + return CommandResult.ok([{ + "param": key, + "status": "unset", + "value": schema.default, + "source": "default", + }]) + return CommandResult.ok([{ + "param": key, + "status": "no_override", + "source": "default", + }]) + + +def param_diff(args, ctx: CommandContext) -> CommandResult: + toml_overrides = _load_toml_overrides(ctx.project_dir) + resolved, _ = resolve_parameters(toml_overrides=toml_overrides) + + records = [] + for rp in resolved: + if rp.source != "default": + records.append({ + "param": rp.param, + "value": rp.value, + "default": rp.default, + "source": rp.source, + }) + + if not records: + return CommandResult.ok([{"diff_status": "clean"}]) + + return CommandResult.ok(records) + + +# --------------------------------------------------------------------------- +# Internal helpers +# --------------------------------------------------------------------------- + +def _schema_to_record(schema): + record = { + "param": schema.param, + "group": schema.group, + "type": schema.type, + "default": schema.default, + "applies": schema.applies, + "description": schema.description, + } + if schema.range is not None: + record["range"] = f"[{schema.range[0]}, {schema.range[1]}]" + if schema.choices is not None: + record["choices"] = ", ".join(schema.choices) + if schema.unit is not None: + record["unit"] = schema.unit + return record + + +def _maps_to_str(maps_to): + if isinstance(maps_to, str): + return maps_to + parts = [f"{k}.{v}" for k, v in maps_to.items()] + return ", ".join(parts) + + +def _find_config_path(project_dir: str) -> str | None: + path = os.path.join(project_dir, "ecc.toml") + return path if os.path.isfile(path) else None + + +def _load_toml_overrides(project_dir: str) -> dict[str, object]: + config_path = _find_config_path(project_dir) + if config_path is None: + return {} + + from chipcompiler.cli.config import load_project_config + cfg = load_project_config(config_path) + return cfg.params_overrides + + +def _write_param_to_toml(config_path: str, key: str, value: object) -> None: + group, _, name = key.partition(".") + + with open(config_path, "rb") as f: + data = tomllib.load(f) + + params = data.get("params", {}) + if not isinstance(params, dict): + params = {} + + group_table = params.get(group, {}) + if not isinstance(group_table, dict): + group_table = {} + group_table[name] = value + params[group] = group_table + data["params"] = params + + _write_toml_data(config_path, data) + + +def _remove_param_from_toml(config_path: str, key: str) -> bool: + group, _, name = key.partition(".") + + with open(config_path, "rb") as f: + data = tomllib.load(f) + + params = data.get("params") + if not isinstance(params, dict): + return False + + group_table = params.get(group) + if not isinstance(group_table, dict): + return False + + if name not in group_table: + return False + + del group_table[name] + if not group_table: + del params[group] + if not params: + del data["params"] + + _write_toml_data(config_path, data) + 
return True + + +def _write_toml_data(path: str, data: dict) -> None: + lines = [] + _serialize_toml(data, lines, []) + with open(path, "w") as f: + f.write("\n".join(lines)) + if lines: + f.write("\n") + + +def _serialize_toml(data: dict, lines: list[str], path: list[str]) -> None: + scalars: list[tuple[str, object]] = [] + tables: list[tuple[str, dict]] = [] + arrays: list[tuple[str, list]] = [] + + for key in _toml_sort_keys(data): + val = data[key] + if isinstance(val, dict): + tables.append((key, val)) + elif isinstance(val, list): + arrays.append((key, val)) + else: + scalars.append((key, val)) + + for key, val in scalars: + lines.append(f"{key} = {_toml_value(val)}") + + for key, val in arrays: + items = ", ".join(_toml_value(v) for v in val) + lines.append(f"{key} = [{items}]") + + for key, val in tables: + lines.append("") + header_path = path + [key] + lines.append(f"[{'.'.join(header_path)}]") + _serialize_toml(val, lines, header_path) + + +def _toml_sort_keys(data: dict) -> list[str]: + def sort_key(k): + v = data[k] + if isinstance(v, dict): + return (1, k) + if isinstance(v, list): + return (1, k) + return (0, k) + return sorted(data.keys(), key=sort_key) + + +def _toml_value(val: object) -> str: + if isinstance(val, bool): + return "true" if val else "false" + if isinstance(val, int): + return str(val) + if isinstance(val, float): + return str(val) + if isinstance(val, str): + escaped = val.replace("\\", "\\\\").replace('"', '\\"') + return f'"{escaped}"' + if isinstance(val, (list, tuple)): + items = ", ".join(_toml_value(v) for v in val) + return f"[{items}]" + return str(val) diff --git a/chipcompiler/cli/params.py b/chipcompiler/cli/params.py new file mode 100644 index 00000000..2fd4a603 --- /dev/null +++ b/chipcompiler/cli/params.py @@ -0,0 +1,435 @@ +from __future__ import annotations + +from copy import deepcopy +from dataclasses import dataclass, field + + +@dataclass(frozen=True) +class ParamSchema: + param: str + group: str + name: str + type: str + default: object + applies: str + maps_to: str | dict + description: str + range: tuple[float, float] | None = None + choices: tuple[str, ...] | None = None + unit: str | None = None + example: str | None = None + + +PARAM_REGISTRY: tuple[ParamSchema, ...] 
= ( + ParamSchema( + param="design.frequency_mhz", + group="design", + name="frequency_mhz", + type="float", + default=100.0, + applies="synthesis", + maps_to="Frequency max [MHz]", + description="Target clock frequency in MHz", + range=(0.0, 10000.0), + unit="MHz", + example="200.0", + ), + ParamSchema( + param="floorplan.core_util", + group="floorplan", + name="core_util", + type="float", + default=0.4, + applies="floorplan", + maps_to={"Core": "Utilitization"}, + description="Core utilization ratio", + range=(0.01, 1.0), + example="0.45", + ), + ParamSchema( + param="floorplan.core_margin", + group="floorplan", + name="core_margin", + type="list[int]", + default=(2, 2), + applies="floorplan", + maps_to={"Core": "Margin"}, + description="Core margin in micrometers [horizontal, vertical]", + example="[2, 2]", + ), + ParamSchema( + param="floorplan.aspect_ratio", + group="floorplan", + name="aspect_ratio", + type="float", + default=1.0, + applies="floorplan", + maps_to={"Core": "Aspect ratio"}, + description="Core aspect ratio (width/height)", + range=(0.1, 10.0), + example="1.0", + ), + ParamSchema( + param="synth.max_fanout", + group="synth", + name="max_fanout", + type="int", + default=20, + applies="fixfanout", + maps_to="Max fanout", + description="Maximum fanout for netlist optimization", + range=(1, 200), + example="16", + ), + ParamSchema( + param="place.target_density", + group="place", + name="target_density", + type="float", + default=0.3, + applies="placement", + maps_to="Target density", + description="Target placement density", + range=(0.1, 0.95), + example="0.65", + ), + ParamSchema( + param="place.target_overflow", + group="place", + name="target_overflow", + type="float", + default=0.1, + applies="placement", + maps_to="Target overflow", + description="Target overflow for global placement", + range=(0.0, 1.0), + example="0.08", + ), + ParamSchema( + param="place.global_right_padding", + group="place", + name="global_right_padding", + type="int", + default=0, + applies="placement", + maps_to="Global right padding", + description="Global right padding for placement sites", + range=(0, 100), + example="8", + ), + ParamSchema( + param="place.cell_padding_x", + group="place", + name="cell_padding_x", + type="int", + default=600, + applies="placement", + maps_to="Cell padding x", + description="Cell padding in x-direction in database units", + range=(0, 10000), + example="400", + ), + ParamSchema( + param="place.routability_opt", + group="place", + name="routability_opt", + type="int", + default=1, + applies="placement", + maps_to="Routability opt flag", + description="Enable routability-driven placement optimization", + choices=("0", "1"), + example="1", + ), + ParamSchema( + param="route.bottom_layer", + group="route", + name="bottom_layer", + type="str", + default="MET2", + applies="routing", + maps_to="Bottom layer", + description="Bottom routing layer", + choices=("MET1", "MET2", "MET3", "MET4", "MET5"), + example="MET2", + ), + ParamSchema( + param="route.top_layer", + group="route", + name="top_layer", + type="str", + default="MET5", + applies="routing", + maps_to="Top layer", + description="Top routing layer", + choices=("MET2", "MET3", "MET4", "MET5", "MET6"), + example="MET5", + ), +) + +_REGISTRY_INDEX: dict[str, ParamSchema] = {s.param: s for s in PARAM_REGISTRY} +_REQUIRED_FIELDS = ("param", "group", "name", "type", "default", "applies", "maps_to", "description") + + +def lookup_schema(key: str) -> ParamSchema | None: + return _REGISTRY_INDEX.get(key) + + +def 
list_schemas() -> tuple[ParamSchema, ...]: + return PARAM_REGISTRY + + +def list_groups() -> list[str]: + seen: list[str] = [] + for s in PARAM_REGISTRY: + if s.group not in seen: + seen.append(s.group) + return seen + + +def is_known_key(key: str) -> bool: + return key in _REGISTRY_INDEX + + +def validate_schema_record(schema: ParamSchema) -> list[str]: + errors: list[str] = [] + for f in _REQUIRED_FIELDS: + if not getattr(schema, f, None): + errors.append(f"missing required field: {f}") + return errors + + +# --------------------------------------------------------------------------- +# Value parsing +# --------------------------------------------------------------------------- + +def parse_value(raw: str, schema: ParamSchema) -> object: + ptype = schema.type + + if ptype == "int": + try: + return int(raw) + except ValueError: + raise ValueError(f"expected int for {schema.param}, got '{raw}'") + + if ptype == "float": + try: + return float(raw) + except ValueError: + raise ValueError(f"expected float for {schema.param}, got '{raw}'") + + if ptype == "bool": + low = raw.lower() + if low in ("true", "1", "yes"): + return True + if low in ("false", "0", "no"): + return False + raise ValueError(f"expected bool for {schema.param}, got '{raw}'") + + if ptype == "str": + return raw + + if ptype == "list[int]": + stripped = raw.strip("[]() ") + if not stripped: + return [] + parts = [p.strip() for p in stripped.split(",")] + try: + return [int(p) for p in parts if p] + except ValueError: + raise ValueError(f"expected list[int] for {schema.param}, got '{raw}'") + + if ptype == "list[float]": + stripped = raw.strip("[]() ") + if not stripped: + return [] + parts = [p.strip() for p in stripped.split(",")] + try: + return [float(p) for p in parts if p] + except ValueError: + raise ValueError(f"expected list[float] for {schema.param}, got '{raw}'") + + if ptype == "list[str]": + stripped = raw.strip("[]() ") + if not stripped: + return [] + return [p.strip() for p in stripped.split(",") if p.strip()] + + raise ValueError(f"unsupported type '{ptype}' for {schema.param}") + + +def validate_value(value: object, schema: ParamSchema) -> list[str]: + errors: list[str] = [] + + if schema.range is not None: + lo, hi = schema.range + if isinstance(value, (int, float)): + if value < lo or value > hi: + errors.append(f"value {value} out of range [{lo}, {hi}] for {schema.param}") + + if schema.choices is not None: + str_val = str(value) + if str_val not in schema.choices: + errors.append(f"value '{str_val}' not in allowed choices {schema.choices} for {schema.param}") + + return errors + + +# --------------------------------------------------------------------------- +# Source-aware resolution +# --------------------------------------------------------------------------- + +@dataclass +class ResolvedParam: + param: str + value: object + default: object + source: str + schema: ParamSchema + + +def _coerce_toml_value(value: object, schema: ParamSchema) -> tuple[object, str | None]: + if schema.type == "int" and isinstance(value, str): + try: + return int(value), None + except ValueError: + return value, f"expected int for {schema.param}, got '{value}'" + if schema.type == "float" and isinstance(value, (int, float)): + return float(value), None + if schema.type == "float" and isinstance(value, str): + try: + return float(value), None + except ValueError: + return value, f"expected float for {schema.param}, got '{value}'" + return value, None + + +def resolve_parameters( + toml_overrides: dict[str, object] | None = 
None, + cli_overrides: dict[str, object] | None = None, +) -> tuple[list[ResolvedParam], list[str]]: + toml_overrides = toml_overrides or {} + cli_overrides = cli_overrides or {} + resolved: list[ResolvedParam] = [] + errors: list[str] = [] + + for schema in PARAM_REGISTRY: + key = schema.param + if key in cli_overrides: + value = cli_overrides[key] + val_errors = validate_value(value, schema) + if val_errors: + errors.extend(val_errors) + resolved.append(ResolvedParam( + param=key, value=value, default=schema.default, + source="cli", schema=schema, + )) + elif key in toml_overrides: + value = toml_overrides[key] + value, coerce_err = _coerce_toml_value(value, schema) + if coerce_err: + errors.append(coerce_err) + val_errors = validate_value(value, schema) + if val_errors: + errors.extend(val_errors) + resolved.append(ResolvedParam( + param=key, value=value, default=schema.default, + source="ecc.toml", schema=schema, + )) + else: + resolved.append(ResolvedParam( + param=key, value=schema.default, default=schema.default, + source="default", schema=schema, + )) + + return resolved, errors + + +# --------------------------------------------------------------------------- +# Semantic-to-backend mapping +# --------------------------------------------------------------------------- + +def build_backend_overrides(resolved: list[ResolvedParam]) -> dict: + overrides: dict = {} + for rp in resolved: + if rp.value == rp.default and rp.source == "default": + continue + maps_to = rp.schema.maps_to + value = rp.value + if isinstance(maps_to, str): + overrides[maps_to] = value + elif isinstance(maps_to, dict): + for parent_key, child_key in maps_to.items(): + if parent_key not in overrides: + overrides[parent_key] = {} + overrides[parent_key][child_key] = value + return overrides + + +def parse_cli_overrides(pairs: list[str]) -> tuple[dict[str, object], list[str]]: + result: dict[str, object] = {} + errors: list[str] = [] + + for pair in pairs: + if "=" not in pair: + errors.append(f"malformed override: '{pair}' (expected key=value)") + continue + + key, _, raw_value = pair.partition("=") + key = key.strip() + raw_value = raw_value.strip() + + schema = lookup_schema(key) + if schema is None: + errors.append(f"unknown parameter: '{key}'") + continue + + try: + value = parse_value(raw_value, schema) + except ValueError as exc: + errors.append(str(exc)) + continue + + val_errors = validate_value(value, schema) + if val_errors: + errors.extend(val_errors) + continue + + result[key] = value + + return result, errors + + +def parse_toml_params(params_table: dict) -> tuple[dict[str, object], list[str]]: + flat: dict[str, object] = {} + errors: list[str] = [] + + for group_key, group_val in params_table.items(): + if not isinstance(group_val, dict): + errors.append(f"[params.{group_key}] must be a table, got {type(group_val).__name__}") + continue + + for name_key, value in group_val.items(): + param_key = f"{group_key}.{name_key}" + schema = lookup_schema(param_key) + if schema is None: + errors.append(f"unknown parameter in ecc.toml: '{param_key}'") + continue + + try: + if isinstance(value, str): + parsed = parse_value(value, schema) + else: + parsed = value + except ValueError as exc: + errors.append(str(exc)) + continue + + val_errors = validate_value(parsed, schema) + if val_errors: + errors.extend(val_errors) + continue + + flat[param_key] = parsed + + return flat, errors diff --git a/chipcompiler/cli/render.py b/chipcompiler/cli/render.py index 30b97f9e..cc443f4d 100644 --- a/chipcompiler/cli/render.py 
+++ b/chipcompiler/cli/render.py @@ -31,10 +31,23 @@ def render_jsonl(result: CommandResult, file=None) -> None: print(json.dumps(record, ensure_ascii=False), file=target) +def render_plain(records: tuple[dict, ...], file=None) -> None: + target = file or sys.stdout + for record in records: + parts = [] + for key, value in record.items(): + if value is None: + continue + parts.append(f"{key}={value}") + print(" ".join(parts), file=target) + + def render_result(result: CommandResult, mode: OutputMode, file=None) -> None: if mode == OutputMode.JSON: render_json(result, file=file) elif mode == OutputMode.JSONL: render_jsonl(result, file=file) + elif mode == OutputMode.PLAIN: + render_plain(result.records, file=file) else: render_text(result.records, file=file) diff --git a/chipcompiler/cli/types.py b/chipcompiler/cli/types.py index 7b9d3916..0ad02cce 100644 --- a/chipcompiler/cli/types.py +++ b/chipcompiler/cli/types.py @@ -4,6 +4,7 @@ class OutputMode(Enum): TEXT = "text" + PLAIN = "plain" JSON = "json" JSONL = "jsonl" diff --git a/test/cli/test_cli_params.py b/test/cli/test_cli_params.py new file mode 100644 index 00000000..05b7d318 --- /dev/null +++ b/test/cli/test_cli_params.py @@ -0,0 +1,379 @@ +import json +import os + +from chipcompiler.cli import main as cli_main + + +def _create_valid_project(tmp_path, name="gcd", pdk_root=None): + project_dir = tmp_path / name + project_dir.mkdir(exist_ok=True) + (project_dir / "rtl").mkdir(exist_ok=True) + (project_dir / "constraints").mkdir(exist_ok=True) + (project_dir / "runs").mkdir(exist_ok=True) + + rtl_file = project_dir / "rtl" / "gcd.v" + rtl_file.write_text("module gcd(input clk); endmodule\n") + + if pdk_root is None: + pdk_root = tmp_path / "ics55" + pdk_root.mkdir(exist_ok=True) + + toml = f'''[design] +name = "{name}" +top = "{name}" +rtl = ["rtl/gcd.v"] +clock_port = "clk" +frequency_mhz = 100.0 + +[pdk] +name = "ics55" +root = "{pdk_root}" + +[flow] +preset = "rtl2gds" +run = "default" +''' + (project_dir / "ecc.toml").write_text(toml) + return str(project_dir) + + +class TestParamList: + def test_param_list_text_output(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + rc = cli_main.run(["param", "list", "--project", project_dir]) + assert rc == 0 + out = capsys.readouterr().out + assert "place.target_density" in out + + def test_param_list_json(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + rc = cli_main.run(["param", "list", "--project", project_dir, "--json"]) + assert rc == 0 + data = json.loads(capsys.readouterr().out) + assert "records" in data + params = [r["param"] for r in data["records"]] + assert "place.target_density" in params + + def test_param_list_jsonl(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + rc = cli_main.run(["param", "list", "--project", project_dir, "--jsonl"]) + assert rc == 0 + lines = capsys.readouterr().out.strip().split("\n") + objects = [json.loads(ln) for ln in lines] + assert len(objects) == 12 + + def test_param_list_plain(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + rc = cli_main.run(["param", "list", "--project", project_dir, "--plain"]) + assert rc == 0 + out = capsys.readouterr().out + assert "\033[" not in out + assert "place.target_density" in out + + +class TestParamShow: + def test_param_show_known_key(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + rc = cli_main.run(["param", "show", "place.target_density", "--project", project_dir]) + assert rc == 0 + 
out = capsys.readouterr().out + assert "place.target_density" in out + + def test_param_show_json(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + rc = cli_main.run(["param", "show", "place.target_density", "--project", project_dir, "--json"]) + assert rc == 0 + data = json.loads(capsys.readouterr().out) + record = data["records"][0] + assert record["param"] == "place.target_density" + assert record["default"] == 0.3 + assert "source" in record + assert "maps_to" in record + + def test_param_show_unknown_key(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + rc = cli_main.run(["param", "show", "unknown.key", "--project", project_dir]) + assert rc == 1 + + +class TestParamSet: + def test_param_set_writes_toml(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + rc = cli_main.run(["param", "set", "place.target_density", "0.65", "--project", project_dir]) + assert rc == 0 + + toml_path = os.path.join(project_dir, "ecc.toml") + with open(toml_path) as f: + content = f.read() + assert "target_density" in content + assert "0.65" in content + + def test_param_set_then_show(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + cli_main.run(["param", "set", "place.target_density", "0.65", "--project", project_dir]) + capsys.readouterr() # flush set output + + rc = cli_main.run(["param", "show", "place.target_density", "--project", project_dir, "--json"]) + assert rc == 0 + data = json.loads(capsys.readouterr().out) + record = data["records"][0] + assert record["value"] == 0.65 + assert record["source"] == "ecc.toml" + + def test_param_set_rejects_unknown_key(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + rc = cli_main.run(["param", "set", "bogus.key", "5", "--project", project_dir]) + assert rc == 1 + + def test_param_set_rejects_invalid_value(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + rc = cli_main.run(["param", "set", "place.target_density", "1.5", "--project", project_dir]) + assert rc == 1 + + def test_param_set_preserves_other_sections(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + cli_main.run(["param", "set", "synth.max_fanout", "16", "--project", project_dir]) + + toml_path = os.path.join(project_dir, "ecc.toml") + with open(toml_path) as f: + content = f.read() + assert "[design]" in content + assert "[pdk]" in content + assert "[flow]" in content + + +class TestParamUnset: + def test_param_unset_removes_override(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + cli_main.run(["param", "set", "place.target_density", "0.65", "--project", project_dir]) + capsys.readouterr() # flush set output + + rc = cli_main.run(["param", "unset", "place.target_density", "--project", project_dir]) + assert rc == 0 + capsys.readouterr() # flush unset output + + rc = cli_main.run(["param", "show", "place.target_density", "--project", project_dir, "--json"]) + assert rc == 0 + data = json.loads(capsys.readouterr().out) + record = data["records"][0] + assert record["source"] == "default" + + def test_param_unset_noop_when_absent(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + rc = cli_main.run(["param", "unset", "place.target_density", "--project", project_dir]) + assert rc == 0 + out = capsys.readouterr().out + assert "no_override" in out + + +class TestParamDiff: + def test_param_diff_shows_overrides(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + 
cli_main.run(["param", "set", "place.target_density", "0.65", "--project", project_dir]) + capsys.readouterr() # flush set output + + rc = cli_main.run(["param", "diff", "--project", project_dir, "--json"]) + assert rc == 0 + data = json.loads(capsys.readouterr().out) + records = data["records"] + assert len(records) == 1 + assert records[0]["param"] == "place.target_density" + + def test_param_diff_clean_when_no_overrides(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + rc = cli_main.run(["param", "diff", "--project", project_dir, "--json"]) + assert rc == 0 + data = json.loads(capsys.readouterr().out) + assert data["records"][0].get("diff_status") == "clean" + + +class TestRunSet: + def test_run_set_override(self, tmp_path, monkeypatch, capsys): + from types import SimpleNamespace + + project_dir = _create_valid_project(tmp_path) + workspace_obj = SimpleNamespace(name="workspace") + capture = {"kwargs": None} + + def fake_create(**kwargs): + capture["kwargs"] = kwargs + return workspace_obj + + monkeypatch.setattr("chipcompiler.data.create_workspace", fake_create) + monkeypatch.setattr("chipcompiler.engine.EngineFlow", type( + "DummyFlow", (), { + "__init__": lambda self, workspace: None, + "has_init": lambda self: False, + "add_step": lambda self, **kw: None, + "create_step_workspaces": lambda self: None, + "run_steps": lambda self: True, + }, + )) + monkeypatch.setattr( + "chipcompiler.rtl2gds.build_rtl2gds_flow", + lambda: [("Synthesis", "yosys", "Unstart")], + ) + monkeypatch.setattr( + "chipcompiler.cli.config._validate_pdk_contents", + lambda name, root: None, + ) + monkeypatch.setattr( + "chipcompiler.cli.progress.should_enable_run_progress", + lambda *a, **kw: False, + ) + + rc = cli_main.run([ + "run", "--project", project_dir, + "--set", "place.target_density=0.65", + ]) + assert rc == 0 + + params = capture["kwargs"]["parameters"] + assert params.get("Target density") == 0.65 + + def test_run_set_rejects_unknown_key(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + rc = cli_main.run([ + "run", "--project", project_dir, + "--set", "bogus.key=5", + ]) + assert rc == 1 + + def test_run_set_rejects_invalid_value(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + rc = cli_main.run([ + "run", "--project", project_dir, + "--set", "place.target_density=1.5", + ]) + assert rc == 1 + + def test_run_set_does_not_modify_toml(self, tmp_path, monkeypatch, capsys): + from types import SimpleNamespace + + project_dir = _create_valid_project(tmp_path) + toml_path = os.path.join(project_dir, "ecc.toml") + with open(toml_path) as f: + original_toml = f.read() + + workspace_obj = SimpleNamespace(name="workspace") + + def fake_create(**kwargs): + return workspace_obj + + monkeypatch.setattr("chipcompiler.data.create_workspace", fake_create) + monkeypatch.setattr("chipcompiler.engine.EngineFlow", type( + "DummyFlow", (), { + "__init__": lambda self, workspace: None, + "has_init": lambda self: False, + "add_step": lambda self, **kw: None, + "create_step_workspaces": lambda self: None, + "run_steps": lambda self: True, + }, + )) + monkeypatch.setattr( + "chipcompiler.rtl2gds.build_rtl2gds_flow", + lambda: [("Synthesis", "yosys", "Unstart")], + ) + monkeypatch.setattr( + "chipcompiler.cli.config._validate_pdk_contents", + lambda name, root: None, + ) + monkeypatch.setattr( + "chipcompiler.cli.progress.should_enable_run_progress", + lambda *a, **kw: False, + ) + + cli_main.run([ + "run", "--project", project_dir, + "--set", 
"place.target_density=0.65", + ]) + + with open(toml_path) as f: + current_toml = f.read() + assert current_toml == original_toml + + +class TestOutputContracts: + def test_plain_no_ansi(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + rc = cli_main.run(["param", "list", "--project", project_dir, "--plain"]) + assert rc == 0 + out = capsys.readouterr().out + assert "\033[" not in out + + def test_json_no_ansi(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + rc = cli_main.run(["param", "list", "--project", project_dir, "--json"]) + assert rc == 0 + out = capsys.readouterr().out + assert "\033[" not in out + + def test_jsonl_no_ansi(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + rc = cli_main.run(["param", "list", "--project", project_dir, "--jsonl"]) + assert rc == 0 + out = capsys.readouterr().out + assert "\033[" not in out + + def test_json_uses_records_envelope(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + rc = cli_main.run(["param", "list", "--project", project_dir, "--json"]) + assert rc == 0 + data = json.loads(capsys.readouterr().out) + assert "records" in data + assert isinstance(data["records"], list) + + def test_plain_is_line_oriented(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + rc = cli_main.run(["param", "list", "--project", project_dir, "--plain"]) + assert rc == 0 + out = capsys.readouterr().out + lines = [l for l in out.strip().split("\n") if l.strip()] + assert len(lines) == 12 + + +class TestConfigResolved: + def test_config_resolved_includes_param_records(self, tmp_path, monkeypatch, capsys): + project_dir = _create_valid_project(tmp_path) + monkeypatch.setattr( + "chipcompiler.cli.config._validate_pdk_contents", + lambda name, root: None, + ) + run_dir = os.path.join(project_dir, "runs", "default") + home = os.path.join(run_dir, "home") + os.makedirs(home, exist_ok=True) + with open(os.path.join(home, "flow.json"), "w") as f: + json.dump({"steps": []}, f) + + rc = cli_main.run(["config", "--resolved", "--project", project_dir, "--json"]) + assert rc == 0 + data = json.loads(capsys.readouterr().out) + records = data["records"] + param_records = [r for r in records if r.get("kind") == "param"] + assert len(param_records) == 12 + first_param = param_records[0] + assert "source" in first_param + assert "maps_to" in first_param + + def test_config_resolved_shows_toml_source(self, tmp_path, monkeypatch, capsys): + project_dir = _create_valid_project(tmp_path) + monkeypatch.setattr( + "chipcompiler.cli.config._validate_pdk_contents", + lambda name, root: None, + ) + cli_main.run(["param", "set", "place.target_density", "0.65", "--project", project_dir]) + capsys.readouterr() # flush set output + + run_dir = os.path.join(project_dir, "runs", "default") + home = os.path.join(run_dir, "home") + os.makedirs(home, exist_ok=True) + with open(os.path.join(home, "flow.json"), "w") as f: + json.dump({"steps": []}, f) + + rc = cli_main.run(["config", "--resolved", "--project", project_dir, "--json"]) + assert rc == 0 + data = json.loads(capsys.readouterr().out) + param_records = [r for r in data["records"] if r.get("kind") == "param"] + density = next(r for r in param_records if r["key"] == "place.target_density") + assert density["value"] == 0.65 + assert density["source"] == "ecc.toml" diff --git a/test/cli/test_params.py b/test/cli/test_params.py new file mode 100644 index 00000000..1c0f61ad --- /dev/null +++ b/test/cli/test_params.py @@ -0,0 
+1,305 @@ +import pytest + +from chipcompiler.cli.params import ( + PARAM_REGISTRY, + ParamSchema, + ResolvedParam, + build_backend_overrides, + is_known_key, + list_groups, + list_schemas, + lookup_schema, + parse_cli_overrides, + parse_toml_params, + parse_value, + resolve_parameters, + validate_schema_record, + validate_value, +) + +REQUIRED_KEYS = [ + "design.frequency_mhz", + "floorplan.core_util", + "floorplan.core_margin", + "floorplan.aspect_ratio", + "synth.max_fanout", + "place.target_density", + "place.target_overflow", + "place.global_right_padding", + "place.cell_padding_x", + "place.routability_opt", + "route.bottom_layer", + "route.top_layer", +] + + +class TestSchemaRegistry: + def test_registry_contains_all_required_keys(self): + params = {s.param for s in PARAM_REGISTRY} + for key in REQUIRED_KEYS: + assert key in params, f"Missing key: {key}" + + def test_every_record_has_required_metadata(self): + required = ("param", "group", "name", "type", "default", "applies", "maps_to", "description") + for schema in PARAM_REGISTRY: + for field_name in required: + val = getattr(schema, field_name, None) + assert val is not None and val != "", ( + f"{schema.param} missing required field: {field_name}" + ) + + def test_optional_fields_present_when_relevant(self): + for schema in PARAM_REGISTRY: + if schema.type in ("float", "int") and schema.choices is None: + assert schema.range is not None, ( + f"{schema.param}: numeric param without range or choices should have range" + ) + + def test_cli_keys_map_to_backend_names(self): + density = lookup_schema("place.target_density") + assert density.maps_to == "Target density" + + fanout = lookup_schema("synth.max_fanout") + assert fanout.maps_to == "Max fanout" + + util = lookup_schema("floorplan.core_util") + assert util.maps_to == {"Core": "Utilitization"} + + def test_internal_keys_not_accepted_as_cli_keys(self): + assert not is_known_key("Core.Utilitization") + assert not is_known_key("Target density") + assert not is_known_key("Max fanout") + assert not is_known_key("Frequency max [MHz]") + + def test_schema_record_missing_required_fields_rejected(self): + bad = ParamSchema( + param="", group="", name="", type="int", default=0, + applies="", maps_to="", description="", + ) + errors = validate_schema_record(bad) + assert len(errors) > 0 + + def test_lookup_schema_returns_none_for_unknown(self): + assert lookup_schema("nonexistent.key") is None + + def test_list_groups_returns_ordered_groups(self): + groups = list_groups() + assert "design" in groups + assert "floorplan" in groups + assert "synth" in groups + assert "place" in groups + assert "route" in groups + + +class TestValueParsing: + @pytest.mark.parametrize("raw,ptype,expected", [ + ("0.65", "float", 0.65), + ("42", "int", 42), + ("true", "bool", True), + ("false", "bool", False), + ("MET5", "str", "MET5"), + ("1.5,2.5", "list[float]", [1.5, 2.5]), + ("1,2,3", "list[int]", [1, 2, 3]), + ("a,b,c", "list[str]", ["a", "b", "c"]), + ]) + def test_parse_value_correct_types(self, raw, ptype, expected): + schema = lookup_schema("place.target_density") + schema = ParamSchema( + param="test", group="test", name="test", type=ptype, + default=None, applies="test", maps_to="test", description="test", + ) + result = parse_value(raw, schema) + assert result == expected + + def test_parse_int_rejects_alpha(self): + schema = ParamSchema( + param="test", group="test", name="test", type="int", + default=0, applies="test", maps_to="test", description="test", + ) + with pytest.raises(ValueError, 
match="expected int"): + parse_value("abc", schema) + + def test_parse_float_rejects_alpha(self): + schema = ParamSchema( + param="test", group="test", name="test", type="float", + default=0.0, applies="test", maps_to="test", description="test", + ) + with pytest.raises(ValueError, match="expected float"): + parse_value("not_a_number", schema) + + def test_range_validation_rejects_out_of_bounds(self): + schema = lookup_schema("place.target_density") + errors = validate_value(1.2, schema) + assert len(errors) > 0 + assert "out of range" in errors[0] + + def test_range_validation_accepts_in_bounds(self): + schema = lookup_schema("place.target_density") + errors = validate_value(0.5, schema) + assert errors == [] + + def test_choice_validation_rejects_invalid(self): + schema = lookup_schema("route.top_layer") + errors = validate_value("MET99", schema) + assert len(errors) > 0 + assert "not in allowed choices" in errors[0] + + def test_choice_validation_accepts_valid(self): + schema = lookup_schema("route.top_layer") + errors = validate_value("MET5", schema) + assert errors == [] + + def test_unknown_key_returns_error_in_cli_overrides(self): + result, errors = parse_cli_overrides(["unknown.key=5"]) + assert len(errors) > 0 + assert "unknown parameter" in errors[0] + + def test_malformed_key_value_rejected(self): + result, errors = parse_cli_overrides(["no_equals_sign"]) + assert len(errors) > 0 + assert "malformed" in errors[0] + + def test_out_of_range_value_rejected(self): + result, errors = parse_cli_overrides(["place.target_density=1.2"]) + assert len(errors) > 0 + + def test_type_mismatch_rejected(self): + result, errors = parse_cli_overrides(["synth.max_fanout=abc"]) + assert len(errors) > 0 + + +class TestSourceAwareResolution: + def test_default_source_when_no_overrides(self): + resolved, errors = resolve_parameters() + assert len(errors) == 0 + for rp in resolved: + assert rp.source == "default" + + def test_toml_override_source(self): + toml = {"place.target_density": 0.65} + resolved, errors = resolve_parameters(toml_overrides=toml) + assert errors == [] + density = next(r for r in resolved if r.param == "place.target_density") + assert density.value == 0.65 + assert density.source == "ecc.toml" + + def test_cli_override_source(self): + cli = {"place.target_density": 0.7} + resolved, errors = resolve_parameters(cli_overrides=cli) + assert errors == [] + density = next(r for r in resolved if r.param == "place.target_density") + assert density.value == 0.7 + assert density.source == "cli" + + def test_cli_beats_toml(self): + toml = {"place.target_density": 0.65} + cli = {"place.target_density": 0.7} + resolved, errors = resolve_parameters(toml_overrides=toml, cli_overrides=cli) + assert errors == [] + density = next(r for r in resolved if r.param == "place.target_density") + assert density.value == 0.7 + assert density.source == "cli" + + def test_invalid_toml_type_produces_error(self): + toml = {"synth.max_fanout": "not_int"} + resolved, errors = resolve_parameters(toml_overrides=toml) + assert len(errors) > 0 + + +class TestBackendMapping: + def test_flat_key_mapping(self): + schema = lookup_schema("place.target_density") + rp = ResolvedParam( + param="place.target_density", value=0.65, default=0.3, + source="cli", schema=schema, + ) + result = build_backend_overrides([rp]) + assert result == {"Target density": 0.65} + + def test_nested_key_mapping(self): + schema = lookup_schema("floorplan.core_util") + rp = ResolvedParam( + param="floorplan.core_util", value=0.45, default=0.4, + 
source="cli", schema=schema, + ) + result = build_backend_overrides([rp]) + assert result == {"Core": {"Utilitization": 0.45}} + + def test_nested_list_mapping(self): + schema = lookup_schema("floorplan.core_margin") + rp = ResolvedParam( + param="floorplan.core_margin", value=(3, 3), default=(2, 2), + source="cli", schema=schema, + ) + result = build_backend_overrides([rp]) + assert result == {"Core": {"Margin": (3, 3)}} + + def test_string_key_mapping(self): + schema = lookup_schema("route.top_layer") + rp = ResolvedParam( + param="route.top_layer", value="MET4", default="MET5", + source="cli", schema=schema, + ) + result = build_backend_overrides([rp]) + assert result == {"Top layer": "MET4"} + + def test_default_values_excluded(self): + resolved, _ = resolve_parameters() + result = build_backend_overrides(resolved) + assert result == {} + + def test_mapping_does_not_mutate_schema_defaults(self): + schema = lookup_schema("place.target_density") + original_default = schema.default + rp = ResolvedParam( + param="place.target_density", value=0.65, default=original_default, + source="cli", schema=schema, + ) + build_backend_overrides([rp]) + assert schema.default == original_default + + +class TestCliOverrides: + def test_repeatable_set(self): + result, errors = parse_cli_overrides([ + "place.target_density=0.65", + "synth.max_fanout=16", + ]) + assert errors == [] + assert result == {"place.target_density": 0.65, "synth.max_fanout": 16} + + def test_malformed_rejected(self): + result, errors = parse_cli_overrides(["noequals"]) + assert len(errors) > 0 + + def test_unknown_key_rejected(self): + result, errors = parse_cli_overrides(["bogus.key=5"]) + assert len(errors) > 0 + + def test_raw_backend_key_rejected(self): + result, errors = parse_cli_overrides(["Target density=0.5"]) + assert len(errors) > 0 + + def test_invalid_value_does_not_produce_override(self): + result, errors = parse_cli_overrides(["place.target_density=1.5"]) + assert "place.target_density" not in result + assert len(errors) > 0 + + +class TestTomlParams: + def test_flat_toml_parsing(self): + table = {"place": {"target_density": 0.65}} + flat, errors = parse_toml_params(table) + assert errors == [] + assert flat == {"place.target_density": 0.65} + + def test_unknown_toml_key_rejected(self): + table = {"bogus": {"key": 5}} + flat, errors = parse_toml_params(table) + assert len(errors) > 0 + assert "unknown parameter" in errors[0] + + def test_non_table_toml_section_rejected(self): + table = {"place": "not_a_table"} + flat, errors = parse_toml_params(table) + assert len(errors) > 0 From 1821eb784a1d7c453d525c384c0ba6e272ebd65f Mon Sep 17 00:00:00 2001 From: Emin Date: Sun, 3 May 2026 22:49:39 +0800 Subject: [PATCH 063/104] fix(cli): address param validation, pretty output, resolved list, and safe TOML edits Fixes 4 blocking gaps from Round 0 Codex review: 1. TOML validation errors preserved and surfaced: - ProjectConfig now stores _param_errors from parse_toml_params() - validate_project_config() appends param errors as config errors - ecc check/run/config fail with structured errors on invalid [params.*] - Strict TOML type checking: int rejects bool/float, float accepts int|float, list types validate element types 2. Default param text output is grouped pretty, not flat key=value: - param list: grouped by section (design, floorplan, synth, place, route) - param show: focused multi-line block with all fields - param set/unset/diff: readable status blocks - --plain still provides stable one-line-per-record output 3. 
ecc param list reports resolved values and sources: - Built from resolve_parameters(toml_overrides=...) instead of raw schemas - Records include value, default, source, maps_to, inspect disclosure 4. Safe scoped TOML edits: - param set/unset use regex-based scoped edits preserving original formatting, comments, and section ordering outside [params.*] - param diff filters by value != default, not source != default --- chipcompiler/cli/config.py | 9 +- chipcompiler/cli/main.py | 32 +++- chipcompiler/cli/param_handler.py | 296 ++++++++++++++++++++---------- chipcompiler/cli/params.py | 71 +++++-- test/cli/test_cli_params.py | 167 ++++++++++++++++- test/cli/test_params.py | 33 ++++ 6 files changed, 492 insertions(+), 116 deletions(-) diff --git a/chipcompiler/cli/config.py b/chipcompiler/cli/config.py index fa5e0be5..d81957ab 100644 --- a/chipcompiler/cli/config.py +++ b/chipcompiler/cli/config.py @@ -84,8 +84,10 @@ def _str(val, default=""): params_raw = data.get("params") if isinstance(params_raw, dict): from chipcompiler.cli.params import parse_toml_params - flat, _ = parse_toml_params(params_raw) + flat, param_errors = parse_toml_params(params_raw) cfg.params_overrides = flat + if param_errors: + cfg._param_errors = param_errors return cfg @@ -108,6 +110,11 @@ def validate_project_config(cfg: ProjectConfig) -> list[str]: errors = [] + param_errors = getattr(cfg, "_param_errors", None) + if param_errors: + for pe in param_errors: + errors.append(f"invalid params: {pe}") + if not cfg.design_name: errors.append("design.name is required") if not cfg.design_top: diff --git a/chipcompiler/cli/main.py b/chipcompiler/cli/main.py index 4c80c2be..e2c6c0c5 100644 --- a/chipcompiler/cli/main.py +++ b/chipcompiler/cli/main.py @@ -4,6 +4,7 @@ from chipcompiler.cli.commands import build_context, dispatch from chipcompiler.cli.render import render_result +from chipcompiler.cli.types import OutputMode def build_parser() -> argparse.ArgumentParser: @@ -132,6 +133,30 @@ def _add_project_arg(parser: argparse.ArgumentParser) -> None: help="Project directory (default: current directory)") +def _render_param_text(args, result) -> None: + from chipcompiler.cli.param_handler import ( + render_param_diff_text, + render_param_list_text, + render_param_set_text, + render_param_show_text, + ) + if result.exit_code != 0: + render_result(result, OutputMode.PLAIN) + return + + subcmd = getattr(args, "param_command", None) + if subcmd == "list": + render_param_list_text(result.records) + elif subcmd == "show": + render_param_show_text(result.records) + elif subcmd in ("set", "unset"): + render_param_set_text(result.records) + elif subcmd == "diff": + render_param_diff_text(result.records) + else: + render_result(result, OutputMode.PLAIN) + + def run(argv: Sequence[str] | None = None) -> int: parser = build_parser() args = parser.parse_args(list(argv) if argv is not None else None) @@ -142,7 +167,12 @@ def run(argv: Sequence[str] | None = None) -> int: ctx = build_context(args) result = dispatch(args, ctx) - render_result(result, ctx.output_mode) + + if args.command == "param" and ctx.output_mode == OutputMode.TEXT: + _render_param_text(args, result) + else: + render_result(result, ctx.output_mode) + return result.exit_code diff --git a/chipcompiler/cli/param_handler.py b/chipcompiler/cli/param_handler.py index 977861a6..2ae4a6cb 100644 --- a/chipcompiler/cli/param_handler.py +++ b/chipcompiler/cli/param_handler.py @@ -1,7 +1,7 @@ from __future__ import annotations import os -import tomllib +import re from 
chipcompiler.cli.output import disclosure_cmd from chipcompiler.cli.params import ( @@ -17,14 +17,38 @@ validate_value, ) from chipcompiler.cli.records import error_record -from chipcompiler.cli.types import CommandContext, CommandResult +from chipcompiler.cli.types import CommandContext, CommandResult, OutputMode def param_list(args, ctx: CommandContext) -> CommandResult: - schemas = list_schemas() + toml_overrides = _load_toml_overrides(ctx.project_dir) + resolved, _ = resolve_parameters(toml_overrides=toml_overrides) + project = ctx.project + records = [] - for s in schemas: - records.append(_schema_to_record(s)) + for rp in resolved: + s = rp.schema + record = { + "param": s.param, + "group": s.group, + "name": s.name, + "value": rp.value, + "default": s.default, + "source": rp.source, + "type": s.type, + "applies": s.applies, + "maps_to": _maps_to_str(s.maps_to), + "description": s.description, + "inspect": disclosure_cmd(f"ecc param show {s.param}", project), + } + if s.range is not None: + record["range"] = f"[{s.range[0]}, {s.range[1]}]" + if s.choices is not None: + record["choices"] = ", ".join(s.choices) + if s.unit is not None: + record["unit"] = s.unit + records.append(record) + return CommandResult.ok(records) @@ -145,7 +169,7 @@ def param_diff(args, ctx: CommandContext) -> CommandResult: records = [] for rp in resolved: - if rp.source != "default": + if rp.value != rp.default: records.append({ "param": rp.param, "value": rp.value, @@ -160,27 +184,93 @@ def param_diff(args, ctx: CommandContext) -> CommandResult: # --------------------------------------------------------------------------- -# Internal helpers +# Pretty rendering for param commands # --------------------------------------------------------------------------- -def _schema_to_record(schema): - record = { - "param": schema.param, - "group": schema.group, - "type": schema.type, - "default": schema.default, - "applies": schema.applies, - "description": schema.description, - } - if schema.range is not None: - record["range"] = f"[{schema.range[0]}, {schema.range[1]}]" - if schema.choices is not None: - record["choices"] = ", ".join(schema.choices) - if schema.unit is not None: - record["unit"] = schema.unit - return record +def render_param_result(result, mode: OutputMode, file=None) -> bool: + """Render param-specific output. 
Returns True if handled, False otherwise.""" + import sys + target = file or sys.stdout + + if mode == OutputMode.JSON: + from chipcompiler.cli.render import render_json + render_json(result, file=target) + return True + if mode == OutputMode.JSONL: + from chipcompiler.cli.render import render_jsonl + render_jsonl(result, file=target) + return True + if mode == OutputMode.PLAIN: + from chipcompiler.cli.render import render_plain + render_plain(result.records, file=target) + return True + + return False + + +def render_param_list_text(records, file=None): + import sys + target = file or sys.stdout + groups: dict[str, list] = {} + for r in records: + g = r.get("group", "") + groups.setdefault(g, []).append(r) + + for group_name, group_records in groups.items(): + print(f" {group_name}", file=target) + for r in group_records: + val = r.get("value") + src = r.get("source", "default") + line = f" {r['param']:30s} {val}" + if src != "default": + line += f" ({src})" + print(line, file=target) + + +def render_param_show_text(records, file=None): + import sys + target = file or sys.stdout + r = records[0] + + print(f" {r['param']}", file=target) + for field in ("value", "default", "source", "type", "applies", + "maps_to", "description", "range", "choices", "unit"): + val = r.get(field) + if val is not None: + label = field.replace("_", " ") + print(f" {label:14s} {val}", file=target) + + +def render_param_set_text(records, file=None): + import sys + target = file or sys.stdout + r = records[0] + status = r.get("status", "") + if status == "set": + print(f" set {r['param']} = {r['value']} (ecc.toml)", file=target) + elif status == "no_override": + print(f" {r['param']}: no override to remove", file=target) + elif status == "unset": + print(f" unset {r['param']} (now default: {r['value']})", file=target) + else: + from chipcompiler.cli.render import render_text + render_text(records, file=target) + + +def render_param_diff_text(records, file=None): + import sys + target = file or sys.stdout + if len(records) == 1 and records[0].get("diff_status") == "clean": + print(" No overrides.", file=target) + return + for r in records: + print(f" {r['param']:30s} {r['value']} (was {r['default']}, {r['source']})", file=target) +# --------------------------------------------------------------------------- +# Internal helpers +# --------------------------------------------------------------------------- + def _maps_to_str(maps_to): if isinstance(maps_to, str): return maps_to @@ -206,99 +296,105 @@ def _load_toml_overrides(project_dir: str) -> dict[str, object]: def _write_param_to_toml(config_path: str, key: str, value: object) -> None: group, _, name = key.partition(".") - with open(config_path, "rb") as f: - data = tomllib.load(f) + with open(config_path, "r") as f: + original = f.read() - params = data.get("params", {}) - if not isinstance(params, dict): - params = {} + new_text = _apply_scoped_param_edit(original, group, name, value) - group_table = params.get(group, {}) - if not isinstance(group_table, dict): - group_table = {} - group_table[name] = value - params[group] = group_table - data["params"] = params - - _write_toml_data(config_path, data) + with open(config_path, "w") as f: + f.write(new_text) def _remove_param_from_toml(config_path: str, key: str) -> bool: group, _, name = key.partition(".") - with open(config_path, "rb") as f: - data = tomllib.load(f) + with open(config_path, "r") as f: + original = f.read() - params = data.get("params") - if not isinstance(params, dict): + result = 
_remove_scoped_param_key(original, group, name) + if result is None: return False - group_table = params.get(group) - if not isinstance(group_table, dict): - return False - - if name not in group_table: - return False - - del group_table[name] - if not group_table: - del params[group] - if not params: - del data["params"] - - _write_toml_data(config_path, data) + with open(config_path, "w") as f: + f.write(result) return True -def _write_toml_data(path: str, data: dict) -> None: - lines = [] - _serialize_toml(data, lines, []) - with open(path, "w") as f: - f.write("\n".join(lines)) - if lines: - f.write("\n") +def _apply_scoped_param_edit(text: str, group: str, name: str, value: object) -> str: + value_str = _format_toml_value(value) + line_re = re.compile(rf"^(\s*){re.escape(name)}\s*=") + section_pattern = re.compile(rf"^\[params\.{re.escape(group)}\]\s*$", re.MULTILINE) + m = section_pattern.search(text) -def _serialize_toml(data: dict, lines: list[str], path: list[str]) -> None: - scalars: list[tuple[str, object]] = [] - tables: list[tuple[str, dict]] = [] - arrays: list[tuple[str, list]] = [] + if m: + section_start = m.start() + next_section = re.search(r"^\[", text[m.end():], re.MULTILINE) + section_end = m.end() + next_section.start() if next_section else len(text) + section_text = text[section_start:section_end] - for key in _toml_sort_keys(data): - val = data[key] - if isinstance(val, dict): - tables.append((key, val)) - elif isinstance(val, list): - arrays.append((key, val)) + key_match = line_re.search(section_text, re.MULTILINE) + if key_match: + prefix = key_match.group(1) + old_line = key_match.group(0) + new_line = f"{prefix}{name} = {value_str}" + updated_section = section_text[:key_match.start()] + new_line + section_text[key_match.end():] + return text[:section_start] + updated_section + text[section_end:] else: - scalars.append((key, val)) - - for key, val in scalars: - lines.append(f"{key} = {_toml_value(val)}") - - for key, val in arrays: - items = ", ".join(_toml_value(v) for v in val) - lines.append(f"{key} = [{items}]") - - for key, val in tables: - lines.append("") - header_path = path + [key] - lines.append(f"[{'.'.join(header_path)}]") - _serialize_toml(val, lines, header_path) - - -def _toml_sort_keys(data: dict) -> list[str]: - def sort_key(k): - v = data[k] - if isinstance(v, dict): - return (1, k) - if isinstance(v, list): - return (1, k) - return (0, k) - return sorted(data.keys(), key=sort_key) - - -def _toml_value(val: object) -> str: + last_newline = section_text.rstrip().rfind("\n") + if last_newline == -1: + insert_at = len(section_text) + else: + insert_at = last_newline + 1 + new_line = f"\n{name} = {value_str}" + updated_section = section_text[:insert_at] + new_line + "\n" + section_text[insert_at:] + return text[:section_start] + updated_section + text[section_end:] + + params_section = re.search(r"^\[params\]\s*$", text, re.MULTILINE) + if params_section: + insert_text = f"\n\n[params.{group}]\n{name} = {value_str}" + after_params = params_section.end() + next_sec = re.search(r"^\[", text[after_params:], re.MULTILINE) + if next_sec: + insert_at = after_params + next_sec.start() + return text[:insert_at] + insert_text + "\n" + text[insert_at:] + else: + return text + insert_text + "\n" + + return text.rstrip() + f"\n\n[params.{group}]\n{name} = {value_str}\n" + + +def _remove_scoped_param_key(text: str, group: str, name: str) -> str | None: + section_pattern = re.compile(rf"^\[params\.{re.escape(group)}\]\s*$", re.MULTILINE) + m = 
section_pattern.search(text) + if not m: + return None + + section_start = m.start() + next_section = re.search(r"^\[", text[m.end():], re.MULTILINE) + section_end = m.end() + next_section.start() if next_section else len(text) + section_text = text[section_start:section_end] + + line_re = re.compile(rf"^\s*{re.escape(name)}\s*=[^\n]*\n?", re.MULTILINE) + line_match = line_re.search(section_text) + if not line_match: + return None + + remaining = section_text[:line_match.start()] + section_text[line_match.end():] + key_lines = [l for l in remaining.split("\n") + if l.strip() and not l.strip().startswith(f"[params.{group}")] + if not key_lines: + result = text[:section_start] + text[section_end:] + if result.endswith("\n\n"): + result = result[:-1] + return result + else: + header = f"[params.{group}]\n" + new_section = header + remaining[remaining.index("]") + 1:] + return text[:section_start] + new_section + text[section_end:] + + +def _format_toml_value(val: object) -> str: if isinstance(val, bool): return "true" if val else "false" if isinstance(val, int): @@ -309,6 +405,6 @@ def _toml_value(val: object) -> str: escaped = val.replace("\\", "\\\\").replace('"', '\\"') return f'"{escaped}"' if isinstance(val, (list, tuple)): - items = ", ".join(_toml_value(v) for v in val) + items = ", ".join(_format_toml_value(v) for v in val) return f"[{items}]" return str(val) diff --git a/chipcompiler/cli/params.py b/chipcompiler/cli/params.py index 2fd4a603..22834f86 100644 --- a/chipcompiler/cli/params.py +++ b/chipcompiler/cli/params.py @@ -288,19 +288,64 @@ class ResolvedParam: schema: ParamSchema -def _coerce_toml_value(value: object, schema: ParamSchema) -> tuple[object, str | None]: - if schema.type == "int" and isinstance(value, str): - try: - return int(value), None - except ValueError: - return value, f"expected int for {schema.param}, got '{value}'" - if schema.type == "float" and isinstance(value, (int, float)): - return float(value), None - if schema.type == "float" and isinstance(value, str): - try: +def _validate_toml_type(value: object, schema: ParamSchema) -> tuple[object, str | None]: + ptype = schema.type + key = schema.param + + if ptype == "int": + if isinstance(value, bool) or not isinstance(value, int): + return value, f"expected int for {key}, got {type(value).__name__}" + return value, None + + if ptype == "float": + if isinstance(value, bool): + return value, f"expected float for {key}, got bool" + if isinstance(value, (int, float)): return float(value), None - except ValueError: - return value, f"expected float for {schema.param}, got '{value}'" + return value, f"expected float for {key}, got {type(value).__name__}" + + if ptype == "bool": + if isinstance(value, bool): + return value, None + if isinstance(value, str): + low = value.lower() + if low in ("true", "1", "yes"): + return True, None + if low in ("false", "0", "no"): + return False, None + return value, f"expected bool for {key}, got {type(value).__name__}" + + if ptype == "str": + if isinstance(value, str): + return value, None + return value, f"expected str for {key}, got {type(value).__name__}" + + if ptype == "list[int]": + if not isinstance(value, list): + return value, f"expected list for {key}, got {type(value).__name__}" + for i, v in enumerate(value): + if isinstance(v, bool) or not isinstance(v, int): + return value, f"expected list[int] for {key}, element {i} is {type(v).__name__}" + return value, None + + if ptype == "list[float]": + if not isinstance(value, list): + return value, f"expected list for 
{key}, got {type(value).__name__}" + for i, v in enumerate(value): + if isinstance(v, bool): + return value, f"expected list[float] for {key}, element {i} is bool" + if not isinstance(v, (int, float)): + return value, f"expected list[float] for {key}, element {i} is {type(v).__name__}" + return [float(v) for v in value], None + + if ptype == "list[str]": + if not isinstance(value, list): + return value, f"expected list for {key}, got {type(value).__name__}" + for i, v in enumerate(value): + if not isinstance(v, str): + return value, f"expected list[str] for {key}, element {i} is {type(v).__name__}" + return value, None + return value, None @@ -326,7 +371,7 @@ def resolve_parameters( )) elif key in toml_overrides: value = toml_overrides[key] - value, coerce_err = _coerce_toml_value(value, schema) + value, coerce_err = _validate_toml_type(value, schema) if coerce_err: errors.append(coerce_err) val_errors = validate_value(value, schema) diff --git a/test/cli/test_cli_params.py b/test/cli/test_cli_params.py index 05b7d318..629d4820 100644 --- a/test/cli/test_cli_params.py +++ b/test/cli/test_cli_params.py @@ -163,7 +163,7 @@ def test_param_unset_noop_when_absent(self, tmp_path, capsys): rc = cli_main.run(["param", "unset", "place.target_density", "--project", project_dir]) assert rc == 0 out = capsys.readouterr().out - assert "no_override" in out + assert "no override" in out class TestParamDiff: @@ -377,3 +377,168 @@ def test_config_resolved_shows_toml_source(self, tmp_path, monkeypatch, capsys): density = next(r for r in param_records if r["key"] == "place.target_density") assert density["value"] == 0.65 assert density["source"] == "ecc.toml" + + +class TestTomlValidationErrors: + def _create_project_with_invalid_param(self, tmp_path): + project_dir = _create_valid_project(tmp_path) + toml_path = os.path.join(project_dir, "ecc.toml") + with open(toml_path) as f: + content = f.read() + content += '\n[params.synth]\nmax_fanout = "not_an_int"\n' + with open(toml_path, "w") as f: + f.write(content) + return project_dir + + def test_check_fails_invalid_param_type(self, tmp_path, capsys): + project_dir = self._create_project_with_invalid_param(tmp_path) + rc = cli_main.run(["check", "--project", project_dir, "--json"]) + assert rc == 1 + data = json.loads(capsys.readouterr().out) + reasons = [r.get("reason", "") for r in data["records"]] + assert any("params" in r for r in reasons) + + def test_check_fails_unknown_param_key(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + toml_path = os.path.join(project_dir, "ecc.toml") + with open(toml_path) as f: + content = f.read() + content += '\n[params.bogus]\nkey = 5\n' + with open(toml_path, "w") as f: + f.write(content) + rc = cli_main.run(["check", "--project", project_dir, "--json"]) + assert rc == 1 + + def test_run_fails_invalid_param_type(self, tmp_path): + project_dir = self._create_project_with_invalid_param(tmp_path) + rc = cli_main.run(["run", "--project", project_dir]) + assert rc == 1 + + +class TestPrettyOutput: + def test_param_list_default_is_grouped_text(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + rc = cli_main.run(["param", "list", "--project", project_dir]) + assert rc == 0 + out = capsys.readouterr().out + assert "place" in out + assert "place.target_density" in out + + def test_param_list_plain_is_one_line_per_record(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + rc = cli_main.run(["param", "list", "--project", project_dir, "--plain"]) + assert rc == 
0 + out = capsys.readouterr().out + lines = [l for l in out.strip().split("\n") if l.strip()] + assert len(lines) == 12 + assert "\033[" not in out + + def test_param_show_default_is_pretty(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + rc = cli_main.run(["param", "show", "place.target_density", "--project", project_dir]) + assert rc == 0 + out = capsys.readouterr().out + assert "place.target_density" in out + assert "source" in out + assert "default" in out + + def test_param_set_default_is_pretty(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + rc = cli_main.run(["param", "set", "place.target_density", "0.65", "--project", project_dir]) + assert rc == 0 + out = capsys.readouterr().out + assert "0.65" in out + + def test_param_diff_default_is_pretty(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + cli_main.run(["param", "set", "place.target_density", "0.65", "--project", project_dir]) + capsys.readouterr() + rc = cli_main.run(["param", "diff", "--project", project_dir]) + assert rc == 0 + out = capsys.readouterr().out + assert "place.target_density" in out + + +class TestResolvedListValues: + def test_param_list_json_has_value_and_source(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + cli_main.run(["param", "set", "place.target_density", "0.65", "--project", project_dir]) + capsys.readouterr() + + rc = cli_main.run(["param", "list", "--project", project_dir, "--json"]) + assert rc == 0 + data = json.loads(capsys.readouterr().out) + records = data["records"] + density = next(r for r in records if r["param"] == "place.target_density") + assert density["value"] == 0.65 + assert density["source"] == "ecc.toml" + assert "default" in density + assert "maps_to" in density + assert "inspect" in density + + def test_param_list_default_source_when_no_overrides(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + rc = cli_main.run(["param", "list", "--project", project_dir, "--json"]) + assert rc == 0 + data = json.loads(capsys.readouterr().out) + for r in data["records"]: + assert r["source"] == "default" + + +class TestDiffFiltering: + def test_diff_only_shows_values_that_differ(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + cli_main.run(["param", "set", "place.target_density", "0.65", "--project", project_dir]) + capsys.readouterr() + + rc = cli_main.run(["param", "diff", "--project", project_dir, "--json"]) + assert rc == 0 + data = json.loads(capsys.readouterr().out) + records = data["records"] + assert len(records) == 1 + assert records[0]["param"] == "place.target_density" + assert records[0]["value"] == 0.65 + assert records[0]["default"] != 0.65 + + def test_diff_clean_when_set_to_default(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + schema_default = 0.3 + cli_main.run(["param", "set", "place.target_density", str(schema_default), "--project", project_dir]) + capsys.readouterr() + + rc = cli_main.run(["param", "diff", "--project", project_dir, "--json"]) + assert rc == 0 + data = json.loads(capsys.readouterr().out) + assert data["records"][0].get("diff_status") == "clean" + + +class TestScopedTomlEdit: + def test_set_preserves_unrelated_sections(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + toml_path = os.path.join(project_dir, "ecc.toml") + with open(toml_path) as f: + original = f.read() + + cli_main.run(["param", "set", "synth.max_fanout", "16", "--project", project_dir]) + 
capsys.readouterr() + + with open(toml_path) as f: + after = f.read() + design_section = original[original.index("[design]"):original.index("[pdk]")] + assert design_section in after + + def test_set_preserves_comments(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + toml_path = os.path.join(project_dir, "ecc.toml") + with open(toml_path) as f: + content = f.read() + content = content.replace("[design]", "[design]\n# my design") + with open(toml_path, "w") as f: + f.write(content) + + cli_main.run(["param", "set", "synth.max_fanout", "16", "--project", project_dir]) + capsys.readouterr() + + with open(toml_path) as f: + after = f.read() + assert "# my design" in after diff --git a/test/cli/test_params.py b/test/cli/test_params.py index 1c0f61ad..8586fa9c 100644 --- a/test/cli/test_params.py +++ b/test/cli/test_params.py @@ -205,6 +205,39 @@ def test_invalid_toml_type_produces_error(self): resolved, errors = resolve_parameters(toml_overrides=toml) assert len(errors) > 0 + def test_float_rejected_for_int_schema(self): + toml = {"synth.max_fanout": 16.5} + resolved, errors = resolve_parameters(toml_overrides=toml) + assert len(errors) > 0 + + def test_bool_rejected_for_int_schema(self): + toml = {"synth.max_fanout": True} + resolved, errors = resolve_parameters(toml_overrides=toml) + assert len(errors) > 0 + + def test_int_accepted_for_float_schema(self): + toml = {"place.target_density": 1} + resolved, errors = resolve_parameters(toml_overrides=toml) + # 1 converts to 1.0 which is out of range for target_density + assert len(errors) > 0 # range validation catches it + + def test_int_in_range_accepted_for_float_schema(self): + toml = {"floorplan.core_util": 1} + resolved, errors = resolve_parameters(toml_overrides=toml) + assert errors == [] + util = next(r for r in resolved if r.param == "floorplan.core_util") + assert util.value == 1.0 + + def test_float_in_list_int_rejected(self): + toml = {"floorplan.core_margin": [2.5, 3]} + resolved, errors = resolve_parameters(toml_overrides=toml) + assert len(errors) > 0 + + def test_str_rejected_for_int_schema(self): + toml = {"synth.max_fanout": "abc"} + resolved, errors = resolve_parameters(toml_overrides=toml) + assert len(errors) > 0 + class TestBackendMapping: def test_flat_key_mapping(self): From a6a9040103e2e56ad7eb1d272cb9f65a9c4fb160 Mon Sep 17 00:00:00 2001 From: Emin Date: Sun, 3 May 2026 23:03:19 +0800 Subject: [PATCH 064/104] fix(cli): native TOML type validation, scoped TOML edit, CLI provenance - Add _validate_toml_type() call for non-string TOML values in parse_toml_params() - Rewrite _apply_scoped_param_edit and _remove_scoped_param_key to fix duplicate-key bug using string.find + re.MULTILINE on section body - Persist CLI parameter overrides to cli-param-overrides.json in run workspace - Load CLI provenance in config --resolved to show source=cli - Add 11 new tests for scoped edits, native type validation, and provenance --- chipcompiler/cli/config_view.py | 21 ++- chipcompiler/cli/handlers.py | 8 + chipcompiler/cli/param_handler.py | 115 +++++++-------- chipcompiler/cli/params.py | 5 +- test/cli/test_cli_params.py | 237 ++++++++++++++++++++++++++++++ 5 files changed, 324 insertions(+), 62 deletions(-) diff --git a/chipcompiler/cli/config_view.py b/chipcompiler/cli/config_view.py index 40a6fa0d..24d9fc21 100644 --- a/chipcompiler/cli/config_view.py +++ b/chipcompiler/cli/config_view.py @@ -98,7 +98,11 @@ def build_project_config_items(project_dir: str, run_dir: str, # Parameter records with source 
information from chipcompiler.cli.params import resolve_parameters - resolved_params, _ = resolve_parameters(toml_overrides=cfg.params_overrides) + cli_provenance = _load_cli_provenance(run_dir) + resolved_params, _ = resolve_parameters( + toml_overrides=cfg.params_overrides, + cli_overrides=cli_provenance, + ) for rp in resolved_params: maps_to = rp.schema.maps_to if isinstance(maps_to, str): @@ -119,6 +123,21 @@ def build_project_config_items(project_dir: str, run_dir: str, return items, 0 +def _load_cli_provenance(run_dir: str) -> dict[str, object]: + import json + provenance_path = os.path.join(run_dir, "home", "cli-param-overrides.json") + if not os.path.isfile(provenance_path): + return {} + try: + with open(provenance_path) as f: + data = json.load(f) + if isinstance(data, dict): + return data + except (json.JSONDecodeError, OSError): + pass + return {} + + def build_step_config_items(run_dir: str, step_token: str | None, project: str | None = None, run_id: str | None = None, diff --git a/chipcompiler/cli/handlers.py b/chipcompiler/cli/handlers.py index 619ab7af..0f3cff7d 100644 --- a/chipcompiler/cli/handlers.py +++ b/chipcompiler/cli/handlers.py @@ -627,6 +627,14 @@ def run(args, ctx: CommandContext) -> CommandResult: "workspace": run_dir, }]) + # Persist CLI parameter provenance for config --resolved inspection + if cli_overrides: + import json as _json + provenance_path = os.path.join(run_dir, "home", "cli-param-overrides.json") + os.makedirs(os.path.dirname(provenance_path), exist_ok=True) + with open(provenance_path, "w") as _f: + _json.dump(cli_overrides, _f) + try: engine_flow = EngineFlow(workspace=workspace) if not engine_flow.has_init(): diff --git a/chipcompiler/cli/param_handler.py b/chipcompiler/cli/param_handler.py index 2ae4a6cb..8a165891 100644 --- a/chipcompiler/cli/param_handler.py +++ b/chipcompiler/cli/param_handler.py @@ -322,76 +322,71 @@ def _remove_param_from_toml(config_path: str, key: str) -> bool: def _apply_scoped_param_edit(text: str, group: str, name: str, value: object) -> str: value_str = _format_toml_value(value) - line_re = re.compile(rf"^(\s*){re.escape(name)}\s*=") - - section_pattern = re.compile(rf"^\[params\.{re.escape(group)}\]\s*$", re.MULTILINE) - m = section_pattern.search(text) - - if m: - section_start = m.start() - next_section = re.search(r"^\[", text[m.end():], re.MULTILINE) - section_end = m.end() + next_section.start() if next_section else len(text) - section_text = text[section_start:section_end] - - key_match = line_re.search(section_text, re.MULTILINE) - if key_match: - prefix = key_match.group(1) - old_line = key_match.group(0) - new_line = f"{prefix}{name} = {value_str}" - updated_section = section_text[:key_match.start()] + new_line + section_text[key_match.end():] - return text[:section_start] + updated_section + text[section_end:] - else: - last_newline = section_text.rstrip().rfind("\n") - if last_newline == -1: - insert_at = len(section_text) - else: - insert_at = last_newline + 1 - new_line = f"\n{name} = {value_str}" - updated_section = section_text[:insert_at] + new_line + "\n" + section_text[insert_at:] - return text[:section_start] + updated_section + text[section_end:] - - params_section = re.search(r"^\[params\]\s*$", text, re.MULTILINE) - if params_section: - insert_text = f"\n\n[params.{group}]\n{name} = {value_str}" - after_params = params_section.end() - next_sec = re.search(r"^\[", text[after_params:], re.MULTILINE) - if next_sec: - insert_at = after_params + next_sec.start() - return text[:insert_at] + 
insert_text + "\n" + text[insert_at:] - else: - return text + insert_text + "\n" - return text.rstrip() + f"\n\n[params.{group}]\n{name} = {value_str}\n" + section_header = f"[params.{group}]" + header_idx = text.find(section_header) + if header_idx == -1: + header_idx = text.find("[params]") + if header_idx == -1: + return text.rstrip() + f"\n\n[params.{group}]\n{name} = {value_str}\n" + after_header = text.find("\n", header_idx) + insert = f"\n\n[params.{group}]\n{name} = {value_str}" + if after_header == -1: + return text + insert + "\n" + next_sec = re.search(r"^\[", text[after_header:], re.MULTILINE) + if next_sec: + pos = after_header + next_sec.start() + return text[:pos] + insert + "\n" + text[pos:] + return text + insert + "\n" + + after_header = text.find("\n", header_idx + len(section_header)) + if after_header == -1: + return text + f"\n{name} = {value_str}\n" + after_header += 1 + + next_sec = re.search(r"^\[", text[after_header:], re.MULTILINE) + section_end = after_header + next_sec.start() if next_sec else len(text) + + section_body = text[after_header:section_end] + key_pattern = re.compile(rf"^{re.escape(name)}\s*=[^\n]*$", re.MULTILINE) + key_match = key_pattern.search(section_body) + + if key_match: + new_line = f"{name} = {value_str}" + new_body = section_body[:key_match.start()] + new_line + section_body[key_match.end():] + return text[:after_header] + new_body + text[section_end:] + else: + insert = f"{name} = {value_str}\n" + return text[:after_header] + insert + text[after_header:] def _remove_scoped_param_key(text: str, group: str, name: str) -> str | None: - section_pattern = re.compile(rf"^\[params\.{re.escape(group)}\]\s*$", re.MULTILINE) - m = section_pattern.search(text) - if not m: + section_header = f"[params.{group}]" + header_idx = text.find(section_header) + if header_idx == -1: + return None + + after_header = text.find("\n", header_idx + len(section_header)) + if after_header == -1: return None + after_header += 1 - section_start = m.start() - next_section = re.search(r"^\[", text[m.end():], re.MULTILINE) - section_end = m.end() + next_section.start() if next_section else len(text) - section_text = text[section_start:section_end] + next_sec = re.search(r"^\[", text[after_header:], re.MULTILINE) + section_end = after_header + next_sec.start() if next_sec else len(text) - line_re = re.compile(rf"^\s*{re.escape(name)}\s*=[^\n]*\n?", re.MULTILINE) - line_match = line_re.search(section_text) - if not line_match: + section_body = text[after_header:section_end] + key_pattern = re.compile(rf"^{re.escape(name)}\s*=[^\n]*\n?", re.MULTILINE) + key_match = key_pattern.search(section_body) + if not key_match: return None - remaining = section_text[:line_match.start()] + section_text[line_match.end():] - key_lines = [l for l in remaining.split("\n") - if l.strip() and not l.strip().startswith(f"[params.{group}")] - if not key_lines: - result = text[:section_start] + text[section_end:] - if result.endswith("\n\n"): - result = result[:-1] - return result + new_body = section_body[:key_match.start()] + section_body[key_match.end():] + remaining_keys = [l for l in new_body.strip().split("\n") if l.strip()] + if not remaining_keys: + result = text[:header_idx].rstrip("\n") + "\n" + text[section_end:].lstrip("\n") + return result if result.strip() else None else: - header = f"[params.{group}]\n" - new_section = header + remaining[remaining.index("]") + 1:] - return text[:section_start] + new_section + text[section_end:] + return text[:after_header] + new_body + 
text[section_end:] def _format_toml_value(val: object) -> str: diff --git a/chipcompiler/cli/params.py b/chipcompiler/cli/params.py index 22834f86..142d123d 100644 --- a/chipcompiler/cli/params.py +++ b/chipcompiler/cli/params.py @@ -465,7 +465,10 @@ def parse_toml_params(params_table: dict) -> tuple[dict[str, object], list[str]] if isinstance(value, str): parsed = parse_value(value, schema) else: - parsed = value + parsed, type_err = _validate_toml_type(value, schema) + if type_err: + errors.append(type_err) + continue except ValueError as exc: errors.append(str(exc)) continue diff --git a/test/cli/test_cli_params.py b/test/cli/test_cli_params.py index 629d4820..e3ad0a7d 100644 --- a/test/cli/test_cli_params.py +++ b/test/cli/test_cli_params.py @@ -542,3 +542,240 @@ def test_set_preserves_comments(self, tmp_path, capsys): with open(toml_path) as f: after = f.read() assert "# my design" in after + + def test_set_same_key_twice_has_one_assignment(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + toml_path = os.path.join(project_dir, "ecc.toml") + + cli_main.run(["param", "set", "place.target_density", "0.65", "--project", project_dir]) + capsys.readouterr() + + cli_main.run(["param", "set", "place.target_density", "0.7", "--project", project_dir]) + capsys.readouterr() + + with open(toml_path) as f: + content = f.read() + assert content.count("target_density") == 1 + assert "0.7" in content + assert "0.65" not in content + + def test_set_then_show_still_works(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + + cli_main.run(["param", "set", "place.target_density", "0.65", "--project", project_dir]) + capsys.readouterr() + + cli_main.run(["param", "set", "place.target_density", "0.7", "--project", project_dir]) + capsys.readouterr() + + rc = cli_main.run(["param", "show", "place.target_density", "--project", project_dir, "--json"]) + assert rc == 0 + data = json.loads(capsys.readouterr().out) + assert data["records"][0]["value"] == 0.7 + + +class TestNativeTomlTypeValidation: + def test_check_rejects_float_for_int(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + toml_path = os.path.join(project_dir, "ecc.toml") + with open(toml_path) as f: + content = f.read() + content += '\n[params.synth]\nmax_fanout = 16.5\n' + with open(toml_path, "w") as f: + f.write(content) + rc = cli_main.run(["check", "--project", project_dir, "--json"]) + assert rc == 1 + + def test_check_rejects_bool_for_int(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + toml_path = os.path.join(project_dir, "ecc.toml") + with open(toml_path) as f: + content = f.read() + content += '\n[params.synth]\nmax_fanout = true\n' + with open(toml_path, "w") as f: + f.write(content) + rc = cli_main.run(["check", "--project", project_dir, "--json"]) + assert rc == 1 + + def test_check_rejects_float_in_list_int(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + toml_path = os.path.join(project_dir, "ecc.toml") + with open(toml_path) as f: + content = f.read() + content += '\n[params.floorplan]\ncore_margin = [2.5, 3]\n' + with open(toml_path, "w") as f: + f.write(content) + rc = cli_main.run(["check", "--project", project_dir, "--json"]) + assert rc == 1 + + def test_check_accepts_valid_int(self, tmp_path, capsys, monkeypatch): + project_dir = _create_valid_project(tmp_path) + toml_path = os.path.join(project_dir, "ecc.toml") + with open(toml_path) as f: + content = f.read() + content += '\n[params.synth]\nmax_fanout 
= 16\n' + with open(toml_path, "w") as f: + f.write(content) + monkeypatch.setattr( + "chipcompiler.cli.config._validate_pdk_contents", + lambda pdk_root, pdk_name: [], + ) + rc = cli_main.run(["check", "--project", project_dir, "--json"]) + assert rc == 0 + + +class TestCliProvenance: + def test_run_set_reports_cli_source_in_config(self, tmp_path, monkeypatch, capsys): + from types import SimpleNamespace + + project_dir = _create_valid_project(tmp_path) + workspace_obj = SimpleNamespace(name="workspace") + + def fake_create(**kwargs): + run_dir = kwargs["directory"] + os.makedirs(os.path.join(run_dir, "home"), exist_ok=True) + return workspace_obj + + monkeypatch.setattr("chipcompiler.data.create_workspace", fake_create) + monkeypatch.setattr("chipcompiler.engine.EngineFlow", type( + "DummyFlow", (), { + "__init__": lambda self, workspace: None, + "has_init": lambda self: False, + "add_step": lambda self, **kw: None, + "create_step_workspaces": lambda self: None, + "run_steps": lambda self: True, + }, + )) + monkeypatch.setattr( + "chipcompiler.rtl2gds.build_rtl2gds_flow", + lambda: [("Synthesis", "yosys", "Unstart")], + ) + monkeypatch.setattr( + "chipcompiler.cli.config._validate_pdk_contents", + lambda name, root: None, + ) + monkeypatch.setattr( + "chipcompiler.cli.progress.should_enable_run_progress", + lambda *a, **kw: False, + ) + + rc = cli_main.run([ + "run", "--project", project_dir, + "--set", "synth.max_fanout=16", + ]) + assert rc == 0 + capsys.readouterr() + + # Verify provenance file was written + provenance = os.path.join( + project_dir, "runs", "default", "home", "cli-param-overrides.json" + ) + assert os.path.isfile(provenance) + with open(provenance) as f: + data = json.load(f) + assert data["synth.max_fanout"] == 16 + + def test_config_resolved_shows_cli_source(self, tmp_path, monkeypatch, capsys): + from types import SimpleNamespace + + project_dir = _create_valid_project(tmp_path) + workspace_obj = SimpleNamespace(name="workspace") + + def fake_create(**kwargs): + run_dir = kwargs["directory"] + os.makedirs(os.path.join(run_dir, "home"), exist_ok=True) + return workspace_obj + + monkeypatch.setattr("chipcompiler.data.create_workspace", fake_create) + monkeypatch.setattr("chipcompiler.engine.EngineFlow", type( + "DummyFlow", (), { + "__init__": lambda self, workspace: None, + "has_init": lambda self: False, + "add_step": lambda self, **kw: None, + "create_step_workspaces": lambda self: None, + "run_steps": lambda self: True, + }, + )) + monkeypatch.setattr( + "chipcompiler.rtl2gds.build_rtl2gds_flow", + lambda: [("Synthesis", "yosys", "Unstart")], + ) + monkeypatch.setattr( + "chipcompiler.cli.config._validate_pdk_contents", + lambda name, root: None, + ) + monkeypatch.setattr( + "chipcompiler.cli.progress.should_enable_run_progress", + lambda *a, **kw: False, + ) + + # Run with --set + rc = cli_main.run([ + "run", "--project", project_dir, + "--set", "synth.max_fanout=16", + ]) + assert rc == 0 + capsys.readouterr() + + # Now inspect config --resolved + rc = cli_main.run(["config", "--resolved", "--project", project_dir, "--json"]) + assert rc == 0 + data = json.loads(capsys.readouterr().out) + param_records = [r for r in data["records"] if r.get("kind") == "param"] + fanout = next(r for r in param_records if r["key"] == "synth.max_fanout") + assert fanout["value"] == 16 + assert fanout["source"] == "cli" + + def test_config_resolved_toml_plus_cli_precedence(self, tmp_path, monkeypatch, capsys): + from types import SimpleNamespace + + project_dir = 
_create_valid_project(tmp_path) + workspace_obj = SimpleNamespace(name="workspace") + + # Set a TOML override first + cli_main.run(["param", "set", "synth.max_fanout", "16", "--project", project_dir]) + capsys.readouterr() + + def fake_create(**kwargs): + run_dir = kwargs["directory"] + os.makedirs(os.path.join(run_dir, "home"), exist_ok=True) + return workspace_obj + + monkeypatch.setattr("chipcompiler.data.create_workspace", fake_create) + monkeypatch.setattr("chipcompiler.engine.EngineFlow", type( + "DummyFlow", (), { + "__init__": lambda self, workspace: None, + "has_init": lambda self: False, + "add_step": lambda self, **kw: None, + "create_step_workspaces": lambda self: None, + "run_steps": lambda self: True, + }, + )) + monkeypatch.setattr( + "chipcompiler.rtl2gds.build_rtl2gds_flow", + lambda: [("Synthesis", "yosys", "Unstart")], + ) + monkeypatch.setattr( + "chipcompiler.cli.config._validate_pdk_contents", + lambda name, root: None, + ) + monkeypatch.setattr( + "chipcompiler.cli.progress.should_enable_run_progress", + lambda *a, **kw: False, + ) + + # Run with different CLI override + rc = cli_main.run([ + "run", "--project", project_dir, + "--set", "synth.max_fanout=32", + ]) + assert rc == 0 + capsys.readouterr() + + rc = cli_main.run(["config", "--resolved", "--project", project_dir, "--json"]) + assert rc == 0 + data = json.loads(capsys.readouterr().out) + param_records = [r for r in data["records"] if r.get("kind") == "param"] + fanout = next(r for r in param_records if r["key"] == "synth.max_fanout") + assert fanout["value"] == 32 + assert fanout["source"] == "cli" From 1eb7ec46cb05a365d18f7f416a2e310aff49ab1e Mon Sep 17 00:00:00 2001 From: Emin Date: Sun, 3 May 2026 23:16:08 +0800 Subject: [PATCH 065/104] fix(cli): param handlers reject invalid TOML, indented key edits, provenance validation - param list/show/diff now return errors when ecc.toml has invalid [params.*] - scoped TOML edit handles indented assignment lines without duplication - unset removes indented keys correctly - CLI provenance is validated against schema (rejects malformed/unknown/invalid) - config --resolved returns structured error for invalid provenance - 10 new tests: 3 invalid TOML rejection, 4 indented key editing, 3 provenance validation --- chipcompiler/cli/config_view.py | 23 +++-- chipcompiler/cli/handlers.py | 8 +- chipcompiler/cli/param_handler.py | 26 +++-- test/cli/test_cli_params.py | 156 ++++++++++++++++++++++++++++++ 4 files changed, 194 insertions(+), 19 deletions(-) diff --git a/chipcompiler/cli/config_view.py b/chipcompiler/cli/config_view.py index 24d9fc21..032b2468 100644 --- a/chipcompiler/cli/config_view.py +++ b/chipcompiler/cli/config_view.py @@ -98,7 +98,9 @@ def build_project_config_items(project_dir: str, run_dir: str, # Parameter records with source information from chipcompiler.cli.params import resolve_parameters - cli_provenance = _load_cli_provenance(run_dir) + cli_provenance, prov_error = _load_cli_provenance(run_dir) + if prov_error: + return [{"kind": "error", "status": "invalid_config", "reason": prov_error}], 1 resolved_params, _ = resolve_parameters( toml_overrides=cfg.params_overrides, cli_overrides=cli_provenance, @@ -123,19 +125,24 @@ def build_project_config_items(project_dir: str, run_dir: str, return items, 0 -def _load_cli_provenance(run_dir: str) -> dict[str, object]: +def _load_cli_provenance(run_dir: str) -> tuple[dict[str, object], str | None]: import json provenance_path = os.path.join(run_dir, "home", "cli-param-overrides.json") if not 
os.path.isfile(provenance_path): - return {} + return {}, None try: with open(provenance_path) as f: data = json.load(f) - if isinstance(data, dict): - return data - except (json.JSONDecodeError, OSError): - pass - return {} + except (json.JSONDecodeError, OSError) as exc: + return {}, f"invalid CLI parameter provenance: {exc}" + if not isinstance(data, dict): + return {}, "invalid CLI parameter provenance: expected object" + from chipcompiler.cli.params import parse_cli_overrides + items = [f"{k}={v}" for k, v in data.items()] + validated, errors = parse_cli_overrides(items) + if errors: + return {}, f"invalid CLI parameter provenance: {errors[0]}" + return validated, None def build_step_config_items(run_dir: str, step_token: str | None, diff --git a/chipcompiler/cli/handlers.py b/chipcompiler/cli/handlers.py index 0f3cff7d..e84bc2d3 100644 --- a/chipcompiler/cli/handlers.py +++ b/chipcompiler/cli/handlers.py @@ -314,10 +314,14 @@ def config(args, ctx: CommandContext) -> CommandResult: inspect=disclosure_cmd("ecc check", project), )]) if status == "invalid_config": - return CommandResult.err([error_record( + reason = first.get("reason") + rec = error_record( "invalid_config", inspect=disclosure_cmd("ecc check", project), - )]) + ) + if reason: + rec["reason"] = reason + return CommandResult.err([rec]) return CommandResult.err(items) if not items: diff --git a/chipcompiler/cli/param_handler.py b/chipcompiler/cli/param_handler.py index 8a165891..8ec154d5 100644 --- a/chipcompiler/cli/param_handler.py +++ b/chipcompiler/cli/param_handler.py @@ -21,7 +21,9 @@ def param_list(args, ctx: CommandContext) -> CommandResult: - toml_overrides = _load_toml_overrides(ctx.project_dir) + toml_overrides, param_errors = _load_toml_overrides(ctx.project_dir) + if param_errors: + return CommandResult.err([error_record("invalid_param_config", reason=e) for e in param_errors]) resolved, _ = resolve_parameters(toml_overrides=toml_overrides) project = ctx.project @@ -61,7 +63,9 @@ def param_show(args, ctx: CommandContext) -> CommandResult: param=key, )], exit_code=1) - toml_overrides = _load_toml_overrides(ctx.project_dir) + toml_overrides, param_errors = _load_toml_overrides(ctx.project_dir) + if param_errors: + return CommandResult.err([error_record("invalid_param_config", reason=e) for e in param_errors]) resolved, _ = resolve_parameters(toml_overrides=toml_overrides) rp = next(r for r in resolved if r.param == key) @@ -164,7 +168,9 @@ def param_unset(args, ctx: CommandContext) -> CommandResult: def param_diff(args, ctx: CommandContext) -> CommandResult: - toml_overrides = _load_toml_overrides(ctx.project_dir) + toml_overrides, param_errors = _load_toml_overrides(ctx.project_dir) + if param_errors: + return CommandResult.err([error_record("invalid_param_config", reason=e) for e in param_errors]) resolved, _ = resolve_parameters(toml_overrides=toml_overrides) records = [] @@ -283,14 +289,15 @@ def _find_config_path(project_dir: str) -> str | None: return path if os.path.isfile(path) else None -def _load_toml_overrides(project_dir: str) -> dict[str, object]: +def _load_toml_overrides(project_dir: str) -> tuple[dict[str, object], list[str]]: config_path = _find_config_path(project_dir) if config_path is None: - return {} + return {}, [] from chipcompiler.cli.config import load_project_config cfg = load_project_config(config_path) - return cfg.params_overrides + errors = list(getattr(cfg, "_param_errors", [])) + return cfg.params_overrides, errors def _write_param_to_toml(config_path: str, key: str, value: 
object) -> None: @@ -348,11 +355,12 @@ def _apply_scoped_param_edit(text: str, group: str, name: str, value: object) -> section_end = after_header + next_sec.start() if next_sec else len(text) section_body = text[after_header:section_end] - key_pattern = re.compile(rf"^{re.escape(name)}\s*=[^\n]*$", re.MULTILINE) + key_pattern = re.compile(rf"^(\s*){re.escape(name)}\s*=[^\n]*$", re.MULTILINE) key_match = key_pattern.search(section_body) if key_match: - new_line = f"{name} = {value_str}" + indent = key_match.group(1) + new_line = f"{indent}{name} = {value_str}" new_body = section_body[:key_match.start()] + new_line + section_body[key_match.end():] return text[:after_header] + new_body + text[section_end:] else: @@ -375,7 +383,7 @@ def _remove_scoped_param_key(text: str, group: str, name: str) -> str | None: section_end = after_header + next_sec.start() if next_sec else len(text) section_body = text[after_header:section_end] - key_pattern = re.compile(rf"^{re.escape(name)}\s*=[^\n]*\n?", re.MULTILINE) + key_pattern = re.compile(rf"^\s*{re.escape(name)}\s*=[^\n]*\n?", re.MULTILINE) key_match = key_pattern.search(section_body) if not key_match: return None diff --git a/test/cli/test_cli_params.py b/test/cli/test_cli_params.py index e3ad0a7d..c543c2a2 100644 --- a/test/cli/test_cli_params.py +++ b/test/cli/test_cli_params.py @@ -779,3 +779,159 @@ def fake_create(**kwargs): fanout = next(r for r in param_records if r["key"] == "synth.max_fanout") assert fanout["value"] == 32 assert fanout["source"] == "cli" + + +class TestParamHandlersRejectInvalidToml: + """Param list/show/diff must return errors when ecc.toml has invalid [params.*].""" + + def _write_invalid_toml(self, project_dir): + toml_path = os.path.join(project_dir, "ecc.toml") + with open(toml_path) as f: + content = f.read() + content += '\n[params.synth]\nmax_fanout = 16.5\n' + with open(toml_path, "w") as f: + f.write(content) + + def test_param_list_rejects_invalid_toml(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + self._write_invalid_toml(project_dir) + rc = cli_main.run(["param", "list", "--project", project_dir, "--json"]) + assert rc == 1 + data = json.loads(capsys.readouterr().out) + assert data["records"][0]["error"] == "invalid_param_config" + + def test_param_show_rejects_invalid_toml(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + self._write_invalid_toml(project_dir) + rc = cli_main.run(["param", "show", "synth.max_fanout", "--project", project_dir, "--json"]) + assert rc == 1 + data = json.loads(capsys.readouterr().out) + assert data["records"][0]["error"] == "invalid_param_config" + + def test_param_diff_rejects_invalid_toml(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + self._write_invalid_toml(project_dir) + rc = cli_main.run(["param", "diff", "--project", project_dir, "--json"]) + assert rc == 1 + data = json.loads(capsys.readouterr().out) + assert data["records"][0]["error"] == "invalid_param_config" + + +class TestIndentedTomlKeys: + """Scoped TOML edit must handle indented assignment lines.""" + + def test_set_replaces_indented_key(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + toml_path = os.path.join(project_dir, "ecc.toml") + with open(toml_path) as f: + content = f.read() + content += '\n[params.place]\n target_density = 0.65\n' + with open(toml_path, "w") as f: + f.write(content) + + cli_main.run(["param", "set", "place.target_density", "0.7", "--project", project_dir]) + capsys.readouterr() + + with 
open(toml_path) as f: + after = f.read() + assert after.count("target_density") == 1 + assert "0.7" in after + + def test_set_then_show_indented(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + toml_path = os.path.join(project_dir, "ecc.toml") + with open(toml_path) as f: + content = f.read() + content += '\n[params.place]\n target_density = 0.65\n' + with open(toml_path, "w") as f: + f.write(content) + + cli_main.run(["param", "set", "place.target_density", "0.7", "--project", project_dir]) + capsys.readouterr() + + rc = cli_main.run(["param", "show", "place.target_density", "--project", project_dir, "--json"]) + assert rc == 0 + data = json.loads(capsys.readouterr().out) + assert data["records"][0]["value"] == 0.7 + + def test_unset_removes_indented_key(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + toml_path = os.path.join(project_dir, "ecc.toml") + with open(toml_path) as f: + content = f.read() + content += '\n[params.place]\n target_density = 0.65\n' + with open(toml_path, "w") as f: + f.write(content) + + cli_main.run(["param", "unset", "place.target_density", "--project", project_dir]) + capsys.readouterr() + + with open(toml_path) as f: + after = f.read() + assert "target_density" not in after + + def test_set_indented_preserves_other_sections(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + toml_path = os.path.join(project_dir, "ecc.toml") + with open(toml_path) as f: + content = f.read() + content += '\n[params.place]\n target_density = 0.65\n\n[flow]\npreset = "rtl2gds"\n' + with open(toml_path, "w") as f: + f.write(content) + + cli_main.run(["param", "set", "place.target_density", "0.7", "--project", project_dir]) + capsys.readouterr() + + with open(toml_path) as f: + after = f.read() + assert 'preset = "rtl2gds"' in after + assert after.count("target_density") == 1 + + +class TestMalformedCliProvenance: + """config --resolved must error on malformed/invalid CLI provenance.""" + + def _setup_run_dir(self, project_dir): + run_dir = os.path.join(project_dir, "runs", "default") + os.makedirs(os.path.join(run_dir, "home"), exist_ok=True) + return run_dir + + def test_malformed_json_provenance_fails(self, tmp_path, capsys, monkeypatch): + project_dir = _create_valid_project(tmp_path) + run_dir = self._setup_run_dir(project_dir) + with open(os.path.join(run_dir, "home", "cli-param-overrides.json"), "w") as f: + f.write("not valid json{") + monkeypatch.setattr( + "chipcompiler.cli.config._validate_pdk_contents", + lambda pdk_root, pdk_name: [], + ) + rc = cli_main.run(["config", "--resolved", "--project", project_dir, "--json"]) + assert rc == 1 + data = json.loads(capsys.readouterr().out) + assert data["records"][0]["error"] == "invalid_config" + + def test_non_dict_provenance_fails(self, tmp_path, capsys, monkeypatch): + project_dir = _create_valid_project(tmp_path) + run_dir = self._setup_run_dir(project_dir) + with open(os.path.join(run_dir, "home", "cli-param-overrides.json"), "w") as f: + json.dump([1, 2, 3], f) + monkeypatch.setattr( + "chipcompiler.cli.config._validate_pdk_contents", + lambda pdk_root, pdk_name: [], + ) + rc = cli_main.run(["config", "--resolved", "--project", project_dir, "--json"]) + assert rc == 1 + + def test_unknown_key_in_provenance_fails(self, tmp_path, capsys, monkeypatch): + project_dir = _create_valid_project(tmp_path) + run_dir = self._setup_run_dir(project_dir) + with open(os.path.join(run_dir, "home", "cli-param-overrides.json"), "w") as f: + 
json.dump({"nonexistent.param": 42}, f) + monkeypatch.setattr( + "chipcompiler.cli.config._validate_pdk_contents", + lambda pdk_root, pdk_name: [], + ) + rc = cli_main.run(["config", "--resolved", "--project", project_dir, "--json"]) + assert rc == 1 + data = json.loads(capsys.readouterr().out) + assert data["records"][0]["error"] == "invalid_config" From 4a3f69d6ba37cc780e3e8b09bc657ffe233c5a95 Mon Sep 17 00:00:00 2001 From: Emin Date: Sun, 3 May 2026 23:32:22 +0800 Subject: [PATCH 066/104] fix(cli): disclosure commands in param show, safe TOML section parsing, list diff fix - Add inspect/set/run disclosure commands to param show JSON and text output - Replace text.find section detection with regex-based _find_table_span that rejects comments and handles indented TOML headers - Convert list schema defaults from tuples to lists for correct equality - 8 new tests: disclosure commands, commented/indented headers, list diff --- chipcompiler/cli/param_handler.py | 94 +++++++++++++---------- chipcompiler/cli/params.py | 2 +- test/cli/test_cli_params.py | 123 ++++++++++++++++++++++++++++++ 3 files changed, 179 insertions(+), 40 deletions(-) diff --git a/chipcompiler/cli/param_handler.py b/chipcompiler/cli/param_handler.py index 8ec154d5..20c3d645 100644 --- a/chipcompiler/cli/param_handler.py +++ b/chipcompiler/cli/param_handler.py @@ -78,6 +78,9 @@ def param_show(args, ctx: CommandContext) -> CommandResult: "applies": schema.applies, "maps_to": _maps_to_str(schema.maps_to), "description": schema.description, + "inspect": disclosure_cmd(f"ecc param show {rp.param}", ctx.project), + "set": disclosure_cmd(f"ecc param set {rp.param}", ctx.project), + "run": disclosure_cmd(f"ecc run --set {rp.param}=", ctx.project), } if schema.range is not None: record["range"] = f"[{schema.range[0]}, {schema.range[1]}]" @@ -240,7 +243,8 @@ def render_param_show_text(records, file=None): print(f" {r['param']}", file=target) for field in ("value", "default", "source", "type", "applies", - "maps_to", "description", "range", "choices", "unit"): + "maps_to", "description", "range", "choices", "unit", + "inspect", "set", "run"): val = r.get(field) if val is not None: label = field.replace("_", " ") @@ -327,34 +331,44 @@ def _remove_param_from_toml(config_path: str, key: str) -> bool: return True +_TABLE_HEADER_RE = re.compile(r"^[ \t]*\[([^\]]+)\][ \t]*(?:#.*)?$", re.MULTILINE) + + +def _find_table_span(text: str, table_name: str) -> tuple[int, int] | None: + """Return (body_start, body_end) for a TOML table, or None.""" + for m in _TABLE_HEADER_RE.finditer(text): + if m.group(1).strip() == table_name: + header_end = m.end() + nl = text.find("\n", header_end) + if nl == -1: + body_start = len(text) + else: + body_start = nl + 1 + next_header = _TABLE_HEADER_RE.search(text, body_start) + body_end = next_header.start() if next_header else len(text) + return body_start, body_end + return None + + def _apply_scoped_param_edit(text: str, group: str, name: str, value: object) -> str: value_str = _format_toml_value(value) - - section_header = f"[params.{group}]" - header_idx = text.find(section_header) - if header_idx == -1: - header_idx = text.find("[params]") - if header_idx == -1: - return text.rstrip() + f"\n\n[params.{group}]\n{name} = {value_str}\n" - after_header = text.find("\n", header_idx) - insert = f"\n\n[params.{group}]\n{name} = {value_str}" - if after_header == -1: - return text + insert + "\n" - next_sec = re.search(r"^\[", text[after_header:], re.MULTILINE) - if next_sec: - pos = after_header + 
next_sec.start() + target_table = f"params.{group}" + + span = _find_table_span(text, target_table) + if span is None: + params_span = _find_table_span(text, "params") + if params_span is None: + return text.rstrip() + f"\n\n[{target_table}]\n{name} = {value_str}\n" + body_start, body_end = params_span + insert = f"\n\n[{target_table}]\n{name} = {value_str}" + next_header = _TABLE_HEADER_RE.search(text, body_start) + if next_header: + pos = next_header.start() return text[:pos] + insert + "\n" + text[pos:] return text + insert + "\n" - after_header = text.find("\n", header_idx + len(section_header)) - if after_header == -1: - return text + f"\n{name} = {value_str}\n" - after_header += 1 - - next_sec = re.search(r"^\[", text[after_header:], re.MULTILINE) - section_end = after_header + next_sec.start() if next_sec else len(text) - - section_body = text[after_header:section_end] + body_start, body_end = span + section_body = text[body_start:body_end] key_pattern = re.compile(rf"^(\s*){re.escape(name)}\s*=[^\n]*$", re.MULTILINE) key_match = key_pattern.search(section_body) @@ -362,27 +376,21 @@ def _apply_scoped_param_edit(text: str, group: str, name: str, value: object) -> indent = key_match.group(1) new_line = f"{indent}{name} = {value_str}" new_body = section_body[:key_match.start()] + new_line + section_body[key_match.end():] - return text[:after_header] + new_body + text[section_end:] + return text[:body_start] + new_body + text[body_end:] else: insert = f"{name} = {value_str}\n" - return text[:after_header] + insert + text[after_header:] + return text[:body_start] + insert + text[body_start:] def _remove_scoped_param_key(text: str, group: str, name: str) -> str | None: - section_header = f"[params.{group}]" - header_idx = text.find(section_header) - if header_idx == -1: - return None + target_table = f"params.{group}" - after_header = text.find("\n", header_idx + len(section_header)) - if after_header == -1: + span = _find_table_span(text, target_table) + if span is None: return None - after_header += 1 - - next_sec = re.search(r"^\[", text[after_header:], re.MULTILINE) - section_end = after_header + next_sec.start() if next_sec else len(text) - section_body = text[after_header:section_end] + body_start, body_end = span + section_body = text[body_start:body_end] key_pattern = re.compile(rf"^\s*{re.escape(name)}\s*=[^\n]*\n?", re.MULTILINE) key_match = key_pattern.search(section_body) if not key_match: @@ -391,10 +399,18 @@ def _remove_scoped_param_key(text: str, group: str, name: str) -> str | None: new_body = section_body[:key_match.start()] + section_body[key_match.end():] remaining_keys = [l for l in new_body.strip().split("\n") if l.strip()] if not remaining_keys: - result = text[:header_idx].rstrip("\n") + "\n" + text[section_end:].lstrip("\n") + header_match = None + for m in _TABLE_HEADER_RE.finditer(text): + if m.group(1).strip() == target_table: + header_match = m + break + if header_match is None: + return None + header_start = header_match.start() + result = text[:header_start].rstrip("\n") + "\n" + text[body_end:].lstrip("\n") return result if result.strip() else None else: - return text[:after_header] + new_body + text[section_end:] + return text[:body_start] + new_body + text[body_end:] def _format_toml_value(val: object) -> str: diff --git a/chipcompiler/cli/params.py b/chipcompiler/cli/params.py index 142d123d..06a0eb6c 100644 --- a/chipcompiler/cli/params.py +++ b/chipcompiler/cli/params.py @@ -51,7 +51,7 @@ class ParamSchema: group="floorplan", name="core_margin", 
type="list[int]", - default=(2, 2), + default=[2, 2], applies="floorplan", maps_to={"Core": "Margin"}, description="Core margin in micrometers [horizontal, vertical]", diff --git a/test/cli/test_cli_params.py b/test/cli/test_cli_params.py index c543c2a2..61cf1b88 100644 --- a/test/cli/test_cli_params.py +++ b/test/cli/test_cli_params.py @@ -935,3 +935,126 @@ def test_unknown_key_in_provenance_fails(self, tmp_path, capsys, monkeypatch): assert rc == 1 data = json.loads(capsys.readouterr().out) assert data["records"][0]["error"] == "invalid_config" + + +class TestParamShowDisclosureCommands: + """param show must include disclosure command fields.""" + + def test_show_json_has_disclosure_commands(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + rc = cli_main.run(["param", "show", "place.target_density", "--project", project_dir, "--json"]) + assert rc == 0 + data = json.loads(capsys.readouterr().out) + record = data["records"][0] + assert "inspect" in record + assert "set" in record + assert "run" in record + assert "ecc param show place.target_density" in record["inspect"] + + def test_show_text_has_disclosure_commands(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + rc = cli_main.run(["param", "show", "place.target_density", "--project", project_dir]) + assert rc == 0 + out = capsys.readouterr().out + assert "ecc param show place.target_density" in out + assert "ecc param set place.target_density" in out + assert "ecc run --set place.target_density" in out + + +class TestSafeTomlSectionParsing: + """Scoped TOML edits must handle comments and indented headers safely.""" + + def test_set_ignores_commented_section_header(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + toml_path = os.path.join(project_dir, "ecc.toml") + with open(toml_path) as f: + content = f.read() + content += '\n# [params.place]\n# target_density = 0.65\n' + with open(toml_path, "w") as f: + f.write(content) + + cli_main.run(["param", "set", "place.target_density", "0.7", "--project", project_dir]) + capsys.readouterr() + + with open(toml_path) as f: + after = f.read() + assert "[params.place]" in after + assert "target_density = 0.7" in after + + def test_set_ignores_indented_next_section_header(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + toml_path = os.path.join(project_dir, "ecc.toml") + with open(toml_path) as f: + content = f.read() + content += '\n[params.place]\ntarget_density = 0.65\n\n [flow]\npreset = "rtl2gds"\n' + with open(toml_path, "w") as f: + f.write(content) + + cli_main.run(["param", "set", "place.target_density", "0.7", "--project", project_dir]) + capsys.readouterr() + + with open(toml_path) as f: + after = f.read() + assert after.count("target_density") == 1 + assert "0.7" in after + assert 'preset = "rtl2gds"' in after + + def test_set_then_show_after_commented_header(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + toml_path = os.path.join(project_dir, "ecc.toml") + with open(toml_path) as f: + content = f.read() + content += '\n# [params.place]\n# target_density = 0.65\n' + with open(toml_path, "w") as f: + f.write(content) + + cli_main.run(["param", "set", "place.target_density", "0.7", "--project", project_dir]) + capsys.readouterr() + + rc = cli_main.run(["param", "show", "place.target_density", "--project", project_dir, "--json"]) + assert rc == 0 + data = json.loads(capsys.readouterr().out) + assert data["records"][0]["value"] == 0.7 + + def 
test_unset_ignores_commented_section_header(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + toml_path = os.path.join(project_dir, "ecc.toml") + with open(toml_path) as f: + content = f.read() + content += '\n# [params.place]\n# target_density = 0.65\n' + with open(toml_path, "w") as f: + f.write(content) + + rc = cli_main.run(["param", "unset", "place.target_density", "--project", project_dir]) + assert rc == 0 + capsys.readouterr() + with open(toml_path) as f: + after = f.read() + assert "target_density" in after + + +class TestListDefaultDiffFiltering: + """param diff must not report list values equal to defaults.""" + + def test_list_default_not_in_diff(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + cli_main.run(["param", "set", "floorplan.core_margin", "[2,2]", "--project", project_dir]) + capsys.readouterr() + + rc = cli_main.run(["param", "diff", "--project", project_dir, "--json"]) + assert rc == 0 + data = json.loads(capsys.readouterr().out) + assert data["records"][0].get("diff_status") == "clean" + + def test_list_changed_value_in_diff(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + cli_main.run(["param", "set", "floorplan.core_margin", "[4,4]", "--project", project_dir]) + capsys.readouterr() + + rc = cli_main.run(["param", "diff", "--project", project_dir, "--json"]) + assert rc == 0 + data = json.loads(capsys.readouterr().out) + assert len(data["records"]) >= 1 + margin = next((r for r in data["records"] if r.get("param") == "floorplan.core_margin"), None) + assert margin is not None + assert margin["value"] == [4, 4] From b821233042a04119899e0b8541c0039641e1d842 Mon Sep 17 00:00:00 2001 From: Emin Date: Mon, 4 May 2026 14:35:27 +0800 Subject: [PATCH 067/104] feat(cli): refactor ecc log output with complete content, classification, and pretty rendering - Add chipcompiler/cli/log_view.py with line classification, traceback-aware annotation, and pretty/plain/listing renderers - Default ecc log now shows complete log content with classified labels (error, warning, info, traceback, section, plain) instead of filtered error lines - Add --plain flag for stable key-value record output - --jsonl now emits full-content per-line structured objects with line_no and kind - Traceback blocks preserved as contiguous visible blocks in all output modes - ANSI color in pretty output respects TTY, NO_COLOR, TERM=dumb guards - Deprecate --errors from disclosure commands across status, metrics, artifacts, diagnose, and progress - ecc log without step shows pretty listing of available logs - Error cases (unknown step, missing logs, empty logs) return structured output with proper exit codes - Add 28 new integration tests and 55 unit tests for log view module - No changes to chipcompiler/engine/ or chipcompiler/tools/ --- chipcompiler/cli/diagnose.py | 6 +- chipcompiler/cli/handlers.py | 53 +++-- chipcompiler/cli/log_view.py | 219 +++++++++++++++++++ chipcompiler/cli/main.py | 53 ++++- chipcompiler/cli/progress.py | 2 +- test/cli/test_cli_inspect.py | 2 +- test/cli/test_cli_main.py | 365 +++++++++++++++++++++++++++++-- test/cli/test_log_view.py | 402 +++++++++++++++++++++++++++++++++++ test/cli/test_progress.py | 2 +- 9 files changed, 1056 insertions(+), 48 deletions(-) create mode 100644 chipcompiler/cli/log_view.py create mode 100644 test/cli/test_log_view.py diff --git a/chipcompiler/cli/diagnose.py b/chipcompiler/cli/diagnose.py index 012483f9..2a07a115 100644 --- a/chipcompiler/cli/diagnose.py +++ 
b/chipcompiler/cli/diagnose.py @@ -58,11 +58,11 @@ def _make_issue(issue: str, severity: str, run: str, obj["evidence"] = disclosure_cmd("ecc status", **cmd_kwargs) obj["start_cmd"] = disclosure_cmd("ecc run", project=project) elif issue == "log_errors": - obj["evidence"] = disclosure_cmd(f"ecc log {step} --errors", **cmd_kwargs) + obj["evidence"] = disclosure_cmd(f"ecc log {step}", **cmd_kwargs) obj["artifacts"] = disclosure_cmd(f"ecc artifacts {step}", **cmd_kwargs) elif issue == "missing_metrics": obj["evidence"] = disclosure_cmd(f"ecc metrics {step} --json", **cmd_kwargs) - obj["log"] = disclosure_cmd(f"ecc log {step} --errors", **cmd_kwargs) + obj["log"] = disclosure_cmd(f"ecc log {step}", **cmd_kwargs) elif issue == "missing_artifacts": obj["evidence"] = disclosure_cmd(f"ecc artifacts {step}", **cmd_kwargs) obj["config"] = disclosure_cmd(f"ecc config {step} --resolved", **cmd_kwargs) @@ -71,7 +71,7 @@ def _make_issue(issue: str, severity: str, run: str, obj["artifacts"] = disclosure_cmd(f"ecc artifacts {step}", **cmd_kwargs) elif step: obj["evidence"] = disclosure_cmd("ecc status", **cmd_kwargs) - obj["log"] = disclosure_cmd(f"ecc log {step} --errors", **cmd_kwargs) + obj["log"] = disclosure_cmd(f"ecc log {step}", **cmd_kwargs) obj["artifacts"] = disclosure_cmd(f"ecc artifacts {step}", **cmd_kwargs) obj["config"] = disclosure_cmd(f"ecc config {step} --resolved", **cmd_kwargs) diff --git a/chipcompiler/cli/handlers.py b/chipcompiler/cli/handlers.py index e84bc2d3..111e7af5 100644 --- a/chipcompiler/cli/handlers.py +++ b/chipcompiler/cli/handlers.py @@ -82,7 +82,7 @@ def status(args, ctx: CommandContext) -> CommandResult: "status": normalize_state(step.get("state", "")), "runtime": step.get("runtime", "") or None, "metrics_cmd": disclosure_cmd(f"ecc metrics {step_token}", project, ctx.run_id), - "log_cmd": disclosure_cmd(f"ecc log {step_token} --errors", project, ctx.run_id), + "log_cmd": disclosure_cmd(f"ecc log {step_token}", project, ctx.run_id), }) return CommandResult.ok(records) @@ -92,12 +92,11 @@ def log(args, ctx: CommandContext) -> CommandResult: from chipcompiler.cli.inspect import ( discover_logs, discover_step_dirs, - filter_errors, read_log_file, ) + from chipcompiler.cli.log_view import build_log_records step_token = args.step - errors_only = args.errors project = ctx.project if step_token is None: @@ -106,7 +105,7 @@ def log(args, ctx: CommandContext) -> CommandResult: for lf in discover_logs(ctx.run_dir): records.append({ "log": os.path.relpath(lf, ctx.run_dir), - "inspect": disclosure_cmd("ecc log", project, ctx.run_id), + "inspect_cmd": disclosure_cmd("ecc log", project, ctx.run_id), }) step_dirs = discover_step_dirs(ctx.run_dir) @@ -114,8 +113,8 @@ def log(args, ctx: CommandContext) -> CommandResult: for lf in discover_logs(ctx.run_dir, token): records.append({ "step": token, - "log": os.path.relpath(lf, ctx.run_dir), - "inspect": disclosure_cmd(f"ecc log {token} --errors", project, ctx.run_id), + "source": os.path.relpath(lf, ctx.run_dir), + "inspect_cmd": disclosure_cmd(f"ecc log {token}", project, ctx.run_id), }) if not records: @@ -131,7 +130,7 @@ def log(args, ctx: CommandContext) -> CommandResult: return CommandResult.err([{ "step": step_token, "status": "unknown_step", - "inspect": disclosure_cmd("ecc status", project, ctx.run_id), + "inspect_cmd": disclosure_cmd("ecc status", project, ctx.run_id), }]) log_files = discover_logs(ctx.run_dir, step_token) @@ -139,32 +138,30 @@ def log(args, ctx: CommandContext) -> CommandResult: return CommandResult.err([{ 
"step": step_token, "log_status": "missing", - "log_cmd": disclosure_cmd(f"ecc log {step_token} --errors", project, ctx.run_id), + "source": os.path.relpath( + os.path.join(step_dirs[step_token], "log"), ctx.run_dir, + ), + "inspect_cmd": disclosure_cmd(f"ecc log {step_token}", project, ctx.run_id), }]) - matched_lines = [] + inspect_cmd = disclosure_cmd(f"ecc log {step_token}", project, ctx.run_id) + + all_records = [] for lf in log_files: + source = os.path.relpath(lf, ctx.run_dir) raw = read_log_file(lf) - filtered = filter_errors(raw) if errors_only else raw - for line in filtered: - matched_lines.append((lf, line)) + if not raw: + continue + all_records.extend(build_log_records(step_token, source, raw, inspect_cmd)) - if not matched_lines: + if not all_records: return CommandResult.ok([{ "step": step_token, - "log_status": "no_matching_lines", - "log_cmd": disclosure_cmd(f"ecc log {step_token}", project, ctx.run_id), + "log_status": "empty", + "inspect_cmd": inspect_cmd, }]) - records = [] - for lf, line in matched_lines: - records.append({ - "step": step_token, - "source": os.path.relpath(lf, ctx.run_dir), - "line": line, - "log_cmd": disclosure_cmd(f"ecc log {step_token} --errors", project, ctx.run_id), - }) - return CommandResult.ok(records) + return CommandResult.ok(all_records) def metrics(args, ctx: CommandContext) -> CommandResult: @@ -191,7 +188,7 @@ def metrics(args, ctx: CommandContext) -> CommandResult: f"{_internal_from_token(step_token)}_metrics.json"), ctx.run_dir, ), - "log": disclosure_cmd(f"ecc log {step_token} --errors", project, ctx.run_id), + "log": disclosure_cmd(f"ecc log {step_token}", project, ctx.run_id), }]) return CommandResult.err([{ "step": step_token, @@ -214,7 +211,7 @@ def metrics(args, ctx: CommandContext) -> CommandResult: "metric_step": token, "status": "corrupt", "path": os.path.relpath(path, ctx.run_dir), - "log_cmd": disclosure_cmd(f"ecc log {token} --errors", project, ctx.run_id), + "log_cmd": disclosure_cmd(f"ecc log {token}", project, ctx.run_id), }) continue for raw_key, value in data.items(): @@ -257,7 +254,7 @@ def artifacts(args, ctx: CommandContext) -> CommandResult: "step": step_token, "artifacts_status": "none", "inspect_cmd": disclosure_cmd("ecc status", project, ctx.run_id), - "log": disclosure_cmd(f"ecc log {step_token} --errors", project, ctx.run_id), + "log": disclosure_cmd(f"ecc log {step_token}", project, ctx.run_id), }]) return CommandResult.ok([{ "artifacts_status": "none", @@ -277,7 +274,7 @@ def artifacts(args, ctx: CommandContext) -> CommandResult: if a["role"] == "analysis": line_fields["metrics"] = disclosure_cmd(f"ecc metrics {a['step']}", project, ctx.run_id) if a["role"] == "log": - line_fields["inspect"] = disclosure_cmd(f"ecc log {a['step']} --errors", project, ctx.run_id) + line_fields["inspect"] = disclosure_cmd(f"ecc log {a['step']}", project, ctx.run_id) if a["role"] in ("output", "report", "analysis", "log"): line_fields["config"] = disclosure_cmd(f"ecc config {a['step']} --resolved", project, ctx.run_id) records.append(line_fields) diff --git a/chipcompiler/cli/log_view.py b/chipcompiler/cli/log_view.py new file mode 100644 index 00000000..6b0e73de --- /dev/null +++ b/chipcompiler/cli/log_view.py @@ -0,0 +1,219 @@ +import enum +import re + + +class LineKind(enum.Enum): + ERROR = "error" + WARNING = "warning" + INFO = "info" + TRACEBACK = "traceback" + SECTION = "section" + PLAIN = "plain" + + +_TRACEBACK_HEADER = "Traceback (most recent call last):" + +_ERROR_RE = re.compile(r"error", re.IGNORECASE) +_WARNING_RE 
= re.compile(r"warn(?:ing)?", re.IGNORECASE) +_INFO_RE = re.compile(r"^(?:INFO(?:\s*:|\s*\]|:root:)|\[INFO\s*\])") +_SECTION_RE = re.compile(r"^[-=]{3,}$") + + +def classify_line(line: str, in_traceback: bool = False) -> LineKind: + if line.strip() == _TRACEBACK_HEADER: + return LineKind.TRACEBACK + if in_traceback: + stripped = line.strip() + if not stripped: + return LineKind.PLAIN + if line.startswith(" ") or line.startswith("\t"): + return LineKind.TRACEBACK + if _ERROR_RE.search(stripped): + return LineKind.ERROR + return LineKind.PLAIN + if _SECTION_RE.match(line.strip()): + return LineKind.SECTION + if _INFO_RE.match(line): + return LineKind.INFO + if _WARNING_RE.search(line): + return LineKind.WARNING + if _ERROR_RE.search(line): + return LineKind.ERROR + return LineKind.PLAIN + + +class LogLine: + __slots__ = ("line_no", "kind", "text") + + def __init__(self, line_no: int, kind: LineKind, text: str): + self.line_no = line_no + self.kind = kind + self.text = text + + def __eq__(self, other): + if not isinstance(other, LogLine): + return NotImplemented + return (self.line_no, self.kind, self.text) == (other.line_no, other.kind, other.text) + + def __repr__(self): + return f"LogLine({self.line_no!r}, {self.kind!r}, {self.text!r})" + + +def annotate_log_lines(lines: list[str]) -> list[LogLine]: + result = [] + in_traceback = False + for i, text in enumerate(lines): + kind = classify_line(text, in_traceback) + if kind == LineKind.TRACEBACK and text.strip() == _TRACEBACK_HEADER: + in_traceback = True + elif in_traceback and kind == LineKind.ERROR: + in_traceback = False + elif in_traceback and kind == LineKind.PLAIN and not text.startswith(" ") and not text.startswith("\t") and text.strip(): + in_traceback = False + result.append(LogLine(line_no=i + 1, kind=kind, text=text)) + return result + + +def build_log_records( + step: str, + source: str, + lines: list[str], + inspect_cmd: str, +) -> list[dict]: + annotated = annotate_log_lines(lines) + records = [] + for ll in annotated: + records.append({ + "step": step, + "source": source, + "line_no": ll.line_no, + "kind": ll.kind.value, + "line": ll.text, + "inspect_cmd": inspect_cmd, + }) + return records + + +# --- Pretty rendering --- + +_BOLD = "\x1b[1m" +_DIM = "\x1b[2m" +_RED = "\x1b[31m" +_YELLOW = "\x1b[33m" +_CYAN = "\x1b[36m" +_BLUE = "\x1b[34m" +_RESET = "\x1b[0m" + +_KIND_LABEL = { + LineKind.ERROR: "error", + LineKind.WARNING: "warn ", + LineKind.INFO: "info ", + LineKind.TRACEBACK: "trace", + LineKind.SECTION: "-----", + LineKind.PLAIN: " ", +} + +_KIND_COLOR = { + LineKind.ERROR: _RED, + LineKind.WARNING: _YELLOW, + LineKind.TRACEBACK: _YELLOW, + LineKind.INFO: _BLUE, + LineKind.SECTION: _CYAN, +} + + +def _should_colorize(stream) -> bool: + import os + from chipcompiler.cli.types import OutputMode + if not hasattr(stream, "isatty") or not stream.isatty(): + return False + if os.environ.get("NO_COLOR") is not None: + return False + if os.environ.get("TERM", "") == "dumb": + return False + return True + + +def render_log_pretty( + step: str, + source: str, + lines: list[str], + inspect_cmd: str, + file=None, + color: bool = True, +) -> None: + import sys + target = file or sys.stdout + annotated = annotate_log_lines(lines) + + if color: + target.write(f"{_BOLD}[log]{_RESET} step={step}\n") + target.write(f" {_DIM}source:{_RESET} {source}\n") + else: + target.write(f"[log] step={step}\n") + target.write(f" source: {source}\n") + + for ll in annotated: + label = _KIND_LABEL[ll.kind] + if color and ll.kind in _KIND_COLOR: + code = 
_KIND_COLOR[ll.kind] + target.write(f" {code}{label}{_RESET} {ll.text}\n") + else: + target.write(f" {label} {ll.text}\n") + + if color: + target.write(f" {_DIM}inspect:{_RESET} {inspect_cmd}\n") + else: + target.write(f" inspect: {inspect_cmd}\n") + + +def render_log_plain( + step: str, + source: str, + lines: list[str], + inspect_cmd: str, + file=None, +) -> None: + import sys + target = file or sys.stdout + records = build_log_records(step, source, lines, inspect_cmd) + for rec in records: + parts = [] + for key in ("step", "source", "line_no", "kind", "line"): + parts.append(f"{key}={rec[key]}") + parts.append(f"inspect={rec['inspect_cmd']}") + target.write(" ".join(parts) + "\n") + + +def render_log_listing_pretty( + records: list[dict], + file=None, + color: bool = True, +) -> None: + import sys + target = file or sys.stdout + + if color: + target.write(f"{_BOLD}[logs]{_RESET}\n") + else: + target.write("[logs]\n") + + for rec in records: + step = rec.get("step", "") + source = rec.get("source") or rec.get("log", "") + inspect = rec.get("inspect_cmd") or rec.get("inspect", "") + + if step: + if color: + target.write(f" {_CYAN}{step}{_RESET} {source}\n") + target.write(f" {_DIM}inspect:{_RESET} {inspect}\n") + else: + target.write(f" {step} {source}\n") + target.write(f" inspect: {inspect}\n") + else: + if color: + target.write(f" {source}\n") + target.write(f" {_DIM}inspect:{_RESET} {inspect}\n") + else: + target.write(f" {source}\n") + target.write(f" inspect: {inspect}\n") diff --git a/chipcompiler/cli/main.py b/chipcompiler/cli/main.py index e2c6c0c5..a8b221a3 100644 --- a/chipcompiler/cli/main.py +++ b/chipcompiler/cli/main.py @@ -43,7 +43,9 @@ def build_parser() -> argparse.ArgumentParser: log_parser = subparsers.add_parser("log", help="Inspect step logs") _add_project_arg(log_parser) log_parser.add_argument("step", nargs="?", default=None, help="Step name") - log_parser.add_argument("--errors", action="store_true", help="Filter error lines") + log_parser.add_argument("--errors", action="store_true", + help=argparse.SUPPRESS) + log_parser.add_argument("--plain", action="store_true", help="Plain key-value output") log_parser.add_argument("--jsonl", action="store_true", help="JSONL output") log_parser.add_argument("--run-id", default=None, dest="run_id", help="Run workspace selector") @@ -157,6 +159,53 @@ def _render_param_text(args, result) -> None: render_result(result, OutputMode.PLAIN) +def _should_colorize(): + import os + if not sys.stdout.isatty(): + return False + if os.environ.get("NO_COLOR") is not None: + return False + if os.environ.get("TERM", "") == "dumb": + return False + return True + + +def _render_log_text(args, result) -> None: + from chipcompiler.cli.log_view import ( + render_log_listing_pretty, + render_log_pretty, + ) + + if result.exit_code != 0: + render_result(result, OutputMode.PLAIN) + return + + records = result.records + if not records: + return + + first = records[0] + + # Status/sentinel records (no_logs, empty, etc.) 
+ if "log_status" in first or "status" in first: + render_result(result, OutputMode.PLAIN) + return + + color = _should_colorize() + + # Step mode: records have line_no and kind + if "line_no" in first: + step = first["step"] + source = first["source"] + lines = [r["line"] for r in records] + inspect_cmd = first.get("inspect_cmd", "") + render_log_pretty(step, source, lines, inspect_cmd, color=color) + return + + # Listing mode + render_log_listing_pretty(list(records), color=color) + + def run(argv: Sequence[str] | None = None) -> int: parser = build_parser() args = parser.parse_args(list(argv) if argv is not None else None) @@ -170,6 +219,8 @@ def run(argv: Sequence[str] | None = None) -> int: if args.command == "param" and ctx.output_mode == OutputMode.TEXT: _render_param_text(args, result) + elif args.command == "log" and ctx.output_mode == OutputMode.TEXT: + _render_log_text(args, result) else: render_result(result, ctx.output_mode) diff --git a/chipcompiler/cli/progress.py b/chipcompiler/cli/progress.py index b9cbc4cf..20f9021f 100644 --- a/chipcompiler/cli/progress.py +++ b/chipcompiler/cli/progress.py @@ -204,7 +204,7 @@ def run_flow_with_progress(engine_flow, ctx, project, stderr): except ValueError: rel_log = log_path - inspect = disclosure_cmd(f"ecc log {step_token} --errors", project) + inspect = disclosure_cmd(f"ecc log {step_token}", project) is_success = state == StateEnum.Success renderer.finish_step(step_token, tool, status, runtime, rel_log, inspect, is_success) diff --git a/test/cli/test_cli_inspect.py b/test/cli/test_cli_inspect.py index e5b57fae..a5c4565e 100644 --- a/test/cli/test_cli_inspect.py +++ b/test/cli/test_cli_inspect.py @@ -1103,7 +1103,7 @@ def test_log_errors_uses_log_command(self, tmp_path, capsys): assert rc == 1 out = capsys.readouterr().out log_errors_line = [l for l in out.strip().split("\n") if "issue=log_errors" in l][0] - assert "ecc log cts --errors" in log_errors_line + assert "ecc log cts" in log_errors_line def test_missing_metrics_uses_metrics_command(self, tmp_path, capsys): project_dir = _create_valid_project(tmp_path) diff --git a/test/cli/test_cli_main.py b/test/cli/test_cli_main.py index c67ca969..e0d8f901 100644 --- a/test/cli/test_cli_main.py +++ b/test/cli/test_cli_main.py @@ -518,12 +518,13 @@ def test_log_step_errors(self, tmp_path, capsys): with open(os.path.join(step_dir, "synthesis.log"), "w") as f: f.write("Info: running\nError: bad thing\nWarning: meh\nTraceback: crash\n") - rc = cli_main.run(["log", "synthesis", "--errors", "--project", project_dir]) + rc = cli_main.run(["log", "synthesis", "--project", project_dir]) assert rc == 0 out = capsys.readouterr().out assert "Error: bad thing" in out assert "Traceback: crash" in out - assert "Info: running" not in out + assert "Warning: meh" in out + assert "Info: running" in out def test_log_step_errors_jsonl(self, tmp_path, capsys): project_dir = _create_valid_project(tmp_path) @@ -552,7 +553,7 @@ def test_log_no_step_shows_locations(self, tmp_path, capsys): rc = cli_main.run(["log", "--project", project_dir]) assert rc == 0 out = capsys.readouterr().out - assert 'inspect="ecc log' in out + assert 'ecc log' in out def test_log_no_step_discovers_step_logs(self, tmp_path, capsys): project_dir = _create_valid_project(tmp_path) @@ -566,9 +567,9 @@ def test_log_no_step_discovers_step_logs(self, tmp_path, capsys): rc = cli_main.run(["log", "--project", project_dir]) assert rc == 0 out = capsys.readouterr().out - assert "step=synthesis" in out + assert "synthesis" in out assert 
"Synthesis_yosys/log/synthesis.log" in out - assert 'inspect="ecc log synthesis --errors' in out + assert "ecc log synthesis" in out def test_log_no_step_global_logs_have_disclosure(self, tmp_path, capsys): project_dir = _create_valid_project(tmp_path) @@ -581,9 +582,7 @@ def test_log_no_step_global_logs_have_disclosure(self, tmp_path, capsys): rc = cli_main.run(["log", "--project", project_dir]) assert rc == 0 out = capsys.readouterr().out - for line in out.strip().split("\n"): - if line.strip(): - assert _has_disclosure(line), f"Missing disclosure in: {line}" + assert "ecc log" in out def test_log_unknown_step(self, tmp_path, capsys): project_dir = _create_valid_project(tmp_path) @@ -704,7 +703,7 @@ def test_metrics_missing_file(self, tmp_path, capsys): assert rc == 1 out = capsys.readouterr().out assert "status=missing" in out - assert 'log="ecc log cts --errors' in out + assert 'log="ecc log cts' in out def test_metrics_json_unknown_step(self, tmp_path, capsys): project_dir = _create_valid_project(tmp_path) @@ -802,13 +801,11 @@ def test_log_error_lines_have_disclosure(self, tmp_path, capsys): f.write("Error: something failed\n") rc = cli_main.run( - ["log", "synthesis", "--errors", "--project", project_dir] + ["log", "synthesis", "--project", project_dir] ) assert rc == 0 out = capsys.readouterr().out - for line in out.strip().split("\n"): - if line.strip(): - assert _has_disclosure(line), f"Missing disclosure in: {line}" + assert "ecc log synthesis" in out def test_project_arg_propagated_to_disclosure(self, tmp_path, capsys): project_dir = _create_valid_project(tmp_path) @@ -994,3 +991,345 @@ def test_check_missing_config_has_disclosure_command(self, tmp_path, capsys): record = data["records"][0] assert "inspect" in record or "inspect_cmd" in record + +# =========================================================================== +# Log output refactoring integration tests +# =========================================================================== + + +class TestLogDefaultShowsAllContent: + """AC-1: Default ecc log renders complete log content.""" + + def test_default_shows_all_lines(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + step_dir = os.path.join(run_dir, "Synthesis_yosys", "log") + os.makedirs(step_dir, exist_ok=True) + with open(os.path.join(step_dir, "synthesis.log"), "w") as f: + f.write("INFO: starting\nsome output\nError: bad\nWarning: meh\n") + + rc = cli_main.run(["log", "synthesis", "--project", project_dir]) + assert rc == 0 + out = capsys.readouterr().out + assert "INFO: starting" in out + assert "some output" in out + assert "Error: bad" in out + assert "Warning: meh" in out + + def test_default_includes_header(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + step_dir = os.path.join(run_dir, "Synthesis_yosys", "log") + os.makedirs(step_dir, exist_ok=True) + with open(os.path.join(step_dir, "synthesis.log"), "w") as f: + f.write("ok\n") + + rc = cli_main.run(["log", "synthesis", "--project", project_dir]) + assert rc == 0 + out = capsys.readouterr().out + assert "[log]" in out + assert "step=synthesis" in out + assert "source:" in out + + def test_blank_lines_preserved(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + step_dir = os.path.join(run_dir, "Synthesis_yosys", "log") + os.makedirs(step_dir, exist_ok=True) + 
with open(os.path.join(step_dir, "synthesis.log"), "w") as f: + f.write("line1\n\nline3\n") + + rc = cli_main.run(["log", "synthesis", "--project", project_dir]) + assert rc == 0 + out = capsys.readouterr().out + assert "line1" in out + assert "line3" in out + + +class TestLogTracebackComplete: + """AC-2: Python traceback blocks remain complete and contiguous.""" + + def test_traceback_complete_in_default_output(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + step_dir = os.path.join(run_dir, "Synthesis_yosys", "log") + os.makedirs(step_dir, exist_ok=True) + with open(os.path.join(step_dir, "synthesis.log"), "w") as f: + f.write( + "INFO: before\n" + "Traceback (most recent call last):\n" + ' File "app.py", line 42, in run\n' + " result = compute()\n" + " ^^^^^^^^^\n" + "ValueError: invalid value\n" + "INFO: after\n" + ) + + rc = cli_main.run(["log", "synthesis", "--project", project_dir]) + assert rc == 0 + out = capsys.readouterr().out + assert "Traceback (most recent call last):" in out + assert 'File "app.py", line 42' in out + assert "result = compute()" in out + assert "^^^^^^^^^" in out + assert "ValueError: invalid value" in out + + def test_traceback_complete_in_jsonl(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + step_dir = os.path.join(run_dir, "Synthesis_yosys", "log") + os.makedirs(step_dir, exist_ok=True) + with open(os.path.join(step_dir, "synthesis.log"), "w") as f: + f.write( + "Traceback (most recent call last):\n" + ' File "a.py", line 1\n' + "ValueError: fail\n" + ) + + rc = cli_main.run(["log", "synthesis", "--jsonl", "--project", project_dir]) + assert rc == 0 + objects = [json.loads(ln) for ln in capsys.readouterr().out.strip().split("\n")] + assert objects[0]["kind"] == "traceback" + assert objects[1]["kind"] == "traceback" + assert objects[2]["kind"] == "error" + + +class TestLogPlainMode: + """AC-5: --plain emits full-content stable line records.""" + + def test_plain_has_all_fields(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + step_dir = os.path.join(run_dir, "Synthesis_yosys", "log") + os.makedirs(step_dir, exist_ok=True) + with open(os.path.join(step_dir, "synthesis.log"), "w") as f: + f.write("Error: bad\nINFO: ok\n") + + rc = cli_main.run(["log", "synthesis", "--plain", "--project", project_dir]) + assert rc == 0 + out = capsys.readouterr().out + lines = [l for l in out.strip().split("\n") if l.strip()] + assert len(lines) == 2 + assert "step=synthesis" in lines[0] + assert "line_no=1" in lines[0] + assert "kind=error" in lines[0] + assert "line_no=2" in lines[1] + assert "kind=info" in lines[1] + + def test_plain_no_ansi(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + step_dir = os.path.join(run_dir, "Synthesis_yosys", "log") + os.makedirs(step_dir, exist_ok=True) + with open(os.path.join(step_dir, "synthesis.log"), "w") as f: + f.write("Error: bad\n") + + rc = cli_main.run(["log", "synthesis", "--plain", "--project", project_dir]) + assert rc == 0 + out = capsys.readouterr().out + assert "\x1b[" not in out + + +class TestLogJsonlMode: + """AC-6: --jsonl emits full-content structured log objects.""" + + def test_jsonl_per_line_objects(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = 
os.path.join(project_dir, "runs", "default") + step_dir = os.path.join(run_dir, "Synthesis_yosys", "log") + os.makedirs(step_dir, exist_ok=True) + with open(os.path.join(step_dir, "synthesis.log"), "w") as f: + f.write("Error: bad\nINFO: ok\nplain\n") + + rc = cli_main.run(["log", "synthesis", "--jsonl", "--project", project_dir]) + assert rc == 0 + objects = [json.loads(ln) for ln in capsys.readouterr().out.strip().split("\n")] + assert len(objects) == 3 + for obj in objects: + assert "step" in obj + assert "source" in obj + assert "line_no" in obj + assert "kind" in obj + assert "line" in obj + assert "inspect_cmd" in obj + + def test_jsonl_no_ansi(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + step_dir = os.path.join(run_dir, "Synthesis_yosys", "log") + os.makedirs(step_dir, exist_ok=True) + with open(os.path.join(step_dir, "synthesis.log"), "w") as f: + f.write("Error: bad\n") + + rc = cli_main.run(["log", "synthesis", "--jsonl", "--project", project_dir]) + assert rc == 0 + out = capsys.readouterr().out + assert "\x1b[" not in out + + +class TestLogListingMode: + """AC-7: ecc log without step lists available logs.""" + + def test_listing_shows_logs(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + step_dir = os.path.join(run_dir, "Synthesis_yosys", "log") + os.makedirs(step_dir, exist_ok=True) + with open(os.path.join(step_dir, "synthesis.log"), "w") as f: + f.write("content\n") + + rc = cli_main.run(["log", "--project", project_dir]) + assert rc == 0 + out = capsys.readouterr().out + assert "synthesis" in out + assert "ecc log synthesis" in out + + def test_listing_no_logs_returns_no_log_status(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + os.makedirs(run_dir, exist_ok=True) + + rc = cli_main.run(["log", "--project", project_dir]) + assert rc == 0 + out = capsys.readouterr().out + assert "no_logs" in out + + def test_listing_jsonl_records(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + step_dir = os.path.join(run_dir, "Synthesis_yosys", "log") + os.makedirs(step_dir, exist_ok=True) + with open(os.path.join(step_dir, "synthesis.log"), "w") as f: + f.write("content\n") + + rc = cli_main.run(["log", "--jsonl", "--project", project_dir]) + assert rc == 0 + objects = [json.loads(ln) for ln in capsys.readouterr().out.strip().split("\n") if ln.strip()] + assert any("step" in o for o in objects) + + +class TestLogErrorCases: + """AC-9: Error cases are structured and readable.""" + + def test_unknown_step_returns_nonzero(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + os.makedirs(run_dir, exist_ok=True) + + rc = cli_main.run(["log", "nonexistent", "--project", project_dir]) + assert rc == 1 + out = capsys.readouterr().out + assert "unknown_step" in out + + def test_unknown_step_json(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + os.makedirs(run_dir, exist_ok=True) + + rc = cli_main.run(["log", "nonexistent", "--jsonl", "--project", project_dir]) + assert rc == 1 + record = json.loads(capsys.readouterr().out.strip()) + assert record["status"] == "unknown_step" + + def 
test_known_step_no_logs_returns_nonzero(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + os.makedirs(os.path.join(run_dir, "Synthesis_yosys"), exist_ok=True) + + rc = cli_main.run(["log", "synthesis", "--project", project_dir]) + assert rc == 1 + out = capsys.readouterr().out + assert "missing" in out + + def test_known_step_no_logs_json(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + os.makedirs(os.path.join(run_dir, "Synthesis_yosys"), exist_ok=True) + + rc = cli_main.run(["log", "synthesis", "--jsonl", "--project", project_dir]) + assert rc == 1 + record = json.loads(capsys.readouterr().out.strip()) + assert record["log_status"] == "missing" + + def test_empty_log_returns_zero(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + step_dir = os.path.join(run_dir, "Synthesis_yosys", "log") + os.makedirs(step_dir, exist_ok=True) + with open(os.path.join(step_dir, "synthesis.log"), "w") as f: + f.write("") + + rc = cli_main.run(["log", "synthesis", "--project", project_dir]) + assert rc == 0 + out = capsys.readouterr().out + assert "empty" in out + + +class TestLogNoErrorsInDisclosure: + """AC-8: Disclosure commands do not include --errors.""" + + def test_listing_disclosure_no_errors(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + step_dir = os.path.join(run_dir, "Synthesis_yosys", "log") + os.makedirs(step_dir, exist_ok=True) + with open(os.path.join(step_dir, "synthesis.log"), "w") as f: + f.write("ok\n") + + rc = cli_main.run(["log", "--jsonl", "--project", project_dir]) + assert rc == 0 + out = capsys.readouterr().out + assert "--errors" not in out + + def test_step_log_inspect_no_errors(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + step_dir = os.path.join(run_dir, "Synthesis_yosys", "log") + os.makedirs(step_dir, exist_ok=True) + with open(os.path.join(step_dir, "synthesis.log"), "w") as f: + f.write("ok\n") + + rc = cli_main.run(["log", "synthesis", "--jsonl", "--project", project_dir]) + assert rc == 0 + out = capsys.readouterr().out + assert "--errors" not in out + + def test_status_disclosure_no_errors(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + _create_flow_json(run_dir) + + rc = cli_main.run(["status", "--project", project_dir]) + assert rc == 0 + out = capsys.readouterr().out + assert "--errors" not in out + + def test_metrics_disclosure_no_errors(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + step_dir = os.path.join(run_dir, "Synthesis_yosys", "log") + os.makedirs(step_dir, exist_ok=True) + analysis_dir = os.path.join(run_dir, "Synthesis_yosys", "analysis") + os.makedirs(analysis_dir, exist_ok=True) + with open(os.path.join(analysis_dir, "Synthesis_metrics.json"), "w") as f: + json.dump({"Cell number": 100}, f) + + rc = cli_main.run(["metrics", "synthesis", "--project", project_dir]) + assert rc == 0 + out = capsys.readouterr().out + assert "--errors" not in out + + def test_artifacts_log_disclosure_no_errors(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = 
os.path.join(project_dir, "runs", "default") + _create_flow_json(run_dir) + log_dir = os.path.join(run_dir, "CTS_ecc", "log") + os.makedirs(log_dir, exist_ok=True) + with open(os.path.join(log_dir, "cts.log"), "w") as f: + f.write("log content\n") + + rc = cli_main.run(["artifacts", "cts", "--project", project_dir]) + assert rc == 0 + out = capsys.readouterr().out + assert "--errors" not in out + diff --git a/test/cli/test_log_view.py b/test/cli/test_log_view.py new file mode 100644 index 00000000..f56075b7 --- /dev/null +++ b/test/cli/test_log_view.py @@ -0,0 +1,402 @@ +import pytest + +from chipcompiler.cli.log_view import ( + LineKind, + annotate_log_lines, + build_log_records, + classify_line, + render_log_listing_pretty, + render_log_plain, + render_log_pretty, +) + + +class TestClassifyLine: + def test_error_keyword(self): + assert classify_line("Error: something failed") == LineKind.ERROR + + def test_error_case_insensitive(self): + assert classify_line("ERROR: critical") == LineKind.ERROR + + def test_warning_keyword(self): + assert classify_line("Warning: check this") == LineKind.WARNING + + def test_warn_keyword(self): + assert classify_line("WARN: deprecated") == LineKind.WARNING + + def test_info_prefix(self): + assert classify_line("INFO: running step") == LineKind.INFO + + def test_info_bracket(self): + assert classify_line("[INFO ] running step") == LineKind.INFO + + def test_info_root(self): + assert classify_line("INFO:root: message") == LineKind.INFO + + def test_traceback_header(self): + assert classify_line("Traceback (most recent call last):") == LineKind.TRACEBACK + + def test_section_separator(self): + assert classify_line("---") == LineKind.SECTION + + def test_section_equals(self): + assert classify_line("==========") == LineKind.SECTION + + def test_plain_line(self): + assert classify_line("some ordinary output") == LineKind.PLAIN + + def test_plain_empty(self): + assert classify_line("") == LineKind.PLAIN + + def test_plain_whitespace(self): + assert classify_line(" ") == LineKind.PLAIN + + def test_traceback_header_indented(self): + assert classify_line(" Traceback (most recent call last):") == LineKind.TRACEBACK + + def test_error_inside_traceback_stops_traceback(self): + assert classify_line("ValueError: bad", in_traceback=True) == LineKind.ERROR + + def test_indented_line_in_traceback(self): + assert classify_line(' File "test.py", line 1', in_traceback=True) == LineKind.TRACEBACK + + def test_tab_indented_line_in_traceback(self): + assert classify_line("\tFile \"test.py\", line 1", in_traceback=True) == LineKind.TRACEBACK + + +class TestClassifyDoesNotFilter: + """Classification must never remove or hide lines.""" + + def test_every_line_gets_a_kind(self): + lines = [ + "Error: bad", + "Warning: meh", + "INFO: ok", + "---", + "plain text", + "", + "Traceback (most recent call last):", + ' File "a.py", line 1', + "ValueError: fail", + ] + annotated = annotate_log_lines(lines) + assert len(annotated) == len(lines) + + def test_classification_preserves_text(self): + text = "Error: something went wrong" + assert classify_line(text).value # just returns a kind, text is separate + + +class TestTracebackAnnotation: + def test_complete_traceback_block(self): + lines = [ + "Traceback (most recent call last):", + ' File "app.py", line 42, in run', + " result = compute()", + " ^^^^^^^^^", + "ValueError: invalid value", + ] + annotated = annotate_log_lines(lines) + assert annotated[0].kind == LineKind.TRACEBACK + assert annotated[1].kind == LineKind.TRACEBACK + assert 
annotated[2].kind == LineKind.TRACEBACK + assert annotated[3].kind == LineKind.TRACEBACK + assert annotated[4].kind == LineKind.ERROR + + def test_traceback_with_blank_source_line(self): + lines = [ + "Traceback (most recent call last):", + ' File "app.py", line 10, in ', + "", + "ValueError: oops", + ] + annotated = annotate_log_lines(lines) + assert annotated[0].kind == LineKind.TRACEBACK + assert annotated[1].kind == LineKind.TRACEBACK + assert annotated[2].kind == LineKind.PLAIN + assert annotated[3].kind == LineKind.ERROR + + def test_traceback_exits_on_non_indented_non_error(self): + lines = [ + "Traceback (most recent call last):", + ' File "a.py", line 1', + "ValueError: bad", + "next log line", + ] + annotated = annotate_log_lines(lines) + assert annotated[0].kind == LineKind.TRACEBACK + assert annotated[1].kind == LineKind.TRACEBACK + assert annotated[2].kind == LineKind.ERROR + assert annotated[3].kind == LineKind.PLAIN + + def test_traceback_order_preserved(self): + lines = [ + "Traceback (most recent call last):", + ' File "a.py", line 1', + ' File "b.py", line 2', + ' File "c.py", line 3', + "RuntimeError: end", + ] + annotated = annotate_log_lines(lines) + kinds = [a.kind for a in annotated] + assert kinds == [ + LineKind.TRACEBACK, + LineKind.TRACEBACK, + LineKind.TRACEBACK, + LineKind.TRACEBACK, + LineKind.ERROR, + ] + + def test_pre_traceback_info_preserved(self): + lines = [ + "INFO: starting step", + "some output", + "Traceback (most recent call last):", + ' File "a.py", line 1', + "ValueError: fail", + ] + annotated = annotate_log_lines(lines) + assert annotated[0].kind == LineKind.INFO + assert annotated[1].kind == LineKind.PLAIN + assert annotated[2].kind == LineKind.TRACEBACK + + +class TestAnnotateLineNumbers: + def test_line_numbers_start_at_one(self): + lines = ["first", "second", "third"] + annotated = annotate_log_lines(lines) + assert [a.line_no for a in annotated] == [1, 2, 3] + + def test_empty_input(self): + assert annotate_log_lines([]) == [] + + +# --- Renderer tests --- + + +class TestBuildLogRecords: + def test_builds_records_with_all_fields(self): + lines = ["Error: bad", "INFO: ok"] + records = build_log_records("synthesis", "log/synthesis.log", lines, "ecc log synthesis") + assert len(records) == 2 + assert records[0]["step"] == "synthesis" + assert records[0]["source"] == "log/synthesis.log" + assert records[0]["line_no"] == 1 + assert records[0]["kind"] == "error" + assert records[0]["line"] == "Error: bad" + assert records[0]["inspect_cmd"] == "ecc log synthesis" + + def test_traceback_frames_in_records(self): + lines = [ + "Traceback (most recent call last):", + ' File "a.py", line 1', + "ValueError: fail", + ] + records = build_log_records("cts", "log/cts.log", lines, "ecc log cts") + assert records[0]["kind"] == "traceback" + assert records[1]["kind"] == "traceback" + assert records[2]["kind"] == "error" + + +class TestPrettyRenderer: + def test_header_includes_step_and_source(self): + from io import StringIO + buf = StringIO() + render_log_pretty("cts", "log/cts.log", ["ok"], "ecc log cts", file=buf, color=False) + out = buf.getvalue() + assert "[log] step=cts" in out + assert "source: log/cts.log" in out + + def test_all_lines_appear_in_output(self): + from io import StringIO + lines = ["Error: bad", "INFO: ok", "plain line", "---", "Warning: meh"] + buf = StringIO() + render_log_pretty("cts", "log/cts.log", lines, "ecc log cts", file=buf, color=False) + out = buf.getvalue() + for line in lines: + assert line in out + + def 
test_traceback_complete_in_output(self): + from io import StringIO + lines = [ + "INFO: before", + "Traceback (most recent call last):", + ' File "a.py", line 1', + " x = bad()", + " ^^^^^", + "ValueError: oops", + "INFO: after", + ] + buf = StringIO() + render_log_pretty("cts", "log/cts.log", lines, "ecc log cts", file=buf, color=False) + out = buf.getvalue() + for line in lines: + assert line in out + + def test_inspect_footer(self): + from io import StringIO + buf = StringIO() + render_log_pretty("cts", "log/cts.log", ["ok"], "ecc log cts", file=buf, color=False) + out = buf.getvalue() + assert "inspect: ecc log cts" in out + + def test_no_ansi_when_color_disabled(self): + from io import StringIO + buf = StringIO() + render_log_pretty("cts", "log/cts.log", ["Error: bad"], "ecc log cts", file=buf, color=False) + assert "\x1b[" not in buf.getvalue() + + def test_ansi_when_color_enabled(self): + from io import StringIO + buf = StringIO() + render_log_pretty("cts", "log/cts.log", ["Error: bad"], "ecc log cts", file=buf, color=True) + assert "\x1b[" in buf.getvalue() + + def test_error_colored_red(self): + from io import StringIO + buf = StringIO() + render_log_pretty("cts", "log/cts.log", ["Error: bad"], "ecc log cts", file=buf, color=True) + out = buf.getvalue() + assert "\x1b[31m" in out + + def test_warning_colored_yellow(self): + from io import StringIO + buf = StringIO() + render_log_pretty("cts", "log/cts.log", ["Warning: meh"], "ecc log cts", file=buf, color=True) + out = buf.getvalue() + assert "\x1b[33m" in out + + def test_section_colored_cyan(self): + from io import StringIO + buf = StringIO() + render_log_pretty("cts", "log/cts.log", ["---"], "ecc log cts", file=buf, color=True) + out = buf.getvalue() + assert "\x1b[36m" in out + + def test_info_colored_blue(self): + from io import StringIO + buf = StringIO() + render_log_pretty("cts", "log/cts.log", ["INFO: ok"], "ecc log cts", file=buf, color=True) + out = buf.getvalue() + assert "\x1b[34m" in out + + def test_traceback_colored_yellow(self): + from io import StringIO + lines = [ + "Traceback (most recent call last):", + ' File "a.py", line 1', + "ValueError: bad", + ] + buf = StringIO() + render_log_pretty("cts", "log/cts.log", lines, "ecc log cts", file=buf, color=True) + out = buf.getvalue() + assert "\x1b[33m" in out + + +class TestPlainRenderer: + def test_emits_one_record_per_line(self): + from io import StringIO + lines = ["Error: bad", "INFO: ok", "plain"] + buf = StringIO() + render_log_plain("cts", "log/cts.log", lines, "ecc log cts", file=buf) + out_lines = [l for l in buf.getvalue().strip().split("\n") if l.strip()] + assert len(out_lines) == 3 + + def test_record_has_required_fields(self): + from io import StringIO + buf = StringIO() + render_log_plain("cts", "log/cts.log", ["ok"], "ecc log cts", file=buf) + line = buf.getvalue().strip() + assert "step=cts" in line + assert "source=log/cts.log" in line + assert "line_no=1" in line + assert "kind=plain" in line + assert "line=ok" in line + assert "inspect=ecc log cts" in line + + def test_no_ansi_in_plain(self): + from io import StringIO + buf = StringIO() + render_log_plain("cts", "log/cts.log", ["Error: bad"], "ecc log cts", file=buf) + assert "\x1b[" not in buf.getvalue() + + def test_traceback_frames_in_plain(self): + from io import StringIO + lines = [ + "Traceback (most recent call last):", + ' File "a.py", line 1', + "ValueError: fail", + ] + buf = StringIO() + render_log_plain("cts", "log/cts.log", lines, "ecc log cts", file=buf) + out_lines = [l for l in 
buf.getvalue().strip().split("\n") if l.strip()] + assert len(out_lines) == 3 + assert "kind=traceback" in out_lines[0] + assert "kind=traceback" in out_lines[1] + assert "kind=error" in out_lines[2] + + +class TestColorGuards: + def test_no_color_when_not_tty(self): + import io + from unittest.mock import patch + + class FakeNonTTY: + def isatty(self): + return False + + with patch("sys.stdout", FakeNonTTY()): + buf = io.StringIO() + render_log_pretty("cts", "log/cts.log", ["Error: bad"], "ecc log cts", file=buf, color=False) + assert "\x1b[" not in buf.getvalue() + + def test_no_color_when_no_color_env(self): + import os + import io + from unittest.mock import patch + + with patch.dict(os.environ, {"NO_COLOR": "1"}): + buf = io.StringIO() + render_log_pretty("cts", "log/cts.log", ["Error: bad"], "ecc log cts", file=buf, color=False) + assert "\x1b[" not in buf.getvalue() + + def test_no_color_when_term_dumb(self): + import os + import io + from unittest.mock import patch + + with patch.dict(os.environ, {"TERM": "dumb"}): + buf = io.StringIO() + render_log_pretty("cts", "log/cts.log", ["Error: bad"], "ecc log cts", file=buf, color=False) + assert "\x1b[" not in buf.getvalue() + + +class TestListingPrettyRenderer: + def test_listing_header(self): + from io import StringIO + records = [ + {"step": "synthesis", "source": "Synthesis_yosys/log/synthesis.log", "inspect_cmd": "ecc log synthesis"}, + ] + buf = StringIO() + render_log_listing_pretty(records, file=buf, color=False) + assert "[logs]" in buf.getvalue() + + def test_listing_shows_step_and_source(self): + from io import StringIO + records = [ + {"step": "synthesis", "source": "Synthesis_yosys/log/synthesis.log", "inspect_cmd": "ecc log synthesis"}, + ] + buf = StringIO() + render_log_listing_pretty(records, file=buf, color=False) + out = buf.getvalue() + assert "synthesis" in out + assert "Synthesis_yosys/log/synthesis.log" in out + + def test_listing_inspect_cmd(self): + from io import StringIO + records = [ + {"step": "synthesis", "source": "Synthesis_yosys/log/synthesis.log", "inspect_cmd": "ecc log synthesis"}, + ] + buf = StringIO() + render_log_listing_pretty(records, file=buf, color=False) + assert "ecc log synthesis" in buf.getvalue() diff --git a/test/cli/test_progress.py b/test/cli/test_progress.py index 21f405e2..8e823995 100644 --- a/test/cli/test_progress.py +++ b/test/cli/test_progress.py @@ -421,7 +421,7 @@ def test_summary_includes_inspect_detail_line(self): buf = FakeTTYStderr(True) run_flow_with_progress(flow, _make_ctx(), "myproject", buf) plain = _strip_ansi("".join(buf.written)) - assert " inspect: ecc log synthesis --errors --project myproject\n" in plain + assert " inspect: ecc log synthesis --project myproject\n" in plain def test_summary_includes_log_detail_line(self, tmp_path): log_file = tmp_path / "synth.log" From d4a0842a04b787634f45d5955daad21de5646ed3 Mon Sep 17 00:00:00 2001 From: Emin Date: Mon, 4 May 2026 14:45:50 +0800 Subject: [PATCH 068/104] fix(cli): address Codex review gaps in log output refactoring - Broaden traceback exception classification to match Python exception terminators (Exception:, etc.) 
not just *Error subclasses - Fix multi-source pretty rendering: group records by source file and call render_log_pretty per group instead of collapsing all sources - Make plain output stable with quoted/escaped values for whitespace and backslashes - Handle unreadable log files as non-zero OS errors with structured output instead of silently treating them as empty - Add visible deprecation warning on stderr when --errors is used - Remove unused _should_colorize and OutputMode import from log_view.py - Add 10 new tests for exception classification, unreadable logs, multi-source rendering, deprecation notice, and stable plain format --- chipcompiler/cli/handlers.py | 13 ++++- chipcompiler/cli/log_view.py | 28 +++++----- chipcompiler/cli/main.py | 26 +++++++-- test/cli/test_cli_main.py | 105 +++++++++++++++++++++++++++++++++++ test/cli/test_log_view.py | 35 +++++++++++- 5 files changed, 185 insertions(+), 22 deletions(-) diff --git a/chipcompiler/cli/handlers.py b/chipcompiler/cli/handlers.py index 111e7af5..010a8daf 100644 --- a/chipcompiler/cli/handlers.py +++ b/chipcompiler/cli/handlers.py @@ -92,7 +92,6 @@ def log(args, ctx: CommandContext) -> CommandResult: from chipcompiler.cli.inspect import ( discover_logs, discover_step_dirs, - read_log_file, ) from chipcompiler.cli.log_view import build_log_records @@ -149,7 +148,17 @@ def log(args, ctx: CommandContext) -> CommandResult: all_records = [] for lf in log_files: source = os.path.relpath(lf, ctx.run_dir) - raw = read_log_file(lf) + try: + with open(lf, errors="replace") as f: + raw = f.read().splitlines() + except OSError as exc: + return CommandResult.err([{ + "step": step_token, + "log_status": "unreadable", + "source": source, + "error": str(exc), + "inspect_cmd": inspect_cmd, + }]) if not raw: continue all_records.extend(build_log_records(step_token, source, raw, inspect_cmd)) diff --git a/chipcompiler/cli/log_view.py b/chipcompiler/cli/log_view.py index 6b0e73de..c2a3f1a9 100644 --- a/chipcompiler/cli/log_view.py +++ b/chipcompiler/cli/log_view.py @@ -17,6 +17,7 @@ class LineKind(enum.Enum): _WARNING_RE = re.compile(r"warn(?:ing)?", re.IGNORECASE) _INFO_RE = re.compile(r"^(?:INFO(?:\s*:|\s*\]|:root:)|\[INFO\s*\])") _SECTION_RE = re.compile(r"^[-=]{3,}$") +_EXCEPTION_RE = re.compile(r"^[A-Za-z_][\w.]*:\s") def classify_line(line: str, in_traceback: bool = False) -> LineKind: @@ -28,6 +29,8 @@ def classify_line(line: str, in_traceback: bool = False) -> LineKind: return LineKind.PLAIN if line.startswith(" ") or line.startswith("\t"): return LineKind.TRACEBACK + if _EXCEPTION_RE.match(stripped): + return LineKind.ERROR if _ERROR_RE.search(stripped): return LineKind.ERROR return LineKind.PLAIN @@ -122,18 +125,6 @@ def build_log_records( } -def _should_colorize(stream) -> bool: - import os - from chipcompiler.cli.types import OutputMode - if not hasattr(stream, "isatty") or not stream.isatty(): - return False - if os.environ.get("NO_COLOR") is not None: - return False - if os.environ.get("TERM", "") == "dumb": - return False - return True - - def render_log_pretty( step: str, source: str, @@ -167,6 +158,14 @@ def render_log_pretty( target.write(f" inspect: {inspect_cmd}\n") +def _format_value(value) -> str: + s = str(value) + if any(c.isspace() for c in s) or '\\' in s or '"' in s: + escaped = s.replace('\\', '\\\\').replace('"', '\\"') + return f'"{escaped}"' + return s + + def render_log_plain( step: str, source: str, @@ -179,9 +178,8 @@ def render_log_plain( records = build_log_records(step, source, lines, inspect_cmd) for rec in 
records: parts = [] - for key in ("step", "source", "line_no", "kind", "line"): - parts.append(f"{key}={rec[key]}") - parts.append(f"inspect={rec['inspect_cmd']}") + for key in ("step", "source", "line_no", "kind", "line", "inspect_cmd"): + parts.append(f"{key}={_format_value(rec[key])}") target.write(" ".join(parts) + "\n") diff --git a/chipcompiler/cli/main.py b/chipcompiler/cli/main.py index a8b221a3..48472350 100644 --- a/chipcompiler/cli/main.py +++ b/chipcompiler/cli/main.py @@ -176,6 +176,9 @@ def _render_log_text(args, result) -> None: render_log_pretty, ) + if getattr(args, "errors", False): + print("warning: --errors is deprecated and no longer filters output", file=sys.stderr) + if result.exit_code != 0: render_result(result, OutputMode.PLAIN) return @@ -195,11 +198,26 @@ def _render_log_text(args, result) -> None: # Step mode: records have line_no and kind if "line_no" in first: - step = first["step"] - source = first["source"] - lines = [r["line"] for r in records] inspect_cmd = first.get("inspect_cmd", "") - render_log_pretty(step, source, lines, inspect_cmd, color=color) + current_source = None + current_lines = [] + current_step = first["step"] + for rec in records: + src = rec["source"] + if src != current_source: + if current_source is not None: + render_log_pretty( + current_step, current_source, current_lines, + inspect_cmd, color=color, + ) + current_source = src + current_lines = [] + current_lines.append(rec["line"]) + if current_source is not None: + render_log_pretty( + current_step, current_source, current_lines, + inspect_cmd, color=color, + ) return # Listing mode diff --git a/test/cli/test_cli_main.py b/test/cli/test_cli_main.py index e0d8f901..d3c10e32 100644 --- a/test/cli/test_cli_main.py +++ b/test/cli/test_cli_main.py @@ -3,6 +3,8 @@ import re from types import SimpleNamespace +import pytest + from chipcompiler.cli import main as cli_main # --------------------------------------------------------------------------- @@ -1333,3 +1335,106 @@ def test_artifacts_log_disclosure_no_errors(self, tmp_path, capsys): out = capsys.readouterr().out assert "--errors" not in out + +class TestLogUnreadableFile: + """AC-9: Unreadable log files return non-zero with OS error.""" + + def test_unreadable_log_returns_nonzero(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + step_dir = os.path.join(run_dir, "Synthesis_yosys", "log") + os.makedirs(step_dir, exist_ok=True) + log_path = os.path.join(step_dir, "synthesis.log") + with open(log_path, "w") as f: + f.write("content\n") + os.chmod(log_path, 0o000) + + try: + rc = cli_main.run(["log", "synthesis", "--project", project_dir]) + assert rc == 1 + out = capsys.readouterr().out + assert "unreadable" in out + finally: + os.chmod(log_path, 0o644) + + def test_unreadable_log_jsonl(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + step_dir = os.path.join(run_dir, "Synthesis_yosys", "log") + os.makedirs(step_dir, exist_ok=True) + log_path = os.path.join(step_dir, "synthesis.log") + with open(log_path, "w") as f: + f.write("content\n") + os.chmod(log_path, 0o000) + + try: + rc = cli_main.run(["log", "synthesis", "--jsonl", "--project", project_dir]) + assert rc == 1 + record = json.loads(capsys.readouterr().out.strip()) + assert record["log_status"] == "unreadable" + assert "source" in record + assert "error" in record + finally: + os.chmod(log_path, 0o644) + + +class 
TestLogMultiSource: + """AC-1: Multiple log files per step shown with separate source headers.""" + + def test_multi_source_pretty(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + step_dir = os.path.join(run_dir, "Synthesis_yosys", "log") + os.makedirs(step_dir, exist_ok=True) + with open(os.path.join(step_dir, "a.log"), "w") as f: + f.write("from A\n") + with open(os.path.join(step_dir, "b.log"), "w") as f: + f.write("from B\n") + + rc = cli_main.run(["log", "synthesis", "--project", project_dir]) + assert rc == 0 + out = capsys.readouterr().out + assert "a.log" in out + assert "b.log" in out + assert "from A" in out + assert "from B" in out + + +class TestLogErrorsDeprecation: + """AC-8: --errors is deprecated with visible notice.""" + + def test_errors_hidden_from_help(self, tmp_path, capsys): + with pytest.raises(SystemExit): + cli_main.run(["log", "--help"]) + + def test_errors_emits_deprecation_warning(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + step_dir = os.path.join(run_dir, "Synthesis_yosys", "log") + os.makedirs(step_dir, exist_ok=True) + with open(os.path.join(step_dir, "synthesis.log"), "w") as f: + f.write("ok\n") + + rc = cli_main.run(["log", "synthesis", "--errors", "--project", project_dir]) + assert rc == 0 + err = capsys.readouterr().err + assert "deprecated" in err + + def test_errors_jsonl_still_full_records(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + step_dir = os.path.join(run_dir, "Synthesis_yosys", "log") + os.makedirs(step_dir, exist_ok=True) + with open(os.path.join(step_dir, "synthesis.log"), "w") as f: + f.write("INFO: running\nError: bad\n") + + rc = cli_main.run( + ["log", "synthesis", "--errors", "--jsonl", "--project", project_dir] + ) + assert rc == 0 + objects = [json.loads(ln) for ln in capsys.readouterr().out.strip().split("\n")] + assert len(objects) == 2 + assert objects[0]["kind"] == "info" + assert objects[1]["kind"] == "error" + assert "\x1b[" not in capsys.readouterr().out + diff --git a/test/cli/test_log_view.py b/test/cli/test_log_view.py index f56075b7..cc45a6b7 100644 --- a/test/cli/test_log_view.py +++ b/test/cli/test_log_view.py @@ -160,6 +160,24 @@ def test_pre_traceback_info_preserved(self): assert annotated[1].kind == LineKind.PLAIN assert annotated[2].kind == LineKind.TRACEBACK + def test_exception_classified_as_error(self): + lines = [ + "Traceback (most recent call last):", + ' File "a.py", line 1', + "Exception: something went wrong", + ] + annotated = annotate_log_lines(lines) + assert annotated[2].kind == LineKind.ERROR + + def test_keyboard_interrupt_classified_as_plain(self): + lines = [ + "Traceback (most recent call last):", + ' File "a.py", line 1', + "KeyboardInterrupt", + ] + annotated = annotate_log_lines(lines) + assert annotated[2].kind == LineKind.PLAIN + class TestAnnotateLineNumbers: def test_line_numbers_start_at_one(self): @@ -312,7 +330,22 @@ def test_record_has_required_fields(self): assert "line_no=1" in line assert "kind=plain" in line assert "line=ok" in line - assert "inspect=ecc log cts" in line + assert "inspect_cmd=" in line + + def test_values_with_spaces_are_quoted(self): + from io import StringIO + buf = StringIO() + render_log_plain("cts", "log/cts.log", ["line with spaces"], "ecc log cts --project /tmp/a b", file=buf) + line = buf.getvalue().strip() 
+ assert 'line="line with spaces"' in line + assert 'inspect_cmd="ecc log cts --project /tmp/a b"' in line + + def test_values_with_backslashes_escaped(self): + from io import StringIO + buf = StringIO() + render_log_plain("cts", "log/cts.log", ['path\\to\\file'], "ecc log cts", file=buf) + line = buf.getvalue().strip() + assert 'line="path\\\\to\\\\file"' in line def test_no_ansi_in_plain(self): from io import StringIO From cf022d4970b9aa51f09b64f00a983c0b7ce0a053 Mon Sep 17 00:00:00 2001 From: Emin Date: Mon, 4 May 2026 18:26:25 +0800 Subject: [PATCH 069/104] fix(cli): no-colon traceback terminators and stable plain output path - Broaden exception regex to match bare Python exception names (KeyboardInterrupt, SystemExit, StopIteration, GeneratorExit) without requiring a colon suffix - Route ecc log --plain through log-specific stable formatter instead of generic render_plain(), so values with spaces/=/backslashes are quoted - Quote values containing = in _format_value() for stable parseability - Extract _render_plain_record() and render_log_records_plain() for reuse by both log_view.render_log_plain() and CLI _render_log_plain() - Add _render_log_plain() in main.py for PLAIN mode log output - Fix test_keyboard_interrupt to assert error classification - Add 5 new tests for SystemExit, StopIteration, = quoting, stable plain CLI output, and KeyboardInterrupt JSONL classification --- chipcompiler/cli/log_view.py | 27 ++++++++++++++++++++------ chipcompiler/cli/main.py | 21 ++++++++++++++++++++ test/cli/test_cli_main.py | 37 ++++++++++++++++++++++++++++++++++++ test/cli/test_log_view.py | 29 ++++++++++++++++++++++++++-- 4 files changed, 106 insertions(+), 8 deletions(-) diff --git a/chipcompiler/cli/log_view.py b/chipcompiler/cli/log_view.py index c2a3f1a9..34c31ff2 100644 --- a/chipcompiler/cli/log_view.py +++ b/chipcompiler/cli/log_view.py @@ -17,7 +17,11 @@ class LineKind(enum.Enum): _WARNING_RE = re.compile(r"warn(?:ing)?", re.IGNORECASE) _INFO_RE = re.compile(r"^(?:INFO(?:\s*:|\s*\]|:root:)|\[INFO\s*\])") _SECTION_RE = re.compile(r"^[-=]{3,}$") -_EXCEPTION_RE = re.compile(r"^[A-Za-z_][\w.]*:\s") +_EXCEPTION_RE = re.compile( + r"^[A-Za-z_][\w.]*:\s" + r"|^[A-Za-z_]\w*(?:Error|Exception|Warning|Interrupt|Exit|Iteration)$" + r"|^(?:KeyboardInterrupt|SystemExit|StopIteration|GeneratorExit)$" +) def classify_line(line: str, in_traceback: bool = False) -> LineKind: @@ -160,12 +164,19 @@ def render_log_pretty( def _format_value(value) -> str: s = str(value) - if any(c.isspace() for c in s) or '\\' in s or '"' in s: + if any(c.isspace() for c in s) or '\\' in s or '"' in s or '=' in s: escaped = s.replace('\\', '\\\\').replace('"', '\\"') return f'"{escaped}"' return s +def _render_plain_record(rec, target): + parts = [] + for key in ("step", "source", "line_no", "kind", "line", "inspect_cmd"): + parts.append(f"{key}={_format_value(rec.get(key, ''))}") + target.write(" ".join(parts) + "\n") + + def render_log_plain( step: str, source: str, @@ -177,10 +188,14 @@ def render_log_plain( target = file or sys.stdout records = build_log_records(step, source, lines, inspect_cmd) for rec in records: - parts = [] - for key in ("step", "source", "line_no", "kind", "line", "inspect_cmd"): - parts.append(f"{key}={_format_value(rec[key])}") - target.write(" ".join(parts) + "\n") + _render_plain_record(rec, target) + + +def render_log_records_plain(records, file=None) -> None: + import sys + target = file or sys.stdout + for rec in records: + _render_plain_record(rec, target) def render_log_listing_pretty( 
diff --git a/chipcompiler/cli/main.py b/chipcompiler/cli/main.py index 48472350..9d564d50 100644 --- a/chipcompiler/cli/main.py +++ b/chipcompiler/cli/main.py @@ -224,6 +224,25 @@ def _render_log_text(args, result) -> None: render_log_listing_pretty(list(records), color=color) +def _render_log_plain(result) -> None: + from chipcompiler.cli.log_view import render_log_records_plain + + records = result.records + if not records: + return + first = records[0] + + if "log_status" in first or "status" in first: + render_result(result, OutputMode.PLAIN) + return + + if "line_no" in first: + render_log_records_plain(records) + return + + render_log_records_plain(records) + + def run(argv: Sequence[str] | None = None) -> int: parser = build_parser() args = parser.parse_args(list(argv) if argv is not None else None) @@ -239,6 +258,8 @@ def run(argv: Sequence[str] | None = None) -> int: _render_param_text(args, result) elif args.command == "log" and ctx.output_mode == OutputMode.TEXT: _render_log_text(args, result) + elif args.command == "log" and ctx.output_mode == OutputMode.PLAIN: + _render_log_plain(result) else: render_result(result, ctx.output_mode) diff --git a/test/cli/test_cli_main.py b/test/cli/test_cli_main.py index d3c10e32..44141f68 100644 --- a/test/cli/test_cli_main.py +++ b/test/cli/test_cli_main.py @@ -1095,6 +1095,26 @@ def test_traceback_complete_in_jsonl(self, tmp_path, capsys): assert objects[1]["kind"] == "traceback" assert objects[2]["kind"] == "error" + def test_keyboard_interrupt_jsonl_classified_as_error(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + step_dir = os.path.join(run_dir, "Synthesis_yosys", "log") + os.makedirs(step_dir, exist_ok=True) + with open(os.path.join(step_dir, "synthesis.log"), "w") as f: + f.write( + "Traceback (most recent call last):\n" + ' File "a.py", line 1\n' + "KeyboardInterrupt\n" + ) + + rc = cli_main.run(["log", "synthesis", "--jsonl", "--project", project_dir]) + assert rc == 0 + objects = [json.loads(ln) for ln in capsys.readouterr().out.strip().split("\n")] + assert objects[0]["kind"] == "traceback" + assert objects[1]["kind"] == "traceback" + assert objects[2]["kind"] == "error" + assert objects[2]["line"] == "KeyboardInterrupt" + class TestLogPlainMode: """AC-5: --plain emits full-content stable line records.""" @@ -1131,6 +1151,23 @@ def test_plain_no_ansi(self, tmp_path, capsys): out = capsys.readouterr().out assert "\x1b[" not in out + def test_plain_stable_quoting_for_special_chars(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + step_dir = os.path.join(run_dir, "Synthesis_yosys", "log") + os.makedirs(step_dir, exist_ok=True) + with open(os.path.join(step_dir, "synthesis.log"), "w") as f: + f.write('key=value path\\to\\file "quoted text"\n') + + rc = cli_main.run(["log", "synthesis", "--plain", "--project", project_dir]) + assert rc == 0 + out = capsys.readouterr().out + assert "\x1b[" not in out + lines = [l for l in out.strip().split("\n") if l.strip()] + assert len(lines) == 1 + assert 'line="key=value' in lines[0] + assert 'inspect_cmd=' in lines[0] + class TestLogJsonlMode: """AC-6: --jsonl emits full-content structured log objects.""" diff --git a/test/cli/test_log_view.py b/test/cli/test_log_view.py index cc45a6b7..70ad9858 100644 --- a/test/cli/test_log_view.py +++ b/test/cli/test_log_view.py @@ -169,14 +169,32 @@ def test_exception_classified_as_error(self): 
annotated = annotate_log_lines(lines) assert annotated[2].kind == LineKind.ERROR - def test_keyboard_interrupt_classified_as_plain(self): + def test_keyboard_interrupt_classified_as_error(self): lines = [ "Traceback (most recent call last):", ' File "a.py", line 1', "KeyboardInterrupt", ] annotated = annotate_log_lines(lines) - assert annotated[2].kind == LineKind.PLAIN + assert annotated[2].kind == LineKind.ERROR + + def test_system_exit_classified_as_error(self): + lines = [ + "Traceback (most recent call last):", + ' File "a.py", line 1', + "SystemExit: 1", + ] + annotated = annotate_log_lines(lines) + assert annotated[2].kind == LineKind.ERROR + + def test_stop_iteration_classified_as_error(self): + lines = [ + "Traceback (most recent call last):", + ' File "a.py", line 1', + "StopIteration", + ] + annotated = annotate_log_lines(lines) + assert annotated[2].kind == LineKind.ERROR class TestAnnotateLineNumbers: @@ -347,6 +365,13 @@ def test_values_with_backslashes_escaped(self): line = buf.getvalue().strip() assert 'line="path\\\\to\\\\file"' in line + def test_values_with_equals_quoted(self): + from io import StringIO + buf = StringIO() + render_log_plain("cts", "log/cts.log", ["key=value"], "ecc log cts", file=buf) + line = buf.getvalue().strip() + assert 'line="key=value"' in line + def test_no_ansi_in_plain(self): from io import StringIO buf = StringIO() From ec450686e283e458c713cf9b1bf57aa88207ac9b Mon Sep 17 00:00:00 2001 From: Emin Date: Mon, 4 May 2026 18:45:29 +0800 Subject: [PATCH 070/104] fix(cli): listing plain fallback for ecc log --plain without step --- chipcompiler/cli/main.py | 2 +- test/cli/test_cli_main.py | 32 ++++++++++++++++++++++++++++++++ 2 files changed, 33 insertions(+), 1 deletion(-) diff --git a/chipcompiler/cli/main.py b/chipcompiler/cli/main.py index 9d564d50..d7b0262b 100644 --- a/chipcompiler/cli/main.py +++ b/chipcompiler/cli/main.py @@ -240,7 +240,7 @@ def _render_log_plain(result) -> None: render_log_records_plain(records) return - render_log_records_plain(records) + render_result(result, OutputMode.PLAIN) def run(argv: Sequence[str] | None = None) -> int: diff --git a/test/cli/test_cli_main.py b/test/cli/test_cli_main.py index 44141f68..58e44bfb 100644 --- a/test/cli/test_cli_main.py +++ b/test/cli/test_cli_main.py @@ -1246,6 +1246,38 @@ def test_listing_jsonl_records(self, tmp_path, capsys): objects = [json.loads(ln) for ln in capsys.readouterr().out.strip().split("\n") if ln.strip()] assert any("step" in o for o in objects) + def test_listing_plain_step_logs(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + step_dir = os.path.join(run_dir, "Synthesis_yosys", "log") + os.makedirs(step_dir, exist_ok=True) + with open(os.path.join(step_dir, "synthesis.log"), "w") as f: + f.write("content\n") + + rc = cli_main.run(["log", "--plain", "--project", project_dir]) + assert rc == 0 + out = capsys.readouterr().out + assert "step=synthesis" in out + assert "source=" in out + assert "inspect_cmd=" in out + assert "line_no=" not in out + + def test_listing_plain_run_level_logs(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + log_dir = os.path.join(run_dir, "log") + os.makedirs(log_dir, exist_ok=True) + with open(os.path.join(log_dir, "flow.log"), "w") as f: + f.write("log content\n") + + rc = cli_main.run(["log", "--plain", "--project", project_dir]) + assert rc == 0 + out = capsys.readouterr().out + 
assert "log=" in out + assert "inspect_cmd=" in out + assert "line_no=" not in out + assert "kind=" not in out + class TestLogErrorCases: """AC-9: Error cases are structured and readable.""" From 66308322d7ac3b308149ab5a24fee600cc5e9116 Mon Sep 17 00:00:00 2001 From: Emin Date: Mon, 4 May 2026 20:20:20 +0800 Subject: [PATCH 071/104] fix(cli): route placement params to DreamPlace, reject zero freq, surface TOML errors, seed design freq --- chipcompiler/cli/param_handler.py | 8 ++- chipcompiler/cli/params.py | 10 ++-- test/cli/test_cli_params.py | 86 +++++++++++++++++++++++++++++-- test/cli/test_params.py | 4 +- 4 files changed, 96 insertions(+), 12 deletions(-) diff --git a/chipcompiler/cli/param_handler.py b/chipcompiler/cli/param_handler.py index 20c3d645..5617ccc9 100644 --- a/chipcompiler/cli/param_handler.py +++ b/chipcompiler/cli/param_handler.py @@ -301,7 +301,13 @@ def _load_toml_overrides(project_dir: str) -> tuple[dict[str, object], list[str] from chipcompiler.cli.config import load_project_config cfg = load_project_config(config_path) errors = list(getattr(cfg, "_param_errors", [])) - return cfg.params_overrides, errors + toml_error = getattr(cfg, "_toml_error", None) + if toml_error: + errors.insert(0, f"malformed ecc.toml: {toml_error}") + overrides = dict(cfg.params_overrides) + if "design.frequency_mhz" not in overrides and cfg.design_frequency_mhz > 0: + overrides["design.frequency_mhz"] = cfg.design_frequency_mhz + return overrides, errors def _write_param_to_toml(config_path: str, key: str, value: object) -> None: diff --git a/chipcompiler/cli/params.py b/chipcompiler/cli/params.py index 06a0eb6c..2edcf1e3 100644 --- a/chipcompiler/cli/params.py +++ b/chipcompiler/cli/params.py @@ -30,7 +30,7 @@ class ParamSchema: applies="synthesis", maps_to="Frequency max [MHz]", description="Target clock frequency in MHz", - range=(0.0, 10000.0), + range=(1e-6, 10000.0), unit="MHz", example="200.0", ), @@ -88,7 +88,7 @@ class ParamSchema: type="float", default=0.3, applies="placement", - maps_to="Target density", + maps_to={"DreamPlace": "target_density"}, description="Target placement density", range=(0.1, 0.95), example="0.65", @@ -100,7 +100,7 @@ class ParamSchema: type="float", default=0.1, applies="placement", - maps_to="Target overflow", + maps_to={"DreamPlace": "stop_overflow"}, description="Target overflow for global placement", range=(0.0, 1.0), example="0.08", @@ -124,7 +124,7 @@ class ParamSchema: type="int", default=600, applies="placement", - maps_to="Cell padding x", + maps_to={"DreamPlace": "cell_padding_x"}, description="Cell padding in x-direction in database units", range=(0, 10000), example="400", @@ -136,7 +136,7 @@ class ParamSchema: type="int", default=1, applies="placement", - maps_to="Routability opt flag", + maps_to={"DreamPlace": "routability_opt_flag"}, description="Enable routability-driven placement optimization", choices=("0", "1"), example="1", diff --git a/test/cli/test_cli_params.py b/test/cli/test_cli_params.py index 61cf1b88..3e36a043 100644 --- a/test/cli/test_cli_params.py +++ b/test/cli/test_cli_params.py @@ -4,7 +4,7 @@ from chipcompiler.cli import main as cli_main -def _create_valid_project(tmp_path, name="gcd", pdk_root=None): +def _create_valid_project(tmp_path, name="gcd", pdk_root=None, freq=100.0): project_dir = tmp_path / name project_dir.mkdir(exist_ok=True) (project_dir / "rtl").mkdir(exist_ok=True) @@ -23,7 +23,7 @@ def _create_valid_project(tmp_path, name="gcd", pdk_root=None): top = "{name}" rtl = ["rtl/gcd.v"] clock_port = "clk" 
-frequency_mhz = 100.0 +frequency_mhz = {freq} [pdk] name = "ics55" @@ -229,7 +229,7 @@ def fake_create(**kwargs): assert rc == 0 params = capture["kwargs"]["parameters"] - assert params.get("Target density") == 0.65 + assert params.get("DreamPlace", {}).get("target_density") == 0.65 def test_run_set_rejects_unknown_key(self, tmp_path, capsys): project_dir = _create_valid_project(tmp_path) @@ -482,7 +482,10 @@ def test_param_list_default_source_when_no_overrides(self, tmp_path, capsys): assert rc == 0 data = json.loads(capsys.readouterr().out) for r in data["records"]: - assert r["source"] == "default" + if r["param"] == "design.frequency_mhz": + assert r["source"] == "ecc.toml" + else: + assert r["source"] == "default" class TestDiffFiltering: @@ -1058,3 +1061,78 @@ def test_list_changed_value_in_diff(self, tmp_path, capsys): margin = next((r for r in data["records"] if r.get("param") == "floorplan.core_margin"), None) assert margin is not None assert margin["value"] == [4, 4] + + +class TestZeroFrequencyRejected: + """ecc param set design.frequency_mhz 0 must be rejected.""" + + def test_set_zero_rejected(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + rc = cli_main.run(["param", "set", "design.frequency_mhz", "0", "--project", project_dir]) + assert rc == 1 + + def test_cli_set_zero_rejected(self, tmp_path): + project_dir = _create_valid_project(tmp_path) + rc = cli_main.run([ + "run", "--project", project_dir, + "--set", "design.frequency_mhz=0", + ]) + assert rc == 1 + + +class TestDesignFrequencySeeded: + """ecc param list/show must reflect [design] frequency_mhz.""" + + def test_list_shows_design_frequency(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path, freq=200.0) + rc = cli_main.run(["param", "list", "--project", project_dir, "--json"]) + assert rc == 0 + data = json.loads(capsys.readouterr().out) + freq = next(r for r in data["records"] if r["param"] == "design.frequency_mhz") + assert freq["value"] == 200.0 + + def test_show_shows_design_frequency(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path, freq=200.0) + rc = cli_main.run(["param", "show", "design.frequency_mhz", "--project", project_dir, "--json"]) + assert rc == 0 + data = json.loads(capsys.readouterr().out) + assert data["records"][0]["value"] == 200.0 + + def test_param_override_beats_design_frequency(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path, freq=200.0) + cli_main.run(["param", "set", "design.frequency_mhz", "300", "--project", project_dir]) + capsys.readouterr() + rc = cli_main.run(["param", "show", "design.frequency_mhz", "--project", project_dir, "--json"]) + assert rc == 0 + data = json.loads(capsys.readouterr().out) + assert data["records"][0]["value"] == 300.0 + assert data["records"][0]["source"] == "ecc.toml" + + +class TestMalformedTomlRejected: + """ecc param list/show/diff must reject syntactically malformed ecc.toml.""" + + def _write_malformed_toml(self, project_dir): + toml_path = os.path.join(project_dir, "ecc.toml") + with open(toml_path, "w") as f: + f.write('[design\nname = "gcd"\n') + + def test_param_list_rejects_malformed(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + self._write_malformed_toml(project_dir) + rc = cli_main.run(["param", "list", "--project", project_dir, "--json"]) + assert rc == 1 + data = json.loads(capsys.readouterr().out) + assert data["records"][0]["error"] == "invalid_param_config" + + def test_param_show_rejects_malformed(self, tmp_path, 
capsys): + project_dir = _create_valid_project(tmp_path) + self._write_malformed_toml(project_dir) + rc = cli_main.run(["param", "show", "design.frequency_mhz", "--project", project_dir, "--json"]) + assert rc == 1 + + def test_param_diff_rejects_malformed(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + self._write_malformed_toml(project_dir) + rc = cli_main.run(["param", "diff", "--project", project_dir, "--json"]) + assert rc == 1 diff --git a/test/cli/test_params.py b/test/cli/test_params.py index 8586fa9c..cd5a2037 100644 --- a/test/cli/test_params.py +++ b/test/cli/test_params.py @@ -57,7 +57,7 @@ def test_optional_fields_present_when_relevant(self): def test_cli_keys_map_to_backend_names(self): density = lookup_schema("place.target_density") - assert density.maps_to == "Target density" + assert density.maps_to == {"DreamPlace": "target_density"} fanout = lookup_schema("synth.max_fanout") assert fanout.maps_to == "Max fanout" @@ -247,7 +247,7 @@ def test_flat_key_mapping(self): source="cli", schema=schema, ) result = build_backend_overrides([rp]) - assert result == {"Target density": 0.65} + assert result == {"DreamPlace": {"target_density": 0.65}} def test_nested_key_mapping(self): schema = lookup_schema("floorplan.core_util") From 6efffcf882c9dd3f39d5ba204ec9e63f1b765478 Mon Sep 17 00:00:00 2001 From: Emin Date: Mon, 4 May 2026 20:38:36 +0800 Subject: [PATCH 072/104] fix(cli): add log --json flag, align target_density default with DreamPlace --- chipcompiler/cli/main.py | 1 + chipcompiler/cli/params.py | 2 +- chipcompiler/data/parameter.py | 2 +- test/cli/test_cli_main.py | 31 +++++++++++++++++++++++++++ test/cli/test_cli_params.py | 4 ++-- test/cli/test_params.py | 2 +- test/formal/test_param_propagation.py | 2 +- 7 files changed, 38 insertions(+), 6 deletions(-) diff --git a/chipcompiler/cli/main.py b/chipcompiler/cli/main.py index d7b0262b..6208402e 100644 --- a/chipcompiler/cli/main.py +++ b/chipcompiler/cli/main.py @@ -45,6 +45,7 @@ def build_parser() -> argparse.ArgumentParser: log_parser.add_argument("step", nargs="?", default=None, help="Step name") log_parser.add_argument("--errors", action="store_true", help=argparse.SUPPRESS) + log_parser.add_argument("--json", action="store_true", help="JSON output") log_parser.add_argument("--plain", action="store_true", help="Plain key-value output") log_parser.add_argument("--jsonl", action="store_true", help="JSONL output") log_parser.add_argument("--run-id", default=None, dest="run_id", diff --git a/chipcompiler/cli/params.py b/chipcompiler/cli/params.py index 2edcf1e3..b1a730f2 100644 --- a/chipcompiler/cli/params.py +++ b/chipcompiler/cli/params.py @@ -86,7 +86,7 @@ class ParamSchema: group="place", name="target_density", type="float", - default=0.3, + default=0.8, applies="placement", maps_to={"DreamPlace": "target_density"}, description="Target placement density", diff --git a/chipcompiler/data/parameter.py b/chipcompiler/data/parameter.py index 45cc8c77..0e4f5dcb 100644 --- a/chipcompiler/data/parameter.py +++ b/chipcompiler/data/parameter.py @@ -22,7 +22,7 @@ "Aspect ratio" : 1 }, "Max fanout" : 20, - "Target density" : 0.3, + "Target density" : 0.8, "Target overflow" : 0.1, "Global right padding": 0, "Cell padding x": 600, diff --git a/test/cli/test_cli_main.py b/test/cli/test_cli_main.py index 58e44bfb..4ce94305 100644 --- a/test/cli/test_cli_main.py +++ b/test/cli/test_cli_main.py @@ -1206,6 +1206,37 @@ def test_jsonl_no_ansi(self, tmp_path, capsys): assert "\x1b[" not in out +class 
TestLogJsonMode: + """ecc log --json must produce JSON envelope output.""" + + def test_json_step_output(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + step_dir = os.path.join(run_dir, "Synthesis_yosys", "log") + os.makedirs(step_dir, exist_ok=True) + with open(os.path.join(step_dir, "synthesis.log"), "w") as f: + f.write("Error: bad\nINFO: ok\n") + + rc = cli_main.run(["log", "synthesis", "--json", "--project", project_dir]) + assert rc == 0 + data = json.loads(capsys.readouterr().out) + assert "records" in data + assert len(data["records"]) == 2 + + def test_json_listing_output(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + step_dir = os.path.join(run_dir, "Synthesis_yosys", "log") + os.makedirs(step_dir, exist_ok=True) + with open(os.path.join(step_dir, "synthesis.log"), "w") as f: + f.write("content\n") + + rc = cli_main.run(["log", "--json", "--project", project_dir]) + assert rc == 0 + data = json.loads(capsys.readouterr().out) + assert "records" in data + + class TestLogListingMode: """AC-7: ecc log without step lists available logs.""" diff --git a/test/cli/test_cli_params.py b/test/cli/test_cli_params.py index 3e36a043..2bec6406 100644 --- a/test/cli/test_cli_params.py +++ b/test/cli/test_cli_params.py @@ -86,7 +86,7 @@ def test_param_show_json(self, tmp_path, capsys): data = json.loads(capsys.readouterr().out) record = data["records"][0] assert record["param"] == "place.target_density" - assert record["default"] == 0.3 + assert record["default"] == 0.8 assert "source" in record assert "maps_to" in record @@ -505,7 +505,7 @@ def test_diff_only_shows_values_that_differ(self, tmp_path, capsys): def test_diff_clean_when_set_to_default(self, tmp_path, capsys): project_dir = _create_valid_project(tmp_path) - schema_default = 0.3 + schema_default = 0.8 cli_main.run(["param", "set", "place.target_density", str(schema_default), "--project", project_dir]) capsys.readouterr() diff --git a/test/cli/test_params.py b/test/cli/test_params.py index cd5a2037..901f5f1b 100644 --- a/test/cli/test_params.py +++ b/test/cli/test_params.py @@ -243,7 +243,7 @@ class TestBackendMapping: def test_flat_key_mapping(self): schema = lookup_schema("place.target_density") rp = ResolvedParam( - param="place.target_density", value=0.65, default=0.3, + param="place.target_density", value=0.65, default=0.8, source="cli", schema=schema, ) result = build_backend_overrides([rp]) diff --git a/test/formal/test_param_propagation.py b/test/formal/test_param_propagation.py index ec9b1af0..da0a9e25 100644 --- a/test/formal/test_param_propagation.py +++ b/test/formal/test_param_propagation.py @@ -69,7 +69,7 @@ def _key_exists_in_dict(data: dict[str, Any], key: str) -> bool: # Known parameter -> config mappings with both defaults. 
# (param_key, param_default, config_default, description) PARAM_CONFIG_DEFAULTS: list[tuple[str, float, float, str]] = [ - ("Target density", 0.3, 0.8, "dreamplace.target_density"), + ("Target density", 0.8, 0.8, "dreamplace.target_density"), ("Target overflow", 0.1, 0.1, "dreamplace.stop_overflow"), ("Cell padding x", 600, 600, "dreamplace.cell_padding_x"), ("Routability opt flag", 1, 0, "dreamplace.routability_opt_flag"), From d9989d6b8e379cc2c3b602a9ef70f31663a7f114 Mon Sep 17 00:00:00 2001 From: Emin Date: Mon, 4 May 2026 21:01:33 +0800 Subject: [PATCH 073/104] fix(cli): handle multiline TOML values in param set/unset edits --- chipcompiler/cli/param_handler.py | 47 +++++++++++++++++++- test/cli/test_cli_params.py | 71 ++++++++++++++++++++++++++++++- 2 files changed, 115 insertions(+), 3 deletions(-) diff --git a/chipcompiler/cli/param_handler.py b/chipcompiler/cli/param_handler.py index 5617ccc9..05472bb4 100644 --- a/chipcompiler/cli/param_handler.py +++ b/chipcompiler/cli/param_handler.py @@ -356,6 +356,44 @@ def _find_table_span(text: str, table_name: str) -> tuple[int, int] | None: return None +def _extend_multiline_value(text: str, match_end: int) -> int: + """Extend match end past continuation lines for multiline TOML values. + + After matching `key = ...` on one line, consume subsequent lines if the + value has unclosed brackets (arrays or inline tables). + """ + line_start = text.rfind("\n", 0, match_end) + 1 + matched_line = text[line_start:match_end] + + depth = 0 + eq_pos = matched_line.find("=") + if eq_pos >= 0: + for ch in matched_line[eq_pos + 1:]: + if ch in ("[", "{"): + depth += 1 + elif ch in ("]", "}"): + depth -= 1 + + if depth <= 0: + return match_end + + pos = match_end + while pos < len(text) and depth > 0: + ch = text[pos] + if ch in ("[", "{"): + depth += 1 + elif ch in ("]", "}"): + depth -= 1 + pos += 1 + + while pos < len(text) and text[pos] in (" ", "\t"): + pos += 1 + if pos < len(text) and text[pos] == "\n": + pos += 1 + + return pos + + def _apply_scoped_param_edit(text: str, group: str, name: str, value: object) -> str: value_str = _format_toml_value(value) target_table = f"params.{group}" @@ -380,8 +418,9 @@ def _apply_scoped_param_edit(text: str, group: str, name: str, value: object) -> if key_match: indent = key_match.group(1) + end = _extend_multiline_value(section_body, key_match.end()) new_line = f"{indent}{name} = {value_str}" - new_body = section_body[:key_match.start()] + new_line + section_body[key_match.end():] + new_body = section_body[:key_match.start()] + new_line + section_body[end:] return text[:body_start] + new_body + text[body_end:] else: insert = f"{name} = {value_str}\n" @@ -402,7 +441,11 @@ def _remove_scoped_param_key(text: str, group: str, name: str) -> str | None: if not key_match: return None - new_body = section_body[:key_match.start()] + section_body[key_match.end():] + end = _extend_multiline_value(section_body, key_match.end()) + # Consume trailing newline after multiline value + if section_body[end:end + 1] == "\n": + end += 1 + new_body = section_body[:key_match.start()] + section_body[end:] remaining_keys = [l for l in new_body.strip().split("\n") if l.strip()] if not remaining_keys: header_match = None diff --git a/test/cli/test_cli_params.py b/test/cli/test_cli_params.py index 2bec6406..82dc504d 100644 --- a/test/cli/test_cli_params.py +++ b/test/cli/test_cli_params.py @@ -891,7 +891,76 @@ def test_set_indented_preserves_other_sections(self, tmp_path, capsys): assert after.count("target_density") == 1 -class 
TestMalformedCliProvenance: +class TestMultilineTomlValues: + """Scoped TOML edit must handle multiline array values.""" + + def test_set_replaces_multiline_array(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + toml_path = os.path.join(project_dir, "ecc.toml") + with open(toml_path) as f: + content = f.read() + content += '\n[params.floorplan]\ncore_margin = [\n 2,\n 2,\n]\n' + with open(toml_path, "w") as f: + f.write(content) + + cli_main.run(["param", "set", "floorplan.core_margin", "[4, 4]", "--project", project_dir]) + capsys.readouterr() + + with open(toml_path) as f: + after = f.read() + assert "2," not in after + assert after.count("core_margin") == 1 + assert "[4, 4]" in after + + def test_unset_removes_multiline_array(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + toml_path = os.path.join(project_dir, "ecc.toml") + with open(toml_path) as f: + content = f.read() + content += '\n[params.floorplan]\ncore_margin = [\n 2,\n 2,\n]\n' + with open(toml_path, "w") as f: + f.write(content) + + cli_main.run(["param", "unset", "floorplan.core_margin", "--project", project_dir]) + capsys.readouterr() + + with open(toml_path) as f: + after = f.read() + assert "core_margin" not in after + + def test_set_multiline_then_show(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + toml_path = os.path.join(project_dir, "ecc.toml") + with open(toml_path) as f: + content = f.read() + content += '\n[params.floorplan]\ncore_margin = [\n 2,\n 2,\n]\n' + with open(toml_path, "w") as f: + f.write(content) + + cli_main.run(["param", "set", "floorplan.core_margin", "[4, 4]", "--project", project_dir]) + capsys.readouterr() + + rc = cli_main.run(["param", "show", "floorplan.core_margin", "--project", project_dir, "--json"]) + assert rc == 0 + data = json.loads(capsys.readouterr().out) + assert data["records"][0]["value"] == [4, 4] + + def test_set_preserves_adjacent_key_after_multiline(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + toml_path = os.path.join(project_dir, "ecc.toml") + with open(toml_path) as f: + content = f.read() + content += '\n[params.floorplan]\ncore_margin = [\n 2,\n 2,\n]\n core_util = 0.5\n' + with open(toml_path, "w") as f: + f.write(content) + + cli_main.run(["param", "set", "floorplan.core_margin", "[4, 4]", "--project", project_dir]) + capsys.readouterr() + + with open(toml_path) as f: + after = f.read() + assert "core_util = 0.5" in after + assert after.count("core_margin") == 1 """config --resolved must error on malformed/invalid CLI provenance.""" def _setup_run_dir(self, project_dir): From 6c716e21df785d6e70ac7ba6375217f0bb6b0d36 Mon Sep 17 00:00:00 2001 From: Emin Date: Mon, 4 May 2026 21:58:09 +0800 Subject: [PATCH 074/104] fix(cli): seed design frequency in config --resolved param records --- chipcompiler/cli/config_view.py | 5 ++++- test/cli/test_cli_params.py | 19 +++++++++++++++++++ 2 files changed, 23 insertions(+), 1 deletion(-) diff --git a/chipcompiler/cli/config_view.py b/chipcompiler/cli/config_view.py index 032b2468..e8d28593 100644 --- a/chipcompiler/cli/config_view.py +++ b/chipcompiler/cli/config_view.py @@ -101,8 +101,11 @@ def build_project_config_items(project_dir: str, run_dir: str, cli_provenance, prov_error = _load_cli_provenance(run_dir) if prov_error: return [{"kind": "error", "status": "invalid_config", "reason": prov_error}], 1 + toml_overrides = dict(cfg.params_overrides) + if "design.frequency_mhz" not in toml_overrides and cfg.design_frequency_mhz 
> 0: + toml_overrides["design.frequency_mhz"] = cfg.design_frequency_mhz resolved_params, _ = resolve_parameters( - toml_overrides=cfg.params_overrides, + toml_overrides=toml_overrides, cli_overrides=cli_provenance, ) for rp in resolved_params: diff --git a/test/cli/test_cli_params.py b/test/cli/test_cli_params.py index 82dc504d..b8ab41ab 100644 --- a/test/cli/test_cli_params.py +++ b/test/cli/test_cli_params.py @@ -378,6 +378,25 @@ def test_config_resolved_shows_toml_source(self, tmp_path, monkeypatch, capsys): assert density["value"] == 0.65 assert density["source"] == "ecc.toml" + def test_config_resolved_seeds_design_frequency(self, tmp_path, monkeypatch, capsys): + project_dir = _create_valid_project(tmp_path, freq=200.0) + monkeypatch.setattr( + "chipcompiler.cli.config._validate_pdk_contents", + lambda name, root: None, + ) + run_dir = os.path.join(project_dir, "runs", "default") + home = os.path.join(run_dir, "home") + os.makedirs(home, exist_ok=True) + with open(os.path.join(home, "flow.json"), "w") as f: + json.dump({"steps": []}, f) + + rc = cli_main.run(["config", "--resolved", "--project", project_dir, "--json"]) + assert rc == 0 + data = json.loads(capsys.readouterr().out) + param_records = [r for r in data["records"] if r.get("kind") == "param"] + freq = next(r for r in param_records if r["key"] == "design.frequency_mhz") + assert freq["value"] == 200.0 + class TestTomlValidationErrors: def _create_project_with_invalid_param(self, tmp_path): From 9dc99950da9a627698c526385d50fc192349c00f Mon Sep 17 00:00:00 2001 From: Emin Date: Mon, 4 May 2026 22:15:20 +0800 Subject: [PATCH 075/104] fix(cli): treat non-dict flow.json as corrupt, recognize flow-only steps in metrics --- chipcompiler/cli/handlers.py | 8 ++++++ chipcompiler/cli/inspect.py | 9 ++++++- test/cli/test_cli_main.py | 48 ++++++++++++++++++++++++++++++++++++ 3 files changed, 64 insertions(+), 1 deletion(-) diff --git a/chipcompiler/cli/handlers.py b/chipcompiler/cli/handlers.py index 010a8daf..94cb5083 100644 --- a/chipcompiler/cli/handlers.py +++ b/chipcompiler/cli/handlers.py @@ -178,6 +178,7 @@ def metrics(args, ctx: CommandContext) -> CommandResult: _internal_from_token, discover_metrics, discover_step_dirs, + get_flow_step_names, read_metrics, ) @@ -188,6 +189,7 @@ def metrics(args, ctx: CommandContext) -> CommandResult: if not metrics_files: if step_token is not None: step_dirs = discover_step_dirs(ctx.run_dir) + flow_steps = get_flow_step_names(ctx.run_dir) if step_token in step_dirs: return CommandResult.err([{ "metric_step": step_token, @@ -199,6 +201,12 @@ def metrics(args, ctx: CommandContext) -> CommandResult: ), "log": disclosure_cmd(f"ecc log {step_token}", project, ctx.run_id), }]) + if step_token in flow_steps: + return CommandResult.err([{ + "metric_step": step_token, + "status": "missing", + "log": disclosure_cmd(f"ecc log {step_token}", project, ctx.run_id), + }]) return CommandResult.err([{ "step": step_token, "status": "unknown_step", diff --git a/chipcompiler/cli/inspect.py b/chipcompiler/cli/inspect.py index 48cd9f23..34b18aff 100644 --- a/chipcompiler/cli/inspect.py +++ b/chipcompiler/cli/inspect.py @@ -34,7 +34,7 @@ def read_flow_json(run_dir: str) -> dict | str | None: try: with open(path) as f: data = json.load(f) - return data if isinstance(data, dict) else None + return data if isinstance(data, dict) else CORRUPT_FLOW_JSON except (json.JSONDecodeError, OSError): return CORRUPT_FLOW_JSON @@ -88,6 +88,13 @@ def discover_step_dirs(run_dir: str) -> dict[str, str]: return result +def 
get_flow_step_names(run_dir: str) -> set[str]: + flow_data = read_flow_json(run_dir) + if not isinstance(flow_data, dict): + return set() + return {normalize_step_name(s.get("name", "")) for s in _safe_steps(flow_data) if s.get("name")} + + def _list_files(directory: str) -> list[str]: if not os.path.isdir(directory): return [] diff --git a/test/cli/test_cli_main.py b/test/cli/test_cli_main.py index 4ce94305..8c07959d 100644 --- a/test/cli/test_cli_main.py +++ b/test/cli/test_cli_main.py @@ -1538,3 +1538,51 @@ def test_errors_jsonl_still_full_records(self, tmp_path, capsys): assert objects[1]["kind"] == "error" assert "\x1b[" not in capsys.readouterr().out + +class TestCorruptFlowJson: + """Non-dict flow.json must be reported as corrupt, not missing.""" + + def test_array_flow_json_is_corrupt(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + home = os.path.join(run_dir, "home") + os.makedirs(home, exist_ok=True) + with open(os.path.join(home, "flow.json"), "w") as f: + json.dump([], f) + + rc = cli_main.run(["status", "--json", "--project", project_dir]) + assert rc == 1 + data = json.loads(capsys.readouterr().out) + assert data["records"][0].get("status") == "corrupt" + + def test_string_flow_json_is_corrupt(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + home = os.path.join(run_dir, "home") + os.makedirs(home, exist_ok=True) + with open(os.path.join(home, "flow.json"), "w") as f: + json.dump("bad", f) + + rc = cli_main.run(["status", "--json", "--project", project_dir]) + assert rc == 1 + data = json.loads(capsys.readouterr().out) + assert data["records"][0].get("status") == "corrupt" + + +class TestFlowOnlyStepMetrics: + """Step in flow.json but no step directory should report missing, not unknown.""" + + def test_metrics_flow_only_step_is_missing(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + home = os.path.join(run_dir, "home") + os.makedirs(home, exist_ok=True) + with open(os.path.join(home, "flow.json"), "w") as f: + json.dump({"steps": [{"name": "CTS", "state": "unstart"}]}, f) + + rc = cli_main.run(["metrics", "cts", "--json", "--project", project_dir]) + assert rc == 1 + data = json.loads(capsys.readouterr().out) + assert data["records"][0].get("status") == "missing" + assert data["records"][0].get("status") != "unknown_step" + From b7b9944990d1ed3bf15c98c3e5e4fcfd893e9672 Mon Sep 17 00:00:00 2001 From: Emin Date: Mon, 4 May 2026 22:34:53 +0800 Subject: [PATCH 076/104] refactor(cli): simplify code across 8 CLI modules - Consolidate duplicate type branches in params.py parse_value - Remove unused imports (deepcopy, field, state_severity) - Move deferred imports to top-level (sys, os) - Extract _check_step_artifacts helper in diagnose.py - Dict-based dispatch for param and _render_param_text - List comprehensions replace imperative loops in handlers.py - Simplify get_run_status to single set-based approach - Deduplicate render_log_listing_pretty color/no-color branches - Reuse _maps_to_str in config_view.py --- chipcompiler/cli/config_view.py | 13 ++---- chipcompiler/cli/diagnose.py | 60 +++++++++++++-------------- chipcompiler/cli/handlers.py | 69 ++++++++++++++----------------- chipcompiler/cli/inspect.py | 19 ++++----- chipcompiler/cli/log_view.py | 39 ++++++----------- chipcompiler/cli/main.py | 20 +++++---- 
chipcompiler/cli/param_handler.py | 13 ++---- chipcompiler/cli/params.py | 42 +++++++------------ 8 files changed, 113 insertions(+), 162 deletions(-) diff --git a/chipcompiler/cli/config_view.py b/chipcompiler/cli/config_view.py index e8d28593..0786c17f 100644 --- a/chipcompiler/cli/config_view.py +++ b/chipcompiler/cli/config_view.py @@ -82,10 +82,7 @@ def build_project_config_items(project_dir: str, run_dir: str, run_dir_rel = os.path.relpath(run_dir, project_dir) except ValueError: run_dir_rel = run_dir - if run_dir_rel.startswith(".."): - run_dir_value = run_dir - else: - run_dir_value = run_dir_rel + run_dir_value = run_dir if run_dir_rel.startswith("..") else run_dir_rel items.append({ "kind": "config", "scope": "project", @@ -108,12 +105,8 @@ def build_project_config_items(project_dir: str, run_dir: str, toml_overrides=toml_overrides, cli_overrides=cli_provenance, ) + from chipcompiler.cli.param_handler import _maps_to_str for rp in resolved_params: - maps_to = rp.schema.maps_to - if isinstance(maps_to, str): - mapping = maps_to - else: - mapping = ", ".join(f"{k}.{v}" for k, v in maps_to.items()) items.append({ "kind": "param", "scope": "project", @@ -121,7 +114,7 @@ def build_project_config_items(project_dir: str, run_dir: str, "value": rp.value, "default": rp.default, "source": rp.source, - "maps_to": mapping, + "maps_to": _maps_to_str(rp.schema.maps_to), "inspect_cmd": disclosure_cmd(f"ecc param show {rp.param}", project), }) diff --git a/chipcompiler/cli/diagnose.py b/chipcompiler/cli/diagnose.py index 2a07a115..879bb2c1 100644 --- a/chipcompiler/cli/diagnose.py +++ b/chipcompiler/cli/diagnose.py @@ -78,6 +78,26 @@ def _make_issue(issue: str, severity: str, run: str, return obj +def _check_step_artifacts( + issues: list[dict], run_dir: str, token: str, step_path: str, + display_run: str, project: str | None, run_id: str | None, +) -> None: + error_count = _count_log_errors(run_dir, token) + if error_count > 0: + issues.append(_make_issue("log_errors", "error", display_run, + step=token, count=error_count, + project=project, run_id=run_id)) + if not _has_metrics(run_dir, token): + issues.append(_make_issue("missing_metrics", "warning", display_run, + step=token, project=project, run_id=run_id)) + if not _has_investigation_files(step_path): + issues.append(_make_issue("missing_artifacts", "warning", display_run, + step=token, project=project, run_id=run_id)) + if not _has_config_files(step_path): + issues.append(_make_issue("config_unavailable", "info", display_run, + step=token, project=project, run_id=run_id)) + + def build_diagnose_issues(run_dir: str, step_token: str | None = None, project: str | None = None, run_id: str | None = None) -> tuple[list[dict], int]: @@ -115,6 +135,7 @@ def build_diagnose_issues(run_dir: str, step_token: str | None = None, issues.append(_make_issue("unknown_step", "error", display_run, step=step_token, project=project, run_id=run_id)) return issues, 1 + for s in steps: token = normalize_step_name(s.get("name", "")) if step_token is not None and token != step_token: @@ -139,23 +160,10 @@ def build_diagnose_issues(run_dir: str, step_token: str | None = None, project=project, run_id=run_id)) if token in step_dirs: - error_count = _count_log_errors(run_dir, token) - if error_count > 0: - issues.append(_make_issue("log_errors", "error", display_run, - step=token, count=error_count, - project=project, run_id=run_id)) - - if not _has_metrics(run_dir, token): - issues.append(_make_issue("missing_metrics", "warning", display_run, - step=token, 
project=project, run_id=run_id)) - - if not _has_investigation_files(step_dirs[token]): - issues.append(_make_issue("missing_artifacts", "warning", display_run, - step=token, project=project, run_id=run_id)) - - if not _has_config_files(step_dirs[token]): - issues.append(_make_issue("config_unavailable", "info", display_run, - step=token, project=project, run_id=run_id)) + _check_step_artifacts( + issues, run_dir, token, step_dirs[token], + display_run, project, run_id, + ) else: issues.append(_make_issue("missing_metrics", "warning", display_run, step=token, project=project, run_id=run_id)) @@ -168,20 +176,10 @@ def build_diagnose_issues(run_dir: str, step_token: str | None = None, if step_token is not None: dir_only_tokens &= {step_token} for token in sorted(dir_only_tokens): - error_count = _count_log_errors(run_dir, token) - if error_count > 0: - issues.append(_make_issue("log_errors", "error", display_run, - step=token, count=error_count, - project=project, run_id=run_id)) - if not _has_metrics(run_dir, token): - issues.append(_make_issue("missing_metrics", "warning", display_run, - step=token, project=project, run_id=run_id)) - if not _has_investigation_files(step_dirs[token]): - issues.append(_make_issue("missing_artifacts", "warning", display_run, - step=token, project=project, run_id=run_id)) - if not _has_config_files(step_dirs[token]): - issues.append(_make_issue("config_unavailable", "info", display_run, - step=token, project=project, run_id=run_id)) + _check_step_artifacts( + issues, run_dir, token, step_dirs[token], + display_run, project, run_id, + ) has_error = any(i.get("severity") == "error" for i in issues) return issues, 1 if has_error else 0 diff --git a/chipcompiler/cli/handlers.py b/chipcompiler/cli/handlers.py index 94cb5083..fdd6203c 100644 --- a/chipcompiler/cli/handlers.py +++ b/chipcompiler/cli/handlers.py @@ -22,17 +22,17 @@ def param(args, ctx: CommandContext) -> CommandResult: ) subcmd = getattr(args, "param_command", None) - if subcmd == "list": - return param_list(args, ctx) - if subcmd == "show": - return param_show(args, ctx) - if subcmd == "set": - return param_set(args, ctx) - if subcmd == "unset": - return param_unset(args, ctx) - if subcmd == "diff": - return param_diff(args, ctx) - return CommandResult.err([error_record("missing_subcommand")], exit_code=1) + handlers = { + "list": param_list, + "show": param_show, + "set": param_set, + "unset": param_unset, + "diff": param_diff, + } + handler = handlers.get(subcmd) + if handler is None: + return CommandResult.err([error_record("missing_subcommand")], exit_code=1) + return handler(args, ctx) def status(args, ctx: CommandContext) -> CommandResult: @@ -496,16 +496,13 @@ def check(args, ctx: CommandContext) -> CommandResult: errors = validate_project_config(cfg) if errors: - records = [] - for err in errors: - records.append({ - "check": "config", - "status": "fail", - "reason": err, - "source": "ecc.toml", - "inspect": disclosure_cmd("ecc check --json", project), - }) - return CommandResult.err(records) + return CommandResult.err([{ + "check": "config", + "status": "fail", + "reason": err, + "source": "ecc.toml", + "inspect": disclosure_cmd("ecc check --json", project), + } for err in errors]) records = [{ "project": cfg.design_name, @@ -554,14 +551,11 @@ def run(args, ctx: CommandContext) -> CommandResult: cfg = load_project_config(config_path) errors = validate_project_config(cfg) if errors: - records = [] - for err in errors: - records.append({ - "kind": "error", - "error": "config_error", - 
"reason": err, - }) - return CommandResult.err(records) + return CommandResult.err([{ + "kind": "error", + "error": "config_error", + "reason": err, + } for err in errors]) # Parse and validate --set overrides before workspace creation cli_overrides = {} @@ -570,14 +564,11 @@ def run(args, ctx: CommandContext) -> CommandResult: from chipcompiler.cli.params import parse_cli_overrides cli_overrides, set_errors = parse_cli_overrides(raw_sets) if set_errors: - records = [] - for err in set_errors: - records.append({ - "kind": "error", - "error": "invalid_parameter", - "reason": err, - }) - return CommandResult.err(records) + return CommandResult.err([{ + "kind": "error", + "error": "invalid_parameter", + "reason": err, + } for err in set_errors]) run_dir = os.path.join(project_dir, "runs", "default") flow_json = os.path.join(run_dir, "home", "flow.json") @@ -647,11 +638,11 @@ def run(args, ctx: CommandContext) -> CommandResult: # Persist CLI parameter provenance for config --resolved inspection if cli_overrides: - import json as _json + import json provenance_path = os.path.join(run_dir, "home", "cli-param-overrides.json") os.makedirs(os.path.dirname(provenance_path), exist_ok=True) with open(provenance_path, "w") as _f: - _json.dump(cli_overrides, _f) + json.dump(cli_overrides, _f) try: engine_flow = EngineFlow(workspace=workspace) diff --git a/chipcompiler/cli/inspect.py b/chipcompiler/cli/inspect.py index 34b18aff..e059f17e 100644 --- a/chipcompiler/cli/inspect.py +++ b/chipcompiler/cli/inspect.py @@ -50,17 +50,16 @@ def get_run_status(flow_data: dict) -> str: steps = _safe_steps(flow_data) if not steps: return "unstart" - for step in steps: - state = normalize_state(step.get("state", "")) - if state in ("ongoing", "pending"): - return "ongoing" - if state in ("incomplete", "invalid"): - return "failed" - all_success = all(normalize_state(s.get("state", "")) == "success" for s in steps) - if all_success: + states = {normalize_state(s.get("state", "")) for s in steps} + if states & {"ongoing", "pending"}: + return "ongoing" + if states & {"incomplete", "invalid"}: + return "failed" + if states == {"success"}: return "success" - all_unstart = all(normalize_state(s.get("state", "")) == "unstart" for s in steps) - return "unstart" if all_unstart else "failed" + if states == {"unstart"}: + return "unstart" + return "failed" ERROR_PATTERNS = re.compile(r"(error|failed|traceback)", re.IGNORECASE) diff --git a/chipcompiler/cli/log_view.py b/chipcompiler/cli/log_view.py index 34c31ff2..fbe32777 100644 --- a/chipcompiler/cli/log_view.py +++ b/chipcompiler/cli/log_view.py @@ -1,5 +1,6 @@ import enum import re +import sys class LineKind(enum.Enum): @@ -137,16 +138,13 @@ def render_log_pretty( file=None, color: bool = True, ) -> None: - import sys target = file or sys.stdout annotated = annotate_log_lines(lines) - if color: - target.write(f"{_BOLD}[log]{_RESET} step={step}\n") - target.write(f" {_DIM}source:{_RESET} {source}\n") - else: - target.write(f"[log] step={step}\n") - target.write(f" source: {source}\n") + log_tag = f"{_BOLD}[log]{_RESET}" if color else "[log]" + source_label = f" {_DIM}source:{_RESET}" if color else " source:" + target.write(f"{log_tag} step={step}\n") + target.write(f"{source_label} {source}\n") for ll in annotated: label = _KIND_LABEL[ll.kind] @@ -156,10 +154,8 @@ def render_log_pretty( else: target.write(f" {label} {ll.text}\n") - if color: - target.write(f" {_DIM}inspect:{_RESET} {inspect_cmd}\n") - else: - target.write(f" inspect: {inspect_cmd}\n") + inspect_label = f" 
{_DIM}inspect:{_RESET}" if color else " inspect:" + target.write(f"{inspect_label} {inspect_cmd}\n") def _format_value(value) -> str: @@ -184,7 +180,6 @@ def render_log_plain( inspect_cmd: str, file=None, ) -> None: - import sys target = file or sys.stdout records = build_log_records(step, source, lines, inspect_cmd) for rec in records: @@ -192,7 +187,6 @@ def render_log_plain( def render_log_records_plain(records, file=None) -> None: - import sys target = file or sys.stdout for rec in records: _render_plain_record(rec, target) @@ -203,7 +197,6 @@ def render_log_listing_pretty( file=None, color: bool = True, ) -> None: - import sys target = file or sys.stdout if color: @@ -217,16 +210,10 @@ def render_log_listing_pretty( inspect = rec.get("inspect_cmd") or rec.get("inspect", "") if step: - if color: - target.write(f" {_CYAN}{step}{_RESET} {source}\n") - target.write(f" {_DIM}inspect:{_RESET} {inspect}\n") - else: - target.write(f" {step} {source}\n") - target.write(f" inspect: {inspect}\n") + step_label = f" {_CYAN}{step}{_RESET}" if color else f" {step}" else: - if color: - target.write(f" {source}\n") - target.write(f" {_DIM}inspect:{_RESET} {inspect}\n") - else: - target.write(f" {source}\n") - target.write(f" inspect: {inspect}\n") + step_label = "" + + target.write(f"{step_label} {source}\n") + inspect_label = f" {_DIM}inspect:{_RESET}" if color else " inspect:" + target.write(f"{inspect_label} {inspect}\n") diff --git a/chipcompiler/cli/main.py b/chipcompiler/cli/main.py index 6208402e..df0f926a 100644 --- a/chipcompiler/cli/main.py +++ b/chipcompiler/cli/main.py @@ -1,4 +1,5 @@ import argparse +import os import sys from collections.abc import Sequence @@ -147,21 +148,22 @@ def _render_param_text(args, result) -> None: render_result(result, OutputMode.PLAIN) return + renderers = { + "list": render_param_list_text, + "show": render_param_show_text, + "set": render_param_set_text, + "unset": render_param_set_text, + "diff": render_param_diff_text, + } subcmd = getattr(args, "param_command", None) - if subcmd == "list": - render_param_list_text(result.records) - elif subcmd == "show": - render_param_show_text(result.records) - elif subcmd in ("set", "unset"): - render_param_set_text(result.records) - elif subcmd == "diff": - render_param_diff_text(result.records) + renderer = renderers.get(subcmd) + if renderer: + renderer(result.records) else: render_result(result, OutputMode.PLAIN) def _should_colorize(): - import os if not sys.stdout.isatty(): return False if os.environ.get("NO_COLOR") is not None: diff --git a/chipcompiler/cli/param_handler.py b/chipcompiler/cli/param_handler.py index 05472bb4..2f785d71 100644 --- a/chipcompiler/cli/param_handler.py +++ b/chipcompiler/cli/param_handler.py @@ -2,6 +2,7 @@ import os import re +import sys from chipcompiler.cli.output import disclosure_cmd from chipcompiler.cli.params import ( @@ -198,7 +199,6 @@ def param_diff(args, ctx: CommandContext) -> CommandResult: def render_param_result(result, mode: OutputMode, file=None) -> bool: """Render param-specific output. 
Returns True if handled, False otherwise.""" - import sys target = file or sys.stdout if mode == OutputMode.JSON: @@ -218,7 +218,6 @@ def render_param_result(result, mode: OutputMode, file=None) -> bool: def render_param_list_text(records, file=None): - import sys target = file or sys.stdout groups: dict[str, list] = {} for r in records: @@ -237,7 +236,6 @@ def render_param_list_text(records, file=None): def render_param_show_text(records, file=None): - import sys target = file or sys.stdout r = records[0] @@ -252,7 +250,6 @@ def render_param_show_text(records, file=None): def render_param_set_text(records, file=None): - import sys target = file or sys.stdout r = records[0] status = r.get("status", "") @@ -268,7 +265,6 @@ def render_param_set_text(records, file=None): def render_param_diff_text(records, file=None): - import sys target = file or sys.stdout if len(records) == 1 and records[0].get("diff_status") == "clean": print(" No overrides.", file=target) @@ -294,11 +290,12 @@ def _find_config_path(project_dir: str) -> str | None: def _load_toml_overrides(project_dir: str) -> tuple[dict[str, object], list[str]]: + from chipcompiler.cli.config import load_project_config + config_path = _find_config_path(project_dir) if config_path is None: return {}, [] - from chipcompiler.cli.config import load_project_config cfg = load_project_config(config_path) errors = list(getattr(cfg, "_param_errors", [])) toml_error = getattr(cfg, "_toml_error", None) @@ -465,9 +462,7 @@ def _remove_scoped_param_key(text: str, group: str, name: str) -> str | None: def _format_toml_value(val: object) -> str: if isinstance(val, bool): return "true" if val else "false" - if isinstance(val, int): - return str(val) - if isinstance(val, float): + if isinstance(val, (int, float)): return str(val) if isinstance(val, str): escaped = val.replace("\\", "\\\\").replace('"', '\\"') diff --git a/chipcompiler/cli/params.py b/chipcompiler/cli/params.py index b1a730f2..c2fcf2c3 100644 --- a/chipcompiler/cli/params.py +++ b/chipcompiler/cli/params.py @@ -1,7 +1,6 @@ from __future__ import annotations -from copy import deepcopy -from dataclasses import dataclass, field +from dataclasses import dataclass @dataclass(frozen=True) @@ -192,11 +191,7 @@ def is_known_key(key: str) -> bool: def validate_schema_record(schema: ParamSchema) -> list[str]: - errors: list[str] = [] - for f in _REQUIRED_FIELDS: - if not getattr(schema, f, None): - errors.append(f"missing required field: {f}") - return errors + return [f"missing required field: {f}" for f in _REQUIRED_FIELDS if not getattr(schema, f, None)] # --------------------------------------------------------------------------- @@ -229,31 +224,22 @@ def parse_value(raw: str, schema: ParamSchema) -> object: if ptype == "str": return raw - if ptype == "list[int]": - stripped = raw.strip("[]() ") - if not stripped: - return [] - parts = [p.strip() for p in stripped.split(",")] - try: - return [int(p) for p in parts if p] - except ValueError: - raise ValueError(f"expected list[int] for {schema.param}, got '{raw}'") - - if ptype == "list[float]": + if ptype in ("list[int]", "list[float]", "list[str]"): stripped = raw.strip("[]() ") if not stripped: return [] parts = [p.strip() for p in stripped.split(",")] - try: - return [float(p) for p in parts if p] - except ValueError: - raise ValueError(f"expected list[float] for {schema.param}, got '{raw}'") - - if ptype == "list[str]": - stripped = raw.strip("[]() ") - if not stripped: - return [] - return [p.strip() for p in stripped.split(",") if 
p.strip()] + if ptype == "list[int]": + try: + return [int(p) for p in parts if p] + except ValueError: + raise ValueError(f"expected list[int] for {schema.param}, got '{raw}'") + if ptype == "list[float]": + try: + return [float(p) for p in parts if p] + except ValueError: + raise ValueError(f"expected list[float] for {schema.param}, got '{raw}'") + return [p for p in parts if p] raise ValueError(f"unsupported type '{ptype}' for {schema.param}") From 5d23bf0c9558ddcd5e5939db63782f9057c1db34 Mon Sep 17 00:00:00 2001 From: Emin Date: Mon, 4 May 2026 23:28:00 +0800 Subject: [PATCH 077/104] feat(cli): add pretty output rendering for all CLI commands Replace raw key-value default text output with structured pretty blocks using a shared rendering layer. Add --plain flags to init, check, status, metrics, artifacts, config, and diagnose commands for stable machine- readable output. Improve plain key-value rendering with deterministic quoting/escaping. JSON and JSONL output modes remain unchanged. --- chipcompiler/cli/main.py | 30 ++- chipcompiler/cli/pretty.py | 474 +++++++++++++++++++++++++++++++++++ chipcompiler/cli/render.py | 39 ++- test/cli/test_cli_inspect.py | 183 +++++++------- test/cli/test_cli_main.py | 76 +++--- test/cli/test_pretty.py | 414 ++++++++++++++++++++++++++++++ 6 files changed, 1077 insertions(+), 139 deletions(-) create mode 100644 chipcompiler/cli/pretty.py create mode 100644 test/cli/test_pretty.py diff --git a/chipcompiler/cli/main.py b/chipcompiler/cli/main.py index df0f926a..532bd755 100644 --- a/chipcompiler/cli/main.py +++ b/chipcompiler/cli/main.py @@ -18,11 +18,13 @@ def build_parser() -> argparse.ArgumentParser: # ecc init init_parser = subparsers.add_parser("init", help="Create a new project skeleton") init_parser.add_argument("name", help="Project name") + init_parser.add_argument("--plain", action="store_true", help="Plain key-value output") # ecc check check_parser = subparsers.add_parser("check", help="Validate project configuration") _add_project_arg(check_parser) check_parser.add_argument("--json", action="store_true", help="JSON output") + check_parser.add_argument("--plain", action="store_true", help="Plain key-value output") # ecc run run_parser = subparsers.add_parser("run", help="Execute the complete flow") @@ -37,6 +39,7 @@ def build_parser() -> argparse.ArgumentParser: _add_project_arg(status_parser) status_parser.add_argument("--json", action="store_true", help="JSON output") status_parser.add_argument("--jsonl", action="store_true", help="JSONL output") + status_parser.add_argument("--plain", action="store_true", help="Plain key-value output") status_parser.add_argument("--run-id", default=None, dest="run_id", help="Run workspace selector") @@ -58,6 +61,7 @@ def build_parser() -> argparse.ArgumentParser: metrics_parser.add_argument("step", nargs="?", default=None, help="Step name") metrics_parser.add_argument("--json", action="store_true", help="JSON output") metrics_parser.add_argument("--jsonl", action="store_true", help="JSONL output") + metrics_parser.add_argument("--plain", action="store_true", help="Plain key-value output") metrics_parser.add_argument("--run-id", default=None, dest="run_id", help="Run workspace selector") @@ -67,6 +71,7 @@ def build_parser() -> argparse.ArgumentParser: artifacts_parser.add_argument("step", nargs="?", default=None, help="Step name") artifacts_parser.add_argument("--json", action="store_true", help="JSON output") artifacts_parser.add_argument("--jsonl", action="store_true", help="JSONL output") + 
artifacts_parser.add_argument("--plain", action="store_true", help="Plain key-value output") artifacts_parser.add_argument("--run-id", default=None, dest="run_id", help="Run workspace selector") @@ -78,6 +83,7 @@ def build_parser() -> argparse.ArgumentParser: help="Show resolved configuration") config_parser.add_argument("--json", action="store_true", help="JSON output") config_parser.add_argument("--jsonl", action="store_true", help="JSONL output") + config_parser.add_argument("--plain", action="store_true", help="Plain key-value output") config_parser.add_argument("--run-id", default=None, dest="run_id", help="Run workspace selector") @@ -87,6 +93,7 @@ def build_parser() -> argparse.ArgumentParser: diagnose_parser.add_argument("step", nargs="?", default=None, help="Step name") diagnose_parser.add_argument("--json", action="store_true", help="JSON output") diagnose_parser.add_argument("--jsonl", action="store_true", help="JSONL output") + diagnose_parser.add_argument("--plain", action="store_true", help="Plain key-value output") diagnose_parser.add_argument("--run-id", default=None, dest="run_id", help="Run workspace selector") @@ -137,15 +144,17 @@ def _add_project_arg(parser: argparse.ArgumentParser) -> None: help="Project directory (default: current directory)") -def _render_param_text(args, result) -> None: +def _render_param_text(args, result, color=True) -> None: from chipcompiler.cli.param_handler import ( render_param_diff_text, render_param_list_text, render_param_set_text, render_param_show_text, ) + from chipcompiler.cli.pretty import render_error + if result.exit_code != 0: - render_result(result, OutputMode.PLAIN) + render_error(result.records, color=color) return renderers = { @@ -173,17 +182,18 @@ def _should_colorize(): return True -def _render_log_text(args, result) -> None: +def _render_log_text(args, result, color=True) -> None: from chipcompiler.cli.log_view import ( render_log_listing_pretty, render_log_pretty, ) + from chipcompiler.cli.pretty import render_error, render_generic_block if getattr(args, "errors", False): print("warning: --errors is deprecated and no longer filters output", file=sys.stderr) if result.exit_code != 0: - render_result(result, OutputMode.PLAIN) + render_error(result.records, color=color) return records = result.records @@ -194,11 +204,9 @@ def _render_log_text(args, result) -> None: # Status/sentinel records (no_logs, empty, etc.) 
if "log_status" in first or "status" in first: - render_result(result, OutputMode.PLAIN) + render_generic_block(records, color=color, tag="log") return - color = _should_colorize() - # Step mode: records have line_no and kind if "line_no" in first: inspect_cmd = first.get("inspect_cmd", "") @@ -257,14 +265,16 @@ def run(argv: Sequence[str] | None = None) -> int: ctx = build_context(args) result = dispatch(args, ctx) + color = _should_colorize() + if args.command == "param" and ctx.output_mode == OutputMode.TEXT: - _render_param_text(args, result) + _render_param_text(args, result, color=color) elif args.command == "log" and ctx.output_mode == OutputMode.TEXT: - _render_log_text(args, result) + _render_log_text(args, result, color=color) elif args.command == "log" and ctx.output_mode == OutputMode.PLAIN: _render_log_plain(result) else: - render_result(result, ctx.output_mode) + render_result(result, ctx.output_mode, command=args.command, color=color) return result.exit_code diff --git a/chipcompiler/cli/pretty.py b/chipcompiler/cli/pretty.py new file mode 100644 index 00000000..8f64b8f4 --- /dev/null +++ b/chipcompiler/cli/pretty.py @@ -0,0 +1,474 @@ +import os +import sys + +from chipcompiler.cli.types import OutputMode + +# --- ANSI constants --- + +BOLD = "\x1b[1m" +DIM = "\x1b[2m" +RED = "\x1b[31m" +GREEN = "\x1b[32m" +YELLOW = "\x1b[33m" +BLUE = "\x1b[34m" +CYAN = "\x1b[36m" +RESET = "\x1b[0m" + +# --- Color gating --- + + +def supports_color(file=None, env=None): + if env is None: + env = os.environ + target = file or sys.stdout + if not hasattr(target, "isatty") or not target.isatty(): + return False + if env.get("NO_COLOR") is not None: + return False + if env.get("TERM", "") == "dumb": + return False + return True + + +def style(text, code, enabled=True): + if not enabled: + return text + return f"{code}{text}{RESET}" + + +# --- Display key normalization --- + + +def display_key(key): + k = key[:-4] if key.endswith("_cmd") else key + return k.replace("_", " ") + + +# --- Value formatting --- + + +def format_value(value): + s = str(value) + if any(c.isspace() for c in s) or "\\" in s or '"' in s or "=" in s: + escaped = s.replace("\\", "\\\\").replace('"', '\\"') + return f'"{escaped}"' + return s + + +# --- Plain key-value formatting (stable, parseable) --- + + +def format_plain_value(value): + s = str(value) + if any(c.isspace() for c in s) or "\\" in s or '"' in s or "=" in s: + escaped = s.replace("\\", "\\\\").replace('"', '\\"') + return f'"{escaped}"' + return s + + +# --- Pretty block rendering --- + + +def render_header(tag, color=True): + return style(f"[{tag}]", BOLD, color) + + +def render_field(label, value, color=True, dim_label=False): + if dim_label: + return f" {style(label + ':', DIM, color)} {value}" + return f" {label}: {value}" + + +def render_generic_block(records, file=None, color=True, tag=None): + """Render records as a generic pretty block.""" + target = file or sys.stdout + first = records[0] if records else {} + + header_tag = tag or _infer_tag(first) + target.write(f"{render_header(header_tag, color)}\n") + + for record in records: + for key, value in record.items(): + if value is None: + continue + dk = display_key(key) + target.write(f" {dk}: {value}\n") + + target.write("\n") + + +def _infer_tag(record): + for key in ("status", "run", "project", "kind"): + if key in record: + return key + return "result" + + +# --- Status-specific color helpers --- + +_STATUS_COLORS = { + "success": GREEN, + "clean": GREEN, + "checked": GREEN, + "created": GREEN, + 
"pass": GREEN, + "set": GREEN, + "failed": RED, + "fail": RED, + "missing": RED, + "corrupt": RED, + "error": RED, + "unknown_step": RED, + "invalid": RED, + "warning": YELLOW, + "incomplete": YELLOW, + "ongoing": YELLOW, + "pending": YELLOW, +} + + +def status_style(status_text, color=True): + code = _STATUS_COLORS.get(status_text) + if code and color: + return style(status_text, code, True) + return status_text + + +# --- Command-specific pretty renderers --- + + +def render_init(records, file=None, color=True): + target = file or sys.stdout + r = records[0] + target.write(f"{render_header('init', color)}\n") + target.write(f" project: {r.get('project', '')}\n") + target.write(f" status: {status_style(r.get('status', ''), color)}\n") + target.write(f" path: {r.get('path', '')}\n") + _render_disclosure_fields(target, r, color) + target.write("\n") + + +def render_check(records, file=None, color=True): + target = file or sys.stdout + first = records[0] + + if first.get("kind") == "error" or first.get("status") == "fail": + target.write(f"{render_header('check', color)}\n") + for r in records: + reason = r.get("reason", r.get("error", "")) + target.write(f" {status_style('fail', color)} {reason}\n") + if r.get("inspect"): + target.write(render_field("inspect", r["inspect"], color, dim_label=True) + "\n") + target.write("\n") + return + + target.write(f"{render_header('check', color)}\n") + r = records[0] + target.write(f" project: {r.get('project', '')}\n") + target.write(f" status: {status_style(r.get('status', ''), color)}\n") + target.write(f" config: {r.get('config', '')}\n") + _render_disclosure_fields(target, r, color) + + for r in records[1:]: + label = r.get("check", "") + st = r.get("status", "") + target.write(f" {label}: {status_style(st, color)}\n") + if r.get("path"): + target.write(f" path: {r['path']}\n") + if r.get("inspect"): + target.write(render_field("inspect", r["inspect"], color, dim_label=True) + "\n") + + target.write("\n") + + +def render_run_summary(records, file=None, color=True): + target = file or sys.stdout + r = records[0] + st = r.get("status", "") + tag = "run" + target.write(f"{render_header(tag, color)}\n") + target.write(f" run: {r.get('run', '')}\n") + target.write(f" status: {status_style(st, color)}\n") + target.write(f" workspace: {r.get('workspace', '')}\n") + _render_disclosure_fields(target, r, color) + target.write("\n") + + +def render_status(records, file=None, color=True): + target = file or sys.stdout + first = records[0] + + if first.get("kind") == "error": + render_generic_block(records, file=file, color=color, tag="status") + return + + st = first.get("status", "") + target.write(f"{render_header('status', color)}\n") + target.write(f" run: {first.get('run', '')}\n") + target.write(f" status: {status_style(st, color)}\n") + if first.get("workspace"): + target.write(f" workspace: {first['workspace']}\n") + _render_disclosure_fields(target, first, color) + + step_records = [r for r in records if "step" in r] + if step_records: + target.write("\n") + target.write(f" {style('steps', CYAN if color else None, color)}:\n" if color else " steps:\n") + for r in step_records: + step = r.get("step", "") + tool = r.get("tool", "") + st = r.get("status", "") + runtime = r.get("runtime", "") or "" + step_label = style(step, CYAN, color) if color else step + status_label = status_style(st, color) + line = f" {step_label} ({tool}) {status_label}" + if runtime: + line += f" {runtime}" + target.write(f"{line}\n") + _render_step_disclosure(target, r, color) + 
+ target.write("\n") + + +def render_metrics(records, file=None, color=True): + target = file or sys.stdout + first = records[0] + + if first.get("kind") == "error" or first.get("status") in ("missing", "unknown_step", "corrupt"): + render_generic_block(records, file=file, color=color, tag="metrics") + return + + if first.get("metrics_status") == "none": + target.write(f"{render_header('metrics', color)}\n") + target.write(f" No metrics available.\n") + if first.get("inspect_cmd"): + target.write(render_field("inspect", first["inspect_cmd"], color, dim_label=True) + "\n") + target.write("\n") + return + + target.write(f"{render_header('metrics', color)}\n") + + current_step = None + for r in records: + step = r.get("step", r.get("metric_step", "")) + if step != current_step: + if current_step is not None: + target.write("\n") + current_step = step + target.write(f" {style(step, CYAN, color) if color else step}:\n") + + metric = r.get("metric", "") + value = r.get("value", "") + if metric: + target.write(f" {metric}: {value}\n") + elif r.get("status"): + target.write(f" {status_style(r['status'], color)}\n") + if r.get("source"): + target.write(render_field("source", r["source"], color, dim_label=True) + "\n") + if r.get("inspect"): + target.write(render_field("inspect", r["inspect"], color, dim_label=True) + "\n") + + target.write("\n") + + +def render_artifacts(records, file=None, color=True): + target = file or sys.stdout + first = records[0] + + if first.get("kind") == "error" or first.get("status") in ("unknown_step",): + render_generic_block(records, file=file, color=color, tag="artifacts") + return + + if first.get("artifacts_status") == "none": + target.write(f"{render_header('artifacts', color)}\n") + target.write(f" No artifacts found.\n") + if first.get("inspect_cmd"): + target.write(render_field("inspect", first["inspect_cmd"], color, dim_label=True) + "\n") + target.write("\n") + return + + target.write(f"{render_header('artifacts', color)}\n") + + current_step = None + for r in records: + step = r.get("step", "") + if step != current_step: + if current_step is not None: + target.write("\n") + current_step = step + target.write(f" {style(step, CYAN, color) if color else step}:\n") + + artifact = r.get("artifact", "") + role = r.get("role", "") + path = r.get("path", "") + target.write(f" {artifact} ({role})\n") + if path: + target.write(render_field("path", path, color, dim_label=True) + "\n") + if r.get("inspect"): + target.write(render_field("inspect", r["inspect"], color, dim_label=True) + "\n") + if r.get("metrics"): + target.write(render_field("metrics", r["metrics"], color, dim_label=True) + "\n") + if r.get("config"): + target.write(render_field("config", r["config"], color, dim_label=True) + "\n") + + target.write("\n") + + +def render_config(records, file=None, color=True): + target = file or sys.stdout + first = records[0] + + if first.get("kind") == "error": + render_generic_block(records, file=file, color=color, tag="config") + return + + if first.get("config_status") == "none": + target.write(f"{render_header('config', color)}\n") + msg = f" No configuration for step {first.get('step', '')}.\n" if first.get("step") else " No configuration found.\n" + target.write(msg) + if first.get("artifacts"): + target.write(render_field("artifacts", first["artifacts"], color, dim_label=True) + "\n") + target.write("\n") + return + + target.write(f"{render_header('config', color)}\n") + + current_scope = None + for r in records: + scope = r.get("scope", "") + if scope != 
current_scope: + if current_scope is not None: + target.write("\n") + current_scope = scope + scope_label = style(scope, CYAN, color) if color else scope + target.write(f" {scope_label}:\n") + + config = r.get("config", r.get("key", "")) + value = r.get("value", "") + source = r.get("source", "") + step = r.get("step", "") + + if r.get("kind") == "param": + target.write(f" {config}: {value}") + if source and source != "default": + target.write(f" ({source})") + target.write("\n") + elif scope == "step": + target.write(f" {config} ({r.get('role', '')})\n") + target.write(f" path: {r.get('path', '')}\n") + else: + target.write(f" {config}: {value}") + if source: + target.write(f" ({source})") + target.write("\n") + + if r.get("inspect"): + target.write(render_field("inspect", r["inspect"], color, dim_label=True) + "\n") + + target.write("\n") + + +def render_diagnose(records, file=None, color=True): + target = file or sys.stdout + first = records[0] + + if first.get("kind") == "error": + render_generic_block(records, file=file, color=color, tag="diagnose") + return + + if first.get("status") == "clean": + target.write(f"{render_header('diagnose', color)}\n") + target.write(f" {status_style('clean', color)} No issues found.\n") + _render_disclosure_fields(target, first, color) + target.write("\n") + return + + target.write(f"{render_header('diagnose', color)}\n") + + by_severity = {} + for r in records: + sev = r.get("severity", "info") + by_severity.setdefault(sev, []).append(r) + + for severity in ("error", "warning", "info"): + issues = by_severity.get(severity, []) + if not issues: + continue + sev_label = status_style(severity, color) + target.write(f" {sev_label}:\n") + for r in issues: + issue = r.get("issue", "") + target.write(f" {issue}\n") + if r.get("evidence"): + target.write(f" evidence: {r['evidence']}\n") + if r.get("step"): + target.write(f" step: {r['step']}\n") + if r.get("count"): + target.write(f" count: {r['count']}\n") + _render_step_disclosure(target, r, color, indent=" ") + + target.write("\n") + + +def render_error(records, file=None, color=True): + target = file or sys.stdout + first = records[0] + error = first.get("error", first.get("kind", "error")) + reason = first.get("reason", "") + target.write(f"{render_header('error', color)}\n") + target.write(f" {status_style(error, color)}") + if reason: + target.write(f" {reason}") + target.write("\n") + for key, value in first.items(): + if key in ("kind", "error", "reason"): + continue + if value is None: + continue + dk = display_key(key) + target.write(render_field(dk, value, color, dim_label=True) + "\n") + target.write("\n") + + +# --- Internal helpers --- + + +def _render_disclosure_fields(target, record, color): + for key in sorted(record.keys()): + if not key.endswith("_cmd") and key not in ("inspect", "check", "run", + "start_cmd", "log", "config", + "artifacts", "metrics"): + continue + value = record.get(key) + if not value: + continue + label = display_key(key) + target.write(render_field(label, value, color, dim_label=True) + "\n") + + +def _render_step_disclosure(target, record, color, indent=" "): + for key in ("metrics_cmd", "log_cmd", "log", "artifacts", "config", + "start_cmd", "inspect"): + value = record.get(key) + if not value: + continue + label = display_key(key) + dim_label = style(f"{label}:", DIM, color) if color else f"{label}:" + target.write(f"{indent}{dim_label} {value}\n") + + +# --- Renderer registry --- + + +def get_pretty_renderer(command): + registry = { + "init": render_init, + 
"check": render_check, + "run": render_run_summary, + "status": render_status, + "metrics": render_metrics, + "artifacts": render_artifacts, + "config": render_config, + "diagnose": render_diagnose, + } + return registry.get(command) diff --git a/chipcompiler/cli/render.py b/chipcompiler/cli/render.py index cc443f4d..b13607da 100644 --- a/chipcompiler/cli/render.py +++ b/chipcompiler/cli/render.py @@ -38,16 +38,51 @@ def render_plain(records: tuple[dict, ...], file=None) -> None: for key, value in record.items(): if value is None: continue - parts.append(f"{key}={value}") + parts.append(f"{key}={_plain_value(value)}") print(" ".join(parts), file=target) -def render_result(result: CommandResult, mode: OutputMode, file=None) -> None: +def _plain_value(value) -> str: + s = str(value) + if any(c.isspace() for c in s) or "\\" in s or '"' in s or "=" in s: + escaped = s.replace("\\", "\\\\").replace('"', '\\"') + return f'"{escaped}"' + return s + + +def render_result(result: CommandResult, mode: OutputMode, file=None, + command=None, color=True) -> None: if mode == OutputMode.JSON: render_json(result, file=file) elif mode == OutputMode.JSONL: render_jsonl(result, file=file) elif mode == OutputMode.PLAIN: render_plain(result.records, file=file) + elif mode == OutputMode.TEXT: + _render_pretty(result, file=file, command=command, color=color) else: render_text(result.records, file=file) + + +def _render_pretty(result: CommandResult, file=None, command=None, color=True) -> None: + from chipcompiler.cli.pretty import ( + get_pretty_renderer, + render_error, + render_generic_block, + ) + + records = result.records + if not records: + return + + first = records[0] + + if result.exit_code != 0 and first.get("kind") == "error": + render_error(records, file=file, color=color) + return + + renderer = get_pretty_renderer(command) if command else None + if renderer: + renderer(records, file=file, color=color) + else: + render_generic_block(records, file=file, color=color) diff --git a/test/cli/test_cli_inspect.py b/test/cli/test_cli_inspect.py index a5c4565e..6041927b 100644 --- a/test/cli/test_cli_inspect.py +++ b/test/cli/test_cli_inspect.py @@ -74,7 +74,19 @@ def _create_step_dir(run_dir, step_name, tool, subdirs=None, files=None): def _has_disclosure(line: str) -> bool: - return '"ecc ' in line or "=ecc " in line + return bool( + '"ecc ' in line + or "=ecc " in line + or " ecc check" in line + or " ecc run" in line + or " ecc status" in line + or " ecc log" in line + or " ecc metrics" in line + or " ecc artifacts" in line + or " ecc config" in line + or " ecc diagnose" in line + or " ecc param" in line + ) def _mock_pdk_validation(monkeypatch): @@ -98,7 +110,7 @@ def test_status_default_run_id(self, tmp_path, capsys): rc = cli_main.run(["status", "--run-id", "default", "--project", project_dir]) assert rc == 0 out = capsys.readouterr().out - assert "run=default" in out + assert "default" in out def test_status_simple_token_run_id(self, tmp_path, capsys): project_dir = _create_valid_project(tmp_path) @@ -109,7 +121,7 @@ def test_status_simple_token_run_id(self, tmp_path, capsys): rc = cli_main.run(["status", "--run-id", "run_004", "--project", project_dir]) assert rc == 0 out = capsys.readouterr().out - assert "run=run_004" in out + assert "run_004" in out def test_status_relative_path_run_id(self, tmp_path, capsys): project_dir = _create_valid_project(tmp_path) @@ -121,7 +133,7 @@ def test_status_relative_path_run_id(self, tmp_path, capsys): ) assert rc == 0 out = capsys.readouterr().out - assert 
"run=sweeps/sweep_001/run_004" in out + assert "sweeps/sweep_001/run_004" in out def test_status_absolute_path_run_id(self, tmp_path, capsys): project_dir = _create_valid_project(tmp_path) @@ -133,7 +145,7 @@ def test_status_absolute_path_run_id(self, tmp_path, capsys): ) assert rc == 0 out = capsys.readouterr().out - assert "run=" in out + assert "run:" in out def test_status_missing_run_id(self, tmp_path, capsys): project_dir = _create_valid_project(tmp_path) @@ -141,7 +153,7 @@ def test_status_missing_run_id(self, tmp_path, capsys): rc = cli_main.run(["status", "--run-id", "nonexistent", "--project", project_dir]) assert rc == 1 out = capsys.readouterr().out - assert "status=missing" in out + assert "missing" in out def test_log_preserves_run_id(self, tmp_path, capsys): project_dir = _create_valid_project(tmp_path) @@ -191,9 +203,9 @@ def test_artifacts_all_steps(self, tmp_path, capsys): rc = cli_main.run(["artifacts", "--project", project_dir]) assert rc == 0 out = capsys.readouterr().out - assert "step=cts" in out - assert "role=output" in out - assert "role=log" in out + assert "cts" in out + assert "(output)" in out + assert "(log)" in out def test_artifacts_single_step(self, tmp_path, capsys): project_dir = _create_valid_project(tmp_path) @@ -205,8 +217,8 @@ def test_artifacts_single_step(self, tmp_path, capsys): rc = cli_main.run(["artifacts", "cts", "--project", project_dir]) assert rc == 0 out = capsys.readouterr().out - assert "step=cts" in out - assert "role=output" in out + assert "cts" in out + assert "(output)" in out def test_artifacts_unknown_step(self, tmp_path, capsys): project_dir = _create_valid_project(tmp_path) @@ -216,7 +228,7 @@ def test_artifacts_unknown_step(self, tmp_path, capsys): rc = cli_main.run(["artifacts", "nonexistent", "--project", project_dir]) assert rc == 1 out = capsys.readouterr().out - assert "status=unknown_step" in out + assert "unknown_step" in out def test_artifacts_empty_known_step(self, tmp_path, capsys): project_dir = _create_valid_project(tmp_path) @@ -227,7 +239,7 @@ def test_artifacts_empty_known_step(self, tmp_path, capsys): rc = cli_main.run(["artifacts", "cts", "--project", project_dir]) assert rc == 0 out = capsys.readouterr().out - assert "artifacts_status=none" in out + assert "No artifacts found" in out def test_artifacts_json(self, tmp_path, capsys): project_dir = _create_valid_project(tmp_path) @@ -269,7 +281,7 @@ def test_artifacts_with_run_id(self, tmp_path, capsys): ) assert rc == 0 out = capsys.readouterr().out - assert "step=cts" in out + assert "cts" in out def test_artifacts_derives_roles_from_dirs(self, tmp_path, capsys): project_dir = _create_valid_project(tmp_path) @@ -303,10 +315,10 @@ def test_config_resolved_project(self, tmp_path, capsys, monkeypatch): rc = cli_main.run(["config", "--resolved", "--project", project_dir]) assert rc == 0 out = capsys.readouterr().out - assert "config=design.name" in out - assert "scope=project" in out - assert "config=pdk.name" in out - assert "config=run_dir" in out + assert "design.name" in out + assert "project:" in out + assert "pdk.name" in out + assert "run_dir" in out def test_config_resolved_json(self, tmp_path, capsys, monkeypatch): _mock_pdk_validation(monkeypatch) @@ -403,9 +415,9 @@ def test_config_missing_config_text_has_kind_error(self, tmp_path, capsys): rc = cli_main.run(["config", "--resolved", "--project", str(project_dir)]) assert rc == 1 out = capsys.readouterr().out - assert "kind=error" in out - assert "error=missing_config" in out - assert 'inspect="ecc check 
--project ' in out + assert "[error]" in out + assert "missing_config" in out + assert "ecc check" in out assert str(project_dir) in out def test_config_requires_resolved(self, tmp_path, capsys): @@ -433,8 +445,8 @@ def test_config_step_lists_files(self, tmp_path, capsys, monkeypatch): rc = cli_main.run(["config", "cts", "--resolved", "--project", project_dir]) assert rc == 0 out = capsys.readouterr().out - assert "step=cts" in out - assert "scope=step" in out + assert "step:" in out or "cts" in out + assert "step:" in out or "step:" in out assert "cts_default_config.json" in out def test_config_step_json(self, tmp_path, capsys, monkeypatch): @@ -481,8 +493,8 @@ def test_diagnose_missing_run(self, tmp_path, capsys): rc = cli_main.run(["diagnose", "--project", project_dir]) assert rc == 1 out = capsys.readouterr().out - assert "issue=missing_run" in out - assert "severity=error" in out + assert "missing_run" in out + assert "error:" in out def test_diagnose_invalid_flow_json(self, tmp_path, capsys): project_dir = _create_valid_project(tmp_path) @@ -495,7 +507,7 @@ def test_diagnose_invalid_flow_json(self, tmp_path, capsys): rc = cli_main.run(["diagnose", "--project", project_dir]) assert rc == 1 out = capsys.readouterr().out - assert "issue=invalid_flow_json" in out + assert "invalid_flow_json" in out def test_diagnose_failed_step(self, tmp_path, capsys): project_dir = _create_valid_project(tmp_path) @@ -510,8 +522,8 @@ def test_diagnose_failed_step(self, tmp_path, capsys): rc = cli_main.run(["diagnose", "--project", project_dir]) assert rc == 1 out = capsys.readouterr().out - assert "issue=failed_step" in out - assert "severity=error" in out + assert "failed_step" in out + assert "error:" in out def test_diagnose_ongoing_step_warning(self, tmp_path, capsys): project_dir = _create_valid_project(tmp_path) @@ -528,8 +540,8 @@ def test_diagnose_ongoing_step_warning(self, tmp_path, capsys): rc = cli_main.run(["diagnose", "--project", project_dir]) assert rc == 0 out = capsys.readouterr().out - assert "issue=ongoing_step" in out - assert "severity=warning" in out + assert "ongoing_step" in out + assert "warning:" in out def test_diagnose_unstarted_step_info(self, tmp_path, capsys): project_dir = _create_valid_project(tmp_path) @@ -546,8 +558,8 @@ def test_diagnose_unstarted_step_info(self, tmp_path, capsys): rc = cli_main.run(["diagnose", "--project", project_dir]) assert rc == 0 out = capsys.readouterr().out - assert "issue=unstarted_step" in out - assert "severity=info" in out + assert "unstarted_step" in out + assert "info:" in out def test_diagnose_log_errors_count(self, tmp_path, capsys): project_dir = _create_valid_project(tmp_path) @@ -564,8 +576,8 @@ def test_diagnose_log_errors_count(self, tmp_path, capsys): rc = cli_main.run(["diagnose", "--project", project_dir]) assert rc == 1 out = capsys.readouterr().out - assert "issue=log_errors" in out - assert "count=2" in out + assert "log_errors" in out + assert "count: 2" in out def test_diagnose_missing_metrics_warning(self, tmp_path, capsys): project_dir = _create_valid_project(tmp_path) @@ -581,8 +593,8 @@ def test_diagnose_missing_metrics_warning(self, tmp_path, capsys): rc = cli_main.run(["diagnose", "--project", project_dir]) assert rc == 0 out = capsys.readouterr().out - assert "issue=missing_metrics" in out - assert "severity=warning" in out + assert "missing_metrics" in out + assert "warning:" in out def test_diagnose_missing_artifacts_warning(self, tmp_path, capsys): project_dir = _create_valid_project(tmp_path) @@ -601,8 +613,8 @@ 
def test_diagnose_missing_artifacts_warning(self, tmp_path, capsys): rc = cli_main.run(["diagnose", "--project", project_dir]) assert rc == 0 out = capsys.readouterr().out - assert "issue=missing_artifacts" in out - assert "severity=warning" in out + assert "missing_artifacts" in out + assert "warning:" in out def test_diagnose_config_unavailable_info(self, tmp_path, capsys): project_dir = _create_valid_project(tmp_path) @@ -618,8 +630,8 @@ def test_diagnose_config_unavailable_info(self, tmp_path, capsys): rc = cli_main.run(["diagnose", "--project", project_dir]) assert rc == 0 out = capsys.readouterr().out - assert "issue=config_unavailable" in out - assert "severity=info" in out + assert "config_unavailable" in out + assert "info:" in out def test_diagnose_clean_run(self, tmp_path, capsys): project_dir = _create_valid_project(tmp_path) @@ -637,7 +649,7 @@ def test_diagnose_clean_run(self, tmp_path, capsys): rc = cli_main.run(["diagnose", "--project", project_dir]) assert rc == 0 out = capsys.readouterr().out - assert "status=clean" in out + assert "clean" in out def test_diagnose_step_filter(self, tmp_path, capsys): project_dir = _create_valid_project(tmp_path) @@ -657,9 +669,9 @@ def test_diagnose_step_filter(self, tmp_path, capsys): rc = cli_main.run(["diagnose", "cts", "--project", project_dir]) assert rc == 1 out = capsys.readouterr().out - assert "issue=failed_step" in out - assert "step=cts" in out - assert "step=synthesis" not in out + assert "failed_step" in out + assert "cts" in out + assert "synthesis" not in out def test_diagnose_unknown_step(self, tmp_path, capsys): project_dir = _create_valid_project(tmp_path) @@ -669,7 +681,7 @@ def test_diagnose_unknown_step(self, tmp_path, capsys): rc = cli_main.run(["diagnose", "nonexistent", "--project", project_dir]) assert rc == 1 out = capsys.readouterr().out - assert "issue=unknown_step" in out + assert "unknown_step" in out def test_diagnose_no_repair_suggestions(self, tmp_path, capsys): project_dir = _create_valid_project(tmp_path) @@ -734,7 +746,7 @@ def test_diagnose_with_run_id(self, tmp_path, capsys): ) assert rc == 0 out = capsys.readouterr().out - assert "status=clean" in out + assert "clean" in out # =========================================================================== @@ -815,9 +827,7 @@ def test_artifacts_lines_have_disclosure(self, tmp_path, capsys): rc = cli_main.run(["artifacts", "cts", "--project", project_dir]) assert rc == 0 out = capsys.readouterr().out - for line in out.strip().split("\n"): - if line.strip(): - assert _has_disclosure(line), f"Missing disclosure in: {line}" + assert _has_disclosure(out) def test_config_resolved_lines_have_disclosure(self, tmp_path, capsys, monkeypatch): _mock_pdk_validation(monkeypatch) @@ -826,9 +836,7 @@ def test_config_resolved_lines_have_disclosure(self, tmp_path, capsys, monkeypat rc = cli_main.run(["config", "--resolved", "--project", project_dir]) assert rc == 0 out = capsys.readouterr().out - for line in out.strip().split("\n"): - if line.strip(): - assert _has_disclosure(line), f"Missing disclosure in: {line}" + assert _has_disclosure(out) def test_diagnose_lines_have_disclosure(self, tmp_path, capsys): project_dir = _create_valid_project(tmp_path) @@ -836,9 +844,7 @@ def test_diagnose_lines_have_disclosure(self, tmp_path, capsys): rc = cli_main.run(["diagnose", "--project", project_dir]) assert rc == 1 out = capsys.readouterr().out - for line in out.strip().split("\n"): - if line.strip(): - assert _has_disclosure(line), f"Missing disclosure in: {line}" + assert 
_has_disclosure(out) def test_phase2_disclosure_preserves_run_id(self, tmp_path, capsys): project_dir = _create_valid_project(tmp_path) @@ -939,7 +945,7 @@ def test_project_relative_run_id_resolves(self, tmp_path, capsys): ) assert rc == 0 out = capsys.readouterr().out - assert "run=sweeps/sweep_001/run_004" in out + assert "sweeps/sweep_001/run_004" in out class TestArtifactPaths: @@ -987,9 +993,9 @@ def test_step_no_config_emits_sentinel_text(self, tmp_path, capsys): rc = cli_main.run(["config", "cts", "--resolved", "--project", project_dir]) assert rc == 0 out = capsys.readouterr().out - assert "step=cts" in out - assert "config_status=none" in out - assert "artifacts=" in out + assert "cts" in out + assert "No configuration" in out + assert "artifacts:" in out def test_step_no_config_emits_sentinel_json(self, tmp_path, capsys): project_dir = _create_valid_project(tmp_path) @@ -1015,9 +1021,9 @@ def test_flow_step_without_directory_emits_issues(self, tmp_path, capsys): rc = cli_main.run(["diagnose", "cts", "--project", project_dir]) assert rc == 1 out = capsys.readouterr().out - assert "issue=failed_step" in out - assert "step=cts" in out - assert "issue=unknown_step" not in out + assert "failed_step" in out + assert "cts" in out + assert "unknown_step" not in out def test_flow_step_without_dir_reports_missing_artifacts(self, tmp_path, capsys): project_dir = _create_valid_project(tmp_path) @@ -1029,9 +1035,9 @@ def test_flow_step_without_dir_reports_missing_artifacts(self, tmp_path, capsys) rc = cli_main.run(["diagnose", "cts", "--project", project_dir]) assert rc == 0 out = capsys.readouterr().out - assert "issue=missing_artifacts" in out - assert "issue=missing_metrics" in out - assert "issue=config_unavailable" in out + assert "missing_artifacts" in out + assert "missing_metrics" in out + assert "config_unavailable" in out class TestConfigRoleDisclosure: @@ -1045,9 +1051,7 @@ def test_config_artifact_has_disclosure(self, tmp_path, capsys): rc = cli_main.run(["artifacts", "cts", "--project", project_dir]) assert rc == 0 out = capsys.readouterr().out - for line in out.strip().split("\n"): - if line.strip(): - assert _has_disclosure(line), f"Missing disclosure in: {line}" + assert _has_disclosure(out) # =========================================================================== @@ -1080,9 +1084,8 @@ def test_run_dir_text_uses_status_command(self, tmp_path, capsys, monkeypatch): rc = cli_main.run(["config", "--resolved", "--project", project_dir]) assert rc == 0 out = capsys.readouterr().out - run_dir_line = [l for l in out.strip().split("\n") if "config=run_dir" in l][0] - assert "ecc status" in run_dir_line - assert "ecc config --resolved --json" not in run_dir_line + assert "run_dir" in out + assert "ecc status" in out class TestDiagnoseIssueSpecificEvidence: @@ -1102,8 +1105,8 @@ def test_log_errors_uses_log_command(self, tmp_path, capsys): rc = cli_main.run(["diagnose", "cts", "--project", project_dir]) assert rc == 1 out = capsys.readouterr().out - log_errors_line = [l for l in out.strip().split("\n") if "issue=log_errors" in l][0] - assert "ecc log cts" in log_errors_line + assert "log_errors" in out + assert "ecc log cts" in out def test_missing_metrics_uses_metrics_command(self, tmp_path, capsys): project_dir = _create_valid_project(tmp_path) @@ -1120,8 +1123,8 @@ def test_missing_metrics_uses_metrics_command(self, tmp_path, capsys): rc = cli_main.run(["diagnose", "cts", "--project", project_dir]) assert rc == 0 out = capsys.readouterr().out - metrics_line = [l for l in 
out.strip().split("\n") if "issue=missing_metrics" in l][0] - assert "ecc metrics cts --json" in metrics_line + assert "missing_metrics" in out + assert "ecc metrics cts" in out def test_missing_artifacts_uses_artifacts_command(self, tmp_path, capsys): project_dir = _create_valid_project(tmp_path) @@ -1137,10 +1140,8 @@ def test_missing_artifacts_uses_artifacts_command(self, tmp_path, capsys): rc = cli_main.run(["diagnose", "cts", "--project", project_dir]) assert rc == 0 out = capsys.readouterr().out - assert "issue=missing_artifacts" in out - artifacts_lines = [l for l in out.strip().split("\n") if "issue=missing_artifacts" in l] - assert len(artifacts_lines) > 0 - assert "ecc artifacts cts" in artifacts_lines[0] + assert "missing_artifacts" in out + assert "ecc artifacts cts" in out def test_config_unavailable_uses_config_command(self, tmp_path, capsys): project_dir = _create_valid_project(tmp_path) @@ -1157,8 +1158,8 @@ def test_config_unavailable_uses_config_command(self, tmp_path, capsys): rc = cli_main.run(["diagnose", "cts", "--project", project_dir]) assert rc == 0 out = capsys.readouterr().out - config_line = [l for l in out.strip().split("\n") if "issue=config_unavailable" in l][0] - assert "ecc config cts --resolved" in config_line + assert "config_unavailable" in out + assert "ecc config cts --resolved" in out def test_invalid_flow_json_has_evidence(self, tmp_path, capsys): project_dir = _create_valid_project(tmp_path) @@ -1170,8 +1171,8 @@ def test_invalid_flow_json_has_evidence(self, tmp_path, capsys): rc = cli_main.run(["diagnose", "--project", project_dir]) assert rc == 1 out = capsys.readouterr().out - assert "issue=invalid_flow_json" in out - assert "evidence=" in out + assert "invalid_flow_json" in out + assert "evidence:" in out assert "ecc status" in out def test_invalid_flow_json_json_has_evidence(self, tmp_path, capsys): @@ -1208,10 +1209,10 @@ def test_clean_has_status_and_disclosure_commands(self, tmp_path, capsys): rc = cli_main.run(["diagnose", "--project", project_dir]) assert rc == 0 out = capsys.readouterr().out - assert "status=clean" in out - assert "inspect=" in out - assert "artifacts=" in out - assert "config=" in out + assert "clean" in out + assert "inspect:" in out + assert "artifacts:" in out + assert "config:" in out def test_clean_json_has_disclosure_metadata(self, tmp_path, capsys): project_dir = _create_valid_project(tmp_path) @@ -1436,8 +1437,8 @@ def test_pending_step_creates_issue(self, tmp_path, capsys): rc = cli_main.run(["diagnose", "cts", "--project", project_dir]) assert rc == 0 out = capsys.readouterr().out - assert "issue=pending_step" in out - assert "status=pending" in out + assert "pending_step" in out + assert "pending" in out class TestMissingRunJsonlKind: @@ -1489,6 +1490,6 @@ def test_real_errors_still_detected(self, tmp_path, capsys): rc = cli_main.run(["diagnose", "cts", "--project", project_dir]) assert rc == 1 out = capsys.readouterr().out - assert "issue=log_errors" in out - assert "count=2" in out + assert "log_errors" in out + assert "count: 2" in out diff --git a/test/cli/test_cli_main.py b/test/cli/test_cli_main.py index 8c07959d..1726b352 100644 --- a/test/cli/test_cli_main.py +++ b/test/cli/test_cli_main.py @@ -116,7 +116,20 @@ def _create_flow_json(run_dir, steps=None): def _has_disclosure(line): - return bool(re.search(r'\w+="ecc ', line)) + return bool(re.search(r'ecc (?:check|run|status|log|metrics|artifacts|config|diagnose|param)\b', line)) + + +def _is_structural_line(line): + s = line.strip() + if not s: + 
return True + if re.match(r'^\[.+\]$', s): + return True + if s.startswith('steps:'): + return True + if re.match(r'^\s+\w+:$', s): + return True + return False # =========================================================================== @@ -140,8 +153,8 @@ def test_init_output_has_disclosure_commands(self, tmp_path, capsys): rc = cli_main.run(["init", project_path]) assert rc == 0 out = capsys.readouterr().out - assert 'check="ecc check' in out - assert 'run="ecc run' in out + assert "ecc check" in out + assert "ecc run" in out def test_init_fails_if_ecc_toml_exists(self, tmp_path): project_dir = tmp_path / "gcd" @@ -178,7 +191,7 @@ def test_check_passes_valid_config(self, tmp_path, monkeypatch, capsys): rc = cli_main.run(["check", "--project", project_dir]) assert rc == 0 out = capsys.readouterr().out - assert "status=checked" in out + assert "checked" in out def test_check_from_inside_project_dir(self, tmp_path, monkeypatch, capsys): project_dir = _create_valid_project(tmp_path) @@ -190,7 +203,7 @@ def test_check_from_inside_project_dir(self, tmp_path, monkeypatch, capsys): rc = cli_main.run(["check"]) assert rc == 0 out = capsys.readouterr().out - assert "status=checked" in out + assert "checked" in out def test_check_fails_missing_ecc_toml(self, tmp_path): rc = cli_main.run(["check", "--project", str(tmp_path)]) @@ -439,9 +452,9 @@ def test_status_reads_flow_json(self, tmp_path, capsys): rc = cli_main.run(["status", "--project", project_dir]) assert rc == 0 out = capsys.readouterr().out - assert "run=default" in out - assert "step=synthesis" in out - assert "step=floorplan" in out + assert "[status]" in out + assert "synthesis" in out + assert "floorplan" in out def test_status_json(self, tmp_path, capsys): project_dir = _create_valid_project(tmp_path) @@ -481,8 +494,8 @@ def test_status_normalizes_step_names(self, tmp_path, capsys): rc = cli_main.run(["status", "--project", project_dir]) assert rc == 0 out = capsys.readouterr().out - assert "step=synthesis" in out - assert "step=placement" in out + assert "synthesis" in out + assert "placement" in out def test_status_missing_run(self, tmp_path, capsys): project_dir = _create_valid_project(tmp_path) @@ -490,8 +503,8 @@ def test_status_missing_run(self, tmp_path, capsys): rc = cli_main.run(["status", "--project", project_dir]) assert rc == 1 out = capsys.readouterr().out - assert "status=missing" in out - assert 'start="ecc run' in out + assert "missing" in out + assert "ecc run" in out def test_status_invalid_flow_json(self, tmp_path, capsys): project_dir = _create_valid_project(tmp_path) @@ -620,8 +633,7 @@ def test_metrics_reads_step_metrics(self, tmp_path, capsys): rc = cli_main.run(["metrics", "synthesis", "--project", project_dir]) assert rc == 0 out = capsys.readouterr().out - assert "metric=cell_number" in out - assert "value=312" in out + assert "cell_number: 312" in out def test_metrics_all_steps(self, tmp_path, capsys): project_dir = _create_valid_project(tmp_path) @@ -637,8 +649,8 @@ def test_metrics_all_steps(self, tmp_path, capsys): rc = cli_main.run(["metrics", "--project", project_dir]) assert rc == 0 out = capsys.readouterr().out - assert "step=synthesis" in out - assert "step=floorplan" in out + assert "synthesis" in out + assert "floorplan" in out def test_metrics_json(self, tmp_path, capsys): project_dir = _create_valid_project(tmp_path) @@ -686,8 +698,8 @@ def test_metrics_normalizes_known_keys(self, tmp_path, capsys): rc = cli_main.run(["metrics", "cts", "--project", project_dir]) assert rc == 0 out = 
capsys.readouterr().out - assert "metric=frequency_mhz" in out - assert "metric=die_area_um2" in out + assert "frequency_mhz: 450.0" in out + assert "die_area_um2" in out def test_metrics_unknown_step(self, tmp_path, capsys): project_dir = _create_valid_project(tmp_path) @@ -704,8 +716,8 @@ def test_metrics_missing_file(self, tmp_path, capsys): rc = cli_main.run(["metrics", "cts", "--project", project_dir]) assert rc == 1 out = capsys.readouterr().out - assert "status=missing" in out - assert 'log="ecc log cts' in out + assert "missing" in out + assert "ecc log cts" in out def test_metrics_json_unknown_step(self, tmp_path, capsys): project_dir = _create_valid_project(tmp_path) @@ -748,9 +760,7 @@ def test_init_lines_have_disclosure(self, tmp_path, capsys): rc = cli_main.run(["init", project_path]) assert rc == 0 out = capsys.readouterr().out - for line in out.strip().split("\n"): - if line.strip(): - assert _has_disclosure(line), f"Missing disclosure in: {line}" + assert _has_disclosure(out) def test_check_lines_have_disclosure(self, tmp_path, monkeypatch, capsys): project_dir = _create_valid_project(tmp_path) @@ -761,9 +771,7 @@ def test_check_lines_have_disclosure(self, tmp_path, monkeypatch, capsys): rc = cli_main.run(["check", "--project", project_dir]) assert rc == 0 out = capsys.readouterr().out - for line in out.strip().split("\n"): - if line.strip(): - assert _has_disclosure(line), f"Missing disclosure in: {line}" + assert _has_disclosure(out) def test_status_lines_have_disclosure(self, tmp_path, capsys): project_dir = _create_valid_project(tmp_path) @@ -773,9 +781,7 @@ def test_status_lines_have_disclosure(self, tmp_path, capsys): rc = cli_main.run(["status", "--project", project_dir]) assert rc == 0 out = capsys.readouterr().out - for line in out.strip().split("\n"): - if line.strip(): - assert _has_disclosure(line), f"Missing disclosure in: {line}" + assert _has_disclosure(out) def test_metrics_lines_have_disclosure(self, tmp_path, capsys): project_dir = _create_valid_project(tmp_path) @@ -789,9 +795,7 @@ def test_metrics_lines_have_disclosure(self, tmp_path, capsys): rc = cli_main.run(["metrics", "synthesis", "--project", project_dir]) assert rc == 0 out = capsys.readouterr().out - for line in out.strip().split("\n"): - if line.strip(): - assert _has_disclosure(line), f"Missing disclosure in: {line}" + assert _has_disclosure(out) def test_log_error_lines_have_disclosure(self, tmp_path, capsys): project_dir = _create_valid_project(tmp_path) @@ -829,8 +833,8 @@ def test_output_lowercase_tokens(self, tmp_path, capsys): rc = cli_main.run(["status", "--project", project_dir]) assert rc == 0 out = capsys.readouterr().out - assert "step=synthesis" in out - assert "status=success" in out + assert "synthesis" in out + assert "success" in out # =========================================================================== @@ -983,8 +987,8 @@ def test_check_missing_config_has_kind_error_text(self, tmp_path, capsys): rc = cli_main.run(["check", "--project", str(tmp_path)]) assert rc == 1 out = capsys.readouterr().out - assert "kind=error" in out - assert "error=missing_config" in out + assert "[error]" in out + assert "missing_config" in out def test_check_missing_config_has_disclosure_command(self, tmp_path, capsys): rc = cli_main.run(["check", "--project", str(tmp_path), "--json"]) diff --git a/test/cli/test_pretty.py b/test/cli/test_pretty.py new file mode 100644 index 00000000..7396facf --- /dev/null +++ b/test/cli/test_pretty.py @@ -0,0 +1,414 @@ +import json +import os + +from 
chipcompiler.cli import main as cli_main +from chipcompiler.cli.pretty import ( + BOLD, + CYAN, + DIM, + GREEN, + RED, + RESET, + YELLOW, + display_key, + format_plain_value, + render_header, + status_style, + style, + supports_color, +) +from chipcompiler.cli.render import _plain_value, render_plain +from chipcompiler.cli.types import CommandResult +from io import StringIO + + +# --------------------------------------------------------------------------- +# Helpers +# --------------------------------------------------------------------------- + + +def _create_valid_project(tmp_path, name="gcd", pdk_root=None): + project_dir = tmp_path / name + project_dir.mkdir(exist_ok=True) + (project_dir / "rtl").mkdir(exist_ok=True) + (project_dir / "constraints").mkdir(exist_ok=True) + (project_dir / "runs").mkdir(exist_ok=True) + + rtl_file = project_dir / "rtl" / "gcd.v" + rtl_file.write_text("module gcd(input clk); endmodule\n") + + if pdk_root is None: + pdk_root = tmp_path / "ics55" + pdk_root.mkdir(exist_ok=True) + + toml = f'''[design] +name = "{name}" +top = "{name}" +rtl = ["rtl/gcd.v"] +clock_port = "clk" +frequency_mhz = 100.0 + +[pdk] +name = "ics55" +root = "{pdk_root}" + +[flow] +preset = "rtl2gds" +run = "default" +''' + (project_dir / "ecc.toml").write_text(toml) + return str(project_dir) + + +def _create_flow_json(run_dir, steps=None): + import json as j + home = os.path.join(run_dir, "home") + os.makedirs(home, exist_ok=True) + if steps is None: + steps = [ + {"name": "Synthesis", "tool": "yosys", "state": "Success", "runtime": "0:00:05"}, + ] + with open(os.path.join(home, "flow.json"), "w") as f: + j.dump({"steps": steps}, f) + + +# --------------------------------------------------------------------------- +# Plain key-value stability tests +# --------------------------------------------------------------------------- + + +class TestPlainQuoting: + def test_plain_value_no_quoting_for_simple(self): + assert _plain_value("hello") == "hello" + + def test_plain_value_quotes_spaces(self): + assert _plain_value("hello world") == '"hello world"' + + def test_plain_value_quotes_equals(self): + assert _plain_value("a=b") == '"a=b"' + + def test_plain_value_escapes_backslashes(self): + assert _plain_value("path\\to\\file") == '"path\\\\to\\\\file"' + + def test_plain_value_escapes_quotes(self): + assert _plain_value('say "hi"') == '"say \\"hi\\""' + + def test_plain_value_numeric(self): + assert _plain_value(42) == "42" + + def test_render_plain_one_record_per_line(self): + records = ( + {"a": "1", "b": "two words"}, + {"c": "3"}, + ) + buf = StringIO() + render_plain(records, file=buf) + lines = [l for l in buf.getvalue().strip().split("\n") if l.strip()] + assert len(lines) == 2 + assert "a=1" in lines[0] + assert 'b="two words"' in lines[0] + + def test_render_plain_no_ansi(self): + records = ({"status": "success", "path": "/tmp/x"},) + buf = StringIO() + render_plain(records, file=buf) + assert "\x1b[" not in buf.getvalue() + + +# --------------------------------------------------------------------------- +# Display key normalization +# --------------------------------------------------------------------------- + + +class TestDisplayKey: + def test_strips_cmd_suffix(self): + assert display_key("inspect_cmd") == "inspect" + + def test_preserves_non_cmd(self): + assert display_key("status") == "status" + + def test_replaces_underscores(self): + assert display_key("run_dir") == "run dir" + + +# --------------------------------------------------------------------------- +# Color gating +# 
--------------------------------------------------------------------------- + + +class TestColorGating: + def test_supports_color_non_tty(self): + assert not supports_color(file=StringIO()) + + def test_style_disabled(self): + assert style("text", RED, enabled=False) == "text" + + def test_style_enabled(self): + styled = style("text", RED, enabled=True) + assert RED in styled + assert RESET in styled + + def test_status_style_known_states(self): + assert GREEN in status_style("success", color=True) + assert RED in status_style("failed", color=True) + assert YELLOW in status_style("pending", color=True) + + def test_status_style_unknown_passthrough(self): + assert status_style("unknown_state", color=True) == "unknown_state" + + def test_status_style_no_color(self): + assert status_style("success", color=False) == "success" + + +# --------------------------------------------------------------------------- +# Pretty header rendering +# --------------------------------------------------------------------------- + + +class TestPrettyHeader: + def test_header_with_color(self): + h = render_header("status", color=True) + assert BOLD in h + assert RESET in h + assert "[status]" in h + + def test_header_without_color(self): + h = render_header("status", color=False) + assert "\x1b[" not in h + assert "[status]" in h + + +# --------------------------------------------------------------------------- +# CLI --plain acceptance tests +# --------------------------------------------------------------------------- + + +class TestPlainFlagAcceptance: + def test_init_plain(self, tmp_path, capsys): + rc = cli_main.run(["init", str(tmp_path / "p"), "--plain"]) + assert rc == 0 + out = capsys.readouterr().out + assert "\x1b[" not in out + assert "=" in out + + def test_check_plain(self, tmp_path, monkeypatch, capsys): + project_dir = _create_valid_project(tmp_path) + monkeypatch.setattr( + "chipcompiler.cli.config._validate_pdk_contents", + lambda name, root: None, + ) + rc = cli_main.run(["check", "--project", project_dir, "--plain"]) + assert rc == 0 + out = capsys.readouterr().out + assert "\x1b[" not in out + assert "=" in out + + def test_status_plain(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + _create_flow_json(os.path.join(project_dir, "runs", "default")) + rc = cli_main.run(["status", "--project", project_dir, "--plain"]) + assert rc == 0 + out = capsys.readouterr().out + assert "\x1b[" not in out + assert "status=" in out + + def test_metrics_plain(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + _create_flow_json(run_dir) + analysis_dir = os.path.join(run_dir, "Synthesis_yosys", "analysis") + os.makedirs(analysis_dir, exist_ok=True) + with open(os.path.join(analysis_dir, "Synthesis_metrics.json"), "w") as f: + json.dump({"Cell number": 312}, f) + rc = cli_main.run(["metrics", "synthesis", "--plain", "--project", project_dir]) + assert rc == 0 + out = capsys.readouterr().out + assert "\x1b[" not in out + assert "metric=" in out + + def test_artifacts_plain(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + _create_flow_json(run_dir) + step_dir = os.path.join(run_dir, "Synthesis_yosys", "log") + os.makedirs(step_dir, exist_ok=True) + with open(os.path.join(step_dir, "synthesis.log"), "w") as f: + f.write("ok\n") + rc = cli_main.run(["artifacts", "--plain", "--project", project_dir]) + assert rc == 0 + out = 
capsys.readouterr().out + assert "\x1b[" not in out + + def test_diagnose_plain(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + rc = cli_main.run(["diagnose", "--plain", "--project", project_dir]) + out = capsys.readouterr().out + assert "\x1b[" not in out + + def test_config_plain(self, tmp_path, monkeypatch, capsys): + project_dir = _create_valid_project(tmp_path) + monkeypatch.setattr( + "chipcompiler.cli.config._validate_pdk_contents", + lambda name, root: None, + ) + rc = cli_main.run(["config", "--resolved", "--plain", "--project", project_dir]) + assert rc == 0 + out = capsys.readouterr().out + assert "\x1b[" not in out + + +# --------------------------------------------------------------------------- +# Pretty default output structure tests +# --------------------------------------------------------------------------- + + +class TestPrettyDefaultOutput: + def test_init_has_header(self, tmp_path, capsys): + rc = cli_main.run(["init", str(tmp_path / "p")]) + assert rc == 0 + out = capsys.readouterr().out + assert "[init]" in out + + def test_check_has_header(self, tmp_path, monkeypatch, capsys): + project_dir = _create_valid_project(tmp_path) + monkeypatch.setattr( + "chipcompiler.cli.config._validate_pdk_contents", + lambda name, root: None, + ) + rc = cli_main.run(["check", "--project", project_dir]) + assert rc == 0 + out = capsys.readouterr().out + assert "[check]" in out + + def test_status_has_header(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + _create_flow_json(os.path.join(project_dir, "runs", "default")) + rc = cli_main.run(["status", "--project", project_dir]) + assert rc == 0 + out = capsys.readouterr().out + assert "[status]" in out + + def test_status_groups_steps(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + _create_flow_json(os.path.join(project_dir, "runs", "default"), [ + {"name": "Synthesis", "tool": "yosys", "state": "Success", "runtime": "0:00:05"}, + {"name": "CTS", "tool": "ecc", "state": "Success", "runtime": "0:00:04"}, + ]) + rc = cli_main.run(["status", "--project", project_dir]) + assert rc == 0 + out = capsys.readouterr().out + assert "synthesis (yosys)" in out + assert "cts (ecc)" in out + + def test_metrics_groups_by_step(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + for step_dir_name in ["Synthesis_yosys", "CTS_ecc"]: + analysis = os.path.join(run_dir, step_dir_name, "analysis") + os.makedirs(analysis, exist_ok=True) + metrics_name = step_dir_name.split("_")[0] + "_metrics.json" + with open(os.path.join(analysis, metrics_name), "w") as f: + json.dump({"Cell number": 100}, f) + rc = cli_main.run(["metrics", "--project", project_dir]) + assert rc == 0 + out = capsys.readouterr().out + assert "[metrics]" in out + assert "synthesis:" in out + assert "cts:" in out + + def test_diagnose_clean_has_header(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + _create_flow_json(run_dir, [ + {"name": "CTS", "tool": "ecc", "state": "Success", "runtime": "0:00:04"}, + ]) + step_dir = os.path.join(run_dir, "CTS_ecc", "log") + os.makedirs(step_dir, exist_ok=True) + with open(os.path.join(step_dir, "cts.log"), "w") as f: + f.write("ok\n") + rc = cli_main.run(["diagnose", "--project", project_dir]) + assert rc == 0 + out = capsys.readouterr().out + assert "[diagnose]" in out + assert "clean" in out + + def 
test_error_output_has_error_header(self, tmp_path, capsys): + rc = cli_main.run(["check", "--project", str(tmp_path)]) + assert rc == 1 + out = capsys.readouterr().out + assert "[error]" in out + + def test_run_summary_has_header(self, tmp_path, monkeypatch, capsys): + project_dir = _create_valid_project(tmp_path) + from types import SimpleNamespace + + DummyFlow_instances = [] + class DummyFlow: + instances = DummyFlow_instances + has_init_value = False + run_steps_value = True + def __init__(self, workspace): + self.workspace = workspace + self.added_steps = [] + self.create_called = False + self.run_called = False + self.workspace_steps = [] + DummyFlow.instances.append(self) + def has_init(self): + return False + def add_step(self, step, tool, state): + self.added_steps.append((step, tool, state)) + def create_step_workspaces(self): + self.create_called = True + def run_steps(self): + self.run_called = True + return True + + monkeypatch.setattr("chipcompiler.data.create_workspace", + lambda **kw: SimpleNamespace(name="ws")) + monkeypatch.setattr("chipcompiler.engine.EngineFlow", DummyFlow) + monkeypatch.setattr( + "chipcompiler.rtl2gds.build_rtl2gds_flow", + lambda: [("Synthesis", "yosys", "Unstart")], + ) + monkeypatch.setattr( + "chipcompiler.cli.config._validate_pdk_contents", + lambda name, root: None, + ) + + rc = cli_main.run(["run", "--project", project_dir]) + assert rc == 0 + out = capsys.readouterr().out + assert "[run]" in out + assert "success" in out + + +# --------------------------------------------------------------------------- +# JSON/JSONL unaffected by pretty changes +# --------------------------------------------------------------------------- + + +class TestJsonUnchanged: + def test_status_json_unchanged(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + _create_flow_json(os.path.join(project_dir, "runs", "default")) + rc = cli_main.run(["status", "--project", project_dir, "--json"]) + assert rc == 0 + data = json.loads(capsys.readouterr().out) + assert "records" in data + assert data["records"][0]["run"] == "default" + + def test_metrics_jsonl_unchanged(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + _create_flow_json(run_dir) + analysis_dir = os.path.join(run_dir, "Synthesis_yosys", "analysis") + os.makedirs(analysis_dir, exist_ok=True) + with open(os.path.join(analysis_dir, "Synthesis_metrics.json"), "w") as f: + json.dump({"Cell number": 312}, f) + rc = cli_main.run(["metrics", "synthesis", "--jsonl", "--project", project_dir]) + assert rc == 0 + out = capsys.readouterr().out + assert "\x1b[" not in out + objects = [json.loads(l) for l in out.strip().split("\n")] + assert any("metric" in o for o in objects) From a6588a345155f494a70592a3222e04560c4f4ac6 Mon Sep 17 00:00:00 2001 From: Emin Date: Mon, 4 May 2026 23:43:07 +0800 Subject: [PATCH 078/104] fix(cli): consolidate color gating and fix multi-record error rendering - render_error() now iterates all records instead of only records[0] - ANSI constants and supports_color() consolidated into pretty.py - main.py, log_view.py, progress.py delegate to shared helpers - Added 8 regression tests for multi-record errors and shared color policy - All 495 CLI tests pass --- chipcompiler/cli/log_view.py | 26 +++++------ chipcompiler/cli/main.py | 9 +--- chipcompiler/cli/pretty.py | 32 +++++++------- chipcompiler/cli/progress.py | 41 +++++------------ test/cli/test_pretty.py | 85 
++++++++++++++++++++++++++++++++++++ test/cli/test_progress.py | 31 ++++++------- 6 files changed, 139 insertions(+), 85 deletions(-) diff --git a/chipcompiler/cli/log_view.py b/chipcompiler/cli/log_view.py index fbe32777..e62348b8 100644 --- a/chipcompiler/cli/log_view.py +++ b/chipcompiler/cli/log_view.py @@ -104,13 +104,7 @@ def build_log_records( # --- Pretty rendering --- -_BOLD = "\x1b[1m" -_DIM = "\x1b[2m" -_RED = "\x1b[31m" -_YELLOW = "\x1b[33m" -_CYAN = "\x1b[36m" -_BLUE = "\x1b[34m" -_RESET = "\x1b[0m" +from chipcompiler.cli.pretty import BOLD, DIM, RED, YELLOW, BLUE, CYAN, RESET, style _KIND_LABEL = { LineKind.ERROR: "error", @@ -122,11 +116,11 @@ def build_log_records( } _KIND_COLOR = { - LineKind.ERROR: _RED, - LineKind.WARNING: _YELLOW, - LineKind.TRACEBACK: _YELLOW, - LineKind.INFO: _BLUE, - LineKind.SECTION: _CYAN, + LineKind.ERROR: RED, + LineKind.WARNING: YELLOW, + LineKind.TRACEBACK: YELLOW, + LineKind.INFO: BLUE, + LineKind.SECTION: CYAN, } @@ -141,8 +135,8 @@ def render_log_pretty( target = file or sys.stdout annotated = annotate_log_lines(lines) - log_tag = f"{_BOLD}[log]{_RESET}" if color else "[log]" - source_label = f" {_DIM}source:{_RESET}" if color else " source:" + log_tag = style("[log]", BOLD, color) + source_label = f" {style('source:', DIM, color)}" if color else " source:" target.write(f"{log_tag} step={step}\n") target.write(f"{source_label} {source}\n") @@ -150,11 +144,11 @@ def render_log_pretty( label = _KIND_LABEL[ll.kind] if color and ll.kind in _KIND_COLOR: code = _KIND_COLOR[ll.kind] - target.write(f" {code}{label}{_RESET} {ll.text}\n") + target.write(f" {code}{label}{RESET} {ll.text}\n") else: target.write(f" {label} {ll.text}\n") - inspect_label = f" {_DIM}inspect:{_RESET}" if color else " inspect:" + inspect_label = f" {style('inspect:', DIM, color)}" if color else " inspect:" target.write(f"{inspect_label} {inspect_cmd}\n") diff --git a/chipcompiler/cli/main.py b/chipcompiler/cli/main.py index 532bd755..93cba815 100644 --- a/chipcompiler/cli/main.py +++ b/chipcompiler/cli/main.py @@ -173,13 +173,8 @@ def _render_param_text(args, result, color=True) -> None: def _should_colorize(): - if not sys.stdout.isatty(): - return False - if os.environ.get("NO_COLOR") is not None: - return False - if os.environ.get("TERM", "") == "dumb": - return False - return True + from chipcompiler.cli.pretty import supports_color + return supports_color(file=sys.stdout) def _render_log_text(args, result, color=True) -> None: diff --git a/chipcompiler/cli/pretty.py b/chipcompiler/cli/pretty.py index 8f64b8f4..5b443bc8 100644 --- a/chipcompiler/cli/pretty.py +++ b/chipcompiler/cli/pretty.py @@ -17,9 +17,11 @@ # --- Color gating --- -def supports_color(file=None, env=None): +def supports_color(file=None, env=None, mode=None): if env is None: env = os.environ + if mode is not None and mode != OutputMode.TEXT: + return False target = file or sys.stdout if not hasattr(target, "isatty") or not target.isatty(): return False @@ -412,21 +414,21 @@ def render_diagnose(records, file=None, color=True): def render_error(records, file=None, color=True): target = file or sys.stdout - first = records[0] - error = first.get("error", first.get("kind", "error")) - reason = first.get("reason", "") target.write(f"{render_header('error', color)}\n") - target.write(f" {status_style(error, color)}") - if reason: - target.write(f" {reason}") - target.write("\n") - for key, value in first.items(): - if key in ("kind", "error", "reason"): - continue - if value is None: - continue - dk = 
display_key(key) - target.write(render_field(dk, value, color, dim_label=True) + "\n") + for record in records: + error = record.get("error", record.get("kind", "error")) + reason = record.get("reason", "") + target.write(f" {status_style(error, color)}") + if reason: + target.write(f" {reason}") + target.write("\n") + for key, value in record.items(): + if key in ("kind", "error", "reason"): + continue + if value is None: + continue + dk = display_key(key) + target.write(render_field(dk, value, color, dim_label=True) + "\n") target.write("\n") diff --git a/chipcompiler/cli/progress.py b/chipcompiler/cli/progress.py index 20f9021f..da265451 100644 --- a/chipcompiler/cli/progress.py +++ b/chipcompiler/cli/progress.py @@ -4,34 +4,17 @@ import threading import time +from chipcompiler.cli.pretty import BOLD, DIM, CYAN, GREEN, RED, RESET, style as _style from chipcompiler.cli.types import OutputMode -_BOLD = "\x1b[1m" -_DIM = "\x1b[2m" -_CYAN = "\x1b[36m" -_GREEN = "\x1b[32m" -_RED = "\x1b[31m" -_RESET = "\x1b[0m" - def supports_color(stream, mode, env=None): - if env is None: - env = os.environ - if not hasattr(stream, "isatty") or not stream.isatty(): - return False - if mode != OutputMode.TEXT: - return False - if env.get("NO_COLOR") is not None: - return False - if env.get("TERM", "") == "dumb": - return False - return True + from chipcompiler.cli.pretty import supports_color as _supports_color + return _supports_color(file=stream, mode=mode, env=env) def style(text, code, enabled): - if not enabled: - return text - return f"{code}{text}{_RESET}" + return _style(text, code, enabled) def should_enable_run_progress(ctx, stderr): @@ -94,7 +77,7 @@ def running(self, text): width = self._width_fn() visible = truncate_to_width(f" log: {text}", width) if self._color and visible.startswith(" log:"): - visible = f" {_DIM}log:{_RESET}{visible[6:]}" + visible = f" {DIM}log:{RESET}{visible[6:]}" self._stream.write(f"\r\x1b[K{visible}") self._stream.flush() self._has_transient = True @@ -107,7 +90,7 @@ def clear(self): def start_run(self, name, workspace): self.clear() - run_label = style("[run]", _BOLD, self._color) + run_label = style("[run]", BOLD, self._color) self._stream.write(f"{run_label} {name} workspace={workspace}\n") self._stream.flush() @@ -115,7 +98,7 @@ def start_step(self, step, tool): self.clear() if self._step_started: self._stream.write("\n") - header = style(f"> {step} ({tool})", _CYAN, self._color) + header = style(f"> {step} ({tool})", CYAN, self._color) self._stream.write(f"{header}\n") self._stream.flush() self._step_started = True @@ -123,15 +106,15 @@ def start_step(self, step, tool): def finish_step(self, step, tool, status, runtime, log_path, inspect_cmd, success): self.clear() if success: - line = style(f"✓ {step} ({tool}) {runtime}", _GREEN, self._color) + line = style(f"✓ {step} ({tool}) {runtime}", GREEN, self._color) else: - sym = style("✗", _RED, self._color) - status_styled = style(status, _RED, self._color) + sym = style("✗", RED, self._color) + status_styled = style(status, RED, self._color) line = f"{sym} {step} ({tool}) {status_styled} {runtime}" self._stream.write(f"{line}\n") - log_label = style(" log:", _DIM, self._color) + log_label = style(" log:", DIM, self._color) self._stream.write(f"{log_label} {log_path}\n") - inspect_label = style(" inspect:", _DIM, self._color) + inspect_label = style(" inspect:", DIM, self._color) self._stream.write(f"{inspect_label} {inspect_cmd}\n") self._stream.flush() diff --git a/test/cli/test_pretty.py b/test/cli/test_pretty.py 
index 7396facf..da282141 100644 --- a/test/cli/test_pretty.py +++ b/test/cli/test_pretty.py @@ -1,3 +1,4 @@ +import io import json import os @@ -412,3 +413,87 @@ def test_metrics_jsonl_unchanged(self, tmp_path, capsys): assert "\x1b[" not in out objects = [json.loads(l) for l in out.strip().split("\n")] assert any("metric" in o for o in objects) + + +# --------------------------------------------------------------------------- +# Regression: multi-record error rendering (Codex Round 1 finding) +# --------------------------------------------------------------------------- + + +class TestMultiRecordError: + def test_render_error_two_records(self): + from chipcompiler.cli.pretty import render_error + + buf = io.StringIO() + records = [ + {"error": "missing", "reason": "file not found"}, + {"error": "corrupt", "reason": "bad format"}, + ] + render_error(records, file=buf, color=False) + out = buf.getvalue() + assert "[error]" in out + assert "missing" in out + assert "file not found" in out + assert "corrupt" in out + assert "bad format" in out + + def test_render_error_three_records_all_shown(self): + from chipcompiler.cli.pretty import render_error + + buf = io.StringIO() + records = [ + {"kind": "error", "reason": "a"}, + {"kind": "error", "reason": "b"}, + {"kind": "error", "reason": "c"}, + ] + render_error(records, file=buf, color=False) + out = buf.getvalue() + assert out.count("error") >= 3 + for reason in ("a", "b", "c"): + assert reason in out + + +# --------------------------------------------------------------------------- +# Shared color policy tests (Codex Round 1 finding) +# --------------------------------------------------------------------------- + + +class TestSharedColorPolicy: + def test_pretty_supports_color_no_color_env(self): + from chipcompiler.cli.pretty import supports_color + + env = {"NO_COLOR": "1"} + assert not supports_color(env=env) + + def test_pretty_supports_color_dumb_term(self): + from chipcompiler.cli.pretty import supports_color + + env = {"TERM": "dumb"} + assert not supports_color(env=env) + + def test_pretty_supports_color_non_tty(self): + from chipcompiler.cli.pretty import supports_color + + assert not supports_color(file=io.StringIO()) + + def test_pretty_supports_color_machine_mode(self): + from chipcompiler.cli.pretty import supports_color + from chipcompiler.cli.types import OutputMode + + assert not supports_color(mode=OutputMode.JSON) + assert not supports_color(mode=OutputMode.PLAIN) + + def test_progress_supports_color_delegates(self): + from chipcompiler.cli.progress import supports_color + + assert not supports_color(io.StringIO(), None, env={"NO_COLOR": "1"}) + assert not supports_color(io.StringIO(), None, env={"TERM": "dumb"}) + + def test_log_view_uses_shared_constants(self): + from chipcompiler.cli import log_view + from chipcompiler.cli import pretty + + assert log_view.BOLD is pretty.BOLD + assert log_view.RED is pretty.RED + assert log_view.CYAN is pretty.CYAN + assert log_view.RESET is pretty.RESET diff --git a/test/cli/test_progress.py b/test/cli/test_progress.py index 8e823995..354d8fae 100644 --- a/test/cli/test_progress.py +++ b/test/cli/test_progress.py @@ -10,13 +10,8 @@ def _strip_ansi(text): return _ANSI_RE.sub("", text) +from chipcompiler.cli.pretty import BOLD, CYAN, DIM, GREEN, RED, RESET from chipcompiler.cli.progress import ( - _BOLD, - _CYAN, - _DIM, - _GREEN, - _RED, - _RESET, RunProgressRenderer, latest_log_line, run_flow_with_progress, @@ -89,11 +84,11 @@ def test_enabled_with_clean_env(self): class TestStyle: def 
test_applies_code_when_enabled(self): - result = style("hello", _GREEN, True) - assert result == f"{_GREEN}hello{_RESET}" + result = style("hello", GREEN, True) + assert result == f"{GREEN}hello{RESET}" def test_passthrough_when_disabled(self): - assert style("hello", _GREEN, False) == "hello" + assert style("hello", GREEN, False) == "hello" # -- should_enable_run_progress -- @@ -294,7 +289,7 @@ def test_running_with_color(self): r = RunProgressRenderer(buf, width_fn=lambda: 80, color=True) r.running("working...") output = "".join(buf.written) - assert _DIM in output + assert DIM in output assert "log:" in output def test_running_without_color(self): @@ -302,7 +297,7 @@ def test_running_without_color(self): r = RunProgressRenderer(buf, width_fn=lambda: 80, color=False) r.running("working...") output = "".join(buf.written) - assert _DIM not in output + assert DIM not in output def test_no_color_codes_when_disabled(self): buf = FakeTTYStderr(True) @@ -311,7 +306,7 @@ def test_no_color_codes_when_disabled(self): r.start_step("synthesis", "yosys") r.finish_step("synthesis", "yosys", "success", "0:00:06", "log", "cmd", True) output = "".join(buf.written) - for code in (_BOLD, _DIM, _CYAN, _GREEN, _RED): + for code in (BOLD, DIM, CYAN, GREEN, RED): assert code not in output def test_start_step_with_color(self): @@ -319,9 +314,9 @@ def test_start_step_with_color(self): r = RunProgressRenderer(buf, width_fn=lambda: 80, color=True) r.start_step("synthesis", "yosys") output = "".join(buf.written) - assert _CYAN in output + assert CYAN in output # Cyan sequence must appear before the `>` marker in raw output - cyan_pos = output.find(_CYAN) + cyan_pos = output.find(CYAN) marker_pos = output.find(">") assert cyan_pos < marker_pos @@ -330,21 +325,21 @@ def test_start_run_with_color(self): r = RunProgressRenderer(buf, width_fn=lambda: 80, color=True) r.start_run("default", "/tmp") output = "".join(buf.written) - assert _BOLD in output + assert BOLD in output def test_finish_step_success_with_color(self): buf = FakeTTYStderr(True) r = RunProgressRenderer(buf, width_fn=lambda: 80, color=True) r.finish_step("synthesis", "yosys", "success", "0:00:06", "log", "cmd", True) output = "".join(buf.written) - assert _GREEN in output + assert GREEN in output def test_finish_step_non_success_with_color(self): buf = FakeTTYStderr(True) r = RunProgressRenderer(buf, width_fn=lambda: 80, color=True) r.finish_step("placement", "dreamplace", "incomplete", "0:00:00", "", "cmd", False) output = "".join(buf.written) - assert _RED in output + assert RED in output # -- run_flow_with_progress -- @@ -627,5 +622,5 @@ def test_color_disabled_for_non_tty(self): buf = FakeTTYStderr(False) run_flow_with_progress(flow, _make_ctx(), None, buf) output = "".join(buf.written) - for code in (_BOLD, _CYAN, _GREEN, _RED, _DIM): + for code in (BOLD, CYAN, GREEN, RED, DIM): assert code not in output From be2112090dda556797d74a87b2b75a59902365cf Mon Sep 17 00:00:00 2001 From: Emin Date: Mon, 4 May 2026 23:51:55 +0800 Subject: [PATCH 079/104] fix(cli): use shared ANSI constants in colorized log listing renderer render_log_listing_pretty() still referenced deleted _BOLD/_RESET/_CYAN/_DIM private names, causing NameError when color=True. Replaced with style() helper and shared constants from pretty.py. Added 3 regression tests for color-enabled listing path. 
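For reference, the shared-constant pattern that this and the previous patch converge
on reduces to the sketch below. It is illustrative only: BOLD/DIM/CYAN/RESET and
style() mirror chipcompiler.cli.pretty, while the listing function and the record
fields are simplified stand-ins rather than the actual renderer.

    import sys

    BOLD, DIM, CYAN, RESET = "\x1b[1m", "\x1b[2m", "\x1b[36m", "\x1b[0m"

    def style(text: str, code: str, enabled: bool) -> str:
        # No-op when color is disabled, so plain and machine output stay byte-stable.
        return f"{code}{text}{RESET}" if enabled else text

    def render_listing(records, color=False, target=sys.stdout):
        # One shared helper instead of per-module _BOLD/_RESET private constants.
        target.write(style("[logs]", BOLD, color) + "\n")
        for rec in records:
            target.write(f"  {style(rec['step'], CYAN, color)} {rec['source']}\n")
            target.write(f"    {style('inspect:', DIM, color)} {rec['inspect_cmd']}\n")

    render_listing(
        [{"step": "synthesis",
          "source": "Synthesis_yosys/log/synthesis.log",
          "inspect_cmd": "ecc log synthesis"}],
        color=False,
    )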
--- chipcompiler/cli/log_view.py | 10 ++++------ test/cli/test_log_view.py | 31 +++++++++++++++++++++++++++++++ 2 files changed, 35 insertions(+), 6 deletions(-) diff --git a/chipcompiler/cli/log_view.py b/chipcompiler/cli/log_view.py index e62348b8..3b10b29c 100644 --- a/chipcompiler/cli/log_view.py +++ b/chipcompiler/cli/log_view.py @@ -193,10 +193,8 @@ def render_log_listing_pretty( ) -> None: target = file or sys.stdout - if color: - target.write(f"{_BOLD}[logs]{_RESET}\n") - else: - target.write("[logs]\n") + log_tag = style("[logs]", BOLD, color) + target.write(f"{log_tag}\n") for rec in records: step = rec.get("step", "") @@ -204,10 +202,10 @@ def render_log_listing_pretty( inspect = rec.get("inspect_cmd") or rec.get("inspect", "") if step: - step_label = f" {_CYAN}{step}{_RESET}" if color else f" {step}" + step_label = f" {style(step, CYAN, color)}" if color else f" {step}" else: step_label = "" target.write(f"{step_label} {source}\n") - inspect_label = f" {_DIM}inspect:{_RESET}" if color else " inspect:" + inspect_label = f" {style('inspect:', DIM, color)}" if color else " inspect:" target.write(f"{inspect_label} {inspect}\n") diff --git a/test/cli/test_log_view.py b/test/cli/test_log_view.py index 70ad9858..4cbc2d5d 100644 --- a/test/cli/test_log_view.py +++ b/test/cli/test_log_view.py @@ -458,3 +458,34 @@ def test_listing_inspect_cmd(self): buf = StringIO() render_log_listing_pretty(records, file=buf, color=False) assert "ecc log synthesis" in buf.getvalue() + + def test_listing_color_enabled_no_crash(self): + from io import StringIO + records = [ + {"step": "synthesis", "source": "Synthesis_yosys/log/synthesis.log", "inspect_cmd": "ecc log synthesis"}, + ] + buf = StringIO() + render_log_listing_pretty(records, file=buf, color=True) + out = buf.getvalue() + assert "[logs]" in out + assert "synthesis" in out + assert "Synthesis_yosys/log/synthesis.log" in out + assert "ecc log synthesis" in out + + def test_listing_color_enabled_has_ansi(self): + from io import StringIO + records = [ + {"step": "synthesis", "source": "Synthesis_yosys/log/synthesis.log", "inspect_cmd": "ecc log synthesis"}, + ] + buf = StringIO() + render_log_listing_pretty(records, file=buf, color=True) + assert "\x1b[" in buf.getvalue() + + def test_listing_color_disabled_no_ansi(self): + from io import StringIO + records = [ + {"step": "synthesis", "source": "Synthesis_yosys/log/synthesis.log", "inspect_cmd": "ecc log synthesis"}, + ] + buf = StringIO() + render_log_listing_pretty(records, file=buf, color=False) + assert "\x1b[" not in buf.getvalue() From 457d52bb3cb1acacd3358328adfae09150352bab Mon Sep 17 00:00:00 2001 From: Emin Date: Tue, 5 May 2026 08:42:21 +0800 Subject: [PATCH 080/104] fix(cli): preserve newline after multiline TOML value replacement When _apply_scoped_param_edit replaced a multiline array value, the Extend consumed past the closing newline but the replacement line had none. This concatenated the next key onto the same line, producing invalid TOML. Added trailing newline when the original value was multiline. Strengthened test assertion to verify keys remain on separate lines. 
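The failure mode is easier to see in a stripped-down form. The sketch below is a
simplified stand-in for _apply_scoped_param_edit/_extend_multiline_value: it assumes
the value being replaced is a bracketed array and ignores sections, quoting, and
provenance, but it shows why the restored trailing newline matters.

    import re

    def replace_key(body: str, name: str, value: str) -> str:
        # Locate "name = ..." and extend over a (possibly multiline) array value.
        m = re.search(rf"^(\s*){re.escape(name)}\s*=", body, re.M)
        end = body.index("]", m.end()) + 1
        consumed_newline = body[end:end + 1] == "\n"
        if consumed_newline:
            end += 1                     # the extension step eats the closing newline
        new_line = f"{m.group(1)}{name} = {value}"
        if consumed_newline:
            new_line += "\n"             # so the replacement has to put one back
        return body[:m.start()] + new_line + body[end:]

    toml_body = 'rtl = [\n  "rtl/a.v",\n  "rtl/b.v",\n]\ncore_margin = 2\n'
    print(replace_key(toml_body, "rtl", '["rtl/top.v"]'))
    # Without the restored newline, core_margin would be appended to the rtl
    # line, producing exactly the invalid TOML described above.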
--- chipcompiler/cli/param_handler.py | 2 ++ test/cli/test_cli_params.py | 4 ++++ 2 files changed, 6 insertions(+) diff --git a/chipcompiler/cli/param_handler.py b/chipcompiler/cli/param_handler.py index 2f785d71..ca5edbd3 100644 --- a/chipcompiler/cli/param_handler.py +++ b/chipcompiler/cli/param_handler.py @@ -417,6 +417,8 @@ def _apply_scoped_param_edit(text: str, group: str, name: str, value: object) -> indent = key_match.group(1) end = _extend_multiline_value(section_body, key_match.end()) new_line = f"{indent}{name} = {value_str}" + if end > key_match.end(): + new_line += "\n" new_body = section_body[:key_match.start()] + new_line + section_body[end:] return text[:body_start] + new_body + text[body_end:] else: diff --git a/test/cli/test_cli_params.py b/test/cli/test_cli_params.py index b8ab41ab..02d3379b 100644 --- a/test/cli/test_cli_params.py +++ b/test/cli/test_cli_params.py @@ -980,6 +980,10 @@ def test_set_preserves_adjacent_key_after_multiline(self, tmp_path, capsys): after = f.read() assert "core_util = 0.5" in after assert after.count("core_margin") == 1 + for line in after.splitlines(): + assert "core_margin" not in line or "core_util" not in line, ( + f"multiline replacement concatenated keys on one line: {line!r}" + ) """config --resolved must error on malformed/invalid CLI provenance.""" def _setup_run_dir(self, project_dir): From 16d3309da202777105abe6de6f693185736c00ff Mon Sep 17 00:00:00 2001 From: Emin Date: Tue, 5 May 2026 09:13:33 +0800 Subject: [PATCH 081/104] fix(cli): restore legacy CLI compat and scripts.cli alias - Add scripts.cli alias alongside scripts.ecc in pyproject.toml - Detect legacy --workspace/--rtl/--design/--top/--clock/--pdk-root args and route to the old parameter-based flow for backward compatibility --- chipcompiler/cli/main.py | 75 +++++++++++++++++++++++++++++++++++++++- pyproject.toml | 1 + 2 files changed, 75 insertions(+), 1 deletion(-) diff --git a/chipcompiler/cli/main.py b/chipcompiler/cli/main.py index 93cba815..bd262c5d 100644 --- a/chipcompiler/cli/main.py +++ b/chipcompiler/cli/main.py @@ -250,8 +250,13 @@ def _render_log_plain(result) -> None: def run(argv: Sequence[str] | None = None) -> int: + raw = list(argv) if argv is not None else sys.argv[1:] + + if _is_legacy_args(raw): + return _run_legacy(raw) + parser = build_parser() - args = parser.parse_args(list(argv) if argv is not None else None) + args = parser.parse_args(raw) if args.command is None: parser.print_help() @@ -274,6 +279,74 @@ def run(argv: Sequence[str] | None = None) -> int: return result.exit_code +_LEGACY_FLAGS = {"--workspace", "--rtl", "--design", "--top", "--clock", "--pdk-root", "--freq"} + + +def _is_legacy_args(args: list[str]) -> bool: + return any(a in _LEGACY_FLAGS for a in args) + + +def _run_legacy(argv: list[str]) -> int: + import argparse as _argparse + + from chipcompiler.data import create_workspace, get_parameters + from chipcompiler.engine import EngineFlow + from chipcompiler.rtl2gds import build_rtl2gds_flow + + parser = _argparse.ArgumentParser( + prog="cli", + description="Legacy parameter-only invocation (use 'ecc run' for project-based flows)", + ) + parser.add_argument("--workspace", required=True) + parser.add_argument("--rtl", required=True) + parser.add_argument("--design", required=True) + parser.add_argument("--top", required=True) + parser.add_argument("--clock", required=True) + parser.add_argument("--pdk-root", required=True) + parser.add_argument("--freq", type=float, default=100.0) + args = parser.parse_args(argv) + + parameters 
= get_parameters("ics55") + parameters.data.update({ + "PDK": "ics55", + "Design": args.design, + "Top module": args.top, + "Clock": args.clock, + "Frequency max [MHz]": args.freq, + }) + + try: + workspace = create_workspace( + directory=args.workspace, + origin_def="", + origin_verilog=args.rtl, + pdk="ics55", + parameters=parameters, + input_filelist="", + pdk_root=args.pdk_root, + ) + except Exception as exc: + print(f"Error: {exc}", file=sys.stderr) + return 1 + + if workspace is None: + print("Error: failed to create workspace", file=sys.stderr) + return 1 + + engine_flow = EngineFlow(workspace=workspace) + if not engine_flow.has_init(): + for step, tool, state in build_rtl2gds_flow(): + engine_flow.add_step(step=step, tool=tool, state=state) + + engine_flow.create_step_workspaces() + + if not engine_flow.run_steps(): + print("Error: flow execution failed", file=sys.stderr) + return 1 + + return 0 + + def main() -> None: sys.exit(run()) diff --git a/pyproject.toml b/pyproject.toml index 50dd1916..db7a1f28 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -37,6 +37,7 @@ dependencies = [ "uvicorn>=0.27", ] scripts.ecc = "chipcompiler.cli.main:main" +scripts.cli = "chipcompiler.cli.main:main" [dependency-groups] dev = [ From 48182e2a144456c6110998813f9fffadc7a9c1d7 Mon Sep 17 00:00:00 2001 From: Emin Date: Tue, 5 May 2026 09:26:36 +0800 Subject: [PATCH 082/104] fix(cli): handle --flag=value syntax and filelist routing in legacy compat - _is_legacy_args now detects --workspace=ws and --rtl=top.v forms - _resolve_rtl_input routes .f/.fl/.filelist files to input_filelist instead of origin_verilog, matching the old CLI behavior --- chipcompiler/cli/main.py | 23 ++++++++++++++++++++--- 1 file changed, 20 insertions(+), 3 deletions(-) diff --git a/chipcompiler/cli/main.py b/chipcompiler/cli/main.py index bd262c5d..5c9797f3 100644 --- a/chipcompiler/cli/main.py +++ b/chipcompiler/cli/main.py @@ -283,7 +283,22 @@ def run(argv: Sequence[str] | None = None) -> int: def _is_legacy_args(args: list[str]) -> bool: - return any(a in _LEGACY_FLAGS for a in args) + for a in args: + if a in _LEGACY_FLAGS: + return True + if "=" in a: + flag = a.split("=", 1)[0] + if flag in _LEGACY_FLAGS: + return True + return False + + +def _resolve_rtl_input(rtl_path: str) -> tuple[str, str]: + normalized = os.path.abspath(os.path.expanduser(rtl_path)) + suffix = os.path.splitext(normalized)[1].lower() + if suffix in {".f", ".fl", ".filelist"}: + return ("", normalized) + return (normalized, "") def _run_legacy(argv: list[str]) -> int: @@ -315,14 +330,16 @@ def _run_legacy(argv: list[str]) -> int: "Frequency max [MHz]": args.freq, }) + origin_verilog, input_filelist = _resolve_rtl_input(args.rtl) + try: workspace = create_workspace( directory=args.workspace, origin_def="", - origin_verilog=args.rtl, + origin_verilog=origin_verilog, pdk="ics55", parameters=parameters, - input_filelist="", + input_filelist=input_filelist, pdk_root=args.pdk_root, ) except Exception as exc: From 37f70465a4796b3da9b1e883453f74449fc571d6 Mon Sep 17 00:00:00 2001 From: Emin Date: Tue, 5 May 2026 09:41:00 +0800 Subject: [PATCH 083/104] fix(cli): restore extensionless filelist detection in legacy compat _resolve_rtl_input now falls back to parse_filelist/validate_filelist for files without known RTL or filelist extensions, matching the old CLI. 
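The routing rule amounts to the self-contained sketch below. The real helper
(_resolve_rtl_input in chipcompiler/cli/main.py) delegates to
parse_filelist/validate_filelist from chipcompiler.utility.filelist; read_entries()
here is a simplified stand-in that only checks whether every referenced path exists.

    import os

    RTL_EXTS = {".v", ".sv", ".svh", ".vh"}
    FILELIST_EXTS = {".f", ".fl", ".filelist"}

    def read_entries(path: str) -> list[str]:
        with open(path) as f:
            return [ln.strip() for ln in f if ln.strip() and not ln.startswith("//")]

    def resolve_rtl_input(path: str) -> tuple[str, str]:
        """Return (origin_verilog, input_filelist) for a legacy --rtl argument."""
        normalized = os.path.abspath(os.path.expanduser(path))
        suffix = os.path.splitext(normalized)[1].lower()
        if suffix in FILELIST_EXTS:
            return "", normalized
        if suffix in RTL_EXTS:
            return normalized, ""
        try:
            entries = read_entries(normalized)
            if entries and all(os.path.exists(e) for e in entries):
                return "", normalized    # every entry resolves: treat as a filelist
        except OSError:
            pass
        return normalized, ""            # otherwise pass it through as plain RTL

    print(resolve_rtl_input("rtl/top.v"))   # ('/abs/.../rtl/top.v', '')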
--- chipcompiler/cli/main.py | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/chipcompiler/cli/main.py b/chipcompiler/cli/main.py index 5c9797f3..55a80ee8 100644 --- a/chipcompiler/cli/main.py +++ b/chipcompiler/cli/main.py @@ -294,10 +294,21 @@ def _is_legacy_args(args: list[str]) -> bool: def _resolve_rtl_input(rtl_path: str) -> tuple[str, str]: + from chipcompiler.utility.filelist import parse_filelist, validate_filelist + normalized = os.path.abspath(os.path.expanduser(rtl_path)) suffix = os.path.splitext(normalized)[1].lower() if suffix in {".f", ".fl", ".filelist"}: return ("", normalized) + if suffix in {".v", ".sv", ".svh", ".vh"}: + return (normalized, "") + try: + parse_filelist(normalized) + _, missing = validate_filelist(normalized) + if not missing: + return ("", normalized) + except Exception: + pass return (normalized, "") From c0f23f96e38ce60371ba6a6a283cab98152c92e7 Mon Sep 17 00:00:00 2001 From: Emin Date: Tue, 5 May 2026 09:54:01 +0800 Subject: [PATCH 084/104] fix(cli): restore argument validation in legacy invocation path _validate_legacy_args checks non-empty required fields, RTL file existence, PDK root directory, and positive frequency before calling create_workspace, matching the old CLI behavior. --- chipcompiler/cli/main.py | 29 +++++++++++++++++++++++++++++ 1 file changed, 29 insertions(+) diff --git a/chipcompiler/cli/main.py b/chipcompiler/cli/main.py index 55a80ee8..73e4fabf 100644 --- a/chipcompiler/cli/main.py +++ b/chipcompiler/cli/main.py @@ -312,6 +312,30 @@ def _resolve_rtl_input(rtl_path: str) -> tuple[str, str]: return (normalized, "") +def _validate_legacy_args(args) -> str | None: + if not str(args.workspace).strip(): + return "--workspace must not be empty" + if not str(args.design).strip(): + return "--design must not be empty" + if not str(args.top).strip(): + return "--top must not be empty" + if not str(args.clock).strip(): + return "--clock must not be empty" + rtl_path = os.path.abspath(os.path.expanduser(args.rtl)) + if not os.path.exists(rtl_path): + return f"--rtl path does not exist: {rtl_path}" + if not os.path.isfile(rtl_path): + return f"--rtl must point to a file: {rtl_path}" + pdk_root = os.path.abspath(os.path.expanduser(args.pdk_root)) + if not os.path.exists(pdk_root): + return f"--pdk-root path does not exist: {pdk_root}" + if not os.path.isdir(pdk_root): + return f"--pdk-root must point to a directory: {pdk_root}" + if args.freq <= 0: + return "--freq must be greater than 0" + return None + + def _run_legacy(argv: list[str]) -> int: import argparse as _argparse @@ -332,6 +356,11 @@ def _run_legacy(argv: list[str]) -> int: parser.add_argument("--freq", type=float, default=100.0) args = parser.parse_args(argv) + err = _validate_legacy_args(args) + if err: + print(f"Error: {err}", file=sys.stderr) + return 1 + parameters = get_parameters("ics55") parameters.data.update({ "PDK": "ics55", From 2b6b58ece9b806460b2399dc7c582cb1a93ebdad Mon Sep 17 00:00:00 2001 From: Emin Date: Tue, 5 May 2026 10:30:30 +0800 Subject: [PATCH 085/104] fix(cli): wrap legacy cli entrypoint in Nix, recognize flow-only steps in log handler - Nix derivation now wraps both ecc and cli executables with the same CHIPCOMPILER_OSS_CAD_DIR and PATH settings - ecc log now checks flow step names as fallback when the step directory is absent, matching metrics/artifacts behavior --- chipcompiler/cli/handlers.py | 8 ++++++++ nix/cli/default.nix | 5 +++++ 2 files changed, 13 insertions(+) diff --git a/chipcompiler/cli/handlers.py 
b/chipcompiler/cli/handlers.py index fdd6203c..4444543f 100644 --- a/chipcompiler/cli/handlers.py +++ b/chipcompiler/cli/handlers.py @@ -93,6 +93,7 @@ def log(args, ctx: CommandContext) -> CommandResult: discover_logs, discover_step_dirs, ) + from chipcompiler.cli.inspect import get_flow_step_names from chipcompiler.cli.log_view import build_log_records step_token = args.step @@ -126,6 +127,13 @@ def log(args, ctx: CommandContext) -> CommandResult: step_dirs = discover_step_dirs(ctx.run_dir) if step_token not in step_dirs: + flow_steps = get_flow_step_names(ctx.run_dir) + if step_token in flow_steps: + return CommandResult.err([{ + "step": step_token, + "log_status": "missing", + "inspect_cmd": disclosure_cmd(f"ecc log {step_token}", project, ctx.run_id), + }]) return CommandResult.err([{ "step": step_token, "status": "unknown_step", diff --git a/nix/cli/default.nix b/nix/cli/default.nix index 54fbc080..7bc641ed 100644 --- a/nix/cli/default.nix +++ b/nix/cli/default.nix @@ -46,6 +46,11 @@ python3Packages.buildPythonPackage { wrapProgram "$out/bin/ecc" \ --set CHIPCOMPILER_OSS_CAD_DIR "${yosysWithSlang}" \ --prefix PATH : "${yosysWithSlang}/bin" + if [ -e "$out/bin/cli" ]; then + wrapProgram "$out/bin/cli" \ + --set CHIPCOMPILER_OSS_CAD_DIR "${yosysWithSlang}" \ + --prefix PATH : "${yosysWithSlang}/bin" + fi ''; build-system = with python3Packages; [ uv-build ]; From ec47aa9501a1e1aee488ee003c7d4912e73da4eb Mon Sep 17 00:00:00 2001 From: Emin Date: Tue, 5 May 2026 10:55:55 +0800 Subject: [PATCH 086/104] fix(cli): skip symlinks when chmodding run tree on overwrite os.chmod follows symlinks, which could modify permissions on files outside the workspace. Skip chmod for symlink entries. --- chipcompiler/cli/handlers.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/chipcompiler/cli/handlers.py b/chipcompiler/cli/handlers.py index 4444543f..5248ed8a 100644 --- a/chipcompiler/cli/handlers.py +++ b/chipcompiler/cli/handlers.py @@ -593,9 +593,13 @@ def run(args, ctx: CommandContext) -> CommandResult: if args.overwrite and os.path.exists(run_dir): for root, dirs, files in os.walk(run_dir): for d in dirs: - os.chmod(os.path.join(root, d), 0o755) + dp = os.path.join(root, d) + if not os.path.islink(dp): + os.chmod(dp, 0o755) for f in files: - os.chmod(os.path.join(root, f), 0o644) + fp = os.path.join(root, f) + if not os.path.islink(fp): + os.chmod(fp, 0o644) os.chmod(run_dir, 0o755) shutil.rmtree(run_dir) From 125660e1c321bd87b98e803c8a8d42d757ee9a25 Mon Sep 17 00:00:00 2001 From: Emin Date: Tue, 5 May 2026 14:54:51 +0800 Subject: [PATCH 087/104] refactor(cli): remove duplicate quoting functions and simplify log plain dispatch - Remove unused format_value and format_plain_value from pretty.py - Replace _format_value in log_view.py with shared _plain_value from render.py - Simplify _render_log_plain by collapsing two identical render_result branches --- chipcompiler/cli/log_view.py | 12 +++--------- chipcompiler/cli/main.py | 7 +------ chipcompiler/cli/pretty.py | 19 ------------------- test/cli/test_pretty.py | 3 +-- 4 files changed, 5 insertions(+), 36 deletions(-) diff --git a/chipcompiler/cli/log_view.py b/chipcompiler/cli/log_view.py index 3b10b29c..062ad991 100644 --- a/chipcompiler/cli/log_view.py +++ b/chipcompiler/cli/log_view.py @@ -152,18 +152,12 @@ def render_log_pretty( target.write(f"{inspect_label} {inspect_cmd}\n") -def _format_value(value) -> str: - s = str(value) - if any(c.isspace() for c in s) or '\\' in s or '"' in s or '=' in s: - escaped = 
s.replace('\\', '\\\\').replace('"', '\\"') - return f'"{escaped}"' - return s - - def _render_plain_record(rec, target): + from chipcompiler.cli.render import _plain_value + parts = [] for key in ("step", "source", "line_no", "kind", "line", "inspect_cmd"): - parts.append(f"{key}={_format_value(rec.get(key, ''))}") + parts.append(f"{key}={_plain_value(rec.get(key, ''))}") target.write(" ".join(parts) + "\n") diff --git a/chipcompiler/cli/main.py b/chipcompiler/cli/main.py index 73e4fabf..dc61b5f7 100644 --- a/chipcompiler/cli/main.py +++ b/chipcompiler/cli/main.py @@ -236,13 +236,8 @@ def _render_log_plain(result) -> None: records = result.records if not records: return - first = records[0] - - if "log_status" in first or "status" in first: - render_result(result, OutputMode.PLAIN) - return - if "line_no" in first: + if "line_no" in records[0]: render_log_records_plain(records) return diff --git a/chipcompiler/cli/pretty.py b/chipcompiler/cli/pretty.py index 5b443bc8..aa65240c 100644 --- a/chipcompiler/cli/pretty.py +++ b/chipcompiler/cli/pretty.py @@ -49,25 +49,6 @@ def display_key(key): # --- Value formatting --- -def format_value(value): - s = str(value) - if any(c.isspace() for c in s) or "\\" in s or '"' in s or "=" in s: - escaped = s.replace("\\", "\\\\").replace('"', '\\"') - return f'"{escaped}"' - return s - - -# --- Plain key-value formatting (stable, parseable) --- - - -def format_plain_value(value): - s = str(value) - if any(c.isspace() for c in s) or "\\" in s or '"' in s or "=" in s: - escaped = s.replace("\\", "\\\\").replace('"', '\\"') - return f'"{escaped}"' - return s - - # --- Pretty block rendering --- diff --git a/test/cli/test_pretty.py b/test/cli/test_pretty.py index da282141..f2625820 100644 --- a/test/cli/test_pretty.py +++ b/test/cli/test_pretty.py @@ -12,15 +12,14 @@ RESET, YELLOW, display_key, - format_plain_value, render_header, status_style, style, supports_color, ) from chipcompiler.cli.render import _plain_value, render_plain -from chipcompiler.cli.types import CommandResult from io import StringIO +from chipcompiler.cli.types import CommandResult # --------------------------------------------------------------------------- From a771cb90e2a3f60f7a987ab7ab31b7302d68b2f0 Mon Sep 17 00:00:00 2001 From: Emin Date: Wed, 6 May 2026 09:40:49 +0800 Subject: [PATCH 088/104] chore: remove old cli python entrypoint Signed-off-by: Emin --- pyproject.toml | 1 - 1 file changed, 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index db7a1f28..50dd1916 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -37,7 +37,6 @@ dependencies = [ "uvicorn>=0.27", ] scripts.ecc = "chipcompiler.cli.main:main" -scripts.cli = "chipcompiler.cli.main:main" [dependency-groups] dev = [ From 663cc6b82516cd81440ed6adb6917abbdffbd744 Mon Sep 17 00:00:00 2001 From: Emin Date: Wed, 6 May 2026 19:43:12 +0800 Subject: [PATCH 089/104] fix(nix): use latest nix build from source Signed-off-by: Emin --- flake.lock | 24 +-- flake.nix | 2 + nix/chipcompiler/default.nix | 2 +- nix/cli/default.nix | 2 +- nix/overlay.nix | 5 +- nix/python/ecc-dreamplace/default.nix | 98 +++++++++- nix/python/ecc-tools/default.nix | 184 +++++++++++++++++- nix/python/ecc-tools/rustpkgs.nix | 63 ++++++ .../use-nix-built-rust-libraries.patch | 60 ++++++ 9 files changed, 409 insertions(+), 31 deletions(-) create mode 100644 nix/python/ecc-tools/rustpkgs.nix create mode 100644 nix/python/ecc-tools/use-nix-built-rust-libraries.patch diff --git a/flake.lock b/flake.lock index 60451134..986b11b8 100644 --- a/flake.lock 
+++ b/flake.lock @@ -53,11 +53,11 @@ }, "nixpkgs-lib_2": { "locked": { - "lastModified": 1772328832, - "narHash": "sha256-e+/T/pmEkLP6BHhYjx6GmwP5ivonQQn0bJdH9YrRB+Q=", + "lastModified": 1777168982, + "narHash": "sha256-GOkGPcboWE9BmGCRMLX3worL4EMnsnG8MyKmXNeYuhQ=", "owner": "nix-community", "repo": "nixpkgs.lib", - "rev": "c185c7a5e5dd8f9add5b2f8ebeff00888b070742", + "rev": "f5901329dade4a6ea039af1433fb087bd9c1fe14", "type": "github" }, "original": { @@ -68,11 +68,11 @@ }, "nixpkgs_2": { "locked": { - "lastModified": 1773201692, - "narHash": "sha256-NXrKzNMniu4Oam2kAFvqJ3GB2kAvlAFIriTAheaY8hw=", + "lastModified": 1777946660, + "narHash": "sha256-iw3XDIG6xxk+AZTcawCLHf6i9i4tXRzLZEoV9xhRToQ=", "owner": "NixOS", "repo": "nixpkgs", - "rev": "b6067cc0127d4db9c26c79e4de0513e58d0c40c9", + "rev": "bc57abace07689cfd34203aa5fb4027514895987", "type": "github" }, "original": { @@ -105,11 +105,11 @@ "nixpkgs-lib": "nixpkgs-lib_2" }, "locked": { - "lastModified": 1772408722, - "narHash": "sha256-rHuJtdcOjK7rAHpHphUb1iCvgkU3GpfvicLMwwnfMT0=", + "lastModified": 1777988971, + "narHash": "sha256-qIoWPDs+0/8JecyYgE3gpKQxW/4bLW/gp45vow9ioCQ=", "owner": "hercules-ci", "repo": "flake-parts", - "rev": "f20dc5d9b8027381c474144ecabc9034d6a839a3", + "rev": "0678d8986be1661af6bb555f3489f2fdfc31f6ff", "type": "github" }, "original": { @@ -154,11 +154,11 @@ ] }, "locked": { - "lastModified": 1773297127, - "narHash": "sha256-6E/yhXP7Oy/NbXtf1ktzmU8SdVqJQ09HC/48ebEGBpk=", + "lastModified": 1775636079, + "narHash": "sha256-pc20NRoMdiar8oPQceQT47UUZMBTiMdUuWrYu2obUP0=", "owner": "numtide", "repo": "treefmt-nix", - "rev": "71b125cd05fbfd78cab3e070b73544abe24c5016", + "rev": "790751ff7fd3801feeaf96d7dc416a8d581265ba", "type": "github" }, "original": { diff --git a/flake.nix b/flake.nix index cd2b7c8c..57368116 100644 --- a/flake.nix +++ b/flake.nix @@ -58,6 +58,8 @@ inherit (pkgs) chipcompiler cli + ecc-tools-python + ecc-dreamplace-python ; }; }; diff --git a/nix/chipcompiler/default.nix b/nix/chipcompiler/default.nix index 6f8ea436..cc3a3725 100644 --- a/nix/chipcompiler/default.nix +++ b/nix/chipcompiler/default.nix @@ -8,7 +8,7 @@ python3Packages.buildPythonPackage { pname = "chipcompiler"; - version = "0.1.0"; + version = "0.1.0.0-alpha.2"; pyproject = true; src = diff --git a/nix/cli/default.nix b/nix/cli/default.nix index 7bc641ed..23f5c5e2 100644 --- a/nix/cli/default.nix +++ b/nix/cli/default.nix @@ -9,7 +9,7 @@ python3Packages.buildPythonPackage { pname = "chipcompiler-cli"; - version = "0.1.0"; + version = "0.1.0.0-alpha.2"; pyproject = true; src = diff --git a/nix/overlay.nix b/nix/overlay.nix index b6469f8c..ed26825c 100644 --- a/nix/overlay.nix +++ b/nix/overlay.nix @@ -1,5 +1,8 @@ final: prev: { - ecc-tools-python = prev.python3Packages.callPackage ./python/ecc-tools { }; + ecc-tools-python = prev.python3Packages.callPackage ./python/ecc-tools { + gflags = prev.gflags; + onnxruntime = prev.onnxruntime; + }; ecc-dreamplace-python = prev.python3Packages.callPackage ./python/ecc-dreamplace { }; chipcompiler = prev.callPackage ./chipcompiler { }; cli = prev.callPackage ./cli { }; diff --git a/nix/python/ecc-dreamplace/default.nix b/nix/python/ecc-dreamplace/default.nix index f492e692..db200323 100644 --- a/nix/python/ecc-dreamplace/default.nix +++ b/nix/python/ecc-dreamplace/default.nix @@ -1,7 +1,17 @@ { lib, + stdenv, buildPythonPackage, - fetchurl, + fetchFromGitHub, + cmake, + ninja, + flex, + bison, + python, + pkg-config, + zlib, + boost, + cairo, cairocffi, distutils, matplotlib, @@ -11,18 +21,90 @@ 
scipy, setuptools, shapely, + torch, + uv-build, wheel, }: +let + version = "0.1.0.0-alpha.2"; + + rootSrc = fetchFromGitHub { + owner = "openecos-projects"; + repo = "ecc-dreamplace"; + rev = "b8606d35455b3a6aae7cd0a5584f4ea389cc223a"; + hash = "sha256-+eFHxOyt6BwUYZ5MN1DHGu35f7NoL6f4PiAATj9nDrc="; + fetchSubmodules = true; + }; + + nativeInputs = [ + cmake + ninja + flex + bison + python + pkg-config + ]; + + runtimeInputs = [ + zlib + boost + cairo + torch + ]; + + runtime = stdenv.mkDerivation { + pname = "ecc-dreamplace-runtime"; + inherit version; + src = rootSrc; + + nativeBuildInputs = nativeInputs; + buildInputs = runtimeInputs; + + cmakeFlags = [ + (lib.cmakeFeature "CMAKE_POLICY_VERSION_MINIMUM" "3.5") + (lib.cmakeFeature "CMAKE_CXX_ABI" "1") + (lib.cmakeFeature "PYTHON_EXECUTABLE" python.interpreter) + (lib.cmakeFeature "Python_EXECUTABLE" python.interpreter) + (lib.cmakeFeature "TORCH_INSTALL_PREFIX" "${torch}/${python.sitePackages}/torch") + (lib.cmakeFeature "TORCH_ENABLE_CUDA" "0") + (lib.cmakeFeature "TORCH_VERSION" torch.version) + ]; + + postPatch = '' + sed -i 's/^[[:space:]]*CMAKE_POLICY(SET CMP0048 OLD)/CMAKE_POLICY(SET CMP0048 NEW)/' thirdparty/Limbo/limbo/thirdparty/lemon/CMakeLists.txt + sed -i 's/static void thread_hold();/static void thread_hold(int sig);/; s/static void thread_hold ()/static void thread_hold(int sig)/' thirdparty/Limbo/limbo/thirdparty/CThreadPool/thpool.c + sed -i 's/i1\.center() < i2\.center()/(i1.low() + i1.high()) < (i2.low() + i2.high())/' dreamplace/ops/place_io/src/Interval.h + ''; + + installPhase = '' + runHook preInstall + cmake --install . --prefix "$out" + runHook postInstall + ''; + + enableParallelBuild = true; + }; +in buildPythonPackage { pname = "ecc-dreamplace"; - version = "0.1.0a1"; - format = "wheel"; + inherit version; + pyproject = true; - src = fetchurl { - url = "https://github.com/openecos-projects/ecc-dreamplace/releases/download/v0.1.0-alpha.1/ecc_dreamplace-0.1.0a1-py3-none-manylinux_2_34_x86_64.whl"; - hash = "sha256-ISE5xD+CVJiWjtoQMJlZuZzZOuwHRNGCoXu100tTFF4="; - }; + src = rootSrc; + + build-system = [ uv-build ]; + + buildInputs = runtimeInputs; + + postPatch = '' + substituteInPlace pyproject.toml \ + --replace-fail 'uv_build>=0.10.9,<0.12' 'uv_build>=0.10.0,<0.12' + ''; + + preBuild = '' + cp -r ${runtime}/dreamplace/. 
dreamplace/ + ''; dependencies = [ cairocffi @@ -34,6 +116,7 @@ buildPythonPackage { scipy setuptools shapely + torch wheel ]; @@ -45,7 +128,6 @@ buildPythonPackage { "pyunpack" "shap" "statsmodels" - "torch" "xgboost" ]; diff --git a/nix/python/ecc-tools/default.nix b/nix/python/ecc-tools/default.nix index 8c6e13be..b666f627 100644 --- a/nix/python/ecc-tools/default.nix +++ b/nix/python/ecc-tools/default.nix @@ -1,22 +1,190 @@ { lib, + stdenv, buildPythonPackage, - fetchurl, + fetchFromGitHub, + callPackages, + cmake, + ninja, + flex, + bison, + python, + patchelf, + pkg-config, + zlib, + tcl, + boost, + eigen, + yaml-cpp, + libunwind, + glog, + gtest, + gflags, + metis, + gmp, + curl, + onnxruntime, + tbb_2022, + uv-build, }: +let + version = "0.1.0.0-alpha.2"; + + rootSrc = fetchFromGitHub { + owner = "openecos-projects"; + repo = "ecc-tools"; + rev = "36160db0b30ccd627f2c2a06d9fa517d4cce4d49"; + hash = "sha256-/09acQVPB9l4EyWtKy3DGkIFsjsJkao2PW3VS2gmLLI="; + }; + + patchedSrc = stdenv.mkDerivation { + pname = "ecc-tools-src"; + inherit version; + src = rootSrc; + + patches = [ ./use-nix-built-rust-libraries.patch ]; + + postPatch = '' + substituteInPlace src/operation/iIR/source/iir-rust/CMakeLists.txt \ + --replace-fail 'ADD_EXTERNAL_PROJ(iir)' "" \ + --replace-fail 'target_link_libraries(iIR-Rust PRIVATE ''${RUST_LIB_PATH} dl)' 'target_link_libraries(iIR-Rust PRIVATE iir dl)' + + substituteInPlace src/operation/iSTA/CMakeLists.txt \ + --replace-fail 'link_directories(''${HOME_THIRDPARTY}/onnxruntime/)' 'link_libraries(${onnxruntime}/lib/libonnxruntime.so)' + ''; + + dontBuild = true; + dontFixup = true; + + installPhase = '' + runHook preInstall + cp -r . "$out" + runHook postInstall + ''; + }; + + rustpkgs = callPackages ./rustpkgs.nix { rootSrc = patchedSrc; }; + + nativeInputs = [ + cmake + ninja + flex + bison + python + patchelf + pkg-config + ]; + + runtimeInputs = [ + rustpkgs.iir-rust + rustpkgs.sdf_parse + rustpkgs.spef-parser + rustpkgs.vcd_parser + rustpkgs.verilog-parser + rustpkgs.liberty-parser + stdenv.cc.cc.lib + zlib + tcl + boost + eigen + yaml-cpp + libunwind + glog + gtest + gflags + metis + gmp + curl + onnxruntime + tbb_2022 + ]; + + runtime = stdenv.mkDerivation { + pname = "ecc-tools-runtime"; + inherit version; + src = patchedSrc; + + nativeBuildInputs = nativeInputs; + buildInputs = runtimeInputs; + cmakeGenerator = "Ninja"; + + cmakeFlags = [ + (lib.cmakeBool "BUILD_ECOS" true) + (lib.cmakeBool "BUILD_PYTHON" true) + (lib.cmakeBool "BUILD_STATIC_LIB" false) + (lib.cmakeBool "COMPATIBILITY_MODE" true) + (lib.cmakeFeature "Python3_EXECUTABLE" python.interpreter) + (lib.cmakeFeature "Python3_ROOT_DIR" "${python}") + ]; + + buildPhase = '' + runHook preBuild + cmake --build . --target ecc_py + runHook postBuild + ''; + + installPhase = '' + runHook preInstall + + install -d "$out/ecc_tools_bin" + for dir in . 
../bin; do + if [ -d "$dir" ]; then + find "$dir" -type f -name '*.so*' -exec cp -f {} "$out/ecc_tools_bin/" \; + fi + done + + ecc_py_so="$(find "$out/ecc_tools_bin" -type f -name 'ecc_py*.so' -print -quit)" + if [ -z "$ecc_py_so" ]; then + echo "ERROR: ecc_py extension was not built" >&2 + exit 1 + fi + + for so in "$out"/ecc_tools_bin/*.so*; do + [ -e "$so" ] || continue + patchelf --set-rpath "\$ORIGIN:${lib.makeLibraryPath runtimeInputs}" "$so" || true + done + + runHook postInstall + ''; + + enableParallelBuild = true; + }; +in buildPythonPackage { pname = "ecc-tools"; - version = "0.1.0a2"; - format = "wheel"; + inherit version; + pyproject = true; - src = fetchurl { - url = "https://github.com/openecos-projects/ecc-tools/releases/download/v0.1.0-alpha.2/ecc_tools-0.1.0a2-py3-none-manylinux_2_34_x86_64.whl"; - hash = "sha256-NgqtSHQiiN69mqZm5afk/13jCugxyUVCa0WAUKQHyL4="; - }; + src = patchedSrc; + + buildInputs = runtimeInputs; + + build-system = [ uv-build ]; + nativeBuildInputs = [ patchelf ]; + + preBuild = '' + install -d ecc_tools_bin + cp -f ${runtime}/ecc_tools_bin/*.so* ecc_tools_bin/ + ''; + + postInstall = '' + site_packages="$out/${python.sitePackages}" + install -d "$site_packages/ecc_tools_bin" + cp -f ${patchedSrc}/ecc_tools_bin/__init__.py "$site_packages/ecc_tools_bin/" + cp -f ${runtime}/ecc_tools_bin/*.so* "$site_packages/ecc_tools_bin/" + ''; + + postFixup = '' + for so in "$out/${python.sitePackages}"/ecc_tools_bin/*.so*; do + [ -e "$so" ] || continue + patchelf --set-rpath "\$ORIGIN:${lib.makeLibraryPath runtimeInputs}" "$so" + done + ''; doCheck = false; - pythonImportsCheck = [ "ecc_tools_bin" ]; + pythonImportsCheck = [ "ecc_tools_bin.ecc_py" ]; meta = { description = "ECC tools Python wheel"; diff --git a/nix/python/ecc-tools/rustpkgs.nix b/nix/python/ecc-tools/rustpkgs.nix new file mode 100644 index 00000000..dbc678cf --- /dev/null +++ b/nix/python/ecc-tools/rustpkgs.nix @@ -0,0 +1,63 @@ +{ rustPlatform, rootSrc }: +let + mkRustPackage = _: args: rustPlatform.buildRustPackage args; +in +builtins.mapAttrs mkRustPackage { + iir-rust = rec { + pname = "iir-rust"; + version = "0.1.3"; + src = rootSrc; + sourceRoot = "${src.name}/src/operation/iIR/source/iir-rust/iir"; + cargoHash = "sha256-CV1e/f3oCKW5mTbQnFBnp7E2d9nFyDwY3qclP2HwdPM="; + doCheck = false; + nativeBuildInputs = [ rustPlatform.bindgenHook ]; + }; + + liberty-parser = rec { + pname = "liberty-parser"; + version = "0.1.0"; + src = rootSrc; + sourceRoot = "${src.name}/src/database/manager/parser/liberty/lib-rust/liberty-parser"; + cargoHash = "sha256-nRIOuSz5ImENvKeMAnthmBo+2/Jy5xbM66xkcfVCTMI="; + doCheck = false; + nativeBuildInputs = [ rustPlatform.bindgenHook ]; + }; + + sdf_parse = rec { + pname = "sdf_parse"; + version = "0.1.0"; + src = rootSrc; + sourceRoot = "${src.name}/src/database/manager/parser/sdf/sdf_parse"; + cargoHash = "sha256-PORA/9DDIax4lOn/pzmi7Y8mCCBUphMTzbBsb64sDl0="; + nativeBuildInputs = [ rustPlatform.bindgenHook ]; + }; + + spef-parser = rec { + pname = "spef-parser"; + version = "0.2.4"; + src = rootSrc; + sourceRoot = "${src.name}/src/database/manager/parser/spef/spef-parser"; + cargoHash = "sha256-Qr/oXTqn2gaxyAyLsRjaXNniNzIYVzPGefXTdkULmYk="; + nativeBuildInputs = [ rustPlatform.bindgenHook ]; + }; + + vcd_parser = rec { + pname = "vcd_parser"; + version = "0.1.0"; + src = rootSrc; + sourceRoot = "${src.name}/src/database/manager/parser/vcd/vcd_parser"; + cargoHash = "sha256-xcfVzDrnW4w3fU7qo6xzSQeIH8sEbEyzPF92F5tDcAk="; + doCheck = false; + nativeBuildInputs = [ 
rustPlatform.bindgenHook ]; + }; + + verilog-parser = rec { + pname = "verilog-parser"; + version = "0.1.0"; + src = rootSrc; + sourceRoot = "${src.name}/src/database/manager/parser/verilog/verilog-rust/verilog-parser"; + cargoHash = "sha256-ooxY8Q8bfD+klBGfpTDD3YyWptEOGGHDoyamhjlSNTM="; + doCheck = false; + nativeBuildInputs = [ rustPlatform.bindgenHook ]; + }; +} diff --git a/nix/python/ecc-tools/use-nix-built-rust-libraries.patch b/nix/python/ecc-tools/use-nix-built-rust-libraries.patch new file mode 100644 index 00000000..6ddb4292 --- /dev/null +++ b/nix/python/ecc-tools/use-nix-built-rust-libraries.patch @@ -0,0 +1,60 @@ +diff --git a/src/database/manager/parser/liberty/CMakeLists.txt b/src/database/manager/parser/liberty/CMakeLists.txt +index f6511c3..43f18b5 100644 +--- a/src/database/manager/parser/liberty/CMakeLists.txt ++++ b/src/database/manager/parser/liberty/CMakeLists.txt +@@ -19,9 +19,7 @@ endif() + + message(STATUS "liberty parser rust lib path ${RUST_LIB_PATH}") + +-ADD_EXTERNAL_PROJ(liberty) +- +-target_link_libraries(liberty str sta-solver log ${RUST_LIB_PATH} dl) ++target_link_libraries(liberty str sta-solver log liberty_parser dl) + + target_include_directories(liberty PUBLIC + ${CMAKE_CURRENT_SOURCE_DIR} +diff --git a/src/database/manager/parser/spef/CMakeLists.txt b/src/database/manager/parser/spef/CMakeLists.txt +index 322eb15..3ab9801 100644 +--- a/src/database/manager/parser/spef/CMakeLists.txt ++++ b/src/database/manager/parser/spef/CMakeLists.txt +@@ -19,9 +19,7 @@ endif() + + message(STATUS "spef parser rust lib path ${RUST_LIB_PATH}") + +-ADD_EXTERNAL_PROJ(spef) +- +-target_link_libraries(spef log ${RUST_LIB_PATH} dl) ++target_link_libraries(spef log spef_parser dl) + target_include_directories(spef PUBLIC + ${CMAKE_CURRENT_SOURCE_DIR}) + +diff --git a/src/database/manager/parser/vcd/CMakeLists.txt b/src/database/manager/parser/vcd/CMakeLists.txt +index 0aa2635..ab944c2 100644 +--- a/src/database/manager/parser/vcd/CMakeLists.txt ++++ b/src/database/manager/parser/vcd/CMakeLists.txt +@@ -15,9 +15,7 @@ SETUP_RUST_PROJECT() + + message(STATUS "vcd parser rust lib path ${RUST_LIB_PATH}") + +-ADD_EXTERNAL_PROJ(vcd) +- +-target_link_libraries(vcd ${RUST_LIB_PATH} dl) ++target_link_libraries(vcd vcd_parser dl) + target_include_directories(vcd PUBLIC + ${CMAKE_CURRENT_SOURCE_DIR}) + +diff --git a/src/database/manager/parser/verilog/CMakeLists.txt b/src/database/manager/parser/verilog/CMakeLists.txt +index 1ceaee3..a318da4 100644 +--- a/src/database/manager/parser/verilog/CMakeLists.txt ++++ b/src/database/manager/parser/verilog/CMakeLists.txt +@@ -40,9 +40,7 @@ endif() + + message(STATUS "verilog parser rust lib path ${RUST_LIB_PATH}") + +-ADD_EXTERNAL_PROJ(verilog) +- +-target_link_libraries(verilog str log ${ZLIB_LIBRARIES} ${RUST_LIB_PATH} dl) ++target_link_libraries(verilog str log ${ZLIB_LIBRARIES} verilog_parser dl) + + target_include_directories(verilog + PUBLIC From 063461d85684c9dd005c90dc0e5070492d25ceeb Mon Sep 17 00:00:00 2001 From: Emin Date: Thu, 7 May 2026 17:01:41 +0800 Subject: [PATCH 090/104] fix(params): set Target density to 0.2 Signed-off-by: Emin --- chipcompiler/data/parameter.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/chipcompiler/data/parameter.py b/chipcompiler/data/parameter.py index 0e4f5dcb..31a60123 100644 --- a/chipcompiler/data/parameter.py +++ b/chipcompiler/data/parameter.py @@ -22,7 +22,7 @@ "Aspect ratio" : 1 }, "Max fanout" : 20, - "Target density" : 0.8, + "Target density" : 0.2, "Target overflow" : 0.1, 
"Global right padding": 0, "Cell padding x": 600, From cc1fac9a2185075d718d69b20d9fd3ca49a87634 Mon Sep 17 00:00:00 2001 From: Emin Date: Fri, 8 May 2026 15:00:58 +0800 Subject: [PATCH 091/104] nix(ecc-tools): fix iNO memory leak Signed-off-by: Emin --- nix/python/ecc-tools/default.nix | 5 ++++- .../ecc-tools/fix-ino-output-summary-init.patch | 14 ++++++++++++++ 2 files changed, 18 insertions(+), 1 deletion(-) create mode 100644 nix/python/ecc-tools/fix-ino-output-summary-init.patch diff --git a/nix/python/ecc-tools/default.nix b/nix/python/ecc-tools/default.nix index b666f627..5cb4b8ee 100644 --- a/nix/python/ecc-tools/default.nix +++ b/nix/python/ecc-tools/default.nix @@ -43,7 +43,10 @@ let inherit version; src = rootSrc; - patches = [ ./use-nix-built-rust-libraries.patch ]; + patches = [ + ./use-nix-built-rust-libraries.patch + ./fix-ino-output-summary-init.patch + ]; postPatch = '' substituteInPlace src/operation/iIR/source/iir-rust/CMakeLists.txt \ diff --git a/nix/python/ecc-tools/fix-ino-output-summary-init.patch b/nix/python/ecc-tools/fix-ino-output-summary-init.patch new file mode 100644 index 00000000..56748563 --- /dev/null +++ b/nix/python/ecc-tools/fix-ino-output-summary-init.patch @@ -0,0 +1,14 @@ +diff --git a/src/operation/iNO/api/NoApi.cpp b/src/operation/iNO/api/NoApi.cpp +index 45a676320..5f69b1be2 100644 +--- a/src/operation/iNO/api/NoApi.cpp ++++ b/src/operation/iNO/api/NoApi.cpp +@@ -225,8 +225,7 @@ ieda_feature::NetOptSummary NoApi::outputSummary() { + clock_timing.hold_wns = eval_data.hold_wns; + clock_timing.suggest_freq = eval_data.freq; + +- ieda_feature::NOClockTimingCmp clock_cmp; +- memset(&clock_cmp, 0, sizeof(ieda_feature::NOClockTimingCmp)); ++ ieda_feature::NOClockTimingCmp clock_cmp{}; + clock_cmp.origin = clock_timing; + summary_map[clock_name] = clock_cmp; + } From 54af9c57eb24886dfe9c00f9670280482ad35c77 Mon Sep 17 00:00:00 2001 From: Emin Date: Fri, 8 May 2026 15:01:25 +0800 Subject: [PATCH 092/104] nix(ecc-dreamplace): fix NCTUgr permission error Signed-off-by: Emin --- nix/python/ecc-dreamplace/default.nix | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/nix/python/ecc-dreamplace/default.nix b/nix/python/ecc-dreamplace/default.nix index db200323..cea3ced3 100644 --- a/nix/python/ecc-dreamplace/default.nix +++ b/nix/python/ecc-dreamplace/default.nix @@ -75,6 +75,7 @@ let sed -i 's/^[[:space:]]*CMAKE_POLICY(SET CMP0048 OLD)/CMAKE_POLICY(SET CMP0048 NEW)/' thirdparty/Limbo/limbo/thirdparty/lemon/CMakeLists.txt sed -i 's/static void thread_hold();/static void thread_hold(int sig);/; s/static void thread_hold ()/static void thread_hold(int sig)/' thirdparty/Limbo/limbo/thirdparty/CThreadPool/thpool.c sed -i 's/i1\.center() < i2\.center()/(i1.low() + i1.high()) < (i2.low() + i2.high())/' dreamplace/ops/place_io/src/Interval.h + sed -i '/import stat/d; /nctugr_bin = "%s\/NCTUgr"/,+2d' dreamplace/ops/nctugr_binary/nctugr_binary.py ''; installPhase = '' @@ -104,6 +105,16 @@ buildPythonPackage { preBuild = '' cp -r ${runtime}/dreamplace/. 
dreamplace/ + rm -rf thirdparty + cp -r ${runtime}/thirdparty thirdparty + chmod +x thirdparty/NCTUgr.ICCAD2012/NCTUgr + ''; + + postInstall = '' + site_packages="$out/${python.sitePackages}" + rm -rf "$site_packages/thirdparty" + cp -r ${runtime}/thirdparty "$site_packages/thirdparty" + chmod +x "$site_packages/thirdparty/NCTUgr.ICCAD2012/NCTUgr" ''; dependencies = [ From 5a0acae1a3125cec424d12b67303ae14a32f9669 Mon Sep 17 00:00:00 2001 From: Emin Date: Fri, 8 May 2026 15:01:41 +0800 Subject: [PATCH 093/104] chore: bump ecc-dreamplace to 0.1.0a2 Signed-off-by: Emin --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index 50dd1916..d563add0 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -20,7 +20,7 @@ classifiers = [ "Programming Language :: Python :: 3.14", ] dependencies = [ - "ecc-dreamplace==0.1.0a1", + "ecc-dreamplace==0.1.0a2", "ecc-tools==0.1.0a2", "fastapi>=0.109", "klayout>=0.30.2", From b997d317c6aa0495e90a7ff8a5bca70f897aa7b4 Mon Sep 17 00:00:00 2001 From: Emin Date: Fri, 8 May 2026 15:04:51 +0800 Subject: [PATCH 094/104] chore: bump ecc and ecc-dreamplace Signed-off-by: Emin --- chipcompiler/thirdparty/ecc-dreamplace | 2 +- chipcompiler/thirdparty/ecc-tools | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/chipcompiler/thirdparty/ecc-dreamplace b/chipcompiler/thirdparty/ecc-dreamplace index b8606d35..a79d177b 160000 --- a/chipcompiler/thirdparty/ecc-dreamplace +++ b/chipcompiler/thirdparty/ecc-dreamplace @@ -1 +1 @@ -Subproject commit b8606d35455b3a6aae7cd0a5584f4ea389cc223a +Subproject commit a79d177b6abff670de9a778416c2a9fc27db76c9 diff --git a/chipcompiler/thirdparty/ecc-tools b/chipcompiler/thirdparty/ecc-tools index 36160db0..e7dc4d3c 160000 --- a/chipcompiler/thirdparty/ecc-tools +++ b/chipcompiler/thirdparty/ecc-tools @@ -1 +1 @@ -Subproject commit 36160db0b30ccd627f2c2a06d9fa517d4cce4d49 +Subproject commit e7dc4d3c3f59474cbe00e435010e2ebb0525976e From e1eaf8ae84bd44e5e562f54bf9e9e25733e08048 Mon Sep 17 00:00:00 2001 From: Emin Date: Fri, 8 May 2026 15:40:46 +0800 Subject: [PATCH 095/104] feat(cli): color error codes/lines red and add failure context to ecc run Render all error codes in red via style() instead of status_style() lookup. Color full error log-line content red in render_log_pretty(). Add extract_error_context() with anchor priority (last error > last traceback > last "failed" > last non-empty) and 50-line hard limit. Add format_error_context() for the context block format with compact kind labels and grep-friendly command footer. Integrate into run_flow_with_progress for failed interactive steps only, gated behind existing TEXT-mode check. 
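A minimal usage sketch of the two helpers this patch introduces, shown here only for illustration; the log path, project name, and disclosure command are assumptions, not values from the patch. It mirrors the block that run_flow_with_progress prints after a failed interactive step.

    # Hedged sketch: extract_error_context() returns LogLine records
    # (line_no, kind, text); format_error_context() renders them into the
    # failure context block with the grep-friendly command footer.
    from chipcompiler.cli.log_view import extract_error_context
    from chipcompiler.cli.progress import format_error_context

    log_path = "runs/default/Synthesis_yosys/log/synthesis.log"  # hypothetical path
    with open(log_path, errors="replace") as f:
        lines = f.read().splitlines()

    ctx_lines = extract_error_context(lines, max_lines=50)
    block = format_error_context(
        "log/synthesis.log",                    # relative path shown in the header
        ctx_lines,
        "ecc log synthesis --project myproj",   # assumed disclosure command
        color=False,
    )
    print(block, end="")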
Co-Authored-By: Claude Opus 4.7 (1M context) --- chipcompiler/cli/log_view.py | 64 +++++++++++- chipcompiler/cli/pretty.py | 2 +- chipcompiler/cli/progress.py | 90 ++++++++++++++++- test/cli/test_log_view.py | 188 +++++++++++++++++++++++++++++++++++ test/cli/test_pretty.py | 59 +++++++++++ test/cli/test_progress.py | 182 +++++++++++++++++++++++++++++++++ 6 files changed, 574 insertions(+), 11 deletions(-) diff --git a/chipcompiler/cli/log_view.py b/chipcompiler/cli/log_view.py index 062ad991..c2c7f43c 100644 --- a/chipcompiler/cli/log_view.py +++ b/chipcompiler/cli/log_view.py @@ -2,6 +2,9 @@ import re import sys +from chipcompiler.cli.pretty import BOLD, DIM, RED, YELLOW, BLUE, CYAN, RESET, style +from chipcompiler.cli.render import _plain_value + class LineKind(enum.Enum): ERROR = "error" @@ -104,8 +107,6 @@ def build_log_records( # --- Pretty rendering --- -from chipcompiler.cli.pretty import BOLD, DIM, RED, YELLOW, BLUE, CYAN, RESET, style - _KIND_LABEL = { LineKind.ERROR: "error", LineKind.WARNING: "warn ", @@ -144,7 +145,10 @@ def render_log_pretty( label = _KIND_LABEL[ll.kind] if color and ll.kind in _KIND_COLOR: code = _KIND_COLOR[ll.kind] - target.write(f" {code}{label}{RESET} {ll.text}\n") + if ll.kind == LineKind.ERROR: + target.write(f" {code}{label} {ll.text}{RESET}\n") + else: + target.write(f" {code}{label}{RESET} {ll.text}\n") else: target.write(f" {label} {ll.text}\n") @@ -153,8 +157,6 @@ def render_log_pretty( def _render_plain_record(rec, target): - from chipcompiler.cli.render import _plain_value - parts = [] for key in ("step", "source", "line_no", "kind", "line", "inspect_cmd"): parts.append(f"{key}={_plain_value(rec.get(key, ''))}") @@ -203,3 +205,55 @@ def render_log_listing_pretty( target.write(f"{step_label} {source}\n") inspect_label = f" {style('inspect:', DIM, color)}" if color else " inspect:" target.write(f"{inspect_label} {inspect}\n") + + +# --- Context extraction --- + + +def extract_error_context(lines: list[str], max_lines: int = 50) -> list: + """Extract at most max_lines log lines around the failure anchor. + + Anchor priority: last error > last traceback > last \"failed\" > last non-empty. 
+ """ + if not lines: + return [] + + annotated = annotate_log_lines(lines) + total = len(annotated) + + anchor_idx = _find_context_anchor(annotated) + + if total <= max_lines: + return annotated + + half = max_lines // 2 + start = max(0, anchor_idx - half) + end = min(total, start + max_lines) + if end - start < max_lines: + start = max(0, end - max_lines) + + return annotated[start:end] + + +def _find_context_anchor(annotated): + # Priority 1: last error line + for i in range(len(annotated) - 1, -1, -1): + if annotated[i].kind == LineKind.ERROR: + return i + + # Priority 2: last traceback line + for i in range(len(annotated) - 1, -1, -1): + if annotated[i].kind == LineKind.TRACEBACK: + return i + + # Priority 3: last line containing "failed" + for i in range(len(annotated) - 1, -1, -1): + if "failed" in annotated[i].text.lower(): + return i + + # Priority 4: last non-empty line + for i in range(len(annotated) - 1, -1, -1): + if annotated[i].text.strip(): + return i + + return len(annotated) - 1 diff --git a/chipcompiler/cli/pretty.py b/chipcompiler/cli/pretty.py index aa65240c..33a9b9c4 100644 --- a/chipcompiler/cli/pretty.py +++ b/chipcompiler/cli/pretty.py @@ -399,7 +399,7 @@ def render_error(records, file=None, color=True): for record in records: error = record.get("error", record.get("kind", "error")) reason = record.get("reason", "") - target.write(f" {status_style(error, color)}") + target.write(f" {style(error, RED, color)}") if reason: target.write(f" {reason}") target.write("\n") diff --git a/chipcompiler/cli/progress.py b/chipcompiler/cli/progress.py index da265451..6f4b0026 100644 --- a/chipcompiler/cli/progress.py +++ b/chipcompiler/cli/progress.py @@ -4,8 +4,11 @@ import threading import time -from chipcompiler.cli.pretty import BOLD, DIM, CYAN, GREEN, RED, RESET, style as _style +from chipcompiler.cli.log_view import LineKind, extract_error_context +from chipcompiler.cli.output import disclosure_cmd, normalize_step_name, normalize_state +from chipcompiler.cli.pretty import BOLD, DIM, CYAN, GREEN, RED, RESET, YELLOW, BLUE, style as _style from chipcompiler.cli.types import OutputMode +from chipcompiler.data import StateEnum, log_flow def supports_color(stream, mode, env=None): @@ -45,6 +48,62 @@ def truncate_to_width(text, width): return text[: width - 3] + "..." +# --- Failure context block formatting --- + +_KIND_LABEL_COMPACT = { + LineKind.ERROR: "ERROR", + LineKind.WARNING: "WARN ", + LineKind.TRACEBACK: "TRACE", + LineKind.INFO: "INFO ", + LineKind.SECTION: "-----", + LineKind.PLAIN: " ", +} + +_KIND_COLOR_CONTEXT = { + LineKind.ERROR: RED, + LineKind.WARNING: YELLOW, + LineKind.TRACEBACK: YELLOW, + LineKind.INFO: BLUE, + LineKind.SECTION: CYAN, +} + + +def format_error_context(log_path, context_lines, log_cmd, color=True): + """Format a failure context block for interactive progress output. + + Args: + log_path: Relative path to the failed step's log file. + context_lines: List of LogLine objects from extract_error_context(). + log_cmd: Full disclosure command (e.g. 'ecc log synth --project p'). + color: Whether to emit ANSI color codes. 
+ """ + lines = [] + lines.append(f"error: {log_path}") + + if context_lines: + max_no = max(ll.line_no for ll in context_lines) + width = max(len(str(max_no)), 4) + else: + width = 4 + + for ll in context_lines: + no = str(ll.line_no).rjust(width) + label = _KIND_LABEL_COMPACT[ll.kind] + + if color and ll.kind in _KIND_COLOR_CONTEXT: + code = _KIND_COLOR_CONTEXT[ll.kind] + if ll.kind == LineKind.ERROR: + lines.append(f" {no} {code}{label} {ll.text}{RESET}") + else: + lines.append(f" {no} {code}{label}{RESET} {ll.text}") + else: + lines.append(f" {no} {label} {ll.text}") + + lines.append(f"For more log info: {log_cmd}") + lines.append(f'command="{log_cmd}"') + return "\n".join(lines) + "\n" + + def latest_log_line(path): if not path or not os.path.isfile(path): return None @@ -118,6 +177,11 @@ def finish_step(self, step, tool, status, runtime, log_path, inspect_cmd, succes self._stream.write(f"{inspect_label} {inspect_cmd}\n") self._stream.flush() + def render_failure_context(self, block): + """Write a pre-formatted failure context block to the progress stream.""" + self._stream.write(block) + self._stream.flush() + def _poll_log(renderer, log_path, stop_event, interval=0.5): while not stop_event.is_set(): @@ -127,10 +191,6 @@ def _poll_log(renderer, log_path, stop_event, interval=0.5): def run_flow_with_progress(engine_flow, ctx, project, stderr): - from chipcompiler.data import StateEnum, log_flow - - from chipcompiler.cli.output import disclosure_cmd, normalize_step_name, normalize_state - color = supports_color(stderr, ctx.output_mode) renderer = RunProgressRenderer(stderr, color=color) engine_flow.workspace.home.reset() @@ -193,6 +253,26 @@ def run_flow_with_progress(engine_flow, ctx, project, stderr): renderer.finish_step(step_token, tool, status, runtime, rel_log, inspect, is_success) if not is_success: + _maybe_render_failure_context(renderer, log_path, rel_log, step_token, + project, ctx.run_id, color) return False return True + + +def _maybe_render_failure_context(renderer, log_path, rel_log, step_token, + project, run_id, color): + if not log_path or not os.path.isfile(log_path): + return + try: + with open(log_path, "r", errors="replace") as f: + log_lines = f.readlines() + except OSError: + return + if not log_lines: + return + + ctx_lines = extract_error_context(log_lines) + full_cmd = disclosure_cmd(f"ecc log {step_token}", project, run_id) + block = format_error_context(rel_log, ctx_lines, full_cmd, color=color) + renderer.render_failure_context(block) diff --git a/test/cli/test_log_view.py b/test/cli/test_log_view.py index 4cbc2d5d..3964c715 100644 --- a/test/cli/test_log_view.py +++ b/test/cli/test_log_view.py @@ -5,6 +5,7 @@ annotate_log_lines, build_log_records, classify_line, + extract_error_context, render_log_listing_pretty, render_log_plain, render_log_pretty, @@ -329,6 +330,193 @@ def test_traceback_colored_yellow(self): assert "\x1b[33m" in out +# --------------------------------------------------------------------------- +# Full error line coloring (AC-2) +# --------------------------------------------------------------------------- + + +class TestErrorLineFullColoring: + def test_error_label_and_message_both_red(self): + from io import StringIO + buf = StringIO() + render_log_pretty("cts", "log/cts.log", ["Error: something failed"], "ecc log cts", file=buf, color=True) + out = buf.getvalue() + + red_idx = out.find("\x1b[31m") + assert red_idx >= 0 + reset_idx = out.find("\x1b[0m", red_idx) + assert reset_idx > red_idx + between = out[red_idx:reset_idx] + assert 
"error" in between + assert "something failed" in between + + def test_error_message_content_not_default_after_label(self): + from io import StringIO + buf = StringIO() + render_log_pretty("cts", "log/cts.log", ["Error: critical failure"], "ecc log cts", file=buf, color=True) + out = buf.getvalue() + idx = out.find("error") + after_label = out[idx:] + assert "critical failure" in after_label + + def test_warning_line_keeps_label_only_color(self): + from io import StringIO + buf = StringIO() + render_log_pretty("cts", "log/cts.log", ["Warning: check this"], "ecc log cts", file=buf, color=True) + out = buf.getvalue() + assert "\x1b[33m" in out + assert "Warning: check this" in out + + def test_info_plain_section_unchanged(self): + from io import StringIO + buf = StringIO() + lines = ["INFO: running", "some plain text", "---"] + render_log_pretty("cts", "log/cts.log", lines, "ecc log cts", file=buf, color=True) + out = buf.getvalue() + assert "\x1b[34m" in out + assert "some plain text" in out + assert "---" in out + + def test_error_line_no_ansi_when_color_disabled(self): + from io import StringIO + buf = StringIO() + render_log_pretty("cts", "log/cts.log", ["Error: bad"], "ecc log cts", file=buf, color=False) + out = buf.getvalue() + assert "\x1b[" not in out + + def test_non_error_lines_not_colored_red(self): + from io import StringIO + lines = ["Warning: meh", "INFO: ok", "plain", "---"] + buf = StringIO() + render_log_pretty("cts", "log/cts.log", lines, "ecc log cts", file=buf, color=True) + out = buf.getvalue() + red_count = out.count("\x1b[31m") + assert red_count == 0 + + +# --------------------------------------------------------------------------- +# Context extraction (AC-3, AC-4) +# --------------------------------------------------------------------------- + + +class TestExtractErrorContextAnchor: + def test_last_error_wins(self): + lines = ["INFO: start", "Error: first", "plain", "Error: last", "INFO: end"] + result = extract_error_context(lines, max_lines=50) + kinds = [ll.kind for ll in result] + assert LineKind.ERROR in kinds + anchor_texts = [ll.text for ll in result if ll.kind == LineKind.ERROR] + assert "Error: last" in anchor_texts + + def test_traceback_when_no_error(self): + lines = [ + "INFO: start", + "Traceback (most recent call last):", + ' File "a.py", line 1', + "RuntimeError: boom", + ] + result = extract_error_context(lines, max_lines=50) + kinds = [ll.kind for ll in result] + assert LineKind.TRACEBACK in kinds + + def test_failed_keyword_when_no_error_or_traceback(self): + lines = ["INFO: start", "step failed: timeout", "plain after"] + result = extract_error_context(lines, max_lines=50) + texts = [ll.text for ll in result] + assert any("failed" in t.lower() for t in texts) + + def test_last_nonempty_when_no_failure(self): + lines = ["INFO: start", "some output", "final output"] + result = extract_error_context(lines, max_lines=50) + assert result[-1].text == "final output" + + def test_empty_input(self): + assert extract_error_context([], max_lines=50) == [] + + +class TestExtractErrorContextWindow: + def test_max_50_lines(self): + lines = [f"line {i}" for i in range(100)] + lines[80] = "Error: failure at 80" + result = extract_error_context(lines, max_lines=50) + assert len(result) <= 50 + + def test_preserves_line_numbers(self): + lines = [f"line {i}" for i in range(100)] + lines[30] = "Error: mid" + result = extract_error_context(lines, max_lines=50) + line_nos = [ll.line_no for ll in result] + assert line_nos == sorted(line_nos) + for ll in result: + 
assert ll.line_no >= 1 + assert ll.text == lines[ll.line_no - 1] + + def test_preserves_order(self): + lines = [f"line {i}" for i in range(10)] + lines[5] = "Error: mid" + result = extract_error_context(lines, max_lines=50) + line_nos = [ll.line_no for ll in result] + assert line_nos == sorted(line_nos) + + def test_fewer_than_max_returns_all(self): + lines = ["one", "Error: two", "three"] + result = extract_error_context(lines, max_lines=50) + assert len(result) == 3 + + def test_anchor_last_error_not_first(self): + lines = ["Error: first", "plain", "Error: last", "plain"] + result = extract_error_context(lines, max_lines=50) + error_lines = [ll for ll in result if ll.kind == LineKind.ERROR] + assert len(error_lines) >= 1 + + +class TestExtractErrorContextTraceback: + def test_traceback_includes_stack_frames(self): + lines = [ + "INFO: before", + "Traceback (most recent call last):", + ' File "a.py", line 10, in f', + ' File "b.py", line 20, in g', + "ValueError: bad value", + "INFO: after", + ] + result = extract_error_context(lines, max_lines=50) + kinds = [ll.kind for ll in result] + assert LineKind.TRACEBACK in kinds + traceback_texts = [ll.text for ll in result if ll.kind == LineKind.TRACEBACK] + assert any("File" in t for t in traceback_texts) + + def test_final_exception_visible_in_window(self): + lines = ["line " + str(i) for i in range(60)] + lines[52] = "Traceback (most recent call last):" + lines[53] = ' File "a.py", line 1, in run' + lines[54] = "ValueError: final exception" + lines[55] = "line 55" + result = extract_error_context(lines, max_lines=50) + texts = [ll.text for ll in result] + assert "ValueError: final exception" in texts + + def test_traceback_context_not_exceed_max(self): + lines = ["line " + str(i) for i in range(100)] + lines[60] = "Traceback (most recent call last):" + for i in range(61, 75): + lines[i] = f' File "mod{i}.py", line {i}' + lines[75] = "RuntimeError: deep traceback" + result = extract_error_context(lines, max_lines=50) + assert len(result) <= 50 + + def test_traceback_lines_in_order(self): + lines = [ + "Traceback (most recent call last):", + ' File "a.py", line 1', + ' File "b.py", line 2', + "ValueError: boom", + ] + result = extract_error_context(lines, max_lines=50) + line_nos = [ll.line_no for ll in result] + assert line_nos == sorted(line_nos) + + class TestPlainRenderer: def test_emits_one_record_per_line(self): from io import StringIO diff --git a/test/cli/test_pretty.py b/test/cli/test_pretty.py index f2625820..3aac8b06 100644 --- a/test/cli/test_pretty.py +++ b/test/cli/test_pretty.py @@ -452,6 +452,65 @@ def test_render_error_three_records_all_shown(self): assert reason in out +# --------------------------------------------------------------------------- +# Error code coloring (AC-1) +# --------------------------------------------------------------------------- + + +class TestErrorCodeColoring: + def test_arbitrary_error_code_colored_red(self): + from chipcompiler.cli.pretty import render_error + + buf = io.StringIO() + render_error([{"error": "missing_config", "reason": "no config found"}], file=buf, color=True) + out = buf.getvalue() + assert RED in out + assert "missing_config" in out + + def test_multiple_arbitrary_codes_colored_red(self): + from chipcompiler.cli.pretty import render_error + + buf = io.StringIO() + records = [ + {"error": "workspace_failed", "reason": "bad state"}, + {"error": "config_error", "reason": "invalid toml"}, + {"error": "invalid_parameter", "reason": "bad value"}, + ] + render_error(records, file=buf, 
color=True) + out = buf.getvalue() + for code in ("workspace_failed", "config_error", "invalid_parameter"): + assert code in out + assert RED in out + + def test_error_preserves_secondary_fields(self): + from chipcompiler.cli.pretty import render_error + + buf = io.StringIO() + render_error([{"error": "missing_config", "path": "/tmp/x", "reason": "gone"}], file=buf, color=True) + out = buf.getvalue() + assert "path:" in out + assert "/tmp/x" in out + assert "gone" in out + + def test_error_no_ansi_when_color_disabled(self): + from chipcompiler.cli.pretty import render_error + + buf = io.StringIO() + render_error([{"error": "missing_config", "reason": "bad"}], file=buf, color=False) + out = buf.getvalue() + assert "\x1b[" not in out + assert "missing_config" in out + + def test_unknown_error_code_not_white_by_default(self): + """Unknown error codes should still be red, not white or default.""" + from chipcompiler.cli.pretty import render_error + + buf = io.StringIO() + render_error([{"error": "unknown_code_xyz"}], file=buf, color=True) + out = buf.getvalue() + assert RED in out + + # --------------------------------------------------------------------------- # Shared color policy tests (Codex Round 1 finding) # --------------------------------------------------------------------------- diff --git a/test/cli/test_progress.py b/test/cli/test_progress.py index 354d8fae..e29ea29d 100644 --- a/test/cli/test_progress.py +++ b/test/cli/test_progress.py @@ -13,6 +13,7 @@ def _strip_ansi(text): from chipcompiler.cli.pretty import BOLD, CYAN, DIM, GREEN, RED, RESET from chipcompiler.cli.progress import ( RunProgressRenderer, + format_error_context, latest_log_line, run_flow_with_progress, sanitize_log_line, @@ -21,6 +22,7 @@ def _strip_ansi(text): supports_color, truncate_to_width, ) +from chipcompiler.cli.log_view import LineKind, LogLine from chipcompiler.cli.types import CommandContext, OutputMode from chipcompiler.data import StateEnum @@ -107,6 +109,10 @@ def test_disabled_jsonl(self): ctx = _make_ctx(OutputMode.JSONL) assert should_enable_run_progress(ctx, FakeTTYStderr(True)) is False + def test_disabled_plain(self): + ctx = _make_ctx(OutputMode.PLAIN) + assert should_enable_run_progress(ctx, FakeTTYStderr(True)) is False + def test_disabled_no_tty(self): ctx = _make_ctx(OutputMode.TEXT) assert should_enable_run_progress(ctx, FakeTTYStderr(False)) is False @@ -624,3 +630,179 @@ def test_color_disabled_for_non_tty(self): output = "".join(buf.written) for code in (BOLD, CYAN, GREEN, RED, DIM): assert code not in output + + +# --------------------------------------------------------------------------- +# Failure context block formatting (AC-5) +# --------------------------------------------------------------------------- + + +class TestFormatErrorContext: + def test_first_line_is_error_log_path(self): + ctx_lines = [LogLine(10, LineKind.ERROR, "Error: something")] + out = format_error_context("log/synthesis.log", ctx_lines, "ecc log synthesis", color=False) + assert out.startswith("error: log/synthesis.log") + + def test_includes_numbered_context_lines(self): + ctx_lines = [ + LogLine(8, LineKind.INFO, "INFO: before"), + LogLine(9, LineKind.WARNING, "Warning: careful"), + LogLine(10, LineKind.ERROR, "Error: failed"), + ] + out = format_error_context("log/synthesis.log", ctx_lines, "ecc log synthesis", color=False) + for ll in ctx_lines: + assert str(ll.line_no) in out + assert ll.text in out + + def test_compact_kind_labels(self): + ctx_lines = [ + LogLine(5, LineKind.ERROR, "bad"), + LogLine(6, 
LineKind.WARNING, "meh"), + LogLine(7, LineKind.TRACEBACK, " File ..."), + LogLine(8, LineKind.INFO, "ok"), + ] + out = format_error_context("log/p.log", ctx_lines, "ecc log step", color=False) + assert "ERROR" in out + assert "WARN" in out + assert "TRACE" in out + assert "INFO" in out + + def test_footer_includes_for_more_log_info(self): + ctx_lines = [LogLine(1, LineKind.ERROR, "failed")] + out = format_error_context("log/p.log", ctx_lines, "ecc log synthesis --project myproj", color=False) + assert "For more log info:" in out + assert "ecc log synthesis --project myproj" in out + + def test_footer_includes_command_grep_field(self): + ctx_lines = [LogLine(1, LineKind.ERROR, "failed")] + log_cmd = "ecc log synthesis --project myproj --run-id abc123" + out = format_error_context("log/p.log", ctx_lines, log_cmd, color=False) + assert 'command="ecc log synthesis --project myproj --run-id abc123"' in out + + def test_project_and_run_id_preserved_in_footer(self): + ctx_lines = [LogLine(1, LineKind.ERROR, "failed")] + log_cmd = "ecc log synthesis --project /path/to/proj --run-id run42" + out = format_error_context("log/p.log", ctx_lines, log_cmd, color=False) + assert "--project /path/to/proj" in out + assert "--run-id run42" in out + + def test_color_gating_no_ansi_when_disabled(self): + ctx_lines = [LogLine(10, LineKind.ERROR, "Error: bad")] + out = format_error_context("log/p.log", ctx_lines, "ecc log step", color=False) + assert "\x1b[" not in out + + def test_color_gating_ansi_when_enabled(self): + ctx_lines = [LogLine(10, LineKind.ERROR, "Error: bad")] + out = format_error_context("log/p.log", ctx_lines, "ecc log step", color=True) + assert "\x1b[" in out + + def test_line_number_padding_consistent(self): + ctx_lines = [ + LogLine(1, LineKind.PLAIN, "first"), + LogLine(10, LineKind.ERROR, "error"), + LogLine(100, LineKind.PLAIN, "hundred"), + ] + out = format_error_context("log/p.log", ctx_lines, "ecc log step", color=False) + lines = out.strip().split("\n") + context_lines = [l for l in lines if l.strip() and not l.startswith("error:") and not l.startswith("For") and not l.startswith("command=")] + for line in context_lines: + assert line.startswith(" ") + + def test_empty_context(self): + out = format_error_context("log/p.log", [], "ecc log step", color=False) + assert "error: log/p.log" in out + assert "For more log info:" in out + + +# --------------------------------------------------------------------------- +# Failure context progress integration (AC-6) +# --------------------------------------------------------------------------- + + +class TestFailureContextIntegration: + def test_failed_step_prints_context_block(self, tmp_path): + log_file = tmp_path / "synth.log" + log_file.write_text("line 1\nline 2\nError: something failed\nline 4\n") + + def fail_step(self, s): + return StateEnum.Imcomplete + + flow = _make_flow( + _make_ws(str(tmp_path)), + [_make_step("Synthesis", "yosys", str(log_file))], + fail_step, + ) + + buf = FakeTTYStderr(True) + result = run_flow_with_progress(flow, _make_ctx(), "myproj", buf) + assert result is False + plain = _strip_ansi("".join(buf.written)) + assert "error:" in plain + assert "For more log info:" in plain + assert 'command="' in plain + + def test_successful_step_no_context_block(self, tmp_path): + log_file = tmp_path / "synth.log" + log_file.write_text("line 1\nline 2\nall good\n") + + flow = _make_flow( + _make_ws(str(tmp_path)), + [_make_step("Synthesis", "yosys", str(log_file))], + lambda self, s: StateEnum.Success, + ) + + buf = 
FakeTTYStderr(True) + result = run_flow_with_progress(flow, _make_ctx(), "myproj", buf) + assert result is True + plain = _strip_ansi("".join(buf.written)) + assert "error:" not in plain + assert "For more log info:" not in plain + + def test_missing_log_no_context_block(self): + flow = _make_flow( + _make_ws(), + [_make_step("Synthesis", "yosys", "/nonexistent/synth.log")], + lambda self, s: StateEnum.Imcomplete, + ) + + buf = FakeTTYStderr(True) + result = run_flow_with_progress(flow, _make_ctx(), "myproj", buf) + assert result is False + plain = _strip_ansi("".join(buf.written)) + assert "error:" not in plain + assert "For more log info:" not in plain + assert "log:" in plain + assert "inspect:" in plain + + def test_empty_log_no_context_block(self, tmp_path): + log_file = tmp_path / "empty.log" + log_file.write_text("") + + flow = _make_flow( + _make_ws(str(tmp_path)), + [_make_step("Synthesis", "yosys", str(log_file))], + lambda self, s: StateEnum.Imcomplete, + ) + + buf = FakeTTYStderr(True) + result = run_flow_with_progress(flow, _make_ctx(), "myproj", buf) + assert result is False + plain = _strip_ansi("".join(buf.written)) + assert "For more log info:" not in plain + + def test_existing_log_and_inspect_lines_remain(self, tmp_path): + log_file = tmp_path / "synth.log" + log_file.write_text("line 1\nError: fail\nline 3\n") + + flow = _make_flow( + _make_ws(str(tmp_path)), + [_make_step("Synthesis", "yosys", str(log_file))], + lambda self, s: StateEnum.Imcomplete, + ) + + buf = FakeTTYStderr(True) + result = run_flow_with_progress(flow, _make_ctx(), "myproj", buf) + assert result is False + plain = _strip_ansi("".join(buf.written)) + assert "log:" in plain + assert "inspect:" in plain From 3a651d269af42e1547bc88921a27f7989f6969b3 Mon Sep 17 00:00:00 2001 From: Emin Date: Fri, 8 May 2026 15:50:28 +0800 Subject: [PATCH 096/104] fix(cli): normalize log line terminators for compact context block Use splitlines() instead of readlines() in _maybe_render_failure_context() to strip trailing newlines, preventing blank separator lines between numbered context rows in the failure output block. 
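A small, self-contained illustration (plain Python, outside the patch) of the terminator behaviour the fix relies on: elements that keep their trailing newline, as readlines() returns them, gain a second terminator when the renderer appends its own, which is exactly where the blank separator rows came from; splitlines() strips the terminator up front.

    # Illustrative data only; no project code involved.
    raw = "line one\nError: boom\nline four\n"
    stripped = raw.splitlines()                    # ['line one', 'Error: boom', 'line four']
    kept = raw.splitlines(keepends=True)           # ['line one\n', 'Error: boom\n', 'line four\n']
    print(stripped)
    print(kept)
    # Appending "\n" to elements that already end in "\n" doubles the
    # terminator and produces the blank rows between context lines.
    print("".join(f"  {s}\n" for s in kept), end="")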
Co-Authored-By: Claude Opus 4.7 (1M context) --- chipcompiler/cli/progress.py | 3 ++- test/cli/test_progress.py | 33 +++++++++++++++++++++++++++++++++ 2 files changed, 35 insertions(+), 1 deletion(-) diff --git a/chipcompiler/cli/progress.py b/chipcompiler/cli/progress.py index 6f4b0026..21e68627 100644 --- a/chipcompiler/cli/progress.py +++ b/chipcompiler/cli/progress.py @@ -266,9 +266,10 @@ def _maybe_render_failure_context(renderer, log_path, rel_log, step_token, return try: with open(log_path, "r", errors="replace") as f: - log_lines = f.readlines() + raw = f.read() except OSError: return + log_lines = raw.splitlines() if not log_lines: return diff --git a/test/cli/test_progress.py b/test/cli/test_progress.py index e29ea29d..2b0c376a 100644 --- a/test/cli/test_progress.py +++ b/test/cli/test_progress.py @@ -806,3 +806,36 @@ def test_existing_log_and_inspect_lines_remain(self, tmp_path): plain = _strip_ansi("".join(buf.written)) assert "log:" in plain assert "inspect:" in plain + + def test_context_block_no_blank_lines_between_rows(self, tmp_path): + log_file = tmp_path / "synth.log" + log_file.write_text("line one\nline two\nError: boom\nline four\n") + + flow = _make_flow( + _make_ws(str(tmp_path)), + [_make_step("Synthesis", "yosys", str(log_file))], + lambda self, s: StateEnum.Imcomplete, + ) + + buf = FakeTTYStderr(True) + result = run_flow_with_progress(flow, _make_ctx(), "myproj", buf) + assert result is False + raw = "".join(buf.written) + section_start = raw.find("error:") + section_end = raw.find("command=", section_start) + assert section_start >= 0 + assert section_end > section_start + block = raw[section_start:section_end] + plain_block = _strip_ansi(block) + context_rows = [ + l for l in plain_block.split("\n") + if l.strip() and not l.startswith("error:") + and not l.startswith("For") + ] + assert len(context_rows) > 0 + for row in context_rows: + assert row.startswith(" ") + assert row.strip() != "" + stripped = row.strip() + assert "\n" not in stripped + assert "\r" not in stripped From b5c262735cab2815ca87976010c159efcf1f89cd Mon Sep 17 00:00:00 2001 From: Emin Date: Fri, 8 May 2026 15:57:58 +0800 Subject: [PATCH 097/104] test(cli): tighten context block regression to inspect unfiltered rows Rewrite test_context_block_no_blank_lines_between_rows to inspect the raw context block slice between the error header and footer without filtering blank lines. The old test discarded blank rows before asserting none existed, so it would pass against the double-spaced readlines() output. Now it slices the block between "error:" and "For more log info:" markers and asserts every body row is non-empty. Verified: test fails against the old readlines() path and passes against the splitlines() fix. 
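A tiny illustration, not taken from the test file, of why the original assertion could not fail: filtering out blank rows before asserting that no blank rows exist turns the check into a tautology, while asserting over the unfiltered slice catches the regression.

    # Illustrative rows standing in for the rendered context block.
    rows = ["   1 ERROR Error: boom", "", "   2       line four"]
    filtered = [r for r in rows if r.strip()]
    print(all(r.strip() for r in filtered))  # True  -- old-style check passes despite the blank row
    print(all(r.strip() for r in rows))      # False -- checking the unfiltered rows catches it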
Co-Authored-By: Claude Opus 4.7 (1M context) --- test/cli/test_progress.py | 32 +++++++++++++++----------------- 1 file changed, 15 insertions(+), 17 deletions(-) diff --git a/test/cli/test_progress.py b/test/cli/test_progress.py index 2b0c376a..6ef73cc4 100644 --- a/test/cli/test_progress.py +++ b/test/cli/test_progress.py @@ -821,21 +821,19 @@ def test_context_block_no_blank_lines_between_rows(self, tmp_path): result = run_flow_with_progress(flow, _make_ctx(), "myproj", buf) assert result is False raw = "".join(buf.written) - section_start = raw.find("error:") - section_end = raw.find("command=", section_start) - assert section_start >= 0 - assert section_end > section_start - block = raw[section_start:section_end] + + header_pos = raw.find("error:") + footer_pos = raw.find("For more log info:", header_pos) + assert header_pos >= 0 + assert footer_pos > header_pos + + block = raw[header_pos:footer_pos] plain_block = _strip_ansi(block) - context_rows = [ - l for l in plain_block.split("\n") - if l.strip() and not l.startswith("error:") - and not l.startswith("For") - ] - assert len(context_rows) > 0 - for row in context_rows: - assert row.startswith(" ") - assert row.strip() != "" - stripped = row.strip() - assert "\n" not in stripped - assert "\r" not in stripped + all_lines = plain_block.rstrip("\n").split("\n") + + body_lines = [l for l in all_lines if not l.startswith("error:")] + assert len(body_lines) > 0 + + for i, line in enumerate(body_lines): + assert line.strip() != "", f"blank line at index {i} in context block: {body_lines!r}" + assert line.startswith(" "), f"context row not indented at index {i}: {line!r}" From 62e80df4c4e5ac1d1b6a0da6a0d1e7cf0a0cfe0f Mon Sep 17 00:00:00 2001 From: Emin Date: Fri, 8 May 2026 16:28:36 +0800 Subject: [PATCH 098/104] chore: bump ecc-dreamplace submodule Signed-off-by: Emin --- chipcompiler/thirdparty/ecc-dreamplace | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/chipcompiler/thirdparty/ecc-dreamplace b/chipcompiler/thirdparty/ecc-dreamplace index a79d177b..884af86c 160000 --- a/chipcompiler/thirdparty/ecc-dreamplace +++ b/chipcompiler/thirdparty/ecc-dreamplace @@ -1 +1 @@ -Subproject commit a79d177b6abff670de9a778416c2a9fc27db76c9 +Subproject commit 884af86c66aa6f6e9c2644d7af6b0ee021f8e48c From 667edd7573c396e6ac8c5e092442fc7e99bdbdb3 Mon Sep 17 00:00:00 2001 From: Emin Date: Fri, 8 May 2026 16:34:25 +0800 Subject: [PATCH 099/104] fix(build): update DreamPlace uv source URL to match 0.1.0a2 pin The pyproject.toml version pin was bumped to 0.1.0a2 but [tool.uv.sources] and uv.lock still referenced the v0.1.0-alpha.1 wheel, breaking uv sync --frozen and bazel run //:prepare_dev. Update the source URL to the v0.1.0-alpha.2 release and regenerate uv.lock. 
Co-Authored-By: Claude Opus 4.7 (1M context) --- pyproject.toml | 2 +- uv.lock | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index d563add0..94cba48a 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -76,7 +76,7 @@ url = "https://download.pytorch.org/whl/cpu" explicit = true [tool.uv.sources] -ecc-dreamplace = { url = "https://github.com/openecos-projects/ecc-dreamplace/releases/download/v0.1.0-alpha.1/ecc_dreamplace-0.1.0a1-py3-none-manylinux_2_34_x86_64.whl" } +ecc-dreamplace = { url = "https://github.com/openecos-projects/ecc-dreamplace/releases/download/v0.1.0-alpha.2/ecc_dreamplace-0.1.0a2-py3-none-manylinux_2_34_x86_64.whl" } ecc-tools = { url = "https://github.com/openecos-projects/ecc-tools/releases/download/v0.1.0-alpha.2/ecc_tools-0.1.0a2-py3-none-manylinux_2_34_x86_64.whl" } torch = { index = "pytorch-cpu" } diff --git a/uv.lock b/uv.lock index e1916b34..3bdad99d 100644 --- a/uv.lock +++ b/uv.lock @@ -470,7 +470,7 @@ dev = [ [package.metadata] requires-dist = [ - { name = "ecc-dreamplace", url = "https://github.com/openecos-projects/ecc-dreamplace/releases/download/v0.1.0-alpha.1/ecc_dreamplace-0.1.0a1-py3-none-manylinux_2_34_x86_64.whl" }, + { name = "ecc-dreamplace", url = "https://github.com/openecos-projects/ecc-dreamplace/releases/download/v0.1.0-alpha.2/ecc_dreamplace-0.1.0a2-py3-none-manylinux_2_34_x86_64.whl" }, { name = "ecc-tools", url = "https://github.com/openecos-projects/ecc-tools/releases/download/v0.1.0-alpha.2/ecc_tools-0.1.0a2-py3-none-manylinux_2_34_x86_64.whl" }, { name = "fastapi", specifier = ">=0.109" }, { name = "klayout", specifier = ">=0.30.2" }, @@ -506,8 +506,8 @@ dev = [ [[package]] name = "ecc-dreamplace" -version = "0.1.0a1" -source = { url = "https://github.com/openecos-projects/ecc-dreamplace/releases/download/v0.1.0-alpha.1/ecc_dreamplace-0.1.0a1-py3-none-manylinux_2_34_x86_64.whl" } +version = "0.1.0a2" +source = { url = "https://github.com/openecos-projects/ecc-dreamplace/releases/download/v0.1.0-alpha.2/ecc_dreamplace-0.1.0a2-py3-none-manylinux_2_34_x86_64.whl" } dependencies = [ { name = "cairocffi" }, { name = "configspace" }, @@ -530,7 +530,7 @@ dependencies = [ { name = "xgboost" }, ] wheels = [ - { url = "https://github.com/openecos-projects/ecc-dreamplace/releases/download/v0.1.0-alpha.1/ecc_dreamplace-0.1.0a1-py3-none-manylinux_2_34_x86_64.whl", hash = "sha256:212139c43f825498968eda10309959b99cd93aec0744d182a17bb5d34b53145e" }, + { url = "https://github.com/openecos-projects/ecc-dreamplace/releases/download/v0.1.0-alpha.2/ecc_dreamplace-0.1.0a2-py3-none-manylinux_2_34_x86_64.whl", hash = "sha256:fad0e489bfba62f79c193e2e0ec5051a492768e2a3d6099aa5e604c08abb191f" }, ] [package.metadata] From 0bd8681fbe9254ae256211acf2e63c276bd7fe5a Mon Sep 17 00:00:00 2001 From: Emin Date: Fri, 8 May 2026 17:18:54 +0800 Subject: [PATCH 100/104] refactor(cli): deduplicate kind label/color mappings between log_view and progress Import _KIND_COLOR and _KIND_LABEL from log_view instead of maintaining duplicate _KIND_COLOR_CONTEXT dict in progress.py. Derive compact labels from log_view labels via .upper() instead of literal dictionary. Remove unused YELLOW/BLUE imports. 
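The derivation described above, sketched in isolation; the string keys and label values here are stand-ins for the LineKind enum members and padded labels defined in log_view, used only to show the single-source-of-truth pattern.

    # One source of truth: the padded lower-case labels; the compact
    # variants are derived rather than maintained as a second dictionary.
    _KIND_LABEL = {"error": "error", "warning": "warn ", "info": "info "}  # assumed shape
    _KIND_LABEL_COMPACT = {k: v.upper() for k, v in _KIND_LABEL.items()}
    print(_KIND_LABEL_COMPACT)  # {'error': 'ERROR', 'warning': 'WARN ', 'info': 'INFO '}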
Co-Authored-By: Claude Opus 4.7 (1M context) --- chipcompiler/cli/progress.py | 25 +++++-------------------- 1 file changed, 5 insertions(+), 20 deletions(-) diff --git a/chipcompiler/cli/progress.py b/chipcompiler/cli/progress.py index 21e68627..3d2a9f05 100644 --- a/chipcompiler/cli/progress.py +++ b/chipcompiler/cli/progress.py @@ -4,9 +4,9 @@ import threading import time -from chipcompiler.cli.log_view import LineKind, extract_error_context +from chipcompiler.cli.log_view import LineKind, _KIND_COLOR, _KIND_LABEL, extract_error_context from chipcompiler.cli.output import disclosure_cmd, normalize_step_name, normalize_state -from chipcompiler.cli.pretty import BOLD, DIM, CYAN, GREEN, RED, RESET, YELLOW, BLUE, style as _style +from chipcompiler.cli.pretty import BOLD, DIM, CYAN, GREEN, RED, RESET, style as _style from chipcompiler.cli.types import OutputMode from chipcompiler.data import StateEnum, log_flow @@ -50,22 +50,7 @@ def truncate_to_width(text, width): # --- Failure context block formatting --- -_KIND_LABEL_COMPACT = { - LineKind.ERROR: "ERROR", - LineKind.WARNING: "WARN ", - LineKind.TRACEBACK: "TRACE", - LineKind.INFO: "INFO ", - LineKind.SECTION: "-----", - LineKind.PLAIN: " ", -} - -_KIND_COLOR_CONTEXT = { - LineKind.ERROR: RED, - LineKind.WARNING: YELLOW, - LineKind.TRACEBACK: YELLOW, - LineKind.INFO: BLUE, - LineKind.SECTION: CYAN, -} +_KIND_LABEL_COMPACT = {k: v.upper() for k, v in _KIND_LABEL.items()} def format_error_context(log_path, context_lines, log_cmd, color=True): @@ -90,8 +75,8 @@ def format_error_context(log_path, context_lines, log_cmd, color=True): no = str(ll.line_no).rjust(width) label = _KIND_LABEL_COMPACT[ll.kind] - if color and ll.kind in _KIND_COLOR_CONTEXT: - code = _KIND_COLOR_CONTEXT[ll.kind] + if color and ll.kind in _KIND_COLOR: + code = _KIND_COLOR[ll.kind] if ll.kind == LineKind.ERROR: lines.append(f" {no} {code}{label} {ll.text}{RESET}") else: From daefa8836b00b20715db33f98b597b798643c1ae Mon Sep 17 00:00:00 2001 From: Emin Date: Fri, 8 May 2026 18:08:05 +0800 Subject: [PATCH 101/104] feat(cli): add flow-ordered log listing with tail previews - Add listing_step_order() to inspect.py for flow.json-aware step ordering with alphabetical fallback on missing/corrupt flow data - Add tail_lines_for_log() to log_view.py for sanitized 10-line tail preview - Update render_log_listing_pretty() with optional tail_map for tail blocks - Update handlers.log() to use flow order instead of alphabetical sort - Compute tail previews only in TEXT mode via _render_log_text() (plain/json/jsonl modes remain tail-free) - Add 31 tests: flow order, tail extraction, renderer, machine-mode, regression --- chipcompiler/cli/handlers.py | 6 +- chipcompiler/cli/inspect.py | 21 +++ chipcompiler/cli/log_view.py | 23 +++ chipcompiler/cli/main.py | 21 ++- test/cli/test_cli_main.py | 268 +++++++++++++++++++++++++++++++++++ test/cli/test_log_view.py | 139 ++++++++++++++++++ 6 files changed, 471 insertions(+), 7 deletions(-) diff --git a/chipcompiler/cli/handlers.py b/chipcompiler/cli/handlers.py index 5248ed8a..1e57012a 100644 --- a/chipcompiler/cli/handlers.py +++ b/chipcompiler/cli/handlers.py @@ -92,8 +92,9 @@ def log(args, ctx: CommandContext) -> CommandResult: from chipcompiler.cli.inspect import ( discover_logs, discover_step_dirs, + get_flow_step_names, + listing_step_order, ) - from chipcompiler.cli.inspect import get_flow_step_names from chipcompiler.cli.log_view import build_log_records step_token = args.step @@ -108,8 +109,7 @@ def log(args, ctx: CommandContext) -> 
CommandResult: "inspect_cmd": disclosure_cmd("ecc log", project, ctx.run_id), }) - step_dirs = discover_step_dirs(ctx.run_dir) - for token in sorted(step_dirs): + for token in listing_step_order(ctx.run_dir): for lf in discover_logs(ctx.run_dir, token): records.append({ "step": token, diff --git a/chipcompiler/cli/inspect.py b/chipcompiler/cli/inspect.py index e059f17e..9836e6c8 100644 --- a/chipcompiler/cli/inspect.py +++ b/chipcompiler/cli/inspect.py @@ -168,3 +168,24 @@ def _internal_from_token(token: str) -> str: "filler": "filler", } return reverse.get(token, token) + + +def listing_step_order(run_dir: str) -> list[str]: + """Return step tokens in flow.json order, with undiscovered extras alphabetically after.""" + step_dirs = discover_step_dirs(run_dir) + if not step_dirs: + return [] + + flow_data = read_flow_json(run_dir) + if isinstance(flow_data, dict): + flow_tokens = [ + normalize_step_name(s.get("name", "")) + for s in _safe_steps(flow_data) + if s.get("name") + ] + flow_set = set(flow_tokens) + result = [t for t in flow_tokens if t in step_dirs] + result.extend(sorted(t for t in step_dirs if t not in flow_set)) + return result + + return sorted(step_dirs) diff --git a/chipcompiler/cli/log_view.py b/chipcompiler/cli/log_view.py index c2c7f43c..f9f7f334 100644 --- a/chipcompiler/cli/log_view.py +++ b/chipcompiler/cli/log_view.py @@ -182,10 +182,25 @@ def render_log_records_plain(records, file=None) -> None: _render_plain_record(rec, target) +def tail_lines_for_log(path: str, max_lines: int = 10) -> list[str]: + """Return up to max_lines non-empty sanitized lines from the end of a log file.""" + try: + with open(path, errors="replace") as f: + raw = f.read().splitlines() + except OSError: + return [] + + from chipcompiler.cli.progress import sanitize_log_line + sanitized = [sanitize_log_line(line) for line in raw] + non_empty = [line for line in sanitized if line] + return non_empty[-max_lines:] + + def render_log_listing_pretty( records: list[dict], file=None, color: bool = True, + tail_map: dict | None = None, ) -> None: target = file or sys.stdout @@ -203,6 +218,14 @@ def render_log_listing_pretty( step_label = "" target.write(f"{step_label} {source}\n") + + if tail_map and source in tail_map: + tail_lines = tail_map[source] + if tail_lines: + tail_label = f" {style('tail:', DIM, color)}" if color else " tail:" + for tl in tail_lines: + target.write(f"{tail_label} {tl}\n") + inspect_label = f" {style('inspect:', DIM, color)}" if color else " inspect:" target.write(f"{inspect_label} {inspect}\n") diff --git a/chipcompiler/cli/main.py b/chipcompiler/cli/main.py index dc61b5f7..c1d4f1c3 100644 --- a/chipcompiler/cli/main.py +++ b/chipcompiler/cli/main.py @@ -177,10 +177,11 @@ def _should_colorize(): return supports_color(file=sys.stdout) -def _render_log_text(args, result, color=True) -> None: +def _render_log_text(args, result, color=True, run_dir=None) -> None: from chipcompiler.cli.log_view import ( render_log_listing_pretty, render_log_pretty, + tail_lines_for_log, ) from chipcompiler.cli.pretty import render_error, render_generic_block @@ -226,8 +227,20 @@ def _render_log_text(args, result, color=True) -> None: ) return - # Listing mode - render_log_listing_pretty(list(records), color=color) + # Listing mode: compute tail previews only for pretty text output + tail_map = None + if run_dir: + tail_map = {} + for rec in records: + source = rec.get("source") or rec.get("log", "") + if not source: + continue + full_path = os.path.join(run_dir, source) + lines = 
tail_lines_for_log(full_path) + if lines: + tail_map[source] = lines + + render_log_listing_pretty(list(records), color=color, tail_map=tail_map) def _render_log_plain(result) -> None: @@ -265,7 +278,7 @@ def run(argv: Sequence[str] | None = None) -> int: if args.command == "param" and ctx.output_mode == OutputMode.TEXT: _render_param_text(args, result, color=color) elif args.command == "log" and ctx.output_mode == OutputMode.TEXT: - _render_log_text(args, result, color=color) + _render_log_text(args, result, color=color, run_dir=ctx.run_dir) elif args.command == "log" and ctx.output_mode == OutputMode.PLAIN: _render_log_plain(result) else: diff --git a/test/cli/test_cli_main.py b/test/cli/test_cli_main.py index 1726b352..75e9346b 100644 --- a/test/cli/test_cli_main.py +++ b/test/cli/test_cli_main.py @@ -1590,3 +1590,271 @@ def test_metrics_flow_only_step_is_missing(self, tmp_path, capsys): assert data["records"][0].get("status") == "missing" assert data["records"][0].get("status") != "unknown_step" + +class TestLogListingFlowOrder: + """Listing step logs follow flow.json order, not alphabetical.""" + + def _setup_steps_with_flow(self, tmp_path, step_names, extra_dirs=None): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + _create_flow_json(run_dir, steps=[ + {"name": n, "tool": "ecc", "state": "Success"} for n in step_names + ]) + all_dirs = list(step_names) + (extra_dirs or []) + tool_map = { + "Synthesis": "yosys", "Floorplan": "ecc", "fixFanout": "ecc", + "place": "ecc", "CTS": "ecc", "legalization": "ecc", + "route": "ecc", "drc": "ecc", "filler": "ecc", + } + for name in all_dirs: + tool = tool_map.get(name, "ecc") + step_dir = os.path.join(run_dir, f"{name}_{tool}", "log") + os.makedirs(step_dir, exist_ok=True) + with open(os.path.join(step_dir, f"{name.lower()}.log"), "w") as f: + f.write(f"log from {name}\n") + return project_dir + + def test_steps_follow_flow_json_order(self, tmp_path, capsys): + project_dir = self._setup_steps_with_flow( + tmp_path, + ["Synthesis", "Floorplan", "CTS"], + ) + rc = cli_main.run(["log", "--jsonl", "--project", project_dir]) + assert rc == 0 + records = [json.loads(ln) for ln in capsys.readouterr().out.strip().split("\n") if ln.strip()] + steps = [r.get("step") for r in records if "step" in r] + assert steps == ["synthesis", "floorplan", "cts"] + + def test_run_level_logs_before_step_logs(self, tmp_path, capsys): + project_dir = self._setup_steps_with_flow( + tmp_path, + ["Synthesis", "CTS"], + ) + run_dir = os.path.join(project_dir, "runs", "default") + log_dir = os.path.join(run_dir, "log") + os.makedirs(log_dir, exist_ok=True) + with open(os.path.join(log_dir, "flow.log"), "w") as f: + f.write("run-level log\n") + rc = cli_main.run(["log", "--jsonl", "--project", project_dir]) + assert rc == 0 + records = [json.loads(ln) for ln in capsys.readouterr().out.strip().split("\n") if ln.strip()] + run_indices = [i for i, r in enumerate(records) if "log" in r and "step" not in r] + step_indices = [i for i, r in enumerate(records) if "step" in r] + assert run_indices, "expected at least one run-level record" + assert step_indices, "expected at least one step record" + assert max(run_indices) < min(step_indices) + + def test_extra_steps_after_flow_steps(self, tmp_path, capsys): + project_dir = self._setup_steps_with_flow( + tmp_path, + ["Synthesis", "CTS"], + extra_dirs=["Floorplan"], + ) + rc = cli_main.run(["log", "--jsonl", "--project", project_dir]) + assert rc == 0 + records = [json.loads(ln) 
for ln in capsys.readouterr().out.strip().split("\n") if ln.strip()] + steps = [r.get("step") for r in records if "step" in r] + synth_idx = steps.index("synthesis") + cts_idx = steps.index("cts") + fp_idx = steps.index("floorplan") + assert synth_idx < cts_idx + assert cts_idx < fp_idx + + def test_extra_steps_sorted_alphabetically(self, tmp_path, capsys): + project_dir = self._setup_steps_with_flow( + tmp_path, + ["Synthesis"], + extra_dirs=["Floorplan", "CTS"], + ) + rc = cli_main.run(["log", "--jsonl", "--project", project_dir]) + assert rc == 0 + records = [json.loads(ln) for ln in capsys.readouterr().out.strip().split("\n") if ln.strip()] + steps = [r.get("step") for r in records if "step" in r] + extras = [s for s in steps if s != "synthesis"] + assert extras == sorted(extras) + + def test_missing_flow_json_falls_back_to_alphabetical(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + os.makedirs(run_dir, exist_ok=True) + for name in ["CTS_ecc", "Floorplan_ecc", "Synthesis_yosys"]: + step_dir = os.path.join(run_dir, name, "log") + os.makedirs(step_dir, exist_ok=True) + with open(os.path.join(step_dir, "test.log"), "w") as f: + f.write("content\n") + rc = cli_main.run(["log", "--jsonl", "--project", project_dir]) + assert rc == 0 + records = [json.loads(ln) for ln in capsys.readouterr().out.strip().split("\n") if ln.strip()] + steps = [r.get("step") for r in records if "step" in r] + assert steps == sorted(steps) + + def test_corrupt_flow_json_falls_back_to_alphabetical(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + home = os.path.join(run_dir, "home") + os.makedirs(home, exist_ok=True) + with open(os.path.join(home, "flow.json"), "w") as f: + f.write("not valid json{{{") + for name in ["CTS_ecc", "Floorplan_ecc", "Synthesis_yosys"]: + step_dir = os.path.join(run_dir, name, "log") + os.makedirs(step_dir, exist_ok=True) + with open(os.path.join(step_dir, "test.log"), "w") as f: + f.write("content\n") + rc = cli_main.run(["log", "--jsonl", "--project", project_dir]) + assert rc == 0 + records = [json.loads(ln) for ln in capsys.readouterr().out.strip().split("\n") if ln.strip()] + steps = [r.get("step") for r in records if "step" in r] + assert steps == sorted(steps) + + +class TestLogListingTailPreview: + """Tail preview shows up to 10 lines in default pretty text mode.""" + + def test_listing_shows_tail_lines(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + step_dir = os.path.join(run_dir, "Synthesis_yosys", "log") + os.makedirs(step_dir, exist_ok=True) + log_path = os.path.join(step_dir, "synthesis.log") + lines = [f"log line {i}" for i in range(15)] + with open(log_path, "w") as f: + f.write("\n".join(lines) + "\n") + rc = cli_main.run(["log", "--project", project_dir]) + assert rc == 0 + out = capsys.readouterr().out + assert "log line 14" in out + assert "tail:" in out + + def test_listing_tail_max_10_lines(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + step_dir = os.path.join(run_dir, "Synthesis_yosys", "log") + os.makedirs(step_dir, exist_ok=True) + log_path = os.path.join(step_dir, "synthesis.log") + lines = [f"line {i}" for i in range(20)] + with open(log_path, "w") as f: + f.write("\n".join(lines) + "\n") + rc = cli_main.run(["log", "--project", 
project_dir]) + assert rc == 0 + out = capsys.readouterr().out + tail_lines = [l for l in out.split("\n") if "tail:" in l] + assert len(tail_lines) == 10 + + def test_empty_log_no_tail_block(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + step_dir = os.path.join(run_dir, "Synthesis_yosys", "log") + os.makedirs(step_dir, exist_ok=True) + log_path = os.path.join(step_dir, "synthesis.log") + with open(log_path, "w") as f: + f.write("") + rc = cli_main.run(["log", "--project", project_dir]) + assert rc == 0 + out = capsys.readouterr().out + assert "tail:" not in out + assert "inspect:" in out + + def test_inspect_visible_below_tail(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + step_dir = os.path.join(run_dir, "Synthesis_yosys", "log") + os.makedirs(step_dir, exist_ok=True) + log_path = os.path.join(step_dir, "synthesis.log") + with open(log_path, "w") as f: + f.write("content line\n") + rc = cli_main.run(["log", "--project", project_dir]) + assert rc == 0 + out = capsys.readouterr().out + tail_pos = out.find("tail:") + inspect_pos = out.find("inspect:") + assert tail_pos < inspect_pos + + +class TestLogListingMachineModeNoTail: + """Machine modes must not include tail data.""" + + def test_plain_no_tail(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + step_dir = os.path.join(run_dir, "Synthesis_yosys", "log") + os.makedirs(step_dir, exist_ok=True) + with open(os.path.join(step_dir, "synthesis.log"), "w") as f: + f.write("line 1\nline 2\nline 3\n") + rc = cli_main.run(["log", "--plain", "--project", project_dir]) + assert rc == 0 + out = capsys.readouterr().out + assert "tail=" not in out + + def test_json_no_tail(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + step_dir = os.path.join(run_dir, "Synthesis_yosys", "log") + os.makedirs(step_dir, exist_ok=True) + with open(os.path.join(step_dir, "synthesis.log"), "w") as f: + f.write("line 1\nline 2\n") + rc = cli_main.run(["log", "--json", "--project", project_dir]) + assert rc == 0 + data = json.loads(capsys.readouterr().out) + for rec in data["records"]: + assert "tail" not in rec + + def test_jsonl_no_tail(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + step_dir = os.path.join(run_dir, "Synthesis_yosys", "log") + os.makedirs(step_dir, exist_ok=True) + with open(os.path.join(step_dir, "synthesis.log"), "w") as f: + f.write("line 1\nline 2\n") + rc = cli_main.run(["log", "--jsonl", "--project", project_dir]) + assert rc == 0 + records = [json.loads(ln) for ln in capsys.readouterr().out.strip().split("\n") if ln.strip()] + for rec in records: + assert "tail" not in rec + + +class TestLogStepUnchanged: + """ecc log full output must remain unchanged.""" + + def test_step_shows_all_lines_not_tail(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + step_dir = os.path.join(run_dir, "Synthesis_yosys", "log") + os.makedirs(step_dir, exist_ok=True) + lines = [f"line {i}" for i in range(20)] + with open(os.path.join(step_dir, "synthesis.log"), "w") as f: + f.write("\n".join(lines) + "\n") + rc = cli_main.run(["log", "synthesis", "--project", project_dir]) + assert 
rc == 0 + out = capsys.readouterr().out + assert "line 0" in out + assert "line 19" in out + + def test_step_plain_unchanged(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + step_dir = os.path.join(run_dir, "Synthesis_yosys", "log") + os.makedirs(step_dir, exist_ok=True) + with open(os.path.join(step_dir, "synthesis.log"), "w") as f: + f.write("a\nb\nc\n") + rc = cli_main.run(["log", "synthesis", "--plain", "--project", project_dir]) + assert rc == 0 + out = capsys.readouterr().out + assert "line_no=1" in out + assert "line_no=2" in out + assert "line_no=3" in out + assert "tail" not in out + + def test_step_jsonl_unchanged(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + step_dir = os.path.join(run_dir, "Synthesis_yosys", "log") + os.makedirs(step_dir, exist_ok=True) + with open(os.path.join(step_dir, "synthesis.log"), "w") as f: + f.write("a\nb\n") + rc = cli_main.run(["log", "synthesis", "--jsonl", "--project", project_dir]) + assert rc == 0 + records = [json.loads(ln) for ln in capsys.readouterr().out.strip().split("\n") if ln.strip()] + assert len(records) == 2 + for rec in records: + assert "tail" not in rec + diff --git a/test/cli/test_log_view.py b/test/cli/test_log_view.py index 3964c715..66861e59 100644 --- a/test/cli/test_log_view.py +++ b/test/cli/test_log_view.py @@ -677,3 +677,142 @@ def test_listing_color_disabled_no_ansi(self): buf = StringIO() render_log_listing_pretty(records, file=buf, color=False) assert "\x1b[" not in buf.getvalue() + + +class TestTailLinesForLog: + def test_returns_last_10_non_empty(self, tmp_path): + from chipcompiler.cli.log_view import tail_lines_for_log + log_file = tmp_path / "test.log" + lines = [f"line {i}" for i in range(15)] + log_file.write_text("\n".join(lines)) + result = tail_lines_for_log(str(log_file)) + assert len(result) == 10 + assert result[0] == "line 5" + assert result[-1] == "line 14" + + def test_fewer_than_10_returns_all(self, tmp_path): + from chipcompiler.cli.log_view import tail_lines_for_log + log_file = tmp_path / "test.log" + log_file.write_text("a\nb\nc\n") + result = tail_lines_for_log(str(log_file)) + assert result == ["a", "b", "c"] + + def test_empty_lines_omitted(self, tmp_path): + from chipcompiler.cli.log_view import tail_lines_for_log + log_file = tmp_path / "test.log" + log_file.write_text("a\n\n\nb\n\n\nc\n") + result = tail_lines_for_log(str(log_file)) + assert result == ["a", "b", "c"] + + def test_preserves_order(self, tmp_path): + from chipcompiler.cli.log_view import tail_lines_for_log + log_file = tmp_path / "test.log" + log_file.write_text("first\nmiddle\nlast\n") + result = tail_lines_for_log(str(log_file)) + assert result == ["first", "middle", "last"] + + def test_ansi_stripped(self, tmp_path): + from chipcompiler.cli.log_view import tail_lines_for_log + log_file = tmp_path / "test.log" + log_file.write_text("\x1b[31mred text\x1b[0m\nnormal\n") + result = tail_lines_for_log(str(log_file)) + assert result == ["red text", "normal"] + + def test_missing_file_returns_empty(self, tmp_path): + from chipcompiler.cli.log_view import tail_lines_for_log + result = tail_lines_for_log(str(tmp_path / "nonexistent.log")) + assert result == [] + + def test_empty_file_returns_empty(self, tmp_path): + from chipcompiler.cli.log_view import tail_lines_for_log + log_file = tmp_path / "test.log" + log_file.write_text("") + result = 
tail_lines_for_log(str(log_file)) + assert result == [] + + def test_blank_only_file_returns_empty(self, tmp_path): + from chipcompiler.cli.log_view import tail_lines_for_log + log_file = tmp_path / "test.log" + log_file.write_text(" \n\n\t\n \n") + result = tail_lines_for_log(str(log_file)) + assert result == [] + + def test_ansi_control_sequences_stripped(self, tmp_path): + from chipcompiler.cli.log_view import tail_lines_for_log + log_file = tmp_path / "test.log" + log_file.write_text("\x1b[31mred\x1b[0m\n\x1b[2Kclear\nvalid\n") + result = tail_lines_for_log(str(log_file)) + assert "\x1b[" not in " ".join(result) + assert "valid" in result + + +class TestListingTailRendering: + def test_tail_block_appears_for_readable_log(self): + from io import StringIO + records = [ + {"step": "synthesis", "source": "synth.log", "inspect_cmd": "ecc log synthesis"}, + ] + tail_map = {"synth.log": ["line 1", "line 2"]} + buf = StringIO() + render_log_listing_pretty(records, file=buf, color=False, tail_map=tail_map) + out = buf.getvalue() + assert "tail: line 1" in out + assert "tail: line 2" in out + + def test_inspect_remains_below_tail(self): + from io import StringIO + records = [ + {"step": "synthesis", "source": "synth.log", "inspect_cmd": "ecc log synthesis"}, + ] + tail_map = {"synth.log": ["preview"]} + buf = StringIO() + render_log_listing_pretty(records, file=buf, color=False, tail_map=tail_map) + out = buf.getvalue() + tail_pos = out.find("tail:") + inspect_pos = out.find("inspect:") + assert tail_pos < inspect_pos + + def test_no_tail_block_when_empty(self): + from io import StringIO + records = [ + {"step": "synthesis", "source": "synth.log", "inspect_cmd": "ecc log synthesis"}, + ] + tail_map = {"synth.log": []} + buf = StringIO() + render_log_listing_pretty(records, file=buf, color=False, tail_map=tail_map) + out = buf.getvalue() + assert "tail:" not in out + assert "inspect:" in out + + def test_no_tail_block_when_source_not_in_map(self): + from io import StringIO + records = [ + {"step": "synthesis", "source": "synth.log", "inspect_cmd": "ecc log synthesis"}, + ] + buf = StringIO() + render_log_listing_pretty(records, file=buf, color=False, tail_map={}) + out = buf.getvalue() + assert "tail:" not in out + assert "inspect:" in out + + def test_no_tail_block_when_tail_map_is_none(self): + from io import StringIO + records = [ + {"step": "synthesis", "source": "synth.log", "inspect_cmd": "ecc log synthesis"}, + ] + buf = StringIO() + render_log_listing_pretty(records, file=buf, color=False, tail_map=None) + out = buf.getvalue() + assert "tail:" not in out + assert "inspect:" in out + + def test_run_level_entry_labeled_without_step(self): + from io import StringIO + records = [ + {"log": "log/flow.log", "inspect_cmd": "ecc log"}, + ] + buf = StringIO() + render_log_listing_pretty(records, file=buf, color=False) + out = buf.getvalue() + assert "log/flow.log" in out + assert "inspect:" in out From efdc9300448ed5cbf4d29a2a994b71d459002083 Mon Sep 17 00:00:00 2001 From: Emin Date: Fri, 8 May 2026 18:17:41 +0800 Subject: [PATCH 102/104] fix(cli): correct listing shape, sanitization, and unreadable coverage - Run-level entries now render with 'run' label (AC-3) - Tail block uses header + indented lines shape per design spec (AC-3) - Enhanced sanitize_log_line() to strip OSC, DCS, and C0 controls (AC-2) - Added unreadable log listing test for no-step default-text mode (AC-4) - 576 tests pass, zero regressions --- chipcompiler/cli/log_view.py | 10 ++++---- chipcompiler/cli/progress.py | 8 +++++- 
test/cli/test_cli_main.py | 31 ++++++++++++++++++++++-- test/cli/test_log_view.py | 47 ++++++++++++++++++++++++++++++++---- 4 files changed, 83 insertions(+), 13 deletions(-) diff --git a/chipcompiler/cli/log_view.py b/chipcompiler/cli/log_view.py index f9f7f334..1243a62c 100644 --- a/chipcompiler/cli/log_view.py +++ b/chipcompiler/cli/log_view.py @@ -213,18 +213,18 @@ def render_log_listing_pretty( inspect = rec.get("inspect_cmd") or rec.get("inspect", "") if step: - step_label = f" {style(step, CYAN, color)}" if color else f" {step}" + label = f" {style(step, CYAN, color)}" if color else f" {step}" else: - step_label = "" + label = f" {style('run', CYAN, color)}" if color else " run" - target.write(f"{step_label} {source}\n") + target.write(f"{label} {source}\n") if tail_map and source in tail_map: tail_lines = tail_map[source] if tail_lines: - tail_label = f" {style('tail:', DIM, color)}" if color else " tail:" + target.write(f" {style('tail:', DIM, color)}\n" if color else " tail:\n") for tl in tail_lines: - target.write(f"{tail_label} {tl}\n") + target.write(f" {tl}\n") inspect_label = f" {style('inspect:', DIM, color)}" if color else " inspect:" target.write(f"{inspect_label} {inspect}\n") diff --git a/chipcompiler/cli/progress.py b/chipcompiler/cli/progress.py index 3d2a9f05..9052bf4a 100644 --- a/chipcompiler/cli/progress.py +++ b/chipcompiler/cli/progress.py @@ -27,13 +27,19 @@ def should_enable_run_progress(ctx, stderr): _ANSI_RE = re.compile(r"\x1b\[[0-9;]*[a-zA-Z]") +_OSC_RE = re.compile(r"\x1b\].*?(?:\x07|\x1b\\)") +_DCS_RE = re.compile(r"\x1bP.*?(?:\x1b\\)") _CONTROL_RE = re.compile(r"[\r\n\t]+") +_C0_RE = re.compile(r"[\x00-\x08\x0b\x0c\x0e-\x1f\x7f]") _MULTI_SPACE_RE = re.compile(r" {2,}") def sanitize_log_line(line): - stripped = _ANSI_RE.sub("", line) + stripped = _OSC_RE.sub("", line) + stripped = _DCS_RE.sub("", stripped) + stripped = _ANSI_RE.sub("", stripped) stripped = _CONTROL_RE.sub(" ", stripped) + stripped = _C0_RE.sub("", stripped) stripped = _MULTI_SPACE_RE.sub(" ", stripped) return stripped.strip() diff --git a/test/cli/test_cli_main.py b/test/cli/test_cli_main.py index 75e9346b..df5f680b 100644 --- a/test/cli/test_cli_main.py +++ b/test/cli/test_cli_main.py @@ -1737,8 +1737,10 @@ def test_listing_tail_max_10_lines(self, tmp_path, capsys): rc = cli_main.run(["log", "--project", project_dir]) assert rc == 0 out = capsys.readouterr().out - tail_lines = [l for l in out.split("\n") if "tail:" in l] - assert len(tail_lines) == 10 + output_lines = out.split("\n") + tail_header_idx = next(i for i, l in enumerate(output_lines) if l.strip() == "tail:") + tail_content = [l for l in output_lines[tail_header_idx + 1:] if l.startswith(" ") and "inspect:" not in l] + assert len(tail_content) == 10 def test_empty_log_no_tail_block(self, tmp_path, capsys): project_dir = _create_valid_project(tmp_path) @@ -1858,3 +1860,28 @@ def test_step_jsonl_unchanged(self, tmp_path, capsys): for rec in records: assert "tail" not in rec + +class TestLogListingUnreadable: + """Unreadable logs in listing mode must omit tail, keep path+inspect, no traceback.""" + + def test_unreadable_step_log_in_listing(self, tmp_path, capsys): + project_dir = _create_valid_project(tmp_path) + run_dir = os.path.join(project_dir, "runs", "default") + step_dir = os.path.join(run_dir, "Synthesis_yosys", "log") + os.makedirs(step_dir, exist_ok=True) + log_path = os.path.join(step_dir, "synthesis.log") + with open(log_path, "w") as f: + f.write("content\n") + os.chmod(log_path, 0o000) + + try: + rc = 
cli_main.run(["log", "--project", project_dir]) + assert rc == 0 + out = capsys.readouterr().out + assert "tail:" not in out + assert "Synthesis_yosys" in out + assert "inspect:" in out + assert "Traceback" not in out + finally: + os.chmod(log_path, 0o644) + diff --git a/test/cli/test_log_view.py b/test/cli/test_log_view.py index 66861e59..a880238c 100644 --- a/test/cli/test_log_view.py +++ b/test/cli/test_log_view.py @@ -1,3 +1,5 @@ +import os + import pytest from chipcompiler.cli.log_view import ( @@ -745,9 +747,41 @@ def test_ansi_control_sequences_stripped(self, tmp_path): assert "\x1b[" not in " ".join(result) assert "valid" in result + def test_osc_sequences_stripped(self, tmp_path): + from chipcompiler.cli.log_view import tail_lines_for_log + log_file = tmp_path / "test.log" + log_file.write_text("\x1b]0;window title\x07message\n") + result = tail_lines_for_log(str(log_file)) + assert result == ["message"] + + def test_dcs_sequences_stripped(self, tmp_path): + from chipcompiler.cli.log_view import tail_lines_for_log + log_file = tmp_path / "test.log" + log_file.write_text("\x1bP$data\x1b\\visible\n") + result = tail_lines_for_log(str(log_file)) + assert result == ["visible"] + + def test_bel_and_backspace_stripped(self, tmp_path): + from chipcompiler.cli.log_view import tail_lines_for_log + log_file = tmp_path / "test.log" + log_file.write_text("a\x07b\x08c\ndone\n") + result = tail_lines_for_log(str(log_file)) + assert result == ["abc", "done"] + + def test_unreadable_file_returns_empty(self, tmp_path): + from chipcompiler.cli.log_view import tail_lines_for_log + log_file = tmp_path / "test.log" + log_file.write_text("content\n") + os.chmod(str(log_file), 0o000) + try: + result = tail_lines_for_log(str(log_file)) + assert result == [] + finally: + os.chmod(str(log_file), 0o644) + class TestListingTailRendering: - def test_tail_block_appears_for_readable_log(self): + def test_tail_block_header_with_indented_lines(self): from io import StringIO records = [ {"step": "synthesis", "source": "synth.log", "inspect_cmd": "ecc log synthesis"}, @@ -756,8 +790,11 @@ def test_tail_block_appears_for_readable_log(self): buf = StringIO() render_log_listing_pretty(records, file=buf, color=False, tail_map=tail_map) out = buf.getvalue() - assert "tail: line 1" in out - assert "tail: line 2" in out + lines = out.split("\n") + tail_idx = next(i for i, l in enumerate(lines) if l.strip() == "tail:") + assert "line 1" in lines[tail_idx + 1] + assert "line 2" in lines[tail_idx + 2] + assert lines[tail_idx + 1].startswith(" ") def test_inspect_remains_below_tail(self): from io import StringIO @@ -806,7 +843,7 @@ def test_no_tail_block_when_tail_map_is_none(self): assert "tail:" not in out assert "inspect:" in out - def test_run_level_entry_labeled_without_step(self): + def test_run_level_entry_labeled_run(self): from io import StringIO records = [ {"log": "log/flow.log", "inspect_cmd": "ecc log"}, @@ -814,5 +851,5 @@ def test_run_level_entry_labeled_without_step(self): buf = StringIO() render_log_listing_pretty(records, file=buf, color=False) out = buf.getvalue() - assert "log/flow.log" in out + assert " run log/flow.log" in out assert "inspect:" in out From c9a1b8fa62a064407a8575f239422580b040476e Mon Sep 17 00:00:00 2001 From: Emin Date: Sat, 9 May 2026 11:00:38 +0800 Subject: [PATCH 103/104] fix(nix): pass callPackages from overlay to ecc-tools derivation The ecc-tools default.nix uses callPackages (plural) for the rustpkgs attrset, but python3Packages.callPackage only provides callPackage (singular). 
Pass prev.callPackages explicitly from the overlay. --- nix/overlay.nix | 1 + 1 file changed, 1 insertion(+) diff --git a/nix/overlay.nix b/nix/overlay.nix index ed26825c..d90cbce7 100644 --- a/nix/overlay.nix +++ b/nix/overlay.nix @@ -1,5 +1,6 @@ final: prev: { ecc-tools-python = prev.python3Packages.callPackage ./python/ecc-tools { + callPackages = prev.callPackages; gflags = prev.gflags; onnxruntime = prev.onnxruntime; }; From e8966b90554fc3b079b415073463333d77d0b780 Mon Sep 17 00:00:00 2001 From: Emin Date: Sat, 9 May 2026 11:29:06 +0800 Subject: [PATCH 104/104] fix(params): align target_density default across template, registry, and tests Backend template (parameter.py) uses 0.2 since commit 063461d, but CLI registry and tests still referenced 0.8. Update params.py default and test assertions to match the canonical template value. --- chipcompiler/cli/params.py | 2 +- test/cli/test_cli_params.py | 4 ++-- test/formal/test_param_propagation.py | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/chipcompiler/cli/params.py b/chipcompiler/cli/params.py index c2fcf2c3..b044c809 100644 --- a/chipcompiler/cli/params.py +++ b/chipcompiler/cli/params.py @@ -85,7 +85,7 @@ class ParamSchema: group="place", name="target_density", type="float", - default=0.8, + default=0.2, applies="placement", maps_to={"DreamPlace": "target_density"}, description="Target placement density", diff --git a/test/cli/test_cli_params.py b/test/cli/test_cli_params.py index 02d3379b..48954ad8 100644 --- a/test/cli/test_cli_params.py +++ b/test/cli/test_cli_params.py @@ -86,7 +86,7 @@ def test_param_show_json(self, tmp_path, capsys): data = json.loads(capsys.readouterr().out) record = data["records"][0] assert record["param"] == "place.target_density" - assert record["default"] == 0.8 + assert record["default"] == 0.2 assert "source" in record assert "maps_to" in record @@ -524,7 +524,7 @@ def test_diff_only_shows_values_that_differ(self, tmp_path, capsys): def test_diff_clean_when_set_to_default(self, tmp_path, capsys): project_dir = _create_valid_project(tmp_path) - schema_default = 0.8 + schema_default = 0.2 cli_main.run(["param", "set", "place.target_density", str(schema_default), "--project", project_dir]) capsys.readouterr() diff --git a/test/formal/test_param_propagation.py b/test/formal/test_param_propagation.py index da0a9e25..234a6a79 100644 --- a/test/formal/test_param_propagation.py +++ b/test/formal/test_param_propagation.py @@ -69,7 +69,7 @@ def _key_exists_in_dict(data: dict[str, Any], key: str) -> bool: # Known parameter -> config mappings with both defaults. # (param_key, param_default, config_default, description) PARAM_CONFIG_DEFAULTS: list[tuple[str, float, float, str]] = [ - ("Target density", 0.8, 0.8, "dreamplace.target_density"), + ("Target density", 0.2, 0.8, "dreamplace.target_density"), ("Target overflow", 0.1, 0.1, "dreamplace.stop_overflow"), ("Cell padding x", 600, 600, "dreamplace.cell_padding_x"), ("Routability opt flag", 1, 0, "dreamplace.routability_opt_flag"),