diff --git a/README.md b/README.md
index a52a486..ef6b4bd 100644
--- a/README.md
+++ b/README.md
@@ -6,7 +6,13 @@
[](https://github.com/hashgraph-online/codex-plugin-scanner/actions/workflows/ci.yml)
[](https://scorecard.dev/viewer/?uri=github.com/hashgraph-online/codex-plugin-scanner)
-A security and security-ops scanner for [Codex plugins](https://developers.openai.com/codex/plugins). It scores the applicable plugin surface from 0-100, emits structured findings, and can run Cisco's `skill-scanner` against plugin skills for deeper analysis.
+A security, publishability, and security-ops scanner for [Codex plugins](https://developers.openai.com/codex/plugins). It scores the applicable plugin surface from 0-100, emits structured findings, validates install-surface metadata, hardens MCP transport expectations, and can run Cisco's `skill-scanner` against plugin skills for deeper analysis.
+
+## What's New in v1.2.0
+
+- Publishability checks for Codex `interface` metadata, assets, and HTTPS links.
+- MCP transport hardening for remote `.mcp.json` endpoints.
+- A new `Operational Security` category for GitHub Actions SHA pinning, privileged untrusted-checkout patterns, Dependabot, and lockfile hygiene.
## Installation
@@ -48,16 +54,18 @@ codex-plugin-scanner ./my-plugin --cisco-skill-scan on --cisco-policy strict
### Example Output
```
-🔗 Codex Plugin Scanner v1.1.0
+🔗 Codex Plugin Scanner v1.2.0
Scanning: ./my-plugin
-── Manifest Validation (25/25) ──
+── Manifest Validation (31/31) ──
✅ plugin.json exists +4
✅ Valid JSON +4
✅ Required fields present +5
✅ Version follows semver +3
✅ Name is kebab-case +2
✅ Recommended metadata present +4
+ ✅ Interface metadata complete if declared +3
+ ✅ Interface links and assets valid if declared +3
✅ Declared paths are safe +3
── Security (16/16) ──
@@ -65,8 +73,16 @@ Scanning: ./my-plugin
✅ LICENSE found +3
✅ No hardcoded secrets +7
✅ No dangerous MCP commands +0
+ ✅ MCP remote transports are hardened +0
✅ No approval bypass defaults +3
+── Operational Security (0/0) ──
+ ✅ Third-party GitHub Actions pinned to SHAs +0
+ ✅ No write-all GitHub Actions permissions +0
+ ✅ No privileged untrusted checkout patterns +0
+ ✅ Dependabot configured for automation surfaces +0
+ ✅ Dependency manifests have lockfiles +0
+
── Skill Security (15/15) ──
✅ Cisco skill scan completed +3
✅ No elevated Cisco skill findings +8
@@ -85,8 +101,9 @@ Optional surfaces such as `marketplace.json`, `.mcp.json`, and plugin skills are
| Category | Max Points | Checks |
|----------|-----------|--------|
-| Manifest Validation | 25 | plugin.json, required fields, semver, kebab-case, recommended metadata, safe declared paths |
-| Security | 20 | SECURITY.md, LICENSE, no hardcoded secrets, no dangerous MCP commands, no approval bypass defaults |
+| Manifest Validation | 31 | plugin.json, required fields, semver, kebab-case, recommended metadata, interface metadata, interface assets, safe declared paths |
+| Security | 24 | SECURITY.md, LICENSE, no hardcoded secrets, no dangerous MCP commands, MCP remote transport hardening, no approval bypass defaults |
+| Operational Security | 20 | GitHub Actions SHA pinning, no `write-all`, no privileged untrusted checkout, Dependabot, dependency lockfiles |
| Best Practices | 15 | README.md, skills directory, SKILL.md frontmatter, no committed `.env`, `.codexignore` |
| Marketplace | 15 | marketplace.json validity, policy fields, safe source paths |
| Skill Security | 15 | Cisco scan availability, elevated skill findings, analyzability |
@@ -108,10 +125,12 @@ The scanner detects:
- **Hardcoded secrets**: AWS keys, GitHub tokens, OpenAI keys, Slack tokens, GitLab tokens, generic password/secret/token patterns
- **Dangerous MCP commands**: `rm -rf`, `sudo`, `curl|sh`, `wget|sh`, `eval`, `exec`, `powershell -c`
+- **Insecure MCP remotes**: remote MCP endpoints that are neither HTTPS nor loopback-only HTTP
- **Risky Codex defaults**: approval bypass and unrestricted sandbox defaults in plugin-shipped config/docs
- **Shell injection**: template literals with unsanitized interpolation in exec/spawn calls
- **Unsafe code**: `eval()` and `new Function()` usage
- **Cisco skill threats**: policy violations and risky behaviors detected by Cisco `skill-scanner`
+- **Workflow hardening gaps**: unpinned third-party actions, `write-all`, privileged untrusted checkouts, missing Dependabot, missing lockfiles
## Report Formats
diff --git a/pyproject.toml b/pyproject.toml
index 47b5177..d5b6e2a 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -4,10 +4,10 @@ build-backend = "hatchling.build"
[project]
name = "codex-plugin-scanner"
-version = "1.1.0"
-description = "Security and security-ops scanner for Codex plugins with Cisco-backed skill analysis."
+version = "1.2.0"
+description = "Security, operational-security, and publishability scanner for Codex plugins with Cisco-backed skill analysis."
readme = "README.md"
-license = {text = "Apache-2.0"}
+license = "Apache-2.0"
requires-python = ">=3.10"
authors = [
{ name = "Hashgraph Online", email = "dev@hol.org" },
diff --git a/src/codex_plugin_scanner/__init__.py b/src/codex_plugin_scanner/__init__.py
index 28ec96c..2413f6c 100644
--- a/src/codex_plugin_scanner/__init__.py
+++ b/src/codex_plugin_scanner/__init__.py
@@ -3,7 +3,7 @@
from .models import GRADE_LABELS, CategoryResult, CheckResult, Finding, ScanOptions, ScanResult, Severity, get_grade
from .scanner import scan_plugin
-__version__ = "1.1.0"
+__version__ = "1.2.0"
__all__ = [
"GRADE_LABELS",
"CategoryResult",
diff --git a/src/codex_plugin_scanner/checks/manifest.py b/src/codex_plugin_scanner/checks/manifest.py
index 990bb86..bbb51cf 100644
--- a/src/codex_plugin_scanner/checks/manifest.py
+++ b/src/codex_plugin_scanner/checks/manifest.py
@@ -1,4 +1,4 @@
-"""Manifest validation checks (25 points)."""
+"""Manifest validation checks (31 points)."""
from __future__ import annotations
@@ -11,6 +11,10 @@
SEMVER_RE = re.compile(r"^\d+\.\d+\.\d+$")
KEBAB_RE = re.compile(r"^[a-z0-9]+(-[a-z0-9]+)*$")
RECOMMENDED_FIELDS = ("author", "homepage", "repository", "license", "keywords")
+INTERFACE_METADATA_FIELDS = ("type", "displayName", "shortDescription", "longDescription", "developerName", "category")
+INTERFACE_URL_FIELDS = ("websiteURL", "privacyPolicyURL", "termsOfServiceURL")
+INTERFACE_ASSET_FIELDS = ("composerIcon", "logo")
+HEX_COLOR_RE = re.compile(r"^#[0-9A-Fa-f]{6}$")
def load_manifest(plugin_dir: Path) -> dict | None:
@@ -54,6 +58,17 @@ def _is_safe_relative_path(plugin_dir: Path, value: str) -> bool:
return True
+def _load_interface(manifest: dict | None) -> dict | None:
+ if manifest is None:
+ return None
+ interface = manifest.get("interface")
+ if interface is None:
+ return None
+ if isinstance(interface, dict):
+ return interface
+ return {}
+
+
def check_plugin_json_exists(plugin_dir: Path) -> CheckResult:
exists = (plugin_dir / ".codex-plugin" / "plugin.json").exists()
findings = ()
@@ -274,6 +289,188 @@ def check_recommended_metadata(plugin_dir: Path) -> CheckResult:
)
+def check_interface_metadata(plugin_dir: Path) -> CheckResult:
+ manifest = load_manifest(plugin_dir)
+ interface = _load_interface(manifest)
+ if interface is None:
+ return CheckResult(
+ name="Interface metadata complete if declared",
+ passed=True,
+ points=0,
+ max_points=0,
+ message="No interface metadata declared, check not applicable.",
+ applicable=False,
+ )
+
+ if not interface:
+ return CheckResult(
+ name="Interface metadata complete if declared",
+ passed=False,
+ points=0,
+ max_points=3,
+ message="Manifest interface must be a JSON object.",
+ findings=(
+ _manifest_finding(
+ "PLUGIN_JSON_INTERFACE_INVALID",
+ "Manifest interface is not a JSON object",
+ 'The "interface" field must be an object when it is declared.',
+ 'Replace "interface" with a JSON object that contains the documented publishable metadata.',
+ ),
+ ),
+ )
+
+ missing = [
+ field
+ for field in INTERFACE_METADATA_FIELDS
+ if not isinstance(interface.get(field), str) or not interface.get(field, "").strip()
+ ]
+
+ capabilities = interface.get("capabilities")
+ if (
+ not isinstance(capabilities, list)
+ or not capabilities
+ or not all(isinstance(item, str) and item.strip() for item in capabilities)
+ ):
+ missing.append("capabilities")
+
+ default_prompt = interface.get("defaultPrompt")
+ if default_prompt is not None:
+ valid_string_prompt = isinstance(default_prompt, str) and default_prompt.strip()
+ valid_list_prompt = (
+ isinstance(default_prompt, list)
+ and default_prompt
+ and all(isinstance(item, str) and item.strip() for item in default_prompt)
+ )
+ if not valid_string_prompt and not valid_list_prompt:
+ missing.append("defaultPrompt")
+
+ brand_color = interface.get("brandColor")
+ if brand_color is not None and (not isinstance(brand_color, str) or not HEX_COLOR_RE.match(brand_color)):
+ missing.append("brandColor")
+
+ if not missing:
+ return CheckResult(
+ name="Interface metadata complete if declared",
+ passed=True,
+ points=3,
+ max_points=3,
+ message="Interface metadata contains the expected publishable fields.",
+ )
+
+ findings = tuple(
+ _manifest_finding(
+ f"PLUGIN_JSON_INTERFACE_{field.upper()}",
+ f'Interface field "{field}" is missing or invalid',
+ f'The interface object is missing a valid "{field}" value.',
+ f'Add a valid "{field}" field to the interface metadata.',
+ severity=Severity.INFO,
+ )
+ for field in missing
+ )
+ return CheckResult(
+ name="Interface metadata complete if declared",
+ passed=False,
+ points=0,
+ max_points=3,
+ message=f"Interface metadata is missing or invalid: {', '.join(missing)}",
+ findings=findings,
+ )
+
+
+def _is_https_url(value: object) -> bool:
+ return isinstance(value, str) and value.startswith("https://")
+
+
+def _interface_asset_paths(value: object) -> list[str]:
+ if isinstance(value, str):
+ return [value]
+ if isinstance(value, list):
+ return [item for item in value if isinstance(item, str)]
+ return []
+
+
+def check_interface_assets(plugin_dir: Path) -> CheckResult:
+ manifest = load_manifest(plugin_dir)
+ interface = _load_interface(manifest)
+ if interface is None:
+ return CheckResult(
+ name="Interface links and assets valid if declared",
+ passed=True,
+ points=0,
+ max_points=0,
+ message="No interface metadata declared, check not applicable.",
+ applicable=False,
+ )
+
+ if not interface:
+ return CheckResult(
+ name="Interface links and assets valid if declared",
+ passed=False,
+ points=0,
+ max_points=3,
+ message="Manifest interface must be a JSON object.",
+ findings=(
+ _manifest_finding(
+ "PLUGIN_JSON_INTERFACE_INVALID",
+ "Manifest interface is not a JSON object",
+ 'The "interface" field must be an object when it is declared.',
+ 'Replace "interface" with a JSON object that contains valid URLs and asset paths.',
+ ),
+ ),
+ )
+
+ issues: list[str] = []
+ for field in INTERFACE_URL_FIELDS:
+ if not _is_https_url(interface.get(field)):
+ issues.append(field)
+
+ for field in INTERFACE_ASSET_FIELDS:
+ value = interface.get(field)
+ if (
+ not isinstance(value, str)
+ or not _is_safe_relative_path(plugin_dir, value)
+ or not (plugin_dir / value).exists()
+ ):
+ issues.append(field)
+
+ screenshots = _interface_asset_paths(interface.get("screenshots"))
+ if not screenshots:
+ issues.append("screenshots")
+ else:
+ for screenshot in screenshots:
+ if not _is_safe_relative_path(plugin_dir, screenshot) or not (plugin_dir / screenshot).exists():
+ issues.append("screenshots")
+ break
+
+ if not issues:
+ return CheckResult(
+ name="Interface links and assets valid if declared",
+ passed=True,
+ points=3,
+ max_points=3,
+ message="Interface URLs use HTTPS and referenced assets exist within the plugin directory.",
+ )
+
+ findings = tuple(
+ _manifest_finding(
+ f"PLUGIN_JSON_INTERFACE_ASSET_{field.upper()}",
+ f'Interface asset or URL "{field}" is invalid',
+ f'The interface field "{field}" must use HTTPS or point to a safe in-repo asset.',
+ f'Update "{field}" to use HTTPS or an existing relative asset path.',
+ severity=Severity.INFO,
+ )
+ for field in issues
+ )
+ return CheckResult(
+ name="Interface links and assets valid if declared",
+ passed=False,
+ points=0,
+ max_points=3,
+ message=f"Interface links or assets are invalid: {', '.join(issues)}",
+ findings=findings,
+ )
+
+
def check_declared_paths_safe(plugin_dir: Path) -> CheckResult:
manifest = load_manifest(plugin_dir)
if manifest is None:
@@ -332,5 +529,7 @@ def run_manifest_checks(plugin_dir: Path) -> tuple[CheckResult, ...]:
check_semver(plugin_dir),
check_kebab_case(plugin_dir),
check_recommended_metadata(plugin_dir),
+ check_interface_metadata(plugin_dir),
+ check_interface_assets(plugin_dir),
check_declared_paths_safe(plugin_dir),
)
diff --git a/src/codex_plugin_scanner/checks/operational_security.py b/src/codex_plugin_scanner/checks/operational_security.py
new file mode 100644
index 0000000..566361b
--- /dev/null
+++ b/src/codex_plugin_scanner/checks/operational_security.py
@@ -0,0 +1,346 @@
+"""Operational security checks for automation and supply-chain hygiene."""
+
+from __future__ import annotations
+
+import re
+from pathlib import Path
+
+from ..models import CheckResult, Finding, Severity
+
+USES_RE = re.compile(r"^\s*(?:-\s*)?uses:\s*([^\s#]+)", re.MULTILINE)
+WRITE_ALL_RE = re.compile(r"^\s*permissions:\s*write-all\b", re.MULTILINE)
+PRIVILEGED_TRIGGER_RE = re.compile(r"\bpull_request_target\b|\bworkflow_run\s*:", re.MULTILINE)
+UNTRUSTED_REF_RE = re.compile(
+ r"github\.event\.pull_request\.head\.(?:sha|ref)|github\.event\.workflow_run\.head_(?:sha|branch)",
+ re.MULTILINE,
+)
+SHA_PIN_RE = re.compile(r"^[0-9a-fA-F]{40}$")
+REQUIREMENTS_PIN_RE = re.compile(r"^[A-Za-z0-9_.-]+(?:\[[A-Za-z0-9_,.-]+\])?==[^=\s]+$")
+DEPENDABOT_ECOSYSTEM_RE = re.compile(r'package-ecosystem:\s*["\']?github-actions["\']?', re.IGNORECASE)
+
+NODE_LOCKFILES = ("package-lock.json", "pnpm-lock.yaml", "yarn.lock", "bun.lock", "bun.lockb")
+PYTHON_LOCKFILES = ("uv.lock", "poetry.lock", "Pipfile.lock", "requirements.lock")
+DEPENDENCY_MANIFESTS = ("package.json", "pyproject.toml", "requirements.txt")
+
+
+def _not_applicable(name: str, message: str) -> CheckResult:
+ return CheckResult(
+ name=name,
+ passed=True,
+ points=0,
+ max_points=0,
+ message=message,
+ applicable=False,
+ )
+
+
+def _workflow_files(plugin_dir: Path) -> tuple[Path, ...]:
+ workflows_dir = plugin_dir / ".github" / "workflows"
+ if not workflows_dir.is_dir():
+ return ()
+ return tuple(sorted(path for path in workflows_dir.rglob("*") if path.suffix in {".yml", ".yaml"}))
+
+
+def _read_workflow_text(workflow_files: tuple[Path, ...]) -> list[tuple[Path, str]]:
+ contents: list[tuple[Path, str]] = []
+ for workflow in workflow_files:
+ try:
+ contents.append((workflow, workflow.read_text(encoding="utf-8", errors="ignore")))
+ except OSError:
+ continue
+ return contents
+
+
+def _dependency_surface(plugin_dir: Path) -> dict[str, bool]:
+ return {name: (plugin_dir / name).exists() for name in DEPENDENCY_MANIFESTS}
+
+
+def _requirements_exactly_pinned(requirements_file: Path) -> bool:
+ try:
+ lines = requirements_file.read_text(encoding="utf-8", errors="ignore").splitlines()
+ except OSError:
+ return False
+
+ pinned: list[str] = []
+ for line in lines:
+ normalized = line.strip()
+ if not normalized or normalized.startswith("#") or normalized.startswith(("-e ", "--")):
+ continue
+ if normalized.startswith(("git+", "http://", "https://")):
+ return False
+ pinned.append(normalized)
+ return bool(pinned) and all(REQUIREMENTS_PIN_RE.match(line) for line in pinned)
+
+
+def _has_lockfiles(plugin_dir: Path, dependency_surface: dict[str, bool]) -> bool:
+ node_locked = not dependency_surface["package.json"] or any((plugin_dir / name).exists() for name in NODE_LOCKFILES)
+ python_lock_exists = any((plugin_dir / name).exists() for name in PYTHON_LOCKFILES)
+ requirements_pinned = dependency_surface["requirements.txt"] and _requirements_exactly_pinned(
+ plugin_dir / "requirements.txt"
+ )
+
+ python_locked = True
+ if dependency_surface["pyproject.toml"] or dependency_surface["requirements.txt"]:
+ python_locked = python_lock_exists or requirements_pinned
+ return node_locked and python_locked
+
+
+def check_actions_pinned(plugin_dir: Path) -> CheckResult:
+ workflow_files = _workflow_files(plugin_dir)
+ if not workflow_files:
+ return _not_applicable("Third-party GitHub Actions pinned to SHAs", "No GitHub Actions workflows found.")
+
+ unpinned: list[tuple[str, str]] = []
+ for workflow, content in _read_workflow_text(workflow_files):
+ for match in USES_RE.findall(content):
+ reference = match.strip()
+ if reference.startswith(("./", "docker://")):
+ continue
+ if "@" not in reference:
+ unpinned.append((str(workflow.relative_to(plugin_dir)), reference))
+ continue
+ _, ref = reference.rsplit("@", 1)
+ if not SHA_PIN_RE.match(ref):
+ unpinned.append((str(workflow.relative_to(plugin_dir)), reference))
+
+ if not unpinned:
+ return CheckResult(
+ name="Third-party GitHub Actions pinned to SHAs",
+ passed=True,
+ points=5,
+ max_points=5,
+ message="All third-party GitHub Actions and reusable workflows are pinned to immutable SHAs.",
+ )
+
+ return CheckResult(
+ name="Third-party GitHub Actions pinned to SHAs",
+ passed=False,
+ points=0,
+ max_points=5,
+ message=f"Unpinned GitHub Actions found: {', '.join(f'{path}:{ref}' for path, ref in unpinned[:3])}",
+ findings=tuple(
+ Finding(
+ rule_id="GITHUB_ACTION_UNPINNED",
+ severity=Severity.MEDIUM,
+ category="operational-security",
+ title="GitHub Action is not pinned to an immutable commit SHA",
+ description=f'{path} references "{reference}" instead of a full 40-character commit SHA.',
+ remediation="Pin third-party GitHub Actions and reusable workflows to a full commit SHA.",
+ file_path=path,
+ source="native",
+ )
+ for path, reference in unpinned
+ ),
+ )
+
+
+def check_no_write_all_permissions(plugin_dir: Path) -> CheckResult:
+ workflow_files = _workflow_files(plugin_dir)
+ if not workflow_files:
+ return _not_applicable("No write-all GitHub Actions permissions", "No GitHub Actions workflows found.")
+
+ issues: list[str] = []
+ for workflow, content in _read_workflow_text(workflow_files):
+ if WRITE_ALL_RE.search(content):
+ issues.append(str(workflow.relative_to(plugin_dir)))
+
+ if not issues:
+ return CheckResult(
+ name="No write-all GitHub Actions permissions",
+ passed=True,
+ points=4,
+ max_points=4,
+ message="No GitHub Actions workflow requests write-all permissions.",
+ )
+
+ return CheckResult(
+ name="No write-all GitHub Actions permissions",
+ passed=False,
+ points=0,
+ max_points=4,
+ message=f"Broad workflow permissions found in: {', '.join(issues)}",
+ findings=tuple(
+ Finding(
+ rule_id="GITHUB_ACTIONS_WRITE_ALL",
+ severity=Severity.MEDIUM,
+ category="operational-security",
+ title="GitHub Actions workflow requests write-all permissions",
+ description=f"{path} requests write-all permissions for the workflow token.",
+ remediation="Replace write-all with the narrowest required permissions block.",
+ file_path=path,
+ source="native",
+ )
+ for path in issues
+ ),
+ )
+
+
+def check_no_privileged_untrusted_checkout(plugin_dir: Path) -> CheckResult:
+ workflow_files = _workflow_files(plugin_dir)
+ if not workflow_files:
+ return _not_applicable("No privileged untrusted checkout patterns", "No GitHub Actions workflows found.")
+
+ issues: list[str] = []
+ for workflow, content in _read_workflow_text(workflow_files):
+ if not PRIVILEGED_TRIGGER_RE.search(content):
+ continue
+ if "actions/checkout" not in content and "git checkout" not in content:
+ continue
+ if UNTRUSTED_REF_RE.search(content):
+ issues.append(str(workflow.relative_to(plugin_dir)))
+
+ if not issues:
+ return CheckResult(
+ name="No privileged untrusted checkout patterns",
+ passed=True,
+ points=5,
+ max_points=5,
+ message="No privileged workflow checks out untrusted branch content.",
+ )
+
+ return CheckResult(
+ name="No privileged untrusted checkout patterns",
+ passed=False,
+ points=0,
+ max_points=5,
+ message=f"Privileged workflow checks out untrusted code in: {', '.join(issues)}",
+ findings=tuple(
+ Finding(
+ rule_id="GITHUB_ACTIONS_UNTRUSTED_CHECKOUT",
+ severity=Severity.HIGH,
+ category="operational-security",
+ title="Privileged workflow checks out untrusted code",
+ description=(
+ f"{path} combines a privileged trigger with a pull-request or workflow-run head ref checkout."
+ ),
+ remediation="Avoid checking out untrusted refs from privileged workflow contexts.",
+ file_path=path,
+ source="native",
+ )
+ for path in issues
+ ),
+ )
+
+
+def check_dependabot_configured(plugin_dir: Path) -> CheckResult:
+ workflow_files = _workflow_files(plugin_dir)
+ dependency_surface = _dependency_surface(plugin_dir)
+ if not workflow_files and not any(dependency_surface.values()):
+ return _not_applicable(
+ "Dependabot configured for automation surfaces",
+ "No workflows or dependency manifests found.",
+ )
+
+ dependabot_files = [plugin_dir / ".github" / "dependabot.yml", plugin_dir / ".github" / "dependabot.yaml"]
+ dependabot_file = next((path for path in dependabot_files if path.exists()), None)
+ if dependabot_file is None:
+ return CheckResult(
+ name="Dependabot configured for automation surfaces",
+ passed=False,
+ points=0,
+ max_points=3,
+ message="Dependabot is not configured for workflows or dependency manifests.",
+ findings=(
+ Finding(
+ rule_id="DEPENDABOT_MISSING",
+ severity=Severity.LOW,
+ category="operational-security",
+ title="Dependabot configuration is missing",
+ description="Automation or dependency surfaces exist, but .github/dependabot.yml is missing.",
+ remediation="Add Dependabot updates for GitHub Actions and dependency manifests.",
+ file_path=".github/dependabot.yml",
+ source="native",
+ ),
+ ),
+ )
+
+ try:
+ content = dependabot_file.read_text(encoding="utf-8", errors="ignore")
+ except OSError:
+ content = ""
+
+ has_github_actions_updates = DEPENDABOT_ECOSYSTEM_RE.search(content) is not None
+ if workflow_files and not has_github_actions_updates:
+ return CheckResult(
+ name="Dependabot configured for automation surfaces",
+ passed=False,
+ points=0,
+ max_points=3,
+ message="Dependabot config is present but does not include GitHub Actions updates.",
+ findings=(
+ Finding(
+ rule_id="DEPENDABOT_GITHUB_ACTIONS_MISSING",
+ severity=Severity.LOW,
+ category="operational-security",
+ title="Dependabot is not configured for GitHub Actions",
+ description="Workflow files exist, but Dependabot does not declare the github-actions ecosystem.",
+ remediation="Add a github-actions update entry to .github/dependabot.yml.",
+ file_path=str(dependabot_file.relative_to(plugin_dir)),
+ source="native",
+ ),
+ ),
+ )
+
+ return CheckResult(
+ name="Dependabot configured for automation surfaces",
+ passed=True,
+ points=3,
+ max_points=3,
+ message="Dependabot is configured for the detected automation surfaces.",
+ )
+
+
+def check_dependency_lockfiles(plugin_dir: Path) -> CheckResult:
+ dependency_surface = _dependency_surface(plugin_dir)
+ if not any(dependency_surface.values()):
+ return _not_applicable("Dependency manifests have lockfiles", "No dependency manifests found.")
+
+ if _has_lockfiles(plugin_dir, dependency_surface):
+ return CheckResult(
+ name="Dependency manifests have lockfiles",
+ passed=True,
+ points=3,
+ max_points=3,
+ message="Detected dependency manifests are paired with lockfiles or pinned requirements.",
+ )
+
+ missing: list[str] = []
+ if dependency_surface["package.json"] and not any((plugin_dir / name).exists() for name in NODE_LOCKFILES):
+ missing.append("package.json")
+ if dependency_surface["pyproject.toml"] and not any((plugin_dir / name).exists() for name in PYTHON_LOCKFILES):
+ missing.append("pyproject.toml")
+ if dependency_surface["requirements.txt"] and not _requirements_exactly_pinned(plugin_dir / "requirements.txt"):
+ missing.append("requirements.txt")
+
+ return CheckResult(
+ name="Dependency manifests have lockfiles",
+ passed=False,
+ points=0,
+ max_points=3,
+ message=f"Dependency lockfiles or pinned requirements missing for: {', '.join(missing)}",
+ findings=tuple(
+ Finding(
+ rule_id="DEPENDENCY_LOCKFILE_MISSING",
+ severity=Severity.MEDIUM,
+ category="operational-security",
+ title="Dependency manifest is missing a lockfile",
+ description=f"{name} is present without a corresponding lockfile or pinned dependency snapshot.",
+ remediation=(
+ "Commit a lockfile or use fully pinned requirements for reproducible dependency resolution."
+ ),
+ file_path=name,
+ source="native",
+ )
+ for name in missing
+ ),
+ )
+
+
+def run_operational_security_checks(plugin_dir: Path) -> tuple[CheckResult, ...]:
+ return (
+ check_actions_pinned(plugin_dir),
+ check_no_write_all_permissions(plugin_dir),
+ check_no_privileged_untrusted_checkout(plugin_dir),
+ check_dependabot_configured(plugin_dir),
+ check_dependency_lockfiles(plugin_dir),
+ )
diff --git a/src/codex_plugin_scanner/checks/security.py b/src/codex_plugin_scanner/checks/security.py
index 405fbcc..8e04686 100644
--- a/src/codex_plugin_scanner/checks/security.py
+++ b/src/codex_plugin_scanner/checks/security.py
@@ -2,8 +2,11 @@
from __future__ import annotations
+import ipaddress
+import json
import re
from pathlib import Path
+from urllib.parse import urlparse
from ..models import CheckResult, Finding, Severity
@@ -142,9 +145,7 @@ def check_license(plugin_dir: Path) -> CheckResult:
)
if "MIT" in content and "Permission is hereby granted" in content:
return CheckResult(name="LICENSE found", passed=True, points=3, max_points=3, message="LICENSE found (MIT)")
- return CheckResult(
- name="LICENSE found", passed=True, points=3, max_points=3, message="LICENSE found"
- )
+ return CheckResult(name="LICENSE found", passed=True, points=3, max_points=3, message="LICENSE found")
except OSError:
return CheckResult(
name="LICENSE found", passed=False, points=0, max_points=3, message="LICENSE exists but could not be read"
@@ -244,6 +245,138 @@ def check_no_dangerous_mcp(plugin_dir: Path) -> CheckResult:
)
+IGNORED_MCP_URL_CONTEXT = {"metadata", "description", "homepage", "website", "docs", "documentation"}
+MCP_URL_KEYS = {"url", "endpoint", "server_url"}
+
+
+def _collect_mcp_urls(node: object, urls: list[str], path: tuple[str, ...] = ()) -> None:
+ if isinstance(node, dict):
+ for key, value in node.items():
+ lowered_key = key.lower()
+ next_path = (*path, lowered_key)
+ if lowered_key in IGNORED_MCP_URL_CONTEXT:
+ continue
+ if isinstance(value, str) and lowered_key in MCP_URL_KEYS:
+ urls.append(value)
+ continue
+ if isinstance(value, (dict, list)):
+ _collect_mcp_urls(value, urls, next_path)
+ return
+ if isinstance(node, list):
+ for item in node:
+ _collect_mcp_urls(item, urls, path)
+
+
+def _extract_mcp_urls(payload: object) -> list[str]:
+ urls: list[str] = []
+ if isinstance(payload, dict):
+ targeted = False
+ for key in ("mcpServers", "servers"):
+ value = payload.get(key)
+ if isinstance(value, (dict, list)):
+ targeted = True
+ _collect_mcp_urls(value, urls, (key.lower(),))
+ if targeted:
+ return urls
+ _collect_mcp_urls(payload, urls)
+ return urls
+
+
+def _is_loopback_host(hostname: str | None) -> bool:
+ if hostname is None:
+ return False
+ if hostname == "localhost":
+ return True
+ try:
+ return ipaddress.ip_address(hostname).is_loopback
+ except ValueError:
+ return False
+
+
+def check_mcp_transport_security(plugin_dir: Path) -> CheckResult:
+ mcp_path = plugin_dir / ".mcp.json"
+ if not mcp_path.exists():
+ return CheckResult(
+ name="MCP remote transports are hardened",
+ passed=True,
+ points=0,
+ max_points=0,
+ message="No .mcp.json found, skipping transport hardening checks.",
+ applicable=False,
+ )
+
+ try:
+ payload = json.loads(mcp_path.read_text(encoding="utf-8"))
+ except Exception:
+ return CheckResult(
+ name="MCP remote transports are hardened",
+ passed=False,
+ points=0,
+ max_points=4,
+ message="Could not parse .mcp.json for transport URLs.",
+ findings=(
+ Finding(
+ rule_id="MCP_CONFIG_INVALID_JSON",
+ severity=Severity.MEDIUM,
+ category="security",
+ title="MCP configuration is not valid JSON",
+ description="The .mcp.json file exists but could not be parsed.",
+ remediation="Fix the .mcp.json syntax so transport settings can be validated.",
+ file_path=".mcp.json",
+ ),
+ ),
+ )
+
+ urls = _extract_mcp_urls(payload)
+ if not urls:
+ return CheckResult(
+ name="MCP remote transports are hardened",
+ passed=True,
+ points=0,
+ max_points=0,
+ message="No remote MCP URLs declared; stdio-only configuration is not applicable here.",
+ applicable=False,
+ )
+
+ issues = []
+ for url in urls:
+ parsed = urlparse(url)
+ if parsed.scheme == "https":
+ continue
+ if parsed.scheme == "http" and _is_loopback_host(parsed.hostname):
+ continue
+ issues.append(url)
+
+ if not issues:
+ return CheckResult(
+ name="MCP remote transports are hardened",
+ passed=True,
+ points=4,
+ max_points=4,
+ message="Remote MCP URLs use hardened transports or stay on loopback for local development.",
+ )
+
+ return CheckResult(
+ name="MCP remote transports are hardened",
+ passed=False,
+ points=0,
+ max_points=4,
+ message=f"Insecure MCP remote URLs detected: {', '.join(issues)}",
+ findings=tuple(
+ Finding(
+ rule_id="MCP_REMOTE_URL_INSECURE",
+ severity=Severity.HIGH,
+ category="security",
+ title="MCP remote transport uses an insecure URL",
+ description=f'The remote MCP endpoint "{url}" is not HTTPS or loopback-only HTTP.',
+ remediation="Use HTTPS for remote MCP servers and reserve plain HTTP for localhost development only.",
+ file_path=".mcp.json",
+ )
+ for url in issues
+ ),
+ )
+
+
def check_no_approval_bypass_defaults(plugin_dir: Path) -> CheckResult:
findings: list[str] = []
for file_path in _scan_all_files(plugin_dir):
@@ -294,5 +427,6 @@ def run_security_checks(plugin_dir: Path) -> tuple[CheckResult, ...]:
check_license(plugin_dir),
check_no_hardcoded_secrets(plugin_dir),
check_no_dangerous_mcp(plugin_dir),
+ check_mcp_transport_security(plugin_dir),
check_no_approval_bypass_defaults(plugin_dir),
)
diff --git a/src/codex_plugin_scanner/checks/skill_security.py b/src/codex_plugin_scanner/checks/skill_security.py
index c57e886..9cd272b 100644
--- a/src/codex_plugin_scanner/checks/skill_security.py
+++ b/src/codex_plugin_scanner/checks/skill_security.py
@@ -50,11 +50,7 @@ def _not_applicable_results(message: str) -> tuple[CheckResult, ...]:
def _elevated_findings(findings: tuple[Finding, ...]) -> tuple[Finding, ...]:
- return tuple(
- finding
- for finding in findings
- if SEVERITY_ORDER[finding.severity] >= SEVERITY_ORDER[Severity.MEDIUM]
- )
+ return tuple(finding for finding in findings if SEVERITY_ORDER[finding.severity] >= SEVERITY_ORDER[Severity.MEDIUM])
def _availability_check(summary: CiscoSkillScanSummary, mode: str) -> CheckResult:
diff --git a/src/codex_plugin_scanner/cli.py b/src/codex_plugin_scanner/cli.py
index 85e402e..b032509 100644
--- a/src/codex_plugin_scanner/cli.py
+++ b/src/codex_plugin_scanner/cli.py
@@ -11,7 +11,7 @@
from .reporting import format_markdown, format_sarif, should_fail_for_severity
from .scanner import scan_plugin
-__version__ = "1.1.0"
+__version__ = "1.2.0"
def format_text(result) -> str:
@@ -40,48 +40,6 @@ def format_text(result) -> str:
return "\n".join(lines)
-def _build_rich_text(result) -> str:
- """Build rich markup text from a scan result."""
- lines = [f"[bold cyan]🔗 Codex Plugin Scanner v{__version__}[/bold cyan]"]
- lines.append(f"Scanning: {result.plugin_dir}")
- lines.append("")
- for category in result.categories:
- cat_score = sum(c.points for c in category.checks)
- cat_max = sum(c.max_points for c in category.checks)
- lines.append(f"[bold yellow]── {category.name} ({cat_score}/{cat_max}) ──[/bold yellow]")
- for check in category.checks:
- icon = "✅" if check.passed else "⚠️"
- style = "[green]" if check.passed else "[red]"
- pts = f"[green]+{check.points}[/green]" if check.passed else "[red]+0[/red]"
- lines.append(f" {icon} {style}{check.name:<42}[/]{pts}")
- lines.append("")
- separator = "━" * 37
- grade = result.grade
- gc = {"A": "bold green", "B": "green", "C": "yellow", "D": "red", "F": "bold red"}.get(grade, "red")
- label = GRADE_LABELS.get(grade, "Unknown")
- lines += [
- f"[bold]{separator}[/bold]",
- f"Final Score: [bold]{result.score}[/bold]/100 ([{gc}]{grade} - {label}[/{gc}])",
- f"[bold]{separator}[/bold]",
- ]
- return "\n".join(lines)
-
-
-def format_text(result) -> str:
- """Format scan result as terminal output. Returns plain text string."""
- plain = _build_plain_text(result)
-
- try:
- from rich.console import Console
-
- console = Console()
- console.print(_build_rich_text(result))
- except ImportError:
- print(plain)
-
- return plain
-
-
def format_json(result) -> str:
"""Format scan result as JSON."""
return render_json(result)
diff --git a/src/codex_plugin_scanner/integrations/cisco_skill_scanner.py b/src/codex_plugin_scanner/integrations/cisco_skill_scanner.py
index 5daf3c2..e6b7fb1 100644
--- a/src/codex_plugin_scanner/integrations/cisco_skill_scanner.py
+++ b/src/codex_plugin_scanner/integrations/cisco_skill_scanner.py
@@ -3,13 +3,13 @@
from __future__ import annotations
from dataclasses import dataclass
-from enum import StrEnum
+from enum import Enum
from pathlib import Path
from ..models import Finding, Severity, severity_from_value
-class CiscoIntegrationStatus(StrEnum):
+class CiscoIntegrationStatus(str, Enum):
"""State of the Cisco skill-scanner integration."""
ENABLED = "enabled"
diff --git a/src/codex_plugin_scanner/models.py b/src/codex_plugin_scanner/models.py
index e5a6c47..8e43dd9 100644
--- a/src/codex_plugin_scanner/models.py
+++ b/src/codex_plugin_scanner/models.py
@@ -3,10 +3,10 @@
from __future__ import annotations
from dataclasses import dataclass, field
-from enum import StrEnum
+from enum import Enum
-class Severity(StrEnum):
+class Severity(str, Enum):
"""Severity levels for actionable findings."""
CRITICAL = "critical"
diff --git a/src/codex_plugin_scanner/reporting.py b/src/codex_plugin_scanner/reporting.py
index 790950e..a6216a0 100644
--- a/src/codex_plugin_scanner/reporting.py
+++ b/src/codex_plugin_scanner/reporting.py
@@ -107,9 +107,7 @@ def format_markdown(result: ScanResult) -> str:
for category in result.categories:
category_score = sum(check.points for check in category.checks)
category_max = sum(check.max_points for check in category.checks)
- lines.append(
- f"- **{category.name}**: {category_score}/{category_max}"
- )
+ lines.append(f"- **{category.name}**: {category_score}/{category_max}")
top_findings = _sorted_findings(result.findings)[:10]
lines += ["", "## Top Findings", ""]
@@ -209,7 +207,4 @@ def should_fail_for_severity(result: ScanResult, threshold: str | None) -> bool:
if not threshold or threshold.lower() == "none":
return False
threshold_severity = severity_from_value(threshold)
- return any(
- SEVERITY_ORDER[finding.severity] >= SEVERITY_ORDER[threshold_severity]
- for finding in result.findings
- )
+ return any(SEVERITY_ORDER[finding.severity] >= SEVERITY_ORDER[threshold_severity] for finding in result.findings)
diff --git a/src/codex_plugin_scanner/scanner.py b/src/codex_plugin_scanner/scanner.py
index 0acff1a..2311081 100644
--- a/src/codex_plugin_scanner/scanner.py
+++ b/src/codex_plugin_scanner/scanner.py
@@ -9,6 +9,7 @@
from .checks.code_quality import run_code_quality_checks
from .checks.manifest import run_manifest_checks
from .checks.marketplace import run_marketplace_checks
+from .checks.operational_security import run_operational_security_checks
from .checks.security import run_security_checks
from .checks.skill_security import resolve_skill_security_context, run_skill_security_checks
from .integrations.cisco_skill_scanner import CiscoIntegrationStatus
@@ -74,6 +75,7 @@ def scan_plugin(plugin_dir: str | Path, options: ScanOptions | None = None) -> S
categories: list[CategoryResult] = [
CategoryResult(name="Manifest Validation", checks=run_manifest_checks(resolved)),
CategoryResult(name="Security", checks=run_security_checks(resolved)),
+ CategoryResult(name="Operational Security", checks=run_operational_security_checks(resolved)),
CategoryResult(name="Best Practices", checks=run_best_practice_checks(resolved)),
CategoryResult(name="Marketplace", checks=run_marketplace_checks(resolved)),
CategoryResult(
diff --git a/tests/fixtures/good-plugin/.codex-plugin/plugin.json b/tests/fixtures/good-plugin/.codex-plugin/plugin.json
index 3247daf..6c1b808 100644
--- a/tests/fixtures/good-plugin/.codex-plugin/plugin.json
+++ b/tests/fixtures/good-plugin/.codex-plugin/plugin.json
@@ -12,7 +12,22 @@
"keywords": ["codex", "plugin", "security"],
"interface": {
"type": "cli",
- "displayName": "Example Good Plugin"
+ "displayName": "Example Good Plugin",
+ "shortDescription": "Reusable security-first plugin fixture",
+ "longDescription": "A well-structured example plugin that exercises publishable interface metadata.",
+ "developerName": "Hashgraph Online",
+ "category": "Developer Tools",
+ "capabilities": ["Read", "Write"],
+ "websiteURL": "https://github.com/hashgraph-online/codex-plugin-scanner",
+ "privacyPolicyURL": "https://example.com/privacy",
+ "termsOfServiceURL": "https://example.com/terms",
+ "defaultPrompt": [
+ "Use Example Good Plugin to scan a plugin before publishing."
+ ],
+ "brandColor": "#10A37F",
+ "composerIcon": "./assets/icon.svg",
+ "logo": "./assets/logo.svg",
+ "screenshots": ["./assets/screenshot.svg"]
},
"skills": "skills"
}
diff --git a/tests/fixtures/good-plugin/assets/icon.svg b/tests/fixtures/good-plugin/assets/icon.svg
new file mode 100644
index 0000000..aa5ae61
--- /dev/null
+++ b/tests/fixtures/good-plugin/assets/icon.svg
@@ -0,0 +1,4 @@
+
diff --git a/tests/fixtures/good-plugin/assets/logo.svg b/tests/fixtures/good-plugin/assets/logo.svg
new file mode 100644
index 0000000..57b5a9d
--- /dev/null
+++ b/tests/fixtures/good-plugin/assets/logo.svg
@@ -0,0 +1,4 @@
+
diff --git a/tests/fixtures/good-plugin/assets/screenshot.svg b/tests/fixtures/good-plugin/assets/screenshot.svg
new file mode 100644
index 0000000..25e0f2e
--- /dev/null
+++ b/tests/fixtures/good-plugin/assets/screenshot.svg
@@ -0,0 +1,6 @@
+
diff --git a/tests/test_cli.py b/tests/test_cli.py
index 2dd3a81..c11c871 100644
--- a/tests/test_cli.py
+++ b/tests/test_cli.py
@@ -17,7 +17,7 @@ def test_valid_json_output(self):
parsed = json.loads(output)
assert parsed["score"] == 100
assert parsed["grade"] == "A"
- assert len(parsed["categories"]) == 6
+ assert len(parsed["categories"]) == 7
assert "timestamp" in parsed
assert "pluginDir" in parsed
assert "summary" in parsed
diff --git a/tests/test_integration.py b/tests/test_integration.py
index ec09f6e..8a5e012 100644
--- a/tests/test_integration.py
+++ b/tests/test_integration.py
@@ -4,6 +4,7 @@
from pathlib import Path
from codex_plugin_scanner.cli import format_json, format_text
+from codex_plugin_scanner.models import ScanOptions
from codex_plugin_scanner.scanner import scan_plugin
FIXTURES = Path(__file__).parent / "fixtures"
@@ -40,9 +41,9 @@ def test_json_output_is_parseable():
output = format_json(result)
parsed = json.loads(output)
assert parsed["score"] == 100
- assert len(parsed["categories"]) == 6
+ assert len(parsed["categories"]) == 7
total_checks = sum(len(c["checks"]) for c in parsed["categories"])
- assert total_checks == 25
+ assert total_checks == 33
def test_text_output_is_readable():
@@ -51,6 +52,7 @@ def test_text_output_is_readable():
# Should have all category headers
assert "Manifest Validation" in output
assert "Security" in output
+ assert "Operational Security" in output
assert "Best Practices" in output
assert "Marketplace" in output
assert "Skill Security" in output
@@ -69,9 +71,9 @@ def test_all_check_names_unique():
def test_max_points_total_100():
- result = scan_plugin(FIXTURES / "good-plugin")
+ result = scan_plugin(FIXTURES / "good-plugin", ScanOptions(cisco_skill_scan="off"))
total_max = sum(c.max_points for cat in result.categories for c in cat.checks)
- assert total_max == 81
+ assert total_max == 72
def test_mit_license_plugin():
@@ -96,7 +98,7 @@ def test_malformed_json_manifest():
result = scan_plugin(FIXTURES / "malformed-json")
assert result.score < 100
manifest_cat = next(c for c in result.categories if c.name == "Manifest Validation")
- assert sum(c.points for c in manifest_cat.checks) < 25
+ assert sum(c.points for c in manifest_cat.checks) < 31
def test_public_api_import():
diff --git a/tests/test_live_cisco_smoke.py b/tests/test_live_cisco_smoke.py
new file mode 100644
index 0000000..2e4d6b7
--- /dev/null
+++ b/tests/test_live_cisco_smoke.py
@@ -0,0 +1,32 @@
+"""Live smoke coverage for the real Cisco skill-scanner integration."""
+
+from __future__ import annotations
+
+import importlib.util
+from pathlib import Path
+
+import pytest
+
+from codex_plugin_scanner.integrations.cisco_skill_scanner import CiscoIntegrationStatus
+from codex_plugin_scanner.models import ScanOptions
+from codex_plugin_scanner.scanner import scan_plugin
+
+FIXTURES = Path(__file__).parent / "fixtures"
+
+pytestmark = pytest.mark.skipif(
+ importlib.util.find_spec("skill_scanner") is None,
+ reason="cisco-ai-skill-scanner is not installed in this environment",
+)
+
+
+def test_live_cisco_scan_on_good_plugin() -> None:
+ result = scan_plugin(FIXTURES / "good-plugin", ScanOptions(cisco_skill_scan="on", cisco_policy="balanced"))
+
+ integration = next(item for item in result.integrations if item.name == "cisco-skill-scanner")
+ skill_security = next(category for category in result.categories if category.name == "Skill Security")
+
+ assert integration.status == CiscoIntegrationStatus.ENABLED
+ assert integration.findings_count == 0
+ assert sum(check.points for check in skill_security.checks) == sum(
+ check.max_points for check in skill_security.checks
+ )
diff --git a/tests/test_manifest.py b/tests/test_manifest.py
index 047cf45..de48a3c 100644
--- a/tests/test_manifest.py
+++ b/tests/test_manifest.py
@@ -4,6 +4,8 @@
from pathlib import Path
from codex_plugin_scanner.checks.manifest import (
+ check_interface_assets,
+ check_interface_metadata,
check_kebab_case,
check_plugin_json_exists,
check_required_fields,
@@ -99,10 +101,56 @@ def test_fails_for_bad_name(self):
assert not r.passed and r.points == 0
+class TestInterfaceChecks:
+ def test_interface_metadata_passes_for_good_plugin(self):
+ r = check_interface_metadata(FIXTURES / "good-plugin")
+ assert r.passed and r.points == 3
+
+ def test_interface_metadata_not_applicable_when_missing(self):
+ r = check_interface_metadata(FIXTURES / "minimal-plugin")
+ assert r.passed and r.points == 0
+ assert not r.applicable
+
+ def test_interface_assets_pass_for_good_plugin(self):
+ r = check_interface_assets(FIXTURES / "good-plugin")
+ assert r.passed and r.points == 3
+
+ def test_interface_assets_fail_for_unsafe_values(self):
+ with tempfile.TemporaryDirectory() as tmpdir:
+ plugin_dir = Path(tmpdir)
+ manifest_dir = plugin_dir / ".codex-plugin"
+ manifest_dir.mkdir(parents=True)
+ (manifest_dir / "plugin.json").write_text(
+ """
+ {
+ "name": "interface-bad",
+ "version": "1.0.0",
+ "description": "bad interface metadata",
+ "interface": {
+ "type": "cli",
+ "displayName": "Bad",
+ "shortDescription": "Bad",
+ "longDescription": "Bad",
+ "developerName": "Bad",
+ "category": "Security",
+ "capabilities": ["Read"],
+ "websiteURL": "http://example.com",
+ "privacyPolicyURL": "https://example.com/privacy",
+ "termsOfServiceURL": "https://example.com/terms",
+ "composerIcon": "../icon.svg"
+ }
+ }
+ """,
+ encoding="utf-8",
+ )
+ r = check_interface_assets(plugin_dir)
+ assert not r.passed and r.points == 0
+
+
class TestRunManifestChecks:
- def test_good_plugin_gets_25(self):
+ def test_good_plugin_gets_31(self):
results = run_manifest_checks(FIXTURES / "good-plugin")
- assert sum(c.points for c in results) == 25
+ assert sum(c.points for c in results) == 31
assert all(c.passed for c in results)
def test_bad_plugin_fails_version_and_name(self):
@@ -126,4 +174,4 @@ def test_empty_dir_gets_0(self):
def test_returns_tuple_of_correct_length(self):
results = run_manifest_checks(FIXTURES / "good-plugin")
assert isinstance(results, tuple)
- assert len(results) == 7
+ assert len(results) == 9
diff --git a/tests/test_operational_security.py b/tests/test_operational_security.py
new file mode 100644
index 0000000..0043318
--- /dev/null
+++ b/tests/test_operational_security.py
@@ -0,0 +1,137 @@
+"""Tests for operational-security checks."""
+
+import tempfile
+from pathlib import Path
+
+from codex_plugin_scanner.checks.operational_security import (
+ check_dependabot_configured,
+ check_dependency_lockfiles,
+ run_operational_security_checks,
+)
+
+
+def _write_file(path: Path, content: str) -> None:
+ path.parent.mkdir(parents=True, exist_ok=True)
+ path.write_text(content, encoding="utf-8")
+
+
+def test_operational_checks_not_applicable_without_surface():
+ with tempfile.TemporaryDirectory() as tmpdir:
+ results = run_operational_security_checks(Path(tmpdir))
+ assert sum(check.points for check in results) == 0
+ assert sum(check.max_points for check in results) == 0
+
+
+def test_operational_checks_pass_for_hardened_workflows():
+ with tempfile.TemporaryDirectory() as tmpdir:
+ plugin_dir = Path(tmpdir)
+ _write_file(
+ plugin_dir / ".github" / "workflows" / "ci.yml",
+ """
+ on:
+ pull_request:
+ push:
+ branches: [main]
+ jobs:
+ test:
+ permissions:
+ contents: read
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@0123456789abcdef0123456789abcdef01234567
+ """,
+ )
+ _write_file(
+ plugin_dir / ".github" / "dependabot.yml",
+ """
+ version: 2
+ updates:
+ - package-ecosystem: "github-actions"
+ directory: "/"
+ schedule:
+ interval: "weekly"
+ """,
+ )
+ _write_file(plugin_dir / "package.json", '{"name": "fixture", "version": "1.0.0"}')
+ _write_file(plugin_dir / "package-lock.json", '{"name": "fixture", "lockfileVersion": 3}')
+
+ results = run_operational_security_checks(plugin_dir)
+
+ assert all(check.passed for check in results)
+ assert sum(check.points for check in results) == 20
+ assert sum(check.max_points for check in results) == 20
+
+
+def test_operational_checks_flag_common_workflow_risks():
+ with tempfile.TemporaryDirectory() as tmpdir:
+ plugin_dir = Path(tmpdir)
+ _write_file(
+ plugin_dir / ".github" / "workflows" / "ci.yml",
+ """
+ on:
+ pull_request_target:
+ permissions: write-all
+ jobs:
+ test:
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v5
+ with:
+ ref: ${{ github.event.pull_request.head.sha }}
+ """,
+ )
+ _write_file(plugin_dir / "package.json", '{"name": "fixture", "version": "1.0.0"}')
+
+ results = run_operational_security_checks(plugin_dir)
+ names = {check.name: check for check in results}
+
+ assert names["Third-party GitHub Actions pinned to SHAs"].passed is False
+ assert names["No write-all GitHub Actions permissions"].passed is False
+ assert names["No privileged untrusted checkout patterns"].passed is False
+ assert names["Dependabot configured for automation surfaces"].passed is False
+ assert names["Dependency manifests have lockfiles"].passed is False
+
+
+def test_dependabot_accepts_single_quoted_github_actions_ecosystem():
+ with tempfile.TemporaryDirectory() as tmpdir:
+ plugin_dir = Path(tmpdir)
+ _write_file(
+ plugin_dir / ".github" / "workflows" / "ci.yml",
+ """
+ on:
+ push:
+ jobs:
+ test:
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@0123456789abcdef0123456789abcdef01234567
+ """,
+ )
+ _write_file(
+ plugin_dir / ".github" / "dependabot.yml",
+ """
+ version: 2
+ updates:
+ - package-ecosystem: 'github-actions'
+ directory: "/"
+ schedule:
+ interval: weekly
+ """,
+ )
+
+ result = check_dependabot_configured(plugin_dir)
+
+ assert result.passed is True
+ assert result.points == 3
+
+
+def test_dependency_lockfiles_accept_pinned_requirements_with_pyproject():
+ with tempfile.TemporaryDirectory() as tmpdir:
+ plugin_dir = Path(tmpdir)
+ _write_file(plugin_dir / "pyproject.toml", '[project]\nname = "fixture"\nversion = "1.0.0"\n')
+ _write_file(plugin_dir / "requirements.txt", "requests==2.32.3\nrich==13.9.4\n")
+
+ result = check_dependency_lockfiles(plugin_dir)
+
+ assert result.passed is True
+ assert result.points == 3
diff --git a/tests/test_scanner.py b/tests/test_scanner.py
index d5821f7..d22ab4f 100644
--- a/tests/test_scanner.py
+++ b/tests/test_scanner.py
@@ -98,10 +98,11 @@ def test_accepts_pathlib_and_string(self):
-    def test_returns_6_categories(self):
+    def test_returns_7_categories(self):
result = scan_plugin(FIXTURES / "good-plugin")
- assert len(result.categories) == 6
+ assert len(result.categories) == 7
names = [c.name for c in result.categories]
assert "Manifest Validation" in names
assert "Security" in names
+ assert "Operational Security" in names
assert "Best Practices" in names
assert "Marketplace" in names
assert "Skill Security" in names
diff --git a/tests/test_security.py b/tests/test_security.py
index ce3cb73..93c791e 100644
--- a/tests/test_security.py
+++ b/tests/test_security.py
@@ -6,6 +6,7 @@
from codex_plugin_scanner.checks.security import (
_scan_all_files,
check_license,
+ check_mcp_transport_security,
check_no_dangerous_mcp,
check_no_hardcoded_secrets,
check_security_md,
@@ -78,6 +79,59 @@ def test_passes_when_mcp_is_safe(self):
assert r.passed and r.points == 4
+class TestMcpTransportSecurity:
+ def test_not_applicable_for_stdio_only_configs(self):
+ with tempfile.TemporaryDirectory() as tmpdir:
+ mcp = Path(tmpdir) / ".mcp.json"
+ mcp.write_text('{"mcpServers":{"safe":{"command":"echo","args":["hello"]}}}', encoding="utf-8")
+ r = check_mcp_transport_security(Path(tmpdir))
+ assert r.passed and r.points == 0
+ assert not r.applicable
+
+ def test_passes_for_https_remote_transport(self):
+ with tempfile.TemporaryDirectory() as tmpdir:
+ mcp = Path(tmpdir) / ".mcp.json"
+ mcp.write_text('{"mcpServers":{"safe":{"url":"https://example.com/mcp"}}}', encoding="utf-8")
+ r = check_mcp_transport_security(Path(tmpdir))
+ assert r.passed and r.points == 4
+
+ def test_fails_for_insecure_remote_transport(self):
+ with tempfile.TemporaryDirectory() as tmpdir:
+ mcp = Path(tmpdir) / ".mcp.json"
+ mcp.write_text('{"mcpServers":{"unsafe":{"url":"http://0.0.0.0:8080/mcp"}}}', encoding="utf-8")
+ r = check_mcp_transport_security(Path(tmpdir))
+ assert not r.passed and r.points == 0
+
+ def test_passes_for_loopback_remote_transport(self):
+ with tempfile.TemporaryDirectory() as tmpdir:
+ mcp = Path(tmpdir) / ".mcp.json"
+ mcp.write_text('{"mcpServers":{"safe":{"url":"http://127.0.0.2:8080/mcp"}}}', encoding="utf-8")
+ r = check_mcp_transport_security(Path(tmpdir))
+ assert r.passed and r.points == 4
+
+ def test_ignores_metadata_urls_when_collecting_transport_endpoints(self):
+ with tempfile.TemporaryDirectory() as tmpdir:
+ mcp = Path(tmpdir) / ".mcp.json"
+ mcp.write_text(
+ (
+ '{"mcpServers":{"safe":{"command":"echo","metadata":{"homepage":{"url":"http://example.com"}}}}}'
+ ),
+ encoding="utf-8",
+ )
+ r = check_mcp_transport_security(Path(tmpdir))
+ assert r.passed and r.points == 0
+ assert not r.applicable
+
+ def test_fails_for_invalid_mcp_json(self):
+ with tempfile.TemporaryDirectory() as tmpdir:
+ mcp = Path(tmpdir) / ".mcp.json"
+ mcp.write_text("{invalid", encoding="utf-8")
+ r = check_mcp_transport_security(Path(tmpdir))
+ assert not r.passed and r.points == 0
+ assert r.max_points == 4
+ assert r.findings[0].rule_id == "MCP_CONFIG_INVALID_JSON"
+
+
class TestScanAllFiles:
def test_skips_excluded_dirs(self):
files = _scan_all_files(FIXTURES / "good-plugin")
@@ -120,4 +174,4 @@ def test_minimal_plugin_partial(self):
def test_returns_tuple_of_correct_length(self):
results = run_security_checks(FIXTURES / "good-plugin")
assert isinstance(results, tuple)
- assert len(results) == 5
+ assert len(results) == 6