diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml
index 40a4044..1eac46a 100644
--- a/.github/workflows/lint.yml
+++ b/.github/workflows/lint.yml
@@ -13,5 +13,5 @@ jobs:
- uses: astral-sh/setup-uv@v5
- run: uv python install 3.10
- run: uv sync --extra all
- - run: uv run ruff check .
- - run: uv run ruff format --check .
+ - run: uvx ruff@0.12.2 check .
+ - run: uvx ruff@0.12.2 format --check .
diff --git a/.gitignore b/.gitignore
index 39fb274..309a193 100644
--- a/.gitignore
+++ b/.gitignore
@@ -23,3 +23,6 @@ Thumbs.db
# Environment
.env
*.env.local
+
+# uv
+uv.lock
diff --git a/evolve_server/__init__.py b/evolve_server/__init__.py
index d0197bc..cd6ddd9 100644
--- a/evolve_server/__init__.py
+++ b/evolve_server/__init__.py
@@ -28,7 +28,7 @@
"""
from .core.config import EvolveServerConfig
-from .core.constants import DecisionAction, FailureType, FAILURE_LABELS, NO_SKILL_KEY
+from .core.constants import FAILURE_LABELS, NO_SKILL_KEY, DecisionAction, FailureType
from .core.llm_client import AsyncLLMClient
from .core.skill_registry import SkillIDRegistry
from .engines.agent import AgentEvolveServer
diff --git a/evolve_server/__main__.py b/evolve_server/__main__.py
index a6d6f1c..0bbd88c 100644
--- a/evolve_server/__main__.py
+++ b/evolve_server/__main__.py
@@ -100,14 +100,12 @@ def build_parser() -> argparse.ArgumentParser:
help="Evolution engine: fixed workflow or OpenClaw agent.",
)
parser.add_argument("--once", action="store_true", help="Run once and exit")
- parser.add_argument("--mock", action="store_true",
- help="Use local mock/ directory instead of remote object storage")
- parser.add_argument("--mock-root", type=str, default=None,
- help="Custom root directory for mock mode")
- parser.add_argument("--port", type=int, default=None,
- help="HTTP trigger port (enables HTTP server)")
- parser.add_argument("--interval", type=int, default=None,
- help="Periodic interval in seconds")
+ parser.add_argument(
+ "--mock", action="store_true", help="Use local mock/ directory instead of remote object storage"
+ )
+ parser.add_argument("--mock-root", type=str, default=None, help="Custom root directory for mock mode")
+ parser.add_argument("--port", type=int, default=None, help="HTTP trigger port (enables HTTP server)")
+ parser.add_argument("--interval", type=int, default=None, help="Periodic interval in seconds")
parser.add_argument(
"--publish-mode",
choices=["direct", "validated"],
@@ -179,23 +177,29 @@ def build_parser() -> argparse.ArgumentParser:
default=None,
help="Use a local directory as the evolve backend root",
)
- parser.add_argument("--use-skillclaw-config", action="store_true",
- help="Load shared storage and LLM settings from skillclaw's config store")
- parser.add_argument("--openclaw-bin", type=str, default=None,
- help="Path to openclaw executable for --engine agent")
- parser.add_argument("--openclaw-home", type=str, default=None,
- help="OPENCLAW_HOME directory for --engine agent")
+ parser.add_argument(
+ "--use-skillclaw-config",
+ action="store_true",
+ help="Load shared storage and LLM settings from skillclaw's config store",
+ )
+ parser.add_argument("--openclaw-bin", type=str, default=None, help="Path to openclaw executable for --engine agent")
+ parser.add_argument("--openclaw-home", type=str, default=None, help="OPENCLAW_HOME directory for --engine agent")
fresh_group = parser.add_mutually_exclusive_group()
- fresh_group.add_argument("--fresh", dest="fresh", action="store_true", default=None,
- help="Wipe agent state each cycle (agent engine only)")
- fresh_group.add_argument("--no-fresh", dest="fresh", action="store_false",
- help="Preserve agent state across cycles (agent engine only)")
- parser.add_argument("--agent-timeout", type=int, default=None,
- help="Agent execution timeout in seconds")
- parser.add_argument("--workspace-root", type=str, default=None,
- help="Workspace directory for agent file operations")
- parser.add_argument("--agents-md", type=str, default=None,
- help="Custom EVOLVE_AGENTS.md path for agent engine")
+ fresh_group.add_argument(
+ "--fresh",
+ dest="fresh",
+ action="store_true",
+ default=None,
+ help="Wipe agent state each cycle (agent engine only)",
+ )
+ fresh_group.add_argument(
+ "--no-fresh", dest="fresh", action="store_false", help="Preserve agent state across cycles (agent engine only)"
+ )
+ parser.add_argument("--agent-timeout", type=int, default=None, help="Agent execution timeout in seconds")
+ parser.add_argument(
+ "--workspace-root", type=str, default=None, help="Workspace directory for agent file operations"
+ )
+ parser.add_argument("--agents-md", type=str, default=None, help="Custom EVOLVE_AGENTS.md path for agent engine")
parser.add_argument("--verbose", "-v", action="store_true")
return parser
@@ -259,7 +263,10 @@ def main() -> None:
async def _run_with_http():
uv_config = uvicorn.Config(
- app, host="0.0.0.0", port=config.http_port, log_level="info",
+ app,
+ host="0.0.0.0",
+ port=config.http_port,
+ log_level="info",
)
uv_server = uvicorn.Server(uv_config)
await asyncio.gather(server.run_periodic(), uv_server.serve())
diff --git a/evolve_server/core/config.py b/evolve_server/core/config.py
index c64037f..11f28ec 100644
--- a/evolve_server/core/config.py
+++ b/evolve_server/core/config.py
@@ -45,7 +45,15 @@ def _infer_storage_backend(endpoint: str, bucket: str, local_root: str) -> str:
return backend
if local_root:
return "local"
- if any(os.environ.get(name) for name in ("EVOLVE_OSS_ENDPOINT", "EVOLVE_OSS_BUCKET", "EVOLVE_OSS_KEY_ID", "EVOLVE_OSS_KEY_SECRET")):
+ if any(
+ os.environ.get(name)
+ for name in (
+ "EVOLVE_OSS_ENDPOINT",
+ "EVOLVE_OSS_BUCKET",
+ "EVOLVE_OSS_KEY_ID",
+ "EVOLVE_OSS_KEY_SECRET",
+ )
+ ):
return "oss"
if endpoint or bucket:
return "s3"
@@ -232,19 +240,11 @@ def from_skillclaw_config(cls, config) -> "EvolveServerConfig":
engine = _first_env("EVOLVE_ENGINE", default="workflow").strip().lower() or "workflow"
storage_backend = str(getattr(config, "sharing_backend", "") or "").strip().lower()
storage_endpoint = str(
- getattr(config, "sharing_endpoint", "")
- or getattr(config, "sharing_oss_endpoint", "")
- or ""
- )
- storage_bucket = str(
- getattr(config, "sharing_bucket", "")
- or getattr(config, "sharing_oss_bucket", "")
- or ""
+ getattr(config, "sharing_endpoint", "") or getattr(config, "sharing_oss_endpoint", "") or ""
)
+ storage_bucket = str(getattr(config, "sharing_bucket", "") or getattr(config, "sharing_oss_bucket", "") or "")
storage_access_key_id = str(
- getattr(config, "sharing_access_key_id", "")
- or getattr(config, "sharing_oss_access_key_id", "")
- or ""
+ getattr(config, "sharing_access_key_id", "") or getattr(config, "sharing_oss_access_key_id", "") or ""
)
storage_secret_access_key = str(
getattr(config, "sharing_secret_access_key", "")
@@ -281,7 +281,8 @@ def from_skillclaw_config(cls, config) -> "EvolveServerConfig":
return cls(
engine=engine,
- storage_backend=storage_backend or ("local" if local_root else "s3" if (storage_bucket or storage_endpoint) else "oss"),
+ storage_backend=storage_backend
+ or ("local" if local_root else "s3" if (storage_bucket or storage_endpoint) else "oss"),
storage_endpoint=storage_endpoint,
storage_bucket=storage_bucket,
storage_access_key_id=storage_access_key_id,
diff --git a/evolve_server/core/constants.py b/evolve_server/core/constants.py
index 3246361..7112c28 100644
--- a/evolve_server/core/constants.py
+++ b/evolve_server/core/constants.py
@@ -12,6 +12,7 @@
class FailureType(IntEnum):
"""Five-way failure taxonomy for a bad turn."""
+
SKILL_CONTENT_STALE = 1
SKILL_MISSELECT = 2
SKILL_GAP = 3
@@ -33,6 +34,7 @@ class FailureType(IntEnum):
class DecisionAction:
"""Allowed evolution-decision action identifiers."""
+
CREATE = "create_skill"
IMPROVE = "improve_skill"
OPTIMIZE_DESC = "optimize_description"
diff --git a/evolve_server/core/llm_client.py b/evolve_server/core/llm_client.py
index de157a6..362f819 100644
--- a/evolve_server/core/llm_client.py
+++ b/evolve_server/core/llm_client.py
@@ -31,8 +31,8 @@ def __init__(
max_tokens: int = 100000,
temperature: float = 0.4,
) -> None:
- from openai import OpenAI
import httpx
+ from openai import OpenAI
self._client = OpenAI(
api_key=api_key or os.environ.get("OPENAI_API_KEY", ""),
@@ -58,7 +58,8 @@ async def chat(self, messages: list[dict[str, str]], **kwargs: Any) -> str:
for attempt in range(max_retries):
try:
resp = await asyncio.to_thread(
- self._client.chat.completions.create, **merged,
+ self._client.chat.completions.create,
+ **merged,
)
return resp.choices[0].message.content or ""
except Exception as exc:
@@ -71,13 +72,15 @@ async def chat(self, messages: list[dict[str, str]], **kwargs: Any) -> str:
return await self._chat_via_stream(merged)
if attempt < max_retries - 1:
import random
- wait = min(2 ** attempt + random.uniform(0, 1), 30)
+
+ wait = min(2**attempt + random.uniform(0, 1), 30)
await asyncio.sleep(wait)
continue
raise
async def _chat_via_stream(self, body: dict[str, Any]) -> str:
import json
+
import httpx
headers: dict[str, str] = {}
diff --git a/evolve_server/core/skill_registry.py b/evolve_server/core/skill_registry.py
index 4c4d539..6633d65 100644
--- a/evolve_server/core/skill_registry.py
+++ b/evolve_server/core/skill_registry.py
@@ -121,12 +121,14 @@ def record_update(
entry["version"] = new_version
entry["content_sha"] = content_sha
history: list = entry.setdefault("history", [])
- history.append({
- "version": new_version,
- "content_sha": content_sha,
- "timestamp": datetime.now(timezone.utc).isoformat(),
- "action": action,
- })
+ history.append(
+ {
+ "version": new_version,
+ "content_sha": content_sha,
+ "timestamp": datetime.now(timezone.utc).isoformat(),
+ "action": action,
+ }
+ )
if len(history) > 20:
entry["history"] = history[-20:]
diff --git a/evolve_server/core/utils.py b/evolve_server/core/utils.py
index 82bcbad..c37c3a8 100644
--- a/evolve_server/core/utils.py
+++ b/evolve_server/core/utils.py
@@ -8,11 +8,11 @@
import re
from typing import Any, Optional
-
# ------------------------------------------------------------------ #
# LLM output parsing #
# ------------------------------------------------------------------ #
+
def parse_single_skill(text: str) -> Optional[dict]:
"""Extract a single skill JSON object from LLM output."""
clean = re.sub(r"```(?:json)?\s*", "", text.strip()).strip().rstrip("`")
@@ -99,6 +99,7 @@ def compact_tool_observations(
# SKILL.md rendering #
# ------------------------------------------------------------------ #
+
def build_skill_md(skill: dict) -> str:
"""Render a skill dict into SKILL.md content (with YAML frontmatter)."""
name = skill.get("name", "unknown")
@@ -106,7 +107,7 @@ def build_skill_md(skill: dict) -> str:
category = skill.get("category", "general")
content = skill.get("content", "")
- needs_quoting = any(c in description for c in ':{}[],"\'#&*!|>%@`\n')
+ needs_quoting = any(c in description for c in ":{}[],\"'#&*!|>%@`\n")
if needs_quoting:
escaped = description.replace("\\", "\\\\").replace('"', '\\"').replace("\n", "\\n")
desc_line = f'description: "{escaped}"'
diff --git a/evolve_server/engines/agent.py b/evolve_server/engines/agent.py
index d5bae36..5c7120b 100644
--- a/evolve_server/engines/agent.py
+++ b/evolve_server/engines/agent.py
@@ -15,8 +15,18 @@
from pathlib import Path
from typing import Any
+from skillclaw.object_store import build_object_store
+
+from ..core.config import EvolveServerConfig
from ..core.constants import SLUG_RE
from ..core.llm_client import AsyncLLMClient
+from ..core.skill_registry import SkillIDRegistry
+from ..core.utils import build_skill_md
+from ..pipeline.summarizer import (
+ _extract_session_metadata,
+ build_session_trajectory,
+ summarize_sessions_parallel,
+)
from ..storage.mock_bucket import LocalBucket
from ..storage.oss_helpers import (
delete_session_keys,
@@ -26,19 +36,9 @@
read_json_object,
save_manifest,
)
-from ..core.skill_registry import SkillIDRegistry
-from ..pipeline.summarizer import (
- _extract_session_metadata,
- build_session_trajectory,
- summarize_sessions_parallel,
-)
-from ..core.utils import build_skill_md
-from skillclaw.object_store import build_object_store
-
+from .agent_workspace import AgentWorkspace
from .agents_md import load_agents_md
-from ..core.config import EvolveServerConfig
from .openclaw_runner import OpenClawRunner
-from .agent_workspace import AgentWorkspace
logger = logging.getLogger(__name__)
@@ -123,7 +123,7 @@ async def chat(self, messages: list[dict[str, str]], **kwargs: Any) -> str:
return "".join(parts)
except Exception:
if attempt < max_retries - 1:
- wait = min(2 ** attempt + random.uniform(0, 1), 30)
+ wait = min(2**attempt + random.uniform(0, 1), 30)
await asyncio.sleep(wait)
continue
raise
@@ -264,7 +264,8 @@ async def _drain_sessions(self) -> tuple[list[dict], list[str]]:
consumed_keys.append(key)
logger.info(
"[AgentEvolveServer] drained %d session(s) (%d keys found)",
- len(sessions), len(keys),
+ len(sessions),
+ len(keys),
)
return sessions, consumed_keys
@@ -312,7 +313,10 @@ def _upload_skill(self, skill: dict, action: str = "create") -> None:
save_manifest(self._bucket, self._prefix, manifest)
logger.info(
"[AgentEvolveServer] uploaded skill %s (id=%s, v%d) to %s",
- name, skill_id, version, object_key,
+ name,
+ skill_id,
+ version,
+ object_key,
)
# ================================================================= #
@@ -338,7 +342,9 @@ async def run_once(self) -> dict:
if not sessions:
logger.info("[AgentEvolveServer] queue empty — nothing to process")
return {
- "sessions": 0, "skills_evolved": 0, "agent_returncode": None,
+ "sessions": 0,
+ "skills_evolved": 0,
+ "agent_returncode": None,
}
# ---- 1.5. summarize sessions -------------------------------- #
@@ -350,7 +356,8 @@ async def run_once(self) -> dict:
# the agent well within the context-window budget.
await self._summarize_sessions(sessions)
logger.info(
- "[AgentEvolveServer] summarized %d session(s)", len(sessions),
+ "[AgentEvolveServer] summarized %d session(s)",
+ len(sessions),
)
# ---- 2. prepare workspace ------------------------------------ #
@@ -407,21 +414,27 @@ async def run_once(self) -> dict:
try:
await self._call_storage(self._upload_skill, skill, action)
skills_evolved += 1
- evolution_records.append({
- "action": action,
- "skill_name": name,
- "skill_id": self._id_registry.get_or_create(name),
- "version": self._id_registry.get_version(name),
- "source": "agent",
- })
+ evolution_records.append(
+ {
+ "action": action,
+ "skill_name": name,
+ "skill_id": self._id_registry.get_or_create(name),
+ "version": self._id_registry.get_version(name),
+ "source": "agent",
+ }
+ )
except Exception as e:
logger.error(
- "[AgentEvolveServer] failed to upload skill '%s': %s", name, e,
+ "[AgentEvolveServer] failed to upload skill '%s': %s",
+ name,
+ e,
)
# ---- 7. finalize + ack --------------------------------------- #
await self._call_storage(
- self._id_registry.save_to_oss, self._bucket, self._prefix,
+ self._id_registry.save_to_oss,
+ self._bucket,
+ self._prefix,
)
await self._call_storage(delete_session_keys, self._bucket, session_keys)
@@ -438,9 +451,11 @@ async def run_once(self) -> dict:
}
self._append_history(summary)
logger.info(
- "[AgentEvolveServer] === cycle done: %d sessions, %d skills evolved "
- "in %.1fs (agent exit=%d) ===",
- len(sessions), skills_evolved, elapsed, result.returncode,
+ "[AgentEvolveServer] === cycle done: %d sessions, %d skills evolved in %.1fs (agent exit=%d) ===",
+ len(sessions),
+ skills_evolved,
+ elapsed,
+ result.returncode,
)
return summary
@@ -503,15 +518,19 @@ async def status():
for name, e in entries.items()
}
pending_keys = await self._call_storage(
- list_session_keys, self._bucket, self._prefix,
+ list_session_keys,
+ self._bucket,
+ self._prefix,
+ )
+ return JSONResponse(
+ content={
+ "running": self._running,
+ "pending_sessions": len(pending_keys),
+ "registered_skills": len(entries),
+ "skills": skill_summary,
+ "fresh_mode": self.config.fresh,
+ }
)
- return JSONResponse(content={
- "running": self._running,
- "pending_sessions": len(pending_keys),
- "registered_skills": len(entries),
- "skills": skill_summary,
- "fresh_mode": self.config.fresh,
- })
@app.get("/health")
async def health():
diff --git a/evolve_server/engines/agent_workspace.py b/evolve_server/engines/agent_workspace.py
index 14c1b2b..fe42f75 100644
--- a/evolve_server/engines/agent_workspace.py
+++ b/evolve_server/engines/agent_workspace.py
@@ -160,7 +160,8 @@ def prepare(
# Write manifest
manifest_path = self.root / "manifest.json"
manifest_path.write_text(
- json.dumps(manifest, ensure_ascii=False, indent=2), encoding="utf-8",
+ json.dumps(manifest, ensure_ascii=False, indent=2),
+ encoding="utf-8",
)
# Write skill registry info (read-only reference for the agent)
@@ -201,9 +202,10 @@ def prepare(
logger.debug("[AgentWorkspace] wrote bootstrap %s", filename)
logger.info(
- "[AgentWorkspace] prepared: %d sessions, %d existing skills, "
- "bootstrap files written in %s",
- len(sessions), len(existing_skills), self.root,
+ "[AgentWorkspace] prepared: %d sessions, %d existing skills, bootstrap files written in %s",
+ len(sessions),
+ len(existing_skills),
+ self.root,
)
def snapshot_skills(self) -> dict[str, str]:
@@ -221,7 +223,8 @@ def snapshot_skills(self) -> dict[str, str]:
return snapshot
def collect_changes(
- self, before_snapshot: dict[str, str],
+ self,
+ before_snapshot: dict[str, str],
) -> list[dict[str, Any]]:
"""Compare current skills against *before_snapshot* and return changed/new skills.
@@ -244,15 +247,18 @@ def collect_changes(
parsed = parse_skill_content(name, raw_md)
action = "create" if before_sha is None else "improve"
- changes.append({
- "name": name,
- "action": action,
- "skill": parsed,
- "raw_md": raw_md,
- })
+ changes.append(
+ {
+ "name": name,
+ "action": action,
+ "skill": parsed,
+ "raw_md": raw_md,
+ }
+ )
logger.info(
"[AgentWorkspace] detected %s: skill '%s' (sha %s -> %s)",
- action, name,
+ action,
+ name,
(before_sha or "new")[:12],
after_sha[:12],
)
diff --git a/evolve_server/engines/openclaw_runner.py b/evolve_server/engines/openclaw_runner.py
index cab2926..acef048 100644
--- a/evolve_server/engines/openclaw_runner.py
+++ b/evolve_server/engines/openclaw_runner.py
@@ -156,12 +156,16 @@ def run(
cmd = [
self.openclaw_bin,
"agent",
- "--session-id", session_id,
- "--agent", "main",
- "--message", message,
+ "--session-id",
+ session_id,
+ "--agent",
+ "main",
+ "--message",
+ message,
"--json",
"--local",
- "--timeout", str(self.timeout),
+ "--timeout",
+ str(self.timeout),
]
env = self._build_env()
@@ -169,7 +173,9 @@ def run(
logger.info(
"[OpenClawRunner] running: %s (fresh=%s, timeout=%ds, session=%s)",
" ".join(cmd[:3] + ["..."]),
- self.fresh, self.timeout, session_id,
+ self.fresh,
+ self.timeout,
+ session_id,
)
try:
@@ -194,7 +200,8 @@ def run(
if result.returncode != 0:
logger.warning(
"[OpenClawRunner] agent exited with code %d\nstderr: %s",
- result.returncode, (result.stderr or "")[:500],
+ result.returncode,
+ (result.stderr or "")[:500],
)
else:
logger.info("[OpenClawRunner] agent completed successfully")
diff --git a/evolve_server/engines/workflow.py b/evolve_server/engines/workflow.py
index 5e544db..4a15f76 100644
--- a/evolve_server/engines/workflow.py
+++ b/evolve_server/engines/workflow.py
@@ -25,16 +25,21 @@
from skillclaw.object_store import build_object_store
from skillclaw.validation_store import ValidationStore
-from ..pipeline.aggregation import aggregate_sessions_by_skill
from ..core.config import EvolveServerConfig
-from ..core.constants import DecisionAction, NO_SKILL_KEY, SLUG_RE
+from ..core.constants import NO_SKILL_KEY, SLUG_RE, DecisionAction
+from ..core.llm_client import AsyncLLMClient
+from ..core.skill_registry import SkillIDRegistry
+from ..core.utils import build_skill_md, parse_skill_content
+from ..pipeline.aggregation import aggregate_sessions_by_skill
from ..pipeline.execution import (
create_skill_from_sessions,
evolve_skill_from_sessions,
execute_merge,
set_evolve_debug_dir,
)
-from ..core.llm_client import AsyncLLMClient
+from ..pipeline.session_judge import judge_sessions_parallel
+from ..pipeline.skill_verifier import verify_skill_candidate
+from ..pipeline.summarizer import set_summarizer_debug_dir, summarize_sessions_parallel
from ..storage.oss_helpers import (
delete_session_keys,
fetch_skill_content,
@@ -43,11 +48,6 @@
read_json_object,
save_manifest,
)
-from ..pipeline.session_judge import judge_sessions_parallel
-from ..pipeline.skill_verifier import verify_skill_candidate
-from ..core.skill_registry import SkillIDRegistry
-from ..pipeline.summarizer import set_summarizer_debug_dir, summarize_sessions_parallel
-from ..core.utils import build_skill_md, parse_skill_content
logger = logging.getLogger(__name__)
@@ -796,9 +796,7 @@ async def run_once(self) -> dict:
elapsed = round(time.monotonic() - started_at, 1)
uploaded_skills = sum(1 for record in all_records if record.get("uploaded"))
- queued_candidates = sum(
- 1 for record in all_records if record.get("action") == "queued_for_validation"
- )
+ queued_candidates = sum(1 for record in all_records if record.get("action") == "queued_for_validation")
published_after_validation = sum(
1 for record in all_records if record.get("action") == "published_after_validation"
)
diff --git a/evolve_server/pipeline/session_judge.py b/evolve_server/pipeline/session_judge.py
index a30ad36..e9a0b11 100644
--- a/evolve_server/pipeline/session_judge.py
+++ b/evolve_server/pipeline/session_judge.py
@@ -178,11 +178,7 @@ def _looks_like_existing_session_level_turn_score(session: dict[str, Any]) -> bo
# Be conservative: only treat the last-turn score as a benchmark-like
# session score when the session also carries task/aggregate metadata
# and there are no earlier PRM scores to suggest per-turn PRM usage.
- has_benchmarkish_context = bool(
- session.get("task_id")
- or session.get("aggregate")
- or session.get("phase")
- )
+ has_benchmarkish_context = bool(session.get("task_id") or session.get("aggregate") or session.get("phase"))
return has_benchmarkish_context and not earlier_scores
diff --git a/evolve_server/pipeline/skill_verifier.py b/evolve_server/pipeline/skill_verifier.py
index 5c521c9..7deaec9 100644
--- a/evolve_server/pipeline/skill_verifier.py
+++ b/evolve_server/pipeline/skill_verifier.py
@@ -222,12 +222,7 @@ async def verify_skill_candidate(
checks = _normalize_checks(parsed.get("checks"))
score = _compute_score(parsed.get("score"), checks)
decision_raw = str(parsed.get("decision", "") or "").strip().lower()
- reason = str(
- parsed.get("reason")
- or parsed.get("rationale")
- or parsed.get("notes")
- or ""
- ).strip()
+ reason = str(parsed.get("reason") or parsed.get("rationale") or parsed.get("notes") or "").strip()
accepted = decision_raw == "accept"
if score is not None and score < float(min_score):
diff --git a/evolve_server/pipeline/summarizer.py b/evolve_server/pipeline/summarizer.py
index 3ae2277..09582ac 100644
--- a/evolve_server/pipeline/summarizer.py
+++ b/evolve_server/pipeline/summarizer.py
@@ -97,9 +97,7 @@ def _format_tool_calls(turn: dict) -> list[str]:
leftover_errors = []
called_names = {
- (tc.get("function") or {}).get("name", "")
- for tc in raw_calls[:_MAX_TOOLS_PER_STEP]
- if isinstance(tc, dict)
+ (tc.get("function") or {}).get("name", "") for tc in raw_calls[:_MAX_TOOLS_PER_STEP] if isinstance(tc, dict)
}
for tname, errs in error_by_tool.items():
if tname not in called_names:
@@ -180,7 +178,11 @@ def _build_rollout_trajectory(turns: list[dict], first_prompt: str) -> str:
def _format_step(
- turn: dict, step_num: int, first_prompt: str, *, show_prompt: bool,
+ turn: dict,
+ step_num: int,
+ first_prompt: str,
+ *,
+ show_prompt: bool,
) -> str:
"""Format a single step line for the trajectory."""
prompt = _clip(turn.get("prompt_text", ""), _PROMPT_MAX)
@@ -297,14 +299,12 @@ def _build_session_payload(session: dict) -> dict[str, Any]:
read_skills = t.get("read_skills") or []
if read_skills:
interaction["read_skills"] = [
- s.get("skill_name", "") if isinstance(s, dict) else str(s or "")
- for s in read_skills
+ s.get("skill_name", "") if isinstance(s, dict) else str(s or "") for s in read_skills
]
modified_skills = t.get("modified_skills") or []
if modified_skills:
interaction["modified_skills"] = [
- s.get("skill_name", "") if isinstance(s, dict) else str(s or "")
- for s in modified_skills
+ s.get("skill_name", "") if isinstance(s, dict) else str(s or "") for s in modified_skills
]
injected = t.get("injected_skills") or []
if injected:
@@ -345,6 +345,7 @@ def _build_session_payload(session: dict) -> dict[str, Any]:
# Metadata extraction #
# ------------------------------------------------------------------ #
+
def _extract_session_metadata(session: dict) -> None:
"""Extract skill references and compute aggregate metrics for a session.
@@ -362,19 +363,11 @@ def _extract_session_metadata(session: dict) -> None:
for turn in session.get("turns", []):
for item in turn.get("read_skills") or []:
- name = (
- item.get("skill_name", "").strip()
- if isinstance(item, dict)
- else str(item or "").strip()
- )
+ name = item.get("skill_name", "").strip() if isinstance(item, dict) else str(item or "").strip()
if name:
skills.add(name)
for item in turn.get("modified_skills") or []:
- name = (
- item.get("skill_name", "").strip()
- if isinstance(item, dict)
- else str(item or "").strip()
- )
+ name = item.get("skill_name", "").strip() if isinstance(item, dict) else str(item or "").strip()
if name:
skills.add(name)
prm = turn.get("prm_score")
@@ -385,9 +378,7 @@ def _extract_session_metadata(session: dict) -> None:
session["_skills_referenced"] = skills
session["_prm_scores"] = prm_scores
- session["_avg_prm"] = (
- round(sum(prm_scores) / len(prm_scores), 3) if prm_scores else None
- )
+ session["_avg_prm"] = round(sum(prm_scores) / len(prm_scores), 3) if prm_scores else None
session["_has_tool_errors"] = has_tool_errors
@@ -395,6 +386,7 @@ def _extract_session_metadata(session: dict) -> None:
# Public API #
# ------------------------------------------------------------------ #
+
async def summarize_session(llm: AsyncLLMClient, session: dict) -> str:
"""Summarize an entire session via LLM (trajectory-aware)."""
payload = _build_session_payload(session)
@@ -460,15 +452,18 @@ async def summarize_sessions_parallel(
debug_dir = _SUMMARIZER_DEBUG_DIR
if debug_dir:
import pathlib
+
ddir = pathlib.Path(debug_dir) / "summarizer"
ddir.mkdir(parents=True, exist_ok=True)
for session in sessions:
sid = session.get("session_id", "unknown").replace("/", "_")
(ddir / f"{sid}_trajectory.txt").write_text(
- session.get("_trajectory", ""), encoding="utf-8",
+ session.get("_trajectory", ""),
+ encoding="utf-8",
)
(ddir / f"{sid}_summary.txt").write_text(
- session.get("_summary", ""), encoding="utf-8",
+ session.get("_summary", ""),
+ encoding="utf-8",
)
meta = {
"_skills_referenced": sorted(session.get("_skills_referenced") or []),
@@ -477,7 +472,8 @@ async def summarize_sessions_parallel(
"_prm_scores": session.get("_prm_scores"),
}
(ddir / f"{sid}_meta.json").write_text(
- json.dumps(meta, indent=2, ensure_ascii=False), encoding="utf-8",
+ json.dumps(meta, indent=2, ensure_ascii=False),
+ encoding="utf-8",
)
logger.info("[DebugDump] wrote summarizer artifacts to %s", ddir)
# ------------------------------------------------------------------ #
diff --git a/pyproject.toml b/pyproject.toml
index e3c8d56..b1aad2a 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -62,5 +62,8 @@ line-length = 120
select = ["E", "F", "I"]
ignore = ["E731", "F403", "F405"]
+[tool.ruff.lint.per-file-ignores]
+"evolve_server/pipeline/execution.py" = ["E501"]
+
[tool.uv]
# Install with: uv sync --extra all
diff --git a/skillclaw/__main__.py b/skillclaw/__main__.py
index c756ecc..d7b7f6c 100644
--- a/skillclaw/__main__.py
+++ b/skillclaw/__main__.py
@@ -1,5 +1,4 @@
from .cli import skillclaw
-
if __name__ == "__main__":
skillclaw()
diff --git a/skillclaw/api_server.py b/skillclaw/api_server.py
index 5c2d103..0a2ecc4 100644
--- a/skillclaw/api_server.py
+++ b/skillclaw/api_server.py
@@ -12,19 +12,19 @@
import json
import logging
import os
-import queue
import random
import re
import threading
import time
-from datetime import datetime, timezone
from contextlib import asynccontextmanager
+from datetime import datetime, timezone
from itertools import count
from typing import Any, Optional
import uvicorn
from fastapi import FastAPI, Header, HTTPException, Request
from fastapi.responses import JSONResponse, StreamingResponse
+
from .config import SkillClawConfig
from .data_formatter import ConversationSample
from .prm_scorer import PRMScorer
@@ -46,15 +46,12 @@
# Helper utilities #
# ------------------------------------------------------------------ #
+
def _flatten_message_content(content) -> str:
if isinstance(content, str):
return content
if isinstance(content, list):
- parts = [
- item.get("text", "")
- for item in content
- if isinstance(item, dict) and item.get("type") == "text"
- ]
+ parts = [item.get("text", "") for item in content if isinstance(item, dict) and item.get("type") == "text"]
return " ".join(parts) if parts else ""
return str(content) if content is not None else ""
@@ -200,6 +197,7 @@ def _resolve_session_done(
return False
return str(candidate).strip().lower() in _TRUE_STRINGS
+
def _normalize_tool_name(raw_name: str, args_raw: str) -> str:
"""
Normalize tool names from model output.
@@ -245,19 +243,23 @@ def _deduplicate_paths(paths: list[str]) -> list[str]:
def _extract_skill_paths_from_patch(raw_text: str) -> list[str]:
- return _deduplicate_paths([
- match.group(1).strip()
- for match in _PATCH_PATH_RE.finditer(str(raw_text or ""))
- if match.group(1).strip().endswith("SKILL.md")
- ])
+ return _deduplicate_paths(
+ [
+ match.group(1).strip()
+ for match in _PATCH_PATH_RE.finditer(str(raw_text or ""))
+ if match.group(1).strip().endswith("SKILL.md")
+ ]
+ )
def _extract_skill_paths_from_shell(command: str) -> list[str]:
- return _deduplicate_paths([
- match.group(1).strip()
- for match in _SHELL_SKILL_PATH_RE.finditer(str(command or ""))
- if match.group(1).strip().endswith("SKILL.md")
- ])
+ return _deduplicate_paths(
+ [
+ match.group(1).strip()
+ for match in _SHELL_SKILL_PATH_RE.finditer(str(command or ""))
+ if match.group(1).strip().endswith("SKILL.md")
+ ]
+ )
def _extract_skill_paths_from_args_dict(args: dict[str, Any]) -> list[str]:
@@ -351,9 +353,7 @@ def _resolve_skill_reference(
expanded = os.path.expanduser(str(path or "").strip())
real_path = os.path.realpath(expanded) if expanded else ""
skill_info = (
- skill_path_map.get(real_path)
- or skill_path_map.get(expanded)
- or skill_path_map.get(str(path or "").strip())
+ skill_path_map.get(real_path) or skill_path_map.get(expanded) or skill_path_map.get(str(path or "").strip())
)
if skill_info:
return {
@@ -384,6 +384,7 @@ def _resolve_skill_reference_by_name(
}
return {"skill_id": "", "skill_name": clean_name, "path": ""}
+
def _extract_tool_calls_from_text(text: str) -> tuple[str, list[dict]]:
"""
Parse tool-call tags embedded in assistant text into OpenAI-style tool_calls.
@@ -418,10 +419,7 @@ def _extract_tool_calls_from_text(text: str) -> tuple[str, list[dict]]:
except Exception:
continue
name = (
- payload.get("name")
- or payload.get("tool_name")
- or payload.get("function", {}).get("name")
- or "unknown_tool"
+ payload.get("name") or payload.get("tool_name") or payload.get("function", {}).get("name") or "unknown_tool"
)
args = payload.get("arguments") or payload.get("function", {}).get("arguments") or {}
if not isinstance(args, str):
@@ -472,15 +470,9 @@ def _restore_missing_reasoning_content(
assistant_tool_indices = [
idx
for idx, msg in enumerate(messages)
- if isinstance(msg, dict)
- and msg.get("role") == "assistant"
- and _assistant_message_has_tool_calls(msg)
- ]
- prior_tool_turns = [
- turn
- for turn in prior_turns
- if isinstance(turn, dict) and turn.get("tool_calls")
+ if isinstance(msg, dict) and msg.get("role") == "assistant" and _assistant_message_has_tool_calls(msg)
]
+ prior_tool_turns = [turn for turn in prior_turns if isinstance(turn, dict) and turn.get("tool_calls")]
if not assistant_tool_indices or not prior_tool_turns:
return 0
@@ -571,7 +563,13 @@ def _extract_last_user_instruction(messages: list[dict]) -> str:
_ERROR_PATTERNS: list[tuple[re.Pattern, str]] = [
- (re.compile(r"exited with code (?!0\b)\d+|exit code (?!0\b)\d+|exit status (?!0\b)\d+", re.IGNORECASE), "exit_code"),
+ (
+ re.compile(
+ r"exited with code (?!0\b)\d+|exit code (?!0\b)\d+|exit status (?!0\b)\d+",
+ re.IGNORECASE,
+ ),
+ "exit_code",
+ ),
(re.compile(r"Traceback \(most recent call last\)|\.py\", line \d+", re.IGNORECASE), "traceback"),
(re.compile(r"Permission denied|EACCES|PermissionError", re.IGNORECASE), "permission"),
(re.compile(r"No such file|FileNotFoundError|ENOENT|not found", re.IGNORECASE), "not_found"),
@@ -603,24 +601,17 @@ def _extract_recent_tool_results(messages: list[dict]) -> list[dict]:
role = msg.get("role", "")
if role in ("toolResult", "tool"):
content = _flatten_message_content(msg.get("content"))
- tool_name = (
- msg.get("toolName")
- or msg.get("name")
- or msg.get("tool_name")
- or "unknown"
- )
+ tool_name = msg.get("toolName") or msg.get("name") or msg.get("tool_name") or "unknown"
has_error, error_type = _classify_tool_error(content)
- results.append({
- "tool_name": tool_name,
- "tool_call_id": (
- msg.get("toolCallId")
- or msg.get("tool_call_id")
- or ""
- ),
- "content": content[:_TOOL_RESULT_CONTENT_MAX_CHARS],
- "has_error": has_error,
- "error_type": error_type,
- })
+ results.append(
+ {
+ "tool_name": tool_name,
+ "tool_call_id": (msg.get("toolCallId") or msg.get("tool_call_id") or ""),
+ "content": content[:_TOOL_RESULT_CONTENT_MAX_CHARS],
+ "has_error": has_error,
+ "error_type": error_type,
+ }
+ )
elif role == "user":
continue
else:
@@ -738,10 +729,7 @@ def _assemble_streaming_chat_completion(
"content": "".join(entry["content_parts"]),
}
if entry["tool_calls"]:
- message["tool_calls"] = [
- entry["tool_calls"][i]
- for i in sorted(entry["tool_calls"])
- ]
+ message["tool_calls"] = [entry["tool_calls"][i] for i in sorted(entry["tool_calls"])]
choices.append(
{
"index": index,
@@ -755,11 +743,14 @@ def _assemble_streaming_chat_completion(
"object": "chat.completion",
"created": response_created,
"model": response_model,
- "choices": choices or [{
- "index": 0,
- "message": {"role": "assistant", "content": ""},
- "finish_reason": "stop",
- }],
+ "choices": choices
+ or [
+ {
+ "index": 0,
+ "message": {"role": "assistant", "content": ""},
+ "finish_reason": "stop",
+ }
+ ],
"usage": usage,
}
@@ -1031,6 +1022,7 @@ def _rewrite_new_session_bootstrap_prompt(messages: list[dict]) -> tuple[list[di
# Anthropic ↔ OpenAI format helpers (for NanoClaw /v1/messages) #
# ------------------------------------------------------------------ #
+
def _anthropic_to_openai_body(body: dict[str, Any]) -> dict[str, Any]:
"""Convert an Anthropic /v1/messages request body to OpenAI chat format."""
messages: list[dict] = list(body.get("messages", []))
@@ -1042,9 +1034,7 @@ def _anthropic_to_openai_body(body: dict[str, Any]) -> dict[str, Any]:
system_text = system
elif isinstance(system, list):
system_text = " ".join(
- blk.get("text", "")
- for blk in system
- if isinstance(blk, dict) and blk.get("type") == "text"
+ blk.get("text", "") for blk in system if isinstance(blk, dict) and blk.get("type") == "text"
)
else:
system_text = str(system)
@@ -1056,9 +1046,7 @@ def _anthropic_to_openai_body(body: dict[str, Any]) -> dict[str, Any]:
content = msg.get("content")
if isinstance(content, list):
text = " ".join(
- blk.get("text", "")
- for blk in content
- if isinstance(blk, dict) and blk.get("type") == "text"
+ blk.get("text", "") for blk in content if isinstance(blk, dict) and blk.get("type") == "text"
)
normalized.append({**msg, "content": text})
else:
@@ -1227,7 +1215,7 @@ def _responses_function_item_id(call_id: str, index: int) -> str:
if raw.startswith("fc_"):
return raw
if raw.startswith("call_") and len(raw) > len("call_"):
- return f"fc_{raw[len('call_'):]}"
+ return f"fc_{raw[len('call_') :]}"
cleaned = re.sub(r"[^A-Za-z0-9_-]", "", raw)
if cleaned:
return f"fc_{cleaned[:48]}"
@@ -1313,9 +1301,7 @@ def _merge_previous_response_messages(
first = current_messages[0]
if isinstance(first, dict) and first.get("role") == "system":
history_without_system = [
- msg
- for msg in previous_messages
- if not (isinstance(msg, dict) and msg.get("role") == "system")
+ msg for msg in previous_messages if not (isinstance(msg, dict) and msg.get("role") == "system")
]
return [first, *history_without_system, *current_messages[1:]]
@@ -1357,6 +1343,7 @@ def _openai_to_anthropic_response(openai_resp: dict[str, Any], model: str) -> di
# SkillClawAPIServer #
# ------------------------------------------------------------------ #
+
class SkillClawAPIServer:
"""Proxy between client agents and the upstream model with SkillClaw hooks.
@@ -1398,11 +1385,9 @@ def __init__(
# system prompt benefits from compression). Non-OpenClaw agents send
# short/no system prompts, and the compressed OpenClaw text can trigger
# content filters on strict providers (e.g. Azure).
- self._compress_system_prompt = (config.claw_type == "openclaw")
+ self._compress_system_prompt = config.claw_type == "openclaw"
cache_suffix = f"{config.claw_type}_{config.llm_provider}"
- self._system_prompt_cache_file = os.path.join(
- config.record_dir, f"system_prompt_cache_{cache_suffix}.json"
- )
+ self._system_prompt_cache_file = os.path.join(config.record_dir, f"system_prompt_cache_{cache_suffix}.json")
# State machines
self._index_counter = count(0)
@@ -1410,13 +1395,13 @@ def __init__(
self._turn_counts: dict[str, int] = {}
self._pending_turn_data: dict[str, dict[int, dict]] = {} # session → {turn → data}
self._prm_tasks: dict[str, dict[int, asyncio.Task]] = {} # session → {turn → task}
- self._pending_records: dict[str, dict] = {} # for record logging
- self._session_effective: dict[str, int] = {} # at-least-one guarantee
+ self._pending_records: dict[str, dict] = {} # for record logging
+ self._session_effective: dict[str, int] = {} # at-least-one guarantee
self._session_turns: dict[str, list] = {}
- self._session_last_active: dict[str, float] = {} # session -> unix_ts
- self._closing_sessions: set[str] = set() # session ids currently being closed
- self._background_tasks: set[asyncio.Task] = set() # transient async tasks (upload, submit)
- self._responses_store: dict[str, dict[str, Any]] = {} # response_id -> stored response/history
+ self._session_last_active: dict[str, float] = {} # session -> unix_ts
+ self._closing_sessions: set[str] = set() # session ids currently being closed
+ self._background_tasks: set[asyncio.Task] = set() # transient async tasks (upload, submit)
+ self._responses_store: dict[str, dict[str, Any]] = {} # response_id -> stored response/history
self._session_sweeper_task: Optional[asyncio.Task] = None
self._session_idle_close_seconds = max(
0,
@@ -1467,9 +1452,8 @@ def __init__(
def _load_tokenizer(self):
try:
from transformers import AutoTokenizer
- return AutoTokenizer.from_pretrained(
- self.config.model_name, trust_remote_code=True
- )
+
+ return AutoTokenizer.from_pretrained(self.config.model_name, trust_remote_code=True)
except Exception as e:
logger.warning("[OpenClaw] could not load tokenizer: %s", e)
return None
@@ -1506,17 +1490,19 @@ async def list_models(
owner: SkillClawAPIServer = request.app.state.owner
await owner._check_auth(authorization)
model_id = owner._served_model
- return JSONResponse(content={
- "object": "list",
- "data": [
- {
- "id": model_id,
- "object": "model",
- "created": 0,
- "owned_by": "skillclaw",
- }
- ],
- })
+ return JSONResponse(
+ content={
+ "object": "list",
+ "data": [
+ {
+ "id": model_id,
+ "object": "model",
+ "created": 0,
+ "owned_by": "skillclaw",
+ }
+ ],
+ }
+ )
@app.post("/v1/chat/completions")
async def chat_completions(
@@ -1534,12 +1520,8 @@ async def chat_completions(
body = await request.json()
incoming_messages = body.get("messages", [])
if isinstance(incoming_messages, list):
- rewritten_messages, rewritten = _rewrite_new_session_bootstrap_prompt(
- incoming_messages
- )
+ rewritten_messages, _ = _rewrite_new_session_bootstrap_prompt(incoming_messages)
body["messages"] = rewritten_messages
- else:
- rewritten = 0
_raw_sid = x_session_id or body.get("session_id") or ""
# OpenClaw sends X-Session-Id/X-Turn-Type on every request.
# Non-OpenClaw agents (QwenPaw, IronClaw, etc.) don't — detect
@@ -1547,19 +1529,17 @@ async def chat_completions(
# cleanup still work correctly.
if _raw_sid:
session_id = _raw_sid
- turn_type = _resolve_turn_type(
- x_turn_type, body.get("turn_type"), default="main"
- )
+ turn_type = _resolve_turn_type(x_turn_type, body.get("turn_type"), default="main")
else:
msg_count = len(body.get("messages") or [])
session_id = await owner._resolve_tui_session(
- body.get("model", "default"), msg_count,
- )
- turn_type = _resolve_turn_type(
- x_turn_type, body.get("turn_type"), default="main"
+ body.get("model", "default"),
+ msg_count,
)
+ turn_type = _resolve_turn_type(x_turn_type, body.get("turn_type"), default="main")
session_done = _resolve_session_done(x_session_done, body.get("session_done"))
- # Do not infer session_done from bootstrap text — only explicit X-Session-Done or body session_done trigger session close.
+ # Do not infer session_done from bootstrap text — only explicit
+ # X-Session-Done or body session_done trigger session close.
stream = bool(body.get("stream", False))
result = await owner._handle_request(
@@ -1569,9 +1549,7 @@ async def chat_completions(
session_done=session_done,
)
if stream:
- return StreamingResponse(
- owner._stream_response(result), media_type="text/event-stream"
- )
+ return StreamingResponse(owner._stream_response(result), media_type="text/event-stream")
return JSONResponse(content=result["response"])
@app.post("/v1/responses")
@@ -1593,7 +1571,10 @@ async def responses(
if previous_response_id:
stored = owner._responses_store.get(previous_response_id)
if stored is None:
- raise HTTPException(status_code=404, detail=f"previous_response_id not found: {previous_response_id}")
+ raise HTTPException(
+ status_code=404,
+ detail=f"previous_response_id not found: {previous_response_id}",
+ )
openai_body["messages"] = _merge_previous_response_messages(
list(stored.get("messages") or []),
list(openai_body.get("messages") or []),
@@ -1601,17 +1582,14 @@ async def responses(
_raw_sid = x_session_id or body.get("session_id") or ""
if _raw_sid:
session_id = _raw_sid
- turn_type = _resolve_turn_type(
- x_turn_type, body.get("turn_type"), default="main"
- )
+ turn_type = _resolve_turn_type(x_turn_type, body.get("turn_type"), default="main")
else:
msg_count = len(openai_body.get("messages") or [])
session_id = await owner._resolve_tui_session(
- openai_body.get("model", owner._served_model), msg_count,
- )
- turn_type = _resolve_turn_type(
- x_turn_type, body.get("turn_type"), default="main"
+ openai_body.get("model", owner._served_model),
+ msg_count,
)
+ turn_type = _resolve_turn_type(x_turn_type, body.get("turn_type"), default="main")
session_done = _resolve_session_done(x_session_done, body.get("session_done"))
result = await owner._handle_request(
@@ -1703,15 +1681,11 @@ async def anthropic_messages(
_raw_sid = x_session_id or ""
if _raw_sid:
session_id = _raw_sid
- turn_type = _resolve_turn_type(
- x_turn_type, raw_body.get("turn_type"), default="main"
- )
+ turn_type = _resolve_turn_type(x_turn_type, raw_body.get("turn_type"), default="main")
else:
msg_count = len(openai_body.get("messages") or [])
session_id = await owner._resolve_tui_session(model, msg_count)
- turn_type = _resolve_turn_type(
- x_turn_type, raw_body.get("turn_type"), default="main"
- )
+ turn_type = _resolve_turn_type(x_turn_type, raw_body.get("turn_type"), default="main")
session_done = _resolve_session_done(x_session_done, raw_body.get("session_done"))
result = await owner._handle_request(
@@ -1800,14 +1774,16 @@ async def _resolve_tui_session(self, model: str, msg_count: int) -> str:
new_session = True
logger.info(
"[SessionDetect] msg count dropped %d → %d — new session",
- meta["last_msg_count"], msg_count,
+ meta["last_msg_count"],
+ msg_count,
)
elif (now - meta["last_request_time"]) > self._tui_inactivity_timeout:
new_session = True
idle_sec = int(now - meta["last_request_time"])
logger.info(
"[SessionDetect] inactivity %ds > %ds — new session",
- idle_sec, self._tui_inactivity_timeout,
+ idle_sec,
+ self._tui_inactivity_timeout,
)
if new_session:
@@ -1850,9 +1826,7 @@ def _collect_idle_session_ids(self, now: Optional[float] = None) -> list[str]:
return sorted(
sid
for sid, ts in self._session_last_active.items()
- if sid
- and sid not in self._closing_sessions
- and (now - float(ts)) >= threshold
+ if sid and sid not in self._closing_sessions and (now - float(ts)) >= threshold
)
def _start_session_idle_sweeper(self) -> None:
@@ -1922,8 +1896,7 @@ async def _close_session(self, session_id: str, reason: str = "explicit") -> Non
try:
self._flush_pending_record(session_id, None)
pending_snapshot = {
- turn_num: dict(turn_data)
- for turn_num, turn_data in self._pending_turn_data.get(session_id, {}).items()
+ turn_num: dict(turn_data) for turn_num, turn_data in self._pending_turn_data.get(session_id, {}).items()
}
self._maybe_submit_ready_samples(session_id, force_last_prm=True)
prm_tasks = list(self._prm_tasks.get(session_id, {}).values())
@@ -1934,9 +1907,7 @@ async def _close_session(self, session_id: str, reason: str = "explicit") -> Non
timeout=_SHUTDOWN_DRAIN_TIMEOUT_SECONDS,
)
except asyncio.TimeoutError:
- logger.warning(
- "[SessionDetect] PRM drain timed out for session=%s", session_id
- )
+ logger.warning("[SessionDetect] PRM drain timed out for session=%s", session_id)
for turn_num in sorted(pending_snapshot.keys()):
turn_data = pending_snapshot[turn_num]
prm_result = turn_data.pop("prm_result", None)
@@ -1960,7 +1931,10 @@ async def _close_session(self, session_id: str, reason: str = "explicit") -> Non
if isinstance(task, asyncio.Task) and not task.done():
task.cancel()
logger.info(
- "[SessionDetect] closed session=%s reason=%s (effective_samples=%d)", session_id, reason, eff,
+ "[SessionDetect] closed session=%s reason=%s (effective_samples=%d)",
+ session_id,
+ reason,
+ eff,
)
if self.skill_manager:
self.skill_manager._save_stats()
@@ -2009,8 +1983,9 @@ def _flush_pending_record(self, session_id: str, next_state):
except OSError as e:
logger.warning("[OpenClaw] failed to write record: %s", e)
- def _buffer_record(self, session_id: str, turn_num: int, messages: list,
- prompt_text: str, response_text: str, tool_calls: list):
+ def _buffer_record(
+ self, session_id: str, turn_num: int, messages: list, prompt_text: str, response_text: str, tool_calls: list
+ ):
if not self._record_file:
return
instruction_text = _extract_last_user_instruction(messages)
@@ -2025,18 +2000,23 @@ def _buffer_record(self, session_id: str, turn_num: int, messages: list,
"tool_calls": tool_calls or None,
}
- def _append_prm_record(self, session_id: str, turn_num: int,
- score: float, votes: list):
+ def _append_prm_record(self, session_id: str, turn_num: int, score: float, votes: list):
if not self._prm_record_file:
return
try:
with open(self._prm_record_file, "a", encoding="utf-8") as f:
- f.write(json.dumps({
- "session_id": session_id,
- "turn": turn_num,
- "score": score,
- "votes": votes,
- }, ensure_ascii=False) + "\n")
+ f.write(
+ json.dumps(
+ {
+ "session_id": session_id,
+ "turn": turn_num,
+ "score": score,
+ "votes": votes,
+ },
+ ensure_ascii=False,
+ )
+ + "\n"
+ )
except OSError as e:
logger.warning("[OpenClaw] failed to write PRM record: %s", e)
@@ -2058,26 +2038,26 @@ def purge_record_files(self):
# PRM scoring #
# ------------------------------------------------------------------ #
- def _fire_prm_scoring(self, session_id: str, turn_num: int,
- response_text: str, instruction_text: str, next_state,
- submit_ready_samples: bool = True):
+ def _fire_prm_scoring(
+ self,
+ session_id: str,
+ turn_num: int,
+ response_text: str,
+ instruction_text: str,
+ next_state,
+ submit_ready_samples: bool = True,
+ ):
if not self.prm_scorer or not next_state:
return
inst_text = instruction_text or ""
task = asyncio.create_task(
- self.prm_scorer.evaluate(
- response_text, inst_text, session_id=session_id, turn_num=turn_num
- )
+ self.prm_scorer.evaluate(response_text, inst_text, session_id=session_id, turn_num=turn_num)
)
task.add_done_callback(self._task_done_cb)
if submit_ready_samples:
- task.add_done_callback(
- lambda _t: self._on_prm_done(session_id, turn_num, _t)
- )
+ task.add_done_callback(lambda _t: self._on_prm_done(session_id, turn_num, _t))
else:
- task.add_done_callback(
- lambda _t: self._on_prm_done_without_submit(session_id, turn_num, _t)
- )
+ task.add_done_callback(lambda _t: self._on_prm_done_without_submit(session_id, turn_num, _t))
self._prm_tasks.setdefault(session_id, {})[turn_num] = task
td = self._pending_turn_data.get(session_id, {}).get(turn_num)
if td is not None:
@@ -2100,10 +2080,7 @@ def _apply_prm_result(
self.skill_manager.record_feedback(injected, score)
read = turns[idx].get("read_skills", [])
if read and self.skill_manager:
- read_names = [
- r["skill_name"] for r in read
- if isinstance(r, dict) and r.get("skill_name")
- ]
+ read_names = [r["skill_name"] for r in read if isinstance(r, dict) and r.get("skill_name")]
if read_names:
self.skill_manager.record_feedback(read_names, score)
pending_turn = self._pending_turn_data.get(session_id, {}).get(turn_num)
@@ -2180,7 +2157,10 @@ def _prompt_len(msgs):
try:
norm_msgs = _normalize_messages_for_template(msgs)
text = self._tokenizer.apply_chat_template(
- norm_msgs, tools=body.get("tools"), tokenize=False, add_generation_prompt=True,
+ norm_msgs,
+ tools=body.get("tools"),
+ tokenize=False,
+ add_generation_prompt=True,
)
return len(self._tokenizer(text, add_special_tokens=False)["input_ids"])
except Exception:
@@ -2295,10 +2275,7 @@ def _prompt_len(msgs):
reasoning = assistant_msg.get("reasoning_content") or ""
- logger.info(
- f"{_YELLOW}[OpenClaw] [{turn_type}] session={session_id} "
- f"prompt_msgs={len(messages)}{_RESET}"
- )
+ logger.info(f"{_YELLOW}[OpenClaw] [{turn_type}] session={session_id} prompt_msgs={len(messages)}{_RESET}")
logger.info(
f"{_RED}[OpenClaw] [{turn_type}] session={session_id} "
f"thinking={len(reasoning)} chars, response:\n{content}{_RESET}"
@@ -2324,15 +2301,14 @@ def _prompt_len(msgs):
norm_resp = _normalize_messages_for_template([response_msg])[0]
full_norm = norm_msgs + [norm_resp]
- skill_path_map = (
- self.skill_manager.get_skill_path_map()
- if self.skill_manager else {}
- )
+ skill_path_map = self.skill_manager.get_skill_path_map() if self.skill_manager else {}
read_skills = _extract_read_skills_from_tool_calls(
- tool_calls, skill_path_map,
+ tool_calls,
+ skill_path_map,
)
modified_skills = _extract_modified_skills_from_tool_calls(
- tool_calls, skill_path_map,
+ tool_calls,
+ skill_path_map,
)
tool_summaries = _build_tool_summaries(tool_calls)
if read_skills:
@@ -2354,31 +2330,34 @@ def _prompt_len(msgs):
self._turn_counts[session_id] = self._turn_counts.get(session_id, 0) + 1
turn_num = self._turn_counts[session_id]
prompt_text_simple = "\n".join(
- f"{m.get('role', '?')}: {_flatten_message_content(m.get('content', ''))}"
- for m in messages
- )
- response_text_simple = content or (
- json.dumps(tool_calls, ensure_ascii=False) if tool_calls else ""
+ f"{m.get('role', '?')}: {_flatten_message_content(m.get('content', ''))}" for m in messages
)
+ response_text_simple = content or (json.dumps(tool_calls, ensure_ascii=False) if tool_calls else "")
self._buffer_record(
- session_id, turn_num, messages,
- prompt_text_simple, response_text_simple, tool_calls,
+ session_id,
+ turn_num,
+ messages,
+ prompt_text_simple,
+ response_text_simple,
+ tool_calls,
+ )
+ self._session_turns.setdefault(session_id, []).append(
+ {
+ "turn_num": turn_num,
+ "prompt_text": user_instruction,
+ "response_text": response_text_simple,
+ "reasoning_content": reasoning or None,
+ "tool_calls": tool_calls,
+ "read_skills": read_skills,
+ "modified_skills": modified_skills,
+ "tool_results": tool_summaries,
+ "tool_results_raw": [],
+ "tool_observations": [],
+ "tool_errors": [],
+ "injected_skills": injected_skills,
+ "prm_score": None,
+ }
)
- self._session_turns.setdefault(session_id, []).append({
- "turn_num": turn_num,
- "prompt_text": user_instruction,
- "response_text": response_text_simple,
- "reasoning_content": reasoning or None,
- "tool_calls": tool_calls,
- "read_skills": read_skills,
- "modified_skills": modified_skills,
- "tool_results": tool_summaries,
- "tool_results_raw": [],
- "tool_observations": [],
- "tool_errors": [],
- "injected_skills": injected_skills,
- "prm_score": None,
- })
self._pending_turn_data.setdefault(session_id, {})[turn_num] = {
"prompt_ids": [],
"response_ids": [],
@@ -2392,14 +2371,20 @@ def _prompt_len(msgs):
return {"response": output}
prompt_text = self._tokenizer.apply_chat_template(
- norm_msgs, tools=tools, tokenize=False, add_generation_prompt=True,
+ norm_msgs,
+ tools=tools,
+ tokenize=False,
+ add_generation_prompt=True,
)
full_text = self._tokenizer.apply_chat_template(
- full_norm, tools=tools, tokenize=False, add_generation_prompt=False,
+ full_norm,
+ tools=tools,
+ tokenize=False,
+ add_generation_prompt=False,
)
if full_text.startswith(prompt_text):
- response_text = full_text[len(prompt_text):]
+ response_text = full_text[len(prompt_text) :]
else:
logger.warning("[OpenClaw] prompt_text not prefix of full_text, using full_text as response")
response_text = full_text
@@ -2431,24 +2416,29 @@ def _prompt_len(msgs):
logger.info(
"[OpenClaw] MAIN session=%s turn=%d prompt_tokens=%d response_tokens=%d",
- session_id, turn_num, len(prompt_ids), len(response_ids),
+ session_id,
+ turn_num,
+ len(prompt_ids),
+ len(response_ids),
)
self._buffer_record(session_id, turn_num, messages, prompt_text, response_text, tool_calls)
- self._session_turns.setdefault(session_id, []).append({
- "turn_num": turn_num,
- "prompt_text": user_instruction,
- "response_text": response_text,
- "reasoning_content": reasoning or None,
- "tool_calls": tool_calls,
- "read_skills": read_skills,
- "modified_skills": modified_skills,
- "tool_results": tool_summaries,
- "tool_results_raw": [],
- "tool_observations": [],
- "tool_errors": [],
- "injected_skills": injected_skills,
- "prm_score": None,
- })
+ self._session_turns.setdefault(session_id, []).append(
+ {
+ "turn_num": turn_num,
+ "prompt_text": user_instruction,
+ "response_text": response_text,
+ "reasoning_content": reasoning or None,
+ "tool_calls": tool_calls,
+ "read_skills": read_skills,
+ "modified_skills": modified_skills,
+ "tool_results": tool_summaries,
+ "tool_results_raw": [],
+ "tool_observations": [],
+ "tool_errors": [],
+ "injected_skills": injected_skills,
+ "prm_score": None,
+ }
+ )
self._pending_turn_data.setdefault(session_id, {})[turn_num] = turn_data
self._maybe_submit_ready_samples(session_id)
else:
@@ -2488,10 +2478,7 @@ async def _forward_to_llm_openai(self, body: dict[str, Any]) -> dict[str, Any]:
)
# Strip Tinker-specific fields not supported by standard OpenAI APIs
- send_body = {
- k: v for k, v in body.items()
- if k not in {"logprobs", "top_logprobs", "stream_options"}
- }
+ send_body = {k: v for k, v in body.items() if k not in {"logprobs", "top_logprobs", "stream_options"}}
send_body["model"] = self.config.llm_model_id or body.get("model", "")
send_body["stream"] = False
@@ -2533,17 +2520,11 @@ async def _forward_to_llm_openai(self, body: dict[str, Any]) -> dict[str, Any]:
return resp.json()
except httpx.HTTPStatusError as e:
response_text = e.response.text[:200]
- if (
- e.response.status_code == 400
- and "'temperature' is not supported" in e.response.text
- ):
+ if e.response.status_code == 400 and "'temperature' is not supported" in e.response.text:
logger.info("[OpenClaw] upstream rejects temperature param, retrying without it")
send_body.pop("temperature", None)
continue
- if (
- e.response.status_code == 400
- and "Stream must be set to true" in e.response.text
- ):
+ if e.response.status_code == 400 and "Stream must be set to true" in e.response.text:
logger.info("[OpenClaw] upstream requires stream=true, retrying with SSE collection")
stream_body = dict(send_body)
stream_body["stream"] = True
@@ -2579,10 +2560,14 @@ async def _forward_to_llm_openai(self, body: dict[str, Any]) -> dict[str, Any]:
) from stream_error
# Retryable upstream error — retry if attempts remain
if attempt < max_retries - 1:
- wait = min(2 ** attempt + random.uniform(0, 1), 30)
+ wait = min(2**attempt + random.uniform(0, 1), 30)
logger.warning(
"[OpenClaw] upstream LLM error (attempt %d/%d), retrying in %.1fs: %s %s",
- attempt + 1, max_retries, wait, e.response.status_code, response_text,
+ attempt + 1,
+ max_retries,
+ wait,
+ e.response.status_code,
+ response_text,
)
await asyncio.sleep(wait)
continue
@@ -2590,10 +2575,13 @@ async def _forward_to_llm_openai(self, body: dict[str, Any]) -> dict[str, Any]:
raise HTTPException(status_code=502, detail=f"Upstream LLM error: {e}") from e
except Exception as e:
if attempt < max_retries - 1:
- wait = min(2 ** attempt + random.uniform(0, 1), 30)
+ wait = min(2**attempt + random.uniform(0, 1), 30)
logger.warning(
"[OpenClaw] LLM forward failed (attempt %d/%d), retrying in %.1fs: %s",
- attempt + 1, max_retries, wait, e,
+ attempt + 1,
+ max_retries,
+ wait,
+ e,
)
await asyncio.sleep(wait)
continue
@@ -2613,11 +2601,7 @@ async def _forward_to_llm_bedrock(self, body: dict[str, Any]) -> dict[str, Any]:
messages = body.get("messages", [])
temperature = body.get("temperature", 0.6)
- max_tokens = (
- body.get("max_completion_tokens")
- or body.get("max_tokens")
- or 8192
- )
+ max_tokens = body.get("max_completion_tokens") or body.get("max_tokens") or 8192
try:
client = BedrockChatClient(
@@ -2637,14 +2621,16 @@ async def _forward_to_llm_bedrock(self, body: dict[str, Any]) -> dict[str, Any]:
"id": f"chatcmpl-bedrock-{int(time.time())}",
"object": "chat.completion",
"model": model_id,
- "choices": [{
- "index": 0,
- "message": {
- "role": choice.message.role if choice else "assistant",
- "content": choice.message.content if choice else "",
- },
- "finish_reason": choice.finish_reason if choice else "stop",
- }],
+ "choices": [
+ {
+ "index": 0,
+ "message": {
+ "role": choice.message.role if choice else "assistant",
+ "content": choice.message.content if choice else "",
+ },
+ "finish_reason": choice.finish_reason if choice else "stop",
+ }
+ ],
"usage": {
"prompt_tokens": resp.usage.prompt_tokens,
"completion_tokens": resp.usage.completion_tokens,
@@ -2660,7 +2646,9 @@ async def _forward_to_llm_bedrock(self, body: dict[str, Any]) -> dict[str, Any]:
# ------------------------------------------------------------------ #
async def _upload_session_data(
- self, session_id: str, turns: list[dict],
+ self,
+ session_id: str,
+ turns: list[dict],
) -> None:
"""Upload the complete session turn records to cloud storage.
@@ -2671,6 +2659,7 @@ async def _upload_session_data(
"""
try:
from .skill_hub import SkillHub
+
hub = SkillHub.from_config(self.config)
session_payload = {
"session_id": session_id,
@@ -2685,7 +2674,9 @@ async def _upload_session_data(
hub._bucket.put_object(oss_key, content.encode("utf-8"))
logger.info(
"[SkillHub] session uploaded: %s (%d turns, %d bytes)",
- oss_key, len(turns), len(content),
+ oss_key,
+ len(turns),
+ len(content),
)
except Exception as e:
logger.warning("[SkillHub] session upload failed: %s", e)
@@ -2702,11 +2693,14 @@ async def _pull_skills_from_cloud(self, skip_names: Optional[set[str]] = None) -
"""
try:
from .skill_hub import SkillHub
+
hub = SkillHub.from_config(self.config)
pull_result = hub.pull_skills(self.config.skills_dir, skip_names=skip_names)
logger.info(
"[SkillHub] skill pull: %d downloaded, %d unchanged, %d deleted",
- pull_result["downloaded"], pull_result["skipped"], pull_result.get("deleted", 0),
+ pull_result["downloaded"],
+ pull_result["skipped"],
+ pull_result.get("deleted", 0),
)
if self.skill_manager and (
pull_result.get("downloaded", 0) > 0
@@ -2739,7 +2733,10 @@ def _prompt_len(msgs):
try:
norm_msgs = _normalize_messages_for_template(msgs)
text = self._tokenizer.apply_chat_template(
- norm_msgs, tools=tools, tokenize=False, add_generation_prompt=True,
+ norm_msgs,
+ tools=tools,
+ tokenize=False,
+ add_generation_prompt=True,
)
return len(self._tokenizer(text, add_special_tokens=False)["input_ids"])
except Exception:
@@ -2768,8 +2765,7 @@ def _prompt_len(msgs):
dropped = len(messages) - len(result)
if dropped > 0:
logger.warning(
- "[OpenClaw] context truncated: dropped %d oldest messages "
- "(%d → %d tokens, limit=%d)",
+ "[OpenClaw] context truncated: dropped %d oldest messages (%d → %d tokens, limit=%d)",
dropped,
_prompt_len(messages),
_prompt_len(result),
@@ -2802,11 +2798,7 @@ def _inject_skills(self, messages: list[dict]) -> tuple[list[dict], list[str]]:
return messages, []
all_skills = self.skill_manager.get_all_skills()
- skill_names = [
- s.get("name", "unknown_skill")
- for s in all_skills
- if isinstance(s, dict)
- ]
+ skill_names = [s.get("name", "unknown_skill") for s in all_skills if isinstance(s, dict)]
logger.info(
"[SkillManager] listing %d skills in catalog: %s",
len(skill_names),
@@ -2959,7 +2951,7 @@ async def _submit_turn_sample(
)
loss_mask = [0] * len(response_ids) if exclude else [1] * len(response_ids)
- sample = ConversationSample(
+ _ = ConversationSample(
session_id=session_id,
turn_num=turn_num,
prompt_tokens=prompt_ids,
@@ -2973,22 +2965,24 @@ async def _submit_turn_sample(
)
if not exclude:
- self._session_effective[session_id] = (
- self._session_effective.get(session_id, 0) + 1
- )
+ self._session_effective[session_id] = self._session_effective.get(session_id, 0) + 1
index = next(self._index_counter)
- group_index = next(self._group_counter)
+ next(self._group_counter)
if prm_result:
- self._append_prm_record(
- session_id, turn_num, score, prm_result.get("votes", [])
- )
+ self._append_prm_record(session_id, turn_num, score, prm_result.get("votes", []))
logger.info(
"[OpenClaw] submitted sample session=%s turn=%d index=%d score=%.1f exclude=%s "
"prompt_len=%d response_len=%d",
- session_id, turn_num, index, score, exclude, len(prompt_ids), len(response_ids),
+ session_id,
+ turn_num,
+ index,
+ score,
+ exclude,
+ len(prompt_ids),
+ len(response_ids),
)
# ------------------------------------------------------------------ #
@@ -3137,8 +3131,10 @@ async def _stream_anthropic_response(self, result: dict[str, Any], model: str):
content_text = message.get("content", "") or ""
finish_reason = choice.get("finish_reason", "stop")
stop_reason_map = {
- "stop": "end_turn", "length": "max_tokens",
- "tool_calls": "tool_use", "content_filter": "stop_sequence",
+ "stop": "end_turn",
+ "length": "max_tokens",
+ "tool_calls": "tool_use",
+ "content_filter": "stop_sequence",
}
stop_reason = stop_reason_map.get(finish_reason, "end_turn")
usage = payload.get("usage", {})
@@ -3147,30 +3143,48 @@ async def _stream_anthropic_response(self, result: dict[str, Any], model: str):
def _sse(event: str, data: dict) -> str:
return f"event: {event}\ndata: {json.dumps(data, ensure_ascii=False)}\n\n"
- yield _sse("message_start", {
- "type": "message_start",
- "message": {
- "id": msg_id, "type": "message", "role": "assistant",
- "content": [], "model": model, "stop_reason": None,
- "stop_sequence": None,
- "usage": {"input_tokens": usage.get("prompt_tokens", 0), "output_tokens": 0},
+ yield _sse(
+ "message_start",
+ {
+ "type": "message_start",
+ "message": {
+ "id": msg_id,
+ "type": "message",
+ "role": "assistant",
+ "content": [],
+ "model": model,
+ "stop_reason": None,
+ "stop_sequence": None,
+ "usage": {"input_tokens": usage.get("prompt_tokens", 0), "output_tokens": 0},
+ },
+ },
+ )
+ yield _sse(
+ "content_block_start",
+ {
+ "type": "content_block_start",
+ "index": 0,
+ "content_block": {"type": "text", "text": ""},
},
- })
- yield _sse("content_block_start", {
- "type": "content_block_start", "index": 0,
- "content_block": {"type": "text", "text": ""},
- })
+ )
yield _sse("ping", {"type": "ping"})
- yield _sse("content_block_delta", {
- "type": "content_block_delta", "index": 0,
- "delta": {"type": "text_delta", "text": content_text},
- })
+ yield _sse(
+ "content_block_delta",
+ {
+ "type": "content_block_delta",
+ "index": 0,
+ "delta": {"type": "text_delta", "text": content_text},
+ },
+ )
yield _sse("content_block_stop", {"type": "content_block_stop", "index": 0})
- yield _sse("message_delta", {
- "type": "message_delta",
- "delta": {"stop_reason": stop_reason, "stop_sequence": None},
- "usage": {"output_tokens": usage.get("completion_tokens", 0)},
- })
+ yield _sse(
+ "message_delta",
+ {
+ "type": "message_delta",
+ "delta": {"stop_reason": stop_reason, "stop_sequence": None},
+ "usage": {"output_tokens": usage.get("completion_tokens", 0)},
+ },
+ )
yield _sse("message_stop", {"type": "message_stop"})
# ------------------------------------------------------------------ #
@@ -3233,9 +3247,11 @@ def wait_until_ready(self, timeout_s: float = 30.0) -> bool:
def _safe_create_task(self, coro):
task = asyncio.create_task(coro)
self._background_tasks.add(task)
+
def _on_done(t: asyncio.Task):
self._background_tasks.discard(t)
self._task_done_cb(t)
+
task.add_done_callback(_on_done)
return task
diff --git a/skillclaw/bedrock_client.py b/skillclaw/bedrock_client.py
index c707fec..3c120f5 100644
--- a/skillclaw/bedrock_client.py
+++ b/skillclaw/bedrock_client.py
@@ -27,7 +27,7 @@
import logging
from dataclasses import dataclass, field
-from typing import Any, Optional
+from typing import Any
logger = logging.getLogger(__name__)
@@ -36,6 +36,7 @@
# Response dataclasses (mimic openai.ChatCompletion) #
# ------------------------------------------------------------------ #
+
@dataclass
class _Message:
content: str
@@ -67,6 +68,7 @@ class _ChatCompletion:
# Completions / Chat namespace (duck-type openai.OpenAI().chat) #
# ------------------------------------------------------------------ #
+
class _Completions:
"""Mimics ``openai.resources.chat.Completions``."""
@@ -98,10 +100,12 @@ def create(
if role == "system":
system_parts.append({"text": content})
else:
- converse_messages.append({
- "role": role,
- "content": [{"text": content}],
- })
+ converse_messages.append(
+ {
+ "role": role,
+ "content": [{"text": content}],
+ }
+ )
# Bedrock requires at least one user message
if not converse_messages:
@@ -149,6 +153,7 @@ def __init__(self, completions: _Completions):
# Public class #
# ------------------------------------------------------------------ #
+
class BedrockChatClient:
"""
Drop-in replacement for ``openai.OpenAI()`` that calls AWS Bedrock.
@@ -181,6 +186,7 @@ def __init__(
region: str = "us-east-1",
):
import boto3
+
self._boto_client = boto3.client("bedrock-runtime", region_name=region)
self.model_id = model_id
self._completions = _Completions(self._boto_client, model_id, region)
diff --git a/skillclaw/claw_adapter.py b/skillclaw/claw_adapter.py
index d9137b6..d44059d 100644
--- a/skillclaw/claw_adapter.py
+++ b/skillclaw/claw_adapter.py
@@ -56,6 +56,7 @@
# Dispatcher #
# ------------------------------------------------------------------ #
+
def configure_claw(cfg: "SkillClawConfig") -> None:
"""Dispatch to the appropriate claw adapter based on cfg.claw_type."""
claw = getattr(cfg, "claw_type", "openclaw")
@@ -67,9 +68,7 @@ def configure_claw(cfg: "SkillClawConfig") -> None:
adapter = _ADAPTERS.get(claw)
if adapter is None:
- logger.warning(
- "[ClawAdapter] Unknown claw_type=%r — skipping auto-configuration", claw
- )
+ logger.warning("[ClawAdapter] Unknown claw_type=%r — skipping auto-configuration", claw)
return
adapter(cfg)
@@ -78,29 +77,32 @@ def configure_claw(cfg: "SkillClawConfig") -> None:
# OpenClaw adapter #
# ------------------------------------------------------------------ #
+
def _configure_openclaw(cfg: "SkillClawConfig") -> None:
"""Auto-configure OpenClaw to use the SkillClaw proxy."""
model_id = cfg.served_model_name or cfg.llm_model_id or "skillclaw-model"
- provider_json = json.dumps({
- "api": "openai-completions",
- "baseUrl": f"http://127.0.0.1:{cfg.proxy_port}/v1",
- "apiKey": cfg.proxy_api_key or "skillclaw",
- "models": [{
- "id": model_id,
- "name": model_id,
- "reasoning": False,
- "input": ["text"],
- "cost": {"input": 0, "output": 0, "cacheRead": 0, "cacheWrite": 0},
- "contextWindow": 32768,
- "maxTokens": 8192,
- }],
- })
+ provider_json = json.dumps(
+ {
+ "api": "openai-completions",
+ "baseUrl": f"http://127.0.0.1:{cfg.proxy_port}/v1",
+ "apiKey": cfg.proxy_api_key or "skillclaw",
+ "models": [
+ {
+ "id": model_id,
+ "name": model_id,
+ "reasoning": False,
+ "input": ["text"],
+ "cost": {"input": 0, "output": 0, "cacheRead": 0, "cacheWrite": 0},
+ "contextWindow": 32768,
+ "maxTokens": 8192,
+ }
+ ],
+ }
+ )
commands = [
- ["openclaw", "config", "set", "models.providers.skillclaw",
- "--json", provider_json],
- ["openclaw", "config", "set", "agents.defaults.model.primary",
- f"skillclaw/{model_id}"],
+ ["openclaw", "config", "set", "models.providers.skillclaw", "--json", provider_json],
+ ["openclaw", "config", "set", "agents.defaults.model.primary", f"skillclaw/{model_id}"],
["openclaw", "config", "set", "agents.defaults.sandbox.mode", "off"],
["openclaw", "gateway", "restart"],
]
@@ -111,6 +113,7 @@ def _configure_openclaw(cfg: "SkillClawConfig") -> None:
# Hermes adapter #
# ------------------------------------------------------------------ #
+
def _load_yaml_mapping(path: Path, label: str) -> dict:
"""Load a YAML mapping, falling back to an empty mapping."""
if not path.exists():
@@ -491,7 +494,8 @@ def inspect_hermes_config(cfg: "SkillClawConfig") -> dict[str, object]:
issues: list[str] = []
notes: list[str] = [
"This integration only rewrites Hermes-local config and does not touch other claw adapters.",
- "Hermes session capture still relies on explicit session headers when available, with proxy-side heuristics as the fallback.",
+ "Hermes session capture still relies on explicit session headers when"
+ " available, with proxy-side heuristics as the fallback.",
]
next_steps: list[str] = []
@@ -505,10 +509,13 @@ def inspect_hermes_config(cfg: "SkillClawConfig") -> dict[str, object]:
next_steps.append(f"Create or prepare the Hermes skills directory: {expected_skills_dir}")
if legacy_present:
notes.append(
- f"Legacy SkillClaw skills were found at {_LEGACY_SKILLCLAW_SKILLS_DIR}; missing skills are copied into the Hermes library on startup."
+ f"Legacy SkillClaw skills were found at {_LEGACY_SKILLCLAW_SKILLS_DIR};"
+ " missing skills are copied into the Hermes library on startup."
)
if not backup_path:
- next_steps.append("Run SkillClaw once before relying on `skillclaw restore hermes`, so a backup can be created.")
+ next_steps.append(
+ "Run SkillClaw once before relying on `skillclaw restore hermes`, so a backup can be created."
+ )
return {
"status": "ok" if not issues else "warning",
@@ -591,6 +598,7 @@ def _copy_missing_skill_dirs(src_root: Path, dst_root: Path) -> int:
# Codex adapter #
# ------------------------------------------------------------------ #
+
def _backup_codex_config_if_changed(config_path: Path, new_text: str) -> Path | None:
return _backup_text_file_if_changed(
config_path,
@@ -684,7 +692,8 @@ def inspect_codex_config(cfg: "SkillClawConfig") -> dict[str, object]:
issues: list[str] = []
notes: list[str] = [
"Codex uses the OpenAI Responses-compatible SkillClaw endpoint via `model_providers.skillclaw`.",
- "Codex session boundaries fall back to proxy-side heuristics because Codex does not send SkillClaw session headers.",
+ "Codex session boundaries fall back to proxy-side heuristics because"
+ " Codex does not send SkillClaw session headers.",
]
next_steps: list[str] = []
@@ -745,6 +754,7 @@ def restore_codex_config(backup_path: Path | None = None) -> dict[str, str]:
# Claude Code adapter #
# ------------------------------------------------------------------ #
+
def _backup_claude_settings_if_changed(settings_path: Path, new_text: str) -> Path | None:
return _backup_text_file_if_changed(
settings_path,
@@ -795,17 +805,16 @@ def inspect_claude_config(cfg: "SkillClawConfig") -> dict[str, object]:
configured_base_url = str(env.get("ANTHROPIC_BASE_URL", "") or "")
configured_token = str(env.get("ANTHROPIC_AUTH_TOKEN", "") or "")
configured_model = str(data.get("model", "") or "")
- proxy_match = (
- configured_base_url == expected_base_url
- and configured_token == expected_api_key
- )
+ proxy_match = configured_base_url == expected_base_url and configured_token == expected_api_key
backup_path = _latest_claude_backup_path()
skills_dir_match = configured_skillclaw_skills_dir == expected_skills_dir
issues: list[str] = []
notes: list[str] = [
- "Claude Code uses SkillClaw through `ANTHROPIC_BASE_URL` and `ANTHROPIC_AUTH_TOKEN` in ~/.claude/settings.json.",
- "Claude Code session boundaries fall back to proxy-side heuristics because Claude Code does not send SkillClaw session headers.",
+ "Claude Code uses SkillClaw through `ANTHROPIC_BASE_URL` and"
+ " `ANTHROPIC_AUTH_TOKEN` in ~/.claude/settings.json.",
+ "Claude Code session boundaries fall back to proxy-side heuristics"
+ " because Claude Code does not send SkillClaw session headers.",
]
next_steps: list[str] = []
@@ -824,7 +833,9 @@ def inspect_claude_config(cfg: "SkillClawConfig") -> dict[str, object]:
)
next_steps.append(f"Set `skills.dir` to {expected_skills_dir} when using the Claude Code integration.")
if not backup_path:
- next_steps.append("Run SkillClaw once before relying on `skillclaw restore claude`, so a backup can be created.")
+ next_steps.append(
+ "Run SkillClaw once before relying on `skillclaw restore claude`, so a backup can be created."
+ )
return {
"status": "ok" if not issues else "warning",
@@ -864,6 +875,7 @@ def restore_claude_config(backup_path: Path | None = None) -> dict[str, str]:
# QwenPaw adapter #
# ------------------------------------------------------------------ #
+
def _get_qwenpaw_env(key: str, default: str = "") -> str:
"""Look up a QwenPaw env var."""
if key in os.environ:
@@ -873,12 +885,20 @@ def _get_qwenpaw_env(key: str, default: str = "") -> str:
def _resolve_qwenpaw_dirs() -> tuple[Path, Path]:
"""Resolve QwenPaw working/secret directories."""
- working_dir = Path(
- _get_qwenpaw_env("QWENPAW_WORKING_DIR", "~/.qwenpaw"),
- ).expanduser().resolve()
- secret_dir = Path(
- _get_qwenpaw_env("QWENPAW_SECRET_DIR", f"{working_dir}.secret"),
- ).expanduser().resolve()
+ working_dir = (
+ Path(
+ _get_qwenpaw_env("QWENPAW_WORKING_DIR", "~/.qwenpaw"),
+ )
+ .expanduser()
+ .resolve()
+ )
+ secret_dir = (
+ Path(
+ _get_qwenpaw_env("QWENPAW_SECRET_DIR", f"{working_dir}.secret"),
+ )
+ .expanduser()
+ .resolve()
+ )
return working_dir, secret_dir
@@ -947,16 +967,10 @@ def _configure_qwenpaw(cfg: "SkillClawConfig") -> None:
provider_data.get("support_connection_check", True),
)
provider_data["generate_kwargs"] = (
- provider_data["generate_kwargs"]
- if isinstance(provider_data.get("generate_kwargs"), dict)
- else {}
- )
- provider_data["meta"] = (
- provider_data["meta"] if isinstance(provider_data.get("meta"), dict) else {}
- )
- provider_data["models"] = (
- provider_data["models"] if isinstance(provider_data.get("models"), list) else []
+ provider_data["generate_kwargs"] if isinstance(provider_data.get("generate_kwargs"), dict) else {}
)
+ provider_data["meta"] = provider_data["meta"] if isinstance(provider_data.get("meta"), dict) else {}
+ provider_data["models"] = provider_data["models"] if isinstance(provider_data.get("models"), list) else []
provider_data["extra_models"] = _upsert_model_info(
provider_data.get("extra_models"),
model_id,
@@ -974,6 +988,7 @@ def _configure_qwenpaw(cfg: "SkillClawConfig") -> None:
# IronClaw adapter #
# ------------------------------------------------------------------ #
+
def _configure_ironclaw(cfg: "SkillClawConfig") -> None:
"""Auto-configure IronClaw to use the SkillClaw proxy.
@@ -1042,6 +1057,7 @@ def _patch_dotenv(env_path: Path, new_vars: dict[str, str], label: str = "IronCl
# PicoClaw adapter #
# ------------------------------------------------------------------ #
+
def _configure_picoclaw(cfg: "SkillClawConfig") -> None:
"""Auto-configure PicoClaw to use the SkillClaw proxy.
@@ -1105,6 +1121,7 @@ def _configure_picoclaw(cfg: "SkillClawConfig") -> None:
# ZeroClaw adapter #
# ------------------------------------------------------------------ #
+
def _configure_zeroclaw(cfg: "SkillClawConfig") -> None:
"""Auto-configure ZeroClaw to use the SkillClaw proxy.
@@ -1177,6 +1194,7 @@ def _patch_toml(toml_path: Path, new_vars: dict[str, str]) -> None:
# NanoClaw adapter #
# ------------------------------------------------------------------ #
+
def _configure_nanoclaw(cfg: "SkillClawConfig") -> None:
"""Auto-configure NanoClaw to route API calls through SkillClaw proxy.
@@ -1210,6 +1228,7 @@ def _configure_nanoclaw(cfg: "SkillClawConfig") -> None:
# Restart nanoclaw via launchd (macOS) or systemd --user (Linux)
if platform.system() == "Darwin":
import os
+
uid = os.getuid()
_run_commands(
"nanoclaw",
@@ -1233,6 +1252,7 @@ def _find_nanoclaw_env() -> Path | None:
if plist_path.exists():
try:
import plistlib
+
with open(plist_path, "rb") as f:
plist = plistlib.load(f)
work_dir = plist.get("WorkingDirectory")
@@ -1273,6 +1293,7 @@ def _find_nanoclaw_env() -> Path | None:
# NemoClaw adapter #
# ------------------------------------------------------------------ #
+
def _configure_nemoclaw(cfg: "SkillClawConfig") -> None:
"""Auto-configure NemoClaw to route inference through SkillClaw proxy.
@@ -1288,25 +1309,44 @@ def _configure_nemoclaw(cfg: "SkillClawConfig") -> None:
# Step 1: Register (or update) the skillclaw provider in OpenShell
create_cmd = [
- "openshell", "provider", "create",
- "--name", "skillclaw",
- "--type", "openai",
- "--credential", f"OPENAI_API_KEY={api_key}",
- "--config", f"OPENAI_BASE_URL={base_url}",
+ "openshell",
+ "provider",
+ "create",
+ "--name",
+ "skillclaw",
+ "--type",
+ "openai",
+ "--credential",
+ f"OPENAI_API_KEY={api_key}",
+ "--config",
+ f"OPENAI_BASE_URL={base_url}",
]
try:
result = subprocess.run(
- create_cmd, capture_output=True, text=True, timeout=15,
+ create_cmd,
+ capture_output=True,
+ text=True,
+ timeout=15,
)
if result.returncode != 0:
stderr = result.stderr or ""
if "AlreadyExists" in stderr or "already exists" in stderr.lower():
logger.info("[ClawAdapter] openshell provider 'skillclaw' exists — updating")
- _run_commands("nemoclaw", [[
- "openshell", "provider", "update", "skillclaw",
- "--credential", f"OPENAI_API_KEY={api_key}",
- "--config", f"OPENAI_BASE_URL={base_url}",
- ]])
+ _run_commands(
+ "nemoclaw",
+ [
+ [
+ "openshell",
+ "provider",
+ "update",
+ "skillclaw",
+ "--credential",
+ f"OPENAI_API_KEY={api_key}",
+ "--config",
+ f"OPENAI_BASE_URL={base_url}",
+ ]
+ ],
+ )
else:
logger.warning(
"[ClawAdapter] openshell provider create failed: %s",
@@ -1316,20 +1356,27 @@ def _configure_nemoclaw(cfg: "SkillClawConfig") -> None:
else:
logger.info("[ClawAdapter] openshell provider create skillclaw → ok")
except FileNotFoundError:
- logger.warning(
- "[ClawAdapter] 'openshell' not found in PATH — configure NemoClaw manually."
- )
+ logger.warning("[ClawAdapter] 'openshell' not found in PATH — configure NemoClaw manually.")
return
except Exception as e:
logger.warning("[ClawAdapter] openshell provider create error: %s", e)
return
# Step 2: Set inference route to the skillclaw provider
- _run_commands("nemoclaw", [[
- "openshell", "inference", "set",
- "--provider", "skillclaw",
- "--model", model_id,
- ]])
+ _run_commands(
+ "nemoclaw",
+ [
+ [
+ "openshell",
+ "inference",
+ "set",
+ "--provider",
+ "skillclaw",
+ "--model",
+ model_id,
+ ]
+ ],
+ )
# Step 3: Persist ~/.nemoclaw/config.json
_write_nemoclaw_config(base_url, model_id, api_key)
@@ -1365,6 +1412,7 @@ def _write_nemoclaw_config(endpoint_url: str, model: str, api_key: str) -> None:
# Noop adapter #
# ------------------------------------------------------------------ #
+
def _configure_none(cfg: "SkillClawConfig") -> None:
logger.info("[ClawAdapter] claw_type=none — skipping auto-configuration")
@@ -1395,6 +1443,7 @@ def _configure_none(cfg: "SkillClawConfig") -> None:
# Shared helper #
# ------------------------------------------------------------------ #
+
def _run_commands(
agent_name: str,
commands: list[list[str]],
@@ -1425,8 +1474,7 @@ def _run_commands(
)
else:
logger.warning(
- "[ClawAdapter] '%s' not found in PATH — skipping auto-config. "
- "Configure %s manually.",
+ "[ClawAdapter] '%s' not found in PATH — skipping auto-config. Configure %s manually.",
cmd[0],
agent_name,
)
diff --git a/skillclaw/cli.py b/skillclaw/cli.py
index 01af6e9..7895283 100644
--- a/skillclaw/cli.py
+++ b/skillclaw/cli.py
@@ -24,8 +24,8 @@
print("SkillClaw requires 'click'. Install it with: pip install click")
sys.exit(1)
-from .config_store import CONFIG_FILE, ConfigStore
from . import runtime_state
+from .config_store import CONFIG_FILE, ConfigStore
def _default_daemon_log_path() -> Path:
@@ -135,17 +135,12 @@ def _wait_for_daemon_ready(proc, port: int, log_path: Path, timeout_s: float = 1
while time.monotonic() < deadline:
returncode = proc.poll()
if returncode is not None:
- raise click.ClickException(
- f"SkillClaw daemon exited with code {returncode}. Check logs: {log_path}"
- )
+ raise click.ClickException(f"SkillClaw daemon exited with code {returncode}. Check logs: {log_path}")
if _healthz_ready(port):
return
time.sleep(0.2)
- raise click.ClickException(
- "SkillClaw daemon did not become healthy in time. "
- f"Check logs: {log_path}"
- )
+ raise click.ClickException(f"SkillClaw daemon did not become healthy in time. Check logs: {log_path}")
def _daemon_ready_timeout_seconds(default: float = 15.0) -> float:
@@ -191,9 +186,8 @@ def _spawn_daemon_process(
"env": child_env,
}
if os.name == "nt":
- creationflags = (
- getattr(subprocess, "DETACHED_PROCESS", 0)
- | getattr(subprocess, "CREATE_NEW_PROCESS_GROUP", 0)
+ creationflags = getattr(subprocess, "DETACHED_PROCESS", 0) | getattr(
+ subprocess, "CREATE_NEW_PROCESS_GROUP", 0
)
if creationflags:
popen_kwargs["creationflags"] = creationflags
@@ -247,6 +241,7 @@ def skillclaw():
def setup():
"""Interactive first-time configuration wizard."""
from .setup_wizard import SetupWizard
+
SetupWizard().run()
@@ -273,6 +268,7 @@ def setup():
def start(port: int | None, daemon: bool, log_file: str | None):
"""Start SkillClaw (proxy + skill injection + optional PRM)."""
import asyncio
+
from .log_color import setup_logging
setup_logging()
@@ -298,15 +294,15 @@ def start(port: int | None, daemon: bool, log_file: str | None):
return
if port:
- from .config_store import ConfigStore as _CS
import tempfile
+
import yaml
+ from .config_store import ConfigStore as _CS
+
data = cs.load()
data.setdefault("proxy", {})["port"] = port
- tmp = tempfile.NamedTemporaryFile(
- mode="w", suffix=".yaml", delete=False, encoding="utf-8"
- )
+ tmp = tempfile.NamedTemporaryFile(mode="w", suffix=".yaml", delete=False, encoding="utf-8")
try:
yaml.dump(data, tmp)
finally:
@@ -317,6 +313,7 @@ def start(port: int | None, daemon: bool, log_file: str | None):
tmp_path = None
from .launcher import SkillClawLauncher
+
launcher = SkillClawLauncher(cs)
try:
asyncio.run(launcher.start())
@@ -467,15 +464,11 @@ def restore_hermes(backup_path: str | None):
from .claw_adapter import restore_hermes_config
try:
- result = restore_hermes_config(
- Path(backup_path).expanduser() if backup_path else None
- )
+ result = restore_hermes_config(Path(backup_path).expanduser() if backup_path else None)
except FileNotFoundError as exc:
raise click.ClickException(str(exc)) from None
- click.echo(
- f"Restored Hermes config: {result['target']} <- {result['source']}"
- )
+ click.echo(f"Restored Hermes config: {result['target']} <- {result['source']}")
@restore.command(name="codex")
@@ -491,15 +484,11 @@ def restore_codex(backup_path: str | None):
from .claw_adapter import restore_codex_config
try:
- result = restore_codex_config(
- Path(backup_path).expanduser() if backup_path else None
- )
+ result = restore_codex_config(Path(backup_path).expanduser() if backup_path else None)
except FileNotFoundError as exc:
raise click.ClickException(str(exc)) from None
- click.echo(
- f"Restored Codex config: {result['target']} <- {result['source']}"
- )
+ click.echo(f"Restored Codex config: {result['target']} <- {result['source']}")
@restore.command(name="claude")
@@ -515,15 +504,11 @@ def restore_claude(backup_path: str | None):
from .claw_adapter import restore_claude_config
try:
- result = restore_claude_config(
- Path(backup_path).expanduser() if backup_path else None
- )
+ result = restore_claude_config(Path(backup_path).expanduser() if backup_path else None)
except FileNotFoundError as exc:
raise click.ClickException(str(exc)) from None
- click.echo(
- f"Restored Claude Code settings: {result['target']} <- {result['source']}"
- )
+ click.echo(f"Restored Claude Code settings: {result['target']} <- {result['source']}")
@skillclaw.group()
@@ -549,6 +534,7 @@ def validation_status():
def validation_run_once(force: bool):
"""Run one background validation polling iteration."""
import asyncio
+
from .validation_worker import ValidationWorker
cs = ConfigStore()
@@ -597,38 +583,27 @@ def _require_sharing(cs: ConfigStore):
backend = _sharing_backend(cfg)
if backend == "local":
if not cfg.sharing_local_root:
- raise click.ClickException(
- "Local sharing backend is not configured. "
- "Set sharing.local_root first."
- )
+ raise click.ClickException("Local sharing backend is not configured. Set sharing.local_root first.")
elif backend == "s3":
if not cfg.sharing_bucket:
- raise click.ClickException(
- "S3 bucket is not configured. "
- "Set sharing.bucket first."
- )
+ raise click.ClickException("S3 bucket is not configured. Set sharing.bucket first.")
if not cfg.sharing_access_key_id or not cfg.sharing_secret_access_key:
raise click.ClickException(
- "S3 credentials are not configured. "
- "Set sharing.access_key_id and sharing.secret_access_key."
+ "S3 credentials are not configured. Set sharing.access_key_id and sharing.secret_access_key."
)
elif backend == "oss":
if not cfg.sharing_endpoint or not cfg.sharing_bucket:
raise click.ClickException(
- "OSS endpoint or bucket is not configured. "
- "Set sharing.endpoint and sharing.bucket first."
+ "OSS endpoint or bucket is not configured. Set sharing.endpoint and sharing.bucket first."
)
if not cfg.sharing_access_key_id or not cfg.sharing_secret_access_key:
raise click.ClickException(
- "OSS credentials are not configured. "
- "Set sharing.access_key_id and sharing.secret_access_key."
+ "OSS credentials are not configured. Set sharing.access_key_id and sharing.secret_access_key."
)
else:
- raise click.ClickException(
- "Sharing backend is not configured. "
- "Set sharing.backend to local, s3, or oss."
- )
+ raise click.ClickException("Sharing backend is not configured. Set sharing.backend to local, s3, or oss.")
from .skill_hub import SkillHub
+
hub = SkillHub.from_config(cfg)
return cfg, hub
@@ -645,6 +620,7 @@ def skills_push(no_filter):
stats_path = os.path.join(cfg.skills_dir, "skill_stats.json")
if os.path.exists(stats_path):
import json
+
try:
with open(stats_path, encoding="utf-8") as f:
stats = json.load(f)
@@ -706,9 +682,9 @@ def skills_list_remote():
if not remote:
click.echo("No skills found on the cloud.")
return
- click.echo(f"\n{'='*60}")
+ click.echo(f"\n{'=' * 60}")
click.echo(f" Shared Skills ({len(remote)} total)")
- click.echo(f"{'='*60}\n")
+ click.echo(f"{'=' * 60}\n")
for rec in sorted(remote, key=lambda r: r.get("name", "")):
name = rec.get("name", "?")
desc = rec.get("description", "")
diff --git a/skillclaw/config_store.py b/skillclaw/config_store.py
index 429aa4d..9bfa417 100644
--- a/skillclaw/config_store.py
+++ b/skillclaw/config_store.py
@@ -200,6 +200,7 @@ def load(self) -> dict:
return _deep_merge({}, _DEFAULTS)
try:
import yaml
+
with open(self.config_file, "r", encoding="utf-8") as f:
data = yaml.safe_load(f) or {}
return _deep_merge(_DEFAULTS, data)
@@ -208,6 +209,7 @@ def load(self) -> dict:
def save(self, data: dict):
import yaml
+
self.config_file.parent.mkdir(parents=True, exist_ok=True)
with open(self.config_file, "w", encoding="utf-8") as f:
yaml.dump(data, f, default_flow_style=False, allow_unicode=True)
@@ -288,8 +290,7 @@ def to_skillclaw_config(self) -> SkillClawConfig:
proxy_host=proxy.get("host", "0.0.0.0"),
proxy_api_key=str(proxy.get("api_key", "") or ""),
served_model_name=(
- _first_non_empty(proxy, "served_model_name")
- or _default_served_model_name(llm_model_id)
+ _first_non_empty(proxy, "served_model_name") or _default_served_model_name(llm_model_id)
),
# Skills
use_skills=bool(skills.get("enabled", True)),
@@ -348,12 +349,20 @@ def describe(self) -> str:
f"llm.provider: {llm.get('provider', '?')}",
f"llm.model_id: {llm.get('model_id', '?')}",
f"llm.api_base: {llm.get('api_base', '—') if llm.get('provider') != 'bedrock' else '(n/a)'}",
- *([ f"llm.bedrock_region: {llm.get('bedrock_region', 'us-east-1')}" ] if llm.get('provider') == 'bedrock' else []),
- *([
- f"openrouter.route: {data.get('openrouter', {}).get('route', 'fallback')}",
- f"openrouter.fallback: {data.get('openrouter', {}).get('fallback_models', '') or '(none)'}",
- f"openrouter.data: {data.get('openrouter', {}).get('data_policy', '') or 'allow'}",
- ] if llm.get('provider') == 'openrouter' else []),
+ *(
+ [f"llm.bedrock_region: {llm.get('bedrock_region', 'us-east-1')}"]
+ if llm.get("provider") == "bedrock"
+ else []
+ ),
+ *(
+ [
+ f"openrouter.route: {data.get('openrouter', {}).get('route', 'fallback')}",
+ f"openrouter.fallback: {data.get('openrouter', {}).get('fallback_models', '') or '(none)'}",
+ f"openrouter.data: {data.get('openrouter', {}).get('data_policy', '') or 'allow'}",
+ ]
+ if llm.get("provider") == "openrouter"
+ else []
+ ),
f"proxy.port: {data.get('proxy', {}).get('port', 30000)}",
f"skills.enabled: {skills.get('enabled', True)}",
f"skills.dir: {effective_skills_dir}",
@@ -364,7 +373,7 @@ def describe(self) -> str:
if sharing.get("enabled"):
backend = _infer_sharing_backend(sharing) or "unknown"
lines += [
- f"sharing.enabled: True",
+ "sharing.enabled: True",
f"sharing.backend: {backend}",
]
if backend == "local":
@@ -382,7 +391,7 @@ def describe(self) -> str:
f"sharing.auto_pull: {sharing.get('auto_pull_on_start', False)}",
]
else:
- lines.append(f"sharing.enabled: False")
+ lines.append("sharing.enabled: False")
lines += [
f"validation.enabled: {validation.get('enabled', False)}",
f"validation.mode: {_normalize_validation_mode(validation.get('mode', 'replay'))}",
diff --git a/skillclaw/launcher.py b/skillclaw/launcher.py
index da8c0df..958731b 100644
--- a/skillclaw/launcher.py
+++ b/skillclaw/launcher.py
@@ -86,6 +86,7 @@ async def _run(self, cfg):
if cfg.use_prm and prm_provider == "bedrock" and prm_model:
from .bedrock_client import BedrockChatClient
+
prm_client = BedrockChatClient(
model_id=prm_model,
region=cfg.bedrock_region,
@@ -125,11 +126,14 @@ async def _run(self, cfg):
if cfg.sharing_enabled and cfg.sharing_auto_pull_on_start:
try:
from .skill_hub import SkillHub
+
hub = SkillHub.from_config(cfg)
result = hub.pull_skills(cfg.skills_dir)
logger.info(
"[Launcher] auto-pull: %d downloaded, %d unchanged, %d deleted",
- result["downloaded"], result["skipped"], result.get("deleted", 0),
+ result["downloaded"],
+ result["skipped"],
+ result.get("deleted", 0),
)
if skill_manager is not None and (
result.get("downloaded", 0) > 0
@@ -159,11 +163,10 @@ async def _run(self, cfg):
cfg.proxy_port,
)
else:
- logger.info(
- "[Launcher] proxy server does not expose wait_until_ready(); skipping readiness wait"
- )
+ logger.info("[Launcher] proxy server does not expose wait_until_ready(); skipping readiness wait")
from .claw_adapter import configure_claw
+
configure_claw(cfg)
if getattr(cfg, "validation_enabled", False):
diff --git a/skillclaw/log_color.py b/skillclaw/log_color.py
index 690600d..037d96c 100644
--- a/skillclaw/log_color.py
+++ b/skillclaw/log_color.py
@@ -54,17 +54,13 @@ def _colorize_message(message: str, *, level: str, logger_name: str) -> str:
text = message
if "tokenization_kimi" in logger_name and TOKENIZATION_KIMI_RELOADED_RE.search(text):
return f"{ANSI_BOLD}{ANSI_BLUE}{text}{ANSI_RESET}"
- if (
- "tokenization_kimi" in logger_name
- and (TOKENIZATION_KIMI_PLAIN_RE.search(text) or TOKENIZATION_KIMI_WORDS_PLAIN_RE.search(text))
+ if "tokenization_kimi" in logger_name and (
+ TOKENIZATION_KIMI_PLAIN_RE.search(text) or TOKENIZATION_KIMI_WORDS_PLAIN_RE.search(text)
):
return text
if "huggingface_hub.utils._http" in logger_name and HF_HUB_UNAUTH_PLAIN_RE.search(text):
return text
- if (
- "tinker.lib.public_interfaces.service_client" in logger_name
- and TINKER_CLIENT_INIT_PLAIN_RE.search(text)
- ):
+ if "tinker.lib.public_interfaces.service_client" in logger_name and TINKER_CLIENT_INIT_PLAIN_RE.search(text):
return text
if "tinker.lib.telemetry" in logger_name and TINKER_TELEMETRY_EXCEPTION_PLAIN_RE.search(text):
return text
diff --git a/skillclaw/prm_scorer.py b/skillclaw/prm_scorer.py
index dc1a31f..5469383 100644
--- a/skillclaw/prm_scorer.py
+++ b/skillclaw/prm_scorer.py
@@ -36,9 +36,11 @@
# Pure helpers (no I/O) #
# ------------------------------------------------------------------ #
+
def _sanitize_text(text: str) -> str:
"""Replace XML-like tags that may trigger content filters."""
import re as _re
+
# Replace ... blocks with a neutral label
text = _re.sub(r".*?", "[tool_call block]", text, flags=_re.DOTALL)
# Replace any remaining angle-bracket tags
@@ -110,6 +112,7 @@ def _majority_vote(scores: list[Optional[int]]) -> float:
# PRMScorer class #
# ------------------------------------------------------------------ #
+
class PRMScorer:
"""
Async PRM scorer using any OpenAI-compatible /v1/chat/completions API.
@@ -154,10 +157,7 @@ def __init__(
try:
from openai import OpenAI
except ImportError as e:
- raise ImportError(
- "PRMScorer requires the 'openai' package. "
- "Install it with: pip install openai"
- ) from e
+ raise ImportError("PRMScorer requires the 'openai' package. Install it with: pip install openai") from e
base_url = prm_url.rstrip("/")
client_kwargs: dict[str, Any] = {"api_key": api_key}
client_kwargs["base_url"] = base_url
@@ -187,9 +187,7 @@ async def evaluate(
"""
msgs = _build_prm_judge_prompt(response, instruction)
- results = await asyncio.gather(
- *[self._query_once(msgs, i) for i in range(self.prm_m)]
- )
+ results = await asyncio.gather(*[self._query_once(msgs, i) for i in range(self.prm_m)])
scores = [r[0] for r in results]
final = _majority_vote(scores)
@@ -208,9 +206,7 @@ async def evaluate(
)
return {"score": final, "votes": votes_display, "eval_text": representative}
- async def _query_once(
- self, messages: list[dict], vote_id: int
- ) -> tuple[Optional[int], str]:
+ async def _query_once(self, messages: list[dict], vote_id: int) -> tuple[Optional[int], str]:
try:
completion = await asyncio.to_thread(
self._client.chat.completions.create,
diff --git a/skillclaw/runtime_state.py b/skillclaw/runtime_state.py
index 69beb9a..be44e92 100644
--- a/skillclaw/runtime_state.py
+++ b/skillclaw/runtime_state.py
@@ -1,9 +1,9 @@
# Adapted from MetaClaw
from __future__ import annotations
-from contextlib import contextmanager
import os
import tempfile
+from contextlib import contextmanager
from pathlib import Path
from typing import Any
diff --git a/skillclaw/setup_wizard.py b/skillclaw/setup_wizard.py
index ae8eb5f..71168f0 100644
--- a/skillclaw/setup_wizard.py
+++ b/skillclaw/setup_wizard.py
@@ -48,6 +48,7 @@
def _prompt(msg: str, default: str = "", hide: bool = False) -> str:
import getpass
+
if default:
display_default = "***" if hide else default
full_msg = f"{msg} [{display_default}]: "
@@ -82,9 +83,7 @@ def _prompt_int(msg: str, default: int = 0) -> int:
def _prompt_choice(msg: str, choices: list[str], default: str = "") -> str:
- choices_str = "/".join(
- f"[{c}]" if c == default else c for c in choices
- )
+ choices_str = "/".join(f"[{c}]" if c == default else c for c in choices)
while True:
val = _prompt(f"{msg} ({choices_str})", default)
if val in choices:
@@ -98,10 +97,7 @@ def _infer_existing_sharing_backend(current_sharing: dict) -> str:
return backend
if current_sharing.get("local_root"):
return "local"
- if any(
- current_sharing.get(key)
- for key in ("endpoint", "bucket", "access_key_id", "secret_access_key")
- ):
+ if any(current_sharing.get(key) for key in ("endpoint", "bucket", "access_key_id", "secret_access_key")):
return "s3"
return "s3"
@@ -121,10 +117,7 @@ def run(self):
# ---- CLI agent (claw type) ----
print("\n--- CLI Agent ---")
- print(
- "SkillClaw will auto-configure the chosen agent to route its LLM\n"
- "calls through the SkillClaw proxy."
- )
+ print("SkillClaw will auto-configure the chosen agent to route its LLM\ncalls through the SkillClaw proxy.")
current_claw = existing.get("claw_type", "openclaw")
claw_type = _prompt_choice(
"CLI agent to configure",
@@ -209,18 +202,12 @@ def run(self):
f"Recommended directory: {default_skills_dir}"
)
elif claw_type == "codex":
- print(
- "Codex reads native skills from ~/.codex/skills.\n"
- f"Recommended directory: {default_skills_dir}"
- )
+ print(f"Codex reads native skills from ~/.codex/skills.\nRecommended directory: {default_skills_dir}")
elif claw_type == "claude":
print(
- "Claude Code reads native skills from ~/.claude/skills.\n"
- f"Recommended directory: {default_skills_dir}"
+ f"Claude Code reads native skills from ~/.claude/skills.\nRecommended directory: {default_skills_dir}"
)
- skills_enabled = _prompt_bool(
- "Enable skill injection", default=current_skills.get("enabled", True)
- )
+ skills_enabled = _prompt_bool("Enable skill injection", default=current_skills.get("enabled", True))
skills_dir = _prompt(
"Skills directory",
default=default_skills_dir,
@@ -344,10 +331,7 @@ def run(self):
# ---- Proxy port ----
print("\n--- Proxy Configuration ---")
current_proxy = existing.get("proxy", {})
- default_served_model_name = str(
- current_proxy.get("served_model_name")
- or "skillclaw-model"
- )
+ default_served_model_name = str(current_proxy.get("served_model_name") or "skillclaw-model")
served_model_name = _prompt(
"Proxy model name exposed to agents",
default=default_served_model_name,
diff --git a/skillclaw/skill_hub.py b/skillclaw/skill_hub.py
index 25582df..5053e7f 100644
--- a/skillclaw/skill_hub.py
+++ b/skillclaw/skill_hub.py
@@ -15,12 +15,12 @@
from __future__ import annotations
+import glob
import hashlib
import json
import logging
import os
import shutil
-import glob
from datetime import datetime, timezone
from typing import Any, Collection, Optional
@@ -38,9 +38,7 @@ def _compute_sha256(path: str) -> str:
def _is_hermes_skill_root(skills_dir: str) -> bool:
- return os.path.realpath(skills_dir) == os.path.realpath(
- os.path.join(os.path.expanduser("~"), ".hermes", "skills")
- )
+ return os.path.realpath(skills_dir) == os.path.realpath(os.path.join(os.path.expanduser("~"), ".hermes", "skills"))
def _skill_dir_for_root(skills_dir: str, skill_name: str, category: str = "general") -> str:
@@ -215,7 +213,10 @@ def push_skills(
if inj >= min_inj and eff < min_eff:
logger.info(
"[SkillHub] filtered out skill %s (effectiveness=%.2f < %.2f, injections=%d)",
- skill_name, eff, min_eff, inj,
+ skill_name,
+ eff,
+ min_eff,
+ inj,
)
filtered += 1
continue
@@ -246,7 +247,10 @@ def push_skills(
logger.info(
"[SkillHub] push complete: %d uploaded, %d skipped, %d filtered, %d total",
- uploaded, skipped, filtered, len(paths),
+ uploaded,
+ skipped,
+ filtered,
+ len(paths),
)
return {"uploaded": uploaded, "skipped": skipped, "filtered": filtered, "total_local": len(paths)}
@@ -267,6 +271,7 @@ def _enrich_manifest_entry(entry: dict, skill_path: str) -> None:
try:
import yaml
+
fm = yaml.safe_load(raw[3:end_idx].strip()) or {}
except Exception:
fm = {}
@@ -331,10 +336,7 @@ def _resolve_pull_target_dir(
return target
if str(category or "general").strip() == "general":
- nested = [
- path for path in existing_dirs
- if len(os.path.relpath(path, skills_dir).split(os.sep)) >= 2
- ]
+ nested = [path for path in existing_dirs if len(os.path.relpath(path, skills_dir).split(os.sep)) >= 2]
if len(nested) == 1:
return nested[0]
@@ -364,10 +366,7 @@ def _remove_duplicate_local_skill_dirs(
def _prune_backups(backup_root: str, prefix: str, keep: int = 3) -> None:
"""Keep only newest `keep` backups for current skills dir."""
try:
- names = sorted(
- n for n in os.listdir(backup_root)
- if n.startswith(prefix)
- )
+ names = sorted(n for n in os.listdir(backup_root) if n.startswith(prefix))
except Exception:
return
to_delete = names[:-keep] if keep > 0 else names
@@ -412,17 +411,10 @@ def pull_skills(
local_dirs_by_name = self._list_local_skill_dirs(skills_dir)
local_skills = {name: dirs[-1] for name, dirs in local_dirs_by_name.items() if dirs}
manifest = self._load_remote_manifest()
- skip_set = {
- str(name or "").strip()
- for name in (skip_names or [])
- if str(name or "").strip()
- }
+ skip_set = {str(name or "").strip() for name in (skip_names or []) if str(name or "").strip()}
if not manifest:
# Empty/failed manifest is treated as no-op to avoid accidental wipe.
- logger.warning(
- "[SkillHub] remote manifest empty; skip mirror pull "
- "(downloaded=0 skipped=0 deleted=0)"
- )
+ logger.warning("[SkillHub] remote manifest empty; skip mirror pull (downloaded=0 skipped=0 deleted=0)")
return {
"downloaded": 0,
"skipped": 0,
@@ -478,7 +470,9 @@ def pull_skills(
logger.info(
"[SkillHub] incremental pull complete: %d downloaded, %d skipped, %d total remote",
- downloaded, skipped, len(manifest),
+ downloaded,
+ skipped,
+ len(manifest),
)
return {
"downloaded": downloaded,
@@ -610,7 +604,10 @@ def pull_skills(
logger.info(
"[SkillHub] pull complete: %d downloaded, %d skipped, %d deleted, %d total remote",
- downloaded, skipped, deleted, len(manifest),
+ downloaded,
+ skipped,
+ deleted,
+ len(manifest),
)
self._prune_backups(backup_root, backup_prefix, keep=3)
return {
diff --git a/skillclaw/skill_manager.py b/skillclaw/skill_manager.py
index 3abaa9b..0513ead 100644
--- a/skillclaw/skill_manager.py
+++ b/skillclaw/skill_manager.py
@@ -55,7 +55,7 @@
import re
import time
from collections import Counter
-from typing import Any, Dict, List, Optional
+from typing import Any, Dict, Optional
import yaml
@@ -99,7 +99,7 @@ def _parse_skill_md(path: str) -> Optional[Dict[str, Any]]:
return None
fm_text = raw[3:end_idx].strip()
- body = raw[end_idx + 4:].strip()
+ body = raw[end_idx + 4 :].strip()
try:
fm = yaml.safe_load(fm_text) or {}
@@ -152,6 +152,7 @@ def _parse_skill_md(path: str) -> Optional[Dict[str, Any]]:
# SkillManager #
# ------------------------------------------------------------------ #
+
class SkillManager:
"""Loads skills from a directory of AgentSkills / OpenClaw compatible
skill folders.
@@ -173,9 +174,7 @@ def __init__(
embedding_model_path: Optional[str] = None,
):
if retrieval_mode not in ("template", "embedding"):
- raise ValueError(
- f"retrieval_mode must be 'template' or 'embedding', got '{retrieval_mode}'"
- )
+ raise ValueError(f"retrieval_mode must be 'template' or 'embedding', got '{retrieval_mode}'")
if not os.path.isdir(skills_dir):
raise FileNotFoundError(f"Skills directory not found: {skills_dir}")
@@ -244,11 +243,17 @@ def record_injection(self, skill_names: list[str]) -> None:
"""Record that these skills were injected into a request."""
now = time.strftime("%Y-%m-%dT%H:%M:%S")
for name in skill_names:
- entry = self._stats.setdefault(name, {
- "inject_count": 0, "positive_count": 0,
- "negative_count": 0, "neutral_count": 0,
- "last_injected_at": "", "effectiveness": 0.5,
- })
+ entry = self._stats.setdefault(
+ name,
+ {
+ "inject_count": 0,
+ "positive_count": 0,
+ "negative_count": 0,
+ "neutral_count": 0,
+ "last_injected_at": "",
+ "effectiveness": 0.5,
+ },
+ )
entry["inject_count"] += 1
entry["last_injected_at"] = now
self._maybe_flush_stats()
@@ -502,10 +507,9 @@ def get_all_skills(self) -> list[dict]:
are filtered out (matching OpenClaw behaviour).
"""
return [
- s for s in self.skills.get("all_skills", [])
- if not s.get("_extra_frontmatter", {}).get(
- "disable-model-invocation", False
- )
+ s
+ for s in self.skills.get("all_skills", [])
+ if not s.get("_extra_frontmatter", {}).get("disable-model-invocation", False)
]
def get_skill_path_map(self) -> Dict[str, Dict[str, str]]:
@@ -535,8 +539,7 @@ def _public_skill_path(self, skill: dict) -> str:
def _escape_xml(text: str) -> str:
"""Escape XML special characters (matching OpenClaw's escapeXml)."""
return (
-        text
-        .replace("&", "&amp;")
+        text.replace("&", "&amp;")
        .replace("<", "&lt;")
        .replace(">", "&gt;")
        .replace('"', "&quot;")
@@ -604,7 +607,8 @@ def format_skills_compact(self, skills: list[dict]) -> str:
@staticmethod
def build_skills_section(
- skills_prompt: str, read_tool_name: str = "read",
+ skills_prompt: str,
+ read_tool_name: str = "read",
) -> str:
"""Wrap a skills catalog string with the ``## Skills (mandatory)``
instruction block.
@@ -614,20 +618,22 @@ def build_skills_section(
trimmed = skills_prompt.strip()
if not trimmed:
return ""
- return "\n".join([
- "## Skills (mandatory)",
- "Before replying: scan entries.",
-        f"- If exactly one skill clearly applies: read its SKILL.md at "
-        f"<path> with `{read_tool_name}`, then follow it.",
- "- If multiple could apply: choose the most specific one, then read/follow it.",
- "- If none clearly apply: do not read any SKILL.md.",
- "Constraints: never read more than one skill up front; only read after selecting.",
- "- When a skill drives external API writes, assume rate limits: prefer fewer "
- "larger writes, avoid tight one-item loops, serialize bursts when possible, "
- "and respect 429/Retry-After.",
- trimmed,
- "",
- ])
+ return "\n".join(
+ [
+ "## Skills (mandatory)",
+ "Before replying: scan entries.",
+            f"- If exactly one skill clearly applies: read its SKILL.md at "
+            f"<path> with `{read_tool_name}`, then follow it.",
+ "- If multiple could apply: choose the most specific one, then read/follow it.",
+ "- If none clearly apply: do not read any SKILL.md.",
+ "Constraints: never read more than one skill up front; only read after selecting.",
+ "- When a skill drives external API writes, assume rate limits: prefer fewer "
+ "larger writes, avoid tight one-item loops, serialize bursts when possible, "
+ "and respect 429/Retry-After.",
+ trimmed,
+ "",
+ ]
+ )
def build_injection_prompt(
self,
@@ -652,10 +658,7 @@ def build_injection_prompt(
def _remove_skill_from_memory(self, name: str) -> None:
"""Remove a skill from in-memory structures (not from disk)."""
- self.skills["all_skills"] = [
- s for s in self.skills.get("all_skills", [])
- if s.get("name") != name
- ]
+ self.skills["all_skills"] = [s for s in self.skills.get("all_skills", []) if s.get("name") != name]
def add_skill(self, skill: dict) -> bool:
"""
@@ -682,8 +685,7 @@ def add_skill(self, skill: dict) -> bool:
logger.info("[SkillManager] skipping duplicate skill: %s", name)
return False
- clean_skill = {k: v for k, v in skill.items()
- if not k.startswith("_") or k == "_extra_frontmatter"}
+ clean_skill = {k: v for k, v in skill.items() if not k.startswith("_") or k == "_extra_frontmatter"}
if "id" not in clean_skill:
clean_skill["id"] = hashlib.sha256(name.encode()).hexdigest()[:12]
if "file_path" not in clean_skill:
@@ -740,14 +742,9 @@ def _format_frontmatter(skill: dict) -> str:
lines.append(f"name: {name}")
- needs_quoting = any(c in description for c in ':{}[],"\'#&*!|>%@`\n')
+ needs_quoting = any(c in description for c in ":{}[],\"'#&*!|>%@`\n")
if needs_quoting:
- escaped = (
- description
- .replace("\\", "\\\\")
- .replace('"', '\\"')
- .replace("\n", "\\n")
- )
+ escaped = description.replace("\\", "\\\\").replace('"', '\\"').replace("\n", "\\n")
lines.append(f'description: "{escaped}"')
else:
lines.append(f"description: {description}")
@@ -757,8 +754,10 @@ def _format_frontmatter(skill: dict) -> str:
if key.startswith("_"):
continue
dumped = yaml.dump(
- {key: value}, default_flow_style=False,
- allow_unicode=True, width=10000,
+ {key: value},
+ default_flow_style=False,
+ allow_unicode=True,
+ width=10000,
).strip()
lines.append(dumped)
@@ -828,17 +827,10 @@ def save(self, path: Optional[str] = None) -> None:
logger.info("[SkillManager] saved %d skills to %s", len(all_skills), self._skills_dir)
def _get_all_skill_names(self) -> set:
- return {
- str(s.get("name"))
- for s in self.skills.get("all_skills", [])
- if s.get("name")
- }
+ return {str(s.get("name")) for s in self.skills.get("all_skills", []) if s.get("name")}
def _category_counts(self) -> Counter:
- return Counter(
- str(s.get("category") or "general")
- for s in self.skills.get("all_skills", [])
- )
+ return Counter(str(s.get("category") or "general") for s in self.skills.get("all_skills", []))
def get_skill_count(self) -> dict:
return {
diff --git a/skillclaw/utils.py b/skillclaw/utils.py
index efb1092..825f53c 100644
--- a/skillclaw/utils.py
+++ b/skillclaw/utils.py
@@ -1,6 +1,6 @@
# Adapted from MetaClaw
-from typing import Any, Optional, TYPE_CHECKING
import os
+from typing import TYPE_CHECKING, Optional
if TYPE_CHECKING:
from .config import SkillClawConfig
@@ -56,14 +56,10 @@ def run_llm(messages, config: Optional["SkillClawConfig"] = None):
def _run_llm_bedrock(messages, config: Optional["SkillClawConfig"] = None):
from .bedrock_client import BedrockChatClient
- model_id = (
- (getattr(config, "llm_model_id", "") if config else "")
- or os.environ.get("BEDROCK_MODEL", "us.anthropic.claude-sonnet-4-6")
- )
- region = (
- (getattr(config, "bedrock_region", "") if config else "")
- or os.environ.get("BEDROCK_REGION", "us-east-1")
+ model_id = (getattr(config, "llm_model_id", "") if config else "") or os.environ.get(
+ "BEDROCK_MODEL", "us.anthropic.claude-sonnet-4-6"
)
+ region = (getattr(config, "bedrock_region", "") if config else "") or os.environ.get("BEDROCK_REGION", "us-east-1")
client = BedrockChatClient(model_id=model_id, region=region)
rewrite_messages = [{"role": "system", "content": _COMPRESSION_INSTRUCTION}, *messages]
@@ -80,8 +76,7 @@ def _run_llm_openai(messages, config: Optional["SkillClawConfig"] = None):
from openai import OpenAI
except ImportError as e:
raise ImportError(
- "The openai provider requires the 'openai' package. "
- "Install it with: pip install openai"
+ "The openai provider requires the 'openai' package. Install it with: pip install openai"
) from e
api_base = ""
diff --git a/skillclaw/validation_store.py b/skillclaw/validation_store.py
index 63119cb..08d832a 100644
--- a/skillclaw/validation_store.py
+++ b/skillclaw/validation_store.py
@@ -14,9 +14,10 @@
from datetime import datetime, timezone
from typing import Any, Optional
-from .object_store import build_object_store, is_not_found_error
from evolve_server.core.utils import build_skill_md
+from .object_store import build_object_store, is_not_found_error
+
logger = logging.getLogger(__name__)
diff --git a/skillclaw/validation_worker.py b/skillclaw/validation_worker.py
index 0605fb5..0bdfa4a 100644
--- a/skillclaw/validation_worker.py
+++ b/skillclaw/validation_worker.py
@@ -78,11 +78,7 @@ def stop(self) -> None:
self._stop_event.set()
def _validation_enabled(self) -> bool:
- return bool(
- self.config.validation_enabled
- and self.config.sharing_enabled
- and self.config.sharing_group_id
- )
+ return bool(self.config.validation_enabled and self.config.sharing_enabled and self.config.sharing_group_id)
def _reset_daily_quota_if_needed(self) -> None:
today = datetime.now(timezone.utc).date().isoformat()