diff --git a/packages/contexto/README.md b/packages/contexto/README.md index a2762af..483e7c8 100644 --- a/packages/contexto/README.md +++ b/packages/contexto/README.md @@ -120,7 +120,24 @@ For the deeper technical reasoning: | Property | Type | Required | Description | | --- | --- | --- | --- | -| `apiKey` | string | Yes | Your Contexto API key | +| `apiKey` | string | Yes (remote) | Your Contexto API key | +| `mode` | string | No | `remote` (default) or `local` | + +### Remote mode (default) + +Uses the hosted Contexto API. Get an API key at [getcontexto.com](https://getcontexto.com/). + +```bash +openclaw config set plugins.entries.contexto.config.apiKey YOUR_KEY +``` + +### Local mode + +Runs the full pipeline locally: summarize via LLM, embed, cluster (AGNES), and persist to `~/.openclaw/data/contexto/mindmap.json`. Uses your OpenClaw provider and API key — no extra config needed. + +```bash +openclaw config set plugins.entries.contexto.config.mode local +``` ## Community diff --git a/packages/contexto/openclaw.plugin.json b/packages/contexto/openclaw.plugin.json index a740537..e4af558 100644 --- a/packages/contexto/openclaw.plugin.json +++ b/packages/contexto/openclaw.plugin.json @@ -15,6 +15,11 @@ "maxContextChars": { "type": "number", "description": "Maximum characters of context to inject (default: 2000)" + }, + "mode": { + "type": "string", + "default": "remote", + "description": "'remote' (hosted API) or 'local' (local pipeline with LLM summarization + mindmap)" } } } diff --git a/packages/contexto/package.json b/packages/contexto/package.json index 42c9ffc..3394751 100644 --- a/packages/contexto/package.json +++ b/packages/contexto/package.json @@ -31,6 +31,9 @@ "scripts": { "build": "tsc --noEmit" }, + "dependencies": { + "@ekai/mindmap": "^0.1.8" + }, "peerDependencies": { "openclaw": "*" }, diff --git a/packages/contexto/src/engine/base.ts b/packages/contexto/src/engine/base.ts index 2cf1925..47cfeb0 100644 --- a/packages/contexto/src/engine/base.ts 
+++ b/packages/contexto/src/engine/base.ts @@ -65,12 +65,12 @@ export abstract class AbstractContextEngine implements ContextEngine { if (tokenBudget != null) this.state.cachedTokenBudget = tokenBudget; const lastMsg = messages?.[messages.length - 1]; - this.logger.info(`[contexto] assemble() called — ${messages?.length} messages, tokenBudget: ${tokenBudget}, contextEnabled: ${this.config.contextEnabled}, hasApiKey: ${!!this.config.apiKey}`); + this.logger.info(`[contexto] assemble() called — ${messages?.length} messages, tokenBudget: ${tokenBudget}`); const lastMsgContent = lastMsg && 'content' in lastMsg ? lastMsg.content : undefined; this.logger.debug(`[contexto] last message — role: ${lastMsg?.role}, content type: ${typeof lastMsgContent}, isArray: ${Array.isArray(lastMsgContent)}, sample: ${JSON.stringify(lastMsgContent)?.slice(0, 200)}`); - if (!this.config.apiKey || !this.config.contextEnabled) { - this.logger.info(`[contexto] assemble() skipping — apiKey: ${!!this.config.apiKey}, contextEnabled: ${this.config.contextEnabled}`); + if (!this.config.apiKey) { + this.logger.info(`[contexto] assemble() skipping — not configured`); return { messages, estimatedTokens: 0 }; } diff --git a/packages/contexto/src/index.ts b/packages/contexto/src/index.ts index 642c969..d188719 100644 --- a/packages/contexto/src/index.ts +++ b/packages/contexto/src/index.ts @@ -1,10 +1,12 @@ import type { PluginConfig } from './types.js'; import { RemoteBackend } from './client.js'; +import { LocalBackend } from './local/index.js'; import { createContextEngine } from './engine/index.js'; -// Public API — use ContextoBackend to implement a custom (e.g. local) backend export type { ContextoBackend, SearchResult, WebhookPayload, Logger } from './types.js'; export { RemoteBackend } from './client.js'; +export { LocalBackend } from './local/index.js'; +export type { LocalBackendConfig, EpisodeSummary } from './local/index.js'; /** OpenClaw plugin definition. 
*/ export default { @@ -16,19 +18,22 @@ export default { type: 'object', properties: { apiKey: { type: 'string' }, - contextEnabled: { type: 'boolean', default: true }, + maxContextChars: { type: 'number' }, compactThreshold: { type: 'number', default: 0.50 }, compactionStrategy: { type: 'string', default: 'default' }, + mode: { type: 'string', default: 'remote' }, }, }, register(api: any) { const strategy = api.pluginConfig?.compactionStrategy ?? 'default'; + const backendMode = api.pluginConfig?.mode ?? 'remote'; + const base = { apiKey: api.pluginConfig?.apiKey, - contextEnabled: api.pluginConfig?.contextEnabled ?? true, maxContextChars: api.pluginConfig?.maxContextChars, + mode: backendMode as 'remote' | 'local', }; const config: PluginConfig = strategy === 'default' @@ -41,17 +46,53 @@ export default { const logger = api.logger; + if (backendMode === 'local') { + const modelAuth = api.runtime?.modelAuth; + if (!modelAuth?.resolveApiKeyForProvider) { + logger.warn('[contexto] Local mode requires modelAuth — not available'); + return; + } + + // Resolve API key via .then() since register() must be synchronous + modelAuth.resolveApiKeyForProvider({ provider: 'openrouter', cfg: api.config }) + .then((openrouterAuth: any) => { + if (openrouterAuth?.apiKey) { + return { provider: 'openrouter' as const, apiKey: openrouterAuth.apiKey }; + } + return modelAuth.resolveApiKeyForProvider({ provider: 'openai', cfg: api.config }) + .then((openaiAuth: any) => { + if (openaiAuth?.apiKey) { + return { provider: 'openai' as const, apiKey: openaiAuth.apiKey }; + } + return null; + }); + }) + .then((result: { provider: 'openrouter' | 'openai'; apiKey: string } | null) => { + if (!result) { + logger.warn('[contexto] Local mode requires an OpenRouter or OpenAI API key configured in OpenClaw'); + return; + } + config.apiKey = 'local'; + const backend = new LocalBackend({ provider: result.provider, apiKey: result.apiKey }, logger); + const engine = createContextEngine(config, 
backend, logger); + api.registerContextEngine('contexto', () => engine); + logger.info(`[contexto] Plugin registered with local backend (provider: ${result.provider})`); + }) + .catch((err: any) => { + logger.warn(`[contexto] Failed to resolve API key: ${err?.message ?? err}`); + }); + return; + } + + // Remote backend (default) if (!config.apiKey) { logger.warn('[contexto] Missing apiKey — ingestion and retrieval will be disabled'); return; } const backend = new RemoteBackend(config, logger); const engine = createContextEngine(config, backend, logger); api.registerContextEngine('contexto', () => engine); - - logger.info(`[contexto] Plugin registered (contextEnabled: ${config.contextEnabled})`); + logger.info('[contexto] Plugin registered'); }, }; diff --git a/packages/contexto/src/local/backend.ts b/packages/contexto/src/local/backend.ts new file mode 100644 index 0000000..698332e --- /dev/null +++ b/packages/contexto/src/local/backend.ts @@ -0,0 +1,125 @@ +import { homedir } from 'node:os'; +import { join } from 'node:path'; +import { Mindmap, jsonFileStorage } from '@ekai/mindmap'; +import type { ContextoBackend, Logger, SearchResult, WebhookPayload } from '../types.js'; +import type { LocalBackendConfig } from './types.js'; +import { extractEpisodeText, summarizeEpisode } from './summarizer.js'; + +const STORAGE_PATH = join(homedir(), '.openclaw', 'data', 'contexto', 'mindmap.json'); + +/** ContextoBackend implementation that runs the full pipeline locally. */ +export class LocalBackend implements ContextoBackend { + private mindmap: Mindmap; + private config: LocalBackendConfig; + private logger: Logger; + + constructor(config: LocalBackendConfig, logger: Logger) { + this.config = config; + this.logger = logger; + + const storage = config.storage ??
jsonFileStorage(STORAGE_PATH); + + this.mindmap = new Mindmap({ + provider: config.provider, + apiKey: config.apiKey, + embedModel: config.embedModel, + storage, + config: config.mindmapConfig, + }); + } + + async ingest(payload: WebhookPayload | WebhookPayload[]): Promise<void> { + const payloads = Array.isArray(payload) ? payload : [payload]; + if (payloads.length === 0) return; + + // Filter to episode/combined events only + const episodes = payloads.filter( + (p) => p.event.type === 'episode' && p.event.action === 'combined', + ); + + if (episodes.length === 0) { + this.logger.debug('[contexto:local] No episode/combined events to ingest'); + return; + } + + try { + const items: Array<{ id: string; role: string; content: string; timestamp?: string; metadata?: Record<string, unknown> }> = []; + + for (const ep of episodes) { + const text = extractEpisodeText(ep); + if (!text) { + this.logger.debug('[contexto:local] Empty episode text, skipping'); + continue; + } + + const traceRef = crypto.randomUUID(); + const summary = await summarizeEpisode(text, { + provider: this.config.provider, + apiKey: this.config.apiKey, + model: this.config.llmModel, + }, this.logger); + + // Compose content: summary + key findings as bullets (matches remote API format) + const contentParts = [summary.summary]; + if (summary.key_findings.length > 0) { + contentParts.push(`\nKey findings:\n${summary.key_findings.map((f) => `- ${f}`).join('\n')}`); + } + + const episodeData = ep.data as Record<string, unknown> | undefined; + + items.push({ + id: crypto.randomUUID(), + role: 'assistant', + content: contentParts.join('\n'), + timestamp: ep.timestamp ?? new Date().toISOString(), + metadata: { + source: 'summary', + status: summary.status, + evidence_refs: summary.evidence_refs, + open_questions: summary.open_questions, + confidence: summary.confidence, + trace_ref: traceRef, + sessionKey: ep.sessionKey, + episode: { + userMessage: episodeData?.userMessage, + assistantMessages: episodeData?.assistantMessages ??
[], + toolMessages: episodeData?.toolMessages ?? [], + }, + }, + }); + } + + if (items.length > 0) { + await this.mindmap.add(items); + this.logger.info(`[contexto:local] Ingested ${items.length} episode(s) into mindmap`); + } + } catch (err) { + this.logger.warn(`[contexto:local] Ingest failed: ${err instanceof Error ? err.message : String(err)}`); + } + } + + async search( + query: string, + maxResults: number, + filter?: Record<string, unknown>, + minScore?: number, + ): Promise<SearchResult | null> { + try { + const result = await this.mindmap.search(query, { + maxResults, + filter, + minScore, + }); + + if (!result.items.length) return null; + + return { + items: result.items, + paths: result.paths, + }; + } catch (err) { + this.logger.warn(`[contexto:local] Search failed: ${err instanceof Error ? err.message : String(err)}`); + return null; + } + } +} diff --git a/packages/contexto/src/local/index.ts b/packages/contexto/src/local/index.ts new file mode 100644 index 0000000..a7948ce --- /dev/null +++ b/packages/contexto/src/local/index.ts @@ -0,0 +1,3 @@ +export { LocalBackend } from './backend.js'; +export type { LocalBackendConfig, EpisodeSummary, EvidenceRef, EvidenceRefType, LLMProviderConfig } from './types.js'; +export { extractEpisodeText, summarizeEpisode } from './summarizer.js'; diff --git a/packages/contexto/src/local/summarizer.ts b/packages/contexto/src/local/summarizer.ts new file mode 100644 index 0000000..b4a0346 --- /dev/null +++ b/packages/contexto/src/local/summarizer.ts @@ -0,0 +1,179 @@ +import type { WebhookPayload, ContentBlock, Logger } from '../types.js'; +import { stripMetadataEnvelope } from '../helpers.js'; +import type { EpisodeSummary, LLMProviderConfig } from './types.js'; + +const LLM_PROVIDERS: Record<string, { baseUrl: string; defaultModel: string }> = { + openrouter: { + baseUrl: 'https://openrouter.ai/api/v1', + defaultModel: 'openai/gpt-4o-mini', + }, + openai: { + baseUrl: 'https://api.openai.com/v1', + defaultModel: 'gpt-4o-mini', + }, +}; + +const SUMMARIZE_SYSTEM_PROMPT = `You are a concise summarizer.
Given a conversation episode (user question + assistant answer + tool outputs), produce a JSON object with exactly these fields: + +{ + "status": "complete" | "partial" | "blocked", + "summary": "<concise summary>", + "key_findings": ["<finding>", "<finding>", ...], + "evidence_refs": [{"type": "<episode_ref|tool_ref|file_ref|trace_ref>", "value": "<identifier>"}], + "open_questions": ["<question>"], + "confidence": <0.0 to 1.0> +} + +Rules: +- Set status to "complete" if the episode fully resolved the user's request, "partial" if only partly, "blocked" if unable to proceed. +- summary should be 1-3 sentences capturing the essence. +- key_findings should have at least one entry. +- evidence_refs should reference relevant tools, files, or episodes mentioned. +- Respond ONLY with valid JSON, no markdown fences, no extra text.`; + +/** Extract text content from a message, handling both string and ContentBlock[] formats. */ +function extractMessageText(message: any): string { + if (!message) return ''; + const content = message.content; + if (typeof content === 'string') return content; + if (Array.isArray(content)) { + return (content as ContentBlock[]) + .filter((block) => block.type === 'text' && block.text) + .map((block) => block.text) + .join('\n'); + } + return ''; +} + +/** + * Extract combined text from an episode/combined WebhookPayload. + * Returns empty string for non-episode events. + */ +export function extractEpisodeText(payload: WebhookPayload): string { + if (payload.event.type !== 'episode' || payload.event.action !== 'combined') { + return ''; + } + + const data = payload.data as Record<string, unknown> | undefined; + if (!data) return ''; + + const parts: string[] = []; + + // User message — strip OpenClaw metadata envelope + const userText = extractMessageText(data.userMessage); + if (userText) { + parts.push(`Q: ${stripMetadataEnvelope(userText)}`); + } + + // Assistant messages (drop api/usage/model metadata) + const assistantMessages = Array.isArray(data.assistantMessages) ?
data.assistantMessages : []; + for (const msg of assistantMessages) { + const text = extractMessageText(msg); + if (text) parts.push(`A: ${text}`); + } + + return parts.join('\n'); +} + +/** + * Summarize episode text via an LLM call. + * Returns a graceful fallback on any failure. + */ +export async function summarizeEpisode( + text: string, + config: LLMProviderConfig, + logger: Logger, +): Promise { + const providerDef = LLM_PROVIDERS[config.provider]; + if (!providerDef) { + logger.warn(`[contexto:local] Unknown LLM provider: ${config.provider}, using fallback summary`); + return buildFallback(text); + } + + const model = config.model ?? providerDef.defaultModel; + const url = `${providerDef.baseUrl}/chat/completions`; + + try { + const response = await fetch(url, { + method: 'POST', + headers: { + 'Content-Type': 'application/json', + 'Authorization': `Bearer ${config.apiKey}`, + }, + body: JSON.stringify({ + model, + temperature: 0.2, + response_format: { type: 'json_object' }, + messages: [ + { role: 'system', content: SUMMARIZE_SYSTEM_PROMPT }, + { role: 'user', content: text }, + ], + }), + }); + + if (!response.ok) { + const body = await response.text().catch(() => '(no body)'); + logger.warn(`[contexto:local] LLM summarize HTTP ${response.status}: ${body.slice(0, 200)}`); + return buildFallback(text); + } + + const json = await response.json() as any; + const raw = json.choices?.[0]?.message?.content; + if (!raw) { + logger.warn('[contexto:local] LLM returned no content, using fallback summary'); + return buildFallback(text); + } + + return parseSummary(raw, text, logger); + } catch (err) { + logger.warn(`[contexto:local] LLM summarize failed: ${err instanceof Error ? err.message : String(err)}`); + return buildFallback(text); + } +} + +/** Parse and validate LLM JSON response into EpisodeSummary, with graceful degradation. 
*/ +function parseSummary(raw: string, originalText: string, logger: Logger): EpisodeSummary { + try { + const parsed = JSON.parse(raw); + + const summary = typeof parsed.summary === 'string' && parsed.summary + ? parsed.summary + : originalText.slice(0, 200); + + const key_findings = Array.isArray(parsed.key_findings) && parsed.key_findings.length > 0 + ? parsed.key_findings.map(String) + : ['Episode processed']; + + const status = ['complete', 'partial', 'blocked'].includes(parsed.status) + ? parsed.status as EpisodeSummary['status'] + : 'partial'; + + const confidence = typeof parsed.confidence === 'number' && parsed.confidence >= 0 && parsed.confidence <= 1 + ? parsed.confidence + : 0.5; + + const evidence_refs = Array.isArray(parsed.evidence_refs) + ? parsed.evidence_refs.filter((r: any) => r && typeof r.type === 'string' && typeof r.value === 'string') + : []; + + const open_questions = Array.isArray(parsed.open_questions) + ? parsed.open_questions.filter((q: any) => typeof q === 'string') + : undefined; + + return { summary, key_findings, status, confidence, evidence_refs, open_questions }; + } catch (err) { + logger.warn(`[contexto:local] Failed to parse LLM summary JSON: ${err instanceof Error ? err.message : String(err)}`); + return buildFallback(originalText); + } +} + +/** Build a fallback EpisodeSummary from raw text when LLM call or parsing fails. */ +function buildFallback(text: string): EpisodeSummary { + return { + summary: text.slice(0, 200) + (text.length > 200 ? '...' 
: ''), + key_findings: ['Episode processed (fallback — LLM summarization unavailable)'], + status: 'partial', + confidence: 0.0, + evidence_refs: [], + }; +} diff --git a/packages/contexto/src/local/types.ts b/packages/contexto/src/local/types.ts new file mode 100644 index 0000000..5bf5fdb --- /dev/null +++ b/packages/contexto/src/local/types.ts @@ -0,0 +1,33 @@ +import type { MindmapStorage } from '@ekai/mindmap'; +import type { MindmapConfig } from '@ekai/mindmap'; + +export type EvidenceRefType = 'episode_ref' | 'tool_ref' | 'file_ref' | 'trace_ref'; + +export interface EvidenceRef { + type: EvidenceRefType; + value: string; +} + +export interface EpisodeSummary { + summary: string; + key_findings: string[]; + status: 'complete' | 'partial' | 'blocked'; + confidence: number; + evidence_refs: EvidenceRef[]; + open_questions?: string[]; +} + +export interface LocalBackendConfig { + provider: 'openrouter' | 'openai'; + apiKey: string; + embedModel?: string; + llmModel?: string; + storage?: MindmapStorage; + mindmapConfig?: Partial<MindmapConfig>; +} + +export interface LLMProviderConfig { + provider: 'openrouter' | 'openai'; + apiKey: string; + model?: string; +} diff --git a/packages/contexto/src/types.ts b/packages/contexto/src/types.ts index 57dfd98..80986bf 100644 --- a/packages/contexto/src/types.ts +++ b/packages/contexto/src/types.ts @@ -1,9 +1,10 @@ export interface BaseConfig { apiKey: string; - contextEnabled: boolean; + maxContextChars?: number; minScore?: number; filter?: Record<string, unknown>; + mode?: 'remote' | 'local'; } export interface DefaultConfig extends BaseConfig { diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index 63a8c86..86c5e35 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -41,6 +41,9 @@ importers: packages/contexto: dependencies: + '@ekai/mindmap': + specifier: ^0.1.8 + version: 0.1.8 openclaw: specifier: '*' version: 2026.4.5(@napi-rs/canvas@0.1.97) @@ -401,6 +404,9 @@ packages: '@ekai/memory@0.0.1': resolution: {integrity:
sha512-bMdR8X6UDhlLmwm1mUFuuZQv6ZMl9G1gB0fcH+iHA2q8lSeCRDDBibn7b+k2OXfP1rTtlY8OAFnWLV0mXvfLKw==} + '@ekai/mindmap@0.1.8': + resolution: {integrity: sha512-7Af9ShJ8c4d80HAw5MR0j2ypIUQwag6lgt/s5Mywm2EIZrl2uDVdM6VmjT+U2SjYBc/cbyjSwiHRfb3x93GWVA==} + '@emnapi/core@1.9.1': resolution: {integrity: sha512-mukuNALVsoix/w1BJwFzwXBN/dHeejQtuVzcDsfOEsdpCumXb/E9j8w11h5S54tT1xhifGfbbSm/ICrObRb3KA==} @@ -6552,6 +6558,10 @@ snapshots: transitivePeerDependencies: - supports-color + '@ekai/mindmap@0.1.8': + dependencies: + ml-hclust: 4.0.0 + '@emnapi/core@1.9.1': dependencies: '@emnapi/wasi-threads': 1.2.0 diff --git a/release.config.cjs b/release.config.cjs index eb486f7..ad0b767 100644 --- a/release.config.cjs +++ b/release.config.cjs @@ -4,7 +4,8 @@ module.exports = { ['@semantic-release/commit-analyzer', { preset: 'conventionalcommits', releaseRules: [ - { type: 'feat', release: 'minor' }, + { breaking: true, release: 'minor' }, + { type: 'feat', release: 'patch' }, { type: 'fix', release: 'patch' }, { type: 'perf', release: 'patch' }, { type: 'refactor', release: 'patch' },