diff --git a/.changeset/bumpy-crabs-nail.md b/.changeset/bumpy-crabs-nail.md
new file mode 100644
index 000000000..7081e6ca3
--- /dev/null
+++ b/.changeset/bumpy-crabs-nail.md
@@ -0,0 +1,11 @@
+---
+"emdash": minor
+"@emdash-cms/cloudflare": patch
+"@emdash-cms/workerd": minor
+---
+
+Adds workerd-based plugin sandboxing for Node.js deployments.
+
+- **emdash**: Adds `isHealthy()` to `SandboxRunner` interface, `SandboxUnavailableError` class, `sandbox: false` config option, `mediaStorage` field on `SandboxOptions`, and exports `createHttpAccess`/`createUnrestrictedHttpAccess`/`PluginStorageRepository`/`UserRepository`/`OptionsRepository` for platform adapters.
+- **@emdash-cms/cloudflare**: Implements `isHealthy()` on `CloudflareSandboxRunner`. Fixes `storageQuery()` and `storageCount()` to honor `where`, `orderBy`, and `cursor` options (previously ignored, causing infinite pagination loops and incorrect filtered counts). Adds `storageConfig` to `PluginBridgeProps` so `PluginStorageRepository` can use declared indexes.
+- **@emdash-cms/workerd**: New package. `WorkerdSandboxRunner` for production (workerd child process + capnp config + authenticated HTTP backing service) and `MiniflareDevRunner` for development.
diff --git a/docs/src/content/docs/plugins/creating-plugins.mdx b/docs/src/content/docs/plugins/creating-plugins.mdx
index 2ec5c175e..857129bea 100644
--- a/docs/src/content/docs/plugins/creating-plugins.mdx
+++ b/docs/src/content/docs/plugins/creating-plugins.mdx
@@ -380,6 +380,63 @@ Test plugins by creating a minimal Astro site with the plugin registered:
For unit tests, mock the `PluginContext` interface and call hook handlers directly.
+### Testing in the Sandbox
+
+If your plugin will run sandboxed (marketplace distribution or on sites with workerd enabled), test it under sandbox conditions locally to catch capability violations before deploying.
+
+<Steps>
+
+1. Install the workerd sandbox runner in your test site:
+
+ ```bash
+ npm install @emdash-cms/workerd
+ ```
+
+2. Enable it in your test site's config:
+
+ ```typescript title="astro.config.mjs"
+ export default defineConfig({
+ integrations: [
+ emdash({
+ sandboxRunner: "@emdash-cms/workerd/sandbox",
+ plugins: [myPlugin()],
+ }),
+ ],
+ });
+ ```
+
+3. Run the dev server and exercise your plugin's hooks and routes.
+
+</Steps>
+
+If something works in trusted mode but fails in the sandbox, use `sandbox: false` to confirm it's a sandbox issue:
+
+```typescript title="astro.config.mjs"
+emdash({
+ sandboxRunner: "@emdash-cms/workerd/sandbox",
+ sandbox: false, // Temporarily bypass sandbox for debugging
+ plugins: [myPlugin()],
+})
+```
+
+### What Behaves Differently in the Sandbox
+
+Your plugin code is the same in both modes, but the sandbox enforces restrictions that trusted mode does not:
+
+| What | Trusted mode | Sandboxed mode |
+|---|---|---|
+| **Undeclared capabilities** | `ctx.content`, `ctx.media`, etc. are always present | Present on `ctx`, but methods throw capability errors when called |
+| **Network access** | `fetch()` works globally | Only via `ctx.http.fetch()`, restricted to `allowedHosts` |
+| **Node.js builtins** | `fs`, `path`, `child_process` available | Not available (V8 isolate, no Node APIs) |
+| **Environment variables** | `process.env` accessible | Not accessible |
+| **CPU time** | Unbounded | Limited on Cloudflare (default 50ms per invocation); wall-clock limit only under standalone workerd |
+| **Wall-clock time** | Unbounded | Limited (default 30s per invocation) |
+| **Direct DB access** | Possible (but discouraged) | Not possible, all access via `ctx.*` |
+
+<Aside type="tip">
+ The easiest way to ensure sandbox compatibility: only use the `ctx` object passed to your hooks and routes. If your plugin only touches `ctx.content`, `ctx.storage`, `ctx.kv`, `ctx.http`, `ctx.email`, and `ctx.log`, it will work identically in both modes.
+</Aside>
+
## Portable Text Block Types
Plugins can add custom block types to the Portable Text editor. These appear in the editor's slash command menu and can be inserted into any `portableText` field.
diff --git a/docs/src/content/docs/plugins/sandbox.mdx b/docs/src/content/docs/plugins/sandbox.mdx
index 5587749f5..469c54a3c 100644
--- a/docs/src/content/docs/plugins/sandbox.mdx
+++ b/docs/src/content/docs/plugins/sandbox.mdx
@@ -16,7 +16,7 @@ EmDash supports running plugins in two execution modes: **trusted** and **sandbo
| **Resource limits** | None | CPU, memory, subrequests, wall-time |
| **Network access** | Unrestricted | Blocked; only via `ctx.http` with host allowlist |
| **Data access** | Full database access | Scoped to declared capabilities via RPC bridge |
-| **Available on** | All platforms | Cloudflare Workers only |
+| **Available on** | All platforms | Cloudflare Workers, Node.js (with workerd) |
## Trusted Mode
@@ -145,34 +145,99 @@ Sandboxing requires Dynamic Worker Loader. Add to your `wrangler.jsonc`:
## Node.js Deployments
-
- Node.js does not support plugin sandboxing. All plugins run in trusted mode regardless of configuration. There is no V8 isolate boundary, no resource limits, and no capability enforcement at the runtime level.
-
+Node.js supports plugin sandboxing via [workerd](https://github.com/cloudflare/workerd), the open-source runtime that powers Cloudflare Workers. When configured, plugins run in isolated V8 isolates with the same capability enforcement as on Cloudflare.
+
+### Enabling Sandboxing on Node.js
+
+<Steps>
+
+1. Install the workerd sandbox runner:
+
+ ```bash
+ npm install @emdash-cms/workerd
+ ```
+
+2. Configure it in your Astro config:
+
+ ```typescript title="astro.config.mjs"
+ export default defineConfig({
+ integrations: [
+ emdash({
+ sandboxRunner: "@emdash-cms/workerd/sandbox",
+ }),
+ ],
+ });
+ ```
+
+3. Restart your dev server. Sandboxed plugins will now run in workerd isolates.
+
+</Steps>
+
+In development, if [miniflare](https://miniflare.dev/) is installed, the runner uses it for faster startup. In production (`NODE_ENV=production`), it spawns workerd as a child process with a generated configuration. Install miniflare as a dev dependency for the best local development experience:
+
+```bash
+npm install -D miniflare
+```
+
+### Debugging Escape Hatch
-When deploying to Node.js (or any non-Cloudflare platform):
+If you need to determine whether a bug is in your plugin code or in the sandbox, disable sandboxing temporarily:
+
+```typescript title="astro.config.mjs"
+emdash({
+ sandboxRunner: "@emdash-cms/workerd/sandbox",
+ sandbox: false, // Disable sandboxing, all plugins run in-process
+})
+```
+
+When `sandbox: false` is set:
+
+- Build-time sandboxed plugins (registered via `sandboxed: [...]` in your config) load in-process and run their hooks and routes normally. Plugin state (active/inactive) from the admin UI is respected.
+- Marketplace plugins also load in-process, but only their admin metadata (Configure links, admin pages, widgets) is refreshed — their hooks and routes do not execute under bypass. Runtime install/update/uninstall via the admin UI refreshes this metadata immediately without a server restart.
+- All plugin code runs with full Node.js privileges. Capability declarations are not enforced at the runtime level. Use this only for debugging — re-enable sandboxing for normal operation.
+
+### Without workerd
+
+If workerd is not installed, EmDash falls back to trusted mode for all plugins. A warning is logged at startup:
+
+> Plugin sandbox is configured but not available on this platform. Sandboxed plugins will not be loaded. If using @emdash-cms/workerd/sandbox, ensure workerd is installed.
+
+In this mode:
- The `NoopSandboxRunner` is used. It returns `isAvailable() === false`.
-- Attempting to load sandboxed plugins throws `SandboxNotAvailableError`.
- All plugins must be registered as trusted plugins in the `plugins` array.
-- Capability declarations are purely informational — they are not enforced.
+- Capability declarations are purely informational.
-### What This Means for Security
+### Security Comparison
-| Threat | Cloudflare (Sandboxed) | Node.js (Trusted only) |
-|---|---|---|
-| Plugin reads data it shouldn't | Blocked by bridge capability checks | **Not prevented** — plugin has full DB access |
-| Plugin makes unauthorized network calls | Blocked by `globalOutbound: null` + host allowlist | **Not prevented** — plugin can call `fetch()` directly |
-| Plugin exhausts CPU | Isolate aborted by Worker Loader | **Not prevented** — blocks the event loop |
-| Plugin exhausts memory | Isolate terminated by Worker Loader | **Not prevented** — can crash the process |
-| Plugin accesses environment variables | No access (isolated V8 context) | **Not prevented** — shares `process.env` |
-| Plugin accesses filesystem | No filesystem in Workers | **Not prevented** — full `fs` access |
+| Threat | Cloudflare (Sandboxed) | Node.js + workerd (Sandboxed) | Node.js (Trusted only) |
+|---|---|---|---|
+| Plugin reads unauthorized data | Blocked by bridge | Blocked by bridge | **Not prevented** |
+| Plugin makes unauthorized network calls | Blocked by host allowlist | Blocked by host allowlist | **Not prevented** |
+| Plugin exhausts CPU | Isolate aborted (per-request CPU limit) | Wall-time only (no per-request CPU limit) | **Not prevented** |
+| Plugin exhausts memory | 128MB per-isolate limit | **Not enforced by standalone workerd** | **Not prevented** |
+| Plugin makes excessive subrequests | Subrequest limit enforced | **Not enforced by standalone workerd** | **Not prevented** |
+| Plugin runs forever (wall-clock) | Wall-time limit | Wall-time limit (Promise.race wrapper) | **Not prevented** |
+| Plugin accesses env vars | No access (isolated V8) | No access (isolated V8) | **Not prevented** |
+| Plugin accesses filesystem | No filesystem in Workers | No filesystem in workerd | **Not prevented** |
+| Defense against V8 zero-days | Rapid patching + kernel hardening | Dependent on workerd release cycle | N/A |
+
+<Aside type="caution">
+ Standalone workerd does **not** enforce per-worker `cpuMs`, `memoryMb`, or `subrequests` limits. Those are Cloudflare platform features, not workerd capnp options. The only resource limit enforced on the Node path is `wallTimeMs`, applied via `Promise.race` in the runner.
+
+ A misbehaving plugin can still consume arbitrary CPU and memory until it hits the wall-clock timeout. For full per-request resource isolation, deploy on Cloudflare Workers.
+
+ The plugin code is still isolated (V8 isolate boundaries, no filesystem, no env vars, capability-gated APIs) — only the resource limit enforcement is weaker.
+</Aside>
### Recommendations for Node.js Deployments
-1. **Only install plugins from trusted sources.** Review the source code of any plugin before installing. Prefer plugins published by known maintainers.
-2. **Use capability declarations as a review checklist.** Even though capabilities aren't enforced, they document the plugin's intended scope. A plugin declaring `["network:fetch"]` that doesn't need network access is suspicious.
-3. **Monitor resource usage.** Use process-level monitoring (e.g., `--max-old-space-size`, health checks) to catch runaway plugins.
-4. **Consider Cloudflare for untrusted plugins.** If you need to run plugins from unknown sources (e.g., a marketplace), deploy on Cloudflare Workers where sandboxing is available.
+1. **Install workerd for sandboxing.** It provides the same V8 isolate boundaries and capability enforcement as Cloudflare with no code changes to your plugins.
+2. **Set NODE_ENV=production explicitly.** The runner uses the production hardening path (child process supervision, crash restart with backoff, wall-time wrapper) when NODE_ENV is "production". Other values fall back to the dev runner if miniflare is installed.
+3. **Use capability declarations as a review checklist.** Even in trusted mode, they document the plugin's intended scope.
+4. **Monitor resource usage.** Since CPU/memory limits are not enforced per worker, use process-level monitoring (`--max-old-space-size`, container memory limits, OS cgroups) as the primary defense.
+5. **Pin workerd versions.** The workerd binary is distributed via npm; pin its version in your lockfile to avoid unexpected behavior changes across releases.
+6. **For hostile multi-tenant plugins, deploy on Cloudflare.** The standalone workerd path is appropriate for trusted or semi-trusted plugins. Hostile multi-tenant scenarios need Cloudflare's per-request CPU/memory enforcement.
## Same API, Different Guarantees
@@ -199,3 +264,5 @@ export default definePlugin({
```
The goal is to let plugin authors develop locally in trusted mode (faster iteration, easier debugging) and deploy to sandboxed mode in production without code changes.
+
+With workerd installed locally, you can also test under sandbox conditions during development. See [Testing in the Sandbox](/plugins/creating-plugins/#testing-in-the-sandbox) for setup instructions.
diff --git a/packages/cloudflare/src/sandbox/bridge.ts b/packages/cloudflare/src/sandbox/bridge.ts
index 9bac489ac..fb297a430 100644
--- a/packages/cloudflare/src/sandbox/bridge.ts
+++ b/packages/cloudflare/src/sandbox/bridge.ts
@@ -7,9 +7,12 @@
*
*/
+import type { D1Database } from "@cloudflare/workers-types";
import { WorkerEntrypoint } from "cloudflare:workers";
import type { SandboxEmailSendCallback } from "emdash";
-import { ulid } from "emdash";
+import { ulid, PluginStorageRepository } from "emdash";
+import { Kysely } from "kysely";
+import { D1Dialect } from "kysely-d1";
/** Regex to validate collection names (prevent SQL injection) */
const COLLECTION_NAME_REGEX = /^[a-z][a-z0-9_]*$/;
@@ -125,6 +128,11 @@ export interface PluginBridgeProps {
capabilities: string[];
allowedHosts: string[];
storageCollections: string[];
+ /** Per-collection storage config (matches manifest.storage entries) */
+ storageConfig?: Record<
+ string,
+ { indexes?: Array<string>; uniqueIndexes?: Array<string> }
+ >;
}
/**
@@ -139,6 +147,28 @@ export interface PluginBridgeProps {
* 3. Plugins call bridge methods which validate and proxy to the database
*/
export class PluginBridge extends WorkerEntrypoint {
+ /**
+ * Construct a PluginStorageRepository for the requested collection.
+ * Uses the indexes from the plugin's storage config (if provided) so
+ * query/count operations support WHERE/ORDER BY/cursor pagination
+ * matching in-process and workerd sandbox plugins.
+ */
+ private getStorageRepo(collection: string): PluginStorageRepository {
+ const { pluginId, storageConfig } = this.ctx.props;
+ const config = storageConfig?.[collection];
+ // Merge unique indexes into the indexes list since both are queryable
+ const allIndexes: Array<string> = [
+ ...(config?.indexes ?? []),
+ ...(config?.uniqueIndexes ?? []),
+ ];
+ // eslint-disable-next-line typescript-eslint/no-unsafe-type-assertion -- D1 is the kysely-d1 dialect database type
+ const db = new Kysely({
+ dialect: new D1Dialect({ database: this.env.DB as D1Database }),
+ });
+ // eslint-disable-next-line typescript-eslint/no-unsafe-type-assertion -- Kysely is compatible with PluginStorageRepository's expected db
+ return new PluginStorageRepository(db as never, pluginId, collection, allIndexes);
+ }
+
// =========================================================================
// KV Operations - scoped to plugin namespace
// =========================================================================
@@ -240,45 +270,45 @@ export class PluginBridge extends WorkerEntrypoint;
+ orderBy?: Record;
+ } = {},
): Promise<{
items: Array<{ id: string; data: unknown }>;
hasMore: boolean;
cursor?: string;
}> {
- const { pluginId, storageCollections } = this.ctx.props;
+ const { storageCollections } = this.ctx.props;
if (!storageCollections.includes(collection)) {
throw new Error(`Storage collection not declared: ${collection}`);
}
- const limit = Math.min(opts.limit ?? 50, 1000);
- const results = await this.env.DB.prepare(
- "SELECT id, data FROM _plugin_storage WHERE plugin_id = ? AND collection = ? LIMIT ?",
- )
- .bind(pluginId, collection, limit + 1)
- .all<{ id: string; data: string }>();
-
- const items = (results.results ?? []).slice(0, limit).map((row) => ({
- id: row.id,
- data: JSON.parse(row.data),
- }));
+ // Delegate to PluginStorageRepository for proper WHERE/ORDER BY/cursor support
+ const repo = this.getStorageRepo(collection);
+ // eslint-disable-next-line typescript-eslint/no-unsafe-type-assertion -- WhereClause is structurally Record
+ const result = await repo.query({
+ where: opts.where as never,
+ orderBy: opts.orderBy,
+ limit: opts.limit,
+ cursor: opts.cursor,
+ });
return {
- items,
- hasMore: (results.results ?? []).length > limit,
- cursor: items.length > 0 ? items.at(-1)!.id : undefined,
+ items: result.items,
+ hasMore: result.hasMore,
+ cursor: result.cursor,
};
}
- async storageCount(collection: string): Promise<number> {
- const { pluginId, storageCollections } = this.ctx.props;
+ async storageCount(collection: string, where?: Record<string, unknown>): Promise<number> {
+ const { storageCollections } = this.ctx.props;
if (!storageCollections.includes(collection)) {
throw new Error(`Storage collection not declared: ${collection}`);
}
- const result = await this.env.DB.prepare(
- "SELECT COUNT(*) as count FROM _plugin_storage WHERE plugin_id = ? AND collection = ?",
- )
- .bind(pluginId, collection)
- .first<{ count: number }>();
- return result?.count ?? 0;
+ const repo = this.getStorageRepo(collection);
+ // eslint-disable-next-line typescript-eslint/no-unsafe-type-assertion -- WhereClause is structurally Record
+ return repo.count(where as never);
}
async storageGetMany(collection: string, ids: string[]): Promise> {
diff --git a/packages/cloudflare/src/sandbox/runner.ts b/packages/cloudflare/src/sandbox/runner.ts
index 35ecc8d5b..5222bec31 100644
--- a/packages/cloudflare/src/sandbox/runner.ts
+++ b/packages/cloudflare/src/sandbox/runner.ts
@@ -48,6 +48,10 @@ export interface PluginBridgeProps {
capabilities: string[];
allowedHosts: string[];
storageCollections: string[];
+ storageConfig?: Record<
+ string,
    { indexes?: Array<string>; uniqueIndexes?: Array<string> }
+ >;
}
/**
@@ -124,6 +128,13 @@ export class CloudflareSandboxRunner implements SandboxRunner {
return !!getLoader() && !!getPluginBridge();
}
+ /**
+ * Worker Loader runs in-process, always healthy if available.
+ */
+ isHealthy(): boolean {
+ return this.isAvailable();
+ }
+
/**
* Load a sandboxed plugin.
*
@@ -236,6 +247,15 @@ class CloudflareSandboxedPlugin implements SandboxedPlugin {
capabilities: this.manifest.capabilities || [],
allowedHosts: this.manifest.allowedHosts || [],
storageCollections: Object.keys(this.manifest.storage || {}),
+ storageConfig: this.manifest.storage as
+ | Record<
+ string,
+ {
+ indexes?: Array<string>;
+ uniqueIndexes?: Array<string>;
+ }
+ >
+ | undefined,
},
});
diff --git a/packages/core/package.json b/packages/core/package.json
index f319b6fce..ee941e2ce 100644
--- a/packages/core/package.json
+++ b/packages/core/package.json
@@ -208,7 +208,7 @@
"@apidevtools/swagger-parser": "^12.1.0",
"@arethetypeswrong/cli": "catalog:",
"@emdash-cms/blocks": "workspace:*",
- "@types/better-sqlite3": "^7.6.12",
+ "@types/better-sqlite3": "catalog:",
"@types/pg": "^8.16.0",
"@types/sanitize-html": "^2.16.0",
"@types/sax": "^1.2.7",
diff --git a/packages/core/src/api/handlers/marketplace.ts b/packages/core/src/api/handlers/marketplace.ts
index a63a5f572..e4496c9d3 100644
--- a/packages/core/src/api/handlers/marketplace.ts
+++ b/packages/core/src/api/handlers/marketplace.ts
@@ -292,7 +292,18 @@ export async function handleMarketplaceInstall(
sandboxRunner: SandboxRunner | null,
marketplaceUrl: string | undefined,
pluginId: string,
- opts?: { version?: string; configuredPluginIds?: Set<string>; siteOrigin?: string },
+ opts?: {
+ version?: string;
+ configuredPluginIds?: Set<string>;
+ siteOrigin?: string;
+ /**
+ * When true, sandbox: false bypass mode is active. The sandbox runner
+ * is the noop runner (isAvailable() === false) but the runtime will
+ * load the marketplace plugin in-process via syncMarketplacePlugins().
+ * Skip the SANDBOX_NOT_AVAILABLE gate so the install can proceed.
+ */
+ sandboxBypassed?: boolean;
+ },
): Promise> {
const client = getClient(marketplaceUrl, opts?.siteOrigin);
if (!client) {
@@ -315,7 +326,9 @@ export async function handleMarketplaceInstall(
};
}
- if (!sandboxRunner || !sandboxRunner.isAvailable()) {
+ // Sandbox availability check: skip when sandbox: false bypass is active.
+ // The runtime's syncMarketplacePlugins() will load the plugin in-process.
+ if (!opts?.sandboxBypassed && (!sandboxRunner || !sandboxRunner.isAvailable())) {
return {
success: false,
error: {
@@ -496,6 +509,13 @@ export async function handleMarketplaceUpdate(
version?: string;
confirmCapabilityChanges?: boolean;
confirmRouteVisibilityChanges?: boolean;
+ /**
+ * When true, sandbox: false bypass mode is active. The sandbox runner
+ * is the noop runner (isAvailable() === false) but the runtime will
+ * load the marketplace plugin in-process via syncMarketplacePlugins().
+ * Skip the SANDBOX_NOT_AVAILABLE gate so the update can proceed.
+ */
+ sandboxBypassed?: boolean;
},
): Promise> {
const client = getClient(marketplaceUrl);
@@ -511,7 +531,9 @@ export async function handleMarketplaceUpdate(
error: { code: "STORAGE_NOT_CONFIGURED", message: "Storage is required" },
};
}
- if (!sandboxRunner || !sandboxRunner.isAvailable()) {
+ // Sandbox availability check: skip when sandbox: false bypass is active.
+ // The runtime's syncMarketplacePlugins() will load the plugin in-process.
+ if (!opts?.sandboxBypassed && (!sandboxRunner || !sandboxRunner.isAvailable())) {
return {
success: false,
error: { code: "SANDBOX_NOT_AVAILABLE", message: "Sandbox runner is required" },
diff --git a/packages/core/src/astro/integration/runtime.ts b/packages/core/src/astro/integration/runtime.ts
index 7b000d738..606b5fe7a 100644
--- a/packages/core/src/astro/integration/runtime.ts
+++ b/packages/core/src/astro/integration/runtime.ts
@@ -196,6 +196,17 @@ export interface EmDashConfig {
*/
sandboxRunner?: string;
+ /**
+ * Explicitly disable plugin sandboxing, even if a sandbox runner is configured.
+ * Use this as a debugging escape hatch to determine whether a bug is in your
+ * plugin code or in the sandbox runtime.
+ *
+ * When set to `false`, all plugins run in-process without isolation.
+ *
+ * @default true (sandboxing enabled if sandboxRunner is configured)
+ */
+ sandbox?: boolean;
+
/**
* Authentication configuration
*
diff --git a/packages/core/src/astro/integration/virtual-modules.ts b/packages/core/src/astro/integration/virtual-modules.ts
index 5a99937b2..cb3afdb2b 100644
--- a/packages/core/src/astro/integration/virtual-modules.ts
+++ b/packages/core/src/astro/integration/virtual-modules.ts
@@ -259,10 +259,14 @@ ${entries.join("\n")}
/**
* Generates the sandbox runner module.
* Imports the configured sandbox runner factory or provides a noop default.
+ *
+ * When sandbox is explicitly false (debugging escape hatch), we still mark
+ * sandboxEnabled = true so sandboxed plugin entries are loaded, but we use
+ * the noop runner which falls through to in-process loading via adaptSandboxEntry.
*/
-export function generateSandboxRunnerModule(sandboxRunner?: string): string {
+export function generateSandboxRunnerModule(sandboxRunner?: string, sandbox?: boolean): string {
if (!sandboxRunner) {
- // No sandbox runner configured - use noop
+ // No sandbox runner configured - sandboxed plugins disabled
return `
// No sandbox runner configured - sandboxed plugins disabled
import { createNoopSandboxRunner } from "emdash";
@@ -272,6 +276,19 @@ export const sandboxEnabled = false;
`;
}
+ if (sandbox === false) {
+ // sandbox: false escape hatch - plugins are loaded but run in-process
+ // (no isolation, for debugging)
+ return `
+// Sandbox explicitly disabled (sandbox: false) - plugins run in-process
+import { createNoopSandboxRunner } from "emdash";
+
+export const createSandboxRunner = createNoopSandboxRunner;
+export const sandboxEnabled = true;
+export const sandboxBypassed = true;
+`;
+ }
+
return `
// Auto-generated sandbox runner module
import { createSandboxRunner as _createSandboxRunner } from "${sandboxRunner}";
diff --git a/packages/core/src/astro/integration/vite-config.ts b/packages/core/src/astro/integration/vite-config.ts
index 41d45ee5d..b6db1a171 100644
--- a/packages/core/src/astro/integration/vite-config.ts
+++ b/packages/core/src/astro/integration/vite-config.ts
@@ -194,7 +194,7 @@ export function createVirtualModulesPlugin(options: VitePluginOptions): Plugin {
}
// Generate sandbox runner module
if (id === RESOLVED_VIRTUAL_SANDBOX_RUNNER_ID) {
- return generateSandboxRunnerModule(resolvedConfig.sandboxRunner);
+ return generateSandboxRunnerModule(resolvedConfig.sandboxRunner, resolvedConfig.sandbox);
}
// Generate sandboxed plugins config module
if (id === RESOLVED_VIRTUAL_SANDBOXED_PLUGINS_ID) {
diff --git a/packages/core/src/astro/middleware.ts b/packages/core/src/astro/middleware.ts
index 67475220d..67c82116e 100644
--- a/packages/core/src/astro/middleware.ts
+++ b/packages/core/src/astro/middleware.ts
@@ -23,11 +23,8 @@ import {
import { mediaProviders as virtualMediaProviders } from "virtual:emdash/media-providers";
// @ts-ignore - virtual module
import { plugins as virtualPlugins } from "virtual:emdash/plugins";
-import {
- createSandboxRunner as virtualCreateSandboxRunner,
- sandboxEnabled as virtualSandboxEnabled,
- // @ts-ignore - virtual module
-} from "virtual:emdash/sandbox-runner";
+// @ts-ignore - virtual module
+import * as virtualSandboxRunnerModule from "virtual:emdash/sandbox-runner";
// @ts-ignore - virtual module
import { sandboxedPlugins as virtualSandboxedPlugins } from "virtual:emdash/sandboxed-plugins";
// @ts-ignore - virtual module
@@ -114,12 +111,26 @@ function buildDependencies(config: EmDashConfig): RuntimeDependencies {
// eslint-disable-next-line typescript-eslint/no-unsafe-type-assertion -- virtual module import is untyped (@ts-ignore above)
createStorage: virtualCreateStorage as ((config: Record) => Storage) | null,
// eslint-disable-next-line typescript-eslint/no-unsafe-type-assertion -- virtual module import is untyped (@ts-ignore above)
- sandboxEnabled: virtualSandboxEnabled as boolean,
+ sandboxEnabled: (virtualSandboxRunnerModule as Record)
+ .sandboxEnabled as boolean,
+ sandboxBypassed:
+ ((virtualSandboxRunnerModule as Record).sandboxBypassed as boolean) ?? false,
// eslint-disable-next-line typescript-eslint/no-unsafe-type-assertion -- virtual module import is untyped (@ts-ignore above)
sandboxedPluginEntries: (virtualSandboxedPlugins as SandboxedPluginEntry[]) || [],
// eslint-disable-next-line typescript-eslint/no-unsafe-type-assertion -- virtual module import is untyped (@ts-ignore above)
- createSandboxRunner: virtualCreateSandboxRunner as
- | ((opts: { db: Kysely }) => SandboxRunner)
+ createSandboxRunner: (virtualSandboxRunnerModule as Record)
+ .createSandboxRunner as
+ | ((opts: {
+ db: Kysely;
+ mediaStorage?: {
+ upload(options: {
+ key: string;
+ body: Uint8Array;
+ contentType: string;
+ }): Promise;
+ delete(key: string): Promise;
+ };
+ }) => SandboxRunner)
| null,
// eslint-disable-next-line typescript-eslint/no-unsafe-type-assertion -- virtual module import is untyped (@ts-ignore above)
mediaProviderEntries: (virtualMediaProviders as MediaProviderEntry[]) || [],
diff --git a/packages/core/src/astro/routes/api/admin/plugins/[id]/update.ts b/packages/core/src/astro/routes/api/admin/plugins/[id]/update.ts
index 9e2747399..17f76794a 100644
--- a/packages/core/src/astro/routes/api/admin/plugins/[id]/update.ts
+++ b/packages/core/src/astro/routes/api/admin/plugins/[id]/update.ts
@@ -48,6 +48,7 @@ export const POST: APIRoute = async ({ params, request, locals }) => {
version: body.version,
confirmCapabilityChanges: body.confirmCapabilityChanges,
confirmRouteVisibilityChanges: body.confirmRouteVisibilityChanges,
+ sandboxBypassed: emdash.isSandboxBypassed(),
},
);
diff --git a/packages/core/src/astro/routes/api/admin/plugins/marketplace/[id]/install.ts b/packages/core/src/astro/routes/api/admin/plugins/marketplace/[id]/install.ts
index 33b329875..7f9e7ee7f 100644
--- a/packages/core/src/astro/routes/api/admin/plugins/marketplace/[id]/install.ts
+++ b/packages/core/src/astro/routes/api/admin/plugins/marketplace/[id]/install.ts
@@ -49,7 +49,12 @@ export const POST: APIRoute = async ({ params, request, locals }) => {
emdash.getSandboxRunner(),
emdash.config.marketplace,
id,
- { version: body.version, configuredPluginIds, siteOrigin },
+ {
+ version: body.version,
+ configuredPluginIds,
+ siteOrigin,
+ sandboxBypassed: emdash.isSandboxBypassed(),
+ },
);
if (!result.success) return unwrapResult(result);
diff --git a/packages/core/src/emdash-runtime.ts b/packages/core/src/emdash-runtime.ts
index e6e685b10..960e8be80 100644
--- a/packages/core/src/emdash-runtime.ts
+++ b/packages/core/src/emdash-runtime.ts
@@ -221,11 +221,21 @@ export interface RuntimeDependencies {
// eslint-disable-next-line @typescript-eslint/no-explicit-any
createStorage: ((config: any) => Storage) | null;
sandboxEnabled: boolean;
+ /** sandbox: false escape hatch - load sandboxed plugins in-process */
+ sandboxBypassed?: boolean;
/** Media provider entries from virtual module */
mediaProviderEntries?: MediaProviderEntry[];
sandboxedPluginEntries: SandboxedPluginEntry[];
/** Factory function matching SandboxRunnerFactory signature */
- createSandboxRunner: ((opts: { db: Kysely }) => SandboxRunner) | null;
+ createSandboxRunner:
+ | ((opts: {
+ db: Kysely;
+ mediaStorage?: {
+ upload(options: { key: string; body: Uint8Array; contentType: string }): Promise;
+ delete(key: string): Promise;
+ };
+ }) => SandboxRunner)
+ | null;
}
/**
@@ -368,6 +378,16 @@ export class EmDashRuntime {
return sandboxRunner;
}
+ /**
+ * Whether the sandbox bypass mode (sandbox: false) is active.
+ * Marketplace install/update handlers use this to skip the
+ * SANDBOX_NOT_AVAILABLE gate, since the bypass path loads
+ * marketplace plugins in-process via syncMarketplacePlugins().
+ */
+ isSandboxBypassed(): boolean {
+ return this.runtimeDeps.sandboxBypassed === true;
+ }
+
/**
* Tick the cron system from request context (piggyback mode).
* Call this from middleware on each request to ensure cron tasks
@@ -456,6 +476,17 @@ export class EmDashRuntime {
*/
async syncMarketplacePlugins(): Promise {
if (!this.config.marketplace || !this.storage) return;
+
+ // In sandbox bypass mode (sandbox: false), the noop runner reports
+ // unavailable but we still want admin metadata for newly installed
+ // marketplace plugins to refresh in-process. Hooks/routes still won't
+ // execute (matches the cold-start bypass behavior), but Configure
+ // links and admin pages appear immediately.
+ if (this.runtimeDeps.sandboxBypassed) {
+ await this.syncMarketplacePluginsBypassed();
+ return;
+ }
+
if (!sandboxRunner || !sandboxRunner.isAvailable()) return;
try {
@@ -554,6 +585,149 @@ export class EmDashRuntime {
}
}
+ /**
+ * Remove a plugin from the in-memory pipeline lists by ID.
+ * Mutates allPipelinePlugins and configuredPlugins in place.
+ */
+ private removePluginFromLists(pluginId: string): void {
+ const allIdx = this.allPipelinePlugins.findIndex((p) => p.id === pluginId);
+ if (allIdx !== -1) this.allPipelinePlugins.splice(allIdx, 1);
+ const configured = this.configuredPlugins as ResolvedPlugin[];
+ const configIdx = configured.findIndex((p) => p.id === pluginId);
+ if (configIdx !== -1) configured.splice(configIdx, 1);
+ }
+
+ /**
+ * Sync marketplace plugin metadata in sandbox: false bypass mode.
+ *
+ * In bypass mode the noop runner can't load plugins, but admin pages,
+ * widgets, and route metadata still need to refresh in-process when an
+ * admin installs/updates/uninstalls a marketplace plugin. Otherwise the
+ * admin UI shows stale data until the server restarts.
+ *
+ * Hooks and routes still won't execute under bypass (matches the
+ * cold-start bypass behavior in loadMarketplacePluginsBypassed).
+ *
+ * Known limitation: bypass plugins are loaded via `import(dataUrl)`,
+ * which Node's ESM cache keys on the full URL. Updates create fresh
+ * module objects, but old ones remain cached for the worker's lifetime.
+ * In practice this is a few KB per update — only matters for sites with
+ * very frequent marketplace updates running long-lived processes. The
+ * fix would be vm.SourceTextModule for explicit lifecycle management.
+ */
+ private async syncMarketplacePluginsBypassed(): Promise {
+ if (!this.storage) return;
+ try {
+ const stateRepo = new PluginStateRepository(this.db);
+ const marketplaceStates = await stateRepo.getMarketplacePlugins();
+
+ const desired = new Map();
+ for (const state of marketplaceStates) {
+ this.pluginStates.set(state.pluginId, state.status);
+ if (state.status === "active") {
+ this.enabledPlugins.add(state.pluginId);
+ } else {
+ this.enabledPlugins.delete(state.pluginId);
+ }
+ if (state.status !== "active") continue;
+ desired.set(state.pluginId, state.marketplaceVersion ?? state.version);
+ }
+
+ // Drop metadata for plugins no longer active.
+ const toRemove: string[] = [];
+ for (const pluginId of marketplaceManifestCache.keys()) {
+ if (!desired.has(pluginId)) toRemove.push(pluginId);
+ }
+ for (const pluginId of toRemove) {
+ marketplaceManifestCache.delete(pluginId);
+ sandboxedRouteMetaCache.delete(pluginId);
+ // Remove from pipeline lists too (mutate in place since the
+ // arrays are readonly references but mutable contents)
+ this.removePluginFromLists(pluginId);
+ this.enabledPlugins.delete(pluginId);
+ }
+
+ // Load plugin code, adapt as trusted plugins, and add to pipeline lists
+ const { adaptSandboxEntry } = await import("./plugins/adapt-sandbox-entry.js");
+ const newPlugins: ResolvedPlugin[] = [];
+ for (const [pluginId, version] of desired) {
+ const bundle = await loadBundleFromR2(this.storage, pluginId, version);
+ if (!bundle) {
+ console.warn(`EmDash: Marketplace plugin ${pluginId}@${version} not found in R2`);
+ continue;
+ }
+ marketplaceManifestCache.set(pluginId, {
+ id: bundle.manifest.id,
+ version: bundle.manifest.version,
+ admin: bundle.manifest.admin,
+ });
+ if (bundle.manifest.routes.length > 0) {
+ const routeMetaMap = new Map();
+ for (const entry of bundle.manifest.routes) {
+ const normalized = normalizeManifestRoute(entry);
+ routeMetaMap.set(normalized.name, { public: normalized.public === true });
+ }
+ sandboxedRouteMetaCache.set(pluginId, routeMetaMap);
+ } else {
+ sandboxedRouteMetaCache.delete(pluginId);
+ }
+
+ // Skip if already in the pipeline at this version
+ const existing = this.allPipelinePlugins.find((p) => p.id === pluginId);
+ if (existing && existing.version === bundle.manifest.version) continue;
+
+ // Remove any older version
+ if (existing) {
+ this.removePluginFromLists(pluginId);
+ }
+
+ try {
+ const dataUrl = `data:text/javascript;base64,${Buffer.from(bundle.backendCode).toString("base64")}`;
+ const pluginModule = (await import(/* @vite-ignore */ dataUrl)) as Record<
+ string,
+ unknown
+ >;
+ const pluginDef = (pluginModule.default ?? pluginModule) as Parameters<
+ typeof adaptSandboxEntry
+ >[0];
+ const adapted = adaptSandboxEntry(pluginDef, {
+ id: bundle.manifest.id,
+ version: bundle.manifest.version,
+ entrypoint: "",
+ capabilities: bundle.manifest.capabilities ?? [],
+ allowedHosts: bundle.manifest.allowedHosts ?? [],
+ // eslint-disable-next-line @typescript-eslint/no-unsafe-type-assertion -- adaptSandboxEntry copies storage through
+ storage: (bundle.manifest.storage ?? {}) as never,
+ adminPages: bundle.manifest.admin?.pages,
+ adminWidgets: bundle.manifest.admin?.widgets?.map((w) => ({
+ id: w.id,
+ title: w.title,
+ size:
+ w.size === "full" || w.size === "half" || w.size === "third" ? w.size : undefined,
+ })),
+ });
+ newPlugins.push(adapted);
+ this.allPipelinePlugins.push(adapted);
+ (this.configuredPlugins as ResolvedPlugin[]).push(adapted);
+ this.enabledPlugins.add(adapted.id);
+ } catch (error) {
+ console.error(
+ `EmDash: Failed to load marketplace plugin ${pluginId}@${version} in-process:`,
+ error,
+ );
+ }
+ }
+
+ // If anything changed, rebuild the hook pipeline so new/removed
+ // plugins take effect immediately without a server restart.
+ if (toRemove.length > 0 || newPlugins.length > 0) {
+ await this.rebuildHookPipeline();
+ }
+ } catch (error) {
+ console.error("EmDash: Failed to sync marketplace plugins (bypass):", error);
+ }
+ }
+
/**
* Create and initialize the runtime
*/
@@ -618,6 +792,11 @@ export class EmDashRuntime {
// rebuildHookPipeline() filters this to only enabled plugins.
const allPipelinePlugins: ResolvedPlugin[] = [...deps.plugins];
+ // Collected bypassed plugins (sandbox: false escape hatch).
+ // These need to be added to BOTH the pipeline (for hooks) AND the
+ // configuredPlugins list (for route dispatch).
+ const bypassedPluginsList: ResolvedPlugin[] = [];
+
// In dev mode, register a built-in console email provider.
// It participates in exclusive hook resolution like any other plugin —
// auto-selected when it's the sole provider, overridden when a real one is configured.
@@ -665,6 +844,41 @@ export class EmDashRuntime {
console.warn("[comments] Failed to register default moderator:", error);
}
+ // sandbox: false escape hatch - load sandboxed plugin entries in-process
+ // as trusted plugins (no isolation) so they participate in the hook pipeline.
+ if (deps.sandboxBypassed && deps.sandboxedPluginEntries.length > 0) {
+ console.info(
+ "EmDash: Sandbox disabled (sandbox: false). " +
+ "Sandboxed plugins will run in-process without isolation.",
+ );
+ const bypassedPlugins = await EmDashRuntime.loadBypassedPlugins(deps.sandboxedPluginEntries);
+ for (const plugin of bypassedPlugins) {
+ allPipelinePlugins.push(plugin);
+ bypassedPluginsList.push(plugin);
+ // Respect plugin state: only enable if active or no record exists.
+ // Plugins an admin previously disabled should stay disabled.
+ const status = pluginStates.get(plugin.id);
+ if (status === undefined || status === "active") {
+ enabledPlugins.add(plugin.id);
+ }
+ }
+ }
+
+ // In bypass mode, also load marketplace plugins from R2 as trusted
+ // in-process plugins BEFORE pipeline creation. They need to be in the
+ // pipeline to participate in hook dispatch.
+ if (deps.sandboxBypassed && deps.config.marketplace && storage) {
+ const marketplaceBypassed = await EmDashRuntime.loadMarketplacePluginsBypassed(db, storage);
+ for (const plugin of marketplaceBypassed) {
+ allPipelinePlugins.push(plugin);
+ bypassedPluginsList.push(plugin);
+ const status = pluginStates.get(plugin.id);
+ if (status === undefined || status === "active") {
+ enabledPlugins.add(plugin.id);
+ }
+ }
+ }
+
// Filter to currently enabled plugins for the initial pipeline
const enabledPluginList = allPipelinePlugins.filter((p) => enabledPlugins.has(p.id));
@@ -676,11 +890,12 @@ export class EmDashRuntime {
};
const pipeline = createHookPipeline(enabledPluginList, pipelineFactoryOptions);
- // Load sandboxed plugins (build-time)
- const sandboxedPlugins = await EmDashRuntime.loadSandboxedPlugins(deps, db);
+ // Load sandboxed plugins (build-time, sandbox runner path)
+ const sandboxedPlugins = await EmDashRuntime.loadSandboxedPlugins(deps, db, storage);
- // Cold-start: load marketplace-installed plugins from site R2
- if (deps.config.marketplace && storage) {
+ // Cold-start: load marketplace-installed plugins from site R2 via
+ // the sandbox runner. In bypass mode this was already handled above.
+ if (deps.config.marketplace && storage && !deps.sandboxBypassed) {
await EmDashRuntime.loadMarketplacePlugins(db, storage, deps, sandboxedPlugins);
}
@@ -782,7 +997,10 @@ export class EmDashRuntime {
return new EmDashRuntime(
db,
storage,
- deps.plugins,
+ // Include bypassed sandboxed plugins in configuredPlugins so route
+ // dispatch can find them under sandbox: false (they're treated as
+ // trusted plugins for the duration of the bypass).
+ [...deps.plugins, ...bypassedPluginsList],
sandboxedPlugins,
deps.sandboxedPluginEntries,
pipeline,
@@ -945,12 +1163,82 @@ export class EmDashRuntime {
return storage;
}
+ /**
+ * Load sandboxed plugin entries as trusted in-process plugins.
+ * Used by the sandbox: false debugging escape hatch.
+ *
+ * Imports each plugin's bundled ESM code via a data URL, adapts it
+ * with adaptSandboxEntry, and returns ResolvedPlugin objects ready
+ * to be merged into the pipeline plugin list.
+ */
+ private static async loadBypassedPlugins(
+ entries: SandboxedPluginEntry[],
+ ): Promise {
+ const { adaptSandboxEntry } = await import("./plugins/adapt-sandbox-entry.js");
+ const plugins: ResolvedPlugin[] = [];
+ for (const entry of entries) {
+ try {
+ const dataUrl = `data:text/javascript;base64,${Buffer.from(entry.code).toString("base64")}`;
+ const pluginModule = (await import(/* @vite-ignore */ dataUrl)) as Record;
+ const pluginDef = (pluginModule.default ?? pluginModule) as Parameters<
+ typeof adaptSandboxEntry
+ >[0];
+ // PluginDescriptor.storage's TypeScript type is narrower than what
+ // adaptSandboxEntry actually accepts at runtime — it copies indexes
+ // through to PluginStorageConfig which supports composite indexes
+ // (string[][]). Pass the raw entry.storage with a structural cast
+ // to preserve composite index declarations.
+ // Preserve admin metadata so plugin-management APIs can derive
+ // hasAdminPages / hasDashboardWidgets correctly. Without this,
+ // the admin UI hides Configure links and dashboard widgets for
+ // bypassed plugins even though they declared them.
+ // SandboxedPluginEntry uses looser types than PluginDescriptor
+ // (label?, size: string), so coerce to the descriptor shape.
+ const adminPages = entry.adminPages?.map((p) => ({
+ path: p.path,
+ label: p.label ?? p.path,
+ icon: p.icon,
+ }));
+ const adminWidgets:
+ | Array<{
+ id: string;
+ title?: string;
+ size?: "full" | "half" | "third";
+ }>
+ | undefined = entry.adminWidgets?.map((w) => {
+ const size: "full" | "half" | "third" | undefined =
+ w.size === "full" || w.size === "half" || w.size === "third" ? w.size : undefined;
+ return { id: w.id, title: w.title, size };
+ });
+ const resolved = adaptSandboxEntry(pluginDef, {
+ id: entry.id,
+ version: entry.version,
+ entrypoint: "",
+ capabilities: entry.capabilities,
+ allowedHosts: entry.allowedHosts,
+ // eslint-disable-next-line @typescript-eslint/no-unsafe-type-assertion -- adaptSandboxEntry copies storage through to PluginStorageConfig which supports composite indexes
+ storage: entry.storage as never,
+ adminPages,
+ adminWidgets,
+ });
+ plugins.push(resolved);
+ console.log(
+ `EmDash: Loaded plugin ${entry.id}:${entry.version} in-process (sandbox bypassed)`,
+ );
+ } catch (error) {
+ console.error(`EmDash: Failed to load sandboxed plugin ${entry.id} in-process:`, error);
+ }
+ }
+ return plugins;
+ }
+
/**
* Load sandboxed plugins using SandboxRunner
*/
private static async loadSandboxedPlugins(
deps: RuntimeDependencies,
db: Kysely,
+ mediaStorage?: Storage | null,
): Promise> {
// Return cached plugins if already loaded
if (sandboxedPluginCache.size > 0) {
@@ -964,20 +1252,44 @@ export class EmDashRuntime {
// Create sandbox runner if not exists
if (!sandboxRunner && deps.createSandboxRunner) {
- sandboxRunner = deps.createSandboxRunner({ db });
+ sandboxRunner = deps.createSandboxRunner({
+ db,
+ mediaStorage: mediaStorage
+ ? {
+ upload: (opts) =>
+ mediaStorage.upload({
+ key: opts.key,
+ body: opts.body,
+ contentType: opts.contentType,
+ }),
+ delete: (key) => mediaStorage.delete(key),
+ }
+ : undefined,
+ });
}
if (!sandboxRunner) {
return sandboxedPluginCache;
}
+ // sandbox: false escape hatch is handled separately (before pipeline
+ // creation) via loadBypassedPlugins. If we somehow reach here with the
+ // flag set, just return — the plugins are already in the trusted pipeline.
+ if (deps.sandboxBypassed) {
+ return sandboxedPluginCache;
+ }
+
// Check if the runner is actually available (has required bindings)
if (!sandboxRunner.isAvailable()) {
- console.debug("EmDash: Sandbox runner not available (missing bindings), skipping sandbox");
+ console.warn(
+ "EmDash: Plugin sandbox is configured but not available on this platform. " +
+ "Sandboxed plugins will not be loaded. " +
+ "If using @emdash-cms/workerd/sandbox, ensure workerd is installed.",
+ );
return sandboxedPluginCache;
}
- // Load each sandboxed plugin
+ // Load each sandboxed plugin via sandbox runner
for (const entry of deps.sandboxedPluginEntries) {
const pluginKey = `${entry.id}:${entry.version}`;
if (sandboxedPluginCache.has(pluginKey)) {
@@ -1022,10 +1334,26 @@ export class EmDashRuntime {
deps: RuntimeDependencies,
cache: Map,
): Promise {
- // Ensure sandbox runner exists
+ // Ensure sandbox runner exists with media storage wired up.
+ // (storage here is the media Storage adapter from the runtime.)
if (!sandboxRunner && deps.createSandboxRunner) {
- sandboxRunner = deps.createSandboxRunner({ db });
+ sandboxRunner = deps.createSandboxRunner({
+ db,
+ mediaStorage: {
+ upload: (opts) =>
+ storage.upload({
+ key: opts.key,
+ body: opts.body,
+ contentType: opts.contentType,
+ }),
+ delete: (key) => storage.delete(key),
+ },
+ });
}
+ // In sandbox bypass mode, marketplace plugins are loaded in-process
+ // BEFORE pipeline creation by EmDashRuntime.create(). Skip here.
+ if (deps.sandboxBypassed) return;
+
if (!sandboxRunner || !sandboxRunner.isAvailable()) {
return;
}
@@ -1085,6 +1413,105 @@ export class EmDashRuntime {
}
}
+ /**
+ * Cold-start: load marketplace plugins in bypass mode (sandbox: false).
+ *
+ * Each active marketplace bundle is read, evaluated via data URL, adapted
+ * with adaptSandboxEntry, and returned as a ResolvedPlugin. The caller is
+ * responsible for merging these into allPipelinePlugins / configuredPlugins
+ * BEFORE the hook pipeline is created, so hooks and routes register in
+ * the trusted pipeline.
+ *
+ * Also caches manifest and route metadata so admin UI / getManifest() work.
+ *
+ * Returns ResolvedPlugins to be merged into the pipeline.
+ */
+ private static async loadMarketplacePluginsBypassed(
+ db: Kysely,
+ storage: Storage,
+ ): Promise {
+ const resolved: ResolvedPlugin[] = [];
+ try {
+ const stateRepo = new PluginStateRepository(db);
+ const marketplacePlugins = await stateRepo.getMarketplacePlugins();
+ if (marketplacePlugins.length === 0) return resolved;
+
+ console.info(
+ "EmDash: Sandbox disabled (sandbox: false). " +
+ "Marketplace plugins will run in-process without isolation.",
+ );
+
+ const { adaptSandboxEntry } = await import("./plugins/adapt-sandbox-entry.js");
+
+ for (const plugin of marketplacePlugins) {
+ if (plugin.status !== "active") continue;
+ const version = plugin.marketplaceVersion ?? plugin.version;
+ try {
+ const bundle = await loadBundleFromR2(storage, plugin.pluginId, version);
+ if (!bundle) {
+ console.warn(
+ `EmDash: Marketplace plugin ${plugin.pluginId}@${version} not found in R2`,
+ );
+ continue;
+ }
+
+ // Cache manifest and route metadata for admin UI and route auth
+ marketplaceManifestCache.set(plugin.pluginId, {
+ id: bundle.manifest.id,
+ version: bundle.manifest.version,
+ admin: bundle.manifest.admin,
+ });
+ if (bundle.manifest.routes.length > 0) {
+ const routeMeta = new Map();
+ for (const entry of bundle.manifest.routes) {
+ const normalized = normalizeManifestRoute(entry);
+ routeMeta.set(normalized.name, { public: normalized.public === true });
+ }
+ sandboxedRouteMetaCache.set(plugin.pluginId, routeMeta);
+ }
+
+ // Evaluate the bundled ESM and adapt it as a trusted plugin
+ const dataUrl = `data:text/javascript;base64,${Buffer.from(bundle.backendCode).toString("base64")}`;
+ const pluginModule = (await import(/* @vite-ignore */ dataUrl)) as Record<
+ string,
+ unknown
+ >;
+ const pluginDef = (pluginModule.default ?? pluginModule) as Parameters<
+ typeof adaptSandboxEntry
+ >[0];
+ const adapted = adaptSandboxEntry(pluginDef, {
+ id: bundle.manifest.id,
+ version: bundle.manifest.version,
+ entrypoint: "",
+ capabilities: bundle.manifest.capabilities ?? [],
+ allowedHosts: bundle.manifest.allowedHosts ?? [],
+ // eslint-disable-next-line @typescript-eslint/no-unsafe-type-assertion -- adaptSandboxEntry copies storage through
+ storage: (bundle.manifest.storage ?? {}) as never,
+ adminPages: bundle.manifest.admin?.pages,
+ adminWidgets: bundle.manifest.admin?.widgets?.map((w) => ({
+ id: w.id,
+ title: w.title,
+ size:
+ w.size === "full" || w.size === "half" || w.size === "third" ? w.size : undefined,
+ })),
+ });
+ resolved.push(adapted);
+ console.log(
+ `EmDash: Loaded marketplace plugin ${plugin.pluginId}@${version} in-process (sandbox bypassed)`,
+ );
+ } catch (error) {
+ console.error(
+ `EmDash: Failed to load marketplace plugin ${plugin.pluginId} in-process:`,
+ error,
+ );
+ }
+ }
+ } catch {
+ // _plugin_state table may not exist yet
+ }
+ return resolved;
+ }
+
/**
* Resolve exclusive hook selections on startup.
*
diff --git a/packages/core/src/index.ts b/packages/core/src/index.ts
index 5f343cade..e063d16b7 100644
--- a/packages/core/src/index.ts
+++ b/packages/core/src/index.ts
@@ -12,6 +12,9 @@ export type {
export {
ContentRepository,
MediaRepository,
+ PluginStorageRepository,
+ UserRepository,
+ OptionsRepository,
EmDashValidationError,
} from "./database/repositories/index.js";
export type {
@@ -192,7 +195,11 @@ export {
// Sandbox
NoopSandboxRunner,
SandboxNotAvailableError,
+ SandboxUnavailableError,
createNoopSandboxRunner,
+ // HTTP access for plugins (shared between in-process, Cloudflare, and workerd runners)
+ createHttpAccess,
+ createUnrestrictedHttpAccess,
} from "./plugins/index.js";
export type {
PluginDefinition,
diff --git a/packages/core/src/plugins/index.ts b/packages/core/src/plugins/index.ts
index 829a5012c..4d89ff036 100644
--- a/packages/core/src/plugins/index.ts
+++ b/packages/core/src/plugins/index.ts
@@ -67,6 +67,7 @@ export type { PluginManagerOptions, PluginState } from "./manager.js";
export {
NoopSandboxRunner,
SandboxNotAvailableError,
+ SandboxUnavailableError,
createNoopSandboxRunner,
} from "./sandbox/index.js";
export type {
diff --git a/packages/core/src/plugins/sandbox/index.ts b/packages/core/src/plugins/sandbox/index.ts
index ae3050ca8..4e9030f64 100644
--- a/packages/core/src/plugins/sandbox/index.ts
+++ b/packages/core/src/plugins/sandbox/index.ts
@@ -4,6 +4,7 @@
*/
export { NoopSandboxRunner, SandboxNotAvailableError, createNoopSandboxRunner } from "./noop.js";
+export { SandboxUnavailableError } from "./types.js";
export type {
SandboxRunner,
diff --git a/packages/core/src/plugins/sandbox/noop.ts b/packages/core/src/plugins/sandbox/noop.ts
index f9369eb73..938ca061b 100644
--- a/packages/core/src/plugins/sandbox/noop.ts
+++ b/packages/core/src/plugins/sandbox/noop.ts
@@ -15,9 +15,10 @@ import type { SandboxRunner, SandboxedPlugin, SandboxOptions } from "./types.js"
export class SandboxNotAvailableError extends Error {
constructor() {
super(
- "Plugin sandboxing is not available on this platform. " +
- "Sandboxed plugins require Cloudflare Workers with Worker Loader. " +
- "Use trusted plugins (from config) instead, or deploy to Cloudflare.",
+ "Plugin sandboxing is not available. " +
+ "Configure a sandbox runner: use @emdash-cms/cloudflare/sandbox on Cloudflare, " +
+ "or @emdash-cms/workerd/sandbox on Node.js (requires workerd). " +
+ "Without sandboxing, use trusted plugins (from config) instead.",
);
this.name = "SandboxNotAvailableError";
}
@@ -40,6 +41,13 @@ export class NoopSandboxRunner implements SandboxRunner {
return false;
}
+ /**
+ * Always returns false - no sandbox runtime to be healthy.
+ */
+ isHealthy(): boolean {
+ return false;
+ }
+
/**
* Always throws - can't load sandboxed plugins without isolation.
*/
diff --git a/packages/core/src/plugins/sandbox/types.ts b/packages/core/src/plugins/sandbox/types.ts
index 716594ec0..2638ecaa3 100644
--- a/packages/core/src/plugins/sandbox/types.ts
+++ b/packages/core/src/plugins/sandbox/types.ts
@@ -75,6 +75,15 @@ export interface SandboxOptions {
siteInfo?: { name: string; url: string; locale: string };
/** Email send callback, wired from the EmailPipeline by the runtime */
emailSend?: SandboxEmailSendCallback;
+ /**
+ * Media storage adapter for sandboxed plugin uploads and deletes.
+ * When provided, plugins with write:media can upload and delete files
+ * via ctx.media.upload() and ctx.media.delete().
+ */
+ mediaStorage?: {
+ upload(options: { key: string; body: Uint8Array; contentType: string }): Promise;
+ delete(key: string): Promise;
+ };
}
/**
@@ -134,6 +143,14 @@ export interface SandboxRunner {
*/
isAvailable(): boolean;
+ /**
+ * Check if the sandbox runtime is currently healthy.
+ * For in-process runners this always returns true.
+ * For sidecar-based runners (workerd), returns false if the
+ * child process has crashed and hasn't been restarted yet.
+ */
+ isHealthy(): boolean;
+
/**
* Load a sandboxed plugin from code.
*
@@ -158,6 +175,17 @@ export interface SandboxRunner {
terminateAll(): Promise;
}
+/**
+ * Error thrown when the sandbox runtime is unavailable.
+ * This happens when the sidecar process has crashed or hasn't started.
+ */
+export class SandboxUnavailableError extends Error {
+ constructor(pluginId: string, reason: string) {
+ super(`Plugin sandbox unavailable for ${pluginId}: ${reason}`);
+ this.name = "SandboxUnavailableError";
+ }
+}
+
/**
* Factory function type for creating sandbox runners.
* Exported by platform adapters (e.g., @emdash-cms/adapter-cloudflare/sandbox).
diff --git a/packages/marketplace/package.json b/packages/marketplace/package.json
index f649ddcaf..617887ed4 100644
--- a/packages/marketplace/package.json
+++ b/packages/marketplace/package.json
@@ -18,7 +18,7 @@
"zod": "^3.25.67"
},
"devDependencies": {
- "@types/better-sqlite3": "^7.6.13",
+ "@types/better-sqlite3": "catalog:",
"@types/node": "catalog:",
"better-sqlite3": "catalog:",
"typescript": "catalog:",
diff --git a/packages/workerd/package.json b/packages/workerd/package.json
new file mode 100644
index 000000000..56224a80a
--- /dev/null
+++ b/packages/workerd/package.json
@@ -0,0 +1,61 @@
+{
+ "name": "@emdash-cms/workerd",
+ "version": "0.0.1",
+ "description": "workerd-based plugin sandbox for EmDash on Node.js",
+ "type": "module",
+ "main": "dist/index.mjs",
+ "files": [
+ "dist",
+ "src"
+ ],
+ "exports": {
+ ".": {
+ "types": "./dist/index.d.mts",
+ "default": "./dist/index.mjs"
+ },
+ "./sandbox": {
+ "types": "./dist/sandbox/index.d.mts",
+ "default": "./dist/sandbox/index.mjs"
+ }
+ },
+ "scripts": {
+ "build": "tsdown",
+ "dev": "tsdown --watch",
+ "test": "vitest run"
+ },
+ "dependencies": {
+ "emdash": "workspace:*",
+ "ulidx": "^2.4.1"
+ },
+ "peerDependencies": {
+ "kysely": ">=0.27.0",
+ "workerd": ">=1.0.0"
+ },
+ "optionalDependencies": {
+ "miniflare": "^4.20250408.0"
+ },
+ "devDependencies": {
+ "@types/better-sqlite3": "catalog:",
+ "better-sqlite3": "catalog:",
+ "kysely": "^0.27.0",
+ "tsdown": "catalog:",
+ "typescript": "catalog:",
+ "vitest": "catalog:"
+ },
+ "repository": {
+ "type": "git",
+ "url": "git+https://github.com/emdash-cms/emdash.git",
+ "directory": "packages/workerd"
+ },
+ "homepage": "https://github.com/emdash-cms/emdash",
+ "keywords": [
+ "emdash",
+ "workerd",
+ "sandbox",
+ "plugins",
+ "isolation",
+ "v8-isolate"
+ ],
+ "author": "Benjamin Price",
+ "license": "MIT"
+}
diff --git a/packages/workerd/src/index.ts b/packages/workerd/src/index.ts
new file mode 100644
index 000000000..52a20a880
--- /dev/null
+++ b/packages/workerd/src/index.ts
@@ -0,0 +1 @@
+export { WorkerdSandboxRunner, createSandboxRunner } from "./sandbox/index.js";
diff --git a/packages/workerd/src/sandbox/backing-service.ts b/packages/workerd/src/sandbox/backing-service.ts
new file mode 100644
index 000000000..140c805e5
--- /dev/null
+++ b/packages/workerd/src/sandbox/backing-service.ts
@@ -0,0 +1,98 @@
+/**
+ * Backing Service HTTP Handler
+ *
+ * Runs in the Node process for production workerd deployments.
+ * Receives HTTP requests from plugin workers running in workerd isolates.
+ * Each request is authenticated via a per-plugin auth token.
+ *
+ * This is a thin wrapper around createBridgeHandler that adds:
+ * - Auth token validation (extracting claims from the HMAC token)
+ * - Node http.IncomingMessage -> Request conversion
+ * - Response -> http.ServerResponse conversion
+ *
+ * The actual bridge logic (dispatch, capability enforcement, DB queries)
+ * lives in bridge-handler.ts and is shared with the dev runner.
+ */
+
+import type { IncomingMessage, ServerResponse } from "node:http";
+
+import { createBridgeHandler } from "./bridge-handler.js";
+import type { WorkerdSandboxRunner } from "./runner.js";
+
+/**
+ * Create an HTTP request handler for the backing service.
+ */
+export function createBackingServiceHandler(
+ runner: WorkerdSandboxRunner,
+): (req: IncomingMessage, res: ServerResponse) => void {
+ // Cache bridge handlers per plugin token to avoid re-creation
+ const handlerCache = new Map Promise>();
+
+ return async (req, res) => {
+ try {
+ // Parse auth token from Authorization header
+ const authHeader = req.headers.authorization;
+ if (!authHeader?.startsWith("Bearer ")) {
+ res.writeHead(401, { "Content-Type": "application/json" });
+ res.end(JSON.stringify({ error: "Missing or invalid authorization" }));
+ return;
+ }
+
+ const token = authHeader.slice(7);
+ const claims = runner.validateToken(token);
+ if (!claims) {
+ res.writeHead(401, { "Content-Type": "application/json" });
+ res.end(JSON.stringify({ error: "Invalid auth token" }));
+ return;
+ }
+
+ // Get or create bridge handler for this plugin
+ let handler = handlerCache.get(token);
+ if (!handler) {
+ handler = createBridgeHandler({
+ pluginId: claims.pluginId,
+ version: claims.version,
+ capabilities: claims.capabilities,
+ allowedHosts: claims.allowedHosts,
+ storageCollections: claims.storageCollections,
+ storageConfig: runner.getPluginStorageConfig(claims.pluginId, claims.version) as
+ | Record }>
+ | undefined,
+ db: runner.db,
+ emailSend: () => runner.emailSend,
+ storage: runner.mediaStorage,
+ });
+ handlerCache.set(token, handler);
+ }
+
+ // Convert Node request to web Request
+ const body = await readBody(req);
+ const url = `http://bridge${req.url || "/"}`;
+ const webRequest = new Request(url, {
+ method: req.method || "POST",
+ headers: { "Content-Type": "application/json" },
+ body: JSON.stringify(body),
+ });
+
+ // Dispatch through the shared bridge handler
+ const webResponse = await handler(webRequest);
+ const responseBody = await webResponse.text();
+
+ res.writeHead(webResponse.status, { "Content-Type": "application/json" });
+ res.end(responseBody);
+ } catch (error) {
+ const message = error instanceof Error ? error.message : "Internal error";
+ res.writeHead(500, { "Content-Type": "application/json" });
+ res.end(JSON.stringify({ error: message }));
+ }
+ };
+}
+
+async function readBody(req: IncomingMessage): Promise> {
+ const chunks: Buffer[] = [];
+ for await (const chunk of req) {
+ chunks.push(chunk as Buffer);
+ }
+ const raw = Buffer.concat(chunks).toString();
+ return raw ? (JSON.parse(raw) as Record) : {};
+}
diff --git a/packages/workerd/src/sandbox/bridge-handler.ts b/packages/workerd/src/sandbox/bridge-handler.ts
new file mode 100644
index 000000000..f345bd69c
--- /dev/null
+++ b/packages/workerd/src/sandbox/bridge-handler.ts
@@ -0,0 +1,1115 @@
+/**
+ * Bridge Handler
+ *
+ * Handles bridge calls from sandboxed plugin workers.
+ * Used in two contexts:
+ * - Dev mode: as a miniflare outboundService function (Request -> Response)
+ * - Production: called from the backing service HTTP handler
+ *
+ * Each handler is scoped to a specific plugin with its capabilities.
+ * Capability enforcement happens here, not in the plugin.
+ *
+ * This implementation maintains behavioral parity with the Cloudflare
+ * PluginBridge (packages/cloudflare/src/sandbox/bridge.ts). Same inputs
+ * must produce same outputs, same return shapes, same error messages.
+ */
+
+// @ts-ignore -- value exports used at runtime
+import { createHttpAccess, createUnrestrictedHttpAccess, PluginStorageRepository } from "emdash";
+import type { Database } from "emdash";
+import type { SandboxEmailSendCallback } from "emdash";
+import { sql, type Kysely } from "kysely";
+
+/** Validates collection/field names to prevent SQL injection */
+const COLLECTION_NAME_RE = /^[a-z][a-z0-9_]*$/;
+
+/** System columns that plugins cannot directly write to */
+const SYSTEM_COLUMNS = new Set([
+  "id",
+  "slug",
+  "status",
+  "author_id",
+  "created_at",
+  "updated_at",
+  "published_at",
+  "scheduled_at",
+  "deleted_at",
+  "version",
+  "live_revision_id",
+  "draft_revision_id",
+]);
+
+/** Minimal storage interface for media uploads and deletes */
+export interface BridgeStorage {
+  upload(options: { key: string; body: Uint8Array; contentType: string }): Promise;
+  delete(key: string): Promise;
+}
+
+/** Per-collection storage config (matches manifest.storage entries) */
+export interface BridgeStorageCollectionConfig {
+  indexes?: Array;
+  uniqueIndexes?: Array;
+}
+
+/**
+ * Options scoping a bridge handler to one plugin instance.
+ * Capability and collection checks in dispatch() are driven entirely
+ * by the values captured here.
+ */
+export interface BridgeHandlerOptions {
+  pluginId: string;
+  version: string;
+  // Capability strings (e.g. "read:content") granted by the manifest.
+  capabilities: string[];
+  // Hosts the plugin may reach via http/fetch (unless network:fetch:any).
+  allowedHosts: string[];
+  /** Storage collection names declared by the plugin */
+  storageCollections: string[];
+  /** Full storage config (with indexes) for proper query/count delegation */
+  storageConfig?: Record;
+  db: Kysely;
+  // Lazily resolved so email configuration can change after handler creation.
+  emailSend: () => SandboxEmailSendCallback | null;
+  /** Storage for media uploads. Optional; media/upload throws if not provided. */
+  storage?: BridgeStorage | null;
+}
+
+/**
+ * Create a bridge handler function scoped to a specific plugin.
+ * Returns an async function that takes a Request and returns a Response.
+ */
+/**
+ * Create a bridge handler function scoped to a specific plugin.
+ * Returns an async function that takes a Request and returns a Response.
+ *
+ * Success responses are `{ result }`; failures are `{ error }` with
+ * HTTP 500 (the sandbox wrapper presumably rethrows the message — confirm).
+ */
+export function createBridgeHandler(
+  opts: BridgeHandlerOptions,
+): (request: Request) => Promise {
+  return async (request: Request): Promise => {
+    try {
+      // The bridge method name is the pathname without its leading slash,
+      // e.g. POST /kv/get -> "kv/get".
+      const url = new URL(request.url);
+      const method = url.pathname.slice(1);
+
+      let body: Record = {};
+      if (request.method === "POST") {
+        const text = await request.text();
+        if (text) {
+          body = JSON.parse(text) as Record;
+        }
+      }
+
+      const result = await dispatch(opts, method, body);
+      return Response.json({ result });
+    } catch (error) {
+      // All dispatch failures (missing capability, bad params, DB errors)
+      // surface as 500 with only the message — no stack leaks to plugins.
+      const message = error instanceof Error ? error.message : "Internal error";
+      return new Response(JSON.stringify({ error: message }), {
+        status: 500,
+        headers: { "Content-Type": "application/json" },
+      });
+    }
+  };
+}
+
+// ── Dispatch ─────────────────────────────────────────────────────────────
+
+async function dispatch(
+ opts: BridgeHandlerOptions,
+ method: string,
+ body: Record,
+): Promise {
+ const { db, pluginId } = opts;
+
+ switch (method) {
+ // ── KV (stored in _plugin_storage with collection='__kv') ────────
+ case "kv/get":
+ return kvGet(db, pluginId, requireString(body, "key"));
+ case "kv/set":
+ return kvSet(db, pluginId, requireString(body, "key"), body.value);
+ case "kv/delete":
+ return kvDelete(db, pluginId, requireString(body, "key"));
+ case "kv/list":
+ return kvList(db, pluginId, (body.prefix as string) ?? "");
+
+ // ── Content ─────────────────────────────────────────────────────
+ case "content/get":
+ requireCapability(opts, "read:content");
+ return contentGet(db, requireString(body, "collection"), requireString(body, "id"));
+ case "content/list":
+ requireCapability(opts, "read:content");
+ return contentList(db, requireString(body, "collection"), body);
+ case "content/create":
+ requireCapability(opts, "write:content");
+ return contentCreate(
+ db,
+ requireString(body, "collection"),
+ body.data as Record,
+ );
+ case "content/update":
+ requireCapability(opts, "write:content");
+ return contentUpdate(
+ db,
+ requireString(body, "collection"),
+ requireString(body, "id"),
+ body.data as Record,
+ );
+ case "content/delete":
+ requireCapability(opts, "write:content");
+ return contentDelete(db, requireString(body, "collection"), requireString(body, "id"));
+
+ // ── Media ───────────────────────────────────────────────────────
+ case "media/get":
+ requireCapability(opts, "read:media");
+ return mediaGet(db, requireString(body, "id"));
+ case "media/list":
+ requireCapability(opts, "read:media");
+ return mediaList(db, body);
+ case "media/upload":
+ requireCapability(opts, "write:media");
+ return mediaUpload(
+ db,
+ requireString(body, "filename"),
+ requireString(body, "contentType"),
+ body.bytes as number[],
+ opts.storage,
+ );
+ case "media/delete":
+ requireCapability(opts, "write:media");
+ return mediaDelete(db, requireString(body, "id"), opts.storage);
+
+ // ── HTTP ────────────────────────────────────────────────────────
+ case "http/fetch":
+ requireCapability(opts, "network:fetch");
+ return httpFetch(requireString(body, "url"), body.init, opts);
+
+ // ── Email ───────────────────────────────────────────────────────
+ case "email/send": {
+ requireCapability(opts, "email:send");
+ const message = body.message as {
+ to: string;
+ subject: string;
+ text: string;
+ html?: string;
+ };
+ if (!message?.to || !message?.subject || !message?.text) {
+ throw new Error("email/send requires message with to, subject, and text");
+ }
+ const emailSend = opts.emailSend();
+ if (!emailSend) throw new Error("Email is not configured. No email provider is available.");
+ await emailSend(message, pluginId);
+ return null;
+ }
+
+ // ── Users ───────────────────────────────────────────────────────
+ case "users/get":
+ requireCapability(opts, "read:users");
+ return userGet(db, requireString(body, "id"));
+ case "users/getByEmail":
+ requireCapability(opts, "read:users");
+ return userGetByEmail(db, requireString(body, "email"));
+ case "users/list":
+ requireCapability(opts, "read:users");
+ return userList(db, body);
+
+ // ── Storage (document store, scoped to declared collections) ────
+ case "storage/get":
+ validateStorageCollection(opts, requireString(body, "collection"));
+ return storageGet(opts, requireString(body, "collection"), requireString(body, "id"));
+ case "storage/put":
+ validateStorageCollection(opts, requireString(body, "collection"));
+ return storagePut(
+ opts,
+ requireString(body, "collection"),
+ requireString(body, "id"),
+ body.data,
+ );
+ case "storage/delete":
+ validateStorageCollection(opts, requireString(body, "collection"));
+ return storageDelete(opts, requireString(body, "collection"), requireString(body, "id"));
+ case "storage/query":
+ validateStorageCollection(opts, requireString(body, "collection"));
+ return storageQuery(opts, requireString(body, "collection"), body);
+ case "storage/count":
+ validateStorageCollection(opts, requireString(body, "collection"));
+ return storageCount(
+ opts,
+ requireString(body, "collection"),
+ body.where as Record | undefined,
+ );
+ case "storage/getMany":
+ validateStorageCollection(opts, requireString(body, "collection"));
+ return storageGetMany(opts, requireString(body, "collection"), body.ids as string[]);
+ case "storage/putMany":
+ validateStorageCollection(opts, requireString(body, "collection"));
+ return storagePutMany(
+ opts,
+ requireString(body, "collection"),
+ body.items as Array<{ id: string; data: unknown }>,
+ );
+ case "storage/deleteMany":
+ validateStorageCollection(opts, requireString(body, "collection"));
+ return storageDeleteMany(opts, requireString(body, "collection"), body.ids as string[]);
+
+ // ── Logging ─────────────────────────────────────────────────────
+ case "log": {
+ const level = requireString(body, "level") as "debug" | "info" | "warn" | "error";
+ const msg = requireString(body, "msg");
+ console[level](`[plugin:${pluginId}]`, msg, body.data ?? "");
+ return null;
+ }
+
+ default:
+ // All outbound fetch() from sandboxed plugins is routed to the
+ // backing service via workerd's globalOutbound config. If a plugin
+ // calls plain fetch("https://anywhere.com/path") instead of
+ // ctx.http.fetch(), we land here. This is intentional: plugins
+ // must use ctx.http.fetch (which goes through the http/fetch
+ // bridge with capability + host enforcement) to reach the network.
+ throw new Error(`Unknown bridge method: ${method}`);
+ }
+}
+
+// ── Validation ───────────────────────────────────────────────────────────
+
+/** Read a required string parameter from the request body, or throw. */
+function requireString(body: Record, key: string): string {
+  const candidate = body[key];
+  if (typeof candidate === "string") {
+    return candidate;
+  }
+  throw new Error(`Missing required string parameter: ${key}`);
+}
+
+/**
+ * Enforce that the plugin's manifest grants `capability`, throwing otherwise.
+ *
+ * This is a strict check matching the Cloudflare PluginBridge: write:content
+ * does NOT imply read:content — the manifest must declare reads explicitly.
+ * (The in-process PluginContextFactory in core is looser; the sandbox
+ * bridges intentionally enforce the manifest as written so plugins behave
+ * identically on both runners.)
+ *
+ * Single exception: network:fetch:any is documented as a strict superset
+ * of network:fetch, so holding the broader capability satisfies it.
+ */
+function requireCapability(opts: BridgeHandlerOptions, capability: string): void {
+  const granted = opts.capabilities;
+  const satisfied =
+    granted.includes(capability) ||
+    (capability === "network:fetch" && granted.includes("network:fetch:any"));
+  if (!satisfied) {
+    // Error message matches Cloudflare PluginBridge format
+    throw new Error(`Missing capability: ${capability}`);
+  }
+}
+
+/** Ensure the plugin declared this storage collection in its manifest. */
+function validateStorageCollection(opts: BridgeHandlerOptions, collection: string): void {
+  const declared = opts.storageCollections;
+  if (declared.includes(collection)) {
+    return;
+  }
+  // Error message matches Cloudflare PluginBridge format
+  throw new Error(`Storage collection not declared: ${collection}`);
+}
+
+/**
+ * Reject collection names that could smuggle SQL into identifier position
+ * (names are interpolated into table identifiers like `ec_${collection}`).
+ */
+function validateCollectionName(collection: string): void {
+  const isSafe = COLLECTION_NAME_RE.test(collection);
+  if (!isSafe) {
+    throw new Error(`Invalid collection name: ${collection}`);
+  }
+}
+
+// ── Value serialization (matches Cloudflare bridge) ──────────────────────
+
+function serializeValue(value: unknown): unknown {
+ if (value === null || value === undefined) return null;
+ if (typeof value === "boolean") return value ? 1 : 0;
+ if (typeof value === "object") return JSON.stringify(value);
+ return value;
+}
+
+/**
+ * Transform a raw DB row into the content item shape returned to plugins.
+ * Matches the Cloudflare bridge's rowToContentItem.
+ */
+function rowToContentItem(
+ collection: string,
+ row: Record,
+): {
+ id: string;
+ type: string;
+ data: Record;
+ createdAt: string;
+ updatedAt: string;
+} {
+ const data: Record = {};
+ for (const [key, value] of Object.entries(row)) {
+ if (!SYSTEM_COLUMNS.has(key)) {
+ if (typeof value === "string" && (value.startsWith("{") || value.startsWith("["))) {
+ try {
+ data[key] = JSON.parse(value);
+ } catch {
+ data[key] = value;
+ }
+ } else if (value !== null) {
+ data[key] = value;
+ }
+ }
+ }
+
+ return {
+ id: typeof row.id === "string" ? row.id : String(row.id),
+ type: collection,
+ data,
+ createdAt: typeof row.created_at === "string" ? row.created_at : new Date().toISOString(),
+ updatedAt: typeof row.updated_at === "string" ? row.updated_at : new Date().toISOString(),
+ };
+}
+
+// ── KV Operations ────────────────────────────────────────────────────────
+// Uses _plugin_storage with collection='__kv' (matching Cloudflare bridge)
+
+/**
+ * Read one KV value for this plugin. Values are stored JSON-encoded;
+ * a value that fails to parse is returned as the raw stored string.
+ */
+async function kvGet(db: Kysely, pluginId: string, key: string): Promise {
+  const row = await db
+    .selectFrom("_plugin_storage" as keyof Database)
+    .where("plugin_id", "=", pluginId)
+    .where("collection", "=", "__kv")
+    .where("id", "=", key)
+    .select("data")
+    .executeTakeFirst();
+  if (!row) return null;
+  try {
+    return JSON.parse(row.data as string);
+  } catch {
+    return row.data;
+  }
+}
+
+/**
+ * Upsert one KV value for this plugin (insert, or update data/updated_at
+ * on conflict with the (plugin_id, collection, id) key).
+ */
+async function kvSet(
+  db: Kysely,
+  pluginId: string,
+  key: string,
+  value: unknown,
+): Promise {
+  const serialized = JSON.stringify(value);
+  const now = new Date().toISOString();
+  await db
+    .insertInto("_plugin_storage" as keyof Database)
+    .values({
+      plugin_id: pluginId,
+      collection: "__kv",
+      id: key,
+      data: serialized,
+      created_at: now,
+      updated_at: now,
+    } as never)
+    .onConflict((oc) =>
+      oc.columns(["plugin_id", "collection", "id"] as never[]).doUpdateSet({
+        data: serialized,
+        updated_at: now,
+      } as never),
+    )
+    .execute();
+}
+
+/** Delete one KV entry; returns true if a row was actually removed. */
+async function kvDelete(db: Kysely, pluginId: string, key: string): Promise {
+  const result = await db
+    .deleteFrom("_plugin_storage" as keyof Database)
+    .where("plugin_id", "=", pluginId)
+    .where("collection", "=", "__kv")
+    .where("id", "=", key)
+    .executeTakeFirst();
+  return BigInt(result.numDeletedRows) > 0n;
+}
+
+/**
+ * List all KV entries for this plugin whose keys start with `prefix`.
+ *
+ * Parity fix: kvGet tolerates stored values that are not valid JSON by
+ * returning the raw value, but this function previously called JSON.parse
+ * unguarded — a single malformed row made the entire namespace unlistable.
+ * The same per-entry fallback is now applied here.
+ *
+ * NOTE(review): LIKE wildcards ('%', '_') in `prefix` are not escaped —
+ * confirm this matches the Cloudflare bridge before tightening.
+ */
+async function kvList(
+  db: Kysely<Database>,
+  pluginId: string,
+  prefix: string,
+): Promise<Array<{ key: string; value: unknown }>> {
+  const rows = await db
+    .selectFrom("_plugin_storage" as keyof Database)
+    .where("plugin_id", "=", pluginId)
+    .where("collection", "=", "__kv")
+    .where("id", "like", `${prefix}%`)
+    .select(["id", "data"])
+    .execute();
+
+  return rows.map((r) => {
+    const key = r.id as string;
+    try {
+      return { key, value: JSON.parse(r.data as string) };
+    } catch {
+      // Same fallback as kvGet: surface the raw value instead of throwing.
+      return { key, value: r.data };
+    }
+  });
+}
+
+// ── Content Operations ───────────────────────────────────────────────────
+
+/**
+ * Fetch one non-deleted content item by id from `ec_<collection>`.
+ * Returns null when the row is missing — or when the query itself fails
+ * (NOTE(review): the broad catch maps e.g. a missing table to "not found";
+ * confirm this matches the Cloudflare bridge).
+ */
+async function contentGet(
+  db: Kysely,
+  collection: string,
+  id: string,
+): Promise<{
+  id: string;
+  type: string;
+  data: Record;
+  createdAt: string;
+  updatedAt: string;
+} | null> {
+  validateCollectionName(collection);
+  try {
+    const row = await db
+      .selectFrom(`ec_${collection}` as keyof Database)
+      .where("id", "=", id)
+      .where("deleted_at", "is", null)
+      .selectAll()
+      .executeTakeFirst();
+    if (!row) return null;
+    return rowToContentItem(collection, row as Record);
+  } catch {
+    return null;
+  }
+}
+
+/**
+ * List non-deleted content items with keyset pagination (id descending,
+ * cursor = last id of the previous page). Fetches limit+1 rows to detect
+ * whether another page exists. Query failures yield an empty page.
+ *
+ * NOTE(review): a negative opts.limit passes through Math.min unchecked —
+ * confirm callers clamp it (userList uses Math.max(1, ...)).
+ */
+async function contentList(
+  db: Kysely,
+  collection: string,
+  opts: Record,
+): Promise<{
+  items: Array<{
+    id: string;
+    type: string;
+    data: Record;
+    createdAt: string;
+    updatedAt: string;
+  }>;
+  cursor?: string;
+  hasMore: boolean;
+}> {
+  validateCollectionName(collection);
+  const limit = Math.min(Number(opts.limit) || 50, 100);
+  try {
+    let query = db
+      .selectFrom(`ec_${collection}` as keyof Database)
+      .where("deleted_at", "is", null)
+      .selectAll()
+      .orderBy("id", "desc");
+
+    if (typeof opts.cursor === "string") {
+      query = query.where("id", "<", opts.cursor);
+    }
+
+    const rows = await query.limit(limit + 1).execute();
+    const pageRows = rows.slice(0, limit);
+    const items = pageRows.map((row) =>
+      rowToContentItem(collection, row as Record),
+    );
+    const hasMore = rows.length > limit;
+
+    return {
+      items,
+      cursor: hasMore && items.length > 0 ? items.at(-1)!.id : undefined,
+      hasMore,
+    };
+  } catch {
+    return { items: [], hasMore: false };
+  }
+}
+
+/**
+ * Insert a content item into `ec_<collection>` with a fresh ULID id and
+ * version 1, then re-read and return it in the plugin-facing shape.
+ * slug/status/author_id are pulled from `data` explicitly; all other
+ * system columns are rejected by the SYSTEM_COLUMNS filter below.
+ */
+async function contentCreate(
+  db: Kysely,
+  collection: string,
+  data: Record,
+): Promise<{
+  id: string;
+  type: string;
+  data: Record;
+  createdAt: string;
+  updatedAt: string;
+}> {
+  validateCollectionName(collection);
+
+  // Generate ULID for the new content item
+  const { ulid } = await import("ulidx");
+  const id = ulid();
+  const now = new Date().toISOString();
+
+  // Build insert values: system columns + user data columns
+  const values: Record = {
+    id,
+    slug: typeof data.slug === "string" ? data.slug : null,
+    status: typeof data.status === "string" ? data.status : "draft",
+    author_id: typeof data.author_id === "string" ? data.author_id : null,
+    created_at: now,
+    updated_at: now,
+    version: 1,
+  };
+
+  // Add user data fields (skip system columns, validate names)
+  for (const [key, value] of Object.entries(data)) {
+    if (!SYSTEM_COLUMNS.has(key) && COLLECTION_NAME_RE.test(key)) {
+      values[key] = serializeValue(value);
+    }
+  }
+
+  await db
+    .insertInto(`ec_${collection}` as keyof Database)
+    .values(values as never)
+    .execute();
+
+  // Re-read the created row
+  const created = await db
+    .selectFrom(`ec_${collection}` as keyof Database)
+    .where("id", "=", id)
+    .where("deleted_at", "is", null)
+    .selectAll()
+    .executeTakeFirst();
+
+  if (!created) {
+    // Defensive: insert succeeded but re-read found nothing; return a
+    // minimal item rather than failing the whole call.
+    return { id, type: collection, data: {}, createdAt: now, updatedAt: now };
+  }
+  return rowToContentItem(collection, created as Record);
+}
+
+/**
+ * Update a non-deleted content item: bumps updated_at and version, applies
+ * slug/status from `data` when present, then the remaining user fields
+ * (system columns filtered out). Throws if no row was updated, then
+ * re-reads and returns the fresh row.
+ */
+async function contentUpdate(
+  db: Kysely,
+  collection: string,
+  id: string,
+  data: Record,
+): Promise<{
+  id: string;
+  type: string;
+  data: Record;
+  createdAt: string;
+  updatedAt: string;
+}> {
+  validateCollectionName(collection);
+
+  const now = new Date().toISOString();
+
+  // Build update: always bump updated_at and version
+  let query = db
+    .updateTable(`ec_${collection}` as keyof Database)
+    .set({ updated_at: now } as never)
+    .set(sql`version = version + 1` as never)
+    .where("id", "=", id)
+    .where("deleted_at", "is", null);
+
+  // System field updates
+  if (typeof data.status === "string") {
+    query = query.set({ status: data.status } as never);
+  }
+  if (data.slug !== undefined) {
+    query = query.set({ slug: typeof data.slug === "string" ? data.slug : null } as never);
+  }
+
+  // User data fields
+  for (const [key, value] of Object.entries(data)) {
+    if (!SYSTEM_COLUMNS.has(key) && COLLECTION_NAME_RE.test(key)) {
+      query = query.set({ [key]: serializeValue(value) } as never);
+    }
+  }
+
+  const result = await query.executeTakeFirst();
+  if (BigInt(result.numUpdatedRows) === 0n) {
+    throw new Error(`Content not found or deleted: ${collection}/${id}`);
+  }
+
+  // Re-read the updated row
+  const updated = await db
+    .selectFrom(`ec_${collection}` as keyof Database)
+    .where("id", "=", id)
+    .where("deleted_at", "is", null)
+    .selectAll()
+    .executeTakeFirst();
+
+  if (!updated) {
+    // Row vanished between update and re-read (concurrent delete).
+    throw new Error(`Content not found: ${collection}/${id}`);
+  }
+  return rowToContentItem(collection, updated as Record);
+}
+
+/**
+ * Soft-delete a content item by stamping deleted_at/updated_at.
+ * Returns true if a live row was marked deleted.
+ */
+async function contentDelete(
+  db: Kysely,
+  collection: string,
+  id: string,
+): Promise {
+  validateCollectionName(collection);
+
+  // Soft-delete: set deleted_at timestamp (matching Cloudflare bridge)
+  const now = new Date().toISOString();
+  const result = await db
+    .updateTable(`ec_${collection}` as keyof Database)
+    .set({ deleted_at: now, updated_at: now } as never)
+    .where("id", "=", id)
+    .where("deleted_at", "is", null)
+    .executeTakeFirst();
+
+  return BigInt(result.numUpdatedRows) > 0n;
+}
+
+// ── Media Operations ─────────────────────────────────────────────────────
+
+/** Subset of the media table columns the bridge reads. */
+interface MediaRow {
+  id: string;
+  filename: string;
+  mime_type: string;
+  size: number | null;
+  storage_key: string;
+  created_at: string;
+}
+
+/** Shape a media row for the plugin-facing API (camelCase + public URL). */
+function rowToMediaItem(row: MediaRow) {
+  const { id, filename, size } = row;
+  return {
+    id,
+    filename,
+    mimeType: row.mime_type,
+    size,
+    url: `/_emdash/api/media/file/${row.storage_key}`,
+    createdAt: row.created_at,
+  };
+}
+
+/** Fetch one media row by id; null when absent. */
+async function mediaGet(
+  db: Kysely,
+  id: string,
+): Promise<{
+  id: string;
+  filename: string;
+  mimeType: string;
+  size: number | null;
+  url: string;
+  createdAt: string;
+} | null> {
+  const row = await db
+    .selectFrom("media" as keyof Database)
+    .where("id", "=", id)
+    .selectAll()
+    .executeTakeFirst();
+  if (!row) return null;
+  return rowToMediaItem(row as unknown as MediaRow);
+}
+
+/**
+ * List ready media items with keyset pagination (id descending), with an
+ * optional mime-type prefix filter (e.g. "image/"). Fetches limit+1 rows
+ * to detect whether another page exists.
+ */
+async function mediaList(
+  db: Kysely,
+  opts: Record,
+): Promise<{
+  items: Array<{
+    id: string;
+    filename: string;
+    mimeType: string;
+    size: number | null;
+    url: string;
+    createdAt: string;
+  }>;
+  cursor?: string;
+  hasMore: boolean;
+}> {
+  const limit = Math.min(Number(opts.limit) || 50, 100);
+
+  // Only return ready items (matching Cloudflare bridge)
+  let query = db
+    .selectFrom("media" as keyof Database)
+    .where("status", "=", "ready")
+    .selectAll()
+    .orderBy("id", "desc");
+
+  if (typeof opts.mimeType === "string") {
+    query = query.where("mime_type", "like", `${opts.mimeType}%`);
+  }
+
+  if (typeof opts.cursor === "string") {
+    query = query.where("id", "<", opts.cursor);
+  }
+
+  const rows = await query.limit(limit + 1).execute();
+  const pageRows = rows.slice(0, limit);
+  const items = pageRows.map((row) => rowToMediaItem(row as unknown as MediaRow));
+  const hasMore = rows.length > limit;
+
+  return {
+    items,
+    cursor: hasMore && items.length > 0 ? items.at(-1)!.id : undefined,
+    hasMore,
+  };
+}
+
+/** MIME prefixes plugins may upload; everything else is rejected. */
+const ALLOWED_MIME_PREFIXES = ["image/", "video/", "audio/", "application/pdf"];
+/** Sanity check for file extensions carried into the storage key. */
+const FILE_EXT_RE = /^\.[a-z0-9]{1,10}$/i;
+
+/**
+ * Upload media bytes to storage and record a 'ready' media row.
+ *
+ * @param filename    Original filename; only a validated extension is kept
+ *                    in the storage key (key = ULID + ext).
+ * @param contentType Must match ALLOWED_MIME_PREFIXES.
+ * @param bytes       Raw bytes as a number array (JSON transport).
+ * @throws If no storage adapter is configured or the type is unsupported.
+ */
+async function mediaUpload(
+  db: Kysely,
+  filename: string,
+  contentType: string,
+  bytes: number[],
+  storage?: BridgeStorage | null,
+): Promise<{ mediaId: string; storageKey: string; url: string }> {
+  if (!storage) {
+    throw new Error(
+      "Media storage is not configured. Cannot upload files without a storage adapter.",
+    );
+  }
+
+  if (!ALLOWED_MIME_PREFIXES.some((prefix) => contentType.startsWith(prefix))) {
+    throw new Error(
+      `Unsupported content type: ${contentType}. Allowed: image/*, video/*, audio/*, application/pdf`,
+    );
+  }
+
+  const { ulid } = await import("ulidx");
+  const mediaId = ulid();
+  const basename = filename.includes("/")
+    ? filename.slice(filename.lastIndexOf("/") + 1)
+    : filename;
+  const rawExt = basename.includes(".") ? basename.slice(basename.lastIndexOf(".")) : "";
+  const ext = FILE_EXT_RE.test(rawExt) ? rawExt : "";
+  const storageKey = `${mediaId}${ext}`;
+  const now = new Date().toISOString();
+  const byteArray = new Uint8Array(bytes);
+
+  // Write bytes to storage first, then create DB record.
+  // If DB insert fails, delete the storage object so we don't leak files.
+  // (cleanupPendingUploads only deletes 'pending' DB rows; objects with no
+  // row are invisible to it.)
+  await storage.upload({ key: storageKey, body: byteArray, contentType });
+
+  try {
+    await db
+      .insertInto("media" as keyof Database)
+      .values({
+        id: mediaId,
+        filename,
+        mime_type: contentType,
+        size: byteArray.byteLength,
+        storage_key: storageKey,
+        status: "ready",
+        created_at: now,
+      } as never)
+      .execute();
+  } catch (error) {
+    // Best-effort cleanup of the orphaned storage object. Log if cleanup
+    // itself fails so operators see the leak instead of silently dropping it.
+    try {
+      await storage.delete(storageKey);
+    } catch (cleanupError) {
+      console.warn(
+        `[bridge] media/upload: DB insert failed and storage cleanup failed for ${storageKey}. ` +
+          `Storage object is leaked.`,
+        cleanupError,
+      );
+    }
+    throw error;
+  }
+
+  return {
+    mediaId,
+    storageKey,
+    url: `/_emdash/api/media/file/${storageKey}`,
+  };
+}
+
+/**
+ * Delete a media item: remove the DB row first, then best-effort delete
+ * the storage object. Returns false when the id does not exist.
+ */
+async function mediaDelete(
+  db: Kysely,
+  id: string,
+  storage?: BridgeStorage | null,
+): Promise {
+  // Look up storage key before deleting
+  const media = await db
+    .selectFrom("media" as keyof Database)
+    .where("id", "=", id)
+    .select("storage_key")
+    .executeTakeFirst();
+
+  if (!media) return false;
+
+  // Delete the DB row first
+  const result = await db
+    .deleteFrom("media" as keyof Database)
+    .where("id", "=", id)
+    .executeTakeFirst();
+
+  // Delete the storage object. If this fails, log but don't throw —
+  // the DB row is already deleted and the orphan cleanup cron will
+  // catch it. Matches the Cloudflare bridge's behavior.
+  if (storage && (media as { storage_key: string }).storage_key) {
+    try {
+      await storage.delete((media as { storage_key: string }).storage_key);
+    } catch (error) {
+      console.warn(
+        `[bridge] Failed to delete storage object ${(media as { storage_key: string }).storage_key}:`,
+        error,
+      );
+    }
+  }
+
+  return BigInt(result.numDeletedRows) > 0n;
+}
+
+// ── HTTP Operations ──────────────────────────────────────────────────────
+
+/** Marshaled RequestInit shape sent over the bridge from the wrapper */
+interface MarshaledRequestInit {
+ method?: string;
+ redirect?: RequestRedirect;
+ /** List of [name, value] pairs to preserve multi-value headers */
+ headers?: Array<[string, string]>;
+ bodyType?: "string" | "base64" | "formdata";
+ body?: unknown;
+}
+
+/**
+ * Reverse the wrapper's marshalRequestInit() to reconstruct a real RequestInit
+ * with proper Headers, binary bodies, and FormData.
+ */
+function unmarshalRequestInit(
+ marshaled: MarshaledRequestInit | undefined,
+): RequestInit | undefined {
+ if (!marshaled) return undefined;
+ const init: RequestInit = {};
+ if (marshaled.method) init.method = marshaled.method;
+ if (marshaled.redirect) init.redirect = marshaled.redirect;
+ if (marshaled.headers && marshaled.headers.length > 0) {
+ // Use a Headers instance and append() so duplicates are preserved
+ // (e.g., multiple Set-Cookie). A plain Record would collapse them.
+ const headers = new Headers();
+ for (const [name, value] of marshaled.headers) {
+ headers.append(name, value);
+ }
+ init.headers = headers;
+ }
+ if (marshaled.bodyType && marshaled.body !== undefined) {
+ switch (marshaled.bodyType) {
+ case "string":
+ init.body = marshaled.body as string;
+ break;
+ case "base64":
+ init.body = Buffer.from(marshaled.body as string, "base64");
+ break;
+ case "formdata": {
+ const fd = new FormData();
+ const parts = marshaled.body as Array<{
+ name: string;
+ value: string;
+ filename?: string;
+ type?: string;
+ isBlob?: boolean;
+ }>;
+ for (const part of parts) {
+ if (part.isBlob) {
+ const bytes = Buffer.from(part.value, "base64");
+ const blob = new Blob([bytes], { type: part.type || "application/octet-stream" });
+ fd.append(part.name, blob, part.filename);
+ } else {
+ fd.append(part.name, part.value);
+ }
+ }
+ init.body = fd;
+ break;
+ }
+ }
+ }
+ return init;
+}
+
+/**
+ * Perform an outbound fetch on behalf of a sandboxed plugin, enforcing the
+ * manifest's host allowlist (unless network:fetch:any was granted). The
+ * response body is base64-encoded so binary content survives the JSON bridge.
+ *
+ * NOTE(review): duplicate response headers (e.g. multiple Set-Cookie)
+ * collapse to the last value in the returned record — confirm parity with
+ * the Cloudflare bridge before changing the shape.
+ */
+async function httpFetch(
+  url: string,
+  marshaledInit: unknown,
+  opts: BridgeHandlerOptions,
+): Promise<{
+  status: number;
+  statusText: string;
+  headers: Record;
+  bodyBase64: string;
+}> {
+  const hasAnyFetch = opts.capabilities.includes("network:fetch:any");
+  const httpAccess = hasAnyFetch
+    ? createUnrestrictedHttpAccess(opts.pluginId)
+    : createHttpAccess(opts.pluginId, opts.allowedHosts || []);
+
+  const init = unmarshalRequestInit(marshaledInit as MarshaledRequestInit | undefined);
+  const res = await httpAccess.fetch(url, init);
+  // Read as bytes to preserve binary content (images, audio, etc.)
+  const bytes = new Uint8Array(await res.arrayBuffer());
+  const headers: Record = {};
+  res.headers.forEach((v, k) => {
+    headers[k] = v;
+  });
+  return {
+    status: res.status,
+    statusText: res.statusText,
+    headers,
+    bodyBase64: Buffer.from(bytes).toString("base64"),
+  };
+}
+
+// ── User Operations ──────────────────────────────────────────────────────
+
+/** Subset of the users table columns exposed to plugins. */
+interface UserRow {
+  id: string;
+  email: string;
+  name: string | null;
+  role: number;
+  created_at: string;
+}
+
+/** Shape a user row for the plugin-facing API (snake_case → camelCase). */
+function rowToUser(row: UserRow) {
+  const { id, email, name, role } = row;
+  return { id, email, name, role, createdAt: row.created_at };
+}
+
+/** Fetch one user by id; null when absent. */
+async function userGet(
+  db: Kysely,
+  id: string,
+): Promise<{
+  id: string;
+  email: string;
+  name: string | null;
+  role: number;
+  createdAt: string;
+} | null> {
+  const row = await db
+    .selectFrom("users" as keyof Database)
+    .where("id", "=", id)
+    .select(["id", "email", "name", "role", "created_at"])
+    .executeTakeFirst();
+  if (!row) return null;
+  return rowToUser(row as unknown as UserRow);
+}
+
+/**
+ * Fetch one user by email.
+ * NOTE(review): the lookup lowercases the input — assumes emails are
+ * stored lowercase; confirm against the user-creation path.
+ */
+async function userGetByEmail(
+  db: Kysely,
+  email: string,
+): Promise<{
+  id: string;
+  email: string;
+  name: string | null;
+  role: number;
+  createdAt: string;
+} | null> {
+  const row = await db
+    .selectFrom("users" as keyof Database)
+    .where("email", "=", email.toLowerCase())
+    .select(["id", "email", "name", "role", "created_at"])
+    .executeTakeFirst();
+  if (!row) return null;
+  return rowToUser(row as unknown as UserRow);
+}
+
+/**
+ * List users with keyset pagination (id descending) and optional role
+ * filter. Unlike contentList, the limit is clamped to at least 1.
+ */
+async function userList(
+  db: Kysely,
+  opts: Record,
+): Promise<{
+  items: Array<{ id: string; email: string; name: string | null; role: number; createdAt: string }>;
+  nextCursor?: string;
+}> {
+  const limit = Math.max(1, Math.min(Number(opts.limit) || 50, 100));
+
+  let query = db
+    .selectFrom("users" as keyof Database)
+    .select(["id", "email", "name", "role", "created_at"])
+    .orderBy("id", "desc");
+
+  if (opts.role !== undefined) {
+    query = query.where("role", "=", Number(opts.role));
+  }
+  if (typeof opts.cursor === "string") {
+    query = query.where("id", "<", opts.cursor);
+  }
+
+  const rows = await query.limit(limit + 1).execute();
+  const pageRows = rows.slice(0, limit);
+  const items = pageRows.map((row) => rowToUser(row as unknown as UserRow));
+  const hasMore = rows.length > limit;
+
+  return {
+    items,
+    nextCursor: hasMore && items.length > 0 ? items.at(-1)!.id : undefined,
+  };
+}
+
+// ── Storage Operations ───────────────────────────────────────────────────
+
+/**
+ * Construct a PluginStorageRepository for the requested collection.
+ * Uses the indexes from the plugin's storage config (if provided) so
+ * query/count operations support the same WHERE/ORDER BY clauses as
+ * in-process plugins.
+ *
+ * When no storageConfig was supplied the repo is built with an empty
+ * index list.
+ */
+function getStorageRepo(opts: BridgeHandlerOptions, collection: string): PluginStorageRepository {
+  const config = opts.storageConfig?.[collection];
+  // Merge unique indexes into the indexes list since both are queryable
+  const allIndexes: Array = [
+    ...(config?.indexes ?? []),
+    ...(config?.uniqueIndexes ?? []),
+  ];
+  return new PluginStorageRepository(opts.db, opts.pluginId, collection, allIndexes);
+}
+
+/** Fetch one document by id from the plugin's collection. */
+async function storageGet(
+  opts: BridgeHandlerOptions,
+  collection: string,
+  id: string,
+): Promise {
+  return getStorageRepo(opts, collection).get(id);
+}
+
+/** Upsert one document by id. */
+async function storagePut(
+  opts: BridgeHandlerOptions,
+  collection: string,
+  id: string,
+  data: unknown,
+): Promise {
+  await getStorageRepo(opts, collection).put(id, data);
+}
+
+/** Delete one document by id; delegates the deleted/not-found result. */
+async function storageDelete(
+  opts: BridgeHandlerOptions,
+  collection: string,
+  id: string,
+): Promise {
+  return getStorageRepo(opts, collection).delete(id);
+}
+
+/**
+ * Query documents with where/orderBy/limit/cursor, delegating to the
+ * repository so indexed-field filtering matches in-process plugins.
+ */
+async function storageQuery(
+  opts: BridgeHandlerOptions,
+  collection: string,
+  queryOpts: Record,
+): Promise<{ items: Array<{ id: string; data: unknown }>; hasMore: boolean; cursor?: string }> {
+  const repo = getStorageRepo(opts, collection);
+  // eslint-disable-next-line typescript-eslint/no-unsafe-type-assertion -- WhereClause is structurally Record
+  const result = await repo.query({
+    where: queryOpts.where as never,
+    orderBy: queryOpts.orderBy as Record | undefined,
+    limit: typeof queryOpts.limit === "number" ? queryOpts.limit : undefined,
+    cursor: typeof queryOpts.cursor === "string" ? queryOpts.cursor : undefined,
+  });
+  return {
+    items: result.items,
+    hasMore: result.hasMore,
+    cursor: result.cursor,
+  };
+}
+
+/** Count documents matching an optional where clause. */
+async function storageCount(
+  opts: BridgeHandlerOptions,
+  collection: string,
+  where?: Record,
+): Promise {
+  const repo = getStorageRepo(opts, collection);
+  // eslint-disable-next-line typescript-eslint/no-unsafe-type-assertion -- WhereClause is structurally Record
+  return repo.count(where as never);
+}
+
+/** Batch get; empty/missing id list short-circuits to []. */
+async function storageGetMany(
+  opts: BridgeHandlerOptions,
+  collection: string,
+  ids: string[],
+): Promise> {
+  if (!ids || ids.length === 0) return [];
+  const repo = getStorageRepo(opts, collection);
+  const result = await repo.getMany(ids);
+  // Return as a list of [id, data] pairs rather than a plain object so
+  // special property names like "__proto__" survive transport. The wrapper
+  // reconstructs a Map from these entries.
+  return [...result.entries()];
+}
+
+/** Batch upsert; empty/missing item list is a no-op. */
+async function storagePutMany(
+  opts: BridgeHandlerOptions,
+  collection: string,
+  items: Array<{ id: string; data: unknown }>,
+): Promise {
+  if (!items || items.length === 0) return;
+  await getStorageRepo(opts, collection).putMany(items);
+}
+
+/** Batch delete; returns the number of documents removed (0 for empty input). */
+async function storageDeleteMany(
+  opts: BridgeHandlerOptions,
+  collection: string,
+  ids: string[],
+): Promise {
+  if (!ids || ids.length === 0) return 0;
+  return getStorageRepo(opts, collection).deleteMany(ids);
+}
diff --git a/packages/workerd/src/sandbox/capnp.ts b/packages/workerd/src/sandbox/capnp.ts
new file mode 100644
index 000000000..75bb00f71
--- /dev/null
+++ b/packages/workerd/src/sandbox/capnp.ts
@@ -0,0 +1,124 @@
+/**
+ * Cap'n Proto Config Generator for workerd
+ *
+ * Generates workerd configuration from plugin manifests.
+ * Each plugin becomes a nanoservice with:
+ * - Its own listening socket (for hook/route invocation from Node)
+ * - An external service definition for the Node backing service
+ * - globalOutbound set to the backing service (all fetch() calls route
+ * through the backing service, which enforces capability checks)
+ */
+
+import type { PluginManifest } from "emdash";
+
+const SAFE_ID_RE = /[^a-z0-9_-]/gi;
+
+interface LoadedPlugin {
+  // Parsed plugin manifest (id, version, capabilities, ...).
+  manifest: PluginManifest;
+  // Bundled plugin source (ESModule text), written to disk for workerd.
+  code: string;
+  // Port the plugin's nanoservice listens on inside workerd.
+  port: number;
+  // Auth token the plugin presents on backing-service requests.
+  // NOTE(review): mirrors the LoadedPlugin shape in runner.ts — keep in sync.
+  token: string;
+}
+
+interface CapnpOptions {
+  /** Loaded plugins keyed by pluginId (`manifest.id:manifest.version`). */
+  plugins: Map<string, LoadedPlugin>;
+  /** URL of the Node backing service, e.g. `http://127.0.0.1:PORT`. */
+  backingServiceUrl: string;
+  /** Directory where wrapper/plugin module files were written. */
+  configDir: string;
+}
+
+/**
+ * Generate a workerd capnp configuration file.
+ *
+ * Each plugin gets its own worker (nanoservice) with:
+ * - A listener socket on its assigned port
+ * - Modules for wrapper + plugin code
+ * - globalOutbound pointing to the backing service external server
+ * (all outbound fetch() goes through the backing service for
+ * capability enforcement, SSRF protection, and host allowlist checks)
+ *
+ * KNOWN LIMITATION on resource limits:
+ * Standalone workerd does NOT support per-worker cpuMs/memoryMb/subrequests
+ * limits — those are Cloudflare platform features, not workerd capnp options.
+ * The only limit we enforce on the Node path is wallTimeMs, which is wrapped
+ * via Promise.race in WorkerdSandboxedPlugin.invokeHook/invokeRoute.
+ * For true CPU/memory isolation, deploy on Cloudflare Workers.
+ */
+export function generateCapnpConfig(options: CapnpOptions): string {
+  const { plugins, backingServiceUrl } = options;
+
+  // Parse backing service URL for external server config.
+  // NOTE(review): a default-port URL would yield an empty `port` string;
+  // callers in runner.ts always pass an explicit port — confirm if reused.
+  const backingUrl = new URL(backingServiceUrl);
+  const backingAddress = `${backingUrl.hostname}:${backingUrl.port}`;
+
+  // capnp config text is whitespace-insensitive; indentation inside the
+  // template strings below is purely cosmetic.
+  const lines: string[] = [
+    `# Auto-generated workerd configuration for EmDash plugin sandbox`,
+    `# Generated at: ${new Date().toISOString()}`,
+    `# Plugins: ${plugins.size}`,
+    ``,
+    `using Workerd = import "/workerd/workerd.capnp";`,
+    ``,
+    `const config :Workerd.Config = (`,
+    `  services = [`,
+    // External service: the Node backing service
+    `    (name = "emdash-backing", external = (address = "${backingAddress}")),`,
+  ];
+
+  // Add a service + socket for each plugin
+  const socketEntries: string[] = [];
+
+  for (const [pluginId, plugin] of plugins) {
+    // Plugin ids may contain characters illegal in capnp identifiers
+    // (e.g. the ":" joining id and version) — normalize to underscores.
+    const safeId = pluginId.replace(SAFE_ID_RE, "_");
+
+    lines.push(`    (name = "plugin-${safeId}", worker = .plugin_${safeId}),`);
+    socketEntries.push(
+      `    (name = "socket-${safeId}", address = "127.0.0.1:${plugin.port}", service = "plugin-${safeId}"),`,
+    );
+  }
+
+  lines.push(`  ],`);
+
+  // Socket definitions
+  lines.push(`  sockets = [`);
+  for (const socket of socketEntries) {
+    lines.push(socket);
+  }
+  lines.push(`  ],`);
+  lines.push(`);`);
+  lines.push(``);
+
+  // Worker definitions for each plugin
+  for (const [pluginId] of plugins) {
+    const safeId = pluginId.replace(SAFE_ID_RE, "_");
+    // File names must match what restart() in runner.ts writes to configDir.
+    const wrapperFile = `${safeId}-wrapper.js`;
+    const pluginFile = `${safeId}-plugin.js`;
+
+    lines.push(`const plugin_${safeId} :Workerd.Worker = (`);
+    lines.push(`  modules = [`);
+    lines.push(`    (name = "worker.js", esModule = embed "${wrapperFile}"),`);
+    lines.push(`    (name = "sandbox-plugin.js", esModule = embed "${pluginFile}"),`);
+    lines.push(`  ],`);
+    lines.push(`  compatibilityDate = "2025-01-01",`);
+    lines.push(`  compatibilityFlags = ["nodejs_compat"],`);
+    // globalOutbound routes ALL outbound fetch() calls from the plugin
+    // through the backing service. This is intentional security posture:
+    //
+    // - Bridge calls (e.g., fetch("http://bridge/content/get")) are
+    //   dispatched normally by the backing service path router.
+    // - Direct fetch() calls to arbitrary URLs (e.g., fetch("https://evil.com"))
+    //   also arrive at the backing service. They will NOT match any known
+    //   bridge method and will return 500 "Unknown bridge method".
+    //
+    // In other words: plugins cannot reach the internet by calling plain
+    // fetch(). They must use ctx.http.fetch(), which goes through the
+    // http/fetch bridge handler, which enforces network:fetch capability
+    // and the allowedHosts allowlist.
+    lines.push(`  globalOutbound = "emdash-backing",`);
+    // Note: workerd capnp config does not support per-worker cpu/memory
+    // limits. Wall-time is enforced in WorkerdSandboxedPlugin via
+    // Promise.race. See generateCapnpConfig docstring above.
+    lines.push(`);`);
+    lines.push(``);
+  }
+
+  return lines.join("\n");
+}
diff --git a/packages/workerd/src/sandbox/dev-runner.ts b/packages/workerd/src/sandbox/dev-runner.ts
new file mode 100644
index 000000000..67795f973
--- /dev/null
+++ b/packages/workerd/src/sandbox/dev-runner.ts
@@ -0,0 +1,275 @@
+/**
+ * Miniflare Dev Runner
+ *
+ * Uses miniflare for plugin sandboxing during development.
+ * Provides the same SandboxRunner interface as WorkerdSandboxRunner
+ * but uses miniflare's serviceBindings-as-functions pattern instead
+ * of raw workerd + capnp + HTTP backing service.
+ *
+ * Advantages over raw workerd in dev:
+ * - No HTTP backing service needed (bridge calls are Node functions)
+ * - No capnp config generation
+ * - No child process management
+ * - Faster startup
+ */
+
+import { randomBytes } from "node:crypto";
+import { createRequire } from "node:module";
+
+import type {
+ SandboxRunner,
+ SandboxedPlugin,
+ SandboxEmailSendCallback,
+ SandboxOptions,
+ SerializedRequest,
+} from "emdash";
+import type { PluginManifest } from "emdash";
+
+import { createBridgeHandler } from "./bridge-handler.js";
+import { generatePluginWrapper } from "./wrapper.js";
+
+const SAFE_ID_RE = /[^a-z0-9_-]/gi;
+
+/**
+ * Miniflare-based sandbox runner for development.
+ */
+export class MiniflareDevRunner implements SandboxRunner {
+ private options: SandboxOptions;
+ private siteInfo?: { name: string; url: string; locale: string };
+ private emailSendCallback: SandboxEmailSendCallback | null = null;
+
+ /** Miniflare instance (lazily created) */
+ private mf: InstanceType | null = null;
+
+ /** Loaded plugins */
+ private plugins = new Map();
+
+ /** Whether miniflare is running */
+ private running = false;
+
+ /**
+ * Per-startup token sent on every hook/route invocation. Plugins reject
+ * requests without this token. In dev mode the plugin worker is only
+ * reachable through miniflare's dispatchFetch, but we still wire the
+ * token for consistency with production and so the wrapper template
+ * is identical in both modes.
+ */
+ private devInvokeToken: string;
+
+ constructor(options: SandboxOptions) {
+ this.options = options;
+ this.siteInfo = options.siteInfo;
+ this.emailSendCallback = options.emailSend ?? null;
+ this.devInvokeToken = randomBytes(32).toString("hex");
+ }
+
+ /** Get the per-startup invoke token (sent on hook/route requests to plugins) */
+ get invokeAuthToken() {
+ return this.devInvokeToken;
+ }
+
+ isAvailable(): boolean {
+ try {
+ const esmRequire = createRequire(import.meta.url);
+ esmRequire.resolve("miniflare");
+ return true;
+ } catch {
+ return false;
+ }
+ }
+
+ isHealthy(): boolean {
+ return this.running;
+ }
+
+ setEmailSend(callback: SandboxEmailSendCallback | null): void {
+ this.emailSendCallback = callback;
+ }
+
+ async load(manifest: PluginManifest, code: string): Promise {
+ const pluginId = `${manifest.id}:${manifest.version}`;
+ this.plugins.set(pluginId, { manifest, code });
+
+ // Rebuild miniflare with all plugins
+ await this.rebuild();
+
+ return new MiniflareDevPlugin(pluginId, manifest, this);
+ }
+
+ async terminateAll(): Promise {
+ if (this.mf) {
+ await this.mf.dispose();
+ this.mf = null;
+ }
+ this.plugins.clear();
+ this.running = false;
+ }
+
+ /**
+ * Unload a single plugin and rebuild miniflare without it.
+ * Called from MiniflareDevPlugin.terminate() so marketplace
+ * update/uninstall flows actually drop the old plugin from
+ * the dev sandbox instead of leaving stale entries.
+ */
+ async unloadPlugin(pluginId: string): Promise {
+ if (this.plugins.delete(pluginId)) {
+ await this.rebuild();
+ }
+ }
+
+ /**
+ * Rebuild miniflare with current plugin configuration.
+ * Called on each plugin load/unload.
+ */
+ private async rebuild(): Promise {
+ if (this.mf) {
+ await this.mf.dispose();
+ this.mf = null;
+ }
+
+ if (this.plugins.size === 0) {
+ this.running = false;
+ return;
+ }
+
+ const { Miniflare } = await import("miniflare");
+
+ // Build worker configs with outboundService to intercept bridge calls.
+ // The wrapper code does fetch("http://bridge/method", ...).
+ // outboundService intercepts all outbound fetches and routes bridge
+ // calls to the Node handler function.
+ const workerConfigs = [];
+
+ for (const [pluginId, { manifest, code }] of this.plugins) {
+ const bridgeHandler = createBridgeHandler({
+ pluginId: manifest.id,
+ version: manifest.version || "0.0.0",
+ capabilities: manifest.capabilities || [],
+ allowedHosts: manifest.allowedHosts || [],
+ storageCollections: Object.keys(manifest.storage || {}),
+ storageConfig: manifest.storage as
+ | Record }>
+ | undefined,
+ db: this.options.db,
+ emailSend: () => this.emailSendCallback,
+ storage: this.options.mediaStorage,
+ });
+
+ const wrapperCode = generatePluginWrapper(manifest, {
+ site: this.siteInfo,
+ backingServiceUrl: "http://bridge",
+ authToken: "dev-mode",
+ invokeToken: this.devInvokeToken,
+ });
+
+ // outboundService intercepts all fetch() calls from this worker.
+ // Calls to http://bridge/... go to the Node bridge handler.
+ // Other calls pass through for network:fetch.
+ workerConfigs.push({
+ name: pluginId.replace(SAFE_ID_RE, "_"),
+ // The wrapper imports "sandbox-plugin.js", so we provide both
+ // the wrapper as the main module and the plugin code as a
+ // named module that the wrapper can import.
+ modulesRoot: "/",
+ modules: [
+ { type: "ESModule" as const, path: "worker.js", contents: wrapperCode },
+ { type: "ESModule" as const, path: "sandbox-plugin.js", contents: code },
+ ],
+ outboundService: async (request: Request) => {
+ const url = new URL(request.url);
+ // Only allow bridge calls. Any other outbound fetch is blocked
+ // to enforce that all network access goes through ctx.http.fetch
+ // (which routes via the bridge with capability + host validation).
+ // Without this, plugins could bypass network:fetch / allowedHosts
+ // by calling plain fetch() directly.
+ if (url.hostname === "bridge") {
+ return bridgeHandler(request);
+ }
+ return new Response(
+ `Direct fetch() blocked in sandbox. Plugin "${manifest.id}" must use ctx.http.fetch() (requires network:fetch capability).`,
+ { status: 403 },
+ );
+ },
+ });
+ }
+
+ this.mf = new Miniflare({ workers: workerConfigs });
+ this.running = true;
+ }
+
+ /**
+ * Dispatch a fetch to a specific plugin worker in miniflare.
+ */
+ async dispatchToPlugin(pluginId: string, url: string, init?: RequestInit): Promise {
+ if (!this.mf) {
+ throw new Error(`Miniflare not running, cannot dispatch to ${pluginId}`);
+ }
+ const workerName = pluginId.replace(SAFE_ID_RE, "_");
+ const worker = await this.mf.getWorker(workerName);
+ return worker.fetch(url, init);
+ }
+}
+
+/**
+ * A plugin running in a miniflare dev isolate.
+ */
+class MiniflareDevPlugin implements SandboxedPlugin {
+  readonly id: string;
+  private manifest: PluginManifest;
+  private runner: MiniflareDevRunner;
+
+  constructor(id: string, manifest: PluginManifest, runner: MiniflareDevRunner) {
+    this.id = id;
+    this.manifest = manifest;
+    this.runner = runner;
+  }
+
+  /**
+   * Invoke a plugin hook through the dev sandbox.
+   * @returns The hook's unwrapped return value (`value` from the JSON body).
+   */
+  async invokeHook(hookName: string, event: unknown): Promise<unknown> {
+    if (!this.runner.isHealthy()) {
+      throw new Error(`Dev sandbox unavailable for ${this.id}`);
+    }
+    const res = await this.runner.dispatchToPlugin(this.id, `http://plugin/hook/${hookName}`, {
+      method: "POST",
+      headers: {
+        "Content-Type": "application/json",
+        Authorization: `Bearer ${this.runner.invokeAuthToken}`,
+      },
+      body: JSON.stringify({ event }),
+    });
+    if (!res.ok) {
+      const text = await res.text();
+      throw new Error(`Plugin ${this.id} hook ${hookName} failed: ${text}`);
+    }
+    const result = (await res.json()) as { value: unknown };
+    return result.value;
+  }
+
+  /**
+   * Invoke a plugin route through the dev sandbox.
+   * Unlike invokeHook, the JSON response body is returned as-is (no
+   * `value` unwrapping).
+   */
+  async invokeRoute(
+    routeName: string,
+    input: unknown,
+    request: SerializedRequest,
+  ): Promise<unknown> {
+    if (!this.runner.isHealthy()) {
+      throw new Error(`Dev sandbox unavailable for ${this.id}`);
+    }
+    const res = await this.runner.dispatchToPlugin(this.id, `http://plugin/route/${routeName}`, {
+      method: "POST",
+      headers: {
+        "Content-Type": "application/json",
+        Authorization: `Bearer ${this.runner.invokeAuthToken}`,
+      },
+      body: JSON.stringify({ input, request }),
+    });
+    if (!res.ok) {
+      const text = await res.text();
+      throw new Error(`Plugin ${this.id} route ${routeName} failed: ${text}`);
+    }
+    return res.json();
+  }
+
+  async terminate(): Promise<void> {
+    // Drop this plugin from the runner so marketplace update/uninstall
+    // actually removes it from the dev sandbox.
+    await this.runner.unloadPlugin(this.id);
+  }
+}
diff --git a/packages/workerd/src/sandbox/index.ts b/packages/workerd/src/sandbox/index.ts
new file mode 100644
index 000000000..306ae67b2
--- /dev/null
+++ b/packages/workerd/src/sandbox/index.ts
@@ -0,0 +1,3 @@
+// Public entry points of the @emdash-cms/workerd sandbox module:
+// production runner (workerd sidecar), dev runner (miniflare), and the
+// bridge handler shared by both.
+export { WorkerdSandboxRunner, createSandboxRunner } from "./runner.js";
+export { MiniflareDevRunner } from "./dev-runner.js";
+export { createBridgeHandler } from "./bridge-handler.js";
diff --git a/packages/workerd/src/sandbox/runner.ts b/packages/workerd/src/sandbox/runner.ts
new file mode 100644
index 000000000..d1f9b1d9f
--- /dev/null
+++ b/packages/workerd/src/sandbox/runner.ts
@@ -0,0 +1,831 @@
+/**
+ * Workerd Sandbox Runner
+ *
+ * Implements the SandboxRunner interface for Node.js deployments using
+ * workerd as a sidecar process. Plugins run in isolated V8 isolates
+ * with capability-scoped access to EmDash APIs.
+ *
+ * Architecture:
+ * - Node spawns workerd with a generated capnp config
+ * - Each plugin is a nanoservice with its own internal port
+ * - Plugins communicate with Node via a backing service HTTP server
+ * - Node invokes plugin hooks/routes via HTTP to the plugin's port
+ * - Plugins call back to Node for content/media/KV/email operations
+ *
+ * The backing service HTTP server runs in the Node process and handles
+ * authenticated requests from plugins. Each plugin receives a unique
+ * auth token that encodes its ID and capabilities.
+ */
+
+import { execFileSync, spawn } from "node:child_process";
+import type { ChildProcess } from "node:child_process";
+import { createHmac, randomBytes, timingSafeEqual } from "node:crypto";
+import { writeFile, mkdir, rm } from "node:fs/promises";
+import { createServer } from "node:http";
+import type { Server } from "node:http";
+import { createRequire } from "node:module";
+import { tmpdir } from "node:os";
+import { join } from "node:path";
+
+import type {
+ SandboxRunner,
+ SandboxedPlugin,
+ SandboxEmailSendCallback,
+ SandboxOptions,
+ SandboxRunnerFactory,
+ SerializedRequest,
+} from "emdash";
+import type { PluginManifest } from "emdash";
+// @ts-ignore -- SandboxUnavailableError is a class export, not type-only
+import { SandboxUnavailableError } from "emdash";
+
+import { createBackingServiceHandler } from "./backing-service.js";
+import { generateCapnpConfig } from "./capnp.js";
+import { MiniflareDevRunner } from "./dev-runner.js";
+import { generatePluginWrapper } from "./wrapper.js";
+
+const SAFE_ID_RE = /[^a-z0-9_-]/gi;
+
+/**
+ * Default resource limits for sandboxed plugins.
+ * Matches Cloudflare production limits.
+ */
+const DEFAULT_LIMITS = {
+  cpuMs: 50, // CPU budget per invocation (not enforced by standalone workerd)
+  memoryMb: 128, // isolate memory budget (not enforced by standalone workerd)
+  subrequests: 10, // outbound fetches per invocation (not enforced here)
+  wallTimeMs: 30_000, // wall-clock budget; the only limit enforced on the Node path
+} as const;
+
+/**
+ * Resolved resource limits with defaults applied.
+ * Same fields as DEFAULT_LIMITS, but every field is required.
+ */
+interface ResolvedLimits {
+  cpuMs: number;
+  memoryMb: number;
+  subrequests: number;
+  wallTimeMs: number;
+}
+
+/**
+ * Merge caller-supplied limits over DEFAULT_LIMITS.
+ * `undefined` (or a missing field) falls back to the default; explicit
+ * zeroes are respected because `??` only treats null/undefined as absent.
+ */
+function resolveLimits(limits?: SandboxOptions["limits"]): ResolvedLimits {
+  const overrides = limits ?? {};
+  return {
+    cpuMs: overrides.cpuMs ?? DEFAULT_LIMITS.cpuMs,
+    memoryMb: overrides.memoryMb ?? DEFAULT_LIMITS.memoryMb,
+    subrequests: overrides.subrequests ?? DEFAULT_LIMITS.subrequests,
+    wallTimeMs: overrides.wallTimeMs ?? DEFAULT_LIMITS.wallTimeMs,
+  };
+}
+
+/**
+ * State for a loaded plugin in the workerd process.
+ * NOTE(review): a near-identical LoadedPlugin exists in capnp.ts — keep
+ * the two shapes in sync.
+ */
+interface LoadedPlugin {
+  // Parsed plugin manifest (id, version, capabilities, ...).
+  manifest: PluginManifest;
+  // Bundled plugin source (ESModule text), written to configDir on restart().
+  code: string;
+  /** Port the plugin's nanoservice listens on inside workerd */
+  port: number;
+  /** Auth token for this plugin's backing service requests */
+  token: string;
+}
+
+/**
+ * Workerd sandbox runner for Node.js deployments.
+ *
+ * Manages a workerd child process and a backing service HTTP server.
+ * Plugins are added/removed by regenerating the capnp config and
+ * restarting workerd (millisecond cold start).
+ */
+export class WorkerdSandboxRunner implements SandboxRunner {
+  private options: SandboxOptions;
+  private limits: ResolvedLimits;
+  private siteInfo?: { name: string; url: string; locale: string };
+
+  /** Loaded plugins indexed by pluginId (manifest.id:manifest.version) */
+  private plugins = new Map<string, LoadedPlugin>();
+
+  /** Backing service HTTP server (runs in Node) */
+  private backingServer: Server | null = null;
+  private backingPort = 0;
+
+  /** workerd child process */
+  private workerdProcess: ChildProcess | null = null;
+
+  /** Master secret for generating per-plugin auth tokens */
+  private masterSecret = randomBytes(32).toString("hex");
+
+  /**
+   * Per-startup token the runner sends on every hook/route invocation
+   * to its plugins. Plugins reject requests without this token, which
+   * prevents same-host attackers from invoking plugin hooks directly
+   * via the per-plugin TCP listener on 127.0.0.1.
+   */
+  private invokeToken = randomBytes(32).toString("hex");
+
+  /** Temporary directory for capnp config and plugin code files */
+  private configDir: string | null = null;
+
+  /** Email send callback, wired from EmailPipeline */
+  private emailSendCallback: SandboxEmailSendCallback | null = null;
+
+  /** Epoch counter, incremented on each workerd restart */
+  private epoch = 0;
+
+  /** Next available port for plugin nanoservices */
+  private nextPluginPort = 18788;
+
+  /** Whether workerd is currently healthy */
+  private healthy = false;
+
+  /** Whether workerd needs to be (re)started before next invocation */
+  private needsRestart = false;
+
+  /** Serializes concurrent ensureRunning() calls */
+  private startupPromise: Promise<void> | null = null;
+
+  /** Crash restart state */
+  private crashCount = 0;
+  private crashWindowStart = 0;
+  private restartTimer: ReturnType<typeof setTimeout> | null = null;
+  private shuttingDown = false;
+
+  /**
+   * True when stopWorkerd() is intentionally tearing down the child
+   * (e.g., on intentional restart() to reload plugins). The exit handler
+   * uses this to skip crash recovery for intentional stops, otherwise
+   * every plugin reload would trigger a phantom crash-restart cycle.
+   */
+  private intentionalStop = false;
+
+  /** SIGTERM handler for clean shutdown */
+  private sigHandler: (() => void) | null = null;
+
+  /**
+   * @param options - Sandbox wiring: db handle, site info, optional
+   *   resource limits, media storage, and email-send callback.
+   */
+  constructor(options: SandboxOptions) {
+    this.options = options;
+    this.limits = resolveLimits(options.limits);
+    this.siteInfo = options.siteInfo;
+    this.emailSendCallback = options.emailSend ?? null;
+
+    // Warn about unenforceable resource limits. Standalone workerd
+    // only supports wall-time enforcement on the Node path (via
+    // Promise.race). cpuMs, memoryMb, and subrequests are Cloudflare
+    // platform features and are not enforced here.
+    if (
+      options.limits &&
+      (options.limits.cpuMs !== undefined ||
+        options.limits.memoryMb !== undefined ||
+        options.limits.subrequests !== undefined)
+    ) {
+      console.warn(
+        "[emdash:workerd] cpuMs, memoryMb, and subrequests limits are not enforced " +
+          "by standalone workerd. Only wallTimeMs is enforced on the Node path. " +
+          "For full resource isolation, deploy on Cloudflare Workers.",
+      );
+    }
+
+    // Forward SIGTERM to workerd child for clean shutdown.
+    // The handler is kept on `this.sigHandler` so terminateAll() can
+    // deregister it and avoid leaking listeners across runner instances.
+    this.sigHandler = () => {
+      this.shuttingDown = true;
+      void this.terminateAll();
+    };
+    process.on("SIGTERM", this.sigHandler);
+  }
+
+  /**
+   * Probe whether a usable workerd binary exists on this system by
+   * running `workerd --version`. Any failure (missing binary, timeout,
+   * non-zero exit) means "not available".
+   */
+  isAvailable(): boolean {
+    try {
+      const binary = this.resolveWorkerdBinary();
+      // execFileSync (not execSync) so paths with spaces or shell
+      // metacharacters are passed verbatim, not shell-split.
+      execFileSync(binary, ["--version"], { stdio: "ignore", timeout: 5000 });
+    } catch {
+      return false;
+    }
+    return true;
+  }
+
+  /**
+   * Resolve the workerd binary path from node_modules.
+   * Avoids npx, which can download binaries at runtime (supply chain risk).
+   * Falls back to a bare "workerd" (PATH lookup) if resolution fails.
+   */
+  private resolveWorkerdBinary(): string {
+    try {
+      // The workerd package resolves to .../node_modules/workerd/lib/main.js;
+      // the native binary lives at .../node_modules/workerd/bin/workerd.
+      const entryPoint = createRequire(import.meta.url).resolve("workerd");
+      return join(entryPoint, "..", "..", "bin", "workerd");
+    } catch {
+      return "workerd";
+    }
+  }
+
+  /**
+   * Check if the workerd process is currently healthy.
+   *
+   * Returns false when needsRestart is set (process not yet started or
+   * needs to be restarted), since callers using this for monitoring or
+   * external health checks expect "running and serving requests".
+   *
+   * Internal callers that just want to defer-then-invoke should use
+   * ensureRunning() instead, which handles the deferred startup.
+   */
+  isHealthy(): boolean {
+    if (this.needsRestart) return false;
+    if (!this.healthy) return false;
+    const proc = this.workerdProcess;
+    return proc !== null && !proc.killed;
+  }
+
+ /**
+ * Ensure workerd is running. Called before first invocation.
+ * Batches plugin loading: all plugins are registered via load(),
+ * then workerd starts once on the first hook/route call.
+ */
+ async ensureRunning(): Promise {
+ // If a startup is already in progress, wait for it
+ if (this.startupPromise) {
+ await this.startupPromise;
+ return;
+ }
+ if (!this.needsRestart) return;
+
+ // Serialize: concurrent callers await the same promise.
+ // Don't clear needsRestart until startup succeeds, so a transient
+ // failure (waitForReady timeout, spawn error) can be retried by
+ // the next invocation.
+ this.startupPromise = this.restart();
+ try {
+ await this.startupPromise;
+ this.needsRestart = false;
+ } finally {
+ // Always clear startupPromise so a failed start doesn't block
+ // subsequent retries. needsRestart stays true on failure (set above
+ // only after the await succeeds), enabling automatic retry.
+ this.startupPromise = null;
+ }
+ }
+
+  /**
+   * Set the email send callback for sandboxed plugins.
+   * @param callback - Invoked by the email bridge handler; pass null to
+   *   disconnect (e.g., when the email pipeline shuts down).
+   */
+  setEmailSend(callback: SandboxEmailSendCallback | null): void {
+    this.emailSendCallback = callback;
+  }
+
+ /**
+ * Load a sandboxed plugin.
+ *
+ * Adds the plugin to the configuration and restarts workerd
+ * to pick up the new nanoservice.
+ */
+ async load(manifest: PluginManifest, code: string): Promise {
+ const pluginId = `${manifest.id}:${manifest.version}`;
+
+ // Return cached plugin if already loaded
+ const existing = this.plugins.get(pluginId);
+ if (existing) {
+ return new WorkerdSandboxedPlugin(pluginId, manifest, existing.port, this.limits, this);
+ }
+
+ // Assign port and generate auth token
+ const port = this.nextPluginPort++;
+ const token = this.generatePluginToken(manifest);
+
+ this.plugins.set(pluginId, { manifest, code, port, token });
+
+ // Defer workerd start: collect all plugins first, start once.
+ // The runtime loads plugins sequentially, so we batch by deferring
+ // the actual workerd spawn until the first hook/route invocation.
+ this.needsRestart = true;
+
+ return new WorkerdSandboxedPlugin(pluginId, manifest, port, this.limits, this);
+ }
+
+  /**
+   * Unload a single plugin (called from WorkerdSandboxedPlugin.terminate()).
+   *
+   * Removes the plugin from the in-memory map and marks needsRestart so
+   * the next invocation rebuilds workerd without it. No eager restart:
+   * update/uninstall flows typically unload and immediately reload a new
+   * version, and back-to-back restarts would be wasteful.
+   */
+  unloadPlugin(pluginId: string): void {
+    const removed = this.plugins.delete(pluginId);
+    if (!removed) return;
+    // The next load() or invocation regenerates the capnp config
+    // without this plugin's port/listener.
+    this.needsRestart = true;
+  }
+
+ /**
+ * Terminate all loaded plugins and shut down workerd.
+ */
+ async terminateAll(): Promise {
+ this.shuttingDown = true;
+ if (this.restartTimer) {
+ clearTimeout(this.restartTimer);
+ this.restartTimer = null;
+ }
+ if (this.sigHandler) {
+ process.removeListener("SIGTERM", this.sigHandler);
+ this.sigHandler = null;
+ }
+ this.plugins.clear();
+ await this.stopWorkerd();
+ await this.stopBackingServer();
+ if (this.configDir) {
+ await rm(this.configDir, { recursive: true, force: true }).catch(() => {});
+ this.configDir = null;
+ }
+ }
+
+  /**
+   * Schedule a restart with exponential backoff.
+   * Backoff: 1s, 2s, 4s, cap at 30s.
+   * Gives up after 5 failures within 60 seconds.
+   */
+  private scheduleRestart(): void {
+    // Nothing to recover if we're shutting down or no plugins are loaded.
+    if (this.shuttingDown || this.plugins.size === 0) return;
+
+    const now = Date.now();
+
+    // Reset crash window if it's been more than 60 seconds
+    if (now - this.crashWindowStart > 60_000) {
+      this.crashCount = 0;
+      this.crashWindowStart = now;
+    }
+
+    this.crashCount++;
+
+    if (this.crashCount > 5) {
+      console.error(
+        "[emdash:workerd] workerd crashed 5 times in 60 seconds, giving up. " +
+          "Plugins will run unsandboxed. Restart the server to retry.",
+      );
+      return;
+    }
+
+    // Exponential backoff: 1s, 2s, 4s, 8s, 16s, capped at 30s
+    const delayMs = Math.min(1000 * 2 ** (this.crashCount - 1), 30_000);
+    console.warn(`[emdash:workerd] restarting in ${delayMs}ms (attempt ${this.crashCount}/5)`);
+
+    this.restartTimer = setTimeout(() => {
+      this.restartTimer = null;
+      // Just mark as needing restart. The next plugin invocation will
+      // drive the actual restart through ensureRunning(), which serializes
+      // concurrent attempts via startupPromise. We don't call ensureRunning()
+      // here because that would race with plugin-invocation-driven calls
+      // (the finally block clears startupPromise so a second concurrent
+      // caller could enter restart() while the first is still running).
+      //
+      // If no plugin invocations happen after a crash, there's nothing
+      // to recover for, so deferring restart until next use is fine.
+      this.needsRestart = true;
+    }, delayMs);
+  }
+
+  /**
+   * Generate a per-plugin auth token of the form `base64url(claims).hmac`.
+   * The claims encode pluginId and capabilities so the backing service
+   * can validate them server-side (see validateToken).
+   */
+  private generatePluginToken(manifest: PluginManifest): string {
+    // Claims are serialized in a fixed literal order and signed with
+    // HMAC-SHA256 under the per-process master secret.
+    const claims = JSON.stringify({
+      pluginId: manifest.id,
+      version: manifest.version,
+      capabilities: manifest.capabilities || [],
+      allowedHosts: manifest.allowedHosts || [],
+      storageCollections: Object.keys(manifest.storage || {}),
+    });
+    const signature = createHmac("sha256", this.masterSecret).update(claims).digest("base64url");
+    const encodedClaims = Buffer.from(claims).toString("base64url");
+    return `${encodedClaims}.${signature}`;
+  }
+
+  /**
+   * Validate a plugin auth token and extract its claims.
+   * Returns null if invalid.
+   */
+  validateToken(token: string): {
+    pluginId: string;
+    version: string;
+    capabilities: string[];
+    allowedHosts: string[];
+    storageCollections: string[];
+  } | null {
+    // Token format: `base64url(payload).base64url(hmac)` (see generatePluginToken).
+    const parts = token.split(".");
+    if (parts.length !== 2) return null;
+
+    const [payloadB64, hmacB64] = parts;
+    if (!payloadB64 || !hmacB64) return null;
+
+    // Recompute the HMAC over the decoded payload with the same
+    // per-process master secret used at token generation time.
+    const payload = Buffer.from(payloadB64, "base64url").toString();
+    const expectedHmac = createHmac("sha256", this.masterSecret)
+      .update(payload)
+      .digest("base64url");
+
+    // Constant-time comparison to prevent timing side channels.
+    // The length check short-circuits (timingSafeEqual requires equal
+    // lengths); this reveals only the length, not the content.
+    const a = Buffer.from(hmacB64);
+    const b = Buffer.from(expectedHmac);
+    if (a.length !== b.length || !timingSafeEqual(a, b)) return null;
+
+    // Signature is valid, so the payload is trusted JSON we produced;
+    // parse failures (shouldn't happen) are treated as invalid tokens.
+    try {
+      return JSON.parse(payload) as {
+        pluginId: string;
+        version: string;
+        capabilities: string[];
+        allowedHosts: string[];
+        storageCollections: string[];
+      };
+    } catch {
+      return null;
+    }
+  }
+
+  /**
+   * Start or restart workerd with current plugin configuration.
+   *
+   * Stops any running child, ensures the backing server and temp config
+   * directory exist, writes wrapper/plugin modules and the generated
+   * capnp config to disk, spawns workerd, and waits until it serves.
+   */
+  private async restart(): Promise<void> {
+    await this.stopWorkerd();
+
+    // Ensure backing server is running
+    if (!this.backingServer) {
+      await this.startBackingServer();
+    }
+
+    // Create temp directory for config files
+    if (!this.configDir) {
+      this.configDir = join(tmpdir(), `emdash-workerd-${process.pid}-${Date.now()}`);
+      await mkdir(this.configDir, { recursive: true });
+    }
+
+    // Write plugin code files to disk (workerd needs file paths)
+    for (const [pluginId, plugin] of this.plugins) {
+      const safeId = pluginId.replace(SAFE_ID_RE, "_");
+      const wrapperCode = generatePluginWrapper(plugin.manifest, {
+        site: this.siteInfo,
+        backingServiceUrl: `http://127.0.0.1:${this.backingPort}`,
+        authToken: plugin.token,
+        invokeToken: this.invokeToken,
+      });
+      // File names must match the `embed` references in the capnp config.
+      await writeFile(join(this.configDir, `${safeId}-wrapper.js`), wrapperCode);
+      await writeFile(join(this.configDir, `${safeId}-plugin.js`), plugin.code);
+    }
+
+    // Generate capnp config. Note: cpuMs/memoryMb/subrequests from
+    // this.limits are NOT passed here because standalone workerd doesn't
+    // support per-worker enforcement of those limits (Cloudflare-only).
+    // Only wallTimeMs is enforced (via Promise.race in invokeHook/invokeRoute).
+    const capnpConfig = generateCapnpConfig({
+      plugins: this.plugins,
+      backingServiceUrl: `http://127.0.0.1:${this.backingPort}`,
+      configDir: this.configDir,
+    });
+
+    const configPath = join(this.configDir, "workerd.capnp");
+    await writeFile(configPath, capnpConfig);
+
+    // Spawn workerd using resolved binary (not npx)
+    const workerdBin = this.resolveWorkerdBinary();
+    this.workerdProcess = spawn(workerdBin, ["serve", configPath], {
+      stdio: ["ignore", "pipe", "pipe"],
+      env: { ...process.env },
+    });
+
+    this.epoch++;
+
+    // Drain stdout/stderr to prevent pipe buffer deadlock
+    this.workerdProcess.stdout?.on("data", (chunk: Buffer) => {
+      process.stdout.write(`[emdash:workerd] ${chunk.toString()}`);
+    });
+    this.workerdProcess.stderr?.on("data", (chunk: Buffer) => {
+      process.stderr.write(`[emdash:workerd] ${chunk.toString()}`);
+    });
+
+    // Handle workerd exit with auto-restart on crash
+    this.workerdProcess.on("exit", (code, signal) => {
+      this.healthy = false;
+      this.workerdProcess = null;
+      if (this.shuttingDown) return;
+      // Skip crash recovery for intentional stops (e.g., reload via
+      // stopWorkerd() during restart()). Reset the flag so the next
+      // exit, if it happens unexpectedly, is treated as a real crash.
+      if (this.intentionalStop) {
+        this.intentionalStop = false;
+        return;
+      }
+      // Restart on non-zero exit code OR signal-based termination (OOM, kill)
+      if ((code !== 0 && code !== null) || signal) {
+        const reason = signal ? `signal ${signal}` : `code ${code}`;
+        console.error(`[emdash:workerd] workerd exited with ${reason}`);
+        this.scheduleRestart();
+      }
+    });
+
+    // Wait for workerd to be ready
+    await this.waitForReady();
+    this.healthy = true;
+  }
+
+  /**
+   * Wait for workerd to be ready by polling the first plugin's port.
+   * Resolves immediately when no plugins are loaded; throws after a
+   * 10-second timeout if the worker never responds.
+   */
+  private async waitForReady(): Promise<void> {
+    const startTime = Date.now();
+    const timeout = 10_000;
+
+    while (Date.now() - startTime < timeout) {
+      try {
+        // Try to reach the first plugin
+        const firstPlugin = this.plugins.values().next().value;
+        if (!firstPlugin) {
+          this.healthy = true;
+          return;
+        }
+        // Send the invoke token: the wrapper rejects every request
+        // without it. We hit /__health which the wrapper doesn't define
+        // (so we expect 404 from a healthy worker), but we still need
+        // to get past the auth check first or we'd see 401.
+        const res = await fetch(`http://127.0.0.1:${firstPlugin.port}/__health`, {
+          signal: AbortSignal.timeout(1000),
+          headers: { Authorization: `Bearer ${this.invokeToken}` },
+        });
+        // Any response from the worker (404 from unknown route, or
+        // any 2xx) means workerd is up and serving requests. 401
+        // would mean the worker is up but rejecting our auth, which
+        // shouldn't happen since we're sending the right token.
+        if (res.status === 404 || res.ok) {
+          return;
+        }
+      } catch {
+        // Not ready yet
+      }
+      await new Promise((r) => setTimeout(r, 100));
+    }
+
+    throw new Error("[emdash:workerd] workerd failed to start within 10 seconds");
+  }
+
+ /**
+ * Stop the workerd child process.
+ *
+ * Marks the stop as intentional so the exit handler in restart() does
+ * not interpret it as a crash and trigger scheduleRestart(). Without
+ * this, every intentional reload (plugin install/uninstall) would
+ * cascade into a phantom crash-restart cycle.
+ */
+ private async stopWorkerd(): Promise {
+ if (!this.workerdProcess) return;
+ this.healthy = false;
+ this.intentionalStop = true;
+
+ const proc = this.workerdProcess;
+ this.workerdProcess = null;
+
+ // Fast path: process already exited (exitCode is set after exit)
+ if (proc.exitCode !== null) {
+ return;
+ }
+
+ return new Promise((resolve) => {
+ let exited = false;
+ proc.on("exit", () => {
+ exited = true;
+ resolve();
+ });
+ proc.kill("SIGTERM");
+ // Force kill after 5 seconds if SIGTERM was ignored.
+ // Use the local `exited` flag (not proc.killed, which flips
+ // to true as soon as a signal is queued, not when the process
+ // actually exits).
+ setTimeout(() => {
+ if (!exited) {
+ proc.kill("SIGKILL");
+ }
+ }, 5000);
+ });
+ }
+
+ /**
+ * Start the backing service HTTP server.
+ */
+ private async startBackingServer(): Promise {
+ const handler = createBackingServiceHandler(this);
+
+ return new Promise((resolve, reject) => {
+ this.backingServer = createServer(handler);
+ // Bind to localhost only (not 0.0.0.0)
+ this.backingServer.listen(0, "127.0.0.1", () => {
+ const addr = this.backingServer!.address();
+ if (addr && typeof addr === "object") {
+ this.backingPort = addr.port;
+ }
+ resolve();
+ });
+ this.backingServer.on("error", reject);
+ });
+ }
+
+ /**
+ * Stop the backing service HTTP server.
+ */
+ private async stopBackingServer(): Promise {
+ if (!this.backingServer) return;
+ return new Promise((resolve) => {
+ this.backingServer!.close(() => resolve());
+ this.backingServer = null;
+ });
+ }
+
+  /** Get the database for backing service operations */
+  get db() {
+    return this.options.db;
+  }
+
+  /** Get the email send callback */
+  get emailSend() {
+    return this.emailSendCallback;
+  }
+
+  /** Get the media storage adapter (null when the host did not provide one) */
+  get mediaStorage() {
+    return this.options.mediaStorage ?? null;
+  }
+
+  /** Get the per-startup invoke token (sent on hook/route requests to plugins) */
+  get invokeAuthToken() {
+    return this.invokeToken;
+  }
+
+  /**
+   * Look up the storage config (with indexes) for a specific plugin version.
+   *
+   * The plugins map is keyed by `${id}:${version}`. Looking up by id alone
+   * could return a stale version's storage schema after a plugin upgrade,
+   * so we require both id and version.
+   *
+   * @returns The declared storage config, or undefined when the plugin at
+   *   that exact version is not loaded or declares no storage.
+   */
+  getPluginStorageConfig(pluginId: string, version: string): Record<string, unknown> | undefined {
+    const plugin = this.plugins.get(`${pluginId}:${version}`);
+    if (plugin) {
+      return plugin.manifest.storage as Record<string, unknown> | undefined;
+    }
+    return undefined;
+  }
+
+  /** Get the current epoch (incremented on each workerd restart) */
+  get currentEpoch() {
+    return this.epoch;
+  }
+}
+
+/**
+ * A plugin running in a workerd V8 isolate.
+ *
+ * Hook and route invocations are forwarded over HTTP to the plugin's
+ * per-plugin listener (bound on 127.0.0.1 by the runner) and authenticated
+ * with the runner's per-startup invoke token. Every invocation is wrapped
+ * in a wall-time limit derived from the plugin's resolved limits.
+ */
+class WorkerdSandboxedPlugin implements SandboxedPlugin {
+  readonly id: string;
+  private manifest: PluginManifest;
+  private port: number;
+  private limits: ResolvedLimits;
+  private runner: WorkerdSandboxRunner;
+
+  constructor(
+    id: string,
+    manifest: PluginManifest,
+    port: number,
+    limits: ResolvedLimits,
+    runner: WorkerdSandboxRunner,
+  ) {
+    this.id = id;
+    this.manifest = manifest;
+    this.port = port;
+    this.limits = limits;
+    this.runner = runner;
+  }
+
+  /**
+   * Ensure workerd is running before invoking a hook or route.
+   * On first call, this triggers deferred workerd startup (batching
+   * all plugins registered via load() into a single workerd start).
+   *
+   * @throws SandboxUnavailableError if workerd is not healthy after startup.
+   */
+  private async ensureReady(): Promise<void> {
+    await this.runner.ensureRunning();
+    if (!this.runner.isHealthy()) {
+      throw new SandboxUnavailableError(this.id, "workerd is not running");
+    }
+  }
+
+  /**
+   * Invoke a hook in the sandboxed plugin via HTTP.
+   *
+   * @param hookName Name of the hook declared by the plugin.
+   * @param event Hook payload (JSON-serialized for transport).
+   * @returns The hook's return value, unwrapped from the { value } envelope.
+   * @throws Error on a non-2xx worker response or wall-time exhaustion.
+   */
+  async invokeHook(hookName: string, event: unknown): Promise<unknown> {
+    await this.ensureReady();
+    return this.withWallTimeLimit(`hook:${hookName}`, async () => {
+      const res = await fetch(`http://127.0.0.1:${this.port}/hook/${hookName}`, {
+        method: "POST",
+        headers: {
+          "Content-Type": "application/json",
+          Authorization: `Bearer ${this.runner.invokeAuthToken}`,
+        },
+        body: JSON.stringify({ event }),
+      });
+      if (!res.ok) {
+        const text = await res.text();
+        throw new Error(`Plugin ${this.id} hook ${hookName} failed: ${text}`);
+      }
+      const result = (await res.json()) as { value: unknown };
+      return result.value;
+    });
+  }
+
+  /**
+   * Invoke an API route in the sandboxed plugin via HTTP.
+   *
+   * @param routeName Name of the route declared by the plugin.
+   * @param input Parsed route input.
+   * @param request Serialized snapshot of the originating HTTP request.
+   * @returns The route handler's JSON response body.
+   * @throws Error on a non-2xx worker response or wall-time exhaustion.
+   */
+  async invokeRoute(
+    routeName: string,
+    input: unknown,
+    request: SerializedRequest,
+  ): Promise<unknown> {
+    await this.ensureReady();
+    return this.withWallTimeLimit(`route:${routeName}`, async () => {
+      const res = await fetch(`http://127.0.0.1:${this.port}/route/${routeName}`, {
+        method: "POST",
+        headers: {
+          "Content-Type": "application/json",
+          Authorization: `Bearer ${this.runner.invokeAuthToken}`,
+        },
+        body: JSON.stringify({ input, request }),
+      });
+      if (!res.ok) {
+        const text = await res.text();
+        throw new Error(`Plugin ${this.id} route ${routeName} failed: ${text}`);
+      }
+      return res.json();
+    });
+  }
+
+  /**
+   * Terminate the sandboxed plugin.
+   *
+   * Removes this plugin from the runner's plugins map and marks
+   * needsRestart so the next load/invocation rebuilds workerd without
+   * its listener. Without this, marketplace update/uninstall would
+   * leak old plugin entries (and their ports) until full server restart.
+   */
+  async terminate(): Promise<void> {
+    this.runner.unloadPlugin(this.id);
+  }
+
+  /**
+   * Enforce a wall-time limit on an operation.
+   *
+   * Races the operation against a timer; the timer is always cleared in
+   * the finally block so a fast result does not leave a pending timeout
+   * (and a later spurious rejection) behind.
+   *
+   * @param operation Label used in the timeout error message.
+   * @param fn The operation to run under the limit.
+   */
+  private async withWallTimeLimit<T>(operation: string, fn: () => Promise<T>): Promise<T> {
+    const wallTimeMs = this.limits.wallTimeMs;
+    let timer: ReturnType<typeof setTimeout> | undefined;
+
+    const timeout = new Promise<never>((_, reject) => {
+      timer = setTimeout(() => {
+        reject(
+          new Error(
+            `Plugin ${this.manifest.id} exceeded wall-time limit of ${wallTimeMs}ms during ${operation}`,
+          ),
+        );
+      }, wallTimeMs);
+    });
+
+    try {
+      return await Promise.race([fn(), timeout]);
+    } finally {
+      if (timer !== undefined) clearTimeout(timer);
+    }
+  }
+}
+
+/**
+ * Factory for the workerd sandbox runner.
+ *
+ * MiniflareDevRunner is selected only when NODE_ENV === "development" AND
+ * miniflare actually resolves. Every other NODE_ENV value — including unset,
+ * which is the default for `node server.js` and `astro preview` on
+ * self-hosted deployments — gets the production WorkerdSandboxRunner.
+ *
+ * The dev runner skips production hardening (wall-time wrapper, child
+ * process supervision, crash/restart with backoff), so silently falling
+ * back to it in production would be a security regression. Operators who
+ * want the dev runner must opt in explicitly via NODE_ENV=development.
+ */
+export const createSandboxRunner: SandboxRunnerFactory = (options) => {
+  if (process.env.NODE_ENV === "development") {
+    // Constructing MiniflareDevRunner here is cheap: it only imports
+    // miniflare dynamically inside rebuild(); isAvailable() performs the
+    // actual miniflare resolution check.
+    const devRunner = new MiniflareDevRunner(options);
+    if (devRunner.isAvailable()) {
+      return devRunner;
+    }
+  }
+  return new WorkerdSandboxRunner(options);
+};
diff --git a/packages/workerd/src/sandbox/wrapper.ts b/packages/workerd/src/sandbox/wrapper.ts
new file mode 100644
index 000000000..4fda89e16
--- /dev/null
+++ b/packages/workerd/src/sandbox/wrapper.ts
@@ -0,0 +1,414 @@
+/**
+ * Plugin Wrapper Generator for workerd
+ *
+ * Generates the code that wraps a plugin to run in a workerd isolate.
+ * Unlike the Cloudflare wrapper which uses RPC via service bindings,
+ * this wrapper uses HTTP fetch to call the Node backing service.
+ *
+ * The wrapper:
+ * - Imports plugin hooks and routes from "sandbox-plugin.js"
+ * - Creates plugin context that proxies operations via HTTP to the backing service
+ * - Exposes an HTTP fetch handler for hook/route invocation
+ */
+
+import type { PluginManifest } from "emdash";
+
+// Strips a single trailing "/" from the site base URL before path joining.
+const TRAILING_SLASH_RE = /\/$/;
+// Newlines flattened out of manifest values embedded in generated comments
+// (see sanitizeComment).
+const NEWLINE_RE = /[\n\r]/g;
+// "*/" sequences that would prematurely close a generated block comment.
+const COMMENT_CLOSE_RE = /\*\//g;
+
+export interface WrapperOptions {
+  /** Site metadata exposed on the plugin context; defaults are applied in generatePluginWrapper. */
+  site?: { name: string; url: string; locale: string };
+  /** URL of the Node backing service (e.g., http://127.0.0.1:18787) */
+  backingServiceUrl: string;
+  /** Auth token the plugin sends on outbound bridge calls to Node */
+  authToken: string;
+  /**
+   * Auth token the Node runner must send on inbound hook/route invocations.
+   * Prevents same-host attackers from invoking plugin hooks directly via
+   * the per-plugin TCP listener (which is exposed on 127.0.0.1).
+   */
+  invokeToken: string;
+}
+
+/**
+ * Generate the ES-module wrapper source for one sandboxed plugin.
+ *
+ * The emitted module imports the plugin bundle from "sandbox-plugin.js",
+ * builds a plugin context whose operations are proxied over HTTP to the
+ * Node backing service, and exports a fetch handler for hook/route
+ * invocation guarded by the per-startup invoke token.
+ *
+ * @param manifest Plugin manifest; id/version are sanitized before being
+ *   embedded in a generated comment, and capabilities gate which context
+ *   namespaces (users, email) are exposed.
+ * @param options Backing service URL and the inbound/outbound auth tokens.
+ * @returns JavaScript source for workerd to load as the plugin's module.
+ */
+export function generatePluginWrapper(manifest: PluginManifest, options: WrapperOptions): string {
+  const site = options.site ?? { name: "", url: "", locale: "en" };
+  const hasReadUsers = manifest.capabilities.includes("read:users");
+  const hasEmailSend = manifest.capabilities.includes("email:send");
+
+  return `
+// =============================================================================
+// Sandboxed Plugin Wrapper (workerd)
+// Generated by @emdash-cms/workerd
+// Plugin: ${sanitizeComment(manifest.id)}@${sanitizeComment(manifest.version)}
+// =============================================================================
+
+import pluginModule from "sandbox-plugin.js";
+
+const hooks = pluginModule?.hooks || pluginModule?.default?.hooks || {};
+const routes = pluginModule?.routes || pluginModule?.default?.routes || {};
+
+const BACKING_URL = ${JSON.stringify(options.backingServiceUrl)};
+const AUTH_TOKEN = ${JSON.stringify(options.authToken)};
+const INVOKE_TOKEN = ${JSON.stringify(options.invokeToken)};
+
+// -----------------------------------------------------------------------------
+// Bridge - HTTP calls to Node backing service
+// -----------------------------------------------------------------------------
+
+async function bridgeCall(method, body) {
+  const res = await fetch(BACKING_URL + "/" + method, {
+    method: "POST",
+    headers: {
+      "Content-Type": "application/json",
+      "Authorization": "Bearer " + AUTH_TOKEN,
+    },
+    body: JSON.stringify(body),
+  });
+  if (!res.ok) {
+    const text = await res.text();
+    throw new Error("Bridge call " + method + " failed: " + text);
+  }
+  const data = await res.json();
+  return data.result;
+}
+
+// -----------------------------------------------------------------------------
+// Context Factory
+// -----------------------------------------------------------------------------
+
+function createContext() {
+  const kv = {
+    get: (key) => bridgeCall("kv/get", { key }),
+    set: (key, value) => bridgeCall("kv/set", { key, value }),
+    delete: (key) => bridgeCall("kv/delete", { key }),
+    list: (prefix) => bridgeCall("kv/list", { prefix }),
+  };
+
+  function createStorageCollection(collectionName) {
+    return {
+      get: (id) => bridgeCall("storage/get", { collection: collectionName, id }),
+      put: (id, data) => bridgeCall("storage/put", { collection: collectionName, id, data }),
+      delete: (id) => bridgeCall("storage/delete", { collection: collectionName, id }),
+      exists: async (id) => (await bridgeCall("storage/get", { collection: collectionName, id })) !== null,
+      query: (opts) => bridgeCall("storage/query", { collection: collectionName, ...opts }),
+      count: (where) => bridgeCall("storage/count", { collection: collectionName, where }),
+      getMany: async (ids) => {
+        // Bridge returns a list of [id, data] pairs (not a plain object)
+        // so special IDs like "__proto__" survive transport. Convert
+        // back to Map to match StorageCollection.getMany() contract.
+        const entries = await bridgeCall("storage/getMany", { collection: collectionName, ids });
+        return new Map(entries || []);
+      },
+      putMany: (items) => bridgeCall("storage/putMany", { collection: collectionName, items }),
+      deleteMany: (ids) => bridgeCall("storage/deleteMany", { collection: collectionName, ids }),
+    };
+  }
+
+  const storage = new Proxy({}, {
+    get(_, collectionName) {
+      if (typeof collectionName !== "string") return undefined;
+      return createStorageCollection(collectionName);
+    }
+  });
+
+  const content = {
+    get: (collection, id) => bridgeCall("content/get", { collection, id }),
+    list: (collection, opts) => bridgeCall("content/list", { collection, ...opts }),
+    create: (collection, data) => bridgeCall("content/create", { collection, data }),
+    update: (collection, id, data) => bridgeCall("content/update", { collection, id, data }),
+    delete: (collection, id) => bridgeCall("content/delete", { collection, id }),
+  };
+
+  const media = {
+    get: (id) => bridgeCall("media/get", { id }),
+    list: (opts) => bridgeCall("media/list", opts || {}),
+    upload: (filename, contentType, bytes) => {
+      // Convert any binary input into a Uint8Array view pointing at the
+      // SAME underlying bytes (not reinterpreted). For ArrayBufferView
+      // inputs (Uint16Array, Int32Array, DataView, etc.) we must use
+      // the view's buffer + byteOffset + byteLength so we don't
+      // reinterpret element-typed values as bytes and corrupt the file.
+      let view;
+      if (bytes instanceof Uint8Array) {
+        view = bytes;
+      } else if (bytes instanceof ArrayBuffer) {
+        view = new Uint8Array(bytes);
+      } else if (ArrayBuffer.isView(bytes)) {
+        view = new Uint8Array(bytes.buffer, bytes.byteOffset, bytes.byteLength);
+      } else {
+        throw new TypeError("media.upload: bytes must be ArrayBuffer or ArrayBufferView");
+      }
+      return bridgeCall("media/upload", {
+        filename,
+        contentType,
+        bytes: Array.from(view),
+      });
+    },
+    getUploadUrl: () => { throw new Error("getUploadUrl is not available in sandbox mode. Use media.upload() instead."); },
+    delete: (id) => bridgeCall("media/delete", { id }),
+  };
+
+  // Marshal a RequestInit into a JSON-safe shape so headers, body, and other
+  // fields survive transport over the bridge. The bridge handler reverses
+  // this in unmarshalRequestInit().
+  async function marshalRequestInit(init) {
+    if (!init) return undefined;
+    const out = {};
+    if (init.method) out.method = init.method;
+    if (init.redirect) out.redirect = init.redirect;
+    // Headers: serialize as a list of [name, value] pairs so multi-value
+    // headers (Set-Cookie etc.) survive round-trip. A plain object would
+    // collapse duplicate names.
+    if (init.headers) {
+      const headers = [];
+      if (init.headers instanceof Headers) {
+        init.headers.forEach((v, k) => { headers.push([k, v]); });
+      } else if (Array.isArray(init.headers)) {
+        for (const [k, v] of init.headers) headers.push([k, v]);
+      } else {
+        for (const [k, v] of Object.entries(init.headers)) {
+          headers.push([k, v]);
+        }
+      }
+      out.headers = headers;
+    }
+    // Helper: convert a Uint8Array view to base64, preserving offset/length
+    function viewToBase64(view) {
+      let binary = "";
+      for (let i = 0; i < view.length; i++) binary += String.fromCharCode(view[i]);
+      return btoa(binary);
+    }
+
+    // Helper: get a Uint8Array view from any binary input, respecting
+    // the original byteOffset and byteLength so we don't serialize the
+    // entire backing buffer for views like Uint8Array.subarray().
+    function toBytes(input) {
+      if (input instanceof Uint8Array) return input;
+      if (input instanceof ArrayBuffer) return new Uint8Array(input);
+      if (ArrayBuffer.isView(input)) {
+        // DataView, Int8Array, Float32Array, etc. — preserve the window
+        return new Uint8Array(input.buffer, input.byteOffset, input.byteLength);
+      }
+      // Should never reach here: callers gate with ArrayBuffer/isView checks.
+      // Throw loudly so unexpected body types surface as errors instead of
+      // silently dropping data.
+      throw new TypeError("toBytes: unsupported binary input type");
+    }
+
+    // Helper: append a content-type pair to out.headers unless one is
+    // already present. out.headers is ALWAYS a list of [name, value]
+    // pairs (see above); assigning a string key on that array would be
+    // silently dropped by the bridge's unmarshal step.
+    function setDefaultContentType(value) {
+      out.headers = out.headers || [];
+      const hasContentType = out.headers.some(
+        ([name]) => String(name).toLowerCase() === "content-type"
+      );
+      if (!hasContentType) {
+        out.headers.push(["content-type", value]);
+      }
+    }
+
+    // Body: convert to base64 to preserve binary, or pass strings through
+    if (init.body !== undefined && init.body !== null) {
+      if (typeof init.body === "string") {
+        out.bodyType = "string";
+        out.body = init.body;
+      } else if (init.body instanceof ArrayBuffer || ArrayBuffer.isView(init.body)) {
+        out.bodyType = "base64";
+        out.body = viewToBase64(toBytes(init.body));
+      } else if (typeof Blob !== "undefined" && init.body instanceof Blob) {
+        // Blob/File (without going through FormData): read bytes and
+        // preserve content type if not already set
+        const bytes = new Uint8Array(await init.body.arrayBuffer());
+        out.bodyType = "base64";
+        out.body = viewToBase64(bytes);
+        if (init.body.type) {
+          setDefaultContentType(init.body.type);
+        }
+      } else if (init.body instanceof FormData) {
+        // FormData: serialize entries as { name, value, filename? }
+        const parts = [];
+        for (const [k, v] of init.body.entries()) {
+          if (typeof v === "string") {
+            parts.push({ name: k, value: v });
+          } else {
+            // File/Blob: read as base64
+            const bytes = new Uint8Array(await v.arrayBuffer());
+            parts.push({
+              name: k,
+              value: viewToBase64(bytes),
+              filename: v.name,
+              type: v.type,
+              isBlob: true,
+            });
+          }
+        }
+        out.bodyType = "formdata";
+        out.body = parts;
+      } else if (init.body instanceof URLSearchParams) {
+        out.bodyType = "string";
+        out.body = init.body.toString();
+        setDefaultContentType("application/x-www-form-urlencoded");
+      } else {
+        // Fall back to JSON for plain objects
+        out.bodyType = "string";
+        out.body = JSON.stringify(init.body);
+      }
+    }
+    return out;
+  }
+
+  const http = {
+    fetch: async (url, init) => {
+      const marshaledInit = await marshalRequestInit(init);
+      const result = await bridgeCall("http/fetch", { url, init: marshaledInit });
+      // Decode base64 body back to bytes to preserve binary content
+      // (images, audio, etc.) so arrayBuffer()/blob() work correctly.
+      const binaryString = atob(result.bodyBase64);
+      const bytes = new Uint8Array(binaryString.length);
+      for (let i = 0; i < binaryString.length; i++) {
+        bytes[i] = binaryString.charCodeAt(i);
+      }
+      return new Response(bytes, {
+        status: result.status,
+        statusText: result.statusText,
+        headers: result.headers,
+      });
+    }
+  };
+
+  const log = {
+    debug: (msg, data) => bridgeCall("log", { level: "debug", msg, data }),
+    info: (msg, data) => bridgeCall("log", { level: "info", msg, data }),
+    warn: (msg, data) => bridgeCall("log", { level: "warn", msg, data }),
+    error: (msg, data) => bridgeCall("log", { level: "error", msg, data }),
+  };
+
+  const site = ${JSON.stringify(site)};
+  const siteBaseUrl = ${JSON.stringify(site.url.replace(TRAILING_SLASH_RE, ""))};
+
+  function url(path) {
+    if (!path.startsWith("/")) {
+      throw new Error('URL path must start with "/", got: "' + path + '"');
+    }
+    if (path.startsWith("//")) {
+      throw new Error('URL path must not be protocol-relative, got: "' + path + '"');
+    }
+    return siteBaseUrl + path;
+  }
+
+  const users = ${hasReadUsers} ? {
+    get: (id) => bridgeCall("users/get", { id }),
+    getByEmail: (email) => bridgeCall("users/getByEmail", { email }),
+    list: (opts) => bridgeCall("users/list", opts || {}),
+  } : undefined;
+
+  const email = ${hasEmailSend} ? {
+    send: (message) => bridgeCall("email/send", { message }),
+  } : undefined;
+
+  return {
+    plugin: {
+      id: ${JSON.stringify(manifest.id)},
+      version: ${JSON.stringify(manifest.version || "0.0.0")},
+    },
+    storage,
+    kv,
+    content,
+    media,
+    http,
+    log,
+    site,
+    url,
+    users,
+    email,
+  };
+}
+
+// -----------------------------------------------------------------------------
+// HTTP Handler (replaces WorkerEntrypoint for workerd-on-Node)
+// -----------------------------------------------------------------------------
+
+// Constant-time string comparison. workerd doesn't expose
+// crypto.timingSafeEqual, so XOR char codes manually. Always processes
+// the full length of the longer input to avoid early-exit timing leaks.
+function constantTimeEqual(a, b) {
+  let result = a.length === b.length ? 0 : 1;
+  const len = Math.max(a.length, b.length);
+  for (let i = 0; i < len; i++) {
+    const ac = i < a.length ? a.charCodeAt(i) : 0;
+    const bc = i < b.length ? b.charCodeAt(i) : 0;
+    result |= ac ^ bc;
+  }
+  return result === 0;
+}
+
+export default {
+  async fetch(request) {
+    const url = new URL(request.url);
+
+    // Authenticate the caller. The plugin's TCP listener is exposed on
+    // 127.0.0.1, so any local process could otherwise invoke hooks/routes
+    // directly. Only the Node runner has the per-startup invoke token.
+    // Use constant-time comparison: workerd doesn't expose timingSafeEqual,
+    // so we XOR character codes manually. Same length always required.
+    const authHeader = request.headers.get("authorization") || "";
+    const expected = "Bearer " + INVOKE_TOKEN;
+    if (!constantTimeEqual(authHeader, expected)) {
+      return new Response("Unauthorized", { status: 401 });
+    }
+
+    // Hook invocation: POST /hook/{hookName}
+    if (url.pathname.startsWith("/hook/")) {
+      const hookName = url.pathname.slice(6); // Remove "/hook/"
+      const { event } = await request.json();
+      const ctx = createContext();
+
+      const hookDef = hooks[hookName];
+      if (!hookDef) {
+        return Response.json({ value: undefined });
+      }
+
+      const handler = typeof hookDef === "function" ? hookDef : hookDef.handler;
+      if (typeof handler !== "function") {
+        return new Response("Hook " + hookName + " handler is not a function", { status: 500 });
+      }
+
+      try {
+        const result = await handler(event, ctx);
+        return Response.json({ value: result });
+      } catch (err) {
+        return new Response(err.message || "Hook error", { status: 500 });
+      }
+    }
+
+    // Route invocation: POST /route/{routeName}
+    if (url.pathname.startsWith("/route/")) {
+      const routeName = url.pathname.slice(7); // Remove "/route/"
+      const { input, request: serializedRequest } = await request.json();
+      const ctx = createContext();
+
+      const route = routes[routeName];
+      if (!route) {
+        return new Response("Route not found: " + routeName, { status: 404 });
+      }
+
+      const handler = typeof route === "function" ? route : route.handler;
+      if (typeof handler !== "function") {
+        return new Response("Route " + routeName + " handler is not a function", { status: 500 });
+      }
+
+      try {
+        const result = await handler(
+          { input, request: serializedRequest, requestMeta: serializedRequest?.meta },
+          ctx,
+        );
+        return Response.json(result);
+      } catch (err) {
+        return new Response(err.message || "Route error", { status: 500 });
+      }
+    }
+
+    return new Response("Not found", { status: 404 });
+  }
+};
+`;
+}
+
+/**
+ * Make a manifest-derived value safe to embed inside a generated block
+ * comment: flatten newlines to spaces, then break up any "*" + "/" pair
+ * that would close the comment early.
+ */
+function sanitizeComment(s: string): string {
+  const flattened = s.replace(NEWLINE_RE, " ");
+  return flattened.replace(COMMENT_CLOSE_RE, "* /");
+}
diff --git a/packages/workerd/test/bridge-handler.test.ts b/packages/workerd/test/bridge-handler.test.ts
new file mode 100644
index 000000000..b7a64ffa9
--- /dev/null
+++ b/packages/workerd/test/bridge-handler.test.ts
@@ -0,0 +1,361 @@
+/**
+ * Bridge Handler Conformance Tests
+ *
+ * Tests the shared bridge handler that both the production (workerd)
+ * and dev (miniflare) runners use. This is the conformance test suite
+ * that ensures identical behavior across all sandbox runners.
+ *
+ * These tests exercise capability enforcement, KV isolation, and
+ * error handling at the bridge level.
+ */
+
+import Database from "better-sqlite3";
+import { Kysely, SqliteDialect } from "kysely";
+import { describe, it, expect, beforeEach, afterEach } from "vitest";
+
+import { createBridgeHandler } from "../src/sandbox/bridge-handler.js";
+
+// Set up an in-memory SQLite database with the minimum tables needed.
+// The schema is created dynamically per test, so the Kysely database
+// type is intentionally loose.
+function createTestDb() {
+  const sqlite = new Database(":memory:");
+  const db = new Kysely<any>({
+    dialect: new SqliteDialect({ database: sqlite }),
+  });
+  return { db, sqlite };
+}
+
+// Create the tables the bridge handler touches (plugin storage + users)
+// and seed one user row for the read:users tests.
+async function setupTables(db: Kysely<any>) {
+  // Plugin storage table (used for both KV and document storage)
+  await db.schema
+    .createTable("_plugin_storage")
+    .addColumn("plugin_id", "text", (col) => col.notNull())
+    .addColumn("collection", "text", (col) => col.notNull())
+    .addColumn("id", "text", (col) => col.notNull())
+    .addColumn("data", "text", (col) => col.notNull())
+    .addColumn("created_at", "text", (col) => col.notNull())
+    .addColumn("updated_at", "text", (col) => col.notNull())
+    .addPrimaryKeyConstraint("pk_plugin_storage", ["plugin_id", "collection", "id"])
+    .execute();
+
+  // Users table (matches migration 001)
+  await db.schema
+    .createTable("users")
+    .addColumn("id", "text", (col) => col.primaryKey())
+    .addColumn("email", "text", (col) => col.notNull())
+    .addColumn("name", "text")
+    .addColumn("role", "integer", (col) => col.notNull())
+    .addColumn("created_at", "text", (col) => col.notNull())
+    .execute();
+
+  // Insert a test user
+  await db
+    .insertInto("users" as any)
+    .values({
+      id: "user-1",
+      email: "test@example.com",
+      name: "Test User",
+      role: 50,
+      created_at: new Date().toISOString(),
+    })
+    .execute();
+}
+
+describe("Bridge Handler Conformance", () => {
+  // Fresh in-memory database per test; torn down in afterEach.
+  let db: Kysely<any>;
+  let sqlite: Database.Database;
+
+  beforeEach(async () => {
+    const ctx = createTestDb();
+    db = ctx.db;
+    sqlite = ctx.sqlite;
+    await setupTables(db);
+  });
+
+  afterEach(async () => {
+    await db.destroy();
+    sqlite.close();
+  });
+
+  // Build a bridge handler for the default test plugin with opt-in
+  // capabilities, allowed hosts, and declared storage collections.
+  function makeHandler(opts: {
+    capabilities?: string[];
+    allowedHosts?: string[];
+    storageCollections?: string[];
+  }) {
+    return createBridgeHandler({
+      pluginId: "test-plugin",
+      version: "1.0.0",
+      capabilities: opts.capabilities ?? [],
+      allowedHosts: opts.allowedHosts ?? [],
+      storageCollections: opts.storageCollections ?? [],
+      db,
+      emailSend: () => null,
+    });
+  }
+
+  // Invoke one bridge method the way the workerd wrapper does: POST JSON
+  // to /{method} and parse the { result, error } envelope.
+  async function call(
+    handler: ReturnType<typeof createBridgeHandler>,
+    method: string,
+    body: Record<string, unknown> = {},
+  ) {
+    const request = new Request(`http://bridge/${method}`, {
+      method: "POST",
+      headers: { "Content-Type": "application/json" },
+      body: JSON.stringify(body),
+    });
+    const response = await handler(request);
+    return response.json() as Promise<{ result?: unknown; error?: string }>;
+  }
+
+  // ── KV Operations ────────────────────────────────────────────────────
+
+  describe("KV operations", () => {
+    it("set and get a value", async () => {
+      const handler = makeHandler({});
+      await call(handler, "kv/set", { key: "test", value: "hello" });
+      const result = await call(handler, "kv/get", { key: "test" });
+      expect(result.result).toBe("hello");
+    });
+
+    it("get returns null for non-existent key", async () => {
+      const handler = makeHandler({});
+      const result = await call(handler, "kv/get", { key: "missing" });
+      expect(result.result).toBeNull();
+    });
+
+    it("delete removes a key", async () => {
+      const handler = makeHandler({});
+      await call(handler, "kv/set", { key: "to-delete", value: "bye" });
+      await call(handler, "kv/delete", { key: "to-delete" });
+      const result = await call(handler, "kv/get", { key: "to-delete" });
+      expect(result.result).toBeNull();
+    });
+
+    it("list returns keys with prefix", async () => {
+      const handler = makeHandler({});
+      await call(handler, "kv/set", { key: "settings:theme", value: "dark" });
+      await call(handler, "kv/set", { key: "settings:lang", value: "en" });
+      await call(handler, "kv/set", { key: "state:count", value: 42 });
+
+      // Only "settings:" keys should come back; "state:count" is excluded.
+      const result = await call(handler, "kv/list", { prefix: "settings:" });
+      const items = result.result as Array<{ key: string; value: unknown }>;
+      expect(items).toHaveLength(2);
+      expect(items.map((i) => i.key).toSorted()).toEqual(["settings:lang", "settings:theme"]);
+    });
+
+    it("KV is scoped per plugin (isolation)", async () => {
+      // Two handlers over the SAME database but different plugin ids:
+      // isolation must come from plugin_id scoping, not separate DBs.
+      const handlerA = createBridgeHandler({
+        pluginId: "plugin-a",
+        version: "1.0.0",
+        capabilities: [],
+        allowedHosts: [],
+        storageCollections: [],
+        db,
+        emailSend: () => null,
+      });
+      const handlerB = createBridgeHandler({
+        pluginId: "plugin-b",
+        version: "1.0.0",
+        capabilities: [],
+        allowedHosts: [],
+        storageCollections: [],
+        db,
+        emailSend: () => null,
+      });
+
+      // Plugin A sets a value
+      await call(handlerA, "kv/set", { key: "secret", value: "a-data" });
+
+      // Plugin B cannot see it
+      const resultB = await call(handlerB, "kv/get", { key: "secret" });
+      expect(resultB.result).toBeNull();
+
+      // Plugin A can see it
+      const resultA = await call(handlerA, "kv/get", { key: "secret" });
+      expect(resultA.result).toBe("a-data");
+    });
+  });
+
+  // ── Capability Enforcement ────────────────────────────────────────────
+
+  describe("capability enforcement", () => {
+    it("rejects content read without read:content capability", async () => {
+      const handler = makeHandler({ capabilities: [] });
+      const result = await call(handler, "content/get", {
+        collection: "posts",
+        id: "123",
+      });
+      expect(result.error).toContain("Missing capability: read:content");
+    });
+
+    it("allows content read with read:content", async () => {
+      // Create a content table first
+      await db.schema
+        .createTable("ec_posts")
+        .addColumn("id", "text", (col) => col.primaryKey())
+        .addColumn("deleted_at", "text")
+        .addColumn("title", "text")
+        .execute();
+
+      const handler = makeHandler({ capabilities: ["read:content"] });
+      const result = await call(handler, "content/get", {
+        collection: "posts",
+        id: "123",
+      });
+      // No error, returns null (post doesn't exist)
+      expect(result.error).toBeUndefined();
+      expect(result.result).toBeNull();
+    });
+
+    it("write:content does NOT imply read:content (matches Cloudflare bridge)", async () => {
+      // The bridge enforces capabilities strictly: a plugin that declares
+      // only write:content cannot call ctx.content.get/list. This matches
+      // the Cloudflare PluginBridge behavior. The plugin must declare
+      // read:content explicitly to read.
+      await db.schema
+        .createTable("ec_posts")
+        .addColumn("id", "text", (col) => col.primaryKey())
+        .addColumn("deleted_at", "text")
+        .addColumn("title", "text")
+        .execute();
+
+      const handler = makeHandler({ capabilities: ["write:content"] });
+      const result = await call(handler, "content/get", {
+        collection: "posts",
+        id: "123",
+      });
+      expect(result.error).toContain("Missing capability: read:content");
+    });
+
+    it("rejects user read without read:users capability", async () => {
+      const handler = makeHandler({ capabilities: [] });
+      const result = await call(handler, "users/get", { id: "user-1" });
+      expect(result.error).toContain("Missing capability: read:users");
+    });
+
+    it("allows user read with read:users", async () => {
+      // "user-1" is seeded by setupTables in beforeEach.
+      const handler = makeHandler({ capabilities: ["read:users"] });
+      const result = await call(handler, "users/get", { id: "user-1" });
+      expect(result.error).toBeUndefined();
+      const user = result.result as { id: string; email: string };
+      expect(user.id).toBe("user-1");
+      expect(user.email).toBe("test@example.com");
+    });
+
+    it("rejects network fetch without network:fetch capability", async () => {
+      const handler = makeHandler({ capabilities: [] });
+      const result = await call(handler, "http/fetch", {
+        url: "https://example.com",
+      });
+      expect(result.error).toContain("Missing capability: network:fetch");
+    });
+
+    it("rejects email send without email:send capability", async () => {
+      const handler = makeHandler({ capabilities: [] });
+      const result = await call(handler, "email/send", {
+        message: { to: "a@b.com", subject: "hi", text: "hello" },
+      });
+      expect(result.error).toContain("Missing capability: email:send");
+    });
+  });
+
+  // ── Storage (document store) ──────────────────────────────────────────
+
+  describe("plugin storage", () => {
+    it("rejects access to undeclared storage collection", async () => {
+      const handler = makeHandler({ storageCollections: ["logs"] });
+      const result = await call(handler, "storage/get", {
+        collection: "secrets",
+        id: "1",
+      });
+      expect(result.error).toContain("Storage collection not declared: secrets");
+    });
+
+    it("allows access to declared storage collection", async () => {
+      const handler = makeHandler({ storageCollections: ["logs"] });
+      const result = await call(handler, "storage/get", {
+        collection: "logs",
+        id: "1",
+      });
+      expect(result.error).toBeUndefined();
+      expect(result.result).toBeNull();
+    });
+
+    it("put and get storage document", async () => {
+      const handler = makeHandler({ storageCollections: ["logs"] });
+      await call(handler, "storage/put", {
+        collection: "logs",
+        id: "log-1",
+        data: { message: "hello", level: "info" },
+      });
+      const result = await call(handler, "storage/get", {
+        collection: "logs",
+        id: "log-1",
+      });
+      expect(result.result).toEqual({ message: "hello", level: "info" });
+    });
+
+    it("storage is scoped per plugin", async () => {
+      // Same declared collection name, different plugin ids: documents
+      // must not leak across the plugin_id boundary.
+      const handlerA = createBridgeHandler({
+        pluginId: "plugin-a",
+        version: "1.0.0",
+        capabilities: [],
+        allowedHosts: [],
+        storageCollections: ["data"],
+        db,
+        emailSend: () => null,
+      });
+      const handlerB = createBridgeHandler({
+        pluginId: "plugin-b",
+        version: "1.0.0",
+        capabilities: [],
+        allowedHosts: [],
+        storageCollections: ["data"],
+        db,
+        emailSend: () => null,
+      });
+
+      await call(handlerA, "storage/put", {
+        collection: "data",
+        id: "item-1",
+        data: { owner: "a" },
+      });
+
+      // Plugin B cannot see plugin A's data
+      const resultB = await call(handlerB, "storage/get", {
+        collection: "data",
+        id: "item-1",
+      });
+      expect(resultB.result).toBeNull();
+    });
+  });
+
+  // ── Error Handling ────────────────────────────────────────────────────
+
+  describe("error handling", () => {
+    it("returns error for unknown bridge method", async () => {
+      const handler = makeHandler({});
+      const result = await call(handler, "unknown/method");
+      expect(result.error).toContain("Unknown bridge method: unknown/method");
+    });
+
+    it("returns error for missing required parameters", async () => {
+      // content/get requires collection and id; send an empty body.
+      const handler = makeHandler({ capabilities: ["read:content"] });
+      const result = await call(handler, "content/get", {});
+      expect(result.error).toContain("Missing required string parameter");
+    });
+  });
+
+  // ── Logging ───────────────────────────────────────────────────────────
+
+  describe("logging", () => {
+    it("log call succeeds without capabilities", async () => {
+      // Logging is a baseline facility: no capability gate applies.
+      const handler = makeHandler({});
+      const result = await call(handler, "log", {
+        level: "info",
+        msg: "test message",
+      });
+      expect(result.error).toBeUndefined();
+      expect(result.result).toBeNull();
+    });
+  });
+});
diff --git a/packages/workerd/test/miniflare-isolation.test.ts b/packages/workerd/test/miniflare-isolation.test.ts
new file mode 100644
index 000000000..2eaa8ccc4
--- /dev/null
+++ b/packages/workerd/test/miniflare-isolation.test.ts
@@ -0,0 +1,342 @@
+/**
+ * Miniflare Isolation Tests
+ *
+ * Integration tests verifying that miniflare (wrapping workerd) provides
+ * the isolation primitives needed for the MiniflareDevRunner:
+ *
+ * - Service bindings scope capabilities per plugin
+ * - External service bindings route calls to Node handler functions
+ * - Plugin code loads from strings (bundles from DB/R2)
+ * - KV namespace bindings provide per-plugin isolated storage
+ * - Plugins without bindings cannot access unavailable capabilities
+ * - Worker reconfiguration supports plugin install/uninstall
+ */
+
+import { Miniflare } from "miniflare";
+import { afterEach, describe, expect, it } from "vitest";
+
+describe("miniflare plugin isolation", () => {
+ let mf: Miniflare | undefined;
+
+ afterEach(async () => {
+ if (mf) {
+ await mf.dispose();
+ mf = undefined;
+ }
+ });
+
+ it("can create an isolated plugin worker with scoped service bindings", async () => {
+ // This test creates:
+ // 1. A "bridge" worker that simulates the backing service (content API)
+ // 2. A "plugin" worker that calls the bridge via service binding
+ // 3. Verifies the plugin can only access what the binding exposes
+ //
+ // dispatchFetch always hits the first worker in the array.
+ // To invoke a specific worker, we put the plugin first and use
+ // service bindings to connect it to the bridge.
+
+ mf = new Miniflare({
+ workers: [
+ {
+ // Plugin is first so dispatchFetch targets it
+ name: "plugin-test",
+ modules: true,
+ serviceBindings: {
+ BRIDGE: "bridge",
+ },
+ script: `
+ export default {
+ async fetch(request, env) {
+ const url = new URL(request.url);
+
+ if (url.pathname === "/hook/afterSave") {
+ const res = await env.BRIDGE.fetch("http://bridge/content/get", {
+ method: "POST",
+ body: JSON.stringify({ collection: "posts", id: "123" }),
+ headers: { "Content-Type": "application/json" },
+ });
+ const data = await res.json();
+ return Response.json({
+ hookResult: "processed",
+ contentFromBridge: data,
+ });
+ }
+
+ return new Response("Unknown hook", { status: 404 });
+ }
+ };
+ `,
+ },
+ {
+ name: "bridge",
+ modules: true,
+ script: `
+ export default {
+ async fetch(request) {
+ const url = new URL(request.url);
+ if (url.pathname === "/content/get") {
+ const { collection, id } = await request.json();
+ return Response.json({
+ success: true,
+ data: { id, type: collection, slug: "test-post", data: { title: "Hello" } }
+ });
+ }
+ return new Response("Not found", { status: 404 });
+ }
+ };
+ `,
+ },
+ ],
+ });
+
+ // dispatchFetch hits the first worker (plugin-test)
+ const response = await mf.dispatchFetch("http://localhost/hook/afterSave");
+ const result = (await response.json()) as {
+ hookResult: string;
+ contentFromBridge: {
+ success: boolean;
+ data: { id: string; type: string; slug: string };
+ };
+ };
+
+ expect(result.hookResult).toBe("processed");
+ expect(result.contentFromBridge.success).toBe(true);
+ expect(result.contentFromBridge.data.id).toBe("123");
+ expect(result.contentFromBridge.data.type).toBe("posts");
+ });
+
+ it("plugins are isolated from each other", async () => {
+ // Two plugins with different service bindings.
+ // Plugin A has BRIDGE binding (read:content).
+ // Plugin B has NO bridge binding (no capabilities).
+ // Use separate Miniflare instances to test isolation,
+ // since dispatchFetch always hits the first worker.
+
+ // Test Plugin A: has BRIDGE binding
+ mf = new Miniflare({
+ workers: [
+ {
+ name: "plugin-a",
+ modules: true,
+ serviceBindings: {
+ BRIDGE: async () => {
+ return Response.json({ success: true, data: { secret: "bridge-data" } });
+ },
+ },
+ script: `
+ export default {
+ async fetch(request, env) {
+ const res = await env.BRIDGE.fetch("http://bridge/");
+ const data = await res.json();
+ return Response.json({ hasAccess: true, data });
+ }
+ };
+ `,
+ },
+ ],
+ });
+
+ const resA = await mf.dispatchFetch("http://localhost/");
+ const dataA = (await resA.json()) as { hasAccess: boolean };
+ expect(dataA.hasAccess).toBe(true);
+ await mf.dispose();
+
+ // Test Plugin B: NO bridge binding
+ mf = new Miniflare({
+ workers: [
+ {
+ name: "plugin-b",
+ modules: true,
+ // NO service bindings - this plugin has no capabilities
+ script: `
+ export default {
+ async fetch(request, env) {
+ const hasBridge = "BRIDGE" in env;
+ return Response.json({ hasBridge });
+ }
+ };
+ `,
+ },
+ ],
+ });
+
+ const resB = await mf.dispatchFetch("http://localhost/");
+ const dataB = (await resB.json()) as { hasBridge: boolean };
+ expect(dataB.hasBridge).toBe(false);
+ });
+
+ it("can load plugin code dynamically from a string", async () => {
+ // Test that we can pass plugin code as a string (not a file path).
+ // This is critical for the runtime: plugin bundles come from the DB/R2,
+ // not from the filesystem.
+
+ const pluginCode = `
+ export default {
+ async fetch(request, env) {
+ return Response.json({
+ pluginId: "dynamic-plugin",
+ version: "1.0.0",
+ message: "I was loaded from a string!",
+ });
+ }
+ };
+ `;
+
+ mf = new Miniflare({
+ workers: [
+ {
+ name: "dynamic-plugin",
+ modules: true,
+ script: pluginCode,
+ },
+ ],
+ });
+
+ const response = await mf.dispatchFetch("http://dynamic-plugin/");
+ const result = (await response.json()) as { pluginId: string; message: string };
+ expect(result.pluginId).toBe("dynamic-plugin");
+ expect(result.message).toBe("I was loaded from a string!");
+ });
+
+ it("can use KV namespace bindings per plugin", async () => {
+ // Plugin with KV namespace binding
+ mf = new Miniflare({
+ kvNamespaces: ["PLUGIN_KV"],
+ modules: true,
+ script: `
+ export default {
+ async fetch(request, env) {
+ const url = new URL(request.url);
+ if (url.pathname === "/set") {
+ await env.PLUGIN_KV.put("test-key", "test-value");
+ return new Response("set");
+ }
+ if (url.pathname === "/get") {
+ const val = await env.PLUGIN_KV.get("test-key");
+ return Response.json({ value: val });
+ }
+ return new Response("unknown", { status: 404 });
+ }
+ };
+ `,
+ });
+
+ // Set and get
+ await mf.dispatchFetch("http://localhost/set");
+ const getRes = await mf.dispatchFetch("http://localhost/get");
+ const getData = (await getRes.json()) as { value: string };
+ expect(getData.value).toBe("test-value");
+ await mf.dispose();
+
+ // Plugin without KV has no access
+ mf = new Miniflare({
+ modules: true,
+ script: `
+ export default {
+ async fetch(request, env) {
+ const hasKv = "PLUGIN_KV" in env;
+ return Response.json({ hasKv });
+ }
+ };
+ `,
+ });
+
+ const noKvRes = await mf.dispatchFetch("http://localhost/");
+ const noKvData = (await noKvRes.json()) as { hasKv: boolean };
+ expect(noKvData.hasKv).toBe(false);
+ });
+
+ it("can reconfigure workers without full restart (add/remove plugins)", async () => {
+ // Test that we can dispose and recreate miniflare with different workers.
+ // This simulates plugin install/uninstall.
+
+ // Start with one plugin
+ mf = new Miniflare({
+ modules: true,
+ script: `
+ export default {
+ async fetch() { return Response.json({ id: "original" }); }
+ };
+ `,
+ });
+
+ const res1 = await mf.dispatchFetch("http://localhost/");
+ const data1 = (await res1.json()) as { id: string };
+ expect(data1.id).toBe("original");
+
+ // Dispose and recreate with a different plugin
+ await mf.dispose();
+
+ mf = new Miniflare({
+ modules: true,
+ script: `
+ export default {
+ async fetch() { return Response.json({ id: "new-plugin" }); }
+ };
+ `,
+ });
+
+ const res2 = await mf.dispatchFetch("http://localhost/");
+ const data2 = (await res2.json()) as { id: string };
+ expect(data2.id).toBe("new-plugin");
+ });
+
+ it("external service binding to Node HTTP server works", async () => {
+ // Critical test: can a plugin worker call an EXTERNAL HTTP service
+ // (simulating the Node backing service) via a service binding?
+ //
+ // Miniflare supports `serviceBindings` with custom handler functions.
+ // This maps to how the Node process would expose backing services.
+
+ mf = new Miniflare({
+ workers: [
+ {
+ name: "plugin-with-external-bridge",
+ modules: true,
+ serviceBindings: {
+ BRIDGE: async (request: Request) => {
+ // This function runs in Node, not in workerd.
+ // It simulates the backing service HTTP handler.
+ const url = new URL(request.url);
+ if (url.pathname === "/content/get") {
+ const body = (await request.json()) as { collection: string; id: string };
+ return Response.json({
+ success: true,
+ data: {
+ id: body.id,
+ type: body.collection,
+ data: { title: "From Node backing service" },
+ },
+ });
+ }
+ return new Response("Not found", { status: 404 });
+ },
+ },
+ script: `
+ export default {
+ async fetch(request, env) {
+ const res = await env.BRIDGE.fetch("http://bridge/content/get", {
+ method: "POST",
+ body: JSON.stringify({ collection: "posts", id: "from-plugin" }),
+ headers: { "Content-Type": "application/json" },
+ });
+ const data = await res.json();
+ return Response.json(data);
+ }
+ };
+ `,
+ },
+ ],
+ });
+
+ const response = await mf.dispatchFetch("http://plugin-with-external-bridge/");
+ const result = (await response.json()) as {
+ success: boolean;
+ data: { id: string; data: { title: string } };
+ };
+
+ expect(result.success).toBe(true);
+ expect(result.data.id).toBe("from-plugin");
+ expect(result.data.data.title).toBe("From Node backing service");
+ });
+});
diff --git a/packages/workerd/test/plugin-integration.test.ts b/packages/workerd/test/plugin-integration.test.ts
new file mode 100644
index 000000000..63faeb1e9
--- /dev/null
+++ b/packages/workerd/test/plugin-integration.test.ts
@@ -0,0 +1,465 @@
+/**
+ * Plugin Integration Tests
+ *
+ * Exercises the bridge handler with the same operations that EmDash's
+ * shipped plugins perform. Uses a real SQLite database with migrations
+ * to test against the actual schema, not hand-rolled test tables.
+ *
+ * This validates that the workerd bridge handler produces the same
+ * results as the Cloudflare PluginBridge for real plugin workloads.
+ *
+ * Tests are modeled after the sandboxed-test plugin's routes:
+ * - kv/test: set, get, delete a KV entry
+ * - storage/test: put, get, count in a declared storage collection
+ * - content/list: list content with read:content capability
+ * - content lifecycle: create, read, update, soft-delete
+ */
+
+import Database from "better-sqlite3";
+import { Kysely, SqliteDialect } from "kysely";
+import { afterEach, beforeEach, describe, expect, it } from "vitest";
+
+import { createBridgeHandler } from "../src/sandbox/bridge-handler.js";
+
+/**
+ * Create a test database with the minimum schema needed for plugin operations.
+ * Matches the real migration schema (001_initial + 004_plugins).
+ */
+function createTestDb() {
+ const sqlite = new Database(":memory:");
+ const db = new Kysely({
+ dialect: new SqliteDialect({ database: sqlite }),
+ });
+ return { db, sqlite };
+}
+
+async function runMigrations(db: Kysely<any>) {
+ // Plugin storage (migration 004)
+ await db.schema
+ .createTable("_plugin_storage")
+ .addColumn("plugin_id", "text", (col) => col.notNull())
+ .addColumn("collection", "text", (col) => col.notNull())
+ .addColumn("id", "text", (col) => col.notNull())
+ .addColumn("data", "text", (col) => col.notNull())
+ .addColumn("created_at", "text", (col) => col.notNull())
+ .addColumn("updated_at", "text", (col) => col.notNull())
+ .addPrimaryKeyConstraint("pk_plugin_storage", ["plugin_id", "collection", "id"])
+ .execute();
+
+ // Users (migration 001)
+ await db.schema
+ .createTable("users")
+ .addColumn("id", "text", (col) => col.primaryKey())
+ .addColumn("email", "text", (col) => col.notNull())
+ .addColumn("name", "text")
+ .addColumn("role", "integer", (col) => col.notNull())
+ .addColumn("created_at", "text", (col) => col.notNull())
+ .execute();
+
+ // Media (migration 001)
+ await db.schema
+ .createTable("media")
+ .addColumn("id", "text", (col) => col.primaryKey())
+ .addColumn("filename", "text", (col) => col.notNull())
+ .addColumn("mime_type", "text", (col) => col.notNull())
+ .addColumn("size", "integer")
+ .addColumn("storage_key", "text", (col) => col.notNull())
+ .addColumn("status", "text", (col) => col.notNull().defaultTo("pending"))
+ .addColumn("created_at", "text", (col) => col.notNull())
+ .execute();
+
+ // Content table for posts (created by SchemaRegistry in real code)
+ await db.schema
+ .createTable("ec_posts")
+ .addColumn("id", "text", (col) => col.primaryKey())
+ .addColumn("slug", "text")
+ .addColumn("status", "text", (col) => col.notNull().defaultTo("draft"))
+ .addColumn("author_id", "text")
+ .addColumn("created_at", "text", (col) => col.notNull())
+ .addColumn("updated_at", "text", (col) => col.notNull())
+ .addColumn("published_at", "text")
+ .addColumn("deleted_at", "text")
+ .addColumn("version", "integer", (col) => col.notNull().defaultTo(1))
+ .addColumn("title", "text")
+ .addColumn("body", "text")
+ .execute();
+}
+
+describe("Plugin integration: sandboxed-test plugin operations", () => {
+ let db: Kysely<any>;
+ let sqlite: Database.Database;
+
+ beforeEach(async () => {
+ const ctx = createTestDb();
+ db = ctx.db;
+ sqlite = ctx.sqlite;
+ await runMigrations(db);
+ });
+
+ afterEach(async () => {
+ await db.destroy();
+ sqlite.close();
+ });
+
+ /**
+ * Create a bridge handler matching the sandboxed-test plugin's capabilities:
+ * read:content, network:fetch with allowedHosts: ["httpbin.org"]
+ * storage: { events: { indexes: ["timestamp", "type"] } }
+ */
+ function makePluginHandler() {
+ return createBridgeHandler({
+ pluginId: "sandboxed-test",
+ version: "0.0.1",
+ capabilities: ["read:content", "network:fetch"],
+ allowedHosts: ["httpbin.org"],
+ storageCollections: ["events"],
+ db,
+ emailSend: () => null,
+ });
+ }
+
+ async function call(
+ handler: ReturnType<typeof createBridgeHandler>,
+ method: string,
+ body: Record<string, unknown> = {},
+ ) {
+ const request = new Request(`http://bridge/${method}`, {
+ method: "POST",
+ headers: { "Content-Type": "application/json" },
+ body: JSON.stringify(body),
+ });
+ const response = await handler(request);
+ return response.json() as Promise<{ result?: unknown; error?: string }>;
+ }
+
+ // ── Mirrors sandboxed-test plugin's kv/test route ────────────────────
+
+ it("KV round-trip: set, get, delete", async () => {
+ const handler = makePluginHandler();
+
+ // Set
+ await call(handler, "kv/set", {
+ key: "sandbox-test-key",
+ value: { tested: true, time: 12345 },
+ });
+
+ // Get
+ const getResult = await call(handler, "kv/get", { key: "sandbox-test-key" });
+ expect(getResult.result).toEqual({ tested: true, time: 12345 });
+
+ // Delete
+ const deleteResult = await call(handler, "kv/delete", { key: "sandbox-test-key" });
+ expect(deleteResult.result).toBe(true);
+
+ // Verify deleted
+ const afterDelete = await call(handler, "kv/get", { key: "sandbox-test-key" });
+ expect(afterDelete.result).toBeNull();
+ });
+
+ // ── Mirrors sandboxed-test plugin's storage/test route ───────────────
+
+ it("Storage round-trip: put, get, count", async () => {
+ const handler = makePluginHandler();
+
+ // Put
+ await call(handler, "storage/put", {
+ collection: "events",
+ id: "event-1",
+ data: {
+ timestamp: "2025-01-01T00:00:00Z",
+ type: "test",
+ message: "Sandboxed plugin storage test",
+ },
+ });
+
+ // Get
+ const getResult = await call(handler, "storage/get", {
+ collection: "events",
+ id: "event-1",
+ });
+ expect(getResult.result).toEqual({
+ timestamp: "2025-01-01T00:00:00Z",
+ type: "test",
+ message: "Sandboxed plugin storage test",
+ });
+
+ // Count
+ const countResult = await call(handler, "storage/count", { collection: "events" });
+ expect(countResult.result).toBe(1);
+ });
+
+ // ── Mirrors sandboxed-test plugin's content/list route ───────────────
+
+ it("Content list with read:content capability", async () => {
+ const handler = makePluginHandler();
+
+ // Seed some content
+ const now = new Date().toISOString();
+ await db
+ .insertInto("ec_posts" as any)
+ .values([
+ {
+ id: "post-1",
+ slug: "hello",
+ status: "published",
+ title: "Hello World",
+ created_at: now,
+ updated_at: now,
+ version: 1,
+ },
+ {
+ id: "post-2",
+ slug: "second",
+ status: "draft",
+ title: "Second Post",
+ created_at: now,
+ updated_at: now,
+ version: 1,
+ },
+ ])
+ .execute();
+
+ const result = await call(handler, "content/list", { collection: "posts", limit: 5 });
+ expect(result.error).toBeUndefined();
+
+ const data = result.result as {
+ items: Array<{ id: string; type: string; data: Record<string, unknown> }>;
+ hasMore: boolean;
+ };
+ expect(data.items).toHaveLength(2);
+ expect(data.hasMore).toBe(false);
+ // Items should be transformed via rowToContentItem
+ expect(data.items[0]!.type).toBe("posts");
+ expect(data.items[0]!.data.title).toBeDefined();
+ });
+
+ // ── Content lifecycle: create, read, update, soft-delete ─────────────
+
+ describe("content lifecycle (requires read:content + write:content)", () => {
+ function makeWriteHandler() {
+ // Bridge enforces capabilities strictly: write:content does NOT
+ // imply read:content. Plugins that need both must declare both.
+ return createBridgeHandler({
+ pluginId: "sandboxed-test",
+ version: "0.0.1",
+ capabilities: ["read:content", "write:content"],
+ allowedHosts: [],
+ storageCollections: [],
+ db,
+ emailSend: () => null,
+ });
+ }
+
+ it("create, read, update, delete", async () => {
+ const handler = makeWriteHandler();
+
+ // Create
+ const createResult = await call(handler, "content/create", {
+ collection: "posts",
+ data: { title: "New Post", body: "Content here", slug: "new-post", status: "draft" },
+ });
+ expect(createResult.error).toBeUndefined();
+ const created = createResult.result as {
+ id: string;
+ type: string;
+ data: Record<string, unknown>;
+ };
+ expect(created.type).toBe("posts");
+ expect(created.data.title).toBe("New Post");
+ expect(created.id).toBeTruthy();
+
+ // Read
+ const readResult = await call(handler, "content/get", {
+ collection: "posts",
+ id: created.id,
+ });
+ expect(readResult.error).toBeUndefined();
+ const read = readResult.result as { id: string; data: Record<string, unknown> };
+ expect(read.data.title).toBe("New Post");
+
+ // Update
+ const updateResult = await call(handler, "content/update", {
+ collection: "posts",
+ id: created.id,
+ data: { title: "Updated Post" },
+ });
+ expect(updateResult.error).toBeUndefined();
+ const updated = updateResult.result as { id: string; data: Record<string, unknown> };
+ expect(updated.data.title).toBe("Updated Post");
+
+ // Delete (soft-delete)
+ const deleteResult = await call(handler, "content/delete", {
+ collection: "posts",
+ id: created.id,
+ });
+ expect(deleteResult.result).toBe(true);
+
+ // Verify soft-deleted: get returns null
+ const afterDelete = await call(handler, "content/get", {
+ collection: "posts",
+ id: created.id,
+ });
+ expect(afterDelete.result).toBeNull();
+ });
+ });
+
+ // ── Capability enforcement matches real plugin config ─────────────────
+
+ it("sandboxed-test plugin cannot write content (only has read:content)", async () => {
+ const handler = makePluginHandler();
+ const result = await call(handler, "content/create", {
+ collection: "posts",
+ data: { title: "Should fail" },
+ });
+ expect(result.error).toContain("Missing capability: write:content");
+ });
+
+ it("write-only plugin cannot read content (no implicit upgrade)", async () => {
+ // Plugins with only write:content cannot call ctx.content.get/list.
+ // This matches the Cloudflare PluginBridge: capabilities are enforced
+ // strictly as declared in the manifest. A plugin that needs both
+ // reads and writes must declare both capabilities.
+ await db.schema
+ .createTable("ec_pages")
+ .addColumn("id", "text", (col) => col.primaryKey())
+ .addColumn("slug", "text")
+ .addColumn("status", "text", (col) => col.notNull().defaultTo("draft"))
+ .addColumn("author_id", "text")
+ .addColumn("created_at", "text", (col) => col.notNull())
+ .addColumn("updated_at", "text", (col) => col.notNull())
+ .addColumn("deleted_at", "text")
+ .addColumn("version", "integer", (col) => col.notNull().defaultTo(1))
+ .addColumn("title", "text")
+ .execute();
+
+ const writeOnlyHandler = createBridgeHandler({
+ pluginId: "write-only-plugin",
+ version: "1.0.0",
+ capabilities: ["write:content"],
+ allowedHosts: [],
+ storageCollections: [],
+ db,
+ emailSend: () => null,
+ });
+
+ // content/get should fail
+ const getResult = await call(writeOnlyHandler, "content/get", {
+ collection: "pages",
+ id: "any",
+ });
+ expect(getResult.error).toContain("Missing capability: read:content");
+
+ // content/list should also fail
+ const listResult = await call(writeOnlyHandler, "content/list", {
+ collection: "pages",
+ });
+ expect(listResult.error).toContain("Missing capability: read:content");
+
+ // content/create should still succeed (has write:content)
+ const createResult = await call(writeOnlyHandler, "content/create", {
+ collection: "pages",
+ data: { title: "Allowed" },
+ });
+ expect(createResult.error).toBeUndefined();
+ });
+
+ it("write-only media plugin cannot read media", async () => {
+ // Same enforcement for media: write:media does NOT imply read:media.
+ const writeOnlyHandler = createBridgeHandler({
+ pluginId: "write-only-media",
+ version: "1.0.0",
+ capabilities: ["write:media"],
+ allowedHosts: [],
+ storageCollections: [],
+ db,
+ emailSend: () => null,
+ });
+
+ const getResult = await call(writeOnlyHandler, "media/get", { id: "any" });
+ expect(getResult.error).toContain("Missing capability: read:media");
+
+ const listResult = await call(writeOnlyHandler, "media/list", {});
+ expect(listResult.error).toContain("Missing capability: read:media");
+ });
+
+ it("sandboxed-test plugin cannot send email (not in capabilities)", async () => {
+ const handler = makePluginHandler();
+ const result = await call(handler, "email/send", {
+ message: { to: "a@b.com", subject: "hi", text: "hello" },
+ });
+ expect(result.error).toContain("Missing capability: email:send");
+ });
+
+ it("sandboxed-test plugin cannot access undeclared storage collections", async () => {
+ const handler = makePluginHandler();
+ const result = await call(handler, "storage/get", {
+ collection: "secrets",
+ id: "1",
+ });
+ expect(result.error).toContain("Storage collection not declared: secrets");
+ });
+
+ // ── Cross-plugin isolation ────────────────────────────────────────────
+
+ it("two plugins cannot see each other's KV data", async () => {
+ const pluginA = createBridgeHandler({
+ pluginId: "plugin-a",
+ version: "1.0.0",
+ capabilities: [],
+ allowedHosts: [],
+ storageCollections: [],
+ db,
+ emailSend: () => null,
+ });
+ const pluginB = createBridgeHandler({
+ pluginId: "plugin-b",
+ version: "1.0.0",
+ capabilities: [],
+ allowedHosts: [],
+ storageCollections: [],
+ db,
+ emailSend: () => null,
+ });
+
+ await call(pluginA, "kv/set", { key: "secret", value: "a-only" });
+
+ const fromA = await call(pluginA, "kv/get", { key: "secret" });
+ expect(fromA.result).toBe("a-only");
+
+ const fromB = await call(pluginB, "kv/get", { key: "secret" });
+ expect(fromB.result).toBeNull();
+ });
+
+ it("two plugins cannot see each other's storage documents", async () => {
+ const pluginA = createBridgeHandler({
+ pluginId: "plugin-a",
+ version: "1.0.0",
+ capabilities: [],
+ allowedHosts: [],
+ storageCollections: ["shared-name"],
+ db,
+ emailSend: () => null,
+ });
+ const pluginB = createBridgeHandler({
+ pluginId: "plugin-b",
+ version: "1.0.0",
+ capabilities: [],
+ allowedHosts: [],
+ storageCollections: ["shared-name"],
+ db,
+ emailSend: () => null,
+ });
+
+ await call(pluginA, "storage/put", {
+ collection: "shared-name",
+ id: "doc-1",
+ data: { owner: "a" },
+ });
+
+ const fromA = await call(pluginA, "storage/get", { collection: "shared-name", id: "doc-1" });
+ expect((fromA.result as Record<string, unknown>).owner).toBe("a");
+
+ const fromB = await call(pluginB, "storage/get", { collection: "shared-name", id: "doc-1" });
+ expect(fromB.result).toBeNull();
+ });
+});
diff --git a/packages/workerd/tsconfig.json b/packages/workerd/tsconfig.json
new file mode 100644
index 000000000..7d1576d39
--- /dev/null
+++ b/packages/workerd/tsconfig.json
@@ -0,0 +1,15 @@
+{
+ "compilerOptions": {
+ "target": "ES2022",
+ "module": "preserve",
+ "moduleResolution": "bundler",
+ "strict": true,
+ "noUncheckedIndexedAccess": true,
+ "noImplicitOverride": true,
+ "verbatimModuleSyntax": true,
+ "skipLibCheck": true,
+ "declaration": true,
+ "outDir": "dist"
+ },
+ "include": ["src"]
+}
diff --git a/packages/workerd/tsdown.config.ts b/packages/workerd/tsdown.config.ts
new file mode 100644
index 000000000..600f9569e
--- /dev/null
+++ b/packages/workerd/tsdown.config.ts
@@ -0,0 +1,14 @@
+import { defineConfig } from "tsdown";
+
+export default defineConfig({
+ entry: ["src/index.ts", "src/sandbox/index.ts"],
+ format: ["esm"],
+ dts: true,
+ clean: true,
+ external: [
+ // Native Node modules
+ "better-sqlite3",
+ // miniflare is a devDependency, dynamically imported at runtime
+ "miniflare",
+ ],
+});
diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml
index f182f3bd0..66cb505dc 100644
--- a/pnpm-lock.yaml
+++ b/pnpm-lock.yaml
@@ -99,6 +99,9 @@ catalogs:
'@tiptap/suggestion':
specifier: ^3.20.0
version: 3.20.0
+ '@types/better-sqlite3':
+ specifier: ^7.6.12
+ version: 7.6.13
'@types/node':
specifier: 24.10.13
version: 24.10.13
@@ -978,7 +981,7 @@ importers:
specifier: workspace:*
version: link:../blocks
'@types/better-sqlite3':
- specifier: ^7.6.12
+ specifier: 'catalog:'
version: 7.6.13
'@types/pg':
specifier: ^8.16.0
@@ -1081,7 +1084,7 @@ importers:
version: 3.25.76
devDependencies:
'@types/better-sqlite3':
- specifier: ^7.6.13
+ specifier: 'catalog:'
version: 7.6.13
'@types/node':
specifier: 'catalog:'
@@ -1253,6 +1256,41 @@ importers:
specifier: 'catalog:'
version: 5.9.3
+ packages/workerd:
+ dependencies:
+ emdash:
+ specifier: workspace:*
+ version: link:../core
+ ulidx:
+ specifier: ^2.4.1
+ version: 2.4.1
+ workerd:
+ specifier: '>=1.0.0'
+ version: 1.20260401.1
+ devDependencies:
+ '@types/better-sqlite3':
+ specifier: 'catalog:'
+ version: 7.6.13
+ better-sqlite3:
+ specifier: 'catalog:'
+ version: 11.10.0
+ kysely:
+ specifier: ^0.27.0
+ version: 0.27.6
+ tsdown:
+ specifier: 'catalog:'
+ version: 0.20.3(@arethetypeswrong/core@0.18.2)(@typescript/native-preview@7.0.0-dev.20260213.1)(oxc-resolver@11.16.4)(publint@0.3.17)(typescript@5.9.3)
+ typescript:
+ specifier: 'catalog:'
+ version: 5.9.3
+ vitest:
+ specifier: 'catalog:'
+ version: 4.0.18(@types/node@24.10.13)(@vitest/browser-playwright@4.0.18)(jiti@2.6.1)(jsdom@26.1.0)(lightningcss@1.31.1)(tsx@4.21.0)(yaml@2.8.2)
+ optionalDependencies:
+ miniflare:
+ specifier: ^4.20250408.0
+ version: 4.20260401.0
+
packages/x402:
dependencies:
'@x402/core':
@@ -16749,7 +16787,7 @@ snapshots:
picomatch: 4.0.3
std-env: 3.10.0
tinybench: 2.9.0
- tinyexec: 1.0.2
+ tinyexec: 1.0.4
tinyglobby: 0.2.15
tinyrainbow: 3.0.3
vite: 6.4.1(@types/node@24.10.13)(jiti@2.6.1)(lightningcss@1.31.1)(tsx@4.21.0)(yaml@2.8.2)
@@ -16789,7 +16827,7 @@ snapshots:
picomatch: 4.0.3
std-env: 3.10.0
tinybench: 2.9.0
- tinyexec: 1.0.2
+ tinyexec: 1.0.4
tinyglobby: 0.2.15
tinyrainbow: 3.0.3
vite: 6.4.1(@types/node@24.10.13)(jiti@2.6.1)(lightningcss@1.31.1)(tsx@4.21.0)(yaml@2.8.2)
diff --git a/pnpm-workspace.yaml b/pnpm-workspace.yaml
index e1beb4dea..73de20eeb 100644
--- a/pnpm-workspace.yaml
+++ b/pnpm-workspace.yaml
@@ -42,6 +42,7 @@ catalog:
"@tiptap/starter-kit": ^3.20.0
"@tiptap/suggestion": ^3.20.0
+ "@types/better-sqlite3": ^7.6.12
"@types/node": 24.10.13
"@types/react": 19.2.14
"@types/react-dom": 19.2.3
astro: ^6.0.1