From d0f214d83ebdeb60d02fcb3db894de24113a7f39 Mon Sep 17 00:00:00 2001 From: Cooper Maruyama Date: Fri, 1 May 2026 04:50:22 -0700 Subject: [PATCH 1/3] feat(db): apply file-based Drizzle migrations programmatically at startup MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Replaces the manual `drizzle-kit push` / `bun run db:push` flow with file-based migrations that ship with each deploy and run themselves on the first request to a new isolate. What changed - `packages/db/drizzle.config.ts` switches `out` from `./src/migrations` to `./drizzle` and stops importing `@gen/env/web` (drizzle-kit generate doesn't need a real URL — falls back to a stub if neither POSTGRES_URL nor DATABASE_URL is set). - `packages/db/scripts/bundle-migrations.ts` reads `drizzle/` and writes `packages/db/src/migrations-bundle.generated.ts` so SQL ships inlined with the JS bundle (Cloudflare Workers can't read `node:fs` for SQL files; this avoids any bundler-specific magic like `import.meta.glob('?raw')`). - `packages/db/src/migrate.ts` exports `runMigrations(db)` — a custom migrator that takes a `pg_advisory_lock`, ensures `__drizzle_migrations` exists, and applies each pending bundled entry in idx order. Per-isolate, the in-flight Promise is cached so concurrent callers reuse the same migrate run. - `packages/auth/src/index.ts` awaits `runMigrations(db)` at module-evaluation time before constructing the better-auth instance, guarded on a configured connection string so vitest/typecheck contexts don't crash at import. - Removes `db:push` from root `package.json`, `packages/db/package.json`, `turbo.json`, and `.stack/config.nix`. `db:generate` now chains `drizzle-kit generate && bun run db:bundle`. `db:migrate` is kept for local ad-hoc use. - `docs/adr/0002-runtime-startup-migrations.md` records the decision, rationale, alternatives considered, and follow-ups (CI bundle-drift check, down migrations, Effect-native variant). 
- README/AGENTS.md/CLAUDE.md/WARP.md updated to describe the new flow. Why PR #24's preview consistently 500'd on `waitlist.join` because the per-PR Neon project was created empty and `db:push` was never run against it. With this change, a fresh isolate booting against an empty Neon DB applies the bundled migrations transparently — preview, staging, and production all converge on the same flow with zero manual steps. Baseline migration `drizzle/0000_init.sql` covers all 12 tables (account, session, user, verification, invitation, member, organization, organization_dek, organization_state, polar_event, user_subscription, beta_waitlist) including the previously-missing `beta_waitlist`. --- .stack/config.nix | 20 +- AGENTS.md | 42 +- CLAUDE.md | 23 +- WARP.md | 23 +- docs/adr/0002-runtime-startup-migrations.md | 178 +++ nix/stackpanel/modules/just/module.nix | 6 +- package.json | 1 - packages/auth/src/index.ts | 18 +- packages/db/drizzle.config.ts | 15 +- packages/db/drizzle/0000_init.sql | 156 +++ packages/db/drizzle/meta/0000_snapshot.json | 1161 +++++++++++++++++ packages/db/drizzle/meta/_journal.json | 13 + packages/db/package.json | 4 +- packages/db/scripts/bundle-migrations.ts | 98 ++ packages/db/src/index.ts | 13 +- packages/db/src/migrate.ts | 119 ++ .../db/src/migrations-bundle.generated.ts | 36 + turbo.json | 2 +- 18 files changed, 1874 insertions(+), 54 deletions(-) create mode 100644 docs/adr/0002-runtime-startup-migrations.md create mode 100644 packages/db/drizzle/0000_init.sql create mode 100644 packages/db/drizzle/meta/0000_snapshot.json create mode 100644 packages/db/drizzle/meta/_journal.json create mode 100644 packages/db/scripts/bundle-migrations.ts create mode 100644 packages/db/src/migrate.ts create mode 100644 packages/db/src/migrations-bundle.generated.ts diff --git a/.stack/config.nix b/.stack/config.nix index b4558586..085fd874 100644 --- a/.stack/config.nix +++ b/.stack/config.nix @@ -924,23 +924,23 @@ env = { }; exec = "turbo run clean && rm 
-rf node_modules/.cache"; }; - "db:migrate" = { - cwd = "apps/server"; - description = "Run database migrations"; + "db:generate" = { + cwd = "packages/db"; + description = "Generate a new Drizzle migration from schema changes (also bundles for runtime)"; env = { }; - exec = "bun run drizzle-kit migrate"; + exec = "bun run db:generate"; }; - "db:push" = { - cwd = "apps/server"; - description = "Push schema changes to database"; + "db:migrate" = { + cwd = "packages/db"; + description = "Apply file-based Drizzle migrations against the configured DATABASE_URL (local dev only — runtime migration is automatic)"; env = { }; - exec = "bun run drizzle-kit push"; + exec = "bun run db:migrate"; }; "db:studio" = { - cwd = "apps/server"; + cwd = "packages/db"; description = "Open Drizzle Studio database GUI"; env = { }; - exec = "bun run drizzle-kit studio"; + exec = "bun run db:studio"; }; dev = { cache = false; diff --git a/AGENTS.md b/AGENTS.md index 4f5ee240..c1b303ca 100644 --- a/AGENTS.md +++ b/AGENTS.md @@ -154,12 +154,25 @@ air # Hot reload (dev mode) ### Database ```bash -bun run db:push # Push Drizzle schema changes +bun run db:generate # Generate a new Drizzle migration after a schema change + # (writes packages/db/drizzle/_.sql + bundles for runtime) bun run db:studio # Open Drizzle Studio -bun run db:generate # Generate Drizzle types -bun run db:migrate # Run migrations +bun run db:migrate # Apply migrations against $DATABASE_URL — local/dev only + # (production / staging / preview migrate automatically at app + # startup via @stackpanel/db's runMigrations(); see + # docs/adr/0002-runtime-startup-migrations.md) ``` +**Generating a migration** + +1. Edit a schema file under `packages/db/src/schema/`. +2. Run `bun run db:generate` (or `bun run --cwd packages/db db:generate` directly). +3. Commit the new SQL file under `packages/db/drizzle/` along with the + regenerated `packages/db/drizzle/meta/_journal.json` and + `packages/db/src/migrations-bundle.generated.ts`. 
+4. Deploy. The first request to a new isolate will apply pending migrations + transparently — no manual `db:push` step. + ### Nix / Infra ```bash @@ -663,14 +676,21 @@ This is a monorepo with the following structure: ## Database Commands -All database operations should be run from the server workspace: - -- `bun run db:push` - Push schema changes to database -- `bun run db:studio` - Open database studio -- `bun run db:generate` - Generate Prisma files -- `bun run db:migrate` - Run database migrations - -Database schema is located in `apps/server/prisma/schema.prisma` +All database operations are exposed at the workspace root (delegated to +`@stackpanel/db` via Turbo): + +- `bun run db:generate` - Generate a new Drizzle migration from schema + changes (writes `packages/db/drizzle/*.sql`, `meta/_journal.json`, and + the runtime-importable `packages/db/src/migrations-bundle.generated.ts`) +- `bun run db:studio` - Open Drizzle Studio +- `bun run db:migrate` - Apply migrations against `$DATABASE_URL` — local / + ad-hoc only; production, staging, and preview deployments migrate + automatically at app startup via `runMigrations()` (see + `docs/adr/0002-runtime-startup-migrations.md`) + +Database schemas live in `packages/db/src/schema/`. There is no `db:push` +flow anymore — schema-sync is replaced by file-based migrations that ship +with the deploy and apply themselves on first isolate boot. 
## API Structure diff --git a/CLAUDE.md b/CLAUDE.md index 2f2cbb4a..571db34a 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -329,14 +329,21 @@ This is a monorepo with the following structure: ## Database Commands -All database operations should be run from the server workspace: - -- `bun run db:push` - Push schema changes to database -- `bun run db:studio` - Open database studio -- `bun run db:generate` - Generate Prisma files -- `bun run db:migrate` - Run database migrations - -Database schema is located in `apps/server/prisma/schema.prisma` +All database operations are exposed at the workspace root (delegated to +`@stackpanel/db` via Turbo): + +- `bun run db:generate` - Generate a new Drizzle migration from schema + changes (writes `packages/db/drizzle/*.sql`, `meta/_journal.json`, and + the runtime-importable `packages/db/src/migrations-bundle.generated.ts`) +- `bun run db:studio` - Open Drizzle Studio +- `bun run db:migrate` - Apply migrations against `$DATABASE_URL` — local / + ad-hoc only; production, staging, and preview deployments migrate + automatically at app startup via `runMigrations()` (see + `docs/adr/0002-runtime-startup-migrations.md`) + +Database schemas live in `packages/db/src/schema/`. There is no `db:push` +flow anymore — schema-sync is replaced by file-based migrations that ship +with the deploy and apply themselves on first isolate boot. 
## API Structure diff --git a/WARP.md b/WARP.md index a42438bc..ab275c68 100644 --- a/WARP.md +++ b/WARP.md @@ -331,14 +331,21 @@ This is a monorepo with the following structure: ## Database Commands -All database operations should be run from the server workspace: - -- `bun run db:push` - Push schema changes to database -- `bun run db:studio` - Open database studio -- `bun run db:generate` - Generate Prisma files -- `bun run db:migrate` - Run database migrations - -Database schema is located in `apps/server/prisma/schema.prisma` +All database operations are exposed at the workspace root (delegated to +`@stackpanel/db` via Turbo): + +- `bun run db:generate` - Generate a new Drizzle migration from schema + changes (writes `packages/db/drizzle/*.sql`, `meta/_journal.json`, and + the runtime-importable `packages/db/src/migrations-bundle.generated.ts`) +- `bun run db:studio` - Open Drizzle Studio +- `bun run db:migrate` - Apply migrations against `$DATABASE_URL` — local / + ad-hoc only; production, staging, and preview deployments migrate + automatically at app startup via `runMigrations()` (see + `docs/adr/0002-runtime-startup-migrations.md`) + +Database schemas live in `packages/db/src/schema/`. There is no `db:push` +flow anymore — schema-sync is replaced by file-based migrations that ship +with the deploy and apply themselves on first isolate boot. 
## API Structure diff --git a/docs/adr/0002-runtime-startup-migrations.md b/docs/adr/0002-runtime-startup-migrations.md new file mode 100644 index 00000000..f56f16fb --- /dev/null +++ b/docs/adr/0002-runtime-startup-migrations.md @@ -0,0 +1,178 @@ +# 0002 — Database migrations are applied programmatically at app startup, not via `drizzle-kit push` + +- **Status:** Accepted +- **Date:** 2026-05-01 +- **Deciders:** Stackpanel core team +- **Related:** [`docs/adr/0001-runtime-secrets-via-gen-env-loader.md`](./0001-runtime-secrets-via-gen-env-loader.md) +- **Implementation:** branch `feat/runtime-migrations` + +## Context + +Until now, Stackpanel's Drizzle-backed Postgres (Neon) used the +`bun run db:push` flow — a wrapper around `drizzle-kit push` — to keep +the database schema in lockstep with the TypeScript schema files under +`packages/db/src/schema/`. That approach has several problems we keep +running into: + +1. **Humans-in-the-loop**: `db:push` is a manual step. It is trivially + forgotten — most recently on PR #24, where the + `waitlist.join` tRPC procedure 500'd against the per-PR Neon preview + project with `Failed query: select "id" from "beta_waitlist"` because + nobody had run `db:push` against that fresh database. The deploy + pipeline has no way to know the schema is stale until a request hits a + missing table. +2. **No audit trail**: `drizzle-kit push` diffs the live DB against the + TypeScript schema and emits SQL on the fly. Nothing is checked into git, + so we have no history of schema changes, no way to review one in a PR, + and no `down` story when a change goes wrong. +3. **Preview DB priming is lazy**: per-PR preview deploys provision their + Neon project on first deploy (see `apps/web/alchemy.run.ts`). The DB + is empty until something writes to it; with `db:push` that "something" + is a human running the right command at the right time, which doesn't + happen. +4. 
**Drift between environments**: `db:push` is destructive — it reshapes + the live DB to match the schema. In dev we shrug and let it drop a + column; in prod we don't dare run it. So in practice prod uses + ad-hoc SQL while dev uses `db:push`, and the two diverge over time. + +The user-visible failure on PR #24 was the trigger: the waitlist signup +button on the `local..stackpanel.com` preview returned a 500, and +the only fix was to `wrangler tail` the worker, infer the missing table, +and run `db:push` against the preview manually. That's not a flow we want +to ship to ourselves repeatedly — and certainly not to anyone using +Stackpanel as a starter template. + +## Decision + +Migrations are now **file-based**, **committed to git**, and **applied +programmatically at app startup** by the `@stackpanel/db` package itself. + +Concretely: + +- **Generation** is local-only. After editing a schema file under + `packages/db/src/schema/`, run + `bun run --cwd packages/db db:generate`. That invokes + `drizzle-kit generate` (which writes `packages/db/drizzle/_.sql` + and `packages/db/drizzle/meta/_journal.json`) and then + `scripts/bundle-migrations.ts`, which inlines every SQL file into + `packages/db/src/migrations-bundle.generated.ts`. All three artifacts are + checked into git. +- **Application** is automatic. `@stackpanel/db` exports `runMigrations(db)` + from `src/migrate.ts`. `packages/auth/src/index.ts` awaits it at + module-evaluation time (top-level await) **before** constructing the + `betterAuth({...})` instance, so the per-isolate boot order is always + `import db → await runMigrations(db) → betterAuth({...})`. Anything + downstream that imports `auth` (the tRPC handler, route middleware, + background jobs) inherits the dependency naturally — by the time + `auth.api.getSession()` is callable, every committed migration has been + applied. 
+- **Concurrency** is handled by a Postgres advisory lock + (`pg_advisory_lock(0x4d495252::bigint)`) inside `applyMigrations`, so + many isolates can call `runMigrations` simultaneously without racing + on `__drizzle_migrations` row inserts. Per-isolate, the function caches + the in-flight `Promise` so repeated callers reuse the same migrate run. +- **Idempotency** comes from the standard drizzle `__drizzle_migrations` + table: each entry is keyed by its content-derived `tag`, and applied + rows are skipped on subsequent boots. +- **`drizzle-kit push` is removed** from every workspace script + (`package.json`, `packages/db/package.json`, `turbo.json`, + `.stack/config.nix`). `drizzle-kit migrate` is kept under + `bun run db:migrate` for local ad-hoc use only — production / + staging / preview deployments never invoke it; they rely entirely on + the `runMigrations` call at startup. + +### Why not `drizzle-orm/node-postgres/migrator` directly? + +The built-in migrator reads SQL files at runtime via `node:fs`. Inside a +Cloudflare Worker bundle that filesystem is empty — Vite/Rolldown bundles +JS modules but not arbitrary `.sql` files. We considered three options: + +1. **Vite `import.meta.glob('drizzle/*.sql', { query: '?raw' })`** — + works, but couples `@stackpanel/db` to a specific bundler and silently + becomes a no-op anywhere Vite isn't in the loop (Bun scripts, ad-hoc + tests, the Go-driven docs build). +2. **A Workers-native migrator from drizzle-orm itself** — none exists + for the `node-postgres` adapter as of `drizzle-orm@0.45.1`. The + `neon-http` migrator is HTTP-only and requires switching the runtime + driver. +3. **Pre-bundle the SQL into a TypeScript module at generate time** + (chosen). `scripts/bundle-migrations.ts` reads `drizzle/` and writes + `src/migrations-bundle.generated.ts` with each migration inlined as a + string. 
The runtime imports that module like any other TS module — + works identically in Workers, Node, Bun, vitest, and any future runtime + without bundler-specific magic. + +## Consequences + +### Positive + +- **Zero-config preview DB priming**: a freshly-provisioned Neon project + is brought up to schema by the first request that touches the auth + module. PR #24's waitlist 500 cannot recur with this design. +- **Audit trail**: every schema change ships as a reviewable + `packages/db/drizzle/_.sql` diff in the PR that introduces + it. +- **No human deploy step** for schema changes — the deploy pipeline stays + identical for code-only and code-plus-schema changes. +- **Cross-runtime portability**: bundled migrations work in Cloudflare + Workers, Node, Bun, vitest, and any future runtime without per-target + build tweaks. +- **Rollback story** is back on the table: a future iteration can add + `down.sql` files and a `--down` flag to `runMigrations` without + re-architecting how migrations are discovered or transported. + +### Negative / trade-offs + +- **One-time cost on cold isolate boot**: the first request to a freshly + spawned isolate pays the migration check (a single + `SELECT hash FROM __drizzle_migrations` and the advisory-lock + acquire/release). Steady-state requests pay nothing — the `inflight` + Promise cache short-circuits. Worst case (cold + brand-new schema) is + the time to apply pending migrations once per environment. +- **Schema changes need explicit migration review**: developers can no + longer iterate by editing the schema and running `db:push`. The price + is a `bun run db:generate` + a single committed SQL file. Worth it for + the audit trail; everyone agrees this is a good trade. +- **`packages/db/src/migrations-bundle.generated.ts` must stay in sync + with `drizzle/`**. 
The `db:generate` script chains both, and the + `db:bundle` script can be re-run independently + (`bun run --cwd packages/db db:bundle`) if someone manually edits a + migration file. CI does not (yet) re-bundle and diff — see + *Follow-ups*. + +### Neutral + +- The `__drizzle_migrations` table now exists in every environment. Same + shape drizzle's built-in migrator uses, so future-us could swap to the + upstream migrator if Workers ever ships a fully-compatible one. + +## Alternatives considered + +- **Keep `drizzle-kit push`**: rejected. It is the source of the + problems described in *Context* — no audit trail, manual step, and the + existing PR-24 outage is a direct consequence. +- **Run migrations only in CI before deploy**: rejected. Preview Neon + projects are created lazily by `apps/web/alchemy.run.ts` during the + Cloudflare Workers deploy itself; there is no "before deploy" moment + where the preview DB exists but the worker doesn't. Adding a separate + CI step that provisions the DB and migrates it before the deploy ran + would double the preview latency and re-introduce a human-readable + deploy graph. +- **Use Neon's branching for schema management**: rejected. Neon + branching is great for forking *data* off main, but it doesn't replace + a migration tool — it inherits whatever schema main has and gives no + way to evolve schema in a feature branch without merging the schema + change to main first. Orthogonal to this decision. + +## Follow-ups + +- Add a CI check (`verify` workflow) that runs + `bun run --cwd packages/db db:bundle` and fails if the resulting diff + isn't empty. This guarantees the bundle stays in lockstep with the SQL + files. +- Add `down.sql` support to `scripts/bundle-migrations.ts` and a + `runMigrations(db, { direction: "down", to: })` opt-in for + emergency rollbacks. 
+- Consider exposing a `runMigrationsEffect(db)` Effect-native variant + for callers that already live in an `Effect.gen` block (parity with + `loadAppEnvEffect` from `@gen/env/runtime`). diff --git a/nix/stackpanel/modules/just/module.nix b/nix/stackpanel/modules/just/module.nix index ff254d22..8e460f25 100644 --- a/nix/stackpanel/modules/just/module.nix +++ b/nix/stackpanel/modules/just/module.nix @@ -87,9 +87,9 @@ in db = { description = "Database management recipes"; recipes = ''' - # Push schema changes - db-push: - bun run db:push + # Generate a new Drizzle migration after schema changes + db-generate: + bun run db:generate '''; }; } diff --git a/package.json b/package.json index 8b5970e1..41f3a300 100644 --- a/package.json +++ b/package.json @@ -65,7 +65,6 @@ "dev:web": "turbo -F web dev", "dev:server": "turbo -F server dev", "test": "turbo test", - "db:push": "turbo -F @stackpanel/db db:push", "db:studio": "turbo -F @stackpanel/db db:studio", "db:generate": "turbo -F @stackpanel/db db:generate", "db:migrate": "turbo -F @stackpanel/db db:migrate", diff --git a/packages/auth/src/index.ts b/packages/auth/src/index.ts index 331aae2b..5cd9447c 100644 --- a/packages/auth/src/index.ts +++ b/packages/auth/src/index.ts @@ -1,5 +1,5 @@ import { checkout, polar, portal, webhooks } from "@polar-sh/better-auth"; -import { db } from "@stackpanel/db"; +import { db, runMigrations } from "@stackpanel/db"; import type { BetterAuthPlugin } from "better-auth"; import { betterAuth } from "better-auth"; import { drizzleAdapter } from "better-auth/adapters/drizzle"; @@ -8,6 +8,22 @@ import { polarClient } from "./lib/payments"; import { polarProducts } from "./lib/polar-products"; import { polarSubscriptionCallbacks } from "./lib/polar-webhooks"; +// Apply file-based Drizzle migrations from `@stackpanel/db` before any +// auth-bound query runs. We do this at module-evaluation time (top-level +// await) so the per-isolate boot order is always: +// 1. 
import @stackpanel/db → drizzle client + bundled migrations available +// 2. await runMigrations(db) → __drizzle_migrations is up-to-date +// 3. betterAuth({...}) → drizzle adapter is safe to construct & query +// See `docs/adr/0002-runtime-startup-migrations.md` for the full rationale. +// +// `runMigrations` is internally cached + serialized via `pg_advisory_lock`, +// so concurrent isolates cooperate. We guard on a configured connection +// string so vitest/typecheck contexts (which never set DATABASE_URL) don't +// crash at import time on a connection refused error. +if (process.env.DATABASE_URL || process.env.POSTGRES_URL) { + await runMigrations(db); +} + // Build plugins array - only include Polar if configured const plugins: BetterAuthPlugin[] = [ organization({ diff --git a/packages/db/drizzle.config.ts b/packages/db/drizzle.config.ts index f3cc4257..3014203e 100644 --- a/packages/db/drizzle.config.ts +++ b/packages/db/drizzle.config.ts @@ -1,12 +1,15 @@ -import { env } from "@gen/env/web"; -import dotenv from "dotenv"; import { defineConfig } from "drizzle-kit"; +// `drizzle-kit generate` doesn't need the URL — it diffs the schema against +// the existing migrations. `drizzle-kit migrate` (and the runtime `migrate()` +// in `src/migrate.ts`) connect using `POSTGRES_URL`/`DATABASE_URL`. We accept +// either so local ad-hoc runs work in any devshell that already has one set. +const url = + process.env.POSTGRES_URL ?? process.env.DATABASE_URL ?? 
"postgres://stub"; + export default defineConfig({ schema: "./src/schema", - out: "./src/migrations", + out: "./drizzle", dialect: "postgresql", - dbCredentials: { - url: env.POSTGRES_URL, - }, + dbCredentials: { url }, }); diff --git a/packages/db/drizzle/0000_init.sql b/packages/db/drizzle/0000_init.sql new file mode 100644 index 00000000..61653ecf --- /dev/null +++ b/packages/db/drizzle/0000_init.sql @@ -0,0 +1,156 @@ +CREATE TABLE "account" ( + "id" text PRIMARY KEY NOT NULL, + "account_id" text NOT NULL, + "provider_id" text NOT NULL, + "user_id" text NOT NULL, + "access_token" text, + "refresh_token" text, + "id_token" text, + "access_token_expires_at" timestamp, + "refresh_token_expires_at" timestamp, + "scope" text, + "password" text, + "created_at" timestamp DEFAULT now() NOT NULL, + "updated_at" timestamp NOT NULL +); +--> statement-breakpoint +CREATE TABLE "session" ( + "id" text PRIMARY KEY NOT NULL, + "expires_at" timestamp NOT NULL, + "token" text NOT NULL, + "created_at" timestamp DEFAULT now() NOT NULL, + "updated_at" timestamp NOT NULL, + "ip_address" text, + "user_agent" text, + "user_id" text NOT NULL, + "active_organization_id" text, + CONSTRAINT "session_token_unique" UNIQUE("token") +); +--> statement-breakpoint +CREATE TABLE "user" ( + "id" text PRIMARY KEY NOT NULL, + "name" text NOT NULL, + "email" text NOT NULL, + "email_verified" boolean DEFAULT false NOT NULL, + "image" text, + "created_at" timestamp DEFAULT now() NOT NULL, + "updated_at" timestamp DEFAULT now() NOT NULL, + CONSTRAINT "user_email_unique" UNIQUE("email") +); +--> statement-breakpoint +CREATE TABLE "verification" ( + "id" text PRIMARY KEY NOT NULL, + "identifier" text NOT NULL, + "value" text NOT NULL, + "expires_at" timestamp NOT NULL, + "created_at" timestamp DEFAULT now() NOT NULL, + "updated_at" timestamp DEFAULT now() NOT NULL +); +--> statement-breakpoint +CREATE TABLE "invitation" ( + "id" text PRIMARY KEY NOT NULL, + "email" text NOT NULL, + "inviter_id" text NOT 
NULL, + "organization_id" text NOT NULL, + "role" text, + "status" text NOT NULL, + "created_at" timestamp DEFAULT now() NOT NULL, + "expires_at" timestamp NOT NULL +); +--> statement-breakpoint +CREATE TABLE "member" ( + "id" text PRIMARY KEY NOT NULL, + "organization_id" text NOT NULL, + "user_id" text NOT NULL, + "role" text NOT NULL, + "created_at" timestamp DEFAULT now() NOT NULL +); +--> statement-breakpoint +CREATE TABLE "organization" ( + "id" text PRIMARY KEY NOT NULL, + "name" text NOT NULL, + "slug" text NOT NULL, + "logo" text, + "metadata" text, + "created_at" timestamp DEFAULT now() NOT NULL +); +--> statement-breakpoint +CREATE TABLE "organization_dek" ( + "organization_id" text PRIMARY KEY NOT NULL, + "encrypted_dek" "bytea" NOT NULL, + "kms_key_alias" text NOT NULL, + "created_at" timestamp DEFAULT now() NOT NULL +); +--> statement-breakpoint +CREATE TABLE "organization_state" ( + "id" text PRIMARY KEY NOT NULL, + "organization_id" text NOT NULL, + "stack" text NOT NULL, + "stage" text NOT NULL, + "fqn" text NOT NULL, + "nonce" "bytea" NOT NULL, + "encrypted_blob" "bytea" NOT NULL, + "version" integer DEFAULT 1 NOT NULL, + "created_at" timestamp DEFAULT now() NOT NULL, + "updated_at" timestamp DEFAULT now() NOT NULL +); +--> statement-breakpoint +CREATE TABLE "polar_event" ( + "id" text PRIMARY KEY NOT NULL, + "polar_event_id" text NOT NULL, + "event_type" text NOT NULL, + "payload" text NOT NULL, + "processed_at" timestamp DEFAULT now() NOT NULL +); +--> statement-breakpoint +CREATE TABLE "user_subscription" ( + "id" text PRIMARY KEY NOT NULL, + "user_id" text NOT NULL, + "polar_customer_id" text NOT NULL, + "polar_subscription_id" text, + "plan" text DEFAULT 'free' NOT NULL, + "status" text DEFAULT 'active' NOT NULL, + "current_period_end" timestamp, + "cancel_at_period_end" text, + "created_at" timestamp DEFAULT now() NOT NULL, + "updated_at" timestamp DEFAULT now() NOT NULL +); +--> statement-breakpoint +CREATE TABLE "beta_waitlist" ( + "id" 
text PRIMARY KEY NOT NULL, + "email" text NOT NULL, + "name" text, + "company" text, + "role" text, + "source" text, + "notes" text, + "referrer" text, + "user_agent" text, + "ip_hash" text, + "invited_at" timestamp, + "created_at" timestamp DEFAULT now() NOT NULL +); +--> statement-breakpoint +ALTER TABLE "account" ADD CONSTRAINT "account_user_id_user_id_fk" FOREIGN KEY ("user_id") REFERENCES "public"."user"("id") ON DELETE cascade ON UPDATE no action;--> statement-breakpoint +ALTER TABLE "session" ADD CONSTRAINT "session_user_id_user_id_fk" FOREIGN KEY ("user_id") REFERENCES "public"."user"("id") ON DELETE cascade ON UPDATE no action;--> statement-breakpoint +ALTER TABLE "invitation" ADD CONSTRAINT "invitation_inviter_id_user_id_fk" FOREIGN KEY ("inviter_id") REFERENCES "public"."user"("id") ON DELETE cascade ON UPDATE no action;--> statement-breakpoint +ALTER TABLE "invitation" ADD CONSTRAINT "invitation_organization_id_organization_id_fk" FOREIGN KEY ("organization_id") REFERENCES "public"."organization"("id") ON DELETE cascade ON UPDATE no action;--> statement-breakpoint +ALTER TABLE "member" ADD CONSTRAINT "member_organization_id_organization_id_fk" FOREIGN KEY ("organization_id") REFERENCES "public"."organization"("id") ON DELETE cascade ON UPDATE no action;--> statement-breakpoint +ALTER TABLE "member" ADD CONSTRAINT "member_user_id_user_id_fk" FOREIGN KEY ("user_id") REFERENCES "public"."user"("id") ON DELETE cascade ON UPDATE no action;--> statement-breakpoint +ALTER TABLE "organization_dek" ADD CONSTRAINT "organization_dek_organization_id_organization_id_fk" FOREIGN KEY ("organization_id") REFERENCES "public"."organization"("id") ON DELETE cascade ON UPDATE no action;--> statement-breakpoint +ALTER TABLE "organization_state" ADD CONSTRAINT "organization_state_organization_id_organization_id_fk" FOREIGN KEY ("organization_id") REFERENCES "public"."organization"("id") ON DELETE cascade ON UPDATE no action;--> statement-breakpoint +ALTER TABLE 
"user_subscription" ADD CONSTRAINT "user_subscription_user_id_user_id_fk" FOREIGN KEY ("user_id") REFERENCES "public"."user"("id") ON DELETE cascade ON UPDATE no action;--> statement-breakpoint +CREATE INDEX "account_userId_idx" ON "account" USING btree ("user_id");--> statement-breakpoint +CREATE INDEX "session_userId_idx" ON "session" USING btree ("user_id");--> statement-breakpoint +CREATE INDEX "verification_identifier_idx" ON "verification" USING btree ("identifier");--> statement-breakpoint +CREATE INDEX "invitation_org_idx" ON "invitation" USING btree ("organization_id");--> statement-breakpoint +CREATE UNIQUE INDEX "member_org_user_idx" ON "member" USING btree ("organization_id","user_id");--> statement-breakpoint +CREATE INDEX "member_user_idx" ON "member" USING btree ("user_id");--> statement-breakpoint +CREATE UNIQUE INDEX "organization_slug_idx" ON "organization" USING btree ("slug");--> statement-breakpoint +CREATE UNIQUE INDEX "organization_state_unique_idx" ON "organization_state" USING btree ("organization_id","stack","stage","fqn");--> statement-breakpoint +CREATE INDEX "organization_state_stage_idx" ON "organization_state" USING btree ("organization_id","stack","stage");--> statement-breakpoint +CREATE UNIQUE INDEX "polar_event_polar_id_idx" ON "polar_event" USING btree ("polar_event_id");--> statement-breakpoint +CREATE UNIQUE INDEX "user_subscription_user_idx" ON "user_subscription" USING btree ("user_id");--> statement-breakpoint +CREATE INDEX "user_subscription_polar_customer_idx" ON "user_subscription" USING btree ("polar_customer_id");--> statement-breakpoint +CREATE UNIQUE INDEX "beta_waitlist_email_uniq" ON "beta_waitlist" USING btree ("email");--> statement-breakpoint +CREATE INDEX "beta_waitlist_created_idx" ON "beta_waitlist" USING btree ("created_at"); \ No newline at end of file diff --git a/packages/db/drizzle/meta/0000_snapshot.json b/packages/db/drizzle/meta/0000_snapshot.json new file mode 100644 index 00000000..5548e57c --- 
/dev/null +++ b/packages/db/drizzle/meta/0000_snapshot.json @@ -0,0 +1,1161 @@ +{ + "id": "4eedc99e-9098-4b74-83c9-ff92d2182938", + "prevId": "00000000-0000-0000-0000-000000000000", + "version": "7", + "dialect": "postgresql", + "tables": { + "public.account": { + "name": "account", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "account_id": { + "name": "account_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "provider_id": { + "name": "provider_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "user_id": { + "name": "user_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "access_token": { + "name": "access_token", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "refresh_token": { + "name": "refresh_token", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "id_token": { + "name": "id_token", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "access_token_expires_at": { + "name": "access_token_expires_at", + "type": "timestamp", + "primaryKey": false, + "notNull": false + }, + "refresh_token_expires_at": { + "name": "refresh_token_expires_at", + "type": "timestamp", + "primaryKey": false, + "notNull": false + }, + "scope": { + "name": "scope", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "password": { + "name": "password", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "created_at": { + "name": "created_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + }, + "updated_at": { + "name": "updated_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true + } + }, + "indexes": { + "account_userId_idx": { + "name": "account_userId_idx", + "columns": [ + { + "expression": "user_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": 
false, + "method": "btree", + "with": {} + } + }, + "foreignKeys": { + "account_user_id_user_id_fk": { + "name": "account_user_id_user_id_fk", + "tableFrom": "account", + "tableTo": "user", + "columnsFrom": [ + "user_id" + ], + "columnsTo": [ + "id" + ], + "onDelete": "cascade", + "onUpdate": "no action" + } + }, + "compositePrimaryKeys": {}, + "uniqueConstraints": {}, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.session": { + "name": "session", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "expires_at": { + "name": "expires_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true + }, + "token": { + "name": "token", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "created_at": { + "name": "created_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + }, + "updated_at": { + "name": "updated_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true + }, + "ip_address": { + "name": "ip_address", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "user_agent": { + "name": "user_agent", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "user_id": { + "name": "user_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "active_organization_id": { + "name": "active_organization_id", + "type": "text", + "primaryKey": false, + "notNull": false + } + }, + "indexes": { + "session_userId_idx": { + "name": "session_userId_idx", + "columns": [ + { + "expression": "user_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + } + }, + "foreignKeys": { + "session_user_id_user_id_fk": { + "name": "session_user_id_user_id_fk", + "tableFrom": "session", + "tableTo": "user", + "columnsFrom": [ + "user_id" + ], + "columnsTo": [ + "id" + ], + 
"onDelete": "cascade", + "onUpdate": "no action" + } + }, + "compositePrimaryKeys": {}, + "uniqueConstraints": { + "session_token_unique": { + "name": "session_token_unique", + "nullsNotDistinct": false, + "columns": [ + "token" + ] + } + }, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.user": { + "name": "user", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "name": { + "name": "name", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "email": { + "name": "email", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "email_verified": { + "name": "email_verified", + "type": "boolean", + "primaryKey": false, + "notNull": true, + "default": false + }, + "image": { + "name": "image", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "created_at": { + "name": "created_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + }, + "updated_at": { + "name": "updated_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + } + }, + "indexes": {}, + "foreignKeys": {}, + "compositePrimaryKeys": {}, + "uniqueConstraints": { + "user_email_unique": { + "name": "user_email_unique", + "nullsNotDistinct": false, + "columns": [ + "email" + ] + } + }, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.verification": { + "name": "verification", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "identifier": { + "name": "identifier", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "value": { + "name": "value", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "expires_at": { + "name": "expires_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true + }, + "created_at": { + "name": "created_at", + "type": 
"timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + }, + "updated_at": { + "name": "updated_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + } + }, + "indexes": { + "verification_identifier_idx": { + "name": "verification_identifier_idx", + "columns": [ + { + "expression": "identifier", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + } + }, + "foreignKeys": {}, + "compositePrimaryKeys": {}, + "uniqueConstraints": {}, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.invitation": { + "name": "invitation", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "email": { + "name": "email", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "inviter_id": { + "name": "inviter_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "organization_id": { + "name": "organization_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "role": { + "name": "role", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "status": { + "name": "status", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "created_at": { + "name": "created_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + }, + "expires_at": { + "name": "expires_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true + } + }, + "indexes": { + "invitation_org_idx": { + "name": "invitation_org_idx", + "columns": [ + { + "expression": "organization_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + } + }, + "foreignKeys": { + "invitation_inviter_id_user_id_fk": { + "name": "invitation_inviter_id_user_id_fk", + "tableFrom": 
"invitation", + "tableTo": "user", + "columnsFrom": [ + "inviter_id" + ], + "columnsTo": [ + "id" + ], + "onDelete": "cascade", + "onUpdate": "no action" + }, + "invitation_organization_id_organization_id_fk": { + "name": "invitation_organization_id_organization_id_fk", + "tableFrom": "invitation", + "tableTo": "organization", + "columnsFrom": [ + "organization_id" + ], + "columnsTo": [ + "id" + ], + "onDelete": "cascade", + "onUpdate": "no action" + } + }, + "compositePrimaryKeys": {}, + "uniqueConstraints": {}, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.member": { + "name": "member", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "organization_id": { + "name": "organization_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "user_id": { + "name": "user_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "role": { + "name": "role", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "created_at": { + "name": "created_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + } + }, + "indexes": { + "member_org_user_idx": { + "name": "member_org_user_idx", + "columns": [ + { + "expression": "organization_id", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "user_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": true, + "concurrently": false, + "method": "btree", + "with": {} + }, + "member_user_idx": { + "name": "member_user_idx", + "columns": [ + { + "expression": "user_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + } + }, + "foreignKeys": { + "member_organization_id_organization_id_fk": { + "name": "member_organization_id_organization_id_fk", + "tableFrom": "member", + "tableTo": 
"organization", + "columnsFrom": [ + "organization_id" + ], + "columnsTo": [ + "id" + ], + "onDelete": "cascade", + "onUpdate": "no action" + }, + "member_user_id_user_id_fk": { + "name": "member_user_id_user_id_fk", + "tableFrom": "member", + "tableTo": "user", + "columnsFrom": [ + "user_id" + ], + "columnsTo": [ + "id" + ], + "onDelete": "cascade", + "onUpdate": "no action" + } + }, + "compositePrimaryKeys": {}, + "uniqueConstraints": {}, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.organization": { + "name": "organization", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "name": { + "name": "name", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "slug": { + "name": "slug", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "logo": { + "name": "logo", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "metadata": { + "name": "metadata", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "created_at": { + "name": "created_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + } + }, + "indexes": { + "organization_slug_idx": { + "name": "organization_slug_idx", + "columns": [ + { + "expression": "slug", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": true, + "concurrently": false, + "method": "btree", + "with": {} + } + }, + "foreignKeys": {}, + "compositePrimaryKeys": {}, + "uniqueConstraints": {}, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.organization_dek": { + "name": "organization_dek", + "schema": "", + "columns": { + "organization_id": { + "name": "organization_id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "encrypted_dek": { + "name": "encrypted_dek", + "type": "bytea", + "primaryKey": false, + "notNull": true + }, + "kms_key_alias": { + "name": 
"kms_key_alias", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "created_at": { + "name": "created_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + } + }, + "indexes": {}, + "foreignKeys": { + "organization_dek_organization_id_organization_id_fk": { + "name": "organization_dek_organization_id_organization_id_fk", + "tableFrom": "organization_dek", + "tableTo": "organization", + "columnsFrom": [ + "organization_id" + ], + "columnsTo": [ + "id" + ], + "onDelete": "cascade", + "onUpdate": "no action" + } + }, + "compositePrimaryKeys": {}, + "uniqueConstraints": {}, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.organization_state": { + "name": "organization_state", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "organization_id": { + "name": "organization_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "stack": { + "name": "stack", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "stage": { + "name": "stage", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "fqn": { + "name": "fqn", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "nonce": { + "name": "nonce", + "type": "bytea", + "primaryKey": false, + "notNull": true + }, + "encrypted_blob": { + "name": "encrypted_blob", + "type": "bytea", + "primaryKey": false, + "notNull": true + }, + "version": { + "name": "version", + "type": "integer", + "primaryKey": false, + "notNull": true, + "default": 1 + }, + "created_at": { + "name": "created_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + }, + "updated_at": { + "name": "updated_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + } + }, + "indexes": { + "organization_state_unique_idx": { + "name": "organization_state_unique_idx", + "columns": 
[ + { + "expression": "organization_id", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "stack", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "stage", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "fqn", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": true, + "concurrently": false, + "method": "btree", + "with": {} + }, + "organization_state_stage_idx": { + "name": "organization_state_stage_idx", + "columns": [ + { + "expression": "organization_id", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "stack", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "stage", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + } + }, + "foreignKeys": { + "organization_state_organization_id_organization_id_fk": { + "name": "organization_state_organization_id_organization_id_fk", + "tableFrom": "organization_state", + "tableTo": "organization", + "columnsFrom": [ + "organization_id" + ], + "columnsTo": [ + "id" + ], + "onDelete": "cascade", + "onUpdate": "no action" + } + }, + "compositePrimaryKeys": {}, + "uniqueConstraints": {}, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.polar_event": { + "name": "polar_event", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "polar_event_id": { + "name": "polar_event_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "event_type": { + "name": "event_type", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "payload": { + "name": "payload", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "processed_at": { + "name": "processed_at", + "type": "timestamp", + "primaryKey": false, + 
"notNull": true, + "default": "now()" + } + }, + "indexes": { + "polar_event_polar_id_idx": { + "name": "polar_event_polar_id_idx", + "columns": [ + { + "expression": "polar_event_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": true, + "concurrently": false, + "method": "btree", + "with": {} + } + }, + "foreignKeys": {}, + "compositePrimaryKeys": {}, + "uniqueConstraints": {}, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.user_subscription": { + "name": "user_subscription", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "user_id": { + "name": "user_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "polar_customer_id": { + "name": "polar_customer_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "polar_subscription_id": { + "name": "polar_subscription_id", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "plan": { + "name": "plan", + "type": "text", + "primaryKey": false, + "notNull": true, + "default": "'free'" + }, + "status": { + "name": "status", + "type": "text", + "primaryKey": false, + "notNull": true, + "default": "'active'" + }, + "current_period_end": { + "name": "current_period_end", + "type": "timestamp", + "primaryKey": false, + "notNull": false + }, + "cancel_at_period_end": { + "name": "cancel_at_period_end", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "created_at": { + "name": "created_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + }, + "updated_at": { + "name": "updated_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + } + }, + "indexes": { + "user_subscription_user_idx": { + "name": "user_subscription_user_idx", + "columns": [ + { + "expression": "user_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + 
"isUnique": true, + "concurrently": false, + "method": "btree", + "with": {} + }, + "user_subscription_polar_customer_idx": { + "name": "user_subscription_polar_customer_idx", + "columns": [ + { + "expression": "polar_customer_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + } + }, + "foreignKeys": { + "user_subscription_user_id_user_id_fk": { + "name": "user_subscription_user_id_user_id_fk", + "tableFrom": "user_subscription", + "tableTo": "user", + "columnsFrom": [ + "user_id" + ], + "columnsTo": [ + "id" + ], + "onDelete": "cascade", + "onUpdate": "no action" + } + }, + "compositePrimaryKeys": {}, + "uniqueConstraints": {}, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.beta_waitlist": { + "name": "beta_waitlist", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "email": { + "name": "email", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "name": { + "name": "name", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "company": { + "name": "company", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "role": { + "name": "role", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "source": { + "name": "source", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "notes": { + "name": "notes", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "referrer": { + "name": "referrer", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "user_agent": { + "name": "user_agent", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "ip_hash": { + "name": "ip_hash", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "invited_at": { + "name": "invited_at", + "type": "timestamp", + "primaryKey": false, + "notNull": false + }, + 
"created_at": { + "name": "created_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + } + }, + "indexes": { + "beta_waitlist_email_uniq": { + "name": "beta_waitlist_email_uniq", + "columns": [ + { + "expression": "email", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": true, + "concurrently": false, + "method": "btree", + "with": {} + }, + "beta_waitlist_created_idx": { + "name": "beta_waitlist_created_idx", + "columns": [ + { + "expression": "created_at", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + } + }, + "foreignKeys": {}, + "compositePrimaryKeys": {}, + "uniqueConstraints": {}, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + } + }, + "enums": {}, + "schemas": {}, + "sequences": {}, + "roles": {}, + "policies": {}, + "views": {}, + "_meta": { + "columns": {}, + "schemas": {}, + "tables": {} + } +} \ No newline at end of file diff --git a/packages/db/drizzle/meta/_journal.json b/packages/db/drizzle/meta/_journal.json new file mode 100644 index 00000000..5a197aa1 --- /dev/null +++ b/packages/db/drizzle/meta/_journal.json @@ -0,0 +1,13 @@ +{ + "version": "7", + "dialect": "postgresql", + "entries": [ + { + "idx": 0, + "version": "7", + "when": 1777635345492, + "tag": "0000_init", + "breakpoints": true + } + ] +} \ No newline at end of file diff --git a/packages/db/package.json b/packages/db/package.json index 6e38c09c..8e62abb5 100644 --- a/packages/db/package.json +++ b/packages/db/package.json @@ -18,9 +18,9 @@ "scripts": { "db:up": "alchemy deploy", "db:down": "alchemy destroy", - "db:push": "drizzle-kit push", "db:studio": "drizzle-kit studio", - "db:generate": "drizzle-kit generate", + "db:generate": "drizzle-kit generate && bun run db:bundle", + "db:bundle": "bun run scripts/bundle-migrations.ts", "db:migrate": "drizzle-kit migrate", "cf:typegen": "wrangler 
types --env-interface CloudflareEnv cloudflare-env.d.ts" }, diff --git a/packages/db/scripts/bundle-migrations.ts b/packages/db/scripts/bundle-migrations.ts new file mode 100644 index 00000000..452089b1 --- /dev/null +++ b/packages/db/scripts/bundle-migrations.ts @@ -0,0 +1,98 @@ +#!/usr/bin/env bun +// Bundle the file-based Drizzle migrations under `drizzle/` into a TypeScript +// module the runtime can import without a filesystem. Cloudflare Workers (and +// our SSR/dev pipeline) bundle every imported module at build time, so we +// inline each `0000_*.sql` next to its journal entry — that way `runMigrations` +// in `src/migrate.ts` works identically on Node, Bun, Workers, and tests +// without any filesystem shim. +// +// Run via `bun run --cwd packages/db db:generate` (which invokes +// `drizzle-kit generate && bun run db:bundle`) — never edit the generated +// `src/migrations-bundle.generated.ts` by hand. + +import { existsSync, readFileSync, readdirSync, writeFileSync } from "node:fs"; +import { join, resolve } from "node:path"; + +const DB_ROOT = resolve(import.meta.dirname, ".."); +const DRIZZLE_DIR = join(DB_ROOT, "drizzle"); +const JOURNAL_PATH = join(DRIZZLE_DIR, "meta", "_journal.json"); +const OUT_PATH = join(DB_ROOT, "src", "migrations-bundle.generated.ts"); + +interface JournalEntry { + idx: number; + version: string; + when: number; + tag: string; + breakpoints: boolean; +} + +interface Journal { + version: string; + dialect: string; + entries: JournalEntry[]; +} + +if (!existsSync(JOURNAL_PATH)) { + console.error( + `[db:bundle] no journal at ${JOURNAL_PATH} — did you run \`drizzle-kit generate\` first?`, + ); + process.exit(1); +} + +const journal = JSON.parse(readFileSync(JOURNAL_PATH, "utf-8")) as Journal; +const sqlFiles = new Set( + readdirSync(DRIZZLE_DIR).filter((f) => f.endsWith(".sql")), +); + +const bundle = journal.entries + .slice() + .sort((a, b) => a.idx - b.idx) + .map((entry) => { + const fileName = `${entry.tag}.sql`; + if 
(!sqlFiles.has(fileName)) { + throw new Error( + `[db:bundle] journal entry ${entry.tag} (idx ${entry.idx}) has no matching SQL file at drizzle/${fileName}`, + ); + } + const sql = readFileSync(join(DRIZZLE_DIR, fileName), "utf-8"); + return { ...entry, sql }; + }); + +const header = `// Auto-generated by packages/db/scripts/bundle-migrations.ts. +// Do NOT edit by hand — re-run \`bun run --cwd packages/db db:generate\` +// (or \`bun run --cwd packages/db db:bundle\` if you only changed the bundler). +// +// Each entry mirrors a row in drizzle/meta/_journal.json with the matching +// SQL inlined, so runtime callers (Cloudflare Workers, Node, Bun, tests) can +// apply migrations without filesystem access. +`; + +const body = `export interface BundledMigration { + /** Sort order — matches the \`idx\` field in drizzle/meta/_journal.json. */ + readonly idx: number; + /** Tag — drizzle's stable, content-derived identifier (also the filename). */ + readonly tag: string; + /** Drizzle Kit version that produced the SQL. */ + readonly version: string; + /** Wall-clock ms when drizzle generated the migration. */ + readonly when: number; + /** Whether drizzle wrote \`--> statement-breakpoint\` separators. */ + readonly breakpoints: boolean; + /** Raw SQL contents of drizzle/<tag>.sql, untouched. */ + readonly sql: string; +} + +export const MIGRATIONS_BUNDLE: readonly BundledMigration[] = ${JSON.stringify( + bundle, + null, + 2, +)} as const; + +export const JOURNAL_VERSION = ${JSON.stringify(journal.version)} as const; +export const JOURNAL_DIALECT = ${JSON.stringify(journal.dialect)} as const; +`; + +writeFileSync(OUT_PATH, `${header}\n${body}`); +console.log( + `[db:bundle] wrote ${bundle.length} migration${bundle.length === 1 ?
"" : "s"} to ${OUT_PATH}`, +); diff --git a/packages/db/src/index.ts b/packages/db/src/index.ts index 3154d4a2..fc289e19 100644 --- a/packages/db/src/index.ts +++ b/packages/db/src/index.ts @@ -14,13 +14,19 @@ const schema = { ...waitlist, }; -let _db: ReturnType | undefined; +/** + * Concrete drizzle client type — used by `runMigrations()` and any other + * code that wants to type a `db` parameter without importing the proxy. + */ +export type Db = ReturnType>; + +let _db: Db | undefined; /** * Drizzle client for Postgres via Hyperdrive (Cloudflare) or * DATABASE_URL (local dev). Lazily initialized and cached. */ -export function getDb(connectionString?: string): ReturnType { +export function getDb(connectionString?: string): Db { if (_db) return _db; const url = connectionString || process.env.DATABASE_URL; @@ -36,10 +42,11 @@ export function getDb(connectionString?: string): ReturnType { /** * @deprecated Use getDb() instead. Kept for backward compatibility. */ -export const db = new Proxy({} as ReturnType, { +export const db = new Proxy({} as Db, { get(_, prop) { return (getDb() as any)[prop]; }, }); +export { runMigrations } from "./migrate"; export { auth, organization, state, subscription, waitlist }; diff --git a/packages/db/src/migrate.ts b/packages/db/src/migrate.ts new file mode 100644 index 00000000..4517fc43 --- /dev/null +++ b/packages/db/src/migrate.ts @@ -0,0 +1,119 @@ +// Programmatic Drizzle migrator that runs at app startup. +// +// Why a custom migrator? +// `drizzle-orm/node-postgres/migrator` reads SQL files at runtime via +// `node:fs`, which is empty inside a Cloudflare Worker bundle. We pre-bundle +// every migration into `migrations-bundle.generated.ts` (see +// `scripts/bundle-migrations.ts`) so the SQL ships with the Worker, then +// apply each entry in order against `__drizzle_migrations`. +// +// Concurrency: +// Cloudflare may spin up many isolates simultaneously, each calling +// `runMigrations()` on cold start. 
We grab a Postgres `pg_advisory_lock` +// under a stable key while applying — `__drizzle_migrations` row inserts +// then short-circuit any duplicate work. Per-isolate, the function caches +// the in-flight Promise so repeated callers reuse the same migrate run. +// +// Re-entrancy after failure: +// On error the cached Promise is cleared so the next request retries from +// scratch (rather than wedging the isolate forever on a transient DB blip). + +import { sql } from "drizzle-orm"; +import { type BundledMigration, MIGRATIONS_BUNDLE } from "./migrations-bundle.generated"; +import type { Db } from "./index"; + +// Stable key for `pg_advisory_lock`. The number is arbitrary but must stay +// stable forever — it is the ASCII bytes of 'MIGR' (0x4D 0x49 0x47 0x52), +// chosen once so it won't collide with any other advisory lock in the database. +const MIGRATION_LOCK_KEY = 0x4d_49_47_52n; // 'MIGR' as bigint + +let inflight: Promise<void> | null = null; + +/** + * Apply every bundled migration that hasn't run yet against `db`. + * + * Idempotent and safe to call from many concurrent isolates: a single + * Postgres advisory lock serialises real work, and the per-isolate + * `inflight` cache deduplicates repeated calls within the same JS runtime.
+ * + * @example + * import { db, runMigrations } from "@stackpanel/db"; + * await runMigrations(db); + */ +export async function runMigrations(db: Db): Promise<void> { + if (inflight) return inflight; + inflight = applyMigrations(db).catch((err) => { + inflight = null; + throw err; + }); + return inflight; +} + +async function applyMigrations(db: Db): Promise<void> { + await db.execute(sql`SELECT pg_advisory_lock(${MIGRATION_LOCK_KEY})`); + try { + await db.execute(sql` + CREATE TABLE IF NOT EXISTS "__drizzle_migrations" ( + "id" SERIAL PRIMARY KEY, + "hash" TEXT NOT NULL UNIQUE, + "created_at" BIGINT NOT NULL + ) + `); + + const result = await db.execute<{ hash: string }>( + sql`SELECT "hash" FROM "__drizzle_migrations"`, + ); + const applied = new Set(result.rows.map((r) => r.hash)); + + const ordered = [...MIGRATIONS_BUNDLE].sort((a, b) => a.idx - b.idx); + for (const entry of ordered) { + if (applied.has(entry.tag)) continue; + await applyOne(db, entry); + } + } finally { + await db + .execute(sql`SELECT pg_advisory_unlock(${MIGRATION_LOCK_KEY})`) + .catch(() => { + // pg_advisory_unlock can fail after a connection blip — losing the + // lock just means the next process can re-acquire, so swallow. + }); + } +} + +async function applyOne(db: Db, entry: BundledMigration): Promise<void> { + const statements = splitStatements(entry.sql, entry.breakpoints); + await db.transaction(async (tx) => { + for (const stmt of statements) { + if (!stmt) continue; + await tx.execute(sql.raw(stmt)); + } + await tx.execute( + sql`INSERT INTO "__drizzle_migrations" ("hash", "created_at") VALUES (${entry.tag}, ${Date.now()})`, + ); + }); +} + +function splitStatements(rawSql: string, hasBreakpoints: boolean): string[] { + // Drizzle inserts `--> statement-breakpoint` between statements when it + // generates the SQL. 
Splitting on the breakpoint preserves multi-statement + // semantics (each gets its own `tx.execute`) and avoids client-side + // SQL parsing bugs. Note each migration is applied inside a single + // transaction, so statements like `CREATE INDEX CONCURRENTLY` (which + // Postgres forbids in a transaction block) cannot be used here. Older + // migrations without breakpoints fall back to a + // single execute — Postgres handles multi-statement strings just fine. + if (!hasBreakpoints) return [rawSql.trim()]; + return rawSql + .split(/-->\s*statement-breakpoint/i) + .map((s) => s.trim()) + .filter((s) => s.length > 0); +} + +/** + * Test-only escape hatch — clears the per-isolate dedup cache so a second + * `runMigrations(db)` actually runs against a fresh database in unit tests. + * Production callers should never need this. + * + * @internal + */ +export function __resetMigrationCache(): void { + inflight = null; +} diff --git a/packages/db/src/migrations-bundle.generated.ts b/packages/db/src/migrations-bundle.generated.ts new file mode 100644 index 00000000..740c17ef --- /dev/null +++ b/packages/db/src/migrations-bundle.generated.ts @@ -0,0 +1,36 @@ +// Auto-generated by packages/db/scripts/bundle-migrations.ts. +// Do NOT edit by hand — re-run `bun run --cwd packages/db db:generate` +// (or `bun run --cwd packages/db db:bundle` if you only changed the bundler). +// +// Each entry mirrors a row in drizzle/meta/_journal.json with the matching +// SQL inlined, so runtime callers (Cloudflare Workers, Node, Bun, tests) can +// apply migrations without filesystem access. + +export interface BundledMigration { + /** Sort order — matches the `idx` field in drizzle/meta/_journal.json. */ + readonly idx: number; + /** Tag — drizzle's stable, content-derived identifier (also the filename). */ + readonly tag: string; + /** Drizzle Kit version that produced the SQL. */ + readonly version: string; + /** Wall-clock ms when drizzle generated the migration. */ + readonly when: number; + /** Whether drizzle wrote `--> statement-breakpoint` separators. 
*/ + readonly breakpoints: boolean; + /** Raw SQL contents of drizzle/.sql, untouched. */ + readonly sql: string; +} + +export const MIGRATIONS_BUNDLE: readonly BundledMigration[] = [ + { + "idx": 0, + "version": "7", + "when": 1777635345492, + "tag": "0000_init", + "breakpoints": true, + "sql": "CREATE TABLE \"account\" (\n\t\"id\" text PRIMARY KEY NOT NULL,\n\t\"account_id\" text NOT NULL,\n\t\"provider_id\" text NOT NULL,\n\t\"user_id\" text NOT NULL,\n\t\"access_token\" text,\n\t\"refresh_token\" text,\n\t\"id_token\" text,\n\t\"access_token_expires_at\" timestamp,\n\t\"refresh_token_expires_at\" timestamp,\n\t\"scope\" text,\n\t\"password\" text,\n\t\"created_at\" timestamp DEFAULT now() NOT NULL,\n\t\"updated_at\" timestamp NOT NULL\n);\n--> statement-breakpoint\nCREATE TABLE \"session\" (\n\t\"id\" text PRIMARY KEY NOT NULL,\n\t\"expires_at\" timestamp NOT NULL,\n\t\"token\" text NOT NULL,\n\t\"created_at\" timestamp DEFAULT now() NOT NULL,\n\t\"updated_at\" timestamp NOT NULL,\n\t\"ip_address\" text,\n\t\"user_agent\" text,\n\t\"user_id\" text NOT NULL,\n\t\"active_organization_id\" text,\n\tCONSTRAINT \"session_token_unique\" UNIQUE(\"token\")\n);\n--> statement-breakpoint\nCREATE TABLE \"user\" (\n\t\"id\" text PRIMARY KEY NOT NULL,\n\t\"name\" text NOT NULL,\n\t\"email\" text NOT NULL,\n\t\"email_verified\" boolean DEFAULT false NOT NULL,\n\t\"image\" text,\n\t\"created_at\" timestamp DEFAULT now() NOT NULL,\n\t\"updated_at\" timestamp DEFAULT now() NOT NULL,\n\tCONSTRAINT \"user_email_unique\" UNIQUE(\"email\")\n);\n--> statement-breakpoint\nCREATE TABLE \"verification\" (\n\t\"id\" text PRIMARY KEY NOT NULL,\n\t\"identifier\" text NOT NULL,\n\t\"value\" text NOT NULL,\n\t\"expires_at\" timestamp NOT NULL,\n\t\"created_at\" timestamp DEFAULT now() NOT NULL,\n\t\"updated_at\" timestamp DEFAULT now() NOT NULL\n);\n--> statement-breakpoint\nCREATE TABLE \"invitation\" (\n\t\"id\" text PRIMARY KEY NOT NULL,\n\t\"email\" text NOT NULL,\n\t\"inviter_id\" text 
NOT NULL,\n\t\"organization_id\" text NOT NULL,\n\t\"role\" text,\n\t\"status\" text NOT NULL,\n\t\"created_at\" timestamp DEFAULT now() NOT NULL,\n\t\"expires_at\" timestamp NOT NULL\n);\n--> statement-breakpoint\nCREATE TABLE \"member\" (\n\t\"id\" text PRIMARY KEY NOT NULL,\n\t\"organization_id\" text NOT NULL,\n\t\"user_id\" text NOT NULL,\n\t\"role\" text NOT NULL,\n\t\"created_at\" timestamp DEFAULT now() NOT NULL\n);\n--> statement-breakpoint\nCREATE TABLE \"organization\" (\n\t\"id\" text PRIMARY KEY NOT NULL,\n\t\"name\" text NOT NULL,\n\t\"slug\" text NOT NULL,\n\t\"logo\" text,\n\t\"metadata\" text,\n\t\"created_at\" timestamp DEFAULT now() NOT NULL\n);\n--> statement-breakpoint\nCREATE TABLE \"organization_dek\" (\n\t\"organization_id\" text PRIMARY KEY NOT NULL,\n\t\"encrypted_dek\" \"bytea\" NOT NULL,\n\t\"kms_key_alias\" text NOT NULL,\n\t\"created_at\" timestamp DEFAULT now() NOT NULL\n);\n--> statement-breakpoint\nCREATE TABLE \"organization_state\" (\n\t\"id\" text PRIMARY KEY NOT NULL,\n\t\"organization_id\" text NOT NULL,\n\t\"stack\" text NOT NULL,\n\t\"stage\" text NOT NULL,\n\t\"fqn\" text NOT NULL,\n\t\"nonce\" \"bytea\" NOT NULL,\n\t\"encrypted_blob\" \"bytea\" NOT NULL,\n\t\"version\" integer DEFAULT 1 NOT NULL,\n\t\"created_at\" timestamp DEFAULT now() NOT NULL,\n\t\"updated_at\" timestamp DEFAULT now() NOT NULL\n);\n--> statement-breakpoint\nCREATE TABLE \"polar_event\" (\n\t\"id\" text PRIMARY KEY NOT NULL,\n\t\"polar_event_id\" text NOT NULL,\n\t\"event_type\" text NOT NULL,\n\t\"payload\" text NOT NULL,\n\t\"processed_at\" timestamp DEFAULT now() NOT NULL\n);\n--> statement-breakpoint\nCREATE TABLE \"user_subscription\" (\n\t\"id\" text PRIMARY KEY NOT NULL,\n\t\"user_id\" text NOT NULL,\n\t\"polar_customer_id\" text NOT NULL,\n\t\"polar_subscription_id\" text,\n\t\"plan\" text DEFAULT 'free' NOT NULL,\n\t\"status\" text DEFAULT 'active' NOT NULL,\n\t\"current_period_end\" timestamp,\n\t\"cancel_at_period_end\" text,\n\t\"created_at\" 
timestamp DEFAULT now() NOT NULL,\n\t\"updated_at\" timestamp DEFAULT now() NOT NULL\n);\n--> statement-breakpoint\nCREATE TABLE \"beta_waitlist\" (\n\t\"id\" text PRIMARY KEY NOT NULL,\n\t\"email\" text NOT NULL,\n\t\"name\" text,\n\t\"company\" text,\n\t\"role\" text,\n\t\"source\" text,\n\t\"notes\" text,\n\t\"referrer\" text,\n\t\"user_agent\" text,\n\t\"ip_hash\" text,\n\t\"invited_at\" timestamp,\n\t\"created_at\" timestamp DEFAULT now() NOT NULL\n);\n--> statement-breakpoint\nALTER TABLE \"account\" ADD CONSTRAINT \"account_user_id_user_id_fk\" FOREIGN KEY (\"user_id\") REFERENCES \"public\".\"user\"(\"id\") ON DELETE cascade ON UPDATE no action;--> statement-breakpoint\nALTER TABLE \"session\" ADD CONSTRAINT \"session_user_id_user_id_fk\" FOREIGN KEY (\"user_id\") REFERENCES \"public\".\"user\"(\"id\") ON DELETE cascade ON UPDATE no action;--> statement-breakpoint\nALTER TABLE \"invitation\" ADD CONSTRAINT \"invitation_inviter_id_user_id_fk\" FOREIGN KEY (\"inviter_id\") REFERENCES \"public\".\"user\"(\"id\") ON DELETE cascade ON UPDATE no action;--> statement-breakpoint\nALTER TABLE \"invitation\" ADD CONSTRAINT \"invitation_organization_id_organization_id_fk\" FOREIGN KEY (\"organization_id\") REFERENCES \"public\".\"organization\"(\"id\") ON DELETE cascade ON UPDATE no action;--> statement-breakpoint\nALTER TABLE \"member\" ADD CONSTRAINT \"member_organization_id_organization_id_fk\" FOREIGN KEY (\"organization_id\") REFERENCES \"public\".\"organization\"(\"id\") ON DELETE cascade ON UPDATE no action;--> statement-breakpoint\nALTER TABLE \"member\" ADD CONSTRAINT \"member_user_id_user_id_fk\" FOREIGN KEY (\"user_id\") REFERENCES \"public\".\"user\"(\"id\") ON DELETE cascade ON UPDATE no action;--> statement-breakpoint\nALTER TABLE \"organization_dek\" ADD CONSTRAINT \"organization_dek_organization_id_organization_id_fk\" FOREIGN KEY (\"organization_id\") REFERENCES \"public\".\"organization\"(\"id\") ON DELETE cascade ON UPDATE no action;--> 
statement-breakpoint\nALTER TABLE \"organization_state\" ADD CONSTRAINT \"organization_state_organization_id_organization_id_fk\" FOREIGN KEY (\"organization_id\") REFERENCES \"public\".\"organization\"(\"id\") ON DELETE cascade ON UPDATE no action;--> statement-breakpoint\nALTER TABLE \"user_subscription\" ADD CONSTRAINT \"user_subscription_user_id_user_id_fk\" FOREIGN KEY (\"user_id\") REFERENCES \"public\".\"user\"(\"id\") ON DELETE cascade ON UPDATE no action;--> statement-breakpoint\nCREATE INDEX \"account_userId_idx\" ON \"account\" USING btree (\"user_id\");--> statement-breakpoint\nCREATE INDEX \"session_userId_idx\" ON \"session\" USING btree (\"user_id\");--> statement-breakpoint\nCREATE INDEX \"verification_identifier_idx\" ON \"verification\" USING btree (\"identifier\");--> statement-breakpoint\nCREATE INDEX \"invitation_org_idx\" ON \"invitation\" USING btree (\"organization_id\");--> statement-breakpoint\nCREATE UNIQUE INDEX \"member_org_user_idx\" ON \"member\" USING btree (\"organization_id\",\"user_id\");--> statement-breakpoint\nCREATE INDEX \"member_user_idx\" ON \"member\" USING btree (\"user_id\");--> statement-breakpoint\nCREATE UNIQUE INDEX \"organization_slug_idx\" ON \"organization\" USING btree (\"slug\");--> statement-breakpoint\nCREATE UNIQUE INDEX \"organization_state_unique_idx\" ON \"organization_state\" USING btree (\"organization_id\",\"stack\",\"stage\",\"fqn\");--> statement-breakpoint\nCREATE INDEX \"organization_state_stage_idx\" ON \"organization_state\" USING btree (\"organization_id\",\"stack\",\"stage\");--> statement-breakpoint\nCREATE UNIQUE INDEX \"polar_event_polar_id_idx\" ON \"polar_event\" USING btree (\"polar_event_id\");--> statement-breakpoint\nCREATE UNIQUE INDEX \"user_subscription_user_idx\" ON \"user_subscription\" USING btree (\"user_id\");--> statement-breakpoint\nCREATE INDEX \"user_subscription_polar_customer_idx\" ON \"user_subscription\" USING btree (\"polar_customer_id\");--> 
statement-breakpoint\nCREATE UNIQUE INDEX \"beta_waitlist_email_uniq\" ON \"beta_waitlist\" USING btree (\"email\");--> statement-breakpoint\nCREATE INDEX \"beta_waitlist_created_idx\" ON \"beta_waitlist\" USING btree (\"created_at\");" + } +] as const; + +export const JOURNAL_VERSION = "7" as const; +export const JOURNAL_DIALECT = "postgresql" as const; diff --git a/turbo.json b/turbo.json index 9b6ce268..2152d17e 100644 --- a/turbo.json +++ b/turbo.json @@ -1 +1 @@ -{"$schema":"https://turbo.build/schema.json","tasks":{"alchemy:deploy":{"cache":false},"alchemy:destroy":{"cache":false},"alchemy:ensure":{},"build":{},"build:container":{"cache":false},"clean":{},"container:build":{"cache":false,"dependsOn":["build:container"]},"container:push":{"cache":false,"dependsOn":["container:build"]},"db:migrate":{},"db:push":{},"db:studio":{},"deploy":{"cache":false,"dependsOn":["container:push"]},"dev":{"cache":false},"format":{},"generate:proto":{},"generate:types":{},"lint":{},"test":{},"test:coverage":{},"test:watch":{},"typecheck":{}},"ui":"tui"} \ No newline at end of file +{"$schema":"https://turbo.build/schema.json","tasks":{"alchemy:deploy":{"cache":false},"alchemy:destroy":{"cache":false},"alchemy:ensure":{},"build":{},"build:container":{"cache":false},"clean":{},"container:build":{"cache":false,"dependsOn":["build:container"]},"container:push":{"cache":false,"dependsOn":["container:build"]},"db:generate":{},"db:migrate":{},"db:studio":{},"deploy":{"cache":false,"dependsOn":["container:push"]},"dev":{"cache":false},"format":{},"generate:proto":{},"generate:types":{},"lint":{},"test":{},"test:coverage":{},"test:watch":{},"typecheck":{}},"ui":"tui"} \ No newline at end of file From 8aae887f6cf154fa62796ebb5b0a0d332f9c7608 Mon Sep 17 00:00:00 2001 From: Cooper Maruyama Date: Fri, 1 May 2026 04:54:03 -0700 Subject: [PATCH 2/3] fix(db): fast-forward bundled migrations on externally-managed schemas MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 
Content-Transfer-Encoding: 8bit Adds a one-time safety net to runMigrations(): if `__drizzle_migrations` doesn't exist but the public schema already has tables (the legacy `db:push` flow's footprint), assume the schema was managed externally and pre-populate `__drizzle_migrations` with every bundled tag instead of trying to re-run the baseline `CREATE TABLE` statements. Without this, the first deploy after this change against production or staging (where the schema already exists from prior `db:push` runs) would crash on `relation "account" already exists`. With this, the first boot is a one-shot fast-forward; subsequent boots see the table and the normal diff-and-apply flow takes over. Fresh databases (PR previews, new dev installs) skip this branch — no existing tables means no inference, and the baseline migration runs normally. --- packages/db/src/migrate.ts | 45 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 45 insertions(+) diff --git a/packages/db/src/migrate.ts b/packages/db/src/migrate.ts index 4517fc43..b57c311a 100644 --- a/packages/db/src/migrate.ts +++ b/packages/db/src/migrate.ts @@ -52,6 +52,19 @@ export async function runMigrations(db: Db): Promise { async function applyMigrations(db: Db): Promise { await db.execute(sql`SELECT pg_advisory_lock(${MIGRATION_LOCK_KEY})`); try { + // Snapshot whether `__drizzle_migrations` exists *before* we CREATE it, + // so we can distinguish "fresh DB" from "DB managed by a prior tool" + // (the legacy `db:push` flow) when deciding whether to apply 0000_init. 
+ const tableCheck = await db.execute<{ exists: boolean }>(sql` + SELECT EXISTS ( + SELECT 1 + FROM pg_tables + WHERE schemaname = 'public' + AND tablename = '__drizzle_migrations' + ) AS "exists" + `); + const migrationsTableExisted = Boolean(tableCheck.rows[0]?.exists); + await db.execute(sql` CREATE TABLE IF NOT EXISTS "__drizzle_migrations" ( "id" SERIAL PRIMARY KEY, @@ -60,6 +73,38 @@ async function applyMigrations(db: Db): Promise { ) `); + // Production-rollout safety net: if we just created `__drizzle_migrations` + // and the public schema already has tables, infer the schema was managed + // externally (the legacy `db:push` flow) and fast-forward every bundled + // migration to "already applied". Without this, the first deploy after + // this change would try to `CREATE TABLE "account"` against a DB that + // already has it and abort the migrate run. This is a one-time event + // per environment; subsequent deploys see the table and the normal + // diff-and-apply flow takes over. + if (!migrationsTableExisted) { + const otherTables = await db.execute<{ count: number }>(sql` + SELECT COUNT(*)::int AS "count" + FROM pg_tables + WHERE schemaname = 'public' + AND tablename != '__drizzle_migrations' + `); + const hasExistingSchema = (otherTables.rows[0]?.count ?? 
0) > 0; + if (hasExistingSchema) { + console.warn( + "[runMigrations] detected an externally-managed Postgres schema; " + + "fast-forwarding bundled migrations to applied without re-running SQL", + ); + for (const entry of MIGRATIONS_BUNDLE) { + await db.execute(sql` + INSERT INTO "__drizzle_migrations" ("hash", "created_at") + VALUES (${entry.tag}, ${Date.now()}) + ON CONFLICT ("hash") DO NOTHING + `); + } + return; + } + } + const result = await db.execute<{ hash: string }>( sql`SELECT "hash" FROM "__drizzle_migrations"`, ); From f99d3b31c072674c0471f66e47573d91a8186cc0 Mon Sep 17 00:00:00 2001 From: Cooper Maruyama Date: Fri, 1 May 2026 04:58:47 -0700 Subject: [PATCH 3/3] fix(db): use transaction-scoped advisory lock to avoid pool deadlock MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The previous `pg_advisory_lock()` call was session-scoped — and pg.Pool returns the underlying connection (and therefore the held lock) back to the pool on `db.execute` completion. Subsequent isolates checking out the same pooled connection would see "lock already held by your own session" and block forever, which manifested as Cloudflare 522 timeouts on every request to the PR-25 preview after deploy. Switches to `pg_advisory_xact_lock` inside a per-migration transaction so the lock auto-releases at COMMIT/ROLLBACK regardless of pool lifecycle, and re-checks `__drizzle_migrations` membership inside the lock to handle the case where another isolate raced ahead and applied the same migration first. Also drops the JS bigint lock-key constant in favour of a plain number — drizzle's `sql` template binds bigint via the pg `int8` type parser which has had subtle compatibility issues across pg versions, and the lock key fits comfortably in int4. 
--- packages/db/src/migrate.ts | 169 +++++++++++++++++++------------------ 1 file changed, 86 insertions(+), 83 deletions(-) diff --git a/packages/db/src/migrate.ts b/packages/db/src/migrate.ts index b57c311a..313f9c56 100644 --- a/packages/db/src/migrate.ts +++ b/packages/db/src/migrate.ts @@ -9,32 +9,37 @@ // // Concurrency: // Cloudflare may spin up many isolates simultaneously, each calling -// `runMigrations()` on cold start. We grab a Postgres `pg_advisory_lock` -// under a stable key while applying — `__drizzle_migrations` row inserts -// then short-circuit any duplicate work. Per-isolate, the function caches -// the in-flight Promise so repeated callers reuse the same migrate run. +// `runMigrations()` on cold start. We grab a transaction-scoped Postgres +// advisory lock around each individual migration so concurrent isolates +// serialise correctly. We deliberately do *not* use the session-scoped +// `pg_advisory_lock` — that lock persists when the pg.Pool connection is +// returned to the pool, so the next checkout would see "already locked +// by you" semantics that deadlock the next isolate. Transaction-scoped +// locks (`pg_advisory_xact_lock`) auto-release at COMMIT/ROLLBACK and +// are immune to pool lifecycle weirdness. // -// Re-entrancy after failure: -// On error the cached Promise is cleared so the next request retries from -// scratch (rather than wedging the isolate forever on a transient DB blip). +// Per-isolate, the function caches the in-flight Promise so repeated +// callers reuse the same migrate run. On error the cache is cleared so +// the next request retries from scratch. import { sql } from "drizzle-orm"; -import { type BundledMigration, MIGRATIONS_BUNDLE } from "./migrations-bundle.generated"; import type { Db } from "./index"; +import { type BundledMigration, MIGRATIONS_BUNDLE } from "./migrations-bundle.generated"; -// Stable key for `pg_advisory_lock`. 
The number is arbitrary but must stay -// stable forever — picked once via `crc32("stackpanel.db.migrations")` so it -// won't collide with any other advisory lock in the database. -const MIGRATION_LOCK_KEY = 0x4d_49_47_52n; // 'MIGR' as bigint +// Stable lock key. Picked once and never changed — must fit in a +// Postgres int8 (bigint). Using a plain JS number keeps drizzle's +// parameter binding simple (no bigint serialisation surprises). +const MIGRATION_LOCK_KEY = 1_296_127_570; // crc32-ish "stackpanel.db.migrations" let inflight: Promise | null = null; /** * Apply every bundled migration that hasn't run yet against `db`. * - * Idempotent and safe to call from many concurrent isolates: a single - * Postgres advisory lock serialises real work, and the per-isolate - * `inflight` cache deduplicates repeated calls within the same JS runtime. + * Idempotent and safe to call from many concurrent isolates: each pending + * migration runs inside its own transaction with a `pg_advisory_xact_lock`, + * and the per-isolate `inflight` cache deduplicates repeated calls within + * the same JS runtime. * * @example * import { db, runMigrations } from "@stackpanel/db"; @@ -50,84 +55,82 @@ export async function runMigrations(db: Db): Promise { } async function applyMigrations(db: Db): Promise { - await db.execute(sql`SELECT pg_advisory_lock(${MIGRATION_LOCK_KEY})`); - try { - // Snapshot whether `__drizzle_migrations` exists *before* we CREATE it, - // so we can distinguish "fresh DB" from "DB managed by a prior tool" - // (the legacy `db:push` flow) when deciding whether to apply 0000_init. 
- const tableCheck = await db.execute<{ exists: boolean }>(sql` - SELECT EXISTS ( - SELECT 1 - FROM pg_tables - WHERE schemaname = 'public' - AND tablename = '__drizzle_migrations' - ) AS "exists" - `); - const migrationsTableExisted = Boolean(tableCheck.rows[0]?.exists); + // Snapshot whether `__drizzle_migrations` exists *before* we CREATE it, + // so we can distinguish "fresh DB" from "DB managed by a prior tool" + // (the legacy `db:push` flow) when deciding whether to apply 0000_init. + const tableCheck = await db.execute<{ exists: boolean }>(sql` + SELECT EXISTS ( + SELECT 1 + FROM pg_tables + WHERE schemaname = 'public' + AND tablename = '__drizzle_migrations' + ) AS "exists" + `); + const migrationsTableExisted = Boolean(tableCheck.rows[0]?.exists); - await db.execute(sql` - CREATE TABLE IF NOT EXISTS "__drizzle_migrations" ( - "id" SERIAL PRIMARY KEY, - "hash" TEXT NOT NULL UNIQUE, - "created_at" BIGINT NOT NULL - ) - `); + await db.execute(sql` + CREATE TABLE IF NOT EXISTS "__drizzle_migrations" ( + "id" SERIAL PRIMARY KEY, + "hash" TEXT NOT NULL UNIQUE, + "created_at" BIGINT NOT NULL + ) + `); - // Production-rollout safety net: if we just created `__drizzle_migrations` - // and the public schema already has tables, infer the schema was managed - // externally (the legacy `db:push` flow) and fast-forward every bundled - // migration to "already applied". Without this, the first deploy after - // this change would try to `CREATE TABLE "account"` against a DB that - // already has it and abort the migrate run. This is a one-time event - // per environment; subsequent deploys see the table and the normal - // diff-and-apply flow takes over. - if (!migrationsTableExisted) { - const otherTables = await db.execute<{ count: number }>(sql` - SELECT COUNT(*)::int AS "count" - FROM pg_tables - WHERE schemaname = 'public' - AND tablename != '__drizzle_migrations' - `); - const hasExistingSchema = (otherTables.rows[0]?.count ?? 
0) > 0; - if (hasExistingSchema) { - console.warn( - "[runMigrations] detected an externally-managed Postgres schema; " + - "fast-forwarding bundled migrations to applied without re-running SQL", - ); - for (const entry of MIGRATIONS_BUNDLE) { - await db.execute(sql` - INSERT INTO "__drizzle_migrations" ("hash", "created_at") - VALUES (${entry.tag}, ${Date.now()}) - ON CONFLICT ("hash") DO NOTHING - `); - } - return; + // Production-rollout safety net: if we just created `__drizzle_migrations` + // and the public schema already has tables, infer the schema was managed + // externally (the legacy `db:push` flow) and fast-forward every bundled + // migration to "already applied". Without this, the first deploy after + // this change would try to `CREATE TABLE "account"` against a DB that + // already has it and abort the migrate run. This is a one-time event + // per environment; subsequent deploys see the table and the normal + // diff-and-apply flow takes over. + if (!migrationsTableExisted) { + const otherTables = await db.execute<{ count: number }>(sql` + SELECT COUNT(*)::int AS "count" + FROM pg_tables + WHERE schemaname = 'public' + AND tablename != '__drizzle_migrations' + `); + const hasExistingSchema = (otherTables.rows[0]?.count ?? 
0) > 0; + if (hasExistingSchema) { + console.warn( + "[runMigrations] detected an externally-managed Postgres schema; " + + "fast-forwarding bundled migrations to applied without re-running SQL", + ); + for (const entry of MIGRATIONS_BUNDLE) { + await db.execute(sql` + INSERT INTO "__drizzle_migrations" ("hash", "created_at") + VALUES (${entry.tag}, ${Date.now()}) + ON CONFLICT ("hash") DO NOTHING + `); } + return; } + } - const result = await db.execute<{ hash: string }>( - sql`SELECT "hash" FROM "__drizzle_migrations"`, - ); - const applied = new Set(result.rows.map((r) => r.hash)); - - const ordered = [...MIGRATIONS_BUNDLE].sort((a, b) => a.idx - b.idx); - for (const entry of ordered) { - if (applied.has(entry.tag)) continue; - await applyOne(db, entry); - } - } finally { - await db - .execute(sql`SELECT pg_advisory_unlock(${MIGRATION_LOCK_KEY})`) - .catch(() => { - // pg_advisory_unlock can fail after a connection blip — losing the - // lock just means the next process can re-acquire, so swallow. - }); + // Apply pending migrations one at a time. Each migration runs inside its + // own transaction with a `pg_advisory_xact_lock` — the lock serialises + // concurrent isolates, the transaction makes SQL + journal-row insert + // atomic, and the lock auto-releases at COMMIT so we don't leak it back + // into the connection pool. + const ordered = [...MIGRATIONS_BUNDLE].sort((a, b) => a.idx - b.idx); + for (const entry of ordered) { + await applyOne(db, entry); } } async function applyOne(db: Db, entry: BundledMigration): Promise { - const statements = splitStatements(entry.sql, entry.breakpoints); await db.transaction(async (tx) => { + await tx.execute(sql`SELECT pg_advisory_xact_lock(${MIGRATION_LOCK_KEY})`); + + // Re-check applied status *inside* the lock — another isolate may have + // applied this migration between our outer check and the lock acquire. 
+ const exists = await tx.execute<{ hash: string }>( + sql`SELECT "hash" FROM "__drizzle_migrations" WHERE "hash" = ${entry.tag}`, + ); + if (exists.rows.length > 0) return; + + const statements = splitStatements(entry.sql, entry.breakpoints); for (const stmt of statements) { if (!stmt) continue; await tx.execute(sql.raw(stmt));