diff --git a/.stack/config.nix b/.stack/config.nix index b4558586..085fd874 100644 --- a/.stack/config.nix +++ b/.stack/config.nix @@ -924,23 +924,23 @@ env = { }; exec = "turbo run clean && rm -rf node_modules/.cache"; }; - "db:migrate" = { - cwd = "apps/server"; - description = "Run database migrations"; + "db:generate" = { + cwd = "packages/db"; + description = "Generate a new Drizzle migration from schema changes (also bundles for runtime)"; env = { }; - exec = "bun run drizzle-kit migrate"; + exec = "bun run db:generate"; }; - "db:push" = { - cwd = "apps/server"; - description = "Push schema changes to database"; + "db:migrate" = { + cwd = "packages/db"; + description = "Apply file-based Drizzle migrations against the configured DATABASE_URL (local dev only — runtime migration is automatic)"; env = { }; - exec = "bun run drizzle-kit push"; + exec = "bun run db:migrate"; }; "db:studio" = { - cwd = "apps/server"; + cwd = "packages/db"; description = "Open Drizzle Studio database GUI"; env = { }; - exec = "bun run drizzle-kit studio"; + exec = "bun run db:studio"; }; dev = { cache = false; diff --git a/AGENTS.md b/AGENTS.md index 4f5ee240..c1b303ca 100644 --- a/AGENTS.md +++ b/AGENTS.md @@ -154,12 +154,25 @@ air # Hot reload (dev mode) ### Database ```bash -bun run db:push # Push Drizzle schema changes +bun run db:generate # Generate a new Drizzle migration after a schema change + # (writes packages/db/drizzle/*.sql + bundles for runtime) bun run db:studio # Open Drizzle Studio -bun run db:generate # Generate Drizzle types -bun run db:migrate # Run migrations +bun run db:migrate # Apply migrations against $DATABASE_URL — local/dev only + # (production / staging / preview migrate automatically at app + # startup via @stackpanel/db's runMigrations(); see + # docs/adr/0002-runtime-startup-migrations.md) ``` +**Generating a migration** + +1. Edit a schema file under `packages/db/src/schema/`. +2. 
Run `bun run db:generate` (or `bun run --cwd packages/db db:generate` directly). +3. Commit the new SQL file under `packages/db/drizzle/` along with the + regenerated `packages/db/drizzle/meta/_journal.json` and + `packages/db/src/migrations-bundle.generated.ts`. +4. Deploy. The first request to a new isolate will apply pending migrations + transparently — no manual `db:push` step. + ### Nix / Infra ```bash @@ -663,14 +676,21 @@ This is a monorepo with the following structure: ## Database Commands -All database operations should be run from the server workspace: - -- `bun run db:push` - Push schema changes to database -- `bun run db:studio` - Open database studio -- `bun run db:generate` - Generate Prisma files -- `bun run db:migrate` - Run database migrations - -Database schema is located in `apps/server/prisma/schema.prisma` +All database operations are exposed at the workspace root (delegated to +`@stackpanel/db` via Turbo): + +- `bun run db:generate` - Generate a new Drizzle migration from schema + changes (writes `packages/db/drizzle/*.sql`, `meta/_journal.json`, and + the runtime-importable `packages/db/src/migrations-bundle.generated.ts`) +- `bun run db:studio` - Open Drizzle Studio +- `bun run db:migrate` - Apply migrations against `$DATABASE_URL` — local / + ad-hoc only; production, staging, and preview deployments migrate + automatically at app startup via `runMigrations()` (see + `docs/adr/0002-runtime-startup-migrations.md`) + +Database schemas live in `packages/db/src/schema/`. There is no `db:push` +flow anymore — schema-sync is replaced by file-based migrations that ship +with the deploy and apply themselves on first isolate boot. 
## API Structure diff --git a/CLAUDE.md b/CLAUDE.md index 2f2cbb4a..571db34a 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -329,14 +329,21 @@ This is a monorepo with the following structure: ## Database Commands -All database operations should be run from the server workspace: - -- `bun run db:push` - Push schema changes to database -- `bun run db:studio` - Open database studio -- `bun run db:generate` - Generate Prisma files -- `bun run db:migrate` - Run database migrations - -Database schema is located in `apps/server/prisma/schema.prisma` +All database operations are exposed at the workspace root (delegated to +`@stackpanel/db` via Turbo): + +- `bun run db:generate` - Generate a new Drizzle migration from schema + changes (writes `packages/db/drizzle/*.sql`, `meta/_journal.json`, and + the runtime-importable `packages/db/src/migrations-bundle.generated.ts`) +- `bun run db:studio` - Open Drizzle Studio +- `bun run db:migrate` - Apply migrations against `$DATABASE_URL` — local / + ad-hoc only; production, staging, and preview deployments migrate + automatically at app startup via `runMigrations()` (see + `docs/adr/0002-runtime-startup-migrations.md`) + +Database schemas live in `packages/db/src/schema/`. There is no `db:push` +flow anymore — schema-sync is replaced by file-based migrations that ship +with the deploy and apply themselves on first isolate boot. 
## API Structure diff --git a/WARP.md b/WARP.md index a42438bc..ab275c68 100644 --- a/WARP.md +++ b/WARP.md @@ -331,14 +331,21 @@ This is a monorepo with the following structure: ## Database Commands -All database operations should be run from the server workspace: - -- `bun run db:push` - Push schema changes to database -- `bun run db:studio` - Open database studio -- `bun run db:generate` - Generate Prisma files -- `bun run db:migrate` - Run database migrations - -Database schema is located in `apps/server/prisma/schema.prisma` +All database operations are exposed at the workspace root (delegated to +`@stackpanel/db` via Turbo): + +- `bun run db:generate` - Generate a new Drizzle migration from schema + changes (writes `packages/db/drizzle/*.sql`, `meta/_journal.json`, and + the runtime-importable `packages/db/src/migrations-bundle.generated.ts`) +- `bun run db:studio` - Open Drizzle Studio +- `bun run db:migrate` - Apply migrations against `$DATABASE_URL` — local / + ad-hoc only; production, staging, and preview deployments migrate + automatically at app startup via `runMigrations()` (see + `docs/adr/0002-runtime-startup-migrations.md`) + +Database schemas live in `packages/db/src/schema/`. There is no `db:push` +flow anymore — schema-sync is replaced by file-based migrations that ship +with the deploy and apply themselves on first isolate boot. 
## API Structure diff --git a/docs/adr/0002-runtime-startup-migrations.md b/docs/adr/0002-runtime-startup-migrations.md new file mode 100644 index 00000000..f56f16fb --- /dev/null +++ b/docs/adr/0002-runtime-startup-migrations.md @@ -0,0 +1,178 @@ +# 0002 — Database migrations are applied programmatically at app startup, not via `drizzle-kit push` + +- **Status:** Accepted +- **Date:** 2026-05-01 +- **Deciders:** Stackpanel core team +- **Related:** [`docs/adr/0001-runtime-secrets-via-gen-env-loader.md`](./0001-runtime-secrets-via-gen-env-loader.md) +- **Implementation:** branch `feat/runtime-migrations` + +## Context + +Until now, Stackpanel's Drizzle-backed Postgres (Neon) used the +`bun run db:push` flow — a wrapper around `drizzle-kit push` — to keep +the database schema in lockstep with the TypeScript schema files under +`packages/db/src/schema/`. That approach has several problems we keep +running into: + +1. **Humans-in-the-loop**: `db:push` is a manual step. It is trivially + forgotten — most recently on PR #24, where the + `waitlist.join` tRPC procedure 500'd against the per-PR Neon preview + project with `Failed query: select "id" from "beta_waitlist"` because + nobody had run `db:push` against that fresh database. The deploy + pipeline has no way to know the schema is stale until a request hits a + missing table. +2. **No audit trail**: `drizzle-kit push` diffs the live DB against the + TypeScript schema and emits SQL on the fly. Nothing is checked into git, + so we have no history of schema changes, no way to review one in a PR, + and no `down` story when a change goes wrong. +3. **Preview DB priming is lazy**: per-PR preview deploys provision their + Neon project on first deploy (see `apps/web/alchemy.run.ts`). The DB + is empty until something writes to it; with `db:push` that "something" + is a human running the right command at the right time, which doesn't + happen. +4. 
**Drift between environments**: `db:push` is destructive — it reshapes + the live DB to match the schema. In dev we shrug and let it drop a + column; in prod we don't dare run it. So in practice prod uses + ad-hoc SQL while dev uses `db:push`, and the two diverge over time. + +The user-visible failure on PR #24 was the trigger: the waitlist signup +button on the `local.<pr>.stackpanel.com` preview returned a 500, and +the only fix was to `wrangler tail` the worker, infer the missing table, +and run `db:push` against the preview manually. That's not a flow we want +to ship to ourselves repeatedly — and certainly not to anyone using +Stackpanel as a starter template. + +## Decision + +Migrations are now **file-based**, **committed to git**, and **applied +programmatically at app startup** by the `@stackpanel/db` package itself. + +Concretely: + +- **Generation** is local-only. After editing a schema file under + `packages/db/src/schema/`, run + `bun run --cwd packages/db db:generate`. That invokes + `drizzle-kit generate` (which writes `packages/db/drizzle/*.sql` + and `packages/db/drizzle/meta/_journal.json`) and then + `scripts/bundle-migrations.ts`, which inlines every SQL file into + `packages/db/src/migrations-bundle.generated.ts`. All three artifacts are + checked into git. +- **Application** is automatic. `@stackpanel/db` exports `runMigrations(db)` + from `src/migrate.ts`. `packages/auth/src/index.ts` awaits it at + module-evaluation time (top-level await) **before** constructing the + `betterAuth({...})` instance, so the per-isolate boot order is always + `import db → await runMigrations(db) → betterAuth({...})`. Anything + downstream that imports `auth` (the tRPC handler, route middleware, + background jobs) inherits the dependency naturally — by the time + `auth.api.getSession()` is callable, every committed migration has been + applied. 
+- **Concurrency** is handled by a Postgres advisory lock + (`pg_advisory_lock(0x4d495252::bigint)`) inside `applyMigrations`, so + many isolates can call `runMigrations` simultaneously without racing + on `__drizzle_migrations` row inserts. Per-isolate, the function caches + the in-flight `Promise` so repeated callers reuse the same migrate run. +- **Idempotency** comes from the standard drizzle `__drizzle_migrations` + table: each entry is keyed by its content-derived `tag`, and applied + rows are skipped on subsequent boots. +- **`drizzle-kit push` is removed** from every workspace script + (`package.json`, `packages/db/package.json`, `turbo.json`, + `.stack/config.nix`). `drizzle-kit migrate` is kept under + `bun run db:migrate` for local ad-hoc use only — production / + staging / preview deployments never invoke it; they rely entirely on + the `runMigrations` call at startup. + +### Why not `drizzle-orm/node-postgres/migrator` directly? + +The built-in migrator reads SQL files at runtime via `node:fs`. Inside a +Cloudflare Worker bundle that filesystem is empty — Vite/Rolldown bundles +JS modules but not arbitrary `.sql` files. We considered three options: + +1. **Vite `import.meta.glob('drizzle/*.sql', { query: '?raw' })`** — + works, but couples `@stackpanel/db` to a specific bundler and silently + becomes a no-op anywhere Vite isn't in the loop (Bun scripts, ad-hoc + tests, the Go-driven docs build). +2. **A Workers-native migrator from drizzle-orm itself** — none exists + for the `node-postgres` adapter as of `drizzle-orm@0.45.1`. The + `neon-http` migrator is HTTP-only and requires switching the runtime + driver. +3. **Pre-bundle the SQL into a TypeScript module at generate time** + (chosen). `scripts/bundle-migrations.ts` reads `drizzle/` and writes + `src/migrations-bundle.generated.ts` with each migration inlined as a + string. 
The runtime imports that module like any other TS module — + works identically in Workers, Node, Bun, vitest, and any future runtime + without bundler-specific magic. + +## Consequences + +### Positive + +- **Zero-config preview DB priming**: a freshly-provisioned Neon project + is brought up to schema by the first request that touches the auth + module. PR #24's waitlist 500 cannot recur with this design. +- **Audit trail**: every schema change ships as a reviewable + `packages/db/drizzle/*.sql` diff in the PR that introduces + it. +- **No human deploy step** for schema changes — the deploy pipeline stays + identical for code-only and code-plus-schema changes. +- **Cross-runtime portability**: bundled migrations work in Cloudflare + Workers, Node, Bun, vitest, and any future runtime without per-target + build tweaks. +- **Rollback story** is back on the table: a future iteration can add + `down.sql` files and a `--down` flag to `runMigrations` without + re-architecting how migrations are discovered or transported. + +### Negative / trade-offs + +- **One-time cost on cold isolate boot**: the first request to a freshly + spawned isolate pays the migration check (a single + `SELECT hash FROM __drizzle_migrations` and the advisory-lock + acquire/release). Steady-state requests pay nothing — the `inflight` + Promise cache short-circuits. Worst case (cold + brand-new schema) is + the time to apply pending migrations once per environment. + +- **Schema changes need explicit migration review**: developers can no + longer iterate by editing the schema and running `db:push`. The price + is a `bun run db:generate` + a single committed SQL file. Worth it for + the audit trail; everyone agrees this is a good trade. +- **`packages/db/src/migrations-bundle.generated.ts` must stay in sync + with `drizzle/`**. 
The `db:generate` script chains both, and the + `db:bundle` script can be re-run independently + (`bun run --cwd packages/db db:bundle`) if someone manually edits a + migration file. CI does not (yet) re-bundle and diff — see + *Follow-ups*. + +### Neutral + +- The `__drizzle_migrations` table now exists in every environment. Same + shape drizzle's built-in migrator uses, so future-us could swap to the + upstream migrator if Workers ever ships a fully-compatible one. + +## Alternatives considered + +- **Keep `drizzle-kit push`**: rejected. It is the source of the + problems described in *Context* — no audit trail, manual step, and the + existing PR-24 outage is a direct consequence. +- **Run migrations only in CI before deploy**: rejected. Preview Neon + projects are created lazily by `apps/web/alchemy.run.ts` during the + Cloudflare Workers deploy itself; there is no "before deploy" moment + where the preview DB exists but the worker doesn't. Adding a separate + CI step that provisions the DB and migrates it before the deploy ran + would double the preview latency and re-introduce a human-readable + deploy graph. +- **Use Neon's branching for schema management**: rejected. Neon + branching is great for forking *data* off main, but it doesn't replace + a migration tool — it inherits whatever schema main has and gives no + way to evolve schema in a feature branch without merging the schema + change to main first. Orthogonal to this decision. + +## Follow-ups + +- Add a CI check (`verify` workflow) that runs + `bun run --cwd packages/db db:bundle` and fails if the resulting diff + isn't empty. This guarantees the bundle stays in lockstep with the SQL + files. +- Add `down.sql` support to `scripts/bundle-migrations.ts` and a + `runMigrations(db, { direction: "down", to: <tag> })` opt-in for + emergency rollbacks. 
+- Consider exposing a `runMigrationsEffect(db)` Effect-native variant + for callers that already live in an `Effect.gen` block (parity with + `loadAppEnvEffect` from `@gen/env/runtime`). diff --git a/nix/stackpanel/modules/just/module.nix b/nix/stackpanel/modules/just/module.nix index ff254d22..8e460f25 100644 --- a/nix/stackpanel/modules/just/module.nix +++ b/nix/stackpanel/modules/just/module.nix @@ -87,9 +87,9 @@ in db = { description = "Database management recipes"; recipes = ''' - # Push schema changes - db-push: - bun run db:push + # Generate a new Drizzle migration after schema changes + db-generate: + bun run db:generate '''; }; } diff --git a/package.json b/package.json index 8b5970e1..41f3a300 100644 --- a/package.json +++ b/package.json @@ -65,7 +65,6 @@ "dev:web": "turbo -F web dev", "dev:server": "turbo -F server dev", "test": "turbo test", - "db:push": "turbo -F @stackpanel/db db:push", "db:studio": "turbo -F @stackpanel/db db:studio", "db:generate": "turbo -F @stackpanel/db db:generate", "db:migrate": "turbo -F @stackpanel/db db:migrate", diff --git a/packages/auth/src/index.ts b/packages/auth/src/index.ts index 331aae2b..5cd9447c 100644 --- a/packages/auth/src/index.ts +++ b/packages/auth/src/index.ts @@ -1,5 +1,5 @@ import { checkout, polar, portal, webhooks } from "@polar-sh/better-auth"; -import { db } from "@stackpanel/db"; +import { db, runMigrations } from "@stackpanel/db"; import type { BetterAuthPlugin } from "better-auth"; import { betterAuth } from "better-auth"; import { drizzleAdapter } from "better-auth/adapters/drizzle"; @@ -8,6 +8,22 @@ import { polarClient } from "./lib/payments"; import { polarProducts } from "./lib/polar-products"; import { polarSubscriptionCallbacks } from "./lib/polar-webhooks"; +// Apply file-based Drizzle migrations from `@stackpanel/db` before any +// auth-bound query runs. We do this at module-evaluation time (top-level +// await) so the per-isolate boot order is always: +// 1. 
import @stackpanel/db → drizzle client + bundled migrations available +// 2. await runMigrations(db) → __drizzle_migrations is up-to-date +// 3. betterAuth({...}) → drizzle adapter is safe to construct & query +// See `docs/adr/0002-runtime-startup-migrations.md` for the full rationale. +// +// `runMigrations` is internally cached + serialized via `pg_advisory_lock`, +// so concurrent isolates cooperate. We guard on a configured connection +// string so vitest/typecheck contexts (which never set DATABASE_URL) don't +// crash at import time on a connection refused error. +if (process.env.DATABASE_URL || process.env.POSTGRES_URL) { + await runMigrations(db); +} + // Build plugins array - only include Polar if configured const plugins: BetterAuthPlugin[] = [ organization({ diff --git a/packages/db/drizzle.config.ts b/packages/db/drizzle.config.ts index f3cc4257..3014203e 100644 --- a/packages/db/drizzle.config.ts +++ b/packages/db/drizzle.config.ts @@ -1,12 +1,15 @@ -import { env } from "@gen/env/web"; -import dotenv from "dotenv"; import { defineConfig } from "drizzle-kit"; +// `drizzle-kit generate` doesn't need the URL — it diffs the schema against +// the existing migrations. `drizzle-kit migrate` (and the runtime `migrate()` +// in `src/migrate.ts`) connect using `POSTGRES_URL`/`DATABASE_URL`. We accept +// either so local ad-hoc runs work in any devshell that already has one set. +const url = + process.env.POSTGRES_URL ?? process.env.DATABASE_URL ?? 
"postgres://stub"; + export default defineConfig({ schema: "./src/schema", - out: "./src/migrations", + out: "./drizzle", dialect: "postgresql", - dbCredentials: { - url: env.POSTGRES_URL, - }, + dbCredentials: { url }, }); diff --git a/packages/db/drizzle/0000_init.sql b/packages/db/drizzle/0000_init.sql new file mode 100644 index 00000000..61653ecf --- /dev/null +++ b/packages/db/drizzle/0000_init.sql @@ -0,0 +1,156 @@ +CREATE TABLE "account" ( + "id" text PRIMARY KEY NOT NULL, + "account_id" text NOT NULL, + "provider_id" text NOT NULL, + "user_id" text NOT NULL, + "access_token" text, + "refresh_token" text, + "id_token" text, + "access_token_expires_at" timestamp, + "refresh_token_expires_at" timestamp, + "scope" text, + "password" text, + "created_at" timestamp DEFAULT now() NOT NULL, + "updated_at" timestamp NOT NULL +); +--> statement-breakpoint +CREATE TABLE "session" ( + "id" text PRIMARY KEY NOT NULL, + "expires_at" timestamp NOT NULL, + "token" text NOT NULL, + "created_at" timestamp DEFAULT now() NOT NULL, + "updated_at" timestamp NOT NULL, + "ip_address" text, + "user_agent" text, + "user_id" text NOT NULL, + "active_organization_id" text, + CONSTRAINT "session_token_unique" UNIQUE("token") +); +--> statement-breakpoint +CREATE TABLE "user" ( + "id" text PRIMARY KEY NOT NULL, + "name" text NOT NULL, + "email" text NOT NULL, + "email_verified" boolean DEFAULT false NOT NULL, + "image" text, + "created_at" timestamp DEFAULT now() NOT NULL, + "updated_at" timestamp DEFAULT now() NOT NULL, + CONSTRAINT "user_email_unique" UNIQUE("email") +); +--> statement-breakpoint +CREATE TABLE "verification" ( + "id" text PRIMARY KEY NOT NULL, + "identifier" text NOT NULL, + "value" text NOT NULL, + "expires_at" timestamp NOT NULL, + "created_at" timestamp DEFAULT now() NOT NULL, + "updated_at" timestamp DEFAULT now() NOT NULL +); +--> statement-breakpoint +CREATE TABLE "invitation" ( + "id" text PRIMARY KEY NOT NULL, + "email" text NOT NULL, + "inviter_id" text NOT 
NULL, + "organization_id" text NOT NULL, + "role" text, + "status" text NOT NULL, + "created_at" timestamp DEFAULT now() NOT NULL, + "expires_at" timestamp NOT NULL +); +--> statement-breakpoint +CREATE TABLE "member" ( + "id" text PRIMARY KEY NOT NULL, + "organization_id" text NOT NULL, + "user_id" text NOT NULL, + "role" text NOT NULL, + "created_at" timestamp DEFAULT now() NOT NULL +); +--> statement-breakpoint +CREATE TABLE "organization" ( + "id" text PRIMARY KEY NOT NULL, + "name" text NOT NULL, + "slug" text NOT NULL, + "logo" text, + "metadata" text, + "created_at" timestamp DEFAULT now() NOT NULL +); +--> statement-breakpoint +CREATE TABLE "organization_dek" ( + "organization_id" text PRIMARY KEY NOT NULL, + "encrypted_dek" "bytea" NOT NULL, + "kms_key_alias" text NOT NULL, + "created_at" timestamp DEFAULT now() NOT NULL +); +--> statement-breakpoint +CREATE TABLE "organization_state" ( + "id" text PRIMARY KEY NOT NULL, + "organization_id" text NOT NULL, + "stack" text NOT NULL, + "stage" text NOT NULL, + "fqn" text NOT NULL, + "nonce" "bytea" NOT NULL, + "encrypted_blob" "bytea" NOT NULL, + "version" integer DEFAULT 1 NOT NULL, + "created_at" timestamp DEFAULT now() NOT NULL, + "updated_at" timestamp DEFAULT now() NOT NULL +); +--> statement-breakpoint +CREATE TABLE "polar_event" ( + "id" text PRIMARY KEY NOT NULL, + "polar_event_id" text NOT NULL, + "event_type" text NOT NULL, + "payload" text NOT NULL, + "processed_at" timestamp DEFAULT now() NOT NULL +); +--> statement-breakpoint +CREATE TABLE "user_subscription" ( + "id" text PRIMARY KEY NOT NULL, + "user_id" text NOT NULL, + "polar_customer_id" text NOT NULL, + "polar_subscription_id" text, + "plan" text DEFAULT 'free' NOT NULL, + "status" text DEFAULT 'active' NOT NULL, + "current_period_end" timestamp, + "cancel_at_period_end" text, + "created_at" timestamp DEFAULT now() NOT NULL, + "updated_at" timestamp DEFAULT now() NOT NULL +); +--> statement-breakpoint +CREATE TABLE "beta_waitlist" ( + "id" 
text PRIMARY KEY NOT NULL, + "email" text NOT NULL, + "name" text, + "company" text, + "role" text, + "source" text, + "notes" text, + "referrer" text, + "user_agent" text, + "ip_hash" text, + "invited_at" timestamp, + "created_at" timestamp DEFAULT now() NOT NULL +); +--> statement-breakpoint +ALTER TABLE "account" ADD CONSTRAINT "account_user_id_user_id_fk" FOREIGN KEY ("user_id") REFERENCES "public"."user"("id") ON DELETE cascade ON UPDATE no action;--> statement-breakpoint +ALTER TABLE "session" ADD CONSTRAINT "session_user_id_user_id_fk" FOREIGN KEY ("user_id") REFERENCES "public"."user"("id") ON DELETE cascade ON UPDATE no action;--> statement-breakpoint +ALTER TABLE "invitation" ADD CONSTRAINT "invitation_inviter_id_user_id_fk" FOREIGN KEY ("inviter_id") REFERENCES "public"."user"("id") ON DELETE cascade ON UPDATE no action;--> statement-breakpoint +ALTER TABLE "invitation" ADD CONSTRAINT "invitation_organization_id_organization_id_fk" FOREIGN KEY ("organization_id") REFERENCES "public"."organization"("id") ON DELETE cascade ON UPDATE no action;--> statement-breakpoint +ALTER TABLE "member" ADD CONSTRAINT "member_organization_id_organization_id_fk" FOREIGN KEY ("organization_id") REFERENCES "public"."organization"("id") ON DELETE cascade ON UPDATE no action;--> statement-breakpoint +ALTER TABLE "member" ADD CONSTRAINT "member_user_id_user_id_fk" FOREIGN KEY ("user_id") REFERENCES "public"."user"("id") ON DELETE cascade ON UPDATE no action;--> statement-breakpoint +ALTER TABLE "organization_dek" ADD CONSTRAINT "organization_dek_organization_id_organization_id_fk" FOREIGN KEY ("organization_id") REFERENCES "public"."organization"("id") ON DELETE cascade ON UPDATE no action;--> statement-breakpoint +ALTER TABLE "organization_state" ADD CONSTRAINT "organization_state_organization_id_organization_id_fk" FOREIGN KEY ("organization_id") REFERENCES "public"."organization"("id") ON DELETE cascade ON UPDATE no action;--> statement-breakpoint +ALTER TABLE 
"user_subscription" ADD CONSTRAINT "user_subscription_user_id_user_id_fk" FOREIGN KEY ("user_id") REFERENCES "public"."user"("id") ON DELETE cascade ON UPDATE no action;--> statement-breakpoint +CREATE INDEX "account_userId_idx" ON "account" USING btree ("user_id");--> statement-breakpoint +CREATE INDEX "session_userId_idx" ON "session" USING btree ("user_id");--> statement-breakpoint +CREATE INDEX "verification_identifier_idx" ON "verification" USING btree ("identifier");--> statement-breakpoint +CREATE INDEX "invitation_org_idx" ON "invitation" USING btree ("organization_id");--> statement-breakpoint +CREATE UNIQUE INDEX "member_org_user_idx" ON "member" USING btree ("organization_id","user_id");--> statement-breakpoint +CREATE INDEX "member_user_idx" ON "member" USING btree ("user_id");--> statement-breakpoint +CREATE UNIQUE INDEX "organization_slug_idx" ON "organization" USING btree ("slug");--> statement-breakpoint +CREATE UNIQUE INDEX "organization_state_unique_idx" ON "organization_state" USING btree ("organization_id","stack","stage","fqn");--> statement-breakpoint +CREATE INDEX "organization_state_stage_idx" ON "organization_state" USING btree ("organization_id","stack","stage");--> statement-breakpoint +CREATE UNIQUE INDEX "polar_event_polar_id_idx" ON "polar_event" USING btree ("polar_event_id");--> statement-breakpoint +CREATE UNIQUE INDEX "user_subscription_user_idx" ON "user_subscription" USING btree ("user_id");--> statement-breakpoint +CREATE INDEX "user_subscription_polar_customer_idx" ON "user_subscription" USING btree ("polar_customer_id");--> statement-breakpoint +CREATE UNIQUE INDEX "beta_waitlist_email_uniq" ON "beta_waitlist" USING btree ("email");--> statement-breakpoint +CREATE INDEX "beta_waitlist_created_idx" ON "beta_waitlist" USING btree ("created_at"); \ No newline at end of file diff --git a/packages/db/drizzle/meta/0000_snapshot.json b/packages/db/drizzle/meta/0000_snapshot.json new file mode 100644 index 00000000..5548e57c --- 
/dev/null +++ b/packages/db/drizzle/meta/0000_snapshot.json @@ -0,0 +1,1161 @@ +{ + "id": "4eedc99e-9098-4b74-83c9-ff92d2182938", + "prevId": "00000000-0000-0000-0000-000000000000", + "version": "7", + "dialect": "postgresql", + "tables": { + "public.account": { + "name": "account", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "account_id": { + "name": "account_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "provider_id": { + "name": "provider_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "user_id": { + "name": "user_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "access_token": { + "name": "access_token", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "refresh_token": { + "name": "refresh_token", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "id_token": { + "name": "id_token", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "access_token_expires_at": { + "name": "access_token_expires_at", + "type": "timestamp", + "primaryKey": false, + "notNull": false + }, + "refresh_token_expires_at": { + "name": "refresh_token_expires_at", + "type": "timestamp", + "primaryKey": false, + "notNull": false + }, + "scope": { + "name": "scope", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "password": { + "name": "password", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "created_at": { + "name": "created_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + }, + "updated_at": { + "name": "updated_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true + } + }, + "indexes": { + "account_userId_idx": { + "name": "account_userId_idx", + "columns": [ + { + "expression": "user_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": 
false, + "method": "btree", + "with": {} + } + }, + "foreignKeys": { + "account_user_id_user_id_fk": { + "name": "account_user_id_user_id_fk", + "tableFrom": "account", + "tableTo": "user", + "columnsFrom": [ + "user_id" + ], + "columnsTo": [ + "id" + ], + "onDelete": "cascade", + "onUpdate": "no action" + } + }, + "compositePrimaryKeys": {}, + "uniqueConstraints": {}, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.session": { + "name": "session", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "expires_at": { + "name": "expires_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true + }, + "token": { + "name": "token", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "created_at": { + "name": "created_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + }, + "updated_at": { + "name": "updated_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true + }, + "ip_address": { + "name": "ip_address", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "user_agent": { + "name": "user_agent", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "user_id": { + "name": "user_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "active_organization_id": { + "name": "active_organization_id", + "type": "text", + "primaryKey": false, + "notNull": false + } + }, + "indexes": { + "session_userId_idx": { + "name": "session_userId_idx", + "columns": [ + { + "expression": "user_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + } + }, + "foreignKeys": { + "session_user_id_user_id_fk": { + "name": "session_user_id_user_id_fk", + "tableFrom": "session", + "tableTo": "user", + "columnsFrom": [ + "user_id" + ], + "columnsTo": [ + "id" + ], + 
"onDelete": "cascade", + "onUpdate": "no action" + } + }, + "compositePrimaryKeys": {}, + "uniqueConstraints": { + "session_token_unique": { + "name": "session_token_unique", + "nullsNotDistinct": false, + "columns": [ + "token" + ] + } + }, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.user": { + "name": "user", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "name": { + "name": "name", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "email": { + "name": "email", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "email_verified": { + "name": "email_verified", + "type": "boolean", + "primaryKey": false, + "notNull": true, + "default": false + }, + "image": { + "name": "image", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "created_at": { + "name": "created_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + }, + "updated_at": { + "name": "updated_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + } + }, + "indexes": {}, + "foreignKeys": {}, + "compositePrimaryKeys": {}, + "uniqueConstraints": { + "user_email_unique": { + "name": "user_email_unique", + "nullsNotDistinct": false, + "columns": [ + "email" + ] + } + }, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.verification": { + "name": "verification", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "identifier": { + "name": "identifier", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "value": { + "name": "value", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "expires_at": { + "name": "expires_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true + }, + "created_at": { + "name": "created_at", + "type": 
"timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + }, + "updated_at": { + "name": "updated_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + } + }, + "indexes": { + "verification_identifier_idx": { + "name": "verification_identifier_idx", + "columns": [ + { + "expression": "identifier", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + } + }, + "foreignKeys": {}, + "compositePrimaryKeys": {}, + "uniqueConstraints": {}, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.invitation": { + "name": "invitation", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "email": { + "name": "email", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "inviter_id": { + "name": "inviter_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "organization_id": { + "name": "organization_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "role": { + "name": "role", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "status": { + "name": "status", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "created_at": { + "name": "created_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + }, + "expires_at": { + "name": "expires_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true + } + }, + "indexes": { + "invitation_org_idx": { + "name": "invitation_org_idx", + "columns": [ + { + "expression": "organization_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + } + }, + "foreignKeys": { + "invitation_inviter_id_user_id_fk": { + "name": "invitation_inviter_id_user_id_fk", + "tableFrom": 
"invitation", + "tableTo": "user", + "columnsFrom": [ + "inviter_id" + ], + "columnsTo": [ + "id" + ], + "onDelete": "cascade", + "onUpdate": "no action" + }, + "invitation_organization_id_organization_id_fk": { + "name": "invitation_organization_id_organization_id_fk", + "tableFrom": "invitation", + "tableTo": "organization", + "columnsFrom": [ + "organization_id" + ], + "columnsTo": [ + "id" + ], + "onDelete": "cascade", + "onUpdate": "no action" + } + }, + "compositePrimaryKeys": {}, + "uniqueConstraints": {}, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.member": { + "name": "member", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "organization_id": { + "name": "organization_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "user_id": { + "name": "user_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "role": { + "name": "role", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "created_at": { + "name": "created_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + } + }, + "indexes": { + "member_org_user_idx": { + "name": "member_org_user_idx", + "columns": [ + { + "expression": "organization_id", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "user_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": true, + "concurrently": false, + "method": "btree", + "with": {} + }, + "member_user_idx": { + "name": "member_user_idx", + "columns": [ + { + "expression": "user_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + } + }, + "foreignKeys": { + "member_organization_id_organization_id_fk": { + "name": "member_organization_id_organization_id_fk", + "tableFrom": "member", + "tableTo": 
"organization", + "columnsFrom": [ + "organization_id" + ], + "columnsTo": [ + "id" + ], + "onDelete": "cascade", + "onUpdate": "no action" + }, + "member_user_id_user_id_fk": { + "name": "member_user_id_user_id_fk", + "tableFrom": "member", + "tableTo": "user", + "columnsFrom": [ + "user_id" + ], + "columnsTo": [ + "id" + ], + "onDelete": "cascade", + "onUpdate": "no action" + } + }, + "compositePrimaryKeys": {}, + "uniqueConstraints": {}, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.organization": { + "name": "organization", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "name": { + "name": "name", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "slug": { + "name": "slug", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "logo": { + "name": "logo", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "metadata": { + "name": "metadata", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "created_at": { + "name": "created_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + } + }, + "indexes": { + "organization_slug_idx": { + "name": "organization_slug_idx", + "columns": [ + { + "expression": "slug", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": true, + "concurrently": false, + "method": "btree", + "with": {} + } + }, + "foreignKeys": {}, + "compositePrimaryKeys": {}, + "uniqueConstraints": {}, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.organization_dek": { + "name": "organization_dek", + "schema": "", + "columns": { + "organization_id": { + "name": "organization_id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "encrypted_dek": { + "name": "encrypted_dek", + "type": "bytea", + "primaryKey": false, + "notNull": true + }, + "kms_key_alias": { + "name": 
"kms_key_alias", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "created_at": { + "name": "created_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + } + }, + "indexes": {}, + "foreignKeys": { + "organization_dek_organization_id_organization_id_fk": { + "name": "organization_dek_organization_id_organization_id_fk", + "tableFrom": "organization_dek", + "tableTo": "organization", + "columnsFrom": [ + "organization_id" + ], + "columnsTo": [ + "id" + ], + "onDelete": "cascade", + "onUpdate": "no action" + } + }, + "compositePrimaryKeys": {}, + "uniqueConstraints": {}, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.organization_state": { + "name": "organization_state", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "organization_id": { + "name": "organization_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "stack": { + "name": "stack", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "stage": { + "name": "stage", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "fqn": { + "name": "fqn", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "nonce": { + "name": "nonce", + "type": "bytea", + "primaryKey": false, + "notNull": true + }, + "encrypted_blob": { + "name": "encrypted_blob", + "type": "bytea", + "primaryKey": false, + "notNull": true + }, + "version": { + "name": "version", + "type": "integer", + "primaryKey": false, + "notNull": true, + "default": 1 + }, + "created_at": { + "name": "created_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + }, + "updated_at": { + "name": "updated_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + } + }, + "indexes": { + "organization_state_unique_idx": { + "name": "organization_state_unique_idx", + "columns": 
[ + { + "expression": "organization_id", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "stack", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "stage", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "fqn", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": true, + "concurrently": false, + "method": "btree", + "with": {} + }, + "organization_state_stage_idx": { + "name": "organization_state_stage_idx", + "columns": [ + { + "expression": "organization_id", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "stack", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "stage", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + } + }, + "foreignKeys": { + "organization_state_organization_id_organization_id_fk": { + "name": "organization_state_organization_id_organization_id_fk", + "tableFrom": "organization_state", + "tableTo": "organization", + "columnsFrom": [ + "organization_id" + ], + "columnsTo": [ + "id" + ], + "onDelete": "cascade", + "onUpdate": "no action" + } + }, + "compositePrimaryKeys": {}, + "uniqueConstraints": {}, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.polar_event": { + "name": "polar_event", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "polar_event_id": { + "name": "polar_event_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "event_type": { + "name": "event_type", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "payload": { + "name": "payload", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "processed_at": { + "name": "processed_at", + "type": "timestamp", + "primaryKey": false, + 
"notNull": true, + "default": "now()" + } + }, + "indexes": { + "polar_event_polar_id_idx": { + "name": "polar_event_polar_id_idx", + "columns": [ + { + "expression": "polar_event_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": true, + "concurrently": false, + "method": "btree", + "with": {} + } + }, + "foreignKeys": {}, + "compositePrimaryKeys": {}, + "uniqueConstraints": {}, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.user_subscription": { + "name": "user_subscription", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "user_id": { + "name": "user_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "polar_customer_id": { + "name": "polar_customer_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "polar_subscription_id": { + "name": "polar_subscription_id", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "plan": { + "name": "plan", + "type": "text", + "primaryKey": false, + "notNull": true, + "default": "'free'" + }, + "status": { + "name": "status", + "type": "text", + "primaryKey": false, + "notNull": true, + "default": "'active'" + }, + "current_period_end": { + "name": "current_period_end", + "type": "timestamp", + "primaryKey": false, + "notNull": false + }, + "cancel_at_period_end": { + "name": "cancel_at_period_end", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "created_at": { + "name": "created_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + }, + "updated_at": { + "name": "updated_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + } + }, + "indexes": { + "user_subscription_user_idx": { + "name": "user_subscription_user_idx", + "columns": [ + { + "expression": "user_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + 
"isUnique": true, + "concurrently": false, + "method": "btree", + "with": {} + }, + "user_subscription_polar_customer_idx": { + "name": "user_subscription_polar_customer_idx", + "columns": [ + { + "expression": "polar_customer_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + } + }, + "foreignKeys": { + "user_subscription_user_id_user_id_fk": { + "name": "user_subscription_user_id_user_id_fk", + "tableFrom": "user_subscription", + "tableTo": "user", + "columnsFrom": [ + "user_id" + ], + "columnsTo": [ + "id" + ], + "onDelete": "cascade", + "onUpdate": "no action" + } + }, + "compositePrimaryKeys": {}, + "uniqueConstraints": {}, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.beta_waitlist": { + "name": "beta_waitlist", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "email": { + "name": "email", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "name": { + "name": "name", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "company": { + "name": "company", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "role": { + "name": "role", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "source": { + "name": "source", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "notes": { + "name": "notes", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "referrer": { + "name": "referrer", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "user_agent": { + "name": "user_agent", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "ip_hash": { + "name": "ip_hash", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "invited_at": { + "name": "invited_at", + "type": "timestamp", + "primaryKey": false, + "notNull": false + }, + 
"created_at": { + "name": "created_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + } + }, + "indexes": { + "beta_waitlist_email_uniq": { + "name": "beta_waitlist_email_uniq", + "columns": [ + { + "expression": "email", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": true, + "concurrently": false, + "method": "btree", + "with": {} + }, + "beta_waitlist_created_idx": { + "name": "beta_waitlist_created_idx", + "columns": [ + { + "expression": "created_at", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + } + }, + "foreignKeys": {}, + "compositePrimaryKeys": {}, + "uniqueConstraints": {}, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + } + }, + "enums": {}, + "schemas": {}, + "sequences": {}, + "roles": {}, + "policies": {}, + "views": {}, + "_meta": { + "columns": {}, + "schemas": {}, + "tables": {} + } +} \ No newline at end of file diff --git a/packages/db/drizzle/meta/_journal.json b/packages/db/drizzle/meta/_journal.json new file mode 100644 index 00000000..5a197aa1 --- /dev/null +++ b/packages/db/drizzle/meta/_journal.json @@ -0,0 +1,13 @@ +{ + "version": "7", + "dialect": "postgresql", + "entries": [ + { + "idx": 0, + "version": "7", + "when": 1777635345492, + "tag": "0000_init", + "breakpoints": true + } + ] +} \ No newline at end of file diff --git a/packages/db/package.json b/packages/db/package.json index 6e38c09c..8e62abb5 100644 --- a/packages/db/package.json +++ b/packages/db/package.json @@ -18,9 +18,9 @@ "scripts": { "db:up": "alchemy deploy", "db:down": "alchemy destroy", - "db:push": "drizzle-kit push", "db:studio": "drizzle-kit studio", - "db:generate": "drizzle-kit generate", + "db:generate": "drizzle-kit generate && bun run db:bundle", + "db:bundle": "bun run scripts/bundle-migrations.ts", "db:migrate": "drizzle-kit migrate", "cf:typegen": "wrangler 
types --env-interface CloudflareEnv cloudflare-env.d.ts" }, diff --git a/packages/db/scripts/bundle-migrations.ts b/packages/db/scripts/bundle-migrations.ts new file mode 100644 index 00000000..452089b1 --- /dev/null +++ b/packages/db/scripts/bundle-migrations.ts @@ -0,0 +1,98 @@ +#!/usr/bin/env bun +// Bundle the file-based Drizzle migrations under `drizzle/` into a TypeScript +// module the runtime can import without a filesystem. Cloudflare Workers (and +// our SSR/dev pipeline) bundle every imported module at build time, so we +// inline each `0000_*.sql` next to its journal entry — that way `runMigrations` +// in `src/migrate.ts` works identically on Node, Bun, Workers, and tests +// without any filesystem shim. +// +// Run via `bun run --cwd packages/db db:generate` (which invokes +// `drizzle-kit generate && bun run db:bundle`) — never edit the generated +// `src/migrations-bundle.generated.ts` by hand. + +import { existsSync, readFileSync, readdirSync, writeFileSync } from "node:fs"; +import { join, resolve } from "node:path"; + +const DB_ROOT = resolve(import.meta.dirname, ".."); +const DRIZZLE_DIR = join(DB_ROOT, "drizzle"); +const JOURNAL_PATH = join(DRIZZLE_DIR, "meta", "_journal.json"); +const OUT_PATH = join(DB_ROOT, "src", "migrations-bundle.generated.ts"); + +interface JournalEntry { + idx: number; + version: string; + when: number; + tag: string; + breakpoints: boolean; +} + +interface Journal { + version: string; + dialect: string; + entries: JournalEntry[]; +} + +if (!existsSync(JOURNAL_PATH)) { + console.error( + `[db:bundle] no journal at ${JOURNAL_PATH} — did you run \`drizzle-kit generate\` first?`, + ); + process.exit(1); +} + +const journal = JSON.parse(readFileSync(JOURNAL_PATH, "utf-8")) as Journal; +const sqlFiles = new Set( + readdirSync(DRIZZLE_DIR).filter((f) => f.endsWith(".sql")), +); + +const bundle = journal.entries + .slice() + .sort((a, b) => a.idx - b.idx) + .map((entry) => { + const fileName = `${entry.tag}.sql`; + if 
(!sqlFiles.has(fileName)) { + throw new Error( + `[db:bundle] journal entry ${entry.tag} (idx ${entry.idx}) has no matching SQL file at drizzle/${fileName}`, + ); + } + const sql = readFileSync(join(DRIZZLE_DIR, fileName), "utf-8"); + return { ...entry, sql }; + }); + +const header = `// Auto-generated by packages/db/scripts/bundle-migrations.ts. +// Do NOT edit by hand — re-run \`bun run --cwd packages/db db:generate\` +// (or \`bun run --cwd packages/db db:bundle\` if you only changed the bundler). +// +// Each entry mirrors a row in drizzle/meta/_journal.json with the matching +// SQL inlined, so runtime callers (Cloudflare Workers, Node, Bun, tests) can +// apply migrations without filesystem access. +`; + +const body = `export interface BundledMigration { + /** Sort order — matches the \`idx\` field in drizzle/meta/_journal.json. */ + readonly idx: number; + /** Tag — drizzle's stable, content-derived identifier (also the filename). */ + readonly tag: string; + /** Drizzle Kit version that produced the SQL. */ + readonly version: string; + /** Wall-clock ms when drizzle generated the migration. */ + readonly when: number; + /** Whether drizzle wrote \`--> statement-breakpoint\` separators. */ + readonly breakpoints: boolean; + /** Raw SQL contents of drizzle/.sql, untouched. */ + readonly sql: string; +} + +export const MIGRATIONS_BUNDLE: readonly BundledMigration[] = ${JSON.stringify( + bundle, + null, + 2, +)} as const; + +export const JOURNAL_VERSION = ${JSON.stringify(journal.version)} as const; +export const JOURNAL_DIALECT = ${JSON.stringify(journal.dialect)} as const; +`; + +writeFileSync(OUT_PATH, `${header}\n${body}`); +console.log( + `[db:bundle] wrote ${bundle.length} migration${bundle.length === 1 ? 
"" : "s"} to ${OUT_PATH}`, +); diff --git a/packages/db/src/index.ts b/packages/db/src/index.ts index 3154d4a2..fc289e19 100644 --- a/packages/db/src/index.ts +++ b/packages/db/src/index.ts @@ -14,13 +14,19 @@ const schema = { ...waitlist, }; -let _db: ReturnType | undefined; +/** + * Concrete drizzle client type — used by `runMigrations()` and any other + * code that wants to type a `db` parameter without importing the proxy. + */ +export type Db = ReturnType>; + +let _db: Db | undefined; /** * Drizzle client for Postgres via Hyperdrive (Cloudflare) or * DATABASE_URL (local dev). Lazily initialized and cached. */ -export function getDb(connectionString?: string): ReturnType { +export function getDb(connectionString?: string): Db { if (_db) return _db; const url = connectionString || process.env.DATABASE_URL; @@ -36,10 +42,11 @@ export function getDb(connectionString?: string): ReturnType { /** * @deprecated Use getDb() instead. Kept for backward compatibility. */ -export const db = new Proxy({} as ReturnType, { +export const db = new Proxy({} as Db, { get(_, prop) { return (getDb() as any)[prop]; }, }); +export { runMigrations } from "./migrate"; export { auth, organization, state, subscription, waitlist }; diff --git a/packages/db/src/migrate.ts b/packages/db/src/migrate.ts new file mode 100644 index 00000000..313f9c56 --- /dev/null +++ b/packages/db/src/migrate.ts @@ -0,0 +1,167 @@ +// Programmatic Drizzle migrator that runs at app startup. +// +// Why a custom migrator? +// `drizzle-orm/node-postgres/migrator` reads SQL files at runtime via +// `node:fs`, which is empty inside a Cloudflare Worker bundle. We pre-bundle +// every migration into `migrations-bundle.generated.ts` (see +// `scripts/bundle-migrations.ts`) so the SQL ships with the Worker, then +// apply each entry in order against `__drizzle_migrations`. +// +// Concurrency: +// Cloudflare may spin up many isolates simultaneously, each calling +// `runMigrations()` on cold start. 
We grab a transaction-scoped Postgres +// advisory lock around each individual migration so concurrent isolates +// serialise correctly. We deliberately do *not* use the session-scoped +// `pg_advisory_lock` — that lock persists when the pg.Pool connection is +// returned to the pool, so the next checkout would see "already locked +// by you" semantics that deadlock the next isolate. Transaction-scoped +// locks (`pg_advisory_xact_lock`) auto-release at COMMIT/ROLLBACK and +// are immune to pool lifecycle weirdness. +// +// Per-isolate, the function caches the in-flight Promise so repeated +// callers reuse the same migrate run. On error the cache is cleared so +// the next request retries from scratch. + +import { sql } from "drizzle-orm"; +import type { Db } from "./index"; +import { type BundledMigration, MIGRATIONS_BUNDLE } from "./migrations-bundle.generated"; + +// Stable lock key. Picked once and never changed — must fit in a +// Postgres int8 (bigint). Using a plain JS number keeps drizzle's +// parameter binding simple (no bigint serialisation surprises). +const MIGRATION_LOCK_KEY = 1_296_127_570; // crc32-ish "stackpanel.db.migrations" + +let inflight: Promise<void> | null = null; + +/** + * Apply every bundled migration that hasn't run yet against `db`. + * + * Idempotent and safe to call from many concurrent isolates: each pending + * migration runs inside its own transaction with a `pg_advisory_xact_lock`, + * and the per-isolate `inflight` cache deduplicates repeated calls within + * the same JS runtime. 
+ * + * @example + * import { db, runMigrations } from "@stackpanel/db"; + * await runMigrations(db); + */ +export async function runMigrations(db: Db): Promise<void> { + if (inflight) return inflight; + inflight = applyMigrations(db).catch((err) => { + inflight = null; + throw err; + }); + return inflight; +} + +async function applyMigrations(db: Db): Promise<void> { + // Snapshot whether `__drizzle_migrations` exists *before* we CREATE it, + // so we can distinguish "fresh DB" from "DB managed by a prior tool" + // (the legacy `db:push` flow) when deciding whether to apply 0000_init. + const tableCheck = await db.execute<{ exists: boolean }>(sql` + SELECT EXISTS ( + SELECT 1 + FROM pg_tables + WHERE schemaname = 'public' + AND tablename = '__drizzle_migrations' + ) AS "exists" + `); + const migrationsTableExisted = Boolean(tableCheck.rows[0]?.exists); + + await db.execute(sql` + CREATE TABLE IF NOT EXISTS "__drizzle_migrations" ( + "id" SERIAL PRIMARY KEY, + "hash" TEXT NOT NULL UNIQUE, + "created_at" BIGINT NOT NULL + ) + `); + + // Production-rollout safety net: if we just created `__drizzle_migrations` + // and the public schema already has tables, infer the schema was managed + // externally (the legacy `db:push` flow) and fast-forward every bundled + // migration to "already applied". Without this, the first deploy after + // this change would try to `CREATE TABLE "account"` against a DB that + // already has it and abort the migrate run. This is a one-time event + // per environment; subsequent deploys see the table and the normal + // diff-and-apply flow takes over. + if (!migrationsTableExisted) { + const otherTables = await db.execute<{ count: number }>(sql` + SELECT COUNT(*)::int AS "count" + FROM pg_tables + WHERE schemaname = 'public' + AND tablename != '__drizzle_migrations' + `); + const hasExistingSchema = (otherTables.rows[0]?.count ?? 
0) > 0; + if (hasExistingSchema) { + console.warn( + "[runMigrations] detected an externally-managed Postgres schema; " + + "fast-forwarding bundled migrations to applied without re-running SQL", + ); + for (const entry of MIGRATIONS_BUNDLE) { + await db.execute(sql` + INSERT INTO "__drizzle_migrations" ("hash", "created_at") + VALUES (${entry.tag}, ${Date.now()}) + ON CONFLICT ("hash") DO NOTHING + `); + } + return; + } + } + + // Apply pending migrations one at a time. Each migration runs inside its + // own transaction with a `pg_advisory_xact_lock` — the lock serialises + // concurrent isolates, the transaction makes SQL + journal-row insert + // atomic, and the lock auto-releases at COMMIT so we don't leak it back + // into the connection pool. + const ordered = [...MIGRATIONS_BUNDLE].sort((a, b) => a.idx - b.idx); + for (const entry of ordered) { + await applyOne(db, entry); + } +} + +async function applyOne(db: Db, entry: BundledMigration): Promise<void> { + await db.transaction(async (tx) => { + await tx.execute(sql`SELECT pg_advisory_xact_lock(${MIGRATION_LOCK_KEY})`); + + // Re-check applied status *inside* the lock — another isolate may have + // applied this migration between our outer check and the lock acquire. + const exists = await tx.execute<{ hash: string }>( + sql`SELECT "hash" FROM "__drizzle_migrations" WHERE "hash" = ${entry.tag}`, + ); + if (exists.rows.length > 0) return; + + const statements = splitStatements(entry.sql, entry.breakpoints); + for (const stmt of statements) { + if (!stmt) continue; + await tx.execute(sql.raw(stmt)); + } + await tx.execute( + sql`INSERT INTO "__drizzle_migrations" ("hash", "created_at") VALUES (${entry.tag}, ${Date.now()})`, + ); + }); +} + +function splitStatements(rawSql: string, hasBreakpoints: boolean): string[] { + // Drizzle inserts `--> statement-breakpoint` between statements when it + // generates the SQL. 
Splitting on the breakpoint preserves multi-statement +// semantics (each gets its own `tx.execute`) and avoids client-side SQL parsing +// bugs. (`CREATE INDEX CONCURRENTLY` still cannot be used here — Postgres +// rejects it inside a transaction block, and applyOne wraps every migration +// in one.) Older migrations without breakpoints fall back to a single execute — Postgres handles multi-statement strings just fine. + if (!hasBreakpoints) return [rawSql.trim()]; + return rawSql + .split(/-->\s*statement-breakpoint/i) + .map((s) => s.trim()) + .filter((s) => s.length > 0); +} + +/** + * Test-only escape hatch — clears the per-isolate dedup cache so a second + * `runMigrations(db)` actually runs against a fresh database in unit tests. + * Production callers should never need this. + * + * @internal + */ +export function __resetMigrationCache(): void { + inflight = null; +} diff --git a/packages/db/src/migrations-bundle.generated.ts b/packages/db/src/migrations-bundle.generated.ts new file mode 100644 index 00000000..740c17ef --- /dev/null +++ b/packages/db/src/migrations-bundle.generated.ts @@ -0,0 +1,36 @@ +// Auto-generated by packages/db/scripts/bundle-migrations.ts. +// Do NOT edit by hand — re-run `bun run --cwd packages/db db:generate` +// (or `bun run --cwd packages/db db:bundle` if you only changed the bundler). +// +// Each entry mirrors a row in drizzle/meta/_journal.json with the matching +// SQL inlined, so runtime callers (Cloudflare Workers, Node, Bun, tests) can +// apply migrations without filesystem access. + +export interface BundledMigration { + /** Sort order — matches the `idx` field in drizzle/meta/_journal.json. */ + readonly idx: number; + /** Tag — drizzle's stable, content-derived identifier (also the filename). */ + readonly tag: string; + /** Drizzle Kit version that produced the SQL. */ + readonly version: string; + /** Wall-clock ms when drizzle generated the migration. */ + readonly when: number; + /** Whether drizzle wrote `--> statement-breakpoint` separators. 
*/ + readonly breakpoints: boolean; + /** Raw SQL contents of drizzle/.sql, untouched. */ + readonly sql: string; +} + +export const MIGRATIONS_BUNDLE: readonly BundledMigration[] = [ + { + "idx": 0, + "version": "7", + "when": 1777635345492, + "tag": "0000_init", + "breakpoints": true, + "sql": "CREATE TABLE \"account\" (\n\t\"id\" text PRIMARY KEY NOT NULL,\n\t\"account_id\" text NOT NULL,\n\t\"provider_id\" text NOT NULL,\n\t\"user_id\" text NOT NULL,\n\t\"access_token\" text,\n\t\"refresh_token\" text,\n\t\"id_token\" text,\n\t\"access_token_expires_at\" timestamp,\n\t\"refresh_token_expires_at\" timestamp,\n\t\"scope\" text,\n\t\"password\" text,\n\t\"created_at\" timestamp DEFAULT now() NOT NULL,\n\t\"updated_at\" timestamp NOT NULL\n);\n--> statement-breakpoint\nCREATE TABLE \"session\" (\n\t\"id\" text PRIMARY KEY NOT NULL,\n\t\"expires_at\" timestamp NOT NULL,\n\t\"token\" text NOT NULL,\n\t\"created_at\" timestamp DEFAULT now() NOT NULL,\n\t\"updated_at\" timestamp NOT NULL,\n\t\"ip_address\" text,\n\t\"user_agent\" text,\n\t\"user_id\" text NOT NULL,\n\t\"active_organization_id\" text,\n\tCONSTRAINT \"session_token_unique\" UNIQUE(\"token\")\n);\n--> statement-breakpoint\nCREATE TABLE \"user\" (\n\t\"id\" text PRIMARY KEY NOT NULL,\n\t\"name\" text NOT NULL,\n\t\"email\" text NOT NULL,\n\t\"email_verified\" boolean DEFAULT false NOT NULL,\n\t\"image\" text,\n\t\"created_at\" timestamp DEFAULT now() NOT NULL,\n\t\"updated_at\" timestamp DEFAULT now() NOT NULL,\n\tCONSTRAINT \"user_email_unique\" UNIQUE(\"email\")\n);\n--> statement-breakpoint\nCREATE TABLE \"verification\" (\n\t\"id\" text PRIMARY KEY NOT NULL,\n\t\"identifier\" text NOT NULL,\n\t\"value\" text NOT NULL,\n\t\"expires_at\" timestamp NOT NULL,\n\t\"created_at\" timestamp DEFAULT now() NOT NULL,\n\t\"updated_at\" timestamp DEFAULT now() NOT NULL\n);\n--> statement-breakpoint\nCREATE TABLE \"invitation\" (\n\t\"id\" text PRIMARY KEY NOT NULL,\n\t\"email\" text NOT NULL,\n\t\"inviter_id\" text 
NOT NULL,\n\t\"organization_id\" text NOT NULL,\n\t\"role\" text,\n\t\"status\" text NOT NULL,\n\t\"created_at\" timestamp DEFAULT now() NOT NULL,\n\t\"expires_at\" timestamp NOT NULL\n);\n--> statement-breakpoint\nCREATE TABLE \"member\" (\n\t\"id\" text PRIMARY KEY NOT NULL,\n\t\"organization_id\" text NOT NULL,\n\t\"user_id\" text NOT NULL,\n\t\"role\" text NOT NULL,\n\t\"created_at\" timestamp DEFAULT now() NOT NULL\n);\n--> statement-breakpoint\nCREATE TABLE \"organization\" (\n\t\"id\" text PRIMARY KEY NOT NULL,\n\t\"name\" text NOT NULL,\n\t\"slug\" text NOT NULL,\n\t\"logo\" text,\n\t\"metadata\" text,\n\t\"created_at\" timestamp DEFAULT now() NOT NULL\n);\n--> statement-breakpoint\nCREATE TABLE \"organization_dek\" (\n\t\"organization_id\" text PRIMARY KEY NOT NULL,\n\t\"encrypted_dek\" \"bytea\" NOT NULL,\n\t\"kms_key_alias\" text NOT NULL,\n\t\"created_at\" timestamp DEFAULT now() NOT NULL\n);\n--> statement-breakpoint\nCREATE TABLE \"organization_state\" (\n\t\"id\" text PRIMARY KEY NOT NULL,\n\t\"organization_id\" text NOT NULL,\n\t\"stack\" text NOT NULL,\n\t\"stage\" text NOT NULL,\n\t\"fqn\" text NOT NULL,\n\t\"nonce\" \"bytea\" NOT NULL,\n\t\"encrypted_blob\" \"bytea\" NOT NULL,\n\t\"version\" integer DEFAULT 1 NOT NULL,\n\t\"created_at\" timestamp DEFAULT now() NOT NULL,\n\t\"updated_at\" timestamp DEFAULT now() NOT NULL\n);\n--> statement-breakpoint\nCREATE TABLE \"polar_event\" (\n\t\"id\" text PRIMARY KEY NOT NULL,\n\t\"polar_event_id\" text NOT NULL,\n\t\"event_type\" text NOT NULL,\n\t\"payload\" text NOT NULL,\n\t\"processed_at\" timestamp DEFAULT now() NOT NULL\n);\n--> statement-breakpoint\nCREATE TABLE \"user_subscription\" (\n\t\"id\" text PRIMARY KEY NOT NULL,\n\t\"user_id\" text NOT NULL,\n\t\"polar_customer_id\" text NOT NULL,\n\t\"polar_subscription_id\" text,\n\t\"plan\" text DEFAULT 'free' NOT NULL,\n\t\"status\" text DEFAULT 'active' NOT NULL,\n\t\"current_period_end\" timestamp,\n\t\"cancel_at_period_end\" text,\n\t\"created_at\" 
timestamp DEFAULT now() NOT NULL,\n\t\"updated_at\" timestamp DEFAULT now() NOT NULL\n);\n--> statement-breakpoint\nCREATE TABLE \"beta_waitlist\" (\n\t\"id\" text PRIMARY KEY NOT NULL,\n\t\"email\" text NOT NULL,\n\t\"name\" text,\n\t\"company\" text,\n\t\"role\" text,\n\t\"source\" text,\n\t\"notes\" text,\n\t\"referrer\" text,\n\t\"user_agent\" text,\n\t\"ip_hash\" text,\n\t\"invited_at\" timestamp,\n\t\"created_at\" timestamp DEFAULT now() NOT NULL\n);\n--> statement-breakpoint\nALTER TABLE \"account\" ADD CONSTRAINT \"account_user_id_user_id_fk\" FOREIGN KEY (\"user_id\") REFERENCES \"public\".\"user\"(\"id\") ON DELETE cascade ON UPDATE no action;--> statement-breakpoint\nALTER TABLE \"session\" ADD CONSTRAINT \"session_user_id_user_id_fk\" FOREIGN KEY (\"user_id\") REFERENCES \"public\".\"user\"(\"id\") ON DELETE cascade ON UPDATE no action;--> statement-breakpoint\nALTER TABLE \"invitation\" ADD CONSTRAINT \"invitation_inviter_id_user_id_fk\" FOREIGN KEY (\"inviter_id\") REFERENCES \"public\".\"user\"(\"id\") ON DELETE cascade ON UPDATE no action;--> statement-breakpoint\nALTER TABLE \"invitation\" ADD CONSTRAINT \"invitation_organization_id_organization_id_fk\" FOREIGN KEY (\"organization_id\") REFERENCES \"public\".\"organization\"(\"id\") ON DELETE cascade ON UPDATE no action;--> statement-breakpoint\nALTER TABLE \"member\" ADD CONSTRAINT \"member_organization_id_organization_id_fk\" FOREIGN KEY (\"organization_id\") REFERENCES \"public\".\"organization\"(\"id\") ON DELETE cascade ON UPDATE no action;--> statement-breakpoint\nALTER TABLE \"member\" ADD CONSTRAINT \"member_user_id_user_id_fk\" FOREIGN KEY (\"user_id\") REFERENCES \"public\".\"user\"(\"id\") ON DELETE cascade ON UPDATE no action;--> statement-breakpoint\nALTER TABLE \"organization_dek\" ADD CONSTRAINT \"organization_dek_organization_id_organization_id_fk\" FOREIGN KEY (\"organization_id\") REFERENCES \"public\".\"organization\"(\"id\") ON DELETE cascade ON UPDATE no action;--> 
statement-breakpoint\nALTER TABLE \"organization_state\" ADD CONSTRAINT \"organization_state_organization_id_organization_id_fk\" FOREIGN KEY (\"organization_id\") REFERENCES \"public\".\"organization\"(\"id\") ON DELETE cascade ON UPDATE no action;--> statement-breakpoint\nALTER TABLE \"user_subscription\" ADD CONSTRAINT \"user_subscription_user_id_user_id_fk\" FOREIGN KEY (\"user_id\") REFERENCES \"public\".\"user\"(\"id\") ON DELETE cascade ON UPDATE no action;--> statement-breakpoint\nCREATE INDEX \"account_userId_idx\" ON \"account\" USING btree (\"user_id\");--> statement-breakpoint\nCREATE INDEX \"session_userId_idx\" ON \"session\" USING btree (\"user_id\");--> statement-breakpoint\nCREATE INDEX \"verification_identifier_idx\" ON \"verification\" USING btree (\"identifier\");--> statement-breakpoint\nCREATE INDEX \"invitation_org_idx\" ON \"invitation\" USING btree (\"organization_id\");--> statement-breakpoint\nCREATE UNIQUE INDEX \"member_org_user_idx\" ON \"member\" USING btree (\"organization_id\",\"user_id\");--> statement-breakpoint\nCREATE INDEX \"member_user_idx\" ON \"member\" USING btree (\"user_id\");--> statement-breakpoint\nCREATE UNIQUE INDEX \"organization_slug_idx\" ON \"organization\" USING btree (\"slug\");--> statement-breakpoint\nCREATE UNIQUE INDEX \"organization_state_unique_idx\" ON \"organization_state\" USING btree (\"organization_id\",\"stack\",\"stage\",\"fqn\");--> statement-breakpoint\nCREATE INDEX \"organization_state_stage_idx\" ON \"organization_state\" USING btree (\"organization_id\",\"stack\",\"stage\");--> statement-breakpoint\nCREATE UNIQUE INDEX \"polar_event_polar_id_idx\" ON \"polar_event\" USING btree (\"polar_event_id\");--> statement-breakpoint\nCREATE UNIQUE INDEX \"user_subscription_user_idx\" ON \"user_subscription\" USING btree (\"user_id\");--> statement-breakpoint\nCREATE INDEX \"user_subscription_polar_customer_idx\" ON \"user_subscription\" USING btree (\"polar_customer_id\");--> 
statement-breakpoint\nCREATE UNIQUE INDEX \"beta_waitlist_email_uniq\" ON \"beta_waitlist\" USING btree (\"email\");--> statement-breakpoint\nCREATE INDEX \"beta_waitlist_created_idx\" ON \"beta_waitlist\" USING btree (\"created_at\");" + } +] as const; + +export const JOURNAL_VERSION = "7" as const; +export const JOURNAL_DIALECT = "postgresql" as const; diff --git a/turbo.json b/turbo.json index 9b6ce268..2152d17e 100644 --- a/turbo.json +++ b/turbo.json @@ -1 +1 @@ -{"$schema":"https://turbo.build/schema.json","tasks":{"alchemy:deploy":{"cache":false},"alchemy:destroy":{"cache":false},"alchemy:ensure":{},"build":{},"build:container":{"cache":false},"clean":{},"container:build":{"cache":false,"dependsOn":["build:container"]},"container:push":{"cache":false,"dependsOn":["container:build"]},"db:migrate":{},"db:push":{},"db:studio":{},"deploy":{"cache":false,"dependsOn":["container:push"]},"dev":{"cache":false},"format":{},"generate:proto":{},"generate:types":{},"lint":{},"test":{},"test:coverage":{},"test:watch":{},"typecheck":{}},"ui":"tui"} \ No newline at end of file +{"$schema":"https://turbo.build/schema.json","tasks":{"alchemy:deploy":{"cache":false},"alchemy:destroy":{"cache":false},"alchemy:ensure":{},"build":{},"build:container":{"cache":false},"clean":{},"container:build":{"cache":false,"dependsOn":["build:container"]},"container:push":{"cache":false,"dependsOn":["container:build"]},"db:generate":{},"db:migrate":{},"db:studio":{},"deploy":{"cache":false,"dependsOn":["container:push"]},"dev":{"cache":false},"format":{},"generate:proto":{},"generate:types":{},"lint":{},"test":{},"test:coverage":{},"test:watch":{},"typecheck":{}},"ui":"tui"} \ No newline at end of file