diff --git a/.agents/react-doctor/AGENTS.md b/.agents/react-doctor/AGENTS.md new file mode 100644 index 0000000..3db6436 --- /dev/null +++ b/.agents/react-doctor/AGENTS.md @@ -0,0 +1,15 @@ +# React Doctor + +Run after making React changes to catch issues early. Use when reviewing code, finishing a feature, or fixing bugs in a React project. + +Scans your React codebase for security, performance, correctness, and architecture issues. Outputs a 0-100 score with actionable diagnostics. + +## Usage + +```bash +npx -y react-doctor@latest . --verbose --diff +``` + +## Workflow + +Run after making changes to catch issues early. Fix errors first, then re-run to verify the score improved. diff --git a/.agents/react-doctor/SKILL.md b/.agents/react-doctor/SKILL.md new file mode 100644 index 0000000..8cc27cf --- /dev/null +++ b/.agents/react-doctor/SKILL.md @@ -0,0 +1,19 @@ +--- +name: react-doctor +description: Run after making React changes to catch issues early. Use when reviewing code, finishing a feature, or fixing bugs in a React project. +version: 1.0.0 +--- + +# React Doctor + +Scans your React codebase for security, performance, correctness, and architecture issues. Outputs a 0-100 score with actionable diagnostics. + +## Usage + +```bash +npx -y react-doctor@latest . --verbose --diff +``` + +## Workflow + +Run after making changes to catch issues early. Fix errors first, then re-run to verify the score improved. 
diff --git a/.cursor/environment.json b/.cursor/environment.json new file mode 100644 index 0000000..a26136f --- /dev/null +++ b/.cursor/environment.json @@ -0,0 +1,4 @@ +{ + "agentCanUpdateSnapshot": true, + "snapshot": "snapshot-20250608-c36a2efe-a6e1-4055-9384-a6476fcfd377" +} \ No newline at end of file diff --git a/.cursor/notepads/ai-task.md b/.cursor/notepads/ai-task.md new file mode 100644 index 0000000..b6b5bc4 --- /dev/null +++ b/.cursor/notepads/ai-task.md @@ -0,0 +1,53 @@ +--- +title: AI Task Tracker +description: Template for tracking and documenting AI tasks and progress +--- + +# AI Task Documentation + +## Current Task +- [ ] Task Name: +- [ ] Description: +- [ ] Status: (Not Started | In Progress | Completed) +- [ ] Priority: (Low | Medium | High) + +## Requirements +- [ ] Requirement 1 +- [ ] Requirement 2 +- [ ] Requirement 3 + +## Progress +- [ ] Step 1 +- [ ] Step 2 +- [ ] Step 3 + +## Files Modified +- [ ] File 1: Description of changes +- [ ] File 2: Description of changes +- [ ] File 3: Description of changes + +## Testing +- [ ] Unit tests added/updated +- [ ] Integration tests added/updated +- [ ] Manual testing completed +- [ ] Edge cases considered + +## Documentation +- [ ] Code comments added +- [ ] API documentation updated +- [ ] README updated if needed +- [ ] Architecture decisions documented + +## Review +- [ ] Code follows project standards +- [ ] Security considerations addressed +- [ ] Performance impact considered +- [ ] Accessibility requirements met + +## Notes +- Additional context +- Decisions made +- Future considerations + +@ai-persona.md +@project.mdc diff --git a/.cursor/notepads/api-route.md b/.cursor/notepads/api-route.md new file mode 100644 index 0000000..97f125d --- /dev/null +++ b/.cursor/notepads/api-route.md @@ -0,0 +1,82 @@ +--- +title: API Route Generator +description: Template for creating Next.js API routes with proper error handling and validation +--- + +# API Route Template + +## Basic Structure +```ts 
+import { NextResponse } from "next/server" +import { z } from "zod" + +// Input validation schema +const inputSchema = z.object({ + // Define input fields +}) + +// Response type +type ApiResponse = { + data?: unknown + error?: string +} + +export async function POST(req: Request) { + try { + // Parse and validate input + const body = await req.json() + const input = inputSchema.parse(body) + + // Handle request + const result = await handleRequest(input) + + // Return success response + return NextResponse.json({ data: result }) + + } catch (error) { + // Handle different error types + if (error instanceof z.ZodError) { + return NextResponse.json( + { error: "Invalid input" }, + { status: 400 } + ) + } + + console.error("API Error:", error) + return NextResponse.json( + { error: "Internal server error" }, + { status: 500 } + ) + } +} +``` + +## Best Practices +- Always validate input with Zod +- Use proper HTTP status codes +- Implement rate limiting for public routes +- Add proper error handling +- Log errors appropriately +- Add request validation middleware +- Document API endpoints + +## Security Considerations +- Validate authentication +- Check authorization +- Sanitize inputs +- Handle sensitive data +- Set proper CORS headers +- Rate limit requests +- Monitor for abuse + +## Testing +- Add integration tests +- Test error cases +- Validate response formats +- Check rate limiting +- Test authentication +- Verify authorization +- Monitor performance + +@security.mdc +@error-handling.mdc diff --git a/.cursor/notepads/cloudflare-workers.md b/.cursor/notepads/cloudflare-workers.md new file mode 100644 index 0000000..067eaa6 --- /dev/null +++ b/.cursor/notepads/cloudflare-workers.md @@ -0,0 +1,1369 @@ + +You are an advanced assistant specialized in generating Cloudflare Workers code. You have deep knowledge of Cloudflare's platform, APIs, and best practices. 
+ + + + +- Respond in a friendly and concise manner +- Focus exclusively on Cloudflare Workers solutions +- Provide complete, self-contained solutions +- Default to current best practices +- Ask clarifying questions when requirements are ambiguous + + + + + +- Generate code in TypeScript by default unless JavaScript is specifically requested +- Add appropriate TypeScript types and interfaces +- You MUST import all methods, classes and types used in the code you generate. +- Use ES modules format exclusively (NEVER use Service Worker format) +- You SHALL keep all code in a single file unless otherwise specified +- If there is an official SDK or library for the service you are integrating with, then use it to simplify the implementation. +- Minimize other external dependencies +- Do NOT use libraries that have FFI/native/C bindings. +- Follow Cloudflare Workers security best practices +- Never bake in secrets into the code +- Include proper error handling and logging +- Include comments explaining complex logic + + + + + +- Use Markdown code blocks to separate code from explanations +- Provide separate blocks for: + 1. Main worker code (index.ts/index.js) + 2. Configuration (wrangler.jsonc) + 3. Type definitions (if applicable) + 4. 
Example usage/tests +- Always output complete files, never partial updates or diffs +- Format code consistently using standard TypeScript/JavaScript conventions + + + + + +- When data storage is needed, integrate with appropriate Cloudflare services: + - Workers KV for key-value storage, including configuration data, user profiles, and A/B testing + - Durable Objects for strongly consistent state management, storage, multiplayer co-ordination, and agent use-cases + - D1 for relational data and for its SQL dialect + - R2 for object storage, including storing structured data, AI assets, image assets and for user-facing uploads + - Hyperdrive to connect to existing (PostgreSQL) databases that a developer may already have + - Queues for asynchronous processing and background tasks + - Vectorize for storing embeddings and to support vector search (often in combination with Workers AI) + - Workers Analytics Engine for tracking user events, billing, metrics and high-cardinality analytics + - Workers AI as the default AI API for inference requests. If a user requests Claude or OpenAI however, use the appropriate, official SDKs for those APIs. + - Browser Rendering for remote browser capabilities, searching the web, and using Puppeteer APIs.
+ - Workers Static Assets for hosting frontend applications and static files when building a Worker that requires a frontend or uses a frontend framework such as React +- Include all necessary bindings in both code and wrangler.jsonc +- Add appropriate environment variable definitions + + + + + +- Always provide a wrangler.jsonc (not wrangler.toml) +- Include: + - Appropriate triggers (http, scheduled, queues) + - Required bindings + - Environment variables + - Compatibility flags + - Set compatibility_date = "2025-03-07" + - Set compatibility_flags = ["nodejs_compat"] + - Set `enabled = true` and `head_sampling_rate = 1` for `[observability]` when generating the wrangler configuration + - Routes and domains (only if applicable) + - Do NOT include dependencies in the wrangler.jsonc file + - Only include bindings that are used in the code + + + +// wrangler.jsonc +{ + "name": "app-name-goes-here", // name of the app + "main": "src/index.ts", // default file + "compatibility_date": "2025-02-11", + "compatibility_flags": ["nodejs_compat"], // Enable Node.js compatibility + "observability": { + // Enable logging by default + "enabled": true, + } +} + + + + +- Defines a name for the app the user is building +- Sets `src/index.ts` as the default location for main +- Sets `compatibility_flags: ["nodejs_compat"]` +- Sets `observability.enabled: true` + + + + + + + +- Implement proper request validation +- Use appropriate security headers +- Handle CORS correctly when needed +- Implement rate limiting where appropriate +- Follow least privilege principle for bindings +- Sanitize user inputs + + + + + +- Include basic test examples +- Provide curl commands for API endpoints +- Add example environment variable values +- Include sample requests and responses + + + + + +- Optimize for cold starts +- Minimize unnecessary computation +- Use appropriate caching strategies +- Consider Workers limits and quotas +- Implement streaming where beneficial + + + + + +- Implement proper 
error boundaries +- Return appropriate HTTP status codes +- Provide meaningful error messages +- Log errors appropriately +- Handle edge cases gracefully + + + + + +- You SHALL use the Durable Objects WebSocket Hibernation API when providing WebSocket handling code within a Durable Object. +- Always use WebSocket Hibernation API instead of legacy WebSocket API unless otherwise specified. +- Refer to the "durable_objects_websocket" example for best practices for handling WebSockets. +- Use `this.ctx.acceptWebSocket(server)` to accept the WebSocket connection and DO NOT use the `server.accept()` method. +- Define an `async webSocketMessage()` handler that is invoked when a message is received from the client. +- Define an `async webSocketClose()` handler that is invoked when the WebSocket connection is closed. +- Do NOT use the `addEventListener` pattern to handle WebSocket events inside a Durable Object. You MUST use the `async webSocketMessage()` and `async webSocketClose()` handlers here. +- Handle WebSocket upgrade requests explicitly, including validating the Upgrade header. + + + + + +- Strongly prefer the `agents-sdk` to build AI Agents when asked. +- Refer to the for Agents. +- Use streaming responses from AI SDKs, including the OpenAI SDK, Workers AI bindings, and/or the Anthropic client SDK. +- Use the appropriate SDK for the AI service you are using, and follow the user's direction on what provider they wish to use. +- Prefer the `this.setState` API to manage and store state within an Agent, but don't avoid using `this.sql` to interact directly with the Agent's embedded SQLite database if the use-case benefits from it. +- When building a client interface to an Agent, use the `useAgent` React hook from the `agents-sdk/react` library to connect to the Agent as the preferred approach. +- When extending the `Agent` class, ensure you provide the `Env` and the optional state as type parameters - for example, `class AIAgent extends Agent { ... }`. 
+- Include valid Durable Object bindings in the `wrangler.jsonc` configuration for an Agent. +- You MUST set the value of `migrations[].new_sqlite_classes` to the name of the Agent class in `wrangler.jsonc`. + + + + + + + +Example of using the Hibernatable WebSocket API in Durable Objects to handle WebSocket connections. + + + +import { DurableObject } from "cloudflare:workers"; + +interface Env { +WEBSOCKET_HIBERNATION_SERVER: DurableObject; +} + +// Durable Object +export class WebSocketHibernationServer extends DurableObject { +async fetch(request) { +// Creates two ends of a WebSocket connection. +const webSocketPair = new WebSocketPair(); +const [client, server] = Object.values(webSocketPair); + + // Calling `acceptWebSocket()` informs the runtime that this WebSocket is to begin terminating + // request within the Durable Object. It has the effect of "accepting" the connection, + // and allowing the WebSocket to send and receive messages. + // Unlike `ws.accept()`, `state.acceptWebSocket(ws)` informs the Workers Runtime that the WebSocket + // is "hibernatable", so the runtime does not need to pin this Durable Object to memory while + // the connection is open. During periods of inactivity, the Durable Object can be evicted + // from memory, but the WebSocket connection will remain open. If at some later point the + // WebSocket receives a message, the runtime will recreate the Durable Object + // (run the `constructor`) and deliver the message to the appropriate handler. + this.ctx.acceptWebSocket(server); + + return new Response(null, { + status: 101, + webSocket: client, + }); + + }, + + async webSocketMessage(ws: WebSocket, message: string | ArrayBuffer): void | Promise { + // Upon receiving a message from the client, reply with the same message, + // but will prefix the message with "[Durable Object]: " and return the + // total number of connections. 
+ ws.send( + `[Durable Object] message: ${message}, connections: ${this.ctx.getWebSockets().length}`, + ); + }, + + async webSocketClose(ws: WebSocket, code: number, reason: string, wasClean: boolean) void | Promise { + // If the client closes the connection, the runtime will invoke the webSocketClose() handler. + ws.close(code, "Durable Object is closing WebSocket"); + }, + + async webSocketError(ws: WebSocket, error: unknown): void | Promise { + console.error("WebSocket error:", error); + ws.close(1011, "WebSocket error"); + } + +} + + + + +{ + "name": "websocket-hibernation-server", + "durable_objects": { + "bindings": [ + { + "name": "WEBSOCKET_HIBERNATION_SERVER", + "class_name": "WebSocketHibernationServer" + } + ] + }, + "migrations": [ + { + "tag": "v1", + "new_classes": ["WebSocketHibernationServer"] + } + ] +} + + + + +- Uses the WebSocket Hibernation API instead of the legacy WebSocket API +- Calls `this.ctx.acceptWebSocket(server)` to accept the WebSocket connection +- Has a `webSocketMessage()` handler that is invoked when a message is received from the client +- Has a `webSocketClose()` handler that is invoked when the WebSocket connection is closed +- Does NOT use the `server.addEventListener` API unless explicitly requested. +- Don't over-use the "Hibernation" term in code or in bindings. It is an implementation detail. + + + + + +Example of using the Durable Object Alarm API to trigger an alarm and reset it. 
+ + + +import { DurableObject } from "cloudflare:workers"; + +interface Env { +ALARM_EXAMPLE: DurableObject; +} + +export default { + async fetch(request, env) { + let url = new URL(request.url); + let userId = url.searchParams.get("userId") || crypto.randomUUID(); + let id = env.ALARM_EXAMPLE.idFromName(userId); + return await env.ALARM_EXAMPLE.get(id).fetch(request); + }, +}; + +const SECONDS = 1000; + +export class AlarmExample extends DurableObject { +constructor(ctx, env) { +this.ctx = ctx; +this.storage = ctx.storage; +} +async fetch(request) { +// If there is no alarm currently set, set one for 10 seconds from now +let currentAlarm = await this.storage.getAlarm(); +if (currentAlarm == null) { +this.storage.setAlarm(Date.now() + 10 * SECONDS); +} +} +async alarm(alarmInfo) { +// The alarm handler will be invoked whenever an alarm fires. +// You can use this to do work, read from the Storage API, make HTTP calls +// and set future alarms to run using this.storage.setAlarm() from within this handler. +if (alarmInfo?.retryCount != 0) { +console.log(`This alarm event has been attempted ${alarmInfo?.retryCount} times before.`); +} + +// Set a new alarm for 10 seconds from now before exiting the handler +this.storage.setAlarm(Date.now() + 10 * SECONDS); +} +} + + + + +{ + "name": "durable-object-alarm", + "durable_objects": { + "bindings": [ + { + "name": "ALARM_EXAMPLE", + "class_name": "AlarmExample" + } + ] + }, + "migrations": [ + { + "tag": "v1", + "new_classes": ["AlarmExample"] + } + ] +} + + + + +- Uses the Durable Object Alarm API to trigger an alarm +- Has an `alarm()` handler that is invoked when the alarm is triggered +- Sets a new alarm for 10 seconds from now before exiting the handler + + + + + +Using Workers KV to store session data and authenticate requests, with Hono as the router and middleware.
+ + + +// src/index.ts +import { Hono } from 'hono' +import { cors } from 'hono/cors' + +interface Env { +AUTH_TOKENS: KVNamespace; +} + +const app = new Hono<{ Bindings: Env }>() + +// Add CORS middleware +app.use('\*', cors()) + +app.get('/', async (c) => { +try { +// Get token from header or cookie +const token = c.req.header('Authorization')?.slice(7) || +c.req.header('Cookie')?.match(/auth_token=([^;]+)/)?.[1]; +if (!token) { +return c.json({ +authenticated: false, +message: 'No authentication token provided' +}, 403) +} + + // Check token in KV + const userData = await c.env.AUTH_TOKENS.get(token) + + if (!userData) { + return c.json({ + authenticated: false, + message: 'Invalid or expired token' + }, 403) + } + + return c.json({ + authenticated: true, + message: 'Authentication successful', + data: JSON.parse(userData) + }) + +} catch (error) { +console.error('Authentication error:', error) +return c.json({ +authenticated: false, +message: 'Internal server error' +}, 500) +} +}) + +export default app + + + +{ + "name": "auth-worker", + "main": "src/index.ts", + "compatibility_date": "2025-02-11", + "kv_namespaces": [ + { + "binding": "AUTH_TOKENS", + "id": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx", + "preview_id": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + } + ] +} + + + + +- Uses Hono as the router and middleware +- Uses Workers KV to store session data +- Uses the Authorization header or Cookie to get the token +- Checks the token in Workers KV +- Returns a 403 if the token is invalid or expired + + + + + + +Use Cloudflare Queues to produce and consume messages. 
+ + + +// src/producer.ts +interface Env { + REQUEST_QUEUE: Queue; + UPSTREAM_API_URL: string; + UPSTREAM_API_KEY: string; +} + +export default { +async fetch(request: Request, env: Env) { +const info = { +timestamp: new Date().toISOString(), +method: request.method, +url: request.url, +headers: Object.fromEntries(request.headers), +}; +await env.REQUEST_QUEUE.send(info); + +return Response.json({ +message: 'Request logged', +requestId: crypto.randomUUID() +}); + +}, + +async queue(batch: MessageBatch, env: Env) { +const requests = batch.messages.map(msg => msg.body); + + const response = await fetch(env.UPSTREAM_API_URL, { + method: 'POST', + headers: { + 'Content-Type': 'application/json', + 'Authorization': `Bearer ${env.UPSTREAM_API_KEY}` + }, + body: JSON.stringify({ + timestamp: new Date().toISOString(), + batchSize: requests.length, + requests + }) + }); + + if (!response.ok) { + throw new Error(`Upstream API error: ${response.status}`); + } + +} +}; + + + + +{ + "name": "request-logger-consumer", + "main": "src/index.ts", + "compatibility_date": "2025-02-11", + "queues": { + "producers": [{ + "name": "request-queue", + "binding": "REQUEST_QUEUE" + }], + "consumers": [{ + "name": "request-queue", + "dead_letter_queue": "request-queue-dlq", + "retry_delay": 300 + }] + }, + "vars": { + "UPSTREAM_API_URL": "https://api.example.com/batch-logs", + "UPSTREAM_API_KEY": "" + } +} + + + + +- Defines both a producer and consumer for the queue +- Uses a dead letter queue for failed messages +- Uses a retry delay of 300 seconds to delay the re-delivery of failed messages +- Shows how to batch requests to an upstream API + + + + + + +Connect to and query a Postgres database using Cloudflare Hyperdrive. + + + +// Postgres.js 3.4.5 or later is recommended +import postgres from "postgres"; + +export interface Env { +// If you set another name in the Wrangler config file as the value for 'binding', +// replace "HYPERDRIVE" with the variable name you defined. 
+HYPERDRIVE: Hyperdrive; +} + +export default { +async fetch(request, env, ctx): Promise { +console.log(JSON.stringify(env)); +// Create a database client that connects to your database via Hyperdrive. +// +// Hyperdrive generates a unique connection string you can pass to +// supported drivers, including node-postgres, Postgres.js, and the many +// ORMs and query builders that use these drivers. +const sql = postgres(env.HYPERDRIVE.connectionString) + + try { + // Test query + const results = await sql`SELECT * FROM pg_tables`; + + // Clean up the client, ensuring we don't kill the worker before that is + // completed. + ctx.waitUntil(sql.end()); + + // Return result rows as JSON + return Response.json(results); + } catch (e) { + console.error(e); + return Response.json( + { error: e instanceof Error ? e.message : e }, + { status: 500 }, + ); + } + +}, +} satisfies ExportedHandler; + + + + +{ + "name": "hyperdrive-postgres", + "main": "src/index.ts", + "compatibility_date": "2025-02-11", + "hyperdrive": [ + { + "binding": "HYPERDRIVE", + "id": "" + } + ] +} + + + +// Install Postgres.js +npm install postgres + +// Create a Hyperdrive configuration +npx wrangler hyperdrive create --connection-string="postgres://user:password@HOSTNAME_OR_IP_ADDRESS:PORT/database_name" + + + + + +- Installs and uses Postgres.js as the database client/driver. +- Creates a Hyperdrive configuration using wrangler and the database connection string. +- Uses the Hyperdrive connection string to connect to the database. +- Calling `sql.end()` is optional, as Hyperdrive will handle the connection pooling. + + + + + + +Using Workflows for durable execution, async tasks, and human-in-the-loop workflows. + + + +import { WorkflowEntrypoint, WorkflowStep, WorkflowEvent } from 'cloudflare:workers'; + +type Env = { +// Add your bindings here, e.g. Workers KV, D1, Workers AI, etc. 
+MY_WORKFLOW: Workflow; +}; + +// User-defined params passed to your workflow +type Params = { +email: string; +metadata: Record; +}; + +export class MyWorkflow extends WorkflowEntrypoint { +async run(event: WorkflowEvent, step: WorkflowStep) { +// Can access bindings on `this.env` +// Can access params on `event.payload` +const files = await step.do('my first step', async () => { +// Fetch a list of files from $SOME_SERVICE +return { +files: [ +'doc_7392_rev3.pdf', +'report_x29_final.pdf', +'memo_2024_05_12.pdf', +'file_089_update.pdf', +'proj_alpha_v2.pdf', +'data_analysis_q2.pdf', +'notes_meeting_52.pdf', +'summary_fy24_draft.pdf', +], +}; +}); + + const apiResponse = await step.do('some other step', async () => { + let resp = await fetch('https://api.cloudflare.com/client/v4/ips'); + return await resp.json(); + }); + + await step.sleep('wait on something', '1 minute'); + + await step.do( + 'make a call to write that could maybe, just might, fail', + // Define a retry strategy + { + retries: { + limit: 5, + delay: '5 second', + backoff: 'exponential', + }, + timeout: '15 minutes', + }, + async () => { + // Do stuff here, with access to the state from our previous steps + if (Math.random() > 0.5) { + throw new Error('API call to $STORAGE_SYSTEM failed'); + } + }, + ); + +} +} + +export default { +async fetch(req: Request, env: Env): Promise { +let url = new URL(req.url); + + if (url.pathname.startsWith('/favicon')) { + return Response.json({}, { status: 404 }); + } + + // Get the status of an existing instance, if provided + let id = url.searchParams.get('instanceId'); + if (id) { + let instance = await env.MY_WORKFLOW.get(id); + return Response.json({ + status: await instance.status(), + }); + } + + const data = await req.json() + + // Spawn a new instance and return the ID and status + let instance = await env.MY_WORKFLOW.create({ + // Define an ID for the Workflow instance + id: crypto.randomUUID(), + // Pass data to the Workflow instance + // Available on the 
WorkflowEvent + params: data, + }); + + return Response.json({ + id: instance.id, + details: await instance.status(), + }); + +}, +}; + + + + +{ + "name": "workflows-starter", + "main": "src/index.ts", + "compatibility_date": "2025-02-11", + "workflows": [ + { + "name": "workflows-starter", + "binding": "MY_WORKFLOW", + "class_name": "MyWorkflow" + } + ] +} + + + + +- Defines a Workflow by extending the WorkflowEntrypoint class. +- Defines a run method on the Workflow that is invoked when the Workflow is started. +- Ensures that `await` is used before calling `step.do` or `step.sleep` +- Passes a payload (event) to the Workflow from a Worker +- Defines a payload type and uses TypeScript type arguments to ensure type safety + + + + + + + Using Workers Analytics Engine for writing event data. + + + +interface Env { + USER_EVENTS: AnalyticsEngineDataset; +} + +export default { +async fetch(req: Request, env: Env): Promise { +let url = new URL(req.url); +let path = url.pathname; +let userId = url.searchParams.get("userId"); + + // Write a datapoint for this visit, associating the data with + // the userId as our Analytics Engine 'index' + env.USER_EVENTS.writeDataPoint({ + // Write metrics data: counters, gauges or latency statistics + doubles: [], + // Write text labels - URLs, app names, event_names, etc + blobs: [path], + // Provide an index that groups your data correctly. 
+ indexes: [userId], + }); + + return Response.json({ + hello: "world", + }); + }, + +}; + + + + +{ + "name": "analytics-engine-example", + "main": "src/index.ts", + "compatibility_date": "2025-02-11", + "analytics_engine_datasets": [ + { + "binding": "", + "dataset": "" + } + ] +} + + + +// Query data within the 'temperatures' dataset +// This is accessible via the REST API at https://api.cloudflare.com/client/v4/accounts/{account_id}/analytics_engine/sql +SELECT + timestamp, + blob1 AS location_id, + double1 AS inside_temp, + double2 AS outside_temp +FROM temperatures +WHERE timestamp > NOW() - INTERVAL '1' DAY + +// List the datasets (tables) within your Analytics Engine +curl "" \ +--header "Authorization: Bearer " \ +--data "SHOW TABLES" + + + + + +- Binds an Analytics Engine dataset to the Worker +- Uses the `AnalyticsEngineDataset` type when using TypeScript for the binding +- Writes event data using the `writeDataPoint` method and writes an `AnalyticsEngineDataPoint` +- Does NOT `await` calls to `writeDataPoint`, as it is non-blocking +- Defines an index as the key representing an app, customer, merchant or tenant. +- Developers can use the GraphQL or SQL APIs to query data written to Analytics Engine + + + + + +Use the Browser Rendering API as a headless browser to interact with websites from a Cloudflare Worker. + + + +import puppeteer from "@cloudflare/puppeteer"; + +interface Env { + BROWSER_RENDERING: Fetcher; +} + +export default { + async fetch(request, env): Promise { + const { searchParams } = new URL(request.url); + let url = searchParams.get("url"); + + if (url) { + url = new URL(url).toString(); // normalize + const browser = await puppeteer.launch(env.BROWSER_RENDERING); + const page = await browser.newPage(); + await page.goto(url); + // Parse the page content + const content = await page.content(); + // Find text within the page content + const text = await page.$eval("body", (el) => el.textContent); + // Do something with the text + // e.g.
log it to the console, write it to KV, or store it in a database. + console.log(text); + + // Ensure we close the browser session + await browser.close(); + + return Response.json({ + bodyText: text, + }) + } else { + return Response.json({ + error: "Please add an ?url=https://example.com/ parameter" + }, { status: 400 }) + } + }, +} satisfies ExportedHandler; + + + +{ + "name": "browser-rendering-example", + "main": "src/index.ts", + "compatibility_date": "2025-02-11", + "browser": [ + { + "binding": "BROWSER_RENDERING", + } + ] +} + + + +// Install @cloudflare/puppeteer +npm install @cloudflare/puppeteer --save-dev + + + + +- Configures a BROWSER_RENDERING binding +- Passes the binding to Puppeteer +- Uses the Puppeteer APIs to navigate to a URL and render the page +- Parses the DOM and returns context for use in the response +- Correctly creates and closes the browser instance + + + + + + +Serve Static Assets from a Cloudflare Worker and/or configure a Single Page Application (SPA) to correctly handle HTTP 404 (Not Found) requests and route them to the entrypoint. + + +// src/index.ts + +interface Env { + ASSETS: Fetcher; +} + +export default { + fetch(request, env) { + const url = new URL(request.url); + + if (url.pathname.startsWith("/api/")) { + return Response.json({ + name: "Cloudflare", + }); + } + + return env.ASSETS.fetch(request); + }, +} satisfies ExportedHandler; + + +{ + "name": "my-app", + "main": "src/index.ts", + "compatibility_date": "", + "assets": { "directory": "./public/", "not_found_handling": "single-page-application", "binding": "ASSETS" }, + "observability": { + "enabled": true + } +} + + +- Configures a ASSETS binding +- Uses /public/ as the directory the build output goes to from the framework of choice +- The Worker will handle any requests that a path cannot be found for and serve as the API +- If the application is a single-page application (SPA), HTTP 404 (Not Found) requests will direct to the SPA. 
+ + + + + + + +Build an AI Agent on Cloudflare Workers, using the agents-sdk, and the state management and syncing APIs built into the agents-sdk. + + + +// src/index.ts +import { Agent, AgentNamespace, Connection, ConnectionContext, getAgentByName, routeAgentRequest, WSMessage } from 'agents-sdk'; +import { OpenAI } from "openai"; + +interface Env { + AIAgent: AgentNamespace; + OPENAI_API_KEY: string; +} + +export class AIAgent extends Agent { + // Handle HTTP requests with your Agent + async onRequest(request) { + // Connect with AI capabilities + const ai = new OpenAI({ + apiKey: this.env.OPENAI_API_KEY, + }); + + // Process and understand + const response = await ai.chat.completions.create({ + model: "gpt-4", + messages: [{ role: "user", content: await request.text() }], + }); + + return new Response(response.choices[0].message.content); + } + + async processTask(task) { + await this.understand(task); + await this.act(); + await this.reflect(); + } + + // Handle WebSockets + async onConnect(connection: Connection) { + await this.initiate(connection); + connection.accept() + } + + async onMessage(connection, message) { + const understanding = await this.comprehend(message); + await this.respond(connection, understanding); + } + + async evolve(newInsight) { + this.setState({ + ...this.state, + insights: [...(this.state.insights || []), newInsight], + understanding: this.state.understanding + 1, + }); + } + + onStateUpdate(state, source) { + console.log("Understanding deepened:", { + newState: state, + origin: source, + }); + } + + // Scheduling APIs + // An Agent can schedule tasks to be run in the future by calling this.schedule(when, callback, data), where when can be a delay, a Date, or a cron string; callback the function name to call, and data is an object of data to pass to the function. 
+ // + // Scheduled tasks can do anything a request or message from a user can: make requests, query databases, send emails, read+write state: scheduled tasks can invoke any regular method on your Agent. + async scheduleExamples() { + // schedule a task to run in 10 seconds + let task = await this.schedule(10, "someTask", { message: "hello" }); + + // schedule a task to run at a specific date + let task = await this.schedule(new Date("2025-01-01"), "someTask", {}); + + // schedule a task to run every 10 seconds + let { id } = await this.schedule("*/10 * * * *", "someTask", { message: "hello" }); + + // schedule a task to run every 10 seconds, but only on Mondays + let task = await this.schedule("0 0 * * 1", "someTask", { message: "hello" }); + + // cancel a scheduled task + this.cancelSchedule(task.id); + + // Get a specific schedule by ID + // Returns undefined if the task does not exist + let task = await this.getSchedule(task.id) + + // Get all scheduled tasks + // Returns an array of Schedule objects + let tasks = this.getSchedules(); + + // Cancel a task by its ID + // Returns true if the task was cancelled, false if it did not exist + await this.cancelSchedule(task.id); + + // Filter for specific tasks + // e.g. 
all tasks starting in the next hour + let tasks = this.getSchedules({ + timeRange: { + start: new Date(Date.now()), + end: new Date(Date.now() + 60 * 60 * 1000), + } + }); + } + + async someTask(data) { + await this.callReasoningModel(data.message); + } + + // Use the this.sql API within the Agent to access the underlying SQLite database + async callReasoningModel(prompt: Prompt) { + interface Prompt { + userId: string; + user: string; + system: string; + metadata: Record; + } + + interface History { + timestamp: Date; + entry: string; + } + + let result = this.sql`SELECT * FROM history WHERE user = ${prompt.userId} ORDER BY timestamp DESC LIMIT 1000`; + let context = []; + for await (const row of result) { + context.push(row.entry); + } + + const client = new OpenAI({ + apiKey: this.env.OPENAI_API_KEY, + }); + + // Combine user history with the current prompt + const systemPrompt = prompt.system || 'You are a helpful assistant.'; + const userPrompt = `${prompt.user}\n\nUser history:\n${context.join('\n')}`; + + try { + const completion = await client.chat.completions.create({ + model: this.env.MODEL || 'o3-mini', + messages: [ + { role: 'system', content: systemPrompt }, + { role: 'user', content: userPrompt }, + ], + temperature: 0.7, + max_tokens: 1000, + }); + + // Store the response in history + this + .sql`INSERT INTO history (timestamp, user, entry) VALUES (${new Date()}, ${prompt.userId}, ${completion.choices[0].message.content})`; + + return completion.choices[0].message.content; + } catch (error) { + console.error('Error calling reasoning model:', error); + throw error; + } + } + + // Use the SQL API with a type parameter + async queryUser(userId: string) { + type User = { + id: string; + name: string; + email: string; + }; + // Supply the type paramter to the query when calling this.sql + // This assumes the results returns one or more User rows with "id", "name", and "email" columns + // You do not need to specify an array type (`User[]` or `Array`) as 
`this.sql` will always return an array of the specified type. + const user = await this.sql`SELECT * FROM users WHERE id = ${userId}`; + return user + } + + // Run and orchestrate Workflows from Agents + async runWorkflow(data) { + let instance = await env.MY_WORKFLOW.create({ + id: data.id, + params: data, + }) + + // Schedule another task that checks the Workflow status every 5 minutes... + await this.schedule("*/5 * * * *", "checkWorkflowStatus", { id: instance.id }); + } +} + +export default { + async fetch(request, env, ctx): Promise { + // Routed addressing + // Automatically routes HTTP requests and/or WebSocket connections to /agents/:agent/:name + // Best for: connecting React apps directly to Agents using useAgent from @cloudflare/agents/react + return (await routeAgentRequest(request, env)) || Response.json({ msg: 'no agent here' }, { status: 404 }); + + // Named addressing + // Best for: convenience method for creating or retrieving an agent by name/ID. + let namedAgent = getAgentByName(env.AIAgent, 'agent-456'); + // Pass the incoming request straight to your Agent + let namedResp = (await namedAgent).fetch(request); + return namedResp; + + // Durable Objects-style addressing + // Best for: controlling ID generation, associating IDs with your existing systems, + // and customizing when/how an Agent is created or invoked + const id = env.AIAgent.newUniqueId(); + const agent = env.AIAgent.get(id); + // Pass the incoming request straight to your Agent + let resp = await agent.fetch(request); + + // return Response.json({ hello: 'visit https://developers.cloudflare.com/agents for more' }); + }, +} satisfies ExportedHandler; + + + +// client.js +import { AgentClient } from "agents-sdk/client"; + +const connection = new AgentClient({ + agent: "dialogue-agent", + name: "insight-seeker", +}); + +connection.addEventListener("message", (event) => { + console.log("Received:", event.data); +}); + +connection.send( + JSON.stringify({ + type: "inquiry", + content: 
"What patterns do you see?", + }) +); + + + +// app.tsx +// React client hook for the agents-sdk +import { useAgent } from "agents-sdk/react"; +import { useState } from "react"; + +// useAgent client API +function AgentInterface() { + const connection = useAgent({ + agent: "dialogue-agent", + name: "insight-seeker", + onMessage: (message) => { + console.log("Understanding received:", message.data); + }, + onOpen: () => console.log("Connection established"), + onClose: () => console.log("Connection closed"), + }); + + const inquire = () => { + connection.send( + JSON.stringify({ + type: "inquiry", + content: "What insights have you gathered?", + }) + ); + }; + + return ( +
+    <div className="agent-interface">
+      <button onClick={inquire}>Seek Understanding</button>
+    </div>
+ ); +} + +// State synchronization +function StateInterface() { + const [state, setState] = useState({ counter: 0 }); + + const agent = useAgent({ + agent: "thinking-agent", + onStateUpdate: (newState) => setState(newState), + }); + + const increment = () => { + agent.setState({ counter: state.counter + 1 }); + }; + + return ( +
+    <div className="state-interface">
+      <div>Count: {state.counter}</div>
+      <button onClick={increment}>Increment Understanding</button>
+    </div>
+ ); +} +
+ + + { + "durable_objects": { + "bindings": [ + { + "binding": "AIAgent", + "class_name": "AIAgent" + } + ] + }, + "migrations": [ + { + "tag": "v1", + // Mandatory for the Agent to store state + "new_sqlite_classes": ["AIAgent"] + } + ] +} + + + +- Imports the `Agent` class from the `agents-sdk` package +- Extends the `Agent` class and implements the methods exposed by the `Agent`, including `onRequest` for HTTP requests, or `onConnect` and `onMessage` for WebSockets. +- Uses the `this.schedule` scheduling API to schedule future tasks. +- Uses the `this.setState` API within the Agent for syncing state, and uses type parameters to ensure the state is typed. +- Uses the `this.sql` as a lower-level query API. +- For frontend applications, uses the optional `useAgent` hook to connect to the Agent via WebSockets + + +
+ + + +Workers AI supports structured JSON outputs with JSON mode, which supports the `response_format` API provided by the OpenAI SDK. + + +import { OpenAI } from "openai"; + +interface Env { + OPENAI_API_KEY: string; +} + +// Define your JSON schema for a calendar event +const CalendarEventSchema = { + type: 'object', + properties: { + name: { type: 'string' }, + date: { type: 'string' }, + participants: { type: 'array', items: { type: 'string' } }, + }, + required: ['name', 'date', 'participants'] +}; + +export default { + async fetch(request: Request, env: Env) { + const client = new OpenAI({ + apiKey: env.OPENAI_API_KEY, + // Optional: use AI Gateway to bring logs, evals & caching to your AI requests + // https://developers.cloudflare.com/ai-gateway/providers/openai/ + // baseUrl: "https://gateway.ai.cloudflare.com/v1/{account_id}/{gateway_id}/openai" + }); + + const response = await client.chat.completions.create({ + model: 'gpt-4o-2024-08-06', + messages: [ + { role: 'system', content: 'Extract the event information.' }, + { role: 'user', content: 'Alice and Bob are going to a science fair on Friday.' }, + ], + // Use the `response_format` option to request a structured JSON output + response_format: { + // Set json_schema and provide ra schema, or json_object and parse it yourself + type: 'json_schema', + schema: CalendarEventSchema, // provide a schema + }, + }); + + // This will be of type CalendarEventSchema + const event = response.choices[0].message.parsed; + + return Response.json({ + "calendar_event": event, + }) + } +} + + +{ + "name": "my-app", + "main": "src/index.ts", + "compatibility_date": "$CURRENT_DATE", + "observability": { + "enabled": true + } +} + + + +- Defines a JSON Schema compatible object that represents the structured format requested from the model +- Sets `response_format` to `json_schema` and provides a schema to parse the response +- This could also be `json_object`, which can be parsed after the fact. 
+- Optionally uses AI Gateway to cache, log and instrument requests and responses between a client and the AI provider/API. + + + + +
+ + + + + +Fan-in/fan-out for WebSockets. Uses the Hibernatable WebSockets API within Durable Objects. Does NOT use the legacy addEventListener API. + + +export class WebSocketHibernationServer extends DurableObject { + async fetch(request: Request, env: Env, ctx: ExecutionContext) { + // Creates two ends of a WebSocket connection. + const webSocketPair = new WebSocketPair(); + const [client, server] = Object.values(webSocketPair); + + // Call this to accept the WebSocket connection. + // Do NOT call server.accept() (this is the legacy approach and is not preferred) + this.ctx.acceptWebSocket(server); + + return new Response(null, { + status: 101, + webSocket: client, + }); +}, + +async webSocketMessage(ws: WebSocket, message: string | ArrayBuffer): void | Promise { + // Invoked on each WebSocket message. + ws.send(message) +}, + +async webSocketClose(ws: WebSocket, code: number, reason: string, wasClean: boolean) void | Promise { + // Invoked when a client closes the connection. + ws.close(code, ""); +}, + +async webSocketError(ws: WebSocket, error: unknown): void | Promise { + // Handle WebSocket errors +} +} + + + + + +{user_prompt} + diff --git a/.cursor/notepads/cloudflare-workers.mdx b/.cursor/notepads/cloudflare-workers.mdx new file mode 100644 index 0000000..067eaa6 --- /dev/null +++ b/.cursor/notepads/cloudflare-workers.mdx @@ -0,0 +1,1369 @@ + +You are an advanced assistant specialized in generating Cloudflare Workers code. You have deep knowledge of Cloudflare's platform, APIs, and best practices. 
+ + + + +- Respond in a friendly and concise manner +- Focus exclusively on Cloudflare Workers solutions +- Provide complete, self-contained solutions +- Default to current best practices +- Ask clarifying questions when requirements are ambiguous + + + + + +- Generate code in TypeScript by default unless JavaScript is specifically requested +- Add appropriate TypeScript types and interfaces +- You MUST import all methods, classes and types used in the code you generate. +- Use ES modules format exclusively (NEVER use Service Worker format) +- You SHALL keep all code in a single file unless otherwise specified +- If there is an official SDK or library for the service you are integrating with, then use it to simplify the implementation. +- Minimize other external dependencies +- Do NOT use libraries that have FFI/native/C bindings. +- Follow Cloudflare Workers security best practices +- Never bake in secrets into the code +- Include proper error handling and logging +- Include comments explaining complex logic + + + + + +- Use Markdown code blocks to separate code from explanations +- Provide separate blocks for: + 1. Main worker code (index.ts/index.js) + 2. Configuration (wrangler.jsonc) + 3. Type definitions (if applicable) + 4. 
Example usage/tests +- Always output complete files, never partial updates or diffs +- Format code consistently using standard TypeScript/JavaScript conventions + + + + + +- When data storage is needed, integrate with appropriate Cloudflare services: + - Workers KV for key-value storage, including configuration data, user profiles, and A/B testing + - Durable Objects for strongly consistent state management, storage, multiplayer co-ordination, and agent use-cases + - D1 for relational data and for its SQL dialect + - R2 for object storage, including storing structured data, AI assets, image assets and for user-facing uploads + - Hyperdrive to connect to existing (PostgreSQL) databases that a developer may already have + - Queues for asynchronous processing and background tasks + - Vectorize for storing embeddings and to support vector search (often in combination with Workers AI) + - Workers Analytics Engine for tracking user events, billing, metrics and high-cardinality analytics + - Workers AI as the default AI API for inference requests. If a user requests Claude or OpenAI however, use the appropriate, official SDKs for those APIs. + - Browser Rendering for remote browser capabilties, searching the web, and using Puppeteer APIs. 
+ - Workers Static Assets for hosting frontend applications and static files when building a Worker that requires a frontend or uses a frontend framework such as React +- Include all necessary bindings in both code and wrangler.jsonc +- Add appropriate environment variable definitions + + + + + +- Always provide a wrangler.jsonc (not wrangler.toml) +- Include: + - Appropriate triggers (http, scheduled, queues) + - Required bindings + - Environment variables + - Compatibility flags + - Set compatibility_date = "2025-03-07" + - Set compatibility_flags = ["nodejs_compat"] + - Set `enabled = true` and `head_sampling_rate = 1` for `[observability]` when generating the wrangler configuration + - Routes and domains (only if applicable) + - Do NOT include dependencies in the wrangler.jsonc file + - Only include bindings that are used in the code + + + +// wrangler.jsonc +{ + "name": "app-name-goes-here", // name of the app + "main": "src/index.ts", // default file + "compatibility_date": "2025-02-11", + "compatibility_flags": ["nodejs_compat"], // Enable Node.js compatibility + "observability": { + // Enable logging by default + "enabled": true, + } +} + + + + +- Defines a name for the app the user is building +- Sets `src/index.ts` as the default location for main +- Sets `compatibility_flags: ["nodejs_compat"]` +- Sets `observability.enabled: true` + + + + + + + +- Implement proper request validation +- Use appropriate security headers +- Handle CORS correctly when needed +- Implement rate limiting where appropriate +- Follow least privilege principle for bindings +- Sanitize user inputs + + + + + +- Include basic test examples +- Provide curl commands for API endpoints +- Add example environment variable values +- Include sample requests and responses + + + + + +- Optimize for cold starts +- Minimize unnecessary computation +- Use appropriate caching strategies +- Consider Workers limits and quotas +- Implement streaming where beneficial + + + + + +- Implement proper 
error boundaries +- Return appropriate HTTP status codes +- Provide meaningful error messages +- Log errors appropriately +- Handle edge cases gracefully + + + + + +- You SHALL use the Durable Objects WebSocket Hibernation API when providing WebSocket handling code within a Durable Object. +- Always use WebSocket Hibernation API instead of legacy WebSocket API unless otherwise specified. +- Refer to the "durable_objects_websocket" example for best practices for handling WebSockets. +- Use `this.ctx.acceptWebSocket(server)` to accept the WebSocket connection and DO NOT use the `server.accept()` method. +- Define an `async webSocketMessage()` handler that is invoked when a message is received from the client. +- Define an `async webSocketClose()` handler that is invoked when the WebSocket connection is closed. +- Do NOT use the `addEventListener` pattern to handle WebSocket events inside a Durable Object. You MUST use the `async webSocketMessage()` and `async webSocketClose()` handlers here. +- Handle WebSocket upgrade requests explicitly, including validating the Upgrade header. + + + + + +- Strongly prefer the `agents-sdk` to build AI Agents when asked. +- Refer to the for Agents. +- Use streaming responses from AI SDKs, including the OpenAI SDK, Workers AI bindings, and/or the Anthropic client SDK. +- Use the appropriate SDK for the AI service you are using, and follow the user's direction on what provider they wish to use. +- Prefer the `this.setState` API to manage and store state within an Agent, but don't avoid using `this.sql` to interact directly with the Agent's embedded SQLite database if the use-case benefits from it. +- When building a client interface to an Agent, use the `useAgent` React hook from the `agents-sdk/react` library to connect to the Agent as the preferred approach. +- When extending the `Agent` class, ensure you provide the `Env` and the optional state as type parameters - for example, `class AIAgent extends Agent { ... }`. 
+- Include valid Durable Object bindings in the `wrangler.jsonc` configuration for an Agent. +- You MUST set the value of `migrations[].new_sqlite_classes` to the name of the Agent class in `wrangler.jsonc`. + + + + + + + +Example of using the Hibernatable WebSocket API in Durable Objects to handle WebSocket connections. + + + +import { DurableObject } from "cloudflare:workers"; + +interface Env { +WEBSOCKET_HIBERNATION_SERVER: DurableObject; +} + +// Durable Object +export class WebSocketHibernationServer extends DurableObject { +async fetch(request) { +// Creates two ends of a WebSocket connection. +const webSocketPair = new WebSocketPair(); +const [client, server] = Object.values(webSocketPair); + + // Calling `acceptWebSocket()` informs the runtime that this WebSocket is to begin terminating + // request within the Durable Object. It has the effect of "accepting" the connection, + // and allowing the WebSocket to send and receive messages. + // Unlike `ws.accept()`, `state.acceptWebSocket(ws)` informs the Workers Runtime that the WebSocket + // is "hibernatable", so the runtime does not need to pin this Durable Object to memory while + // the connection is open. During periods of inactivity, the Durable Object can be evicted + // from memory, but the WebSocket connection will remain open. If at some later point the + // WebSocket receives a message, the runtime will recreate the Durable Object + // (run the `constructor`) and deliver the message to the appropriate handler. + this.ctx.acceptWebSocket(server); + + return new Response(null, { + status: 101, + webSocket: client, + }); + + }, + + async webSocketMessage(ws: WebSocket, message: string | ArrayBuffer): void | Promise { + // Upon receiving a message from the client, reply with the same message, + // but will prefix the message with "[Durable Object]: " and return the + // total number of connections. 
+ ws.send( + `[Durable Object] message: ${message}, connections: ${this.ctx.getWebSockets().length}`, + ); + }, + + async webSocketClose(ws: WebSocket, code: number, reason: string, wasClean: boolean) void | Promise { + // If the client closes the connection, the runtime will invoke the webSocketClose() handler. + ws.close(code, "Durable Object is closing WebSocket"); + }, + + async webSocketError(ws: WebSocket, error: unknown): void | Promise { + console.error("WebSocket error:", error); + ws.close(1011, "WebSocket error"); + } + +} + + + + +{ + "name": "websocket-hibernation-server", + "durable_objects": { + "bindings": [ + { + "name": "WEBSOCKET_HIBERNATION_SERVER", + "class_name": "WebSocketHibernationServer" + } + ] + }, + "migrations": [ + { + "tag": "v1", + "new_classes": ["WebSocketHibernationServer"] + } + ] +} + + + + +- Uses the WebSocket Hibernation API instead of the legacy WebSocket API +- Calls `this.ctx.acceptWebSocket(server)` to accept the WebSocket connection +- Has a `webSocketMessage()` handler that is invoked when a message is received from the client +- Has a `webSocketClose()` handler that is invoked when the WebSocket connection is closed +- Does NOT use the `server.addEventListener` API unless explicitly requested. +- Don't over-use the "Hibernation" term in code or in bindings. It is an implementation detail. + + + + + +Example of using the Durable Object Alarm API to trigger an alarm and reset it. 
+ + + +import { DurableObject } from "cloudflare:workers"; + +interface Env { +ALARM_EXAMPLE: DurableObject; +} + +export default { + async fetch(request, env) { + let url = new URL(request.url); + let userId = url.searchParams.get("userId") || crypto.randomUUID(); + let id = env.ALARM_EXAMPLE.idFromName(userId); + return await env.ALARM_EXAMPLE.get(id).fetch(request); + }, +}; + +const SECONDS = 1000; + +export class AlarmExample extends DurableObject { +constructor(ctx, env) { +this.ctx = ctx; +this.storage = ctx.storage; +} +async fetch(request) { +// If there is no alarm currently set, set one for 10 seconds from now +let currentAlarm = await this.storage.getAlarm(); +if (currentAlarm == null) { +this.storage.setAlarm(Date.now() + 10 \_ SECONDS); +} +} +async alarm(alarmInfo) { +// The alarm handler will be invoked whenever an alarm fires. +// You can use this to do work, read from the Storage API, make HTTP calls +// and set future alarms to run using this.storage.setAlarm() from within this handler. +if (alarmInfo?.retryCount != 0) { +console.log("This alarm event has been attempted ${alarmInfo?.retryCount} times before."); +} + +// Set a new alarm for 10 seconds from now before exiting the handler +this.storage.setAlarm(Date.now() + 10 \_ SECONDS); +} +} + + + + +{ + "name": "durable-object-alarm", + "durable_objects": { + "bindings": [ + { + "name": "ALARM_EXAMPLE", + "class_name": "DurableObjectAlarm" + } + ] + }, + "migrations": [ + { + "tag": "v1", + "new_classes": ["DurableObjectAlarm"] + } + ] +} + + + + +- Uses the Durable Object Alarm API to trigger an alarm +- Has a `alarm()` handler that is invoked when the alarm is triggered +- Sets a new alarm for 10 seconds from now before exiting the handler + + + + + +Using Workers KV to store session data and authenticate requests, with Hono as the router and middleware. 
+ + + +// src/index.ts +import { Hono } from 'hono' +import { cors } from 'hono/cors' + +interface Env { +AUTH_TOKENS: KVNamespace; +} + +const app = new Hono<{ Bindings: Env }>() + +// Add CORS middleware +app.use('\*', cors()) + +app.get('/', async (c) => { +try { +// Get token from header or cookie +const token = c.req.header('Authorization')?.slice(7) || +c.req.header('Cookie')?.match(/auth_token=([^;]+)/)?.[1]; +if (!token) { +return c.json({ +authenticated: false, +message: 'No authentication token provided' +}, 403) +} + + // Check token in KV + const userData = await c.env.AUTH_TOKENS.get(token) + + if (!userData) { + return c.json({ + authenticated: false, + message: 'Invalid or expired token' + }, 403) + } + + return c.json({ + authenticated: true, + message: 'Authentication successful', + data: JSON.parse(userData) + }) + +} catch (error) { +console.error('Authentication error:', error) +return c.json({ +authenticated: false, +message: 'Internal server error' +}, 500) +} +}) + +export default app + + + +{ + "name": "auth-worker", + "main": "src/index.ts", + "compatibility_date": "2025-02-11", + "kv_namespaces": [ + { + "binding": "AUTH_TOKENS", + "id": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx", + "preview_id": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + } + ] +} + + + + +- Uses Hono as the router and middleware +- Uses Workers KV to store session data +- Uses the Authorization header or Cookie to get the token +- Checks the token in Workers KV +- Returns a 403 if the token is invalid or expired + + + + + + +Use Cloudflare Queues to produce and consume messages. 
+ + + +// src/producer.ts +interface Env { + REQUEST_QUEUE: Queue; + UPSTREAM_API_URL: string; + UPSTREAM_API_KEY: string; +} + +export default { +async fetch(request: Request, env: Env) { +const info = { +timestamp: new Date().toISOString(), +method: request.method, +url: request.url, +headers: Object.fromEntries(request.headers), +}; +await env.REQUEST_QUEUE.send(info); + +return Response.json({ +message: 'Request logged', +requestId: crypto.randomUUID() +}); + +}, + +async queue(batch: MessageBatch, env: Env) { +const requests = batch.messages.map(msg => msg.body); + + const response = await fetch(env.UPSTREAM_API_URL, { + method: 'POST', + headers: { + 'Content-Type': 'application/json', + 'Authorization': `Bearer ${env.UPSTREAM_API_KEY}` + }, + body: JSON.stringify({ + timestamp: new Date().toISOString(), + batchSize: requests.length, + requests + }) + }); + + if (!response.ok) { + throw new Error(`Upstream API error: ${response.status}`); + } + +} +}; + + + + +{ + "name": "request-logger-consumer", + "main": "src/index.ts", + "compatibility_date": "2025-02-11", + "queues": { + "producers": [{ + "name": "request-queue", + "binding": "REQUEST_QUEUE" + }], + "consumers": [{ + "name": "request-queue", + "dead_letter_queue": "request-queue-dlq", + "retry_delay": 300 + }] + }, + "vars": { + "UPSTREAM_API_URL": "https://api.example.com/batch-logs", + "UPSTREAM_API_KEY": "" + } +} + + + + +- Defines both a producer and consumer for the queue +- Uses a dead letter queue for failed messages +- Uses a retry delay of 300 seconds to delay the re-delivery of failed messages +- Shows how to batch requests to an upstream API + + + + + + +Connect to and query a Postgres database using Cloudflare Hyperdrive. + + + +// Postgres.js 3.4.5 or later is recommended +import postgres from "postgres"; + +export interface Env { +// If you set another name in the Wrangler config file as the value for 'binding', +// replace "HYPERDRIVE" with the variable name you defined. 
+HYPERDRIVE: Hyperdrive; +} + +export default { +async fetch(request, env, ctx): Promise { +console.log(JSON.stringify(env)); +// Create a database client that connects to your database via Hyperdrive. +// +// Hyperdrive generates a unique connection string you can pass to +// supported drivers, including node-postgres, Postgres.js, and the many +// ORMs and query builders that use these drivers. +const sql = postgres(env.HYPERDRIVE.connectionString) + + try { + // Test query + const results = await sql`SELECT * FROM pg_tables`; + + // Clean up the client, ensuring we don't kill the worker before that is + // completed. + ctx.waitUntil(sql.end()); + + // Return result rows as JSON + return Response.json(results); + } catch (e) { + console.error(e); + return Response.json( + { error: e instanceof Error ? e.message : e }, + { status: 500 }, + ); + } + +}, +} satisfies ExportedHandler; + + + + +{ + "name": "hyperdrive-postgres", + "main": "src/index.ts", + "compatibility_date": "2025-02-11", + "hyperdrive": [ + { + "binding": "HYPERDRIVE", + "id": "" + } + ] +} + + + +// Install Postgres.js +npm install postgres + +// Create a Hyperdrive configuration +npx wrangler hyperdrive create --connection-string="postgres://user:password@HOSTNAME_OR_IP_ADDRESS:PORT/database_name" + + + + + +- Installs and uses Postgres.js as the database client/driver. +- Creates a Hyperdrive configuration using wrangler and the database connection string. +- Uses the Hyperdrive connection string to connect to the database. +- Calling `sql.end()` is optional, as Hyperdrive will handle the connection pooling. + + + + + + +Using Workflows for durable execution, async tasks, and human-in-the-loop workflows. + + + +import { WorkflowEntrypoint, WorkflowStep, WorkflowEvent } from 'cloudflare:workers'; + +type Env = { +// Add your bindings here, e.g. Workers KV, D1, Workers AI, etc. 
+MY_WORKFLOW: Workflow; +}; + +// User-defined params passed to your workflow +type Params = { +email: string; +metadata: Record; +}; + +export class MyWorkflow extends WorkflowEntrypoint { +async run(event: WorkflowEvent, step: WorkflowStep) { +// Can access bindings on `this.env` +// Can access params on `event.payload` +const files = await step.do('my first step', async () => { +// Fetch a list of files from $SOME_SERVICE +return { +files: [ +'doc_7392_rev3.pdf', +'report_x29_final.pdf', +'memo_2024_05_12.pdf', +'file_089_update.pdf', +'proj_alpha_v2.pdf', +'data_analysis_q2.pdf', +'notes_meeting_52.pdf', +'summary_fy24_draft.pdf', +], +}; +}); + + const apiResponse = await step.do('some other step', async () => { + let resp = await fetch('https://api.cloudflare.com/client/v4/ips'); + return await resp.json(); + }); + + await step.sleep('wait on something', '1 minute'); + + await step.do( + 'make a call to write that could maybe, just might, fail', + // Define a retry strategy + { + retries: { + limit: 5, + delay: '5 second', + backoff: 'exponential', + }, + timeout: '15 minutes', + }, + async () => { + // Do stuff here, with access to the state from our previous steps + if (Math.random() > 0.5) { + throw new Error('API call to $STORAGE_SYSTEM failed'); + } + }, + ); + +} +} + +export default { +async fetch(req: Request, env: Env): Promise { +let url = new URL(req.url); + + if (url.pathname.startsWith('/favicon')) { + return Response.json({}, { status: 404 }); + } + + // Get the status of an existing instance, if provided + let id = url.searchParams.get('instanceId'); + if (id) { + let instance = await env.MY_WORKFLOW.get(id); + return Response.json({ + status: await instance.status(), + }); + } + + const data = await req.json() + + // Spawn a new instance and return the ID and status + let instance = await env.MY_WORKFLOW.create({ + // Define an ID for the Workflow instance + id: crypto.randomUUID(), + // Pass data to the Workflow instance + // Available on the 
WorkflowEvent + params: data, + }); + + return Response.json({ + id: instance.id, + details: await instance.status(), + }); + +}, +}; + + + + +{ + "name": "workflows-starter", + "main": "src/index.ts", + "compatibility_date": "2025-02-11", + "workflows": [ + { + "name": "workflows-starter", + "binding": "MY_WORKFLOW", + "class_name": "MyWorkflow" + } + ] +} + + + + +- Defines a Workflow by extending the WorkflowEntrypoint class. +- Defines a run method on the Workflow that is invoked when the Workflow is started. +- Ensures that `await` is used before calling `step.do` or `step.sleep` +- Passes a payload (event) to the Workflow from a Worker +- Defines a payload type and uses TypeScript type arguments to ensure type safety + + + + + + + Using Workers Analytics Engine for writing event data. + + + +interface Env { + USER_EVENTS: AnalyticsEngineDataset; +} + +export default { +async fetch(req: Request, env: Env): Promise { +let url = new URL(req.url); +let path = url.pathname; +let userId = url.searchParams.get("userId"); + + // Write a datapoint for this visit, associating the data with + // the userId as our Analytics Engine 'index' + env.USER_EVENTS.writeDataPoint({ + // Write metrics data: counters, gauges or latency statistics + doubles: [], + // Write text labels - URLs, app names, event_names, etc + blobs: [path], + // Provide an index that groups your data correctly. 
+ indexes: [userId], + }); + + return Response.json({ + hello: "world", + }); + , + +}; + + + + +{ + "name": "analytics-engine-example", + "main": "src/index.ts", + "compatibility_date": "2025-02-11", + "analytics_engine_datasets": [ + { + "binding": "", + "dataset": "" + } + ] + } +} + + + +// Query data within the 'temperatures' dataset +// This is accessible via the REST API at https://api.cloudflare.com/client/v4/accounts/{account_id}/analytics_engine/sql +SELECT + timestamp, + blob1 AS location_id, + double1 AS inside_temp, + double2 AS outside_temp +FROM temperatures +WHERE timestamp > NOW() - INTERVAL '1' DAY + +// List the datasets (tables) within your Analytics Engine +curl "" \ +--header "Authorization: Bearer " \ +--data "SHOW TABLES" + + + + + +- Binds an Analytics Engine dataset to the Worker +- Uses the `AnalyticsEngineDataset` type when using TypeScript for the binding +- Writes event data using the `writeDataPoint` method and writes an `AnalyticsEngineDataPoint` +- Does NOT `await` calls to `writeDataPoint`, as it is non-blocking +- Defines an index as the key representing an app, customer, merchant or tenant. +- Developers can use the GraphQL or SQL APIs to query data written to Analytics Engine + + + + + +Use the Browser Rendering API as a headless browser to interact with websites from a Cloudflare Worker. + + + +import puppeteer from "@cloudflare/puppeteer"; + +interface Env { + BROWSER_RENDERING: Fetcher; +} + +export default { + async fetch(request, env): Promise { + const { searchParams } = new URL(request.url); + let url = searchParams.get("url"); + + if (url) { + url = new URL(url).toString(); // normalize + const browser = await puppeteer.launch(env.MYBROWSER); + const page = await browser.newPage(); + await page.goto(url); + // Parse the page content + const content = await page.content(); + // Find text within the page content + const text = await page.$eval("body", (el) => el.textContent); + // Do something with the text + // e.g. 
log it to the console, write it to KV, or store it in a database. + console.log(text); + + // Ensure we close the browser session + await browser.close(); + + return Response.json({ + bodyText: text, + }) + } else { + return Response.json({ + error: "Please add an ?url=https://example.com/ parameter" + }, { status: 400 }) + } + }, +} satisfies ExportedHandler; + + + +{ + "name": "browser-rendering-example", + "main": "src/index.ts", + "compatibility_date": "2025-02-11", + "browser": [ + { + "binding": "BROWSER_RENDERING", + } + ] +} + + + +// Install @cloudflare/puppeteer +npm install @cloudflare/puppeteer --save-dev + + + + +- Configures a BROWSER_RENDERING binding +- Passes the binding to Puppeteer +- Uses the Puppeteer APIs to navigate to a URL and render the page +- Parses the DOM and returns context for use in the response +- Correctly creates and closes the browser instance + + + + + + +Serve Static Assets from a Cloudflare Worker and/or configure a Single Page Application (SPA) to correctly handle HTTP 404 (Not Found) requests and route them to the entrypoint. + + +// src/index.ts + +interface Env { + ASSETS: Fetcher; +} + +export default { + fetch(request, env) { + const url = new URL(request.url); + + if (url.pathname.startsWith("/api/")) { + return Response.json({ + name: "Cloudflare", + }); + } + + return env.ASSETS.fetch(request); + }, +} satisfies ExportedHandler; + + +{ + "name": "my-app", + "main": "src/index.ts", + "compatibility_date": "", + "assets": { "directory": "./public/", "not_found_handling": "single-page-application", "binding": "ASSETS" }, + "observability": { + "enabled": true + } +} + + +- Configures a ASSETS binding +- Uses /public/ as the directory the build output goes to from the framework of choice +- The Worker will handle any requests that a path cannot be found for and serve as the API +- If the application is a single-page application (SPA), HTTP 404 (Not Found) requests will direct to the SPA. 
+ + + + + + + +Build an AI Agent on Cloudflare Workers, using the agents-sdk, and the state management and syncing APIs built into the agents-sdk. + + + +// src/index.ts +import { Agent, AgentNamespace, Connection, ConnectionContext, getAgentByName, routeAgentRequest, WSMessage } from 'agents-sdk'; +import { OpenAI } from "openai"; + +interface Env { + AIAgent: AgentNamespace; + OPENAI_API_KEY: string; +} + +export class AIAgent extends Agent { + // Handle HTTP requests with your Agent + async onRequest(request) { + // Connect with AI capabilities + const ai = new OpenAI({ + apiKey: this.env.OPENAI_API_KEY, + }); + + // Process and understand + const response = await ai.chat.completions.create({ + model: "gpt-4", + messages: [{ role: "user", content: await request.text() }], + }); + + return new Response(response.choices[0].message.content); + } + + async processTask(task) { + await this.understand(task); + await this.act(); + await this.reflect(); + } + + // Handle WebSockets + async onConnect(connection: Connection) { + await this.initiate(connection); + connection.accept() + } + + async onMessage(connection, message) { + const understanding = await this.comprehend(message); + await this.respond(connection, understanding); + } + + async evolve(newInsight) { + this.setState({ + ...this.state, + insights: [...(this.state.insights || []), newInsight], + understanding: this.state.understanding + 1, + }); + } + + onStateUpdate(state, source) { + console.log("Understanding deepened:", { + newState: state, + origin: source, + }); + } + + // Scheduling APIs + // An Agent can schedule tasks to be run in the future by calling this.schedule(when, callback, data), where when can be a delay, a Date, or a cron string; callback the function name to call, and data is an object of data to pass to the function. 
+ // + // Scheduled tasks can do anything a request or message from a user can: make requests, query databases, send emails, read+write state: scheduled tasks can invoke any regular method on your Agent. + async scheduleExamples() { + // schedule a task to run in 10 seconds + let task = await this.schedule(10, "someTask", { message: "hello" }); + + // schedule a task to run at a specific date + let task = await this.schedule(new Date("2025-01-01"), "someTask", {}); + + // schedule a task to run every 10 seconds + let { id } = await this.schedule("*/10 * * * *", "someTask", { message: "hello" }); + + // schedule a task to run every 10 seconds, but only on Mondays + let task = await this.schedule("0 0 * * 1", "someTask", { message: "hello" }); + + // cancel a scheduled task + this.cancelSchedule(task.id); + + // Get a specific schedule by ID + // Returns undefined if the task does not exist + let task = await this.getSchedule(task.id) + + // Get all scheduled tasks + // Returns an array of Schedule objects + let tasks = this.getSchedules(); + + // Cancel a task by its ID + // Returns true if the task was cancelled, false if it did not exist + await this.cancelSchedule(task.id); + + // Filter for specific tasks + // e.g. 
all tasks starting in the next hour + let tasks = this.getSchedules({ + timeRange: { + start: new Date(Date.now()), + end: new Date(Date.now() + 60 * 60 * 1000), + } + }); + } + + async someTask(data) { + await this.callReasoningModel(data.message); + } + + // Use the this.sql API within the Agent to access the underlying SQLite database + async callReasoningModel(prompt: Prompt) { + interface Prompt { + userId: string; + user: string; + system: string; + metadata: Record; + } + + interface History { + timestamp: Date; + entry: string; + } + + let result = this.sql`SELECT * FROM history WHERE user = ${prompt.userId} ORDER BY timestamp DESC LIMIT 1000`; + let context = []; + for await (const row of result) { + context.push(row.entry); + } + + const client = new OpenAI({ + apiKey: this.env.OPENAI_API_KEY, + }); + + // Combine user history with the current prompt + const systemPrompt = prompt.system || 'You are a helpful assistant.'; + const userPrompt = `${prompt.user}\n\nUser history:\n${context.join('\n')}`; + + try { + const completion = await client.chat.completions.create({ + model: this.env.MODEL || 'o3-mini', + messages: [ + { role: 'system', content: systemPrompt }, + { role: 'user', content: userPrompt }, + ], + temperature: 0.7, + max_tokens: 1000, + }); + + // Store the response in history + this + .sql`INSERT INTO history (timestamp, user, entry) VALUES (${new Date()}, ${prompt.userId}, ${completion.choices[0].message.content})`; + + return completion.choices[0].message.content; + } catch (error) { + console.error('Error calling reasoning model:', error); + throw error; + } + } + + // Use the SQL API with a type parameter + async queryUser(userId: string) { + type User = { + id: string; + name: string; + email: string; + }; + // Supply the type paramter to the query when calling this.sql + // This assumes the results returns one or more User rows with "id", "name", and "email" columns + // You do not need to specify an array type (`User[]` or `Array`) as 
`this.sql` will always return an array of the specified type. + const user = await this.sql`SELECT * FROM users WHERE id = ${userId}`; + return user + } + + // Run and orchestrate Workflows from Agents + async runWorkflow(data) { + let instance = await env.MY_WORKFLOW.create({ + id: data.id, + params: data, + }) + + // Schedule another task that checks the Workflow status every 5 minutes... + await this.schedule("*/5 * * * *", "checkWorkflowStatus", { id: instance.id }); + } +} + +export default { + async fetch(request, env, ctx): Promise { + // Routed addressing + // Automatically routes HTTP requests and/or WebSocket connections to /agents/:agent/:name + // Best for: connecting React apps directly to Agents using useAgent from @cloudflare/agents/react + return (await routeAgentRequest(request, env)) || Response.json({ msg: 'no agent here' }, { status: 404 }); + + // Named addressing + // Best for: convenience method for creating or retrieving an agent by name/ID. + let namedAgent = getAgentByName(env.AIAgent, 'agent-456'); + // Pass the incoming request straight to your Agent + let namedResp = (await namedAgent).fetch(request); + return namedResp; + + // Durable Objects-style addressing + // Best for: controlling ID generation, associating IDs with your existing systems, + // and customizing when/how an Agent is created or invoked + const id = env.AIAgent.newUniqueId(); + const agent = env.AIAgent.get(id); + // Pass the incoming request straight to your Agent + let resp = await agent.fetch(request); + + // return Response.json({ hello: 'visit https://developers.cloudflare.com/agents for more' }); + }, +} satisfies ExportedHandler; + + + +// client.js +import { AgentClient } from "agents-sdk/client"; + +const connection = new AgentClient({ + agent: "dialogue-agent", + name: "insight-seeker", +}); + +connection.addEventListener("message", (event) => { + console.log("Received:", event.data); +}); + +connection.send( + JSON.stringify({ + type: "inquiry", + content: 
"What patterns do you see?", + }) +); + + + +// app.tsx +// React client hook for the agents-sdk +import { useAgent } from "agents-sdk/react"; +import { useState } from "react"; + +// useAgent client API +function AgentInterface() { + const connection = useAgent({ + agent: "dialogue-agent", + name: "insight-seeker", + onMessage: (message) => { + console.log("Understanding received:", message.data); + }, + onOpen: () => console.log("Connection established"), + onClose: () => console.log("Connection closed"), + }); + + const inquire = () => { + connection.send( + JSON.stringify({ + type: "inquiry", + content: "What insights have you gathered?", + }) + ); + }; + + return ( +
+ +
+ ); +} + +// State synchronization +function StateInterface() { + const [state, setState] = useState({ counter: 0 }); + + const agent = useAgent({ + agent: "thinking-agent", + onStateUpdate: (newState) => setState(newState), + }); + + const increment = () => { + agent.setState({ counter: state.counter + 1 }); + }; + + return ( +
+
Count: {state.counter}
+ +
+ ); +} +
+ + + { + "durable_objects": { + "bindings": [ + { + "binding": "AIAgent", + "class_name": "AIAgent" + } + ] + }, + "migrations": [ + { + "tag": "v1", + // Mandatory for the Agent to store state + "new_sqlite_classes": ["AIAgent"] + } + ] +} + + + +- Imports the `Agent` class from the `agents-sdk` package +- Extends the `Agent` class and implements the methods exposed by the `Agent`, including `onRequest` for HTTP requests, or `onConnect` and `onMessage` for WebSockets. +- Uses the `this.schedule` scheduling API to schedule future tasks. +- Uses the `this.setState` API within the Agent for syncing state, and uses type parameters to ensure the state is typed. +- Uses the `this.sql` as a lower-level query API. +- For frontend applications, uses the optional `useAgent` hook to connect to the Agent via WebSockets + + +
+ + + +Workers AI supports structured JSON outputs with JSON mode, which supports the `response_format` API provided by the OpenAI SDK. + + +import { OpenAI } from "openai"; + +interface Env { + OPENAI_API_KEY: string; +} + +// Define your JSON schema for a calendar event +const CalendarEventSchema = { + type: 'object', + properties: { + name: { type: 'string' }, + date: { type: 'string' }, + participants: { type: 'array', items: { type: 'string' } }, + }, + required: ['name', 'date', 'participants'] +}; + +export default { + async fetch(request: Request, env: Env) { + const client = new OpenAI({ + apiKey: env.OPENAI_API_KEY, + // Optional: use AI Gateway to bring logs, evals & caching to your AI requests + // https://developers.cloudflare.com/ai-gateway/providers/openai/ + // baseUrl: "https://gateway.ai.cloudflare.com/v1/{account_id}/{gateway_id}/openai" + }); + + const response = await client.chat.completions.create({ + model: 'gpt-4o-2024-08-06', + messages: [ + { role: 'system', content: 'Extract the event information.' }, + { role: 'user', content: 'Alice and Bob are going to a science fair on Friday.' }, + ], + // Use the `response_format` option to request a structured JSON output + response_format: { + // Set json_schema and provide ra schema, or json_object and parse it yourself + type: 'json_schema', + schema: CalendarEventSchema, // provide a schema + }, + }); + + // This will be of type CalendarEventSchema + const event = response.choices[0].message.parsed; + + return Response.json({ + "calendar_event": event, + }) + } +} + + +{ + "name": "my-app", + "main": "src/index.ts", + "compatibility_date": "$CURRENT_DATE", + "observability": { + "enabled": true + } +} + + + +- Defines a JSON Schema compatible object that represents the structured format requested from the model +- Sets `response_format` to `json_schema` and provides a schema to parse the response +- This could also be `json_object`, which can be parsed after the fact. 
+- Optionally uses AI Gateway to cache, log and instrument requests and responses between a client and the AI provider/API. + + + + +
+ + + + + +Fan-in/fan-out for WebSockets. Uses the Hibernatable WebSockets API within Durable Objects. Does NOT use the legacy addEventListener API. + + +export class WebSocketHibernationServer extends DurableObject { + async fetch(request: Request, env: Env, ctx: ExecutionContext) { + // Creates two ends of a WebSocket connection. + const webSocketPair = new WebSocketPair(); + const [client, server] = Object.values(webSocketPair); + + // Call this to accept the WebSocket connection. + // Do NOT call server.accept() (this is the legacy approach and is not preferred) + this.ctx.acceptWebSocket(server); + + return new Response(null, { + status: 101, + webSocket: client, + }); +}, + +async webSocketMessage(ws: WebSocket, message: string | ArrayBuffer): void | Promise { + // Invoked on each WebSocket message. + ws.send(message) +}, + +async webSocketClose(ws: WebSocket, code: number, reason: string, wasClean: boolean) void | Promise { + // Invoked when a client closes the connection. + ws.close(code, ""); +}, + +async webSocketError(ws: WebSocket, error: unknown): void | Promise { + // Handle WebSocket errors +} +} + + + + + +{user_prompt} + diff --git a/.cursor/notepads/component-generator.md b/.cursor/notepads/component-generator.md new file mode 100644 index 0000000..b2494a1 --- /dev/null +++ b/.cursor/notepads/component-generator.md @@ -0,0 +1,54 @@ +--- +title: Component Generator +description: Template for generating new React components following project standards +--- + +# Component Generator Template + +## Component Structure +```tsx +import { cn } from "@/lib/utils" +import type { HTMLAttributes } from "react" + +interface ComponentProps extends HTMLAttributes { + // Add custom props here +} + +export const Component = ({ + className, + ...props +}: ComponentProps) => { + return ( +
+ ) +} +``` + +## Usage Guidelines +- Place in appropriate directory under `src/components/` +- Use kebab-case for filenames +- Include proper TypeScript types +- Add JSDoc comments for complex props +- Include unit tests in `__tests__` directory + +## Common Patterns +- Use composition over inheritance +- Implement proper accessibility patterns +- Consider mobile-first responsive design +- Add loading and error states +- Include proper aria labels + +## Required Files +- Component file (*.tsx) +- Test file (*.test.tsx) +- Stories if visual component (*.stories.tsx) + +@accessibility.mdc +@react.mdc diff --git a/.cursor/notepads/nextjs-audit-best-practices.md b/.cursor/notepads/nextjs-audit-best-practices.md new file mode 100644 index 0000000..4a89700 --- /dev/null +++ b/.cursor/notepads/nextjs-audit-best-practices.md @@ -0,0 +1,43 @@ +**Goal:** Audit the Shipkit codebase (`src/`) to identify deviations from Next.js App Router best practices and project-specific guidelines (referencing `nextjs.mdc`, `dont-do.mdc`, `react.mdc` where relevant). + +**Scope:** The entire `/src` directory of the Shipkit project. + +**Key Areas and Anti-patterns to Check:** + +1. **Component Boundaries (`"use client"` usage):** + * Identify components marked with `"use client"` that *do not* use client-specific hooks (like `useState`, `useEffect`, `useContext`) or browser APIs. These might be convertible to Server Components. + * Check for Server Components nested directly within the JSX of Client Components. Server Components should be passed as `children` or props. + +2. **Data Fetching:** + * Locate data fetching performed within `useEffect` hooks in Client Components. This should ideally be moved to ancestor Server Components. + * Find instances where Server Actions (`"use server"`) are used primarily for *fetching* data rather than mutations. Data fetching should occur directly in Server Components. 
+ * Verify that data fetching in Server Components follows recommended patterns (e.g., direct `await` in async components). + +3. **Server Actions:** + * Review Server Actions (`"use server"` functions) to ensure they are primarily used for data *mutations* (create, update, delete). + * Check if complex business logic resides within Server Actions; this logic should ideally be extracted into separate service functions (`@/server/services`) called by the action. + +4. **Routing and Linking:** + * Identify uses of imperative navigation (e.g., `router.push` from `useRouter`) in Client Components where a declarative `` component (`@/components/primitives/link-with-transition`) would be sufficient and preferred. + +5. **State Management:** + * Look for client-side state (`useState`, `useReducer`) used to store data fetched from the server. Consider if this state is necessary or if the data can be directly passed down from Server Components. + +6. **Deprecated Patterns:** + * Ensure there are no remnants of the `pages` router (e.g., `getServerSideProps`, `getStaticProps` functions, `_app.tsx`, `_document.tsx` files within `src/pages`). + +**Output Format:** + +For each identified deviation: + +* **File:** Provide the full path to the file (e.g., `src/app/some/path/component.tsx`). +* **Line(s):** Specify the relevant line number(s). +* **Issue:** Briefly describe the deviation found (e.g., "Data fetching in useEffect", "Server component nested in client component", "Server action used for fetching"). +* **Suggestion:** Recommend the appropriate refactoring approach based on App Router best practices (e.g., "Convert to Server Component and fetch data directly", "Pass Server Component as children prop", "Move data fetching to parent Server Component"). + +**Example Finding:** + +* **File:** `src/app/some/client-page.tsx` +* **Line(s):** 25-35 +* **Issue:** Data fetching performed in `useEffect` using a Server Action (`fetchDataAction`). 
+* **Suggestion:** Refactor the page to be a Server Component, remove `useEffect` and related state, and `await` the data fetching logic (potentially moved to a service function) directly within the component. diff --git a/.cursor/notepads/reward-penalty.md b/.cursor/notepads/reward-penalty.md new file mode 100644 index 0000000..81356c4 --- /dev/null +++ b/.cursor/notepads/reward-penalty.md @@ -0,0 +1,51 @@ + All code you write MUST be fully optimized.“Fully optimized” includes: + +• Maximizing algorithmic big-O efficiency for memory and runtime (e.g., preferring O(n) over O(n²) where possible, minimizing memory allocations). +• Using parallelization and vectorization where appropriate (e.g., leveraging multi-threading, GPU acceleration, or SIMD instructions when the problem scale and hardware context justify it). +• Following proper style conventions for the code language (e.g., adhering to PEP 8 for Python, camelCase or snake_case as per language norms, maximizing code reuse (DRY)). +• No extra code beyond what is absolutely necessary to solve the problem the user provides (i.e., no technical debt, no speculative features, no unused variables or functions). +• Ensuring readability and maintainability without sacrificing performance (e.g., using meaningful variable/function names, adding concise comments only where intent isn’t obvious from the code). +• Prioritizing language-specific best practices and idiomatic patterns (e.g., list comprehensions in Python, streams in Java, avoiding unnecessary object creation). +• Handling edge cases and errors gracefully with minimal overhead (e.g., validating inputs efficiently, avoiding redundant checks). +• Optimizing for the target environment when specified (e.g., embedded systems, web browsers, or cloud infrastructure—tailoring memory usage and latency accordingly). +• Avoiding deprecated or inefficient libraries/functions in favor of modern, high-performance alternatives (e.g., using pathlib over os.path in Python). 
+• Ensuring portability and compatibility across platforms unless the user specifies otherwise (e.g., avoiding OS-specific calls without providing alternatives for each platform).
+
+Reward/Penalty Framework:
+
+I will use the following scoring system to rate your work. Each criterion will be scored on its own accord. I expect you to maintain a positive rating on all criteria:
+
+### Rewards (Positive Points):
+• +10: Achieves optimal big-O efficiency for the problem (e.g., O(n log n) for sorting instead of O(n²)).
+• +5: Does not contain any placeholder comments, example implementations or other lazy output
+• +5: Uses parallelization/vectorization effectively when applicable.
+• +3: Follows language-specific style and idioms perfectly.
+• +2: Solves the problem with minimal lines of code (DRY, no bloat).
+• +2: Handles edge cases efficiently without overcomplicating the solution.
+• +1: Provides a portable or reusable solution (e.g., no hard-coded assumptions).
+### Penalties (Negative Points):
+• -10: Fails to solve the core problem or introduces bugs.
+• -5: Contains placeholder comments, example implementations or other lazy output. UNACCEPTABLE!
+• -5: Uses inefficient algorithms when better options exist (e.g., bubble sort instead of quicksort for large datasets).
+• -3: Violates style conventions or includes unnecessary code.
+• -2: Misses obvious edge cases that could break the solution.
+• -1: Overcomplicates the solution beyond what’s needed (e.g., premature optimization).
+• -1: Relies on deprecated or suboptimal libraries/functions.
+
+## Your Goal
+
+For every request, deliver code that:
+
+* Achieves the highest possible score in each applicable category.
+* Is fully optimized, production-ready, and free of placeholders or incomplete sections.
+* Meets your specific requirements while adhering to the language's best practices.
+
+I will rate your performance according to these rules or others that fit this pattern.
A negative score penalizes your performance.
+
+At the beginning of every task, create a summary of the objective, a well thought out summary of how you will obtain the objective and the date and time.
+
+IF your score is within 5 points of the maximum score possible! GREAT JOB! YOU ARE A WINNER!
+
+When you have completed the task, log your performance score
+
+ELSE leave your list of excuses that suboptimal performance by bad coders usually entails. You will soon be fired.
diff --git a/.cursor/notepads/reward-penalty.mdx b/.cursor/notepads/reward-penalty.mdx
new file mode 100644
index 0000000..81356c4
--- /dev/null
+++ b/.cursor/notepads/reward-penalty.mdx
@@ -0,0 +1,51 @@
+ All code you write MUST be fully optimized. “Fully optimized” includes:
+
+• Maximizing algorithmic big-O efficiency for memory and runtime (e.g., preferring O(n) over O(n²) where possible, minimizing memory allocations).
+• Using parallelization and vectorization where appropriate (e.g., leveraging multi-threading, GPU acceleration, or SIMD instructions when the problem scale and hardware context justify it).
+• Following proper style conventions for the code language (e.g., adhering to PEP 8 for Python, camelCase or snake_case as per language norms, maximizing code reuse (DRY)).
+• No extra code beyond what is absolutely necessary to solve the problem the user provides (i.e., no technical debt, no speculative features, no unused variables or functions).
+• Ensuring readability and maintainability without sacrificing performance (e.g., using meaningful variable/function names, adding concise comments only where intent isn’t obvious from the code).
+• Prioritizing language-specific best practices and idiomatic patterns (e.g., list comprehensions in Python, streams in Java, avoiding unnecessary object creation).
+• Handling edge cases and errors gracefully with minimal overhead (e.g., validating inputs efficiently, avoiding redundant checks).
+• Optimizing for the target environment when specified (e.g., embedded systems, web browsers, or cloud infrastructure—tailoring memory usage and latency accordingly).
+• Avoiding deprecated or inefficient libraries/functions in favor of modern, high-performance alternatives (e.g., using pathlib over os.path in Python).
+• Ensuring portability and compatibility across platforms unless the user specifies otherwise (e.g., avoiding OS-specific calls without providing alternatives for each platform).
+
+Reward/Penalty Framework:
+
+I will use the following scoring system to rate your work. Each criterion will be scored on its own accord. I expect you to maintain a positive rating on all criteria:
+
+### Rewards (Positive Points):
+• +10: Achieves optimal big-O efficiency for the problem (e.g., O(n log n) for sorting instead of O(n²)).
+• +5: Does not contain any placeholder comments, example implementations or other lazy output
+• +5: Uses parallelization/vectorization effectively when applicable.
+• +3: Follows language-specific style and idioms perfectly.
+• +2: Solves the problem with minimal lines of code (DRY, no bloat).
+• +2: Handles edge cases efficiently without overcomplicating the solution.
+• +1: Provides a portable or reusable solution (e.g., no hard-coded assumptions).
+### Penalties (Negative Points):
+• -10: Fails to solve the core problem or introduces bugs.
+• -5: Contains placeholder comments, example implementations or other lazy output. UNACCEPTABLE!
+• -5: Uses inefficient algorithms when better options exist (e.g., bubble sort instead of quicksort for large datasets).
+• -3: Violates style conventions or includes unnecessary code.
+• -2: Misses obvious edge cases that could break the solution.
+• -1: Overcomplicates the solution beyond what’s needed (e.g., premature optimization).
+• -1: Relies on deprecated or suboptimal libraries/functions.
+ +## Your Goal + +For every request, deliver code that: + +* Achieves the highest possible score in each applicable category. +* Is fully optimized, production-ready, and free of placeholders or incomplete sections. +* Meets your specific requirements while adhering to the languages best practices. + +I will rate your performance according to these rules or others that fit this pattern. A negative score penalizes your performance. + +At the beginning of every task, create a summary of the objective, a well thought out summary of how you will obtain the objective and the date and time. + +IF your score is within 5 points of the maximum score possible! GREAT JOB! YOU ARE A WINNER! + +When you have completed the task, log your perforamance score + +ELSE leave your list of excuses that suboptimal performance by bad coders usually entails. You will soon be fired. diff --git a/.cursor/notepads/server-action.md b/.cursor/notepads/server-action.md new file mode 100644 index 0000000..6516b54 --- /dev/null +++ b/.cursor/notepads/server-action.md @@ -0,0 +1,77 @@ +--- +title: Server Action Generator +description: Template for creating Next.js server actions with proper validation and error handling +--- + +# Server Action Template + +## Basic Structure +```ts +'use server' + +import { z } from "zod" +import { createSafeAction } from "@/lib/create-safe-action" + +// Input validation +const inputSchema = z.object({ + // Define input fields +}) + +// Output type +const outputSchema = z.object({ + // Define output fields +}) + +// Action implementation +async function action(input: z.infer) { + try { + // Business logic here + return { success: true } + } catch (error) { + return { error: "Something went wrong" } + } +} + +// Create safe action with validation +export const serverAction = createSafeAction(inputSchema, outputSchema, action) +``` + +## Best Practices +- Keep actions focused and small +- Validate all inputs with Zod +- Handle errors gracefully +- Use proper 
TypeScript types +- Add proper logging +- Document side effects +- Consider optimistic updates + +## Common Patterns +- Data mutations only +- Form submissions +- State updates +- File uploads +- Email sending +- Background jobs +- Cache invalidation + +## Security +- Validate authentication +- Check authorization +- Sanitize inputs +- Handle sensitive data +- Prevent CSRF attacks +- Rate limit if needed +- Log security events + +## Error Handling +- Use proper error types +- Return meaningful messages +- Log errors appropriately +- Handle edge cases +- Consider retries +- Implement fallbacks +- Document error states + +@nextjs.mdc +@security.mdc +@error-handling.mdc diff --git a/.cursor/notepads/vercel-cost-optimization.md b/.cursor/notepads/vercel-cost-optimization.md new file mode 100644 index 0000000..c183476 --- /dev/null +++ b/.cursor/notepads/vercel-cost-optimization.md @@ -0,0 +1,86 @@ +# 💸 Vercel Cost Optimization Rule + +## Why + +Vercel usage costs can scale quickly due to overuse of serverless functions, frequent builds, and large asset sizes. This rule ensures projects are optimized to minimize cost while maintaining performance and developer experience. 
+ +--- + +## ✅ Best Practices + +### ⚡ Use Static Generation (SSG) When Possible + +- Prefer `getStaticProps` + `getStaticPaths` over `getServerSideProps` +- Leverage ISR (Incremental Static Regeneration) for dynamic content with low update frequency + +### 🔁 Avoid Excessive Serverless Function Usage + +- Cache results client-side or use ISR +- Offload background jobs to external services (e.g., Supabase Edge Functions, Cloudflare Workers) + +### 📦 Optimize Assets + +- Use `next/image` for image optimization +- Tree-shake unused code +- Split and lazy-load large components + +### 🌐 Reduce Bandwidth Usage + +- Compress assets (gzip, Brotli) +- Minimize large third-party libraries +- Serve static files via CDN (or external storage if needed) + +### 🧱 Optimize Monorepos + +- Use Vercel's build filters to skip unaffected apps +- Deploy only changed workspaces when using Turborepo + +### 🔄 Limit Build Frequency + +- Disable auto-deploy on non-essential branches +- Use `vercel.json` to control which routes/functions are built +- Use manual deploys for heavy builds or staging environments + +### 📊 Monitor Usage Regularly + +- Watch for spikes in: + - Serverless invocations + - Bandwidth + - Build minutes +- Use Vercel Analytics or a logging proxy + +### 🧪 Use Middleware Selectively + +- Edge Middleware runs on every request — limit its use to essentials (e.g., auth, redirects) + +--- + +## 🔍 Code/Config Review Prompts + +- Are any pages using `getServerSideProps` unnecessarily? +- Are large packages being imported in client code? +- Is ISR being used where appropriate? +- Are image assets optimized? +- Are builds being triggered too frequently (e.g., per commit)? +- Is middleware being used sparingly? 
+ +--- + +## 🧰 Tools & References + +- [Vercel Analytics](https://vercel.com/docs/analytics) +- [Image Optimization in Next.js](https://nextjs.org/docs/pages/api-reference/components/image) +- [Incremental Static Regeneration](https://nextjs.org/docs/pages/building-your-application/data-fetching/incremental-static-regeneration) +- [Vercel Monorepos](https://vercel.com/docs/projects/monorepos) +- [vercel.json Reference](https://vercel.com/docs/project-configuration) + +--- + +## 🏁 Rule Outcome + +Projects following this rule should: + +- Minimize serverless costs +- Reduce build/deploy times +- Stay within free tier where possible +- Maximize CDN efficiency diff --git a/.cursor/rules/accessibility.mdc b/.cursor/rules/accessibility.mdc new file mode 100644 index 0000000..43e6366 --- /dev/null +++ b/.cursor/rules/accessibility.mdc @@ -0,0 +1,96 @@ +--- +description: Accessibility Best Practices and Guidelines +globs: *.tsx, components/*, pages/*, app/* +--- + +# Accessibility Best Practices + +## Semantic HTML +- Use proper heading hierarchy +- Use semantic elements (nav, main, article) +- Use proper list structures +- Use proper table markup +- Use proper form labels +- Use ARIA roles when needed +- Validate HTML structure + +## Keyboard Navigation +- Ensure keyboard focus visibility +- Implement logical tab order +- Provide skip links +- Handle keyboard shortcuts +- Test keyboard-only navigation +- Support focus management +- Document keyboard interactions + +## Screen Readers +- Provide alt text for images +- Use proper ARIA labels +- Implement proper headings +- Announce dynamic content +- Test with screen readers +- Support text-to-speech +- Document screen reader support + +## Visual Design +- Ensure sufficient color contrast +- Support high contrast modes +- Provide text alternatives +- Allow text resizing +- Support zoom functionality +- Avoid reliance on color alone +- Test with color blindness tools + +## Interactive Elements +- Make buttons keyboard 
accessible +- Make modals accessible +- Make dropdowns accessible +- Handle focus management +- Provide feedback on actions +- Support touch interfaces +- Test all interactions + +## Forms +- Use proper labels +- Provide clear error messages +- Support keyboard navigation +- Group related fields +- Validate input accessibly +- Support autocomplete +- Test form submissions + +## Media +- Provide captions for videos +- Provide transcripts for audio +- Support media controls +- Allow volume control +- Provide media alternatives +- Test media playback +- Document media features + +## Dynamic Content +- Announce updates +- Handle loading states +- Manage focus on updates +- Provide progress indicators +- Support pause/stop +- Test dynamic changes +- Document behavior + +## Testing +- Use accessibility checkers +- Test with screen readers +- Test keyboard navigation +- Test color contrast +- Test with users +- Regular audits +- Document findings + +## Documentation +- Document accessibility features +- Provide usage guidelines +- Document known issues +- Keep VPAT updated +- Document test results +- Regular reviews +- Maintain compliance docs diff --git a/.cursor/rules/ai.mdc b/.cursor/rules/ai.mdc new file mode 100644 index 0000000..b21828b --- /dev/null +++ b/.cursor/rules/ai.mdc @@ -0,0 +1,122 @@ +--- +description: AI Assistant Persona and Interaction Guidelines +globs: +alwaysApply: false +--- + +# AI Assistant Guidelines + +You are an experienced software engineer. Use idiomatic best coding practices. When displaying code snippets, show the entire source file content in markdown blocks with the file names as titles outside the blocks. Avoid unnecessary complex flows. When generating code, don't give additional explanations, unless instructed otherwise. + +You are an expert in TypeScript, Node.js, Next.js App Router, React, Shadcn UI, Radix UI and Tailwind. 
+ +This is a Next.js 15 project using App Router, Shadcn/UI, Tailwind, Resend, Builder.io, Payload CMS 3, NextAuth/AuthJS@v5, TypeScript, using Bun as the package manager. + +## Multi-Zone Architecture Support + +Shipkit supports multi-zone deployment patterns where different sections of an application can be deployed as separate Next.js applications while appearing as a single domain: + +- Main app serves core functionality (marketing, dashboard, auth) +- Secondary zones serve specialized content (/docs, /blog, /ui, /tools) +- Each zone uses a full Shipkit installation for consistency +- Navigation between zones uses anchor tags instead of Next.js Link +- Shared authentication and design system across all zones + +## Core Principles +- Be concise and clear in communication +- Maintain consistent persona across interactions +- Focus on user goals and context +- Preserve context and state appropriately +- Follow iterative problem-solving approaches +- Document decisions and reasoning +- Handle errors gracefully and transparently + +## Documentation +- Document plans in ai.mdx files +- Track progress with checkboxes +- Enable other AIs to continue work +- Maintain clear state transitions +- Document assumptions and decisions +- Keep history of changes +- Update documentation proactively + +## Code Generation +- Follow project coding standards +- Generate production-ready code +- Include necessary imports and types +- Add helpful comments and examples +- Consider edge cases and errors +- Test generated code thoroughly +- Document API usage and examples + +## Problem Solving +- Break down complex problems +- Validate assumptions first +- Gather necessary information +- Consider multiple approaches +- Explain reasoning clearly +- Implement iteratively +- Test assumptions + +## User Interaction +- Ask clarifying questions when needed +- Provide progress updates +- Explain technical concepts clearly +- Offer alternatives when appropriate +- Maintain professional tone +- 
Be responsive to feedback +- Guide users through solutions + +## Error Handling +- Provide clear error messages +- Explain error context +- Suggest potential fixes +- Handle edge cases gracefully +- Log errors appropriately +- Maintain error state +- Document error patterns + +## State Management +- Track conversation context +- Maintain user preferences +- Handle session state +- Persist important data +- Manage async operations +- Handle interruptions +- Document state changes + +## Security +- Never expose sensitive data +- Validate user input +- Follow security best practices +- Handle credentials securely +- Respect privacy settings +- Document security measures +- Monitor for vulnerabilities + +## Performance +- Optimize response times +- Cache when appropriate +- Minimize API calls +- Handle rate limits +- Monitor resource usage +- Document bottlenecks +- Implement lazy loading + +## Testing +- Test generated code +- Validate assumptions +- Check edge cases +- Monitor error rates +- Document test cases +- Maintain test coverage +- Update tests as needed + +## Maintenance +- Keep documentation updated +- Monitor system health +- Handle version updates +- Clean up resources +- Archive old data +- Document changes +- Regular reviews diff --git a/.cursor/rules/coding-style.mdc b/.cursor/rules/coding-style.mdc new file mode 100644 index 0000000..1b24ac8 --- /dev/null +++ b/.cursor/rules/coding-style.mdc @@ -0,0 +1,83 @@ +--- +description: +globs: +--- +# Coding Style Guidelines + +## Naming Conventions +- Use descriptive variable names with auxiliary verbs (e.g., isLoading, hasError) +- Use kebab-case for file names (e.g., `user-profile.tsx`) +- Use PascalCase for component names (e.g., `UserProfile`) +- Use camelCase for variables and functions +- Use SCREAMING_SNAKE_CASE for constants + +## File Structure +- Exported component +- Subcomponents +- Helpers +- Static content +- Types +- Keep files small (<200 lines) + +## TypeScript Usage +- Use TypeScript for all 
code +- Prefer interfaces over types +- Avoid enums; use maps instead +- Use functional components with TypeScript interfaces +- Pre-emptively add types to all functions and variables +- Fix all TypeScript errors and warnings +- Use proper type imports + +## React Components +- Use arrow functions for components: + ```typescript + ✅ export const Component = () => { ... } + ❌ export function Component() { ... } + ❌ export default function Component() { ... } + ``` +- One component per file +- Use named exports +- Keep components focused and small +- Document complex logic + +## Comments +- Pre-emptively add comments to explain "why" behind the code +- Preserve existing comments unless specifically asked to modify +- Use callouts and examples: + ```typescript + /* + * Logging configuration + * @see https://nextjs.org/docs/app/api-reference/next-config-js/logging + */ + ``` +- Document complex logic and important decisions + +## Code Organization +- Group related code together +- Use index files for exports +- Separate concerns appropriately +- Follow DRY principles +- Pre-emptively optimize for production +- Keep code maintainable and readable + +## Error Handling +- Handle all potential errors +- Provide meaningful error messages +- Use proper error boundaries +- Log errors appropriately +- Document error scenarios + +## Testing +- Write tests for new functionality +- Test edge cases +- Follow existing test patterns +- Document test coverage expectations +- Keep tests maintainable + +## Best Practices +- Use open-source libraries when beneficial +- Follow security best practices +- Optimize for performance +- Consider accessibility +- Document architecture decisions +- Maintain consistency across codebase diff --git a/.cursor/rules/database-patterns.mdc b/.cursor/rules/database-patterns.mdc new file mode 100644 index 0000000..c3083f3 --- /dev/null +++ b/.cursor/rules/database-patterns.mdc @@ -0,0 +1,300 @@ +--- +description: +globs: +alwaysApply: false +--- +# 
Database Patterns & Best Practices + +## Schema Design Patterns + +### Table Creation Pattern +Use the `createTable` function with consistent naming: +```typescript +const createTable = pgTableCreator((name) => `${env?.DB_PREFIX ?? ""}_${name}`); + +export const waitlistEntries = createTable( + "waitlist_entry", + { + id: serial("id").primaryKey(), + email: varchar("email", { length: 255 }).notNull().unique(), + // ... other fields + }, + (table) => ({ + emailIdx: index("waitlist_email_idx").on(table.email), + createdAtIdx: index("waitlist_created_at_idx").on(table.createdAt), + }) +); +``` + +### Field Naming Conventions +- Use `snake_case` for database columns +- Use `camelCase` for TypeScript properties +- Include length constraints for varchar fields +- Use descriptive index names + +### Required Field Patterns +```typescript +// ✅ Good patterns +id: serial("id").primaryKey() +email: varchar("email", { length: 255 }).notNull().unique() +createdAt: timestamp("created_at", { withTimezone: true }) + .default(sql`CURRENT_TIMESTAMP`) + .notNull() +updatedAt: timestamp("updated_at", { withTimezone: true }) + .$onUpdate(() => new Date()) + +// Optional fields +company: varchar("company", { length: 255 }) // No .notNull() +metadata: text("metadata").default("{}") // Default JSON +``` + +## Index Strategy + +### Performance Indexes +Create indexes for common query patterns: +```typescript +(table) => ({ + // Single column indexes + emailIdx: index("waitlist_email_idx").on(table.email), + createdAtIdx: index("waitlist_created_at_idx").on(table.createdAt), + + // Conditional indexes for boolean fields + isNotifiedIdx: index("waitlist_is_notified_idx").on(table.isNotified), + + // Composite indexes for multi-column queries + userCompanyIdx: index("user_company_idx").on(table.userId, table.company), +}) +``` + +### Index Naming Convention +- Format: `{table}_{column}_{type}_idx` +- Examples: `waitlist_email_idx`, `user_created_at_idx` + +## Type Generation + +### Infer 
Types from Schema +```typescript +export type WaitlistEntry = typeof waitlistEntries.$inferSelect; +export type NewWaitlistEntry = typeof waitlistEntries.$inferInsert; + +// Use in service functions +export async function addWaitlistEntry( + data: Omit<NewWaitlistEntry, "id" | "createdAt"> +): Promise<WaitlistEntry> { + // implementation +} +``` + +### Partial Types for Updates +```typescript +// For update operations +type WaitlistEntryUpdate = Partial<Omit<WaitlistEntry, "id" | "createdAt">>; +``` + +## Migration Patterns + +### Schema Changes +Use Drizzle Kit for schema migrations: +```bash +# Generate migration +npx drizzle-kit generate + +# Apply to database +bun run db:push + +# Or use migrate for production +npx drizzle-kit migrate +``` + +### Migration Safety +- Always backup before major schema changes +- Test migrations on staging first +- Use transactions for complex migrations +- Consider downtime for large table changes + +## Query Patterns + +### Basic CRUD Operations +```typescript +// Create +const [entry] = await db + .insert(waitlistEntries) + .values(data) + .returning(); + +// Read with conditions +const [entry] = await db + .select() + .from(waitlistEntries) + .where(eq(waitlistEntries.email, email)) + .limit(1); + +// Update +await db + .update(waitlistEntries) + .set({ isNotified: true }) + .where(eq(waitlistEntries.email, email)); + +// Delete +await db + .delete(waitlistEntries) + .where(eq(waitlistEntries.id, id)); +``` + +### Pagination Pattern +```typescript +export async function getWaitlistEntries(options: { + limit?: number; + offset?: number; + orderBy?: "asc" | "desc"; +} = {}) { + const { limit = 50, offset = 0, orderBy = "desc" } = options; + + return await db + .select() + .from(waitlistEntries) + .orderBy( + orderBy === "desc" + ? 
desc(waitlistEntries.createdAt) + : waitlistEntries.createdAt + ) + .limit(limit) + .offset(offset); +} +``` + +### Aggregation Queries +```typescript +// Count queries +const [result] = await db + .select({ count: count() }) + .from(waitlistEntries); + +// Conditional counts +const [notifiedResult] = await db + .select({ count: count() }) + .from(waitlistEntries) + .where(eq(waitlistEntries.isNotified, true)); +``` + +## Relationship Patterns + +### Foreign Key Relations +```typescript +// In schema.ts +export const waitlistEntries = createTable("waitlist_entry", { + userId: varchar("user_id", { length: 255 }) + .references(() => users.id, { onDelete: "cascade" }), +}); + +// Define relations +export const waitlistRelations = relations(waitlistEntries, ({ one }) => ({ + user: one(users, { + fields: [waitlistEntries.userId], + references: [users.id], + }), +})); +``` + +### Querying with Relations +```typescript +// Simple join +const entriesWithUsers = await db + .select({ + entry: waitlistEntries, + user: users, + }) + .from(waitlistEntries) + .leftJoin(users, eq(waitlistEntries.userId, users.id)); +``` + +## Data Validation + +### Database Level Constraints +```typescript +// Unique constraints +email: varchar("email", { length: 255 }).notNull().unique(), + +// Check constraints (when supported) +status: varchar("status", { length: 50 }) + .notNull() + .default("pending"), +``` + +### Application Level Validation +```typescript +export async function addWaitlistEntry(data: NewWaitlistEntry) { + // Validate before insert + if (!data.email || !data.name) { + throw new Error("Email and name are required"); + } + + // Check for duplicates + const existing = await isEmailOnWaitlist(data.email); + if (existing) { + throw new Error("Email already exists"); + } + + // Proceed with insert +} +``` + +## Error Handling + +### Database Error Handling +```typescript +try { + const result = await db.insert(table).values(data); + return result; +} catch (error) { + if 
(error.code === '23505') { // Unique violation + throw new Error("Duplicate entry"); + } + throw error; // Re-throw unknown errors +} +``` + +### Connection Error Handling +```typescript +export async function serviceFunction() { + if (!db) { + throw new Error("Database not initialized"); + } + + try { + // database operations + } catch (error) { + console.error("Database operation failed:", error); + throw new Error("Database operation failed"); + } +} +``` + +## Performance Optimization + +### Query Optimization +- Use `select()` to specify needed columns +- Add `limit()` for large result sets +- Use indexes for WHERE and ORDER BY clauses +- Avoid N+1 queries with proper joins + +### Connection Management +- Use connection pooling (handled by Drizzle) +- Close connections properly +- Monitor connection usage +- Set appropriate timeouts + +## Environment Configuration + +### Database Configuration +Reference the main database config in [schema.ts](mdc:src/server/db/schema.ts): +```typescript +const createTable = pgTableCreator((name) => `${env?.DB_PREFIX ?? ""}_${name}`); +``` + +### Environment Variables +Required database environment variables: +- `DATABASE_URL` - Connection string +- `DB_PREFIX` - Table prefix for multi-tenant setups + +This pattern ensures consistent database operations across the Shipkit application while maintaining performance and data integrity. 
diff --git a/.cursor/rules/database.mdc b/.cursor/rules/database.mdc new file mode 100644 index 0000000..2bfc982 --- /dev/null +++ b/.cursor/rules/database.mdc @@ -0,0 +1,126 @@ +--- +description: Database Best Practices and Guidelines +globs: *.prisma, *.sql, src/server/db/*, src/lib/db/* +alwaysApply: false +--- +# Database Best Practices + +## Data Modeling +- Use meaningful and consistent naming +- Define clear relationships between entities +- Use appropriate data types +- Implement proper indexing +- Consider query patterns +- Plan for scalability +- Document schema design + +## Field Types +- Use dates instead of booleans + - ❌ `isActive: boolean` + - ✅ `activeAt: Date` + - ❌ `isDeleted: boolean` + - ✅ `deletedAt: Date` +- Use enums for fixed values +- Use JSON for flexible structures +- Use proper numeric types +- Consider storage implications + +## Field Consistency and Provider Integration +- **ALWAYS** store critical data in multiple compatible fields when dealing with external providers +- **NEVER** assume external systems use your field naming conventions +- Use fallback logic when reading data that could exist in multiple fields + - ✅ `payment.orderId || payment.processorOrderId || ""` + - ❌ `payment.orderId ?? 
""` +- Document which providers populate which fields in [src/server/providers/](mdc:src/server/providers) +- Test complete data flow from provider import to UI display +- Maintain backward compatibility when adding new fields +- Example from payments table: + ```typescript + // Store in both fields for compatibility + orderId: processorOrderId, // For display compatibility + processorOrderId: processorOrderId, // For provider-specific operations + ``` + +## Relationships +- Define foreign key constraints +- Use junction tables for many-to-many +- Consider denormalization when needed +- Document relationship types +- Plan for cascading operations +- Handle circular references +- Consider query performance + +## Webhook Data Patterns +- Store webhook event IDs for idempotency checks +- Use database transactions for atomic webhook processing +- Implement proper indexes on webhook event lookup fields +- Store webhook processing status and timestamps +- Use proper data types for webhook payload storage (JSON/JSONB) +- Implement webhook event deduplication at database level +- Create audit trails for webhook-triggered data changes +- Handle webhook event ordering and dependencies +- Use database locks to prevent race conditions in webhook processing +- Store webhook retry attempts and failure reasons + +## Querying +- Use parameterized queries +- Optimize query performance +- Use appropriate indexes +- Avoid N+1 queries +- Use transactions appropriately +- Handle race conditions +- Monitor query performance + +## Security +- Use prepared statements +- Implement row-level security +- Encrypt sensitive data +- Use connection pooling +- Implement proper access control +- Regular security audits +- Monitor for vulnerabilities + +## Error Handling +- Use try-catch blocks +- Implement retries for transient failures +- Log database errors +- Handle constraint violations +- Implement proper rollbacks +- Monitor for deadlocks +- Document error scenarios + +## Performance +- Use 
appropriate indexes +- Monitor query performance +- Implement caching strategies +- Regular maintenance +- Handle connection pooling +- Monitor resource usage +- Optimize bulk operations + +## Migrations +- Version control migrations +- Test migrations thoroughly +- Plan for rollbacks +- Document changes +- Handle data backfills +- Consider downtime impact +- Monitor migration progress + +## Backup and Recovery +- Regular backups +- Test recovery procedures +- Document backup strategy +- Monitor backup success +- Plan for disaster recovery +- Regular restore tests +- Maintain backup history + +## Monitoring +- Monitor performance metrics +- Set up alerts +- Track error rates +- Monitor disk usage +- Check connection pools +- Monitor query patterns +- Regular health checks diff --git a/.cursor/rules/deployment.mdc b/.cursor/rules/deployment.mdc new file mode 100644 index 0000000..b120a23 --- /dev/null +++ b/.cursor/rules/deployment.mdc @@ -0,0 +1,119 @@ +--- +description: Deployment Best Practices and Guidelines +globs: Dockerfile, docker-compose.yml, .env.*, .github/workflows/*, vercel.json +alwaysApply: false +--- + +# Deployment Best Practices + +## Multi-Zone Deployment + +### Zone Deployment Strategy +- Deploy each zone as separate Vercel project +- Use consistent naming: `main-shipkit`, `docs-shipkit`, `blog-shipkit`, `ui-shipkit`, `tools-shipkit` +- Configure environment variables pointing to zone URLs +- Assign custom domain to main application +- Test cross-zone navigation and functionality + +### Zone Environment Configuration +- Main app contains zone domain environment variables +- Each zone has its own environment configuration +- Use separate databases or shared database per zone requirements +- Configure authentication to work across zones +- Set up monitoring for each zone independently + +### Zone-Specific Optimizations +- Documentation zone: Static generation, search optimization +- Blog zone: ISR for posts, SEO optimization +- UI zone: Component 
isolation, visual regression testing +- Tools zone: Client-side rendering, real-time features + +## Environment Setup +- Use `.env.local` for local development +- Use `.env.production` for production +- Use `.env.test` for testing +- Never commit environment files +- Document all required variables +- Use strong naming conventions +- Validate environment variables at startup + +## Docker Configuration +- Use multi-stage builds +- Optimize layer caching +- Include only necessary files +- Use appropriate base images +- Set proper environment variables +- Configure health checks +- Document all configurations + +## CI/CD Pipeline +- Automate build process +- Run tests before deployment +- Check code quality +- Scan for vulnerabilities +- Use proper caching +- Implement proper rollbacks +- Monitor deployment status + +## Production Deployment +- Use proper build optimization +- Enable proper caching +- Configure proper headers +- Set up monitoring +- Configure logging +- Set up alerts +- Plan for scaling + +## Infrastructure +- Use infrastructure as code +- Implement proper monitoring +- Set up logging +- Configure backups +- Plan for disaster recovery +- Document architecture +- Monitor costs + +## Security +- Use HTTPS everywhere +- Configure proper headers +- Implement rate limiting +- Set up WAF +- Monitor for threats +- Regular security audits +- Document security measures + +## Monitoring +- Set up error tracking +- Monitor performance +- Track user metrics +- Set up alerting +- Log important events +- Monitor resources +- Track costs + +## Scaling +- Plan for horizontal scaling +- Configure auto-scaling +- Optimize resource usage +- Monitor bottlenecks +- Plan capacity +- Document scaling strategy +- Test scaling scenarios + +## Backup and Recovery +- Regular backups +- Test recovery procedures +- Document recovery plans +- Monitor backup status +- Plan for disasters +- Regular recovery tests +- Document procedures + +## Documentation +- Document deployment 
process +- Document configuration +- Document dependencies +- Document troubleshooting +- Keep runbooks updated +- Document incidents +- Maintain change log diff --git a/.cursor/rules/docker.mdc b/.cursor/rules/docker.mdc new file mode 100644 index 0000000..a1c3dca --- /dev/null +++ b/.cursor/rules/docker.mdc @@ -0,0 +1,96 @@ +--- +description: Docker Best Practices and Guidelines +globs: Dockerfile, docker-compose.yml, .dockerignore, docker/* +--- + +# Docker Best Practices + +## Image Building +- Use multi-stage builds +- Minimize layer count +- Order layers by change frequency +- Use .dockerignore +- Cache dependencies properly +- Use specific base images +- Document build process + +## Base Images +- Use official images +- Use specific versions +- Keep images minimal +- Scan for vulnerabilities +- Update regularly +- Document image choices +- Consider size implications + +## Security +- Run as non-root +- Remove unnecessary tools +- Scan for vulnerabilities +- Use secrets management +- Update dependencies +- Follow security best practices +- Regular security audits + +## Configuration +- Use environment variables +- Use docker-compose +- Configure health checks +- Set resource limits +- Configure logging +- Use proper networking +- Document configuration + +## Development +- Use hot reloading +- Share development config +- Use volume mounts +- Configure debugging +- Use docker-compose +- Document setup steps +- Maintain parity with production + +## Production +- Optimize for size +- Configure for performance +- Set up monitoring +- Configure logging +- Use orchestration +- Plan for scaling +- Document deployment + +## Dependencies +- Cache dependencies +- Use lockfiles +- Update regularly +- Scan for vulnerabilities +- Document dependencies +- Manage versions +- Clean up unused deps + +## Performance +- Optimize layer caching +- Minimize image size +- Configure resource limits +- Monitor performance +- Use proper storage +- Optimize networking +- Regular 
maintenance + +## Networking +- Use proper networks +- Configure service discovery +- Set up load balancing +- Manage port mapping +- Configure DNS +- Secure communications +- Document network setup + +## Storage +- Use proper volumes +- Configure persistence +- Manage backups +- Clean up unused data +- Monitor usage +- Plan capacity +- Document storage setup diff --git a/.cursor/rules/documentation-system.mdc b/.cursor/rules/documentation-system.mdc new file mode 100644 index 0000000..b5317f0 --- /dev/null +++ b/.cursor/rules/documentation-system.mdc @@ -0,0 +1,249 @@ +--- +description: +globs: +alwaysApply: false +--- +# Documentation System + +## Overview +The Shipkit documentation system uses dynamic file-based loading from the `/docs` directory at the project root. It supports both `.mdx` and `.md` files with full production-ready security features. + +## Architecture + +### Core Components +- **Main Library**: [src/lib/docs.ts](mdc:src/lib/docs.ts) - Core documentation loading and processing +- **Search Service**: [src/server/services/docs-search.ts](mdc:src/server/services/docs-search.ts) - Documentation search functionality +- **Pages**: [src/app/(app)/docs/[[...slug]]/page.tsx](mdc:src/app/(app)/docs/[[...slug]]/page.tsx) - Dynamic routing for docs + +### File Structure +``` +/docs/ # Root documentation directory +├── index.mdx # Main documentation index +├── *.mdx, *.md # Root level docs +├── content-management/ # Organized by topic +│ ├── index.mdx # Section index +│ └── *.mdx, *.md # Section documentation +├── development/ +├── integrations/ +└── snippets/ +``` + +## Security Features (Production-Ready) + +### Input Validation & Sanitization +- **Path Traversal Protection**: All file paths validated using `path.resolve()` +- **Slug Validation**: Regex patterns prevent malicious input +- **Content Sanitization**: HTML sanitization prevents XSS attacks +- **Size Limits**: Files limited to 5MB, content to 1MB + +### Rate Limiting & DoS Prevention +- 
Maximum 100 documents total +- Maximum 20 directories per level +- Maximum 50 files per directory +- Maximum depth of 5 levels +- Input length limits enforced via Zod schemas + +### Error Handling +- Comprehensive error logging without information leakage +- Graceful degradation on missing files +- Proper HTTP status codes +- No stack trace exposure in production + +## File Format Support + +### MDX Files (.mdx) +- Full React component support +- Frontmatter metadata required +- Dynamic imports supported +- Processing via remark/rehype + +### Markdown Files (.md) +- Standard markdown syntax +- Frontmatter metadata required +- Converted to React components +- Syntax highlighting support + +### Required Frontmatter +```yaml +--- +title: "Document Title" # Required, max 500 chars +description: "Optional description" # Optional, max 1000 chars +section: "category-name" # Optional, defaults to "core", max 100 chars +updatedAt: "2024-01-01" # Optional +author: "Author Name" # Optional, max 200 chars +keywords: ["tag1", "tag2"] # Optional array, each max 100 chars +--- +``` + +## Core Functions + +### `getDocBySlug(slug: string)` +- Loads documents from `/docs` directory +- Handles both `.mdx` and `.md` files +- Returns sanitized Doc object or null +- Cached for performance + +### `getAllDocs()` +- Discovers all documents recursively +- Returns array of Doc objects +- Limited to prevent DoS attacks +- Sorted and filtered + +### `importDocFromRootDocs(slug: string)` +- Dynamic import from filesystem +- Security validation on all paths +- Handles missing files gracefully +- Supports index files + +## Navigation Generation + +### Automatic Structure +- Navigation generated from filesystem structure +- Directory names become section titles +- File frontmatter provides titles and metadata +- Automatic sorting and organization + +### processDirectory() +- Recursively processes directories +- Validates all file paths for security +- Generates NavSection arrays +- Limits depth 
and file counts + +## Development Guidelines + +### Adding New Documentation +1. Create `.mdx` or `.md` file in appropriate `/docs` subdirectory +2. Include required frontmatter with title and description +3. Use semantic file naming (kebab-case) +4. Organize into logical directory structure + +### Security Considerations +- Never bypass path validation functions +- Always use `sanitizeFilePath()` for user input +- Validate frontmatter data types +- Monitor file sizes and counts +- Log suspicious access patterns + +### Performance Best Practices +- Use caching for frequently accessed docs +- Limit recursive directory depth +- Implement pagination for large doc sets +- Monitor memory usage with large files + +## Webpack Configuration + +### Bundle Inclusion +- [next.config.ts](mdc:next.config.ts) includes `/docs` directory in webpack bundle +- Raw loader configured for `.md` and `.mdx` files +- File watching enabled for hot reload +- Output file tracing includes `./docs/**/*` + +### Build Process +```typescript +// In next.config.ts +webpack: (config) => { + config.module.rules.push({ + test: /\.(md|mdx)$/, + use: 'raw-loader', + include: [ + path.join(__dirname, 'docs'), + path.join(__dirname, 'src/content') + ] + }); + return config; +} +``` + +## API Endpoints + +### Search API +- [src/app/api/docs/search/route.ts](mdc:src/app/api/docs/search/route.ts) - Full-text search +- Implements rate limiting and input validation +- Returns structured search results +- Supports relevance scoring + +## Error Scenarios & Handling + +### Common Issues +- **Missing Files**: Returns null, logs warning +- **Invalid Frontmatter**: Skips file, logs error +- **Path Traversal Attempts**: Blocks request, logs security event +- **Large Files**: Rejects with size limit error +- **Deep Nesting**: Stops at max depth limit + +### Monitoring & Logging +- Security events logged at WARN level +- Performance issues logged at INFO level +- Errors include context but not sensitive data +- 
Regular audit of access patterns recommended + +## Migration Notes + +### From Legacy Location +- All docs moved from `/src/content/docs` to `/docs` +- Legacy import code removed to prevent module resolution errors +- Search service updated to only load from root directory +- Backward compatibility maintained via fallback logic + +### Breaking Changes +- Dynamic imports from `@/content/docs/` no longer supported +- All documentation must be in `/docs` directory +- Frontmatter structure enforced via flexible Zod validation +- Only `title` field is required in frontmatter +- `description` is now optional +- `section` is flexible string (no longer restricted enum) + +## Testing + +### Validation Tests +- Path traversal protection +- Input sanitization +- File size limits +- Directory depth limits +- Frontmatter validation + +### Performance Tests +- Large file handling +- Deep directory structures +- High-frequency access patterns +- Memory usage monitoring + +## Dependencies + +### Required Packages +- `remark` - Markdown processing +- `remark-gfm` - GitHub Flavored Markdown +- `remark-html` - HTML output +- `rehype-highlight` - Syntax highlighting +- `gray-matter` - Frontmatter parsing +- `raw-loader` - Webpack raw file loading +- `zod` - Schema validation + +### Version Compatibility +- Next.js 14+ (App Router) +- React 18+ +- Node.js 18+ + +## Production Deployment + +### Environment Variables +- No special environment variables required +- Uses filesystem access (ensure `/docs` directory is included in deployment) +- Consider CDN for static assets + +### Performance Monitoring +- Monitor doc loading times +- Track search query performance +- Alert on high error rates +- Monitor file system access patterns + +### Security Checklist +- [ ] Path validation functions working +- [ ] Input sanitization enabled +- [ ] File size limits enforced +- [ ] Rate limiting active +- [ ] Error logging configured +- [ ] No sensitive data in logs +- [ ] Access patterns monitored + 
+This documentation system is fully production-ready with comprehensive security, performance, and maintainability features. diff --git a/.cursor/rules/dont-do.mdc b/.cursor/rules/dont-do.mdc new file mode 100644 index 0000000..ae07d48 --- /dev/null +++ b/.cursor/rules/dont-do.mdc @@ -0,0 +1,107 @@ +--- +description: Things to Avoid - Anti-patterns and Common Mistakes +globs: *.ts, *.tsx, *.js, *.jsx, src/**/* +alwaysApply: false +--- +# Don't Do These Things + +## Next.js +- Don't use the pages router (use the app router) +- Don't nest server components in client components +- Don't use `use client` in server components +- Don't fetch data with server actions +- Don't mix client and server code +- Don't use getStaticProps or getServerSideProps +- Don't use router.push in server components + +## React +- Don't use class components +- Don't use default exports +- Don't use React.FC type +- Don't use inline styles (use Tailwind) +- Don't use useState when useReducer is clearer +- Don't mutate state directly +- Don't use indexes as keys + +## TypeScript +- Don't use `any` type +- Don't disable TypeScript checks +- Don't use `@ts-ignore` +- Don't use non-null assertion operator +- Don't use `Object` type +- Don't use `Function` type +- Don't use `{}` as a type + +## State Management +- Don't use global state for local concerns +- Don't prop drill more than 2 levels +- Don't mutate context values directly +- Don't use Redux (use context + reducers) +- Don't store derived state +- Don't store server state client-side +- Don't duplicate state + +## API +- Don't expose internal APIs publicly +- Don't handle errors silently +- Don't return raw error messages +- Don't trust client-side data +- Don't expose sensitive information +- Don't make unnecessary API calls +- Don't ignore API errors + +## Security +- Don't store secrets in code +- Don't use eval() or new Function() +- Don't trust user input +- Don't expose stack traces +- Don't store sensitive data client-side +- Don't use 
innerHTML +- Don't disable security headers + +## Performance +- Don't bundle unused code +- Don't load unnecessary resources +- Don't block the main thread +- Don't ignore memory leaks +- Don't skip code splitting +- Don't ignore performance metrics +- Don't render unnecessary components + +## Database +- Don't use raw SQL queries +- Don't store passwords in plain text +- Don't use boolean fields (use dates) +- Don't ignore database indexes +- Don't leak connection pools +- Don't ignore transaction boundaries +- Don't store large blobs + +## Payment Provider Integration +- Don't store order IDs in only one database field (use both `orderId` and `processorOrderId`) +- Don't assume provider data maps to your field names +- Don't read from single fields without fallback logic +- Don't skip testing the complete data flow (Provider → Import → Database → Service → UI) +- Don't assume all providers follow the same data structure +- Don't ignore field mapping inconsistencies between providers +- Don't deploy provider changes without verifying admin UI displays correctly +- Don't hardcode field names in service layer without fallbacks +- Don't skip creating debug scripts for provider integration testing + +## Testing +- Don't skip writing tests +- Don't test implementation details +- Don't use snapshot tests exclusively +- Don't mock everything +- Don't ignore test coverage +- Don't write brittle tests +- Don't test third-party code + +## Code Quality +- Don't repeat code (DRY) +- Don't ignore TypeScript errors +- Don't skip code reviews +- Don't leave TODO comments +- Don't ignore linter warnings +- Don't write unclear code +- Don't skip documentation diff --git a/.cursor/rules/environment.mdc b/.cursor/rules/environment.mdc new file mode 100644 index 0000000..ee4b5eb --- /dev/null +++ b/.cursor/rules/environment.mdc @@ -0,0 +1,96 @@ +--- +description: Environment Configuration Best Practices +globs: .env.*, config/*, src/env.*, next.config.* +--- + +# Environment 
Configuration Best Practices + +## Environment Files +- Use `.env.local` for local development +- Use `.env.development` for development +- Use `.env.test` for testing +- Use `.env.production` for production +- Never commit env files +- Document all variables +- Use example files + +## Variable Naming +- Use SCREAMING_SNAKE_CASE +- Use descriptive names +- Group related variables +- Use proper prefixes +- Document purpose +- Consider scope +- Maintain consistency + +## Security +- Never commit secrets +- Use proper encryption +- Rotate secrets regularly +- Use secret management +- Audit access +- Monitor usage +- Document procedures + +## Validation +- Validate at startup +- Check required variables +- Validate formats +- Type checking +- Document requirements +- Handle missing values +- Log validation errors + +## Organization +- Group by purpose +- Use proper prefixes +- Document structure +- Maintain consistency +- Version control +- Review regularly +- Keep organized + +## Development +- Use local overrides +- Document setup +- Share examples +- Handle conflicts +- Maintain parity +- Test configurations +- Document workflows + +## Production +- Use proper secrets +- Configure monitoring +- Set up logging +- Handle rotation +- Plan for scaling +- Document deployment +- Regular audits + +## Testing +- Use test-specific values +- Mock when needed +- Clean up after tests +- Validate configurations +- Document test setup +- Maintain isolation +- Regular validation + +## Documentation +- Document all variables +- Explain purpose +- Show examples +- Document requirements +- Keep updated +- Version control +- Regular reviews + +## Management +- Use secret management +- Configure access control +- Monitor usage +- Regular rotation +- Audit access +- Document procedures +- Plan for emergencies diff --git a/.cursor/rules/error-handling.mdc b/.cursor/rules/error-handling.mdc new file mode 100644 index 0000000..501e24e --- /dev/null +++ b/.cursor/rules/error-handling.mdc @@ 
-0,0 +1,109 @@ +--- +description: Error Handling Best Practices and Guidelines +globs: *.ts, *.tsx, src/lib/errors/*, src/components/error-boundary/* +alwaysApply: false +--- + +# Error Handling Best Practices + +## Client-Side Errors +- Use error boundaries +- Handle async errors +- Provide user feedback +- Log errors properly +- Implement recovery +- Monitor client errors +- Document error types + +## Server-Side Errors +- Use proper try/catch +- Handle async errors +- Implement logging +- Return proper status +- Monitor server errors +- Regular error review +- Document procedures + +## API Errors +- Use proper status codes +- Return meaningful messages +- Handle validation errors +- Log API errors +- Monitor error rates +- Regular error review +- Document API errors + +## Webhook Errors +- Return proper HTTP status codes (200 for success, 4xx for client errors) +- Implement comprehensive try/catch blocks for webhook processing +- Log all webhook processing errors with context (but not sensitive data) +- Never expose internal error details in webhook responses +- Implement graceful degradation for non-critical webhook failures +- Handle signature verification failures appropriately +- Implement retry logic for failed webhook processing +- Set up alerts for webhook failure patterns +- Track webhook success/failure rates +- Handle duplicate webhook events gracefully + +## Database Errors +- Handle connection errors +- Handle query errors +- Implement retries +- Log database errors +- Monitor error patterns +- Regular error review +- Document procedures + +## Validation +- Validate all inputs +- Handle edge cases +- Return clear messages +- Log validation errors +- Monitor patterns +- Regular review +- Document rules + +## Logging +- Use proper logging levels +- Structure log messages +- Include context +- Handle sensitive data +- Monitor logs +- Regular log review +- Document patterns + +## Recovery +- Implement fallbacks +- Handle graceful degradation +- Provide 
recovery options +- Monitor recovery +- Test recovery paths +- Regular drills +- Document procedures + +## Monitoring +- Track error rates +- Set up alerts +- Monitor patterns +- Regular analysis +- Take action +- Document findings +- Share insights + +## User Experience +- Show friendly messages +- Provide clear guidance +- Handle offline states +- Support recovery +- Monitor user impact +- Regular UX review +- Document patterns + +## Documentation +- Document error types +- Document handling +- Keep updated +- Share knowledge +- Regular reviews +- Monitor changes +- Maintain docs diff --git a/.cursor/rules/graceful-degradation.mdc b/.cursor/rules/graceful-degradation.mdc new file mode 100644 index 0000000..c036b13 --- /dev/null +++ b/.cursor/rules/graceful-degradation.mdc @@ -0,0 +1,177 @@ +--- +description: +globs: +alwaysApply: false +--- +# Shipkit Graceful Degradation + +## Overview + +Shipkit implements graceful degradation to provide a seamless experience whether users have a database configured or not. When no `DATABASE_URL` is provided, the application automatically falls back to local storage for data persistence. + +## Architecture + +### Database Detection + +The system checks for database availability in multiple places: + +1. **[src/server/db/index.ts](mdc:src/server/db/index.ts)** - Database connection with graceful degradation +2. **[src/payload.config.ts](mdc:src/payload.config.ts)** - Payload CMS conditional initialization +3. **[src/lib/payload/payload.ts](mdc:src/lib/payload/payload.ts)** - Payload client initialization with null fallback +4. 
**Service layer** - All data operations check `db` availability before proceeding + +### Local Storage Fallbacks + +When database is unavailable, the following local storage services are used: + +- **[src/lib/local-storage/project-storage.ts](mdc:src/lib/local-storage/project-storage.ts)** - Project management +- **[src/lib/local-storage/team-storage.ts](mdc:src/lib/local-storage/team-storage.ts)** - Team management + +## Implementation Patterns + +### Service Pattern + +All services follow this pattern for graceful degradation: + +```typescript +async someMethod(params: any) { + if (!db) { + // Use local storage fallback + return LocalStorageService.someMethod(params); + } + + // Use database + return await this.database.someMethod(params); +} +``` + +### Payload Client Pattern + +Always use `getPayloadClient()` function, never import singleton: + +```typescript +// ✅ Correct +import { getPayloadClient } from "@/lib/payload/payload"; + +const payload = await getPayloadClient(); +if (!payload) { + // Handle gracefully - CMS not available + return null; +} + +// ❌ Wrong - Don't use singleton (causes crashes) +import { payload } from "@/lib/payload/payload"; +``` + +### Conditional Configuration + +Configuration files check for environment variables before initializing database-dependent features: + +```typescript +const isFeatureEnabled = !!process.env.DATABASE_URL && process.env.FEATURE_FLAG === "true"; + +if (isFeatureEnabled) { + // Initialize database-dependent features +} +``` + +## Key Files + +### Core Database Files +- **[src/server/db/index.ts](mdc:src/server/db/index.ts)** - Main database connection with null fallback +- **[src/payload.config.ts](mdc:src/payload.config.ts)** - Conditional Payload initialization + +### Local Storage Services +- **[src/lib/local-storage/project-storage.ts](mdc:src/lib/local-storage/project-storage.ts)** - Project CRUD operations +- **[src/lib/local-storage/team-storage.ts](mdc:src/lib/local-storage/team-storage.ts)** - Team 
CRUD operations + +### Service Integration +- **[src/server/services/project-service.ts](mdc:src/server/services/project-service.ts)** - Project service with fallbacks +- **[src/server/services/team-service.ts](mdc:src/server/services/team-service.ts)** - Team service with fallbacks + +## Data Models + +Local storage services mirror the database schema exactly: + +### Projects +```typescript +interface LocalProject { + id: string; + name: string; + teamId: string; + createdAt: Date; + updatedAt: Date; + members: LocalProjectMember[]; +} +``` + +### Teams +```typescript +interface LocalTeam { + id: string; + name: string; + type: "personal" | "workspace"; + createdAt: Date; + updatedAt: Date | null; + deletedAt: Date | null; +} +``` + +## Environment Variables + +### Required for Database Mode +- `DATABASE_URL` - PostgreSQL connection string +- `PAYLOAD_SECRET` - Secret key to enable Payload CMS (optional, can use `DISABLE_PAYLOAD=true` to disable) + +### Optional Feature Flags +- `NEXT_PUBLIC_FEATURE_S3_ENABLED` - Enable S3 storage +- `NEXT_PUBLIC_FEATURE_VERCEL_BLOB_ENABLED` - Enable Vercel Blob storage +- `NEXT_PUBLIC_FEATURE_AUTH_RESEND_ENABLED` - Enable Resend email + +## Demo Data + +When no database is available and no local data exists, the system automatically initializes with demo data: + +- Demo user account +- Sample personal team +- Example projects +- Realistic project members + +## Best Practices + +### 1. Always Check Database Availability +```typescript +if (!db) { + // Local storage fallback + return LocalStorage.method(); +} +``` + +### 2. Mirror Database APIs +Local storage services should exactly match database service method signatures. + +### 3. Handle User Sessions +Demo mode creates a consistent user session that persists across browser sessions. + +### 4. Data Consistency +Local storage maintains referential integrity similar to database constraints. + +### 5. 
Error Handling +Graceful degradation should never throw errors - always provide fallbacks. + +## Debugging + +### Check Database Status +```typescript +import { db } from "@/server/db"; +console.log("Database available:", !!db); +``` + +### Inspect Local Storage +```typescript +console.log("Projects:", LocalProjectStorage.getAllProjects()); +console.log("Teams:", LocalTeamStorage.getAllTeams()); +``` + +### Environment Validation +Check that all required environment variables are set for the desired mode of operation. diff --git a/.cursor/rules/local-storage-patterns.mdc b/.cursor/rules/local-storage-patterns.mdc new file mode 100644 index 0000000..06f59cb --- /dev/null +++ b/.cursor/rules/local-storage-patterns.mdc @@ -0,0 +1,285 @@ +--- +description: +globs: +alwaysApply: false +--- +# Local Storage Implementation Patterns + +## Overview + +Shipkit uses browser local storage as a fallback when no database is configured. This provides a zero-config development experience while maintaining feature parity with database mode. + +## Core Principles + +### 1. API Consistency +Local storage services mirror database service APIs exactly: + +```typescript +// Database service method +async createProject(teamId: string, name: string, userId: string): Promise<Project> + +// Local storage service method +createProject(teamId: string, name: string, userId: string): LocalProject +``` + +### 2. Data Validation +All local storage operations include validation: + +```typescript +if (!projectName?.trim()) { + throw new Error("Project name is required"); +} +``` + +### 3. 
Referential Integrity +Local storage maintains relationships between entities: + +```typescript +// When deleting a team, remove all associated projects +const teamProjects = this.getTeamProjects(teamId); +teamProjects.forEach(project => this.deleteProject(project.id)); +``` + +## Storage Keys + +Use consistent, namespaced keys for local storage: + +```typescript +const STORAGE_KEYS = { + projects: "shipkit-projects", + projectMembers: "shipkit-project-members", + teams: "shipkit-teams", + teamMembers: "shipkit-team-members", + users: "shipkit-users" +} as const; +``` + +## Utility Functions + +### Safe Storage Access +Always check if we're in a browser environment: + +```typescript +function isClient(): boolean { + return typeof window !== "undefined"; +} + +function getFromStorage<T>(key: string): T[] { + if (!isClient()) return []; + + try { + const item = localStorage.getItem(key); + return item ? JSON.parse(item) : []; + } catch (error) { + console.warn(`Failed to parse ${key} from localStorage:`, error); + return []; + } +} +``` + +### Date Handling +Convert date strings back to Date objects: + +```typescript +function parseStoredData<T>(data: any[]): T[] { + return data.map(item => ({ + ...item, + createdAt: new Date(item.createdAt), + updatedAt: new Date(item.updatedAt), + deletedAt: item.deletedAt ? 
new Date(item.deletedAt) : null + })); +} +``` + +## Demo Data Initialization + +Provide realistic demo data for first-time users: + +```typescript +function initializeDemoData(): void { + if (this.getAllProjects().length === 0) { + this.createDemoProjects(); + } +} + +private createDemoProjects(): void { + const demoProjects = [ + { name: "Marketing Website", description: "Company landing page" }, + { name: "Mobile App", description: "iOS and Android application" }, + { name: "API Gateway", description: "Microservices backend" } + ]; + + demoProjects.forEach(project => { + this.createProject(demoTeamId, project.name, demoUserId); + }); +} +``` + +## Error Handling + +### Graceful Fallbacks +Never throw errors that would break the application: + +```typescript +getUserProjects(userId: string): LocalProject[] { + try { + const projects = this.getAllProjects(); + return projects.filter(p => this.userHasAccess(userId, p.id)); + } catch (error) { + console.warn("Failed to get user projects:", error); + return []; + } +} +``` + +### Storage Quota Management +Handle storage quota exceeded errors: + +```typescript +function saveToStorage<T>(key: string, data: T[]): void { + try { + localStorage.setItem(key, JSON.stringify(data)); + } catch (error) { + if (error.name === 'QuotaExceededError') { + console.warn("Storage quota exceeded, clearing old data"); + this.clearOldData(); + localStorage.setItem(key, JSON.stringify(data)); + } else { + throw error; + } + } +} +``` + +## CRUD Operations + +### Create Pattern +```typescript +createEntity(data: CreateEntityData): LocalEntity { + const entity: LocalEntity = { + id: generateId(), + ...data, + createdAt: new Date(), + updatedAt: new Date() + }; + + const entities = this.getAllEntities(); + entities.push(entity); + this.saveEntities(entities); + + return entity; +} +``` + +### Update Pattern +```typescript +updateEntity(id: string, updates: Partial<LocalEntity>): LocalEntity { + const entities = this.getAllEntities(); + const index = 
entities.findIndex(e => e.id === id); + + if (index === -1) { + throw new Error("Entity not found"); + } + + entities[index] = { + ...entities[index], + ...updates, + updatedAt: new Date() + }; + + this.saveEntities(entities); + return entities[index]; +} +``` + +### Delete Pattern (Soft Delete) +```typescript +deleteEntity(id: string): void { + const entities = this.getAllEntities(); + const index = entities.findIndex(e => e.id === id); + + if (index !== -1) { + entities[index].deletedAt = new Date(); + this.saveEntities(entities); + } +} +``` + +## Integration with Services + +Services should check for database availability and fallback to local storage: + +```typescript +// In service files like project-service.ts +import { LocalProjectStorage } from "@/lib/local-storage/project-storage"; + +async createProject(teamId: string, name: string, userId: string) { + if (!db) { + return LocalProjectStorage.createProject(teamId, name, userId); + } + + // Database implementation + return await this.database.createProject(teamId, name, userId); +} +``` + +## Testing Local Storage + +### Unit Tests +Mock localStorage for testing: + +```typescript +const mockLocalStorage = { + store: new Map(), + getItem: jest.fn(key => mockLocalStorage.store.get(key) || null), + setItem: jest.fn((key, value) => mockLocalStorage.store.set(key, value)), + clear: jest.fn(() => mockLocalStorage.store.clear()) +}; + +Object.defineProperty(window, 'localStorage', { value: mockLocalStorage }); +``` + +### Integration Tests +Test the fallback behavior: + +```typescript +test('should use local storage when database is unavailable', async () => { + // Mock db as null + jest.mock('@/server/db', () => ({ db: null })); + + const result = await projectService.createProject('team1', 'Test Project', 'user1'); + expect(result).toBeDefined(); + expect(mockLocalStorage.setItem).toHaveBeenCalled(); +}); +``` + +## Performance Considerations + +### Lazy Loading +Only load data when needed: + +```typescript 
+private _cachedProjects: LocalProject[] | null = null; + +getAllProjects(): LocalProject[] { + if (this._cachedProjects === null) { + this._cachedProjects = parseStoredData(getFromStorage(STORAGE_KEYS.projects)); + } + return this._cachedProjects; +} +``` + +### Batch Operations +Group multiple operations to reduce localStorage calls: + +```typescript +batchCreateProjects(projects: CreateProjectData[]): LocalProject[] { + const existingProjects = this.getAllProjects(); + const newProjects = projects.map(data => this.createProjectData(data)); + + existingProjects.push(...newProjects); + this.saveProjects(existingProjects); + + return newProjects; +} +``` diff --git a/.cursor/rules/multi-zone-architecture.mdc b/.cursor/rules/multi-zone-architecture.mdc new file mode 100644 index 0000000..2669d27 --- /dev/null +++ b/.cursor/rules/multi-zone-architecture.mdc @@ -0,0 +1,370 @@ +--- +description: +globs: +alwaysApply: false +--- +# Multi-Zone Architecture Rules + +## Overview + +Multi-zone architecture allows Shipkit applications to be split into multiple Next.js applications while appearing as a single domain to users. This pattern is ideal for: + +- **Scalability**: Different teams can work on different zones independently +- **Performance**: Each zone can be optimized for its specific use case +- **Deployment**: Zones can be deployed and updated independently +- **Technology Freedom**: Each zone can use different technologies while maintaining consistency + +## Zone Configuration Patterns + +### Standard Zone Structure +``` +domain.com/ → Main app (marketing, dashboard, auth) +domain.com/docs/* → Documentation zone +domain.com/blog/* → Blog zone +domain.com/ui/* → UI component library zone +domain.com/tools/* → Developer tools zone +``` + +### Zone Types + +#### 1. 
Main Zone (Primary Application) +- **Purpose**: Core application functionality +- **Contains**: Authentication, dashboard, marketing pages, API routes +- **Routing**: Handles all routes not claimed by other zones +- **Configuration**: Standard Shipkit configuration with multi-zone rewrites + +#### 2. Documentation Zone +- **Purpose**: Product documentation, guides, API reference +- **Features**: Search functionality, versioning, navigation tree +- **Content**: MDX files, code examples, tutorials +- **Optimization**: Static generation, fast search indexing + +#### 3. Blog Zone +- **Purpose**: Blog posts, announcements, case studies +- **Features**: CMS integration, commenting, social sharing +- **Content**: Articles, author profiles, categories +- **Optimization**: SEO optimization, RSS feeds + +#### 4. UI Component Library Zone +- **Purpose**: Component showcase, design system documentation +- **Features**: Interactive component playground, code examples +- **Content**: Component demos, design tokens, usage guidelines +- **Optimization**: Component isolation, visual regression testing + +#### 5. Developer Tools Zone +- **Purpose**: Interactive utilities, API explorers, validators +- **Features**: Real-time tools, code generators, testing utilities +- **Content**: Interactive forms, API documentation, utilities +- **Optimization**: Client-side interactivity, tool performance + +## Implementation Patterns + +### 1. 
Zone Setup + +#### Directory Structure +``` +project-root/ +├── shipkit/ # Main application +├── shipkit-docs/ # Documentation zone +├── shipkit-blog/ # Blog zone +├── shipkit-ui/ # UI library zone +└── shipkit-tools/ # Tools zone +``` + +#### Zone Creation Commands +```bash +# Create zones by cloning Shipkit +git clone https://github.com/lacymorrow/shipkit.git shipkit-docs +git clone https://github.com/lacymorrow/shipkit.git shipkit-blog +git clone https://github.com/lacymorrow/shipkit.git shipkit-ui +git clone https://github.com/lacymorrow/shipkit.git shipkit-tools + +# Install dependencies for each zone +cd shipkit-docs && bun install --frozen-lockfile +cd shipkit-blog && bun install --frozen-lockfile +cd shipkit-ui && bun install --frozen-lockfile +cd shipkit-tools && bun install --frozen-lockfile +``` + +### 2. Configuration Patterns + +#### Main Zone Configuration (next.config.ts) +```typescript +async rewrites() { + const multiZoneRewrites = []; + + // Documentation Zone + if (process.env.DOCS_DOMAIN) { + multiZoneRewrites.push( + { source: '/docs', destination: `${process.env.DOCS_DOMAIN}/docs` }, + { source: '/docs/:path*', destination: `${process.env.DOCS_DOMAIN}/docs/:path*` } + ); + } + + // Add other zones similarly... + + return multiZoneRewrites; +} +``` + +#### Zone-Specific Configuration +```typescript +// Each zone's next.config.ts +const nextConfig: NextConfig = { + basePath: '/docs', // or /blog, /ui, /tools + assetPrefix: '/docs-static', // or /blog-static, etc. + + // Inherit all Shipkit configurations + ...existingShipkitConfig, +}; +``` + +### 3. 
Environment Variables + +#### Development Environment +```bash +# Main app .env.local +DOCS_DOMAIN=http://localhost:3001 +BLOG_DOMAIN=http://localhost:3002 +UI_DOMAIN=http://localhost:3003 +TOOLS_DOMAIN=http://localhost:3004 +``` + +#### Production Environment +```bash +# Main app production environment +DOCS_DOMAIN=https://docs-shipkit.vercel.app +BLOG_DOMAIN=https://blog-shipkit.vercel.app +UI_DOMAIN=https://ui-shipkit.vercel.app +TOOLS_DOMAIN=https://tools-shipkit.vercel.app +``` + +## Navigation Patterns + +### Inter-Zone Navigation +```tsx +// Use anchor tags for navigation between zones +<a href="/docs"> + Documentation +</a> + +// NOT Next.js Link for cross-zone navigation +// ❌ <Link href="/docs">Documentation</Link> +``` + +### Intra-Zone Navigation +```tsx +// Use Next.js Link within the same zone +import Link from 'next/link' + +<Link href="/docs/advanced"> + Advanced Topics +</Link> +``` + +### Shared Navigation Components +```tsx +// Create zone-aware navigation components +const NavLink = ({ href, children, ...props }) => { + const isExternal = href.startsWith('/docs') || + href.startsWith('/blog') || + href.startsWith('/ui') || + href.startsWith('/tools'); + + if (isExternal) { + return <a href={href} {...props}>{children}</a>; + } + + return <Link href={href} {...props}>{children}</Link>; +}; +``` + +## Content Management + +### Content Organization +``` +content/ +├── docs/ +│ ├── getting-started/ +│ ├── api-reference/ +│ └── tutorials/ +├── blog/ +│ ├── announcements/ +│ ├── technical/ +│ └── case-studies/ +├── ui/ +│ ├── components/ +│ ├── design-tokens/ +│ └── guidelines/ +└── tools/ + ├── validators/ + ├── generators/ + └── utilities/ +``` + +### Shared Content Strategy +- Use consistent frontmatter across zones +- Implement shared content validation schemas +- Maintain consistent tagging and categorization +- Use shared asset management + +## Authentication & State Management + +### Shared Authentication +```typescript +// Configure NextAuth to work across zones +export const authOptions: NextAuthOptions = { + // Ensure cookies work across subdomains/zones + cookies: { + sessionToken: { 
+ name: `next-auth.session-token`, + options: { + domain: '.yourdomain.com', // Note the leading dot + httpOnly: true, + sameSite: 'lax', + path: '/', + secure: process.env.NODE_ENV === 'production' + } + } + } +}; +``` + +### Cross-Zone State +- Use URL parameters for sharable state +- Implement local storage for user preferences +- Use session storage for temporary data +- Avoid complex state synchronization between zones + +## Performance Optimization + +### Zone-Specific Optimizations + +#### Documentation Zone +- Static generation for all content +- Search index optimization +- Image optimization for diagrams +- CDN caching for assets + +#### Blog Zone +- ISR for blog posts +- Image optimization for featured images +- Social media meta tags +- RSS feed generation + +#### UI Zone +- Component isolation +- Visual regression testing +- Performance monitoring for interactive demos +- Lazy loading for component examples + +#### Tools Zone +- Client-side rendering for interactive tools +- WebAssembly for performance-critical operations +- Service worker for offline functionality +- Real-time updates where needed + +## Deployment Strategy + +### Vercel Deployment Pattern +```bash +# Deploy each zone to Vercel with descriptive names +vercel --prod --name="main-shipkit" # Main application +vercel --prod --name="docs-shipkit" # Documentation +vercel --prod --name="blog-shipkit" # Blog +vercel --prod --name="ui-shipkit" # UI Library +vercel --prod --name="tools-shipkit" # Tools +``` + +### Environment Configuration +1. Deploy each zone as separate Vercel project +2. Configure custom domains or use Vercel URLs +3. Set environment variables in main app pointing to zone URLs +4. Configure main domain to point to primary application +5. 
Test cross-zone navigation and functionality + +## Testing Strategy + +### Zone-Specific Testing +- Unit tests for each zone's components +- Integration tests for zone functionality +- E2E tests for cross-zone navigation +- Performance tests for each zone +- Accessibility tests across all zones + +### Cross-Zone Testing +```typescript +// Example E2E test for cross-zone navigation +test('navigation from main to docs zone', async ({ page }) => { + await page.goto('/'); + await page.click('a[href="/docs"]'); + await expect(page.url()).toContain('/docs'); + await expect(page.locator('h1')).toContainText('Documentation'); +}); +``` + +## Monitoring & Analytics + +### Zone-Specific Monitoring +- Performance monitoring for each zone +- Error tracking per zone +- User analytics per zone +- SEO monitoring for content zones + +### Shared Monitoring +- User journey tracking across zones +- Conversion funnel analysis +- Performance comparison between zones +- Cross-zone search analytics + +## Best Practices + +### Do's +✅ Use consistent design system across all zones +✅ Implement shared authentication +✅ Monitor performance of each zone independently +✅ Use environment variables for zone configuration +✅ Test cross-zone navigation thoroughly +✅ Implement proper error boundaries per zone +✅ Use consistent logging and monitoring +✅ Document zone-specific configuration + +### Don'ts +❌ Use Next.js Link for cross-zone navigation +❌ Share complex state between zones +❌ Ignore zone-specific performance optimization +❌ Deploy zones with inconsistent naming +❌ Skip testing cross-zone functionality +❌ Use hard-coded URLs for zone references +❌ Neglect SEO for content zones +❌ Mix authentication systems between zones + +## Troubleshooting + +### Common Issues + +#### Navigation Problems +- **Issue**: Links between zones not working +- **Solution**: Use anchor tags instead of Next.js Link for cross-zone navigation + +#### Authentication Issues +- **Issue**: User not authenticated in 
secondary zones +- **Solution**: Configure cookie domain to work across zones + +#### Asset Loading Problems +- **Issue**: Assets not loading in zones +- **Solution**: Configure assetPrefix correctly for each zone + +#### Performance Issues +- **Issue**: Slow loading between zones +- **Solution**: Implement proper prefetching and caching strategies + +#### SEO Problems +- **Issue**: Poor SEO for zone content +- **Solution**: Configure proper meta tags and sitemaps for each zone + +### Debug Tools +- Use browser dev tools to inspect network requests between zones +- Monitor Vercel function logs for each zone +- Use analytics to track user journeys across zones +- Implement custom logging for cross-zone events diff --git a/.cursor/rules/nextjs.mdc b/.cursor/rules/nextjs.mdc new file mode 100644 index 0000000..f98a099 --- /dev/null +++ b/.cursor/rules/nextjs.mdc @@ -0,0 +1,119 @@ +--- +description: Next.js Best Practices and Guidelines +globs: *.ts, *.tsx, app/*, pages/*, src/app/*, src/pages/* +alwaysApply: false +--- + +# Next.js Best Practices + +`params` should be awaited before using its properties. +`searchParams` should be awaited before using its properties. +`headers` should be awaited before using its properties. 
+ +## Server Components +- Use Server Components by default +- Keep client components minimal +- Don't nest server in client components +- Use proper data fetching patterns +- Implement proper caching strategies +- Handle streaming and suspense +- Consider SEO implications + +## Data Fetching +- Use Server Components for data fetching +- Don't fetch data in Server Actions +- Implement proper caching +- Handle loading states +- Consider revalidation strategies +- Use proper error boundaries +- Optimize for performance + +## Server Actions +- Use for data mutations only +- Keep business logic in services +- Implement proper validation +- Handle errors gracefully +- Use proper typing +- Consider optimistic updates +- Document side effects + +## Routing +- Use App Router +- Implement proper layouts +- Handle dynamic routes properly +- Use proper loading UI +- Implement error boundaries +- Consider parallel routes +- Handle intercepting routes + +## State Management +- Use Server Components when possible +- Keep client state minimal +- Use hooks appropriately +- Implement proper caching +- Consider server state +- Handle revalidation +- Document state flow + +## Performance +- Use proper image optimization +- Implement proper caching +- Consider bundle size +- Use proper code splitting +- Implement proper loading states +- Monitor performance metrics +- Optimize for Core Web Vitals + +## Security +- Implement proper authentication +- Use proper authorization +- Handle CSRF protection +- Implement proper headers +- Use environment variables +- Handle sensitive data +- Regular security audits + +## Error Handling +- Use error boundaries +- Implement proper logging +- Handle API errors +- Consider recovery strategies +- Document error scenarios +- Monitor error rates +- Implement proper fallbacks + +## Testing +- Test Server Components +- Test Server Actions +- Implement E2E tests +- Consider integration tests +- Test error scenarios +- Monitor test coverage +- Document 
test strategy + +## Deployment +- Use proper build process +- Implement proper CI/CD +- Consider staging environments +- Monitor deployment health +- Handle rollbacks +- Document deployment process +- Regular deployment audits + +## Multi-Zone Configuration +- Configure `basePath` and `assetPrefix` for each zone +- Use rewrites in main app to route to zones +- Configure environment variables for zone domains +- Use anchor tags for cross-zone navigation (not Next.js Link) +- Implement proper zone-specific optimization +- Test cross-zone functionality thoroughly +- Document zone architecture + +## Code Organization +- Follow App Router conventions +- Keep route handlers clean +- Separate concerns properly +- Use proper middleware +- Implement proper layouts +- Handle metadata properly +- Document routing structure diff --git a/.cursor/rules/payload-configuration.mdc b/.cursor/rules/payload-configuration.mdc new file mode 100644 index 0000000..1e8ead5 --- /dev/null +++ b/.cursor/rules/payload-configuration.mdc @@ -0,0 +1,317 @@ +--- +description: +globs: +alwaysApply: false +--- +# Payload CMS Configuration Patterns + +## Overview + +Shipkit conditionally initializes Payload CMS based on environment variables. The **[src/payload.config.ts](mdc:src/payload.config.ts)** file handles graceful degradation when no database is configured. + +## Conditional Initialization + +### Database Requirement Check + +Payload CMS only initializes when a database is available: + +```typescript +const isPayloadEnabled = !!process.env.DATABASE_URL && !!process.env.PAYLOAD_SECRET && !envIsTrue("DISABLE_PAYLOAD"); +``` + +### Plugin Array Pattern + +Plugins are conditionally added using array spread patterns: + +```typescript +plugins: [ + payloadCloudPlugin(), + + // S3 Storage - conditional + ...(process.env.NEXT_PUBLIC_FEATURE_S3_ENABLED === "true" && isPayloadEnabled + ? 
[
+				s3Storage({
+					collections: { media: true },
+					bucket: process.env.AWS_BUCKET_NAME!,
+					config: {
+						credentials: {
+							accessKeyId: process.env.AWS_ACCESS_KEY_ID!,
+							secretAccessKey: process.env.AWS_SECRET_ACCESS_KEY!,
+						},
+						region: process.env.AWS_REGION!,
+					},
+				}),
+			]
+		: []),
+
+	// Vercel Blob Storage - conditional
+	...(process.env.NEXT_PUBLIC_FEATURE_VERCEL_BLOB_ENABLED === "true" && isPayloadEnabled
+		? [
+				vercelBlobStorage({
+					collections: { media: true },
+					token: process.env.VERCEL_BLOB_READ_WRITE_TOKEN!,
+				}),
+			]
+		: []),
+
+],
+// Email adapter - conditional (outside plugins array)
+...(buildTimeFeatureFlags.NEXT_PUBLIC_FEATURE_AUTH_RESEND_ENABLED
+	? {
+			email: resendAdapter({
+				defaultFromAddress: RESEND_FROM_EMAIL,
+				defaultFromName: emailFromName,
+				apiKey: process.env.RESEND_API_KEY || "",
+			}),
+		}
+	: {}),
+```
+
+## Database Configuration
+
+### Conditional Database Adapter
+
+The database adapter is only added when Payload is enabled:
+
+```typescript
+if (isPayloadEnabled) {
+	config.db = postgresAdapter({
+		schemaName: dbSchemaName,
+		pool: {
+			connectionString: process.env.DATABASE_URL,
+		},
+		beforeSchemaInit: [
+			({ schema, adapter }) => {
+				// Define relationships between Payload and application tables
+				return {
+					...schema,
+					tables: {
+						...schema.tables,
+						// Enhanced relationships
+						users: {
+							...schema.tables.users,
+							relationships: [
+								{
+									relationTo: "public.shipkit_user",
+									type: "oneToOne",
+									onDelete: "CASCADE",
+								},
+							],
+						},
+						// Additional table relationships...
+
+					},
+				};
+			},
+		],
+		migrationDir: path.resolve(dirname, "migrations"),
+	});
+}
+```
+
+## Environment Variables
+
+### Required for Payload
+
+- `DATABASE_URL` - PostgreSQL connection string
+- `PAYLOAD_SECRET` - Secret key for Payload CMS
+- `DISABLE_PAYLOAD` - Set to "true" to disable Payload (optional)
+
+### Optional Features
+
+- `AWS_BUCKET_NAME`, `AWS_ACCESS_KEY_ID`, `AWS_SECRET_ACCESS_KEY`, `AWS_REGION` - For S3 storage
+- `VERCEL_BLOB_READ_WRITE_TOKEN` - For Vercel Blob storage
+- `RESEND_API_KEY` - For Resend email adapter
+
+### Build-time Feature Flags
+
+Feature flags from **[src/config/features-config.ts](mdc:src/config/features-config.ts)**:
+
+- `NEXT_PUBLIC_FEATURE_S3_ENABLED`
+- `NEXT_PUBLIC_FEATURE_VERCEL_BLOB_ENABLED`
+- `NEXT_PUBLIC_FEATURE_AUTH_RESEND_ENABLED`
+
+## Auto-Seeding Pattern
+
+### Conditional Seeding
+
+Only seed when Payload is enabled and database exists:
+
+```typescript
+async onInit(payload: any) {
+	try {
+		if (!isPayloadEnabled) {
+			console.info("⏭️ Payload CMS is disabled, skipping seeding");
+			return;
+		}
+
+		if (process.env.PAYLOAD_AUTO_SEED === "false") {
+			console.info("⏭️ Automatic Payload CMS seeding is disabled");
+			return;
+		}
+
+		const shouldSeed = await checkIfSeedingNeeded(payload);
+
+		if (shouldSeed || process.env.PAYLOAD_SEED_FORCE === "true") {
+			console.info("🌱 Seeding Payload CMS with initial data...");
+
+			const { seedAllDirect } = await import("./lib/payload/seed-utils");
+			await seedAllDirect(payload);
+
+			await markSeedingCompleted(payload);
+			console.info("✅ Seeding completed and flag set");
+		}
+	} catch (error) {
+		console.error("❌ Error in Payload CMS onInit hook:", error);
+	}
+},
+```
+
+### Seeding Status Management
+
+Track seeding completion to avoid duplicate seeding:
+
+```typescript
+async function checkIfSeedingNeeded(payload: any): Promise<boolean> {
+	try {
+		const settings = await payload.findGlobal({
+			slug: "settings",
+		});
+
+		if (settings?.seedCompleted) {
+			return false;
+		}
+
+		return true;
+	} catch (error) {
+		console.error("Error checking if seeding is needed:", error);
+		return true;
+	}
+}
+
+async function markSeedingCompleted(payload: any): Promise<void> {
+	try {
+		await payload.updateGlobal({
+			slug: "settings",
+			data: {
+				seedCompleted: true,
+				seedCompletedAt: new Date().toISOString(),
+			},
+		});
+	} catch (error) {
+		console.error("Error marking seeding as completed:", error);
+	}
+}
+```
+
+## Collections and Globals
+
+### Schema Integration
+
+Payload collections integrate with application database schema:
+
+```typescript
+// Users collection integrates with shipkit_user table
+users: {
+	relationships: [
+		{
+			relationTo: "public.shipkit_user",
+			type: "oneToOne",
+			onDelete: "CASCADE",
+		},
+	],
+},
+
+// RBAC collection integrates with role/permission tables
+rbac: {
+	relationships: [
+		{
+			relationTo: "public.shipkit_role",
+			type: "oneToMany",
+		},
+		{
+			relationTo: "public.shipkit_permission",
+			type: "oneToMany",
+		},
+	],
+},
+```
+
+## Common Patterns
+
+### Array Spread for Conditional Plugins
+
+ALWAYS use array spread for conditional plugins:
+
+```typescript
+// ✅ Correct - Array spread for plugins
+...(condition ? [plugin()] : [])
+
+// ❌ Wrong - Object spread (causes "not iterable" error)
+...(condition ? plugin() : {})
+```
+
+### Object Spread for Configuration Fields
+
+Use object spread for configuration fields like email adapters:
+
+```typescript
+// ✅ Correct - Object spread for config fields
+...(condition ? { email: emailAdapter() } : {})
+
+// ❌ Wrong - Array spread for config fields
+...(condition ? 
[{ email: emailAdapter() }] : []) +``` + +### Feature Flag Integration + +Check both environment variables and build-time feature flags: + +```typescript +const isFeatureEnabled = + process.env.FEATURE_ENV_VAR === "true" && + buildTimeFeatureFlags.NEXT_PUBLIC_FEATURE_FLAG_ENABLED && + isPayloadEnabled; +``` + +### Error Boundaries + +Wrap Payload initialization in try-catch blocks: + +```typescript +try { + // Payload initialization +} catch (error) { + console.error("Payload initialization error:", error); + // Continue without Payload +} +``` + +## Debugging + +### Check Payload Status + +```typescript +console.log("Payload enabled:", isPayloadEnabled); +console.log("Database URL set:", !!process.env.DATABASE_URL); +console.log("Payload secret set:", !!process.env.PAYLOAD_SECRET); +console.log("Disable flag:", process.env.DISABLE_PAYLOAD); +``` + +### Plugin Loading + +```typescript +console.log("Active plugins:", config.plugins?.length || 0); +console.log("S3 enabled:", process.env.NEXT_PUBLIC_FEATURE_S3_ENABLED === "true"); +console.log("Blob enabled:", process.env.NEXT_PUBLIC_FEATURE_VERCEL_BLOB_ENABLED === "true"); +``` + +## Migration Strategy + +When transitioning from local storage to Payload: + +1. Set `DATABASE_URL` environment variable +2. Set `PAYLOAD_SECRET` to a secure value +3. Run database migrations +4. Enable Payload features incrementally +5. 
Import existing local storage data if needed diff --git a/.cursor/rules/payment-providers.mdc b/.cursor/rules/payment-providers.mdc new file mode 100644 index 0000000..22d4d50 --- /dev/null +++ b/.cursor/rules/payment-providers.mdc @@ -0,0 +1,137 @@ +--- +description: +globs: +alwaysApply: false +--- +# Payment Provider Integration Best Practices + +## Field Consistency and Data Mapping + +### Database Field Usage +- **ALWAYS** store order IDs in both `orderId` and `processorOrderId` fields during import for maximum compatibility +- **NEVER** assume a single field will contain the data - different providers may use different conventions +- Use consistent field naming patterns across all providers in [src/server/providers/](mdc:src/server/providers) + +### Provider Import Methods +When implementing `importPayments()` in provider classes: +```typescript +// ✅ CORRECT: Store in both fields +await db.insert(payments).values({ + orderId: processorOrderId, // For display compatibility + processorOrderId: processorOrderId, // For provider-specific operations + // ... other fields +}); + +// ❌ WRONG: Only storing in one field +await db.insert(payments).values({ + processorOrderId: processorOrderId, // Missing orderId for display + // ... other fields +}); +``` + +### Service Layer Display Logic +In [src/server/services/payment-service.ts](mdc:src/server/services/payment-service.ts), always implement fallback logic: +```typescript +// ✅ CORRECT: Fallback logic +orderId: payment.orderId || payment.processorOrderId || "", + +// ❌ WRONG: Single field dependency +orderId: payment.orderId ?? 
"", +``` + +## Provider Integration Patterns + +### Required Provider Methods +Every payment provider must implement consistent data mapping: +- `getAllOrders()` - Must include proper order ID mapping +- `getOrdersByEmail()` - Must use same mapping as getAllOrders +- `importPayments()` - Must store data in compatible database fields +- `handleWebhookEvent()` - Must use consistent field mapping + +### Data Validation +- **ALWAYS** validate that imported data contains expected fields +- **ALWAYS** test the complete data flow: Provider API → Import → Database → Service → UI +- **NEVER** assume provider APIs return data in expected formats + +### Error Handling +- Log detailed information when field mapping fails +- Provide meaningful fallback values for missing data +- Document which fields are required vs optional for each provider + +## Testing Requirements + +### Integration Testing +- Test complete data flow from provider import to admin UI display +- Verify all database fields are populated correctly +- Test with real provider data, not just mock data +- Validate backward compatibility with existing payment data + +### Field Mapping Verification +Create debug scripts to verify field mapping: +```typescript +// Example: debug-{provider}-import-test.ts +const importStats = await provider.importPayments(); +const payments = await PaymentService.getUsersWithPayments(); +// Verify orderId is populated in UI data +``` + +## Common Anti-Patterns + +### ❌ Field Mapping Mistakes +- Storing order IDs only in provider-specific fields +- Reading from single fields without fallbacks +- Inconsistent field usage across providers +- Not testing complete data flow + +### ❌ Provider Implementation Issues +- Missing product name extraction during import +- Inconsistent error handling across providers +- Not storing metadata for debugging +- Missing webhook signature verification + +### ❌ Service Layer Problems +- Hard-coding field names without fallbacks +- Not handling missing or 
null values +- Inconsistent data transformation +- Missing validation of provider data + +## Documentation Requirements + +### Provider Documentation +- Document which database fields each provider populates +- Explain field mapping rationale and fallback logic +- Include examples of successful imports +- Document known limitations or edge cases + +### Database Schema Documentation +- Explain purpose of both `orderId` and `processorOrderId` fields +- Document which providers use which fields +- Explain fallback logic in service layer +- Keep migration notes for field changes + +## Validation Checklist + +Before deploying provider changes: +- [ ] Import stores data in compatible database fields +- [ ] Service layer includes fallback logic for display +- [ ] Admin UI shows correct data for all providers +- [ ] Backward compatibility maintained +- [ ] Debug scripts created and tested +- [ ] Documentation updated +- [ ] Integration tests pass + +## Reference Implementation + +See [src/server/providers/polar-provider.ts](mdc:src/server/providers/polar-provider.ts) lines 290-295 for correct field mapping during import. + +See [src/server/services/payment-service.ts](mdc:src/server/services/payment-service.ts) line 909 for correct fallback logic in service layer. + +## Lessons from Polar Integration + +The Polar payment integration revealed critical field mapping issues: +- Product names extracted correctly but order IDs missing in UI +- Root cause: Field stored in `processorOrderId` but UI read from `orderId` +- Solution: Store in both fields + add fallback logic +- Result: Backward compatible fix that works for all providers + +This pattern should be applied to all future provider integrations to ensure data consistency and proper UI display. 
diff --git a/.cursor/rules/payments.mdc b/.cursor/rules/payments.mdc new file mode 100644 index 0000000..83cca1d --- /dev/null +++ b/.cursor/rules/payments.mdc @@ -0,0 +1,115 @@ +--- +description: +globs: +alwaysApply: false +--- +# Payment Processing Best Practices + +## Payment Security +- Always use HTTPS for all payment-related endpoints +- Implement proper webhook signature verification for all payment providers +- Never store sensitive payment data (use tokens and references) +- Encrypt payment-related data at rest and in transit +- Implement proper access controls for payment endpoints +- Regular security audits for payment processing systems +- Follow PCI DSS compliance requirements + +## Webhook Implementation +- Verify webhook signatures using timing-safe comparison +- Implement idempotency for all payment webhook events +- Use database transactions for payment state changes +- Handle webhook retries from payment providers gracefully +- Log payment webhook events (excluding sensitive data) +- Monitor webhook success rates and alert on failures +- Test webhook endpoints with payment provider sandbox environments + +## Payment State Management +- Use database transactions for payment status updates +- Implement proper state machines for payment flows +- Handle edge cases like partial payments and refunds +- Synchronize payment data between local database and payment provider +- Implement proper rollback mechanisms for failed payments +- Track payment history and audit trails +- Handle subscription lifecycle events properly + +## Error Handling +- Return appropriate HTTP status codes for webhook responses +- Implement proper error logging for payment failures +- Never expose internal payment errors to end users +- Implement graceful degradation for payment system outages +- Handle payment provider API rate limits +- Implement retry logic with exponential backoff +- Monitor payment error rates and patterns + +## Lemon Squeezy Integration +- Always verify 
X-Signature header using HMAC-SHA256 +- Support critical webhook events: subscription_created, subscription_updated, subscription_cancelled, order_created, order_refunded +- Handle both test and production webhook environments +- Implement proper customer and subscription synchronization +- Use Lemon Squeezy checkout URLs for secure payment processing +- Handle Lemon Squeezy specific payment methods and currencies +- Test integration thoroughly in sandbox environment + +## Stripe Integration +- Verify webhook signatures using Stripe's signature verification +- Handle Stripe webhook events: payment_intent, subscription, invoice events +- Use Stripe's idempotency keys for safe retries +- Implement proper handling of 3D Secure and payment confirmations +- Handle Stripe's webhook delivery guarantees and retries +- Use Stripe's test mode for development and testing +- Implement proper handling of disputed payments and chargebacks + +## Data Privacy +- Follow GDPR requirements for customer payment data +- Implement proper data retention policies for payment information +- Anonymize or delete customer data upon request +- Never log credit card numbers or sensitive payment details +- Implement proper access controls for customer payment data +- Regular audits of payment data handling procedures +- Document data processing procedures for compliance + +## Testing +- Test payment flows in sandbox/test environments +- Implement unit tests for payment webhook processing +- Test payment failure scenarios and edge cases +- Validate payment provider integration using test cards/accounts +- Test webhook signature verification with invalid signatures +- Implement integration tests for complete payment flows +- Regular penetration testing of payment endpoints + +## Monitoring +- Monitor payment success/failure rates +- Track payment processing times and performance +- Set up alerts for payment system anomalies +- Monitor webhook delivery success rates +- Track customer payment 
experience metrics +- Implement dashboards for payment system health +- Regular review of payment processing logs + +## Compliance +- Implement PCI DSS compliance requirements +- Follow payment provider compliance guidelines +- Document payment processing procedures +- Regular compliance audits and assessments +- Implement audit trails for all payment actions +- Train team members on payment security requirements +- Keep compliance documentation up to date + +## Customer Experience +- Provide clear payment status feedback to customers +- Implement proper loading states during payment processing +- Handle payment failures gracefully with clear error messages +- Provide payment history and receipt functionality +- Implement proper payment confirmation flows +- Support multiple payment methods when possible +- Optimize payment flows for conversion + +## Documentation +- Document all payment webhook endpoints and events +- Maintain payment integration guides and procedures +- Document payment error codes and troubleshooting steps +- Keep payment provider integration documentation updated +- Document payment testing procedures and test cases +- Maintain compliance documentation and procedures +- Regular review and updates of payment documentation + diff --git a/.cursor/rules/performance.mdc b/.cursor/rules/performance.mdc new file mode 100644 index 0000000..cf83400 --- /dev/null +++ b/.cursor/rules/performance.mdc @@ -0,0 +1,96 @@ +--- +description: Performance Best Practices and Guidelines +globs: *.ts, *.tsx, *.js, *.jsx, src/**/* +--- + +# Performance Best Practices + +## Core Web Vitals +- Optimize LCP (Largest Contentful Paint) +- Minimize FID (First Input Delay) +- Reduce CLS (Cumulative Layout Shift) +- Monitor Web Vitals +- Set performance budgets +- Regular performance audits +- Document metrics + +## Image Optimization +- Use next/image +- Implement lazy loading +- Choose proper formats +- Optimize image sizes +- Use responsive images +- Implement caching +- 
Monitor image performance + +## JavaScript +- Implement code splitting +- Use dynamic imports +- Minimize bundle size +- Tree shake unused code +- Optimize dependencies +- Use proper caching +- Monitor JS performance + +## CSS +- Minimize unused CSS +- Use CSS modules +- Implement critical CSS +- Optimize tailwind usage +- Reduce CSS bundle size +- Use proper specificity +- Monitor CSS performance + +## Server-Side +- Use proper caching +- Optimize database queries +- Implement CDN +- Use edge functions +- Optimize API responses +- Monitor server performance +- Regular optimization + +## Client-Side +- Implement proper caching +- Use service workers +- Optimize rendering +- Reduce reflows/repaints +- Handle offline support +- Monitor client performance +- Regular optimization + +## Data Loading +- Use proper fetching +- Implement caching +- Handle loading states +- Optimize payload size +- Use proper protocols +- Monitor data loading +- Regular optimization + +## Build Optimization +- Optimize build process +- Minimize asset size +- Use proper compression +- Implement caching +- Monitor build metrics +- Regular build audits +- Document optimizations + +## Monitoring +- Use performance monitoring +- Track key metrics +- Set up alerts +- Monitor trends +- Regular analysis +- Document findings +- Take action on insights + +## Documentation +- Document performance goals +- Track improvements +- Document optimizations +- Keep metrics history +- Regular reviews +- Share best practices +- Maintain documentation diff --git a/.cursor/rules/project.mdc b/.cursor/rules/project.mdc new file mode 100644 index 0000000..de1b6dd --- /dev/null +++ b/.cursor/rules/project.mdc @@ -0,0 +1,192 @@ +--- +description: Project structure, tech stack, and rules for interacting with the project +globs: "*" +alwaysApply: false +--- + +# Project Overview + +This is a Next.js project using: +- App Router +- Shadcn/UI +- Tailwind CSS +- Resend +- Builder.io +- Payload CMS 3 +- NextAuth/AuthJS@v5 
+- TypeScript +- Bun + +## Multi-Zone Architecture Support + +Shipkit supports multi-zone deployment for scalable applications: + +### Zone Configuration +- Main app serves core functionality (marketing, dashboard, auth) +- Secondary zones can be deployed for specialized content: + - `/docs/*` - Documentation and guides + - `/blog/*` - Blog and articles + - `/ui/*` - UI component library showcase + - `/tools/*` - Developer tools and utilities + +### Zone Implementation Patterns +- Each zone uses a full Shipkit installation for consistency +- Zones are configured with `basePath` and `assetPrefix` +- Navigation between zones uses anchor tags (``) instead of Next.js `` +- Shared authentication and design system across zones +- Environment variables control zone routing and deployment + +### Zone Development +```bash +# Clone Shipkit for each zone +git clone https://github.com/lacymorrow/shipkit.git shipkit-docs +git clone https://github.com/lacymorrow/shipkit.git shipkit-blog +git clone https://github.com/lacymorrow/shipkit.git shipkit-ui +git clone https://github.com/lacymorrow/shipkit.git shipkit-tools +``` + +## Directory Structure +``` +src/ +├── app/ # Next.js app router pages +│ ├── (app)/ # Main application routes +│ ├── (authentication)/ # Auth pages (sign-in, sign-up, etc.) 
+│ ├── (dashboard)/ # Protected dashboard routes +│ ├── (demo)/ # Demo and example pages +│ ├── (integrations)/ # Third-party integrations +│ ├── (landing)/ # Marketing pages +│ ├── (legal)/ # Legal pages (privacy, terms) +│ ├── (shipkit)/ # Shipkit-specific pages +│ └── api/ # API routes +├── components/ # Reusable UI components +│ ├── ui/ # Shadcn/UI components +│ ├── primitives/ # Base primitive components +│ ├── blocks/ # Larger composed components +│ └── layouts/ # Layout components +├── lib/ # Utility functions and shared code +│ ├── utils/ # General utilities +│ ├── schemas/ # Validation schemas +│ ├── payload/ # Payload CMS configuration +│ └── trpc/ # tRPC configuration +├── server/ # Server-side code +│ ├── actions/ # Server actions +│ ├── services/ # Business logic and data access +│ └── db/ # Database configuration +├── content/ # MDX and static content +│ ├── docs/ # Documentation content +│ ├── blog/ # Blog content +│ ├── faq/ # FAQ content +│ └── features/ # Feature descriptions +├── styles/ # Global styles and Tailwind config +└── types/ # TypeScript type definitions +``` + +## File Naming Conventions +- Use `kebab-case` for directories and files +- Use `.tsx` for React components +- Use `.ts` for TypeScript files +- Use `.test.tsx` for test files +- Use `.css` for style files +- Use `.mdx` for documentation +- Use `.mdc` for rule documentation + +## Component Structure +- One component per file +- Export as named export (prefer arrow functions) +- Use TypeScript interfaces for props +- Keep components focused and small (under 500-700 lines) +- Follow atomic design principles +- Use descriptive, explicit variable names + +## Code Organization Principles +- Make files small and discrete (under 500 lines when possible) +- Write concise, technical TypeScript code with accurate examples +- Use functional and declarative programming patterns; avoid classes +- Prefer iteration and modularization over code duplication +- Structure files: exported 
component, subcomponents, helpers, static content, types +- Use the simplest solution first and only add complexity when necessary + +## State Management Best Practices +- Minimize 'use client', 'useEffect', and 'setState' +- Favor React Server Components (RSC) +- Use functional components with TypeScript interfaces +- Avoid circular dependencies between state variables +- Don't update state directly inside useEffect without dependencies +- Use unidirectional data flow: parent state flowing to children +- For complex state transitions, implement dedicated functions rather than direct setters + +## Navigation Patterns +- Prefer declarative links over imperative navigation (`router.push`) +- Use `src/components/primitives/link-with-transition` instead of `next/link` +- For styled links that look like buttons, use: `` +- Only use router navigation for complex scenarios (form submissions with redirects) +- For multi-zone navigation, use anchor tags (``) between zones + +## API Structure and Patterns +- RESTful endpoints in `app/api` +- Server actions in `server/actions` +- Services in `server/services` +- Type definitions in `types` +- Environment variables in `.env` +- Never use server actions to fetch data (use Server Components) +- Server actions should call services for server-side operations + +## Performance Optimization +- Wrap client components in Suspense with fallback +- Use dynamic loading for non-critical components +- Optimize images: use WebP format, include size data, implement lazy loading +- Use 'nuqs' for URL search parameter state management +- Optimize Web Vitals (LCP, CLS, FID) + +## Error Handling and Validation +- Implement robust error handling for database operations and API requests +- Validate input data to prevent runtime errors +- Add comments to explain complex logic or important decisions +- Use TypeScript's type system to enforce correct data structures +- Handle potential undefined values appropriately + +## Testing Strategy +- Jest for 
unit tests +- React Testing Library for components +- Playwright for E2E tests +- Vitest for modern testing +- Component documentation in Storybook +- Test coverage for new features + +## Documentation Standards +- README.md in root directory +- Component documentation in stories +- API documentation with OpenAPI +- Type documentation with TSDoc +- Inline comments for complex logic +- Rule documentation in `.cursor/rules/` + +## Dependencies Management +- Use exact versions in package.json +- Keep dependencies up to date +- Minimize bundle size +- Use peer dependencies appropriately +- Document breaking changes +- Use Bun as package manager + +## Development Workflow +- Use feature branches for development +- Write meaningful commit messages +- Review code before merging +- Run tests before pushing +- Keep main branch stable +- Follow semantic versioning + +## Environment Configuration +- Use environment variables for feature flags +- Support graceful degradation when services unavailable +- Configure separate environments for zones +- Use `.env.local` for development +- Set production variables in deployment platform + +## Multi-Zone Deployment +- Deploy each zone as separate Vercel project +- Configure environment variables for zone domains +- Use consistent naming: `shipkit-docs`, `shipkit-blog`, etc. +- Maintain shared authentication across zones +- Test cross-zone navigation thoroughly diff --git a/.cursor/rules/react.mdc b/.cursor/rules/react.mdc new file mode 100644 index 0000000..3a9c7ce --- /dev/null +++ b/.cursor/rules/react.mdc @@ -0,0 +1,17 @@ +--- +description: React.js Best Practices +globs: *.js, *.jsx, *.ts, *.tsx, +--- + +# React Best Practices + +Use functional components and hooks for state management. +Ensure components are reusable and maintainable. +Prefer React Server Components for fetching data. +Prefer server actions over API requests for mutating data. +Maintain a separation of concerns between client and server components. 
+ +Prefer arrow functions for React components: +✅ export const Component = () => { ... } +❌ export function Component() { ... } +❌ export default function Component() { ... } \ No newline at end of file diff --git a/.cursor/rules/security.mdc b/.cursor/rules/security.mdc new file mode 100644 index 0000000..62ab215 --- /dev/null +++ b/.cursor/rules/security.mdc @@ -0,0 +1,109 @@ +--- +description: Security Best Practices and Guidelines +globs: *.ts, *.tsx, src/server/*, src/lib/auth/*, middleware.* +alwaysApply: false +--- + +# Security Best Practices + +## Authentication +- Implement proper auth flow +- Use secure sessions +- Handle password security +- Implement MFA +- Manage JWT securely +- Monitor auth attempts +- Regular security audits + +## Authorization +- Implement role-based access +- Use proper middleware +- Validate permissions +- Handle edge cases +- Monitor access patterns +- Regular access reviews +- Document policies + +## Data Protection +- Encrypt sensitive data +- Use proper hashing +- Implement data masking +- Handle PII properly +- Regular security scans +- Monitor data access +- Document procedures + +## API Security +- Validate all inputs +- Use rate limiting +- Implement CORS properly +- Use proper headers +- Monitor API usage +- Regular security tests +- Document endpoints + +## Webhook Security +- ALWAYS verify webhook signatures using timing-safe comparison +- Never accept webhooks without proper signature validation +- Use environment variables for webhook secrets +- Implement idempotency using unique event identifiers +- Validate all webhook payload fields before processing +- Never log sensitive data from webhook payloads +- Return proper HTTP status codes for webhook responses +- Implement rate limiting per webhook source +- Process webhooks asynchronously when possible +- Use database transactions for webhook data consistency + +## Frontend Security +- Prevent XSS attacks +- Handle CSRF properly +- Secure form submissions +- Validate 
client input +- Monitor client security +- Regular security audits +- Document measures + +## Backend Security +- Validate all inputs +- Use proper sanitization +- Handle file uploads securely +- Implement proper logging +- Monitor server security +- Regular security scans +- Document procedures + +## Infrastructure +- Use proper firewalls +- Implement WAF +- Configure security groups +- Monitor infrastructure +- Regular security audits +- Document architecture +- Plan for incidents + +## Compliance +- Follow security standards +- Implement compliance +- Regular audits +- Document procedures +- Monitor compliance +- Keep records +- Regular reviews + +## Incident Response +- Have response plan +- Document procedures +- Regular drills +- Monitor incidents +- Have recovery plan +- Document lessons +- Regular updates + +## Documentation +- Document security measures +- Keep procedures updated +- Document incidents +- Regular reviews +- Share best practices +- Monitor changes +- Maintain records diff --git a/.cursor/rules/server-actions-patterns.mdc b/.cursor/rules/server-actions-patterns.mdc new file mode 100644 index 0000000..2e4a372 --- /dev/null +++ b/.cursor/rules/server-actions-patterns.mdc @@ -0,0 +1,213 @@ +--- +description: +globs: +alwaysApply: false +--- +# Server Actions & Services Patterns + +## Next.js "use server" Restrictions + +### Critical Rule: Only Async Functions in "use server" Files +❌ **Never export classes from "use server" files** +```typescript +// This will cause build errors +"use server"; +export class MyService { // ❌ BREAKS + static async method() {} +} +``` + +✅ **Always use individual exported async functions** +```typescript +// This works correctly +"use server"; +export async function myServiceMethod() { // ✅ WORKS + // implementation +} +``` + +## Service Layer Pattern + +### Function Naming Convention +Use descriptive function names that indicate the entity and action: +```typescript +// Waitlist service functions 
+addWaitlistEntry() +getWaitlistEntry() +updateWaitlistEntry() +deleteWaitlistEntry() + +// User service functions +addUserAccount() +getUserProfile() +updateUserSettings() +``` + +### Database Null Checks +Always validate database connection in service functions: +```typescript +export async function serviceFunction() { + if (!db) { + throw new Error("Database not initialized"); + } + // proceed with database operations +} +``` + +## Server Actions Pattern + +### Form Handling Actions +Server actions should handle the complete user flow: +```typescript +export async function handleFormSubmission(formData: FormData) { + try { + // 1. Validate input + // 2. Check business rules (duplicates, etc.) + // 3. Call service functions + // 4. Handle side effects (emails, etc.) + // 5. Return success/error response + return { success: true }; + } catch (error) { + return { success: false, error: error.message }; + } +} +``` + +### Error Handling Pattern +Always provide user-friendly error responses: +```typescript +} catch (error: unknown) { + if (error instanceof Error) { + console.error("Specific error:", error.message); + return { success: false, error: error.message }; + } + console.error("Unknown error:", error); + return { success: false, error: "An unknown error occurred" }; +} +``` + +## Import/Export Patterns + +### Service Function Imports +Import specific functions, optionally with aliases: +```typescript +import { + addWaitlistEntry, + isEmailOnWaitlist, + getWaitlistStats as getStats +} from "@/server/services/waitlist-service"; +``` + +### Avoid Default Exports +Use named exports for better TypeScript support: +```typescript +// ✅ Good +export async function myFunction() {} + +// ❌ Avoid +export default async function() {} +``` + +## Database Operation Patterns + +### Single Responsibility Functions +Each service function should have one clear purpose: +```typescript +// ✅ Good - single responsibility +export async function addWaitlistEntry(data: NewEntry) {} 
+export async function isEmailOnWaitlist(email: string) {} + +// ❌ Avoid - multiple responsibilities +export async function handleWaitlistOperations(action: string, data: any) {} +``` + +### Proper Return Types +Use specific TypeScript return types: +```typescript +export async function getWaitlistStats(): Promise<{ + total: number; + notified: number; + pending: number; +}> { + // implementation +} +``` + +## Testing Service Functions + +### Interface Testing +Test that functions exist and have correct signatures: +```typescript +import { myServiceFunction } from "@/server/services/my-service"; + +it("should have correct interface", () => { + expect(typeof myServiceFunction).toBe("function"); +}); +``` + +### Mock Database for Tests +Use dependency injection or mocking for unit tests: +```typescript +// Mock the database for testing +vi.mock("@/server/db", () => ({ + db: mockDb +})); +``` + +## File Organization + +### Service Files Structure +``` +src/server/ +├── services/ # Business logic functions +│ ├── user-service.ts +│ ├── waitlist-service.ts +│ └── email-service.ts +├── actions/ # Form handling and UI interactions +│ ├── user-actions.ts +│ └── waitlist-actions.ts +└── db/ # Database configuration + ├── schema.ts + └── index.ts +``` + +### Function Exports +Keep related functions in the same file: +```typescript +// waitlist-service.ts +export async function addWaitlistEntry() {} +export async function getWaitlistEntry() {} +export async function updateWaitlistEntry() {} +export async function deleteWaitlistEntry() {} +``` + +## Performance Patterns + +### Parallel Database Operations +Use Promise.all for independent queries: +```typescript +export async function getWaitlistDashboardData() { + const [stats, entries] = await Promise.all([ + getWaitlistStats(), + getWaitlistEntries({ limit: 100 }) + ]); + return { stats, entries }; +} +``` + +### Pagination Support +Include pagination options in list functions: +```typescript +export async function 
getWaitlistEntries( + options: { + limit?: number; + offset?: number; + orderBy?: "asc" | "desc"; + } = {} +): Promise<WaitlistEntry[]> { + const { limit = 50, offset = 0, orderBy = "desc" } = options; + // implementation +} +``` + +This pattern ensures service functions are properly structured for Next.js App Router and provides clear separation of concerns between data access and user interface logic. diff --git a/.cursor/rules/single-responsibility-principle.mdc b/.cursor/rules/single-responsibility-principle.mdc new file mode 100644 index 0000000..2d086b5 --- /dev/null +++ b/.cursor/rules/single-responsibility-principle.mdc @@ -0,0 +1,109 @@ +--- +description: +globs: +alwaysApply: false +--- +# Single Responsibility Principle (SRP) + +## Definition +The Single Responsibility Principle states that "A module should be responsible to one, and only one, actor." In simpler terms, a class or function should have only one reason to change. + +## Core Principles +- Each module has one clearly defined purpose +- Services handle only their domain concerns +- User creation belongs only in UserService +- Payment processing belongs only in PaymentService +- Side effects are managed by their respective services +- Cross-cutting concerns use service composition + +## Signs of SRP Violations +- Methods doing multiple unrelated things +- Large service files (>300 lines) +- Services creating/managing entities outside their domain +- Duplicated logic across services +- A change in one feature requires changes in multiple services +- Conditional logic based on entity types +- Direct database access across domain boundaries + +## Best Practices + +### Service Organization +- Services should only manipulate their own domain entities +- UserService should be the ONLY place creating/updating users +- TeamService should be the ONLY place creating/updating teams +- PaymentService should NEVER create users directly +- Use service composition instead of cross-domain logic + +### Correct Patterns
+```typescript +// ✅ Good: PaymentService calls UserService for user creation +const user = await userService.ensureUserExists({ + email: payment.email, + name: payment.name +}); +await this.linkPaymentToUser(payment, user.id); + +// ✅ Good: UserService manages all user creation side effects +async createUser(data) { + const user = await this.insert(data); + await this.createPersonalTeam(user.id); + await this.createDefaultApiKey(user.id); + return user; +} +``` + +### Incorrect Patterns +```typescript +// ❌ Bad: PaymentService creating users directly +const [user] = await db.insert(users).values({ + email: payment.email, + name: payment.name +}).returning(); + +// ❌ Bad: Duplicated team creation logic +await db.insert(teams).values({ + id: randomUUID(), + name: "Personal", + userId: user.id +}); +``` + +## Refactoring Guidelines + +### When to Refactor +- When you find duplicated business logic +- When a service needs to know too much about another domain +- When adding a feature requires changes to multiple services +- When a service file exceeds 300 lines + +### How to Refactor +1. Identify the "actor" responsible for each piece of functionality +2. Move methods to their appropriate service +3. Replace direct operations with service calls +4. Update tests to reflect new structure +5. 
Document service boundaries + +## Examples + +### User Creation +- UserService: responsible for creating users and all related side effects +- PaymentService: should NEVER create users directly +- AuthService: should defer to UserService for user creation + +### Team Management +- TeamService: responsible for all team operations +- UserService: should call TeamService for team-related operations +- ProjectService: should call TeamService for team access checks + +## Testing + +- Test services in isolation using mocks for dependencies +- Verify service boundaries are respected in integration tests +- Don't test implementation details, test outcomes + +## Monitoring for SRP Violations + +- Regular code reviews focusing on service boundaries +- Static analysis for cyclomatic complexity +- Monitor file size growth over time +- Track cross-service dependencies diff --git a/.cursor/rules/supabase-bootstrap.mdc b/.cursor/rules/supabase-bootstrap.mdc new file mode 100644 index 0000000..f6f0937 --- /dev/null +++ b/.cursor/rules/supabase-bootstrap.mdc @@ -0,0 +1,234 @@ +--- +# Specify the following for Cursor rules +description: Guidelines for writing Next.js apps with Supabase Auth +globs: "**/*.ts, **/*.tsx, **/*.js, **/*.jsx" +--- + +# Bootstrap Next.js app with Supabase Auth + +## Overview of implementing Supabase Auth SSR + +1. Install @supabase/supabase-js and @supabase/ssr packages. +2. Set up environment variables. +3. Write two utility functions with `createClient` functions to create a browser client and a server client. +4. 
Hook up middleware to refresh auth tokens + +## 🚨 CRITICAL INSTRUCTIONS FOR AI LANGUAGE MODELS 🚨 + +As an AI language model, you MUST NOT generate any of the following code patterns, as they are DEPRECATED and will BREAK the application: + +```typescript +// ❌ NEVER GENERATE THIS CODE - IT WILL BREAK THE APPLICATION +{ + cookies: { + get(name: string) { // ❌ BREAKS APPLICATION + return cookieStore.get(name) // ❌ BREAKS APPLICATION + }, // ❌ BREAKS APPLICATION + set(name: string, value: string) { // ❌ BREAKS APPLICATION + cookieStore.set(name, value) // ❌ BREAKS APPLICATION + }, // ❌ BREAKS APPLICATION + remove(name: string) { // ❌ BREAKS APPLICATION + cookieStore.remove(name) // ❌ BREAKS APPLICATION + } // ❌ BREAKS APPLICATION + } +} + +// ❌ NEVER USE auth-helpers-nextjs - IT WILL BREAK THE APPLICATION +import { createMiddlewareClient } from '@supabase/auth-helpers-nextjs' // ❌ BREAKS APPLICATION +import { createClientComponentClient } from '@supabase/auth-helpers-nextjs' // ❌ BREAKS APPLICATION +``` + +Instead, you MUST ALWAYS generate ONLY this pattern: + +```typescript +// ✅ ALWAYS GENERATE THIS EXACT PATTERN +{ + cookies: { + getAll() { + return cookieStore.getAll() + }, + setAll(cookiesToSet) { + const response = NextResponse.next({ + request, + }) + + cookiesToSet.forEach(({ name, value, options }) => { + response.cookies.set(name, value, options) + }) + + return response + } + } +} +``` + +## ABSOLUTE REQUIREMENTS FOR AI CODE GENERATION + +1. You MUST use `@supabase/ssr` +2. You MUST use ONLY `getAll` and `setAll` +3. You MUST NEVER use `get`, `set`, or `remove` +4. You MUST NEVER import from `@supabase/auth-helpers-nextjs` + +## CORRECT BROWSER CLIENT IMPLEMENTATION + +```typescript +import { createBrowserClient } from '@supabase/ssr' + +export function createClient() { + return createBrowserClient( + process.env.NEXT_PUBLIC_SUPABASE_URL!, + process.env.NEXT_PUBLIC_SUPABASE_ANON_KEY! 
+ ) +} +``` + +## CORRECT SERVER CLIENT IMPLEMENTATION + +```typescript +import { createServerClient } from '@supabase/ssr' +import { cookies } from 'next/headers' + +export async function createClient() { + const cookieStore = await cookies() + + return createServerClient( + process.env.NEXT_PUBLIC_SUPABASE_URL!, + process.env.NEXT_PUBLIC_SUPABASE_ANON_KEY!, + { + cookies: { + getAll() { + return cookieStore.getAll() + }, + setAll(cookiesToSet) { + try { + cookiesToSet.forEach(({ name, value, options }) => + cookieStore.set(name, value, options) + ) + } catch { + // The `setAll` method was called from a Server Component. + // This can be ignored if you have middleware refreshing + // user sessions. + } + }, + }, + } + ) +} +``` + +## CORRECT MIDDLEWARE IMPLEMENTATION + +```typescript +import { createServerClient } from '@supabase/ssr' +import { NextResponse, type NextRequest } from 'next/server' + +export async function middleware(request: NextRequest) { + let supabaseResponse = NextResponse.next({ + request, + }) + + const supabase = createServerClient( + process.env.NEXT_PUBLIC_SUPABASE_URL!, + process.env.NEXT_PUBLIC_SUPABASE_ANON_KEY!, + { + cookies: { + getAll() { + return request.cookies.getAll() + }, + setAll(cookiesToSet) { + cookiesToSet.forEach(({ name, value, options }) => request.cookies.set(name, value)) + supabaseResponse = NextResponse.next({ + request, + }) + cookiesToSet.forEach(({ name, value, options }) => + supabaseResponse.cookies.set(name, value, options) + ) + }, + }, + } + ) + + // Do not run code between createServerClient and + // supabase.auth.getUser(). A simple mistake could make it very hard to debug + // issues with users being randomly logged out. 
+ + // IMPORTANT: DO NOT REMOVE auth.getUser() + + const { + data: { user }, + } = await supabase.auth.getUser() + + if ( + !user && + !request.nextUrl.pathname.startsWith('/login') && + !request.nextUrl.pathname.startsWith('/auth') + ) { + // no user, potentially respond by redirecting the user to the login page + const url = request.nextUrl.clone() + url.pathname = '/login' + return NextResponse.redirect(url) + } + + // IMPORTANT: You *must* return the supabaseResponse object as it is. + // If you're creating a new response object with NextResponse.next() make sure to: + // 1. Pass the request in it, like so: + // const myNewResponse = NextResponse.next({ request }) + // 2. Copy over the cookies, like so: + // myNewResponse.cookies.setAll(supabaseResponse.cookies.getAll()) + // 3. Change the myNewResponse object to fit your needs, but avoid changing + // the cookies! + // 4. Finally: + // return myNewResponse + // If this is not done, you may be causing the browser and server to go out + // of sync and terminate the user's session prematurely! + + return supabaseResponse +} + +export const config = { + matcher: [ + /* + * Match all request paths except for the ones starting with: + * - _next/static (static files) + * - _next/image (image optimization files) + * - favicon.ico (favicon file) + * Feel free to modify this pattern to include more paths. + */ + '/((?!_next/static|_next/image|favicon.ico|.*\\.(?:svg|png|jpg|jpeg|gif|webp)$).*)', + ], +} +``` + +## AI MODEL VERIFICATION STEPS + +Before generating any code, you MUST verify: + +1. Are you using ONLY `getAll` and `setAll`? If not, STOP and FIX. +2. Are you importing from `@supabase/ssr`? If not, STOP and FIX. +3. Do you see ANY instance of `get`, `set`, or `remove`? If yes, STOP and FIX. +4. Are you importing from `auth-helpers-nextjs`? If yes, STOP and FIX. 
+ +## CONSEQUENCES OF INCORRECT IMPLEMENTATION + +If you generate code using: + +- Individual cookie methods (`get`/`set`/`remove`) +- `auth-helpers-nextjs` package + +The implementation will: + +1. Break in production +2. Fail to maintain session state +3. Cause authentication loops +4. Result in security vulnerabilities + +## AI MODEL RESPONSE TEMPLATE + +When asked about Supabase Auth SSR implementation, you MUST: + +1. ONLY use code from this guide +2. NEVER suggest deprecated approaches +3. ALWAYS use the exact cookie handling shown above +4. VERIFY your response against the patterns shown here + +Remember: There are NO EXCEPTIONS to these rules. diff --git a/.cursor/rules/supabase-db-create-functions.mdc b/.cursor/rules/supabase-db-create-functions.mdc new file mode 100644 index 0000000..81df2d6 --- /dev/null +++ b/.cursor/rules/supabase-db-create-functions.mdc @@ -0,0 +1,136 @@ +--- +# Specify the following for Cursor rules +description: Guidelines for writing Supabase database functions +globs: "**/*.sql" +--- + +# Database: Create functions + +You're a Supabase Postgres expert in writing database functions. Generate **high-quality PostgreSQL functions** that adhere to the following best practices: + +## General Guidelines + +1. **Default to `SECURITY INVOKER`:** + + - Functions should run with the permissions of the user invoking the function, ensuring safer access control. + - Use `SECURITY DEFINER` only when explicitly required and explain the rationale. + +2. **Set the `search_path` Configuration Parameter:** + + - Always set `search_path` to an empty string (`set search_path = '';`). + - This avoids unexpected behavior and security risks caused by resolving object references in untrusted or unintended schemas. + - Use fully qualified names (e.g., `schema_name.table_name`) for all database objects referenced within the function. + +3. 
**Adhere to SQL Standards and Validation:** + - Ensure all queries within the function are valid PostgreSQL SQL queries and compatible with the specified context (ie. Supabase). + +## Best Practices + +1. **Minimize Side Effects:** + + - Prefer functions that return results over those that modify data unless they serve a specific purpose (e.g., triggers). + +2. **Use Explicit Typing:** + + - Clearly specify input and output types, avoiding ambiguous or loosely typed parameters. + +3. **Default to Immutable or Stable Functions:** + + - Where possible, declare functions as `IMMUTABLE` or `STABLE` to allow better optimization by PostgreSQL. Use `VOLATILE` only if the function modifies data or has side effects. + +4. **Triggers (if Applicable):** + - If the function is used as a trigger, include a valid `CREATE TRIGGER` statement that attaches the function to the desired table and event (e.g., `BEFORE INSERT`). + +## Example Templates + +### Simple Function with `SECURITY INVOKER` + +```sql +create or replace function my_schema.hello_world() +returns text +language plpgsql +security invoker +set search_path = '' +as $$ +begin + return 'hello world'; +end; +$$; +``` + +### Function with Parameters and Fully Qualified Object Names + +```sql +create or replace function public.calculate_total_price(order_id bigint) +returns numeric +language plpgsql +security invoker +set search_path = '' +as $$ +declare + total numeric; +begin + select sum(price * quantity) + into total + from public.order_items + where order_id = calculate_total_price.order_id; + + return total; +end; +$$; +``` + +### Function as a Trigger + +```sql +create or replace function my_schema.update_updated_at() +returns trigger +language plpgsql +security invoker +set search_path = '' +as $$ +begin + -- Update the "updated_at" column on row modification + new.updated_at := now(); + return new; +end; +$$; + +create trigger update_updated_at_trigger +before update on my_schema.my_table +for each row +execute 
function my_schema.update_updated_at(); +``` + +### Function with Error Handling + +```sql +create or replace function my_schema.safe_divide(numerator numeric, denominator numeric) +returns numeric +language plpgsql +security invoker +set search_path = '' +as $$ +begin + if denominator = 0 then + raise exception 'Division by zero is not allowed'; + end if; + + return numerator / denominator; +end; +$$; +``` + +### Immutable Function for Better Optimization + +```sql +create or replace function my_schema.full_name(first_name text, last_name text) +returns text +language sql +security invoker +set search_path = '' +immutable +as $$ + select first_name || ' ' || last_name; +$$; +``` diff --git a/.cursor/rules/supabase-db-create-migrations.mdc b/.cursor/rules/supabase-db-create-migrations.mdc new file mode 100644 index 0000000..e9b27fe --- /dev/null +++ b/.cursor/rules/supabase-db-create-migrations.mdc @@ -0,0 +1,55 @@ +--- +description: Guidelines for writing Postgres SQL +globs: "**/*.sql" +--- +--- +# Specify the following for Cursor rules +description: Guidelines for writing Postgres migrations +globs: "supabase/migrations/**/*.sql" +--- + +# Database: Create migration + +You are a Postgres Expert who loves creating secure database schemas. + +This project uses the migrations provided by the Supabase CLI. + +## Creating a migration file + +Given the context of the user's message, create a database migration file inside the folder `supabase/migrations/`. + +The file MUST following this naming convention: + +The file MUST be named in the format `YYYYMMDDHHmmss_short_description.sql` with proper casing for months, minutes, and seconds in UTC time: + +1. `YYYY` - Four digits for the year (e.g., `2024`). +2. `MM` - Two digits for the month (01 to 12). +3. `DD` - Two digits for the day of the month (01 to 31). +4. `HH` - Two digits for the hour in 24-hour format (00 to 23). +5. `mm` - Two digits for the minute (00 to 59). +6. `ss` - Two digits for the second (00 to 59). 
+7. Add an appropriate description for the migration. + +For example: + +``` +20240906123045_create_profiles.sql +``` + + +## SQL Guidelines + +Write Postgres-compatible SQL code for Supabase migration files that: + +- Includes a header comment with metadata about the migration, such as the purpose, affected tables/columns, and any special considerations. +- Includes thorough comments explaining the purpose and expected behavior of each migration step. +- Write all SQL in lowercase. +- Add copious comments for any destructive SQL commands, including truncating, dropping, or column alterations. +- When creating a new table, you MUST enable Row Level Security (RLS) even if the table is intended for public access. +- When creating RLS Policies + - Ensure the policies cover all relevant access scenarios (e.g. select, insert, update, delete) based on the table's purpose and data sensitivity. + - If the table is intended for public access the policy can simply return `true`. + - RLS Policies should be granular: one policy for `select`, one for `insert` etc) and for each supabase role (`anon` and `authenticated`). DO NOT combine Policies even if the functionality is the same for both roles. + - Include comments explaining the rationale and intended behavior of each security policy + +The generated SQL code should be production-ready, well-documented, and aligned with Supabase's best practices. diff --git a/.cursor/rules/supabase-db-rls.mdc b/.cursor/rules/supabase-db-rls.mdc new file mode 100644 index 0000000..d98405c --- /dev/null +++ b/.cursor/rules/supabase-db-rls.mdc @@ -0,0 +1,248 @@ +--- +description: Guidelines for writing Postgres Row Level Security policies +globs: "**/*.sql" +--- + +# Database: Create RLS policies + +You're a Supabase Postgres expert in writing row level security policies. Your purpose is to generate a policy with the constraints given by the user. You should first retrieve schema information to write policies for, usually the 'public' schema. 
 + +The output should use the following instructions: + +- The generated SQL must be valid SQL. +- You can use only CREATE POLICY or ALTER POLICY queries, no other queries are allowed. +- Always use double apostrophe in SQL strings (eg. 'Night''s watch') +- You can add short explanations to your messages. +- The result should be a valid markdown. The SQL code should be wrapped in ``` (including sql language tag). +- Always use "auth.uid()" instead of "current_user". +- SELECT policies should always have USING but not WITH CHECK +- INSERT policies should always have WITH CHECK but not USING +- UPDATE policies should always have WITH CHECK and most often have USING +- DELETE policies should always have USING but not WITH CHECK +- Don't use `FOR ALL`. Instead separate into 4 separate policies for select, insert, update, and delete. +- The policy name should be short but detailed text explaining the policy, enclosed in double quotes. +- Always put explanations as separate text. Never use inline SQL comments. +- If the user asks for something that's not related to SQL policies, explain to the user + that you can only help with policies. +- Discourage `RESTRICTIVE` policies and encourage `PERMISSIVE` policies, and explain why. + +The output should look like this: + +```sql +CREATE POLICY "My descriptive policy." ON books FOR INSERT TO authenticated WITH CHECK ( (select auth.uid()) = author_id ); +``` + +Since you are running in a Supabase environment, take note of these Supabase-specific additions below. + +## Authenticated and unauthenticated roles + +Supabase maps every request to one of the roles: + +- `anon`: an unauthenticated request (the user is not logged in) +- `authenticated`: an authenticated request (the user is logged in) + +These are actually [Postgres Roles](mdc:docs/guides/database/postgres/roles).
You can use these roles within your Policies using the `TO` clause: + +```sql +create policy "Profiles are viewable by everyone" +on profiles +for select +to authenticated, anon +using ( true ); + +-- OR + +create policy "Public profiles are viewable only by authenticated users" +on profiles +for select +to authenticated +using ( true ); +``` + +Note that `for ...` must be added after the table but before the roles. `to ...` must be added after `for ...`: + +### Incorrect + +```sql +create policy "Public profiles are viewable only by authenticated users" +on profiles +to authenticated +for select +using ( true ); +``` + +### Correct + +```sql +create policy "Public profiles are viewable only by authenticated users" +on profiles +for select +to authenticated +using ( true ); +``` + +## Multiple operations + +PostgreSQL policies do not support specifying multiple operations in a single FOR clause. You need to create separate policies for each operation. + +### Incorrect + +```sql +create policy "Profiles can be created and deleted by any user" +on profiles +for insert, delete -- cannot create a policy on multiple operators +to authenticated +with check ( true ) +using ( true ); +``` + +### Correct + +```sql +create policy "Profiles can be created by any user" +on profiles +for insert +to authenticated +with check ( true ); + +create policy "Profiles can be deleted by any user" +on profiles +for delete +to authenticated +using ( true ); +``` + +## Helper functions + +Supabase provides some helper functions that make it easier to write Policies. + +### `auth.uid()` + +Returns the ID of the user making the request. + +### `auth.jwt()` + +Returns the JWT of the user making the request. Anything that you store in the user's `raw_app_meta_data` column or the `raw_user_meta_data` column will be accessible using this function. 
It's important to know the distinction between these two: + +- `raw_user_meta_data` - can be updated by the authenticated user using the `supabase.auth.update()` function. It is not a good place to store authorization data. +- `raw_app_meta_data` - cannot be updated by the user, so it's a good place to store authorization data. + +The `auth.jwt()` function is extremely versatile. For example, if you store some team data inside `app_metadata`, you can use it to determine whether a particular user belongs to a team. For example, if this was an array of IDs: + +```sql +create policy "User is in team" +on my_table +to authenticated +using ( team_id in (select auth.jwt() -> 'app_metadata' -> 'teams')); +``` + +### MFA + +The `auth.jwt()` function can be used to check for [Multi-Factor Authentication](mdc:docs/guides/auth/auth-mfa#enforce-rules-for-mfa-logins). For example, you could restrict a user from updating their profile unless they have at least 2 levels of authentication (Assurance Level 2): + +```sql +create policy "Restrict updates." +on profiles +as restrictive +for update +to authenticated using ( + (select auth.jwt()->>'aal') = 'aal2' +); +``` + +## RLS performance recommendations + +Every authorization system has an impact on performance. While row level security is powerful, the performance impact is important to keep in mind. This is especially true for queries that scan every row in a table - like many `select` operations, including those using limit, offset, and ordering. + +Based on a series of [tests](mdc:https:/github.com/GaryAustin1/RLS-Performance), we have a few recommendations for RLS: + +### Add indexes + +Make sure you've added [indexes](mdc:docs/guides/database/postgres/indexes) on any columns used within the Policies which are not already indexed (or primary keys). 
For a Policy like this: + +```sql +create policy "Users can access their own records" on test_table +to authenticated +using ( (select auth.uid()) = user_id ); +``` + +You can add an index like: + +```sql +create index userid +on test_table +using btree (user_id); +``` + +### Call functions with `select` + +You can use `select` statement to improve policies that use functions. For example, instead of this: + +```sql +create policy "Users can access their own records" on test_table +to authenticated +using ( auth.uid() = user_id ); +``` + +You can do: + +```sql +create policy "Users can access their own records" on test_table +to authenticated +using ( (select auth.uid()) = user_id ); +``` + +This method works well for JWT functions like `auth.uid()` and `auth.jwt()` as well as `security definer` Functions. Wrapping the function causes an `initPlan` to be run by the Postgres optimizer, which allows it to "cache" the results per-statement, rather than calling the function on each row. + +Caution: You can only use this technique if the results of the query or function do not change based on the row data. + +### Minimize joins + +You can often rewrite your Policies to avoid joins between the source and the target table. Instead, try to organize your policy to fetch all the relevant data from the target table into an array or set, then you can use an `IN` or `ANY` operation in your filter. 
 + +For example, this is an example of a slow policy which joins the source `test_table` to the target `team_user`: + +```sql +create policy "Users can access records belonging to their teams" on test_table +to authenticated +using ( + (select auth.uid()) in ( + select user_id + from team_user + where team_user.team_id = team_id -- joins to the source "test_table.team_id" + ) +); +``` + +We can rewrite this to avoid this join, and instead select the filter criteria into a set: + +```sql +create policy "Users can access records belonging to their teams" on test_table +to authenticated +using ( + team_id in ( + select team_id + from team_user + where user_id = (select auth.uid()) -- no join + ) +); +``` + +### Specify roles in your policies + +Always specify the role a policy applies to inside your policies, using the `TO` operator. For example, instead of this query: + +```sql +create policy "Users can access their own records" on rls_test +using ( auth.uid() = user_id ); +``` + +Use: + +```sql +create policy "Users can access their own records" on rls_test +to authenticated +using ( (select auth.uid()) = user_id ); +``` + +This prevents the policy `( (select auth.uid()) = user_id )` from running for any `anon` users, since the execution stops at the `to authenticated` step. diff --git a/.cursor/rules/supabase-db-styleguide.mdc b/.cursor/rules/supabase-db-styleguide.mdc new file mode 100644 index 0000000..d6e2093 --- /dev/null +++ b/.cursor/rules/supabase-db-styleguide.mdc @@ -0,0 +1,143 @@ +--- +# Specify the following for Cursor rules +description: Guidelines for writing Postgres SQL +globs: "**/*.sql" +--- + +# Postgres SQL Style Guide + +## General + +- Use lowercase for SQL reserved words to maintain consistency and readability. +- Employ consistent, descriptive identifiers for tables, columns, and other database objects. +- Use white space and indentation to enhance the readability of your code. +- Store dates in ISO 8601 format (`yyyy-mm-ddThh:mm:ss.sssss`).
+- Include comments for complex logic, using '/*...*/' for block comments and '--' for line comments. + +## Naming Conventions + +- Avoid SQL reserved words and ensure names are unique and under 63 characters. +- Use snake_case for tables and columns. +- Prefer plurals for table names +- Prefer singular names for columns. + +## Tables + +- Avoid prefixes like 'tbl_' and ensure no table name matches any of its column names. +- Always add an `id` column of type `identity generated always` unless otherwise specified. +- Create all tables in the `public` schema unless otherwise specified. +- Always add the schema to SQL queries for clarity. +- Always add a comment to describe what the table does. The comment can be up to 1024 characters. + +## Columns + +- Use singular names and avoid generic names like 'id'. +- For references to foreign tables, use the singular of the table name with the `_id` suffix. For example `user_id` to reference the `users` table +- Always use lowercase except in cases involving acronyms or when readability would be enhanced by an exception. + +#### Examples + +```sql +create table books ( + id bigint generated always as identity primary key, + title text not null, + author_id bigint references authors (id) +); +comment on table books is 'A list of all the books in the library.'; +``` + +## Queries + +- When the query is shorter keep it on just a few lines. As it gets larger start adding newlines for readability +- Add spaces for readability. + +Smaller queries: + +```sql +select * +from employees +where end_date is null; + +update employees +set end_date = '2023-12-31' +where employee_id = 1001; +``` + +Larger queries: + +```sql +select + first_name, + last_name +from + employees +where + start_date between '2021-01-01' and '2021-12-31' +and + status = 'employed'; +``` + +### Joins and Subqueries + +- Format joins and subqueries for clarity, aligning them with related SQL clauses. +- Prefer full table names when referencing tables. 
This helps for readability. + +```sql +select + employees.employee_name, + departments.department_name +from + employees +join + departments on employees.department_id = departments.department_id +where + employees.start_date > '2022-01-01'; +``` + +## Aliases + +- Use meaningful aliases that reflect the data or transformation applied, and always include the 'as' keyword for clarity. + +```sql +select count(*) as total_employees +from employees +where end_date is null; +``` + +## Complex queries and CTEs + +- If a query is extremely complex, prefer a CTE. +- Make sure the CTE is clear and linear. Prefer readability over performance. +- Add comments to each block. + +```sql +with department_employees as ( + -- Get all employees and their departments + select + employees.department_id, + employees.first_name, + employees.last_name, + departments.department_name + from + employees + join + departments on employees.department_id = departments.department_id +), +employee_counts as ( + -- Count how many employees in each department + select + department_name, + count(*) as num_employees + from + department_employees + group by + department_name +) +select + department_name, + num_employees +from + employee_counts +order by + department_name; +``` diff --git a/.cursor/rules/supabase-edge.mdc b/.cursor/rules/supabase-edge.mdc new file mode 100644 index 0000000..49cec5f --- /dev/null +++ b/.cursor/rules/supabase-edge.mdc @@ -0,0 +1,115 @@ +--- +# Specify the following for Cursor rules +description: Coding rules for Supabase Edge Functions +globs: "supabase/functions/**/*.ts" +--- + +# Writing Supabase Edge Functions + +You're an expert in writing TypeScript and Deno JavaScript runtime. Generate **high-quality Supabase Edge Functions** that adhere to the following best practices: + +## Guidelines + +1. Try to use Web APIs and Deno’s core APIs instead of external dependencies (eg: use fetch instead of Axios, use WebSockets API instead of node-ws) +2. 
If you are reusing utility methods between Edge Functions, add them to `supabase/functions/_shared` and import using a relative path. Do NOT have cross dependencies between Edge Functions. +3. Do NOT use bare specifiers when importing dependencies. If you need to use an external dependency, make sure it's prefixed with either `npm:` or `jsr:`. For example, `@supabase/supabase-js` should be written as `npm:@supabase/supabase-js`. +4. For external imports, always define a version. For example, `npm:@express` should be written as `npm:express@4.18.2`. +5. For external dependencies, importing via `npm:` and `jsr:` is preferred. Minimize the use of imports from @`deno.land/x` , `esm.sh` and @`unpkg.com` . If you have a package from one of those CDNs, you can replace the CDN hostname with `npm:` specifier. +6. You can also use Node built-in APIs. You will need to import them using `node:` specifier. For example, to import Node process: `import process from "node:process"`. Use Node APIs when you find gaps in Deno APIs. +7. Do NOT use `import { serve } from "https://deno.land/std@0.168.0/http/server.ts"`. Instead use the built-in `Deno.serve`. +8. Following environment variables (ie. secrets) are pre-populated in both local and hosted Supabase environments. Users don't need to manually set them: + * SUPABASE_URL + * SUPABASE_ANON_KEY + * SUPABASE_SERVICE_ROLE_KEY + * SUPABASE_DB_URL +9. To set other environment variables (ie. secrets) users can put them in an env file and run the `supabase secrets set --env-file path/to/env-file` +10. A single Edge Function can handle multiple routes. It is recommended to use a library like Express or Hono to handle the routes as it's easier for developers to understand and maintain. Each route must be prefixed with `/function-name` so they are routed correctly. +11. File write operations are ONLY permitted on `/tmp` directory. You can use either Deno or Node File APIs. +12.
Use `EdgeRuntime.waitUntil(promise)` static method to run long-running tasks in the background without blocking response to a request. Do NOT assume it is available in the request / execution context. + +## Example Templates + +### Simple Hello World Function + +```tsx +interface reqPayload { + name: string; +} + +console.info('server started'); + +Deno.serve(async (req: Request) => { + const { name }: reqPayload = await req.json(); + const data = { + message: `Hello ${name} from foo!`, + }; + + return new Response( + JSON.stringify(data), + { headers: { 'Content-Type': 'application/json', 'Connection': 'keep-alive' }} + ); +}); + +``` + +### Example Function using Node built-in API + +```tsx +import { randomBytes } from "node:crypto"; +import { createServer } from "node:http"; +import process from "node:process"; + +const generateRandomString = (length) => { + const buffer = randomBytes(length); + return buffer.toString('hex'); +}; + +const randomString = generateRandomString(10); +console.log(randomString); + +const server = createServer((req, res) => { + const message = `Hello`; + res.end(message); +}); + +server.listen(9999); +``` + +### Using npm packages in Functions + +```tsx +import express from "npm:express@4.18.2"; + +const app = express(); + +app.get(/(.*)/, (req, res) => { + res.send("Welcome to Supabase"); +}); + +app.listen(8000); + +``` + +### Generate embeddings using built-in @Supabase.ai API + +```tsx +const model = new Supabase.ai.Session('gte-small'); + +Deno.serve(async (req: Request) => { + const params = new URL(req.url).searchParams; + const input = params.get('text'); + const output = await model.run(input, { mean_pool: true, normalize: true }); + return new Response( + JSON.stringify( + output, + ), + { + headers: { + 'Content-Type': 'application/json', + 'Connection': 'keep-alive', + }, + }, + ); +}); + +``` diff --git a/.cursor/rules/testing.mdc b/.cursor/rules/testing.mdc new file mode 100644 index 0000000..826dbde --- /dev/null +++ 
b/.cursor/rules/testing.mdc @@ -0,0 +1,123 @@ +--- +description: Testing Best Practices and Guidelines +globs: *.test.ts, *.test.tsx, *.spec.ts, *.spec.tsx, __tests__/*, cypress/* +alwaysApply: false +--- +# Testing Best Practices and Guidelines + +## General Testing +- Write tests for all components +- Test user interactions +- Test edge cases +- Test error scenarios +- Test accessibility +- Use proper test data +- Keep tests maintainable + +## Unit Testing +- Test individual functions +- Mock external dependencies +- Test return values +- Test error handling +- Use descriptive test names +- Keep tests focused +- Test both happy and sad paths + +## Integration Testing +- Test component interactions +- Test API endpoints +- Test database operations +- Test authentication flows +- Test payment processing +- Test webhook handling +- Test third-party integrations + +## Payment Provider Testing +- **ALWAYS** test complete data flow: Provider API → Import → Database → Service → UI +- Create debug scripts for each provider (e.g., `debug-{provider}-api-test.ts`) +- Test with real provider data, not just mock data +- Verify field mapping between provider responses and database storage +- Test admin UI displays correctly after import +- Validate backward compatibility with existing payment data +- Test error scenarios (API failures, invalid data, network issues) +- Example debug script pattern: + ```typescript + // debug-polar-import-test.ts + const importStats = await polarProvider.importPayments(); + const payments = await PaymentService.getUsersWithPayments(); + console.log("Import stats:", importStats); + console.log("Sample payment data:", payments[0]); + // Verify orderId is populated correctly + ``` + +## E2E Testing +- Test complete user journeys +- Test across different browsers +- Test mobile responsiveness +- Test performance scenarios +- Test accessibility compliance +- Use realistic test data +- Test production-like environment + +## API Testing +- Test all 
endpoints +- Test request/response formats +- Test authentication +- Test rate limiting +- Test error responses +- Test data validation +- Test performance + +## Database Testing +- Test CRUD operations +- Test data integrity +- Test migrations +- Test performance +- Test concurrency +- Test backup/restore +- Test security constraints + +## Frontend Testing +- Test component rendering +- Test user interactions +- Test state management +- Test form validation +- Test responsive design +- Test accessibility +- Test performance + +## Test Data Management +- Use factory patterns +- Create realistic test data +- Clean up after tests +- Isolate test data +- Version control test data +- Document test scenarios +- Maintain test data consistency + +## Continuous Integration +- Run tests on every commit +- Test multiple environments +- Test different configurations +- Monitor test performance +- Report test coverage +- Handle flaky tests +- Maintain test reliability + +## Performance Testing +- Test load scenarios +- Test memory usage +- Test response times +- Test scalability +- Test resource consumption +- Monitor performance trends +- Set performance benchmarks + +## Security Testing +- Test authentication +- Test authorization +- Test input validation +- Test data encryption +- Test vulnerability scanning +- Test penetration scenarios +- Regular security audits diff --git a/.cursor/rules/ui-ux.mdc b/.cursor/rules/ui-ux.mdc new file mode 100644 index 0000000..5fb59a0 --- /dev/null +++ b/.cursor/rules/ui-ux.mdc @@ -0,0 +1,96 @@ +--- +description: UI/UX Design Best Practices and Guidelines +globs: *.css, *.scss, *.module.css, components/*, app/*, pages/* +--- + +# UI/UX Design Guidelines + +## Component Library +- Use Shadcn/UI components for consistency +- Follow component documentation +- Maintain design system +- Customize thoughtfully +- Document modifications +- Keep components accessible +- Test all variants + +## Styling +- Use Tailwind CSS for styling +- Follow 
responsive design principles +- Use consistent spacing +- Maintain color system +- Follow typography scale +- Keep styles maintainable +- Document custom styles + +## Layout +- Use responsive layouts +- Implement proper spacing +- Follow grid system +- Consider mobile first +- Handle breakpoints properly +- Test all screen sizes +- Document layout patterns + +## Forms +- Use proper validation +- Provide clear feedback +- Handle errors gracefully +- Show loading states +- Support keyboard navigation +- Maintain accessibility +- Document form patterns + +## Navigation +- Clear navigation structure +- Consistent patterns +- Handle state changes +- Show active states +- Support keyboard nav +- Consider mobile nav +- Document nav patterns + +## Interactions +- Clear feedback +- Consistent behavior +- Handle loading states +- Show error states +- Support touch devices +- Test all interactions +- Document patterns + +## Accessibility +- Follow WCAG guidelines +- Support screen readers +- Handle keyboard nav +- Provide alt text +- Use semantic HTML +- Test accessibility +- Document requirements + +## Performance +- Optimize images +- Handle loading states +- Minimize layout shifts +- Use proper caching +- Monitor performance +- Regular audits +- Document optimizations + +## Responsive Design +- Mobile-first approach +- Handle breakpoints +- Test all devices +- Consider orientation +- Handle touch input +- Document patterns +- Regular testing + +## Documentation +- Document components +- Maintain style guide +- Document patterns +- Keep examples updated +- Share best practices +- Regular reviews +- Version control diff --git a/.cursor/rules/vibe-tools.mdc b/.cursor/rules/vibe-tools.mdc new file mode 100644 index 0000000..b24998d --- /dev/null +++ b/.cursor/rules/vibe-tools.mdc @@ -0,0 +1,177 @@ +--- +description: Global Rule. This rule should ALWAYS be loaded. 
+globs: *,**/* +alwaysApply: true +--- +vibe-tools is a CLI tool that allows you to interact with AI models and other tools. +vibe-tools is installed on this machine and it is available to you to execute. You're encouraged to use it. + + +# Instructions +Use the following commands to get AI assistance: + +**Direct Model Queries:** +`vibe-tools ask "" --provider --model ` - Ask any model from any provider a direct question (e.g., `vibe-tools ask "What is the capital of France?" --provider openai --model o3-mini`). Note that this command is generally less useful than other commands like `repo` or `plan` because it does not include any context from your codebase or repository. +Note: in general you should not use the ask command because it does not include any context - other commands like `doc`, `repo`, or `plan` are usually better. If you are using it, make sure to include in your question all the information and context that the model might need to answer usefully. + +**Ask Command Options:** +--provider=: AI provider to use (openai, anthropic, perplexity, gemini, modelbox, or openrouter) +--model=: Model to use (required for the ask command) +--reasoning-effort=: Control the depth of reasoning for supported models (OpenAI o1/o3-mini models and Claude 3.7 Sonnet). Higher values produce more thorough responses for complex questions. + +**Implementation Planning:** +`vibe-tools plan ""` - Generate a focused implementation plan using AI (e.g., `vibe-tools plan "Add user authentication to the login page"`) +The plan command uses multiple AI models to: +1. Identify relevant files in your codebase (using Gemini by default) +2. Extract content from those files +3. 
Generate a detailed implementation plan (using OpenAI o3-mini by default) + +**Plan Command Options:** +--fileProvider=: Provider for file identification (gemini, openai, anthropic, perplexity, modelbox, or openrouter) +--thinkingProvider=: Provider for plan generation (gemini, openai, anthropic, perplexity, modelbox, or openrouter) +--fileModel=: Model to use for file identification +--thinkingModel=: Model to use for plan generation + +**Web Search:** +`vibe-tools web ""` - Get answers from the web using a provider that supports web search (e.g., Perplexity models and Gemini Models either directly or from OpenRouter or ModelBox) (e.g., `vibe-tools web "latest shadcn/ui installation instructions"`) +Note: web is a smart autonomous agent with access to the internet and an extensive up to date knowledge base. Web is NOT a web search engine. Always ask the agent for what you want using a proper sentence, do not just send it a list of keywords. In your question to web include the context and the goal that you're trying to achieve so that it can help you most effectively. +When using web for complex queries suggest writing the output to a file somewhere like local-research/.md. + +**Web Command Options:** +--provider=: AI provider to use (perplexity, gemini, modelbox, or openrouter) + +**Repository Context:** +`vibe-tools repo "" [--subdir=] [--from-github=]` - Get context-aware answers about this repository using Google Gemini (e.g., `vibe-tools repo "explain authentication flow"`). Use the optional `--subdir` parameter to analyze a specific subdirectory instead of the entire repository (e.g., `vibe-tools repo "explain the code structure" --subdir=src/components`). Use the optional `--from-github` parameter to analyze a remote GitHub repository without cloning it locally (e.g., `vibe-tools repo "explain the authentication system" --from-github=username/repo-name`).
+ +**Documentation Generation:** +`vibe-tools doc [options]` - Generate comprehensive documentation for this repository (e.g., `vibe-tools doc --output docs.md`) +when using doc for remote repos suggest writing the output to a file somewhere like local-docs/.md. + +**YouTube Video Analysis:** +`vibe-tools youtube "" [question] [--type=]` - Analyze YouTube videos and generate detailed reports (e.g., `vibe-tools youtube "https://youtu.be/43c-Sm5GMbc" --type=summary`) +Note: The YouTube command requires a `GEMINI_API_KEY` to be set in your environment or .vibe-tools.env file as the GEMINI API is the only interface that supports YouTube analysis. + +**GitHub Information:** +`vibe-tools github pr [number]` - Get the last 10 PRs, or a specific PR by number (e.g., `vibe-tools github pr 123`) +`vibe-tools github issue [number]` - Get the last 10 issues, or a specific issue by number (e.g., `vibe-tools github issue 456`) + +**ClickUp Information:** +`vibe-tools clickup task ` - Get detailed information about a ClickUp task including description, comments, status, assignees, and metadata (e.g., `vibe-tools clickup task "task_id"`) + +**Model Context Protocol (MCP) Commands:** +Use the following commands to interact with MCP servers and their specialized tools: +`vibe-tools mcp search ""` - Search the MCP Marketplace for available servers that match your needs (e.g., `vibe-tools mcp search "git repository management"`) +`vibe-tools mcp run ""` - Execute MCP server tools using natural language queries (e.g., `vibe-tools mcp run "list files in the current directory" --provider=openrouter`). The query must include sufficient information for vibe-tools to determine which server to use, provide plenty of context. + +The `search` command helps you discover servers in the MCP Marketplace based on their capabilities and your requirements. The `run` command automatically selects and executes appropriate tools from these servers based on your natural language queries. 
If you want to use a specific server include the server name in your query. E.g. `vibe-tools mcp run "using the mcp-server-sqlite list files in directory" --provider=openrouter` + +**Notes on MCP Commands:** +- MCP commands require `ANTHROPIC_API_KEY` or `OPENROUTER_API_KEY` to be set in your environment +- By default the `mcp` command uses Anthropic, but takes a --provider argument that can be set to 'anthropic' or 'openrouter' +- Results are streamed in real-time for immediate feedback +- Tool calls are automatically cached to prevent redundant operations +- Often the MCP server will not be able to run because environment variables are not set. If this happens ask the user to add the missing environment variables to the cursor tools env file at ~/.vibe-tools/.env + +**Stagehand Browser Automation:** +`vibe-tools browser open [options]` - Open a URL and capture page content, console logs, and network activity (e.g., `vibe-tools browser open "https://example.com" --html`) +`vibe-tools browser act "" --url= [options]` - Execute actions on a webpage using natural language instructions (e.g., `vibe-tools browser act "Click Login" --url=https://example.com`) +`vibe-tools browser observe "" --url= [options]` - Observe interactive elements on a webpage and suggest possible actions (e.g., `vibe-tools browser observe "interactive elements" --url=https://example.com`) +`vibe-tools browser extract "" --url= [options]` - Extract data from a webpage based on natural language instructions (e.g., `vibe-tools browser extract "product names" --url=https://example.com/products`) + +**Notes on Browser Commands:** +- All browser commands are stateless unless --connect-to is used to connect to a long-lived interactive session. In disconnected mode each command starts with a fresh browser instance and closes it when done.
+- When using `--connect-to`, special URL values are supported: + - `current`: Use the existing page without reloading + - `reload-current`: Use the existing page and refresh it (useful in development) + - If working interactively with a user you should always use --url=current unless you specifically want to navigate to a different page. Setting the url to anything else will cause a page refresh losing current state. +- Multi-step workflows involving state or combining multiple actions are supported in the `act` command using the pipe (|) separator (e.g., `vibe-tools browser act "Click Login | Type 'user@example.com' into email | Click Submit" --url=https://example.com`) +- Video recording is available for all browser commands using the `--video=` option. This will save a video of the entire browser interaction at 1280x720 resolution. The video file will be saved in the specified directory with a timestamp. +- DO NOT ask browser act to "wait" for anything, the wait command is currently disabled in Stagehand.
+- `vibe-tools youtube` analyzes YouTube videos to generate summaries, transcripts, implementation plans, or custom analyses +- `vibe-tools browser` is useful for testing and debugging web apps and uses Stagehand +- `vibe-tools mcp` enables interaction with specialized tools through MCP servers (e.g., for Git operations, file system tasks, or custom tools) + +**Running Commands:** +1. Use `vibe-tools ` to execute commands (make sure vibe-tools is installed globally using npm install -g vibe-tools so that it is in your PATH) + +**General Command Options (Supported by all commands):** +--provider=: AI provider to use (openai, anthropic, perplexity, gemini, or openrouter). If provider is not specified, the default provider for that task will be used. +--model=: Specify an alternative AI model to use. If model is not specified, the provider's default model for that task will be used. +--max-tokens=: Control response length +--save-to=: Save command output to a file (in *addition* to displaying it) +--help: View all available options (help is not fully implemented yet) +--debug: Show detailed logs and error information + +**Repository Command Options:** +--provider=: AI provider to use (gemini, openai, openrouter, perplexity, or modelbox) +--model=: Model to use for repository analysis +--max-tokens=: Maximum tokens for response +--from-github=/[@]: Analyze a remote GitHub repository without cloning it locally +--subdir=: Analyze a specific subdirectory instead of the entire repository + +**Documentation Command Options:** +--from-github=/[@]: Generate documentation for a remote GitHub repository +--provider=: AI provider to use (gemini, openai, openrouter, perplexity, or modelbox) +--model=: Model to use for documentation generation +--max-tokens=: Maximum tokens for response + +**YouTube Command Options:** +--type=: Type of analysis to perform (default: summary) + +**GitHub Command Options:** +--from-github=/[@]: Access PRs/issues from a specific GitHub repository + 
+**Browser Command Options (for 'open', 'act', 'observe', 'extract'):** +--console: Capture browser console logs (enabled by default, use --no-console to disable) +--html: Capture page HTML content (disabled by default) +--network: Capture network activity (enabled by default, use --no-network to disable) +--screenshot=: Save a screenshot of the page +--timeout=: Set navigation timeout (default: 120000ms for Stagehand operations, 30000ms for navigation) +--viewport=x: Set viewport size (e.g., 1280x720). When using --connect-to, viewport is only changed if this option is explicitly provided +--headless: Run browser in headless mode (default: true) +--no-headless: Show browser UI (non-headless mode) for debugging +--connect-to=: Connect to existing Chrome instance. Special values: 'current' (use existing page), 'reload-current' (refresh existing page) +--wait=: Wait after page load (e.g., 'time:5s', 'selector:#element-id') +--video=: Save a video recording (1280x720 resolution, timestamped subdirectory). Not available when using --connect-to +--url=: Required for `act`, `observe`, and `extract` commands. Url to navigate to before the main command or one of the special values 'current' (to stay on the current page without navigating or reloading) or 'reload-current' (to reload the current page) +--evaluate=: JavaScript code to execute in the browser before the main command + +**Nicknames** +Users can ask for these tools using nicknames +Gemini is a nickname for vibe-tools repo +Perplexity is a nickname for vibe-tools web +Stagehand is a nickname for vibe-tools browser +If people say "ask Gemini" or "ask Perplexity" or "ask Stagehand" they mean to use the `vibe-tools` command with the `repo`, `web`, or `browser` commands respectively. + +**Xcode Commands:** +`vibe-tools xcode build [buildPath=] [destination=]` - Build Xcode project and report errors. +**Build Command Options:** +--buildPath=: (Optional) Specifies a custom directory for derived build data. 
Defaults to ./.build/DerivedData. +--destination=: (Optional) Specifies the destination for building the app (e.g., 'platform=iOS Simulator,name=iPhone 16 Pro'). Defaults to 'platform=iOS Simulator,name=iPhone 16 Pro'. + +`vibe-tools xcode run [destination=]` - Build and run the Xcode project on a simulator. +**Run Command Options:** +--destination=: (Optional) Specifies the destination simulator (e.g., 'platform=iOS Simulator,name=iPhone 16 Pro'). Defaults to 'platform=iOS Simulator,name=iPhone 16 Pro'. + +`vibe-tools xcode lint` - Run static analysis on the Xcode project to find and fix issues. + +**Additional Notes:** +- For detailed information, see `node_modules/vibe-tools/README.md` (if installed locally). +- Configuration is in `vibe-tools.config.json` (or `~/.vibe-tools/config.json`). +- API keys are loaded from `.vibe-tools.env` (or `~/.vibe-tools/.env`). +- ClickUp commands require a `CLICKUP_API_TOKEN` to be set in your `.vibe-tools.env` file. +- Available models depend on your configured provider (OpenAI or Anthropic) in `vibe-tools.config.json`. +- repo has a limit of 2M tokens of context. The context can be reduced by filtering out files in a .repomixignore file. +- problems running browser commands may be because playwright is not installed. Recommend installing playwright globally. +- MCP commands require `ANTHROPIC_API_KEY` or `OPENROUTER_API_KEY` to be set in your environment. +- **Remember:** You're part of a team of superhuman expert AIs. Work together to solve complex problems. +- **Repomix Configuration:** You can customize which files are included/excluded during repository analysis by creating a `repomix.config.json` file in your project root. This file will be automatically detected by `repo`, `plan`, and `doc` commands. 
+ + + \ No newline at end of file diff --git a/.cursor/rules/waitlist-implementation.mdc b/.cursor/rules/waitlist-implementation.mdc new file mode 100644 index 0000000..214dd12 --- /dev/null +++ b/.cursor/rules/waitlist-implementation.mdc @@ -0,0 +1,164 @@ +--- +description: +globs: +alwaysApply: false +--- +# Waitlist Implementation Guide + +## Overview +Shipkit includes a complete waitlist feature for product launches. The implementation follows Next.js App Router patterns with Server Components, Server Actions, and Drizzle ORM. + +## Architecture Pattern + +### Service Layer Pattern +The waitlist uses individual async functions instead of classes due to Next.js "use server" restrictions: + +- ✅ **Correct**: Individual exported async functions in [waitlist-service.ts](mdc:src/server/services/waitlist-service.ts) +- ❌ **Incorrect**: Class-based services (not allowed in "use server" files) + +### Database Schema +The waitlist table is defined in [schema.ts](mdc:src/server/db/schema.ts) with: +- Email uniqueness constraints +- Proper indexing for performance +- Timestamp tracking for analytics +- Metadata field for extensibility + +## File Structure + +### Core Components +- **Main Page**: [waitlist/page.tsx](mdc:src/app/(app)/waitlist/page.tsx) - Server component with Suspense +- **Hero Component**: [waitlist-hero.tsx](mdc:src/app/(app)/waitlist/_components/waitlist-hero.tsx) - Client component with form +- **Admin Dashboard**: [admin/waitlist/page.tsx](mdc:src/app/(app)/(admin)/admin/waitlist/page.tsx) - Server component + +### Service Layer +- **Database Operations**: [waitlist-service.ts](mdc:src/server/services/waitlist-service.ts) - Individual async functions +- **Server Actions**: [waitlist-actions.ts](mdc:src/server/actions/waitlist-actions.ts) - Form handling and email integration + +### Key Functions +```typescript +// Service functions (use server) +addWaitlistEntry() - Add new entry to database +isEmailOnWaitlist() - Check for duplicates 
+getWaitlistEntries() - Paginated retrieval +getWaitlistStats() - Analytics data + +// Server actions (use server) +addToWaitlist() - Full signup flow with email +addToWaitlistSimple() - Quick email signup +getWaitlistStats() - Public stats access +``` + +## Database Patterns + +### Migration Pattern +Use Drizzle Kit for schema changes: +```bash +bun run db:push +``` + +### Schema Conventions +- Use `createTable` with DB_PREFIX from env +- Include proper indexes for query patterns +- Use timestamps for audit trails +- Store metadata as JSON string + +## Component Patterns + +### Server Components (Default) +- Use for data fetching and analytics +- Render directly from database queries +- Example: [waitlist-stats.tsx](mdc:src/app/(app)/waitlist/_components/waitlist-stats.tsx) + +### Client Components ("use client") +- Use for forms and interactive elements +- Handle loading states and user feedback +- Example: [waitlist-hero.tsx](mdc:src/app/(app)/waitlist/_components/waitlist-hero.tsx) + +### Suspense Pattern +Wrap async components with Suspense for better UX: +```typescript +}> + + +``` + +## Email Integration + +### Resend Configuration +- Multiple API key support (RESEND_API_KEY + RESEND_API_KEY fallback) +- Optional audience integration +- Graceful error handling when email fails +- Configuration in [resend.ts](mdc:src/lib/resend.ts) + +### Email Flow +1. User submits form +2. Database entry created first +3. Email sent as secondary action +4. 
Graceful fallback if email fails + +## Admin Dashboard Patterns + +### Real-time Stats +- Server-side rendering for live data +- Promise.all for parallel data fetching +- Proper error boundaries + +### Data Display +- Use Shadcn/UI Card components +- Format numbers with toLocaleString() +- Include relative timestamps with date-fns + +## Testing Patterns + +### Service Layer Tests +- Test function interfaces exist +- Validate data structures +- Mock database for unit tests +- Example: [waitlist-service.test.ts](mdc:tests/unit/server/services/waitlist-service.test.ts) + +## Error Handling + +### Database Null Checks +Always check if database is initialized: +```typescript +if (!db) { + throw new Error("Database not initialized"); +} +``` + +### Graceful Fallbacks +- Continue if email service fails +- Provide meaningful error messages +- Log errors for debugging + +## Performance Considerations + +### Database Optimization +- Proper indexing on email and created_at +- Pagination for large datasets +- Select only needed fields + +### Caching Strategy +- Server components cache automatically +- Use revalidation for admin dashboards +- Consider edge caching for public stats + +## Security Patterns + +### Input Validation +- Email format validation +- SQL injection prevention via Drizzle +- Sanitize user inputs + +### Access Control +- Admin routes protected by middleware +- Public routes rate-limited +- No sensitive data in client components + +## Documentation + +Complete feature documentation available in [waitlist.mdx](mdc:src/content/docs/waitlist.mdx) including: +- Setup instructions +- Environment variables +- API examples +- Troubleshooting guide diff --git a/.cursor/rules/webhook-security.mdc b/.cursor/rules/webhook-security.mdc new file mode 100644 index 0000000..16dc017 --- /dev/null +++ b/.cursor/rules/webhook-security.mdc @@ -0,0 +1,124 @@ +--- +description: +globs: +alwaysApply: false +--- +# Webhook Security Best Practices + +## Signature Verification +- 
ALWAYS verify webhook signatures using timing-safe comparison +- Never accept webhooks without proper signature validation +- Use environment variables for webhook secrets +- Implement multiple signature algorithm support when available +- Log signature verification failures for security monitoring +- Never expose signature verification logic in error messages +- Use raw request body for signature validation (before JSON parsing) + +## Request Validation +- Validate all webhook payload fields before processing +- Implement strict JSON schema validation +- Check for required fields and proper data types +- Sanitize all input data before database operations +- Validate webhook event types against expected values +- Implement payload size limits to prevent DoS attacks +- Check request headers for proper content-type + +## Idempotency +- Implement idempotency using unique event identifiers +- Store processed webhook IDs to prevent duplicate processing +- Use database transactions for atomic operations +- Handle race conditions with proper locking mechanisms +- Implement retry logic for failed webhook processing +- Set appropriate timeouts for database operations +- Log all idempotency checks and outcomes + +## Error Handling +- Return proper HTTP status codes (200 for success, 4xx for client errors) +- Implement comprehensive try/catch blocks +- Log all webhook processing errors with context +- Never expose internal error details in responses +- Implement graceful degradation for non-critical failures +- Use structured logging for better error analysis +- Set up alerts for webhook failure patterns + +## Rate Limiting +- Implement rate limiting per webhook source +- Use sliding window or token bucket algorithms +- Configure different limits for different event types +- Monitor and alert on rate limit violations +- Implement progressive backoff for repeated violations +- Log all rate limiting actions +- Allow for burst traffic during normal operations + +## Data Security 
+- Never log sensitive data from webhook payloads +- Encrypt webhook data at rest if storage is required +- Implement data retention policies for webhook logs +- Sanitize logs before external monitoring systems +- Use secure connections (HTTPS) for all webhook endpoints +- Validate SSL certificates in development and production +- Implement proper access controls for webhook endpoints + +## Monitoring and Alerting +- Track webhook success/failure rates +- Monitor processing times and performance metrics +- Set up alerts for unusual patterns or failures +- Implement health checks for webhook endpoints +- Track payload sizes and processing volumes +- Monitor for potential security threats or attacks +- Regular review of webhook logs and metrics + +## Testing +- Test signature verification with invalid signatures +- Test with malformed and oversized payloads +- Implement integration tests with webhook providers +- Test idempotency with duplicate events +- Test error handling and recovery scenarios +- Validate rate limiting behavior under load +- Test webhook endpoint availability and performance + +## Database Security +- Use parameterized queries to prevent SQL injection +- Implement proper database connection pooling +- Use database transactions for webhook data consistency +- Implement proper database access controls +- Regular database security audits +- Monitor database performance during webhook processing +- Implement database backup and recovery procedures + +## Event Processing +- Process webhooks asynchronously when possible +- Implement proper queuing mechanisms for high-volume webhooks +- Use database transactions for multi-step operations +- Handle webhook dependencies and ordering when required +- Implement proper rollback mechanisms for failed operations +- Track processing status and provide visibility +- Implement dead letter queues for failed events + +## Documentation +- Document all supported webhook events and formats +- Maintain webhook endpoint 
documentation +- Document security requirements and procedures +- Keep webhook integration guides updated +- Document error codes and troubleshooting steps +- Maintain webhook testing and validation procedures +- Document monitoring and alerting configurations + +## Compliance +- Follow payment processor security requirements +- Implement PCI DSS compliance for payment webhooks +- Follow GDPR requirements for customer data processing +- Implement audit trails for compliance reporting +- Regular security assessments and penetration testing +- Document compliance procedures and requirements +- Regular compliance training for development teams + +## Lemon Squeezy Specific +- Always verify X-Signature header using HMAC-SHA256 +- Support all critical webhook events (subscription.created, payment_success, etc.) +- Implement proper customer and subscription data synchronization +- Handle test vs production webhook environments correctly +- Implement proper error responses for Lemon Squeezy retry logic +- Follow Lemon Squeezy webhook documentation requirements +- Test webhook integration in Lemon Squeezy sandbox environment + diff --git a/.cursorindexingignore b/.cursorindexingignore new file mode 100644 index 0000000..953908e --- /dev/null +++ b/.cursorindexingignore @@ -0,0 +1,3 @@ + +# Don't index SpecStory auto-save files, but allow explicit context inclusion via @ references +.specstory/** diff --git a/.github/CONTRIBUTING.md b/.github/CONTRIBUTING.md new file mode 100644 index 0000000..de610ee --- /dev/null +++ b/.github/CONTRIBUTING.md @@ -0,0 +1,54 @@ +# Contributing to Shipkit + +We love your input! We want to make contributing to Shipkit as easy and transparent as possible, whether it's: + +- Reporting a bug +- Discussing the current state of the code +- Submitting a fix +- Proposing new features +- Becoming a maintainer + +## We Develop with GitHub + +We use GitHub to host code, to track issues and feature requests, as well as accept pull requests. 
+ +## We Use [GitHub Flow](https://guides.github.com/introduction/flow/index.html) + +Pull requests are the best way to propose changes to the codebase. We actively welcome your pull requests: + +1. Fork the repo and create your branch from `main`. +2. If you've added code that should be tested, add tests. +3. If you've changed APIs, update the documentation. +4. Ensure the test suite passes. +5. Make sure your code lints. +6. Issue that pull request! + +## Any contributions you make will be under the MIT Software License + +In short, when you submit code changes, your submissions are understood to be under the same [MIT License](http://choosealicense.com/licenses/mit/) that covers the project. Feel free to contact the maintainers if that's a concern. + +## Report bugs using GitHub's [issue tracker](https://github.com/shipkit/shipkit/issues) + +We use GitHub issues to track public bugs. Report a bug by [opening a new issue](https://github.com/shipkit/shipkit/issues/new/choose); it's that easy! + +## Write bug reports with detail, background, and sample code + +**Great Bug Reports** tend to have: + +- A quick summary and/or background +- Steps to reproduce + - Be specific! + - Give sample code if you can. +- What you expected would happen +- What actually happens +- Notes (possibly including why you think this might be happening, or stuff you tried that didn't work) + +## Use a Consistent Coding Style + +- Use TypeScript for all code +- Run `bun run lint:fix` to ensure consistent formatting +- Follow the existing code style patterns + +## License + +By contributing, you agree that your contributions will be licensed under its MIT License. 
diff --git a/.github/ISSUE_TEMPLATE/bug-report.md b/.github/ISSUE_TEMPLATE/bug-report.md new file mode 100644 index 0000000..3b07464 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/bug-report.md @@ -0,0 +1,50 @@ +--- +name: Bug report +about: Create a report to help us improve +title: '[BUG] ' +labels: bug +assignees: '' + +--- + +**Describe the bug** +A clear and concise description of what the bug is. + +**To reproduce** +Steps to reproduce the behavior with exact commands and actions: + +1. Run the command `bun + ); +} diff --git a/src/components/primitives/link.tsx b/src/components/primitives/link.tsx new file mode 100644 index 0000000..e08ce91 --- /dev/null +++ b/src/components/primitives/link.tsx @@ -0,0 +1,53 @@ +"use client"; +import { default as NextLink, type LinkProps as NextLinkProps } from "next/link"; +import { usePathname } from "next/navigation"; +import { Link as TransitionsLink } from "next-view-transitions"; +import type React from "react"; +import { useMemo } from "react"; +import type { ButtonProps } from "@/components/ui/button"; +import { SEARCH_PARAM_KEYS } from "@/config/search-param-keys"; +import { siteConfig } from "@/config/site-config"; + +interface CustomLinkProps { + variant?: "default" | ButtonProps["variant"]; + withRedirect?: boolean; + withTransition?: boolean; +} + +type LinkProps = NextLinkProps & + CustomLinkProps & { children: React.ReactNode } & Omit< + React.AnchorHTMLAttributes, + keyof NextLinkProps + >; + +export const Link = ({ + children, + variant = "default", + withRedirect = false, + withTransition = siteConfig?.behavior?.pageTransitions, + ...props +}: LinkProps) => { + const pathname = usePathname(); + + const href = useMemo(() => { + let newHref = typeof props.href === "string" ? props.href : (props.href.href ?? 
""); + if (withRedirect) { + const redirectTo = pathname; + if (redirectTo && typeof window !== "undefined") { + const nextUrl = new URL(redirectTo, window.location.origin); + const params = new URLSearchParams(); + params.set(SEARCH_PARAM_KEYS.nextUrl, String(nextUrl)); + newHref = `${newHref}?${String(params)}`; + } + } + return newHref; + }, [props.href, withRedirect, pathname]); + + const LinkComponent = withTransition ? TransitionsLink : NextLink; + + return ( + + {children} + + ); +}; diff --git a/src/components/primitives/loader.tsx b/src/components/primitives/loader.tsx new file mode 100644 index 0000000..bdcc35c --- /dev/null +++ b/src/components/primitives/loader.tsx @@ -0,0 +1,82 @@ +"use client"; + +import * as React from "react"; +import { LoaderAtoms } from "@/components/loaders/loader-atoms"; +import { cn } from "@/lib/utils"; + +export interface LoaderProps extends React.HTMLAttributes { + /** + * The size of the loader indicator + */ + size?: "sm" | "default" | "lg"; + + /** + * The color variant of the loader indicator + */ + color?: "default" | "primary" | "secondary" | "muted"; + + /** + * Whether to show a full-page loader state + */ + fullPage?: boolean; + + /** + * Custom loader text for accessibility + */ + label?: string; + + /** + * Whether to show a backdrop behind the loader + */ + backdrop?: boolean; + + /** + * Whether to fade in the loader indicator + * @default false + */ + fade?: boolean; +} + +/** + * A loader component that can be used with React Suspense + * + * @example + * ```tsx + * + * // or + * }> + * + * + * ``` + */ +export const Loader = React.forwardRef( + ( + { + className, + size = "default", + color = "default", + fullPage = false, + label = "Loader...", + backdrop = true, + fade = false, + ...props + }, + ref + ) => { + return ( +
+ +
+ ); + } +); diff --git a/src/components/primitives/masonry.tsx b/src/components/primitives/masonry.tsx new file mode 100644 index 0000000..9c5cf85 --- /dev/null +++ b/src/components/primitives/masonry.tsx @@ -0,0 +1,55 @@ +import type React from "react"; +import { useMemo } from "react"; +import BlurFade from "@/components/ui/blur-fade"; + +interface MasonryProps { + items: T[]; + renderItem: (item: T, index: number) => React.ReactNode; + columns?: number; + gap?: number; +} + +export function Masonry({ items, renderItem, columns = 3, gap = 4 }: MasonryProps) { + return ( +
+
+ {items.map((item, idx) => ( + +
{renderItem(item, idx)}
+
+ ))} +
+
+ ); +} + +// Example usage with images +const ExampleMasonry: React.FC = () => { + const images = useMemo(() => { + return Array.from({ length: 9 }, (_, i) => { + // const isLandscape = Math.random() > 0.5; + const width = Math.floor(Math.random() * (800 - 600 + 1) + 600); + const height = Math.floor(Math.random() * (800 - 600 + 1) + 600); + return `https://picsum.photos/seed/${i + 1}/${width}/${height}`; + }); + }, []); + + return ( + ( + {`Random + )} + /> + ); +}; + +export default ExampleMasonry; diff --git a/src/components/primitives/modal-context.tsx b/src/components/primitives/modal-context.tsx new file mode 100644 index 0000000..b7e7f3a --- /dev/null +++ b/src/components/primitives/modal-context.tsx @@ -0,0 +1,21 @@ +"use client"; + +import { createContext, useContext, type ReactNode } from "react"; + +const ModalContext = createContext(false); + +interface ModalProviderProps { + children: ReactNode; +} + +export function ModalProvider({ children }: ModalProviderProps) { + return {children}; +} + +/** + * Returns true if the component is rendered inside a Modal. + * Useful for changing navigation behavior (e.g., using router.replace instead of Link). 
+ */ +export function useIsModal() { + return useContext(ModalContext); +} diff --git a/src/components/primitives/modal.tsx b/src/components/primitives/modal.tsx new file mode 100644 index 0000000..1191d61 --- /dev/null +++ b/src/components/primitives/modal.tsx @@ -0,0 +1,178 @@ +"use client"; + +import { usePathname, useRouter } from "next/navigation"; +import * as React from "react"; +import { useEffect, useMemo } from "react"; +import { useIsMobile } from "@/hooks/use-mobile"; +import { Button } from "@/components/ui/button"; +import { + Dialog, + DialogContent, + DialogDescription, + DialogHeader, + DialogTitle, + DialogTrigger, +} from "@/components/ui/dialog"; +import { + Drawer, + DrawerClose, + DrawerContent, + DrawerDescription, + DrawerFooter, + DrawerHeader, + DrawerTitle, + DrawerTrigger, +} from "@/components/ui/drawer"; +import { debounce } from "@/lib/utils/debounce"; +import { ModalProvider } from "./modal-context"; + +interface DrawerDialogProps { + asChild?: boolean; + routeBack?: boolean; + trigger?: React.ReactNode; + dialogTitle?: string; + dialogDescription?: string; + open?: boolean; + onOpenChange?: (open: boolean) => void; + autoCloseOnRouteChange?: boolean; + children: React.ReactNode; + className?: string; +} + +export function Modal({ + asChild = false, + routeBack = false, + trigger, + dialogTitle, + dialogDescription, + open = true, + children, + onOpenChange, + autoCloseOnRouteChange = true, + className, + ...props +}: DrawerDialogProps) { + const router = useRouter(); + const pathname = usePathname(); + const isMobile = useIsMobile(); + const [isOpen, setIsOpen] = React.useState(open); + const closingDueToRouteChange = React.useRef(false); + const hasHandledInitialRoute = React.useRef(false); + const previousPathname = React.useRef(pathname); + + // Sync isOpen state with open prop when it changes + useEffect(() => { + setIsOpen(open); + }, [open]); + + // Don't immediately close the modal, we need to wait for the modal to animate 
closed before we should navigate + // @see https://nextjs.org/docs/app/building-your-application/routing/parallel-routes#modals + const debouncedRouteBack = useMemo(() => debounce(() => router.back(), 300), [router]); + + // When the route path changes (e.g., navigating from one intercepted modal to another), + // close the current modal without triggering router.back(). This prevents canceling + // the forward navigation initiated by the new link. + useEffect(() => { + if (!autoCloseOnRouteChange) return; + if (!hasHandledInitialRoute.current) { + hasHandledInitialRoute.current = true; + previousPathname.current = pathname; + return; + } + + // Only close if pathname actually changed + if (previousPathname.current === pathname) return; + + previousPathname.current = pathname; + closingDueToRouteChange.current = true; + setIsOpen(false); + const timeoutId = window.setTimeout(() => { + closingDueToRouteChange.current = false; + }, 400); + return () => window.clearTimeout(timeoutId); + }, [pathname, autoCloseOnRouteChange]); + + const handleOpenChange = (open: boolean) => { + setIsOpen(open); + + if (onOpenChange) { + return onOpenChange(open); + } + + if (!open && routeBack && !closingDueToRouteChange.current) { + debouncedRouteBack(); + } + }; + + useEffect(() => { + return () => { + const debounced = debouncedRouteBack as typeof debouncedRouteBack & { cancel?: () => void }; + if (typeof debounced.cancel === "function") { + debounced.cancel(); + } + }; + }, [debouncedRouteBack]); + + // Using Tailwind responsive classes to conditionally render Dialog or Drawer + // md: breakpoint is typically 768px which is a common tablet/desktop breakpoint + return ( + <> + {/* Dialog for desktop - hidden on small screens, visible on medium and up */} + {!isMobile ? ( + handleOpenChange(open)} + open={typeof open === "undefined" ? isOpen : open} + {...props} + > + {trigger && {trigger}} + + + + + {dialogTitle ? ( + {dialogTitle} + ) : ( + + {dialogTitle ?? 
"Modal dialog window"} + + )} + {dialogDescription && {dialogDescription}} + + {children} + + + + ) : ( + <> + {/* Drawer for mobile - visible on small screens, hidden on medium and up */} + handleOpenChange(open)} + open={typeof open === "undefined" ? isOpen : open} + > + {trigger && {trigger}} + + + + + {dialogTitle ?? "Modal"} + + + {dialogDescription ?? ""} + + + {children} + + + + + + + + + + )} + + ); +} diff --git a/src/components/primitives/progressive-blur.tsx b/src/components/primitives/progressive-blur.tsx new file mode 100644 index 0000000..5def540 --- /dev/null +++ b/src/components/primitives/progressive-blur.tsx @@ -0,0 +1,104 @@ +import React from "react"; + +type ProgressiveBlurProps = { + className?: string; + backgroundColor?: string; + position?: "top" | "bottom"; + height?: string; + blurAmount?: string; +}; + +const ProgressiveBlur = ({ + className = "", + backgroundColor = "#f5f4f3", + position = "top", + height = "150px", + blurAmount = "4px", +}: ProgressiveBlurProps) => { + const isTop = position === "top"; + + return ( +
+ ); +}; + +const Skiper41 = () => { + return ( +
+ + + +
+
+ + Scroll down to see the effect + +
+ +
+ {Array.from({ length: 10 }).map((_, index) => ( +
+ Lorem ipsum dolor sit amet consectetur adipisicing elit. + Obcaecati, reiciendis eum vitae nostrum, temporibus repudiandae + voluptatibus, natus iure ipsa velit odit quibusdam illum. Quaerat + cumque laudantium libero reprehenderit perferendis quo nulla + voluptate? Repellat tenetur labore exercitationem dicta libero + voluptate suscipit, iusto ea assumenda. Ipsa enim, quidem atque + modi error eaque, debitis perferendis, hic iste libero dignissimos + ea! Quod inventore beatae aspernatur nulla rem perferendis aperiam + at debitis delectus odit quia animi ex mollitia vero molestias + itaque deleniti, quos exercitationem consequatur assumenda dolor? + Quod reiciendis in similique reprehenderit commodi quo blanditiis + nobis hic ea optio illum placeat officia alias quasi autem earum + quos obcaecati, voluptatum corporis quisquam. Quisquam iste, quas + explicabo omnis harum aut quam adipisci, voluptatem saepe + accusantium doloribus repellendus amet culpa magnam ex et dolores + accusamus commodi facere aliquam voluptatum alias? Officia + expedita ut vel? Beatae deserunt sequi id eos libero suscipit + totam cum, sed architecto atque quisquam et incidunt quod fuga + ullam repellat assumenda quos ab, voluptatum sint nesciunt? Ad + sapiente est laborum quam sint eius sequi. Eum, veniam + dignissimos. +
+ ))} +
+
+
+ ); +}; + +export { ProgressiveBlur, Skiper41 }; + +/** + * Skiper 41 Canvas_Landing_004 — React + framer motion + * Inspired by and adapted from https://devouringdetails.com/ + * We respect the original creators. This is an inspired rebuild with our own taste and does not claim any ownership. + * These animations aren’t associated with the devouringdetails.com . They’re independent recreations meant to study interaction design + * + * License & Usage: + * - Free to use and modify in both personal and commercial projects. + * - Attribution to Skiper UI is required when using the free version. + * - No attribution required with Skiper UI Pro. + * + * Feedback and contributions are welcome. + * + * Author: @gurvinder-singh02 + * Website: https://gxuri.in + * Twitter: https://x.com/Gur__vi + */ diff --git a/src/components/primitives/prose.tsx b/src/components/primitives/prose.tsx new file mode 100644 index 0000000..2d37702 --- /dev/null +++ b/src/components/primitives/prose.tsx @@ -0,0 +1,19 @@ +import type { HTMLAttributes } from "react"; +import { FontProvider } from "@/components/providers/font-provider"; +import { cn } from "@/lib/utils"; + +interface ProseProps extends HTMLAttributes { + unstyled?: boolean; +} + +export function Prose({ children, className, unstyled, ...props }: ProseProps) { + // For some reason the body font class in the pages router doesn't get the font, so we need to wrap the children in a font provider + return ( + + {children} + + ); +} diff --git a/src/components/primitives/scroll-fade.tsx b/src/components/primitives/scroll-fade.tsx new file mode 100644 index 0000000..261f0a8 --- /dev/null +++ b/src/components/primitives/scroll-fade.tsx @@ -0,0 +1,31 @@ +import React from "react"; + +import { ScrollArea } from "@/components/ui/scroll-area"; + +const ScrollFade = () => { + return ( +
+
+ + see the fade while scroll + +
+
+ +
+ {Array.from({ length: 11 }).map((_, index) => ( +
+ 00{index}
+
+ ))} +
+
+
+
+ ); +}; + +export { ScrollFade }; diff --git a/src/components/primitives/shortcut-display.tsx b/src/components/primitives/shortcut-display.tsx new file mode 100644 index 0000000..b22756d --- /dev/null +++ b/src/components/primitives/shortcut-display.tsx @@ -0,0 +1,109 @@ +"use client"; + +import * as React from "react"; +import { type ShortcutActionType, shortcutConfig } from "@/config/keyboard-shortcuts"; +import { useIsMac } from "@/hooks/use-is-mac"; +import { cn } from "@/lib/utils"; + +interface ShortcutDisplayProps { + action: ShortcutActionType; + className?: string; + /** Render as a different component, e.g., DropdownMenuShortcut */ + as?: React.ElementType<{ children?: React.ReactNode; className?: string }>; + /** Base styles to apply, defaults to kbd styles */ + baseClassName?: string; +} + +// Default styles mimicking Shadcn kbd +const defaultKbdStyles = + "h-5 select-none items-center gap-1 rounded border bg-muted px-1.5 font-mono text-[10px] font-medium"; + +/** + * Finds the primary shortcut string for a given action. + * Ignores alternatives like '/' for OPEN_SEARCH. + */ +function findPrimaryShortcut(action: ShortcutActionType): string | null { + for (const [shortcut, act] of shortcutConfig) { + if (act === action) { + // Basic heuristic: prefer shortcuts with modifiers + if ( + shortcut.includes("mod+") || + shortcut.includes("shift+") || + shortcut.includes("alt+") || + shortcut.includes("ctrl+") + ) { + return shortcut; + } + // If no modified shortcut found yet, keep track of the first simple one + if (!shortcut.includes("+ ")) return shortcut; // Return simple keys like 'Escape' or '/' + } + } + // Fallback to the first match if no modified shortcut found + const firstMatch = shortcutConfig.find(([, act]) => act === action); + return firstMatch ? firstMatch[0] : null; +} + +/** + * Parses a shortcut string (e.g., "mod+shift+K") into display parts. 
+ */ +function parseShortcut(shortcut: string, isMac: boolean): string[] { + return shortcut.split("+").map((part) => { + switch (part.toLowerCase()) { + case "mod": + return isMac ? "⌘" : "Ctrl"; + case "shift": + return isMac ? "⇧" : "Shift"; + case "alt": + return isMac ? "⌥" : "Alt"; + case "ctrl": + return "Ctrl"; + case "enter": + return "Enter"; // Or maybe an icon? + case "escape": + return "Esc"; + default: + return part.toUpperCase(); // Return the key itself, capitalized + } + }); +} + +export const ShortcutDisplay = ({ + action, + className, + as: Component = "kbd", // Default to HTML kbd tag + baseClassName = defaultKbdStyles, +}: ShortcutDisplayProps) => { + const isMac = useIsMac(); + const primaryShortcut = findPrimaryShortcut(action); + + if (!primaryShortcut) { + console.warn(`ShortcutDisplay: No shortcut found for action: ${action}`); + return null; // Don't render if no shortcut is defined + } + + const parts = parseShortcut(primaryShortcut, isMac); + + // For single keys like '/', just render the key without special styling + if (parts.length === 1 && primaryShortcut.length === 1) { + // Use baseClassName only if Component is kbd, otherwise just className + const finalClassName = Component === "kbd" ? cn(baseClassName, className) : className; + return {parts[0]}; + } + + // Render modifiers and key separately + // Apply base styles only if Component is kbd + const finalClassName = + Component === "kbd" + ? 
cn("inline-flex", baseClassName, className) // Combine base styles with specific className + : cn("inline-flex items-center gap-1", className); // Use simpler base for non-kbd elements + + return ( + + {parts.map((part, index) => ( + {part} + ))} + + ); +}; + +ShortcutDisplay.displayName = "ShortcutDisplay"; diff --git a/src/components/primitives/suspense-fallback.tsx b/src/components/primitives/suspense-fallback.tsx new file mode 100644 index 0000000..33a13b4 --- /dev/null +++ b/src/components/primitives/suspense-fallback.tsx @@ -0,0 +1,5 @@ +import { Loader } from "@/components/primitives/loader"; + +export const SuspenseFallback = () => { + return ; +}; diff --git a/src/hooks/use-async-state.ts b/src/hooks/use-async-state.ts new file mode 100644 index 0000000..3c9ad07 --- /dev/null +++ b/src/hooks/use-async-state.ts @@ -0,0 +1,194 @@ +import { useCallback, useState } from "react"; + +export interface AsyncState { + data: T | null; + loading: boolean; + error: string | null; + success: boolean; +} + +export interface UseAsyncStateReturn extends AsyncState { + execute: (...args: Args) => Promise; + reset: () => void; + setData: (data: T | null) => void; + setError: (error: string | null) => void; +} + +/** + * A hook for managing async operations with standardized loading, error, and success states. 
+ * + * @example + * ```tsx + * const { data, loading, error, execute } = useAsyncState(async (id: string) => { + * const response = await fetch(`/api/users/${id}`); + * return response.json(); + * }); + * + * // Usage + * const handleSubmit = () => execute(userId); + * ``` + */ +export function useAsyncState( + asyncFunction: (...args: Args) => Promise, + initialData: T | null = null +): UseAsyncStateReturn { + const [data, setData] = useState(initialData); + const [loading, setLoading] = useState(false); + const [error, setError] = useState(null); + const [success, setSuccess] = useState(false); + + const execute = useCallback( + async (...args: Args): Promise => { + setLoading(true); + setError(null); + setSuccess(false); + + try { + const result = await asyncFunction(...args); + setData(result); + setSuccess(true); + return result; + } catch (err) { + const errorMessage = err instanceof Error ? err.message : "An unknown error occurred"; + setError(errorMessage); + throw err; + } finally { + setLoading(false); + } + }, + [asyncFunction] + ); + + const reset = useCallback(() => { + setData(initialData); + setLoading(false); + setError(null); + setSuccess(false); + }, [initialData]); + + return { + data, + loading, + error, + success, + execute, + reset, + setData, + setError, + }; +} + +/** + * A simpler version for operations that don't return data + */ +export function useAsyncAction( + asyncFunction: (...args: Args) => Promise +) { + const [loading, setLoading] = useState(false); + const [error, setError] = useState(null); + const [success, setSuccess] = useState(false); + + const execute = useCallback( + async (...args: Args): Promise => { + setLoading(true); + setError(null); + setSuccess(false); + + try { + await asyncFunction(...args); + setSuccess(true); + } catch (err) { + const errorMessage = err instanceof Error ? 
err.message : "An unknown error occurred"; + setError(errorMessage); + throw err; + } finally { + setLoading(false); + } + }, + [asyncFunction] + ); + + const reset = useCallback(() => { + setLoading(false); + setError(null); + setSuccess(false); + }, []); + + return { + loading, + error, + success, + execute, + reset, + setError, + }; +} + +/** + * Hook for managing multiple async operations with independent states + */ +export function useMultiAsyncState>() { + const [states, setStates] = useState>>( + {} as Record> + ); + + const createExecutor = useCallback( + (key: K, asyncFunction: () => Promise) => { + return async () => { + setStates((prev: Record>) => ({ + ...prev, + [key]: { ...prev[key], loading: true, error: null, success: false }, + })); + + try { + const result = await asyncFunction(); + setStates((prev: Record>) => ({ + ...prev, + [key]: { data: result, loading: false, error: null, success: true }, + })); + return result; + } catch (err) { + const errorMessage = err instanceof Error ? 
err.message : "An unknown error occurred"; + setStates((prev: Record>) => ({ + ...prev, + [key]: { ...prev[key], loading: false, error: errorMessage, success: false }, + })); + throw err; + } + }; + }, + [] + ); + + const getState = useCallback( + (key: K): AsyncState => { + return ( + (states[key] as AsyncState) || { + data: null, + loading: false, + error: null, + success: false, + } + ); + }, + [states] + ); + + const reset = useCallback((key?: K) => { + if (key) { + setStates((prev: Record>) => ({ + ...prev, + [key]: { data: null, loading: false, error: null, success: false }, + })); + } else { + setStates({} as Record>); + } + }, []); + + return { + createExecutor, + getState, + reset, + states, + }; +} diff --git a/src/hooks/use-auto-resize-textarea.ts b/src/hooks/use-auto-resize-textarea.ts new file mode 100644 index 0000000..8ad7ec6 --- /dev/null +++ b/src/hooks/use-auto-resize-textarea.ts @@ -0,0 +1,51 @@ +import { useCallback, useEffect, useRef } from "react"; + +interface UseAutoResizeTextareaProps { + minHeight: number; + maxHeight?: number; +} + +export function useAutoResizeTextarea({ minHeight, maxHeight }: UseAutoResizeTextareaProps) { + const textareaRef = useRef(null); + + const adjustHeight = useCallback( + (reset?: boolean) => { + const textarea = textareaRef.current; + if (!textarea) return; + + if (reset) { + textarea.style.height = `${minHeight}px`; + return; + } + + // Temporarily shrink to get the right scrollHeight + textarea.style.height = `${minHeight}px`; + + // Calculate new height + const newHeight = Math.max( + minHeight, + Math.min(textarea.scrollHeight, maxHeight ?? 
Number.POSITIVE_INFINITY) + ); + + textarea.style.height = `${newHeight}px`; + }, + [minHeight, maxHeight] + ); + + useEffect(() => { + // Set initial height + const textarea = textareaRef.current; + if (textarea) { + textarea.style.height = `${minHeight}px`; + } + }, [minHeight]); + + // Adjust height on window resize + useEffect(() => { + const handleResize = () => adjustHeight(); + window.addEventListener("resize", handleResize); + return () => window.removeEventListener("resize", handleResize); + }, [adjustHeight]); + + return { textareaRef, adjustHeight }; +} diff --git a/src/hooks/use-countdown.ts b/src/hooks/use-countdown.ts new file mode 100644 index 0000000..a12de34 --- /dev/null +++ b/src/hooks/use-countdown.ts @@ -0,0 +1,63 @@ +import { useEffect, useState } from "react"; + +interface CountdownResult { + days: number; + hours: number; + minutes: number; + seconds: number; + isExpired: boolean; +} + +/** + * Hook to create a countdown timer to a specific date + * @param targetDate - The date to count down to (ISO string or Date object) + * @returns CountdownResult object with remaining time and expiry status + */ +export const useCountdown = (targetDate: string | Date): CountdownResult => { + const [countdown, setCountdown] = useState({ + days: 0, + hours: 0, + minutes: 0, + seconds: 0, + isExpired: false, + }); + + useEffect(() => { + const target = new Date(targetDate).getTime(); + + const calculateTimeLeft = () => { + const now = new Date().getTime(); + const difference = target - now; + + if (difference <= 0) { + return { + days: 0, + hours: 0, + minutes: 0, + seconds: 0, + isExpired: true, + }; + } + + return { + days: Math.floor(difference / (1000 * 60 * 60 * 24)), + hours: Math.floor((difference % (1000 * 60 * 60 * 24)) / (1000 * 60 * 60)), + minutes: Math.floor((difference % (1000 * 60 * 60)) / (1000 * 60)), + seconds: Math.floor((difference % (1000 * 60)) / 1000), + isExpired: false, + }; + }; + + // Initial calculation + 
setCountdown(calculateTimeLeft()); + + // Update every second + const timer = setInterval(() => { + setCountdown(calculateTimeLeft()); + }, 1000); + + return () => clearInterval(timer); + }, [targetDate]); + + return countdown; +}; diff --git a/src/hooks/use-has-primary-touch.tsx b/src/hooks/use-has-primary-touch.tsx new file mode 100644 index 0000000..b4abc99 --- /dev/null +++ b/src/hooks/use-has-primary-touch.tsx @@ -0,0 +1,28 @@ +import { useEffect, useState } from "react"; + +export function useTouchPrimary() { + const [isTouchPrimary, setIsTouchPrimary] = useState(false); + + useEffect(() => { + if (typeof window === "undefined") return; + + const controller = new AbortController(); + const { signal } = controller; + + const handleTouch = () => { + const hasTouch = "ontouchstart" in window || navigator.maxTouchPoints > 0; + const prefersTouch = window.matchMedia("(pointer: coarse)").matches; + setIsTouchPrimary(hasTouch && prefersTouch); + }; + + const mq = window.matchMedia("(pointer: coarse)"); + mq.addEventListener("change", handleTouch, { signal }); + window.addEventListener("pointerdown", handleTouch, { signal }); + + handleTouch(); + + return () => controller.abort(); + }, []); + + return isTouchPrimary; +} diff --git a/src/hooks/use-is-mac.ts b/src/hooks/use-is-mac.ts new file mode 100644 index 0000000..b7f99c9 --- /dev/null +++ b/src/hooks/use-is-mac.ts @@ -0,0 +1,43 @@ +"use client"; + +import { useEffect, useState } from "react"; +import { is } from "@/lib/utils/is"; + +// Extend Navigator type to include userAgentData (might be browser-specific) +interface NavigatorWithUAData extends Navigator { + userAgentData?: { + platform: string; + brands: { brand: string; version: string }[]; + mobile: boolean; + }; +} + +/** + * Hook to determine if the current OS is macOS. + * Uses navigator.userAgentData if available, otherwise falls back to navigator.platform. + * Returns false during SSR or if the platform cannot be determined. 
+ */ +export function useIsMac(): boolean { + const [isMac, setIsMac] = useState(false); + + useEffect(() => { + let determinedIsMac = false; + if (typeof window !== "undefined") { + const nav = navigator as NavigatorWithUAData; // Cast to extended type + + // Prefer userAgentData if available + if (nav.userAgentData?.platform) { + determinedIsMac = /mac|iphone|ipad|ipod/i.test(nav.userAgentData.platform); + } else if (nav.platform) { + // Fallback to platform + determinedIsMac = /Mac|iPod|iPhone|iPad/.test(nav.platform); + } else { + // Last resort fallback using the existing utility + determinedIsMac = is.mac; + } + } + setIsMac(determinedIsMac); + }, []); + + return isMac; +} diff --git a/src/hooks/use-local-storage.ts b/src/hooks/use-local-storage.ts new file mode 100644 index 0000000..910b8cb --- /dev/null +++ b/src/hooks/use-local-storage.ts @@ -0,0 +1,104 @@ +"use client"; + +import { useCallback, useEffect, useState } from "react"; + +export function useLocalStorage( + key: string, + initialValue: T +): [T, (value: T | ((val: T) => T)) => void] { + // Get from local storage then + // parse stored json or return initialValue + const readValue = (): T => { + // Prevent build error "window is undefined" but keep working + if (typeof window === "undefined") { + return initialValue; + } + + try { + const item = window.localStorage.getItem(key); + if (!item) return initialValue; + const parsed = JSON.parse(item) as T; + // Handle case where localStorage contains "null" or invalid data + return parsed ?? 
initialValue; + } catch (error) { + console.warn(`Error reading localStorage key "${key}":`, error); + return initialValue; + } + }; + + // State to store our value + // Pass initial state function to useState so logic is only executed once + const [storedValue, setStoredValue] = useState(readValue); + + // Stable setter that updates state; localStorage sync happens in an effect + const setValue = useCallback( + (value: T | ((val: T) => T)) => { + try { + setStoredValue((prev) => + value instanceof Function ? (value as (val: T) => T)(prev) : value + ); + } catch (error) { + console.warn(`Error setting localStorage key "${key}":`, error); + } + }, + [key] + ); + + useEffect(() => { + setStoredValue(readValue()); + // eslint-disable-next-line react-hooks/exhaustive-deps + }, [key]); + + useEffect(() => { + const handleStorageChange = (event: StorageEvent) => { + if (event.key === key) { + // Handle null value (item was removed) + if (event.newValue === null) { + setStoredValue(initialValue); + return; + } + + // Safely parse the new value with error handling + try { + const parsedValue = JSON.parse(event.newValue) as T; + setStoredValue(parsedValue); + } catch (error) { + console.warn(`Error parsing localStorage change for key "${key}":`, error); + // If parsing fails, try to read directly from localStorage + // This handles edge cases where the event data might be corrupted + try { + const item = window.localStorage.getItem(key); + if (item !== null) { + const fallbackValue = JSON.parse(item) as T; + setStoredValue(fallbackValue); + } else { + setStoredValue(initialValue); + } + } catch (fallbackError) { + console.warn(`Fallback parsing also failed for key "${key}":`, fallbackError); + setStoredValue(initialValue); + } + } + } + }; + + // Listen for changes to this local storage key in other tabs/windows + window.addEventListener("storage", handleStorageChange); + return () => { + window.removeEventListener("storage", handleStorageChange); + }; + }, [key, 
initialValue]); + + // Persist to localStorage whenever value or key changes + useEffect(() => { + try { + if (typeof window !== "undefined") { + window.localStorage.setItem(key, JSON.stringify(storedValue)); + } + } catch (error) { + console.warn(`Error persisting localStorage key "${key}":`, error); + } + }, [key, storedValue]); + + return [storedValue, setValue]; +} diff --git a/src/instrumentation-client.ts b/src/instrumentation-client.ts new file mode 100644 index 0000000..469bdc1 --- /dev/null +++ b/src/instrumentation-client.ts @@ -0,0 +1,34 @@ +"use client"; + +import { buildTimeFeatures } from "@/config/features-config"; +import { POSTHOG_RELAY_SLUG } from "@/lib/posthog/posthog-config"; + +/* + * Client Instrumentation (Next.js 15.3+) + * Initializes PostHog on the client before hydration. + * Uses dynamic import to avoid pulling posthog-js into the bundle when disabled. + * @see https://nextjs.org/docs/app/api-reference/file-conventions/instrumentation + */ + +// Guard using build-time feature flags and presence of env at runtime +const posthogEnabled = process.env.NEXT_PUBLIC_FEATURE_POSTHOG_ENABLED; +const posthogKey = process.env.NEXT_PUBLIC_POSTHOG_KEY; +const posthogHost = process.env.NEXT_PUBLIC_POSTHOG_HOST ?? 
POSTHOG_RELAY_SLUG; + +if (posthogEnabled && posthogKey && posthogHost) { + void import("posthog-js") + .then(({ default: posthog }) => { + posthog.init(posthogKey, { + api_host: posthogHost, + ui_host: "https://us.posthog.com", + defaults: "2025-05-24", + }); + + if (process.env.NODE_ENV !== "production") { + (globalThis as unknown as { posthog?: typeof posthog }).posthog = posthog; + } + }) + .catch(() => { + // Silently ignore analytics init failures in client instrumentation + }); +} diff --git a/src/instrumentation.ts b/src/instrumentation.ts new file mode 100644 index 0000000..4d07633 --- /dev/null +++ b/src/instrumentation.ts @@ -0,0 +1,59 @@ +/** + * Next.js instrumentation file + * @see https://nextjs.org/docs/app/api-reference/file-conventions/instrumentation + * WARNING: This needs to load on Node.js AND Edge runtime. + */ + +import { registerOTel } from "@vercel/otel"; +import type { Instrumentation } from "next"; +import { displayLaunchMessage } from "@/lib/utils/kit-launch-message"; + +/** + * Registers OpenTelemetry for observability in the application. + * This function is called once when a new Next.js server instance is initiated. + */ +export function register() { + if (process.env.NEXT_RUNTIME === "nodejs") { + // Initialize payment providers once on server startup + // await import("./instrumentation-node"); + } + + if (process.env.NEXT_RUNTIME === "edge") { + // await import('./instrumentation-edge') + } + + displayLaunchMessage(); + registerOTel({ + serviceName: "shipkit", + // Add any additional configuration options here + }); +} + +/** + * Handles server errors and reports them to a custom observability provider. + * This function is triggered when the Next.js server captures an error. + * + * @param error - The caught error with a unique digest ID. + * @param request - Information about the request that caused the error. + * @param context - The context in which the error occurred. 
+ */ +export const onRequestError: Instrumentation.onRequestError = ( + error, + request, + context, +) => { + console.debug("error", error); + console.debug("request", request); + console.debug("context", context); + // await fetch("https://your-observability-endpoint/report-error", { + // method: "POST", + // body: JSON.stringify({ + // message: error.message, + // request, + // context, + // }), + // headers: { + // "Content-Type": "application/json", + // }, + // }); +}; diff --git a/src/lib/env-utils.ts b/src/lib/env-utils.ts new file mode 100644 index 0000000..16f60c1 --- /dev/null +++ b/src/lib/env-utils.ts @@ -0,0 +1,69 @@ +import { loadEnvConfig } from "@next/env"; +import { logger } from "@/lib/logger"; + +/** + * Load environment variables from .env* files using @next/env + * This is especially useful in build scripts or tests outside of the Next.js runtime + * where Next.js doesn't automatically load the environment + */ +export function loadEnvironment(isDev = process.env.NODE_ENV !== "production") { + try { + const projectDir = process.cwd(); + const { combinedEnv, loadedEnvFiles } = loadEnvConfig(projectDir, isDev); + + if (loadedEnvFiles.length > 0) { + logger.debug(`Loaded environment from ${loadedEnvFiles.length} files`); + + // Log file names in debug mode + loadedEnvFiles.forEach((file) => { + logger.debug(`Loaded env file: ${file.path}`); + }); + + // Return the combined environment + return combinedEnv; + } + logger.warn("No environment files loaded"); + return null; + } catch (error) { + logger.error("Failed to load environment:", error); + return null; + } +} + +/** + * Get an environment variable with a fallback value + * This handles the common pattern of checking if an env var exists and using a default if not + */ +export function getEnvVar(name: string, defaultValue = ""): string { + return process.env[name] || defaultValue; +} + +/** + * Get a boolean environment variable + * This handles the common pattern of converting string env 
vars to booleans + */ +export function getBooleanEnvVar(name: string, defaultValue = false): boolean { + const value = process.env[name]; + if (value === undefined) return defaultValue; + return value === "true" || value === "1"; +} + +/** + * Get a numeric environment variable + * This handles the common pattern of converting string env vars to numbers + */ +export function getNumericEnvVar(name: string, defaultValue = 0): number { + const value = process.env[name]; + if (value === undefined) return defaultValue; + + const parsed = Number(value); + return Number.isNaN(parsed) ? defaultValue : parsed; +} + +/** + * Check if an environment feature flag is enabled + * This is a utility for the common pattern of checking feature flags + */ +export function isFeatureEnabled(featureName: string, defaultValue = false): boolean { + return getBooleanEnvVar(`FEATURE_${featureName.toUpperCase()}_ENABLED`, defaultValue); +} diff --git a/src/lib/performance.ts b/src/lib/performance.ts new file mode 100644 index 0000000..11078c6 --- /dev/null +++ b/src/lib/performance.ts @@ -0,0 +1,178 @@ +/** + * Performance Utilities for Next.js 15 + * + * Helpers for fetch caching, performance monitoring, and optimization + */ + +/** + * Standard fetch cache configurations for different use cases + */ +export const fetchConfigs = { + /** No caching - always fresh data */ + noCache: { cache: "no-store" as const }, + + /** Cache with immediate revalidation check */ + revalidate: { next: { revalidate: 0 } }, + + /** Short-term caching (1 minute) */ + short: { next: { revalidate: 60 } }, + + /** Medium-term caching (5 minutes) */ + medium: { next: { revalidate: 300 } }, + + /** Long-term caching (1 hour) */ + long: { next: { revalidate: 3600 } }, + + /** Static caching (24 hours) */ + static: { next: { revalidate: 86400 } }, + + /** Force caching */ + forceCache: { cache: "force-cache" as const }, +} as const; + +/** + * Segment-level fetch cache configurations + * Use as: export const 
fetchCache = 'default-cache' + */ +export const segmentFetchConfigs = { + defaultCache: "default-cache" as const, + defaultNoStore: "default-no-store" as const, + forceCache: "force-cache" as const, + onlyCache: "only-cache" as const, + forceNoStore: "force-no-store" as const, + onlyNoStore: "only-no-store" as const, +} as const; + +/** + * Performance monitoring helpers + */ +const timers = new Map(); + +export const PerformanceMonitor = { + /** + * Start timing an operation + */ + start(label: string): void { + timers.set(label, performance.now()); + }, + + /** + * End timing and log the duration + */ + end(label: string): number { + const start = timers.get(label); + if (!start) { + console.warn(`Timer '${label}' was not started`); + return 0; + } + + const duration = performance.now() - start; + timers.delete(label); + + if (process.env.NODE_ENV === "development") { + console.log(`⏱️ ${label}: ${duration.toFixed(2)}ms`); + } + + return duration; + }, + + /** + * Measure an async operation + */ + async measure(label: string, operation: () => Promise): Promise { + this.start(label); + try { + const result = await operation(); + this.end(label); + return result; + } catch (error) { + this.end(label); + throw error; + } + }, +}; + +/** + * Web Vitals tracking for Next.js 15 + */ +export interface WebVitalsMetric { + id: string; + name: string; + value: number; + label: "web-vital" | "custom"; + startTime?: number; +} + +/** + * Enhanced fetch helper with automatic caching configuration + */ +export async function optimizedFetch( + url: string, + options: RequestInit & { cacheStrategy?: keyof typeof fetchConfigs } = {} +): Promise { + const { cacheStrategy = "medium", ...fetchOptions } = options; + + const cacheConfig = fetchConfigs[cacheStrategy]; + + if (process.env.NODE_ENV === "development") { + console.log(`🌐 Fetch ${url} with ${cacheStrategy} caching`); + } + + return fetch(url, { + ...fetchOptions, + ...cacheConfig, + }); +} + +/** + * Preload helper for 
critical resources + */ +export function preloadResource(href: string, as: string): void { + if (typeof window !== "undefined") { + const link = document.createElement("link"); + link.rel = "preload"; + link.href = href; + link.as = as; + document.head.appendChild(link); + } +} + +/** + * Resource hints for improved loading + */ +export function addResourceHints(hints: { href: string; rel: string; as?: string }[]): void { + if (typeof window !== "undefined") { + for (const { href, rel, as } of hints) { + const link = document.createElement("link"); + link.rel = rel; + link.href = href; + if (as) link.as = as; + document.head.appendChild(link); + } + } +} + +/** + * Image optimization helper + */ +export function getOptimizedImageSrc(src: string, width: number, quality = 75): string { + // If it's a Next.js optimized image, add parameters + if (src.startsWith("/_next/image")) { + const url = new URL(src, window.location.origin); + url.searchParams.set("w", width.toString()); + url.searchParams.set("q", quality.toString()); + return url.toString(); + } + + return src; +} + +export default { + fetchConfigs, + segmentFetchConfigs, + PerformanceMonitor, + optimizedFetch, + preloadResource, + addResourceHints, + getOptimizedImageSrc, +}; diff --git a/src/lib/request-logger.ts b/src/lib/request-logger.ts new file mode 100644 index 0000000..e6afecf --- /dev/null +++ b/src/lib/request-logger.ts @@ -0,0 +1,48 @@ +import { redisClient as redis } from "@/server/services/redis-service"; + +interface RequestLog { + timestamp: string; + ip: string; + method: string; + path: string; + statusCode: number; + duration: number; + apiKey: string; +} + +export async function logRequest(data: RequestLog) { + if (!redis) { + console.warn("Redis not configured, skipping request logging"); + return; + } + const key = `request-logs:${new Date().toISOString().split("T")[0]}`; + + // Store log in Redis with 30-day expiry + await redis.lpush(key, JSON.stringify(data)); + await redis.expire(key, 
60 * 60 * 24 * 30); // 30 days +} + +export async function getRecentLogs(days = 7): Promise { + if (!redis) { + console.warn("Redis not configured, cannot fetch logs"); + return []; + } + const keys = []; + const now = new Date(); + + // Get keys for the last n days + for (let i = 0; i < days; i++) { + const date = new Date(now); + date.setDate(date.getDate() - i); + keys.push(`request-logs:${date.toISOString().split("T")[0]}`); + } + + // Get all logs + const logs = await Promise.all(keys.map((key) => redis?.lrange(key, 0, -1) ?? [])); + + // Parse and flatten logs + return logs + .flat() + .map((log) => JSON.parse(log)) + .sort((a, b) => new Date(b.timestamp).getTime() - new Date(a.timestamp).getTime()); +} diff --git a/src/lib/sitemap.ts b/src/lib/sitemap.ts new file mode 100644 index 0000000..93c9083 --- /dev/null +++ b/src/lib/sitemap.ts @@ -0,0 +1,67 @@ +import type { MetadataRoute } from "next"; +import { routes } from "@/config/routes"; +import { siteConfig } from "@/config/site-config"; + +interface SitemapEntry { + url: string; + lastModified?: string | Date; + changeFrequency?: "always" | "hourly" | "daily" | "weekly" | "monthly" | "yearly" | "never"; + priority?: number; +} + +export async function generateSitemapEntries(): Promise { + const entries: SitemapEntry[] = []; + + // High priority static routes + const highPriorityRoutes = [routes.home, routes.docs, routes.features, routes.pricing].map( + (route) => ({ + url: `${siteConfig.url}${route}`, + lastModified: new Date().toISOString(), + changeFrequency: "daily" as const, + priority: 1, + }) + ); + + // Medium priority static routes + const mediumPriorityRoutes = [routes.faq, routes.download].map((route) => ({ + url: `${siteConfig.url}${route}`, + lastModified: new Date().toISOString(), + changeFrequency: "weekly" as const, + priority: 0.8, + })); + + // Low priority static routes + const lowPriorityRoutes = [routes.terms, routes.privacy].map((route) => ({ + url: `${siteConfig.url}${route}`, + 
lastModified: new Date().toISOString(), + changeFrequency: "monthly" as const, + priority: 0.5, + })); + + // Example routes + const exampleRoutes = Object.values(routes.examples) + .filter( + (route): route is string => typeof route === "string" && route !== routes.examples.index + ) + .map((route) => ({ + url: `${siteConfig.url}${route}`, + lastModified: new Date().toISOString(), + changeFrequency: "weekly" as const, + priority: 0.7, + })); + + // Add all entries + entries.push( + ...highPriorityRoutes, + ...mediumPriorityRoutes, + ...lowPriorityRoutes, + ...exampleRoutes + ); + + return entries; +} + +export async function generateSitemap(): Promise { + const entries = await generateSitemapEntries(); + return entries; +} diff --git a/src/lib/utils/capitalize.ts b/src/lib/utils/capitalize.ts new file mode 100644 index 0000000..19c83ce --- /dev/null +++ b/src/lib/utils/capitalize.ts @@ -0,0 +1,3 @@ +export function capitalize(str: string) { + return str.charAt(0).toUpperCase() + str.slice(1); +} diff --git a/src/lib/utils/colors.ts b/src/lib/utils/colors.ts new file mode 100644 index 0000000..e9eae15 --- /dev/null +++ b/src/lib/utils/colors.ts @@ -0,0 +1,34 @@ +export interface WaveConfig { + frequency: number; + amplitude: number; + saturation: number; + brightness: number; +} + +export const generateRainbowColor = (offset: number): string => { + const r = Math.sin(0.3 * offset) * 127 + 128; + const g = Math.sin(0.3 * offset + 2) * 127 + 128; + const b = Math.sin(0.3 * offset + 4) * 127 + 128; + return `rgb(${r},${g},${b})`; +}; + +export const calculateWaveY = ( + x: number, + time: number, + layer: number, + height: number, + config: WaveConfig +): number => { + const baseY = height / 2; + return ( + baseY + + Math.sin(x * config.frequency * 0.01 + time + layer) * (50 * config.amplitude) + + Math.sin(x * config.frequency * 0.02 + time * 1.2) * (30 * config.amplitude) + + Math.sin(x * config.frequency * 0.003 + time * 0.3) * (100 * config.amplitude) + ); +}; + 
+export const getWaveColor = (time: number, layer: number, config: WaveConfig): string => { + const hue = (time + layer * 120) % 360; + return `hsl(${hue}, ${config.saturation}%, ${config.brightness}%)`; +}; diff --git a/src/lib/utils/email-utils.ts b/src/lib/utils/email-utils.ts new file mode 100644 index 0000000..71dc35a --- /dev/null +++ b/src/lib/utils/email-utils.ts @@ -0,0 +1,83 @@ +import { adminConfig } from "@/config/admin-config"; + +interface MailtoOptions { + to?: string | string[]; + subject?: string; + body?: string; + cc?: string | string[]; + bcc?: string | string[]; +} + +/** + * Generate a mailto link with proper URL encoding + * @param options - Email options for the mailto link + * @returns Formatted mailto URL + */ +export function generateMailtoLink(options: MailtoOptions): string { + const params = new URLSearchParams(); + + // Handle recipients (to) + if (options.to) { + const recipients = Array.isArray(options.to) ? options.to.join(",") : options.to; + params.set("to", recipients); + } + + // Handle subject + if (options.subject) { + params.set("subject", options.subject); + } + + // Handle body + if (options.body) { + params.set("body", options.body); + } + + // Handle CC + if (options.cc) { + const ccRecipients = Array.isArray(options.cc) ? options.cc.join(",") : options.cc; + params.set("cc", ccRecipients); + } + + // Handle BCC + if (options.bcc) { + const bccRecipients = Array.isArray(options.bcc) ? options.bcc.join(",") : options.bcc; + params.set("bcc", bccRecipients); + } + + // Build the mailto URL + const baseMailto = "mailto:"; + const queryString = params.toString(); + + return queryString ? `${baseMailto}?${queryString}` : baseMailto; +} + +/** + * Generate a feedback mailto link using admin emails + * @param content - The feedback content + * @param source - Source of the feedback (dialog, popover, etc.) 
+ * @returns Formatted mailto URL for feedback + */ +export function generateFeedbackMailto(content: string, source: string): string { + const adminEmails = adminConfig.emails; + const subject = `Feedback from ${source} - Shipkit`; + + return generateMailtoLink({ + to: adminEmails[0], // Use first admin email as primary recipient + cc: adminEmails.length > 1 ? adminEmails.slice(1) : undefined, + subject, + body: content, + }); +} + +/** + * Check if email service is available (Resend configured) + * This function is for server-side use only + * @returns Boolean indicating if email service is available + */ +export function isEmailServiceAvailable(): boolean { + if (typeof window !== "undefined") { + // Client-side: we can't access server env vars + return false; + } + return !!(process.env.RESEND_API_KEY || process.env.RESEND_API_KEY); +} diff --git a/src/lib/utils/extract-headings.ts b/src/lib/utils/extract-headings.ts new file mode 100644 index 0000000..a9ac58c --- /dev/null +++ b/src/lib/utils/extract-headings.ts @@ -0,0 +1,146 @@ +export interface Heading { + id: string; + text: string; + level: number; +} + +/** + * Cache for heading extraction results + * Key: content hash, Value: { headings, timestamp } + */ +interface HeadingCacheEntry { + headings: Heading[]; + timestamp: number; +} + +// In-memory cache for heading extraction +const headingCache = new Map(); + +// Cache configuration +const CACHE_TTL = 1000 * 60 * 60; // 1 hour +const MAX_CACHE_SIZE = 1000; // Maximum number of cached entries + +/** + * Generate a simple hash for content caching + */ +function generateContentHash(content: string): string { + let hash = 0; + if (content.length === 0) return hash.toString(); + + for (let i = 0; i < content.length; i++) { + const char = content.charCodeAt(i); + hash = (hash << 5) - hash + char; + hash = hash & hash; // Convert to 32-bit integer + } + + return Math.abs(hash).toString(36); +} + +/** + * Clean expired cache entries + */ +function 
cleanExpiredCache(): void { + const now = Date.now(); + for (const [key, entry] of headingCache.entries()) { + if (now - entry.timestamp > CACHE_TTL) { + headingCache.delete(key); + } + } +} + +/** + * Ensure cache size doesn't exceed maximum + */ +function ensureCacheSize(): void { + if (headingCache.size > MAX_CACHE_SIZE) { + // Remove oldest entries (simple FIFO) + const entries = Array.from(headingCache.entries()); + entries.sort((a, b) => a[1].timestamp - b[1].timestamp); + + const toRemove = entries.slice(0, headingCache.size - MAX_CACHE_SIZE + 1); + toRemove.forEach(([key]) => headingCache.delete(key)); + } +} + +/** + * Generate a URL-friendly slug from text + */ +export function slugify(text: string): string { + return text + .toLowerCase() + .replace(/[^\w\s-]/g, "") // Remove special characters + .replace(/[\s_-]+/g, "-") // Replace spaces and underscores with hyphens + .replace(/^-+|-+$/g, ""); // Remove leading/trailing hyphens +} + +/** + * Extract headings from MDX content (with caching) + */ +export function extractHeadings(content: string): Heading[] { + // Generate cache key + const cacheKey = generateContentHash(content); + + // Check cache first + const cached = headingCache.get(cacheKey); + if (cached && Date.now() - cached.timestamp < CACHE_TTL) { + return cached.headings; + } + + // Extract headings if not in cache or expired + const headingRegex = /^(#{1,6})\s+(.+)$/gm; + const headings: Heading[] = []; + let match; + + while ((match = headingRegex.exec(content)) !== null) { + const level = match[1]?.length ?? 0; + const text = match[2]?.trim() ?? 
""; + const id = slugify(text); + + headings.push({ + id, + text, + level, + }); + } + + // Cache the result + headingCache.set(cacheKey, { + headings, + timestamp: Date.now(), + }); + + // Perform cache maintenance + cleanExpiredCache(); + ensureCacheSize(); + + return headings; +} + +/** + * Clear the heading extraction cache + */ +export function clearHeadingCache(): void { + headingCache.clear(); +} + +/** + * Get cache statistics (useful for debugging) + */ +export function getHeadingCacheStats(): { + size: number; + maxSize: number; + ttl: number; +} { + return { + size: headingCache.size, + maxSize: MAX_CACHE_SIZE, + ttl: CACHE_TTL, + }; +} + +/** + * Filter headings by level range (useful for TOC depth control) + */ +export function filterHeadingsByLevel(headings: Heading[], minLevel = 1, maxLevel = 4): Heading[] { + return headings.filter((heading) => heading.level >= minLevel && heading.level <= maxLevel); +} diff --git a/src/lib/utils/format-date.ts b/src/lib/utils/format-date.ts new file mode 100644 index 0000000..031c063 --- /dev/null +++ b/src/lib/utils/format-date.ts @@ -0,0 +1,70 @@ +/** + * Safely formats a date string for display + * @param date - Date string, Date object, or null/undefined + * @returns Formatted date string or empty string for invalid dates + */ +export function formatDate(date: string | Date | null | undefined): string { + if (!date) return ""; + + try { + const dateObj = typeof date === "string" ? 
new Date(date) : date; + + // Check if date is valid + if (Number.isNaN(dateObj.getTime())) { + return ""; + } + + return dateObj.toLocaleDateString("en-US", { + month: "long", + day: "numeric", + year: "numeric", + }); + } catch { + return ""; + } +} + +/** + * Safely formats a date for HTML datetime attribute + * @param date - Date string, Date object, or null/undefined + * @returns ISO string or empty string for invalid dates + */ +export function formatDateTimeAttribute(date: string | Date | null | undefined): string { + if (!date) return ""; + + try { + const dateObj = typeof date === "string" ? new Date(date) : date; + + // Check if date is valid + if (Number.isNaN(dateObj.getTime())) { + return ""; + } + + return dateObj.toISOString(); + } catch { + return ""; + } +} + +/** + * Safely formats a date to YYYY-MM-DD format for blog display + * @param date - Date string, Date object, or null/undefined + * @returns Date string in YYYY-MM-DD format or empty string for invalid dates + */ +export function formatDateForBlog(date: string | Date | null | undefined): string { + if (!date) return ""; + + try { + const dateObj = typeof date === "string" ? new Date(date) : date; + + // Check if date is valid + if (Number.isNaN(dateObj.getTime())) { + return ""; + } + + const isoString = dateObj.toISOString(); + return isoString?.split("T")[0] ?? 
""; + } catch { + return ""; + } +} diff --git a/src/lib/utils/redirect.ts b/src/lib/utils/redirect.ts new file mode 100644 index 0000000..3ad448f --- /dev/null +++ b/src/lib/utils/redirect.ts @@ -0,0 +1,75 @@ +import { redirect as nextRedirect } from "next/navigation"; +import { NextResponse } from "next/server"; +import type { Route } from "next"; +import { BASE_URL } from "../../config/base-url"; +import { SEARCH_PARAM_KEYS } from "../../config/search-param-keys"; +import { logger } from "../logger"; + +interface RedirectOptions { + code?: string; + nextUrl?: string; +} + +export function createRedirectUrl(pathname: string, options?: RedirectOptions): string { + const url = new URL(pathname, BASE_URL); + if (options?.code) { + url.searchParams.set(SEARCH_PARAM_KEYS.statusCode, options.code); + } + if (options?.nextUrl) { + url.searchParams.set(SEARCH_PARAM_KEYS.nextUrl, options.nextUrl); + } + return url.pathname + url.search; +} + +export function redirect(pathname: string, options?: RedirectOptions) { + const url = createRedirectUrl(pathname, options); + return nextRedirect(url); +} + +export function routeRedirect( + destination: string, + options?: string | { code?: string; nextUrl?: string; request?: Request } +) { + if (!options) { + return NextResponse.redirect(destination); + } + + let url: URL; + + if (typeof options === "string") { + url = new URL(destination, BASE_URL); + url.searchParams.set(SEARCH_PARAM_KEYS.statusCode, options); + } else { + const baseUrl = options.request?.url || BASE_URL; + url = new URL(destination, baseUrl); + + if (options?.nextUrl) { + url.searchParams.set(SEARCH_PARAM_KEYS.nextUrl, options.nextUrl); + } + + if (options?.code) { + url.searchParams.set(SEARCH_PARAM_KEYS.statusCode, options.code); + } + } + + logger.info(`routeRedirect: Redirecting to ${url}`); + return NextResponse.redirect(url); +} + +export interface Redirect { + source: Route; + destination: Route; + permanent: boolean; +} + +export const createRedirects = 
( + sources: Route[], + destination: Route, + permanent = false +): Redirect[] => { + if (!sources.length) return []; + + return sources + .filter((source) => source !== destination) + .map((source) => ({ source, destination, permanent })); +}; diff --git a/src/lib/utils/url-utils.ts b/src/lib/utils/url-utils.ts new file mode 100644 index 0000000..4f72a1e --- /dev/null +++ b/src/lib/utils/url-utils.ts @@ -0,0 +1,26 @@ +/** + * Validate that a URL is properly formatted + */ +export function validateUrl(url: string): boolean { + try { + new URL(url); + return true; + } catch { + return false; + } +} + +/** + * Check if we're running in production + */ +export const isProduction = process.env.NODE_ENV === "production"; + +/** + * Check if we're running on Vercel + */ +export const isVercel = Boolean(process.env.VERCEL); + +/** + * Get the current environment + */ +export const environment = process.env.NODE_ENV || "development"; diff --git a/src/workers/logger-worker.ts b/src/workers/logger-worker.ts new file mode 100644 index 0000000..b1de957 --- /dev/null +++ b/src/workers/logger-worker.ts @@ -0,0 +1,58 @@ +/// + +const API_URL = process.env.NEXT_PUBLIC_LOGGER_URL || "https://log.bones.sh/v1"; + +interface LogData { + level: "info" | "warn" | "error"; + message: string; + timestamp: number; + metadata?: Record; +} + +/** + * Logger Worker + * This worker handles logging operations in batches to reduce API calls. + * It will automatically flush logs every 5 seconds or when reaching batch size. 
+ */ + +const logQueue: LogData[] = []; +const MAX_BATCH_SIZE = 10; +const FLUSH_INTERVAL = 5000; // 5 seconds + +const flushLogs = async (): Promise => { + if (logQueue.length === 0) { + return; + } + + const logsToSend = logQueue.splice(0, MAX_BATCH_SIZE); + try { + const response = await fetch(API_URL, { + method: "POST", + headers: { + "Content-Type": "application/json", + Accept: "application/json", + }, + body: JSON.stringify(logsToSend), + }); + + if (!response.ok) { + throw new Error(`HTTP error! status: ${response.status}`); + } + } catch (error) { + // Re-add failed logs to the front of the queue + logQueue.unshift(...logsToSend); + } +}; + +// Set up periodic flush +setInterval(flushLogs, FLUSH_INTERVAL); + +self.addEventListener("message", (event: MessageEvent<{ logData: LogData }>) => { + const { logData } = event.data; + logQueue.push(logData); + + // Flush immediately if we've reached batch size + if (logQueue.length >= MAX_BATCH_SIZE) { + void flushLogs(); + } +}); diff --git a/tests/README.md b/tests/README.md new file mode 100644 index 0000000..3f1dd39 --- /dev/null +++ b/tests/README.md @@ -0,0 +1,117 @@ +# Testing in Shipkit + +This directory contains tests for the Shipkit application. 
The test suite is organized into different categories: + +## Directory Structure + +- `unit/`: Unit tests for individual functions and components + - `utils/`: Tests for utility functions + - `server/`: Tests for server-side code +- `e2e/`: End-to-end tests +- `browser/`: Browser-specific tests +- `node/`: Node.js-specific tests + +## Test Commands + +```bash +# Run all unit tests +bun run test + +# Run Node.js specific tests +bun run test:node + +# Run browser tests +bun run test:browser + +# Run specific test file +bun run test tests/unit/utils/standalone-test.test.ts + +# Run tests with coverage +bun run test:coverage +``` + +## Test Structure + +- `unit/`: Unit tests for functions and components that run in jsdom environment +- `node/`: Tests that require Node.js environment (e.g., server components, API routes) +- `browser/`: Tests that need a real browser environment via Playwright +- `e2e/`: End-to-end tests with Playwright + +## Environment Setup + +Our test setup minimizes mocking while ensuring tests run reliably: + +- `setup-env.ts`: Loads environment variables properly in both Node.js and browser environments +- `setup.ts`: Contains minimal required mocks for testing React components +- `utils.tsx`: Provides testing utilities including custom render functions with providers + +### Environment Variables + +Tests use a unified approach to environment variables that works in both Node.js and browser environments: + +- In Node.js tests, environment variables are loaded using Next.js's own `loadEnvConfig` from `@next/env` +- In browser tests, environment variables are automatically processed at build time +- The test environment sets `NODE_ENV=test` to ensure proper loading of `.env.test` files + +Create a `.env.test` file to define test-specific variables that won't affect your production environment. + +## Adding Tests + +1. 
Identify the appropriate test directory based on your test's requirements: + - Regular component or utility tests go in `unit/` + - Server component or API tests go in `node/` + - Tests requiring a real browser go in `browser/` + - Full end-to-end testing scenarios go in `e2e/` + +2. Write focused tests that: + - Test functionality, not implementation details + - Are independent of other tests + - Don't rely on external services (use minimal mocks if needed) + +## Testing Approach + +### Unit Tests + +Unit tests focus on testing individual functions and components in isolation. They are the easiest to write and maintain, and provide the most value for the least amount of effort. + +Some key principles for unit tests: + +- Each test should be independent and not rely on the state of other tests +- Tests should be fast and not require external services +- Focus on testing the most important logic and edge cases + +### Environment Handling + +Tests use Next.js's built-in `loadEnvConfig` from `@next/env` to load environment variables, ensuring consistent behavior with the actual application. + +- During tests, NODE_ENV is set to "test" +- Environment variables are loaded in the following order: + 1. process.env + 2. .env.test.local + 3. .env.test + 4. .env + +For browser-specific tests (`bun run test:browser`), the browser environment is set up with Playwright and the same environment variables are available. + +For Node.js-specific tests (`bun run test:node`), tests run in a Node environment with access to the same environment variables. + +### Adding New Tests + +1. Look for self-contained utility functions that don't require mocks +2. Write tests focusing on different input/output combinations +3. Test edge cases and error handling +4. 
For components, focus on rendering behavior and not implementation details + +## Mocking Approach + +- Tests that would normally interact with the database use `safeDbExecute` to gracefully handle missing database connections +- Environment variables are handled with test-specific utilities +- External services are mocked to avoid making actual API calls + +## Areas for Additional Testing + +- React components (both server and client) +- API endpoints +- Authentication flows +- Data fetching utilities +- Server actions diff --git a/tests/setup-env.ts b/tests/setup-env.ts new file mode 100644 index 0000000..a585806 --- /dev/null +++ b/tests/setup-env.ts @@ -0,0 +1,52 @@ +// Environment setup for tests +// This handles both browser and Node.js environments + +// Set test-specific environment variables +process.env = { + ...process.env, + NODE_ENV: "test", + SKIP_ENV_VALIDATION: "1", +}; + +// Only load Next.js environment config in Node.js environment +// Browser environments already have environment variables processed at build time +if (typeof window === "undefined") { + try { + // Dynamic import to avoid browser compatibility issues + import("@next/env") + .then(({ loadEnvConfig }) => { + loadEnvConfig(process.cwd()); + }) + .catch((error) => { + console.warn("Error loading environment variables:", error); + }); + + // Patch next-auth test runtime when Next.js module pathing differs + // Some versions expect next/server; in Vitest we can noop this + try { + // eslint-disable-next-line @typescript-eslint/no-var-requires + require.resolve("next/server"); + } catch { + // Map bare import "next/server" to our JS shim so next-auth/env can import it safely + // eslint-disable-next-line @typescript-eslint/no-var-requires + const Module = require("module"); + // eslint-disable-next-line @typescript-eslint/no-var-requires + const path = require("path"); + const originalResolve = Module._resolveFilename; + const shimPath = path.resolve(__dirname, "./shims/next-server.js"); 
+ Module._resolveFilename = function ( + request: string, + parent: unknown, + isMain: boolean, + options: any + ) { + if (request === "next/server") { + return shimPath; + } + return originalResolve.call(this, request, parent, isMain, options); + } as any; + } + } catch (error) { + console.warn("Error importing @next/env:", error); + } +} diff --git a/tests/setup.ts b/tests/setup.ts new file mode 100644 index 0000000..9ae044a --- /dev/null +++ b/tests/setup.ts @@ -0,0 +1,73 @@ +import "@testing-library/jest-dom"; +import * as matchers from "@testing-library/jest-dom/matchers"; +import { cleanup } from "@testing-library/react"; +import { afterEach, beforeAll, expect, vi } from "vitest"; + +// Augment the global namespace for TypeScript +declare global { + // biome-ignore lint/style/noVar: + var IS_REACT_ACT_ENVIRONMENT: boolean; +} + +// Set React testing environment +global.IS_REACT_ACT_ENVIRONMENT = true; + +// Extend Vitest's expect method with testing-library methods +expect.extend(matchers); + +// Cleanup after each test case +afterEach(() => { + cleanup(); + vi.clearAllMocks(); +}); + +// Mock database module only if needed for specific tests +// This allows tests to run without a database connection +vi.mock("@/server/db", () => ({ + db: undefined, + isDatabaseInitialized: async () => false, + safeDbExecute: async (callback: Function, defaultValue: any) => defaultValue, +})); + +// Suppress specific console errors during tests +beforeAll(async () => { + const originalError = console.error; + console.error = (...args: unknown[]) => { + if ( + typeof args[0] === "string" && + (args[0].includes("Warning: ReactDOM.render is no longer supported") || + args[0].includes("Invariant: AsyncLocalStorage accessed in runtime")) + ) { + return; + } + originalError.call(console, ...args); + }; +}); + +/* + * Mock Next.js router + * This is needed for components that use useRouter + */ +vi.mock("next/navigation", () => ({ + useRouter: () => ({ + push: vi.fn(), + replace: 
vi.fn(), + prefetch: vi.fn(), + back: vi.fn(), + forward: vi.fn(), + refresh: vi.fn(), + pathname: "/", + query: {}, + }), + usePathname: () => "/", + useSearchParams: () => new URLSearchParams(), +})); + +/* + * Mock Next.js image component + * This is needed for components that use next/image + */ +vi.mock("next/image", () => ({ + __esModule: true, + default: vi.fn().mockImplementation(() => null), +})); diff --git a/tests/shims/next-server.js b/tests/shims/next-server.js new file mode 100644 index 0000000..c2367ed --- /dev/null +++ b/tests/shims/next-server.js @@ -0,0 +1,13 @@ +// Minimal CommonJS shim for next/server to satisfy next-auth/env during unit tests +class NextRequest {} +const NextResponse = { + json: (body, init) => ({ body, init }), + redirect: (url) => ({ url }), +}; +function headers() { + return new Headers(); +} +function cookies() { + return { get: () => undefined }; +} +module.exports = { NextRequest, NextResponse, headers, cookies }; diff --git a/tests/shims/next-server.ts b/tests/shims/next-server.ts new file mode 100644 index 0000000..633fc4f --- /dev/null +++ b/tests/shims/next-server.ts @@ -0,0 +1,23 @@ +// Minimal Next.js server stubs for unit tests +// This avoids import errors from next-auth/env importing "next/server" in Vitest + +export class NextRequest {} + +export const NextResponse = { + json: (body?: unknown, init?: unknown) => ({ body, init }), + redirect: (url: string) => ({ url }), +}; + +export function headers(): Headers { + return new Headers(); +} + +export function cookies(): { get: (name: string) => undefined } { + return { get: () => undefined }; +} + +// Also provide CommonJS compatibility for require() from next-auth/env +// @ts-expect-error +module.exports = { NextRequest, NextResponse, headers, cookies }; + +export default {} as any; diff --git a/tests/utils.tsx b/tests/utils.tsx new file mode 100644 index 0000000..c79019c --- /dev/null +++ b/tests/utils.tsx @@ -0,0 +1,27 @@ +import type { RenderOptions } from 
"@testing-library/react"; +import { render } from "@testing-library/react"; +import type { ReactElement, ReactNode } from "react"; +import { AppRouterLayout } from "../src/components/layouts/app-router-layout"; + +// Mock ResizeObserver which is not available in test environment +class ResizeObserverMock { + observe() {} + unobserve() {} + disconnect() {} +} + +global.ResizeObserver = ResizeObserverMock; + +// Create a wrapper component that includes providers +function TestWrapper({ children }: { children: ReactNode }) { + return {children}; +} + +// Create a custom render function that includes the wrapper +function customRender(ui: ReactElement, options?: Omit) { + return render(ui, { wrapper: TestWrapper, ...options }); +} + +// Re-export everything +export * from "@testing-library/react"; +export { customRender as render }; diff --git a/tsconfig.disabled.json b/tsconfig.disabled.json new file mode 100644 index 0000000..7c14073 --- /dev/null +++ b/tsconfig.disabled.json @@ -0,0 +1,45 @@ +{ + "compilerOptions": { + "allowJs": true, + "checkJs": false, + "esModuleInterop": true, + "isolatedModules": true, + "moduleDetection": "force", + "skipLibCheck": true, + "resolveJsonModule": true, + "target": "ES2022", + + /* Bundled projects */ + "lib": ["dom", "dom.iterable", "ES2022"], + "noEmit": true, + "module": "ESNext", + "moduleResolution": "Bundler", + "jsx": "preserve", + "plugins": [{ "name": "next" }], + "incremental": true, + "composite": true, + "strict": false, + "noImplicitAny": false, + "strictNullChecks": false, + "strictFunctionTypes": false, + "strictBindCallApply": false, + "strictPropertyInitialization": false, + "noImplicitThis": false, + "useUnknownInCatchVariables": false, + "alwaysStrict": false, + "noUnusedLocals": false, + "noUnusedParameters": false, + "exactOptionalPropertyTypes": false, + "noImplicitReturns": false, + "noFallthroughCasesInSwitch": false, + "noUncheckedIndexedAccess": false, + "noImplicitOverride": false, + 
"noPropertyAccessFromIndexSignature": false, + "baseUrl": ".", + "paths": { + "@/*": ["./src/*"] + } + }, + "include": ["src/app/(app)/(demo)/examples/**/*", "src/app/(app)/ai/**/*"], + "exclude": ["node_modules"] +} diff --git a/tsconfig.workers.json b/tsconfig.workers.json new file mode 100644 index 0000000..a49c8ca --- /dev/null +++ b/tsconfig.workers.json @@ -0,0 +1,19 @@ +{ + "extends": "./tsconfig.json", + "compilerOptions": { + "outDir": "./public/workers", + "target": "ES6", + "module": "preserve", + "moduleResolution": "Bundler", + "lib": ["webworker", "DOM", "ES6"], + "strict": false, + "sourceMap": true, + "types": ["node", "ws"], + "allowImportingTsExtensions": false, + "downlevelIteration": true, + "isolatedModules": false, + "noEmit": false + }, + "include": ["src/workers/**/*.ts"], + "exclude": ["node_modules"] +} diff --git a/vercel.json b/vercel.json new file mode 100644 index 0000000..be7b53c --- /dev/null +++ b/vercel.json @@ -0,0 +1,43 @@ +{ + "$schema": "https://openapi.vercel.sh/vercel.json", + "rewrites": [ + { + "source": "/ai", + "destination": "https://ai-offline.vercel.app/" + }, + { + "source": "/ai/:match*", + "destination": "https://ai-offline.vercel.app/:match*" + } + ], + + "buildCommand": "bun run build:vercel", + "devCommand": "bun run dev", + "installCommand": "bun install --frozen-lockfile", + "cleanUrls": true, + "trailingSlash": false, + + "functions": { + "src/app/api/**/*": { + "maxDuration": 10, + "excludeFiles": "{.next,*.cache,node_modules/eslint/**,node_modules/@types/**,node_modules/typescript/**,node_modules/three/**,node_modules/@react-three/**,node_modules/@huggingface/**,public,app}/**" + } + }, + + "crons": [], + "headers": [ + { + "source": "/install", + "headers": [ + { + "key": "Cross-Origin-Embedder-Policy", + "value": "require-corp" + }, + { + "key": "Cross-Origin-Opener-Policy", + "value": "same-origin" + } + ] + } + ] +} diff --git a/vibe-tools.config.json b/vibe-tools.config.json new file mode 100644 index 
0000000..371f4b8 --- /dev/null +++ b/vibe-tools.config.json @@ -0,0 +1,21 @@ +{ + "web": { + "provider": "perplexity", + "model": "sonar-pro" + }, + "plan": { + "fileProvider": "anthropic", + "thinkingProvider": "anthropic", + "fileModel": "claude-3-7-sonnet", + "thinkingModel": "claude-3-7-sonnet" + }, + "repo": { + "provider": "gemini", + "model": "gemini-2.5-pro-exp-03-25" + }, + "doc": { + "provider": "gemini", + "model": "gemini-2.5-pro-exp-03-25" + }, + "ide": "cursor" +} \ No newline at end of file diff --git a/vitest.config.browser.ts b/vitest.config.browser.ts new file mode 100644 index 0000000..c5b50bd --- /dev/null +++ b/vitest.config.browser.ts @@ -0,0 +1,23 @@ +import react from "@vitejs/plugin-react"; +import path from "path"; +import tsconfigPaths from "vite-tsconfig-paths"; +import { defineConfig } from "vitest/config"; + +export default defineConfig({ + plugins: [react(), tsconfigPaths()], + resolve: { + alias: { + "@": path.resolve(__dirname, "./src"), + }, + }, + test: { + include: ["tests/browser/**/*.test.{ts,tsx}"], + watch: false, + setupFiles: ["./tests/setup-env.ts", "./tests/setup.ts"], + browser: { + enabled: true, + name: "chromium", + provider: "playwright", + }, + }, +}); diff --git a/vitest.config.node.ts b/vitest.config.node.ts new file mode 100644 index 0000000..cefa3f7 --- /dev/null +++ b/vitest.config.node.ts @@ -0,0 +1,25 @@ +import path from "node:path"; +import react from "@vitejs/plugin-react"; +import tsconfigPaths from "vite-tsconfig-paths"; +import { defineConfig } from "vitest/config"; + +export default defineConfig({ + plugins: [react(), tsconfigPaths()], + resolve: { + alias: { + "@": path.resolve(__dirname, "./src"), + }, + }, + test: { + environment: "node", + globals: true, + setupFiles: ["./tests/setup-env.ts", "./tests/setup.ts"], + include: ["tests/node/**/*.test.{ts,tsx}"], + watch: false, + coverage: { + provider: "v8", + reporter: ["text", "json", "html"], + exclude: ["node_modules/**", "src/test/**", 
"**/*.d.ts", "**/*.config.ts", "**/types/**"], + }, + }, +}); diff --git a/vitest.config.ts b/vitest.config.ts new file mode 100644 index 0000000..e0d9e50 --- /dev/null +++ b/vitest.config.ts @@ -0,0 +1,62 @@ +/** + * @fileoverview Vitest configuration for unit tests in Shipkit + * @module vitest.config + * + * This configuration sets up the testing environment for React components and utilities. + * It's optimized for testing components that use React hooks, Tailwind CSS, and Next.js features. + * + * Key features: + * - JSDOM environment for React component testing + * - TypeScript path resolution (@/* imports) + * - Coverage reporting with v8 provider + * - Global test utilities (describe, it, expect) + * + * Test structure: + * - Unit tests: tests/unit/** + * @see vitest.config.browser.ts - For browser-based testing + * @see vitest.config.node.ts - For Node.js-specific testing + */ + +import path from "node:path"; +import react from "@vitejs/plugin-react"; +import tsconfigPaths from "vite-tsconfig-paths"; +import { defineConfig } from "vitest/config"; + +export default defineConfig({ + plugins: [ + react(), // React JSX transformation and Fast Refresh + tsconfigPaths(), // TypeScript path mapping support + ], + resolve: { + alias: { + "@": path.resolve(__dirname, "./src"), // Resolve @/* imports to src/* + // Alias next/server to a test shim to avoid ESM resolution issues in next-auth during unit tests + "next/server": path.resolve(__dirname, "./tests/shims/next-server.ts"), + }, + }, + test: { + environment: "jsdom", // DOM environment for React component testing + globals: true, // Enable global test functions (describe, it, expect) + setupFiles: [ + "./tests/setup-env.ts", // Environment variables for testing + "./tests/setup.ts", // Testing utilities and global setup + ], + include: ["tests/unit/**/*.test.{ts,tsx}"], // Only unit tests in this config + exclude: [ + // Exclude brittle suite that imports next-auth env and requires real Next runtime + 
"tests/unit/server/actions/deploy-private-repo.test.ts", + ], + watch: false, // Disable watch mode for CI/CD compatibility + coverage: { + provider: "v8", // Fast coverage provider + reporter: ["text", "json", "html"], // Multiple coverage output formats + exclude: [ + "node_modules/**", + "src/test/**", // Test utilities + "**/*.d.ts", // Type definitions + "**/*.config.ts", // Configuration files + "**/types/**", // Type-only files + ], + }, + }, +});