Skip to content
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
23 changes: 14 additions & 9 deletions apps/chat/.env.example
Original file line number Diff line number Diff line change
Expand Up @@ -41,16 +41,21 @@ CALCOM_WEBHOOK_SECRET=your-calcom-webhook-secret
CALCOM_APP_URL=https://app.cal.com

# ─── AI / LLM ────────────────────────────────────────────────────────────────
# Groq (fast inference). Get your key at console.groq.com
# Which AI provider to use. Options: groq | openai | anthropic | google
AI_PROVIDER=groq

# Model to use (optional). Each provider has a sensible default:
# groq: llama-3.3-70b-versatile
# openai: gpt-4o-mini
# anthropic: claude-haiku-4-5
# google: gemini-2.0-flash
# AI_MODEL=

# Provider API keys — only the key for your chosen AI_PROVIDER is required.
GROQ_API_KEY=your-groq-api-key
# Model to use (optional). Default: openai/gpt-oss-120b
# AI_MODEL=openai/gpt-oss-120b
#
# If you hit Groq rate limits (TPD), try a Groq model with a separate quota:
# AI_MODEL=llama-3.3-70b-versatile
# AI_MODEL=llama-3.1-8b-instant
#
# To switch providers entirely, set AI_PROVIDER above (groq | openai | anthropic | google).
# OPENAI_API_KEY=your-openai-api-key
# ANTHROPIC_API_KEY=your-anthropic-api-key
# GOOGLE_GENERATIVE_AI_API_KEY=your-google-api-key

# ─── App ──────────────────────────────────────────────────────────────────────
# Your deployed app URL (used for OAuth redirect URI and install page CTA)
Expand Down
10 changes: 7 additions & 3 deletions apps/chat/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -33,7 +33,7 @@ app/
lib/
bot.ts # Chat instance + all event handlers
agent.ts # AI agent tools (bookings, availability, etc.)
ai-provider.ts # AI provider config (Groq by default; swap to OpenAI, Anthropic, etc.)
ai-provider.ts # AI provider config (Groq, OpenAI, Anthropic, Google — set via AI_PROVIDER env var)
notifications.ts # Booking notification card builders
user-linking.ts # Redis: platform user <-> Cal.com account linking + token refresh
format-for-telegram.ts # Converts markdown/cards to Telegram-safe HTML
Expand Down Expand Up @@ -106,8 +106,12 @@ cp .env.example .env
| `CALCOM_WEBHOOK_SECRET` | ✅ | Set in Cal.com → Settings → Webhooks |
| `CALCOM_APP_URL` | ✅ | `https://app.cal.com` |
| `NEXT_PUBLIC_APP_URL` | ✅ | Your deployed app URL (used for OAuth redirects and install page) |
| `GROQ_API_KEY` | ✅ | From [console.groq.com](https://console.groq.com) — required for AI features |
| `AI_MODEL` | — | Override the default Groq model (e.g. `llama-3.3-70b-versatile`) |
| `AI_PROVIDER` | — | AI provider: `groq` \| `openai` \| `anthropic` \| `google` (default: `groq`) |
| `AI_MODEL` | — | Override the default model for the selected provider |
| `GROQ_API_KEY` | ✅* | Required when `AI_PROVIDER=groq` (default). From [console.groq.com](https://console.groq.com) |
| `OPENAI_API_KEY` | ✅* | Required when `AI_PROVIDER=openai` |
| `ANTHROPIC_API_KEY` | ✅* | Required when `AI_PROVIDER=anthropic` |
| `GOOGLE_GENERATIVE_AI_API_KEY`| ✅* | Required when `AI_PROVIDER=google` |
| `TELEGRAM_BOT_TOKEN` | — | From [@BotFather](https://t.me/BotFather) — required to enable Telegram |
| `TELEGRAM_BOT_USERNAME` | — | Your bot's username (e.g. `CalcomBot`) — required when `TELEGRAM_BOT_TOKEN` is set |
| `TELEGRAM_WEBHOOK_SECRET_TOKEN` | — | Optional secret to verify incoming Telegram webhook requests |
Expand Down
33 changes: 17 additions & 16 deletions apps/chat/lib/agent.ts
Original file line number Diff line number Diff line change
Expand Up @@ -1997,35 +1997,36 @@ export function isAIToolCallError(err: unknown): boolean {
const msg = err.message.toLowerCase();
const cause = err.cause as Error | undefined;
const causeMsg = cause?.message?.toLowerCase() ?? "";
const combined = `${msg} ${causeMsg}`;
return (
msg.includes("failed to call a function") ||
msg.includes("failed_generation") ||
msg.includes("invalid_request_error") ||
msg.includes("tool call validation failed") ||
msg.includes("which was not in request.tools") ||
msg.includes("tool choice is none") ||
causeMsg.includes("failed to call a function") ||
causeMsg.includes("failed_generation") ||
causeMsg.includes("tool call validation failed") ||
causeMsg.includes("tool choice is none")
combined.includes("failed to call a function") ||
combined.includes("failed_generation") ||
combined.includes("invalid_request_error") ||
combined.includes("tool call validation failed") ||
combined.includes("which was not in request.tools") ||
combined.includes("tool choice is none") ||
combined.includes("tool_use_failed") ||
combined.includes("invalid_tool_call")
);
}

/** True if the error is an AI/LLM rate limit (e.g. Groq tokens-per-day). */
/** True if the error is an AI/LLM rate limit (e.g. tokens-per-day, quota exceeded). */
export function isAIRateLimitError(err: unknown): boolean {
if (!(err instanceof Error)) return false;
const msg = err.message.toLowerCase();
const cause = err.cause as Error | undefined;
const causeMsg = cause?.message?.toLowerCase() ?? "";
const combined = `${msg} ${causeMsg}`;
const hasRateLimit =
msg.includes("rate limit") ||
msg.includes("tokens per day") ||
causeMsg.includes("rate limit") ||
causeMsg.includes("tokens per day");
combined.includes("rate limit") ||
combined.includes("tokens per day") ||
combined.includes("quota_exceeded") ||
combined.includes("resource_exhausted") ||
combined.includes("insufficient_quota");
const status429 =
(err as { statusCode?: number }).statusCode === 429 ||
(cause as { statusCode?: number } | undefined)?.statusCode === 429;
return hasRateLimit || (status429 && (msg.includes("retry") || causeMsg.includes("retry")));
return hasRateLimit || (status429 && (combined.includes("retry") || combined.includes("quota") || combined.includes("limit") || combined.includes("exhausted")));
Comment thread
dhairyashiil marked this conversation as resolved.
}

// ─── Agent stream ─────────────────────────────────────────────────────────────
Expand Down
73 changes: 47 additions & 26 deletions apps/chat/lib/ai-provider.ts
Original file line number Diff line number Diff line change
@@ -1,38 +1,59 @@
/**
* AI model provider configuration.
*
* To switch providers, change the import and the `getModel()` call below.
* The rest of the codebase only imports `getModel()` from this file.
*
* Examples:
*
* Groq (default — fast + cheap):
* import { createGroq } from "@ai-sdk/groq";
* const groq = createGroq({ apiKey: process.env.GROQ_API_KEY });
* return groq("llama-3.3-70b-versatile");
*
* Anthropic:
* import { anthropic } from "@ai-sdk/anthropic";
* return anthropic("claude-sonnet-4-20250514");
*
* OpenAI:
* import { openai } from "@ai-sdk/openai";
* return openai("gpt-4o");
* Controlled by two env vars:
* - AI_PROVIDER: which service to use (groq | openai | anthropic | google). Default: groq
* - AI_MODEL: optional model override (each provider has a sensible default)
*
* Any OpenAI-compatible provider (Together, Fireworks, etc.):
* import { createOpenAI } from "@ai-sdk/openai";
* const provider = createOpenAI({ baseURL: "https://api.together.xyz/v1", apiKey: "..." });
* return provider("meta-llama/Llama-3.3-70B-Instruct-Turbo");
* The rest of the codebase only imports `getModel()` from this file.
*/

import { createAnthropic } from "@ai-sdk/anthropic";
import { createGoogleGenerativeAI } from "@ai-sdk/google";
import { createGroq } from "@ai-sdk/groq";
import { createOpenAI } from "@ai-sdk/openai";
import type { LanguageModel } from "ai";

const groq = createGroq({
apiKey: process.env.GROQ_API_KEY,
});
export type AIProvider = "groq" | "openai" | "anthropic" | "google";

export const PROVIDER_CONFIG: Record<AIProvider, { defaultModel: string; apiKeyEnv: string }> = {
groq: {
defaultModel: "openai/gpt-oss-120b",
Comment thread
dhairyashiil marked this conversation as resolved.
apiKeyEnv: "GROQ_API_KEY",
},
openai: { defaultModel: "gpt-4o-mini", apiKeyEnv: "OPENAI_API_KEY" },
anthropic: {
defaultModel: "claude-haiku-4-5",
apiKeyEnv: "ANTHROPIC_API_KEY",
},
google: {
defaultModel: "gemini-2.0-flash",
apiKeyEnv: "GOOGLE_GENERATIVE_AI_API_KEY",
},
};

export const SUPPORTED_PROVIDERS = Object.keys(PROVIDER_CONFIG) as AIProvider[];

export function getModel(): LanguageModel {
// Model is configurable via AI_MODEL env var. See .env.example for alternatives.
return groq(process.env.AI_MODEL ?? "openai/gpt-oss-120b");
const provider = (process.env.AI_PROVIDER ?? "groq") as AIProvider;
const config = PROVIDER_CONFIG[provider];
if (!config) {
throw new Error(
`Unsupported AI_PROVIDER: "${provider}". Use one of: ${SUPPORTED_PROVIDERS.join(", ")}`
);
}

const model = process.env.AI_MODEL ?? config.defaultModel;
const apiKey = process.env[config.apiKeyEnv];

switch (provider) {
case "groq":
return createGroq({ apiKey })(model);
case "openai":
return createOpenAI({ apiKey })(model);
case "anthropic":
return createAnthropic({ apiKey })(model);
case "google":
return createGoogleGenerativeAI({ apiKey })(model);
}
}
17 changes: 17 additions & 0 deletions apps/chat/lib/env.ts
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,9 @@
* Validates required environment variables for the chat bot.
* Call at startup to fail fast if critical config is missing.
*/

import { type AIProvider, PROVIDER_CONFIG, SUPPORTED_PROVIDERS } from "./ai-provider";

export function validateRequiredEnv(): void {
const missing: string[] = [];

Expand All @@ -14,6 +17,20 @@ export function validateRequiredEnv(): void {
missing.push("TELEGRAM_BOT_USERNAME (required when TELEGRAM_BOT_TOKEN is set)");
}

// Validate AI_PROVIDER value
const provider = (process.env.AI_PROVIDER ?? "groq") as string;
if (!SUPPORTED_PROVIDERS.includes(provider as AIProvider)) {
throw new Error(
`Invalid AI_PROVIDER: "${provider}". Must be one of: ${SUPPORTED_PROVIDERS.join(", ")}`
);
}

// Validate that the selected provider's API key is present
const config = PROVIDER_CONFIG[provider as AIProvider];
if (config && !process.env[config.apiKeyEnv]) {
missing.push(`${config.apiKeyEnv} (required for AI_PROVIDER=${provider})`);
}

if (process.env.NODE_ENV === "production" && !process.env.REDIS_URL) {
throw new Error(
"REDIS_URL is required in production. The in-memory state adapter is not suitable for production (state is lost on restart, locks don't work across instances)."
Expand Down
2 changes: 2 additions & 0 deletions apps/chat/package.json
Original file line number Diff line number Diff line change
Expand Up @@ -15,6 +15,8 @@
"deploy": "npx vercel --prod --yes"
},
"dependencies": {
"@ai-sdk/anthropic": "^3.0.58",
"@ai-sdk/google": "^3.0.43",
"@ai-sdk/groq": "3.0.29",
"@ai-sdk/openai": "3.0.41",
"@chat-adapter/shared": "4.19.0",
Expand Down
6 changes: 6 additions & 0 deletions bun.lock

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

Loading