diff --git a/packages/create-12-factor-agent/template/README.md b/packages/create-12-factor-agent/template/README.md
index 6b2a41303..75362ecfd 100644
--- a/packages/create-12-factor-agent/template/README.md
+++ b/packages/create-12-factor-agent/template/README.md
@@ -111,18 +111,29 @@ If you want to try swapping out the model, you can change the `client` line.
[Docs on baml clients can be found here](https://docs.boundaryml.com/guide/baml-basics/switching-llms)
-For example, you can configure [gemini](https://docs.boundaryml.com/ref/llm-client-providers/google-ai-gemini)
+For example, you can configure [gemini](https://docs.boundaryml.com/ref/llm-client-providers/google-ai-gemini)
or [anthropic](https://docs.boundaryml.com/ref/llm-client-providers/anthropic) as your model provider.
For example, to use openai with an OPENAI_API_KEY, you can do:
client "openai/gpt-4o"
+To use [MiniMax](https://www.minimaxi.com/) with a MINIMAX_API_KEY, you can use the pre-configured clients:
+
+ client MiniMaxM27 // MiniMax-M2.7 (latest, recommended)
+ client MiniMaxM27Highspeed // MiniMax-M2.7-highspeed (faster)
+ client MiniMaxM25 // MiniMax-M2.5 (previous generation)
+ client MiniMaxM25Highspeed // MiniMax-M2.5-highspeed (previous gen, faster)
+
Set your env vars
export BASETEN_API_KEY=...
-export BASETEN_BASE_URL=...
+ export BASETEN_BASE_URL=...
+
+For MiniMax (optional):
+
+ export MINIMAX_API_KEY=... # Get your key at https://www.minimaxi.com/
Try it out
diff --git a/packages/create-12-factor-agent/template/baml_src/agent.baml b/packages/create-12-factor-agent/template/baml_src/agent.baml
index b24ce5931..5de64408b 100644
--- a/packages/create-12-factor-agent/template/baml_src/agent.baml
+++ b/packages/create-12-factor-agent/template/baml_src/agent.baml
@@ -164,4 +164,29 @@ test MathOperationPostClarification {
@@assert(b, {{this.a == 3}})
@@assert(a, {{this.b == 12}})
}
+
+// Test using MiniMax M2.5 as the LLM provider
+test HelloWorldMiniMax {
+ functions [DetermineNextStep]
+ args {
+ thread #"
+
+ hello!
+
+ "#
+ }
+ @@assert(intent, {{this.intent == "request_more_information"}})
+}
+
+test MathOperationMiniMax {
+ functions [DetermineNextStep]
+ args {
+ thread #"
+
+ can you add 5 and 7?
+
+ "#
+ }
+ @@assert(intent, {{this.intent == "add"}})
+}
\ No newline at end of file
diff --git a/packages/create-12-factor-agent/template/baml_src/clients.baml b/packages/create-12-factor-agent/template/baml_src/clients.baml
index f12893b99..5c4d7ded1 100644
--- a/packages/create-12-factor-agent/template/baml_src/clients.baml
+++ b/packages/create-12-factor-agent/template/baml_src/clients.baml
@@ -35,12 +35,55 @@ client CustomHaiku {
}
}
+// MiniMax M2.7 - latest high-capability model (recommended)
+// Uses OpenAI-compatible API: https://www.minimaxi.com/
+client MiniMaxM27 {
+ provider openai
+ options {
+ model "MiniMax-M2.7"
+ api_key env.MINIMAX_API_KEY
+ base_url "https://api.minimax.io/v1"
+ }
+}
+
+// MiniMax M2.7-highspeed - faster variant optimized for low-latency tasks
+client MiniMaxM27Highspeed {
+ provider openai
+ retry_policy Exponential
+ options {
+ model "MiniMax-M2.7-highspeed"
+ api_key env.MINIMAX_API_KEY
+ base_url "https://api.minimax.io/v1"
+ }
+}
+
+// MiniMax M2.5 - previous generation model with 204K context window
+client MiniMaxM25 {
+ provider openai
+ options {
+ model "MiniMax-M2.5"
+ api_key env.MINIMAX_API_KEY
+ base_url "https://api.minimax.io/v1"
+ }
+}
+
+// MiniMax M2.5-highspeed - previous generation faster variant
+client MiniMaxM25Highspeed {
+ provider openai
+ retry_policy Exponential
+ options {
+ model "MiniMax-M2.5-highspeed"
+ api_key env.MINIMAX_API_KEY
+ base_url "https://api.minimax.io/v1"
+ }
+}
+
// https://docs.boundaryml.com/docs/snippets/clients/round-robin
client CustomFast {
provider round-robin
options {
// This will alternate between the two clients
- strategy [CustomGPT4oMini, CustomHaiku]
+ strategy [CustomGPT4oMini, CustomHaiku, MiniMaxM27Highspeed]
}
}
@@ -49,7 +92,7 @@ client OpenaiFallback {
provider fallback
options {
// This will try the clients in order until one succeeds
- strategy [CustomGPT4oMini, CustomGPT4oMini]
+ strategy [CustomGPT4oMini, CustomGPT4oMini, MiniMaxM27]
}
}
diff --git a/packages/create-12-factor-agent/template/test/minimax-integration.test.ts b/packages/create-12-factor-agent/template/test/minimax-integration.test.ts
new file mode 100644
index 000000000..4ab3312c3
--- /dev/null
+++ b/packages/create-12-factor-agent/template/test/minimax-integration.test.ts
@@ -0,0 +1,169 @@
+/**
+ * Integration tests for MiniMax provider via OpenAI-compatible API.
+ *
+ * These tests verify that the MiniMax API is reachable and produces
+ * valid responses using the same endpoint and model configuration
+ * defined in clients.baml.
+ *
+ * Requires MINIMAX_API_KEY environment variable to be set.
+ * Run with: MINIMAX_API_KEY=sk-... npx tsx test/minimax-integration.test.ts
+ */
+
+import * as assert from "assert";
+
+const MINIMAX_API_KEY = process.env.MINIMAX_API_KEY;
+const BASE_URL = "https://api.minimax.io/v1";
+
+let passed = 0;
+let failed = 0;
+let skipped = 0;
+
+async function test(name: string, fn: () => Promise<void>) {
+ if (!MINIMAX_API_KEY) {
+ skipped++;
+ console.log(` SKIP: ${name} (MINIMAX_API_KEY not set)`);
+ return;
+ }
+ try {
+ await fn();
+ passed++;
+ console.log(` PASS: ${name}`);
+ } catch (err: any) {
+ failed++;
+ console.log(` FAIL: ${name}`);
+ console.log(` ${err.message}`);
+ }
+}
+
+async function callMiniMax(model: string, prompt: string, temperature = 0.7): Promise<any> {
+ const response = await fetch(`${BASE_URL}/chat/completions`, {
+ method: "POST",
+ headers: {
+ Authorization: `Bearer ${MINIMAX_API_KEY}`,
+ "Content-Type": "application/json",
+ },
+ body: JSON.stringify({
+ model,
+ messages: [
+ { role: "system", content: "You are a helpful assistant." },
+ { role: "user", content: prompt },
+ ],
+ temperature,
+ max_tokens: 100,
+ }),
+ });
+
+ if (!response.ok) {
+ const errorText = await response.text();
+ throw new Error(`API error ${response.status}: ${errorText}`);
+ }
+
+ return response.json();
+}
+
+async function main() {
+ console.log("MiniMax Integration Tests\n");
+
+ // --- M2.7 model tests (latest) ---
+
+ await test("MiniMax-M2.7 responds to a simple prompt", async () => {
+ const result = await callMiniMax("MiniMax-M2.7", "Reply with exactly: hello");
+ assert.ok(result.choices, "Response should have choices array");
+ assert.ok(result.choices.length > 0, "Should have at least one choice");
+ assert.ok(
+ result.choices[0].message?.content,
+ "First choice should have message content"
+ );
+ });
+
+ await test("MiniMax-M2.7 returns valid chat completion format", async () => {
+ const result = await callMiniMax("MiniMax-M2.7", "What is 2+2?");
+ assert.ok(result.id, "Response should have an id");
+ assert.ok(result.model, "Response should have a model field");
+ assert.ok(result.choices[0].message.role === "assistant", "Role should be assistant");
+ assert.ok(result.usage, "Response should include usage info");
+ assert.ok(typeof result.usage.total_tokens === "number", "total_tokens should be a number");
+ });
+
+ await test("MiniMax-M2.7 respects temperature constraint", async () => {
+ const result = await callMiniMax("MiniMax-M2.7", "Say hi", 0.5);
+ assert.ok(result.choices, "Should respond with valid choices at temperature 0.5");
+ });
+
+ // --- M2.7-highspeed model tests ---
+
+ await test("MiniMax-M2.7-highspeed responds to a simple prompt", async () => {
+ const result = await callMiniMax("MiniMax-M2.7-highspeed", "Reply with exactly: world");
+ assert.ok(result.choices, "Response should have choices array");
+ assert.ok(result.choices.length > 0, "Should have at least one choice");
+ assert.ok(
+ result.choices[0].message?.content,
+ "First choice should have message content"
+ );
+ });
+
+ await test("MiniMax-M2.7-highspeed returns valid usage metrics", async () => {
+ const result = await callMiniMax("MiniMax-M2.7-highspeed", "Count to 3");
+ assert.ok(result.usage, "Response should include usage");
+ assert.ok(result.usage.prompt_tokens > 0, "prompt_tokens should be positive");
+ assert.ok(result.usage.completion_tokens > 0, "completion_tokens should be positive");
+ });
+
+ // --- M2.5 model tests (previous generation) ---
+
+ await test("MiniMax-M2.5 responds to a simple prompt", async () => {
+ const result = await callMiniMax("MiniMax-M2.5", "Reply with exactly: hello");
+ assert.ok(result.choices, "Response should have choices array");
+ assert.ok(result.choices.length > 0, "Should have at least one choice");
+ assert.ok(
+ result.choices[0].message?.content,
+ "First choice should have message content"
+ );
+ });
+
+ await test("MiniMax-M2.5 returns valid chat completion format", async () => {
+ const result = await callMiniMax("MiniMax-M2.5", "What is 2+2?");
+ assert.ok(result.id, "Response should have an id");
+ assert.ok(result.model, "Response should have a model field");
+ assert.ok(result.choices[0].message.role === "assistant", "Role should be assistant");
+ assert.ok(result.usage, "Response should include usage info");
+ assert.ok(typeof result.usage.total_tokens === "number", "total_tokens should be a number");
+ });
+
+ await test("MiniMax-M2.5 respects temperature constraint (non-zero)", async () => {
+ // MiniMax requires temperature in (0.0, 1.0]
+ const result = await callMiniMax("MiniMax-M2.5", "Say hi", 0.5);
+ assert.ok(result.choices, "Should respond with valid choices at temperature 0.5");
+ });
+
+ // --- M2.5-highspeed model tests ---
+
+ await test("MiniMax-M2.5-highspeed responds to a simple prompt", async () => {
+ const result = await callMiniMax("MiniMax-M2.5-highspeed", "Reply with exactly: world");
+ assert.ok(result.choices, "Response should have choices array");
+ assert.ok(result.choices.length > 0, "Should have at least one choice");
+ assert.ok(
+ result.choices[0].message?.content,
+ "First choice should have message content"
+ );
+ });
+
+ await test("MiniMax-M2.5-highspeed returns valid usage metrics", async () => {
+ const result = await callMiniMax("MiniMax-M2.5-highspeed", "Count to 3");
+ assert.ok(result.usage, "Response should include usage");
+ assert.ok(result.usage.prompt_tokens > 0, "prompt_tokens should be positive");
+ assert.ok(result.usage.completion_tokens > 0, "completion_tokens should be positive");
+ });
+
+ // --- Summary ---
+
+ console.log(
+ `\nResults: ${passed} passed, ${failed} failed, ${skipped} skipped, ${passed + failed + skipped} total`
+ );
+ process.exit(failed > 0 ? 1 : 0);
+}
+
+main().catch((err) => {
+ console.error("Test runner error:", err);
+ process.exit(1);
+});
diff --git a/packages/create-12-factor-agent/template/test/minimax-provider.test.ts b/packages/create-12-factor-agent/template/test/minimax-provider.test.ts
new file mode 100644
index 000000000..e4ca429cf
--- /dev/null
+++ b/packages/create-12-factor-agent/template/test/minimax-provider.test.ts
@@ -0,0 +1,247 @@
+/**
+ * Unit tests for MiniMax provider configuration in BAML clients.
+ *
+ * Validates that the clients.baml file correctly defines MiniMax
+ * client entries with proper model names, API base URL, and
+ * inclusion in round-robin/fallback strategies.
+ *
+ * Run with: npx tsx test/minimax-provider.test.ts
+ */
+
+import * as fs from "fs";
+import * as path from "path";
+import * as assert from "assert";
+
+const BAML_PATH = path.join(__dirname, "..", "baml_src", "clients.baml");
+
+function readBamlConfig(): string {
+ return fs.readFileSync(BAML_PATH, "utf-8");
+}
+
+let passed = 0;
+let failed = 0;
+
+function test(name: string, fn: () => void) {
+ try {
+ fn();
+ passed++;
+ console.log(` PASS: ${name}`);
+ } catch (err: any) {
+ failed++;
+ console.log(` FAIL: ${name}`);
+ console.log(` ${err.message}`);
+ }
+}
+
+console.log("MiniMax Provider Unit Tests\n");
+
+// --- MiniMax M2.7 client (latest, recommended) ---
+
+test("MiniMaxM27 client block is defined", () => {
+ const content = readBamlConfig();
+ assert.ok(
+ content.includes('client MiniMaxM27'),
+ "clients.baml should define MiniMaxM27 client"
+ );
+});
+
+test("MiniMaxM27 uses openai provider", () => {
+ const content = readBamlConfig();
+ const m27Block = content.split("client MiniMaxM27")[1].split("client")[0];
+ assert.ok(
+ m27Block.includes("provider openai"),
+ "MiniMaxM27 should use openai provider"
+ );
+});
+
+test("MiniMaxM27 uses correct model name", () => {
+ const content = readBamlConfig();
+ const m27Block = content.split("client MiniMaxM27")[1].split("client")[0];
+ assert.ok(
+ m27Block.includes('"MiniMax-M2.7"'),
+ 'MiniMaxM27 should use model "MiniMax-M2.7"'
+ );
+});
+
+test("MiniMaxM27 uses MINIMAX_API_KEY env var", () => {
+ const content = readBamlConfig();
+ const m27Block = content.split("client MiniMaxM27")[1].split("client")[0];
+ assert.ok(
+ m27Block.includes("env.MINIMAX_API_KEY"),
+ "MiniMaxM27 should reference env.MINIMAX_API_KEY"
+ );
+});
+
+test("MiniMaxM27 uses correct base_url", () => {
+ const content = readBamlConfig();
+ const m27Block = content.split("client MiniMaxM27")[1].split("client")[0];
+ assert.ok(
+ m27Block.includes('"https://api.minimax.io/v1"'),
+ "MiniMaxM27 should set base_url to https://api.minimax.io/v1"
+ );
+});
+
+// --- MiniMax M2.7-highspeed client ---
+
+test("MiniMaxM27Highspeed client block is defined", () => {
+ const content = readBamlConfig();
+ assert.ok(
+ content.includes("client MiniMaxM27Highspeed"),
+ "clients.baml should define MiniMaxM27Highspeed client"
+ );
+});
+
+test("MiniMaxM27Highspeed uses correct model name", () => {
+ const content = readBamlConfig();
+ const hsBlock = content.split("client MiniMaxM27Highspeed")[1].split("client")[0];
+ assert.ok(
+ hsBlock.includes('"MiniMax-M2.7-highspeed"'),
+ 'MiniMaxM27Highspeed should use model "MiniMax-M2.7-highspeed"'
+ );
+});
+
+test("MiniMaxM27Highspeed has retry policy", () => {
+ const content = readBamlConfig();
+ const hsBlock = content.split("client MiniMaxM27Highspeed")[1].split("client")[0];
+ assert.ok(
+ hsBlock.includes("retry_policy Exponential"),
+ "MiniMaxM27Highspeed should have Exponential retry policy"
+ );
+});
+
+test("MiniMaxM27Highspeed uses correct base_url", () => {
+ const content = readBamlConfig();
+ const hsBlock = content.split("client MiniMaxM27Highspeed")[1].split("client")[0];
+ assert.ok(
+ hsBlock.includes('"https://api.minimax.io/v1"'),
+ "MiniMaxM27Highspeed should set base_url to https://api.minimax.io/v1"
+ );
+});
+
+// --- MiniMax M2.5 client (previous generation) ---
+
+test("MiniMaxM25 client block is defined", () => {
+ const content = readBamlConfig();
+ assert.ok(
+ content.includes('client MiniMaxM25'),
+ "clients.baml should define MiniMaxM25 client"
+ );
+});
+
+test("MiniMaxM25 uses openai provider", () => {
+ const content = readBamlConfig();
+ const m25Block = content.split("client MiniMaxM25")[1].split("client")[0];
+ assert.ok(
+ m25Block.includes("provider openai"),
+ "MiniMaxM25 should use openai provider"
+ );
+});
+
+test("MiniMaxM25 uses correct model name", () => {
+ const content = readBamlConfig();
+ const m25Block = content.split("client MiniMaxM25")[1].split("client")[0];
+ assert.ok(
+ m25Block.includes('"MiniMax-M2.5"'),
+ 'MiniMaxM25 should use model "MiniMax-M2.5"'
+ );
+});
+
+test("MiniMaxM25 uses MINIMAX_API_KEY env var", () => {
+ const content = readBamlConfig();
+ const m25Block = content.split("client MiniMaxM25")[1].split("client")[0];
+ assert.ok(
+ m25Block.includes("env.MINIMAX_API_KEY"),
+ "MiniMaxM25 should reference env.MINIMAX_API_KEY"
+ );
+});
+
+test("MiniMaxM25 uses correct base_url", () => {
+ const content = readBamlConfig();
+ const m25Block = content.split("client MiniMaxM25")[1].split("client")[0];
+ assert.ok(
+ m25Block.includes('"https://api.minimax.io/v1"'),
+ "MiniMaxM25 should set base_url to https://api.minimax.io/v1"
+ );
+});
+
+// --- MiniMax M2.5-highspeed client ---
+
+test("MiniMaxM25Highspeed client block is defined", () => {
+ const content = readBamlConfig();
+ assert.ok(
+ content.includes("client MiniMaxM25Highspeed"),
+ "clients.baml should define MiniMaxM25Highspeed client"
+ );
+});
+
+test("MiniMaxM25Highspeed uses correct model name", () => {
+ const content = readBamlConfig();
+ const hsBlock = content.split("client MiniMaxM25Highspeed")[1].split("client")[0];
+ assert.ok(
+ hsBlock.includes('"MiniMax-M2.5-highspeed"'),
+ 'MiniMaxM25Highspeed should use model "MiniMax-M2.5-highspeed"'
+ );
+});
+
+test("MiniMaxM25Highspeed has retry policy", () => {
+ const content = readBamlConfig();
+ const hsBlock = content.split("client MiniMaxM25Highspeed")[1].split("client")[0];
+ assert.ok(
+ hsBlock.includes("retry_policy Exponential"),
+ "MiniMaxM25Highspeed should have Exponential retry policy"
+ );
+});
+
+test("MiniMaxM25Highspeed uses correct base_url", () => {
+ const content = readBamlConfig();
+ const hsBlock = content.split("client MiniMaxM25Highspeed")[1].split("client")[0];
+ assert.ok(
+ hsBlock.includes('"https://api.minimax.io/v1"'),
+ "MiniMaxM25Highspeed should set base_url to https://api.minimax.io/v1"
+ );
+});
+
+// --- Strategy inclusion (M2.7 is default in strategies) ---
+
+test("MiniMaxM27Highspeed is included in round-robin strategy", () => {
+ const content = readBamlConfig();
+ const rrBlock = content.split("client CustomFast")[1].split("client")[0];
+ assert.ok(
+ rrBlock.includes("MiniMaxM27Highspeed"),
+ "CustomFast round-robin should include MiniMaxM27Highspeed"
+ );
+});
+
+test("MiniMaxM27 is included in fallback strategy", () => {
+ const content = readBamlConfig();
+ const fbBlock = content.split("client OpenaiFallback")[1].split("retry_policy")[0];
+ assert.ok(
+ fbBlock.includes("MiniMaxM27"),
+ "OpenaiFallback should include MiniMaxM27"
+ );
+});
+
+// --- Existing providers preserved ---
+
+test("OpenAI clients are preserved", () => {
+ const content = readBamlConfig();
+ assert.ok(content.includes("client CustomGPT4o"), "CustomGPT4o should still exist");
+ assert.ok(content.includes("client CustomGPT4oMini"), "CustomGPT4oMini should still exist");
+});
+
+test("Anthropic clients are preserved", () => {
+ const content = readBamlConfig();
+ assert.ok(content.includes("client CustomSonnet"), "CustomSonnet should still exist");
+ assert.ok(content.includes("client CustomHaiku"), "CustomHaiku should still exist");
+});
+
+test("Retry policies are preserved", () => {
+ const content = readBamlConfig();
+ assert.ok(content.includes("retry_policy Constant"), "Constant retry policy should exist");
+ assert.ok(content.includes("retry_policy Exponential"), "Exponential retry policy should exist");
+});
+
+// --- Summary ---
+
+console.log(`\nResults: ${passed} passed, ${failed} failed, ${passed + failed} total`);
+process.exit(failed > 0 ? 1 : 0);
diff --git a/workshops/2025-05/final/baml_src/clients.baml b/workshops/2025-05/final/baml_src/clients.baml
index f12893b99..5c4d7ded1 100644
--- a/workshops/2025-05/final/baml_src/clients.baml
+++ b/workshops/2025-05/final/baml_src/clients.baml
@@ -35,12 +35,55 @@ client CustomHaiku {
}
}
+// MiniMax M2.7 - latest high-capability model (recommended)
+// Uses OpenAI-compatible API: https://www.minimaxi.com/
+client MiniMaxM27 {
+ provider openai
+ options {
+ model "MiniMax-M2.7"
+ api_key env.MINIMAX_API_KEY
+ base_url "https://api.minimax.io/v1"
+ }
+}
+
+// MiniMax M2.7-highspeed - faster variant optimized for low-latency tasks
+client MiniMaxM27Highspeed {
+ provider openai
+ retry_policy Exponential
+ options {
+ model "MiniMax-M2.7-highspeed"
+ api_key env.MINIMAX_API_KEY
+ base_url "https://api.minimax.io/v1"
+ }
+}
+
+// MiniMax M2.5 - previous generation model with 204K context window
+client MiniMaxM25 {
+ provider openai
+ options {
+ model "MiniMax-M2.5"
+ api_key env.MINIMAX_API_KEY
+ base_url "https://api.minimax.io/v1"
+ }
+}
+
+// MiniMax M2.5-highspeed - previous generation faster variant
+client MiniMaxM25Highspeed {
+ provider openai
+ retry_policy Exponential
+ options {
+ model "MiniMax-M2.5-highspeed"
+ api_key env.MINIMAX_API_KEY
+ base_url "https://api.minimax.io/v1"
+ }
+}
+
// https://docs.boundaryml.com/docs/snippets/clients/round-robin
client CustomFast {
provider round-robin
options {
// This will alternate between the two clients
- strategy [CustomGPT4oMini, CustomHaiku]
+ strategy [CustomGPT4oMini, CustomHaiku, MiniMaxM27Highspeed]
}
}
@@ -49,7 +92,7 @@ client OpenaiFallback {
provider fallback
options {
// This will try the clients in order until one succeeds
- strategy [CustomGPT4oMini, CustomGPT4oMini]
+ strategy [CustomGPT4oMini, CustomGPT4oMini, MiniMaxM27]
}
}