diff --git a/examples/ai-transport-message-per-response/javascript/README.md b/examples/ai-transport-message-per-response/javascript/README.md new file mode 100644 index 0000000000..c0e3f41b66 --- /dev/null +++ b/examples/ai-transport-message-per-response/javascript/README.md @@ -0,0 +1,61 @@ +# AI Transport message per response streaming + +Enable realtime streaming of AI/LLM responses by appending tokens to a single message over Ably. + +AI Transport message-per-response streaming allows applications to provide immediate, responsive AI interactions by streaming tokens in realtime. Unlike the message-per-token pattern, all tokens for a response are appended to a single message, which appears as one entry in channel history. This makes it easy to retrieve and display conversation history while still delivering live tokens in realtime. + +The streaming approach significantly improves perceived performance and user engagement. Instead of waiting 5-10 seconds for a complete AI response, users see tokens appearing progressively, creating a more natural conversation flow similar to watching someone type in realtime. + +Token streaming is implemented using [Ably AI Transport](/docs/ai-transport). AI Transport provides purpose-built APIs for realtime AI applications, offering reliable message delivery, automatic ordering, and seamless reconnection handling to ensure no tokens are lost during network interruptions. + +## Resources + +Use the following methods to implement AI Transport message-per-response streaming: + +- [`client.channels.get()`](/docs/channels#create): creates a new or retrieves an existing channel for AI Transport token streaming. +- [`channel.publish()`](/docs/channels#publish): publishes the initial message and captures the serial for token appending. +- [`channel.appendMessage()`](/docs/messages#append): appends individual tokens to the message as they arrive from the LLM service. +- [`channel.subscribe()`](/docs/channels#subscribe): subscribes to messages, handling `message.create`, `message.append`, and `message.update` actions. +- [`channel.setOptions()`](/docs/channels/options) with [`rewind`](/docs/channels/options/rewind): enables seamless message recovery during reconnections, delivering historical messages as `message.update` events. + +Find out more about [AI Transport](/docs/ai-transport) and [message appending](/docs/ai-transport/features/token-streaming/message-per-response). + +## Getting started + +1. Clone the [Ably docs](https://github.com/ably/docs) repository where this example can be found: + + ```sh + git clone git@github.com:ably/docs.git + ``` + +2. Change directory: + + ```sh + cd examples/ + ``` + +3. Rename the environment file: + + ```sh + mv .env.example .env.local + ``` + +4. In `.env.local` update the value of `VITE_ABLY_KEY` to be your Ably API key. + +5. Install dependencies: + + ```sh + yarn install + ``` + +6. Run the server: + + ```sh + yarn run ai-transport-message-per-response-javascript + ``` + +7. Try it out by opening [http://localhost:5173/](http://localhost:5173/) with your browser and selecting a prompt to see realtime AI token streaming. + +## Open in CodeSandbox + +In CodeSandbox, rename the `.env.example` file to `.env.local` and update the value of your `VITE_ABLY_KEY` variable to use your Ably API key. 
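For orientation before the full example files below, here is a condensed sketch of the publisher flow the agent implements: publish one empty message, capture its serial, then append each token to it. The key and channel name are placeholders; it assumes an `ably` SDK version (2.17+) whose publish result exposes `serials` and which supports `channel.appendMessage()`, as used in `src/agent.ts` below.

```typescript
import * as Ably from 'ably';

// Sketch of the message-per-response publisher flow (placeholder key and channel name)
const client = new Ably.Realtime({ key: 'YOUR_ABLY_KEY' });
const channel = client.channels.get('ai:response-demo');

async function streamResponse(tokens: AsyncIterable<string>): Promise<void> {
  // One message holds the whole response; capture its serial on publish
  const result = await channel.publish({ name: 'response', data: '' });
  const serial = result.serials[0];

  for await (const token of tokens) {
    // Subscribers receive each append as a message.append action
    await channel.appendMessage({ serial, data: token });
  }
}
```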
diff --git a/examples/ai-transport-message-per-response/javascript/index.html b/examples/ai-transport-message-per-response/javascript/index.html new file mode 100644 index 0000000000..b002b18e24 --- /dev/null +++ b/examples/ai-transport-message-per-response/javascript/index.html @@ -0,0 +1,49 @@ +<!doctype html> +<html lang="en"> + <head> + <meta charset="UTF-8" /> + <meta name="viewport" content="width=device-width, initial-scale=1.0" /> + <title>AI Transport Message Per Response - JavaScript</title> + <link rel="stylesheet" href="src/styles.css" /> + </head> + <body> + <div> + <span id="processing-status">Ready</span> + <button id="connection-toggle">Disconnect</button> + </div> + <div id="response-text">Select a prompt below to get started</div> + <div> + <button id="prompt-button">What is Ably AI Transport?</button> + </div> + <script type="module" src="src/script.ts"></script> + </body> +</html>
diff --git a/examples/ai-transport-message-per-response/javascript/package.json b/examples/ai-transport-message-per-response/javascript/package.json new file mode 100644 index 0000000000..710f1c4a12 --- /dev/null +++ b/examples/ai-transport-message-per-response/javascript/package.json @@ -0,0 +1,10 @@ +{ + "name": "ai-transport-message-per-response-javascript", + "version": "1.0.0", + "type": "module", + "scripts": { + "dev": "vite", + "build": "vite build", + "preview": "vite preview" + } +}
diff --git a/examples/ai-transport-message-per-response/javascript/src/agent.ts b/examples/ai-transport-message-per-response/javascript/src/agent.ts new file mode 100644 index 0000000000..2c297e000f --- /dev/null +++ b/examples/ai-transport-message-per-response/javascript/src/agent.ts @@ -0,0 +1,52 @@ +// Agent Service +// This consumes LLM streams and publishes tokens using the message-per-response pattern +// All tokens are appended to a single message, which appears as one entry in channel history + +import * as Ably from 'ably'; +import { MockLLM } from './llm'; + +export class Agent { + private client: Ably.Realtime; + private channel: Ably.RealtimeChannel; + private llm: MockLLM; + + constructor(ablyKey: string, channelName: string) { + this.client = new Ably.Realtime({ + key: ablyKey, + clientId: 'ai-agent', + }); + this.channel = this.client.channels.get(channelName); + this.llm = new MockLLM(); + } + + async processPrompt(prompt: string): Promise<void> { + const stream = await this.llm.responses.create(prompt); + let msgSerial: string | null = null; + + for await (const event of stream) { + if (event.type === 'message_start') { + // Create initial empty message and capture its serial + const publishResult = await this.channel.publish({ + name: 'response', + data: '', + }); + msgSerial = publishResult.serials[0]; + } else if (event.type === 'message_delta') { + // Append each token to the same message using its serial + if (msgSerial && event.text) { + this.channel.appendMessage({ + serial: msgSerial, + data: event.text, + }); + } + } else if (event.type === 'message_stop') { + // Stream complete - all tokens have been appended + console.log('Response complete'); + } + } + } + + disconnect(): void { + this.client.close(); + } +}
diff --git a/examples/ai-transport-message-per-response/javascript/src/config.ts b/examples/ai-transport-message-per-response/javascript/src/config.ts new file mode 100644 index 0000000000..8617022a2d --- /dev/null +++ b/examples/ai-transport-message-per-response/javascript/src/config.ts @@ -0,0 +1,3 @@ +export const config = { + ABLY_KEY: import.meta.env.VITE_ABLY_KEY || 'YOUR_ABLY_KEY_HERE', +};
diff --git a/examples/ai-transport-message-per-response/javascript/src/llm.ts b/examples/ai-transport-message-per-response/javascript/src/llm.ts new file mode 100644 index 0000000000..ab9f1061f8 --- /dev/null +++ b/examples/ai-transport-message-per-response/javascript/src/llm.ts @@ -0,0 +1,49 @@ +// Mock LLM Service +// This simulates a generic LLM SDK with streaming capabilities + +interface StreamEvent { + type: 'message_start' | 'message_delta' | 'message_stop'; + text?: string; + responseId: string; +} + +export class MockLLM { + private readonly responseText = + 'Ably AI Transport is a solution for building stateful, steerable, multi-device AI experiences into new or existing applications.
You can use AI Transport as the transport layer with any LLM or agent framework, without rebuilding your existing stack or being locked to a particular vendor.'; + + responses = { + create: (prompt: string) => this.createStream(prompt), + }; + + private async *createStream(_prompt: string): AsyncIterable<StreamEvent> { + const responseId = `resp_${crypto.randomUUID()}`; + + // Yield start event + yield { type: 'message_start', responseId }; + + // Chunk text into tokens (simulates LLM tokenization) + const tokens = this.chunkTextLikeAI(this.responseText); + + for (const token of tokens) { + // Simulate realistic delay between tokens + await new Promise((resolve) => setTimeout(resolve, Math.random() * 150 + 50)); + + // Yield token event + yield { type: 'message_delta', text: token, responseId }; + } + + // Yield stop event + yield { type: 'message_stop', responseId }; + } + + private chunkTextLikeAI(text: string): string[] { + const chunks: string[] = []; + let pos = 0; + while (pos < text.length) { + const size = Math.floor(Math.random() * 8) + 1; + chunks.push(text.slice(pos, pos + size)); + pos += size; + } + return chunks.filter((chunk) => chunk.length > 0); + } +}
diff --git a/examples/ai-transport-message-per-response/javascript/src/script.ts b/examples/ai-transport-message-per-response/javascript/src/script.ts new file mode 100644 index 0000000000..b8748377ab --- /dev/null +++ b/examples/ai-transport-message-per-response/javascript/src/script.ts @@ -0,0 +1,96 @@ +import * as Ably from 'ably'; +import { Agent } from './agent'; +import { config } from './config'; + +// Generate unique channel name for this session +const CHANNEL_NAME = `ai:response-${crypto.randomUUID()}`; +const client = new Ably.Realtime({ + key: config.ABLY_KEY, +}); + +const channel = client.channels.get(CHANNEL_NAME); + +// Agent for processing prompts +const agent = new Agent(config.ABLY_KEY, CHANNEL_NAME); + +// DOM elements +const responseTextElement = document.getElementById('response-text') as HTMLDivElement; +const connectionToggle = document.getElementById('connection-toggle') as HTMLButtonElement; +const promptButton = document.getElementById('prompt-button') as HTMLButtonElement; +const processingStatus = document.getElementById('processing-status') as HTMLSpanElement; + +// Track responses by message serial +const responses = new Map<string, string>(); +let currentSerial: string | null = null; + +const updateDisplay = () => { + if (currentSerial) { + responseTextElement.innerText = responses.get(currentSerial) || ''; + } +}; + +// Subscribe to messages - rewind delivers history as message.update, +// then seamlessly transitions to live message.append events +channel.subscribe((message: Ably.Message) => { + const serial = message.serial; + if (!serial) { + return; + } + + switch (message.action) { + case 'message.create': + responses.set(serial, message.data || ''); + currentSerial = serial; + processingStatus.innerText = 'Streaming'; + break; + case 'message.append': { + // Only append if this is for the current response + if (currentSerial === serial) { + const current = responses.get(serial) || ''; + responses.set(serial, current + (message.data || '')); + } + break; + } + case 'message.update': + // Full state from history or resync - always use it + responses.set(serial, message.data || ''); + currentSerial = serial; + break; + } + updateDisplay(); +}); + +const handlePromptClick = () => { + currentSerial = null; + responseTextElement.innerText = ''; + processingStatus.innerText = 'Streaming'; + + agent.processPrompt('What is
Ably AI Transport?'); +}; + +const handleConnect = async () => { + // Set rewind option before attaching to get history as message.update events + channel.setOptions({ params: { rewind: '2m' } }); + await channel.attach(); + connectionToggle.innerText = 'Disconnect'; + processingStatus.innerText = 'Ready'; +}; + +const handleDisconnect = async () => { + await channel.detach(); + processingStatus.innerText = 'Paused'; + connectionToggle.innerText = 'Connect'; +}; + +const handleConnectionToggle = () => { + if (channel.state === 'attached') { + handleDisconnect(); + } else { + handleConnect(); + } +}; + +connectionToggle.onclick = handleConnectionToggle; +promptButton.onclick = handlePromptClick; + +handleConnect(); diff --git a/examples/ai-transport-message-per-response/javascript/src/styles.css b/examples/ai-transport-message-per-response/javascript/src/styles.css new file mode 100644 index 0000000000..b5c61c9567 --- /dev/null +++ b/examples/ai-transport-message-per-response/javascript/src/styles.css @@ -0,0 +1,3 @@ +@tailwind base; +@tailwind components; +@tailwind utilities; diff --git a/examples/ai-transport-message-per-response/javascript/tailwind.config.ts b/examples/ai-transport-message-per-response/javascript/tailwind.config.ts new file mode 100644 index 0000000000..1c86e1c371 --- /dev/null +++ b/examples/ai-transport-message-per-response/javascript/tailwind.config.ts @@ -0,0 +1,9 @@ +import baseConfig from '../../tailwind.config'; +import type { Config } from 'tailwindcss'; + +const config: Config = { + ...baseConfig, + content: ['./src/**/*.{js,ts,tsx}', './index.html'], +}; + +export default config; diff --git a/examples/ai-transport-message-per-response/javascript/vite.config.ts b/examples/ai-transport-message-per-response/javascript/vite.config.ts new file mode 100644 index 0000000000..3b1cf13b4f --- /dev/null +++ b/examples/ai-transport-message-per-response/javascript/vite.config.ts @@ -0,0 +1,7 @@ +import { defineConfig } from 'vite'; +import baseConfig from '../../vite.config'; + +export default defineConfig({ + ...baseConfig, + envDir: '../../', +}); diff --git a/examples/ai-transport-message-per-response/react/README.md b/examples/ai-transport-message-per-response/react/README.md new file mode 100644 index 0000000000..bb10ac8f99 --- /dev/null +++ b/examples/ai-transport-message-per-response/react/README.md @@ -0,0 +1,70 @@ +# AI Transport message per response streaming + +Enable realtime streaming of AI/LLM responses using the message-per-response pattern, where all tokens are appended to a single Ably message. + +AI Transport message-per-response streaming allows applications to provide immediate, responsive AI interactions by streaming tokens in realtime while maintaining a clean message history. Each complete AI response appears as a single message in channel history, making it easy to retrieve and display multi-response conversation history. + +The streaming approach significantly improves perceived performance and user engagement. Instead of waiting 5-10 seconds for a complete AI response, users see tokens appearing progressively, creating a more natural conversation flow similar to watching someone type in realtime. + +Token streaming is implemented using [Ably AI Transport](/docs/ai-transport). AI Transport provides purpose-built APIs for realtime AI applications, offering reliable message delivery, automatic ordering, and seamless reconnection handling to ensure no tokens are lost during network interruptions. 
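As a sketch of the consumer side of this pattern (mirroring the JavaScript example's `src/script.ts` above; the key and channel name are placeholders), a subscriber sets the `rewind` channel option before attaching, then folds `message.create`, `message.append`, and `message.update` actions into per-serial state:

```typescript
import * as Ably from 'ably';

// Placeholder key and channel name; mirrors the JavaScript example's subscriber logic
const client = new Ably.Realtime({ key: 'YOUR_ABLY_KEY' });
const channel = client.channels.get('ai:response-demo');
const responses = new Map<string, string>();

async function attachWithRewind(): Promise<void> {
  // Set rewind before attaching so the last 2 minutes replay as message.update events
  await channel.setOptions({ params: { rewind: '2m' } });

  await channel.subscribe((message: Ably.Message) => {
    const serial = message.serial;
    if (!serial) return;
    switch (message.action) {
      case 'message.create': // a new response begins
        responses.set(serial, message.data || '');
        break;
      case 'message.append': // a live token arrives
        responses.set(serial, (responses.get(serial) || '') + (message.data || ''));
        break;
      case 'message.update': // rewound history arrives as full message state
        responses.set(serial, message.data || '');
        break;
    }
  });
}
```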
+ +## Resources + +Use the following components to implement AI Transport message-per-response streaming: + +- [`AblyProvider`](/docs/getting-started/react-hooks#ably-provider): initializes and manages a shared Ably client instance, passing it down through React context to enable realtime AI Transport functionality across the application. +- [`ChannelProvider`](/docs/getting-started/react-hooks#channel-provider): manages the state and functionality of a specific channel, providing access to AI response tokens and streaming state via React context. +- [`useChannel()`](/docs/getting-started/react-hooks#useChannel): a hook that subscribes to channel messages, handling `message.create`, `message.append`, and `message.update` actions. +- [`rewind`](/docs/channels/options/rewind) channel option: enables seamless message recovery during reconnections, delivering historical messages as `message.update` events. +- [`appendMessage()`](/docs/api/realtime-sdk/channels#append-message): appends tokens to an existing message using its serial. + +Find out more about [AI Transport](/docs/ai-transport) and [message-per-response](/docs/ai-transport/features/token-streaming/message-per-response). + +## Getting started + +1. Clone the [Ably docs](https://github.com/ably/docs) repository where this example can be found: + + ```sh + git clone git@github.com:ably/docs.git + ``` + +2. Change directory: + + ```sh + cd examples/ + ``` + +3. Rename the environment file: + + ```sh + mv .env.example .env.local + ``` + +4. In `.env.local` update the value of `VITE_ABLY_KEY` to be your Ably API key. + +5. Install dependencies: + + ```sh + yarn install + ``` + +6. Run the server: + + ```sh + yarn run ai-transport-message-per-response-react + ``` + +7. Try it out by opening [http://localhost:5173/](http://localhost:5173/) with your browser and selecting a prompt to see realtime AI token streaming. + +## Open in CodeSandbox + +In CodeSandbox, rename the `.env.example` file to `.env.local` and update the value of your `VITE_ABLY_KEY` variable to use your Ably API key. + +## How it works + +The message-per-response pattern works by: + +1. **Initial message**: When an agent response begins, publish an initial message to the Ably channel with the `message.create` action, containing either empty content or the first token. +2. **Token streaming**: Append subsequent tokens to the original message by publishing those tokens with the `message.append` action. +3. **Live delivery**: Clients subscribed to the channel receive each appended token in realtime, allowing them to progressively render the response. +4. **Compacted history**: Channel history contains only one message per agent response, with all of its appended tokens concatenated together (see the sketch below). diff --git a/examples/ai-transport-message-per-response/react/index.html b/examples/ai-transport-message-per-response/react/index.html new file mode 100644 index 0000000000..2c7da1dce4 --- /dev/null +++ b/examples/ai-transport-message-per-response/react/index.html @@ -0,0 +1,12 @@ +<!doctype html> +<html lang="en"> + <head> + <meta charset="UTF-8" /> + <meta name="viewport" content="width=device-width, initial-scale=1.0" /> + <title>AI Transport Message Per Response</title> + </head> + <body> + <div id="root"></div> + <script type="module" src="/src/index.tsx"></script> + </body> +</html>
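To illustrate the compacted history described in step 4 of the "How it works" list above, the following sketch (not part of the example files; it assumes the standard ably-js `channel.history()` API, with a placeholder key and channel name) retrieves history and logs one entry per completed response:

```typescript
import * as Ably from 'ably';

// Placeholder key and channel name; shows that history holds one item per response
async function printCompactedHistory(): Promise<void> {
  const client = new Ably.Realtime({ key: 'YOUR_ABLY_KEY' });
  const channel = client.channels.get('ai:response-demo');
  await channel.attach();

  const page = await channel.history();
  for (const msg of page.items) {
    // Each item is a full response: all appended tokens concatenated
    console.log(msg.serial, msg.data);
  }
  client.close();
}
```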
diff --git a/examples/ai-transport-message-per-response/react/package.json b/examples/ai-transport-message-per-response/react/package.json new file mode 100644 index 0000000000..efedff88ad --- /dev/null +++ b/examples/ai-transport-message-per-response/react/package.json @@ -0,0 +1,10 @@ +{ + "name": "ai-transport-message-per-response-react", + "version": "1.0.0", + "type": "module", + "scripts": { + "dev": "vite", + "build": "vite build", + "preview": "vite preview" + } +}
diff --git a/examples/ai-transport-message-per-response/react/postcss.config.js b/examples/ai-transport-message-per-response/react/postcss.config.js new file mode 100644 index 0000000000..2aa7205d4b --- /dev/null +++ b/examples/ai-transport-message-per-response/react/postcss.config.js @@ -0,0 +1,6 @@ +export default { + plugins: { + tailwindcss: {}, + autoprefixer: {}, + }, +};
diff --git a/examples/ai-transport-message-per-response/react/src/App.tsx b/examples/ai-transport-message-per-response/react/src/App.tsx new file mode 100644 index 0000000000..07bb4b730b --- /dev/null +++ b/examples/ai-transport-message-per-response/react/src/App.tsx @@ -0,0 +1,151 @@ +import React, { useState } from 'react'; +import { AblyProvider, ChannelProvider, useChannel, useConnectionStateListener } from 'ably/react'; +import { Realtime, Message } from 'ably'; +import { Agent } from './agent'; +import { config } from './config'; +import './styles/styles.css'; + +// Generate unique channel name for this session +const CHANNEL_NAME = `ai:response-${crypto.randomUUID()}`; + +const client = new Realtime({ + key: config.ABLY_KEY, +}); + +const AITransportDemo: React.FC = () => { + const [responses, setResponses] = useState<Map<string, string>>(new Map()); + const [currentSerial, setCurrentSerial] = useState<string | null>(null); + const [connectionState, setConnectionState] = useState<string>('disconnected'); + const [isChannelDetached, setIsChannelDetached] = useState(false); + + // Agent persists across renders to avoid creating new connections + const agentRef = React.useRef<Agent | null>(null); + if (!agentRef.current) { + agentRef.current = new Agent(config.ABLY_KEY, CHANNEL_NAME); + } + + // Subscribe to messages on the channel + const { channel } = useChannel(CHANNEL_NAME, (message: Message) => { + const serial = message.serial; + if (!serial) { + return; + } + + switch (message.action) { + case 'message.create': + // Initial message creation + setResponses((prev) => new Map(prev).set(serial, message.data || '')); + setCurrentSerial(serial); + break; + case 'message.append': + // Only append if this is for the current response + setCurrentSerial((current) => { + if (current === serial) { + setResponses((prev) => { + const newMap = new Map(prev); + const existing = newMap.get(serial) || ''; + return newMap.set(serial, existing + (message.data || '')); + }); + } + return current; + }); + break; + case 'message.update': + // Full state from history (rewind) - replace existing data + setResponses((prev) => new Map(prev).set(serial, message.data || '')); + setCurrentSerial(serial); + break; + } + }); + + useConnectionStateListener((stateChange: { current: string }) => { + setConnectionState(stateChange.current); + }); + + const currentResponse = currentSerial ?
responses.get(currentSerial) || '' : ''; + + const handlePromptClick = () => { + if (connectionState !== 'connected' || isChannelDetached) { + return; + } + + setResponses(new Map()); + setCurrentSerial(null); + + agentRef.current?.processPrompt('What is Ably AI Transport?'); + }; + + const handleDisconnect = () => { + channel.detach(); + setIsChannelDetached(true); + }; + + const handleReconnect = async () => { + setIsChannelDetached(false); + // Set rewind option before reattaching to get history as message.update events + channel.setOptions({ params: { rewind: '2m' } }); + await channel.attach(); + }; + + return ( + <div> + {/* Response section with always visible status */} + <div> + <div> + <span> + {isChannelDetached ? 'Disconnected' : connectionState === 'connected' ? 'Connected' : 'Disconnected'} + </span> + {/* Disconnect/Reconnect button */} + <button onClick={isChannelDetached ? handleReconnect : handleDisconnect}> + {isChannelDetached ? 'Reconnect' : 'Disconnect'} + </button> + </div> + <div> + {currentResponse || 'Select a prompt below to get started'} + </div> + </div> + + {/* Prompt selection */} + <div> + <button onClick={handlePromptClick}> + What is Ably AI Transport? + </button> + </div> + </div> + ); +}; + +// Main App component with providers +const App: React.FC = () => { + return ( + <AblyProvider client={client}> + <ChannelProvider channelName={CHANNEL_NAME}> + <AITransportDemo /> + </ChannelProvider> + </AblyProvider> + ); +}; + +export default App;
diff --git a/examples/ai-transport-message-per-response/react/src/agent.ts b/examples/ai-transport-message-per-response/react/src/agent.ts new file mode 100644 index 0000000000..2c297e000f --- /dev/null +++ b/examples/ai-transport-message-per-response/react/src/agent.ts @@ -0,0 +1,52 @@ +// Agent Service +// This consumes LLM streams and publishes tokens using the message-per-response pattern +// All tokens are appended to a single message, which appears as one entry in channel history + +import * as Ably from 'ably'; +import { MockLLM } from './llm'; + +export class Agent { + private client: Ably.Realtime; + private channel: Ably.RealtimeChannel; + private llm: MockLLM; + + constructor(ablyKey: string, channelName: string) { + this.client = new Ably.Realtime({ + key: ablyKey, + clientId: 'ai-agent', + }); + this.channel = this.client.channels.get(channelName); + this.llm = new MockLLM(); + } + + async processPrompt(prompt: string): Promise<void> { + const stream = await this.llm.responses.create(prompt); + let msgSerial: string | null = null; + + for await (const event of stream) { + if (event.type === 'message_start') { + // Create initial empty message and capture its serial + const publishResult = await this.channel.publish({ + name: 'response', + data: '', + }); + msgSerial = publishResult.serials[0]; + } else if (event.type === 'message_delta') { + // Append each token to the same message using its serial + if (msgSerial && event.text) { + this.channel.appendMessage({ + serial: msgSerial, + data: event.text, + }); + } + } else if (event.type === 'message_stop') { + // Stream complete - all tokens have been appended + console.log('Response complete'); + } + } + } + + disconnect(): void { + this.client.close(); + } +}
diff --git a/examples/ai-transport-message-per-response/react/src/config.ts b/examples/ai-transport-message-per-response/react/src/config.ts new file mode 100644 index 0000000000..28bdb0c670 --- /dev/null +++ b/examples/ai-transport-message-per-response/react/src/config.ts @@ -0,0 +1,3 @@ +export const config = { + ABLY_KEY: import.meta.env.VITE_ABLY_KEY || 'demo-key-for-examples:YOUR_ABLY_KEY_HERE', +};
diff --git a/examples/ai-transport-message-per-response/react/src/index.tsx b/examples/ai-transport-message-per-response/react/src/index.tsx new file mode 100644 index 0000000000..53a330f5c2 --- /dev/null +++ b/examples/ai-transport-message-per-response/react/src/index.tsx @@ -0,0 +1,9 @@ +import { StrictMode } from 'react'; +import { createRoot } from 'react-dom/client'; +import App from './App'; + +createRoot(document.getElementById('root')!).render( + <StrictMode> + <App /> + </StrictMode>, +);
diff --git a/examples/ai-transport-message-per-response/react/src/llm.ts b/examples/ai-transport-message-per-response/react/src/llm.ts new file mode 100644 index 0000000000..ab9f1061f8 --- /dev/null +++ b/examples/ai-transport-message-per-response/react/src/llm.ts @@ -0,0 +1,49 @@ +// Mock LLM Service +// This simulates a generic LLM SDK with streaming capabilities + +interface StreamEvent { + type: 'message_start' | 'message_delta' | 'message_stop'; + text?: string; + responseId: string; +} + +export class MockLLM { + private readonly responseText = + 'Ably AI Transport is a solution for building stateful, steerable, multi-device AI experiences into new or existing applications.
You can use AI Transport as the transport layer with any LLM or agent framework, without rebuilding your existing stack or being locked to a particular vendor.'; + + responses = { + create: (prompt: string) => this.createStream(prompt), + }; + + private async *createStream(_prompt: string): AsyncIterable<StreamEvent> { + const responseId = `resp_${crypto.randomUUID()}`; + + // Yield start event + yield { type: 'message_start', responseId }; + + // Chunk text into tokens (simulates LLM tokenization) + const tokens = this.chunkTextLikeAI(this.responseText); + + for (const token of tokens) { + // Simulate realistic delay between tokens + await new Promise((resolve) => setTimeout(resolve, Math.random() * 150 + 50)); + + // Yield token event + yield { type: 'message_delta', text: token, responseId }; + } + + // Yield stop event + yield { type: 'message_stop', responseId }; + } + + private chunkTextLikeAI(text: string): string[] { + const chunks: string[] = []; + let pos = 0; + while (pos < text.length) { + const size = Math.floor(Math.random() * 8) + 1; + chunks.push(text.slice(pos, pos + size)); + pos += size; + } + return chunks.filter((chunk) => chunk.length > 0); + } +}
diff --git a/examples/ai-transport-message-per-response/react/src/styles/styles.css b/examples/ai-transport-message-per-response/react/src/styles/styles.css new file mode 100644 index 0000000000..b5c61c9567 --- /dev/null +++ b/examples/ai-transport-message-per-response/react/src/styles/styles.css @@ -0,0 +1,3 @@ +@tailwind base; +@tailwind components; +@tailwind utilities;
diff --git a/examples/ai-transport-message-per-response/react/tailwind.config.ts b/examples/ai-transport-message-per-response/react/tailwind.config.ts new file mode 100644 index 0000000000..1c86e1c371 --- /dev/null +++ b/examples/ai-transport-message-per-response/react/tailwind.config.ts @@ -0,0 +1,9 @@ +import baseConfig from '../../tailwind.config'; +import type { Config } from 'tailwindcss'; + +const config: Config = { + ...baseConfig, + content: ['./src/**/*.{js,ts,tsx}', './index.html'], +}; + +export default config;
diff --git a/examples/ai-transport-message-per-response/react/tsconfig.json b/examples/ai-transport-message-per-response/react/tsconfig.json new file mode 100644 index 0000000000..e92702dbee --- /dev/null +++ b/examples/ai-transport-message-per-response/react/tsconfig.json @@ -0,0 +1,20 @@ +{ + "compilerOptions": { + "target": "ESNext", + "lib": ["DOM", "DOM.Iterable", "ESNext"], + "allowJs": false, + "skipLibCheck": true, + "esModuleInterop": false, + "allowSyntheticDefaultImports": true, + "strict": true, + "forceConsistentCasingInFileNames": true, + "module": "ESNext", + "moduleResolution": "Node", + "resolveJsonModule": true, + "isolatedModules": true, + "noEmit": true, + "jsx": "react-jsx" + }, + "include": ["src"], + "references": [{ "path": "./tsconfig.node.json" }] +}
diff --git a/examples/ai-transport-message-per-response/react/tsconfig.node.json b/examples/ai-transport-message-per-response/react/tsconfig.node.json new file mode 100644 index 0000000000..42872c59f5 --- /dev/null +++ b/examples/ai-transport-message-per-response/react/tsconfig.node.json @@ -0,0 +1,10 @@ +{ + "compilerOptions": { + "composite": true, + "skipLibCheck": true, + "module": "ESNext", + "moduleResolution": "bundler", + "allowSyntheticDefaultImports": true + }, + "include": ["vite.config.ts"] +}
diff --git a/examples/ai-transport-message-per-response/react/vite.config.ts b/examples/ai-transport-message-per-response/react/vite.config.ts new file mode 100644 index
0000000000..3b1cf13b4f --- /dev/null +++ b/examples/ai-transport-message-per-response/react/vite.config.ts @@ -0,0 +1,7 @@ +import { defineConfig } from 'vite'; +import baseConfig from '../../vite.config'; + +export default defineConfig({ + ...baseConfig, + envDir: '../../', +});
diff --git a/examples/ai-transport-message-per-token/javascript/src/script.ts b/examples/ai-transport-message-per-token/javascript/src/script.ts index 8687238245..398368acff 100644 --- a/examples/ai-transport-message-per-token/javascript/src/script.ts +++ b/examples/ai-transport-message-per-token/javascript/src/script.ts @@ -8,6 +8,10 @@ const client = new Ably.Realtime({ key: config.ABLY_KEY, }); const channel = client.channels.get(CHANNEL_NAME); + +// Agent for processing prompts +const agent = new Agent(config.ABLY_KEY, CHANNEL_NAME); + const responseTextElement = document.getElementById('response-text') as HTMLDivElement; const connectionToggle = document.getElementById('connection-toggle') as HTMLButtonElement; const promptButton = document.getElementById('prompt-button') as HTMLButtonElement; @@ -90,7 +94,6 @@ const handlePromptClick = () => { currentResponseId = `request-${crypto.randomUUID()}`; responseText = ''; updateDisplay(); - const agent = new Agent(config.ABLY_KEY, CHANNEL_NAME); agent.processPrompt('What is Ably AI Transport?', currentResponseId); };
diff --git a/examples/ai-transport-message-per-token/react/src/App.tsx b/examples/ai-transport-message-per-token/react/src/App.tsx index 01dfd4eae2..c7ecc026ee 100644 --- a/examples/ai-transport-message-per-token/react/src/App.tsx +++ b/examples/ai-transport-message-per-token/react/src/App.tsx @@ -21,6 +21,12 @@ const AITransportDemo: React.FC = () => { const isHydrating = useRef(false); const pendingTokens = useRef<string[]>([]); + // Agent persists across renders to avoid creating new connections + const agentRef = React.useRef<Agent | null>(null); + if (!agentRef.current) { + agentRef.current = new Agent(config.ABLY_KEY, CHANNEL_NAME); + } + const { channel } = useChannel(CHANNEL_NAME, (message: Message) => { const responseId = message.extras?.headers?.responseId; @@ -57,8 +63,7 @@ const responseId = `request-${crypto.randomUUID()}`; currentResponseId.current = responseId; - const agent = new Agent(config.ABLY_KEY, CHANNEL_NAME); - agent.processPrompt('What is Ably AI Transport?', responseId); + agentRef.current?.processPrompt('What is Ably AI Transport?', responseId); }; const handleDisconnect = () => {
diff --git a/examples/package.json b/examples/package.json index a88e37e6bd..857d8ecf32 100644 --- a/examples/package.json +++ b/examples/package.json @@ -6,7 +6,8 @@ "node": ">=20.0.0" }, "workspaces": [ - "ai-transport-message-per-token/react", + "ai-transport-message-per-response/javascript", + "ai-transport-message-per-response/react", "ai-transport-message-per-token/javascript", "ai-transport-message-per-token/react", "auth-generate-jwt/react", @@ -57,6 +58,8 @@ "spaces-member-location/javascript" ], "scripts": { + "ai-transport-message-per-response-javascript": "yarn workspace ai-transport-message-per-response-javascript dev", + "ai-transport-message-per-response-react": "yarn workspace ai-transport-message-per-response-react dev", "ai-transport-message-per-token-javascript": "yarn workspace ai-transport-message-per-token-javascript dev", "ai-transport-message-per-token-react": "yarn workspace ai-transport-message-per-token-react dev", "auth-generate-jwt-javascript": "yarn workspace auth-generate-jwt-javascript dev", @@
-110,7 +113,7 @@ "@ably/chat": "~1.1.0", "@ably/chat-react-ui-kit": "~0.3.0", "@ably/spaces": "~0.4.0", - "ably": "~2.16.0", + "ably": "~2.17.0", "cors": "^2.8.5", "franken-ui": "^2.0.0", "lodash": "^4.17.21", diff --git a/examples/yarn.lock b/examples/yarn.lock index 49e39d4d4c..a40fba73a9 100644 --- a/examples/yarn.lock +++ b/examples/yarn.lock @@ -900,10 +900,10 @@ magic-string "^0.27.0" react-refresh "^0.14.0" -ably@~2.16.0: - version "2.16.0" - resolved "https://registry.yarnpkg.com/ably/-/ably-2.16.0.tgz#b4042182e9ea54e621c60eb76997b3f760901fb4" - integrity sha512-X7SdHJC2ybCKAcFyyvi/VAN903q7JnEqdtpOXMM6TNWdNj/b40a4ijzEX/9lXSKddUJCiYM2KaFaVnSRn90YMw== +ably@~2.17.0: + version "2.17.0" + resolved "https://registry.yarnpkg.com/ably/-/ably-2.17.0.tgz#3d30547aebd3a70573277112d7f464e354f1e252" + integrity sha512-BJPxdFU2uuT4UDRUBcmLXRPNmXWGPIKZ+B7hMj1ygja3UZA2zox388yul1h1ie07V/8+Kn8fzPik3ewiSl5tAA== dependencies: "@ably/msgpack-js" "^0.4.0" dequal "^2.0.3" @@ -2836,16 +2836,7 @@ statuses@2.0.1: resolved "https://registry.yarnpkg.com/statuses/-/statuses-2.0.1.tgz#55cb000ccf1d48728bd23c685a063998cf1a1b63" integrity sha512-RwNA9Z/7PrK06rYLIzFMlaF+l73iwpzsqRIFgbMLbTcLD6cOao82TaWefPXQvB2fOC4AjuYSEndS7N/mTCbkdQ== -"string-width-cjs@npm:string-width@^4.2.0": - version "4.2.3" - resolved "https://registry.yarnpkg.com/string-width/-/string-width-4.2.3.tgz#269c7117d27b05ad2e536830a8ec895ef9c6d010" - integrity sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g== - dependencies: - emoji-regex "^8.0.0" - is-fullwidth-code-point "^3.0.0" - strip-ansi "^6.0.1" - -string-width@^4.1.0: +"string-width-cjs@npm:string-width@^4.2.0", string-width@^4.1.0: version "4.2.3" resolved "https://registry.yarnpkg.com/string-width/-/string-width-4.2.3.tgz#269c7117d27b05ad2e536830a8ec895ef9c6d010" integrity sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g== @@ -2863,14 +2854,7 @@ string-width@^5.0.1, string-width@^5.1.2: emoji-regex "^9.2.2" strip-ansi "^7.0.1" -"strip-ansi-cjs@npm:strip-ansi@^6.0.1": - version "6.0.1" - resolved "https://registry.yarnpkg.com/strip-ansi/-/strip-ansi-6.0.1.tgz#9e26c63d30f53443e9489495b2105d37b67a85d9" - integrity sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A== - dependencies: - ansi-regex "^5.0.1" - -strip-ansi@^6.0.0, strip-ansi@^6.0.1: +"strip-ansi-cjs@npm:strip-ansi@^6.0.1", strip-ansi@^6.0.0, strip-ansi@^6.0.1: version "6.0.1" resolved "https://registry.yarnpkg.com/strip-ansi/-/strip-ansi-6.0.1.tgz#9e26c63d30f53443e9489495b2105d37b67a85d9" integrity sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A== diff --git a/src/components/Examples/ExamplesRenderer.tsx b/src/components/Examples/ExamplesRenderer.tsx index e88c8d608d..0d7795d86d 100644 --- a/src/components/Examples/ExamplesRenderer.tsx +++ b/src/components/Examples/ExamplesRenderer.tsx @@ -37,7 +37,7 @@ const UserIndicator = ({ user }: { user: string }) => { const getDependencies = (id: string, products: string[], activeLanguage: LanguageKey) => { return { - ably: '~2.16.0', + ably: '~2.17.0', nanoid: '^5.0.7', minifaker: '1.34.1', 'franken-ui': '^2.0.0', diff --git a/src/data/examples/index.ts b/src/data/examples/index.ts index 024ccd64d5..b7d4ba13c7 100644 --- a/src/data/examples/index.ts +++ b/src/data/examples/index.ts @@ -13,6 +13,16 @@ export const examples: Example[] = [ metaTitle: 'Build AI message-per-token streaming with Ably AI Transport', 
metaDescription: `Stream AI-generated tokens in realtime using the message-per-token pattern with Ably's AI Transport. Implement scalable token streaming with low latency.`, }, + { + id: 'ai-transport-message-per-response', + name: 'Message per response streaming', + description: 'Stream AI responses by appending tokens to a single message using the message-per-response pattern.', + products: ['ai_transport'], + layout: 'single-horizontal', + visibleFiles: ['src/script.ts', 'src/llm.ts', 'src/agent.ts', 'App.tsx', 'llm.ts', 'agent.ts', 'index.tsx'], + metaTitle: 'Build AI message-per-response streaming with Ably AI Transport', + metaDescription: `Stream AI-generated tokens by appending them to a single message using Ably AI Transport. Each response appears as one compacted message in channel history.`, + }, { id: 'chat-presence', name: 'Chat presence',