diff --git a/docs/platforms/javascript/common/ai-agent-monitoring-browser/index.mdx b/docs/platforms/javascript/common/ai-agent-monitoring-browser/index.mdx
index d79059a9999ce..133c30a3fd037 100644
--- a/docs/platforms/javascript/common/ai-agent-monitoring-browser/index.mdx
+++ b/docs/platforms/javascript/common/ai-agent-monitoring-browser/index.mdx
@@ -55,361 +55,4 @@ Before setting up AI Agent Monitoring, ensure you have OpenAI
-- Anthropic
-- Google Gen AI SDK
-- LangChain
-- LangGraph
-
-Each integration page includes browser-specific examples with options like `recordInputs` and `recordOutputs`.
-
-
-
-
-```javascript
-import * as Sentry from "___SDK_PACKAGE___";
-import OpenAI from "openai";
-
-const client = Sentry.instrumentOpenAiClient(
- new OpenAI({ apiKey: "...", dangerouslyAllowBrowser: true }),
- {
- recordInputs: true,
- recordOutputs: true,
- }
-);
-
-// All calls are now instrumented
-const response = await client.chat.completions.create({
- model: "gpt-4o-mini",
- messages: [{ role: "user", content: "Hello!" }],
-});
-```
-
-
-
-
-
-## Manual Span Creation
-
-If you're using a library that Sentry doesn't provide helpers for, you can manually create spans. For your data to show up in [AI Agents Insights](https://sentry.io/orgredirect/organizations/:orgslug/insights/ai/agents/), spans must have well-defined names and data attributes.
-
-### Invoke Agent Span
-
-
-
-
-
-This span represents the execution of an AI agent, capturing the full lifecycle from receiving a task to producing a final response.
-
-**Key attributes:**
-- `gen_ai.agent.name` — The agent's name (e.g., "Weather Agent")
-- `gen_ai.request.model` — The underlying model used
-- `gen_ai.response.text` — The agent's final output
-- `gen_ai.usage.input_tokens` / `output_tokens` — Total token counts
-
-
-
-
-```javascript
-// Example agent implementation for demonstration
-const myAgent = {
- name: "Weather Agent",
- modelProvider: "openai",
- model: "gpt-4o-mini",
- async run() {
- // Agent implementation
- return {
- output: "The weather in Paris is sunny",
- usage: {
- inputTokens: 15,
- outputTokens: 8,
- },
- };
- },
-};
-
-Sentry.startSpan(
- {
- op: "gen_ai.invoke_agent",
- name: `invoke_agent ${myAgent.name}`,
- attributes: {
- "gen_ai.operation.name": "invoke_agent",
- "gen_ai.request.model": myAgent.model,
- "gen_ai.agent.name": myAgent.name,
- },
- },
- async (span) => {
- // run the agent
- const result = await myAgent.run();
-
- // set agent response
- span.setAttribute("gen_ai.response.text", JSON.stringify([result.output]));
-
- // set token usage
- span.setAttribute("gen_ai.usage.input_tokens", result.usage.inputTokens);
- span.setAttribute("gen_ai.usage.output_tokens", result.usage.outputTokens);
-
- return result;
- }
-);
-```
-
-
-
-
-
-
-
-
-
-### AI Client Span
-
-
-
-
-
-This span represents a chat or completion request to an LLM, capturing the messages, model configuration, and response.
-
-**Key attributes:**
-- `gen_ai.request.model` — The model name (required)
-- `gen_ai.request.messages` — Chat messages sent to the LLM
-- `gen_ai.request.max_tokens` — Token limit for the response
-- `gen_ai.response.text` — The model's response
-
-
-
-
-```javascript
-// Example AI implementation for demonstration
-const myAi = {
- modelProvider: "openai",
- model: "gpt-4o-mini",
- modelConfig: {
- temperature: 0.1,
- presencePenalty: 0.5,
- },
- async createMessage(messages, maxTokens) {
- // AI implementation
- return {
- output: "Here's a joke: Why don't scientists trust atoms? Because they make up everything!",
- usage: {
- inputTokens: 12,
- outputTokens: 24,
- },
- };
- },
-};
-
-Sentry.startSpan(
- {
- op: "gen_ai.chat",
- name: `chat ${myAi.model}`,
- attributes: {
- "gen_ai.operation.name": "chat",
- "gen_ai.request.model": myAi.model,
- },
- },
- async (span) => {
- // set up messages for LLM
- const maxTokens = 1024;
- const messages = [{ role: "user", content: "Tell me a joke" }];
-
- // set chat request data
- span.setAttribute("gen_ai.request.messages", JSON.stringify(messages));
- span.setAttribute("gen_ai.request.max_tokens", maxTokens);
- span.setAttribute("gen_ai.request.temperature", myAi.modelConfig.temperature);
-
- // ask the LLM
- const result = await myAi.createMessage(messages, maxTokens);
-
- // set response
- span.setAttribute("gen_ai.response.text", JSON.stringify([result.output]));
-
- // set token usage
- span.setAttribute("gen_ai.usage.input_tokens", result.usage.inputTokens);
- span.setAttribute("gen_ai.usage.output_tokens", result.usage.outputTokens);
-
- return result;
- }
-);
-```
-
-
-
-
-
-
-
-
-
-### Execute Tool Span
-
-
-
-
-
-This span represents the execution of a tool or function that was requested by an AI model, including the input arguments and resulting output.
-
-**Key attributes:**
-- `gen_ai.tool.name` — The tool's name (e.g., "random_number")
-- `gen_ai.tool.description` — Description of what the tool does
-- `gen_ai.tool.input` — The arguments passed to the tool
-- `gen_ai.tool.output` — The tool's return value
-
-
-
-
-```javascript
-// Example AI implementation for demonstration
-const myAi = {
- modelProvider: "openai",
- model: "gpt-4o-mini",
- async createMessage(messages, maxTokens) {
- // AI implementation that returns tool calls
- return {
- toolCalls: [
- {
- name: "random_number",
- description: "Generate a random number",
- arguments: { max: 10 },
- },
- ],
- };
- },
-};
-
-const messages = [{ role: "user", content: "Generate a random number between 0 and 10" }];
-
-// First, make the AI call
-const result = await Sentry.startSpan(
- { op: "gen_ai.chat", name: `chat ${myAi.model}` },
- () => myAi.createMessage(messages, 1024)
-);
-
-// Check if we should call a tool
-if (result.toolCalls && result.toolCalls.length > 0) {
- const tool = result.toolCalls[0];
-
- await Sentry.startSpan(
- {
- op: "gen_ai.execute_tool",
- name: `execute_tool ${tool.name}`,
- attributes: {
- "gen_ai.request.model": myAi.model,
- "gen_ai.tool.type": "function",
- "gen_ai.tool.name": tool.name,
- "gen_ai.tool.description": tool.description,
- "gen_ai.tool.input": JSON.stringify(tool.arguments),
- },
- },
- async (span) => {
- // run tool (example implementation)
- const toolResult = Math.floor(Math.random() * tool.arguments.max);
-
- // set tool result
- span.setAttribute("gen_ai.tool.output", String(toolResult));
-
- return toolResult;
- }
- );
-}
-```
-
-
-
-
-
-
-
-
-
-### Handoff Span
-
-
-
-
-
-This span marks the transition of control from one agent to another, typically when the current agent determines another agent is better suited to handle the task.
-
-**Requirements:**
-- `op` must be `"gen_ai.handoff"`
-- `name` should follow the pattern `"handoff from {source} to {target}"`
-- All [Common Span Attributes](#common-span-attributes) should be set
-
-The handoff span itself has no body — it just marks the transition point before the target agent starts.
-
-
-
-
-```javascript
-// Example agent implementations for demonstration
-const myAgent = {
- name: "Weather Agent",
- modelProvider: "openai",
- model: "gpt-4o-mini",
- async run() {
- // Agent implementation
- return {
- handoffTo: "Travel Agent",
- output: "I need to handoff to the travel agent for booking recommendations",
- };
- },
-};
-
-const otherAgent = {
- name: "Travel Agent",
- modelProvider: "openai",
- model: "gpt-4o-mini",
- async run() {
- // Other agent implementation
- return { output: "Here are some travel recommendations..." };
- },
-};
-
-// First agent execution
-const result = await Sentry.startSpan(
- { op: "gen_ai.invoke_agent", name: `invoke_agent ${myAgent.name}` },
- () => myAgent.run()
-);
-
-// Check if we should handoff to another agent
-if (result.handoffTo) {
- // Create handoff span
- await Sentry.startSpan(
- {
- op: "gen_ai.handoff",
- name: `handoff from ${myAgent.name} to ${otherAgent.name}`,
- attributes: {
- "gen_ai.request.model": myAgent.model,
- },
- },
- () => {
- // the handoff span just marks the handoff
- // no actual work is done here
- }
- );
-
- // Execute the other agent
- await Sentry.startSpan(
- { op: "gen_ai.invoke_agent", name: `invoke_agent ${otherAgent.name}` },
- () => otherAgent.run()
- );
-}
-```
-
-
-
-
-
-## Common Span Attributes
-
-
+
diff --git a/docs/platforms/react-native/ai-agent-monitoring/index.mdx b/docs/platforms/react-native/ai-agent-monitoring/index.mdx
new file mode 100644
index 0000000000000..d3542a5952d90
--- /dev/null
+++ b/docs/platforms/react-native/ai-agent-monitoring/index.mdx
@@ -0,0 +1,21 @@
+---
+title: AI Agent Monitoring
+sidebar_order: 7
+description: "Learn how to manually instrument AI agents in React Native applications."
+sidebar_section: features
+new: true
+---
+
+With Sentry AI Agent Monitoring, you can monitor and debug your AI systems with full-stack context. You'll be able to track key insights like token usage, latency, tool usage, and error rates. AI Agent Monitoring data will be fully connected to your other Sentry data like logs, errors, and traces.
+
+## Prerequisites
+
+Before setting up AI Agent Monitoring, ensure you have tracing enabled in your Sentry configuration.
+
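+A minimal configuration sketch (the DSN value is a placeholder):
+
+```javascript
+import * as Sentry from "@sentry/react-native";
+
+Sentry.init({
+  dsn: "your-dsn-here",
+  // Tracing must be enabled for AI agent spans to be sent to Sentry
+  tracesSampleRate: 1.0,
+});
+```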
+
+
+**React Native applications require manual instrumentation.** The automatic (OpenTelemetry-based) AI integrations shipped with `@sentry/node` rely on Node.js require hooks that aren't available on Hermes or JavaScriptCore, so they can't be used from React Native. Use the manual client wrappers described below instead.
+
+
+
+
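+As a quick illustration, wrapping the OpenAI client (covered in detail on its [integration page](/platforms/react-native/integrations/openai/)) looks like this; the API key is a placeholder:
+
+```javascript
+import * as Sentry from "@sentry/react-native";
+import OpenAI from "openai";
+
+// Wrap the client once, then use the wrapped instance for all calls
+const client = Sentry.instrumentOpenAiClient(
+  new OpenAI({ apiKey: "your-api-key" }),
+  { recordInputs: true, recordOutputs: true }
+);
+```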
diff --git a/docs/platforms/react-native/integrations/anthropic.mdx b/docs/platforms/react-native/integrations/anthropic.mdx
new file mode 100644
index 0000000000000..0860145253dce
--- /dev/null
+++ b/docs/platforms/react-native/integrations/anthropic.mdx
@@ -0,0 +1,75 @@
+---
+title: Anthropic
+description: "Manually instrument the Anthropic SDK in React Native to capture spans and LLM interactions."
+---
+
+_Import name: `Sentry.instrumentAnthropicAiClient`_
+
+The `instrumentAnthropicAiClient` helper adds instrumentation for the [`@anthropic-ai/sdk`](https://www.npmjs.com/package/@anthropic-ai/sdk) package by wrapping an Anthropic client instance and recording LLM interactions with configurable input/output capture. The OpenTelemetry-based automatic integration available for Node.js does not work in React Native, so wrapping the client manually is the only supported path.
+
+## Usage
+
+```javascript
+import * as Sentry from "@sentry/react-native";
+import Anthropic from "@anthropic-ai/sdk";
+
+const anthropic = new Anthropic({
+ // Warning: API keys included in your app bundle will be visible to anyone who
+ // inspects the bundle. Proxy LLM calls through your own backend whenever possible.
+ apiKey: "your-api-key",
+});
+
+const client = Sentry.instrumentAnthropicAiClient(anthropic, {
+ recordInputs: true,
+ recordOutputs: true,
+});
+
+// Use the wrapped client instead of the original anthropic instance
+const response = await client.messages.create({
+ model: "claude-3-5-sonnet-20241022",
+ max_tokens: 1024,
+ messages: [{ role: "user", content: "Hello!" }],
+});
+```
+
+Make sure tracing is enabled for the spans produced by this integration to be captured.
+
+## Configuration
+
+### Options
+
+The following options control what data is captured from Anthropic SDK calls:
+
+#### `recordInputs`
+
+_Type: `boolean` (optional)_
+
+Records inputs to Anthropic SDK calls (such as prompts and messages).
+
+Defaults to `true` if `sendDefaultPii` is `true`.
+
+#### `recordOutputs`
+
+_Type: `boolean` (optional)_
+
+Records outputs from Anthropic SDK calls (such as generated text and responses).
+
+Defaults to `true` if `sendDefaultPii` is `true`.
+
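+For example, with `sendDefaultPii: true` in your `Sentry.init` config, you can still opt out of capturing prompts (a sketch, using the `anthropic` instance from the usage example above):
+
+```javascript
+const client = Sentry.instrumentAnthropicAiClient(anthropic, {
+  // Overrides the sendDefaultPii-based default of true
+  recordInputs: false,
+});
+```
+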
+## Supported Operations
+
+By default, tracing support is added to the following Anthropic SDK calls:
+
+- `messages.create()` - Create messages with Claude models
+- `messages.stream()` - Stream messages with Claude models
+- `messages.countTokens()` - Count tokens for messages
+- `models.get()` - Get model information
+- `completions.create()` - Create completions (legacy)
+- `models.retrieve()` - Retrieve model details
+- `beta.messages.create()` - Beta messages API
+
+Streaming and non-streaming requests are automatically detected and handled appropriately.
+
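+For example, a streamed request through the wrapped client is captured the same way as a non-streamed one (a sketch, using the `client` from the usage example above):
+
+```javascript
+const stream = client.messages.stream({
+  model: "claude-3-5-sonnet-20241022",
+  max_tokens: 1024,
+  messages: [{ role: "user", content: "Hello!" }],
+});
+
+// Spans are finished once the stream completes
+stream.on("text", (text) => console.log(text));
+const finalMessage = await stream.finalMessage();
+```
+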
+## Supported Versions
+
+- `@anthropic-ai/sdk`: `>=0.19.2 <1.0.0`
diff --git a/docs/platforms/react-native/integrations/google-genai.mdx b/docs/platforms/react-native/integrations/google-genai.mdx
new file mode 100644
index 0000000000000..b3ba9bdf6e9ef
--- /dev/null
+++ b/docs/platforms/react-native/integrations/google-genai.mdx
@@ -0,0 +1,69 @@
+---
+title: Google Gen AI
+description: "Manually instrument the Google Gen AI SDK in React Native to capture spans and LLM interactions."
+---
+
+_Import name: `Sentry.instrumentGoogleGenAIClient`_
+
+The `instrumentGoogleGenAIClient` helper adds instrumentation for the [`@google/genai`](https://www.npmjs.com/package/@google/genai) SDK by wrapping a Google Gen AI client instance and recording LLM interactions with configurable input/output capture. The OpenTelemetry-based automatic integration available for Node.js does not work in React Native, so wrapping the client manually is the only supported path.
+
+## Usage
+
+```javascript
+import * as Sentry from "@sentry/react-native";
+import { GoogleGenAI } from "@google/genai";
+
+const genAI = new GoogleGenAI({
+ // Warning: API keys included in your app bundle will be visible to anyone who
+ // inspects the bundle. Proxy LLM calls through your own backend whenever possible.
+ apiKey: "your-api-key",
+});
+
+const client = Sentry.instrumentGoogleGenAIClient(genAI, {
+ recordInputs: true,
+ recordOutputs: true,
+});
+
+// Use the wrapped client instead of the original genAI instance
+const result = await client.models.generateContent({
+  model: "gemini-2.0-flash",
+  contents: "Hello!",
+});
+```
+
+Make sure tracing is enabled for the spans produced by this integration to be captured.
+
+## Configuration
+
+### Options
+
+The following options control what data is captured from Google Gen AI SDK calls:
+
+#### `recordInputs`
+
+_Type: `boolean` (optional)_
+
+Records inputs to Google Gen AI SDK calls (such as prompts and messages).
+
+Defaults to `true` if `sendDefaultPii` is `true`.
+
+#### `recordOutputs`
+
+_Type: `boolean` (optional)_
+
+Records outputs from Google Gen AI SDK calls (such as generated text and responses).
+
+Defaults to `true` if `sendDefaultPii` is `true`.
+
+## Supported Operations
+
+By default, tracing support is added to the following Google Gen AI SDK calls:
+
+- `models.generateContent()` - Generate content with a given model
+- `models.generateContentStream()` - Stream content generation with a given model
+- `chats.create()` - Create chat sessions
+- `sendMessage()` - Send messages in chat sessions
+- `sendMessageStream()` - Stream messages in chat sessions
+
+Streaming and non-streaming requests are automatically detected and handled appropriately.
+
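+For example, chat sessions created through the wrapped client are instrumented as well (a sketch, using the `client` from the usage example above; the model name is illustrative):
+
+```javascript
+// Messages sent through the session are recorded as spans
+const chat = client.chats.create({ model: "gemini-2.0-flash" });
+const response = await chat.sendMessage({ message: "Tell me a joke" });
+console.log(response.text);
+```
+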
+## Supported Versions
+
+- `@google/genai`: `>=0.10.0 <2`
diff --git a/docs/platforms/react-native/integrations/langchain.mdx b/docs/platforms/react-native/integrations/langchain.mdx
new file mode 100644
index 0000000000000..b4041f58b68d6
--- /dev/null
+++ b/docs/platforms/react-native/integrations/langchain.mdx
@@ -0,0 +1,78 @@
+---
+title: LangChain
+description: "Manually instrument LangChain in React Native to capture spans for chat models, chains, and tools."
+---
+
+_Import name: `Sentry.createLangChainCallbackHandler`_
+
+The `createLangChainCallbackHandler` helper creates a Sentry-aware [LangChain callback handler](https://js.langchain.com/docs/concepts/callbacks/) that records spans for chat models, LLM calls, chains, and tools with configurable input/output capture. The OpenTelemetry-based automatic integration available for Node.js does not work in React Native, so passing the handler explicitly to your LangChain operations is the only supported path.
+
+## Usage
+
+```javascript
+import * as Sentry from "@sentry/react-native";
+import { ChatAnthropic } from "@langchain/anthropic";
+
+// Create a Sentry-aware LangChain callback handler
+const callbackHandler = Sentry.createLangChainCallbackHandler({
+ recordInputs: true,
+ recordOutputs: true,
+});
+
+const model = new ChatAnthropic({
+ model: "claude-3-5-sonnet-20241022",
+ // Warning: API keys included in your app bundle will be visible to anyone who
+ // inspects the bundle. Proxy LLM calls through your own backend whenever possible.
+ apiKey: "your-api-key",
+});
+
+await model.invoke("Tell me a joke", {
+ callbacks: [callbackHandler],
+});
+```
+
+Make sure tracing is enabled for the spans produced by this integration to be captured.
+
+## Configuration
+
+### Options
+
+The following options control what data is captured from LangChain operations:
+
+#### `recordInputs`
+
+_Type: `boolean` (optional)_
+
+Records inputs to LangChain operations (such as prompts and messages).
+
+Defaults to `true` if `sendDefaultPii` is `true`.
+
+#### `recordOutputs`
+
+_Type: `boolean` (optional)_
+
+Records outputs from LangChain operations (such as generated text and responses).
+
+Defaults to `true` if `sendDefaultPii` is `true`.
+
+## Supported Operations
+
+When the callback handler is attached, spans are produced for:
+
+- **Chat model invocations** - chat model calls
+- **LLM invocations** - LLM pipeline executions
+- **Chain executions** - chain invocations
+- **Tool executions** - tool calls
+
+The handler covers the following runnable entry points: `invoke()`, `stream()`, and `batch()`; a streaming sketch follows the provider list below. It supports the following provider packages:
+
+- `@langchain/anthropic`
+- `@langchain/openai`
+- `@langchain/google-genai`
+- `@langchain/mistralai`
+- `@langchain/google-vertexai`
+- `@langchain/groq`
+
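+For example, attaching the handler to a streamed call produces the same spans as `invoke()` (a sketch, using the `model` and `callbackHandler` from the usage example above):
+
+```javascript
+const stream = await model.stream("Tell me a joke", {
+  callbacks: [callbackHandler],
+});
+
+// Spans are finished once the stream is consumed
+for await (const chunk of stream) {
+  console.log(chunk.content);
+}
+```
+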
+## Supported Versions
+
+- `langchain`: `>=0.1.0 <2.0.0`
diff --git a/docs/platforms/react-native/integrations/langgraph.mdx b/docs/platforms/react-native/integrations/langgraph.mdx
new file mode 100644
index 0000000000000..8793e904ff859
--- /dev/null
+++ b/docs/platforms/react-native/integrations/langgraph.mdx
@@ -0,0 +1,86 @@
+---
+title: LangGraph
+description: "Manually instrument LangGraph in React Native to capture spans for agent compilation and invocation."
+---
+
+_Import name: `Sentry.instrumentLangGraph`_
+
+The `instrumentLangGraph` helper adds instrumentation for [`@langchain/langgraph`](https://www.npmjs.com/package/@langchain/langgraph) by wrapping a `StateGraph` before compilation and recording AI agent interactions with configurable input/output capture. The OpenTelemetry-based automatic integration available for Node.js does not work in React Native, so calling `instrumentLangGraph` on the graph **before** `.compile()` is the only supported path.
+
+## Usage
+
+```javascript
+import * as Sentry from "@sentry/react-native";
+import { ChatOpenAI } from "@langchain/openai";
+import { StateGraph, MessagesAnnotation, START, END } from "@langchain/langgraph";
+import { SystemMessage, HumanMessage } from "@langchain/core/messages";
+
+const llm = new ChatOpenAI({
+ modelName: "gpt-4o",
+ // Warning: API keys included in your app bundle will be visible to anyone who
+ // inspects the bundle. Proxy LLM calls through your own backend whenever possible.
+ apiKey: "your-api-key",
+});
+
+async function callLLM(state) {
+  const response = await llm.invoke(state.messages);
+  // MessagesAnnotation's reducer appends returned messages to the existing
+  // state, so return only the new message instead of re-spreading the history
+  return {
+    messages: [response],
+  };
+}
+
+const agent = new StateGraph(MessagesAnnotation)
+ .addNode("agent", callLLM)
+ .addEdge(START, "agent")
+ .addEdge("agent", END);
+
+// Instrument the graph BEFORE compiling
+Sentry.instrumentLangGraph(agent, {
+ recordInputs: true,
+ recordOutputs: true,
+});
+
+const graph = agent.compile({ name: "my_agent" });
+
+const result = await graph.invoke({
+ messages: [
+ new SystemMessage("You are a helpful assistant."),
+ new HumanMessage("Hello!"),
+ ],
+});
+```
+
+Make sure tracing is enabled for the spans produced by this integration to be captured.
+
+## Configuration
+
+### Options
+
+The following options control what data is captured from LangGraph operations:
+
+#### `recordInputs`
+
+_Type: `boolean` (optional)_
+
+Records inputs to LangGraph operations (such as prompts and messages).
+
+Defaults to `true` if `sendDefaultPii` is `true`.
+
+#### `recordOutputs`
+
+_Type: `boolean` (optional)_
+
+Records outputs from LangGraph operations (such as generated text and responses).
+
+Defaults to `true` if `sendDefaultPii` is `true`.
+
+## Supported Operations
+
+By default, tracing support is added to the following LangGraph SDK calls:
+
+- **Agent Creation** (`gen_ai.create_agent`) - Captures spans when compiling a StateGraph into an executable agent
+- **Agent Invocation** (`gen_ai.invoke_agent`) - Captures spans for agent execution via `invoke()`
+
+## Supported Versions
+
+- `@langchain/langgraph`: `>=0.2.0 <2.0.0`
diff --git a/docs/platforms/react-native/integrations/openai.mdx b/docs/platforms/react-native/integrations/openai.mdx
new file mode 100644
index 0000000000000..2ee406e74cd2c
--- /dev/null
+++ b/docs/platforms/react-native/integrations/openai.mdx
@@ -0,0 +1,75 @@
+---
+title: OpenAI
+description: "Manually instrument the OpenAI SDK in React Native to capture spans and LLM interactions."
+---
+
+_Import name: `Sentry.instrumentOpenAiClient`_
+
+The `instrumentOpenAiClient` helper adds instrumentation for the [`openai`](https://www.npmjs.com/package/openai) SDK by wrapping an OpenAI client instance and recording LLM interactions with configurable input/output capture. The OpenTelemetry-based automatic integration available for Node.js does not work in React Native, so wrapping the client manually is the only supported path.
+
+## Usage
+
+```javascript
+import * as Sentry from "@sentry/react-native";
+import OpenAI from "openai";
+
+const openai = new OpenAI({
+ // Warning: API keys included in your app bundle will be visible to anyone who
+ // inspects the bundle. Proxy LLM calls through your own backend whenever possible.
+ apiKey: "your-api-key",
+});
+
+const client = Sentry.instrumentOpenAiClient(openai, {
+ recordInputs: true,
+ recordOutputs: true,
+});
+
+// Use the wrapped client instead of the original openai instance
+const response = await client.chat.completions.create({
+ model: "gpt-4o",
+ messages: [{ role: "user", content: "Hello!" }],
+});
+```
+
+Make sure tracing is enabled for the spans produced by this integration to be captured.
+
+## Configuration
+
+### Options
+
+The following options control what data is captured from OpenAI SDK calls:
+
+#### `recordInputs`
+
+_Type: `boolean` (optional)_
+
+Records inputs to OpenAI SDK calls (such as prompts and messages).
+
+Defaults to `true` if `sendDefaultPii` is `true`.
+
+#### `recordOutputs`
+
+_Type: `boolean` (optional)_
+
+Records outputs from OpenAI SDK calls (such as generated text and responses).
+
+Defaults to `true` if `sendDefaultPii` is `true`.
+
+## Supported Operations
+
+By default, tracing support is added to the following OpenAI SDK calls:
+
+- `chat.completions.create()` - Chat completion requests
+- `responses.create()` - Response SDK requests
+
+Streaming and non-streaming requests are automatically detected and handled appropriately.
+
+
+
+When using OpenAI's streaming API, you must also pass `stream_options: { include_usage: true }` to receive token usage data. Without this option, OpenAI does not include `prompt_tokens` or `completion_tokens` in streamed responses, and Sentry will be unable to capture `gen_ai.usage.input_tokens` / `gen_ai.usage.output_tokens` on the resulting span. This is an OpenAI API behavior, not a Sentry limitation. See [OpenAI docs on stream options](https://platform.openai.com/docs/api-reference/chat/create#chat-create-stream_options).
+
+
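+For example, a streamed request that still reports token usage (a sketch, using the `client` from the usage example above):
+
+```javascript
+const stream = await client.chat.completions.create({
+  model: "gpt-4o",
+  messages: [{ role: "user", content: "Hello!" }],
+  stream: true,
+  // Required for token usage to be captured on the Sentry span
+  stream_options: { include_usage: true },
+});
+
+for await (const chunk of stream) {
+  // Only the final chunk carries the usage payload
+  if (chunk.usage) {
+    console.log(chunk.usage.prompt_tokens, chunk.usage.completion_tokens);
+  }
+}
+```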
+
+## Supported Versions
+
+- `openai`: `>=4.0.0 <7`
diff --git a/includes/ai-agent-monitoring/manual-instrumentation.mdx b/includes/ai-agent-monitoring/manual-instrumentation.mdx
new file mode 100644
index 0000000000000..eb5d5005356d6
--- /dev/null
+++ b/includes/ai-agent-monitoring/manual-instrumentation.mdx
@@ -0,0 +1,373 @@
+## Using Integration Helpers
+
+
+
+
+
+For supported AI libraries, Sentry provides manual instrumentation helpers that simplify span creation. These helpers handle the complexity of creating properly structured spans with the correct attributes.
+
+**Supported libraries:**
+
+
+
+- OpenAI
+- Anthropic
+- Google Gen AI SDK
+- LangChain
+- LangGraph
+
+Each integration page includes a manual-instrumentation example with options like `recordInputs` and `recordOutputs`.
+
+
+
+
+```javascript
+import * as Sentry from "___SDK_PACKAGE___";
+import OpenAI from "openai";
+
+const client = Sentry.instrumentOpenAiClient(
+ new OpenAI({ apiKey: "...", dangerouslyAllowBrowser: true }),
+ {
+ recordInputs: true,
+ recordOutputs: true,
+ }
+);
+
+// All calls are now instrumented
+const response = await client.chat.completions.create({
+ model: "gpt-4o-mini",
+ messages: [{ role: "user", content: "Hello!" }],
+});
+```
+
+
+
+
+
+## Manual Span Creation
+
+If you're using a library that Sentry doesn't provide helpers for, you can manually create spans. For your data to show up in [AI Agents Insights](https://sentry.io/orgredirect/organizations/:orgslug/insights/ai/agents/), spans must have well-defined names and data attributes.
+
+### Invoke Agent Span
+
+
+
+
+
+This span represents the execution of an AI agent, capturing the full lifecycle from receiving a task to producing a final response.
+
+**Key attributes:**
+- `gen_ai.agent.name` — The agent's name (e.g., "Weather Agent")
+- `gen_ai.request.model` — The underlying model used
+- `gen_ai.response.text` — The agent's final output
+- `gen_ai.usage.input_tokens` / `output_tokens` — Total token counts
+
+
+
+
+```javascript
+// Example agent implementation for demonstration
+const myAgent = {
+ name: "Weather Agent",
+ modelProvider: "openai",
+ model: "gpt-4o-mini",
+ async run() {
+ // Agent implementation
+ return {
+ output: "The weather in Paris is sunny",
+ usage: {
+ inputTokens: 15,
+ outputTokens: 8,
+ },
+ };
+ },
+};
+
+Sentry.startSpan(
+ {
+ op: "gen_ai.invoke_agent",
+ name: `invoke_agent ${myAgent.name}`,
+ attributes: {
+ "gen_ai.operation.name": "invoke_agent",
+ "gen_ai.request.model": myAgent.model,
+ "gen_ai.agent.name": myAgent.name,
+ },
+ },
+ async (span) => {
+ // run the agent
+ const result = await myAgent.run();
+
+ // set agent response
+ span.setAttribute("gen_ai.response.text", JSON.stringify([result.output]));
+
+ // set token usage
+ span.setAttribute("gen_ai.usage.input_tokens", result.usage.inputTokens);
+ span.setAttribute("gen_ai.usage.output_tokens", result.usage.outputTokens);
+
+ return result;
+ }
+);
+```
+
+
+
+
+
+
+
+
+
+### AI Client Span
+
+
+
+
+
+This span represents a chat or completion request to an LLM, capturing the messages, model configuration, and response.
+
+**Key attributes:**
+- `gen_ai.request.model` — The model name (required)
+- `gen_ai.request.messages` — Chat messages sent to the LLM
+- `gen_ai.request.max_tokens` — Token limit for the response
+- `gen_ai.response.text` — The model's response
+
+
+
+
+```javascript
+// Example AI implementation for demonstration
+const myAi = {
+ modelProvider: "openai",
+ model: "gpt-4o-mini",
+ modelConfig: {
+ temperature: 0.1,
+ presencePenalty: 0.5,
+ },
+ async createMessage(messages, maxTokens) {
+ // AI implementation
+ return {
+ output: "Here's a joke: Why don't scientists trust atoms? Because they make up everything!",
+ usage: {
+ inputTokens: 12,
+ outputTokens: 24,
+ },
+ };
+ },
+};
+
+Sentry.startSpan(
+ {
+ op: "gen_ai.chat",
+ name: `chat ${myAi.model}`,
+ attributes: {
+ "gen_ai.operation.name": "chat",
+ "gen_ai.request.model": myAi.model,
+ },
+ },
+ async (span) => {
+ // set up messages for LLM
+ const maxTokens = 1024;
+ const messages = [{ role: "user", content: "Tell me a joke" }];
+
+ // set chat request data
+ span.setAttribute("gen_ai.request.messages", JSON.stringify(messages));
+ span.setAttribute("gen_ai.request.max_tokens", maxTokens);
+ span.setAttribute("gen_ai.request.temperature", myAi.modelConfig.temperature);
+
+ // ask the LLM
+ const result = await myAi.createMessage(messages, maxTokens);
+
+ // set response
+ span.setAttribute("gen_ai.response.text", JSON.stringify([result.output]));
+
+ // set token usage
+ span.setAttribute("gen_ai.usage.input_tokens", result.usage.inputTokens);
+ span.setAttribute("gen_ai.usage.output_tokens", result.usage.outputTokens);
+
+ return result;
+ }
+);
+```
+
+
+
+
+
+
+
+
+
+### Execute Tool Span
+
+
+
+
+
+This span represents the execution of a tool or function that was requested by an AI model, including the input arguments and resulting output.
+
+**Key attributes:**
+- `gen_ai.tool.name` — The tool's name (e.g., "random_number")
+- `gen_ai.tool.description` — Description of what the tool does
+- `gen_ai.tool.input` — The arguments passed to the tool
+- `gen_ai.tool.output` — The tool's return value
+
+
+
+
+```javascript
+// Example AI implementation for demonstration
+const myAi = {
+ modelProvider: "openai",
+ model: "gpt-4o-mini",
+ async createMessage(messages, maxTokens) {
+ // AI implementation that returns tool calls
+ return {
+ toolCalls: [
+ {
+ name: "random_number",
+ description: "Generate a random number",
+ arguments: { max: 10 },
+ },
+ ],
+ };
+ },
+};
+
+const messages = [{ role: "user", content: "Generate a random number between 0 and 10" }];
+
+// First, make the AI call
+const result = await Sentry.startSpan(
+ { op: "gen_ai.chat", name: `chat ${myAi.model}` },
+ () => myAi.createMessage(messages, 1024)
+);
+
+// Check if we should call a tool
+if (result.toolCalls && result.toolCalls.length > 0) {
+ const tool = result.toolCalls[0];
+
+ await Sentry.startSpan(
+ {
+ op: "gen_ai.execute_tool",
+ name: `execute_tool ${tool.name}`,
+ attributes: {
+ "gen_ai.request.model": myAi.model,
+ "gen_ai.tool.type": "function",
+ "gen_ai.tool.name": tool.name,
+ "gen_ai.tool.description": tool.description,
+ "gen_ai.tool.input": JSON.stringify(tool.arguments),
+ },
+ },
+ async (span) => {
+ // run tool (example implementation)
+ const toolResult = Math.floor(Math.random() * tool.arguments.max);
+
+ // set tool result
+ span.setAttribute("gen_ai.tool.output", String(toolResult));
+
+ return toolResult;
+ }
+ );
+}
+```
+
+
+
+
+
+
+
+
+
+### Handoff Span
+
+
+
+
+
+This span marks the transition of control from one agent to another, typically when the current agent determines another agent is better suited to handle the task.
+
+**Requirements:**
+- `op` must be `"gen_ai.handoff"`
+- `name` should follow the pattern `"handoff from {source} to {target}"`
+- All [Common Span Attributes](#common-span-attributes) should be set
+
+The handoff span itself has no body — it just marks the transition point before the target agent starts.
+
+
+
+
+```javascript
+// Example agent implementations for demonstration
+const myAgent = {
+ name: "Weather Agent",
+ modelProvider: "openai",
+ model: "gpt-4o-mini",
+ async run() {
+ // Agent implementation
+ return {
+ handoffTo: "Travel Agent",
+ output: "I need to handoff to the travel agent for booking recommendations",
+ };
+ },
+};
+
+const otherAgent = {
+ name: "Travel Agent",
+ modelProvider: "openai",
+ model: "gpt-4o-mini",
+ async run() {
+ // Other agent implementation
+ return { output: "Here are some travel recommendations..." };
+ },
+};
+
+// First agent execution
+const result = await Sentry.startSpan(
+ { op: "gen_ai.invoke_agent", name: `invoke_agent ${myAgent.name}` },
+ () => myAgent.run()
+);
+
+// Check if we should hand off to another agent
+if (result.handoffTo) {
+ // Create handoff span
+ await Sentry.startSpan(
+ {
+ op: "gen_ai.handoff",
+ name: `handoff from ${myAgent.name} to ${otherAgent.name}`,
+ attributes: {
+ "gen_ai.request.model": myAgent.model,
+ },
+ },
+ () => {
+ // the handoff span just marks the handoff
+ // no actual work is done here
+ }
+ );
+
+ // Execute the other agent
+ await Sentry.startSpan(
+ { op: "gen_ai.invoke_agent", name: `invoke_agent ${otherAgent.name}` },
+ () => otherAgent.run()
+ );
+}
+```
+
+
+
+
+
+## Common Span Attributes
+
+