Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
5 changes: 4 additions & 1 deletion dev-packages/e2e-tests/test-applications/deno/deno.json
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,10 @@
"imports": {
"@sentry/deno": "npm:@sentry/deno",
"@sentry/core": "npm:@sentry/core",
"@opentelemetry/api": "npm:@opentelemetry/api@^1.9.0"
"@opentelemetry/api": "npm:@opentelemetry/api@^1.9.0",
"ai": "npm:ai@^3.0.0",
"ai/test": "npm:ai@^3.0.0/test",
"zod": "npm:zod@^3.22.4"
},
"nodeModulesDir": "manual"
}
4 changes: 3 additions & 1 deletion dev-packages/e2e-tests/test-applications/deno/package.json
Original file line number Diff line number Diff line change
Expand Up @@ -11,7 +11,9 @@
},
"dependencies": {
"@sentry/deno": "latest || *",
"@opentelemetry/api": "^1.9.0"
"@opentelemetry/api": "^1.9.0",
"ai": "^3.0.0",
"zod": "^3.22.4"
},
"devDependencies": {
"@playwright/test": "~1.56.0",
Expand Down
220 changes: 219 additions & 1 deletion dev-packages/e2e-tests/test-applications/deno/src/app.ts
Original file line number Diff line number Diff line change
Expand Up @@ -13,18 +13,23 @@ trace.setGlobalTracerProvider(fakeProvider as any);

// Sentry.init() must call trace.disable() to clear the fake provider above
import * as Sentry from '@sentry/deno';
import { generateText } from 'ai';
import { MockLanguageModelV1 } from 'ai/test';
import { z } from 'zod';

Sentry.init({
environment: 'qa',
dsn: Deno.env.get('E2E_TEST_DSN'),
debug: !!Deno.env.get('DEBUG'),
tunnel: 'http://localhost:3031/',
tracesSampleRate: 1,
sendDefaultPii: true,
enableLogs: true,
});

const port = 3030;

Deno.serve({ port }, (req: Request) => {
Deno.serve({ port }, async (req: Request) => {
const url = new URL(req.url);

if (url.pathname === '/test-success') {
Expand Down Expand Up @@ -84,6 +89,219 @@ Deno.serve({ port }, (req: Request) => {
});
}

// Test breadcrumbs: add a breadcrumb then capture an error
if (url.pathname === '/test-breadcrumb') {
Sentry.addBreadcrumb({
message: 'test-breadcrumb',
category: 'custom',
level: 'info',
});
const exceptionId = Sentry.captureException(new Error('breadcrumb-test'));
return new Response(JSON.stringify({ exceptionId }), {
headers: { 'Content-Type': 'application/json' },
});
}

// Test context: set user, tag, extra then capture an error
if (url.pathname === '/test-context') {
Sentry.setUser({ id: '123', email: 'test@sentry.io' });
Sentry.setTag('deno-runtime', 'true');
Sentry.setExtra('detail', { key: 'value' });
const exceptionId = Sentry.captureException(new Error('context-test'));
return new Response(JSON.stringify({ exceptionId }), {
headers: { 'Content-Type': 'application/json' },
});
}

// Test scope isolation: tags inside withScope do not leak
if (url.pathname === '/test-scope-isolation') {
let insideId: string | undefined;
let outsideId: string | undefined;

Sentry.withScope(scope => {
scope.setTag('isolated', 'yes');
insideId = Sentry.captureException(new Error('inside-scope'));
});

outsideId = Sentry.captureException(new Error('outside-scope'));

return new Response(JSON.stringify({ insideId, outsideId }), {
headers: { 'Content-Type': 'application/json' },
});
}

// Test outbound fetch instrumentation
if (url.pathname === '/test-outgoing-fetch') {
const response = await Sentry.startSpan({ name: 'test-outgoing-fetch' }, async () => {
const res = await fetch('http://localhost:3030/test-success');
return res.json();
});
return new Response(JSON.stringify(response), {
headers: { 'Content-Type': 'application/json' },
});
}

// Test AI: Vercel AI SDK generateText with mock model
if (url.pathname === '/test-ai') {
const results = await Sentry.startSpan({ op: 'function', name: 'ai-test' }, async () => {
// First call - telemetry enabled by default
const result1 = await generateText({
model: new MockLanguageModelV1({
doGenerate: async () => ({
rawCall: { rawPrompt: null, rawSettings: {} },
finishReason: 'stop',
usage: { promptTokens: 10, completionTokens: 20 },
text: 'First span here!',
}),
}),
prompt: 'Where is the first span?',
});

// Second call - explicitly enabled telemetry
const result2 = await generateText({
experimental_telemetry: { isEnabled: true },
model: new MockLanguageModelV1({
doGenerate: async () => ({
rawCall: { rawPrompt: null, rawSettings: {} },
finishReason: 'stop',
usage: { promptTokens: 10, completionTokens: 20 },
text: 'Second span here!',
}),
}),
prompt: 'Where is the second span?',
});

// Third call - with tool calls
const result3 = await generateText({
model: new MockLanguageModelV1({
doGenerate: async () => ({
rawCall: { rawPrompt: null, rawSettings: {} },
finishReason: 'tool-calls',
usage: { promptTokens: 15, completionTokens: 25 },
text: 'Tool call completed!',
toolCalls: [
{
toolCallType: 'function',
toolCallId: 'call-1',
toolName: 'getWeather',
args: '{ "location": "San Francisco" }',
},
],
}),
}),
tools: {
getWeather: {
parameters: z.object({ location: z.string() }),
execute: async (args: { location: string }) => {
return `Weather in ${args.location}: Sunny, 72°F`;
},
},
},
prompt: 'What is the weather in San Francisco?',
});

// Fourth call - explicitly disabled telemetry, should not be captured
const result4 = await generateText({
experimental_telemetry: { isEnabled: false },
model: new MockLanguageModelV1({
doGenerate: async () => ({
rawCall: { rawPrompt: null, rawSettings: {} },
finishReason: 'stop',
usage: { promptTokens: 10, completionTokens: 20 },
text: 'Should not be captured!',
}),
}),
prompt: 'Where is the disabled span?',
});

return {
result1: result1.text,
result2: result2.text,
result3: result3.text,
result4: result4.text,
};
});

return new Response(JSON.stringify(results), {
headers: { 'Content-Type': 'application/json' },
});
}

// Test AI error: tool call that throws
if (url.pathname === '/test-ai-error') {
try {
await Sentry.startSpan({ op: 'function', name: 'ai-error-test' }, async () => {
await generateText({
experimental_telemetry: { isEnabled: true },
model: new MockLanguageModelV1({
doGenerate: async () => ({
rawCall: { rawPrompt: null, rawSettings: {} },
finishReason: 'tool-calls',
usage: { promptTokens: 15, completionTokens: 25 },
text: 'Tool call completed!',
toolCalls: [
{
toolCallType: 'function',
toolCallId: 'call-1',
toolName: 'getWeather',
args: '{ "location": "San Francisco" }',
},
],
}),
}),
tools: {
getWeather: {
parameters: z.object({ location: z.string() }),
execute: async (_args: { location: string }) => {
throw new Error('Tool call failed');
},
},
},
prompt: 'What is the weather in San Francisco?',
});
});
} catch (e) {
Sentry.captureException(e);
}

return new Response(JSON.stringify({ status: 'error-handled' }), {
headers: { 'Content-Type': 'application/json' },
});
}

// Test metrics: emit counter, distribution, and gauge
if (url.pathname === '/test-metrics') {
Sentry.metrics.count('test.deno.count', 1, {
attributes: {
endpoint: '/test-metrics',
'random.attribute': 'Apples',
},
});
Sentry.metrics.distribution('test.deno.distribution', 100, {
attributes: {
endpoint: '/test-metrics',
'random.attribute': 'Bananas',
},
});
Sentry.metrics.gauge('test.deno.gauge', 200, {
attributes: {
endpoint: '/test-metrics',
'random.attribute': 'Cherries',
},
});
return new Response(JSON.stringify({ status: 'ok' }), {
headers: { 'Content-Type': 'application/json' },
});
}

// Test logs: emit a debug log via Sentry.logger
if (url.pathname === '/test-log') {
Sentry.logger.debug('Accessed /test-log route');
return new Response(JSON.stringify({ message: 'Log sent' }), {
headers: { 'Content-Type': 'application/json' },
});
}

return new Response('Not found', { status: 404 });
});

Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,36 @@
import { expect, test } from '@playwright/test';
import { waitForTransaction, waitForError } from '@sentry-internal/test-utils';

test('should link AI errors to the correct trace', async ({ baseURL }) => {
const aiTransactionPromise = waitForTransaction('deno', event => {
return event?.spans?.some(span => span.description === 'ai-error-test') ?? false;
});

const errorEventPromise = waitForError('deno', event => {
return event.exception?.values?.[0]?.value?.includes('Tool call failed') ?? false;
});

await fetch(`${baseURL}/test-ai-error`);

const aiTransaction = await aiTransactionPromise;
const errorEvent = await errorEventPromise;

expect(aiTransaction).toBeDefined();

const spans = aiTransaction.spans || [];

// The parent span wrapping the AI call should exist
expect(spans).toEqual(
expect.arrayContaining([
expect.objectContaining({
description: 'ai-error-test',
op: 'function',
}),
]),
);

expect(errorEvent).toBeDefined();

// Verify error is linked to the same trace as the transaction
expect(errorEvent?.contexts?.trace?.trace_id).toBe(aiTransaction.contexts?.trace?.trace_id);
});
48 changes: 48 additions & 0 deletions dev-packages/e2e-tests/test-applications/deno/tests/ai.test.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,48 @@
import { expect, test } from '@playwright/test';
import { waitForTransaction } from '@sentry-internal/test-utils';

test('should create AI pipeline spans with Vercel AI SDK', async ({ baseURL }) => {
const aiTransactionPromise = waitForTransaction('deno', event => {
return event?.spans?.some(span => span.description === 'ai-test') ?? false;
});

await fetch(`${baseURL}/test-ai`);

const aiTransaction = await aiTransactionPromise;

expect(aiTransaction).toBeDefined();

const spans = aiTransaction.spans || [];

// The parent span wrapping all AI calls should exist
expect(spans).toEqual(
expect.arrayContaining([
expect.objectContaining({
description: 'ai-test',
op: 'function',
}),
]),
);

// Vercel AI SDK emits OTel spans for generateText calls.
// Due to the AI SDK monkey-patching limitation (https://github.com/vercel/ai/pull/6716),
// only explicitly opted-in calls produce telemetry spans.
// The explicitly enabled call (experimental_telemetry: { isEnabled: true }) should produce spans.
const aiSpans = spans.filter(
(span: any) =>
span.op === 'gen_ai.invoke_agent' ||
span.op === 'gen_ai.generate_text' ||
span.op === 'otel.span' ||
span.description?.includes('ai.generateText'),
);

// We expect at least one AI-related span from the explicitly enabled call
expect(aiSpans.length).toBeGreaterThanOrEqual(1);

// Verify the disabled call was not captured
const promptsInSpans = spans
.map((span: any) => span.data?.['vercel.ai.prompt'])
.filter((prompt: unknown): prompt is string => prompt !== undefined);
const hasDisabledPrompt = promptsInSpans.some((prompt: string) => prompt.includes('Where is the disabled span?'));
expect(hasDisabledPrompt).toBe(false);
});
Original file line number Diff line number Diff line change
@@ -0,0 +1,25 @@
import { expect, test } from '@playwright/test';
import { waitForError } from '@sentry-internal/test-utils';

test('Sends error event with breadcrumbs', async ({ baseURL }) => {
  // Match only plain error events (no event.type) with the expected message.
  const errorPromise = waitForError(
    'deno',
    event => !event.type && event.exception?.values?.[0]?.value === 'breadcrumb-test',
  );

  await fetch(`${baseURL}/test-breadcrumb`);

  const errorEvent = await errorPromise;

  const exceptionValues = errorEvent.exception?.values;
  expect(exceptionValues).toHaveLength(1);
  expect(exceptionValues?.[0]?.value).toBe('breadcrumb-test');

  // The breadcrumb added before captureException must ride along on the event.
  expect(errorEvent.breadcrumbs).toEqual(
    expect.arrayContaining([
      expect.objectContaining({
        message: 'test-breadcrumb',
        category: 'custom',
        level: 'info',
      }),
    ]),
  );
});
Loading
Loading