
Commit ed07836

feat(cloudflare,vercel-edge): Add support for OpenAI instrumentation (#17338)
Adds support for OpenAI manual instrumentation in `@sentry/cloudflare` and `@sentry/vercel-edge`. To instrument the OpenAI client, wrap it with `Sentry.instrumentOpenAiClient` and set the recording options.

```js
import * as Sentry from '@sentry/cloudflare';
import OpenAI from 'openai';

const openai = new OpenAI();
const client = Sentry.instrumentOpenAiClient(openai, { recordInputs: true, recordOutputs: true });

// use the wrapped client
```
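The same export ships from `@sentry/vercel-edge` (see the `packages/vercel-edge/src/index.ts` change below), so a minimal sketch of the edge variant, assuming identical options, looks like this:

```js
import * as Sentry from '@sentry/vercel-edge';
import OpenAI from 'openai';

const openai = new OpenAI();
// Same wrapper and options as the Cloudflare example above.
const client = Sentry.instrumentOpenAiClient(openai, { recordInputs: true, recordOutputs: true });
```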
1 parent ce66380 · commit ed07836

10 files changed: +161 −6

CHANGELOG.md

Lines changed: 16 additions & 0 deletions
```diff
@@ -25,6 +25,22 @@ Sentry.init({
 
 Spans matching the filter criteria will not be recorded. Potential child spans of filtered spans will be re-parented, if possible.
 
+- **feat(cloudflare,vercel-edge): Add support for OpenAI instrumentation ([#17338](https://github.com/getsentry/sentry-javascript/pull/17338))**
+
+Adds support for OpenAI manual instrumentation in `@sentry/cloudflare` and `@sentry/vercel-edge`.
+
+To instrument the OpenAI client, wrap it with `Sentry.instrumentOpenAiClient` and set recording settings.
+
+```js
+import * as Sentry from '@sentry/cloudflare';
+import OpenAI from 'openai';
+
+const openai = new OpenAI();
+const client = Sentry.instrumentOpenAiClient(openai, { recordInputs: true, recordOutputs: true });
+
+// use the wrapped client
+```
+
 ## 10.1.0
 
 - feat(nuxt): Align build-time options to follow bundler plugins structure ([#17255](https://github.com/getsentry/sentry-javascript/pull/17255))
```
Lines changed: 34 additions & 0 deletions
```ts
import * as Sentry from '@sentry/cloudflare';
import { MockOpenAi } from './mocks';

interface Env {
  SENTRY_DSN: string;
}

const mockClient = new MockOpenAi({
  apiKey: 'mock-api-key',
});

const client = Sentry.instrumentOpenAiClient(mockClient);

export default Sentry.withSentry(
  (env: Env) => ({
    dsn: env.SENTRY_DSN,
    tracesSampleRate: 1.0,
  }),
  {
    async fetch(_request, _env, _ctx) {
      const response = await client.chat?.completions?.create({
        model: 'gpt-3.5-turbo',
        messages: [
          { role: 'system', content: 'You are a helpful assistant.' },
          { role: 'user', content: 'What is the capital of France?' },
        ],
        temperature: 0.7,
        max_tokens: 100,
      });

      return new Response(JSON.stringify(response));
    },
  },
);
```
Lines changed: 50 additions & 0 deletions
```ts
import type { OpenAiClient } from '@sentry/core';

export class MockOpenAi implements OpenAiClient {
  public chat?: Record<string, unknown>;
  public apiKey: string;

  public constructor(config: { apiKey: string }) {
    this.apiKey = config.apiKey;

    this.chat = {
      completions: {
        create: async (...args: unknown[]) => {
          const params = args[0] as { model: string; stream?: boolean };
          // Simulate processing time
          await new Promise(resolve => setTimeout(resolve, 10));

          if (params.model === 'error-model') {
            const error = new Error('Model not found');
            (error as unknown as { status: number }).status = 404;
            (error as unknown as { headers: Record<string, string> }).headers = { 'x-request-id': 'mock-request-123' };
            throw error;
          }

          return {
            id: 'chatcmpl-mock123',
            object: 'chat.completion',
            created: 1677652288,
            model: params.model,
            system_fingerprint: 'fp_44709d6fcb',
            choices: [
              {
                index: 0,
                message: {
                  role: 'assistant',
                  content: 'Hello from OpenAI mock!',
                },
                finish_reason: 'stop',
              },
            ],
            usage: {
              prompt_tokens: 10,
              completion_tokens: 15,
              total_tokens: 25,
            },
          };
        },
      },
    };
  }
}
```
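For reference, the mock's `error-model` branch (shown above) can also be exercised directly; a minimal sketch, where the cast and async wrapper are ours rather than part of the fixture:

```ts
import * as Sentry from '@sentry/cloudflare';
import { MockOpenAi } from './mocks';

const mock = Sentry.instrumentOpenAiClient(new MockOpenAi({ apiKey: 'mock-api-key' }));

(async () => {
  // `chat` is typed loosely on the mock, so narrow it for the call.
  const completions = mock.chat?.completions as { create: (params: unknown) => Promise<unknown> };
  try {
    // Requesting 'error-model' makes the mock throw.
    await completions.create({ model: 'error-model' });
  } catch (e) {
    // The mock attaches `status: 404` and an `x-request-id` header to the error.
    console.log((e as { status?: number }).status); // 404
  }
})();
```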
Lines changed: 42 additions & 0 deletions
```ts
import { expect, it } from 'vitest';
import { createRunner } from '../../../runner';

// These tests are not exhaustive because the instrumentation is
// already tested in the node integration tests and we merely
// want to test that the instrumentation does not break in our
// cloudflare SDK.

it('traces a basic chat completion request', async () => {
  const runner = createRunner(__dirname)
    .ignore('event')
    .expect(envelope => {
      const transactionEvent = envelope[1]?.[0]?.[1];

      expect(transactionEvent.transaction).toBe('GET /');
      expect(transactionEvent.spans).toEqual(
        expect.arrayContaining([
          expect.objectContaining({
            data: expect.objectContaining({
              'gen_ai.operation.name': 'chat',
              'sentry.op': 'gen_ai.chat',
              'gen_ai.system': 'openai',
              'gen_ai.request.model': 'gpt-3.5-turbo',
              'gen_ai.request.temperature': 0.7,
              'gen_ai.response.model': 'gpt-3.5-turbo',
              'gen_ai.response.id': 'chatcmpl-mock123',
              'gen_ai.usage.input_tokens': 10,
              'gen_ai.usage.output_tokens': 15,
              'gen_ai.usage.total_tokens': 25,
              'gen_ai.response.finish_reasons': '["stop"]',
            }),
            description: 'chat gpt-3.5-turbo',
            op: 'gen_ai.chat',
            origin: 'manual',
          }),
        ]),
      );
    })
    .start();
  await runner.makeRequest('get', '/');
  await runner.completed();
});
```
Lines changed: 6 additions & 0 deletions
```jsonc
{
  "name": "worker-name",
  "compatibility_date": "2025-06-17",
  "main": "index.ts",
  "compatibility_flags": ["nodejs_compat"]
}
```

dev-packages/cloudflare-integration-tests/vite.config.mts

Lines changed: 6 additions & 0 deletions
```diff
@@ -22,6 +22,12 @@ export default defineConfig({
     // already run in their own processes. We use threads instead because the
     // overhead is significantly less.
     pool: 'threads',
+    // Run tests sequentially to avoid port conflicts with wrangler dev processes
+    poolOptions: {
+      threads: {
+        singleThread: true,
+      },
+    },
     reporters: process.env.DEBUG
       ? ['default', { summary: false }]
       : process.env.GITHUB_ACTIONS
```

packages/cloudflare/src/index.ts

Lines changed: 1 addition & 0 deletions
```diff
@@ -69,6 +69,7 @@ export {
   functionToStringIntegration,
   // eslint-disable-next-line deprecation/deprecation
   inboundFiltersIntegration,
+  instrumentOpenAiClient,
   eventFiltersIntegration,
   linkedErrorsIntegration,
   requestDataIntegration,
```

packages/core/src/utils/openai/index.ts

Lines changed: 3 additions & 4 deletions
```diff
@@ -24,7 +24,6 @@ import type {
   ChatCompletionChunk,
   InstrumentedMethod,
   OpenAiChatCompletionObject,
-  OpenAiClient,
   OpenAiIntegration,
   OpenAiOptions,
   OpenAiResponse,
@@ -294,7 +293,7 @@ function instrumentMethod<T extends unknown[], R>(
 /**
  * Create a deep proxy for OpenAI client instrumentation
  */
-function createDeepProxy(target: object, currentPath = '', options?: OpenAiOptions): OpenAiClient {
+function createDeepProxy<T extends object>(target: T, currentPath = '', options?: OpenAiOptions): T {
   return new Proxy(target, {
     get(obj: object, prop: string): unknown {
       const value = (obj as Record<string, unknown>)[prop];
@@ -316,13 +315,13 @@ function createDeepProxy(target: object, currentPath = '', options?: OpenAiOptions): OpenAiClient {
 
       return value;
     },
-  });
+  }) as T;
 }
 
 /**
  * Instrument an OpenAI client with Sentry tracing
  * Can be used across Node.js, Cloudflare Workers, and Vercel Edge
  */
-export function instrumentOpenAiClient(client: OpenAiClient, options?: OpenAiOptions): OpenAiClient {
+export function instrumentOpenAiClient<T extends object>(client: T, options?: OpenAiOptions): T {
   return createDeepProxy(client, '', options);
 }
```
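The switch from `OpenAiClient` to a generic `<T extends object>` means the wrapper now preserves the caller's concrete client type instead of widening it. A sketch of what this buys callers, assuming `instrumentOpenAiClient` is importable from `@sentry/core` (the `CustomClient` class here is hypothetical):

```ts
import { instrumentOpenAiClient } from '@sentry/core';

// Hypothetical client with a helper beyond the OpenAiClient interface.
class CustomClient {
  public apiKey = 'key';
  public listModels(): string[] {
    return ['gpt-3.5-turbo'];
  }
}

const wrapped = instrumentOpenAiClient(new CustomClient());
// Under the old `OpenAiClient` return type this call would not type-check;
// with the generic signature, `wrapped` is still typed as `CustomClient`.
wrapped.listModels();
```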

packages/core/src/utils/openai/types.ts

Lines changed: 2 additions & 2 deletions
```diff
@@ -15,11 +15,11 @@ export type AttributeValue =
 
 export interface OpenAiOptions {
   /**
-   * Enable or disable input recording. Enabled if `sendDefaultPii` is `true`
+   * Enable or disable input recording.
    */
   recordInputs?: boolean;
   /**
-   * Enable or disable output recording. Enabled if `sendDefaultPii` is `true`
+   * Enable or disable output recording.
    */
   recordOutputs?: boolean;
 }
```

packages/vercel-edge/src/index.ts

Lines changed: 1 addition & 0 deletions
```diff
@@ -69,6 +69,7 @@ export {
   functionToStringIntegration,
   // eslint-disable-next-line deprecation/deprecation
   inboundFiltersIntegration,
+  instrumentOpenAiClient,
   eventFiltersIntegration,
   linkedErrorsIntegration,
   requestDataIntegration,
```
