Commit e647ba1

fix(node): Add origin for OpenAI spans & test auto instrumentation (#17519)
We used to mock the OpenAI client and instrument it manually. This change adds a test for the actual auto-instrumentation of OpenAI and corrects the span origin from 'manual' to 'auto.function.openai'.
1 parent 14eba56 commit e647ba1
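
For orientation, "auto-instrumentation" here means the span is created by the SDK when the openai client is called, not by hand-written spans in the test. A minimal setup sketch under @sentry/node follows; the DSN and sampling values are placeholders, and whether the OpenAI integration is wired up by default is an assumption on my part, not something this commit confirms:

// instrument.mjs - a minimal sketch; load this before any application code so
// the openai module can be patched by the SDK.
import * as Sentry from '@sentry/node';

Sentry.init({
  dsn: 'https://examplePublicKey@o0.ingest.sentry.io/0', // placeholder DSN
  tracesSampleRate: 1.0, // sample everything so the gen_ai spans are visible
});

// With this in place, a client.chat.completions.create(...) call made through
// the official openai package should produce a span with op 'gen_ai.chat' and
// origin 'auto.function.openai', which is exactly what the diffs below assert.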

File tree

7 files changed: +157 -41 lines changed

dev-packages/cloudflare-integration-tests/suites/tracing/openai/test.ts

Lines changed: 1 addition & 1 deletion

@@ -31,7 +31,7 @@ it('traces a basic chat completion request', async () => {
           }),
           description: 'chat gpt-3.5-turbo',
           op: 'gen_ai.chat',
-          origin: 'manual',
+          origin: 'auto.function.openai',
         }),
       ]),
     );

dev-packages/node-integration-tests/package.json

Lines changed: 1 addition & 0 deletions

@@ -58,6 +58,7 @@
     "nock": "^13.5.5",
     "node-cron": "^3.0.3",
     "node-schedule": "^2.1.1",
+    "openai": "5.18.1",
     "pg": "8.16.0",
     "postgres": "^3.4.7",
     "proxy": "^2.1.1",

dev-packages/node-integration-tests/suites/tracing/openai/openai-tool-calls/test.ts

Lines changed: 16 additions & 16 deletions

@@ -65,7 +65,7 @@ describe('OpenAI Tool Calls integration', () => {
         data: {
           'gen_ai.operation.name': 'chat',
           'sentry.op': 'gen_ai.chat',
-          'sentry.origin': 'manual',
+          'sentry.origin': 'auto.function.openai',
           'gen_ai.system': 'openai',
           'gen_ai.request.model': 'gpt-4',
           'gen_ai.request.available_tools': WEATHER_TOOL_DEFINITION,
@@ -83,15 +83,15 @@ describe('OpenAI Tool Calls integration', () => {
         },
         description: 'chat gpt-4',
         op: 'gen_ai.chat',
-        origin: 'manual',
+        origin: 'auto.function.openai',
         status: 'ok',
       }),
       // Second span - chat completion with tools and streaming
       expect.objectContaining({
         data: {
           'gen_ai.operation.name': 'chat',
           'sentry.op': 'gen_ai.chat',
-          'sentry.origin': 'manual',
+          'sentry.origin': 'auto.function.openai',
           'gen_ai.system': 'openai',
           'gen_ai.request.model': 'gpt-4',
           'gen_ai.request.stream': true,
@@ -111,15 +111,15 @@ describe('OpenAI Tool Calls integration', () => {
         },
         description: 'chat gpt-4 stream-response',
         op: 'gen_ai.chat',
-        origin: 'manual',
+        origin: 'auto.function.openai',
         status: 'ok',
       }),
       // Third span - responses API with tools (non-streaming)
       expect.objectContaining({
         data: {
           'gen_ai.operation.name': 'responses',
           'sentry.op': 'gen_ai.responses',
-          'sentry.origin': 'manual',
+          'sentry.origin': 'auto.function.openai',
           'gen_ai.system': 'openai',
           'gen_ai.request.model': 'gpt-4',
           'gen_ai.request.available_tools': WEATHER_TOOL_DEFINITION,
@@ -137,15 +137,15 @@ describe('OpenAI Tool Calls integration', () => {
         },
         description: 'responses gpt-4',
         op: 'gen_ai.responses',
-        origin: 'manual',
+        origin: 'auto.function.openai',
         status: 'ok',
       }),
       // Fourth span - responses API with tools and streaming
       expect.objectContaining({
         data: {
           'gen_ai.operation.name': 'responses',
           'sentry.op': 'gen_ai.responses',
-          'sentry.origin': 'manual',
+          'sentry.origin': 'auto.function.openai',
           'gen_ai.system': 'openai',
           'gen_ai.request.model': 'gpt-4',
           'gen_ai.request.stream': true,
@@ -165,7 +165,7 @@ describe('OpenAI Tool Calls integration', () => {
         },
         description: 'responses gpt-4 stream-response',
         op: 'gen_ai.responses',
-        origin: 'manual',
+        origin: 'auto.function.openai',
         status: 'ok',
       }),
     ]),
@@ -179,7 +179,7 @@ describe('OpenAI Tool Calls integration', () => {
         data: {
           'gen_ai.operation.name': 'chat',
           'sentry.op': 'gen_ai.chat',
-          'sentry.origin': 'manual',
+          'sentry.origin': 'auto.function.openai',
           'gen_ai.system': 'openai',
           'gen_ai.request.model': 'gpt-4',
           'gen_ai.request.messages': '[{"role":"user","content":"What is the weather like in Paris today?"}]',
@@ -200,15 +200,15 @@ describe('OpenAI Tool Calls integration', () => {
         },
         description: 'chat gpt-4',
         op: 'gen_ai.chat',
-        origin: 'manual',
+        origin: 'auto.function.openai',
         status: 'ok',
       }),
       // Second span - chat completion with tools and streaming with PII
       expect.objectContaining({
         data: {
           'gen_ai.operation.name': 'chat',
           'sentry.op': 'gen_ai.chat',
-          'sentry.origin': 'manual',
+          'sentry.origin': 'auto.function.openai',
           'gen_ai.system': 'openai',
           'gen_ai.request.model': 'gpt-4',
           'gen_ai.request.stream': true,
@@ -230,15 +230,15 @@ describe('OpenAI Tool Calls integration', () => {
         },
         description: 'chat gpt-4 stream-response',
         op: 'gen_ai.chat',
-        origin: 'manual',
+        origin: 'auto.function.openai',
         status: 'ok',
       }),
       // Third span - responses API with tools (non-streaming) with PII
      expect.objectContaining({
         data: {
           'gen_ai.operation.name': 'responses',
           'sentry.op': 'gen_ai.responses',
-          'sentry.origin': 'manual',
+          'sentry.origin': 'auto.function.openai',
           'gen_ai.system': 'openai',
           'gen_ai.request.model': 'gpt-4',
           'gen_ai.request.messages': '[{"role":"user","content":"What is the weather like in Paris today?"}]',
@@ -258,15 +258,15 @@ describe('OpenAI Tool Calls integration', () => {
         },
         description: 'responses gpt-4',
         op: 'gen_ai.responses',
-        origin: 'manual',
+        origin: 'auto.function.openai',
         status: 'ok',
       }),
       // Fourth span - responses API with tools and streaming with PII
       expect.objectContaining({
         data: {
           'gen_ai.operation.name': 'responses',
           'sentry.op': 'gen_ai.responses',
-          'sentry.origin': 'manual',
+          'sentry.origin': 'auto.function.openai',
           'gen_ai.system': 'openai',
           'gen_ai.request.model': 'gpt-4',
           'gen_ai.request.stream': true,
@@ -288,7 +288,7 @@ describe('OpenAI Tool Calls integration', () => {
         },
         description: 'responses gpt-4 stream-response',
         op: 'gen_ai.responses',
-        origin: 'manual',
+        origin: 'auto.function.openai',
         status: 'ok',
       }),
     ]),
Lines changed: 61 additions & 0 deletions

@@ -0,0 +1,61 @@
+import express from 'express';
+import OpenAI from 'openai';
+
+const PORT = 3333;
+
+function startMockOpenAiServer() {
+  const app = express();
+  app.use(express.json());
+
+  app.post('/openai/chat/completions', (req, res) => {
+    res.send({
+      id: 'chatcmpl-mock123',
+      object: 'chat.completion',
+      created: 1677652288,
+      model: req.body.model,
+      system_fingerprint: 'fp_44709d6fcb',
+      choices: [
+        {
+          index: 0,
+          message: {
+            role: 'assistant',
+            content: 'Hello from OpenAI mock!',
+          },
+          finish_reason: 'stop',
+        },
+      ],
+      usage: {
+        prompt_tokens: 10,
+        completion_tokens: 15,
+        total_tokens: 25,
+      },
+    });
+  });
+  return app.listen(PORT);
+}
+
+async function run() {
+  const server = startMockOpenAiServer();
+
+  const client = new OpenAI({
+    baseURL: `http://localhost:${PORT}/openai`,
+    apiKey: 'mock-api-key',
+  });
+
+  const response = await client.chat.completions.create({
+    model: 'gpt-3.5-turbo',
+    messages: [
+      { role: 'system', content: 'You are a helpful assistant.' },
+      { role: 'user', content: 'What is the capital of France?' },
+    ],
+    temperature: 0.7,
+    max_tokens: 100,
+  });
+
+  // eslint-disable-next-line no-console
+  console.log(JSON.stringify(response));
+
+  server.close();
+}
+
+run();
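
The scenario above only exercises the real openai package against a local mock server; the assertions live in a companion test. A hypothetical sketch of that assertion, mirroring the cloudflare diff at the top of this commit (the surrounding test runner and the spans variable are assumed, not shown here):

// Hypothetical companion assertion - the values come from the cloudflare test
// above; the auto-instrumented span should carry the new origin.
expect(spans).toEqual(
  expect.arrayContaining([
    expect.objectContaining({
      description: 'chat gpt-3.5-turbo',
      op: 'gen_ai.chat',
      origin: 'auto.function.openai',
    }),
  ]),
);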
