Skip to content

Commit 11221d6

Browse files
authored
fix(core): Fix truncation to only keep last message in vercel (#19080)
- no longer set deprecated `GEN_AI_PROMPT_ATTRIBUTE`
- `convertPromptToMessages` did not handle inputs that were already in messages format (fallback to `[]`), which was seemingly the culprit for truncation failing
- set the original Vercel `AI_PROMPT_ATTRIBUTE` to the truncated messages format irrespective of the input as well, so that we get truncation for the original Vercel attribute too (before, we sent all the messages in the original namespace)
- handle array format for prompt inputs as well

Tests:
- unit tests that `convertPromptToMessages` properly handles inputs that are already in messages format
- truncation tests for Vercel ai

Closes #19060
1 parent b9fbb9c commit 11221d6

File tree

6 files changed

+312
-55
lines changed

6 files changed

+312
-55
lines changed
Lines changed: 51 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,51 @@
1+
import * as Sentry from '@sentry/node';
2+
import { generateText } from 'ai';
3+
import { MockLanguageModelV1 } from 'ai/test';
4+
5+
/**
 * Integration scenario: verifies that Sentry's Vercel AI instrumentation
 * truncates recorded input messages — only the last message is kept, and an
 * oversized last message is cropped to fit the byte limit.
 */
async function run() {
  await Sentry.startSpan({ op: 'function', name: 'main' }, async () => {
    const bigA = 'A'.repeat(15000); // ~15KB
    const bigB = 'B'.repeat(15000); // ~15KB
    const oversized = 'C'.repeat(25000) + 'D'.repeat(25000); // ~50KB (will be truncated)
    const smallContent = 'This is a small message that fits within the limit';

    // Shared mock-model factory; every call yields the given fixed response text.
    const makeModel = text =>
      new MockLanguageModelV1({
        doGenerate: async () => ({
          rawCall: { rawPrompt: null, rawSettings: {} },
          finishReason: 'stop',
          usage: { promptTokens: 10, completionTokens: 5 },
          text,
        }),
      });

    // Test 1: Messages array with large last message that gets truncated.
    // Only the last message should be kept, and it should be truncated to only Cs.
    await generateText({
      experimental_telemetry: { isEnabled: true },
      model: makeModel('Response to truncated messages'),
      messages: [
        { role: 'user', content: bigA },
        { role: 'assistant', content: bigB },
        { role: 'user', content: oversized },
      ],
    });

    // Test 2: Messages array where last message is small and kept intact.
    await generateText({
      experimental_telemetry: { isEnabled: true },
      model: makeModel('Response to small message'),
      messages: [
        { role: 'user', content: bigA },
        { role: 'assistant', content: bigB },
        { role: 'user', content: smallContent },
      ],
    });
  });
}

run();

dev-packages/node-integration-tests/suites/tracing/vercelai/test.ts

Lines changed: 43 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -5,7 +5,6 @@ import {
55
GEN_AI_INPUT_MESSAGES_ATTRIBUTE,
66
GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE,
77
GEN_AI_OPERATION_NAME_ATTRIBUTE,
8-
GEN_AI_PROMPT_ATTRIBUTE,
98
GEN_AI_REQUEST_AVAILABLE_TOOLS_ATTRIBUTE,
109
GEN_AI_REQUEST_MODEL_ATTRIBUTE,
1110
GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE,
@@ -90,7 +89,6 @@ describe('Vercel AI integration', () => {
9089
// Third span - explicit telemetry enabled, should record inputs/outputs regardless of sendDefaultPii
9190
expect.objectContaining({
9291
data: {
93-
[GEN_AI_PROMPT_ATTRIBUTE]: '{"prompt":"Where is the second span?"}',
9492
[GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 1,
9593
[GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"Where is the second span?"}]',
9694
[GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id',
@@ -105,7 +103,7 @@ describe('Vercel AI integration', () => {
105103
'vercel.ai.model.provider': 'mock-provider',
106104
'vercel.ai.operationId': 'ai.generateText',
107105
'vercel.ai.pipeline.name': 'generateText',
108-
'vercel.ai.prompt': '{"prompt":"Where is the second span?"}',
106+
'vercel.ai.prompt': '[{"role":"user","content":"Where is the second span?"}]',
109107
'vercel.ai.response.finishReason': 'stop',
110108
'vercel.ai.settings.maxRetries': 2,
111109
'vercel.ai.settings.maxSteps': 1,
@@ -230,7 +228,6 @@ describe('Vercel AI integration', () => {
230228
// First span - no telemetry config, should enable telemetry AND record inputs/outputs when sendDefaultPii: true
231229
expect.objectContaining({
232230
data: {
233-
[GEN_AI_PROMPT_ATTRIBUTE]: '{"prompt":"Where is the first span?"}',
234231
[GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 1,
235232
[GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"Where is the first span?"}]',
236233
[GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id',
@@ -245,7 +242,7 @@ describe('Vercel AI integration', () => {
245242
'vercel.ai.model.provider': 'mock-provider',
246243
'vercel.ai.operationId': 'ai.generateText',
247244
'vercel.ai.pipeline.name': 'generateText',
248-
'vercel.ai.prompt': '{"prompt":"Where is the first span?"}',
245+
'vercel.ai.prompt': '[{"role":"user","content":"Where is the first span?"}]',
249246
'vercel.ai.response.finishReason': 'stop',
250247
'vercel.ai.settings.maxRetries': 2,
251248
'vercel.ai.settings.maxSteps': 1,
@@ -303,7 +300,6 @@ describe('Vercel AI integration', () => {
303300
// Third span - explicitly enabled telemetry, should record inputs/outputs regardless of sendDefaultPii
304301
expect.objectContaining({
305302
data: {
306-
[GEN_AI_PROMPT_ATTRIBUTE]: '{"prompt":"Where is the second span?"}',
307303
[GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 1,
308304
[GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"Where is the second span?"}]',
309305
[GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id',
@@ -318,7 +314,7 @@ describe('Vercel AI integration', () => {
318314
'vercel.ai.model.provider': 'mock-provider',
319315
'vercel.ai.operationId': 'ai.generateText',
320316
'vercel.ai.pipeline.name': 'generateText',
321-
'vercel.ai.prompt': '{"prompt":"Where is the second span?"}',
317+
'vercel.ai.prompt': '[{"role":"user","content":"Where is the second span?"}]',
322318
'vercel.ai.response.finishReason': 'stop',
323319
'vercel.ai.settings.maxRetries': 2,
324320
'vercel.ai.settings.maxSteps': 1,
@@ -375,7 +371,6 @@ describe('Vercel AI integration', () => {
375371
// Fifth span - tool call generateText span (should include prompts when sendDefaultPii: true)
376372
expect.objectContaining({
377373
data: {
378-
[GEN_AI_PROMPT_ATTRIBUTE]: '{"prompt":"What is the weather in San Francisco?"}',
379374
[GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 1,
380375
[GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"What is the weather in San Francisco?"}]',
381376
[GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id',
@@ -391,7 +386,7 @@ describe('Vercel AI integration', () => {
391386
'vercel.ai.model.provider': 'mock-provider',
392387
'vercel.ai.operationId': 'ai.generateText',
393388
'vercel.ai.pipeline.name': 'generateText',
394-
'vercel.ai.prompt': '{"prompt":"What is the weather in San Francisco?"}',
389+
'vercel.ai.prompt': '[{"role":"user","content":"What is the weather in San Francisco?"}]',
395390
'vercel.ai.response.finishReason': 'tool-calls',
396391
'vercel.ai.settings.maxRetries': 2,
397392
'vercel.ai.settings.maxSteps': 1,
@@ -796,4 +791,43 @@ describe('Vercel AI integration', () => {
796791
});
797792
},
798793
);
794+
795+
createEsmAndCjsTests(
796+
__dirname,
797+
'scenario-message-truncation.mjs',
798+
'instrument-with-pii.mjs',
799+
(createRunner, test) => {
800+
test('truncates messages when they exceed byte limit', async () => {
801+
await createRunner()
802+
.ignore('event')
803+
.expect({
804+
transaction: {
805+
transaction: 'main',
806+
spans: expect.arrayContaining([
807+
// First call: Last message truncated (only C's remain, D's are cropped)
808+
expect.objectContaining({
809+
data: expect.objectContaining({
810+
[GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 3,
811+
[GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: expect.stringMatching(/^\[.*"(?:text|content)":"C+".*\]$/),
812+
[GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: 'Response to truncated messages',
813+
}),
814+
}),
815+
// Second call: Last message is small and kept intact
816+
expect.objectContaining({
817+
data: expect.objectContaining({
818+
[GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 3,
819+
[GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: expect.stringContaining(
820+
'This is a small message that fits within the limit',
821+
),
822+
[GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: 'Response to small message',
823+
}),
824+
}),
825+
]),
826+
},
827+
})
828+
.start()
829+
.completed();
830+
});
831+
},
832+
);
799833
});

dev-packages/node-integration-tests/suites/tracing/vercelai/v5/test.ts

Lines changed: 4 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -5,7 +5,6 @@ import {
55
GEN_AI_INPUT_MESSAGES_ATTRIBUTE,
66
GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE,
77
GEN_AI_OPERATION_NAME_ATTRIBUTE,
8-
GEN_AI_PROMPT_ATTRIBUTE,
98
GEN_AI_REQUEST_AVAILABLE_TOOLS_ATTRIBUTE,
109
GEN_AI_REQUEST_MODEL_ATTRIBUTE,
1110
GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE,
@@ -92,12 +91,11 @@ describe('Vercel AI integration (V5)', () => {
9291
'vercel.ai.model.provider': 'mock-provider',
9392
'vercel.ai.operationId': 'ai.generateText',
9493
'vercel.ai.pipeline.name': 'generateText',
95-
'vercel.ai.prompt': '{"prompt":"Where is the second span?"}',
94+
'vercel.ai.prompt': '[{"role":"user","content":"Where is the second span?"}]',
9695
'vercel.ai.response.finishReason': 'stop',
9796
[GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: expect.any(String),
9897
'vercel.ai.settings.maxRetries': 2,
9998
'vercel.ai.streaming': false,
100-
[GEN_AI_PROMPT_ATTRIBUTE]: '{"prompt":"Where is the second span?"}',
10199
[GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 1,
102100
[GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"Where is the second span?"}]',
103101
[GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id',
@@ -229,14 +227,13 @@ describe('Vercel AI integration (V5)', () => {
229227
'vercel.ai.model.provider': 'mock-provider',
230228
'vercel.ai.operationId': 'ai.generateText',
231229
'vercel.ai.pipeline.name': 'generateText',
232-
'vercel.ai.prompt': '{"prompt":"Where is the first span?"}',
230+
'vercel.ai.prompt': '[{"role":"user","content":"Where is the first span?"}]',
233231
[GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 1,
234232
[GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"Where is the first span?"}]',
235233
'vercel.ai.response.finishReason': 'stop',
236234
[GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: 'First span here!',
237235
'vercel.ai.settings.maxRetries': 2,
238236
'vercel.ai.streaming': false,
239-
[GEN_AI_PROMPT_ATTRIBUTE]: '{"prompt":"Where is the first span?"}',
240237
[GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id',
241238
[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10,
242239
[GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 20,
@@ -290,14 +287,13 @@ describe('Vercel AI integration (V5)', () => {
290287
'vercel.ai.model.provider': 'mock-provider',
291288
'vercel.ai.operationId': 'ai.generateText',
292289
'vercel.ai.pipeline.name': 'generateText',
293-
'vercel.ai.prompt': '{"prompt":"Where is the second span?"}',
290+
'vercel.ai.prompt': '[{"role":"user","content":"Where is the second span?"}]',
294291
[GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 1,
295292
[GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"Where is the second span?"}]',
296293
'vercel.ai.response.finishReason': 'stop',
297294
[GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: expect.any(String),
298295
'vercel.ai.settings.maxRetries': 2,
299296
'vercel.ai.streaming': false,
300-
[GEN_AI_PROMPT_ATTRIBUTE]: '{"prompt":"Where is the second span?"}',
301297
[GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id',
302298
[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10,
303299
[GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 20,
@@ -350,14 +346,13 @@ describe('Vercel AI integration (V5)', () => {
350346
'vercel.ai.model.provider': 'mock-provider',
351347
'vercel.ai.operationId': 'ai.generateText',
352348
'vercel.ai.pipeline.name': 'generateText',
353-
'vercel.ai.prompt': '{"prompt":"What is the weather in San Francisco?"}',
349+
'vercel.ai.prompt': '[{"role":"user","content":"What is the weather in San Francisco?"}]',
354350
[GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 1,
355351
[GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"What is the weather in San Francisco?"}]',
356352
'vercel.ai.response.finishReason': 'tool-calls',
357353
[GEN_AI_RESPONSE_TOOL_CALLS_ATTRIBUTE]: expect.any(String),
358354
'vercel.ai.settings.maxRetries': 2,
359355
'vercel.ai.streaming': false,
360-
[GEN_AI_PROMPT_ATTRIBUTE]: '{"prompt":"What is the weather in San Francisco?"}',
361356
[GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id',
362357
[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 15,
363358
[GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 25,

dev-packages/node-integration-tests/suites/tracing/vercelai/v6/test.ts

Lines changed: 4 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -4,7 +4,6 @@ import { afterAll, describe, expect } from 'vitest';
44
import {
55
GEN_AI_INPUT_MESSAGES_ATTRIBUTE,
66
GEN_AI_OPERATION_NAME_ATTRIBUTE,
7-
GEN_AI_PROMPT_ATTRIBUTE,
87
GEN_AI_REQUEST_AVAILABLE_TOOLS_ATTRIBUTE,
98
GEN_AI_REQUEST_MODEL_ATTRIBUTE,
109
GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE,
@@ -93,13 +92,12 @@ describe('Vercel AI integration (V6)', () => {
9392
'vercel.ai.model.provider': 'mock-provider',
9493
'vercel.ai.operationId': 'ai.generateText',
9594
'vercel.ai.pipeline.name': 'generateText',
96-
'vercel.ai.prompt': '{"prompt":"Where is the second span?"}',
95+
'vercel.ai.prompt': '[{"role":"user","content":"Where is the second span?"}]',
9796
'vercel.ai.request.headers.user-agent': expect.any(String),
9897
'vercel.ai.response.finishReason': 'stop',
9998
[GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: expect.any(String),
10099
'vercel.ai.settings.maxRetries': 2,
101100
'vercel.ai.streaming': false,
102-
[GEN_AI_PROMPT_ATTRIBUTE]: '{"prompt":"Where is the second span?"}',
103101
[GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"Where is the second span?"}]',
104102
[GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id',
105103
[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10,
@@ -232,14 +230,13 @@ describe('Vercel AI integration (V6)', () => {
232230
'vercel.ai.model.provider': 'mock-provider',
233231
'vercel.ai.operationId': 'ai.generateText',
234232
'vercel.ai.pipeline.name': 'generateText',
235-
'vercel.ai.prompt': '{"prompt":"Where is the first span?"}',
233+
'vercel.ai.prompt': '[{"role":"user","content":"Where is the first span?"}]',
236234
'vercel.ai.request.headers.user-agent': expect.any(String),
237235
[GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"Where is the first span?"}]',
238236
'vercel.ai.response.finishReason': 'stop',
239237
[GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: 'First span here!',
240238
'vercel.ai.settings.maxRetries': 2,
241239
'vercel.ai.streaming': false,
242-
[GEN_AI_PROMPT_ATTRIBUTE]: '{"prompt":"Where is the first span?"}',
243240
[GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id',
244241
[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10,
245242
[GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 20,
@@ -293,14 +290,13 @@ describe('Vercel AI integration (V6)', () => {
293290
'vercel.ai.model.provider': 'mock-provider',
294291
'vercel.ai.operationId': 'ai.generateText',
295292
'vercel.ai.pipeline.name': 'generateText',
296-
'vercel.ai.prompt': '{"prompt":"Where is the second span?"}',
293+
'vercel.ai.prompt': '[{"role":"user","content":"Where is the second span?"}]',
297294
'vercel.ai.request.headers.user-agent': expect.any(String),
298295
[GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"Where is the second span?"}]',
299296
'vercel.ai.response.finishReason': 'stop',
300297
[GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: expect.any(String),
301298
'vercel.ai.settings.maxRetries': 2,
302299
'vercel.ai.streaming': false,
303-
[GEN_AI_PROMPT_ATTRIBUTE]: '{"prompt":"Where is the second span?"}',
304300
[GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id',
305301
[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10,
306302
[GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 20,
@@ -353,14 +349,13 @@ describe('Vercel AI integration (V6)', () => {
353349
'vercel.ai.model.provider': 'mock-provider',
354350
'vercel.ai.operationId': 'ai.generateText',
355351
'vercel.ai.pipeline.name': 'generateText',
356-
'vercel.ai.prompt': '{"prompt":"What is the weather in San Francisco?"}',
352+
'vercel.ai.prompt': '[{"role":"user","content":"What is the weather in San Francisco?"}]',
357353
'vercel.ai.request.headers.user-agent': expect.any(String),
358354
[GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"What is the weather in San Francisco?"}]',
359355
'vercel.ai.response.finishReason': 'tool-calls',
360356
[GEN_AI_RESPONSE_TOOL_CALLS_ATTRIBUTE]: expect.any(String),
361357
'vercel.ai.settings.maxRetries': 2,
362358
'vercel.ai.streaming': false,
363-
[GEN_AI_PROMPT_ATTRIBUTE]: '{"prompt":"What is the weather in San Francisco?"}',
364359
[GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id',
365360
[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 15,
366361
[GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 25,

0 commit comments

Comments
 (0)