Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
@@ -0,0 +1,23 @@
import * as Sentry from '@sentry/node';
import { loggingTransport } from '@sentry-internal/node-integration-tests';

// Instrumentation file for the "no truncation" OpenAI scenario: records
// inputs/outputs verbatim by disabling truncation on the integration.
Sentry.init({
  dsn: 'https://public@dsn.ingest.sentry.io/1337',
  release: '1.0',
  tracesSampleRate: 1.0,
  sendDefaultPii: false,
  transport: loggingTransport,
  integrations: [
    Sentry.openAIIntegration({
      recordInputs: true,
      recordOutputs: true,
      enableTruncation: false,
    }),
  ],
  beforeSendTransaction: event => {
    // Drop the express http transaction so the test runner's
    // `.expect({ transaction })` matches the AI transaction instead;
    // the other openai scenarios do the same.
    // `event.transaction` is optional on the event payload, so guard with
    // optional chaining rather than calling `.includes` on undefined.
    if (event.transaction?.includes('/openai/')) {
      return null;
    }
    return event;
  },
});
Original file line number Diff line number Diff line change
@@ -0,0 +1,81 @@
import * as Sentry from '@sentry/node';
import express from 'express';
import OpenAI from 'openai';

/**
 * Start a local express server that mimics the OpenAI REST API with canned
 * payloads for the chat-completions and responses endpoints.
 *
 * @returns a promise resolving to the listening http server (random port).
 */
function startMockServer() {
  const mockApi = express();
  mockApi.use(express.json({ limit: '10mb' }));

  // Canned chat-completion reply; echoes back the requested model.
  const handleChatCompletions = (req, res) => {
    res.send({
      id: 'chatcmpl-mock123',
      object: 'chat.completion',
      created: 1677652288,
      model: req.body.model,
      choices: [
        {
          index: 0,
          message: { role: 'assistant', content: 'Hello!' },
          finish_reason: 'stop',
        },
      ],
      usage: { prompt_tokens: 10, completion_tokens: 5, total_tokens: 15 },
    });
  };

  // Canned Responses API reply; echoes back the requested model.
  const handleResponses = (req, res) => {
    res.send({
      id: 'resp_mock456',
      object: 'response',
      created_at: 1677652290,
      model: req.body.model,
      output: [
        {
          type: 'message',
          id: 'msg_mock_output_1',
          status: 'completed',
          role: 'assistant',
          content: [{ type: 'output_text', text: 'Response text', annotations: [] }],
        },
      ],
      output_text: 'Response text',
      status: 'completed',
      usage: { input_tokens: 5, output_tokens: 3, total_tokens: 8 },
    });
  };

  mockApi.post('/openai/chat/completions', handleChatCompletions);
  mockApi.post('/openai/responses', handleResponses);

  // Resolve only once the server is actually accepting connections.
  return new Promise(resolve => {
    const server = mockApi.listen(0, () => resolve(server));
  });
}

/**
 * Exercise the instrumented OpenAI client against the mock server with
 * oversized inputs; with truncation disabled these should be recorded
 * verbatim on the resulting spans.
 */
async function run() {
  const server = await startMockServer();

  await Sentry.startSpan({ op: 'function', name: 'main' }, async () => {
    const { port } = server.address();
    const client = new OpenAI({
      baseURL: `http://localhost:${port}/openai`,
      apiKey: 'mock-api-key',
    });

    // Chat completion with long content (would normally be truncated)
    const oversizedMessage = 'A'.repeat(50_000);
    await client.chat.completions.create({
      model: 'gpt-4',
      messages: [{ role: 'user', content: oversizedMessage }],
    });

    // Responses API with long string input (would normally be truncated)
    const oversizedStringInput = 'B'.repeat(50_000);
    await client.responses.create({
      model: 'gpt-4',
      input: oversizedStringInput,
    });
  });

  server.close();
}

run();
37 changes: 37 additions & 0 deletions dev-packages/node-integration-tests/suites/tracing/openai/test.ts
Original file line number Diff line number Diff line change
Expand Up @@ -345,6 +345,43 @@ describe('OpenAI integration', () => {
});
});

// 50k characters of input — long enough that truncation, if it were applied,
// would visibly alter the recorded attribute value.
const longContent = 'A'.repeat(50_000);

// Expected event shape for the no-truncation scenario, where the integration
// is configured with `enableTruncation: false`.
const EXPECTED_TRANSACTION_NO_TRUNCATION = {
  transaction: 'main',
  spans: expect.arrayContaining([
    // Chat completion with long content should not be truncated
    expect.objectContaining({
      data: expect.objectContaining({
        [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: JSON.stringify([{ role: 'user', content: longContent }]),
        [GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 1,
      }),
    }),
    // Responses API long string input should not be truncated or wrapped in quotes
    expect.objectContaining({
      data: expect.objectContaining({
        [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: 'B'.repeat(50_000),
        [GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 1,
      }),
    }),
  ]),
};

// Run the no-truncation scenario under both ESM and CJS. The instrument file
// disables truncation (`enableTruncation: false`); `.ignore('event')` skips
// non-transaction envelopes so the expectation matches the transaction only.
createEsmAndCjsTests(
  __dirname,
  'scenario-no-truncation.mjs',
  'instrument-no-truncation.mjs',
  (createRunner, test) => {
    test('does not truncate input messages when enableTruncation is false', async () => {
      await createRunner()
        .ignore('event')
        .expect({ transaction: EXPECTED_TRANSACTION_NO_TRUNCATION })
        .start()
        .completed();
    });
  },
);

const EXPECTED_TRANSACTION_DEFAULT_PII_FALSE_EMBEDDINGS = {
transaction: 'main',
spans: expect.arrayContaining([
Expand Down
11 changes: 11 additions & 0 deletions packages/core/src/tracing/ai/utils.ts
Original file line number Diff line number Diff line change
Expand Up @@ -169,6 +169,17 @@ export function endStreamSpan(span: Span, state: StreamResponseState, recordOutp
span.end();
}

/**
 * Serialize a value to a JSON string without truncation.
 *
 * Strings are returned as-is (not wrapped in quotes); all other values are
 * JSON-stringified.
 *
 * @param value - The value to serialize.
 * @returns The serialized string; always a string, even for values
 *   `JSON.stringify` cannot represent.
 */
export function getJsonString<T>(value: T | T[]): string {
  if (typeof value === 'string') {
    return value;
  }
  // `JSON.stringify` returns `undefined` (not a string) for `undefined`,
  // functions, and symbols — coalesce so the declared `string` return type
  // actually holds for every input.
  return JSON.stringify(value) ?? String(value);
}

/**
* Get the truncated JSON string for a string or array of strings.
*
Expand Down
18 changes: 13 additions & 5 deletions packages/core/src/tracing/openai/index.ts
Original file line number Diff line number Diff line change
Expand Up @@ -19,6 +19,7 @@ import type { InstrumentedMethodEntry } from '../ai/utils';
import {
buildMethodPath,
extractSystemInstructions,
getJsonString,
getTruncatedJsonString,
resolveAIRecordingOptions,
wrapPromiseWithMethods,
Expand Down Expand Up @@ -78,7 +79,12 @@ function extractRequestAttributes(args: unknown[], operationName: string): Recor
}

// Extract and record AI request inputs, if present. This is intentionally separate from response attributes.
function addRequestAttributes(span: Span, params: Record<string, unknown>, operationName: string): void {
function addRequestAttributes(
span: Span,
params: Record<string, unknown>,
operationName: string,
enableTruncation: boolean,
): void {
// Store embeddings input on a separate attribute and do not truncate it
if (operationName === 'embeddings' && 'input' in params) {
const input = params.input;
Expand Down Expand Up @@ -119,8 +125,10 @@ function addRequestAttributes(span: Span, params: Record<string, unknown>, opera
span.setAttribute(GEN_AI_SYSTEM_INSTRUCTIONS_ATTRIBUTE, systemInstructions);
}

const truncatedInput = getTruncatedJsonString(filteredMessages);
span.setAttribute(GEN_AI_INPUT_MESSAGES_ATTRIBUTE, truncatedInput);
span.setAttribute(
GEN_AI_INPUT_MESSAGES_ATTRIBUTE,
enableTruncation ? getTruncatedJsonString(filteredMessages) : getJsonString(filteredMessages),
);

if (Array.isArray(filteredMessages)) {
span.setAttribute(GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE, filteredMessages.length);
Expand Down Expand Up @@ -162,7 +170,7 @@ function instrumentMethod<T extends unknown[], R>(
originalResult = originalMethod.apply(context, args);

if (options.recordInputs && params) {
addRequestAttributes(span, params, operationName);
addRequestAttributes(span, params, operationName, options.enableTruncation ?? true);
}

// Return async processing
Expand Down Expand Up @@ -200,7 +208,7 @@ function instrumentMethod<T extends unknown[], R>(
originalResult = originalMethod.apply(context, args);

if (options.recordInputs && params) {
addRequestAttributes(span, params, operationName);
addRequestAttributes(span, params, operationName, options.enableTruncation ?? true);
}

return originalResult.then(
Expand Down
5 changes: 5 additions & 0 deletions packages/core/src/tracing/openai/types.ts
Original file line number Diff line number Diff line change
Expand Up @@ -22,6 +22,11 @@ export interface OpenAiOptions {
* Enable or disable output recording.
*/
recordOutputs?: boolean;
/**
* Enable or disable truncation of recorded input messages.
* Defaults to `true`.
*/
enableTruncation?: boolean;
}

export interface OpenAiClient {
Expand Down
Loading