diff --git a/CHANGELOG.md b/CHANGELOG.md index 56351c03..f43a707b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -33,8 +33,12 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - **`BaggageBuilder.sourceMetadataName()` renamed to `BaggageBuilder.channelName()`** — Fluent setter for the channel name baggage value. - **`BaggageBuilder.sourceMetadataDescription()` renamed to `BaggageBuilder.channelLink()`** — Fluent setter for the channel link baggage value. +- **`OutputResponse.messages` type changed from `string[]` to `OutputMessages`** — The `OutputMessages` union type (`string[] | OutputMessage[]`) allows passing structured OTEL gen-ai `OutputMessage` objects with `finish_reason`, multi-modal parts, etc. Existing code passing `string[]` continues to work (auto-converted to OTEL format internally). +- **`recordInputMessages()` / `recordOutputMessages()` parameter type widened** — Methods now accept `InputMessages` (`string[] | ChatMessage[]`) and `OutputMessages` (`string[] | OutputMessage[]`). Plain `string[]` input is auto-wrapped to OTEL gen-ai format. These methods are no longer available on `ExecuteToolScope`. + ### Added (`@microsoft/agents-a365-observability`) +- **OTEL Gen-AI Message Format types** — New types aligned with [OpenTelemetry Gen-AI semantic conventions](https://opentelemetry.io/docs/specs/semconv/gen-ai/): `MessageRole`, `FinishReason`, `Modality`, `ChatMessage`, `OutputMessage`, `InputMessages`, `OutputMessages`, and discriminated `MessagePart` union (`TextPart`, `ToolCallRequestPart`, `ToolCallResponsePart`, `ReasoningPart`, `BlobPart`, `FilePart`, `UriPart`, `ServerToolCallPart`, `ServerToolCallResponsePart`, `GenericPart`). - **`SpanDetails`** — New interface grouping `parentContext`, `startTime`, `endTime`, `spanKind` for scope construction. - **`CallerDetails`** — New interface wrapping `userDetails` and `callerAgentDetails` for `InvokeAgentScope`. 
- **`Request`** — Unified request context interface (`conversationId`, `channel`, `content`, `sessionId`) used across all scopes. diff --git a/packages/agents-a365-observability/docs/design.md b/packages/agents-a365-observability/docs/design.md index 19663470..fd5e4583 100644 --- a/packages/agents-a365-observability/docs/design.md +++ b/packages/agents-a365-observability/docs/design.md @@ -211,6 +211,103 @@ using scope = ExecuteToolScope.start( scope.recordResponse('Tool result'); ``` +#### OutputScope ([OutputScope.ts](../src/tracing/scopes/OutputScope.ts)) + +Traces outgoing agent output messages: + +```typescript +import { OutputScope, OutputResponse } from '@microsoft/agents-a365-observability'; + +const response: OutputResponse = { messages: ['Hello!', 'How can I help?'] }; + +using scope = OutputScope.start( + { conversationId: 'conv-123', channel: { name: 'Teams' } }, + response, + agentDetails // Must include tenantId +); + +scope.recordOutputMessages(['Additional response']); +// Messages are flushed to the span attribute on dispose +``` + +### Message Format (OTEL Gen-AI Semantic Conventions) + +The SDK uses [OpenTelemetry Gen-AI semantic conventions](https://opentelemetry.io/docs/specs/semconv/gen-ai/) for message tracing. All `recordInputMessages`/`recordOutputMessages` methods accept both plain strings and structured OTEL message objects. 
+ +#### Message Types ([contracts.ts](../src/tracing/contracts.ts)) + +| Type | Description | +|------|-------------| +| `ChatMessage` | Input message with `role`, `parts[]`, and optional `name` | +| `OutputMessage` | Output message extending `ChatMessage` with `finish_reason` | +| `InputMessages` | Union: `string[] \| ChatMessage[]` | +| `OutputMessages` | Union: `string[] \| OutputMessage[]` | +| `MessageRole` | Enum: `system`, `user`, `assistant`, `tool` | +| `FinishReason` | Enum: `stop`, `length`, `content_filter`, `tool_call`, `error` | +| `MessagePart` | Discriminated union of all content part types | + +#### Message Part Types + +| Part Type | `type` Discriminator | Purpose | +|-----------|---------------------|---------| +| `TextPart` | `text` | Plain text content | +| `ToolCallRequestPart` | `tool_call` | Tool invocation by the model | +| `ToolCallResponsePart` | `tool_call_response` | Tool execution result | +| `ReasoningPart` | `reasoning` | Chain-of-thought / reasoning content | +| `BlobPart` | `blob` | Inline base64 binary data (image, audio, video) | +| `FilePart` | `file` | Reference to a pre-uploaded file | +| `UriPart` | `uri` | External URI reference | +| `ServerToolCallPart` | `server_tool_call` | Server-side tool invocation | +| `ServerToolCallResponsePart` | `server_tool_call_response` | Server-side tool response | +| `GenericPart` | *(custom)* | Extensible part for future types | + +#### Auto-Wrapping Behavior + +Plain `string[]` input is automatically wrapped to OTEL format: +- Input strings become `ChatMessage` with `role: 'user'` and a single `TextPart` +- Output strings become `OutputMessage` with `role: 'assistant'` and a single `TextPart` + +#### Structured Message Example + +```typescript +import { ChatMessage, OutputMessage, MessageRole, FinishReason } from '@microsoft/agents-a365-observability'; + +// Structured input with system prompt and user message +const input: ChatMessage[] = [ + { role: MessageRole.SYSTEM, parts: [{ type: 
'text', content: 'You are a helpful assistant.' }] }, + { role: MessageRole.USER, parts: [{ type: 'text', content: 'What is the weather?' }] } +]; +scope.recordInputMessages(input); + +// Structured output with tool call and finish reason +const output: OutputMessage[] = [{ + role: MessageRole.ASSISTANT, + parts: [ + { type: 'text', content: 'Let me check that for you.' }, + { type: 'tool_call', name: 'get_weather', id: 'call_1', arguments: { city: 'Seattle' } } + ], + finish_reason: FinishReason.TOOL_CALL +}]; +scope.recordOutputMessages(output); +``` + +#### Message Serialization and Truncation ([message-utils.ts](../src/tracing/message-utils.ts)) + +Messages are serialized to JSON and stored as span attributes. When the serialized output exceeds `MAX_ATTRIBUTE_LENGTH` (8192 chars), a binary-search algorithm finds the maximum number of leading messages that fit, appending a sentinel message indicating how many were dropped. Single messages exceeding the limit fall back to string truncation. + +#### Scope Visibility + +`recordInputMessages`/`recordOutputMessages` are `protected` on the base `OpenTelemetryScope` class and exposed as `public` only on scopes where they are semantically appropriate: + +| Scope | `recordInputMessages` | `recordOutputMessages` | +|-------|----------------------|----------------------| +| `InvokeAgentScope` | public | public | +| `InferenceScope` | public | public | +| `OutputScope` | — | public (accumulating) | +| `ExecuteToolScope` | — | — | + +`ExecuteToolScope` records tool input/output via `ToolCallDetails.arguments` and `recordResponse()` instead. 
+ ### BaggageBuilder ([BaggageBuilder.ts](../src/tracing/middleware/BaggageBuilder.ts)) Fluent API for setting OpenTelemetry baggage: @@ -387,12 +484,14 @@ src/ ├── ObservabilityBuilder.ts # Configuration builder ├── tracing/ │ ├── constants.ts # OpenTelemetry attribute keys -│ ├── contracts.ts # Data interfaces and enums +│ ├── contracts.ts # Data interfaces, enums, OTEL message types +│ ├── message-utils.ts # Message conversion and serialization │ ├── scopes/ │ │ ├── OpenTelemetryScope.ts # Base scope class │ │ ├── InvokeAgentScope.ts # Agent invocation tracing │ │ ├── InferenceScope.ts # LLM inference tracing -│ │ └── ExecuteToolScope.ts # Tool execution tracing +│ │ ├── ExecuteToolScope.ts # Tool execution tracing +│ │ └── OutputScope.ts # Output message tracing │ ├── middleware/ │ │ └── BaggageBuilder.ts # Baggage context builder │ ├── processors/ diff --git a/packages/agents-a365-observability/src/index.ts b/packages/agents-a365-observability/src/index.ts index 4366cfa8..a2a942ca 100644 --- a/packages/agents-a365-observability/src/index.ts +++ b/packages/agents-a365-observability/src/index.ts @@ -39,6 +39,7 @@ export { InvokeAgentScopeDetails, UserDetails, CallerDetails, + // eslint-disable-next-line @typescript-eslint/no-deprecated -- intentional re-export for backward compatibility EnhancedAgentDetails, ServiceEndpoint, InferenceDetails, @@ -46,6 +47,25 @@ export { InferenceResponse, OutputResponse, SpanDetails, + // OTEL gen-ai message format types + MessageRole, + FinishReason, + Modality, + TextPart, + ToolCallRequestPart, + ToolCallResponsePart, + ReasoningPart, + BlobPart, + FilePart, + UriPart, + ServerToolCallPart, + ServerToolCallResponsePart, + GenericPart, + MessagePart, + ChatMessage, + OutputMessage, + InputMessages, + OutputMessages, } from './tracing/contracts'; // Scopes diff --git a/packages/agents-a365-observability/src/tracing/contracts.ts b/packages/agents-a365-observability/src/tracing/contracts.ts index e58b335e..c37476bf 100644 --- 
a/packages/agents-a365-observability/src/tracing/contracts.ts +++ b/packages/agents-a365-observability/src/tracing/contracts.ts @@ -21,7 +21,6 @@ export enum ExecutionType { Unknown = 'Unknown' } - /** * Represents different roles that can invoke an agent */ @@ -43,12 +42,11 @@ export enum InvocationRole { * Represents different operation for types for model inference */ export enum InferenceOperationType { - CHAT = 'Chat', - TEXT_COMPLETION = 'TextCompletion', - GENERATE_CONTENT = 'GenerateContent' + CHAT = 'Chat', + TEXT_COMPLETION = 'TextCompletion', + GENERATE_CONTENT = 'GenerateContent' } - /** * Represents channel for an invocation */ @@ -192,7 +190,7 @@ export interface CallerDetails { callerAgentDetails?: AgentDetails; } -/* +/** * @deprecated Use AgentDetails. EnhancedAgentDetails is now an alias of AgentDetails. */ export type EnhancedAgentDetails = AgentDetails; @@ -209,7 +207,6 @@ export interface ServiceEndpoint { /** The protocol (e.g., http, https) */ protocol?: string; - } /** @@ -267,16 +264,16 @@ export interface InferenceResponse { /** Number of output tokens generated */ outputTokens?: number; - } /** * Represents a response containing output messages from an agent. * Used with OutputScope for output message tracing. + * Accepts plain strings or structured OTEL OutputMessage objects. 
*/ export interface OutputResponse { /** The output messages from the agent */ - messages: string[]; + messages: OutputMessages; } /** @@ -303,3 +300,149 @@ export interface SpanDetails { spanLinks?: Link[]; } +// --------------------------------------------------------------------------- +// OpenTelemetry Semantic Convention – Gen-AI Message Format +// https://opentelemetry.io/docs/specs/semconv/gen-ai/gen-ai-input-messages.json +// https://opentelemetry.io/docs/specs/semconv/gen-ai/gen-ai-output-messages.json +// --------------------------------------------------------------------------- + +/** + * Role of a message participant per OTEL gen-ai semantic conventions. + */ +export enum MessageRole { + SYSTEM = 'system', + USER = 'user', + ASSISTANT = 'assistant', + TOOL = 'tool' +} + +/** + * Reason a model stopped generating per OTEL gen-ai semantic conventions. + */ +export enum FinishReason { + STOP = 'stop', + LENGTH = 'length', + CONTENT_FILTER = 'content_filter', + TOOL_CALL = 'tool_call', + ERROR = 'error' +} + +/** + * Media modality for blob, file, and URI parts. + */ +export enum Modality { + IMAGE = 'image', + VIDEO = 'video', + AUDIO = 'audio' +} + +// ---- Message part types (discriminated union on `type`) -------------------- + +/** Plain text content. */ +export interface TextPart { + type: 'text'; + content: string; +} + +/** A tool call requested by the model. */ +export interface ToolCallRequestPart { + type: 'tool_call'; + name: string; + id?: string; + arguments?: unknown; +} + +/** Result of a tool call. */ +export interface ToolCallResponsePart { + type: 'tool_call_response'; + id?: string; + response?: unknown; +} + +/** Model reasoning / chain-of-thought content. */ +export interface ReasoningPart { + type: 'reasoning'; + content: string; +} + +/** Inline binary data (base64-encoded). 
 */ +export interface BlobPart { + type: 'blob'; + modality: Modality | string; + mime_type?: string; + content: string; +} + +/** Reference to a pre-uploaded file. */ +export interface FilePart { + type: 'file'; + modality: Modality | string; + mime_type?: string; + file_id: string; +} + +/** External URI reference. */ +export interface UriPart { + type: 'uri'; + modality: Modality | string; + mime_type?: string; + uri: string; +} + +/** Server-side tool invocation. */ +export interface ServerToolCallPart { + type: 'server_tool_call'; + name: string; + id?: string; + server_tool_call: Record<string, unknown>; +} + +/** Server-side tool response. */ +export interface ServerToolCallResponsePart { + type: 'server_tool_call_response'; + id?: string; + server_tool_call_response: Record<string, unknown>; +} + +/** Extensible part for custom / future types. */ +export interface GenericPart { + type: string; + [key: string]: unknown; +} + +/** + * Union of all message part types per OTEL gen-ai semantic conventions. + */ +export type MessagePart = + | TextPart + | ToolCallRequestPart + | ToolCallResponsePart + | ReasoningPart + | BlobPart + | FilePart + | UriPart + | ServerToolCallPart + | ServerToolCallResponsePart + | GenericPart; + +/** + * An input message sent to a model (OTEL gen-ai semantic conventions). + */ +export interface ChatMessage { + role: MessageRole | string; + parts: MessagePart[]; + name?: string; +} + +/** + * An output message produced by a model (OTEL gen-ai semantic conventions). + */ +export interface OutputMessage extends ChatMessage { + finish_reason?: FinishReason | string; +} + +/** Accepted input for `recordInputMessages`. */ +export type InputMessages = string[] | ChatMessage[]; + +/** Accepted input for `recordOutputMessages`. 
*/ +export type OutputMessages = string[] | OutputMessage[]; diff --git a/packages/agents-a365-observability/src/tracing/message-utils.ts b/packages/agents-a365-observability/src/tracing/message-utils.ts new file mode 100644 index 00000000..b6323eb9 --- /dev/null +++ b/packages/agents-a365-observability/src/tracing/message-utils.ts @@ -0,0 +1,98 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +import { + ChatMessage, + OutputMessage, + MessageRole, + InputMessages, + OutputMessages +} from './contracts'; +import { MAX_ATTRIBUTE_LENGTH, truncateValue } from './util'; + +/** + * Type guard that returns `true` when the array contains plain strings + * rather than structured OTEL message objects. + * Note: empty arrays are treated as string[] (both paths produce the same `[]` result). + */ +export function isStringArray(arr: InputMessages | OutputMessages): arr is string[] { + return arr.length === 0 || typeof arr[0] === 'string'; +} + +/** + * Converts an array of plain strings into OTEL {@link ChatMessage} objects + * with role `user` and a single `TextPart`. + */ +export function toInputMessages(messages: string[]): ChatMessage[] { + return messages.map((content) => ({ + role: MessageRole.USER, + parts: [{ type: 'text' as const, content }] + })); +} + +/** + * Converts an array of plain strings into OTEL {@link OutputMessage} objects + * with role `assistant` and a single `TextPart`. + */ +export function toOutputMessages(messages: string[]): OutputMessage[] { + return messages.map((content) => ({ + role: MessageRole.ASSISTANT, + parts: [{ type: 'text' as const, content }] + })); +} + +/** + * Serializes a message array to JSON, truncating trailing messages + * with a sentinel when the result exceeds {@link MAX_ATTRIBUTE_LENGTH}. 
+ */ +export function serializeMessages<T>(messages: T[]): string { + const json = JSON.stringify(messages); + if (json.length <= MAX_ATTRIBUTE_LENGTH) { + return json; + } + + const total = messages.length; + const serialized = messages.map((m) => JSON.stringify(m)); + + // Precompute prefix sums so we can get the length of any slice in O(1). + const prefixLen = new Array<number>(total + 1); + prefixLen[0] = 0; + for (let i = 0; i < total; i++) { + prefixLen[i + 1] = prefixLen[i] + serialized[i].length + 1; // +1 for comma + } + + // Precompute sentinel lengths for each possible drop count. + // The sentinel text varies with digit width, so cache per-count. + const sentinelLenCache = new Array<number>(total); + for (let dropped = 1; dropped < total; dropped++) { + sentinelLenCache[dropped] = buildSentinel(dropped, total).length; + } + + // Binary search for the max count in [1, total-1] that fits. + let lo = 1; + let hi = total - 1; + let bestCount = 0; + + while (lo <= hi) { + const mid = (lo + hi) >>> 1; + // Array: '[' + count items with commas + sentinel + ']' + const len = 2 + prefixLen[mid] + sentinelLenCache[total - mid]; + if (len <= MAX_ATTRIBUTE_LENGTH) { + bestCount = mid; + lo = mid + 1; + } else { + hi = mid - 1; + } + } + + if (bestCount > 0) { + const sentinel = buildSentinel(total - bestCount, total); + return '[' + serialized.slice(0, bestCount).join(',') + ',' + sentinel + ']'; + } + + return truncateValue('[' + serialized[0] + ']'); +} + +function buildSentinel(dropped: number, total: number): string { + return JSON.stringify({ role: MessageRole.SYSTEM, parts: [{ type: 'text' as const, content: `[truncated: ${dropped} of ${total} messages omitted]` }] }); +} diff --git a/packages/agents-a365-observability/src/tracing/scopes/ExecuteToolScope.ts b/packages/agents-a365-observability/src/tracing/scopes/ExecuteToolScope.ts index 032ede0c..c69c3078 100644 --- a/packages/agents-a365-observability/src/tracing/scopes/ExecuteToolScope.ts +++ 
b/packages/agents-a365-observability/src/tracing/scopes/ExecuteToolScope.ts @@ -89,4 +89,5 @@ export class ExecuteToolScope extends OpenTelemetryScope { public recordResponse(response: string): void { this.setTagMaybe(OpenTelemetryConstants.GEN_AI_TOOL_CALL_RESULT_KEY, response); } + } diff --git a/packages/agents-a365-observability/src/tracing/scopes/InferenceScope.ts b/packages/agents-a365-observability/src/tracing/scopes/InferenceScope.ts index 0f663bbf..8c07fbfa 100644 --- a/packages/agents-a365-observability/src/tracing/scopes/InferenceScope.ts +++ b/packages/agents-a365-observability/src/tracing/scopes/InferenceScope.ts @@ -10,6 +10,8 @@ import { UserDetails, Request, SpanDetails, + InputMessages, + OutputMessages, } from '../contracts'; /** @@ -81,22 +83,6 @@ export class InferenceScope extends OpenTelemetryScope { } } - /** - * Records the input messages for telemetry tracking. - * @param messages Array of input messages - */ - public recordInputMessages(messages: string[]): void { - this.setTagMaybe(OpenTelemetryConstants.GEN_AI_INPUT_MESSAGES_KEY, JSON.stringify(messages)); - } - - /** - * Records the output messages for telemetry tracking. - * @param messages Array of output messages - */ - public recordOutputMessages(messages: string[]): void { - this.setTagMaybe(OpenTelemetryConstants.GEN_AI_OUTPUT_MESSAGES_KEY, JSON.stringify(messages)); - } - /** * Records the number of input tokens for telemetry tracking. * @param inputTokens Number of input tokens @@ -123,4 +109,21 @@ export class InferenceScope extends OpenTelemetryScope { } } + /** + * Records the input messages for telemetry tracking. + * Accepts plain strings (auto-wrapped as OTEL ChatMessage with role `user`) or structured ChatMessage objects. 
+ * @param messages Array of input message strings or ChatMessage objects + */ + public override recordInputMessages(messages: InputMessages): void { + super.recordInputMessages(messages); + } + + /** + * Records the output messages for telemetry tracking. + * Accepts plain strings (auto-wrapped as OTEL OutputMessage with role `assistant`) or structured OutputMessage objects. + * @param messages Array of output message strings or OutputMessage objects + */ + public override recordOutputMessages(messages: OutputMessages): void { + super.recordOutputMessages(messages); + } } diff --git a/packages/agents-a365-observability/src/tracing/scopes/InvokeAgentScope.ts b/packages/agents-a365-observability/src/tracing/scopes/InvokeAgentScope.ts index c1db9ac8..6e4c679e 100644 --- a/packages/agents-a365-observability/src/tracing/scopes/InvokeAgentScope.ts +++ b/packages/agents-a365-observability/src/tracing/scopes/InvokeAgentScope.ts @@ -9,6 +9,8 @@ import { Request, SpanDetails, AgentDetails, + InputMessages, + OutputMessages, } from '../contracts'; import { OpenTelemetryConstants } from '../constants'; @@ -108,17 +110,19 @@ export class InvokeAgentScope extends OpenTelemetryScope { /** * Records the input messages for telemetry tracking. - * @param messages Array of input messages + * Accepts plain strings (auto-wrapped as OTEL ChatMessage with role `user`) or structured ChatMessage objects. + * @param messages Array of input message strings or ChatMessage objects */ - public recordInputMessages(messages: string[]): void { - this.setTagMaybe(OpenTelemetryConstants.GEN_AI_INPUT_MESSAGES_KEY, JSON.stringify(messages)); + public override recordInputMessages(messages: InputMessages): void { + super.recordInputMessages(messages); } /** * Records the output messages for telemetry tracking. - * @param messages Array of output messages + * Accepts plain strings (auto-wrapped as OTEL OutputMessage with role `assistant`) or structured OutputMessage objects. 
+ * @param messages Array of output message strings or OutputMessage objects */ - public recordOutputMessages(messages: string[]): void { - this.setTagMaybe(OpenTelemetryConstants.GEN_AI_OUTPUT_MESSAGES_KEY, JSON.stringify(messages)); + public override recordOutputMessages(messages: OutputMessages): void { + super.recordOutputMessages(messages); } } diff --git a/packages/agents-a365-observability/src/tracing/scopes/OpenTelemetryScope.ts b/packages/agents-a365-observability/src/tracing/scopes/OpenTelemetryScope.ts index 82c765a3..d85bcf2a 100644 --- a/packages/agents-a365-observability/src/tracing/scopes/OpenTelemetryScope.ts +++ b/packages/agents-a365-observability/src/tracing/scopes/OpenTelemetryScope.ts @@ -3,9 +3,10 @@ import { trace, SpanKind, Span, SpanStatusCode, context, AttributeValue, SpanContext, TimeInput } from '@opentelemetry/api'; import { OpenTelemetryConstants } from '../constants'; -import { AgentDetails, UserDetails, SpanDetails } from '../contracts'; +import { AgentDetails, UserDetails, SpanDetails, InputMessages, OutputMessages } from '../contracts'; import { createContextWithParentSpanRef } from '../context/parent-span-context'; import { isParentSpanRef } from '../context/trace-context-propagation'; +import { isStringArray, toInputMessages, toOutputMessages, serializeMessages } from '../message-utils'; import logger from '../../utils/logging'; /** @@ -19,7 +20,6 @@ export abstract class OpenTelemetryScope implements Disposable { private customStartTime?: TimeInput; private customEndTime?: TimeInput; private errorType?: string; - private exception?: Error; private hasEnded = false; /** @@ -132,7 +132,6 @@ export abstract class OpenTelemetryScope implements Disposable { this.errorType = error.constructor.name; } - this.exception = error; this.span.setStatus({ code: SpanStatusCode.ERROR, message: error.message @@ -172,6 +171,26 @@ export abstract class OpenTelemetryScope implements Disposable { } } + /** + * Records the input messages for 
telemetry tracking. + * Accepts plain strings (auto-wrapped as OTEL ChatMessage) or structured ChatMessage objects. + * @param messages Array of input message strings or ChatMessage objects + */ + protected recordInputMessages(messages: InputMessages): void { + const otelMessages = isStringArray(messages) ? toInputMessages(messages) : messages; + this.setTagMaybe(OpenTelemetryConstants.GEN_AI_INPUT_MESSAGES_KEY, serializeMessages(otelMessages)); + } + + /** + * Records the output messages for telemetry tracking. + * Accepts plain strings (auto-wrapped as OTEL OutputMessage) or structured OutputMessage objects. + * @param messages Array of output message strings or OutputMessage objects + */ + protected recordOutputMessages(messages: OutputMessages): void { + const otelMessages = isStringArray(messages) ? toOutputMessages(messages) : messages; + this.setTagMaybe(OpenTelemetryConstants.GEN_AI_OUTPUT_MESSAGES_KEY, serializeMessages(otelMessages)); + } + /** * Sets a tag on the span if the value is not null or undefined. * @param name The tag name diff --git a/packages/agents-a365-observability/src/tracing/scopes/OutputScope.ts b/packages/agents-a365-observability/src/tracing/scopes/OutputScope.ts index c60aaf7a..7f9afc14 100644 --- a/packages/agents-a365-observability/src/tracing/scopes/OutputScope.ts +++ b/packages/agents-a365-observability/src/tracing/scopes/OutputScope.ts @@ -3,14 +3,15 @@ import { SpanKind } from '@opentelemetry/api'; import { OpenTelemetryScope } from './OpenTelemetryScope'; -import { AgentDetails, UserDetails, OutputResponse, Request, SpanDetails } from '../contracts'; +import { AgentDetails, UserDetails, OutputResponse, Request, SpanDetails, OutputMessage, OutputMessages } from '../contracts'; import { OpenTelemetryConstants } from '../constants'; +import { isStringArray, toOutputMessages, serializeMessages } from '../message-utils'; /** * Provides OpenTelemetry tracing scope for output message tracing with parent span linking. 
*/ export class OutputScope extends OpenTelemetryScope { - private _outputMessages: string[]; + private _outputMessages: OutputMessage[]; private _outputMessagesDirty = false; /** @@ -58,30 +59,31 @@ export class OutputScope extends OpenTelemetryScope { userDetails, ); - // Initialize accumulated messages list from the response - this._outputMessages = [...response.messages]; + // Initialize accumulated messages list from the response (convert plain strings to OTEL format if needed) + this._outputMessages = isStringArray(response.messages) ? toOutputMessages(response.messages) : [...response.messages]; // Set initial output messages attribute this.setTagMaybe( OpenTelemetryConstants.GEN_AI_OUTPUT_MESSAGES_KEY, - JSON.stringify(this._outputMessages) + serializeMessages(this._outputMessages) ); // Set conversation and channel this.setTagMaybe(OpenTelemetryConstants.GEN_AI_CONVERSATION_ID_KEY, request.conversationId); this.setTagMaybe(OpenTelemetryConstants.CHANNEL_NAME_KEY, request.channel?.name); this.setTagMaybe(OpenTelemetryConstants.CHANNEL_LINK_KEY, request.channel?.description); - } /** * Records the output messages for telemetry tracking. * Appends the provided messages to the accumulated output messages list. + * Accepts plain strings (auto-wrapped as OTEL OutputMessage) or structured OutputMessage objects. * The updated attribute is flushed when the scope is disposed. - * @param messages Array of output messages to append. + * @param messages Array of output message strings or OutputMessage objects to append. */ - public recordOutputMessages(messages: string[]): void { - this._outputMessages.push(...messages); + public recordOutputMessages(messages: OutputMessages): void { + const otelMessages = isStringArray(messages) ? 
toOutputMessages(messages) : messages; + this._outputMessages.push(...otelMessages); this._outputMessagesDirty = true; } @@ -89,7 +91,7 @@ export class OutputScope extends OpenTelemetryScope { if (this._outputMessagesDirty) { this.setTagMaybe( OpenTelemetryConstants.GEN_AI_OUTPUT_MESSAGES_KEY, - JSON.stringify(this._outputMessages) + serializeMessages(this._outputMessages) ); } super[Symbol.dispose](); diff --git a/tests/observability/core/output-scope.test.ts b/tests/observability/core/output-scope.test.ts index 4839d5ba..ba1863cc 100644 --- a/tests/observability/core/output-scope.test.ts +++ b/tests/observability/core/output-scope.test.ts @@ -12,6 +12,9 @@ import { OutputResponse, OpenTelemetryConstants, ParentSpanRef, + OutputMessage, + MessageRole, + FinishReason, } from '@microsoft/agents-a365-observability'; describe('OutputScope', () => { @@ -91,7 +94,10 @@ describe('OutputScope', () => { expect(attributes[OpenTelemetryConstants.CHANNEL_NAME_KEY]).toBe('Email'); expect(attributes[OpenTelemetryConstants.CHANNEL_LINK_KEY]).toBe('https://email.link'); const parsed = JSON.parse(attributes[OpenTelemetryConstants.GEN_AI_OUTPUT_MESSAGES_KEY] as string); - expect(parsed).toEqual(['First message', 'Second message']); + expect(parsed).toEqual([ + { role: 'assistant', parts: [{ type: 'text', content: 'First message' }] }, + { role: 'assistant', parts: [{ type: 'text', content: 'Second message' }] }, + ]); }); it('should append messages with recordOutputMessages and flush on dispose', async () => { @@ -106,7 +112,12 @@ describe('OutputScope', () => { const { attributes } = getLastSpan(); const parsed = JSON.parse(attributes[OpenTelemetryConstants.GEN_AI_OUTPUT_MESSAGES_KEY] as string); - expect(parsed).toEqual(['Initial', 'Appended 1', 'Appended 2', 'Appended 3']); + expect(parsed).toEqual([ + { role: 'assistant', parts: [{ type: 'text', content: 'Initial' }] }, + { role: 'assistant', parts: [{ type: 'text', content: 'Appended 1' }] }, + { role: 'assistant', parts: [{ 
type: 'text', content: 'Appended 2' }] }, + { role: 'assistant', parts: [{ type: 'text', content: 'Appended 3' }] }, + ]); }); it('should use parent span reference for linking', async () => { @@ -126,6 +137,41 @@ describe('OutputScope', () => { }); it('should throw when agentDetails.tenantId is missing', () => { - expect(() => OutputScope.start({}, { messages: ['m'] }, { agentId: 'a' } as any)).toThrow('OutputScope: tenantId is required on agentDetails'); + expect(() => OutputScope.start({}, { messages: ['m'] }, { agentId: 'a' } as AgentDetails)).toThrow('OutputScope: tenantId is required on agentDetails'); + }); + + it('should accept structured OutputMessage[] without re-wrapping', async () => { + const structured: OutputMessage[] = [ + { role: MessageRole.ASSISTANT, parts: [{ type: 'text', content: 'Hello structured' }], finish_reason: FinishReason.STOP }, + { role: MessageRole.ASSISTANT, parts: [{ type: 'text', content: 'Second structured' }] }, + ]; + const response: OutputResponse = { messages: structured }; + + const scope = OutputScope.start(testRequest, response, testAgentDetails); + scope.dispose(); + + await flushProvider.forceFlush(); + const { attributes } = getLastSpan(); + const parsed = JSON.parse(attributes[OpenTelemetryConstants.GEN_AI_OUTPUT_MESSAGES_KEY] as string); + // Should pass through as-is, preserving finish_reason and not double-wrapping + expect(parsed).toEqual(structured); + }); + + it('should accept structured OutputMessage[] in recordOutputMessages', async () => { + const initial: OutputMessage[] = [ + { role: MessageRole.ASSISTANT, parts: [{ type: 'text', content: 'Initial structured' }] }, + ]; + const appended: OutputMessage[] = [ + { role: MessageRole.ASSISTANT, parts: [{ type: 'text', content: 'Appended structured' }], finish_reason: FinishReason.STOP }, + ]; + + const scope = OutputScope.start(testRequest, { messages: initial }, testAgentDetails); + scope.recordOutputMessages(appended); + scope.dispose(); + + await 
flushProvider.forceFlush(); + const { attributes } = getLastSpan(); + const parsed = JSON.parse(attributes[OpenTelemetryConstants.GEN_AI_OUTPUT_MESSAGES_KEY] as string); + expect(parsed).toEqual([...initial, ...appended]); }); }); diff --git a/tests/observability/core/scope-messages.test.ts b/tests/observability/core/scope-messages.test.ts new file mode 100644 index 00000000..5e04d231 --- /dev/null +++ b/tests/observability/core/scope-messages.test.ts @@ -0,0 +1,263 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +import { describe, it, expect, beforeAll, afterAll, beforeEach } from '@jest/globals'; +import { BasicTracerProvider, InMemorySpanExporter, SimpleSpanProcessor } from '@opentelemetry/sdk-trace-base'; +import { trace, context as otelContext } from '@opentelemetry/api'; +import { AsyncLocalStorageContextManager } from '@opentelemetry/context-async-hooks'; + +import { + InferenceScope, + InvokeAgentScope, + AgentDetails, + InferenceDetails, + InferenceOperationType, + OpenTelemetryConstants, + ChatMessage, + OutputMessage, + MessageRole, + FinishReason, + Modality, +} from '@microsoft/agents-a365-observability'; +import { + serializeMessages, +} from '@microsoft/agents-a365-observability/src/tracing/message-utils'; + +const testAgentDetails: AgentDetails = { + agentId: 'test-agent-msg', + agentName: 'Message Test Agent', + tenantId: '11111111-1111-1111-1111-111111111111', +}; + +const testInferenceDetails: InferenceDetails = { + operationName: InferenceOperationType.CHAT, + model: 'gpt-4', + providerName: 'openai', +}; + +const testRequest = { conversationId: 'conv-msg-1' }; + +describe('Scope message recording', () => { + let exporter: InMemorySpanExporter; + let provider: BasicTracerProvider; + // eslint-disable-next-line @typescript-eslint/no-explicit-any + let flushProvider: any; + let contextManager: AsyncLocalStorageContextManager; + + beforeAll(() => { + contextManager = new AsyncLocalStorageContextManager(); + 
contextManager.enable(); + otelContext.setGlobalContextManager(contextManager); + + exporter = new InMemorySpanExporter(); + const processor = new SimpleSpanProcessor(exporter); + + // eslint-disable-next-line @typescript-eslint/no-explicit-any + const globalProvider: any = trace.getTracerProvider(); + if (globalProvider && typeof globalProvider.addSpanProcessor === 'function') { + globalProvider.addSpanProcessor(processor); + flushProvider = globalProvider; + } else { + provider = new BasicTracerProvider({ spanProcessors: [processor] }); + trace.setGlobalTracerProvider(provider); + flushProvider = provider; + } + }); + + beforeEach(() => { + exporter.reset(); + }); + + afterAll(async () => { + exporter.reset(); + await provider?.shutdown?.(); + contextManager.disable(); + otelContext.disable(); + }); + + function getLastSpan() { + const spans = exporter.getFinishedSpans(); + expect(spans.length).toBeGreaterThan(0); + const span = spans[spans.length - 1]; + return { span, attributes: span.attributes }; + } + + // --------------------------------------------------------------------------- + // recordInputMessages / recordOutputMessages (tested via InferenceScope; + // InvokeAgentScope overrides are trivial super calls sharing the same path) + // --------------------------------------------------------------------------- + describe('recordInputMessages / recordOutputMessages', () => { + it('should convert string[] input to OTEL ChatMessage format', async () => { + const scope = InferenceScope.start(testRequest, testInferenceDetails, testAgentDetails); + scope.recordInputMessages(['What is the weather?', 'And traffic?']); + scope.dispose(); + + await flushProvider.forceFlush(); + const { attributes } = getLastSpan(); + const parsed = JSON.parse(attributes[OpenTelemetryConstants.GEN_AI_INPUT_MESSAGES_KEY] as string); + + expect(parsed).toEqual([ + { role: 'user', parts: [{ type: 'text', content: 'What is the weather?' 
}] }, + { role: 'user', parts: [{ type: 'text', content: 'And traffic?' }] }, + ]); + }); + + it('should pass through ChatMessage[] input without re-wrapping', async () => { + const structured: ChatMessage[] = [ + { role: MessageRole.SYSTEM, parts: [{ type: 'text', content: 'You are a helpful assistant.' }] }, + { role: MessageRole.USER, parts: [{ type: 'text', content: 'Hello!' }] }, + ]; + + const scope = InferenceScope.start(testRequest, testInferenceDetails, testAgentDetails); + scope.recordInputMessages(structured); + scope.dispose(); + + await flushProvider.forceFlush(); + const { attributes } = getLastSpan(); + const parsed = JSON.parse(attributes[OpenTelemetryConstants.GEN_AI_INPUT_MESSAGES_KEY] as string); + + expect(parsed).toEqual(structured); + }); + + it('should convert string[] output to OTEL OutputMessage format', async () => { + const scope = InferenceScope.start(testRequest, testInferenceDetails, testAgentDetails); + scope.recordOutputMessages(['The weather is sunny.']); + scope.dispose(); + + await flushProvider.forceFlush(); + const { attributes } = getLastSpan(); + const parsed = JSON.parse(attributes[OpenTelemetryConstants.GEN_AI_OUTPUT_MESSAGES_KEY] as string); + + expect(parsed).toEqual([ + { role: 'assistant', parts: [{ type: 'text', content: 'The weather is sunny.' }] }, + ]); + }); + + it('should preserve finish_reason on OutputMessage[]', async () => { + const structured: OutputMessage[] = [ + { role: MessageRole.ASSISTANT, parts: [{ type: 'text', content: 'Done.' }], finish_reason: FinishReason.STOP }, + { role: MessageRole.ASSISTANT, parts: [{ type: 'text', content: 'Partial...' 
}], finish_reason: FinishReason.LENGTH }, + ]; + + const scope = InferenceScope.start(testRequest, testInferenceDetails, testAgentDetails); + scope.recordOutputMessages(structured); + scope.dispose(); + + await flushProvider.forceFlush(); + const { attributes } = getLastSpan(); + const parsed = JSON.parse(attributes[OpenTelemetryConstants.GEN_AI_OUTPUT_MESSAGES_KEY] as string); + + expect(parsed[0].finish_reason).toBe('stop'); + expect(parsed[1].finish_reason).toBe('length'); + }); + }); + + // --------------------------------------------------------------------------- + // InvokeAgentScope-specific: recordResponse() delegates to recordOutputMessages + // --------------------------------------------------------------------------- + describe('InvokeAgentScope.recordResponse', () => { + it('should convert response string to OTEL OutputMessage format', async () => { + const scope = InvokeAgentScope.start(testRequest, {}, testAgentDetails); + scope.recordResponse('Test response'); + scope.dispose(); + + await flushProvider.forceFlush(); + const { attributes } = getLastSpan(); + const parsed = JSON.parse(attributes[OpenTelemetryConstants.GEN_AI_OUTPUT_MESSAGES_KEY] as string); + + expect(parsed).toHaveLength(1); + expect(parsed[0].role).toBe('assistant'); + expect(parsed[0].parts[0].content).toBe('Test response'); + }); + }); + + // --------------------------------------------------------------------------- + // Complex OTEL message part types (serialization round-trips) + // --------------------------------------------------------------------------- + describe('Complex message part types', () => { + it('should serialize tool call request and response parts', async () => { + const messages: ChatMessage[] = [ + { + role: MessageRole.ASSISTANT, + parts: [ + { type: 'text', content: 'Let me search for that.' 
}, + { type: 'tool_call', name: 'search', id: 'call_123', arguments: { query: 'test' } }, + ], + }, + { + role: MessageRole.TOOL, + parts: [ + { type: 'tool_call_response', id: 'call_123', response: { results: ['item1'] } }, + ], + }, + ]; + + const scope = InferenceScope.start(testRequest, testInferenceDetails, testAgentDetails); + scope.recordInputMessages(messages); + scope.dispose(); + + await flushProvider.forceFlush(); + const { attributes } = getLastSpan(); + const parsed = JSON.parse(attributes[OpenTelemetryConstants.GEN_AI_INPUT_MESSAGES_KEY] as string); + + expect(parsed[0].parts[1].type).toBe('tool_call'); + expect(parsed[0].parts[1].arguments).toEqual({ query: 'test' }); + expect(parsed[1].parts[0].type).toBe('tool_call_response'); + expect(parsed[1].parts[0].response).toEqual({ results: ['item1'] }); + }); + + it('should serialize reasoning parts with finish_reason', async () => { + const messages: OutputMessage[] = [{ + role: MessageRole.ASSISTANT, + parts: [ + { type: 'reasoning', content: 'The user is asking about weather...' }, + { type: 'text', content: 'The weather is sunny.' 
}, + ], + finish_reason: FinishReason.STOP, + }]; + + const scope = InferenceScope.start(testRequest, testInferenceDetails, testAgentDetails); + scope.recordOutputMessages(messages); + scope.dispose(); + + await flushProvider.forceFlush(); + const { attributes } = getLastSpan(); + const parsed = JSON.parse(attributes[OpenTelemetryConstants.GEN_AI_OUTPUT_MESSAGES_KEY] as string); + + expect(parsed[0].parts[0].type).toBe('reasoning'); + expect(parsed[0].parts[1].type).toBe('text'); + expect(parsed[0].finish_reason).toBe('stop'); + }); + + it('should serialize blob, file, and URI parts', () => { + const messages: ChatMessage[] = [{ + role: MessageRole.USER, + parts: [ + { type: 'blob', modality: Modality.IMAGE, mime_type: 'image/png', content: 'iVBORw0KGgo=' }, + { type: 'file', modality: Modality.VIDEO, mime_type: 'video/mp4', file_id: 'file-123' }, + { type: 'uri', modality: Modality.AUDIO, mime_type: 'audio/mp3', uri: 'https://example.com/audio.mp3' }, + ], + }]; + + const parsed = JSON.parse(serializeMessages(messages)); + + expect(parsed[0].parts[0].modality).toBe('image'); + expect(parsed[0].parts[1].file_id).toBe('file-123'); + expect(parsed[0].parts[2].uri).toBe('https://example.com/audio.mp3'); + }); + + it('should serialize server tool call and generic parts', () => { + const messages: ChatMessage[] = [ + { role: MessageRole.ASSISTANT, parts: [{ type: 'server_tool_call', name: 'mcp_tool', id: 'stc_1', server_tool_call: { endpoint: '/api' } }] }, + { role: MessageRole.TOOL, parts: [{ type: 'server_tool_call_response', id: 'stc_1', server_tool_call_response: { status: 'ok' } }] }, + { role: MessageRole.USER, parts: [{ type: 'custom_annotation', timestamp: '00:01:23', note: 'Important' }] }, + ]; + + const parsed = JSON.parse(serializeMessages(messages)); + + expect(parsed[0].parts[0].server_tool_call.endpoint).toBe('/api'); + expect(parsed[1].parts[0].server_tool_call_response.status).toBe('ok'); + expect(parsed[2].parts[0].type).toBe('custom_annotation'); + 
}); + }); +}); diff --git a/tests/observability/extension/hosting/output-logging-middleware.test.ts b/tests/observability/extension/hosting/output-logging-middleware.test.ts index fcaa82ee..915b2471 100644 --- a/tests/observability/extension/hosting/output-logging-middleware.test.ts +++ b/tests/observability/extension/hosting/output-logging-middleware.test.ts @@ -148,7 +148,9 @@ describe('OutputLoggingMiddleware', () => { const outputSpan = spans.find(s => s.name.includes('output_messages')); expect(outputSpan).toBeDefined(); - expect(outputSpan!.attributes[OpenTelemetryConstants.GEN_AI_OUTPUT_MESSAGES_KEY]).toBe(JSON.stringify(['Hi there!'])); + expect(outputSpan!.attributes[OpenTelemetryConstants.GEN_AI_OUTPUT_MESSAGES_KEY]).toBe( + JSON.stringify([{ role: 'assistant', parts: [{ type: 'text', content: 'Hi there!' }] }]) + ); }); it('should skip non-message activities in OutputScope', async () => { @@ -241,7 +243,9 @@ describe('OutputLoggingMiddleware', () => { await flushProvider.forceFlush(); const outputSpan = exporter.getFinishedSpans().find(s => s.name.includes('output_messages')); expect(outputSpan).toBeDefined(); - expect(outputSpan!.attributes[OpenTelemetryConstants.GEN_AI_OUTPUT_MESSAGES_KEY]).toBe(JSON.stringify(['Async reply'])); + expect(outputSpan!.attributes[OpenTelemetryConstants.GEN_AI_OUTPUT_MESSAGES_KEY]).toBe( + JSON.stringify([{ role: 'assistant', parts: [{ type: 'text', content: 'Async reply' }] }]) + ); }); it('should link async reply OutputScope to parent when parentSpanRef is set', async () => { diff --git a/tests/observability/extension/hosting/scope-utils.test.ts b/tests/observability/extension/hosting/scope-utils.test.ts index d9e62d09..0c6800af 100644 --- a/tests/observability/extension/hosting/scope-utils.test.ts +++ b/tests/observability/extension/hosting/scope-utils.test.ts @@ -91,7 +91,7 @@ describe('ScopeUtils.populateFromTurnContext', () => { [OpenTelemetryConstants.GEN_AI_AGENT_EMAIL_KEY, 'agent-upn@contoso.com'], 
[OpenTelemetryConstants.GEN_AI_AGENT_DESCRIPTION_KEY, 'assistant'], [OpenTelemetryConstants.TENANT_ID_KEY, 'tenant-123'], - [OpenTelemetryConstants.GEN_AI_INPUT_MESSAGES_KEY, JSON.stringify(['input text'])] + [OpenTelemetryConstants.GEN_AI_INPUT_MESSAGES_KEY, JSON.stringify([{ role: 'user', parts: [{ type: 'text', content: 'input text' }] }])] ]) ); scope?.dispose(); @@ -153,7 +153,7 @@ describe('ScopeUtils.populateFromTurnContext', () => { [OpenTelemetryConstants.GEN_AI_CALLER_AGENT_APPLICATION_ID_KEY, 'caller-agentBlueprintId'], [OpenTelemetryConstants.GEN_AI_CALLER_AGENT_EMAIL_KEY, 'user@contoso.com'], [OpenTelemetryConstants.TENANT_ID_KEY, 'tenant-123'], - [OpenTelemetryConstants.GEN_AI_INPUT_MESSAGES_KEY, JSON.stringify(['invoke message'])], + [OpenTelemetryConstants.GEN_AI_INPUT_MESSAGES_KEY, JSON.stringify([{ role: 'user', parts: [{ type: 'text', content: 'invoke message' }] }])], [OpenTelemetryConstants.GEN_AI_AGENT_ID_KEY, 'agent-1'], [OpenTelemetryConstants.GEN_AI_AGENT_NAME_KEY, 'Agent One'], [OpenTelemetryConstants.GEN_AI_AGENT_DESCRIPTION_KEY, 'assistant'] diff --git a/tests/observability/tracing/message-utils.test.ts b/tests/observability/tracing/message-utils.test.ts new file mode 100644 index 00000000..9361ac9a --- /dev/null +++ b/tests/observability/tracing/message-utils.test.ts @@ -0,0 +1,172 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. 
+ +import { + MAX_ATTRIBUTE_LENGTH, + MessageRole, + ChatMessage, + OutputMessage, +} from '@microsoft/agents-a365-observability'; +import { + isStringArray, + toInputMessages, + toOutputMessages, + serializeMessages, +} from '@microsoft/agents-a365-observability/src/tracing/message-utils'; + +describe('isStringArray', () => { + it('returns true for string[]', () => { + expect(isStringArray(['hello', 'world'])).toBe(true); + }); + + it('returns true for empty array', () => { + expect(isStringArray([])).toBe(true); + }); + + it('returns false for ChatMessage[]', () => { + const messages: ChatMessage[] = [ + { role: MessageRole.USER, parts: [{ type: 'text', content: 'hi' }] }, + ]; + expect(isStringArray(messages)).toBe(false); + }); + + it('returns false for OutputMessage[]', () => { + const messages: OutputMessage[] = [ + { role: MessageRole.ASSISTANT, parts: [{ type: 'text', content: 'hello' }], finish_reason: 'stop' }, + ]; + expect(isStringArray(messages)).toBe(false); + }); +}); + +describe('toInputMessages', () => { + it('wraps strings as ChatMessage with role=user and TextPart', () => { + const result = toInputMessages(['hello', 'world']); + expect(result).toEqual([ + { role: 'user', parts: [{ type: 'text', content: 'hello' }] }, + { role: 'user', parts: [{ type: 'text', content: 'world' }] }, + ]); + }); + + it('handles empty array', () => { + expect(toInputMessages([])).toEqual([]); + }); + + it('preserves message content exactly', () => { + const content = ' special chars: <>&"\' \n\ttabs '; + const result = toInputMessages([content]); + expect(result[0].parts[0]).toEqual({ type: 'text', content }); + }); +}); + +describe('toOutputMessages', () => { + it('wraps strings as OutputMessage with role=assistant and TextPart', () => { + const result = toOutputMessages(['response 1', 'response 2']); + expect(result).toEqual([ + { role: 'assistant', parts: [{ type: 'text', content: 'response 1' }] }, + { role: 'assistant', parts: [{ type: 'text', content: 'response 
2' }] }, + ]); + }); + + it('handles empty array', () => { + expect(toOutputMessages([])).toEqual([]); + }); +}); + +describe('serializeMessages', () => { + it('returns JSON for small arrays within limit', () => { + const messages: ChatMessage[] = [ + { role: MessageRole.USER, parts: [{ type: 'text', content: 'hello' }] }, + ]; + const result = serializeMessages(messages); + expect(result).toBe(JSON.stringify(messages)); + }); + + it('returns JSON for empty array', () => { + expect(serializeMessages([])).toBe('[]'); + }); + + it('truncates trailing messages when over MAX_ATTRIBUTE_LENGTH', () => { + // Create messages that collectively exceed MAX_ATTRIBUTE_LENGTH + const longContent = 'x'.repeat(1000); + const messages: ChatMessage[] = Array.from({ length: 20 }, (_, i) => ({ + role: MessageRole.USER, + parts: [{ type: 'text' as const, content: `${longContent}-${i}` }], + })); + + // Verify the full array exceeds the limit + expect(JSON.stringify(messages).length).toBeGreaterThan(MAX_ATTRIBUTE_LENGTH); + + const result = serializeMessages(messages); + expect(result.length).toBeLessThanOrEqual(MAX_ATTRIBUTE_LENGTH); + + const parsed = JSON.parse(result); + // Should have fewer items than original + a sentinel at the end + expect(parsed.length).toBeLessThan(messages.length); + }); + + it('appends sentinel with correct drop count', () => { + const longContent = 'x'.repeat(1000); + const messages: ChatMessage[] = Array.from({ length: 20 }, (_, i) => ({ + role: MessageRole.USER, + parts: [{ type: 'text' as const, content: `${longContent}-${i}` }], + })); + + const result = serializeMessages(messages); + const parsed = JSON.parse(result); + + // Last element should be the sentinel + const sentinel = parsed[parsed.length - 1]; + expect(sentinel.role).toBe('system'); + expect(sentinel.parts[0].type).toBe('text'); + expect(sentinel.parts[0].content).toMatch(/\[truncated: \d+ of 20 messages omitted\]/); + + // Verify the count is correct + const keptCount = parsed.length - 1; 
// excluding sentinel + const droppedCount = 20 - keptCount; + expect(sentinel.parts[0].content).toBe(`[truncated: ${droppedCount} of 20 messages omitted]`); + }); + + it('falls back to truncateValue when single item exceeds limit', () => { + const hugeContent = 'y'.repeat(MAX_ATTRIBUTE_LENGTH + 1000); + const messages: ChatMessage[] = [ + { role: MessageRole.USER, parts: [{ type: 'text', content: hugeContent }] }, + ]; + + const result = serializeMessages(messages); + expect(result.length).toBeLessThanOrEqual(MAX_ATTRIBUTE_LENGTH); + expect(result).toContain('...[truncated]'); + }); + + it('handles boundary: exactly at MAX_ATTRIBUTE_LENGTH', () => { + // Create a message whose JSON is exactly MAX_ATTRIBUTE_LENGTH + // We'll find the right content length by subtracting the wrapper overhead + const wrapper: ChatMessage = { role: MessageRole.USER, parts: [{ type: 'text', content: '' }] }; + const overhead = JSON.stringify([wrapper]).length; + const contentLength = MAX_ATTRIBUTE_LENGTH - overhead; + const message: ChatMessage = { role: MessageRole.USER, parts: [{ type: 'text', content: 'a'.repeat(contentLength) }] }; + + const json = JSON.stringify([message]); + expect(json.length).toBe(MAX_ATTRIBUTE_LENGTH); + + // Should return without truncation + const result = serializeMessages([message]); + expect(result).toBe(json); + }); + + it('handles boundary: just over MAX_ATTRIBUTE_LENGTH with two messages', () => { + // Create two messages that together exceed the limit by 1-2 bytes + // (floor division leaves a 1-byte remainder when the budget is odd) + const wrapper: ChatMessage = { role: MessageRole.USER, parts: [{ type: 'text', content: '' }] }; + const twoItemOverhead = JSON.stringify([wrapper, wrapper]).length; + const contentPerItem = Math.floor((MAX_ATTRIBUTE_LENGTH - twoItemOverhead) / 2); + + const messages: ChatMessage[] = [ + { role: MessageRole.USER, parts: [{ type: 'text', content: 'a'.repeat(contentPerItem + 1) }] }, + { role: MessageRole.USER, parts: [{ type: 'text', content: 'b'.repeat(contentPerItem + 1) }] }, + ]; + + 
expect(JSON.stringify(messages).length).toBeGreaterThan(MAX_ATTRIBUTE_LENGTH); + + const result = serializeMessages(messages); + expect(result.length).toBeLessThanOrEqual(MAX_ATTRIBUTE_LENGTH); + }); +});