diff --git a/packages/core/src/tracing/openai/index.ts b/packages/core/src/tracing/openai/index.ts index 0f83bf2cd3eb..a8a0c93389c4 100644 --- a/packages/core/src/tracing/openai/index.ts +++ b/packages/core/src/tracing/openai/index.ts @@ -12,7 +12,6 @@ import { GEN_AI_OPERATION_NAME_ATTRIBUTE, GEN_AI_REQUEST_AVAILABLE_TOOLS_ATTRIBUTE, GEN_AI_REQUEST_MODEL_ATTRIBUTE, - GEN_AI_RESPONSE_TEXT_ATTRIBUTE, GEN_AI_SYSTEM_ATTRIBUTE, GEN_AI_SYSTEM_INSTRUCTIONS_ATTRIBUTE, } from '../ai/gen-ai-attributes'; @@ -26,18 +25,8 @@ import { } from '../ai/utils'; import { OPENAI_METHOD_REGISTRY } from './constants'; import { instrumentStream } from './streaming'; -import type { ChatCompletionChunk, OpenAiOptions, OpenAiResponse, OpenAIStream, ResponseStreamingEvent } from './types'; -import { - addChatCompletionAttributes, - addConversationAttributes, - addEmbeddingsAttributes, - addResponsesApiAttributes, - extractRequestParameters, - isChatCompletionResponse, - isConversationResponse, - isEmbeddingsResponse, - isResponsesApiResponse, -} from './utils'; +import type { ChatCompletionChunk, OpenAiOptions, OpenAIStream, ResponseStreamingEvent } from './types'; +import { addResponseAttributes, extractRequestParameters } from './utils'; /** * Extract available tools from request parameters @@ -88,33 +77,6 @@ function extractRequestAttributes(args: unknown[], operationName: string): Recor return attributes; } -/** - * Add response attributes to spans - * This supports Chat Completion, Responses API, Embeddings, and Conversations API responses - */ -function addResponseAttributes(span: Span, result: unknown, recordOutputs?: boolean): void { - if (!result || typeof result !== 'object') return; - - const response = result as OpenAiResponse; - - if (isChatCompletionResponse(response)) { - addChatCompletionAttributes(span, response, recordOutputs); - if (recordOutputs && response.choices?.length) { - const responseTexts = response.choices.map(choice => choice.message?.content || ''); - 
span.setAttributes({ [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: JSON.stringify(responseTexts) }); - } - } else if (isResponsesApiResponse(response)) { - addResponsesApiAttributes(span, response, recordOutputs); - if (recordOutputs && response.output_text) { - span.setAttributes({ [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: response.output_text }); - } - } else if (isEmbeddingsResponse(response)) { - addEmbeddingsAttributes(span, response); - } else if (isConversationResponse(response)) { - addConversationAttributes(span, response); - } -} - // Extract and record AI request inputs, if present. This is intentionally separate from response attributes. function addRequestAttributes(span: Span, params: Record, operationName: string): void { // Store embeddings input on a separate attribute and do not truncate it diff --git a/packages/core/src/tracing/openai/streaming.ts b/packages/core/src/tracing/openai/streaming.ts index dec3457269da..f9f466228857 100644 --- a/packages/core/src/tracing/openai/streaming.ts +++ b/packages/core/src/tracing/openai/streaming.ts @@ -3,9 +3,14 @@ import { SPAN_STATUS_ERROR } from '../../tracing'; import type { Span } from '../../types-hoist/span'; import { GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE, + GEN_AI_RESPONSE_ID_ATTRIBUTE, + GEN_AI_RESPONSE_MODEL_ATTRIBUTE, GEN_AI_RESPONSE_STREAMING_ATTRIBUTE, GEN_AI_RESPONSE_TEXT_ATTRIBUTE, GEN_AI_RESPONSE_TOOL_CALLS_ATTRIBUTE, + GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE, + GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE, + GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE, } from '../ai/gen-ai-attributes'; import { RESPONSE_EVENT_TYPES } from './constants'; import type { @@ -15,12 +20,7 @@ import type { ResponseFunctionCall, ResponseStreamingEvent, } from './types'; -import { - isChatCompletionChunk, - isResponsesApiStreamEvent, - setCommonResponseAttributes, - setTokenUsageAttributes, -} from './utils'; +import { isChatCompletionChunk, isResponsesApiStreamEvent } from './utils'; /** * State object used to accumulate information from a stream of 
OpenAI events/chunks. @@ -240,35 +240,31 @@ export async function* instrumentStream( yield event; } } finally { - setCommonResponseAttributes(span, state.responseId, state.responseModel); - setTokenUsageAttributes(span, state.promptTokens, state.completionTokens, state.totalTokens); - - span.setAttributes({ + const attrs: Record<string, unknown> = { + [GEN_AI_RESPONSE_ID_ATTRIBUTE]: state.responseId, + [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: state.responseModel, [GEN_AI_RESPONSE_STREAMING_ATTRIBUTE]: true, - }); + }; + + if (state.promptTokens !== undefined) attrs[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE] = state.promptTokens; + if (state.completionTokens !== undefined) attrs[GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE] = state.completionTokens; + if (state.totalTokens !== undefined) attrs[GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE] = state.totalTokens; if (state.finishReasons.length) { - span.setAttributes({ - [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: JSON.stringify(state.finishReasons), - }); + attrs[GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE] = JSON.stringify(state.finishReasons); } if (recordOutputs && state.responseTexts.length) { - span.setAttributes({ - [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: state.responseTexts.join(''), - }); + attrs[GEN_AI_RESPONSE_TEXT_ATTRIBUTE] = state.responseTexts.join(''); } - // Set tool calls attribute if any were accumulated const chatCompletionToolCallsArray = Object.values(state.chatCompletionToolCalls); const allToolCalls = [...chatCompletionToolCallsArray, ...state.responsesApiToolCalls]; - if (allToolCalls.length > 0) { - span.setAttributes({ - [GEN_AI_RESPONSE_TOOL_CALLS_ATTRIBUTE]: JSON.stringify(allToolCalls), - }); + attrs[GEN_AI_RESPONSE_TOOL_CALLS_ATTRIBUTE] = JSON.stringify(allToolCalls); } + span.setAttributes(attrs); span.end(); } } diff --git a/packages/core/src/tracing/openai/utils.ts b/packages/core/src/tracing/openai/utils.ts index c2986349e14a..ded7b3ff0e3b 100644 --- a/packages/core/src/tracing/openai/utils.ts +++ 
b/packages/core/src/tracing/openai/utils.ts @@ -1,4 +1,5 @@ import type { Span } from '../../types-hoist/span'; +import type { SpanAttributeValue } from '../../types-hoist/span'; import { GEN_AI_CONVERSATION_ID_ATTRIBUTE, GEN_AI_REQUEST_DIMENSIONS_ATTRIBUTE, @@ -12,71 +13,13 @@ import { GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE, GEN_AI_RESPONSE_ID_ATTRIBUTE, GEN_AI_RESPONSE_MODEL_ATTRIBUTE, + GEN_AI_RESPONSE_TEXT_ATTRIBUTE, GEN_AI_RESPONSE_TOOL_CALLS_ATTRIBUTE, GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE, GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE, GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE, } from '../ai/gen-ai-attributes'; -import type { - ChatCompletionChunk, - OpenAiChatCompletionObject, - OpenAIConversationObject, - OpenAICreateEmbeddingsObject, - OpenAIResponseObject, - ResponseStreamingEvent, -} from './types'; - -/** - * Check if response is a Chat Completion object - */ -export function isChatCompletionResponse(response: unknown): response is OpenAiChatCompletionObject { - return ( - response !== null && - typeof response === 'object' && - 'object' in response && - (response as Record<string, unknown>).object === 'chat.completion' - ); -} - -/** - * Check if response is a Responses API object - */ -export function isResponsesApiResponse(response: unknown): response is OpenAIResponseObject { - return ( - response !== null && - typeof response === 'object' && - 'object' in response && - (response as Record<string, unknown>).object === 'response' - ); -} - -/** - * Check if response is an Embeddings API object - */ -export function isEmbeddingsResponse(response: unknown): response is OpenAICreateEmbeddingsObject { - if (response === null || typeof response !== 'object' || !('object' in response)) { - return false; - } - const responseObject = response as Record<string, unknown>; - return ( - responseObject.object === 'list' && - typeof responseObject.model === 'string' && - responseObject.model.toLowerCase().includes('embedding') - ); -} - -/** - * Check if response is a Conversations API object - * @see 
https://platform.openai.com/docs/api-reference/conversations - */ -export function isConversationResponse(response: unknown): response is OpenAIConversationObject { - return ( - response !== null && - typeof response === 'object' && - 'object' in response && - (response as Record<string, unknown>).object === 'conversation' - ); -} +import type { ChatCompletionChunk, ResponseStreamingEvent } from './types'; /** * Check if streaming event is from the Responses API @@ -104,157 +47,108 @@ export function isChatCompletionChunk(event: unknown): event is ChatCompletionCh } /** - * Add attributes for Chat Completion responses + * Add response attributes to a span using duck-typing. + * Works for Chat Completions, Responses API, Embeddings, and Conversations API responses. */ -export function addChatCompletionAttributes( - span: Span, - response: OpenAiChatCompletionObject, - recordOutputs?: boolean, -): void { - setCommonResponseAttributes(span, response.id, response.model); - if (response.usage) { - setTokenUsageAttributes( - span, - response.usage.prompt_tokens, - response.usage.completion_tokens, - response.usage.total_tokens, - ); +export function addResponseAttributes(span: Span, result: unknown, recordOutputs?: boolean): void { + if (!result || typeof result !== 'object') return; + + const response = result as Record<string, unknown>; + const attrs: Record<string, SpanAttributeValue> = {}; + + // Response ID + if (typeof response.id === 'string') { + attrs[GEN_AI_RESPONSE_ID_ATTRIBUTE] = response.id; } + + // Response model + if (typeof response.model === 'string') { + attrs[GEN_AI_RESPONSE_MODEL_ATTRIBUTE] = response.model; + } + + // Conversation ID (conversation objects use id as conversation link) + if (response.object === 'conversation' && typeof response.id === 'string') { + attrs[GEN_AI_CONVERSATION_ID_ATTRIBUTE] = response.id; + } + + // Token usage — supports both naming conventions (chat: prompt_tokens/completion_tokens, responses: input_tokens/output_tokens) + if (response.usage && typeof response.usage === 'object') 
{ + const usage = response.usage as Record<string, unknown>; + + const inputTokens = usage.prompt_tokens ?? usage.input_tokens; + if (typeof inputTokens === 'number') { + attrs[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE] = inputTokens; + } + + const outputTokens = usage.completion_tokens ?? usage.output_tokens; + if (typeof outputTokens === 'number') { + attrs[GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE] = outputTokens; + } + + if (typeof usage.total_tokens === 'number') { + attrs[GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE] = usage.total_tokens; + } + } + + // Finish reasons from choices (chat completions) if (Array.isArray(response.choices)) { - const finishReasons = response.choices + const choices = response.choices as Array<Record<string, unknown>>; + const finishReasons = choices .map(choice => choice.finish_reason) - .filter((reason): reason is string => reason !== null); + .filter((reason): reason is string => typeof reason === 'string'); if (finishReasons.length > 0) { - span.setAttributes({ - [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: JSON.stringify(finishReasons), - }); + attrs[GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE] = JSON.stringify(finishReasons); } - // Extract tool calls from all choices (only if recordOutputs is true) if (recordOutputs) { - const toolCalls = response.choices - .map(choice => choice.message?.tool_calls) + // Response text from choices + const responseTexts = choices.map(choice => { + const message = choice.message as Record<string, unknown> | undefined; + return (message?.content as string) || ''; + }); + attrs[GEN_AI_RESPONSE_TEXT_ATTRIBUTE] = JSON.stringify(responseTexts); + + // Tool calls from choices + const toolCalls = choices + .map(choice => { + const message = choice.message as Record<string, unknown> | undefined; + return message?.tool_calls; + }) .filter(calls => Array.isArray(calls) && calls.length > 0) .flat(); if (toolCalls.length > 0) { - span.setAttributes({ - [GEN_AI_RESPONSE_TOOL_CALLS_ATTRIBUTE]: JSON.stringify(toolCalls), - }); + attrs[GEN_AI_RESPONSE_TOOL_CALLS_ATTRIBUTE] = JSON.stringify(toolCalls); } } 
} -} -/** - * Add attributes for Responses API responses - */ -export function addResponsesApiAttributes(span: Span, response: OpenAIResponseObject, recordOutputs?: boolean): void { - setCommonResponseAttributes(span, response.id, response.model); - if (response.status) { - span.setAttributes({ - [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: JSON.stringify([response.status]), - }); - } - if (response.usage) { - setTokenUsageAttributes( - span, - response.usage.input_tokens, - response.usage.output_tokens, - response.usage.total_tokens, - ); + // Finish reason from status (responses API) + if (typeof response.status === 'string') { + // Only set if not already set from choices + if (!attrs[GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]) { + attrs[GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE] = JSON.stringify([response.status]); + } } - // Extract function calls from output (only if recordOutputs is true) if (recordOutputs) { - const responseWithOutput = response as OpenAIResponseObject & { output?: unknown[] }; - if (Array.isArray(responseWithOutput.output) && responseWithOutput.output.length > 0) { - // Filter for function_call type objects in the output array - const functionCalls = responseWithOutput.output.filter( - (item): unknown => - // oxlint-disable-next-line typescript/prefer-optional-chain - typeof item === 'object' && item !== null && (item as Record<string, unknown>).type === 'function_call', - ); + // Response text from output_text (responses API) + if (typeof response.output_text === 'string' && !attrs[GEN_AI_RESPONSE_TEXT_ATTRIBUTE]) { + attrs[GEN_AI_RESPONSE_TEXT_ATTRIBUTE] = response.output_text; + } + // Tool calls from output array (responses API) + if (Array.isArray(response.output) && response.output.length > 0 && !attrs[GEN_AI_RESPONSE_TOOL_CALLS_ATTRIBUTE]) { + const functionCalls = (response.output as Array<Record<string, unknown>>).filter( + item => item?.type === 'function_call', + ); if (functionCalls.length > 0) { - span.setAttributes({ - [GEN_AI_RESPONSE_TOOL_CALLS_ATTRIBUTE]: 
JSON.stringify(functionCalls), - }); + attrs[GEN_AI_RESPONSE_TOOL_CALLS_ATTRIBUTE] = JSON.stringify(functionCalls); } } } -} - -/** - * Add attributes for Embeddings API responses - */ -export function addEmbeddingsAttributes(span: Span, response: OpenAICreateEmbeddingsObject): void { - span.setAttributes({ - [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: response.model, - }); - if (response.usage) { - setTokenUsageAttributes(span, response.usage.prompt_tokens, undefined, response.usage.total_tokens); - } -} - -/** - * Add attributes for Conversations API responses - * @see https://platform.openai.com/docs/api-reference/conversations - */ -export function addConversationAttributes(span: Span, response: OpenAIConversationObject): void { - const { id } = response; - - span.setAttributes({ - [GEN_AI_RESPONSE_ID_ATTRIBUTE]: id, - // The conversation id is used to link messages across API calls - [GEN_AI_CONVERSATION_ID_ATTRIBUTE]: id, - }); -} - -/** - * Set token usage attributes - * @param span - The span to add attributes to - * @param promptTokens - The number of prompt tokens - * @param completionTokens - The number of completion tokens - * @param totalTokens - The number of total tokens - */ -export function setTokenUsageAttributes( - span: Span, - promptTokens?: number, - completionTokens?: number, - totalTokens?: number, -): void { - if (promptTokens !== undefined) { - span.setAttributes({ - [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: promptTokens, - }); - } - if (completionTokens !== undefined) { - span.setAttributes({ - [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: completionTokens, - }); - } - if (totalTokens !== undefined) { - span.setAttributes({ - [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: totalTokens, - }); - } -} - -/** - * Set common response attributes - * @param span - The span to add attributes to - * @param id - The response id - * @param model - The response model - */ -export function setCommonResponseAttributes(span: Span, id: string, model: string): void { - 
span.setAttributes({ - [GEN_AI_RESPONSE_ID_ATTRIBUTE]: id, - }); - span.setAttributes({ - [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: model, - }); + span.setAttributes(attrs); } /** diff --git a/packages/core/test/lib/utils/openai-utils.test.ts b/packages/core/test/lib/utils/openai-utils.test.ts index 3f8fd0045f2e..5af1ac635264 100644 --- a/packages/core/test/lib/utils/openai-utils.test.ts +++ b/packages/core/test/lib/utils/openai-utils.test.ts @@ -1,12 +1,6 @@ import { describe, expect, it } from 'vitest'; import { buildMethodPath } from '../../../src/tracing/ai/utils'; -import { - isChatCompletionChunk, - isChatCompletionResponse, - isConversationResponse, - isResponsesApiResponse, - isResponsesApiStreamEvent, -} from '../../../src/tracing/openai/utils'; +import { isChatCompletionChunk, isResponsesApiStreamEvent } from '../../../src/tracing/openai/utils'; describe('openai-utils', () => { describe('buildMethodPath', () => { @@ -17,50 +11,6 @@ describe('openai-utils', () => { }); }); - describe('isChatCompletionResponse', () => { - it('should return true for valid chat completion responses', () => { - const validResponse = { - object: 'chat.completion', - id: 'chatcmpl-123', - model: 'gpt-4', - choices: [], - }; - expect(isChatCompletionResponse(validResponse)).toBe(true); - }); - - it('should return false for invalid responses', () => { - expect(isChatCompletionResponse(null)).toBe(false); - expect(isChatCompletionResponse(undefined)).toBe(false); - expect(isChatCompletionResponse('string')).toBe(false); - expect(isChatCompletionResponse(123)).toBe(false); - expect(isChatCompletionResponse({})).toBe(false); - expect(isChatCompletionResponse({ object: 'different' })).toBe(false); - expect(isChatCompletionResponse({ object: null })).toBe(false); - }); - }); - - describe('isResponsesApiResponse', () => { - it('should return true for valid responses API responses', () => { - const validResponse = { - object: 'response', - id: 'resp_123', - model: 'gpt-4', - choices: [], - }; - 
expect(isResponsesApiResponse(validResponse)).toBe(true); - }); - - it('should return false for invalid responses', () => { - expect(isResponsesApiResponse(null)).toBe(false); - expect(isResponsesApiResponse(undefined)).toBe(false); - expect(isResponsesApiResponse('string')).toBe(false); - expect(isResponsesApiResponse(123)).toBe(false); - expect(isResponsesApiResponse({})).toBe(false); - expect(isResponsesApiResponse({ object: 'different' })).toBe(false); - expect(isResponsesApiResponse({ object: null })).toBe(false); - }); - }); - describe('isResponsesApiStreamEvent', () => { it('should return true for valid responses API stream events', () => { expect(isResponsesApiStreamEvent({ type: 'response.created' })).toBe(true); @@ -103,36 +53,4 @@ describe('openai-utils', () => { expect(isChatCompletionChunk({ object: null })).toBe(false); }); }); - - describe('isConversationResponse', () => { - it('should return true for valid conversation responses', () => { - const validConversation = { - object: 'conversation', - id: 'conv_689667905b048191b4740501625afd940c7533ace33a2dab', - created_at: 1704067200, - }; - expect(isConversationResponse(validConversation)).toBe(true); - }); - - it('should return true for conversation with metadata', () => { - const conversationWithMetadata = { - object: 'conversation', - id: 'conv_123', - created_at: 1704067200, - metadata: { user_id: 'user_123' }, - }; - expect(isConversationResponse(conversationWithMetadata)).toBe(true); - }); - - it('should return false for invalid responses', () => { - expect(isConversationResponse(null)).toBe(false); - expect(isConversationResponse(undefined)).toBe(false); - expect(isConversationResponse('string')).toBe(false); - expect(isConversationResponse(123)).toBe(false); - expect(isConversationResponse({})).toBe(false); - expect(isConversationResponse({ object: 'thread' })).toBe(false); - expect(isConversationResponse({ object: 'response' })).toBe(false); - expect(isConversationResponse({ object: null 
})).toBe(false); - }); - }); });