diff --git a/.changeset/great-lamps-boil.md b/.changeset/great-lamps-boil.md
new file mode 100644
index 0000000..e4d6cd6
--- /dev/null
+++ b/.changeset/great-lamps-boil.md
@@ -0,0 +1,5 @@
+---
+"@mrck-labs/grid-core": patch
+---
+
+fix tool calling again
diff --git a/packages/core/src/factories/configurable-agent.factory.ts b/packages/core/src/factories/configurable-agent.factory.ts
index ae05179..922c995 100644
--- a/packages/core/src/factories/configurable-agent.factory.ts
+++ b/packages/core/src/factories/configurable-agent.factory.ts
@@ -774,8 +774,8 @@ export const createConfigurableAgent = async ({
       let fullText = "";
 
       // Call streaming LLM with tools
-      // Tool events are sent via sendUpdate callback in onStepFinish
-      const { textStream, generation } =
+      // Use fullStream to capture all events including tool calls
+      const { fullStream, generation } =
         await base.llmService.runStreamedLLMWithTools({
           messages: workingMessages,
           tools: availableTools,
@@ -795,21 +795,64 @@
           ...mergedLlmOptions,
         });
 
-      // Stream text chunks
-      for await (const chunk of textStream) {
-        fullText += chunk;
+      // Process the full stream which includes text, tool calls, and tool results
+      for await (const part of fullStream) {
+        // Handle text deltas
+        if (part.type === "text-delta") {
+          fullText += part.textDelta;
+
+          // Yield chunk to caller
+          yield {
+            type: "text_delta",
+            content: part.textDelta,
+          };
+
+          // Also send via sendUpdate for IPC
+          await sendUpdate({
+            type: "text_delta",
+            content: part.textDelta,
+          });
+        }
 
-        // Yield chunk to caller
-        yield {
-          type: "text_delta",
-          content: chunk,
-        };
+        // Handle tool calls - emit when tool is being called
+        if (part.type === "tool-call") {
+          const toolCallData = {
+            toolCallId: part.toolCallId,
+            toolName: part.toolName,
+            args: part.args,
+          };
+
+          yield {
+            type: "tool_execution",
+            content: JSON.stringify(toolCallData),
+            metadata: toolCallData,
+          };
+
+          await sendUpdate({
+            type: "tool_execution",
+            content: JSON.stringify(toolCallData),
+          });
+        }
 
-        // Also send via sendUpdate for IPC
-        await sendUpdate({
-          type: "text_delta",
-          content: chunk,
-        });
+        // Handle tool results - emit when tool returns
+        if (part.type === "tool-result") {
+          const toolResultData = {
+            toolCallId: part.toolCallId,
+            toolName: part.toolName,
+            result: part.result,
+          };
+
+          yield {
+            type: "tool_response",
+            content: JSON.stringify(toolResultData),
+            metadata: toolResultData,
+          };
+
+          await sendUpdate({
+            type: "tool_response",
+            content: JSON.stringify(toolResultData),
+          });
+        }
       }
 
       // Build final response
diff --git a/packages/core/src/services/base.llm.service.ts b/packages/core/src/services/base.llm.service.ts
index 218071a..54610e4 100644
--- a/packages/core/src/services/base.llm.service.ts
+++ b/packages/core/src/services/base.llm.service.ts
@@ -392,6 +392,7 @@ export const baseLLMService = (
     options: LLMServiceOptions & { tools?: any[] }
   ): Promise<{
     textStream: AsyncIterable<string>;
+    fullStream: AsyncIterable<any>;
     generation: any;
   }> => {
     const {
@@ -446,7 +447,7 @@
       // Forward provider-specific options
       ...(providerOptions ? { providerOptions: providerOptions as any } : {}),
       onStepFinish: (step) => {
-        // Tool telemetry
+        // Tool telemetry - fires when each step completes
         step.content.forEach((content) => {
           if (content.type === "tool-call") {
             const sc: any = content;
@@ -463,35 +464,29 @@
               toolName,
               args
             );
-            if (sendUpdate) {
-              sendUpdate({
-                type: "tool_execution",
-                content: JSON.stringify(content),
-              });
-            }
           }
 
           if (content.type === "tool-result") {
             const sc: any = content;
             const toolCallId = sc.toolCallId ?? sc.id ?? sc.callId;
-            const result = sc.result ?? sc.output ?? sc.data;
+            const resultData = sc.result ?? sc.output ?? sc.data;
             langfuse.endToolSpanForSession(
               options.context.sessionToken,
               toolCallId,
-              result
+              resultData
            );
-            if (sendUpdate) {
-              sendUpdate({
-                type: "tool_response",
-                content: JSON.stringify(content),
-              });
-            }
           }
         });
       },
     });
 
-    return { textStream: result.textStream, generation };
+    // Return both textStream and fullStream
+    // fullStream contains all events including tool-call and tool-result
+    return {
+      textStream: result.textStream,
+      fullStream: result.fullStream,
+      generation,
+    };
   };
 
   const isAvailable = async (): Promise<boolean> => {
diff --git a/packages/core/src/types/llm.types.ts b/packages/core/src/types/llm.types.ts
index d21e3ee..e7ad82f 100644
--- a/packages/core/src/types/llm.types.ts
+++ b/packages/core/src/types/llm.types.ts
@@ -123,6 +123,7 @@ export interface LLMService {
     options: LLMServiceOptions & { tools?: any[] }
   ): Promise<{
     textStream: AsyncIterable<string>;
+    fullStream: AsyncIterable<any>;
     generation: any;
   }>;
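
A quick sketch of what this change means for a consumer: the agent's stream now interleaves tool activity with text instead of carrying text only. The snippet below shows one way to handle the three event types yielded by the factory (`text_delta`, `tool_execution`, `tool_response`); the `AgentStreamEvent` interface and the `printAgentRun` helper are illustrative assumptions, not exports of `@mrck-labs/grid-core`.

```ts
// Hypothetical consumer of the agent stream introduced in this diff.
// The event shape mirrors the `yield`s in configurable-agent.factory.ts;
// the interface and helper names are assumptions, not part of the package.
interface AgentStreamEvent {
  type: "text_delta" | "tool_execution" | "tool_response";
  content: string;
  metadata?: Record<string, unknown>;
}

async function printAgentRun(events: AsyncIterable<AgentStreamEvent>) {
  for await (const event of events) {
    switch (event.type) {
      case "text_delta":
        // Incremental model text (fullStream "text-delta" parts).
        process.stdout.write(event.content);
        break;
      case "tool_execution": {
        // The model issued a tool call (fullStream "tool-call" parts).
        const { toolName, args } = JSON.parse(event.content);
        console.log(`\n[tool call] ${toolName}`, args);
        break;
      }
      case "tool_response": {
        // The tool returned (fullStream "tool-result" parts).
        const { toolName, result } = JSON.parse(event.content);
        console.log(`[tool result] ${toolName}`, result);
        break;
      }
    }
  }
}
```

The same three shapes travel over the `sendUpdate` IPC path, minus the `metadata` field on the tool events.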