From 481cc009a392549017c08f27b9500f5ab41415be Mon Sep 17 00:00:00 2001 From: iml1s Date: Wed, 11 Mar 2026 02:20:25 +0800 Subject: [PATCH] fix(llm_client): remove response_format json_object for local LLM compatibility LM Studio and Ollama do not support response_format: json_object, only json_schema or text. This causes errors when using local LLMs. The existing markdown fence cleanup logic in chat_json() already handles parsing JSON from raw LLM output, making response_format unnecessary. This change follows the same pattern as commit 985f89f, which improved compatibility with diverse model outputs. Tested with: LM Studio + qwen3.5-9b (full predict pipeline passes) --- backend/app/utils/llm_client.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/backend/app/utils/llm_client.py b/backend/app/utils/llm_client.py index 6c1a81f4..e332eebb 100644 --- a/backend/app/utils/llm_client.py +++ b/backend/app/utils/llm_client.py @@ -88,7 +88,8 @@ def chat_json( messages=messages, temperature=temperature, max_tokens=max_tokens, - response_format={"type": "json_object"} + # 不設 response_format 以相容 LM Studio / Ollama 等本地模型 + # 依賴 prompt 中的 JSON 指示 + 下方的 markdown 清理邏輯 ) # 清理markdown代码块标记 cleaned_response = response.strip()