From bd03aa5d5fbd131512624756b2d98ceea3c32beb Mon Sep 17 00:00:00 2001
From: Charles-Henri ROBICHE
Date: Tue, 17 Mar 2026 08:36:13 +0100
Subject: [PATCH] fix: omit system_prompt when empty to avoid Anthropic API
 rejection

Anthropic's API rejects messages with `{'role': 'system', 'content': ''}`.
When no system prompt is provided, the code was passing `system_prompt=""`
to PydanticAI Agent, which translated to an empty system message. This
caused all LLM calls without an explicit system prompt (e.g.
`detect_base_urls`) to fail with a 400 error when routed through LiteLLM
to any Anthropic model (claude-opus, claude-sonnet, claude-haiku).

The fix omits the `system_prompt` kwarg entirely when there is no system
prompt, so PydanticAI never generates a system message.

Co-Authored-By: Claude Opus 4.6 (1M context)
---
 cli/helpers/llm/_conversation.py | 9 ++++++---
 1 file changed, 6 insertions(+), 3 deletions(-)

diff --git a/cli/helpers/llm/_conversation.py b/cli/helpers/llm/_conversation.py
index a47eafa..844771a 100644
--- a/cli/helpers/llm/_conversation.py
+++ b/cli/helpers/llm/_conversation.py
@@ -85,14 +85,17 @@ async def _run(self, prompt: str, *, output_type: Any) -> Any:
             max_tokens=self._max_tokens,
         )
 
-        agent = Agent(
-            model,
-            system_prompt=self._system or "",
+        agent_kwargs: dict[str, Any] = dict(
+            model=model,
             tools=self._tools or [],
             deps_type=ToolDeps,
             output_type=output_type,
             model_settings=settings,
         )
+        if self._system:
+            agent_kwargs["system_prompt"] = self._system
+
+        agent = Agent(**agent_kwargs)
 
         flushed_len = len(self._messages)
 