diff --git a/src/llama_stack/providers/inline/responses/builtin/responses/types.py b/src/llama_stack/providers/inline/responses/builtin/responses/types.py
index ff6367bf3b..c959b68397 100644
--- a/src/llama_stack/providers/inline/responses/builtin/responses/types.py
+++ b/src/llama_stack/providers/inline/responses/builtin/responses/types.py
@@ -58,7 +58,7 @@ class ChatCompletionResult:
     """Result of processing streaming chat completion chunks."""

     response_id: str
-    content: list[str]
+    content: list[str | None]
     tool_calls: dict[int, OpenAIChatCompletionToolCall]
     created: int
     model: str
@@ -72,7 +72,7 @@ class ChatCompletionResult:
     @property
     def content_text(self) -> str:
         """Get joined content as string."""
-        return "".join(self.content)
+        return "".join(content for content in self.content if content is not None)

     @property
     def has_tool_calls(self) -> bool:
diff --git a/src/llama_stack/providers/utils/inference/prompt_adapter.py b/src/llama_stack/providers/utils/inference/prompt_adapter.py
index 26aac76de8..99d0b9e4ea 100644
--- a/src/llama_stack/providers/utils/inference/prompt_adapter.py
+++ b/src/llama_stack/providers/utils/inference/prompt_adapter.py
@@ -33,7 +33,7 @@ def _process(c) -> str:
     if isinstance(c, str):
         return c
     elif isinstance(c, TextContentItem) or isinstance(c, OpenAIChatCompletionContentPartTextParam):
-        return c.text
+        return c.text or ""
     elif isinstance(c, ImageContentItem) or isinstance(c, OpenAIChatCompletionContentPartImageParam):
         return ""
     elif isinstance(c, OpenAIFile):
diff --git a/tests/unit/providers/inline/responses/builtin/responses/test_types.py b/tests/unit/providers/inline/responses/builtin/responses/test_types.py
new file mode 100644
index 0000000000..6bd570eeaf
--- /dev/null
+++ b/tests/unit/providers/inline/responses/builtin/responses/test_types.py
@@ -0,0 +1,31 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the terms described in the LICENSE file in
+# the root directory of this source tree.
+
+"""Regression tests for None-safe response content joins."""
+
+from llama_stack.providers.inline.responses.builtin.responses.types import ChatCompletionResult
+
+
+def _build_result(content: list[str | None]) -> ChatCompletionResult:
+    return ChatCompletionResult(
+        response_id="resp_123",
+        content=content,
+        tool_calls={},
+        created=0,
+        model="test-model",
+        finish_reason="stop",
+        message_item_id="msg_123",
+        tool_call_item_ids={},
+        content_part_emitted=False,
+    )
+
+
+def test_content_text_skips_none_entries():
+    assert _build_result([None, "tool result"]).content_text == "tool result"
+
+
+def test_content_text_returns_empty_string_for_none_only_content():
+    assert _build_result([None]).content_text == ""
diff --git a/tests/unit/providers/utils/inference/test_prompt_adapter_none_safety.py b/tests/unit/providers/utils/inference/test_prompt_adapter_none_safety.py
new file mode 100644
index 0000000000..980a3a4500
--- /dev/null
+++ b/tests/unit/providers/utils/inference/test_prompt_adapter_none_safety.py
@@ -0,0 +1,25 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the terms described in the LICENSE file in
+# the root directory of this source tree.
+
+"""Regression tests for None-safe prompt content joins."""
+
+from llama_stack.providers.utils.inference.prompt_adapter import interleaved_content_as_str
+from llama_stack_api.common.content_types import TextContentItem
+
+
+class TestInterleavedContentAsStrNoneSafety:
+    def test_none_content_returns_empty_string(self):
+        assert interleaved_content_as_str(None) == ""
+
+    def test_text_content_item_with_none_text_returns_empty_string(self):
+        item = TextContentItem.model_construct(text=None)
+
+        assert interleaved_content_as_str([item]) == ""
+
+    def test_text_content_item_with_valid_text_is_preserved(self):
+        item = TextContentItem(text="hello world")
+
+        assert interleaved_content_as_str([item]) == "hello world"