Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -58,7 +58,7 @@ class ChatCompletionResult:
"""Result of processing streaming chat completion chunks."""

response_id: str
content: list[str]
content: list[str | None]
tool_calls: dict[int, OpenAIChatCompletionToolCall]
created: int
model: str
Expand All @@ -72,7 +72,7 @@ class ChatCompletionResult:
@property
def content_text(self) -> str:
"""Get joined content as string."""
return "".join(self.content)
return "".join(content for content in self.content if content is not None)

@property
def has_tool_calls(self) -> bool:
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -33,7 +33,7 @@ def _process(c) -> str:
if isinstance(c, str):
return c
elif isinstance(c, TextContentItem) or isinstance(c, OpenAIChatCompletionContentPartTextParam):
return c.text
return c.text or ""
elif isinstance(c, ImageContentItem) or isinstance(c, OpenAIChatCompletionContentPartImageParam):
return "<image>"
elif isinstance(c, OpenAIFile):
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,31 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.

"""Regression tests for None-safe response content joins."""

from llama_stack.providers.inline.responses.builtin.responses.types import ChatCompletionResult


def _build_result(content: list[str | None]) -> ChatCompletionResult:
    """Build a minimal ChatCompletionResult whose only varying field is *content*.

    All other fields are fixed placeholder values so tests exercise just the
    None-handling of the content list.
    """
    fixed_fields = {
        "response_id": "resp_123",
        "tool_calls": {},
        "created": 0,
        "model": "test-model",
        "finish_reason": "stop",
        "message_item_id": "msg_123",
        "tool_call_item_ids": {},
        "content_part_emitted": False,
    }
    return ChatCompletionResult(content=content, **fixed_fields)


def test_content_text_skips_none_entries():
    """None placeholders must be dropped when joining content into text."""
    result = _build_result([None, "tool result"])
    assert result.content_text == "tool result"


def test_content_text_returns_empty_string_for_none_only_content():
    """A content list holding only None must join to the empty string."""
    result = _build_result([None])
    assert result.content_text == ""
Original file line number Diff line number Diff line change
@@ -0,0 +1,25 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.

"""Regression tests for None-safe prompt content joins."""

from llama_stack.providers.utils.inference.prompt_adapter import interleaved_content_as_str
from llama_stack_api.common.content_types import TextContentItem


class TestInterleavedContentAsStrNoneSafety:
    """interleaved_content_as_str must tolerate None content without raising."""

    def test_none_content_returns_empty_string(self):
        # A wholly-absent content value maps to the empty string.
        assert interleaved_content_as_str(None) == ""

    def test_text_content_item_with_none_text_returns_empty_string(self):
        # model_construct bypasses validation, so text=None reaches the join path.
        no_text_item = TextContentItem.model_construct(text=None)
        assert interleaved_content_as_str([no_text_item]) == ""

    def test_text_content_item_with_valid_text_is_preserved(self):
        # The happy path: real text passes through unchanged.
        text_item = TextContentItem(text="hello world")
        assert interleaved_content_as_str([text_item]) == "hello world"
Loading