Skip to content

Commit aeaa43e

Browse files
committed
chore: add openai test script
1 parent 3ee834a commit aeaa43e

3 files changed

Lines changed: 37 additions & 10 deletions

File tree

openagent/router/openai.py

Lines changed: 10 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -168,26 +168,26 @@ async def create_chat_completion(request: ChatCompletionRequest):
168168
agent = build_workflow(llm)
169169

170170
combined_message = "\n".join([f"{msg.role}: {msg.content}" for msg in request.messages])
171-
171+
172172
tool_calls = []
173173
assistant_message = None
174174

175175
async for event in agent.astream_events(
176-
{"messages": [HumanMessage(content=combined_message)]},
177-
version="v1"
176+
{"messages": [HumanMessage(content=combined_message)]},
177+
version="v1"
178178
):
179179
if event["event"] == "on_tool_end":
180180
tool_name = event["name"]
181181
tool_input = event["data"]["input"]
182-
182+
183183
tool_call = ToolCall(
184184
function={
185185
"name": tool_name,
186186
"arguments": json.dumps(tool_input)
187187
}
188188
)
189189
tool_calls.append(tool_call)
190-
190+
191191
elif event["event"] == "on_chat_model_stream":
192192
if isinstance(event["data"]["chunk"].content, str):
193193
assistant_message = (assistant_message or "") + event["data"]["chunk"].content
@@ -199,7 +199,7 @@ async def create_chat_completion(request: ChatCompletionRequest):
199199
choice = ChatChoice(
200200
index=0,
201201
message=ChatMessage(
202-
role="assistant",
202+
role="assistant",
203203
content=assistant_message,
204204
tool_calls=tool_calls if tool_calls else None
205205
),
@@ -209,7 +209,7 @@ async def create_chat_completion(request: ChatCompletionRequest):
209209
# Estimate token usage
210210
prompt_tokens = sum(len(msg.content.split()) * 1.3 for msg in request.messages)
211211
completion_tokens = len(assistant_message.split()) * 1.3 if assistant_message else 0
212-
212+
213213
usage = Usage(
214214
prompt_tokens=int(prompt_tokens),
215215
completion_tokens=int(completion_tokens),
@@ -244,7 +244,7 @@ async def stream_chat_completion(request: ChatCompletionRequest):
244244
model=request.model,
245245
choices=[StreamChoice(
246246
index=0,
247-
delta=DeltaMessage(role="assistant"),
247+
delta=DeltaMessage(role="assistant", content=""),
248248
)]
249249
)
250250
yield f"data: {chunk.json()}\n\n"
@@ -270,15 +270,15 @@ async def stream_chat_completion(request: ChatCompletionRequest):
270270
# Handle tool responses
271271
tool_name = event["name"]
272272
tool_input = event["data"]["input"]
273-
273+
274274
# Create a tool call response
275275
tool_call = ToolCall(
276276
function={
277277
"name": tool_name,
278278
"arguments": json.dumps(tool_input)
279279
}
280280
)
281-
281+
282282
chunk = ChatCompletionStreamResponse(
283283
model=request.model,
284284
choices=[StreamChoice(

tests/openai/__init__.py

Whitespace-only changes.

tests/openai/example.sh

Lines changed: 27 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,27 @@
1+
curl http://localhost:8000/v1/chat/completions \
2+
-H "Content-Type: application/json" \
3+
-d '{
4+
"model": "gpt-4o",
5+
"messages": [
6+
{
7+
"role": "user",
8+
"content": "Hello!"
9+
}
10+
]
11+
}'
12+
13+
14+
15+
16+
curl http://localhost:8000/v1/chat/completions \
17+
-H "Content-Type: application/json" \
18+
-d '{
19+
"model": "gpt-4o",
20+
    "stream": true,
21+
"messages": [
22+
{
23+
"role": "user",
24+
"content": "Hello!"
25+
}
26+
]
27+
}'

0 commit comments

Comments
 (0)