Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
35 changes: 25 additions & 10 deletions src/openagents/agents/collaborator_agent.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,6 @@
from ast import Dict
import logging
import re
from typing import Dict

from openagents.agents.runner import AgentRunner
from openagents.models.agent_config import AgentTriggerConfigItem
Expand All @@ -22,6 +23,16 @@ def __init__(self, **kwargs):
else:
self._triggers_map = {}

def _is_mentioned(self, event) -> bool:
    """Return True if this agent is @mentioned in the event payload.

    Two signals are checked, in order:
      1. an explicit ``mentioned_agent_id`` payload field equal to this
         agent's id;
      2. an ``@<agent_id>`` token inside the free-text ``content`` field.

    Args:
        event: Incoming event; only ``event.payload`` (a dict or None)
            is read.

    Returns:
        bool: True when this agent is mentioned, False otherwise.
    """
    payload = event.payload or {}
    agent_id = self.client.agent_id
    if not agent_id:
        # Without an id we can never be mentioned — and the regex below
        # would otherwise match every bare "@" in the content.
        return False
    if payload.get("mentioned_agent_id") == agent_id:
        return True
    content = payload.get("content", "")
    if not content:
        return False
    # (?!\w) instead of \b: \b is defined relative to the *preceding*
    # character, so it silently inverts when the agent id ends in a
    # non-word character (e.g. id "bot." followed by a space would NOT
    # match with \b, while "@bot.x" spuriously would).
    pattern = rf"@{re.escape(agent_id)}(?!\w)"
    return re.search(pattern, content) is not None

async def react(self, context: EventContext):
"""React to an incoming message using agent orchestrator."""
trigger = self._triggers_map.get(context.incoming_event.event_name)
Expand All @@ -30,13 +41,17 @@ async def react(self, context: EventContext):
f"Trigger found for event: {context.incoming_event.event_name}, responding with trigger instruction"
)
await self.run_agent(context=context, instruction=trigger.instruction)
elif self.agent_config is not None and self.agent_config.react_to_all_messages:
logger.debug(
f"No trigger found for event: {context.incoming_event.event_name} but react_to_all_messages is True, responding"
)
await self.run_agent(context=context)
elif self._is_mentioned(context.incoming_event):
logger.debug(
f"Agent @mentioned in event: {context.incoming_event.event_name}, responding"
)
await self.run_agent(context=context)
else:
if self.agent_config is not None and self.agent_config.react_to_all_messages:
logger.debug(
f"No trigger found for event: {context.incoming_event.event_name} but react_to_all_messages is True, responding with default instruction"
)
await self.run_agent(context=context)
else:
logger.debug(
f"No trigger found for event: {context.incoming_event.event_name} and react_to_all_messages is False, doing nothing"
)
logger.debug(
f"No trigger for event: {context.incoming_event.event_name}, not mentioned, skipping"
)
1 change: 0 additions & 1 deletion src/openagents/agents/runner.py
Original file line number Diff line number Diff line change
Expand Up @@ -802,7 +802,6 @@ async def _async_start(

self._running = True
# Start the loop in a background task
# Start the loop in a background task
self._loop_task = asyncio.create_task(self._async_loop())
# Setup the agent
await self.setup()
Expand Down
3 changes: 3 additions & 0 deletions src/openagents/client/cli_agent.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,10 +6,13 @@
from typing import Optional

import typer
import yaml
from rich.panel import Panel
from rich.progress import BarColumn, Progress, SpinnerColumn, TaskProgressColumn, TextColumn, TimeElapsedColumn
from rich.table import Table
from rich import box

from openagents.client.cli_helpers import configure_workspace_logging
from openagents.client.cli_shared import app, console

agent_app = typer.Typer(
Expand Down
32 changes: 31 additions & 1 deletion src/openagents/lms/providers.py
Original file line number Diff line number Diff line change
Expand Up @@ -160,13 +160,43 @@ async def chat_completion(
tools: Optional[List[Dict[str, Any]]] = None,
) -> Dict[str, Any]:
"""Generate chat completion using Anthropic API."""
# Convert messages to Anthropic format
# Convert messages from OpenAI format to Anthropic format.
# Key differences:
# OpenAI role:"assistant" + tool_calls → Anthropic role:"assistant" + tool_use content blocks
# OpenAI role:"tool" → Anthropic role:"user" + tool_result content blocks
anthropic_messages = []
system_message = None

for msg in messages:
if msg["role"] == "system":
system_message = msg["content"]
elif msg["role"] == "assistant" and msg.get("tool_calls"):
# Convert assistant tool_calls to Anthropic tool_use content blocks
content_blocks = []
if msg.get("content"):
content_blocks.append({"type": "text", "text": msg["content"]})
for tc in msg["tool_calls"]:
func = tc.get("function", tc)
content_blocks.append({
"type": "tool_use",
"id": tc["id"],
"name": func.get("name", tc.get("name", "")),
"input": json.loads(func["arguments"]) if isinstance(func.get("arguments"), str) else func.get("arguments", {}),
})
anthropic_messages.append({"role": "assistant", "content": content_blocks})
elif msg["role"] == "tool":
# Convert tool results to Anthropic tool_result content blocks.
# Consecutive tool results are merged into a single user message.
tool_result_block = {
"type": "tool_result",
"tool_use_id": msg.get("tool_call_id", ""),
"content": str(msg.get("content", "")),
}
# Merge with previous user message if it already has tool_result blocks
if anthropic_messages and anthropic_messages[-1]["role"] == "user" and isinstance(anthropic_messages[-1]["content"], list):
anthropic_messages[-1]["content"].append(tool_result_block)
else:
anthropic_messages.append({"role": "user", "content": [tool_result_block]})
else:
anthropic_messages.append(
{"role": msg["role"], "content": msg["content"]}
Expand Down
23 changes: 13 additions & 10 deletions src/openagents/utils/mcp_connector.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,7 @@

import logging
import os
from contextlib import AsyncExitStack
from typing import Any, Dict, List

try:
Expand Down Expand Up @@ -34,6 +35,7 @@ def __init__(self):
self._mcp_clients: Dict[str, Any] = {}
self._mcp_tools: List[AgentTool] = []
self._mcp_sessions: Dict[str, ClientSession] = {}
self._exit_stack = AsyncExitStack()

async def setup_mcp_clients(self, mcp_configs: List[MCPServerConfig]) -> List[AgentTool]:
"""Setup MCP clients based on configuration.
Expand Down Expand Up @@ -97,25 +99,26 @@ async def _setup_stdio_mcp_client(self, mcp_config: MCPServerConfig):
env=env
)

# Use the stdio client from the MCP library
transport = await stdio_client(server_params).__aenter__()
read_stream, write_stream = transport

# Create a session over those streams
session = ClientSession(read_stream, write_stream)
await session.__aenter__()
# Use AsyncExitStack to properly manage the nested context managers
# (stdio_client spawns background tasks that must stay alive)
read_stream, write_stream = await self._exit_stack.enter_async_context(
stdio_client(server_params)
)

session = await self._exit_stack.enter_async_context(
ClientSession(read_stream, write_stream)
)

# Initialize the session
await session.initialize()

logger.info(f"Connected to stdio MCP server '{mcp_config.name}'")

# Store the session and transport info
# Store the session info (transport managed by AsyncExitStack)
mcp_client = {
"name": mcp_config.name,
"type": "stdio",
"session": session,
"transport": transport,
"config": mcp_config
}

Expand Down Expand Up @@ -225,7 +228,7 @@ async def _add_mcp_tools(self, server_name: str, session: 'ClientSession'):
name=f"mcp_{server_name}_{tool_name}",
description=tool_description,
func=self._create_mcp_session_tool_function(server_name, tool_name),
parameters=tool_parameters
input_schema=tool_parameters
)

self._mcp_tools.append(adapter_tool)
Expand Down
Loading