@@ -50,6 +50,7 @@ class AgentResult(BaseModel):
# ---- Runtime cache ----
# Tools discovered from extensions, wrapped for agent use
# (populated by initialize_runtime()).
PRELOADED_TOOLS: List[Any] = []
# Per-run tool/LLM traces; cleared at the start of each agent run.
RUN_TRACES: List[ToolTrace] = []
# All extensions' domain system prompts combined into one string
# (populated by initialize_runtime(); "" when none are available).
DOMAIN_PROMPTS_TEXT: str = ""
5354
5455
5556def _get_env (key : str , default : Optional [str ] = None ) -> Optional [str ]:
@@ -136,19 +137,32 @@ def _runner(**kwargs):
136137
137138
def initialize_runtime(tool_root: Optional[str] = None) -> None:
    """Discover extensions and cache their tools and domain system prompts.

    Populates the module-level caches PRELOADED_TOOLS (wrapped tool
    callables) and DOMAIN_PROMPTS_TEXT (all extensions' system prompts,
    each tagged with its domain name and joined by blank lines).

    Discovery and per-tool wrapping failures are swallowed by design:
    a broken extension degrades to "fewer tools" rather than a crash.

    Args:
        tool_root: Optional root directory to search for extensions;
            when None, the discoverer's default location is used.
    """
    global PRELOADED_TOOLS, DOMAIN_PROMPTS_TEXT
    try:
        exts = discover_extensions(tool_root)
    except Exception:
        # Best-effort: a broken discovery layer means no tools, not a crash.
        exts = []
    tools: List[Any] = []
    domain_prompts: List[str] = []
    for ext in exts:
        for fn in (ext.get("tools") or []):
            try:
                tools.append(_wrap_callable_as_tool(fn, ext.get("name", "unknown")))
            except Exception:
                # Skip tools that cannot be wrapped; keep the rest.
                continue
        # Collect the extension's full domain system prompt, if any.
        # Plain dict.get calls cannot raise here (the tools loop above
        # already relies on ext being a mapping), so no guard is needed.
        name = ext.get("name", "")
        sp = ext.get("system_prompt", "")
        if isinstance(name, str) and isinstance(sp, str) and sp.strip():
            domain_prompts.append(f"[Domain: {name}]\n{sp.strip()}")
    PRELOADED_TOOLS = tools
    # Joining plain strings cannot fail; every entry is non-empty by
    # construction, so no defensive try/except or filtering is needed.
    DOMAIN_PROMPTS_TEXT = "\n\n".join(domain_prompts)
152166
153167
154168def _active_models () -> Dict [str , str ]:
@@ -192,7 +206,12 @@ async def run_agent(user_prompt: str, chat_history: Optional[str] = None, memory
192206 if isinstance (llm , str ) and llm .strip () in {"low" , "med" , "high" }
193207 else get_chat_model (role = "domain" , model = _get_env ("REACT_MODEL" , "gpt-4.1" ), callbacks = [LLMRunTracer ("react" )], temperature = 0.0 )
194208 )
195- agent = create_react_agent (model , tools = tools )
209+ # Include combined domain system prompts as the agent's system prompt when available
210+ if isinstance (DOMAIN_PROMPTS_TEXT , str ) and DOMAIN_PROMPTS_TEXT .strip ():
211+ final_prompt = "Domain system prompts:\n " + DOMAIN_PROMPTS_TEXT .strip ()
212+ agent = create_react_agent (model , tools = tools , prompt = final_prompt )
213+ else :
214+ agent = create_react_agent (model , tools = tools )
196215 except Exception as e :
197216 msg = f"Error building ReAct agent: { str (e )} "
198217 return AgentResult (final = msg , results = [], timings = [], content = msg , response_time_secs = 0.0 , traces = [])
@@ -215,11 +234,11 @@ async def run_agent(user_prompt: str, chat_history: Optional[str] = None, memory
215234 import asyncio
216235 t0 = time .perf_counter ()
217236 try :
218- result = await agent .ainvoke ({"messages" : messages }, config = {"recursion_limit" : 8 , "callbacks" : [LLMRunTracer ("react" )]})
237+ result = await agent .ainvoke ({"messages" : messages }, config = {"recursion_limit" : 16 , "callbacks" : [LLMRunTracer ("react" )]})
219238 except RuntimeError :
220239 # fallback loop handling if needed
221240 loop = asyncio .get_event_loop ()
222- result = await agent .ainvoke ({"messages" : messages }, config = {"recursion_limit" : 8 , "callbacks" : [LLMRunTracer ("react" )]})
241+ result = await agent .ainvoke ({"messages" : messages }, config = {"recursion_limit" : 16 , "callbacks" : [LLMRunTracer ("react" )]})
223242 elapsed = time .perf_counter () - t0
224243
225244 # Extract final content
@@ -249,6 +268,93 @@ async def run_agent(user_prompt: str, chat_history: Optional[str] = None, memory
249268 )
250269
251270
async def run_agent_stream(user_prompt: str, chat_history: Optional[str] = None, memory: Optional[str] = None, tool_root: Optional[str] = None, llm: Optional[str] = None):
    """Yield incremental text chunks while the agent generates a response.

    Streams token deltas via the agent's astream_events API. If the agent
    cannot be built, streaming fails, or no text was streamed, falls back
    to a single non-streaming run_agent() call and yields its final text.

    Args:
        user_prompt: The user's request.
        chat_history: Optional prior-conversation text added as context.
        memory: Optional memory text added as context.
        tool_root: Optional extensions root; when given, forces rediscovery.
        llm: Optional model tier ("low" | "med" | "high"); otherwise the
            REACT_MODEL env var (default "gpt-4.1") selects the model.
    """
    # Discover/warm tools if not already done or if tool_root differs.
    if not PRELOADED_TOOLS or isinstance(tool_root, str):
        initialize_runtime(tool_root=tool_root)
    tools = PRELOADED_TOOLS or []
    if not tools:
        yield "No tools discovered. Ensure files matching *_tool.py exist under extensions/."
        return

    # Build a simple ReAct agent with all tools.
    try:
        from langgraph.prebuilt import create_react_agent
        # Use tier if provided; else fall back to env model.
        model = (
            get_chat_model(role="domain", tier=llm, callbacks=[LLMRunTracer("react")], temperature=0.0)
            if isinstance(llm, str) and llm.strip() in {"low", "med", "high"}
            else get_chat_model(role="domain", model=_get_env("REACT_MODEL", "gpt-4.1"), callbacks=[LLMRunTracer("react")], temperature=0.0)
        )
        # Include combined domain system prompts as the agent's system prompt when available.
        if isinstance(DOMAIN_PROMPTS_TEXT, str) and DOMAIN_PROMPTS_TEXT.strip():
            final_prompt = "Domain system prompts:\n" + DOMAIN_PROMPTS_TEXT.strip()
            agent = create_react_agent(model, tools=tools, prompt=final_prompt)
        else:
            agent = create_react_agent(model, tools=tools)
    except Exception:
        # If building agent fails, just yield non-streaming result from run_agent.
        res = await run_agent(user_prompt, chat_history=chat_history, memory=memory, tool_root=tool_root, llm=llm)
        yield res.final
        return

    # Prepare messages.
    from langchain_core.messages import SystemMessage, HumanMessage
    messages: List[Any] = []
    if chat_history or memory:
        messages.append(SystemMessage(content=(
            "Conversation context to consider when responding.\n"
            f"Chat history:\n{chat_history or ''}\n\n"
            f"Memory:\n{memory or ''}"
        )))
    messages.append(HumanMessage(content=user_prompt))

    # Clear traces for this run.
    del RUN_TRACES[:]

    yielded_any = False
    try:
        # Prefer event-streaming for token deltas.
        async for event in agent.astream_events({"messages": messages}, config={"recursion_limit": 16, "callbacks": [LLMRunTracer("react")]}, version="v1"):
            try:
                ev = event.get("event") if isinstance(event, dict) else getattr(event, "event", None)
                if ev != "on_chat_model_stream":
                    continue
                data = event.get("data") if isinstance(event, dict) else getattr(event, "data", {})
                chunk = (data or {}).get("chunk") if isinstance(data, dict) else getattr(data, "chunk", None)
                content = getattr(chunk, "content", None)

                # Handle both string content (OpenAI) and list content (Anthropic).
                text = None
                if isinstance(content, str):
                    text = content
                elif isinstance(content, list) and content:
                    # Anthropic format: [{'text': '...', 'type': 'text', 'index': 0}]
                    # Join ALL text parts: stopping at the first one would
                    # silently drop text when a chunk carries several parts.
                    parts = [
                        item.get("text", "")
                        for item in content
                        if isinstance(item, dict) and item.get("type") == "text"
                    ]
                    if parts:
                        text = "".join(parts)

                if text:
                    yielded_any = True
                    yield text
            except Exception:
                # Ignore malformed events; continue streaming.
                continue
    except Exception:
        # If streaming path fails, fall back to single-shot below.
        pass

    if not yielded_any:
        # Fallback to non-streaming execution.
        res = await run_agent(user_prompt, chat_history=chat_history, memory=memory, tool_root=tool_root, llm=llm)
        yield res.final
    return
356+
357+
252358def main (argv : Optional [List [str ]] = None ) -> int :
253359 import argparse
254360 parser = argparse .ArgumentParser (description = "Simple ReAct agent over all tools" )
0 commit comments