- Tracks LLM inference automatically via the OpenAI integration

Run with: uv run agentic_example.py
Requires: OPENROUTER_API_KEY environment variable. Set WILDEDGE_DSN to send events.
1818"""
1919
2020import json
21+ import os
22+ import time
23+ import uuid
2124
2225from openai import OpenAI
2326
2831 integrations = "openai" ,
2932)
3033
# OpenRouter speaks the OpenAI wire protocol, so the stock OpenAI client
# works against it unchanged — only the base URL and API key differ.
openai_client = OpenAI(
    api_key=os.getenv("OPENROUTER_API_KEY"),
    base_url="https://openrouter.ai/api/v1",
)
3238
3339# --- Tools -------------------------------------------------------------------
3440
6571
6672
def get_weather(city: str) -> str:
    """Return a canned weather report for *city* as a JSON string.

    Sleeps ~150ms to simulate the latency of a real weather API call.
    """
    time.sleep(0.15)
    report = {"city": city, "temperature_c": 18, "condition": "partly cloudy"}
    return json.dumps(report)
7077
7178
7279def calculator (expression : str ) -> str :
80+ # ~60ms to simulate a remote computation call.
81+ time .sleep (0.06 )
7382 try :
7483 result = eval (expression , {"__builtins__" : {}}) # noqa: S307
7584 return json .dumps ({"expression" : expression , "result" : result })
@@ -97,8 +106,23 @@ def call_tool(name: str, arguments: dict) -> str:
97106 return result
98107
99108
def retrieve_context(query: str) -> str:
    """Fetch relevant context from the vector store (~120ms)."""
    # Truncate the query up front so the trace summary stays bounded.
    query_summary = query[:200]
    with we.span(kind="retrieval", name="vector_search", input_summary=query_summary) as sp:
        # Simulated vector-store round trip.
        time.sleep(0.12)
        hit = f"[context: background knowledge relevant to '{query[:40]}']"
        sp.output_summary = hit
        return hit
120+
121+
100122def run_agent (task : str , step_index : int , messages : list ) -> str :
101- messages .append ({"role" : "user" , "content" : task })
123+ # Fetch context before the first reasoning step, include it in the user turn.
124+ context = retrieve_context (task )
125+ messages .append ({"role" : "user" , "content" : f"{ task } \n \n Context: { context } " })
102126
103127 while True :
104128 with we .span (
@@ -108,15 +132,16 @@ def run_agent(task: str, step_index: int, messages: list) -> str:
108132 input_summary = task [:200 ],
109133 ) as span :
110134 response = openai_client .chat .completions .create (
111- model = "gpt-4o " ,
135+ model = "qwen/qwen3.5-flash-02-23 " ,
112136 messages = messages ,
113137 tools = TOOLS ,
114138 tool_choice = "auto" ,
139+ max_tokens = 512 ,
115140 )
116141 choice = response .choices [0 ]
117142 span .output_summary = choice .finish_reason
118143
119- messages .append (choice .message )
144+ messages .append (choice .message . model_dump ( exclude_none = True ) )
120145
121146 if choice .finish_reason == "tool_calls" :
122147 step_index += 1
@@ -130,8 +155,11 @@ def run_agent(task: str, step_index: int, messages: list) -> str:
130155 "content" : result ,
131156 }
132157 )
158+ # Not instrumented: context window update between tool calls (~80ms).
159+ # Shows up as a gap stripe in the trace view.
160+ time .sleep (0.08 )
133161 else :
134- return choice .message .content
162+ return choice .message .content or ""
135163
136164
137165# --- Main --------------------------------------------------------------------
@@ -144,11 +172,10 @@ def run_agent(task: str, step_index: int, messages: list) -> str:
system_prompt = "You are a helpful assistant. Use tools when needed."
messages = [{"role": "system", "content": system_prompt}]

# One trace per process run; a fresh UUID keeps repeated runs distinct
# in the trace view instead of piling into a single fixed run id.
with we.trace(agent_id="demo-agent", run_id=str(uuid.uuid4())):
    for task_no, task_text in enumerate(TASKS, start=1):
        print(f"\nTask {task_no}: {task_text}")
        reply = run_agent(task_text, step_index=task_no, messages=messages)
        print(f"Reply: {reply}")

we.flush()
0 commit comments