|
| 1 | +# /// script |
| 2 | +# requires-python = ">=3.10" |
| 3 | +# dependencies = ["wildedge-sdk", "openai"] |
| 4 | +# |
| 5 | +# [tool.uv.sources] |
| 6 | +# wildedge-sdk = { path = "..", editable = true } |
| 7 | +# /// |
| 8 | +"""Agentic workflow example with tool use. |
| 9 | +
|
| 10 | +Demonstrates WildEdge tracing for a simple agent that: |
| 11 | + - Runs within a trace (one per agent session) |
| 12 | + - Wraps each reasoning step in an agent_step span |
| 13 | + - Wraps each tool call in a tool span |
| 14 | + - Tracks LLM inference automatically via the OpenAI integration |
| 15 | +
|
| 16 | +Run with: uv run agentic_example.py |
| 17 | +Requires: OPENROUTER_API_KEY environment variable. Set WILDEDGE_DSN to send events. |
| 18 | +""" |
import json
import os
import time
import uuid

from openai import OpenAI

import wildedge

# Initialize WildEdge tracing. The "openai" integration auto-instruments LLM
# calls made through the OpenAI client, so no manual spans are needed for them.
we = wildedge.init(
    app_version="1.0.0",
    integrations="openai",
)

# OpenRouter exposes an OpenAI-compatible API, so the official client works
# with only a base_url override.
# NOTE(review): if OPENROUTER_API_KEY is unset, api_key is None — construction
# succeeds but every request will fail with an auth error; confirm intended.
openai_client = OpenAI(
    base_url="https://openrouter.ai/api/v1",
    api_key=os.getenv("OPENROUTER_API_KEY"),
)
| 38 | + |
| 39 | +# --- Tools ------------------------------------------------------------------- |
| 40 | + |
# Tool schemas advertised to the model (OpenAI function-calling format).
# Names must match the keys of TOOL_HANDLERS below.
TOOLS = [
    {
        "type": "function",
        "function": {
            "name": "get_weather",
            "description": "Return current weather for a city.",
            "parameters": {
                "type": "object",
                "properties": {
                    "city": {"type": "string"},
                },
                "required": ["city"],
            },
        },
    },
    {
        "type": "function",
        "function": {
            "name": "calculator",
            "description": "Evaluate a simple arithmetic expression.",
            "parameters": {
                "type": "object",
                "properties": {
                    "expression": {"type": "string"},
                },
                "required": ["expression"],
            },
        },
    },
]
| 71 | + |
| 72 | + |
def get_weather(city: str) -> str:
    """Return a canned weather report for *city* as a JSON string.

    Sleeps ~150ms to simulate the latency of a real weather API call.
    """
    time.sleep(0.15)
    report = {"city": city, "temperature_c": 18, "condition": "partly cloudy"}
    return json.dumps(report)
| 77 | + |
| 78 | + |
def calculator(expression: str) -> str:
    """Safely evaluate a simple arithmetic expression and return JSON.

    Sleeps ~60ms to simulate a remote computation call. Only numeric
    literals, unary +/- and the binary operators + - * / // % ** are
    accepted; names, calls, attribute access etc. are rejected.

    Args:
        expression: Arithmetic expression text (model-generated, untrusted).

    Returns:
        JSON string {"expression": ..., "result": ...} on success, or
        {"error": ...} if the expression is malformed or disallowed.
    """
    # Local imports keep this demo block self-contained.
    import ast
    import operator

    # eval() on model-generated text is unsafe even with an empty
    # __builtins__ (dunder-based escapes exist), so walk the AST and
    # allow only arithmetic nodes instead.
    allowed = {
        ast.Add: operator.add,
        ast.Sub: operator.sub,
        ast.Mult: operator.mul,
        ast.Div: operator.truediv,
        ast.FloorDiv: operator.floordiv,
        ast.Mod: operator.mod,
        ast.Pow: operator.pow,
        ast.USub: operator.neg,
        ast.UAdd: operator.pos,
    }

    def _eval(node: ast.AST):
        # Recursively evaluate an arithmetic-only AST; raise on anything else.
        if isinstance(node, ast.Expression):
            return _eval(node.body)
        if isinstance(node, ast.Constant) and isinstance(node.value, (int, float)):
            return node.value
        if isinstance(node, ast.BinOp) and type(node.op) in allowed:
            return allowed[type(node.op)](_eval(node.left), _eval(node.right))
        if isinstance(node, ast.UnaryOp) and type(node.op) in allowed:
            return allowed[type(node.op)](_eval(node.operand))
        raise ValueError(f"unsupported expression element: {type(node).__name__}")

    time.sleep(0.06)
    try:
        result = _eval(ast.parse(expression, mode="eval"))
        return json.dumps({"expression": expression, "result": result})
    except Exception as e:
        # Return an error payload instead of crashing the agent loop; the
        # model can read it in the tool result and recover.
        return json.dumps({"error": str(e)})
| 87 | + |
| 88 | + |
# Dispatch table: tool name (as advertised in TOOLS) -> Python implementation.
# call_tool() resolves handlers through this mapping.
TOOL_HANDLERS = {
    "get_weather": get_weather,
    "calculator": calculator,
}
| 93 | + |
| 94 | + |
| 95 | +# --- Agent loop -------------------------------------------------------------- |
| 96 | + |
| 97 | + |
def call_tool(name: str, arguments: dict) -> str:
    """Execute tool *name* with *arguments* inside a WildEdge tool span.

    Input and output summaries are truncated to 200 characters on the span.
    """
    arg_summary = json.dumps(arguments)[:200]
    with we.span(kind="tool", name=name, input_summary=arg_summary) as span:
        result = TOOL_HANDLERS[name](**arguments)
        span.output_summary = result[:200]
        return result
| 107 | + |
| 108 | + |
def retrieve_context(query: str) -> str:
    """Fetch relevant context from the vector store (~120ms)."""
    with we.span(
        kind="retrieval",
        name="vector_search",
        input_summary=query[:200],
    ) as span:
        # Simulated lookup latency; the returned snippet is a placeholder.
        time.sleep(0.12)
        snippet = f"[context: background knowledge relevant to '{query[:40]}']"
        span.output_summary = snippet
        return snippet
| 120 | + |
| 121 | + |
def run_agent(task: str, step_index: int, messages: list) -> str:
    """Run the agent loop for one *task*, mutating *messages* in place.

    Each reasoning step is wrapped in an agent_step span; each tool call is
    executed via call_tool() (which opens a tool span). Loops until the model
    stops requesting tools, then returns its final text reply.

    Args:
        task: User request to solve.
        step_index: Starting index recorded on agent_step spans; incremented
            once per tool-call round.
        messages: Shared chat history (appended to, never truncated — grows
            across tasks).

    Returns:
        The final assistant message content, or "" if it was None.
    """
    # Fetch context before the first reasoning step, include it in the user turn.
    context = retrieve_context(task)
    messages.append({"role": "user", "content": f"{task}\n\nContext: {context}"})

    # NOTE(review): no iteration cap — relies on the model eventually
    # finishing without tool calls; confirm this is acceptable for a demo.
    while True:
        with we.span(
            kind="agent_step",
            name="reason",
            step_index=step_index,
            input_summary=task[:200],
        ) as span:
            # LLM latency/usage is captured automatically by the OpenAI
            # integration enabled in wildedge.init().
            response = openai_client.chat.completions.create(
                model="qwen/qwen3.5-flash-02-23",
                messages=messages,
                tools=TOOLS,
                tool_choice="auto",
                max_tokens=512,
            )
            choice = response.choices[0]
            span.output_summary = choice.finish_reason

        # Keep the assistant turn (including any tool_calls) in the history
        # so tool results below can reference it.
        messages.append(choice.message.model_dump(exclude_none=True))

        if choice.finish_reason == "tool_calls":
            step_index += 1
            for tool_call in choice.message.tool_calls:
                arguments = json.loads(tool_call.function.arguments)
                result = call_tool(tool_call.function.name, arguments)
                messages.append(
                    {
                        "role": "tool",
                        "tool_call_id": tool_call.id,
                        "content": result,
                    }
                )
                # Not instrumented: context window update between tool calls (~80ms).
                # Shows up as a gap stripe in the trace view.
                time.sleep(0.08)
        else:
            return choice.message.content or ""
| 163 | + |
| 164 | + |
| 165 | +# --- Main -------------------------------------------------------------------- |
| 166 | + |
# Demo tasks: the first forces both tools, the second needs two weather calls.
TASKS = [
    "What's the weather like in Tokyo, and what is 42 * 18?",
    "Is it warmer in Paris or Berlin right now?",
]

system_prompt = "You are a helpful assistant. Use tools when needed."
messages = [{"role": "system", "content": system_prompt}]

# One trace per agent session; both tasks share the trace and chat history.
with we.trace(agent_id="demo-agent", run_id=str(uuid.uuid4())):
    for i, task in enumerate(TASKS, start=1):
        print(f"\nTask {i}: {task}")
        reply = run_agent(task, step_index=i, messages=messages)
        print(f"Reply: {reply}")

# Ensure buffered telemetry is sent before the process exits.
we.flush()
0 commit comments