-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathagent.py
More file actions
130 lines (105 loc) · 3.82 KB
/
agent.py
File metadata and controls
130 lines (105 loc) · 3.82 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
"""
Copyright (c) 2025 AI Leader X (aileaderx.com). All Rights Reserved.
This software is the property of AI Leader X. Unauthorized copying, distribution,
or modification of this software, via any medium, is strictly prohibited without
prior written permission. For inquiries, visit https://aileaderx.com
"""
# -------------------------
# Imports
# -------------------------
# Standard library
import ast
import logging
import operator

# LangGraph core for graph-based agent flow
from typing_extensions import Literal
from langgraph.graph import StateGraph, END
from langgraph.types import Command, interrupt
# LangChain core message types
from langchain_core.messages import HumanMessage, AIMessage, SystemMessage
# LangChain tool definition decorator
from langchain_core.tools import tool
# Utility to convert LangChain tools into OpenAI-compatible schema
from langchain_core.utils.function_calling import convert_to_openai_tool

# Local modules
from agent_base import ModelAdapter, AgentBase
import agent_base
from graph import SystemHelper, GraphState
# -------------------------
# 1. TOOL DEFINITIONS
# -------------------------
# Operators the calculator is allowed to apply. Evaluation goes through the
# AST rather than eval() so model-supplied strings cannot execute arbitrary code.
_ALLOWED_OPS = {
    ast.Add: operator.add,
    ast.Sub: operator.sub,
    ast.Mult: operator.mul,
    ast.Div: operator.truediv,
    ast.FloorDiv: operator.floordiv,
    ast.Mod: operator.mod,
    ast.Pow: operator.pow,
    ast.UAdd: operator.pos,
    ast.USub: operator.neg,
}


def _safe_eval(node: ast.AST) -> float:
    """Recursively evaluate an arithmetic AST node.

    Supports numeric literals, unary +/-, and the binary operators in
    _ALLOWED_OPS. Raises ValueError for anything else (names, calls, etc.).
    """
    if isinstance(node, ast.Expression):
        return _safe_eval(node.body)
    if isinstance(node, ast.Constant) and isinstance(node.value, (int, float)):
        return node.value
    if isinstance(node, ast.BinOp) and type(node.op) in _ALLOWED_OPS:
        return _ALLOWED_OPS[type(node.op)](_safe_eval(node.left), _safe_eval(node.right))
    if isinstance(node, ast.UnaryOp) and type(node.op) in _ALLOWED_OPS:
        return _ALLOWED_OPS[type(node.op)](_safe_eval(node.operand))
    raise ValueError(f"unsupported expression element: {type(node).__name__}")


@tool
def calculator(expression: str) -> str:
    """
    A basic calculator tool that evaluates mathematical expressions.
    Example input: '2 + 2', '5 * 10'

    Returns the numeric result as a string, or an error message when the
    expression is not plain arithmetic (or divides by zero).
    """
    print("🧮 Evaluating expression:", expression)
    # Lazy %-formatting: the message is only built if the log level emits it.
    logging.info("Evaluating: %s", expression)
    # BUG FIX: the previous implementation returned the expression unchanged,
    # so the tool never actually computed anything.
    try:
        result = _safe_eval(ast.parse(expression, mode="eval"))
    except (SyntaxError, ValueError, ZeroDivisionError) as exc:
        return f"Error: could not evaluate {expression!r} ({exc})"
    return str(result)
# Register tools once, then expose them in the two shapes downstream code needs.
tools = [calculator]
# OpenAI-compatible schemas, for models that speak OpenAI-style tool calling.
openai_tools = [convert_to_openai_tool(t) for t in tools]
# Lookup table (tool name -> tool object) consulted when executing tool calls.
tool_registry = {t.name: t for t in tools}
# -------------------------
# 2. LLM Setup (LM Studio or other local OpenAI-compatible API)
# -------------------------
llm = AgentBase(ModelAdapter.mistral())
marshall = SystemHelper(llm, tools=tool_registry)
# -------------------------
# 6. Agent Nodes
# TODO: Expand a bit more into defined objects
# -------------------------
def generator(state: GraphState) -> Command[Literal["librarian"]]:
    """Run one world-generator model step, then hand control to the librarian.

    Invokes the shared marshall on the incoming state, prints the model's
    reply, and forwards that reply (prefixed by the librarian system prompt)
    to the "librarian" node.
    """
    updated_state = marshall.invoke(state)
    latest_reply = updated_state["messages"][-1]
    print("🤖 Generator response:", latest_reply.content)
    # Prime the next node: librarian persona prompt followed by the fresh reply.
    handoff_messages = [
        SystemMessage(content=agent_base.WORLD_LIBRARIAN_PROMPT),
        latest_reply,
    ]
    return Command(update={"messages": handoff_messages}, goto="librarian")
def librarian(state: GraphState) -> Command[Literal[END]]:
    """Collect a consistency-probing question from the user and answer it.

    Prompts on stdin, appends the question to the accumulated conversation,
    runs the marshall once more, prints the answer, and terminates the graph.
    """
    question = input("What questions would you ask to test the consistency of the world rules? ")
    conversation = state["messages"] + [HumanMessage(content=question)]
    answer = marshall.invoke({"messages": conversation})["messages"][-1]
    print("🤖 Librarian response:", answer.content)
    return Command(goto=END)
# -------------------------
# 7. LangGraph DAG Setup
# -------------------------
# Wire the two-node pipeline: agent (generator) -> librarian -> END.
builder = StateGraph(GraphState)
builder.add_node("agent", generator)
builder.add_node("librarian", librarian)
builder.set_entry_point("agent")
# Explicit terminal edge; librarian also returns Command(goto=END).
builder.add_edge("librarian", END)
# Compile the builder into a runnable graph object.
graph = builder.compile()
# -------------------------
# 8. Run the Graph
# -------------------------
if __name__ == "__main__":
    # Seed the conversation: world-generator persona plus the requested setting.
    initial_state = {
        "messages": [
            SystemMessage(content=agent_base.WORLD_GENERATOR_PROMPT),
            HumanMessage(content="""Wheel of Time, pre-Breaking of the World era."""),
        ]
    }
    # TODO: me - Investigate ASCII throbbers
    print("\n🚀 Running agents...\n")
    # Run the compiled LangGraph DAG with the seed state.
    result = graph.invoke(initial_state)