# trading_graph.py — forked from Y-Research-SBU/QuantAgent
"""
TradingGraph: Orchestrates the multi-agent trading system using LangChain and LangGraph.
Initializes LLMs, toolkits, and agent nodes for indicator, pattern, and trend analysis.
"""
import os
from typing import Dict
from langchain_anthropic import ChatAnthropic
from langchain_core.language_models import BaseChatModel
from langchain_openai import ChatOpenAI
from langchain_qwq import ChatQwen
from langgraph.prebuilt import ToolNode
from default_config import DEFAULT_CONFIG
from graph_setup import SetGraph
from graph_util import TechnicalTools
class TradingGraph:
"""
Main orchestrator for the multi-agent trading system.
Sets up LLMs, toolkits, and agent nodes for indicator, pattern, and trend analysis.
"""
def __init__(self, config=None):
# --- Configuration and LLMs ---
self.config = config if config is not None else DEFAULT_CONFIG.copy()
# Initialize LLMs with provider support
self.agent_llm = self._create_llm(
provider=self.config.get("agent_llm_provider", "openai"),
model=self.config.get("agent_llm_model", "gpt-4o-mini"),
temperature=self.config.get("agent_llm_temperature", 0.1),
)
self.graph_llm = self._create_llm(
provider=self.config.get("graph_llm_provider", "openai"),
model=self.config.get("graph_llm_model", "gpt-4o"),
temperature=self.config.get("graph_llm_temperature", 0.1),
)
self.toolkit = TechnicalTools()
# --- Create tool nodes for each agent ---
# self.tool_nodes = self._set_tool_nodes()
# --- Graph logic and setup ---
self.graph_setup = SetGraph(
self.agent_llm,
self.graph_llm,
self.toolkit,
# self.tool_nodes,
)
# --- The main LangGraph graph object ---
self.graph = self.graph_setup.set_graph()
def _get_api_key(self, provider: str = "openai") -> str:
"""
Get API key with proper validation and error handling.
Args:
provider: The provider name ("openai", "anthropic", or "qwen")
Returns:
str: The API key for the specified provider
Raises:
ValueError: If API key is missing or invalid
"""
if provider == "openai":
# First check if API key is provided in config
api_key = self.config.get("api_key")
# If not in config, check environment variable
if not api_key:
api_key = os.environ.get("OPENAI_API_KEY")
# Validate the API key
if not api_key:
raise ValueError(
"OpenAI API key not found. Please set it using one of these methods:\n"
"1. Set environment variable: export OPENAI_API_KEY='your-key-here'\n"
"2. Update the config with: config['api_key'] = 'your-key-here'\n"
"3. Use the web interface to update the API key"
)
if api_key == "your-openai-api-key-here" or api_key == "":
raise ValueError(
"Please replace the placeholder API key with your actual OpenAI API key. "
"You can get one from: https://platform.openai.com/api-keys"
)
elif provider == "anthropic":
# First check if API key is provided in config
api_key = self.config.get("anthropic_api_key")
# If not in config, check environment variable
if not api_key:
api_key = os.environ.get("ANTHROPIC_API_KEY")
# Validate the API key
if not api_key:
raise ValueError(
"Anthropic API key not found. Please set it using one of these methods:\n"
"1. Set environment variable: export ANTHROPIC_API_KEY='your-key-here'\n"
"2. Update the config with: config['anthropic_api_key'] = 'your-key-here'\n"
)
if api_key == "":
raise ValueError(
"Please provide your actual Anthropic API key. "
"You can get one from: https://console.anthropic.com/"
)
elif provider == "qwen":
# First check if API key is provided in config
api_key = self.config.get("qwen_api_key")
# If not in config, check environment variable
if not api_key:
api_key = os.environ.get("DASHSCOPE_API_KEY")
# Validate the API key
if not api_key:
raise ValueError(
"Qwen API key not found. Please set it using one of these methods:\n"
"1. Set environment variable: export DASHSCOPE_API_KEY='your-key-here'\n"
"2. Update the config with: config['qwen_api_key'] = 'your-key-here'\n"
)
if api_key == "":
raise ValueError(
"Please provide your actual Qwen API key. "
"You can get one from: https://dashscope.console.aliyun.com/"
)
else:
raise ValueError(f"Unsupported provider: {provider}. Must be 'openai', 'anthropic', or 'qwen'")
return api_key
def _create_llm(
self, provider: str, model: str, temperature: float
) -> BaseChatModel:
"""
Create an LLM instance based on the provider.
Args:
provider: The provider name ("openai", "anthropic", or "qwen")
model: The model name (e.g., "gpt-4o", "claude-3-5-sonnet-20241022", "qwen-vl-max-latest")
temperature: The temperature setting for the model
Returns:
BaseChatModel: An instance of the appropriate LLM class
"""
api_key = self._get_api_key(provider)
if provider == "openai":
return ChatOpenAI(
model=model,
temperature=temperature,
api_key=api_key,
)
elif provider == "anthropic":
# ChatAnthropic handles SystemMessage extraction automatically
# It extracts SystemMessage from the message list and passes it as 'system' parameter
# The messages array should contain at least one non-SystemMessage
return ChatAnthropic(
model=model,
temperature=temperature,
api_key=api_key,
)
elif provider == "qwen":
return ChatQwen(
model=model,
temperature=temperature,
api_key=api_key,
max_retries=4,
)
else:
raise ValueError(f"Unsupported provider: {provider}. Must be 'openai', 'anthropic', or 'qwen'")
# def _set_tool_nodes(self) -> Dict[str, ToolNode]:
# """
# Define tool nodes for each agent type (indicator, pattern, trend).
# """
# return {
# "indicator": ToolNode(
# [
# self.toolkit.compute_macd,
# self.toolkit.compute_roc,
# self.toolkit.compute_rsi,
# self.toolkit.compute_stoch,
# self.toolkit.compute_willr,
# ]
# ),
# "pattern": ToolNode(
# [
# self.toolkit.generate_kline_image,
# ]
# ),
# "trend": ToolNode([self.toolkit.generate_trend_image]),
# }
def refresh_llms(self):
"""
Refresh the LLM objects with the current API key from environment.
This is called when the API key is updated.
"""
# Recreate LLM objects with current config values
self.agent_llm = self._create_llm(
provider=self.config.get("agent_llm_provider", "openai"),
model=self.config.get("agent_llm_model", "gpt-4o-mini"),
temperature=self.config.get("agent_llm_temperature", 0.1),
)
self.graph_llm = self._create_llm(
provider=self.config.get("graph_llm_provider", "openai"),
model=self.config.get("graph_llm_model", "gpt-4o"),
temperature=self.config.get("graph_llm_temperature", 0.1),
)
# Recreate the graph setup with new LLMs
self.graph_setup = SetGraph(
self.agent_llm,
self.graph_llm,
self.toolkit,
# self.tool_nodes,
)
# Recreate the main graph
self.graph = self.graph_setup.set_graph()
def update_api_key(self, api_key: str, provider: str = "openai"):
"""
Update the API key in the config and refresh LLMs.
This method is called by the web interface when API key is updated.
Args:
api_key (str): The new API key
provider (str): The provider name ("openai" or "anthropic"), defaults to "openai"
"""
if provider == "openai":
# Update the config with the new API key
self.config["api_key"] = api_key
# Also update the environment variable for consistency
os.environ["OPENAI_API_KEY"] = api_key
elif provider == "anthropic":
# Update the config with the new API key
self.config["anthropic_api_key"] = api_key
# Also update the environment variable for consistency
os.environ["ANTHROPIC_API_KEY"] = api_key
elif provider == "qwen":
# Update the config with the new API key
self.config["qwen_api_key"] = api_key
# Also update the environment variable for consistency
os.environ["DASHSCOPE_API_KEY"] = api_key
else:
raise ValueError(f"Unsupported provider: {provider}. Must be 'openai', 'anthropic', or 'qwen'")
# Refresh the LLMs with the new API key
self.refresh_llms()