-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathmain.py
More file actions
238 lines (193 loc) · 9.07 KB
/
main.py
File metadata and controls
238 lines (193 loc) · 9.07 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
import io
import os
import pathlib
import re
import sys
from typing import TypedDict, List

from gradient import AsyncGradient
from gradient_adk import entrypoint, RequestContext
from langgraph.graph import StateGraph, START, END
from pathvalidate import sanitize_filename  # NOTE(review): currently unused here — verify before removing

# Force UTF-8 stdout so the emoji used in log messages below don't crash on
# narrow default encodings (e.g. Windows cp1252 consoles).
# Fix 1: compare encodings case-insensitively — streams may report 'UTF-8'.
# Fix 2: prefer reconfigure() (Python 3.7+); detach() fails on wrapped streams
# (IDEs, pytest capture) that don't expose a raw buffer.
if (sys.stdout.encoding or "").lower() != "utf-8":
    try:
        sys.stdout.reconfigure(encoding="utf-8")
    except AttributeError:
        sys.stdout = io.TextIOWrapper(sys.stdout.detach(), encoding="utf-8")
# --- 1. THE SHARED MEMORY (The 'State') ---
# This is where the agent stores and carries data through the workflow.
class HackathonState(TypedDict):
    """Shared workflow state carried through the LangGraph pipeline.

    Each node returns a partial dict that LangGraph merges back into this
    state before the next node runs.
    """
    client: AsyncGradient          # injected LLM client; deleted from the result before returning (see main)
    raw_overview: str              # raw DEVPOST overview text supplied by the caller
    hackathon_name: str            # short title extracted by the analyst node
    folder_name: str               # slug used for the ./output/<name> sandbox (set by scaffolder)
    requirements: List[str]        # raw analysis text, stored as a single-element list by the analyst
    judging_criteria: List[str]    # declared but not populated by any current node
    platform_accounts: List[dict]  # declared but not populated by any current node
    required_keys: List[str]       # API key names the project will need (analyst sets a default)
    gitignore_content: str         # generated .gitignore body (scaffolder)
    requirements_content: str      # generated requirements.txt body (scaffolder)
    setup_commands: List[str]      # shell commands to bootstrap the sandbox (scaffolder)
    scaffold_files: List[str]      # declared but not populated by any current node
    project_concepts: List[str]    # brainstormed ideas produced by the creative node
# --- 2. THE WORKSTATIONS (The 'Nodes') ---
# This function acts as your first workstation: The Analyst.
async def analyst_node(state: "HackathonState"):
    """Analyze the raw hackathon overview with the LLM.

    Reads ``state['client']`` and ``state['raw_overview']``. Returns a partial
    state: the extracted hackathon title, the full analysis text stored as the
    single element of ``requirements``, and a default ``required_keys`` list.
    """
    print("--- NODE 1: Analyzing Hackathon Overview ---")
    # Access the client from the state
    client = state["client"]
    # The System Prompt: Defines the "Expert Analyst" persona and specific rules
    system_instructions = (
        "You are a Senior Hackathon Analyst. Your goal is to extract structured setup data from a DEVPOST overview.\n"
        "RULES:\n"
        "1. Identify mandatory requirements (Must-Builds).\n"
        "2. Extract judging criteria and their importance.\n"
        "3. Identify proprietary platforms (e.g., DigitalOcean) that require account sign-ups.\n"
        "4. List specific API keys or access keys mentioned.\n"
        "FORMAT: Return your findings in a clear, categorized summary."
    )
    # Call the model (we use the 'openai-gpt-oss-120b' model provided by Gradient)
    response = await client.chat.completions.create(
        model="openai-gpt-oss-120b",
        messages=[
            {"role": "system", "content": system_instructions},
            {"role": "user", "content": f"Analyze this hackathon overview:\n\n{state['raw_overview']}"}
        ],
    )
    content = response.choices[0].message.content
    print(f"Analysis Complete: {content[:100]}...")  # Log the first 100 chars for verification
    # Derive a short title from the first line: strip markdown '#', then cut at
    # the first en dash or hyphen to drop year/edition suffixes.
    raw_title = content.split('\n')[0].replace("#", "").split('–')[0].split('-')[0].strip()
    # Fix: LLM replies often begin with a blank or punctuation-only line, which
    # previously left hackathon_name empty and broke folder naming downstream.
    if not raw_title:
        raw_title = "hackathon"
    return {
        "hackathon_name": raw_title,
        "requirements": [content],
        "required_keys": ["LLM_API_KEY"]
    }
# This function acts as your second workstation: The Scaffolder.
async def scaffolder_node(state: "HackathonState"):
    """Create the project sandbox under ./output/ and infer its boilerplate.

    Writes .gitignore and requirements.txt into ./output/<folder_name>/ and
    returns their contents, the folder slug, and bootstrap shell commands.
    """
    print(f"--- NODE 2: Innovating Infrastructure for {state['hackathon_name'][:30]}... ---")
    # Access the client from the state
    client = state["client"]
    # 1. Standardized Folder Naming
    # Force lowercase, remove special characters, and limit to a concise slug
    clean_name = re.sub(r'[^a-z0-9]+', '_', state['hackathon_name'].lower()).strip('_')
    # Fix: if the title had no ascii alphanumerics the slug was "", and the
    # scaffold files were written straight into ./output/. Fall back instead.
    folder_name = "_".join(clean_name.split('_')[:4]) or "hackathon_project"
    sandbox_path = pathlib.Path(f"./output/{folder_name}")
    sandbox_path.mkdir(parents=True, exist_ok=True)
    # 2. Dynamic Requirements Inference
    req_prompt = (
        f"Based on these requirements: {state['requirements'][0]}\n"
        "Return ONLY a comma-separated list of Python library names needed. "
        "Example: boto3, requests, pandas"
    )
    # 3. Dynamic Gitignore Inference
    gi_prompt = (
        f"Based on these requirements: {state['requirements'][0]}\n"
        "Return ONLY a comma-separated list of file patterns to ignore in Git. "
        "Example: .aws/, *.log, cdk.out/"
    )
    # Execution & Merging Logic: ask the model, split its comma-list reply;
    # best-effort — an inference failure degrades to the core defaults only.
    async def get_inference(prompt):
        try:
            resp = await client.chat.completions.create(
                model="openai-gpt-oss-120b",
                messages=[{"role": "user", "content": prompt}]
            )
            return [item.strip() for item in resp.choices[0].message.content.split(',') if item.strip()]
        except Exception as e:
            print(f"⚠️ Inference failed: {e}")
            return []
    # Get both sets of suggestions
    inferred_libs = await get_inference(req_prompt)
    inferred_ignores = await get_inference(gi_prompt)
    # Merge Strategy: Core Standard + LLM Inferred (Deduplicated)
    core_libs = ["gradient-adk", "gradient-sdk", "langgraph", "python-dotenv"]
    reqs_content = "\n".join(sorted(list(set(core_libs + [l.lower() for l in inferred_libs]))))
    core_ignores = [".env", ".venv/", "__pycache__/", "*.pyc", ".gradient/", ".DS_Store"]
    gitignore_content = "\n".join(sorted(list(set(core_ignores + inferred_ignores))))
    # 4. Physical Write to Sandbox
    (sandbox_path / ".gitignore").write_text(gitignore_content, encoding="utf-8")
    (sandbox_path / "requirements.txt").write_text(reqs_content, encoding="utf-8")
    print(f"✅ Sandbox created at: {sandbox_path}")
    return {
        "gitignore_content": gitignore_content,
        "requirements_content": reqs_content,
        "folder_name": folder_name,
        "setup_commands": [
            f"cd ./output/{folder_name}",
            "python -m venv .venv",
            "pip install -r requirements.txt"
        ]
    }
# This function acts as your third workstation: The Creative.
async def creative_node(state: HackathonState):
    """Brainstorm 2-3 simple project concepts from the extracted requirements.

    Returns a partial state containing ``project_concepts`` with the model's
    reply as its single element.
    """
    print("--- NODE 3: Brainstorming Project Concepts ---")
    llm = state["client"]
    # TRANSPARENCY: confirm the scaffolder's output survived the state merge.
    survived = "NO"
    if "gitignore_content" in state:
        survived = "YES"
    print(f"DEBUG: Creative Node received gitignore_content? {survived}")
    # The Persona: Focused on your "Singles and Doubles" philosophy [cite: 11]
    persona = (
        "You are a Creative Strategist for hackathons. Your goal is to brainstorm 2-3 project ideas.\n"
        "CRITERIA:\n"
        "1. Creative: Unique angles that stand out[cite: 36].\n"
        "2. Simple: Prioritize 'singles and doubles' over complex 'homeruns'.\n"
        "3. Valid: Must meet all extracted requirements[cite: 38].\n"
        "FORMAT: Return ideas with a Title, Description, and 'Why it fits' section."
    )
    chat_messages = [
        {"role": "system", "content": persona},
        {"role": "user", "content": f"Requirements: {state['requirements']}\n\nBrainstorm simple concepts."},
    ]
    reply = await llm.chat.completions.create(
        model="openai-gpt-oss-120b",
        messages=chat_messages,
    )
    # Return only the new piece of data; LangGraph merges it into the state.
    idea_text = reply.choices[0].message.content
    return {"project_concepts": [idea_text]}
# --- 3. THE ENTRYPOINT (DigitalOcean Integration) ---
# This is the 'Front Door' the platform uses to run your agent.
@entrypoint
async def main(input: dict, context: RequestContext):
    """Run the analyst -> scaffolder -> creative pipeline over an overview.

    Reads the overview text from ``input['text']``, executes the linear
    LangGraph workflow, writes a markdown report to the working directory,
    and returns the final state (minus the non-serializable client).
    """
    # In the cloud, DigitalOcean injects this.
    inference_client = AsyncGradient(
        model_access_key=os.environ.get("GRADIENT_MODEL_ACCESS_KEY")
    )
    # Wire the three nodes into a strictly linear graph.
    builder = StateGraph(HackathonState)
    builder.add_node("analyst", analyst_node)
    builder.add_node("scaffolder", scaffolder_node)
    builder.add_node("creative", creative_node)
    builder.add_edge(START, "analyst")
    builder.add_edge("analyst", "scaffolder")
    builder.add_edge("scaffolder", "creative")
    builder.add_edge("creative", END)
    graph = builder.compile()
    # Seed the shared state; the client rides along so every node can use it.
    initial_input = {
        "raw_overview": input.get("text", "No overview provided."),
        "client": inference_client
    }
    result = await graph.ainvoke(initial_input)
    # Fetch the dynamic name, fallback to 'hackathon' just in case
    dynamic_prefix = result.get('folder_name', 'hackathon')
    md_filename = f"{dynamic_prefix}_report.md"
    # --- NEW: WRITE READABLE MARKDOWN REPORT ---
    # NOTE(review): the report is written to the current working directory,
    # not into the ./output/<folder> sandbox — confirm this is intended.
    with open(md_filename, "w", encoding="utf-8") as f:
        f.write("# 🏆 Hackathon Helper Report\n\n")
        f.write("## 📋 1. Analysis & Requirements\n")
        f.write(f"{result['requirements'][0]}\n\n")
        f.write("## 🛠️ 2. Infrastructure Blueprint\n")
        f.write("### .gitignore Content:\n```text\n")
        # .get("key", "default") prevents the script from crashing if the key is missing
        f.write(f"{result.get('gitignore_content', 'Check Node 2 logs for content.')}\n```\n")
        f.write("### requirements.txt Content:\n```text\n")
        f.write(f"{result.get('requirements_content', 'Check Node 2 logs for content.')}\n```\n")
        f.write("## 🚀 3. Proposed Concepts\n")
        for concept in result.get('project_concepts', []):
            f.write(f"{concept}\n")
    # DELETE the client from the result so it doesn't try to turn into JSON
    if "client" in result:
        del result["client"]
    return result