-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy path: main.py
More file actions
222 lines (186 loc) · 7.66 KB
/
main.py
File metadata and controls
222 lines (186 loc) · 7.66 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
"""
main.py
-------
Entry point for the 03-research-agent pipeline.
Usage examples
--------------
# Parse all PDFs and start an interactive research Q&A session
python main.py --papers-dir data/papers --interactive
# Ask the agent a single question and exit
python main.py --papers-dir data/papers --query "What methodologies are used across these papers?"
# Generate a full gap analysis report
python main.py --papers-dir data/papers --topic "transformer models" --report
# Combine: generate a report and also run an interactive session
python main.py --papers-dir data/papers --topic "NLP" --report --interactive
# Save the report to a specific file
python main.py --papers-dir data/papers --topic "BERT fine-tuning" --report --output reports/bert.md
"""
import argparse
import os
import sys
from pathlib import Path
from dotenv import load_dotenv
# ---------------------------------------------------------------------------
# Load environment variables from .env before any other imports that might
# need OPENAI_API_KEY (e.g., langchain_openai)
# ---------------------------------------------------------------------------
load_dotenv()
def _build_parser() -> argparse.ArgumentParser:
parser = argparse.ArgumentParser(
prog="main.py",
description="AI Research Agent — analyse a collection of research PDFs.",
formatter_class=argparse.RawDescriptionHelpFormatter,
epilog=__doc__,
)
parser.add_argument(
"--papers-dir",
default=os.getenv("PAPERS_DIR", "data/papers"),
metavar="DIR",
help="Directory containing *.pdf files (default: data/papers)",
)
parser.add_argument(
"--topic",
default="Research Analysis",
metavar="TOPIC",
help="Research topic label used in the report title (default: 'Research Analysis')",
)
parser.add_argument(
"--model",
default=os.getenv("OPENAI_MODEL", "gpt-4"),
metavar="MODEL",
help="OpenAI model name (default: gpt-4)",
)
parser.add_argument(
"--query",
default=None,
metavar="QUESTION",
help="Ask the agent a single question and exit.",
)
parser.add_argument(
"--report",
action="store_true",
help="Run gap analysis and generate a Markdown report.",
)
parser.add_argument(
"--output",
default=None,
metavar="PATH",
help="Output file path for the Markdown report (only used with --report).",
)
parser.add_argument(
"--interactive",
action="store_true",
help="Start an interactive Q&A session with the agent.",
)
return parser
def _check_api_key() -> None:
"""Exit early with a clear error if the OpenAI key is missing."""
if not os.getenv("OPENAI_API_KEY"):
print(
"[main] ERROR: OPENAI_API_KEY environment variable is not set.\n"
" Copy .env.example to .env and add your key.",
file=sys.stderr,
)
sys.exit(1)
def _validate_papers_dir(papers_dir: Path) -> int:
    """Ensure *papers_dir* exists and holds at least one ``*.pdf``; exit(1) otherwise.

    Returns the PDF count (always > 0 when this function returns normally).
    """
    if not papers_dir.exists():
        print(f"[main] ERROR: Papers directory '{papers_dir}' does not exist.", file=sys.stderr)
        sys.exit(1)
    pdf_count = len(list(papers_dir.glob("*.pdf")))
    if pdf_count == 0:
        print(
            f"[main] ERROR: No PDF files found in '{papers_dir}'.\n"
            "       Add research papers as .pdf files and try again.",
            file=sys.stderr,
        )
        sys.exit(1)
    return pdf_count


def _interactive_loop(agent, run_agent) -> None:
    """Read questions from stdin and forward each to *run_agent* until the user quits.

    Ctrl-D / Ctrl-C end the session gracefully instead of raising a traceback.
    """
    print("\n[main] Entering interactive mode. Type 'exit' or 'quit' to stop.\n")
    while True:
        try:
            user_input = input("You: ").strip()
        except (EOFError, KeyboardInterrupt):
            print("\n[main] Exiting.")
            break
        if not user_input:
            continue  # skip empty lines rather than querying the agent
        if user_input.lower() in {"exit", "quit", "q"}:
            print("[main] Goodbye!")
            break
        run_agent(user_input, agent)


def main() -> None:
    """Entry point: parse CLI args, build the research pipeline, run requested actions.

    Pipeline: validate papers dir -> init LLM -> parse papers -> index in
    FAISS -> build agent, then any combination of --report / --query /
    --interactive. With no action flag, prints help and a hint.
    """
    parser = _build_parser()
    args = parser.parse_args()
    _check_api_key()
    # ------------------------------------------------------------------
    # Lazy imports so startup is fast when there are argument errors
    # ------------------------------------------------------------------
    from langchain_openai import ChatOpenAI
    from src.agent import create_research_agent, run_agent
    from src.gap_analyzer import analyze_gaps, format_gap_analysis
    from src.paper_indexer import index_papers
    from src.paper_parser import parse_all_papers
    from src.report_generator import generate_report

    # Step 1: validate the papers directory (exits with status 1 on failure).
    papers_dir = Path(args.papers_dir)
    pdf_count = _validate_papers_dir(papers_dir)
    print(f"[main] Found {pdf_count} PDF file(s) in '{papers_dir}'.")

    # Step 2: initialise the LLM.
    print(f"[main] Using model: {args.model}")
    llm = ChatOpenAI(
        model=args.model,
        temperature=0,  # deterministic output for research tasks
        openai_api_key=os.environ["OPENAI_API_KEY"],
    )

    # Step 3: parse all papers with the LLM.
    print("\n[main] === Step 1/3: Parsing papers ===")
    paper_metadata = parse_all_papers(args.papers_dir, llm)
    if not paper_metadata:
        print("[main] ERROR: No papers were successfully parsed.", file=sys.stderr)
        sys.exit(1)
    print(f"[main] Parsed {len(paper_metadata)} paper(s).")

    # Step 4: index papers in FAISS.
    print("\n[main] === Step 2/3: Indexing papers in FAISS ===")
    vector_store = index_papers(args.papers_dir)

    # Step 5: create the research agent.
    print("\n[main] === Step 3/3: Building research agent ===")
    agent = create_research_agent(vector_store, paper_metadata, llm)
    print("[main] Agent ready.\n")

    # Step 6a: generate report (--report).
    if args.report:
        print("[main] Running gap analysis…")
        gaps = analyze_gaps(paper_metadata, llm)
        print(format_gap_analysis(gaps))
        report = generate_report(
            paper_metadata_list=paper_metadata,
            gap_analysis=gaps,
            topic=args.topic,
            output_path=args.output,
        )
        print(f"[main] Report generated ({len(report)} characters).")

    # Step 6b: single query (--query).
    if args.query:
        run_agent(args.query, agent)

    # Step 6c: interactive session (--interactive).
    if args.interactive:
        _interactive_loop(agent, run_agent)

    # No action flag given: show usage so the run isn't silently a no-op.
    if not (args.report or args.query or args.interactive):
        parser.print_help()
        print(
            "\n[main] No action specified. Use --query, --report, or --interactive."
        )
# Run the CLI only when executed as a script, not when imported as a module.
if __name__ == "__main__":
    main()