api.py
"""
Serves the RAG chain to the Chrome Extension via Server-Sent Events.
Run: uvicorn api:app --reload --port 8000
"""
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import StreamingResponse
from pydantic import BaseModel
import json

from src.rag import RegIQ, format_context, format_source, SYSTEM_PROMPT

from dotenv import load_dotenv

load_dotenv()

app = FastAPI(title="RegIQ", version="1.0.0")
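
# Permissive CORS so the Chrome extension can call the API from any origin;
# worth tightening allow_origins before exposing this beyond local development.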
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_methods=["*"],
    allow_headers=["*"],
)

rag = RegIQ(model="gpt-4o-mini", top_k=6)
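

# Request model: `jurisdiction` defaults to "bnr" but is not read by the
# handlers below, so it appears reserved for future multi-jurisdiction routing.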
class Question(BaseModel):
    question: str
    jurisdiction: str = "bnr"
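

# Liveness/readiness probe; `is_ready` presumably flips to true once the
# retriever/index inside src.rag has finished loading.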
@app.get("/health")
def health():
    return {"status": "ok", "ready": rag.is_ready}
@app.post("/ask")
def ask(body: Question):
    result = rag.ask(body.question)
    return {
        "answer": result["answer"],
        "sources": result["sources"],
    }
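

# Wire format of the stream below (shape only; token values illustrative):
#   data: {"token": "Under"}
#   data: {"token": " Article"}
#   ...
#   data: {"sources": [{"citation": "...", "preview": "..."}], "done": true}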
@app.post("/ask/stream")
async def ask_stream(body: Question):
    """Stream answer tokens as Server-Sent Events."""

    def generate():
        from langchain.prompts import ChatPromptTemplate

        rag._load()
        sources_docs = rag._retriever.invoke(body.question)
        context = format_context(sources_docs)

        prompt = ChatPromptTemplate.from_messages([
            ("system", SYSTEM_PROMPT),
            ("human", "{question}"),
        ])
        messages = prompt.format_messages(
            context=context,
            question=body.question,
        )
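
        # Note: SYSTEM_PROMPT is assumed to contain a {context} placeholder,
        # since format_messages above is handed `context` to interpolate.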

        # ── Stream tokens and accumulate full answer ───────────────────────
        full_text = ""
        for chunk in rag.llm.stream(messages):
            token = chunk.content
            if token:
                full_text += token
                yield f"data: {json.dumps({'token': token})}\n\n"

        # ── Only send citation chips if the answer actually cites a directive ──
        # Greetings, off-topic replies, and redirections get no chips.
        has_citation = "Directive" in full_text or "Article" in full_text
        sources = []
        if has_citation:
            seen = set()
            for doc in sources_docs:
                citation = format_source(doc)
                if citation not in seen:
                    seen.add(citation)
                    sources.append({
                        "citation": citation,
                        "preview": (
                            doc.page_content[:180] + "..."
                            if len(doc.page_content) > 180
                            else doc.page_content
                        ),
                    })

        yield f"data: {json.dumps({'sources': sources, 'done': True})}\n\n"

    return StreamingResponse(
        generate(),
        media_type="text/event-stream",
        headers={
            "Cache-Control": "no-cache",
            # Disable nginx-style proxy buffering so events flush immediately
            "X-Accel-Buffering": "no",
        },
    )