graph.py
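"""Local Perplexity: answer a question with web research, fully on local models.

Pipeline (LangGraph): generate search queries from the user's question, fan
them out to parallel Tavily searches, summarize each result with a local
Ollama model, then have a reasoning model write the final answer with
references. A small Streamlit app wraps the graph.
"""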
from typing import List

from dotenv import load_dotenv
from pydantic import BaseModel
from tavily import TavilyClient
from langchain_ollama import ChatOllama
from langgraph.graph import START, END, StateGraph
from langgraph.types import Send
import streamlit as st

from schemas import *   # local module: ReportState, QueryResult
from prompts import *   # local module: build_queries, resume_search, build_final_response
from utils import *     # local module: shared helpers

load_dotenv()  # loads TAVILY_API_KEY (and any other settings) from .env

# Local models served by Ollama: a general model for query generation and
# summaries, and a reasoning model for the final write-up.
llm = ChatOllama(model="llama3.1:latest")
reasoning_llm = ChatOllama(model="deepseek-r1:8b")
def build_first_queries(state: ReportState):
    """Ask the local LLM to turn the user's question into a list of search queries."""
    class QueryList(BaseModel):
        queries: List[str]

    prompt = build_queries.format(user_input=state.user_input)
    query_llm = llm.with_structured_output(QueryList)
    result = query_llm.invoke(prompt)
    return {"queries": result.queries}
def spawn_researchers(state: ReportState):
    """Fan out: dispatch one parallel "single_search" task per generated query."""
    return [Send("single_search", query) for query in state.queries]
def single_search(query: str):
    """Run one Tavily search, extract the top hit, and summarize it with the LLM."""
    tavily_client = TavilyClient()
    results = tavily_client.search(query,
                                   max_results=1,
                                   include_raw_content=False)

    query_results = []
    for result in results["results"]:
        url = result["url"]
        url_extraction = tavily_client.extract(url)
        if len(url_extraction["results"]) > 0:
            raw_content = url_extraction["results"][0]["raw_content"]
            # Summarize against the query itself: this node only receives the
            # query string via Send, so the original user_input is out of scope here.
            prompt = resume_search.format(user_input=query,
                                          search_results=raw_content)
            llm_result = llm.invoke(prompt)
            query_results.append(QueryResult(title=result["title"],
                                             url=url,
                                             resume=llm_result.content))
    return {"queries_results": query_results}
def final_writer(state: ReportState):
    """Assemble all summaries into one prompt and write the final answer with references."""
    search_results = ""
    references = ""
    for i, result in enumerate(state.queries_results):
        search_results += f"[{i+1}]\n\n"
        search_results += f"Title: {result.title}\n"
        search_results += f"URL: {result.url}\n"
        search_results += f"Content: {result.resume}\n"
        search_results += "================\n\n"
        references += f"[{i+1}] - [{result.title}]({result.url})\n"

    prompt = build_final_response.format(user_input=state.user_input,
                                         search_results=search_results)
    llm_result = reasoning_llm.invoke(prompt)
    final_response = llm_result.content + "\n\nReferences:\n" + references
    return {"final_response": final_response}
builder = StateGraph(ReportState)
builder.add_node("build_first_queries", build_first_queries)
builder.add_node("single_search", single_search)
builder.add_node("final_writer", final_writer)

builder.add_edge(START, "build_first_queries")
builder.add_conditional_edges("build_first_queries",
                              spawn_researchers,
                              ["single_search"])
builder.add_edge("single_search", "final_writer")
builder.add_edge("final_writer", END)

graph = builder.compile()
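# Minimal headless sketch (assumption: ReportState carries the fields used
# above). Handy for testing the graph without the Streamlit UI:
#
#   result = graph.invoke({"user_input": "How is the process of building a LLM?"})
#   print(result["final_response"])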
if __name__ == "__main__":
    st.title("🌎 Local Perplexity")
    user_input = st.text_input("What is your question?",
                               value="How is the process of building a LLM?")

    if st.button("Search"):
        response = ""
        with st.status("Generating answer"):
            for output in graph.stream({"user_input": user_input},
                                       stream_mode="debug"):
                if output["type"] == "task_result":
                    st.write(f"Finished {output['payload']['name']}")
                    st.write(output)
                    # Only final_writer's task result carries the finished report;
                    # its "result" is a list of (channel, value) pairs.
                    if output["payload"]["name"] == "final_writer":
                        response = output["payload"]["result"][0][1]

        # deepseek-r1 wraps its chain of thought in <think>...</think> tags;
        # partition() keeps everything before the closing tag as the reflection.
        think_str, _, final_response = response.partition("</think>")
        with st.expander("🧠 Reasoning", expanded=False):
            st.write(think_str)
        st.write(final_response)