-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathagent.py
More file actions
147 lines (118 loc) · 4.57 KB
/
agent.py
File metadata and controls
147 lines (118 loc) · 4.57 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
from abc import ABC, abstractmethod
from typing import Any, Dict
from a2a_protocol import AgentMessage, create_message
class BaseAgent(ABC):
    """Abstract base for all agents in the A2A message-passing system.

    Subclasses implement ``process_message`` to define how an incoming
    ``AgentMessage`` is handled; everything else (history tracking and
    message construction) is shared here.
    """

    def __init__(self, name: str, llm=None):
        # Agent identifier; used as the "from_agent" field on outgoing messages.
        self.name = name
        # Optional LLM client — subclasses decide whether/how to use it.
        self.llm = llm
        # All messages received so far, in arrival order.
        self.message_history: list = []

    def receive_message(self, message: AgentMessage) -> AgentMessage:
        """Record an incoming message in the history, then dispatch it."""
        self.message_history.append(message)
        reply = self.process_message(message)
        return reply

    def send_message(self,
                     to_agent: str,
                     task: str,
                     data: Dict[str, Any],
                     message_type: str = "request"
                     ) -> AgentMessage:
        """Build an outgoing message addressed to ``to_agent``."""
        outgoing = dict(
            from_agent=self.name,
            to_agent=to_agent,
            message_type=message_type,
            task=task,
            data=data,
        )
        return create_message(**outgoing)

    @abstractmethod
    def process_message(self, message: AgentMessage) -> AgentMessage:
        """Handle one message and return the response message."""
        pass
class ResearcherAgent(BaseAgent):
    """Agent responsible for retrieving relevant documents (RAG retrieval step)."""

    def __init__(self, name: str, vector_store, llm=None):
        """
        Args:
            name: Agent identifier.
            vector_store: Store exposing ``similarity_search(query, k=...)``
                returning objects with a ``page_content`` attribute
                (e.g. a LangChain vector store).
            llm: Optional LLM client (not used by this agent).
        """
        super().__init__(name, llm)
        self.vector_store = vector_store

    def process_message(self, message: AgentMessage) -> AgentMessage:
        """Handle a retrieval request and reply with the matching documents."""
        query = message["data"].get("query", "")
        # retrieve relevant documents using RAG
        doc_texts = self.retrieve(query)
        response_data = {
            "documents": doc_texts,
            "count": len(doc_texts),
            "query": query,
        }
        return self.send_message(
            to_agent=message["from_agent"],  # send back to the sender
            task="retrieval_complete",
            data=response_data,
            message_type="response",
        )

    def retrieve(self, query: str, k: int = 5) -> list:
        """Return the page contents of the top-``k`` most similar documents.

        ``k`` was previously hard-coded to 5; it is now a parameter with the
        same default, so existing callers see identical behavior.
        """
        relevant_docs = self.vector_store.similarity_search(query, k=k)
        return [doc.page_content for doc in relevant_docs]
class SummarizerAgent(BaseAgent):
    """Agent responsible for summarizing documents."""

    # NOTE: the redundant __init__ that only forwarded to super().__init__
    # has been removed — the inherited BaseAgent.__init__(name, llm) has the
    # identical signature and effect.

    def process_message(self, message: AgentMessage) -> AgentMessage:
        """Handle a summarization request and reply with the summary."""
        documents = message["data"].get("documents", [])
        summary = self.summarize(documents)
        response_data = {
            "summary": summary,
            # How many source documents the summary was built from.
            "source_count": len(documents),
        }
        return self.send_message(
            to_agent=message["from_agent"],
            task="summarization_complete",
            data=response_data,
            message_type="response",
        )

    def summarize(self, documents: list[str]) -> str:
        """Summarize *documents* with the LLM.

        Falls back to a placeholder string when no LLM was provided, so the
        pipeline stays runnable in LLM-less test setups.
        """
        combined_text = "\n\n".join(documents)
        prompt = f"""Summarize the following documents, focusing on the key points and main ideas.
{combined_text}
Summary:
"""
        if self.llm:
            response = self.llm.invoke(prompt)
            return response.content
        else:
            return f"No LLM provided, summary of {len(documents)} documents"
class AnswerAgent(BaseAgent):
    """Agent responsible for generating final answers."""

    # NOTE: the redundant __init__ that only forwarded to super().__init__
    # has been removed — the inherited BaseAgent.__init__(name, llm) has the
    # identical signature and effect.

    def process_message(self, message: AgentMessage) -> AgentMessage:
        """Handle an answer-generation request and reply with the answer."""
        query = message["data"].get("query", "")
        context = message["data"].get("context", "")
        answer = self.generate_answer(query, context)
        response_data = {
            "answer": answer,
            "query": query,
        }
        return self.send_message(
            to_agent=message["from_agent"],
            task="answer_generation_complete",
            data=response_data,
            message_type="response",
        )

    def generate_answer(self, query: str, context: str) -> str:
        """Generate an answer to *query* grounded in *context* via the LLM.

        Returns a placeholder string when no LLM was provided.
        """
        prompt = f"""Answer the question based on the context provided.
Context:
{context}
Question:
{query}
Answer based on the context. If the context doesn't have enough information, say so."""
        if self.llm:
            response = self.llm.invoke(prompt)
            return response.content
        else:
            # Was an f-string with no placeholders (lint F541); same bytes.
            return "No LLM provided, answer generation requires LLM."