forked from graniet/llm
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathagent_builder_example.rs
More file actions
97 lines (88 loc) · 3.41 KB
/
agent_builder_example.rs
File metadata and controls
97 lines (88 loc) · 3.41 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
//! Port of `reactive_agent_example.rs` to the new `AgentBuilder`.
//!
//! Three agents cooperate via a shared reactive memory:
//! 1. proposer (assistant) - answers user questions.
//! 2. reviewer - judges the answer (ACCEPT / REJECT).
//! 3. resumer - summarizes the discussion when the answer is accepted.
use llm::{
agent::AgentBuilder,
builder::{LLMBackend, LLMBuilder},
chat::ChatMessage,
cond,
memory::{SharedMemory, SlidingWindowMemory},
};
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Resolve the API key once and reuse it for every builder below.
    // `unwrap_or_else` keeps the fallback allocation lazy — it only runs when
    // the env var is missing (Clippy `or_fun_call`); the original re-read the
    // env var and eagerly built the fallback String five separate times.
    let api_key = std::env::var("OPENAI_API_KEY").unwrap_or_else(|_| "sk-TESTKEY".into());

    // Reactive shared memory: each message an agent writes can trigger the
    // `.on(...)` conditions of the other agents subscribed to the same memory.
    let shared_memory = SharedMemory::new_reactive(SlidingWindowMemory::new(10));

    // 1. Proposer: answers the user's question, and re-answers whenever the
    //    reviewer posts a message containing "REJECT".
    let proposer = AgentBuilder::new()
        .role("assistant")
        .on("user", cond!(any))
        .on("reviewer", cond!(contains "REJECT"))
        .llm(
            LLMBuilder::new()
                .backend(LLMBackend::OpenAI)
                .api_key(api_key.clone())
                .model("gpt-3.5-turbo")
                .system("You are a proposer agent. Answer user questions accurately and concisely. When you receive REJECT from a reviewer, correct your previous response.")
        )
        // Optional speech-to-text / text-to-speech providers for this agent.
        .stt(
            LLMBuilder::new()
                .backend(LLMBackend::OpenAI)
                .api_key(api_key.clone())
                .model("whisper-1")
        )
        .tts(
            LLMBuilder::new()
                .backend(LLMBackend::OpenAI)
                .api_key(api_key.clone())
                .model("tts-1")
                .voice("alloy")
        )
        .memory(shared_memory.clone())
        .build()?;

    // 2. Reviewer: reacts to any assistant message (debounced 800 ms) and must
    //    reply with exactly ACCEPT or REJECT; the validator retries up to 3x
    //    until the response matches one of those two words.
    let _reviewer = AgentBuilder::new()
        .role("reviewer")
        .on("assistant", cond!(any))
        .debounce(800)
        .llm(
            LLMBuilder::new()
                .backend(LLMBackend::OpenAI)
                .api_key(api_key.clone())
                .model("o3")
                .system("Respond with a single word: ACCEPT (if the assistant is correct) or REJECT (if wrong). No explanation.")
                .validator(|resp| {
                    if resp == "ACCEPT" || resp == "REJECT" {
                        Ok(())
                    } else {
                        Err("Invalid response".to_string())
                    }
                })
                .validator_attempts(3)
        )
        .memory(shared_memory.clone())
        .build()?;

    // 3. Resumer: summarizes the discussion once the reviewer ACCEPTs.
    let _resumer = AgentBuilder::new()
        .role("resumer")
        .on("reviewer", cond!(contains "ACCEPT"))
        .llm(
            LLMBuilder::new()
                .backend(LLMBackend::OpenAI)
                .api_key(api_key)
                .model("gpt-4o")
                .system("You are a resumer agent. Summarize the conversation between the proposer and the reviewer."),
        )
        .memory(shared_memory.clone())
        .build()?;

    // Kick off the pipeline with a user question. The call is best-effort in
    // this example, but a failure should at least be surfaced rather than
    // silently discarded as the original `_ = ...` did.
    let task = ChatMessage::user()
        .content("how much R in the word strawberry ?")
        .build();
    if let Err(e) = proposer.chat(&[task]).await {
        eprintln!("proposer chat failed: {e}");
    }

    // Stream every event written to the shared memory until the broadcast
    // channel closes (recv() returns Err once all senders are dropped).
    let Some(mut receiver) = shared_memory.subscribe() else {
        eprintln!("No shared memory subscriber available");
        return Ok(());
    };
    while let Ok(evt) = receiver.recv().await {
        println!("{} said: {}", evt.role, evt.msg.content);
    }
    Ok(())
}