// google_streaming_example.rs: Google streaming chat example demonstrating real-time token generation
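//
// To run (assuming this file is built as a Cargo example whose name matches
// the file name; adjust the command to your layout):
//   GOOGLE_API_KEY=your-key cargo run --example google_streaming_example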
use futures::StreamExt;
use llm::{
builder::{LLMBackend, LLMBuilder},
chat::ChatMessage,
};
use std::io::{self, Write};
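
// Assumed Cargo.toml dependencies for this example (the "google" feature name
// and the versions are illustrative, not confirmed from this repository):
//
// [dependencies]
// llm = { version = "*", features = ["google"] }
// futures = "0.3"
// tokio = { version = "1", features = ["macros", "rt-multi-thread"] }
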
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Get Google API key from environment variable or use test key as fallback
    let api_key = std::env::var("GOOGLE_API_KEY").unwrap_or("TESTKEY".into());

    // Initialize and configure the LLM client with streaming enabled
    let llm = LLMBuilder::new()
        .backend(LLMBackend::Google)
        .api_key(api_key)
        .model("gemini-2.0-flash")
        .max_tokens(1000)
        .temperature(0.7)
        .build()
        .expect("Failed to build LLM (Google)");

    // Prepare conversation with a prompt that will generate a longer response
    let messages = vec![ChatMessage::user()
        .content(
            "Write a long story about a robot learning to paint. Make it creative and engaging.",
        )
        .build()];

    println!("Starting streaming chat with Google...\n");

    match llm.chat_stream(&messages).await {
        Ok(mut stream) => {
            // Lock stdout once so each token can be written and flushed as it arrives
            let stdout = io::stdout();
            let mut handle = stdout.lock();
            while let Some(Ok(token)) = stream.next().await {
                handle.write_all(token.as_bytes()).unwrap();
                handle.flush().unwrap();
            }
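
            // Note: `while let Some(Ok(token))` ends the loop quietly at the
            // first stream error. A sketch with explicit error handling
            // instead (assuming the stream yields Result items, as matched
            // above):
            //
            // while let Some(chunk) = stream.next().await {
            //     match chunk {
            //         Ok(token) => {
            //             handle.write_all(token.as_bytes()).unwrap();
            //             handle.flush().unwrap();
            //         }
            //         Err(e) => {
            //             eprintln!("\nStream error: {e}");
            //             break;
            //         }
            //     }
            // }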
println!("\n\nStreaming completed.");
}
Err(e) => eprintln!("Chat error: {e}"),
}
Ok(())
}