tool_calling_example.rs (forked from graniet/llm)
// Import required modules from the LLM library for OpenAI integration
use llm::{
    builder::{FunctionBuilder, LLMBackend, LLMBuilder, ParamBuilder},
    chat::ChatMessage, // Chat-related structures
};

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Get OpenAI API key from environment variable or use test key as fallback
    let api_key = std::env::var("OPENAI_API_KEY").unwrap_or("sk-TESTKEY".into());

    // Initialize and configure the LLM client
    let llm = LLMBuilder::new()
        .backend(LLMBackend::OpenAI) // Use OpenAI as the LLM provider
        .api_key(api_key) // Set the API key
        .model("gpt-3.5-turbo") // Use GPT-3.5 Turbo model
        .max_tokens(512) // Limit response length
        .temperature(0.7) // Control response randomness (0.0-1.0)
        .function(
            FunctionBuilder::new("weather_function")
                .description("Use this tool to get the weather in a specific city")
                .param(
                    ParamBuilder::new("url")
                        .type_of("string")
                        .description("The url to get the weather from for the city"),
                )
                .required(vec!["url".to_string()]),
        )
        .build()
        .expect("Failed to build LLM");
    // Prepare the conversation history with an example user message
    let messages = vec![ChatMessage::user()
        .content("You are a weather assistant. What is the weather in Tokyo? Use the tools that you have available")
        .build()];

    // Send the chat request and handle the response.
    // The response is returned as a string; any tool call the model makes is
    // included as a serialized string and can be deserialized if needed.
    match llm.chat_with_tools(&messages, llm.tools()).await {
        Ok(text) => println!("Chat response:\n{text}"),
        Err(e) => eprintln!("Chat error: {e}"),
    }

    Ok(())
}
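
// A minimal sketch of how the serialized tool call mentioned above could be
// deserialized, assuming it comes back as JSON with "name" and "arguments"
// fields and that `serde` (with the `derive` feature) and `serde_json` are
// added as dependencies. The exact shape returned by `chat_with_tools` is not
// shown in this example, so treat this as an illustration rather than the
// library's documented format.

#[derive(serde::Deserialize, Debug)]
struct ToolCall {
    name: String,                 // Name of the function the model wants to call
    arguments: serde_json::Value, // Raw JSON arguments for that function
}

// Parse one serialized tool call from a JSON string, e.g.
// parse_tool_call(r#"{"name":"weather_function","arguments":{"url":"https://example.com"}}"#)
fn parse_tool_call(text: &str) -> Result<ToolCall, serde_json::Error> {
    serde_json::from_str(text)
}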