Skip to content

algolyzer/groq-go

Repository files navigation

Groq Go SDK

Go Reference License: MIT

The unofficial Go client library for the Groq Cloud API. This SDK provides idiomatic, high-performance Go access to Groq's LPU™ Inference Engine, enabling ultra-fast AI applications.

🚀 Features

  • Chat Completions: Standard request/response with support for all Groq models.
  • Streaming: Real-time token streaming with full usage statistics support.
  • JSON Mode: Enforce structured JSON outputs for reliable parsing.
  • Tool Calling: Native support for function calling (Agentic workflows).
  • Audio: Speech-to-Text (Whisper) and Text-to-Speech (TTS).
  • Configurable: Custom HTTP clients, timeouts, and base URLs.

📦 Installation

go get github.com/algolyzer/groq-go

⚙️ Configuration

Initialize the client with your API key. You can also configure the base URL or HTTP client if needed.

import "github.com/algolyzer/groq-go"

func main() {
// Basic initialization
client := groq.NewClient(os.Getenv("GROQ_API_KEY"))

// Advanced initialization (optional)
// client := groq.NewClient(
//     os.Getenv("GROQ_API_KEY"),
//     groq.WithBaseURL("https://api.groq.com/openai/v1"),
//     groq.WithHTTPClient(&http.Client{Timeout: 60 * time.Second}),
// )
}

📖 Usage Examples

1. Chat Completion (Standard)

Generate a simple text response.

resp, err := client.CreateChatCompletion(context.Background(), groq.ChatCompletionRequest{
Model: "llama-3.3-70b-versatile",
Messages: []groq.ChatMessage{
{Role: groq.RoleUser, Content: "Explain quantum computing in 2 sentences."},
},
})
if err != nil {
log.Fatal(err)
}
fmt.Println(resp.Choices[0].Message.Content)

2. Streaming (Real-time)

Stream tokens as they are generated. Includes support for Usage Statistics at the end of the stream.

stream, err := client.CreateChatCompletionStream(context.Background(), groq.ChatCompletionRequest{
Model: "llama-3.3-70b-versatile",
Messages: []groq.ChatMessage{
{Role: groq.RoleUser, Content: "Write a haiku about code."},
},
// Request token usage stats (optional)
StreamOptions: &groq.StreamOptions{IncludeUsage: true},
})
if err != nil {
log.Fatal(err)
}
defer stream.Close()

for {
chunk, err := stream.Recv()
if err == io.EOF {
break
}
if err != nil {
log.Fatal(err)
}

// Print content delta
if len(chunk.Choices) > 0 {
fmt.Print(chunk.Choices[0].Delta.Content)
}

// Check for final usage stats
if chunk.Usage != nil {
fmt.Printf("\n\n[Total Tokens: %d]\n", chunk.Usage.TotalTokens)
}
}

3. JSON Mode (Structured Output)

Force the model to output valid JSON.

resp, err := client.CreateChatCompletion(context.Background(), groq.ChatCompletionRequest{
Model: "llama-3.1-8b-instant",
Messages: []groq.ChatMessage{
{Role: groq.RoleSystem, Content: "You are a database api."},
{Role: groq.RoleUser, Content: "Return a user object for John Doe."},
},
// Enable JSON mode
Format: &groq.ResponseFormat{Type: "json_object"},
})

fmt.Println(resp.Choices[0].Message.Content)
// Output: { "name": "John Doe", "id": 12345, "role": "user" }

4. Tool Calling (Function Calling)

Define tools that the model can request to call.

// 1. Define the tool
tools := []groq.Tool{
{
Type: "function",
Function: groq.ToolFunction{
Name:        "get_weather",
Description: "Get the weather for a location",
Parameters: map[string]interface{}{
"type": "object",
"properties": map[string]interface{}{
"location": map[string]interface{}{"type": "string"},
},
"required": []string{"location"},
},
},
},
}

// 2. Send request
resp, _ := client.CreateChatCompletion(context.Background(), groq.ChatCompletionRequest{
Model:      "llama-3.3-70b-versatile",
Messages:   []groq.ChatMessage{{Role: groq.RoleUser, Content: "Weather in NY?"}},
Tools:      tools,
ToolChoice: "auto",
})

// 3. Check if model wants to call a tool
msg := resp.Choices[0].Message
if len(msg.ToolCalls) > 0 {
fmt.Printf("Tool to call: %s\n", msg.ToolCalls[0].Function.Name)
fmt.Printf("Arguments: %s\n", msg.ToolCalls[0].Function.Arguments)
}

5. Audio Transcription (Whisper)

Transcribe audio files using Groq's Whisper models (e.g. whisper-large-v3).

resp, err := client.CreateTranscription(context.Background(), groq.AudioTranscriptionRequest{
FilePath: "meeting.m4a",
Model:    "whisper-large-v3",
Language: "en",
})
if err != nil {
log.Fatal(err)
}
fmt.Println(resp.Text)

6. Text-to-Speech (TTS)

Generate audio from text.

audioBytes, err := client.CreateSpeech(context.Background(), groq.CreateSpeechRequest{
Model: "playai-tts", // or other supported TTS models
Input: "The quick brown fox jumps over the lazy dog.",
Voice: "autumn",
})
if err != nil {
log.Fatal(err)
}

os.WriteFile("output.mp3", audioBytes, 0644)

To run the examples, navigate to the examples directory:

export GROQ_API_KEY="your_api_key_here"
go run examples/chat/main.go
go run examples/stream/main.go

📜 License

Distributed under the MIT License. See LICENSE for more information.