A lightweight, idiomatic AI SDK for Go — inspired by Vercel AI SDK.
- Simple API — `GenerateText`, `StreamText`, `Embed`, `EmbedMany`, `GenerateSpeech`, and `StreamSpeech` cover most use cases
- Provider-agnostic — swap between OpenAI, Anthropic, Google, Edge TTS, or any OpenAI-compatible endpoint
- Model discovery — `ListModels` fetches available models, `Test` checks provider connectivity and model support
- Tool calling — define tools with Go structs, SDK infers JSON Schema and handles multi-step execution
- MCP support — connect to MCP servers and expose remote MCP tools as Twilight AI `sdk.Tool` values
- Streaming — first-class channel-based streaming with fine-grained `StreamPart` types
- Multi-step execution — automatic tool-call loop with configurable `MaxSteps`
- Rich message types — text, images, files, reasoning content, tool calls/results
- Embeddings — generate embeddings with `Embed`/`EmbedMany`, supports OpenAI and Google providers
- Speech synthesis — generate speech with `GenerateSpeech`/`StreamSpeech`, supports Edge TTS with an open provider model
- Approval flow — optional human-in-the-loop approval for sensitive tool calls
go get github.com/memohai/twilight-ai

Requires Go 1.25+.
package main
import (
"context"
"fmt"
"log"
"github.com/memohai/twilight-ai/provider/openai/completions"
"github.com/memohai/twilight-ai/sdk"
)
func main() {
provider := completions.New(
completions.WithAPIKey("sk-..."),
)
model := provider.ChatModel("gpt-4o-mini")
text, err := sdk.GenerateText(context.Background(),
sdk.WithModel(model),
sdk.WithMessages([]sdk.Message{
sdk.UserMessage("Explain Go channels in 3 sentences."),
}),
)
if err != nil {
log.Fatal(err)
}
fmt.Println(text)
}

import "github.com/memohai/twilight-ai/provider/openai/responses"
provider := responses.New(
responses.WithAPIKey("sk-..."),
)
model := provider.ChatModel("gpt-4o-mini")
text, err := sdk.GenerateText(context.Background(),
sdk.WithModel(model),
sdk.WithMessages([]sdk.Message{
sdk.UserMessage("Explain Go channels in 3 sentences."),
}),
)

The Responses API is OpenAI's newer API with first-class support for reasoning models (o3, o4-mini), URL citation annotations, and a flat input format. See Providers for details.
import "github.com/memohai/twilight-ai/provider/anthropic/messages"
provider := messages.New(
messages.WithAPIKey("sk-ant-..."),
)
model := provider.ChatModel("claude-sonnet-4-20250514")
maxTokens := 1024
text, err := sdk.GenerateText(context.Background(),
sdk.WithModel(model),
sdk.WithMaxTokens(maxTokens),
sdk.WithMessages([]sdk.Message{
sdk.UserMessage("Explain Go channels in 3 sentences."),
}),
)

For extended thinking (reasoning), configure the provider with `WithThinking`:
provider := messages.New(
messages.WithAPIKey("sk-ant-..."),
messages.WithThinking(messages.ThinkingConfig{
Type: "enabled",
BudgetTokens: 4000,
}),
)

import "github.com/memohai/twilight-ai/provider/google/generativeai"
provider := generativeai.New(
generativeai.WithAPIKey("AIza..."),
)
model := provider.ChatModel("gemini-2.5-flash")
text, err := sdk.GenerateText(context.Background(),
sdk.WithModel(model),
sdk.WithMessages([]sdk.Message{
sdk.UserMessage("Explain Go channels in 3 sentences."),
}),
)

sr, err := sdk.StreamText(ctx,
sdk.WithModel(model),
sdk.WithMessages([]sdk.Message{
sdk.UserMessage("Write a haiku about concurrency."),
}),
)
if err != nil {
log.Fatal(err)
}
for part := range sr.Stream {
switch p := part.(type) {
case *sdk.TextDeltaPart:
fmt.Print(p.Text)
case *sdk.ErrorPart:
log.Fatal(p.Error)
}
}

Define a struct for your tool's parameters — the SDK infers the JSON Schema automatically:
type WeatherParams struct {
City string `json:"city" jsonschema:"City name"`
}
weatherTool := sdk.NewTool("get_weather", "Get current weather for a city",
func(ctx *sdk.ToolExecContext, input WeatherParams) (any, error) {
return map[string]any{"city": input.City, "temp": "22°C"}, nil
},
)
result, err := sdk.GenerateTextResult(ctx,
sdk.WithModel(model),
sdk.WithMessages([]sdk.Message{
sdk.UserMessage("What's the weather in Tokyo?"),
}),
sdk.WithTools([]sdk.Tool{weatherTool}),
sdk.WithMaxSteps(5),
)

You can also load tools from an MCP server and use them like normal Twilight AI tools:
import (
"context"
"log"
"os/exec"
"github.com/memohai/twilight-ai/provider/openai/completions"
"github.com/memohai/twilight-ai/sdk"
"github.com/modelcontextprotocol/go-sdk/mcp"
)
// HTTP / streamable MCP
mcpClient, err := sdk.CreateMCPClient(context.Background(), &sdk.MCPClientConfig{
Type: sdk.MCPTransportHTTP, // default; may be omitted
URL: "https://example.com/mcp",
Headers: map[string]string{
"Authorization": "Bearer <token>",
},
})
if err != nil {
log.Fatal(err)
}
defer mcpClient.Close()
tools, err := mcpClient.Tools(context.Background())
if err != nil {
log.Fatal(err)
}
provider := completions.New(completions.WithAPIKey("sk-..."))
model := provider.ChatModel("gpt-4o-mini")
result, err := sdk.GenerateTextResult(context.Background(),
sdk.WithModel(model),
sdk.WithMessages([]sdk.Message{
sdk.UserMessage("Use the available MCP tools to answer this request."),
}),
sdk.WithTools(tools),
sdk.WithMaxSteps(5),
)
if err != nil {
log.Fatal(err)
}
log.Println(result.Text)

For stdio, create the MCP transport yourself with the official MCP Go SDK and pass it in:
transport := &mcp.CommandTransport{
Command: exec.Command("my-mcp-server"),
}
mcpClient, err := sdk.CreateMCPClient(context.Background(), &sdk.MCPClientConfig{
Transport: transport,
})

Twilight AI converts `mcp.Tool` definitions into `sdk.Tool` automatically:
- `InputSchema` is converted into `*jsonschema.Schema`
- tool execution calls `session.CallTool(...)` under the hood
- MCP text content is returned as the tool output passed back into the model
Generate vector embeddings for text using OpenAI or Google:
import "github.com/memohai/twilight-ai/provider/openai/embedding"
provider := embedding.New(embedding.WithAPIKey("sk-..."))
model := provider.EmbeddingModel("text-embedding-3-small")
// Single value
vec, err := sdk.Embed(ctx, "Hello world", sdk.WithEmbeddingModel(model))
// vec is []float64
// Multiple values
result, err := sdk.EmbedMany(ctx, []string{"Hello", "World"},
sdk.WithEmbeddingModel(model),
sdk.WithDimensions(256),
)
// result.Embeddings is [][]float64
// result.Usage.Tokens reports token consumption

Google Gemini embeddings:
import "github.com/memohai/twilight-ai/provider/google/embedding"
provider := embedding.New(
embedding.WithAPIKey("AIza..."),
embedding.WithTaskType("RETRIEVAL_DOCUMENT"),
)
model := provider.EmbeddingModel("gemini-embedding-001")
vec, err := sdk.Embed(ctx, "Hello world", sdk.WithEmbeddingModel(model))

Generate speech audio from text using Edge TTS (free, no API key required):
import "github.com/memohai/twilight-ai/provider/edge/speech"
provider := speech.New()
model := provider.SpeechModel("edge-read-aloud")
// Generate complete audio
result, err := sdk.GenerateSpeech(ctx,
sdk.WithSpeechModel(model),
sdk.WithText("Hello, world!"),
sdk.WithSpeechConfig(map[string]any{
"voice": "en-US-EmmaMultilingualNeural",
"speed": 1.0,
}),
)
// result.Audio is []byte, result.ContentType is "audio/mpeg"

Stream audio chunks for low-latency playback:
sr, err := sdk.StreamSpeech(ctx,
sdk.WithSpeechModel(model),
sdk.WithText("你好！这是流式语音合成。"),
sdk.WithSpeechConfig(map[string]any{
"voice": "zh-CN-XiaoxiaoNeural",
}),
)
for chunk := range sr.Stream {
// write chunk to audio player or file
}

Test connectivity and discover available models before making generation requests:
provider := completions.New(completions.WithAPIKey("sk-..."))
// Check provider connectivity
result := provider.Test(context.Background())
switch result.Status {
case sdk.ProviderStatusOK:
fmt.Println("Provider is healthy")
case sdk.ProviderStatusUnhealthy:
fmt.Println("Connected but unhealthy:", result.Message)
case sdk.ProviderStatusUnreachable:
fmt.Println("Cannot connect:", result.Message)
}
// List all available models
models, err := provider.ListModels(context.Background())
for _, m := range models {
fmt.Println(m.ID)
}
// Check if a specific model is supported
model := provider.ChatModel("gpt-4o")
testResult, err := model.Test(context.Background())
if testResult.Supported {
fmt.Println("Model is supported")
}

| Document | Description |
|---|---|
| Getting Started | Installation, setup, and first request |
| Providers | Provider interface, OpenAI, Anthropic, and Google Gemini |
| Embeddings | Generate vector embeddings with OpenAI and Google |
| Speech | Speech synthesis with Edge TTS and custom providers |
| Tool Calling | Defining local tools, MCP tools, multi-step execution, approval flow |
| Streaming | Channel-based streaming and StreamPart types |
| API Reference | Complete type and function reference |
| Provider | Constructor | API | Status |
|---|---|---|---|
| OpenAI Chat Completions | `completions.New()` | `/chat/completions` | ✅ Stable |
| OpenAI Responses | `responses.New()` | `/responses` | ✅ Stable |
| OpenAI Codex | `codex.New()` | `/codex/responses` | ✅ Stable |
| OpenAI-compatible (DeepSeek, Groq, etc.) | `completions.New()` + `WithBaseURL` | `/chat/completions` | ✅ Stable |
| OpenRouter Responses | `responses.New()` + `WithBaseURL` | `/responses` | ✅ Stable |
| Anthropic | `messages.New()` | `/messages` | ✅ Stable |
| Google Gemini | `generativeai.New()` | Generative AI API | ✅ Stable |
| OpenAI Embeddings | `embedding.New()` | `/embeddings` | ✅ Stable |
| Google Embeddings | `embedding.New()` | `embedContent` / `batchEmbedContents` | ✅ Stable |
| Edge TTS | `speech.New()` | Bing WebSocket | ✅ Stable |