7 changes: 6 additions & 1 deletion README.md
@@ -11,7 +11,7 @@ Séance provides a unified interface to communicate with different providers lik
 ## Features
 
 - **Unified CLI**: A single command to interact with multiple LLM providers.
-- **Provider Support**: Currently supports OpenAI, Google Gemini, Anthropic, and OpenRouter.
+- **Provider Support**: Currently supports OpenAI, Google Gemini, Anthropic, OpenRouter, and LMStudio.
 - **Simple Configuration**: Configure all your API keys in a single INI file.
 - **Extensible**: Designed to be easily extendable with new providers.

@@ -49,6 +49,11 @@ key = ...

 [openrouter]
 key = ...
+
+[lmstudio]
+# No key is needed for LMStudio
+endpoint = http://localhost:1234/v1/chat/completions
+model = Qwen3-4B-Thinking-2507-MLX-8bit
 ```

If your configuration file becomes corrupted, Séance will detect it and offer to delete the file for you.
34 changes: 8 additions & 26 deletions src/seance/commands.nim
@@ -11,7 +11,6 @@ import std/logging
 import std/options
 import std/os
 import std/strutils
-import std/tables
 import std/terminal
 import std/json

@@ -104,32 +103,15 @@ proc chat*(
   if schema.isSome:
     schemaJson = some(parseFile(schema.get))
 
-  var usedProvider: Provider
-  if provider.isSome:
-    usedProvider = provider.get()
-  else:
-    usedProvider = config.defaultProvider
-
-  var providerConf: ProviderConfig
-  let providerName = ($usedProvider).normalize()
-  var found = false
-  for k, v in config.providers.mpairs:
-    if k == providerName:
-      providerConf = v
-      found = true
-      break
-
-  if not found:
-    raise newException(ConfigError, "Provider '" & providerName & "' not found in config.")
-
-  if providerConf.key.len == 0:
-    raise newException(ConfigError, "API key for provider '" & providerName & "' is not set.")
-
-  let llmProvider: ChatProvider = newProvider(some(usedProvider), some(providerConf))
+  let llmProvider: ChatProvider = newProvider(provider)
   let modelUsed = llmProvider.getFinalModel(model)
-  let result = llmProvider.chat(sessionObj.messages, some(modelUsed), json, schemaJson)
-  info "Using " & modelUsed & "\n"
-  echo result.content
+  var result: ChatResult
+  try:
+    result = llmProvider.chat(sessionObj.messages, some(modelUsed), json, schemaJson)
+    info "Using " & modelUsed & "\n"
+    echo result.content
+  except IOError as e:
+    quit(e.msg)
 
   if sessionId.len > 0 and not noSession:
     sessionObj.messages.add(ChatMessage(role: assistant, content: result.content))
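
With provider resolution now centralized in `newProvider`, the call site shrinks to resolve, pick a model, and chat, and API failures surface as a clean exit rather than a traceback. A minimal sketch of the new calling pattern (the module paths `seance/providers` and `seance/types` are assumptions):

```nim
import std/options
import seance/providers, seance/types  # assumed module paths

# Resolve the provider (config default when none is given), pick the model,
# and turn API failures (IOError) into a clean exit instead of a crash.
let llmProvider: ChatProvider = newProvider(none(Provider))
let modelUsed = llmProvider.getFinalModel()
try:
  let res = llmProvider.chat(@[ChatMessage(role: user, content: "Hello")], some(modelUsed))
  echo res.content
except IOError as e:
  quit(e.msg)
```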
34 changes: 21 additions & 13 deletions src/seance/config.nim
@@ -66,12 +66,14 @@ proc loadConfig*(): SeanceConfig =
       discard
     else:
       if not providersTable.hasKey(currentSection):
-        providersTable[currentSection] = ProviderConfig(key: "", model: none(string))
+        providersTable[currentSection] = ProviderConfig(key: "", model: none(string), endpoint: none(string))
       case e.key
       of "key":
         providersTable[currentSection].key = e.value
       of "model":
         providersTable[currentSection].model = some(e.value)
+      of "endpoint":
+        providersTable[currentSection].endpoint = some(e.value)
       else:
         discard
   of cfgOption:
@@ -84,7 +86,8 @@
   close(p)
 
   for section, providerConfig in providersTable.pairs:
-    if providerConfig.key.len == 0:
+    let providerEnum = parseProvider(section)
+    if providerConfig.key.len == 0 and providerEnum != LMStudio:
       raise newException(ConfigError, "API key ('key') is missing for provider [" & section & "] in " & configPath)
 
   debug "Config loaded. Default provider: " & $defaultProvider & ", auto session: " & $autoSession
@@ -119,23 +122,28 @@ proc createConfigWizard(): SeanceConfig =
     else:
       echo "Invalid provider. Please choose from the list."
 
-  stdout.write "Now, enter your API key: "
-  let apiKey = stdin.readLine.strip
-
   let providerName = providerStr
   let providerEnum = parseProvider(providerName)
+  var apiKey = ""
+  var endpoint = ""
+
+  if providerEnum != LMStudio:
+    stdout.write "Now, enter your API key: "
+    apiKey = stdin.readLine.strip
+  else:
+    stdout.write "Enter the LM Studio endpoint (or press Enter for default): "
+    endpoint = stdin.readLine.strip
 
   let model = DefaultModels[providerEnum]
 
   # Create the config content
-  let content = """
-[seance]
-default_provider = $1
-
-[$1]
-key = $2
-model = $3
-""" % [providerName, apiKey, model]
+  var content = "[seance]\ndefault_provider = $1\n\n[$1]\n" % [providerName]
+  if apiKey.len > 0:
+    content &= "key = $1\n" % [apiKey]
+  if endpoint.len > 0:
+    content &= "endpoint = $1\n" % [endpoint]
+  content &= "model = $1\n" % [model]
 
   try:
     writeFile(configPath, content)
@@ -144,7 +152,7 @@
     raise newException(ConfigError, "Failed to write config file: " & e.msg)
 
   var providersTable = initTable[string, ProviderConfig]()
-  providersTable[providerName] = ProviderConfig(key: apiKey, model: some(model))
+  providersTable[providerName] = ProviderConfig(key: apiKey, model: some(model), endpoint: some(endpoint))
 
   return SeanceConfig(
     providers: providersTable,
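
For reference, the `[lmstudio]` example from the README now parses into a provider entry with an empty key, which `loadConfig` tolerates for LMStudio only. A sketch of the resulting table entry (assuming `ProviderConfig` is exported by `seance/config`):

```nim
import std/[options, tables]
import seance/config  # assumed module path for ProviderConfig

# What loadConfig builds for the README's [lmstudio] section:
var providersTable = initTable[string, ProviderConfig]()
providersTable["lmstudio"] = ProviderConfig(
  key: "",  # no API key required for a local LM Studio server
  model: some("Qwen3-4B-Thinking-2507-MLX-8bit"),
  endpoint: some("http://localhost:1234/v1/chat/completions")
)
```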
8 changes: 6 additions & 2 deletions src/seance/defaults.nim
@@ -7,10 +7,14 @@ const
   DefaultProvider* = Gemini
 
   DefaultModels* : Table[Provider, string] = {
-    OpenAI: "gpt-4.1-nano-2025-04-14",
+    OpenAI: "gpt-5-nano",
     Anthropic: "claude-3-5-haiku-20241022",
     Gemini: "gemini-2.5-flash-lite-preview-06-17",
-    OpenRouter: "z-ai/glm-4.5-air"
+    OpenRouter: "z-ai/glm-4.5-air",
+    LMStudio: "openai/gpt-oss-20b"
   }.toTable
 
+const
+  DefaultLMStudioEndpoint* = "http://localhost:1234/v1/chat/completions"
+
 # let DefaultModel* = DefaultModels[DefaultProvider]
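
A quick sanity check of the new defaults (a sketch; `Provider` is assumed to come from `seance/types`):

```nim
import std/tables
import seance/defaults, seance/types  # assumed module paths

# LMStudio now ships with both a default model and a default local endpoint.
assert DefaultModels[LMStudio] == "openai/gpt-oss-20b"
assert DefaultLMStudioEndpoint == "http://localhost:1234/v1/chat/completions"
```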
23 changes: 15 additions & 8 deletions src/seance/providers.nim
@@ -1,11 +1,12 @@
 import config, logging, options, strutils, tables, types
 from defaults import DefaultModels, DefaultProvider
 
-from providers/common import chat, defaultHttpPostHandler
+from providers/common import chat, defaultHttpPostHandler, defaultHttpGetHandler
 from providers/anthropic import AnthropicProvider
 from providers/gemini import GeminiProvider
 from providers/openai import OpenAIProvider
 from providers/openrouter import OpenRouterProvider
+from providers/lmstudio import LMStudioProvider
 
 export ChatProvider, ChatMessage, MessageRole, ChatResult, Provider, chat

@@ -21,20 +22,26 @@ proc newProvider*(provider: Option[Provider] = none(Provider), providerConf: Opt
   usedProvider = provider.get(config.defaultProvider)
   let providerName = ($usedProvider).normalize()
   if not config.providers.hasKey(providerName):
-    raise newException(ConfigError, "Provider '" & providerName & "' not found in config.")
-  finalProviderConf = config.providers[providerName]
+    if usedProvider == LMStudio:
+      finalProviderConf = ProviderConfig(key: "", model: none(string), endpoint: none(string))
+    else:
+      raise newException(ConfigError, "Provider '" & providerName & "' not found in config.")
+  else:
+    finalProviderConf = config.providers[providerName]
 
-  if finalProviderConf.key.len == 0:
+  if finalProviderConf.key.len == 0 and usedProvider notin [LMStudio]:
     raise newException(ConfigError, "API key for provider '" & $usedProvider & "' is not set.")
 
   debug "Provider config: " & $finalProviderConf
 
   case usedProvider
   of Anthropic:
-    result = AnthropicProvider(conf: finalProviderConf, defaultModel: DefaultModels[Anthropic], postRequestHandler: defaultHttpPostHandler)
+    result = AnthropicProvider(conf: finalProviderConf, defaultModel: DefaultModels[Anthropic], postRequestHandler: defaultHttpPostHandler, getRequestHandler: defaultHttpGetHandler)
   of Gemini:
-    result = GeminiProvider(conf: finalProviderConf, defaultModel: DefaultModels[Gemini], postRequestHandler: defaultHttpPostHandler)
+    result = GeminiProvider(conf: finalProviderConf, defaultModel: DefaultModels[Gemini], postRequestHandler: defaultHttpPostHandler, getRequestHandler: defaultHttpGetHandler)
   of OpenAI:
-    result = OpenAIProvider(conf: finalProviderConf, defaultModel: DefaultModels[OpenAI], postRequestHandler: defaultHttpPostHandler)
+    result = OpenAIProvider(conf: finalProviderConf, defaultModel: DefaultModels[OpenAI], postRequestHandler: defaultHttpPostHandler, getRequestHandler: defaultHttpGetHandler)
   of OpenRouter:
-    result = OpenRouterProvider(conf: finalProviderConf, defaultModel: DefaultModels[OpenRouter], postRequestHandler: defaultHttpPostHandler)
+    result = OpenRouterProvider(conf: finalProviderConf, defaultModel: DefaultModels[OpenRouter], postRequestHandler: defaultHttpPostHandler, getRequestHandler: defaultHttpGetHandler)
+  of LMStudio:
+    result = LMStudioProvider(conf: finalProviderConf, defaultModel: DefaultModels[LMStudio], postRequestHandler: defaultHttpPostHandler, getRequestHandler: defaultHttpGetHandler)
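
The net effect is zero-config LMStudio support: with no `[lmstudio]` section present, `newProvider` synthesizes an empty `ProviderConfig` instead of raising, and the missing-key check is skipped. A minimal sketch (module paths assumed):

```nim
import std/options
import seance/providers, seance/types  # assumed module paths

# No config section and no API key needed; endpoint and model fall back to
# DefaultLMStudioEndpoint and DefaultModels[LMStudio] at request time.
let lmstudio = newProvider(some(LMStudio))
echo lmstudio.getFinalModel()  # -> "openai/gpt-oss-20b" unless overridden in config
```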
5 changes: 5 additions & 0 deletions src/seance/providers/common.nim
@@ -13,6 +13,11 @@ proc defaultHttpPostHandler*(url: string, body: string, headers: HttpHeaders): R
   client.headers = headers
   result = client.post(url, body = body)
 
+proc defaultHttpGetHandler*(url: string): Response =
+  let client = newHttpClient()
+  defer: client.close()
+  result = client.get(url)
+
 proc getFinalModel*(provider: ChatProvider, model: Option[string] = none(string)): string =
   ## Determines the final model to be used, respecting overrides and defaults.
   return model.get(provider.conf.model.get(provider.defaultModel))
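
`getFinalModel` resolves the model with a three-level precedence: the per-call override, then the `model` key from the provider's INI section, then the compiled-in default. A standalone sketch of the nested `Option.get` chain it relies on, with hypothetical values:

```nim
import std/options

# Mirror of getFinalModel's resolution order (hypothetical values):
let override = none(string)              # 1. per-call model argument
let confModel = none(string)             # 2. "model" key from the INI section
let defaultModel = "openai/gpt-oss-20b"  # 3. provider.defaultModel
echo override.get(confModel.get(defaultModel))  # -> "openai/gpt-oss-20b"
```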
143 changes: 143 additions & 0 deletions src/seance/providers/lmstudio.nim
@@ -0,0 +1,143 @@
+import ../defaults
+import ../types
+import common
+
+import std/[httpclient, json, logging, options, sequtils, streams, strutils, terminal]
+
+# --- Internal types for LMStudio API ---
+type
+  LMStudioModel* = object
+    id*: string
+    state*: Option[string]
+
+  LMStudioModelsResponse* = object
+    data*: seq[LMStudioModel]
+
+  LMStudioChatRequest* = object
+    model*: string
+    messages*: seq[ChatMessage]
+
+  LMStudioProvider* = ref object of ChatProvider
+
+proc fromLMStudio*(node: JsonNode): ChatResponse =
+  var choices: seq[ChatChoice] = @[]
+  if node.hasKey("choices"):
+    for choiceNode in to(node["choices"], seq[JsonNode]):
+      if choiceNode.hasKey("message"):
+        let messageNode = choiceNode["message"]
+        var role: MessageRole
+        if messageNode.hasKey("role"):
+          let roleStr = messageNode["role"].getStr()
+          case roleStr
+          of "system": role = system
+          of "user": role = user
+          of "assistant": role = assistant
+          else: role = system # Default to system if unknown
+        else: role = system # Default to system if role key is missing
+        let content = if messageNode.hasKey("content"):
+          messageNode["content"].getStr()
+        else: ""
+        choices.add(ChatChoice(message: ChatMessage(role: role, content: content)))
+  let model = if node.hasKey("model"): node["model"].getStr() else: ""
+  result = ChatResponse(choices: choices, model: model)
+
+# --- Provider Implementation ---
+
+method chat*(provider: LMStudioProvider, messages: seq[ChatMessage], model: Option[string] = none(string), jsonMode: bool = false, schema: Option[JsonNode] = none(JsonNode)): ChatResult =
+  ## Implementation of the chat method for LMStudio using a live API call
+  var usedModel = provider.getFinalModel(model)
+  let endpoint = provider.conf.endpoint.get(DefaultLMStudioEndpoint)
+  let modelsUrl = endpoint.replace("/chat/completions", "/models")
+
+  try:
+    let modelsResponse = provider.getRequestHandler(modelsUrl)
+    let modelsBody = modelsResponse.body
+    let modelsJson = parseJson(modelsBody)
+    let availableModels = to(modelsJson, LMStudioModelsResponse)
+    var requestedModel: Option[LMStudioModel] = none(LMStudioModel)
+    for m in availableModels.data:
+      if m.id == usedModel:
+        requestedModel = some(m)
+        break
+
+    if requestedModel.isSome and requestedModel.get().state.get("not-loaded") != "loaded":
+      let loadedModels = availableModels.data.filter(proc(m: LMStudioModel): bool = m.state.get("not-loaded") == "loaded")
+      if isatty(stdin):
+        echo "The model '", usedModel, "' is not currently loaded."
+        if loadedModels.len > 0:
+          echo "Loaded models are: ", loadedModels.map(proc(m: LMStudioModel): string = m.id).join(", ")
+          stdout.write "Would you like to load '", usedModel, "' or use a loaded model? (load/use) "
+          let choice = stdin.readLine().strip().toLowerAscii()
+          if choice == "use":
+            if loadedModels.len == 1:
+              usedModel = loadedModels[0].id
+            else:
+              stdout.write "Please specify which loaded model to use: "
+              usedModel = stdin.readLine().strip()
+        else:
+          stdout.write "Would you like to load '", usedModel, "'? (y/N) "
+          let choice = stdin.readLine().strip().toLowerAscii()
+          if choice != "y":
+            quit(0)
+  except Exception as e:
+    debug "Could not parse the response from LM Studio's /v1/models endpoint. Raw error: " & e.msg
+
+  var requestHeaders = newHttpHeaders([
+    ("Content-Type", "application/json")
+  ])
+
+  if provider.conf.key.len > 0:
+    requestHeaders.add("Authorization", "Bearer " & provider.conf.key)
+
+  var processedMessages = messages
+  if jsonMode:
+    # LMStudio requires the word "json" in the prompt to use response_format
+    # Append a system message to ensure this requirement is met
+    processedMessages.add(ChatMessage(role: system, content: "Return the response in JSON format."))
+
+  var requestBody: string
+  if jsonMode:
+    let schemaNode = schema.get(%*{"type": "object"})
+    let request = LMStudioChatRequest(model: usedModel, messages: processedMessages)
+    var requestJson = %*request
+    requestJson["response_format"] = %*{"type": "json_object"}
+    requestBody = $requestJson
+  else:
+    let request = LMStudioChatRequest(model: usedModel, messages: processedMessages)
+    requestBody = $(%*request)
+
+  info "LMStudio Request Body: " & requestBody
+  debug "curl -X POST " & endpoint & " -H \"Content-Type: application/json\" -d '" & requestBody & "'"
+
+  let response = provider.postRequestHandler(endpoint, requestBody, requestHeaders)
+  let responseBodyContent = streams.readAll(response.bodyStream)
+
+  debug "LMStudio Response Status: " & $response.code
+  debug "LMStudio Response Body: " & responseBodyContent
+
+  if response.code notin {Http200, Http201}:
+    var errorMessage = "LMStudio API Error " & $response.code
+    try:
+      let errorJson = parseJson(responseBodyContent)
+      if errorJson.hasKey("error") and errorJson["error"].hasKey("message"):
+        errorMessage &= ": " & errorJson["error"]["message"].getStr()
+      else:
+        errorMessage &= ": " & responseBodyContent
+    except JsonParsingError:
+      errorMessage &= ": " & responseBodyContent
+    raise newException(IOError, errorMessage)
+
+  let apiResponse = fromLMStudio(parseJson(responseBodyContent))
+  if apiResponse.choices.len > 0 and apiResponse.choices[0].message.content.len > 0:
+    let content = apiResponse.choices[0].message.content
+    let model = if apiResponse.model.len > 0: apiResponse.model else: usedModel
+    if model != usedModel:
+      info "Model fallback: " & usedModel & " was requested, but " & model & " was used. This can happen if the requested model is not loaded in LMStudio."
+    return ChatResult(content: content, model: model)
+  elif apiResponse.choices.len > 0 and apiResponse.choices[0].message.content.len == 0:
+    let refusal = "empty content"
+    return ChatResult(content: "AI Refusal: " & refusal, model: usedModel)
+  else:
+    let errorMessage = "LMStudio response contained no choices or refusal."
+    error errorMessage
+    raise newException(ValueError, errorMessage)
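
Because both HTTP handlers are plain fields on the provider, the whole flow above can be exercised without a running LM Studio instance by injecting canned responses. A minimal test sketch, assuming the module paths from this PR and that `Response` from `std/httpclient` is constructible via its exported `status` and `bodyStream` fields:

```nim
import std/[httpclient, json, options, streams]
import seance/providers/lmstudio, seance/types  # assumed module paths

proc canned(body: string): Response =
  # Fake 200 OK; chat() only reads .code (parsed from status) and .bodyStream.
  Response(version: "1.1", status: "200 OK", headers: newHttpHeaders(),
           bodyStream: newStringStream(body))

# GET /v1/models: report the default model as already loaded,
# so the interactive load/use prompt is skipped.
proc fakeGet(url: string): Response =
  canned("""{"data": [{"id": "openai/gpt-oss-20b", "state": "loaded"}]}""")

# POST /v1/chat/completions: return a single assistant choice.
proc fakePost(url, body: string, headers: HttpHeaders): Response =
  canned("""{"model": "openai/gpt-oss-20b", "choices": [{"message": {"role": "assistant", "content": "Hello!"}}]}""")

let provider = LMStudioProvider(
  conf: ProviderConfig(key: "", model: none(string), endpoint: none(string)),
  defaultModel: "openai/gpt-oss-20b",
  getRequestHandler: fakeGet,
  postRequestHandler: fakePost
)

let res = provider.chat(@[ChatMessage(role: user, content: "Hi")])
assert res.content == "Hello!" and res.model == "openai/gpt-oss-20b"
```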