diff --git a/README.md b/README.md index bfd070d..45c9d63 100644 --- a/README.md +++ b/README.md @@ -11,7 +11,7 @@ Séance provides a unified interface to communicate with different providers lik ## Features - **Unified CLI**: A single command to interact with multiple LLM providers. -- **Provider Support**: Currently supports OpenAI, Google Gemini, Anthropic, and OpenRouter. +- **Provider Support**: Currently supports OpenAI, Google Gemini, Anthropic, OpenRouter, and LMStudio. - **Simple Configuration**: Configure all your API keys in a single INI file. - **Extensible**: Designed to be easily extendable with new providers. @@ -49,6 +49,11 @@ key = ... [openrouter] key = ... + +[lmstudio] +# No key is needed for LMStudio +endpoint = http://localhost:1234/v1/chat/completions +model = Qwen3-4B-Thinking-2507-MLX-8bit ``` If your configuration file becomes corrupted, Séance will detect it and offer to delete the file for you. diff --git a/src/seance/commands.nim b/src/seance/commands.nim index 6ab10ca..284dd24 100644 --- a/src/seance/commands.nim +++ b/src/seance/commands.nim @@ -11,7 +11,6 @@ import std/logging import std/options import std/os import std/strutils -import std/tables import std/terminal import std/json @@ -104,32 +103,15 @@ proc chat*( if schema.isSome: schemaJson = some(parseFile(schema.get)) - var usedProvider: Provider - if provider.isSome: - usedProvider = provider.get() - else: - usedProvider = config.defaultProvider - - var providerConf: ProviderConfig - let providerName = ($usedProvider).normalize() - var found = false - for k, v in config.providers.mpairs: - if k == providerName: - providerConf = v - found = true - break - - if not found: - raise newException(ConfigError, "Provider '" & providerName & "' not found in config.") - - if providerConf.key.len == 0: - raise newException(ConfigError, "API key for provider '" & providerName & "' is not set.") - - let llmProvider: ChatProvider = newProvider(some(usedProvider), some(providerConf)) + let llmProvider: ChatProvider = newProvider(provider) let modelUsed = llmProvider.getFinalModel(model) - let result = llmProvider.chat(sessionObj.messages, some(modelUsed), json, schemaJson) - info "Using " & modelUsed & "\n" - echo result.content + var result: ChatResult + try: + result = llmProvider.chat(sessionObj.messages, some(modelUsed), json, schemaJson) + info "Using " & modelUsed & "\n" + echo result.content + except IOError as e: + quit(e.msg) if sessionId.len > 0 and not noSession: sessionObj.messages.add(ChatMessage(role: assistant, content: result.content)) diff --git a/src/seance/config.nim b/src/seance/config.nim index 6529b0b..6b9b990 100644 --- a/src/seance/config.nim +++ b/src/seance/config.nim @@ -66,12 +66,14 @@ proc loadConfig*(): SeanceConfig = discard else: if not providersTable.hasKey(currentSection): - providersTable[currentSection] = ProviderConfig(key: "", model: none(string)) + providersTable[currentSection] = ProviderConfig(key: "", model: none(string), endpoint: none(string)) case e.key of "key": providersTable[currentSection].key = e.value of "model": providersTable[currentSection].model = some(e.value) + of "endpoint": + providersTable[currentSection].endpoint = some(e.value) else: discard of cfgOption: @@ -84,7 +86,8 @@ proc loadConfig*(): SeanceConfig = close(p) for section, providerConfig in providersTable.pairs: - if providerConfig.key.len == 0: + let providerEnum = parseProvider(section) + if providerConfig.key.len == 0 and providerEnum != LMStudio: raise newException(ConfigError, "API key ('key') 
is missing for provider [" & section & "] in " & configPath) debug "Config loaded. Default provider: " & $defaultProvider & ", auto session: " & $autoSession @@ -119,23 +122,28 @@ proc createConfigWizard(): SeanceConfig = else: echo "Invalid provider. Please choose from the list." - stdout.write "Now, enter your API key: " - let apiKey = stdin.readLine.strip - let providerName = providerStr let providerEnum = parseProvider(providerName) + var apiKey = "" + var endpoint = "" + + if providerEnum != LMStudio: + stdout.write "Now, enter your API key: " + apiKey = stdin.readLine.strip + else: + stdout.write "Enter the LM Studio endpoint (or press Enter for default): " + endpoint = stdin.readLine.strip let model = DefaultModels[providerEnum] # Create the config content - let content = """ -[seance] -default_provider = $1 + var content = "[seance]\ndefault_provider = $1\n\n[$1]\n" % [providerName] + if apiKey.len > 0: + content &= "key = $1\n" % [apiKey] + if endpoint.len > 0: + content &= "endpoint = $1\n" % [endpoint] + content &= "model = $1\n" % [model] -[$1] -key = $2 -model = $3 -""" % [providerName, apiKey, model] try: writeFile(configPath, content) @@ -144,7 +152,7 @@ model = $3 raise newException(ConfigError, "Failed to write config file: " & e.msg) var providersTable = initTable[string, ProviderConfig]() - providersTable[providerName] = ProviderConfig(key: apiKey, model: some(model)) + providersTable[providerName] = ProviderConfig(key: apiKey, model: some(model), endpoint: some(endpoint)) return SeanceConfig( providers: providersTable, diff --git a/src/seance/defaults.nim b/src/seance/defaults.nim index 3e4de10..b34fbfb 100644 --- a/src/seance/defaults.nim +++ b/src/seance/defaults.nim @@ -7,10 +7,14 @@ const DefaultProvider* = Gemini DefaultModels* : Table[Provider, string] = { - OpenAI: "gpt-4.1-nano-2025-04-14", + OpenAI: "gpt-5-nano", Anthropic: "claude-3-5-haiku-20241022", Gemini: "gemini-2.5-flash-lite-preview-06-17", - OpenRouter: "z-ai/glm-4.5-air" + OpenRouter: "z-ai/glm-4.5-air", + LMStudio: "openai/gpt-oss-20b" }.toTable +const + DefaultLMStudioEndpoint* = "http://localhost:1234/v1/chat/completions" + # let DefaultModel* = DefaultModels[DefaultProvider] \ No newline at end of file diff --git a/src/seance/providers.nim b/src/seance/providers.nim index 10eb15b..f040754 100644 --- a/src/seance/providers.nim +++ b/src/seance/providers.nim @@ -1,11 +1,12 @@ import config, logging, options, strutils, tables, types from defaults import DefaultModels, DefaultProvider -from providers/common import chat, defaultHttpPostHandler +from providers/common import chat, defaultHttpPostHandler, defaultHttpGetHandler from providers/anthropic import AnthropicProvider from providers/gemini import GeminiProvider from providers/openai import OpenAIProvider from providers/openrouter import OpenRouterProvider +from providers/lmstudio import LMStudioProvider export ChatProvider, ChatMessage, MessageRole, ChatResult, Provider, chat @@ -21,20 +22,26 @@ proc newProvider*(provider: Option[Provider] = none(Provider), providerConf: Opt usedProvider = provider.get(config.defaultProvider) let providerName = ($usedProvider).normalize() if not config.providers.hasKey(providerName): - raise newException(ConfigError, "Provider '" & providerName & "' not found in config.") - finalProviderConf = config.providers[providerName] + if usedProvider == LMStudio: + finalProviderConf = ProviderConfig(key: "", model: none(string), endpoint: none(string)) + else: + raise newException(ConfigError, "Provider '" & providerName & 
"' not found in config.") + else: + finalProviderConf = config.providers[providerName] - if finalProviderConf.key.len == 0: + if finalProviderConf.key.len == 0 and usedProvider notin [LMStudio]: raise newException(ConfigError, "API key for provider '" & $usedProvider & "' is not set.") debug "Provider config: " & $finalProviderConf case usedProvider of Anthropic: - result = AnthropicProvider(conf: finalProviderConf, defaultModel: DefaultModels[Anthropic], postRequestHandler: defaultHttpPostHandler) + result = AnthropicProvider(conf: finalProviderConf, defaultModel: DefaultModels[Anthropic], postRequestHandler: defaultHttpPostHandler, getRequestHandler: defaultHttpGetHandler) of Gemini: - result = GeminiProvider(conf: finalProviderConf, defaultModel: DefaultModels[Gemini], postRequestHandler: defaultHttpPostHandler) + result = GeminiProvider(conf: finalProviderConf, defaultModel: DefaultModels[Gemini], postRequestHandler: defaultHttpPostHandler, getRequestHandler: defaultHttpGetHandler) of OpenAI: - result = OpenAIProvider(conf: finalProviderConf, defaultModel: DefaultModels[OpenAI], postRequestHandler: defaultHttpPostHandler) + result = OpenAIProvider(conf: finalProviderConf, defaultModel: DefaultModels[OpenAI], postRequestHandler: defaultHttpPostHandler, getRequestHandler: defaultHttpGetHandler) of OpenRouter: - result = OpenRouterProvider(conf: finalProviderConf, defaultModel: DefaultModels[OpenRouter], postRequestHandler: defaultHttpPostHandler) + result = OpenRouterProvider(conf: finalProviderConf, defaultModel: DefaultModels[OpenRouter], postRequestHandler: defaultHttpPostHandler, getRequestHandler: defaultHttpGetHandler) + of LMStudio: + result = LMStudioProvider(conf: finalProviderConf, defaultModel: DefaultModels[LMStudio], postRequestHandler: defaultHttpPostHandler, getRequestHandler: defaultHttpGetHandler) diff --git a/src/seance/providers/common.nim b/src/seance/providers/common.nim index d589096..3090017 100644 --- a/src/seance/providers/common.nim +++ b/src/seance/providers/common.nim @@ -13,6 +13,11 @@ proc defaultHttpPostHandler*(url: string, body: string, headers: HttpHeaders): R client.headers = headers result = client.post(url, body = body) +proc defaultHttpGetHandler*(url: string): Response = + let client = newHttpClient() + defer: client.close() + result = client.get(url) + proc getFinalModel*(provider: ChatProvider, model: Option[string] = none(string)): string = ## Determines the final model to be used, respecting overrides and defaults. 
return model.get(provider.conf.model.get(provider.defaultModel)) diff --git a/src/seance/providers/lmstudio.nim b/src/seance/providers/lmstudio.nim new file mode 100644 index 0000000..3ba89d8 --- /dev/null +++ b/src/seance/providers/lmstudio.nim @@ -0,0 +1,143 @@ +import ../defaults +import ../types +import common + +import std/[httpclient, json, logging, options, sequtils, streams, strutils, terminal] + +# --- Internal types for LMStudio API --- +type + LMStudioModel* = object + id*: string + state*: Option[string] + + LMStudioModelsResponse* = object + data*: seq[LMStudioModel] + + LMStudioChatRequest* = object + model*: string + messages*: seq[ChatMessage] + + LMStudioProvider* = ref object of ChatProvider + +proc fromLMStudio*(node: JsonNode): ChatResponse = + var choices: seq[ChatChoice] = @[] + if node.hasKey("choices"): + for choiceNode in to(node["choices"], seq[JsonNode]): + if choiceNode.hasKey("message"): + let messageNode = choiceNode["message"] + var role: MessageRole + if messageNode.hasKey("role"): + let roleStr = messageNode["role"].getStr() + case roleStr + of "system": role = system + of "user": role = user + of "assistant": role = assistant + else: role = system # Default to system if unknown + else: role = system # Default to system if role key is missing + let content = if messageNode.hasKey("content"): + messageNode["content"].getStr() + else: "" + choices.add(ChatChoice(message: ChatMessage(role: role, content: content))) + let model = if node.hasKey("model"): node["model"].getStr() else: "" + result = ChatResponse(choices: choices, model: model) + +# --- Provider Implementation --- + +method chat*(provider: LMStudioProvider, messages: seq[ChatMessage], model: Option[string] = none(string), jsonMode: bool = false, schema: Option[JsonNode] = none(JsonNode)): ChatResult = + ## Implementation of the chat method for LMStudio using a live API call + var usedModel = provider.getFinalModel(model) + let endpoint = provider.conf.endpoint.get(DefaultLMStudioEndpoint) + let modelsUrl = endpoint.replace("/chat/completions", "/models") + + try: + let modelsResponse = provider.getRequestHandler(modelsUrl) + let modelsBody = modelsResponse.body + let modelsJson = parseJson(modelsBody) + let availableModels = to(modelsJson, LMStudioModelsResponse) + var requestedModel: Option[LMStudioModel] = none(LMStudioModel) + for m in availableModels.data: + if m.id == usedModel: + requestedModel = some(m) + break + + if requestedModel.isSome and requestedModel.get().state.get("not-loaded") != "loaded": + let loadedModels = availableModels.data.filter(proc(m: LMStudioModel): bool = m.state.get("not-loaded") == "loaded") + if isatty(stdin): + echo "The model '", usedModel, "' is not currently loaded." + if loadedModels.len > 0: + echo "Loaded models are: ", loadedModels.map(proc(m: LMStudioModel): string = m.id).join(", ") + stdout.write "Would you like to load '", usedModel, "' or use a loaded model? (load/use) " + let choice = stdin.readLine().strip().toLowerAscii() + if choice == "use": + if loadedModels.len == 1: + usedModel = loadedModels[0].id + else: + stdout.write "Please specify which loaded model to use: " + usedModel = stdin.readLine().strip() + else: + stdout.write "Would you like to load '", usedModel, "'? (y/N) " + let choice = stdin.readLine().strip().toLowerAscii() + if choice != "y": + quit(0) + except Exception as e: + debug "Could not parse the response from LM Studio's /v1/models endpoint. 
Raw error: " & e.msg + + var requestHeaders = newHttpHeaders([ + ("Content-Type", "application/json") + ]) + + if provider.conf.key.len > 0: + requestHeaders.add("Authorization", "Bearer " & provider.conf.key) + + var processedMessages = messages + if jsonMode: + # LMStudio requires the word "json" in the prompt to use response_format + # Append a system message to ensure this requirement is met + processedMessages.add(ChatMessage(role: system, content: "Return the response in JSON format.")) + + var requestBody: string + if jsonMode: + let schemaNode = schema.get(%*{"type": "object"}) + let request = LMStudioChatRequest(model: usedModel, messages: processedMessages) + var requestJson = %*request + requestJson["response_format"] = %*{"type": "json_object"} + requestBody = $requestJson + else: + let request = LMStudioChatRequest(model: usedModel, messages: processedMessages) + requestBody = $(%*request) + + info "LMStudio Request Body: " & requestBody + debug "curl -X POST " & endpoint & " -H \"Content-Type: application/json\" -d '" & requestBody & "'" + + let response = provider.postRequestHandler(endpoint, requestBody, requestHeaders) + let responseBodyContent = streams.readAll(response.bodyStream) + + debug "LMStudio Response Status: " & $response.code + debug "LMStudio Response Body: " & responseBodyContent + + if response.code notin {Http200, Http201}: + var errorMessage = "LMStudio API Error " & $response.code + try: + let errorJson = parseJson(responseBodyContent) + if errorJson.hasKey("error") and errorJson["error"].hasKey("message"): + errorMessage &= ": " & errorJson["error"]["message"].getStr() + else: + errorMessage &= ": " & responseBodyContent + except JsonParsingError: + errorMessage &= ": " & responseBodyContent + raise newException(IOError, errorMessage) + + let apiResponse = fromLMStudio(parseJson(responseBodyContent)) + if apiResponse.choices.len > 0 and apiResponse.choices[0].message.content.len > 0: + let content = apiResponse.choices[0].message.content + let model = if apiResponse.model.len > 0: apiResponse.model else: usedModel + if model != usedModel: + info "Model fallback: " & usedModel & " was requested, but " & model & " was used. This can happen if the requested model is not loaded in LMStudio." + return ChatResult(content: content, model: model) + elif apiResponse.choices.len > 0 and apiResponse.choices[0].message.content.len == 0: + let refusal = "empty content" + return ChatResult(content: "AI Refusal: " & refusal, model: usedModel) + else: + let errorMessage = "LMStudio response contained no choices or refusal." 
+ error errorMessage + raise newException(ValueError, errorMessage) diff --git a/src/seance/providers/openai.nim b/src/seance/providers/openai.nim index a00d0e2..1fdbeb2 100644 --- a/src/seance/providers/openai.nim +++ b/src/seance/providers/openai.nim @@ -30,7 +30,8 @@ proc fromOpenAI*(node: JsonNode): ChatResponse = messageNode["content"].getStr() else: "" choices.add(ChatChoice(message: ChatMessage(role: role, content: content))) - result = ChatResponse(choices: choices) + let model = if node.hasKey("model"): node["model"].getStr() else: "" + result = ChatResponse(choices: choices, model: model) const ApiUrl = "https://api.openai.com/v1/chat/completions" @@ -71,14 +72,24 @@ method chat*(provider: OpenAIProvider, messages: seq[ChatMessage], model: Option debug "OpenAI Response Body: " & responseBodyContent if response.code notin {Http200, Http201}: - let errorMessage = "OpenAI API Error " & $response.code & ": " & responseBodyContent - error errorMessage + var errorMessage = "OpenAI API Error " & $response.code + try: + let errorJson = parseJson(responseBodyContent) + if errorJson.hasKey("error") and errorJson["error"].hasKey("message"): + errorMessage &= ": " & errorJson["error"]["message"].getStr() + else: + errorMessage &= ": " & responseBodyContent + except JsonParsingError: + errorMessage &= ": " & responseBodyContent raise newException(IOError, errorMessage) let apiResponse = fromOpenAI(parseJson(responseBodyContent)) if apiResponse.choices.len > 0 and apiResponse.choices[0].message.content.len > 0: let content = apiResponse.choices[0].message.content - return ChatResult(content: content, model: usedModel) + let model = if apiResponse.model.len > 0: apiResponse.model else: usedModel + if model != usedModel: + info "Model fallback: " & usedModel & " was requested, but " & model & " was used." 
+ return ChatResult(content: content, model: model) elif apiResponse.choices.len > 0 and apiResponse.choices[0].message.content.len == 0: let refusal = "empty content" return ChatResult(content: "AI Refusal: " & refusal, model: usedModel) diff --git a/src/seance/types.nim b/src/seance/types.nim index 72b70af..eb7db9b 100644 --- a/src/seance/types.nim +++ b/src/seance/types.nim @@ -6,6 +6,7 @@ import json type HttpPostHandler* = proc(url: string, body: string, headers: HttpHeaders): Response + HttpGetHandler* = proc(url: string): Response MessageRole* = enum system, user, assistant @@ -46,16 +47,19 @@ type defaultModel*: string # Separate out to facilitate mocking postRequestHandler*: HttpPostHandler + getRequestHandler*: HttpGetHandler Provider* = enum Anthropic, Gemini, OpenAI, - OpenRouter + OpenRouter, + LMStudio ProviderConfig* = object key*: string model*: Option[string] + endpoint*: Option[string] SeanceConfig* = object providers*: Table[string, ProviderConfig] @@ -70,5 +74,6 @@ proc parseProvider*(providerName: string): Provider = of "gemini": result = Gemini of "anthropic": result = Anthropic of "openrouter": result = OpenRouter + of "lmstudio": result = LMStudio else: raise newException(ConfigError, "Unknown provider: " & providerName) diff --git a/tests/t_providers_lmstudio.nim b/tests/t_providers_lmstudio.nim new file mode 100644 index 0000000..066b023 --- /dev/null +++ b/tests/t_providers_lmstudio.nim @@ -0,0 +1,169 @@ +import seance/types +import seance/defaults +import seance/providers +import seance/config + +import std/[json, tables, options, streams, httpclient, logging, unittest, os, strutils] + +# --- Manual Mocking Setup for HTTP POST Request --- +var mockHttpResponse: Response +var capturedUrl: string +var capturedRequestBody: string +var capturedHeaders: HttpHeaders + +var mockModelsResponse: Response + +proc mockPostRequestHandler(url: string, requestBodyStr: string, headers: HttpHeaders): Response = + debug "--- Inside mockPostRequestHandler ---" + debug "Received Headers in mock: " & $headers + capturedUrl = url + capturedRequestBody = requestBodyStr + capturedHeaders = headers + return mockHttpResponse + +proc mockGetRequestHandler(url: string): Response = + debug "--- Inside mockGetRequestHandler ---" + debug "Received URL in mock: " & url + if "models" in url: + return mockModelsResponse + else: + return Response(status: "404 Not Found", bodyStream: newStringStream("")) + +# --- Test Suites --- + +suite "LMStudio Provider": + let testMessages = @[ + ChatMessage(role: system, content: "You are a test assistant."), + ChatMessage(role: user, content: "What is the capital of testing?") + ] + + setup: + mockHttpResponse = Response() + capturedUrl = "" + capturedRequestBody = "" + capturedHeaders = newHttpHeaders() + addHandler(newConsoleLogger(levelThreshold = lvlInfo)) + + teardown: + discard + + test "chat method sends correct request without auth header": + mockHttpResponse = Response( + status: "200 OK", + bodyStream: newStringStream("""{"choices": [{"message": {"role": "assistant", "content": "Local response!"}}]}""") + ) + + let conf = ProviderConfig(key: "", model: none(string), endpoint: none(string)) + let provider = newProvider(some(LMStudio), some(conf)) + provider.postRequestHandler = mockPostRequestHandler + + let result = provider.chat(testMessages, model = none(string), jsonMode = false, schema = none(JsonNode)) + + check capturedUrl == DefaultLMStudioEndpoint + check not capturedHeaders.hasKey("Authorization") + check capturedHeaders["Content-Type"] 
== "application/json" + + let requestJson = parseJson(capturedRequestBody) + check requestJson["model"].getStr() == DefaultModels[LMStudio] + check result.content == "Local response!" + + test "chat method sends auth header when key is provided": + mockHttpResponse = Response( + status: "200 OK", + bodyStream: newStringStream("""{"choices": [{"message": {"role": "assistant", "content": "Local response with key!"}}]}""") + ) + + let conf = ProviderConfig(key: "test-key", model: none(string), endpoint: none(string)) + let provider = newProvider(some(LMStudio), some(conf)) + provider.postRequestHandler = mockPostRequestHandler + + discard provider.chat(testMessages, model = none(string), jsonMode = false, schema = none(JsonNode)) + + check capturedHeaders.hasKey("Authorization") + check capturedHeaders["Authorization"] == "Bearer test-key" + + test "chat method uses custom endpoint from config": + mockHttpResponse = Response( + status: "200 OK", + bodyStream: newStringStream("""{"choices": [{"message": {"role": "assistant", "content": "Custom endpoint response!"}}]}""") + ) + + let customEndpoint = "http://localhost:8080/v1/chat/completions" + let conf = ProviderConfig(key: "", model: none(string), endpoint: some(customEndpoint)) + let provider = newProvider(some(LMStudio), some(conf)) + provider.postRequestHandler = mockPostRequestHandler + + discard provider.chat(testMessages, model = none(string), jsonMode = false, schema = none(JsonNode)) + + check capturedUrl == customEndpoint + + test "chat method raises IOError with parsed message on non-2xx HTTP status code": + let errorMessage = "Failed to load model" + mockHttpResponse = Response( + status: "404 Not Found", + bodyStream: newStringStream("""{"error": {"message": "$1"}}""" % errorMessage) + ) + let conf = ProviderConfig(key: "", model: none(string), endpoint: none(string)) + let provider = newProvider(some(LMStudio), some(conf)) + provider.postRequestHandler = mockPostRequestHandler + + try: + discard provider.chat(testMessages, model = none(string), jsonMode = false, schema = none(JsonNode)) + fail() + except IOError as e: + check e.msg.contains(errorMessage) + + test "chat method raises ValueError on empty choices array": + mockHttpResponse = Response( + status: "200 OK", + bodyStream: newStringStream("""{"choices": []}""") + ) + let conf = ProviderConfig(key: "", model: none(string), endpoint: none(string)) + let provider = newProvider(some(LMStudio), some(conf)) + provider.postRequestHandler = mockPostRequestHandler + + expect ValueError: + discard provider.chat(testMessages, model = none(string), jsonMode = false, schema = none(JsonNode)) + + test "newProvider uses default config when lmstudio is not in config file": + # Create a temporary config file without an [lmstudio] section + let tempConfPath = "temp_test_config.ini" + writeFile(tempConfPath, "[seance]\ndefault_provider = lmstudio") + setConfigPath(tempConfPath) + + # Initialize the provider without a specific config + let provider = newProvider(some(LMStudio)) + + # Assert that the provider was created with the default settings + check provider.defaultModel == DefaultModels[LMStudio] + check provider.conf.key == "" + check provider.conf.model.isNone() + check provider.conf.endpoint.isNone() + + # Clean up the temporary file + removeFile(tempConfPath) + setConfigPath("") # Reset config path + + test "chat method proceeds when model is not loaded in non-interactive mode": + # Mock the models endpoint to return a list of available models + mockModelsResponse = Response( + 
status: "200 OK", + bodyStream: newStringStream("""{"data": [{"id": "model-1", "state": "loaded"}, {"id": "model-2", "state": "not-loaded"}]}""") + ) + # Mock the chat completions endpoint + mockHttpResponse = Response( + status: "200 OK", + bodyStream: newStringStream("""{"choices": [{"message": {"role": "assistant", "content": "Fallback response!"}}], "model": "model-2"}""") + ) + + let conf = ProviderConfig(key: "", model: some("model-2"), endpoint: none(string)) + let provider = newProvider(some(LMStudio), some(conf)) + provider.postRequestHandler = mockPostRequestHandler + provider.getRequestHandler = mockGetRequestHandler + + # In a non-interactive session, it should just proceed + let result = provider.chat(testMessages, model = none(string), jsonMode = false, schema = none(JsonNode)) + + let requestJson = parseJson(capturedRequestBody) + check requestJson["model"].getStr() == "model-2" + check result.model == "model-2"