From 6617967e36bf48e8249fc191a6d466fedb7430c8 Mon Sep 17 00:00:00 2001 From: rajackar Date: Tue, 31 Mar 2026 11:16:21 +0200 Subject: [PATCH 1/2] Added timeout option for Ollama provider in .env, config, and provider implementation. --- .env.example | 2 ++ crucix.config.mjs | 1 + lib/llm/index.mjs | 2 +- lib/llm/ollama.mjs | 4 ++-- 4 files changed, 6 insertions(+), 3 deletions(-) diff --git a/.env.example b/.env.example index 674e882..c6d20e6 100644 --- a/.env.example +++ b/.env.example @@ -42,6 +42,8 @@ LLM_API_KEY= LLM_MODEL= # Ollama base URL (only needed if not using default http://localhost:11434) OLLAMA_BASE_URL= +# Timeout in milliseconds for local Ollama instances. Default is 120000 (120 seconds). Increase if your local model needs more time to complete. +OLLAMA_TIMEOUT= # === Telegram Alerts (optional, requires LLM) === # Create a bot via @BotFather, get chat ID via @userinfobot diff --git a/crucix.config.mjs b/crucix.config.mjs index 887d760..b22294a 100644 --- a/crucix.config.mjs +++ b/crucix.config.mjs @@ -11,6 +11,7 @@ export default { apiKey: process.env.LLM_API_KEY || null, model: process.env.LLM_MODEL || null, baseUrl: process.env.OLLAMA_BASE_URL || null, + timeout: parseInt(process.env.OLLAMA_TIMEOUT || '0', 10) || null, }, telegram: { diff --git a/lib/llm/index.mjs b/lib/llm/index.mjs index 21fd64a..ca4f3c9 100644 --- a/lib/llm/index.mjs +++ b/lib/llm/index.mjs @@ -47,7 +47,7 @@ export function createLLMProvider(llmConfig) { case "mistral": return new MistralProvider({ apiKey, model }); case "ollama": - return new OllamaProvider({ model, baseUrl: llmConfig.baseUrl }); + return new OllamaProvider({ model, baseUrl: llmConfig.baseUrl, timeout: llmConfig.timeout }); case 'grok': return new GrokProvider({ apiKey, model }); default: diff --git a/lib/llm/ollama.mjs b/lib/llm/ollama.mjs index 5bb509a..bf1f7d3 100644 --- a/lib/llm/ollama.mjs +++ b/lib/llm/ollama.mjs @@ -10,10 +10,10 @@ export class OllamaProvider extends LLMProvider { this.name = 'ollama'; this.baseUrl = 
(config.baseUrl || 'http://localhost:11434').replace(/\/+$/, ''); this.model = config.model || 'llama3.1:8b'; + this.timeout = config.timeout || parseInt(process.env.OLLAMA_TIMEOUT || '120000', 10); } get isConfigured() { return !!this.model; } - async complete(systemPrompt, userMessage, opts = {}) { const res = await fetch(`${this.baseUrl}/v1/chat/completions`, { method: 'POST', @@ -26,7 +26,7 @@ export class OllamaProvider extends LLMProvider { { role: 'user', content: userMessage }, ], }), - signal: AbortSignal.timeout(opts.timeout || 120000), + signal: AbortSignal.timeout(opts.timeout || this.timeout), }); if (!res.ok) { From 997d5f39511a604568eac2d8022e38e106135eb6 Mon Sep 17 00:00:00 2001 From: rajackar Date: Tue, 31 Mar 2026 11:28:22 +0200 Subject: [PATCH 2/2] Added timeout explanation to README.md --- README.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/README.md b/README.md index a05f962..7ce078a 100644 --- a/README.md +++ b/README.md @@ -417,6 +417,8 @@ All settings are in `.env` with sensible defaults: | `LLM_PROVIDER` | disabled | `anthropic`, `openai`, `gemini`, `codex`, `openrouter`, `minimax`, `mistral`, or `grok` | | `LLM_API_KEY` | — | API key (not needed for codex) | | `LLM_MODEL` | per-provider default | Override model selection | +| `OLLAMA_BASE_URL` | `http://localhost:11434` | Override Ollama base URL | +| `OLLAMA_TIMEOUT` | `120000` (120 s) | Timeout in ms for local Ollama instances; increase if your model needs more time | | `TELEGRAM_BOT_TOKEN` | disabled | For Telegram alerts + bot commands | | `TELEGRAM_CHAT_ID` | — | Your Telegram chat ID | | `TELEGRAM_CHANNELS` | — | Extra channel IDs to monitor (comma-separated) |