diff --git a/.env.example b/.env.example index 44eb01b..578b618 100644 --- a/.env.example +++ b/.env.example @@ -31,12 +31,12 @@ REFRESH_INTERVAL_MINUTES=15 # === LLM Layer (optional) === # Enables AI-enhanced trade ideas and breaking news Telegram alerts. -# Provider options: anthropic | openai | gemini | codex | openrouter | minimax +# Provider options: anthropic | openai | gemini | codex | openrouter | minimax | mistral LLM_PROVIDER= # Not needed for codex (uses ~/.codex/auth.json) LLM_API_KEY= # Optional override. Each provider has a sensible default: -# anthropic: claude-sonnet-4-6 | openai: gpt-5.4 | gemini: gemini-3.1-pro | codex: gpt-5.3-codex | openrouter: openrouter/auto | minimax: MiniMax-M2.5 +# anthropic: claude-sonnet-4-6 | openai: gpt-5.4 | gemini: gemini-3.1-pro | codex: gpt-5.3-codex | openrouter: openrouter/auto | minimax: MiniMax-M2.5 | mistral: mistral-small-latest LLM_MODEL= # === Telegram Alerts (optional, requires LLM) === diff --git a/README.md b/README.md index 6bb7538..f4f7a73 100644 --- a/README.md +++ b/README.md @@ -158,10 +158,10 @@ Alerts are delivered as rich embeds with color-coded sidebars: red for FLASH, ye **Optional dependency:** The full bot requires `discord.js`. Install it with `npm install discord.js`. If it's not installed, Crucix automatically falls back to webhook-only mode. 
### Optional LLM Layer -Connect any of 6 LLM providers for enhanced analysis: +Connect any of 7 LLM providers for enhanced analysis: - **AI trade ideas** — quantitative analyst producing 5-8 actionable ideas citing specific data - **Smarter alert evaluation** — LLM classifies signals into FLASH/PRIORITY/ROUTINE tiers with cross-domain correlation and confidence scoring -- Providers: Anthropic Claude, OpenAI, Google Gemini, OpenRouter (Unified API), OpenAI Codex (ChatGPT subscription), MiniMax +- Providers: Anthropic Claude, OpenAI, Google Gemini, OpenRouter (Unified API), OpenAI Codex (ChatGPT subscription), MiniMax, Mistral - Graceful fallback — when LLM is unavailable, a rule-based engine takes over alert evaluation. LLM failures never crash the sweep cycle. --- @@ -194,7 +194,7 @@ These three unlock the most valuable economic and satellite data. Each takes abo ### LLM Provider (optional, for AI-enhanced ideas) -Set `LLM_PROVIDER` to one of: `anthropic`, `openai`, `gemini`, `codex`, `openrouter`, `minimax` +Set `LLM_PROVIDER` to one of: `anthropic`, `openai`, `gemini`, `codex`, `openrouter`, `minimax`, `mistral` | Provider | Key Required | Default Model | |----------|-------------|---------------| @@ -204,6 +204,7 @@ Set `LLM_PROVIDER` to one of: `anthropic`, `openai`, `gemini`, `codex`, `openrou | `openrouter` | `LLM_API_KEY` | openrouter/auto | | `codex` | None (uses `~/.codex/auth.json`) | gpt-5.3-codex | | `minimax` | `LLM_API_KEY` | MiniMax-M2.5 | +| `mistral` | `LLM_API_KEY` | mistral-small-latest | For Codex, run `npx @openai/codex login` to authenticate via your ChatGPT subscription. 
@@ -273,7 +274,7 @@ crucix/ │ └── jarvis.html # Self-contained Jarvis HUD │ ├── lib/ -│ ├── llm/ # LLM abstraction (5 providers, raw fetch, no SDKs) +│ ├── llm/ # LLM abstraction (7 providers, raw fetch, no SDKs) │ │ ├── provider.mjs # Base class │ │ ├── anthropic.mjs # Claude │ │ ├── openai.mjs # GPT @@ -281,6 +282,7 @@ crucix/ │ │ ├── openrouter.mjs # OpenRouter (Unified API) │ │ ├── codex.mjs # Codex (ChatGPT subscription) │ │ ├── minimax.mjs # MiniMax (M2.5, 204K context) +│ │ ├── mistral.mjs # Mistral (OpenAI-compatible, JSON mode) │ │ ├── ideas.mjs # LLM-powered trade idea generation │ │ └── index.mjs # Factory: createLLMProvider() │ ├── delta/ # Change tracking between sweeps @@ -382,7 +384,7 @@ All settings are in `.env` with sensible defaults: |----------|---------|-------------| | `PORT` | `3117` | Dashboard server port | | `REFRESH_INTERVAL_MINUTES` | `15` | Auto-refresh interval | -| `LLM_PROVIDER` | disabled | `anthropic`, `openai`, `gemini`, `codex`, `openrouter`, or `minimax` | +| `LLM_PROVIDER` | disabled | `anthropic`, `openai`, `gemini`, `codex`, `openrouter`, `minimax`, or `mistral` | | `LLM_API_KEY` | — | API key (not needed for codex) | | `LLM_MODEL` | per-provider default | Override model selection | | `TELEGRAM_BOT_TOKEN` | disabled | For Telegram alerts + bot commands | diff --git a/lib/llm/index.mjs b/lib/llm/index.mjs index 7320a66..b2d16ee 100644 --- a/lib/llm/index.mjs +++ b/lib/llm/index.mjs @@ -6,6 +6,7 @@ import { OpenRouterProvider } from './openrouter.mjs'; import { GeminiProvider } from './gemini.mjs'; import { CodexProvider } from './codex.mjs'; import { MiniMaxProvider } from './minimax.mjs'; +import { MistralProvider } from './mistral.mjs'; export { LLMProvider } from './provider.mjs'; export { AnthropicProvider } from './anthropic.mjs'; @@ -14,6 +15,7 @@ export { OpenRouterProvider } from './openrouter.mjs'; export { GeminiProvider } from './gemini.mjs'; export { CodexProvider } from './codex.mjs'; export { MiniMaxProvider } 
from './minimax.mjs'; +export { MistralProvider } from './mistral.mjs'; /** * Create an LLM provider based on config. @@ -38,6 +40,8 @@ export function createLLMProvider(llmConfig) { return new CodexProvider({ model }); case 'minimax': return new MiniMaxProvider({ apiKey, model }); + case 'mistral': + return new MistralProvider({ apiKey, model }); default: console.warn(`[LLM] Unknown provider "${provider}". LLM features disabled.`); return null; diff --git a/lib/llm/mistral.mjs b/lib/llm/mistral.mjs new file mode 100644 index 0000000..de6fefd --- /dev/null +++ b/lib/llm/mistral.mjs @@ -0,0 +1,53 @@ +// Mistral Provider — raw fetch, no SDK +// Uses Mistral's OpenAI-compatible Chat Completions API + +import { LLMProvider } from './provider.mjs'; + +export class MistralProvider extends LLMProvider { + constructor(config) { + super(config); + this.name = 'mistral'; + this.apiKey = config.apiKey; + this.model = config.model || 'mistral-small-latest'; + } + + get isConfigured() { return !!this.apiKey; } + + async complete(systemPrompt, userMessage, opts = {}) { + const res = await fetch('https://api.mistral.ai/v1/chat/completions', { + method: 'POST', + headers: { + 'Content-Type': 'application/json', + 'Authorization': `Bearer ${this.apiKey}`, + }, + body: JSON.stringify({ + model: this.model, + max_tokens: opts.maxTokens || 4096, + messages: [ + { role: 'system', content: systemPrompt }, + { role: 'user', content: userMessage }, + ], + // Enforce JSON output so callers can parse directly without markdown stripping + response_format: { type: 'json_object' }, + }), + signal: AbortSignal.timeout(opts.timeout || 60000), + }); + + if (!res.ok) { + const err = await res.text().catch(() => ''); + throw new Error(`Mistral API ${res.status}: ${err.substring(0, 200)}`); + } + + const data = await res.json(); + const text = data.choices?.[0]?.message?.content || ''; + + return { + text, + usage: { + inputTokens: data.usage?.prompt_tokens || 0, + outputTokens: 
data.usage?.completion_tokens || 0, + }, + model: data.model || this.model, + }; + } +} diff --git a/package.json b/package.json index 5b90bf2..9972127 100644 --- a/package.json +++ b/package.json @@ -27,10 +27,12 @@ "npm": ">=10" }, "dependencies": { "express": "^5.1.0" }, "optionalDependencies": { - "discord.js": "^14.25.1" }, + "discord.js": "^14.25.1" + }, "overrides": { - "undici": "^7.24.4" } + "undici": "^7.24.4" + } }