diff --git a/.env.example b/.env.example index 61d7e03..f239b91 100644 --- a/.env.example +++ b/.env.example @@ -22,6 +22,20 @@ DATABASE_URL="file:./prisma/dev.db" # Optional: custom API base URL (proxy or local model server) # ANTHROPIC_BASE_URL= +# ── MiniMax auth ──────────────────────────────────────────────────── +# MiniMax API key (get one at https://platform.minimaxi.com) +# MINIMAX_API_KEY=eyJ... + +# Optional: custom MiniMax API base URL (defaults to https://api.minimax.io/v1) +# MINIMAX_BASE_URL= + +# ── OpenAI auth ───────────────────────────────────────────────────── +# OpenAI API key (alternative provider) +# OPENAI_API_KEY=sk-... + +# Optional: custom OpenAI API base URL +# OPENAI_BASE_URL= + # ── Access control (optional) ──────────────────────────────────────── # Set BOTH to enable HTTP Basic Auth on the entire app. diff --git a/README.md b/README.md index 14f36f9..66bed3a 100644 --- a/README.md +++ b/README.md @@ -240,7 +240,9 @@ All settings are manageable in the **Settings** page at `/settings` or via envir | Anthropic API Key | `ANTHROPIC_API_KEY` | Optional if Claude CLI is signed in — otherwise required for AI features | | API Base URL | `ANTHROPIC_BASE_URL` | Custom endpoint for proxies or local Anthropic-compatible models | | AI Model | Settings page only | Haiku 4.5 (default, fastest/cheapest), Sonnet 4.6, Opus 4.6 | -| OpenAI Key | Settings page only | Alternative provider if no Anthropic key is set | +| OpenAI Key | `OPENAI_API_KEY` | Alternative provider — GPT-4.1 Mini/Nano/Full, o4-mini, o3 | +| MiniMax Key | `MINIMAX_API_KEY` | Alternative provider — M2.7 (1M context), M2.5, M2.5-highspeed | +| MiniMax Base URL | `MINIMAX_BASE_URL` | Custom MiniMax API endpoint (default: `https://api.minimax.io/v1`) | | Database | `DATABASE_URL` | SQLite file path (default: `file:./prisma/dev.db`) | ### Custom API Endpoint @@ -354,6 +356,7 @@ For Prisma command and workflow details, see: | [SQLite](https://sqlite.org) | — | Local database — zero setup, 
includes FTS5 | | [Tailwind CSS](https://tailwindcss.com) | v4 | Styling | | [Anthropic SDK](https://docs.anthropic.com) | — | Vision, semantic tagging, categorization, search | +| [MiniMax](https://platform.minimaxi.com) | — | Alternative AI provider (M2.7 1M context, M2.5) | | [@xyflow/react](https://xyflow.com) | 12 | Interactive mindmap graph | | [Framer Motion](https://www.framer.com/motion/) | 12 | Animations | | [Radix UI](https://www.radix-ui.com) | — | Accessible UI primitives | diff --git a/__tests__/minimax-ai-client.test.ts b/__tests__/minimax-ai-client.test.ts new file mode 100644 index 0000000..3a8d8e6 --- /dev/null +++ b/__tests__/minimax-ai-client.test.ts @@ -0,0 +1,276 @@ +import { describe, it, expect, vi, beforeEach } from 'vitest' +import { MiniMaxAIClient } from '@/lib/ai-client' +import type OpenAI from 'openai' + +function createMockOpenAI(responseContent: string): OpenAI { + return { + chat: { + completions: { + create: vi.fn().mockResolvedValue({ + choices: [{ message: { content: responseContent } }], + }), + }, + }, + } as unknown as OpenAI +} + +describe('MiniMaxAIClient', () => { + it('should have provider set to minimax', () => { + const mock = createMockOpenAI('hello') + const client = new MiniMaxAIClient(mock) + expect(client.provider).toBe('minimax') + }) + + it('should return text from completion', async () => { + const mock = createMockOpenAI('Hello from MiniMax') + const client = new MiniMaxAIClient(mock) + + const result = await client.createMessage({ + model: 'MiniMax-M2.7', + max_tokens: 100, + messages: [{ role: 'user', content: 'hi' }], + }) + + expect(result.text).toBe('Hello from MiniMax') + }) + + it('should strip thinking tags from response', async () => { + const mock = createMockOpenAI( + '<think>internal reasoning here</think>\nActual answer' + ) + const client = new MiniMaxAIClient(mock) + + const result = await client.createMessage({ + model: 'MiniMax-M2.5', + max_tokens: 100, + messages: [{ role: 'user', content: 'test' }], + }) + 
+ expect(result.text).toBe('Actual answer') + expect(result.text).not.toContain('<think>') + }) + + it('should strip multi-line thinking tags', async () => { + const mock = createMockOpenAI( + '<think>\nline1\nline2\nline3\n</think>\n\nClean output' + ) + const client = new MiniMaxAIClient(mock) + + const result = await client.createMessage({ + model: 'MiniMax-M2.5', + max_tokens: 100, + messages: [{ role: 'user', content: 'test' }], + }) + + expect(result.text).toBe('Clean output') + }) + + it('should handle empty response', async () => { + const mock = { + chat: { + completions: { + create: vi.fn().mockResolvedValue({ + choices: [{ message: { content: null } }], + }), + }, + }, + } as unknown as OpenAI + const client = new MiniMaxAIClient(mock) + + const result = await client.createMessage({ + model: 'MiniMax-M2.7', + max_tokens: 10, + messages: [{ role: 'user', content: 'hi' }], + }) + + expect(result.text).toBe('') + }) + + it('should handle empty choices', async () => { + const mock = { + chat: { + completions: { + create: vi.fn().mockResolvedValue({ choices: [] }), + }, + }, + } as unknown as OpenAI + const client = new MiniMaxAIClient(mock) + + const result = await client.createMessage({ + model: 'MiniMax-M2.7', + max_tokens: 10, + messages: [{ role: 'user', content: 'hi' }], + }) + + expect(result.text).toBe('') + }) + + it('should pass model and max_tokens to SDK', async () => { + const createFn = vi.fn().mockResolvedValue({ + choices: [{ message: { content: 'ok' } }], + }) + const mock = { chat: { completions: { create: createFn } } } as unknown as OpenAI + const client = new MiniMaxAIClient(mock) + + await client.createMessage({ + model: 'MiniMax-M2.7', + max_tokens: 512, + messages: [{ role: 'user', content: 'test' }], + }) + + expect(createFn).toHaveBeenCalledWith( + expect.objectContaining({ + model: 'MiniMax-M2.7', + max_tokens: 512, + }) + ) + }) + + it('should convert string messages correctly', async () => { + const createFn = vi.fn().mockResolvedValue({ + choices: [{ 
message: { content: 'ok' } }], + }) + const mock = { chat: { completions: { create: createFn } } } as unknown as OpenAI + const client = new MiniMaxAIClient(mock) + + await client.createMessage({ + model: 'MiniMax-M2.7', + max_tokens: 100, + messages: [ + { role: 'user', content: 'hello' }, + { role: 'assistant', content: 'hi there' }, + { role: 'user', content: 'how are you?' }, + ], + }) + + const call = createFn.mock.calls[0][0] + expect(call.messages).toHaveLength(3) + expect(call.messages[0]).toEqual({ role: 'user', content: 'hello' }) + expect(call.messages[1]).toEqual({ role: 'assistant', content: 'hi there' }) + expect(call.messages[2]).toEqual({ role: 'user', content: 'how are you?' }) + }) + + it('should convert image content blocks to base64 data URLs', async () => { + const createFn = vi.fn().mockResolvedValue({ + choices: [{ message: { content: 'I see an image' } }], + }) + const mock = { chat: { completions: { create: createFn } } } as unknown as OpenAI + const client = new MiniMaxAIClient(mock) + + await client.createMessage({ + model: 'MiniMax-M2.7', + max_tokens: 100, + messages: [ + { + role: 'user', + content: [ + { type: 'text', text: 'What is this?' }, + { + type: 'image', + source: { + type: 'base64', + media_type: 'image/png', + data: 'iVBORw0KGgo=', + }, + }, + ], + }, + ], + }) + + const call = createFn.mock.calls[0][0] + const msg = call.messages[0] + expect(msg.role).toBe('user') + expect(msg.content).toHaveLength(2) + expect(msg.content[0]).toEqual({ type: 'text', text: 'What is this?' 
}) + expect(msg.content[1]).toEqual({ + type: 'image_url', + image_url: { url: 'data:image/png;base64,iVBORw0KGgo=' }, + }) + }) + + it('should filter non-text parts from assistant messages', async () => { + const createFn = vi.fn().mockResolvedValue({ + choices: [{ message: { content: 'ok' } }], + }) + const mock = { chat: { completions: { create: createFn } } } as unknown as OpenAI + const client = new MiniMaxAIClient(mock) + + await client.createMessage({ + model: 'MiniMax-M2.7', + max_tokens: 100, + messages: [ + { + role: 'assistant', + content: [ + { type: 'text', text: 'some text' }, + { + type: 'image', + source: { type: 'base64', media_type: 'image/png', data: 'abc' }, + }, + ], + }, + ], + }) + + const call = createFn.mock.calls[0][0] + const msg = call.messages[0] + expect(msg.role).toBe('assistant') + // Only text parts should remain for assistant + expect(msg.content.every((p: { type: string }) => p.type === 'text')).toBe(true) + }) + + it('should handle text content block with missing text', async () => { + const createFn = vi.fn().mockResolvedValue({ + choices: [{ message: { content: 'ok' } }], + }) + const mock = { chat: { completions: { create: createFn } } } as unknown as OpenAI + const client = new MiniMaxAIClient(mock) + + await client.createMessage({ + model: 'MiniMax-M2.7', + max_tokens: 100, + messages: [ + { + role: 'user', + content: [{ type: 'text' }], + }, + ], + }) + + const call = createFn.mock.calls[0][0] + expect(call.messages[0].content[0].text).toBe('') + }) + + it('should handle response with no thinking tags (pass-through)', async () => { + const mock = createMockOpenAI('Regular response without thinking') + const client = new MiniMaxAIClient(mock) + + const result = await client.createMessage({ + model: 'MiniMax-M2.7', + max_tokens: 100, + messages: [{ role: 'user', content: 'test' }], + }) + + expect(result.text).toBe('Regular response without thinking') + }) + + it('should propagate SDK errors', async () => { + const mock = { + 
chat: { + completions: { + create: vi.fn().mockRejectedValue(new Error('API rate limit')), + }, + }, + } as unknown as OpenAI + const client = new MiniMaxAIClient(mock) + + await expect( + client.createMessage({ + model: 'MiniMax-M2.7', + max_tokens: 100, + messages: [{ role: 'user', content: 'test' }], + }) + ).rejects.toThrow('API rate limit') + }) +}) diff --git a/__tests__/minimax-auth.test.ts b/__tests__/minimax-auth.test.ts new file mode 100644 index 0000000..a3f68e4 --- /dev/null +++ b/__tests__/minimax-auth.test.ts @@ -0,0 +1,95 @@ +import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest' +import { resolveMiniMaxClient } from '@/lib/minimax-auth' + +describe('resolveMiniMaxClient', () => { + const originalEnv = process.env + + beforeEach(() => { + process.env = { ...originalEnv } + delete process.env.MINIMAX_API_KEY + delete process.env.MINIMAX_BASE_URL + }) + + afterEach(() => { + process.env = originalEnv + }) + + it('should use override key when provided', () => { + const client = resolveMiniMaxClient({ overrideKey: 'test-override-key' }) + expect(client).toBeDefined() + expect(client.baseURL).toBe('https://api.minimax.io/v1') + }) + + it('should use DB key when provided', () => { + const client = resolveMiniMaxClient({ dbKey: 'test-db-key' }) + expect(client).toBeDefined() + expect(client.baseURL).toBe('https://api.minimax.io/v1') + }) + + it('should use env var MINIMAX_API_KEY', () => { + process.env.MINIMAX_API_KEY = 'test-env-key' + const client = resolveMiniMaxClient() + expect(client).toBeDefined() + expect(client.baseURL).toBe('https://api.minimax.io/v1') + }) + + it('should prioritize overrideKey over dbKey', () => { + const client = resolveMiniMaxClient({ + overrideKey: 'override', + dbKey: 'db', + }) + expect(client).toBeDefined() + }) + + it('should prioritize dbKey over env var', () => { + process.env.MINIMAX_API_KEY = 'env-key' + const client = resolveMiniMaxClient({ dbKey: 'db-key' }) + expect(client).toBeDefined() + }) + + 
it('should use custom base URL from options', () => { + const client = resolveMiniMaxClient({ + overrideKey: 'key', + baseURL: 'https://custom.api.com/v1', + }) + expect(client.baseURL).toBe('https://custom.api.com/v1') + }) + + it('should use MINIMAX_BASE_URL env var', () => { + process.env.MINIMAX_API_KEY = 'key' + process.env.MINIMAX_BASE_URL = 'https://proxy.example.com/v1' + const client = resolveMiniMaxClient() + expect(client.baseURL).toBe('https://proxy.example.com/v1') + }) + + it('should throw when no key is available', () => { + expect(() => resolveMiniMaxClient()).toThrow( + 'No MiniMax API key found' + ) + }) + + it('should allow proxy without key when baseURL provided', () => { + const client = resolveMiniMaxClient({ baseURL: 'https://proxy.local' }) + expect(client).toBeDefined() + expect(client.baseURL).toBe('https://proxy.local') + }) + + it('should trim whitespace from keys', () => { + const client = resolveMiniMaxClient({ overrideKey: ' key-with-spaces ' }) + expect(client).toBeDefined() + }) + + it('should not use empty override key', () => { + process.env.MINIMAX_API_KEY = 'env-key' + const client = resolveMiniMaxClient({ overrideKey: ' ' }) + // Falls through to env key + expect(client).toBeDefined() + }) + + it('should not use empty db key', () => { + process.env.MINIMAX_API_KEY = 'env-key' + const client = resolveMiniMaxClient({ dbKey: '' }) + // Falls through to env key + expect(client).toBeDefined() + }) +}) diff --git a/__tests__/minimax-integration.test.ts b/__tests__/minimax-integration.test.ts new file mode 100644 index 0000000..2081bca --- /dev/null +++ b/__tests__/minimax-integration.test.ts @@ -0,0 +1,160 @@ +import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest' +import { MiniMaxAIClient } from '@/lib/ai-client' +import type OpenAI from 'openai' + +/** + * Integration tests for MiniMax as an AI provider in Siftly. 
+ * + * These test end-to-end scenarios like categorization and search + * using the MiniMaxAIClient with mocked SDK responses. + */ + +function createMockSDK(response: string): OpenAI { + return { + chat: { + completions: { + create: vi.fn().mockResolvedValue({ + choices: [{ message: { content: response } }], + }), + }, + }, + } as unknown as OpenAI +} + +describe('MiniMax integration - categorization pipeline', () => { + it('should handle JSON categorization response', async () => { + const jsonResponse = JSON.stringify({ + results: [ + { id: '1', categories: [{ slug: 'ai-resources', confidence: 0.9 }] }, + { id: '2', categories: [{ slug: 'dev-tools', confidence: 0.85 }, { slug: 'ai-resources', confidence: 0.7 }] }, + ], + }) + const client = new MiniMaxAIClient(createMockSDK(jsonResponse)) + + const result = await client.createMessage({ + model: 'MiniMax-M2.7', + max_tokens: 4096, + messages: [ + { + role: 'user', + content: 'Categorize these bookmarks...', + }, + ], + }) + + const parsed = JSON.parse(result.text) + expect(parsed.results).toHaveLength(2) + expect(parsed.results[0].categories[0].slug).toBe('ai-resources') + expect(parsed.results[1].categories).toHaveLength(2) + }) + + it('should strip thinking tags before JSON parsing', async () => { + const response = '<think>Let me analyze these bookmarks...</think>\n{"results":[{"id":"1","categories":[{"slug":"funny-memes","confidence":0.95}]}]}' + const client = new MiniMaxAIClient(createMockSDK(response)) + + const result = await client.createMessage({ + model: 'MiniMax-M2.5', + max_tokens: 4096, + messages: [{ role: 'user', content: 'Categorize...' 
}], + }) + + const parsed = JSON.parse(result.text) + expect(parsed.results[0].categories[0].slug).toBe('funny-memes') + }) + + it('should handle semantic tag generation response', async () => { + const tagResponse = JSON.stringify({ + results: [ + { + id: '1', + tags: ['machine-learning', 'python', 'neural-networks', 'deep-learning'], + sentiment: 'positive', + people: ['Andrej Karpathy'], + companies: ['OpenAI'], + }, + ], + }) + const client = new MiniMaxAIClient(createMockSDK(tagResponse)) + + const result = await client.createMessage({ + model: 'MiniMax-M2.7', + max_tokens: 4096, + messages: [{ role: 'user', content: 'Generate semantic tags...' }], + }) + + const parsed = JSON.parse(result.text) + expect(parsed.results[0].tags).toContain('machine-learning') + expect(parsed.results[0].people).toContain('Andrej Karpathy') + }) +}) + +describe('MiniMax integration - search reranking', () => { + it('should handle search reranking response', async () => { + const rankResponse = JSON.stringify({ + ranked: [ + { id: '3', score: 0.95, explanation: 'Directly about AI coding tools' }, + { id: '1', score: 0.72, explanation: 'Mentions coding in context' }, + ], + }) + const client = new MiniMaxAIClient(createMockSDK(rankResponse)) + + const result = await client.createMessage({ + model: 'MiniMax-M2.7', + max_tokens: 2048, + messages: [ + { + role: 'user', + content: 'Rerank these search results for query "best AI coding tools"...', + }, + ], + }) + + const parsed = JSON.parse(result.text) + expect(parsed.ranked).toHaveLength(2) + expect(parsed.ranked[0].score).toBeGreaterThan(parsed.ranked[1].score) + }) +}) + +describe('MiniMax integration - vision analysis', () => { + it('should handle image analysis with base64 content', async () => { + const visionResponse = JSON.stringify({ + tags: ['screenshot', 'code-editor', 'dark-theme', 'python', 'terminal'], + ocr_text: 'def hello_world():', + scene: 'programming workspace', + }) + const createFn = vi.fn().mockResolvedValue({ + 
choices: [{ message: { content: visionResponse } }], + }) + const mock = { chat: { completions: { create: createFn } } } as unknown as OpenAI + const client = new MiniMaxAIClient(mock) + + const result = await client.createMessage({ + model: 'MiniMax-M2.7', + max_tokens: 2048, + messages: [ + { + role: 'user', + content: [ + { type: 'text', text: 'Analyze this image' }, + { + type: 'image', + source: { + type: 'base64', + media_type: 'image/jpeg', + data: '/9j/4AAQSkZJRg==', + }, + }, + ], + }, + ], + }) + + const parsed = JSON.parse(result.text) + expect(parsed.tags).toContain('code-editor') + expect(parsed.ocr_text).toBe('def hello_world():') + + // Verify image was sent as data URL + const call = createFn.mock.calls[0][0] + expect(call.messages[0].content[1].image_url.url).toContain('data:image/jpeg;base64,') + }) +}) diff --git a/app/api/settings/route.ts b/app/api/settings/route.ts index f06373e..9e7f315 100644 --- a/app/api/settings/route.ts +++ b/app/api/settings/route.ts @@ -22,14 +22,22 @@ const ALLOWED_OPENAI_MODELS = [ 'o3', ] as const +const ALLOWED_MINIMAX_MODELS = [ + 'MiniMax-M2.7', + 'MiniMax-M2.5', + 'MiniMax-M2.5-highspeed', +] as const + export async function GET(): Promise { try { - const [anthropic, anthropicModel, provider, openai, openaiModel, xClientId, xClientSecret] = await Promise.all([ + const [anthropic, anthropicModel, provider, openai, openaiModel, minimax, minimaxModel, xClientId, xClientSecret] = await Promise.all([ prisma.setting.findUnique({ where: { key: 'anthropicApiKey' } }), prisma.setting.findUnique({ where: { key: 'anthropicModel' } }), prisma.setting.findUnique({ where: { key: 'aiProvider' } }), prisma.setting.findUnique({ where: { key: 'openaiApiKey' } }), prisma.setting.findUnique({ where: { key: 'openaiModel' } }), + prisma.setting.findUnique({ where: { key: 'minimaxApiKey' } }), + prisma.setting.findUnique({ where: { key: 'minimaxModel' } }), prisma.setting.findUnique({ where: { key: 'x_oauth_client_id' } }), 
prisma.setting.findUnique({ where: { key: 'x_oauth_client_secret' } }), ]) @@ -42,6 +50,9 @@ export async function GET(): Promise { openaiApiKey: maskKey(openai?.value ?? null), hasOpenaiKey: openai !== null, openaiModel: openaiModel?.value ?? 'gpt-4.1-mini', + minimaxApiKey: maskKey(minimax?.value ?? null), + hasMinimaxKey: minimax !== null, + minimaxModel: minimaxModel?.value ?? 'MiniMax-M2.7', xOAuthClientId: maskKey(xClientId?.value ?? null), xOAuthClientSecret: maskKey(xClientSecret?.value ?? null), hasXOAuth: !!xClientId?.value, @@ -62,6 +73,8 @@ export async function POST(request: NextRequest): Promise { provider?: string openaiApiKey?: string openaiModel?: string + minimaxApiKey?: string + minimaxModel?: string xOAuthClientId?: string xOAuthClientSecret?: string } = {} @@ -71,11 +84,11 @@ export async function POST(request: NextRequest): Promise { return NextResponse.json({ error: 'Invalid JSON body' }, { status: 400 }) } - const { anthropicApiKey, anthropicModel, provider, openaiApiKey, openaiModel } = body + const { anthropicApiKey, anthropicModel, provider, openaiApiKey, openaiModel, minimaxApiKey, minimaxModel } = body // Save provider if provided if (provider !== undefined) { - if (provider !== 'anthropic' && provider !== 'openai') { + if (provider !== 'anthropic' && provider !== 'openai' && provider !== 'minimax') { return NextResponse.json({ error: 'Invalid provider' }, { status: 400 }) } await prisma.setting.upsert({ @@ -115,6 +128,20 @@ export async function POST(request: NextRequest): Promise { return NextResponse.json({ saved: true }) } + // Save MiniMax model if provided + if (minimaxModel !== undefined) { + if (!(ALLOWED_MINIMAX_MODELS as readonly string[]).includes(minimaxModel)) { + return NextResponse.json({ error: 'Invalid MiniMax model' }, { status: 400 }) + } + await prisma.setting.upsert({ + where: { key: 'minimaxModel' }, + update: { value: minimaxModel }, + create: { key: 'minimaxModel', value: minimaxModel }, + }) + 
invalidateSettingsCache() + return NextResponse.json({ saved: true }) + } + // Save Anthropic key if provided if (anthropicApiKey !== undefined) { if (typeof anthropicApiKey !== 'string' || anthropicApiKey.trim() === '') { @@ -161,6 +188,29 @@ export async function POST(request: NextRequest): Promise { } } + // Save MiniMax key if provided + if (minimaxApiKey !== undefined) { + if (typeof minimaxApiKey !== 'string' || minimaxApiKey.trim() === '') { + return NextResponse.json({ error: 'Invalid minimaxApiKey value' }, { status: 400 }) + } + const trimmed = minimaxApiKey.trim() + try { + await prisma.setting.upsert({ + where: { key: 'minimaxApiKey' }, + update: { value: trimmed }, + create: { key: 'minimaxApiKey', value: trimmed }, + }) + invalidateSettingsCache() + return NextResponse.json({ saved: true }) + } catch (err) { + console.error('Settings POST (minimax) error:', err) + return NextResponse.json( + { error: `Failed to save: ${err instanceof Error ? err.message : String(err)}` }, + { status: 500 } + ) + } + } + // Save X OAuth credentials if provided const { xOAuthClientId, xOAuthClientSecret } = body const xKeys: { key: string; value: string | undefined }[] = [ @@ -198,7 +248,7 @@ export async function DELETE(request: NextRequest): Promise { return NextResponse.json({ error: 'Invalid JSON body' }, { status: 400 }) } - const allowed = ['anthropicApiKey', 'openaiApiKey', 'x_oauth_client_id', 'x_oauth_client_secret'] + const allowed = ['anthropicApiKey', 'openaiApiKey', 'minimaxApiKey', 'x_oauth_client_id', 'x_oauth_client_secret'] if (!body.key || !allowed.includes(body.key)) { return NextResponse.json({ error: 'Invalid key' }, { status: 400 }) } diff --git a/app/api/settings/test/route.ts b/app/api/settings/test/route.ts index 84c12af..e1291a6 100644 --- a/app/api/settings/test/route.ts +++ b/app/api/settings/test/route.ts @@ -2,6 +2,7 @@ import { NextRequest, NextResponse } from 'next/server' import prisma from '@/lib/db' import { resolveAnthropicClient, 
getCliAuthStatus } from '@/lib/claude-cli-auth' import { resolveOpenAIClient } from '@/lib/openai-auth' +import { resolveMiniMaxClient } from '@/lib/minimax-auth' export async function POST(request: NextRequest): Promise { let body: { provider?: string } = {} @@ -76,5 +77,34 @@ export async function POST(request: NextRequest): Promise { } } + if (provider === 'minimax') { + const setting = await prisma.setting.findUnique({ where: { key: 'minimaxApiKey' } }) + const dbKey = setting?.value?.trim() + + let client + try { + client = resolveMiniMaxClient({ dbKey }) + } catch { + return NextResponse.json({ working: false, error: 'No MiniMax API key found. Add one in Settings or set MINIMAX_API_KEY.' }) + } + + try { + await client.chat.completions.create({ + model: 'MiniMax-M2.7', + max_tokens: 5, + messages: [{ role: 'user', content: 'hi' }], + }) + return NextResponse.json({ working: true }) + } catch (err) { + const msg = err instanceof Error ? err.message : String(err) + const friendly = msg.includes('401') || msg.includes('invalid_api_key') + ? 'Invalid API key' + : msg.includes('403') + ? 
'Key does not have permission' + : msg.slice(0, 120) + return NextResponse.json({ working: false, error: friendly }) + } + } + return NextResponse.json({ error: 'Unknown provider' }, { status: 400 }) } diff --git a/app/settings/page.tsx b/app/settings/page.tsx index f022624..46a19ed 100644 --- a/app/settings/page.tsx +++ b/app/settings/page.tsx @@ -36,6 +36,12 @@ const OPENAI_MODELS = [ { value: 'o3', label: 'o3', description: 'Reasoning' }, ] +const MINIMAX_MODELS = [ + { value: 'MiniMax-M2.7', label: 'M2.7', description: '1M Context, Latest' }, + { value: 'MiniMax-M2.5', label: 'M2.5', description: '204K Context' }, + { value: 'MiniMax-M2.5-highspeed', label: 'M2.5 Highspeed', description: '204K, Fastest' }, +] + interface Toast { type: 'success' | 'error' @@ -106,7 +112,7 @@ function ApiKeyField({ }: { label: string placeholder: string - fieldKey: 'anthropicApiKey' | 'openaiApiKey' + fieldKey: 'anthropicApiKey' | 'openaiApiKey' | 'minimaxApiKey' hint: string docHref: string onToast: (t: Toast) => void @@ -125,7 +131,7 @@ function ApiKeyField({ fetch('/api/settings') .then((r) => r.json()) .then((d: Record) => { - const hasKeyField = fieldKey === 'openaiApiKey' ? 'hasOpenaiKey' : 'hasAnthropicKey' + const hasKeyField = fieldKey === 'openaiApiKey' ? 'hasOpenaiKey' : fieldKey === 'minimaxApiKey' ? 
'hasMinimaxKey' : 'hasAnthropicKey' const hasKey = d[hasKeyField] const masked = d[fieldKey] as string | null if (hasKey && masked) setSavedMasked(masked) @@ -305,7 +311,7 @@ function ModelSelector({ onToast, }: { models: { value: string; label: string; description: string }[] - settingKey: 'anthropicModel' | 'openaiModel' + settingKey: 'anthropicModel' | 'openaiModel' | 'minimaxModel' defaultValue: string onToast: (t: Toast) => void }) { @@ -495,7 +501,7 @@ function CodexCliStatusBox() { ) } -function ProviderToggle({ value, onChange }: { value: 'anthropic' | 'openai'; onChange: (v: 'anthropic' | 'openai') => void }) { +function ProviderToggle({ value, onChange }: { value: 'anthropic' | 'openai' | 'minimax'; onChange: (v: 'anthropic' | 'openai' | 'minimax') => void }) { return (
+
) } function ApiKeySection({ onToast }: { onToast: (t: Toast) => void }) { - const [provider, setProvider] = useState<'anthropic' | 'openai' | null>(null) + const [provider, setProvider] = useState<'anthropic' | 'openai' | 'minimax' | null>(null) useEffect(() => { fetch('/api/settings') .then((r) => r.json()) .then((d: { provider?: string }) => { - setProvider(d.provider === 'openai' ? 'openai' : 'anthropic') + setProvider(d.provider === 'openai' ? 'openai' : d.provider === 'minimax' ? 'minimax' : 'anthropic') }) .catch(() => setProvider('anthropic')) }, []) - async function handleProviderChange(newProvider: 'anthropic' | 'openai') { + async function handleProviderChange(newProvider: 'anthropic' | 'openai' | 'minimax') { const prev = provider setProvider(newProvider) + const labels: Record = { anthropic: 'Anthropic', openai: 'OpenAI', minimax: 'MiniMax' } try { const res = await fetch('/api/settings', { method: 'POST', @@ -544,7 +561,7 @@ function ApiKeySection({ onToast }: { onToast: (t: Toast) => void }) { body: JSON.stringify({ provider: newProvider }), }) if (!res.ok) throw new Error('Failed to save provider') - onToast({ type: 'success', message: `Switched to ${newProvider === 'openai' ? 'OpenAI' : 'Anthropic'}` }) + onToast({ type: 'success', message: `Switched to ${labels[newProvider]}` }) } catch { setProvider(prev) // revert on failure onToast({ type: 'error', message: 'Failed to save provider preference' }) @@ -598,7 +615,7 @@ function ApiKeySection({ onToast }: { onToast: (t: Toast) => void }) { - ) : ( + ) : provider === 'openai' ? ( <>
@@ -622,6 +639,27 @@ function ApiKeySection({ onToast }: { onToast: (t: Toast) => void }) {
+ ) : ( +
+
+ + +

MiniMax M2.7 supports 1M context window — great for large batch categorization

+
+
)}

Keys are stored in plaintext in your local SQLite database (prisma/dev.db). Do not expose the database file.

@@ -756,7 +794,7 @@ function DangerZoneSection({ onToast }: { onToast: (t: Toast) => void }) { const TECH_STACK = [ { label: 'Next.js 15', color: 'bg-zinc-800 text-zinc-300 border-zinc-700' }, { label: 'Prisma + SQLite', color: 'bg-zinc-800 text-zinc-300 border-zinc-700' }, - { label: 'Anthropic / OpenAI', color: 'bg-blue-500/10 text-blue-300 border-blue-500/20' }, + { label: 'Anthropic / OpenAI / MiniMax', color: 'bg-blue-500/10 text-blue-300 border-blue-500/20' }, { label: 'React Flow', color: 'bg-zinc-800 text-zinc-300 border-zinc-700' }, { label: 'Tailwind CSS', color: 'bg-cyan-500/10 text-cyan-300 border-cyan-500/20' }, ] diff --git a/lib/ai-client.ts b/lib/ai-client.ts index ca8135b..affff04 100644 --- a/lib/ai-client.ts +++ b/lib/ai-client.ts @@ -2,6 +2,7 @@ import Anthropic from '@anthropic-ai/sdk' import OpenAI from 'openai' import { resolveAnthropicClient } from './claude-cli-auth' import { resolveOpenAIClient } from './openai-auth' +import { resolveMiniMaxClient } from './minimax-auth' import { getProvider } from './settings' export interface AIContentBlock { @@ -20,7 +21,7 @@ export interface AIResponse { } export interface AIClient { - provider: 'anthropic' | 'openai' + provider: 'anthropic' | 'openai' | 'minimax' createMessage(params: { model: string max_tokens: number @@ -99,12 +100,54 @@ export class OpenAIAIClient implements AIClient { } } +// Wrap MiniMax via OpenAI-compatible SDK (temperature clamped to (0, 1]) +export class MiniMaxAIClient implements AIClient { + provider = 'minimax' as const + constructor(private sdk: OpenAI) {} + + async createMessage(params: { model: string; max_tokens: number; messages: AIMessage[] }): Promise { + const messages: OpenAI.ChatCompletionMessageParam[] = params.messages.map((m): OpenAI.ChatCompletionMessageParam => { + if (typeof m.content === 'string') { + if (m.role === 'assistant') return { role: 'assistant' as const, content: m.content } + return { role: 'user' as const, content: m.content } + } + const 
parts: OpenAI.ChatCompletionContentPart[] = m.content.map(b => { + if (b.type === 'image' && b.source) { + return { + type: 'image_url' as const, + image_url: { url: `data:${b.source.media_type};base64,${b.source.data}` }, + } + } + return { type: 'text' as const, text: b.text ?? '' } + }) + if (m.role === 'assistant') return { role: 'assistant' as const, content: parts.filter((p): p is OpenAI.ChatCompletionContentPartText => p.type === 'text') } + return { role: 'user' as const, content: parts } + }) + + const completion = await this.sdk.chat.completions.create({ + model: params.model, + max_tokens: params.max_tokens, + messages, + }) + + let text = completion.choices[0]?.message?.content ?? '' + // Strip thinking tags that MiniMax M2.5+ may include + text = text.replace(/<think>[\s\S]*?<\/think>\s*/g, '') + return { text } + } +} + export async function resolveAIClient(options: { overrideKey?: string dbKey?: string } = {}): Promise<AIClient> { + const provider = await getProvider() + if (provider === 'minimax') { + const client = resolveMiniMaxClient(options) + return new MiniMaxAIClient(client) + } + if (provider === 'openai') { const client = resolveOpenAIClient(options) return new OpenAIAIClient(client) diff --git a/lib/minimax-auth.ts b/lib/minimax-auth.ts new file mode 100644 index 0000000..d0a8527 --- /dev/null +++ b/lib/minimax-auth.ts @@ -0,0 +1,34 @@ +import OpenAI from 'openai' + +/** + * Resolve a MiniMax-compatible OpenAI client. + * + * MiniMax exposes an OpenAI-compatible API at https://api.minimax.io/v1. + * Auth priority: + * 1. Override key (from request body) + * 2. DB-saved key + * 3. MINIMAX_API_KEY env var + * 4. Custom base URL (proxy) + */ +export function resolveMiniMaxClient(options: { + overrideKey?: string + dbKey?: string + baseURL?: string +} = {}): OpenAI { + const baseURL = options.baseURL ?? process.env.MINIMAX_BASE_URL ?? 
'https://api.minimax.io/v1' + + if (options.overrideKey?.trim()) { + return new OpenAI({ apiKey: options.overrideKey.trim(), baseURL }) + } + + if (options.dbKey?.trim()) { + return new OpenAI({ apiKey: options.dbKey.trim(), baseURL }) + } + + const envKey = process.env.MINIMAX_API_KEY?.trim() + if (envKey) return new OpenAI({ apiKey: envKey, baseURL }) + + if (options.baseURL) return new OpenAI({ apiKey: 'proxy', baseURL }) + + throw new Error('No MiniMax API key found. Add your key in Settings, or set MINIMAX_API_KEY.') +} diff --git a/lib/settings.ts b/lib/settings.ts index f1d7810..7da5eb8 100644 --- a/lib/settings.ts +++ b/lib/settings.ts @@ -4,12 +4,15 @@ import prisma from '@/lib/db' let _cachedModel: string | null = null let _modelCacheExpiry = 0 -let _cachedProvider: 'anthropic' | 'openai' | null = null +let _cachedProvider: 'anthropic' | 'openai' | 'minimax' | null = null let _providerCacheExpiry = 0 let _cachedOpenAIModel: string | null = null let _openAIModelCacheExpiry = 0 +let _cachedMiniMaxModel: string | null = null +let _miniMaxModelCacheExpiry = 0 + const CACHE_TTL = 5 * 60 * 1000 /** @@ -26,10 +29,11 @@ export async function getAnthropicModel(): Promise { /** * Get the active AI provider (cached for 5 minutes). */ -export async function getProvider(): Promise<'anthropic' | 'openai'> { +export async function getProvider(): Promise<'anthropic' | 'openai' | 'minimax'> { if (_cachedProvider && Date.now() < _providerCacheExpiry) return _cachedProvider const setting = await prisma.setting.findUnique({ where: { key: 'aiProvider' } }) - _cachedProvider = setting?.value === 'openai' ? 'openai' : 'anthropic' + const val = setting?.value + _cachedProvider = val === 'openai' ? 'openai' : val === 'minimax' ? 
'minimax' : 'anthropic' _providerCacheExpiry = Date.now() + CACHE_TTL return _cachedProvider } @@ -45,11 +49,23 @@ export async function getOpenAIModel(): Promise { return _cachedOpenAIModel } +/** + * Get the configured MiniMax model from settings (cached for 5 minutes). + */ +export async function getMiniMaxModel(): Promise { + if (_cachedMiniMaxModel && Date.now() < _miniMaxModelCacheExpiry) return _cachedMiniMaxModel + const setting = await prisma.setting.findUnique({ where: { key: 'minimaxModel' } }) + _cachedMiniMaxModel = setting?.value ?? 'MiniMax-M2.7' + _miniMaxModelCacheExpiry = Date.now() + CACHE_TTL + return _cachedMiniMaxModel +} + /** * Get the model for the currently active provider. */ export async function getActiveModel(): Promise { const provider = await getProvider() + if (provider === 'minimax') return getMiniMaxModel() return provider === 'openai' ? getOpenAIModel() : getAnthropicModel() } @@ -63,4 +79,6 @@ export function invalidateSettingsCache(): void { _providerCacheExpiry = 0 _cachedOpenAIModel = null _openAIModelCacheExpiry = 0 + _cachedMiniMaxModel = null + _miniMaxModelCacheExpiry = 0 } diff --git a/package.json b/package.json index 69404bb..104ac87 100644 --- a/package.json +++ b/package.json @@ -12,6 +12,7 @@ "build": "next build", "start": "next start", "lint": "eslint", + "test": "vitest run", "siftly": "tsx cli/siftly.ts" }, "dependencies": { @@ -46,6 +47,7 @@ "prisma": "^7.4.2", "tailwindcss": "^4", "tsx": "^4.21.0", - "typescript": "^5" + "typescript": "^5", + "vitest": "^4.1.1" } } diff --git a/vitest.config.ts b/vitest.config.ts new file mode 100644 index 0000000..e99a7d4 --- /dev/null +++ b/vitest.config.ts @@ -0,0 +1,14 @@ +import { defineConfig } from 'vitest/config' +import { resolve } from 'path' + +export default defineConfig({ + test: { + globals: true, + environment: 'node', + }, + resolve: { + alias: { + '@': resolve(__dirname, '.'), + }, + }, +})