Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
14 changes: 14 additions & 0 deletions .env.example
Original file line number Diff line number Diff line change
Expand Up @@ -22,6 +22,20 @@ DATABASE_URL="file:./prisma/dev.db"
# Optional: custom API base URL (proxy or local model server)
# ANTHROPIC_BASE_URL=

# ── MiniMax auth ────────────────────────────────────────────────────
# MiniMax API key (get one at https://platform.minimaxi.com)
# MINIMAX_API_KEY=eyJ...

# Optional: custom MiniMax API base URL (defaults to https://api.minimax.io/v1)
# MINIMAX_BASE_URL=

# ── OpenAI auth ─────────────────────────────────────────────────────
# OpenAI API key (alternative provider)
# OPENAI_API_KEY=sk-...

# Optional: custom OpenAI API base URL
# OPENAI_BASE_URL=

# ── Access control (optional) ────────────────────────────────────────

# Set BOTH to enable HTTP Basic Auth on the entire app.
Expand Down
5 changes: 4 additions & 1 deletion README.md
Original file line number Diff line number Diff line change
Expand Up @@ -240,7 +240,9 @@ All settings are manageable in the **Settings** page at `/settings` or via envir
| Anthropic API Key | `ANTHROPIC_API_KEY` | Optional if Claude CLI is signed in — otherwise required for AI features |
| API Base URL | `ANTHROPIC_BASE_URL` | Custom endpoint for proxies or local Anthropic-compatible models |
| AI Model | Settings page only | Haiku 4.5 (default, fastest/cheapest), Sonnet 4.6, Opus 4.6 |
| OpenAI Key | Settings page only | Alternative provider if no Anthropic key is set |
| OpenAI Key | `OPENAI_API_KEY` | Alternative provider — GPT-4.1 Mini/Nano/Full, o4-mini, o3 |
| MiniMax Key | `MINIMAX_API_KEY` | Alternative provider — M2.7 (1M context), M2.5, M2.5-highspeed |
| MiniMax Base URL | `MINIMAX_BASE_URL` | Custom MiniMax API endpoint (default: `https://api.minimax.io/v1`) |
| Database | `DATABASE_URL` | SQLite file path (default: `file:./prisma/dev.db`) |

### Custom API Endpoint
Expand Down Expand Up @@ -354,6 +356,7 @@ For Prisma command and workflow details, see:
| [SQLite](https://sqlite.org) | — | Local database — zero setup, includes FTS5 |
| [Tailwind CSS](https://tailwindcss.com) | v4 | Styling |
| [Anthropic SDK](https://docs.anthropic.com) | — | Vision, semantic tagging, categorization, search |
| [MiniMax](https://platform.minimaxi.com) | — | Alternative AI provider (M2.7 1M context, M2.5) |
| [@xyflow/react](https://xyflow.com) | 12 | Interactive mindmap graph |
| [Framer Motion](https://www.framer.com/motion/) | 12 | Animations |
| [Radix UI](https://www.radix-ui.com) | — | Accessible UI primitives |
Expand Down
276 changes: 276 additions & 0 deletions __tests__/minimax-ai-client.test.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,276 @@
import { describe, it, expect, vi, beforeEach } from 'vitest'
import { MiniMaxAIClient } from '@/lib/ai-client'
import type OpenAI from 'openai'

/**
 * Builds a minimal OpenAI SDK stub whose `chat.completions.create` resolves
 * with a single choice carrying `responseContent` as the message content.
 *
 * Accepts `null` as well — mirroring the SDK's nullable `content` field — so
 * tests for empty/absent responses can reuse this helper instead of
 * hand-rolling the nested mock shape.
 */
function createMockOpenAI(responseContent: string | null): OpenAI {
  return {
    chat: {
      completions: {
        create: vi.fn().mockResolvedValue({
          choices: [{ message: { content: responseContent } }],
        }),
      },
    },
    // Cast via unknown: only the `chat.completions.create` surface is needed
    // by the client under test; the rest of the SDK is intentionally absent.
  } as unknown as OpenAI
}

describe('MiniMaxAIClient', () => {
  /**
   * Wraps an arbitrary mocked `create` fn in the nested SDK shape
   * `{ chat: { completions: { create } } }` so individual tests do not
   * hand-roll (and drift on) that structure.
   */
  function clientWith(createFn: ReturnType<typeof vi.fn>): MiniMaxAIClient {
    const sdk = { chat: { completions: { create: createFn } } } as unknown as OpenAI
    return new MiniMaxAIClient(sdk)
  }

  /** Client whose completion resolves with the given (nullable) message content. */
  function clientWithContent(content: string | null): MiniMaxAIClient {
    return clientWith(
      vi.fn().mockResolvedValue({ choices: [{ message: { content } }] })
    )
  }

  it('should have provider set to minimax', () => {
    const client = clientWithContent('hello')
    expect(client.provider).toBe('minimax')
  })

  it('should return text from completion', async () => {
    const client = clientWithContent('Hello from MiniMax')

    const result = await client.createMessage({
      model: 'MiniMax-M2.7',
      max_tokens: 100,
      messages: [{ role: 'user', content: 'hi' }],
    })

    expect(result.text).toBe('Hello from MiniMax')
  })

  it('should strip thinking tags from response', async () => {
    const client = clientWithContent(
      '<think>internal reasoning here</think>\nActual answer'
    )

    const result = await client.createMessage({
      model: 'MiniMax-M2.5',
      max_tokens: 100,
      messages: [{ role: 'user', content: 'test' }],
    })

    expect(result.text).toBe('Actual answer')
    expect(result.text).not.toContain('<think>')
  })

  it('should strip multi-line thinking tags', async () => {
    const client = clientWithContent(
      '<think>\nline1\nline2\nline3\n</think>\n\nClean output'
    )

    const result = await client.createMessage({
      model: 'MiniMax-M2.5',
      max_tokens: 100,
      messages: [{ role: 'user', content: 'test' }],
    })

    expect(result.text).toBe('Clean output')
  })

  it('should handle empty response', async () => {
    // SDK may return `content: null`; the client must coerce this to ''.
    const client = clientWithContent(null)

    const result = await client.createMessage({
      model: 'MiniMax-M2.7',
      max_tokens: 10,
      messages: [{ role: 'user', content: 'hi' }],
    })

    expect(result.text).toBe('')
  })

  it('should handle empty choices', async () => {
    const client = clientWith(vi.fn().mockResolvedValue({ choices: [] }))

    const result = await client.createMessage({
      model: 'MiniMax-M2.7',
      max_tokens: 10,
      messages: [{ role: 'user', content: 'hi' }],
    })

    expect(result.text).toBe('')
  })

  it('should pass model and max_tokens to SDK', async () => {
    const createFn = vi.fn().mockResolvedValue({
      choices: [{ message: { content: 'ok' } }],
    })
    const client = clientWith(createFn)

    await client.createMessage({
      model: 'MiniMax-M2.7',
      max_tokens: 512,
      messages: [{ role: 'user', content: 'test' }],
    })

    expect(createFn).toHaveBeenCalledWith(
      expect.objectContaining({
        model: 'MiniMax-M2.7',
        max_tokens: 512,
      })
    )
  })

  it('should convert string messages correctly', async () => {
    const createFn = vi.fn().mockResolvedValue({
      choices: [{ message: { content: 'ok' } }],
    })
    const client = clientWith(createFn)

    await client.createMessage({
      model: 'MiniMax-M2.7',
      max_tokens: 100,
      messages: [
        { role: 'user', content: 'hello' },
        { role: 'assistant', content: 'hi there' },
        { role: 'user', content: 'how are you?' },
      ],
    })

    // String contents must pass through 1:1 with roles preserved in order.
    const call = createFn.mock.calls[0][0]
    expect(call.messages).toHaveLength(3)
    expect(call.messages[0]).toEqual({ role: 'user', content: 'hello' })
    expect(call.messages[1]).toEqual({ role: 'assistant', content: 'hi there' })
    expect(call.messages[2]).toEqual({ role: 'user', content: 'how are you?' })
  })

  it('should convert image content blocks to base64 data URLs', async () => {
    const createFn = vi.fn().mockResolvedValue({
      choices: [{ message: { content: 'I see an image' } }],
    })
    const client = clientWith(createFn)

    await client.createMessage({
      model: 'MiniMax-M2.7',
      max_tokens: 100,
      messages: [
        {
          role: 'user',
          content: [
            { type: 'text', text: 'What is this?' },
            {
              type: 'image',
              source: {
                type: 'base64',
                media_type: 'image/png',
                data: 'iVBORw0KGgo=',
              },
            },
          ],
        },
      ],
    })

    // Anthropic-style image blocks become OpenAI `image_url` data URLs.
    const call = createFn.mock.calls[0][0]
    const msg = call.messages[0]
    expect(msg.role).toBe('user')
    expect(msg.content).toHaveLength(2)
    expect(msg.content[0]).toEqual({ type: 'text', text: 'What is this?' })
    expect(msg.content[1]).toEqual({
      type: 'image_url',
      image_url: { url: 'data:image/png;base64,iVBORw0KGgo=' },
    })
  })

  it('should filter non-text parts from assistant messages', async () => {
    const createFn = vi.fn().mockResolvedValue({
      choices: [{ message: { content: 'ok' } }],
    })
    const client = clientWith(createFn)

    await client.createMessage({
      model: 'MiniMax-M2.7',
      max_tokens: 100,
      messages: [
        {
          role: 'assistant',
          content: [
            { type: 'text', text: 'some text' },
            {
              type: 'image',
              source: { type: 'base64', media_type: 'image/png', data: 'abc' },
            },
          ],
        },
      ],
    })

    const call = createFn.mock.calls[0][0]
    const msg = call.messages[0]
    expect(msg.role).toBe('assistant')
    // Only text parts should remain for assistant
    expect(msg.content.every((p: { type: string }) => p.type === 'text')).toBe(true)
  })

  it('should handle text content block with missing text', async () => {
    const createFn = vi.fn().mockResolvedValue({
      choices: [{ message: { content: 'ok' } }],
    })
    const client = clientWith(createFn)

    await client.createMessage({
      model: 'MiniMax-M2.7',
      max_tokens: 100,
      messages: [
        {
          role: 'user',
          content: [{ type: 'text' }],
        },
      ],
    })

    // A text block with no `text` field must default to '' rather than undefined.
    const call = createFn.mock.calls[0][0]
    expect(call.messages[0].content[0].text).toBe('')
  })

  it('should handle response with no thinking tags (pass-through)', async () => {
    const client = clientWithContent('Regular response without thinking')

    const result = await client.createMessage({
      model: 'MiniMax-M2.7',
      max_tokens: 100,
      messages: [{ role: 'user', content: 'test' }],
    })

    expect(result.text).toBe('Regular response without thinking')
  })

  it('should propagate SDK errors', async () => {
    const client = clientWith(
      vi.fn().mockRejectedValue(new Error('API rate limit'))
    )

    await expect(
      client.createMessage({
        model: 'MiniMax-M2.7',
        max_tokens: 100,
        messages: [{ role: 'user', content: 'test' }],
      })
    ).rejects.toThrow('API rate limit')
  })
})
Loading