From 363ee600540ea2022f1c4374f2c8afe23fbb3a4b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Maciej=20Bi=C5=82as?= Date: Fri, 20 Mar 2026 13:10:26 +0100 Subject: [PATCH 1/2] feat: add OpenCode CLI provider Route summaries through the OpenCode runtime so fetched pages can use the same local CLI workflow as the other supported coding assistants. --- README.md | 14 +- .../src/entrypoints/options/model-presets.ts | 1 + apps/chrome-extension/src/lib/settings.ts | 12 +- docs/README.md | 2 +- docs/agent.md | 2 +- docs/cli.md | 25 ++-- docs/config.md | 9 +- docs/llm.md | 6 +- src/config/parse-helpers.ts | 8 +- src/config/sections.ts | 5 + src/config/types.ts | 3 +- src/daemon/agent-model.ts | 4 +- src/daemon/chat.ts | 28 +++- src/daemon/env-snapshot.ts | 1 + src/daemon/models.ts | 6 + src/llm/cli-provider-output.ts | 124 +++++++++++++++++- src/llm/cli.ts | 55 +++++++- src/llm/provider-profile.ts | 19 ++- src/model-auto-cli.ts | 9 +- src/model-spec.ts | 21 ++- src/run/cli-fallback-state.ts | 8 +- src/run/env.ts | 12 +- src/run/flows/asset/summary-attempts.ts | 1 + src/run/help.ts | 5 +- src/run/run-config.ts | 3 +- src/run/run-models.ts | 52 +++++++- src/run/run-settings-parse.ts | 1 + src/run/summary-engine.ts | 6 + src/run/types.ts | 3 +- tests/asset.summary-attempts.test.ts | 2 + tests/chrome.daemon-payload.test.ts | 4 +- tests/daemon.chat.test.ts | 36 +++++ tests/daemon.models.test.ts | 5 + tests/llm.cli.more-branches-2.test.ts | 4 + tests/llm.cli.test.ts | 89 +++++++++++++ tests/llm.provider-capabilities.test.ts | 6 +- tests/model-spec.test.ts | 10 ++ tests/run.env.test.ts | 5 + tests/run.models.test.ts | 30 +++++ 39 files changed, 560 insertions(+), 76 deletions(-) create mode 100644 tests/run.models.test.ts diff --git a/README.md b/README.md index 47f99b73..c3bb187e 100644 --- a/README.md +++ b/README.md @@ -304,7 +304,7 @@ Use `summarize --help` or `summarize help` for the full help text. 
- `--verbose`: debug/diagnostics on stderr - `--metrics off|on|detailed`: metrics output (default `on`) -### Coding CLIs (Codex, Claude, Gemini, Agent) +### Coding CLIs (Codex, Claude, Gemini, Agent, OpenCode) Summarize can use common coding CLIs as local model backends: @@ -312,11 +312,12 @@ Summarize can use common coding CLIs as local model backends: - `claude` -> `--cli claude` / `--model cli/claude/` - `gemini` -> `--cli gemini` / `--model cli/gemini/` - `agent` (Cursor Agent CLI) -> `--cli agent` / `--model cli/agent/` +- `opencode` -> `--cli opencode` / `--model cli/opencode/` (or `cli/opencode` for `cli.opencode.model` / the OpenCode default model) Requirements: -- Binary installed and on `PATH` (or set `CODEX_PATH`, `CLAUDE_PATH`, `GEMINI_PATH`, `AGENT_PATH`) -- Provider authenticated (`codex login`, `claude auth`, `gemini` login flow, `agent login` or `CURSOR_API_KEY`) +- Binary installed and on `PATH` (or set `CODEX_PATH`, `CLAUDE_PATH`, `GEMINI_PATH`, `AGENT_PATH`, `OPENCODE_PATH`) +- Provider authenticated (`codex login`, `claude auth`, `gemini` login flow, `agent login` or `CURSOR_API_KEY`, `opencode auth login`) Quick smoke test: @@ -327,13 +328,14 @@ summarize --cli codex --plain --timeout 2m /tmp/summarize-cli-smoke.txt summarize --cli claude --plain --timeout 2m /tmp/summarize-cli-smoke.txt summarize --cli gemini --plain --timeout 2m /tmp/summarize-cli-smoke.txt summarize --cli agent --plain --timeout 2m /tmp/summarize-cli-smoke.txt +summarize --cli opencode --plain --timeout 2m /tmp/summarize-cli-smoke.txt ``` Set explicit CLI allowlist/order: ```json { - "cli": { "enabled": ["codex", "claude", "gemini", "agent"] } + "cli": { "enabled": ["codex", "claude", "gemini", "agent", "opencode"] } } ``` @@ -345,7 +347,7 @@ Configure implicit auto CLI fallback: "autoFallback": { "enabled": true, "onlyWhenNoApiKeys": true, - "order": ["claude", "gemini", "codex", "agent"] + "order": ["claude", "gemini", "codex", "agent", "opencode"] } } } @@ -361,7 +363,7 
@@ CLI attempts are prepended when: - `cli.enabled` is set (explicit allowlist/order), or - implicit auto selection is active and `cli.autoFallback` is enabled. -Default fallback behavior: only when no API keys are configured, order `claude, gemini, codex, agent`, and remember/prioritize last successful provider (`~/.summarize/cli-state.json`). +Default fallback behavior: only when no API keys are configured, order `claude, gemini, codex, agent, opencode`, and remember/prioritize last successful provider (`~/.summarize/cli-state.json`). Set explicit CLI attempts: diff --git a/apps/chrome-extension/src/entrypoints/options/model-presets.ts b/apps/chrome-extension/src/entrypoints/options/model-presets.ts index 0a8233fe..37789536 100644 --- a/apps/chrome-extension/src/entrypoints/options/model-presets.ts +++ b/apps/chrome-extension/src/entrypoints/options/model-presets.ts @@ -39,6 +39,7 @@ export function createModelPresetsController({ if (p.cliGemini === true) hints.push("cli/gemini"); if (p.cliCodex === true) hints.push("cli/codex"); if (p.cliAgent === true) hints.push("cli/agent"); + if (p.cliOpencode === true) hints.push("cli/opencode"); } if (discovery.localModelsSource && typeof discovery.localModelsSource === "object") { hints.push("local: openai/"); diff --git a/apps/chrome-extension/src/lib/settings.ts b/apps/chrome-extension/src/lib/settings.ts index d44edc4e..ab033448 100644 --- a/apps/chrome-extension/src/lib/settings.ts +++ b/apps/chrome-extension/src/lib/settings.ts @@ -113,7 +113,15 @@ function normalizeAutoCliOrder(value: unknown): string { .filter(Boolean); const out: string[] = []; for (const item of items) { - if (item !== "claude" && item !== "gemini" && item !== "codex" && item !== "agent") continue; + if ( + item !== "claude" && + item !== "gemini" && + item !== "codex" && + item !== "agent" && + item !== "opencode" + ) { + continue; + } if (!out.includes(item)) out.push(item); } return out.length > 0 ? 
out.join(",") : defaultSettings.autoCliOrder;
@@ -230,7 +238,7 @@ export const defaultSettings: Settings = {
   summaryTimestamps: true,
   extendedLogging: false,
   autoCliFallback: true,
-  autoCliOrder: "claude,gemini,codex,agent",
+  autoCliOrder: "claude,gemini,codex,agent,opencode",
   hoverPrompt:
     "Plain text only (no Markdown). Summarize the linked page concisely in 1-2 sentences; aim for 100-200 characters.",
   transcriber: "",
diff --git a/docs/README.md b/docs/README.md
index 9d0221c5..9ae51729 100644
--- a/docs/README.md
+++ b/docs/README.md
@@ -6,7 +6,7 @@ summary: "Docs index for summarize behaviors and modes."
 - `docs/chrome-extension.md` — Chrome side panel extension + daemon setup/troubleshooting
 - `docs/cache.md` — cache design + config (SQLite)
-- `docs/cli.md` — CLI models (Claude/Codex/Gemini)
+- `docs/cli.md` — CLI models (Claude/Codex/Gemini/OpenCode)
 - `docs/config.md` — config file location, precedence, and schema
 - `docs/extract-only.md` — extract mode (no summary LLM call)
 - `docs/firecrawl.md` — Firecrawl mode + API key
diff --git a/docs/agent.md b/docs/agent.md
index 20c108ce..b4dcac73 100644
--- a/docs/agent.md
+++ b/docs/agent.md
@@ -138,7 +138,7 @@ Returns cached chat history for the same cache key as `/v1/agent`.
 - **Auto model**: uses existing auto-selection logic (`buildAutoModelAttempts`), preferring API-key transports and then CLI fallback when available.
 - **Synthetic models**: created for OpenAI-compatible base URLs (local/openrouter).
 - `maxOutputTokens` defaults to 2048 or `maxOutputTokens` override.
-- CLI models are supported as auto fallback and via explicit `cli/<provider>/<model>` overrides.
+- CLI models are supported as auto fallback and via explicit `cli/<provider>` or `cli/<provider>/<model>` overrides.
 - If the daemon still says no model is available after key/install changes, restart or reinstall it so the saved environment snapshot refreshes.
## Page Content Payload
diff --git a/docs/cli.md b/docs/cli.md
index 036d7fa2..c8fbb166 100644
--- a/docs/cli.md
+++ b/docs/cli.md
@@ -1,12 +1,12 @@
 ---
-summary: "CLI model providers and config for Claude, Codex, Gemini, and Cursor Agent."
+summary: "CLI model providers and config for Claude, Codex, Gemini, Cursor Agent, and OpenCode."
 read_when:
   - "When changing CLI model integration."
 ---
 
 # CLI models
 
-Summarize can use installed CLIs (Claude, Codex, Gemini, Cursor Agent) as local model backends.
+Summarize can use installed CLIs (Claude, Codex, Gemini, Cursor Agent, OpenCode) as local model backends.
 
 ## Model ids
 
@@ -14,6 +14,8 @@ Summarize can use installed CLIs (Claude, Codex, Gemini, Cursor Agent) as local
 - `cli/codex/<model>` (e.g. `cli/codex/gpt-5.2`)
 - `cli/gemini/<model>` (e.g. `cli/gemini/gemini-3-flash`)
 - `cli/agent/<model>` (e.g. `cli/agent/gpt-5.2`)
+- `cli/opencode` (uses `cli.opencode.model` when configured, otherwise the OpenCode runtime default model)
+- `cli/opencode/<model>` (e.g. `cli/opencode/openai/gpt-5.4`)
 
 Use `--cli [provider]` (case-insensitive) for the provider default, or `--model cli/<provider>/<model>` to pin a model.
 If `--cli` is provided without a provider, auto selection is used with CLI enabled.
@@ -28,7 +30,7 @@ Auto mode can prepend CLI attempts in two ways:
 - Auto CLI fallback (`cli.autoFallback`, default enabled):
   - Applies only to **implicit** auto (when no model is set via flag/env/config).
   - Default behavior: only when no API key is configured.
-  - Default order: `claude, gemini, codex, agent`.
+  - Default order: `claude, gemini, codex, agent, opencode`.
   - Remembers + prioritizes the last successful CLI provider (`~/.summarize/cli-state.json`).
 
 Gemini CLI performance: summarize sets `GEMINI_CLI_NO_RELAUNCH=true` for Gemini CLI runs to avoid a costly self-relaunch (can be overridden by setting it yourself).
@@ -49,7 +51,7 @@ Configure auto CLI fallback: "autoFallback": { "enabled": true, "onlyWhenNoApiKeys": true, - "order": ["claude", "gemini", "codex", "agent"] + "order": ["claude", "gemini", "codex", "agent", "opencode"] } } } @@ -70,7 +72,7 @@ Note: `--model auto` (explicit) does not trigger auto CLI fallback unless `cli.e Binary lookup: - `CLAUDE_PATH`, `CODEX_PATH`, `GEMINI_PATH` (optional overrides) -- `AGENT_PATH` (optional override) +- `AGENT_PATH`, `OPENCODE_PATH` (optional overrides) - Otherwise uses `PATH` ## Attachments (images/files) @@ -82,17 +84,18 @@ path-based prompt and enables the required tool flags: - Gemini: `--yolo` and `--include-directories ` - Codex: `codex exec --output-last-message ...` and `-i ` for images - Agent: uses built-in file tools in `agent --print` mode (no extra flags) +- OpenCode: `opencode run --format json --file ` ## Config ```json { "cli": { - "enabled": ["claude", "gemini", "codex", "agent"], + "enabled": ["claude", "gemini", "codex", "agent", "opencode"], "autoFallback": { "enabled": true, "onlyWhenNoApiKeys": true, - "order": ["claude", "gemini", "codex", "agent"] + "order": ["claude", "gemini", "codex", "agent", "opencode"] }, "codex": { "model": "gpt-5.2" }, "gemini": { "model": "gemini-3-flash", "extraArgs": ["--verbose"] }, @@ -104,6 +107,9 @@ path-based prompt and enables the required tool flags: "agent": { "model": "gpt-5.2", "binary": "/usr/local/bin/agent" + }, + "opencode": { + "binary": "/usr/local/bin/opencode" } } } @@ -111,10 +117,11 @@ path-based prompt and enables the required tool flags: Notes: -- CLI output is treated as text only (no token accounting). +- CLI output is treated as text; usage and cost are recorded when the CLI exposes them. - If a CLI call fails, auto mode falls back to the next candidate. - Cursor Agent CLI uses the `agent` binary and relies on Cursor CLI auth (login or `CURSOR_API_KEY`). 
- Gemini CLI is invoked in headless mode with `--prompt` for compatibility with current Gemini CLI releases. +- OpenCode CLI is invoked as `opencode run --format json` and reads the summary prompt from stdin. ## Quick smoke test (all CLI providers) @@ -127,9 +134,11 @@ summarize --cli codex --plain --timeout 2m /tmp/summarize-cli-smoke.txt summarize --cli claude --plain --timeout 2m /tmp/summarize-cli-smoke.txt summarize --cli gemini --plain --timeout 2m /tmp/summarize-cli-smoke.txt summarize --cli agent --plain --timeout 2m /tmp/summarize-cli-smoke.txt +summarize --cli opencode --plain --timeout 2m /tmp/summarize-cli-smoke.txt ``` If Agent fails with auth, run `agent login` (interactive) or set `CURSOR_API_KEY`. +If OpenCode fails with auth, run `opencode auth login` for the provider you want to use. ## Generate free preset (OpenRouter) diff --git a/docs/config.md b/docs/config.md index fce487e2..2f12832c 100644 --- a/docs/config.md +++ b/docs/config.md @@ -316,15 +316,16 @@ Examples: ```json { "cli": { - "enabled": ["gemini", "agent"], + "enabled": ["gemini", "agent", "opencode"], "autoFallback": { "enabled": true, "onlyWhenNoApiKeys": true, - "order": ["claude", "gemini", "codex", "agent"] + "order": ["claude", "gemini", "codex", "agent", "opencode"] }, "codex": { "model": "gpt-5.2" }, "claude": { "binary": "/usr/local/bin/claude", "extraArgs": ["--verbose"] }, - "agent": { "binary": "/usr/local/bin/agent", "model": "gpt-5.2" } + "agent": { "binary": "/usr/local/bin/agent", "model": "gpt-5.2" }, + "opencode": { "binary": "/usr/local/bin/opencode" } } } ``` @@ -333,7 +334,7 @@ Notes: - `cli.enabled` is an allowlist (and order) for auto + explicit CLI model ids. - `cli.autoFallback` controls implicit-auto CLI fallback when `cli.enabled` is not set. -- Default auto fallback order: `claude, gemini, codex, agent`. +- Default auto fallback order: `claude, gemini, codex, agent, opencode`. 
- Auto fallback stores the last successful provider in `~/.summarize/cli-state.json` and prioritizes it on the next run. - `cli..binary` overrides CLI binary discovery. - `cli..extraArgs` appends extra CLI args. diff --git a/docs/llm.md b/docs/llm.md index 333c839b..d6690bd3 100644 --- a/docs/llm.md +++ b/docs/llm.md @@ -34,7 +34,7 @@ installed, auto mode can use local CLI models via `cli.enabled` or implicit auto - `ANTHROPIC_API_KEY` (required for `anthropic/...` models) - `ANTHROPIC_BASE_URL` (optional; override Anthropic API endpoint) - `SUMMARIZE_MODEL` (optional; overrides default model selection) -- `CLAUDE_PATH` / `CODEX_PATH` / `GEMINI_PATH` / `AGENT_PATH` (optional; override CLI binary paths) +- `CLAUDE_PATH` / `CODEX_PATH` / `GEMINI_PATH` / `AGENT_PATH` / `OPENCODE_PATH` (optional; override CLI binary paths) ## Flags @@ -44,6 +44,8 @@ installed, auto mode can use local CLI models via `cli.enabled` or implicit auto - `cli/claude/sonnet` - `cli/gemini/gemini-3-flash` - `cli/agent/gpt-5.2` + - `cli/opencode` + - `cli/opencode/openai/gpt-5.4` - `google/gemini-3-flash` - `openai/gpt-5-mini` - `nvidia/z-ai/glm5` @@ -53,7 +55,7 @@ installed, auto mode can use local CLI models via `cli.enabled` or implicit auto - `anthropic/claude-sonnet-4-5` - `openrouter/meta-llama/llama-3.3-70b-instruct:free` (force OpenRouter) - `--cli [provider]` - - Examples: `--cli claude`, `--cli Gemini`, `--cli codex`, `--cli agent` (equivalent to `--model cli/`); `--cli` alone uses auto selection with CLI enabled. + - Examples: `--cli claude`, `--cli Gemini`, `--cli codex`, `--cli agent`, `--cli opencode` (equivalent to `--model cli/`); `--cli` alone uses auto selection with CLI enabled. 
- `--model auto` - See `docs/model-auto.md` - `--model ` diff --git a/src/config/parse-helpers.ts b/src/config/parse-helpers.ts index a135d3bb..7d404f7e 100644 --- a/src/config/parse-helpers.ts +++ b/src/config/parse-helpers.ts @@ -10,7 +10,13 @@ export function parseOptionalBaseUrl(raw: unknown): string | undefined { export function parseCliProvider(value: unknown, path: string): CliProvider { const trimmed = typeof value === "string" ? value.trim().toLowerCase() : ""; - if (trimmed === "claude" || trimmed === "codex" || trimmed === "gemini" || trimmed === "agent") { + if ( + trimmed === "claude" || + trimmed === "codex" || + trimmed === "gemini" || + trimmed === "agent" || + trimmed === "opencode" + ) { return trimmed as CliProvider; } throw new Error(`Invalid config file ${path}: unknown CLI provider "${String(value)}".`); diff --git a/src/config/sections.ts b/src/config/sections.ts index 105f7ec6..8ba26bcf 100644 --- a/src/config/sections.ts +++ b/src/config/sections.ts @@ -303,6 +303,9 @@ export function parseCliConfig(root: Record, path: string): Cli const codex = value.codex ? parseCliProviderConfig(value.codex, path, "codex") : undefined; const gemini = value.gemini ? parseCliProviderConfig(value.gemini, path, "gemini") : undefined; const agent = value.agent ? parseCliProviderConfig(value.agent, path, "agent") : undefined; + const opencode = value.opencode + ? parseCliProviderConfig(value.opencode, path, "opencode") + : undefined; if (typeof value.autoFallback !== "undefined" && typeof value.magicAuto !== "undefined") { throw new Error( `Invalid config file ${path}: use only one of "cli.autoFallback" or legacy "cli.magicAuto".`, @@ -334,6 +337,7 @@ export function parseCliConfig(root: Record, path: string): Cli codex || gemini || agent || + opencode || autoFallback || promptOverride || typeof allowTools === "boolean" || @@ -345,6 +349,7 @@ export function parseCliConfig(root: Record, path: string): Cli ...(codex ? { codex } : {}), ...(gemini ? 
{ gemini } : {}), ...(agent ? { agent } : {}), + ...(opencode ? { opencode } : {}), ...(autoFallback ? { autoFallback } : {}), ...(promptOverride ? { promptOverride } : {}), ...(typeof allowTools === "boolean" ? { allowTools } : {}), diff --git a/src/config/types.ts b/src/config/types.ts index b6a7ec4a..7ef69d63 100644 --- a/src/config/types.ts +++ b/src/config/types.ts @@ -1,6 +1,6 @@ export type AutoRuleKind = "text" | "website" | "youtube" | "image" | "video" | "file"; export type VideoMode = "auto" | "transcript" | "understand"; -export type CliProvider = "claude" | "codex" | "gemini" | "agent"; +export type CliProvider = "claude" | "codex" | "gemini" | "agent" | "opencode"; export type CliProviderConfig = { binary?: string; extraArgs?: string[]; @@ -18,6 +18,7 @@ export type CliConfig = { codex?: CliProviderConfig; gemini?: CliProviderConfig; agent?: CliProviderConfig; + opencode?: CliProviderConfig; autoFallback?: CliAutoFallbackConfig; magicAuto?: CliAutoFallbackConfig; promptOverride?: string; diff --git a/src/daemon/agent-model.ts b/src/daemon/agent-model.ts index 1419c01c..e2dbd9e1 100644 --- a/src/daemon/agent-model.ts +++ b/src/daemon/agent-model.ts @@ -133,6 +133,7 @@ function buildNoAgentModelAvailableError({ codex?: boolean; gemini?: boolean; agent?: boolean; + opencode?: boolean; }; }): Error { const checked = attempts.map((attempt) => attempt.userModelId); @@ -152,7 +153,8 @@ function buildNoAgentModelAvailableError({ if (attempt.requiredEnv === "CLI_CLAUDE") return "claude"; if (attempt.requiredEnv === "CLI_CODEX") return "codex"; if (attempt.requiredEnv === "CLI_GEMINI") return "gemini"; - return "agent"; + if (attempt.requiredEnv === "CLI_AGENT") return "agent"; + return "opencode"; }) .filter((provider) => !cliAvailability[provider as keyof typeof cliAvailability]), ), diff --git a/src/daemon/chat.ts b/src/daemon/chat.ts index 4c8efab2..a0b19eab 100644 --- a/src/daemon/chat.ts +++ b/src/daemon/chat.ts @@ -1,5 +1,5 @@ import type { Context, 
Message } from "@mariozechner/pi-ai"; -import type { SummarizeConfig } from "../config.js"; +import type { CliProvider, SummarizeConfig } from "../config.js"; import { runCliModel } from "../llm/cli.js"; import type { LlmApiKeys } from "../llm/generate-text.js"; import { streamTextWithContext } from "../llm/generate-text.js"; @@ -24,6 +24,24 @@ const SYSTEM_PROMPT = `You are Summarize Chat. You answer questions about the current page content. Keep responses concise and grounded in the page.`; +function resolveConfiguredCliModel( + provider: CliProvider, + configForCli: SummarizeConfig | null | undefined, +): string | null { + const cli = configForCli?.cli; + const raw = + provider === "claude" + ? cli?.claude?.model + : provider === "codex" + ? cli?.codex?.model + : provider === "gemini" + ? cli?.gemini?.model + : provider === "agent" + ? cli?.agent?.model + : cli?.opencode?.model; + return typeof raw === "string" && raw.trim().length > 0 ? raw.trim() : null; +} + function normalizeMessages(messages: Message[]): Message[] { return messages.map((message) => ({ ...message, @@ -114,13 +132,17 @@ export async function streamChatResponse({ return null; } if (requested.transport === "cli") { + const cliModel = + requested.cliModel ?? resolveConfiguredCliModel(requested.cliProvider, configForCli); return { - userModelId: requested.userModelId, + userModelId: cliModel + ? 
`cli/${requested.cliProvider}/${cliModel}` + : requested.userModelId, modelId: null, forceOpenRouter: false, transport: "cli" as const, cliProvider: requested.cliProvider, - cliModel: requested.cliModel, + cliModel, }; } return { diff --git a/src/daemon/env-snapshot.ts b/src/daemon/env-snapshot.ts index caf1ae41..66bc6c7e 100644 --- a/src/daemon/env-snapshot.ts +++ b/src/daemon/env-snapshot.ts @@ -37,6 +37,7 @@ const ENV_KEYS = [ "CODEX_PATH", "GEMINI_PATH", "AGENT_PATH", + "OPENCODE_PATH", "UVX_PATH", ] as const; diff --git a/src/daemon/models.ts b/src/daemon/models.ts index e896c0c6..0ba088a3 100644 --- a/src/daemon/models.ts +++ b/src/daemon/models.ts @@ -138,6 +138,7 @@ export async function buildModelPickerOptions({ cliGemini: boolean; cliCodex: boolean; cliAgent: boolean; + cliOpencode: boolean; }; openaiBaseUrl: string | null; localModelsSource: { kind: "openai-compatible"; baseUrlHost: string } | null; @@ -156,12 +157,14 @@ export async function buildModelPickerOptions({ cliGemini: false, cliCodex: false, cliAgent: false, + cliOpencode: false, }; const cliAvailability = resolveCliAvailability({ env: envForRun, config: configForCli }); providers.cliClaude = Boolean(cliAvailability.claude); providers.cliGemini = Boolean(cliAvailability.gemini); providers.cliCodex = Boolean(cliAvailability.codex); providers.cliAgent = Boolean(cliAvailability.agent); + providers.cliOpencode = Boolean(cliAvailability.opencode); const options: ModelPickerOption[] = [{ id: "auto", label: "Auto" }]; @@ -177,6 +180,9 @@ export async function buildModelPickerOptions({ if (providers.cliAgent) { options.push({ id: "cli/agent", label: "CLI: Cursor Agent" }); } + if (providers.cliOpencode) { + options.push({ id: "cli/opencode", label: "CLI: OpenCode" }); + } if (providers.openrouter) { options.push({ id: "free", label: "Free (OpenRouter)" }); diff --git a/src/llm/cli-provider-output.ts b/src/llm/cli-provider-output.ts index ffdda5e6..93c5b795 100644 --- a/src/llm/cli-provider-output.ts 
+++ b/src/llm/cli-provider-output.ts @@ -1,12 +1,12 @@ import type { CliProvider } from "../config.js"; import type { LlmTokenUsage } from "./generate-text.js"; -export type JsonCliProvider = Exclude; +export type JsonCliProvider = Exclude; const JSON_RESULT_FIELDS = ["result", "response", "output", "message", "text"] as const; export function isJsonCliProvider(provider: CliProvider): provider is JsonCliProvider { - return provider !== "codex"; + return provider !== "codex" && provider !== "opencode"; } const parseJsonFromOutput = (output: string): unknown | null => { @@ -36,6 +36,12 @@ const toNumber = (value: unknown): number | null => { return value; }; +const sumNullable = (current: number | null, next: number | null): number | null => { + if (typeof next !== "number") return current; + if (typeof current !== "number") return next; + return current + next; +}; + const parseClaudeUsage = (payload: Record): LlmTokenUsage | null => { const usage = payload.usage; if (!usage || typeof usage !== "object") return null; @@ -151,6 +157,120 @@ export const parseCodexUsageFromJsonl = ( return { usage, costUsd }; }; +function parseOpenCodeTokens(payload: Record): LlmTokenUsage | null { + const tokens = payload.tokens; + if (!tokens || typeof tokens !== "object") return null; + const record = tokens as Record; + const promptTokens = toNumber(record.input); + const completionTokens = toNumber(record.output); + const totalTokens = + toNumber(record.total) ?? + (typeof promptTokens === "number" && typeof completionTokens === "number" + ? 
promptTokens + completionTokens + : null); + if (promptTokens === null && completionTokens === null && totalTokens === null) return null; + return { promptTokens, completionTokens, totalTokens }; +} + +function extractOpenCodeErrorMessage(payload: Record): string | null { + const error = payload.error; + if (!error) return null; + if (typeof error === "string" && error.trim().length > 0) return error.trim(); + if (typeof error !== "object") return null; + const errorRecord = error as Record; + const data = errorRecord.data; + if (data && typeof data === "object") { + const message = (data as Record).message; + if (typeof message === "string" && message.trim().length > 0) return message.trim(); + } + const message = errorRecord.message; + if (typeof message === "string" && message.trim().length > 0) return message.trim(); + const name = errorRecord.name; + return typeof name === "string" && name.trim().length > 0 ? name.trim() : null; +} + +export function parseOpenCodeOutputFromJsonl(output: string): { + text: string; + usage: LlmTokenUsage | null; + costUsd: number | null; +} { + const trimmed = output.trim(); + if (!trimmed) { + throw new Error("CLI returned empty output"); + } + + const lines = trimmed + .split(/\r?\n/) + .map((line) => line.trim()) + .filter((line) => line.length > 0); + + const textParts: string[] = []; + const errorMessages: string[] = []; + let promptTokens: number | null = null; + let completionTokens: number | null = null; + let totalTokens: number | null = null; + let costUsd: number | null = null; + let sawStructuredEvent = false; + + for (const line of lines) { + if (!line.startsWith("{")) continue; + try { + const parsed = JSON.parse(line) as Record; + sawStructuredEvent = true; + + if (parsed.type === "text") { + const part = parsed.part; + if (part && typeof part === "object") { + const text = (part as Record).text; + if (typeof text === "string" && text.length > 0) { + textParts.push(text); + } + } + continue; + } + + if 
(parsed.type === "step_finish") { + const part = parsed.part; + if (!part || typeof part !== "object") continue; + const usage = parseOpenCodeTokens(part as Record); + if (usage) { + promptTokens = sumNullable(promptTokens, usage.promptTokens); + completionTokens = sumNullable(completionTokens, usage.completionTokens); + totalTokens = sumNullable(totalTokens, usage.totalTokens); + } + const cost = toNumber((part as Record).cost); + if (typeof cost === "number") { + costUsd = typeof costUsd === "number" ? costUsd + cost : cost; + } + continue; + } + + if (parsed.type === "error") { + const message = extractOpenCodeErrorMessage(parsed); + if (message) errorMessages.push(message); + } + } catch { + // ignore malformed JSON lines + } + } + + const text = textParts.join("").trim(); + const usage = + promptTokens !== null || completionTokens !== null || totalTokens !== null + ? { promptTokens, completionTokens, totalTokens } + : null; + if (text) { + return { text, usage, costUsd }; + } + if (errorMessages.length > 0) { + throw new Error(errorMessages.join("\n")); + } + if (sawStructuredEvent) { + throw new Error("CLI returned empty output"); + } + return { text: trimmed, usage: null, costUsd: null }; +} + function extractJsonResultText(payload: Record): string | null { for (const key of JSON_RESULT_FIELDS) { const value = payload[key]; diff --git a/src/llm/cli.ts b/src/llm/cli.ts index 09a8b6f3..9fb54c98 100644 --- a/src/llm/cli.ts +++ b/src/llm/cli.ts @@ -8,6 +8,7 @@ import { execCliWithInput } from "./cli-exec.js"; import { isJsonCliProvider, parseCodexUsageFromJsonl, + parseOpenCodeOutputFromJsonl, parseJsonProviderOutput, type JsonCliProvider, } from "./cli-provider-output.js"; @@ -18,6 +19,7 @@ const DEFAULT_BINARIES: Record = { codex: "codex", gemini: "gemini", agent: "agent", + opencode: "opencode", }; const PROVIDER_PATH_ENV: Record = { @@ -25,6 +27,7 @@ const PROVIDER_PATH_ENV: Record = { codex: "CODEX_PATH", gemini: "GEMINI_PATH", agent: "AGENT_PATH", + 
opencode: "OPENCODE_PATH", }; type RunCliModelOptions = { @@ -57,7 +60,8 @@ function getCliProviderConfig( if (provider === "claude") return config.claude; if (provider === "codex") return config.codex; if (provider === "gemini") return config.gemini; - return config.agent; + if (provider === "agent") return config.agent; + return config.opencode; } export function isCliDisabled( @@ -147,19 +151,26 @@ export async function runCliModel({ : env; const providerConfig = getCliProviderConfig(provider, config); + const requestedModel = isNonEmptyString(model) + ? model.trim() + : isNonEmptyString(providerConfig?.model) + ? providerConfig.model.trim() + : null; + const providerExtraArgs: string[] = []; if (providerConfig?.extraArgs?.length) { - args.push(...providerConfig.extraArgs); + providerExtraArgs.push(...providerConfig.extraArgs); } if (extraArgs?.length) { - args.push(...extraArgs); + providerExtraArgs.push(...extraArgs); } if (provider === "codex") { + args.push(...providerExtraArgs); const outputDir = await fs.mkdtemp(path.join(tmpdir(), "summarize-codex-")); const outputPath = path.join(outputDir, "last-message.txt"); args.push("exec", "--output-last-message", outputPath, "--skip-git-repo-check", "--json"); - if (model && model.trim().length > 0) { - args.push("-m", model.trim()); + if (requestedModel) { + args.push("-m", requestedModel); } const hasVerbosityOverride = args.some((arg) => arg.includes("text.verbosity")); if (!hasVerbosityOverride) { @@ -191,10 +202,42 @@ export async function runCliModel({ throw new Error("CLI returned empty output"); } + if (provider === "opencode") { + const isolatedCwd = + !allowTools && !cwd ? 
await fs.mkdtemp(path.join(tmpdir(), "summarize-opencode-")) : null; + try { + args.push("run", ...providerExtraArgs, "--format", "json"); + if (requestedModel) { + args.push("--model", requestedModel); + } + const { stdout } = await execCliWithInput({ + execFileImpl: execFileFn, + cmd: binary, + args, + input: prompt, + timeoutMs, + env: effectiveEnv, + cwd: isolatedCwd ?? cwd, + }); + return parseOpenCodeOutputFromJsonl(stdout); + } finally { + if (isolatedCwd) { + await fs.rm(isolatedCwd, { recursive: true, force: true }).catch(() => {}); + } + } + } + if (!isJsonCliProvider(provider)) { throw new Error(`Unsupported CLI provider "${provider}".`); } - const input = appendJsonProviderArgs({ provider, args, allowTools, model, prompt }); + args.push(...providerExtraArgs); + const input = appendJsonProviderArgs({ + provider, + args, + allowTools, + model: requestedModel, + prompt, + }); const { stdout } = await execCliWithInput({ execFileImpl: execFileFn, diff --git a/src/llm/provider-profile.ts b/src/llm/provider-profile.ts index b5316718..e5ddec82 100644 --- a/src/llm/provider-profile.ts +++ b/src/llm/provider-profile.ts @@ -16,7 +16,8 @@ export type RequiredModelEnv = | "CLI_CLAUDE" | "CLI_CODEX" | "CLI_GEMINI" - | "CLI_AGENT"; + | "CLI_AGENT" + | "CLI_OPENCODE"; type GatewayProviderProfile = { requiredEnv: RequiredModelEnv; @@ -64,14 +65,21 @@ const GATEWAY_PROVIDER_PROFILES: Record }, }; -export const DEFAULT_CLI_MODELS: Record = { +export const DEFAULT_CLI_MODELS: Record = { claude: "sonnet", codex: "gpt-5.2", gemini: "gemini-3-flash", agent: "gpt-5.2", + opencode: null, }; -export const DEFAULT_AUTO_CLI_ORDER: CliProvider[] = ["claude", "gemini", "codex", "agent"]; +export const DEFAULT_AUTO_CLI_ORDER: CliProvider[] = [ + "claude", + "gemini", + "codex", + "agent", + "opencode", +]; export function parseCliProviderName(raw: string): CliProvider | null { const normalized = raw.trim().toLowerCase(); @@ -79,6 +87,7 @@ export function parseCliProviderName(raw: 
string): CliProvider | null { if (normalized === "codex") return "codex"; if (normalized === "gemini") return "gemini"; if (normalized === "agent") return "agent"; + if (normalized === "opencode") return "opencode"; return null; } @@ -89,7 +98,9 @@ export function requiredEnvForCliProvider(provider: CliProvider): RequiredModelE ? "CLI_GEMINI" : provider === "agent" ? "CLI_AGENT" - : "CLI_CLAUDE"; + : provider === "opencode" + ? "CLI_OPENCODE" + : "CLI_CLAUDE"; } export function getGatewayProviderProfile(provider: GatewayProvider): GatewayProviderProfile { diff --git a/src/model-auto-cli.ts b/src/model-auto-cli.ts index 751d7972..b37ef3cf 100644 --- a/src/model-auto-cli.ts +++ b/src/model-auto-cli.ts @@ -95,9 +95,8 @@ export function prependCliCandidates({ const add = (provider: CliProvider, modelOverride?: string) => { if (hasExplicitEnabledList && !isCliProviderEnabled(provider, config)) return; - const model = modelOverride?.trim() || DEFAULT_CLI_MODELS[provider]; - if (!model) return; - const id = `cli/${provider}/${model}`; + const model = modelOverride?.trim() || DEFAULT_CLI_MODELS[provider] || null; + const id = model ? `cli/${provider}/${model}` : `cli/${provider}`; if (!cliCandidates.includes(id)) cliCandidates.push(id); }; @@ -109,7 +108,9 @@ export function prependCliCandidates({ ? cli?.codex?.model : provider === "agent" ? cli?.agent?.model - : cli?.claude?.model; + : provider === "opencode" + ? 
cli?.opencode?.model + : cli?.claude?.model; add(provider, modelOverride); } diff --git a/src/model-spec.ts b/src/model-spec.ts index d9e179d4..02b72a09 100644 --- a/src/model-spec.ts +++ b/src/model-spec.ts @@ -1,18 +1,12 @@ import type { CliProvider } from "./config.js"; import { normalizeGatewayStyleModelId, parseGatewayStyleModelId } from "./llm/model-id.js"; import { + DEFAULT_CLI_MODELS, type RequiredModelEnv, requiredEnvForCliProvider, resolveRequiredEnvForModelId, } from "./llm/provider-capabilities.js"; -const DEFAULT_CLI_MODELS: Record = { - claude: "sonnet", - codex: "gpt-5.2", - gemini: "gemini-3-flash", - agent: "gpt-5.2", -}; - export type FixedModelSpec = | { transport: "native"; @@ -46,7 +40,7 @@ export type FixedModelSpec = llmModelId: null; openrouterProviders: null; forceOpenRouter: false; - requiredEnv: "CLI_CLAUDE" | "CLI_CODEX" | "CLI_GEMINI" | "CLI_AGENT"; + requiredEnv: "CLI_CLAUDE" | "CLI_CODEX" | "CLI_GEMINI" | "CLI_AGENT" | "CLI_OPENCODE"; cliProvider: CliProvider; cliModel: string | null; }; @@ -131,18 +125,21 @@ export function parseRequestedModelId(raw: string): RequestedModel { providerRaw !== "claude" && providerRaw !== "codex" && providerRaw !== "gemini" && - providerRaw !== "agent" + providerRaw !== "agent" && + providerRaw !== "opencode" ) { - throw new Error(`Invalid CLI model id "${trimmed}". Expected cli//.`); + throw new Error( + `Invalid CLI model id "${trimmed}". Expected cli/ or cli//.`, + ); } const cliProvider = providerRaw as CliProvider; const requestedModel = parts.slice(2).join("/").trim(); const cliModel = requestedModel.length > 0 ? requestedModel : DEFAULT_CLI_MODELS[cliProvider]; const requiredEnv = requiredEnvForCliProvider(cliProvider) as Extract< RequiredModelEnv, - "CLI_CLAUDE" | "CLI_CODEX" | "CLI_GEMINI" | "CLI_AGENT" + "CLI_CLAUDE" | "CLI_CODEX" | "CLI_GEMINI" | "CLI_AGENT" | "CLI_OPENCODE" >; - const userModelId = `cli/${cliProvider}/${cliModel}`; + const userModelId = cliModel ? 
`cli/${cliProvider}/${cliModel}` : `cli/${cliProvider}`; return { kind: "fixed", transport: "cli", diff --git a/src/run/cli-fallback-state.ts b/src/run/cli-fallback-state.ts index 9bdd144e..5387b6e2 100644 --- a/src/run/cli-fallback-state.ts +++ b/src/run/cli-fallback-state.ts @@ -11,7 +11,13 @@ function resolveStatePath(env: Record): string | nul } function parseCliProvider(value: unknown): CliProvider | null { - if (value === "claude" || value === "codex" || value === "gemini" || value === "agent") { + if ( + value === "claude" || + value === "codex" || + value === "gemini" || + value === "agent" || + value === "opencode" + ) { return value; } return null; diff --git a/src/run/env.ts b/src/run/env.ts index d9720e2f..49e337b4 100644 --- a/src/run/env.ts +++ b/src/run/env.ts @@ -54,7 +54,7 @@ export function resolveCliAvailability({ config: ConfigForCli; }): Partial> { const cliConfig = config?.cli ?? null; - const providers: CliProvider[] = ["claude", "codex", "gemini", "agent"]; + const providers: CliProvider[] = ["claude", "codex", "gemini", "agent", "opencode"]; const availability: Partial> = {}; for (const provider of providers) { if (isCliDisabled(provider, cliConfig)) { @@ -80,9 +80,12 @@ export function parseCliUserModelId(modelId: string): { provider !== "claude" && provider !== "codex" && provider !== "gemini" && - provider !== "agent" + provider !== "agent" && + provider !== "opencode" ) { - throw new Error(`Invalid CLI model id "${modelId}". Expected cli//.`); + throw new Error( + `Invalid CLI model id "${modelId}". Expected cli/ or cli//.`, + ); } const model = parts.slice(2).join("/").trim(); return { provider, model: model.length > 0 ? 
model : null }; @@ -94,7 +97,8 @@ export function parseCliProviderArg(raw: string): CliProvider { normalized === "claude" || normalized === "codex" || normalized === "gemini" || - normalized === "agent" + normalized === "agent" || + normalized === "opencode" ) { return normalized as CliProvider; } diff --git a/src/run/flows/asset/summary-attempts.ts b/src/run/flows/asset/summary-attempts.ts index 33e0dfec..1c848790 100644 --- a/src/run/flows/asset/summary-attempts.ts +++ b/src/run/flows/asset/summary-attempts.ts @@ -119,6 +119,7 @@ export async function buildAssetCliContext({ const extraArgsByProvider: Partial> = { gemini: ["--include-directories", dir], codex: args.attachment.kind === "image" ? ["-i", filePath] : undefined, + opencode: ["--file", filePath], }; return { diff --git a/src/run/help.ts b/src/run/help.ts index 1011204f..8fe34138 100644 --- a/src/run/help.ts +++ b/src/run/help.ts @@ -110,7 +110,7 @@ export function buildProgram() { .option("--retries ", "LLM retry attempts on timeout (default: 1).", "1") .option( "--model ", - "LLM model id: auto, , cli//, xai/..., openai/..., nvidia/..., google/..., anthropic/..., zai/... or openrouter// (default: auto)", + "LLM model id: auto, , cli/ or cli//, xai/..., openai/..., nvidia/..., google/..., anthropic/..., zai/... or openrouter// (default: auto)", undefined, ) .option( @@ -126,7 +126,7 @@ export function buildProgram() { .addOption( new Option( "--cli [provider]", - "Use a CLI provider: claude, gemini, codex, agent (equivalent to --model cli/). If omitted, use auto selection with CLI enabled.", + "Use a CLI provider: claude, gemini, codex, agent, opencode (equivalent to --model cli/). 
If omitted, use auto selection with CLI enabled.", ), ) .option("--extract", "Print extracted content and exit (no LLM summary)", false) @@ -273,6 +273,7 @@ ${heading("Env Vars")} CODEX_PATH optional (path to Codex CLI binary) GEMINI_PATH optional (path to Gemini CLI binary) AGENT_PATH optional (path to Cursor Agent CLI binary) + OPENCODE_PATH optional (path to OpenCode CLI binary) SUMMARIZE_MODEL optional (overrides default model selection) SUMMARIZE_THEME optional (${CLI_THEME_NAMES.join(", ")}) SUMMARIZE_TRUECOLOR optional (force 24-bit color) diff --git a/src/run/run-config.ts b/src/run/run-config.ts index 383a38b5..0fcc83b7 100644 --- a/src/run/run-config.ts +++ b/src/run/run-config.ts @@ -2,6 +2,7 @@ import type { CliProvider, SummarizeConfig } from "../config.js"; import { loadSummarizeConfig } from "../config.js"; import { parseVideoMode } from "../flags.js"; import { type OutputLanguage, parseOutputLanguage } from "../language.js"; +import { DEFAULT_AUTO_CLI_ORDER } from "../llm/provider-capabilities.js"; import { parseBooleanEnv } from "./env.js"; export type ConfigState = { @@ -57,7 +58,7 @@ export function resolveConfigState({ const cliEnabledOverride: CliProvider[] | null = (() => { if (!cliFlagPresent || cliProviderArg) return null; if (Array.isArray(config?.cli?.enabled)) return config.cli.enabled; - return ["claude", "gemini", "codex", "agent"]; + return DEFAULT_AUTO_CLI_ORDER.slice(); })(); const cliConfigForRun = cliEnabledOverride ? { ...(config?.cli ?? 
{}), enabled: cliEnabledOverride } diff --git a/src/run/run-models.ts b/src/run/run-models.ts index f9b76524..4caf8e7e 100644 --- a/src/run/run-models.ts +++ b/src/run/run-models.ts @@ -1,8 +1,43 @@ -import type { ModelConfig, SummarizeConfig } from "../config.js"; +import type { CliProvider, ModelConfig, SummarizeConfig } from "../config.js"; import type { RequestedModel } from "../model-spec.js"; import { parseRequestedModelId } from "../model-spec.js"; import { BUILTIN_MODELS } from "./constants.js"; +function resolveConfiguredCliModel( + provider: CliProvider, + config: SummarizeConfig | null, +): string | null { + const cli = config?.cli; + const raw = + provider === "claude" + ? cli?.claude?.model + : provider === "codex" + ? cli?.codex?.model + : provider === "gemini" + ? cli?.gemini?.model + : provider === "agent" + ? cli?.agent?.model + : cli?.opencode?.model; + return typeof raw === "string" && raw.trim().length > 0 ? raw.trim() : null; +} + +function resolveRequestedCliModelFromConfig( + requestedModel: RequestedModel, + config: SummarizeConfig | null, +): RequestedModel { + if (requestedModel.kind !== "fixed" || requestedModel.transport !== "cli") return requestedModel; + if (requestedModel.cliModel) return requestedModel; + + const configuredModel = resolveConfiguredCliModel(requestedModel.cliProvider, config); + if (!configuredModel) return requestedModel; + + return { + ...requestedModel, + userModelId: `cli/${requestedModel.cliProvider}/${configuredModel}`, + cliModel: configuredModel, + }; +} + export type ModelSelection = { requestedModel: RequestedModel; requestedModelInput: string; @@ -101,18 +136,23 @@ export function resolveModelSelection({ return parseRequestedModelId(requestedModelInput); })(); + const requestedModelResolved = resolveRequestedCliModelFromConfig( + requestedModel, + configForModelSelection, + ); + const requestedModelLabel = isNamedModelSelection ? 
requestedModelInput - : requestedModel.kind === "auto" + : requestedModelResolved.kind === "auto" ? "auto" - : requestedModel.userModelId; + : requestedModelResolved.userModelId; - const isFallbackModel = requestedModel.kind === "auto"; + const isFallbackModel = requestedModelResolved.kind === "auto"; const isImplicitAutoSelection = - requestedModel.kind === "auto" && requestedModelSource === "default"; + requestedModelResolved.kind === "auto" && requestedModelSource === "default"; return { - requestedModel, + requestedModel: requestedModelResolved, requestedModelInput, requestedModelLabel, isNamedModelSelection, diff --git a/src/run/run-settings-parse.ts b/src/run/run-settings-parse.ts index ea5c5c62..74f775b7 100644 --- a/src/run/run-settings-parse.ts +++ b/src/run/run-settings-parse.ts @@ -41,6 +41,7 @@ export const parseCliProvider = (raw: string): CliProvider | null => { if (normalized === "gemini") return "gemini"; if (normalized === "codex") return "codex"; if (normalized === "agent") return "agent"; + if (normalized === "opencode") return "opencode"; return null; }; diff --git a/src/run/summary-engine.ts b/src/run/summary-engine.ts index c8b000c9..ad1c20a9 100644 --- a/src/run/summary-engine.ts +++ b/src/run/summary-engine.ts @@ -119,6 +119,9 @@ export function createSummaryEngine(deps: SummaryEngineDeps) { if (requiredEnv === "CLI_AGENT") { return Boolean(deps.cliAvailability.agent); } + if (requiredEnv === "CLI_OPENCODE") { + return Boolean(deps.cliAvailability.opencode); + } if (requiredEnv === "GEMINI_API_KEY") { return deps.keyFlags.googleConfigured; } @@ -153,6 +156,9 @@ export function createSummaryEngine(deps: SummaryEngineDeps) { if (attempt.requiredEnv === "CLI_AGENT") { return `Cursor Agent CLI not found for model ${attempt.userModelId}. Install Cursor CLI or set AGENT_PATH.`; } + if (attempt.requiredEnv === "CLI_OPENCODE") { + return `OpenCode CLI not found for model ${attempt.userModelId}. 
Install OpenCode CLI or set OPENCODE_PATH.`; + } return `Missing ${attempt.requiredEnv} for model ${attempt.userModelId}. Set the env var or choose a different --model.`; }; diff --git a/src/run/types.ts b/src/run/types.ts index 1f1b862c..4217eb4e 100644 --- a/src/run/types.ts +++ b/src/run/types.ts @@ -11,7 +11,8 @@ export type ModelAttemptRequiredEnv = | "CLI_CLAUDE" | "CLI_CODEX" | "CLI_GEMINI" - | "CLI_AGENT"; + | "CLI_AGENT" + | "CLI_OPENCODE"; export type ModelAttempt = { transport: "native" | "openrouter" | "cli"; diff --git a/tests/asset.summary-attempts.test.ts b/tests/asset.summary-attempts.test.ts index 2b09a758..610b6bad 100644 --- a/tests/asset.summary-attempts.test.ts +++ b/tests/asset.summary-attempts.test.ts @@ -273,6 +273,7 @@ describe("asset summary attempts", () => { extraArgsByProvider: { gemini: ["--include-directories", "/tmp/assets"], codex: ["-i", "/tmp/assets/file.png"], + opencode: ["--file", "/tmp/assets/file.png"], }, }); }); @@ -298,6 +299,7 @@ describe("asset summary attempts", () => { expect(result?.extraArgsByProvider).toEqual({ gemini: ["--include-directories", "/tmp/assets"], codex: undefined, + opencode: ["--file", "/tmp/assets/file.png"], }); }); }); diff --git a/tests/chrome.daemon-payload.test.ts b/tests/chrome.daemon-payload.test.ts index ec26729d..cd59b8ba 100644 --- a/tests/chrome.daemon-payload.test.ts +++ b/tests/chrome.daemon-payload.test.ts @@ -26,7 +26,7 @@ describe("chrome/daemon-payload", () => { length: "xl", language: "auto", autoCliFallback: true, - autoCliOrder: "claude,gemini,codex,agent", + autoCliOrder: "claude,gemini,codex,agent,opencode", maxCharacters: defaultSettings.maxChars, }); }); @@ -70,7 +70,7 @@ describe("chrome/daemon-payload", () => { retries: 2, maxOutputTokens: "2k", autoCliFallback: true, - autoCliOrder: "claude,gemini,codex,agent", + autoCliOrder: "claude,gemini,codex,agent,opencode", maxCharacters: defaultSettings.maxChars, }); }); diff --git a/tests/daemon.chat.test.ts 
b/tests/daemon.chat.test.ts index dd25c351..3f58e1ee 100644 --- a/tests/daemon.chat.test.ts +++ b/tests/daemon.chat.test.ts @@ -111,6 +111,42 @@ describe("daemon/chat", () => { expect(events).toEqual([{ event: "content", data: "cli hello" }, { event: "metrics" }]); }); + it("resolves configured OpenCode models before emitting chat metadata", async () => { + const home = mkdtempSync(join(tmpdir(), "summarize-daemon-chat-opencode-fixed-")); + const meta: Array<{ model?: string | null }> = []; + + await streamChatResponse({ + env: { HOME: home }, + fetchImpl: fetch, + configForCli: { + cli: { + opencode: { + model: "openai/gpt-5.4", + }, + }, + }, + session: { + id: "s-opencode-fixed", + lastMeta: { model: null, modelLabel: null, inputSummary: null, summaryFromCache: null }, + }, + pageUrl: "https://example.com", + pageTitle: "Example", + pageContent: "Hello world", + messages: [{ role: "user", content: "Hi" }], + modelOverride: "cli/opencode", + pushToSession: () => {}, + emitMeta: (patch) => meta.push(patch), + }); + + expect(runCliModel).toHaveBeenCalledWith( + expect.objectContaining({ + provider: "opencode", + model: "openai/gpt-5.4", + }), + ); + expect(meta[0]?.model).toBe("cli/opencode/openai/gpt-5.4"); + }); + it("routes openrouter overrides through openrouter transport", async () => { const home = mkdtempSync(join(tmpdir(), "summarize-daemon-chat-openrouter-")); const meta: Array<{ model?: string | null }> = []; diff --git a/tests/daemon.models.test.ts b/tests/daemon.models.test.ts index b3395041..23ce07cf 100644 --- a/tests/daemon.models.test.ts +++ b/tests/daemon.models.test.ts @@ -50,8 +50,11 @@ describe("daemon /v1/models", () => { it("includes available CLI model options", async () => { const binDir = mkdtempSync(path.join(tmpdir(), "summarize-cli-bin-")); const claudePath = path.join(binDir, "claude"); + const opencodePath = path.join(binDir, "opencode"); writeFileSync(claudePath, "#!/bin/sh\nexit 0\n", "utf8"); + writeFileSync(opencodePath, 
"#!/bin/sh\nexit 0\n", "utf8"); chmodSync(claudePath, 0o755); + chmodSync(opencodePath, 0o755); const result = await buildModelPickerOptions({ env: {}, @@ -62,7 +65,9 @@ describe("daemon /v1/models", () => { expect(result.ok).toBe(true); expect(result.providers.cliClaude).toBe(true); + expect(result.providers.cliOpencode).toBe(true); expect(result.options.some((o) => o.id === "cli/claude")).toBe(true); + expect(result.options.some((o) => o.id === "cli/opencode")).toBe(true); }); it("includes NVIDIA models when NVIDIA_API_KEY is set", async () => { diff --git a/tests/llm.cli.more-branches-2.test.ts b/tests/llm.cli.more-branches-2.test.ts index ec6c6acf..724c5124 100644 --- a/tests/llm.cli.more-branches-2.test.ts +++ b/tests/llm.cli.more-branches-2.test.ts @@ -25,9 +25,13 @@ describe("llm/cli more branches", () => { expect(resolveCliBinary("agent", null, { AGENT_PATH: " /tmp/agent-bin " })).toBe( "/tmp/agent-bin", ); + expect(resolveCliBinary("opencode", null, { OPENCODE_PATH: " /tmp/opencode-bin " })).toBe( + "/tmp/opencode-bin", + ); expect(resolveCliBinary("claude", null, {})).toBe("claude"); expect(resolveCliBinary("agent", null, {})).toBe("agent"); + expect(resolveCliBinary("opencode", null, {})).toBe("opencode"); }); it("includes stderr in exec error messages", async () => { diff --git a/tests/llm.cli.test.ts b/tests/llm.cli.test.ts index 59758bae..8981c943 100644 --- a/tests/llm.cli.test.ts +++ b/tests/llm.cli.test.ts @@ -195,6 +195,92 @@ describe("runCliModel", () => { expect(seen[0]?.[seen[0].length - 1]).toBe("Test"); }); + it("handles OpenCode JSONL output via stdin", async () => { + const seen: string[][] = []; + let stdinText = ""; + let seenCwd = ""; + const execFileImpl: ExecFileFn = ((_cmd, args, options, cb) => { + seen.push(args); + seenCwd = ((options as { cwd?: string } | undefined)?.cwd ?? 
"") as string; + cb?.( + null, + [ + JSON.stringify({ type: "step_start", part: { type: "step-start" } }), + JSON.stringify({ type: "text", part: { type: "text", text: "ok from opencode" } }), + JSON.stringify({ + type: "step_finish", + part: { + type: "step-finish", + cost: 0.25, + tokens: { input: 7, output: 3, total: 10 }, + }, + }), + ].join("\n"), + "", + ); + return { + stdin: { + write: (chunk: string | Buffer) => { + stdinText += String(chunk); + }, + end: () => {}, + }, + } as unknown as ReturnType; + }) as ExecFileFn; + + const result = await runCliModel({ + provider: "opencode", + prompt: "Test", + model: "openai/gpt-5.4", + allowTools: false, + timeoutMs: 1000, + env: {}, + execFileImpl, + config: null, + }); + + expect(result.text).toBe("ok from opencode"); + expect(result.costUsd).toBe(0.25); + expect(result.usage).toEqual({ promptTokens: 7, completionTokens: 3, totalTokens: 10 }); + expect(seen[0]).toContain("run"); + expect(seen[0]).toContain("--format"); + expect(seen[0]).toContain("json"); + expect(seen[0]).toContain("--model"); + expect(seen[0]).toContain("openai/gpt-5.4"); + expect(stdinText).toBe("Test"); + expect(seenCwd).toContain("summarize-opencode-"); + }); + + it("uses configured OpenCode model when none is passed explicitly", async () => { + const seen: string[][] = []; + const execFileImpl: ExecFileFn = ((_cmd, args, _options, cb) => { + seen.push(args); + cb?.( + null, + JSON.stringify({ type: "text", part: { type: "text", text: "ok from config model" } }), + "", + ); + return { + stdin: { write: () => {}, end: () => {} }, + } as unknown as ReturnType; + }) as ExecFileFn; + + const result = await runCliModel({ + provider: "opencode", + prompt: "Test", + model: null, + allowTools: false, + timeoutMs: 1000, + env: {}, + execFileImpl, + config: { opencode: { model: "openai/gpt-5.2" } }, + }); + + expect(result.text).toBe("ok from config model"); + expect(seen[0]).toContain("--model"); + expect(seen[0]).toContain("openai/gpt-5.2"); + }); + 
it("accepts common JSON output fields across JSON CLI providers", async () => { const providers: Array<{ provider: CliProvider; model: string }> = [ { provider: "claude", model: "sonnet" }, @@ -415,6 +501,9 @@ describe("cli helpers", () => { "/opt/codex", ); expect(resolveCliBinary("agent", null, { AGENT_PATH: "/opt/agent" })).toBe("/opt/agent"); + expect(resolveCliBinary("opencode", null, { OPENCODE_PATH: "/opt/opencode" })).toBe( + "/opt/opencode", + ); expect(resolveCliBinary("gemini", null, {})).toBe("gemini"); }); }); diff --git a/tests/llm.provider-capabilities.test.ts b/tests/llm.provider-capabilities.test.ts index 2954d668..30cabaf7 100644 --- a/tests/llm.provider-capabilities.test.ts +++ b/tests/llm.provider-capabilities.test.ts @@ -15,10 +15,13 @@ import { describe("llm provider capabilities", () => { it("exposes stable CLI defaults and parsing", () => { - expect(DEFAULT_AUTO_CLI_ORDER).toEqual(["claude", "gemini", "codex", "agent"]); + expect(DEFAULT_AUTO_CLI_ORDER).toEqual(["claude", "gemini", "codex", "agent", "opencode"]); expect(DEFAULT_CLI_MODELS.gemini).toBe("gemini-3-flash"); + expect(DEFAULT_CLI_MODELS.opencode).toBeNull(); expect(parseCliProviderName(" GeMiNi ")).toBe("gemini"); + expect(parseCliProviderName(" OpenCode ")).toBe("opencode"); expect(requiredEnvForCliProvider("agent")).toBe("CLI_AGENT"); + expect(requiredEnvForCliProvider("opencode")).toBe("CLI_OPENCODE"); }); it("tracks native provider capabilities centrally", () => { @@ -45,6 +48,7 @@ describe("llm provider capabilities", () => { it("resolves provider requirements and OpenAI-compatible config centrally", () => { expect(resolveRequiredEnvForModelId("cli/gemini")).toBe("CLI_GEMINI"); + expect(resolveRequiredEnvForModelId("cli/opencode")).toBe("CLI_OPENCODE"); expect(resolveRequiredEnvForModelId("openrouter/openai/gpt-5-mini")).toBe("OPENROUTER_API_KEY"); expect(resolveRequiredEnvForModelId("nvidia/meta/llama-3.1-8b-instruct")).toBe( "NVIDIA_API_KEY", diff --git 
a/tests/model-spec.test.ts b/tests/model-spec.test.ts index 62f9470b..9b0c38a6 100644 --- a/tests/model-spec.test.ts +++ b/tests/model-spec.test.ts @@ -46,6 +46,16 @@ describe("model spec parsing", () => { expect(parsed.requiredEnv).toBe("CLI_GEMINI"); }); + it("uses the OpenCode runtime default model when missing", () => { + const parsed = parseRequestedModelId("cli/opencode"); + expect(parsed.kind).toBe("fixed"); + expect(parsed.transport).toBe("cli"); + expect(parsed.userModelId).toBe("cli/opencode"); + expect(parsed.cliProvider).toBe("opencode"); + expect(parsed.cliModel).toBeNull(); + expect(parsed.requiredEnv).toBe("CLI_OPENCODE"); + }); + it("rejects invalid cli providers", () => { expect(() => parseRequestedModelId("cli/unknown/model")).toThrow(/Invalid CLI model id/); }); diff --git a/tests/run.env.test.ts b/tests/run.env.test.ts index 206a200c..47be7dcf 100644 --- a/tests/run.env.test.ts +++ b/tests/run.env.test.ts @@ -44,7 +44,12 @@ describe("run/env", () => { provider: "gemini", model: null, }); + expect(parseCliUserModelId("cli/opencode/openai/gpt-5.4")).toEqual({ + provider: "opencode", + model: "openai/gpt-5.4", + }); expect(parseCliProviderArg(" AGENT ")).toBe("agent"); + expect(parseCliProviderArg(" OpenCode ")).toBe("opencode"); }); it("rejects invalid cli providers and model ids", () => { diff --git a/tests/run.models.test.ts b/tests/run.models.test.ts new file mode 100644 index 00000000..42a3f6e4 --- /dev/null +++ b/tests/run.models.test.ts @@ -0,0 +1,30 @@ +import { describe, expect, it } from "vitest"; +import { resolveModelSelection } from "../src/run/run-models.js"; + +describe("run model selection", () => { + it("resolves provider-default OpenCode ids through summarize config", () => { + const config = { + cli: { + opencode: { + model: "openai/gpt-5.4", + }, + }, + }; + + const result = resolveModelSelection({ + config, + configForCli: config, + configPath: null, + envForRun: {}, + explicitModelArg: "cli/opencode", + }); + + 
expect(result.requestedModel.kind).toBe("fixed"); + expect(result.requestedModel.userModelId).toBe("cli/opencode/openai/gpt-5.4"); + expect(result.requestedModelLabel).toBe("cli/opencode/openai/gpt-5.4"); + if (result.requestedModel.kind === "fixed" && result.requestedModel.transport === "cli") { + expect(result.requestedModel.cliProvider).toBe("opencode"); + expect(result.requestedModel.cliModel).toBe("openai/gpt-5.4"); + } + }); +}); From de7a7d3729a18529b0e18a90d161cb4456d28cb2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Maciej=20Bi=C5=82as?= Date: Fri, 20 Mar 2026 14:06:18 +0100 Subject: [PATCH 2/2] fix: stabilize opencode CI checks --- .../src/entrypoints/background/panel-utils.ts | 24 +-- tests/chrome.panel-utils.test.ts | 27 ++++ tests/llm.cli.test.ts | 45 ++++++ tests/llm.provider-capabilities.test.ts | 23 +++ tests/model-auto.test.ts | 90 +++++++++++ tests/run.env.test.ts | 16 +- tests/run.models.test.ts | 152 ++++++++++++++++++ 7 files changed, 366 insertions(+), 11 deletions(-) diff --git a/apps/chrome-extension/src/entrypoints/background/panel-utils.ts b/apps/chrome-extension/src/entrypoints/background/panel-utils.ts index 2b2623ad..7c16ee4f 100644 --- a/apps/chrome-extension/src/entrypoints/background/panel-utils.ts +++ b/apps/chrome-extension/src/entrypoints/background/panel-utils.ts @@ -54,6 +54,17 @@ export function resolveOptionsUrl(): string { return chrome.runtime.getURL(page); } +function isContentTabUrl(url: string | null | undefined): url is string { + if (!url) return false; + return !( + url.startsWith("chrome-extension://") || + url.startsWith("chrome://") || + url.startsWith("moz-extension://") || + url.startsWith("edge://") || + url.startsWith("about:") + ); +} + export async function openOptionsWindow() { const url = resolveOptionsUrl(); try { @@ -86,11 +97,7 @@ export async function getActiveTab(windowId?: number): Promise - typeof tab.url === "string" && - !tab.url.startsWith("chrome-extension://") && - !tab.url.startsWith("chrome://"), + 
(tab) => isContentTabUrl(tab.url), ) ?? null; - return contentTab ?? activeTab ?? null; + return contentTab; } export function normalizeUrl(value: string) { diff --git a/tests/chrome.panel-utils.test.ts b/tests/chrome.panel-utils.test.ts index 3c7eb7f8..7f8a278b 100644 --- a/tests/chrome.panel-utils.test.ts +++ b/tests/chrome.panel-utils.test.ts @@ -134,6 +134,33 @@ describe("chrome panel utils", () => { expect(chrome.tabs.query).toHaveBeenNthCalledWith(2, { windowId: 7 }); }); + it("falls back to a real content tab when the active tab is about:blank", async () => { + vi.mocked(chrome.tabs.query) + .mockResolvedValueOnce([{ id: 9, url: "about:blank" }]) + .mockResolvedValueOnce([ + { id: 9, url: "about:blank" }, + { id: 3, url: "https://example.com/article" }, + ]); + + await expect(getActiveTab()).resolves.toMatchObject({ + id: 3, + url: "https://example.com/article", + }); + expect(chrome.tabs.query).toHaveBeenNthCalledWith(1, { active: true, currentWindow: true }); + expect(chrome.tabs.query).toHaveBeenNthCalledWith(2, { currentWindow: true }); + }); + + it("returns null when a window has no content tabs", async () => { + vi.mocked(chrome.tabs.query) + .mockResolvedValueOnce([{ id: 9, url: "about:blank" }]) + .mockResolvedValueOnce([ + { id: 9, url: "about:blank" }, + { id: 10, url: "chrome-extension://test/sidepanel.html" }, + ]); + + await expect(getActiveTab(7)).resolves.toBeNull(); + }); + it("formats slide timestamps for minutes and hours", () => { expect(formatSlideTimestamp(65)).toBe("1:05"); expect(formatSlideTimestamp(3723)).toBe("1:02:03"); diff --git a/tests/llm.cli.test.ts b/tests/llm.cli.test.ts index 8981c943..5237e84b 100644 --- a/tests/llm.cli.test.ts +++ b/tests/llm.cli.test.ts @@ -281,6 +281,51 @@ describe("runCliModel", () => { expect(seen[0]).toContain("openai/gpt-5.2"); }); + it("reuses the provided cwd for OpenCode when tools are allowed", async () => { + const seen: string[][] = []; + let stdinText = ""; + let seenCwd = ""; + const 
execFileImpl: ExecFileFn = ((_cmd, args, options, cb) => { + seen.push(args); + seenCwd = ((options as { cwd?: string } | undefined)?.cwd ?? "") as string; + cb?.(null, JSON.stringify({ type: "text", part: { type: "text", text: "ok" } }), ""); + return { + stdin: { + write: (chunk: string | Buffer) => { + stdinText += String(chunk); + }, + end: () => {}, + }, + } as unknown as ReturnType; + }) as ExecFileFn; + + const result = await runCliModel({ + provider: "opencode", + prompt: "Test", + model: null, + allowTools: true, + timeoutMs: 1000, + env: {}, + execFileImpl, + config: { opencode: { extraArgs: ["--config", "fast"] } }, + cwd: "/tmp/opencode-cwd", + extraArgs: ["--approval-mode", "never"], + }); + + expect(result.text).toBe("ok"); + expect(seen[0]).toEqual([ + "run", + "--config", + "fast", + "--approval-mode", + "never", + "--format", + "json", + ]); + expect(stdinText).toBe("Test"); + expect(seenCwd).toBe("/tmp/opencode-cwd"); + }); + it("accepts common JSON output fields across JSON CLI providers", async () => { const providers: Array<{ provider: CliProvider; model: string }> = [ { provider: "claude", model: "sonnet" }, diff --git a/tests/llm.provider-capabilities.test.ts b/tests/llm.provider-capabilities.test.ts index 30cabaf7..5ddeea6f 100644 --- a/tests/llm.provider-capabilities.test.ts +++ b/tests/llm.provider-capabilities.test.ts @@ -20,6 +20,7 @@ describe("llm provider capabilities", () => { expect(DEFAULT_CLI_MODELS.opencode).toBeNull(); expect(parseCliProviderName(" GeMiNi ")).toBe("gemini"); expect(parseCliProviderName(" OpenCode ")).toBe("opencode"); + expect(parseCliProviderName("nope")).toBeNull(); expect(requiredEnvForCliProvider("agent")).toBe("CLI_AGENT"); expect(requiredEnvForCliProvider("opencode")).toBe("CLI_OPENCODE"); }); @@ -49,6 +50,7 @@ describe("llm provider capabilities", () => { it("resolves provider requirements and OpenAI-compatible config centrally", () => { 
expect(resolveRequiredEnvForModelId("cli/gemini")).toBe("CLI_GEMINI"); expect(resolveRequiredEnvForModelId("cli/opencode")).toBe("CLI_OPENCODE"); + expect(resolveRequiredEnvForModelId("cli/nope/test")).toBe("CLI_CLAUDE"); expect(resolveRequiredEnvForModelId("openrouter/openai/gpt-5-mini")).toBe("OPENROUTER_API_KEY"); expect(resolveRequiredEnvForModelId("nvidia/meta/llama-3.1-8b-instruct")).toBe( "NVIDIA_API_KEY", @@ -68,4 +70,25 @@ describe("llm provider capabilities", () => { isOpenRouter: false, }); }); + + it("returns false for invalid video model ids and requires provider keys", () => { + expect(isVideoUnderstandingCapableModelId("not-a-model")).toBe(false); + expect(isVideoUnderstandingCapableModelId("invalid-provider/model")).toBe(false); + expect(() => + resolveOpenAiCompatibleClientConfigForProvider({ + provider: "zai", + openaiApiKey: null, + openrouterApiKey: null, + openaiBaseUrlOverride: null, + }), + ).toThrow(/Missing Z_AI_API_KEY/); + expect(() => + resolveOpenAiCompatibleClientConfigForProvider({ + provider: "nvidia", + openaiApiKey: null, + openrouterApiKey: null, + openaiBaseUrlOverride: null, + }), + ).toThrow(/Missing NVIDIA_API_KEY/); + }); }); diff --git a/tests/model-auto.test.ts b/tests/model-auto.test.ts index 698ba825..664925a1 100644 --- a/tests/model-auto.test.ts +++ b/tests/model-auto.test.ts @@ -1,6 +1,7 @@ import { describe, expect, it } from "vitest"; import type { SummarizeConfig } from "../src/config.js"; import { buildAutoModelAttempts } from "../src/model-auto.js"; +import { prependCliCandidates, resolveCliAutoFallbackConfig } from "../src/model-auto-cli.js"; describe("auto model selection", () => { it("preserves candidate order (native then OpenRouter fallback)", () => { @@ -503,4 +504,93 @@ describe("auto model selection", () => { expect(attempts[0]?.userModelId).toBe("cli/gemini/gemini-3-flash"); expect(attempts[1]?.userModelId).toBe("cli/claude/sonnet"); }); + + it("prepends a bare OpenCode CLI fallback when no default model 
is configured", () => { + const config: SummarizeConfig = { + model: { mode: "auto", rules: [{ candidates: ["openai/gpt-5-mini"] }] }, + }; + const attempts = buildAutoModelAttempts({ + kind: "text", + promptTokens: 100, + desiredOutputTokens: 50, + requiresVideoUnderstanding: false, + env: {}, + config, + catalog: null, + openrouterProvidersFromEnv: null, + cliAvailability: { opencode: true }, + isImplicitAutoSelection: true, + }); + + expect(attempts[0]?.userModelId).toBe("cli/opencode"); + }); + + it("uses the configured OpenCode model for CLI fallback candidates", () => { + const config: SummarizeConfig = { + model: { mode: "auto", rules: [{ candidates: ["openai/gpt-5-mini"] }] }, + cli: { + opencode: { + model: "openai/gpt-5.4", + }, + }, + }; + const attempts = buildAutoModelAttempts({ + kind: "text", + promptTokens: 100, + desiredOutputTokens: 50, + requiresVideoUnderstanding: false, + env: {}, + config, + catalog: null, + openrouterProvidersFromEnv: null, + cliAvailability: { opencode: true }, + isImplicitAutoSelection: true, + }); + + expect(attempts[0]?.userModelId).toBe("cli/opencode/openai/gpt-5.4"); + }); + + it("dedupes configured CLI auto-fallback order", () => { + const config: SummarizeConfig = { + cli: { + autoFallback: { + enabled: true, + onlyWhenNoApiKeys: false, + order: ["opencode", "claude", "opencode"], + }, + }, + }; + + expect(resolveCliAutoFallbackConfig(config)).toEqual({ + enabled: true, + onlyWhenNoApiKeys: false, + order: ["opencode", "claude"], + }); + }); + + it("does not prepend CLI candidates when an explicit enabled list is empty", () => { + expect( + prependCliCandidates({ + candidates: ["openai/gpt-5-mini"], + config: { cli: { enabled: [] } }, + env: {}, + isImplicitAutoSelection: true, + allowAutoCliFallback: false, + lastSuccessfulCliProvider: null, + }), + ).toEqual(["openai/gpt-5-mini"]); + }); + + it("dedupes duplicate explicit OpenCode CLI entries", () => { + expect( + prependCliCandidates({ + candidates: 
["openai/gpt-5-mini"], + config: { cli: { enabled: ["opencode", "opencode"] } }, + env: {}, + isImplicitAutoSelection: true, + allowAutoCliFallback: false, + lastSuccessfulCliProvider: null, + }), + ).toEqual(["cli/opencode", "openai/gpt-5-mini"]); + }); }); diff --git a/tests/run.env.test.ts b/tests/run.env.test.ts index 47be7dcf..d430b789 100644 --- a/tests/run.env.test.ts +++ b/tests/run.env.test.ts @@ -1,12 +1,13 @@ import { chmodSync, mkdtempSync, writeFileSync } from "node:fs"; import { tmpdir } from "node:os"; -import { join } from "node:path"; +import { delimiter, join } from "node:path"; import { describe, expect, it } from "vitest"; import { hasUvxCli, parseBooleanEnv, parseCliProviderArg, parseCliUserModelId, + resolveCliAvailability, resolveExecutableInPath, } from "../src/run/env.js"; @@ -52,6 +53,19 @@ describe("run/env", () => { expect(parseCliProviderArg(" OpenCode ")).toBe("opencode"); }); + it("detects OpenCode availability from PATH and respects cli.enabled", () => { + const opencode = makeBin("opencode"); + const pathEnv = [opencode.dir].join(delimiter); + + expect(resolveCliAvailability({ env: { PATH: pathEnv }, config: null }).opencode).toBe(true); + expect( + resolveCliAvailability({ + env: { PATH: pathEnv }, + config: { cli: { enabled: ["claude"] } }, + }).opencode, + ).toBe(false); + }); + it("rejects invalid cli providers and model ids", () => { expect(() => parseCliProviderArg("nope")).toThrow(/Unsupported --cli/); expect(() => parseCliUserModelId("cli/nope/test")).toThrow(/Invalid CLI model id/); diff --git a/tests/run.models.test.ts b/tests/run.models.test.ts index 42a3f6e4..ab19dbbe 100644 --- a/tests/run.models.test.ts +++ b/tests/run.models.test.ts @@ -27,4 +27,156 @@ describe("run model selection", () => { expect(result.requestedModel.cliModel).toBe("openai/gpt-5.4"); } }); + + it("keeps bare OpenCode ids when no configured model is available", () => { + const result = resolveModelSelection({ + config: { cli: { opencode: { model: " 
" } } }, + configForCli: { cli: { opencode: { model: " " } } }, + configPath: null, + envForRun: {}, + explicitModelArg: "cli/opencode", + }); + + expect(result.requestedModel.kind).toBe("fixed"); + expect(result.requestedModel.userModelId).toBe("cli/opencode"); + expect(result.requestedModelLabel).toBe("cli/opencode"); + if (result.requestedModel.kind === "fixed" && result.requestedModel.transport === "cli") { + expect(result.requestedModel.cliProvider).toBe("opencode"); + expect(result.requestedModel.cliModel).toBeNull(); + } + }); + + it("does not override explicit OpenCode model ids from config defaults", () => { + const config = { + cli: { + opencode: { + model: "openai/gpt-5.2", + }, + }, + }; + + const result = resolveModelSelection({ + config, + configForCli: config, + configPath: null, + envForRun: {}, + explicitModelArg: "cli/opencode/openai/gpt-5.4", + }); + + expect(result.requestedModel.kind).toBe("fixed"); + expect(result.requestedModel.userModelId).toBe("cli/opencode/openai/gpt-5.4"); + expect(result.requestedModelLabel).toBe("cli/opencode/openai/gpt-5.4"); + if (result.requestedModel.kind === "fixed" && result.requestedModel.transport === "cli") { + expect(result.requestedModel.cliProvider).toBe("opencode"); + expect(result.requestedModel.cliModel).toBe("openai/gpt-5.4"); + } + }); + + it("resolves named bare OpenCode models through cli config", () => { + const config = { + models: { + localCli: { + id: "cli/opencode", + }, + }, + cli: { + opencode: { + model: "openai/gpt-5.4", + }, + }, + }; + + const result = resolveModelSelection({ + config, + configForCli: config, + configPath: "/tmp/summarize.json", + envForRun: {}, + explicitModelArg: "localCli", + }); + + expect(result.isNamedModelSelection).toBe(true); + expect(result.requestedModel.kind).toBe("fixed"); + expect(result.requestedModelLabel).toBe("localCli"); + if (result.requestedModel.kind === "fixed" && result.requestedModel.transport === "cli") { + 
expect(result.requestedModel.userModelId).toBe("cli/opencode/openai/gpt-5.4"); + expect(result.requestedModel.cliProvider).toBe("opencode"); + expect(result.requestedModel.cliModel).toBe("openai/gpt-5.4"); + } + }); + + it("uses SUMMARIZE_MODEL when no explicit model is passed", () => { + const result = resolveModelSelection({ + config: null, + configForCli: null, + configPath: null, + envForRun: { SUMMARIZE_MODEL: "cli/opencode/openai/gpt-5.4" }, + explicitModelArg: null, + }); + + expect(result.requestedModelInput).toBe("cli/opencode/openai/gpt-5.4"); + expect(result.requestedModelLabel).toBe("cli/opencode/openai/gpt-5.4"); + expect(result.isImplicitAutoSelection).toBe(false); + expect(result.isFallbackModel).toBe(false); + }); + + it("uses the configured named model when no explicit model is passed", () => { + const config = { + model: { name: "localCli" as const }, + models: { + localCli: { + id: "cli/opencode", + }, + }, + cli: { + opencode: { + model: "openai/gpt-5.4", + }, + }, + }; + + const result = resolveModelSelection({ + config, + configForCli: config, + configPath: "/tmp/summarize.json", + envForRun: {}, + explicitModelArg: null, + }); + + expect(result.requestedModelInput).toBe("localCli"); + expect(result.isNamedModelSelection).toBe(true); + expect(result.requestedModelLabel).toBe("localCli"); + if (result.requestedModel.kind === "fixed" && result.requestedModel.transport === "cli") { + expect(result.requestedModel.userModelId).toBe("cli/opencode/openai/gpt-5.4"); + } + }); + + it("rejects unknown bare model ids with a config hint", () => { + expect(() => + resolveModelSelection({ + config: null, + configForCli: null, + configPath: "/tmp/summarize.json", + envForRun: {}, + explicitModelArg: "unknown", + }), + ).toThrow(/Define it in \/tmp\/summarize\.json under "models"/); + }); + + it("rejects named models that only alias another preset", () => { + expect(() => + resolveModelSelection({ + config: { + models: { + nestedAlias: { + name: 
"otherPreset", + }, + }, + }, + configForCli: null, + configPath: "/tmp/summarize.json", + envForRun: {}, + explicitModelArg: "nestedAlias", + }), + ).toThrow(/unsupported model config/); + }); });