diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 3d0a0c39d..c4b6334ce 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -8,7 +8,7 @@ on:
jobs:
ci:
- runs-on: ubuntu-latest
+ runs-on: macos-latest
steps:
- uses: actions/checkout@v4
diff --git a/DEV-LOG.md b/DEV-LOG.md
index 18700fca6..45b869cca 100644
--- a/DEV-LOG.md
+++ b/DEV-LOG.md
@@ -1,5 +1,54 @@
# DEV-LOG
+## OpenAI API Compatibility (2026-04-03)
+
+**Branch**: `feature/openai`
+
+Adds an "OpenAI Compatible" option to the `/login` flow, supporting Ollama, DeepSeek, vLLM, One API, and other third-party services compatible with the OpenAI Chat Completions API. Once configured via `/login`, all API requests are routed through the OpenAI path automatically.
+
+**Changed files (10 files, +384 / -134):**
+
+| File | Change |
+|------|------|
+| `.github/workflows/ci.yml` | CI runner switched from `ubuntu-latest` to `macos-latest` |
+| `README.md` | New "OpenAI API compatibility" entry in the TODO list |
+| `src/components/ConsoleOAuthFlow.tsx` | New `openai_chat_api` OAuth state (Base URL / API Key / 3 model-mapping fields); new "OpenAI Compatible" option in the idle selection list; full form UI (Tab to switch, Enter to save); on save, writes `modelType: 'openai'` + env to settings.json; resets `modelType` to `anthropic` on OAuth login |
+| `src/services/api/openai/index.ts` | Replaced the direct `yield* adaptOpenAIStreamToAnthropic()` with a full stream-processing loop: accumulates content blocks (text/tool_use/thinking), yields an `AssistantMessage` on each `content_block_stop`, and also yields `StreamEvent`s for live display; error handling now uses the new `createAssistantAPIErrorMessage({ content, apiError, error })` signature |
+| `src/services/api/openai/convertMessages.ts` | Input type changed from the Anthropic SDK's `BetaMessageParam[]` to the internal `(UserMessage \| AssistantMessage)[]`; role determined via `msg.type` instead of `msg.role`; content read from `msg.message.content`; internal block types such as `cache_edits` / `server_tool_use` are skipped |
+| `src/services/api/openai/modelMapping.ts` | Removed the `OPENAI_MODEL_MAP` JSON env var + its caching mechanism; added `getModelFamily()` to classify by haiku/sonnet/opus; resolution priority is now: `OPENAI_MODEL` → `ANTHROPIC_DEFAULT_{FAMILY}_MODEL` → `DEFAULT_MODEL_MAP` → pass the name through unchanged |
+| `src/services/api/openai/__tests__/convertMessages.test.ts` | Test inputs changed from bare `{ role, content }` objects to the internal format wrapped by `makeUserMsg()` / `makeAssistantMsg()` |
+| `src/services/api/openai/__tests__/modelMapping.test.ts` | Tests switched from `OPENAI_MODEL_MAP` to `ANTHROPIC_DEFAULT_{HAIKU,SONNET,OPUS}_MODEL`; 3 new env var override tests |
+| `src/utils/model/providers.ts` | `getAPIProvider()` gains a new highest-priority check: the `modelType` field in settings.json; the `CLAUDE_CODE_USE_OPENAI` env var drops to second priority |
+| `src/utils/settings/types.ts` | `SettingsSchema` gains a `modelType` field: `z.enum(['anthropic', 'openai']).optional()` |
+
+**Key design decisions:**
+
+1. **Store `modelType` in settings.json** rather than in a pure env var, so the `/login` configuration persists and survives restarts
+2. **Reuse the `ANTHROPIC_DEFAULT_*_MODEL` env vars** instead of adding `OPENAI_MODEL_MAP`, sharing one set of model-mapping configuration with Custom Platform and reducing the user's cognitive load
+3. **Dual-yield stream processing**: yield both `AssistantMessage` (so consumers can handle tool calls) and `StreamEvent` (so the REPL can render live), matching the behavior of the Anthropic path; see the sketch below
+4. **Reset `modelType` on OAuth login**: switching back to official Anthropic login automatically resets it to `anthropic`, so stale configuration cannot route requests down the wrong path
+
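+A minimal sketch of that dual-yield loop (event shapes are simplified, and `makeAssistantMessage` is an illustrative stand-in, not the real constructor):
+
+```ts
+// Sketch only: one StreamEvent per adapter event for live rendering, plus one
+// AssistantMessage per completed content block for tool-call handling.
+async function* streamLoop(events: AsyncIterable<any>): AsyncGenerator<any> {
+  const blocks: any[] = []
+  for await (const event of events) {
+    yield { type: 'stream_event', event } // StreamEvent → REPL renders live
+    switch (event.type) {
+      case 'content_block_start':
+        blocks[event.index] = { ...event.content_block }
+        break
+      case 'content_block_delta': // accumulate text/thinking/tool-input deltas
+        if (event.delta.type === 'text_delta') blocks[event.index].text += event.delta.text
+        break
+      case 'content_block_stop': // consumers pick up tool calls from here
+        yield makeAssistantMessage(blocks[event.index]) // hypothetical helper
+        break
+    }
+  }
+}
+```
+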
+**Configuration:**
+
+```
+/login → select "OpenAI Compatible" → fill in Base URL / API Key / model names
+```
+
+Or edit `~/.claude/settings.json` by hand:
+
+```json
+{
+ "modelType": "openai",
+ "env": {
+ "OPENAI_BASE_URL": "http://localhost:11434/v1",
+ "OPENAI_API_KEY": "ollama",
+ "ANTHROPIC_DEFAULT_SONNET_MODEL": "qwen3:32b"
+ }
+}
+```
+
+---
+
## Enable Remote Control / BRIDGE_MODE (2026-04-03)
**PR**: [claude-code-best/claude-code#60](https://github.com/claude-code-best/claude-code/pull/60)
diff --git a/README.md b/README.md
index 1dfb92cb5..c9f30b4a3 100644
--- a/README.md
+++ b/README.md
@@ -36,6 +36,7 @@
 - [x] Add custom GrowthBook support (GB is open source too; you can now configure a custom remote-control platform) [docs](https://ccb.agent-aura.top/docs/internals/growthbook-adapter)
 - [x] Custom login mode; use it to configure Claude's models!
 - [x] Fix the missing rg issue in the search tool (requires re-running bun i)
+ - [ ] OpenAI API compatibility! Just /login and configure an OpenAI platform!
 - [ ] V6: large-scale refactor of the legacy code mountain, splitting everything into modules
 - [ ] V6 will live on a brand-new branch; main will then be frozen as a historical version
diff --git a/bun.lock b/bun.lock
index d45cbf301..5f53002de 100644
--- a/bun.lock
+++ b/bun.lock
@@ -89,6 +89,7 @@
"lru-cache": "^11.2.7",
"marked": "^17.0.5",
"modifiers-napi": "workspace:*",
+ "openai": "^4.73.0",
"p-map": "^7.0.4",
"picomatch": "^4.0.4",
"plist": "^3.1.0",
@@ -741,6 +742,8 @@
"@types/node": ["@types/node@25.5.0", "https://registry.npmmirror.com/@types/node/-/node-25.5.0.tgz", { "dependencies": { "undici-types": "~7.18.0" } }, "sha512-jp2P3tQMSxWugkCUKLRPVUpGaL5MVFwF8RDuSRztfwgN1wmqJeMSbKlnEtQqU8UrhTmzEmZdu2I6v2dpp7XIxw=="],
+ "@types/node-fetch": ["@types/node-fetch@2.6.13", "https://registry.npmmirror.com/@types/node-fetch/-/node-fetch-2.6.13.tgz", { "dependencies": { "@types/node": "*", "form-data": "^4.0.4" } }, "sha512-QGpRVpzSaUs30JBSGPjOg4Uveu384erbHBoT1zeONvyCfwQxIkUshLAOqN/k9EjGviPRmWTTe6aH2qySWKTVSw=="],
+
"@types/pg": ["@types/pg@8.15.6", "https://registry.npmmirror.com/@types/pg/-/pg-8.15.6.tgz", { "dependencies": { "@types/node": "*", "pg-protocol": "*", "pg-types": "^2.2.0" } }, "sha512-NoaMtzhxOrubeL/7UZuNTrejB4MPAJ0RpxZqXQf2qXuVlTPuG6Y8p4u9dKRaue4yjmC7ZhzVO2/Yyyn25znrPQ=="],
"@types/pg-pool": ["@types/pg-pool@2.0.7", "https://registry.npmmirror.com/@types/pg-pool/-/pg-pool-2.0.7.tgz", { "dependencies": { "@types/pg": "*" } }, "sha512-U4CwmGVQcbEuqpyju8/ptOKg6gEC+Tqsvj2xS9o1g71bUh8twxnC6ZL5rZKCsGN0iyH0CwgUyc9VR5owNQF9Ng=="],
@@ -763,6 +766,8 @@
"@xmldom/xmldom": ["@xmldom/xmldom@0.8.12", "https://registry.npmmirror.com/@xmldom/xmldom/-/xmldom-0.8.12.tgz", {}, "sha512-9k/gHF6n/pAi/9tqr3m3aqkuiNosYTurLLUtc7xQ9sxB/wm7WPygCv8GYa6mS0fLJEHhqMC1ATYhz++U/lRHqg=="],
+ "abort-controller": ["abort-controller@3.0.0", "https://registry.npmmirror.com/abort-controller/-/abort-controller-3.0.0.tgz", { "dependencies": { "event-target-shim": "^5.0.0" } }, "sha512-h8lQ8tacZYnR3vNQTgibj+tODHI5/+l06Au2Pcriv/Gmet0eaj4TwWH41sO9wnHDiQsEj19q0drzdWdeAHtweg=="],
+
"accepts": ["accepts@2.0.0", "https://registry.npmmirror.com/accepts/-/accepts-2.0.0.tgz", { "dependencies": { "mime-types": "^3.0.0", "negotiator": "^1.0.0" } }, "sha512-5cvg6CtKwfgdmVqY1WIiXKc3Q1bkRqGLi+2W/6ao+6Y7gu/RCwRuAhGEzh5B4KlszSuTLgZYuqFqo5bImjNKng=="],
"acorn": ["acorn@8.16.0", "https://registry.npmmirror.com/acorn/-/acorn-8.16.0.tgz", { "bin": { "acorn": "bin/acorn" } }, "sha512-UVJyE9MttOsBQIDKw1skb9nAwQuR5wuGD3+82K6JgJlm/Y+KI92oNsMNGZCYdDsVtRHSak0pcV5Dno5+4jh9sw=="],
@@ -771,6 +776,8 @@
"agent-base": ["agent-base@8.0.0", "https://registry.npmmirror.com/agent-base/-/agent-base-8.0.0.tgz", {}, "sha512-QT8i0hCz6C/KQ+KTAbSNwCHDGdmUJl2tp2ZpNlGSWCfhUNVbYG2WLE3MdZGBAgXPV4GAvjGMxo+C1hroyxmZEg=="],
+ "agentkeepalive": ["agentkeepalive@4.6.0", "https://registry.npmmirror.com/agentkeepalive/-/agentkeepalive-4.6.0.tgz", { "dependencies": { "humanize-ms": "^1.2.1" } }, "sha512-kja8j7PjmncONqaTsB8fQ+wE2mSU2DJ9D4XKoJ5PFWIdRMa6SLSN1ff4mOr4jCbfRSsxR4keIiySJU0N9T5hIQ=="],
+
"ajv": ["ajv@8.18.0", "https://registry.npmmirror.com/ajv/-/ajv-8.18.0.tgz", { "dependencies": { "fast-deep-equal": "^3.1.3", "fast-uri": "^3.0.1", "json-schema-traverse": "^1.0.0", "require-from-string": "^2.0.2" } }, "sha512-PlXPeEWMXMZ7sPYOHqmDyCJzcfNrUr3fGNKtezX14ykXOEIvyK81d+qydx89KY5O71FKMPaQ2vBfBFI5NHR63A=="],
"ajv-formats": ["ajv-formats@3.0.1", "https://registry.npmmirror.com/ajv-formats/-/ajv-formats-3.0.1.tgz", { "dependencies": { "ajv": "^8.0.0" } }, "sha512-8iUql50EUR+uUcdRQ3HDqa6EVyo3docL8g5WJ3FNcWmu62IbkGUue/pEyLBW8VGKKucTPgqeks4fIU1DA4yowQ=="],
@@ -927,6 +934,8 @@
"etag": ["etag@1.8.1", "https://registry.npmmirror.com/etag/-/etag-1.8.1.tgz", {}, "sha512-aIL5Fx7mawVa300al2BnEE4iNvo1qETxLrPI/o05L7z6go7fCw1J6EQmbK4FmJ2AS7kgVF/KEZWufBfdClMcPg=="],
+ "event-target-shim": ["event-target-shim@5.0.1", "https://registry.npmmirror.com/event-target-shim/-/event-target-shim-5.0.1.tgz", {}, "sha512-i/2XbnSz/uxRCU6+NdVJgKWDTM427+MqYbkQzD321DuCQJUqOuJKIA0IM2+W2xtYHdKOmZ4dR6fExsd4SXL+WQ=="],
+
"eventsource": ["eventsource@3.0.7", "https://registry.npmmirror.com/eventsource/-/eventsource-3.0.7.tgz", { "dependencies": { "eventsource-parser": "^3.0.1" } }, "sha512-CRT1WTyuQoD771GW56XEZFQ/ZoSfWid1alKGDYMmkt2yl8UXrVR4pspqWNEcqKvVIzg6PAltWjxcSSPrboA4iA=="],
"eventsource-parser": ["eventsource-parser@3.0.6", "https://registry.npmmirror.com/eventsource-parser/-/eventsource-parser-3.0.6.tgz", {}, "sha512-Vo1ab+QXPzZ4tCa8SwIHJFaSzy4R6SHf7BY79rFBDf0idraZWAkYrDjDj8uWaSm3S2TK+hJ7/t1CEmZ7jXw+pg=="],
@@ -973,8 +982,12 @@
"form-data": ["form-data@4.0.5", "https://registry.npmmirror.com/form-data/-/form-data-4.0.5.tgz", { "dependencies": { "asynckit": "^0.4.0", "combined-stream": "^1.0.8", "es-set-tostringtag": "^2.1.0", "hasown": "^2.0.2", "mime-types": "^2.1.12" } }, "sha512-8RipRLol37bNs2bhoV67fiTEvdTrbMUYcFTiy3+wuuOnUog2QBHCZWXDRijWQfAkhBj2Uf5UnVaiWwA5vdd82w=="],
+ "form-data-encoder": ["form-data-encoder@1.7.2", "https://registry.npmmirror.com/form-data-encoder/-/form-data-encoder-1.7.2.tgz", {}, "sha512-qfqtYan3rxrnCk1VYaA4H+Ms9xdpPqvLZa6xmMgFvhO32x7/3J/ExcTd6qpxM0vH2GdMI+poehyBZvqfMTto8A=="],
+
"formatly": ["formatly@0.3.0", "https://registry.npmmirror.com/formatly/-/formatly-0.3.0.tgz", { "dependencies": { "fd-package-json": "^2.0.0" }, "bin": { "formatly": "bin/index.mjs" } }, "sha512-9XNj/o4wrRFyhSMJOvsuyMwy8aUfBaZ1VrqHVfohyXf0Sw0e+yfKG+xZaY3arGCOMdwFsqObtzVOc1gU9KiT9w=="],
+ "formdata-node": ["formdata-node@4.4.1", "https://registry.npmmirror.com/formdata-node/-/formdata-node-4.4.1.tgz", { "dependencies": { "node-domexception": "1.0.0", "web-streams-polyfill": "4.0.0-beta.3" } }, "sha512-0iirZp3uVDjVGt9p49aTaqjk84TrglENEDuqfdlZQ1roC9CWlPk6Avf8EEnZNcAqPonwkG35x4n3ww/1THYAeQ=="],
+
"formdata-polyfill": ["formdata-polyfill@4.0.10", "https://registry.npmmirror.com/formdata-polyfill/-/formdata-polyfill-4.0.10.tgz", { "dependencies": { "fetch-blob": "^3.1.2" } }, "sha512-buewHzMvYL29jdeQTVILecSaZKnt/RJWjoZCF5OW60Z67/GmSLBkOFM7qh1PI3zFNtJbaZL5eQu1vLfazOwj4g=="],
"forwarded": ["forwarded@0.2.0", "https://registry.npmmirror.com/forwarded/-/forwarded-0.2.0.tgz", {}, "sha512-buRG0fpBtRHSTCOASe6hD258tEubFoRLb4ZNA6NxMVHNw2gOcwHo9wyablzMzOA5z9xA9L1KNjk/Nt6MT9aYow=="],
@@ -1045,6 +1058,8 @@
"human-signals": ["human-signals@8.0.1", "https://registry.npmmirror.com/human-signals/-/human-signals-8.0.1.tgz", {}, "sha512-eKCa6bwnJhvxj14kZk5NCPc6Hb6BdsU9DZcOnmQKSnO1VKrfV0zCvtttPZUsBvjmNDn8rpcJfpwSYnHBjc95MQ=="],
+ "humanize-ms": ["humanize-ms@1.2.1", "https://registry.npmmirror.com/humanize-ms/-/humanize-ms-1.2.1.tgz", { "dependencies": { "ms": "^2.0.0" } }, "sha512-Fl70vYtsAFb/C06PTS9dZBo7ihau+Tu/DNCk/OyHhea07S+aeMWpFFkUaXRa8fI+ScZbEI8dfSxwY7gxZ9SAVQ=="],
+
"iconv-lite": ["iconv-lite@0.7.2", "https://registry.npmmirror.com/iconv-lite/-/iconv-lite-0.7.2.tgz", { "dependencies": { "safer-buffer": ">= 2.1.2 < 3.0.0" } }, "sha512-im9DjEDQ55s9fL4EYzOAv0yMqmMBSZp6G0VvFyTMPKWxiSBHUj9NW/qqLmXUwXrrM7AvqSlTCfvqRb0cM8yYqw=="],
"ignore": ["ignore@7.0.5", "https://registry.npmmirror.com/ignore/-/ignore-7.0.5.tgz", {}, "sha512-Hs59xBNfUIunMFgWAbGX5cq6893IbWg4KnrjbYwX3tx0ztorVgTDA6B2sxf8ejHJ4wz8BqGUMYlnzNBer5NvGg=="],
@@ -1181,7 +1196,7 @@
"node-domexception": ["node-domexception@1.0.0", "https://registry.npmmirror.com/node-domexception/-/node-domexception-1.0.0.tgz", {}, "sha512-/jKZoMpw0F8GRwl4/eLROPA3cfcXtLApP0QzLmUT/HuPCZWyB7IY9ZrMeKw2O/nFIqPQB3PVM9aYm0F312AXDQ=="],
- "node-fetch": ["node-fetch@3.3.2", "https://registry.npmmirror.com/node-fetch/-/node-fetch-3.3.2.tgz", { "dependencies": { "data-uri-to-buffer": "^4.0.0", "fetch-blob": "^3.1.4", "formdata-polyfill": "^4.0.10" } }, "sha512-dRB78srN/l6gqWulah9SrxeYnxeddIG30+GOqK/9OlLVyLg3HPnr6SqOWTWOXKRwC2eGYCkZ59NNuSgvSrpgOA=="],
+ "node-fetch": ["node-fetch@2.7.0", "https://registry.npmmirror.com/node-fetch/-/node-fetch-2.7.0.tgz", { "dependencies": { "whatwg-url": "^5.0.0" }, "peerDependencies": { "encoding": "^0.1.0" }, "optionalPeers": ["encoding"] }, "sha512-c4FRfUm/dbcWZ7U+1Wq0AwCyFL+3nt2bEw05wfxSz+DWpWsitgmSgYmy2dQdWyKC1694ELPqMs/YzUSNozLt8A=="],
"node-forge": ["node-forge@1.4.0", "https://registry.npmmirror.com/node-forge/-/node-forge-1.4.0.tgz", {}, "sha512-LarFH0+6VfriEhqMMcLX2F7SwSXeWwnEAJEsYm5QKWchiVYVvJyV9v7UDvUv+w5HO23ZpQTXDv/GxdDdMyOuoQ=="],
@@ -1197,6 +1212,8 @@
"open": ["open@10.2.0", "https://registry.npmmirror.com/open/-/open-10.2.0.tgz", { "dependencies": { "default-browser": "^5.2.1", "define-lazy-prop": "^3.0.0", "is-inside-container": "^1.0.0", "wsl-utils": "^0.1.0" } }, "sha512-YgBpdJHPyQ2UE5x+hlSXcnejzAvD0b22U2OuAP+8OnlJT+PjWPxtgmGqKKc+RgTM63U9gN0YzrYc71R2WT/hTA=="],
+ "openai": ["openai@4.104.0", "https://registry.npmmirror.com/openai/-/openai-4.104.0.tgz", { "dependencies": { "@types/node": "^18.11.18", "@types/node-fetch": "^2.6.4", "abort-controller": "^3.0.0", "agentkeepalive": "^4.2.1", "form-data-encoder": "1.7.2", "formdata-node": "^4.3.2", "node-fetch": "^2.6.7" }, "peerDependencies": { "ws": "^8.18.0", "zod": "^3.23.8" }, "optionalPeers": ["ws", "zod"], "bin": { "openai": "bin/cli" } }, "sha512-p99EFNsA/yX6UhVO93f5kJsDRLAg+CTA2RBqdHK4RtK8u5IJw32Hyb2dTGKbnnFmnuoBv5r7Z2CURI9sGZpSuA=="],
+
"os-tmpdir": ["os-tmpdir@1.0.2", "https://registry.npmmirror.com/os-tmpdir/-/os-tmpdir-1.0.2.tgz", {}, "sha512-D2FR03Vir7FIu45XBY20mTb+/ZSWB00sjU9jdQXt83gDrI4Ztz5Fs7/yy74g2N5SVQY4xY1qDr4rNddwYRVX0g=="],
"oxc-parser": ["oxc-parser@0.121.0", "https://registry.npmmirror.com/oxc-parser/-/oxc-parser-0.121.0.tgz", { "dependencies": { "@oxc-project/types": "^0.121.0" }, "optionalDependencies": { "@oxc-parser/binding-android-arm-eabi": "0.121.0", "@oxc-parser/binding-android-arm64": "0.121.0", "@oxc-parser/binding-darwin-arm64": "0.121.0", "@oxc-parser/binding-darwin-x64": "0.121.0", "@oxc-parser/binding-freebsd-x64": "0.121.0", "@oxc-parser/binding-linux-arm-gnueabihf": "0.121.0", "@oxc-parser/binding-linux-arm-musleabihf": "0.121.0", "@oxc-parser/binding-linux-arm64-gnu": "0.121.0", "@oxc-parser/binding-linux-arm64-musl": "0.121.0", "@oxc-parser/binding-linux-ppc64-gnu": "0.121.0", "@oxc-parser/binding-linux-riscv64-gnu": "0.121.0", "@oxc-parser/binding-linux-riscv64-musl": "0.121.0", "@oxc-parser/binding-linux-s390x-gnu": "0.121.0", "@oxc-parser/binding-linux-x64-gnu": "0.121.0", "@oxc-parser/binding-linux-x64-musl": "0.121.0", "@oxc-parser/binding-openharmony-arm64": "0.121.0", "@oxc-parser/binding-wasm32-wasi": "0.121.0", "@oxc-parser/binding-win32-arm64-msvc": "0.121.0", "@oxc-parser/binding-win32-ia32-msvc": "0.121.0", "@oxc-parser/binding-win32-x64-msvc": "0.121.0" } }, "sha512-ek9o58+SCv6AV7nchiAcUJy1DNE2CC5WRdBcO0mF+W4oRjNQfPO7b3pLjTHSFECpHkKGOZSQxx3hk8viIL5YCg=="],
@@ -1417,7 +1434,7 @@
"walk-up-path": ["walk-up-path@4.0.0", "https://registry.npmmirror.com/walk-up-path/-/walk-up-path-4.0.0.tgz", {}, "sha512-3hu+tD8YzSLGuFYtPRb48vdhKMi0KQV5sn+uWr8+7dMEq/2G/dtLrdDinkLjqq5TIbIBjYJ4Ax/n3YiaW7QM8A=="],
- "web-streams-polyfill": ["web-streams-polyfill@3.3.3", "https://registry.npmmirror.com/web-streams-polyfill/-/web-streams-polyfill-3.3.3.tgz", {}, "sha512-d2JWLCivmZYTSIoge9MsgFCZrt571BikcWGYkjC1khllbTeDlGqZ2D8vD8E/lJa8WGWbb7Plm8/XJYV7IJHZZw=="],
+ "web-streams-polyfill": ["web-streams-polyfill@4.0.0-beta.3", "https://registry.npmmirror.com/web-streams-polyfill/-/web-streams-polyfill-4.0.0-beta.3.tgz", {}, "sha512-QW95TCTaHmsYfHDybGMwO5IJIM93I/6vTRk+daHTWFPhwh+C8Cg7j7XyKrwrj8Ib6vYXe0ocYNrmzY4xAAN6ug=="],
"webidl-conversions": ["webidl-conversions@3.0.1", "https://registry.npmmirror.com/webidl-conversions/-/webidl-conversions-3.0.1.tgz", {}, "sha512-2JAn3z8AR6rjK8Sm8orRC0h/bcl/DqL7tRPdGZ4I1CjdF+EaMLmYxBHyXuKL849eucPFhvBoxMsflfOb8kxaeQ=="],
@@ -1755,10 +1772,14 @@
"external-editor/iconv-lite": ["iconv-lite@0.4.24", "https://registry.npmmirror.com/iconv-lite/-/iconv-lite-0.4.24.tgz", { "dependencies": { "safer-buffer": ">= 2.1.2 < 3" } }, "sha512-v3MXnZAcvnywkTUEZomIActle7RXXeedOR31wwl7VlyoXO4Qi9arvSenNQWne1TcRwhCL1HwLI21bEqdpj8/rA=="],
+ "fetch-blob/web-streams-polyfill": ["web-streams-polyfill@3.3.3", "https://registry.npmmirror.com/web-streams-polyfill/-/web-streams-polyfill-3.3.3.tgz", {}, "sha512-d2JWLCivmZYTSIoge9MsgFCZrt571BikcWGYkjC1khllbTeDlGqZ2D8vD8E/lJa8WGWbb7Plm8/XJYV7IJHZZw=="],
+
"form-data/mime-types": ["mime-types@2.1.35", "https://registry.npmmirror.com/mime-types/-/mime-types-2.1.35.tgz", { "dependencies": { "mime-db": "1.52.0" } }, "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw=="],
"gaxios/https-proxy-agent": ["https-proxy-agent@7.0.6", "https://registry.npmmirror.com/https-proxy-agent/-/https-proxy-agent-7.0.6.tgz", { "dependencies": { "agent-base": "^7.1.2", "debug": "4" } }, "sha512-vK9P5/iUfdl95AI+JVyUuIcVtd4ofvtrOr3HNtM2yxC9bnMbEdp3x01OhQNnjb8IJYi38VlTE3mBXwcfvywuSw=="],
+ "gaxios/node-fetch": ["node-fetch@3.3.2", "https://registry.npmmirror.com/node-fetch/-/node-fetch-3.3.2.tgz", { "dependencies": { "data-uri-to-buffer": "^4.0.0", "fetch-blob": "^3.1.4", "formdata-polyfill": "^4.0.10" } }, "sha512-dRB78srN/l6gqWulah9SrxeYnxeddIG30+GOqK/9OlLVyLg3HPnr6SqOWTWOXKRwC2eGYCkZ59NNuSgvSrpgOA=="],
+
"gtoken/gaxios": ["gaxios@6.7.1", "https://registry.npmmirror.com/gaxios/-/gaxios-6.7.1.tgz", { "dependencies": { "extend": "^3.0.2", "https-proxy-agent": "^7.0.1", "is-stream": "^2.0.0", "node-fetch": "^2.6.9", "uuid": "^9.0.1" } }, "sha512-LDODD4TMYx7XXdpwxAVRAIAuB0bzv0s+ywFonY46k126qzQHT9ygyoa9tncmOiQmmDrik65UYsEkv3lbfqQ3yQ=="],
"http-proxy-agent/agent-base": ["agent-base@7.1.4", "https://registry.npmmirror.com/agent-base/-/agent-base-7.1.4.tgz", {}, "sha512-MnA+YT8fwfJPgBx3m60MNqakm30XOkyIoH1y6huTQvC0PwZG7ki8NacLBcrPbNoo8vEZy7Jpuk7+jMO+CUovTQ=="],
@@ -1773,6 +1794,8 @@
"npm-run-path/path-key": ["path-key@4.0.0", "https://registry.npmmirror.com/path-key/-/path-key-4.0.0.tgz", {}, "sha512-haREypq7xkM7ErfgIyA0z+Bj4AGKlMSdlQE2jvJo6huWD1EdkKYV+G/T4nq0YEF2vgTT8kqMFKo1uHn950r4SQ=="],
+ "openai/@types/node": ["@types/node@18.19.130", "https://registry.npmmirror.com/@types/node/-/node-18.19.130.tgz", { "dependencies": { "undici-types": "~5.26.4" } }, "sha512-GRaXQx6jGfL8sKfaIDD6OupbIHBr9jv7Jnaml9tB7l4v068PAOXqfcujMMo5PhbIs6ggR1XODELqahT2R8v0fg=="],
+
"parse5-htmlparser2-tree-adapter/parse5": ["parse5@6.0.1", "https://registry.npmmirror.com/parse5/-/parse5-6.0.1.tgz", {}, "sha512-Ofn/CTFzRGTTxwpNEs9PP93gXShHcTq255nzRYSKe8AkVpZY7e1fpmTfOyoIvjP5HG7Z2ZM7VS9PPhQGW2pOpw=="],
"proper-lockfile/signal-exit": ["signal-exit@3.0.7", "https://registry.npmmirror.com/signal-exit/-/signal-exit-3.0.7.tgz", {}, "sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ=="],
@@ -1909,8 +1932,6 @@
"gtoken/gaxios/is-stream": ["is-stream@2.0.1", "https://registry.npmmirror.com/is-stream/-/is-stream-2.0.1.tgz", {}, "sha512-hFoiJiTl63nn+kstHGBtewWSKnQLpyb155KHheA1l39uvtO9nWIop1p3udqPcUd/xbF1VLMO4n7OI6p7RbngDg=="],
- "gtoken/gaxios/node-fetch": ["node-fetch@2.7.0", "https://registry.npmmirror.com/node-fetch/-/node-fetch-2.7.0.tgz", { "dependencies": { "whatwg-url": "^5.0.0" }, "peerDependencies": { "encoding": "^0.1.0" }, "optionalPeers": ["encoding"] }, "sha512-c4FRfUm/dbcWZ7U+1Wq0AwCyFL+3nt2bEw05wfxSz+DWpWsitgmSgYmy2dQdWyKC1694ELPqMs/YzUSNozLt8A=="],
-
"gtoken/gaxios/uuid": ["uuid@9.0.1", "https://registry.npmmirror.com/uuid/-/uuid-9.0.1.tgz", { "bin": { "uuid": "dist/bin/uuid" } }, "sha512-b+1eJOlsR9K8HJpow9Ok3fiWOWSIcIzXodvv0rQjVoOVNpWMpxf1wZNpt4y9h10odCNrqnYp1OBzRktckBe3sA=="],
"image-processor-napi/sharp/@img/sharp-darwin-arm64": ["@img/sharp-darwin-arm64@0.33.5", "https://registry.npmmirror.com/@img/sharp-darwin-arm64/-/sharp-darwin-arm64-0.33.5.tgz", { "optionalDependencies": { "@img/sharp-libvips-darwin-arm64": "1.0.4" }, "os": "darwin", "cpu": "arm64" }, "sha512-UT4p+iz/2H4twwAoLCqfA9UH5pI6DggwKEGuaPy7nCVQ8ZsiY5PIcrRvD1DzuY3qYL07NtIQcWnBSY/heikIFQ=="],
@@ -1951,6 +1972,8 @@
"image-processor-napi/sharp/@img/sharp-win32-x64": ["@img/sharp-win32-x64@0.33.5", "https://registry.npmmirror.com/@img/sharp-win32-x64/-/sharp-win32-x64-0.33.5.tgz", { "os": "win32", "cpu": "x64" }, "sha512-MpY/o8/8kj+EcnxwvrP4aTJSWw/aZ7JIGR4aBeZkZw5B7/Jn+tY9/VNwtcoGmdT7GfggGIU4kygOMSbYnOrAbg=="],
+ "openai/@types/node/undici-types": ["undici-types@5.26.5", "https://registry.npmmirror.com/undici-types/-/undici-types-5.26.5.tgz", {}, "sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA=="],
+
"qrcode/yargs/cliui": ["cliui@6.0.0", "https://registry.npmmirror.com/cliui/-/cliui-6.0.0.tgz", { "dependencies": { "string-width": "^4.2.0", "strip-ansi": "^6.0.0", "wrap-ansi": "^6.2.0" } }, "sha512-t6wbgtoCXvAzst7QgXxJYqPt0usEfbgQdftEPbLL/cvv6HPE5VgvqCuAIDR0NgU52ds6rFwqrgakNLrHEjCbrQ=="],
"qrcode/yargs/string-width": ["string-width@4.2.3", "https://registry.npmmirror.com/string-width/-/string-width-4.2.3.tgz", { "dependencies": { "emoji-regex": "^8.0.0", "is-fullwidth-code-point": "^3.0.0", "strip-ansi": "^6.0.1" } }, "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g=="],
@@ -1969,8 +1992,6 @@
"@anthropic-ai/vertex-sdk/google-auth-library/gaxios/is-stream": ["is-stream@2.0.1", "https://registry.npmmirror.com/is-stream/-/is-stream-2.0.1.tgz", {}, "sha512-hFoiJiTl63nn+kstHGBtewWSKnQLpyb155KHheA1l39uvtO9nWIop1p3udqPcUd/xbF1VLMO4n7OI6p7RbngDg=="],
- "@anthropic-ai/vertex-sdk/google-auth-library/gaxios/node-fetch": ["node-fetch@2.7.0", "https://registry.npmmirror.com/node-fetch/-/node-fetch-2.7.0.tgz", { "dependencies": { "whatwg-url": "^5.0.0" }, "peerDependencies": { "encoding": "^0.1.0" }, "optionalPeers": ["encoding"] }, "sha512-c4FRfUm/dbcWZ7U+1Wq0AwCyFL+3nt2bEw05wfxSz+DWpWsitgmSgYmy2dQdWyKC1694ELPqMs/YzUSNozLt8A=="],
-
"@anthropic-ai/vertex-sdk/google-auth-library/gaxios/uuid": ["uuid@9.0.1", "https://registry.npmmirror.com/uuid/-/uuid-9.0.1.tgz", { "bin": { "uuid": "dist/bin/uuid" } }, "sha512-b+1eJOlsR9K8HJpow9Ok3fiWOWSIcIzXodvv0rQjVoOVNpWMpxf1wZNpt4y9h10odCNrqnYp1OBzRktckBe3sA=="],
"@anthropic-ai/vertex-sdk/google-auth-library/gcp-metadata/google-logging-utils": ["google-logging-utils@0.0.2", "https://registry.npmmirror.com/google-logging-utils/-/google-logging-utils-0.0.2.tgz", {}, "sha512-NEgUnEcBiP5HrPzufUkBzJOD/Sxsco3rLNo1F1TNf7ieU8ryUzBhqba8r756CjLX7rn3fHl6iLEwPYuqpoKgQQ=="],
diff --git a/docs/plans/openai-compatibility.md b/docs/plans/openai-compatibility.md
new file mode 100644
index 000000000..68fa9f158
--- /dev/null
+++ b/docs/plans/openai-compatibility.md
@@ -0,0 +1,421 @@
+# OpenAI Protocol Compatibility Layer
+
+## Overview
+
+claude-code supports any OpenAI-protocol endpoint via the OpenAI Chat Completions API (`/v1/chat/completions`), including Ollama, DeepSeek, vLLM, One API, LiteLLM, and others.
+
+The core strategy is a **stream adapter pattern**: an early-return branch inserted into `queryModel()` converts the Anthropic-format request into OpenAI format, calls the OpenAI SDK, and converts the SSE stream back into `BetaRawMessageStreamEvent` format. Downstream code (the stream-processing loop, query.ts, QueryEngine.ts, the REPL) is **left completely untouched**.
+
+## Environment Variables
+
+| Variable | Required | Description |
+|---|---|---|
+| `CLAUDE_CODE_USE_OPENAI` | Yes | Set to `1` to enable the OpenAI backend |
+| `OPENAI_API_KEY` | Yes | API key (for Ollama etc. any value works) |
+| `OPENAI_BASE_URL` | Recommended | Endpoint URL (e.g. `http://localhost:11434/v1`) |
+| `OPENAI_MODEL` | Optional | Override the model name for all requests (skips mapping) |
+| `OPENAI_MODEL_MAP` | Optional | JSON map, e.g. `{"claude-sonnet-4-6":"gpt-4o"}` |
+| `OPENAI_ORG_ID` | Optional | Organization ID |
+| `OPENAI_PROJECT_ID` | Optional | Project ID |
+
+### Usage Examples
+
+```bash
+# Ollama
+CLAUDE_CODE_USE_OPENAI=1 \
+OPENAI_API_KEY=ollama \
+OPENAI_BASE_URL=http://localhost:11434/v1 \
+OPENAI_MODEL=qwen2.5-coder-32b \
+bun run dev
+
+# DeepSeek (thinking is supported automatically)
+CLAUDE_CODE_USE_OPENAI=1 \
+OPENAI_API_KEY=sk-xxx \
+OPENAI_BASE_URL=https://api.deepseek.com/v1 \
+OPENAI_MODEL=deepseek-chat \
+bun run dev
+
+# vLLM
+CLAUDE_CODE_USE_OPENAI=1 \
+OPENAI_API_KEY=token-abc123 \
+OPENAI_BASE_URL=http://localhost:8000/v1 \
+OPENAI_MODEL=Qwen/Qwen2.5-Coder-32B-Instruct \
+bun run dev
+
+# One API / LiteLLM
+CLAUDE_CODE_USE_OPENAI=1 \
+OPENAI_API_KEY=sk-your-key \
+OPENAI_BASE_URL=https://your-one-api.example.com/v1 \
+OPENAI_MODEL=gpt-4o \
+bun run dev
+
+# Custom model mapping
+CLAUDE_CODE_USE_OPENAI=1 \
+OPENAI_API_KEY=sk-xxx \
+OPENAI_BASE_URL=https://my-gateway.example.com/v1 \
+OPENAI_MODEL_MAP='{"claude-sonnet-4-6":"gpt-4o-2024-11-20","claude-haiku-4-5":"gpt-4o-mini"}' \
+bun run dev
+```
+
+## Architecture
+
+### Request Flow
+
+```
+queryModel() [claude.ts]
+  ├── shared preprocessing (message normalization, tool filtering, media stripping)
+  └── if (getAPIProvider() === 'openai')
+        └── queryModelOpenAI() [openai/index.ts]
+              ├── resolveOpenAIModel()           → resolve the model name
+              ├── normalizeMessagesForAPI()      → shared message preprocessing
+              ├── toolToAPISchema()              → build tool schemas
+              ├── anthropicMessagesToOpenAI()    → convert message format
+              ├── anthropicToolsToOpenAI()       → convert tool format
+              ├── openai.chat.completions.create({ stream: true })
+              └── adaptOpenAIStreamToAnthropic() → convert the stream format
+                    ├── delta.reasoning_content → thinking block
+                    ├── delta.content           → text block
+                    ├── delta.tool_calls        → tool_use block
+                    ├── usage.cached_tokens     → cache_read_input_tokens
+                    └── yield BetaRawMessageStreamEvent
+```
+
+### Model Name Resolution Priority
+
+`resolveOpenAIModel()` resolves in this order:
+
+1. `OPENAI_MODEL` env var → used directly, overrides everything
+2. `OPENAI_MODEL_MAP` JSON lookup → custom mapping
+3. Built-in default map (see the table below)
+4. No match above → pass the name through unchanged
+
+### Built-in Model Mapping
+
+| Anthropic model | OpenAI mapping |
+|---|---|
+| `claude-sonnet-4-6` | `gpt-4o` |
+| `claude-sonnet-4-5-20250929` | `gpt-4o` |
+| `claude-sonnet-4-20250514` | `gpt-4o` |
+| `claude-3-7-sonnet-20250219` | `gpt-4o` |
+| `claude-3-5-sonnet-20241022` | `gpt-4o` |
+| `claude-opus-4-6` | `o3` |
+| `claude-opus-4-5-20251101` | `o3` |
+| `claude-opus-4-1-20250805` | `o3` |
+| `claude-opus-4-20250514` | `o3` |
+| `claude-haiku-4-5-20251001` | `gpt-4o-mini` |
+| `claude-3-5-haiku-20241022` | `gpt-4o-mini` |
+
+The `[1m]` suffix (a Claude-specific modifier) is also stripped automatically, as the sketch below shows.
+
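+Putting the four resolution steps together, a minimal sketch of `resolveOpenAIModel()` (`DEFAULT_MODEL_MAP` abbreviates the table above; this is a sketch, not the actual module):
+
+```ts
+// Sketch of the resolution order: OPENAI_MODEL → OPENAI_MODEL_MAP →
+// built-in map → pass-through, with the [1m] suffix stripped first.
+const DEFAULT_MODEL_MAP: Record<string, string> = {
+  'claude-sonnet-4-6': 'gpt-4o',
+  'claude-opus-4-6': 'o3',
+  'claude-haiku-4-5-20251001': 'gpt-4o-mini',
+  // ...remaining entries from the table above
+}
+
+function resolveOpenAIModel(anthropicModel: string): string {
+  const model = anthropicModel.replace(/\[1m\]$/, '') // strip Claude's [1m] modifier
+  if (process.env.OPENAI_MODEL) return process.env.OPENAI_MODEL      // 1. hard override
+  const customMap: Record<string, string> = process.env.OPENAI_MODEL_MAP
+    ? JSON.parse(process.env.OPENAI_MODEL_MAP)
+    : {}
+  return customMap[model] ?? DEFAULT_MODEL_MAP[model] ?? model       // 2, 3, 4
+}
+```
+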
+## File Layout
+
+### New Files
+
+```
+src/services/api/openai/
+├── client.ts           # OpenAI SDK client factory (~50 lines)
+├── convertMessages.ts  # Anthropic → OpenAI message conversion (~190 lines)
+├── convertTools.ts     # Anthropic → OpenAI tool conversion (~70 lines)
+├── streamAdapter.ts    # SSE stream conversion core, incl. thinking + caching (~270 lines)
+├── modelMapping.ts     # model name resolution (~60 lines)
+├── index.ts            # public entry point queryModelOpenAI() (~110 lines)
+└── __tests__/
+    ├── convertMessages.test.ts  # 10 tests
+    ├── convertTools.test.ts     # 7 tests
+    ├── modelMapping.test.ts     # 6 tests
+    └── streamAdapter.test.ts    # 14 tests (incl. thinking + caching)
+```
+
+### Modified Files
+
+| File | Change |
+|---|---|
+| `src/utils/model/providers.ts` | Add the `'openai'` provider type + a `CLAUDE_CODE_USE_OPENAI` check at the highest priority (sketched below) |
+| `src/utils/model/configs.ts` | Add an `openai` key to every ModelConfig |
+| `src/services/api/claude.ts` | Insert the OpenAI early-return branch after `stripExcessMediaItems()` (~8 lines) |
+| `package.json` | Add the `"openai": "^4.73.0"` dependency |
+
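+For orientation, a hedged sketch of the new check in `getAPIProvider()` (the `'anthropic'` fallback is a placeholder for the existing detection chain, which this plan leaves unchanged):
+
+```ts
+// Sketch: the CLAUDE_CODE_USE_OPENAI check runs before the existing
+// provider detection.
+type APIProvider = 'anthropic' | 'openai'
+
+function getAPIProvider(): APIProvider {
+  if (process.env.CLAUDE_CODE_USE_OPENAI === '1') return 'openai' // highest priority
+  return 'anthropic' // placeholder: existing provider detection logic
+}
+```
+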
+## Message Conversion Rules
+
+### Anthropic → OpenAI
+
+| Anthropic | OpenAI |
+|---|---|
+| `system` prompt (`string[]`) | `role: "system"` message (joined with `\n\n`) |
+| `user` + `text` blocks | `role: "user"` message |
+| `assistant` + `text` blocks | `role: "assistant"` + `content` |
+| `assistant` + `tool_use` blocks | `role: "assistant"` + `tool_calls[]` |
+| `user` + `tool_result` blocks | `role: "tool"` + `tool_call_id` |
+| `thinking` blocks | silently dropped (request side) |
+
+### Tool Conversion
+
+| Anthropic | OpenAI |
+|---|---|
+| `{ name, description, input_schema }` | `{ type: "function", function: { name, description, parameters } }` |
+| fields like `cache_control`, `defer_loading` | stripped |
+| `tool_choice: { type: "auto" }` | `"auto"` |
+| `tool_choice: { type: "any" }` | `"required"` |
+| `tool_choice: { type: "tool", name }` | `{ type: "function", function: { name } }` |
+
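+A compact sketch of both conversions, consistent with the tables above and the unit tests later in this diff (the real `convertTools.ts` also handles the full Anthropic tool union):
+
+```ts
+// Sketch: copy only name/description/schema; Anthropic-only fields such as
+// cache_control and defer_loading are simply not carried over.
+function anthropicToolsToOpenAI(
+  tools: Array<{ name: string; description?: string; input_schema?: object }>,
+) {
+  return tools.map(t => ({
+    type: 'function' as const,
+    function: {
+      name: t.name,
+      description: t.description,
+      parameters: t.input_schema ?? { type: 'object', properties: {} },
+    },
+  }))
+}
+
+function anthropicToolChoiceToOpenAI(choice?: { type: string; name?: string }) {
+  if (!choice) return undefined
+  if (choice.type === 'auto') return 'auto'
+  if (choice.type === 'any') return 'required'
+  if (choice.type === 'tool') return { type: 'function', function: { name: choice.name! } }
+  return undefined // unknown types are dropped
+}
+```
+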
+### Message Conversion Example
+
+```
+Anthropic:
+  system: ["You are helpful."]
+  messages: [
+    { role: "user",
+      content: [{ type: "text", text: "Run ls" }] },
+    { role: "assistant",
+      content: [
+        { type: "text", text: "I'll check." },
+        { type: "tool_use", id: "tu_123", name: "bash",
+          input: { command: "ls" } },
+      ] },
+    { role: "user",
+      content: [
+        { type: "tool_result", tool_use_id: "tu_123",
+          content: "file1\nfile2" },
+      ] },
+  ]
+
+OpenAI:
+  [
+    { role: "system", content: "You are helpful." },
+    { role: "user", content: "Run ls" },
+    { role: "assistant", content: "I'll check.",
+      tool_calls: [{ id: "tu_123", type: "function",
+        function: { name: "bash", arguments: '{"command":"ls"}' } }] },
+    { role: "tool", tool_call_id: "tu_123", content: "file1\nfile2" },
+  ]
+```
+
+## Stream Conversion Rules
+
+### SSE Chunk → Anthropic Event Mapping
+
+| OpenAI chunk | Anthropic event |
+|---|---|
+| first chunk | `message_start` (with usage) |
+| `delta.reasoning_content` | `content_block_start(thinking)` + `thinking_delta` |
+| `delta.content` | `content_block_start(text)` + `text_delta` |
+| `delta.tool_calls` | `content_block_start(tool_use)` + `input_json_delta` |
+| `finish_reason: "stop"` | `message_delta(stop_reason: "end_turn")` |
+| `finish_reason: "tool_calls"` | `message_delta(stop_reason: "tool_use")` |
+| `finish_reason: "length"` | `message_delta(stop_reason: "max_tokens")` |
+
+### Block Ordering
+
+When the model returns `reasoning_content` (e.g. DeepSeek), the block order matches Anthropic's:
+
+```
+thinking block (index 0) ← delta.reasoning_content
+text block (index 1) ← delta.content
+```
+
+Or:
+
+```
+thinking block (index 0) ← delta.reasoning_content
+tool_use block (index 1) ← delta.tool_calls
+```
+
+Without `reasoning_content`:
+
+```
+text block (index 0) ← delta.content
+tool_use block (index 1)  ← delta.tool_calls (if any)
+```
+
+### finish_reason Mapping
+
+| OpenAI | Anthropic |
+|---|---|
+| `stop` | `end_turn` |
+| `tool_calls` | `tool_use` |
+| `length` | `max_tokens` |
+| `content_filter` | `end_turn` |
+
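+A sketch of the table above as a function (treating values like `content_filter` as `end_turn`, per the table):
+
+```ts
+// Sketch of the finish_reason → stop_reason mapping.
+function toStopReason(finish: string | null): 'end_turn' | 'tool_use' | 'max_tokens' {
+  switch (finish) {
+    case 'tool_calls': return 'tool_use'
+    case 'length':     return 'max_tokens'
+    default:           return 'end_turn' // 'stop', 'content_filter', ...
+  }
+}
+```
+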
+### Event Sequence Examples
+
+**Plain text response**:
+```
+OpenAI chunks:
+ delta.content = "Hello"
+ delta.content = " world"
+ finish_reason = "stop"
+
+→ Anthropic events:
+ message_start { message: { id, role: 'assistant', usage: {...} } }
+ content_block_start { index: 0, content_block: { type: 'text' } }
+ content_block_delta { index: 0, delta: { type: 'text_delta', text: 'Hello' } }
+ content_block_delta { index: 0, delta: { type: 'text_delta', text: ' world' } }
+ content_block_stop { index: 0 }
+ message_delta { delta: { stop_reason: 'end_turn' } }
+ message_stop
+```
+
+**Thinking + text (DeepSeek style)**:
+```
+OpenAI chunks:
+ delta.reasoning_content = "Let me think..."
+ delta.reasoning_content = " step by step."
+ delta.content = "The answer is 42."
+ finish_reason = "stop"
+
+→ Anthropic events:
+ message_start { ... }
+ content_block_start { index: 0, content_block: { type: 'thinking', signature: '' } }
+ content_block_delta { index: 0, delta: { type: 'thinking_delta', thinking: 'Let me think...' } }
+ content_block_delta { index: 0, delta: { type: 'thinking_delta', thinking: ' step by step.' } }
+ content_block_stop { index: 0 }
+ content_block_start { index: 1, content_block: { type: 'text' } }
+ content_block_delta { index: 1, delta: { type: 'text_delta', text: 'The answer is 42.' } }
+ content_block_stop { index: 1 }
+ message_delta { delta: { stop_reason: 'end_turn' } }
+ message_stop
+```
+
+**Tool call**:
+```
+OpenAI chunks:
+ delta.tool_calls[0] = { id: 'call_xxx', function: { name: 'bash', arguments: '' } }
+ delta.tool_calls[0].function.arguments = '{"comm'
+ delta.tool_calls[0].function.arguments = 'and":"ls"}'
+ finish_reason = "tool_calls"
+
+→ Anthropic events:
+ message_start { ... }
+ content_block_start { index: 0, content_block: { type: 'tool_use', id: 'call_xxx', name: 'bash' } }
+ content_block_delta { index: 0, delta: { type: 'input_json_delta', partial_json: '{"comm' } }
+ content_block_delta { index: 0, delta: { type: 'input_json_delta', partial_json: 'and":"ls"}' } }
+ content_block_stop { index: 0 }
+ message_delta { delta: { stop_reason: 'tool_use' } }
+ message_stop
+```
+
+## Feature Support
+
+### Thinking (Chain of Thought)
+
+**Request side**: no explicit configuration is needed. Models that support chain-of-thought (DeepSeek etc.) return `delta.reasoning_content` automatically.
+
+**Response side**: `delta.reasoning_content` is converted into an Anthropic `thinking` content block:
+
+```ts
+// content_block_start
+{ type: 'content_block_start', index: 0,
+ content_block: { type: 'thinking', thinking: '', signature: '' } }
+
+// content_block_delta
+{ type: 'content_block_delta', index: 0,
+ delta: { type: 'thinking_delta', thinking: 'Let me analyze...' } }
+```
+
+The thinking block closes automatically before the text/tool_use block starts, preserving Anthropic's block ordering.
+
+### Prompt Caching
+
+**Request side**: OpenAI endpoints cache automatically, so no explicit `cache_control` is needed.
+
+**Response side**: OpenAI's `usage.prompt_tokens_details.cached_tokens` is mapped to Anthropic's `cache_read_input_tokens`:
+
+```
+OpenAI: usage.prompt_tokens_details.cached_tokens = 800
+ ↓
+Anthropic: message_start.message.usage.cache_read_input_tokens = 800
+```
+
+The cache hit volume is reported in the usage on `message_start`, as the sketch below shows.
+
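+A small sketch of that usage mapping, assuming the Chat Completions usage shape shown above:
+
+```ts
+// Sketch: build the Anthropic usage object emitted with message_start.
+// cache_creation_input_tokens stays 0 (see the table below).
+function toAnthropicUsage(usage: {
+  prompt_tokens: number
+  completion_tokens: number
+  prompt_tokens_details?: { cached_tokens?: number }
+}) {
+  return {
+    input_tokens: usage.prompt_tokens,
+    output_tokens: usage.completion_tokens,
+    cache_read_input_tokens: usage.prompt_tokens_details?.cached_tokens ?? 0,
+    cache_creation_input_tokens: 0,
+  }
+}
+```
+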
+### Tool Use
+
+The OpenAI function calling format is fully supported. All local tools (Bash, FileEdit, Grep, Glob, Agent, etc.) work transparently: they communicate via JSON input and output, independent of the wire format.
+
+Tool arguments are streamed as `input_json_delta` chunks, which downstream code concatenates and parses.
+
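+Downstream assembly is just string concatenation followed by a single parse, e.g.:
+
+```ts
+// Illustrative only: consumers join input_json_delta fragments and parse
+// once the tool_use block stops.
+const fragments = ['{"comm', 'and":"ls"}']
+const input = JSON.parse(fragments.join('')) // → { command: 'ls' }
+```
+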
+### Unsupported Features
+
+| Feature | Strategy |
+|---|---|
+| Beta Headers | not sent |
+| Server Tools (advisor) | not sent |
+| Structured Output | not sent |
+| Fast Mode / Effort | not sent |
+| Tool Search / defer_loading | not enabled; all tools are sent directly |
+| Anthropic Signature | the thinking block's `signature` field is an empty string |
+| cache_creation_input_tokens | always 0 (OpenAI does not distinguish cache creation from reads) |
+
+## Testing
+
+```bash
+# Run all OpenAI adapter-layer tests
+bun test src/services/api/openai/__tests__/
+
+# Run individual suites
+bun test src/services/api/openai/__tests__/streamAdapter.test.ts # 14 tests (incl. thinking + caching)
+bun test src/services/api/openai/__tests__/convertMessages.test.ts # 10 tests
+bun test src/services/api/openai/__tests__/convertTools.test.ts # 7 tests
+bun test src/services/api/openai/__tests__/modelMapping.test.ts # 6 tests
+```
+
+Current test coverage: **39 tests / 73 assertions / 0 fail**.
+
+### Test Coverage Matrix
+
+| Feature | convertMessages | convertTools | streamAdapter | modelMapping |
+|---|---|---|---|---|
+| Text message conversion | ✅ | | | |
+| tool_use conversion | ✅ | | | |
+| tool_result conversion | ✅ | | | |
+| thinking stripping | ✅ | | | |
+| Full conversation flow | ✅ | | | |
+| Tool schema conversion | | ✅ | | |
+| tool_choice mapping | | ✅ | | |
+| Plain text stream | | | ✅ | |
+| Tool-call stream | | | ✅ | |
+| Mixed text + tools | | | ✅ | |
+| finish_reason mapping | | | ✅ | |
+| thinking stream | | | ✅ | |
+| thinking → text transition | | | ✅ | |
+| thinking → tool_use transition | | | ✅ | |
+| Block index correctness | | | ✅ | |
+| cached_tokens mapping | | | ✅ | |
+| OPENAI_MODEL override | | | | ✅ |
+| Default model mapping | | | | ✅ |
+| Unknown model passthrough | | | | ✅ |
+| [1m] suffix stripping | | | | ✅ |
+
+## End-to-End Verification
+
+```bash
+# 1. Install dependencies
+bun install
+
+# 2. Run the unit tests
+bun test src/services/api/openai/__tests__/
+
+# 3. Connect to a real endpoint (Ollama as an example)
+CLAUDE_CODE_USE_OPENAI=1 \
+OPENAI_API_KEY=ollama \
+OPENAI_BASE_URL=http://localhost:11434/v1 \
+OPENAI_MODEL=qwen2.5-coder-32b \
+bun run dev
+
+# 4. Connect to DeepSeek (exercises thinking support)
+CLAUDE_CODE_USE_OPENAI=1 \
+OPENAI_API_KEY=sk-xxx \
+OPENAI_BASE_URL=https://api.deepseek.com/v1 \
+OPENAI_MODEL=deepseek-reasoner \
+bun run dev
+
+# 5. Confirm existing tests are unaffected
+bun test # without CLAUDE_CODE_USE_OPENAI, the original path is taken
+```
+
+## Code Statistics
+
+| Category | Lines |
+|---|---|
+| New source | ~620 |
+| New tests | ~450 |
+| Changes to existing code | ~25 |
+| **Total** | **~1100** |
diff --git a/package.json b/package.json
index ca25ed5eb..20637d6fd 100644
--- a/package.json
+++ b/package.json
@@ -53,6 +53,7 @@
},
"dependencies": {},
"devDependencies": {
+ "openai": "^4.73.0",
"@alcalzone/ansi-tokenize": "^0.3.0",
"@ant/claude-for-chrome-mcp": "workspace:*",
"@ant/computer-use-input": "workspace:*",
diff --git a/src/components/ConsoleOAuthFlow.tsx b/src/components/ConsoleOAuthFlow.tsx
index 7d777cb18..03b6e0e7a 100644
--- a/src/components/ConsoleOAuthFlow.tsx
+++ b/src/components/ConsoleOAuthFlow.tsx
@@ -38,6 +38,15 @@ type OAuthStatus = {
opusModel: string;
activeField: 'base_url' | 'api_key' | 'haiku_model' | 'sonnet_model' | 'opus_model';
} // Custom platform: configure API endpoint and model names
+| {
+ state: 'openai_chat_api';
+ baseUrl: string;
+ apiKey: string;
+ haikuModel: string;
+ sonnetModel: string;
+ opusModel: string;
+ activeField: 'base_url' | 'api_key' | 'haiku_model' | 'sonnet_model' | 'opus_model';
+} // OpenAI Chat Completions API platform
| {
state: 'ready_to_start';
} // Flow started, waiting for browser to open
@@ -246,6 +255,8 @@ export function ConsoleOAuthFlow({
if (!orgResult.valid) {
throw new Error((orgResult as { valid: false; message: string }).message);
}
+ // Reset modelType to anthropic when using OAuth login
+ updateSettingsForSource('userSettings', { modelType: 'anthropic' } as any);
setOAuthStatus({
state: 'success'
});
@@ -416,6 +427,9 @@ function OAuthStatusMessage(t0) {
t6 = [{
label: Custom Platform ·{" "}Configure your own API endpoint{"\n"},
value: "custom_platform"
+ }, {
+ label: OpenAI Compatible ·{" "}Ollama, DeepSeek, vLLM, One API, etc.{"\n"},
+ value: "openai_chat_api"
}, t4, t5, {
label: 3rd-party platform ·{" "}Amazon Bedrock, Microsoft Foundry, or Vertex AI{"\n"},
value: "platform"
@@ -438,6 +452,17 @@ function OAuthStatusMessage(t0) {
opusModel: process.env.ANTHROPIC_DEFAULT_OPUS_MODEL ?? "",
activeField: "base_url"
});
+ } else if (value_0 === "openai_chat_api") {
+ logEvent("tengu_openai_chat_api_selected", {});
+ setOAuthStatus({
+ state: "openai_chat_api",
+ baseUrl: process.env.OPENAI_BASE_URL ?? "",
+ apiKey: process.env.OPENAI_API_KEY ?? "",
+ haikuModel: process.env.ANTHROPIC_DEFAULT_HAIKU_MODEL ?? "",
+ sonnetModel: process.env.ANTHROPIC_DEFAULT_SONNET_MODEL ?? "",
+ opusModel: process.env.ANTHROPIC_DEFAULT_OPUS_MODEL ?? "",
+ activeField: "base_url"
+ });
} else if (value_0 === "platform") {
logEvent("tengu_oauth_platform_selected", {});
setOAuthStatus({
@@ -568,7 +593,7 @@ function OAuthStatusMessage(t0) {
if (finalVals.haiku_model) env.ANTHROPIC_DEFAULT_HAIKU_MODEL = finalVals.haiku_model;
if (finalVals.sonnet_model) env.ANTHROPIC_DEFAULT_SONNET_MODEL = finalVals.sonnet_model;
if (finalVals.opus_model) env.ANTHROPIC_DEFAULT_OPUS_MODEL = finalVals.opus_model;
- const { error } = updateSettingsForSource('userSettings', { env } as any);
+ const { error } = updateSettingsForSource('userSettings', { modelType: 'anthropic' as any, env } as any);
if (error) {
setOAuthStatus({ state: 'error', message: `Failed to save: ${error.message}`, toRetry: { state: 'custom_platform', baseUrl: '', apiKey: '', haikuModel: '', sonnetModel: '', opusModel: '', activeField: 'base_url' } });
} else {
@@ -639,6 +664,107 @@ function OAuthStatusMessage(t0) {
Tab to switch · Enter on last field to save · Esc to go back
;
}
+ case "openai_chat_api":
+ {
+ type OpenAIField = 'base_url' | 'api_key' | 'haiku_model' | 'sonnet_model' | 'opus_model';
+ const OPENAI_FIELDS: OpenAIField[] = ['base_url', 'api_key', 'haiku_model', 'sonnet_model', 'opus_model'];
+ const op = oauthStatus as { state: 'openai_chat_api'; activeField: OpenAIField; baseUrl: string; apiKey: string; haikuModel: string; sonnetModel: string; opusModel: string };
+ const { activeField, baseUrl, apiKey, haikuModel, sonnetModel, opusModel } = op;
+ const openaiDisplayValues: Record<OpenAIField, string> = { base_url: baseUrl, api_key: apiKey, haiku_model: haikuModel, sonnet_model: sonnetModel, opus_model: opusModel };
+
+ const [openaiInputValue, setOpenaiInputValue] = useState(() => openaiDisplayValues[activeField]);
+ const [openaiInputCursorOffset, setOpenaiInputCursorOffset] = useState(() => openaiDisplayValues[activeField].length);
+
+ const buildOpenAIState = useCallback((field: OpenAIField, value: string, newActive?: OpenAIField) => {
+ const s = { state: 'openai_chat_api' as const, activeField: newActive ?? activeField, baseUrl, apiKey, haikuModel, sonnetModel, opusModel };
+ switch (field) {
+ case 'base_url': return { ...s, baseUrl: value };
+ case 'api_key': return { ...s, apiKey: value };
+ case 'haiku_model': return { ...s, haikuModel: value };
+ case 'sonnet_model': return { ...s, sonnetModel: value };
+ case 'opus_model': return { ...s, opusModel: value };
+ }
+ }, [activeField, baseUrl, apiKey, haikuModel, sonnetModel, opusModel]);
+
+ const doOpenAISave = useCallback(() => {
+ const finalVals = { ...openaiDisplayValues, [activeField]: openaiInputValue };
+ const env: Record<string, string> = {};
+ if (finalVals.base_url) env.OPENAI_BASE_URL = finalVals.base_url;
+ if (finalVals.api_key) env.OPENAI_API_KEY = finalVals.api_key;
+ if (finalVals.haiku_model) env.ANTHROPIC_DEFAULT_HAIKU_MODEL = finalVals.haiku_model;
+ if (finalVals.sonnet_model) env.ANTHROPIC_DEFAULT_SONNET_MODEL = finalVals.sonnet_model;
+ if (finalVals.opus_model) env.ANTHROPIC_DEFAULT_OPUS_MODEL = finalVals.opus_model;
+ const { error } = updateSettingsForSource('userSettings', { modelType: 'openai' as any, env } as any);
+ if (error) {
+ setOAuthStatus({ state: 'error', message: `Failed to save: ${error.message}`, toRetry: { state: 'openai_chat_api', baseUrl: '', apiKey: '', haikuModel: '', sonnetModel: '', opusModel: '', activeField: 'base_url' } });
+ } else {
+ for (const [k, v] of Object.entries(env)) process.env[k] = v;
+ setOAuthStatus({ state: 'success' });
+ void onDone();
+ }
+ }, [activeField, openaiInputValue, openaiDisplayValues, setOAuthStatus, onDone]);
+
+ const handleOpenAIEnter = useCallback(() => {
+ const idx = OPENAI_FIELDS.indexOf(activeField);
+ setOAuthStatus(buildOpenAIState(activeField, openaiInputValue));
+ if (idx === OPENAI_FIELDS.length - 1) {
+ doOpenAISave();
+ } else {
+ const next = OPENAI_FIELDS[idx + 1]!;
+ setOpenaiInputValue(openaiDisplayValues[next] ?? '');
+ setOpenaiInputCursorOffset((openaiDisplayValues[next] ?? '').length);
+ }
+ }, [activeField, openaiInputValue, buildOpenAIState, doOpenAISave, openaiDisplayValues, setOAuthStatus]);
+
+ useKeybinding('tabs:next', () => {
+ const idx = OPENAI_FIELDS.indexOf(activeField);
+ if (idx < OPENAI_FIELDS.length - 1) {
+ setOAuthStatus(buildOpenAIState(activeField, openaiInputValue, OPENAI_FIELDS[idx + 1]));
+ setOpenaiInputValue(openaiDisplayValues[OPENAI_FIELDS[idx + 1]!] ?? '');
+ setOpenaiInputCursorOffset((openaiDisplayValues[OPENAI_FIELDS[idx + 1]!] ?? '').length);
+ }
+ }, { context: 'Tabs' });
+ useKeybinding('tabs:previous', () => {
+ const idx = OPENAI_FIELDS.indexOf(activeField);
+ if (idx > 0) {
+ setOAuthStatus(buildOpenAIState(activeField, openaiInputValue, OPENAI_FIELDS[idx - 1]));
+ setOpenaiInputValue(openaiDisplayValues[OPENAI_FIELDS[idx - 1]!] ?? '');
+ setOpenaiInputCursorOffset((openaiDisplayValues[OPENAI_FIELDS[idx - 1]!] ?? '').length);
+ }
+ }, { context: 'Tabs' });
+ useKeybinding('confirm:no', () => {
+ setOAuthStatus({ state: 'idle' });
+ }, { context: 'Confirmation' });
+
+ const openaiColumns = useTerminalSize().columns - 20;
+
+ const renderOpenAIRow = (field: OpenAIField, label: string, opts?: { mask?: boolean }) => {
+ const active = activeField === field;
+ const val = openaiDisplayValues[field];
+ return
+ {` ${label} `}
+
+ {active
+ ?
+ : (val
+ ? {opts?.mask ? val.slice(0, 8) + '·'.repeat(Math.max(0, val.length - 8)) : val}
+ : null)}
+ ;
+ };
+
+ return
+ OpenAI Compatible API Setup
+ Configure an OpenAI Chat Completions compatible endpoint (e.g. Ollama, DeepSeek, vLLM).
+
+ {renderOpenAIRow('base_url', 'Base URL ')}
+ {renderOpenAIRow('api_key', 'API Key ', { mask: true })}
+ {renderOpenAIRow('haiku_model', 'Haiku ')}
+ {renderOpenAIRow('sonnet_model', 'Sonnet ')}
+ {renderOpenAIRow('opus_model', 'Opus ')}
+
+ Tab to switch · Enter on last field to save · Esc to go back
+ ;
+ }
case "waiting_for_login":
{
let t1;
diff --git a/src/services/api/claude.ts b/src/services/api/claude.ts
index fd4f25110..bc6f380f2 100644
--- a/src/services/api/claude.ts
+++ b/src/services/api/claude.ts
@@ -1301,6 +1301,15 @@ async function* queryModel(
API_MAX_MEDIA_PER_REQUEST,
)
+ // OpenAI-compatible provider: delegate to the OpenAI adapter layer
+ // after shared preprocessing (message normalization, tool filtering,
+ // media stripping) but before Anthropic-specific logic (betas, thinking, caching).
+ if (getAPIProvider() === 'openai') {
+ const { queryModelOpenAI } = await import('./openai/index.js')
+ yield* queryModelOpenAI(messagesForAPI, systemPrompt, filteredTools, signal, options)
+ return
+ }
+
// Instrumentation: Track message count after normalization
logEvent('tengu_api_after_normalize', {
postNormalizedMessageCount: messagesForAPI.length,
diff --git a/src/services/api/openai/__tests__/convertMessages.test.ts b/src/services/api/openai/__tests__/convertMessages.test.ts
new file mode 100644
index 000000000..0e69f1ca8
--- /dev/null
+++ b/src/services/api/openai/__tests__/convertMessages.test.ts
@@ -0,0 +1,157 @@
+import { describe, expect, test } from 'bun:test'
+import { anthropicMessagesToOpenAI } from '../convertMessages.js'
+import type { UserMessage, AssistantMessage } from '../../../../types/message.js'
+
+// Helpers to create internal-format messages
+function makeUserMsg(content: string | any[]): UserMessage {
+ return {
+ type: 'user',
+ uuid: '00000000-0000-0000-0000-000000000000',
+ message: { role: 'user', content },
+ } as UserMessage
+}
+
+function makeAssistantMsg(content: string | any[]): AssistantMessage {
+ return {
+ type: 'assistant',
+ uuid: '00000000-0000-0000-0000-000000000001',
+ message: { role: 'assistant', content },
+ } as AssistantMessage
+}
+
+describe('anthropicMessagesToOpenAI', () => {
+ test('converts system prompt to system message', () => {
+ const result = anthropicMessagesToOpenAI(
+ [makeUserMsg('hello')],
+ ['You are helpful.'] as any,
+ )
+ expect(result[0]).toEqual({ role: 'system', content: 'You are helpful.' })
+ })
+
+ test('joins multiple system prompt strings', () => {
+ const result = anthropicMessagesToOpenAI(
+ [makeUserMsg('hi')],
+ ['Part 1', 'Part 2'] as any,
+ )
+ expect(result[0]).toEqual({ role: 'system', content: 'Part 1\n\nPart 2' })
+ })
+
+ test('skips empty system prompt', () => {
+ const result = anthropicMessagesToOpenAI(
+ [makeUserMsg('hi')],
+ [] as any,
+ )
+ expect(result[0].role).toBe('user')
+ })
+
+ test('converts simple user text message', () => {
+ const result = anthropicMessagesToOpenAI(
+ [makeUserMsg('hello world')],
+ [] as any,
+ )
+ expect(result).toEqual([{ role: 'user', content: 'hello world' }])
+ })
+
+ test('converts user message with content array', () => {
+ const result = anthropicMessagesToOpenAI(
+ [makeUserMsg([
+ { type: 'text', text: 'line 1' },
+ { type: 'text', text: 'line 2' },
+ ])],
+ [] as any,
+ )
+ expect(result).toEqual([{ role: 'user', content: 'line 1\nline 2' }])
+ })
+
+ test('converts assistant message with text', () => {
+ const result = anthropicMessagesToOpenAI(
+ [makeAssistantMsg('response text')],
+ [] as any,
+ )
+ expect(result).toEqual([{ role: 'assistant', content: 'response text' }])
+ })
+
+ test('converts assistant message with tool_use', () => {
+ const result = anthropicMessagesToOpenAI(
+ [makeAssistantMsg([
+ { type: 'text', text: 'Let me help.' },
+ {
+ type: 'tool_use' as const,
+ id: 'toolu_123',
+ name: 'bash',
+ input: { command: 'ls' },
+ },
+ ])],
+ [] as any,
+ )
+ expect(result).toEqual([{
+ role: 'assistant',
+ content: 'Let me help.',
+ tool_calls: [{
+ id: 'toolu_123',
+ type: 'function',
+ function: { name: 'bash', arguments: '{"command":"ls"}' },
+ }],
+ }])
+ })
+
+ test('converts tool_result to tool message', () => {
+ const result = anthropicMessagesToOpenAI(
+ [makeUserMsg([
+ {
+ type: 'tool_result' as const,
+ tool_use_id: 'toolu_123',
+ content: 'file1.txt\nfile2.txt',
+ },
+ ])],
+ [] as any,
+ )
+ expect(result).toEqual([{
+ role: 'tool',
+ tool_call_id: 'toolu_123',
+ content: 'file1.txt\nfile2.txt',
+ }])
+ })
+
+ test('strips thinking blocks', () => {
+ const result = anthropicMessagesToOpenAI(
+ [makeAssistantMsg([
+ { type: 'thinking' as const, thinking: 'internal thoughts...' },
+ { type: 'text', text: 'visible response' },
+ ])],
+ [] as any,
+ )
+ expect(result).toEqual([{ role: 'assistant', content: 'visible response' }])
+ })
+
+ test('handles full conversation with tools', () => {
+ const result = anthropicMessagesToOpenAI(
+ [
+ makeUserMsg('list files'),
+ makeAssistantMsg([
+ {
+ type: 'tool_use' as const,
+ id: 'toolu_abc',
+ name: 'bash',
+ input: { command: 'ls' },
+ },
+ ]),
+ makeUserMsg([
+ {
+ type: 'tool_result' as const,
+ tool_use_id: 'toolu_abc',
+ content: 'file.txt',
+ },
+ ]),
+ ],
+ ['You are helpful.'] as any,
+ )
+
+ expect(result).toHaveLength(4)
+ expect(result[0].role).toBe('system')
+ expect(result[1].role).toBe('user')
+ expect(result[2].role).toBe('assistant')
+ expect((result[2] as any).tool_calls).toBeDefined()
+ expect(result[3].role).toBe('tool')
+ })
+})
diff --git a/src/services/api/openai/__tests__/convertTools.test.ts b/src/services/api/openai/__tests__/convertTools.test.ts
new file mode 100644
index 000000000..847c63ce8
--- /dev/null
+++ b/src/services/api/openai/__tests__/convertTools.test.ts
@@ -0,0 +1,85 @@
+import { describe, expect, test } from 'bun:test'
+import { anthropicToolsToOpenAI, anthropicToolChoiceToOpenAI } from '../convertTools.js'
+
+describe('anthropicToolsToOpenAI', () => {
+ test('converts basic tool', () => {
+ const tools = [
+ {
+ type: 'custom',
+ name: 'bash',
+ description: 'Run a bash command',
+ input_schema: {
+ type: 'object',
+ properties: { command: { type: 'string' } },
+ required: ['command'],
+ },
+ },
+ ]
+
+ const result = anthropicToolsToOpenAI(tools as any)
+
+ expect(result).toEqual([{
+ type: 'function',
+ function: {
+ name: 'bash',
+ description: 'Run a bash command',
+ parameters: {
+ type: 'object',
+ properties: { command: { type: 'string' } },
+ required: ['command'],
+ },
+ },
+ }])
+ })
+
+ test('uses empty schema when input_schema missing', () => {
+ const tools = [{ type: 'custom', name: 'noop', description: 'no-op' }]
+ const result = anthropicToolsToOpenAI(tools as any)
+
+ expect(result[0].function.parameters).toEqual({ type: 'object', properties: {} })
+ })
+
+ test('strips Anthropic-specific fields', () => {
+ const tools = [
+ {
+ type: 'custom',
+ name: 'bash',
+ description: 'Run bash',
+ input_schema: { type: 'object', properties: {} },
+ cache_control: { type: 'ephemeral' },
+ defer_loading: true,
+ },
+ ]
+ const result = anthropicToolsToOpenAI(tools as any)
+
+ expect((result[0] as any).cache_control).toBeUndefined()
+ expect((result[0] as any).defer_loading).toBeUndefined()
+ })
+
+ test('handles empty tools array', () => {
+ expect(anthropicToolsToOpenAI([])).toEqual([])
+ })
+})
+
+describe('anthropicToolChoiceToOpenAI', () => {
+ test('maps auto', () => {
+ expect(anthropicToolChoiceToOpenAI({ type: 'auto' })).toBe('auto')
+ })
+
+ test('maps any to required', () => {
+ expect(anthropicToolChoiceToOpenAI({ type: 'any' })).toBe('required')
+ })
+
+ test('maps tool to function', () => {
+ const result = anthropicToolChoiceToOpenAI({ type: 'tool', name: 'bash' })
+ expect(result).toEqual({ type: 'function', function: { name: 'bash' } })
+ })
+
+ test('returns undefined for undefined input', () => {
+ expect(anthropicToolChoiceToOpenAI(undefined)).toBeUndefined()
+ })
+
+ test('returns undefined for unknown type', () => {
+ expect(anthropicToolChoiceToOpenAI({ type: 'unknown' })).toBeUndefined()
+ })
+})
diff --git a/src/services/api/openai/__tests__/modelMapping.test.ts b/src/services/api/openai/__tests__/modelMapping.test.ts
new file mode 100644
index 000000000..89bf976ac
--- /dev/null
+++ b/src/services/api/openai/__tests__/modelMapping.test.ts
@@ -0,0 +1,62 @@
+import { describe, expect, test, beforeEach, afterEach } from 'bun:test'
+import { resolveOpenAIModel } from '../modelMapping.js'
+
+describe('resolveOpenAIModel', () => {
+ const originalEnv = {
+ OPENAI_MODEL: process.env.OPENAI_MODEL,
+ ANTHROPIC_DEFAULT_HAIKU_MODEL: process.env.ANTHROPIC_DEFAULT_HAIKU_MODEL,
+ ANTHROPIC_DEFAULT_SONNET_MODEL: process.env.ANTHROPIC_DEFAULT_SONNET_MODEL,
+ ANTHROPIC_DEFAULT_OPUS_MODEL: process.env.ANTHROPIC_DEFAULT_OPUS_MODEL,
+ }
+
+ beforeEach(() => {
+ delete process.env.OPENAI_MODEL
+ delete process.env.ANTHROPIC_DEFAULT_HAIKU_MODEL
+ delete process.env.ANTHROPIC_DEFAULT_SONNET_MODEL
+ delete process.env.ANTHROPIC_DEFAULT_OPUS_MODEL
+ })
+
+ afterEach(() => {
+ Object.assign(process.env, originalEnv)
+ })
+
+ test('OPENAI_MODEL env var overrides all', () => {
+ process.env.OPENAI_MODEL = 'my-custom-model'
+ expect(resolveOpenAIModel('claude-sonnet-4-6')).toBe('my-custom-model')
+ })
+
+ test('ANTHROPIC_DEFAULT_SONNET_MODEL overrides default map', () => {
+ process.env.ANTHROPIC_DEFAULT_SONNET_MODEL = 'my-sonnet'
+ expect(resolveOpenAIModel('claude-sonnet-4-6')).toBe('my-sonnet')
+ })
+
+ test('ANTHROPIC_DEFAULT_HAIKU_MODEL overrides default map', () => {
+ process.env.ANTHROPIC_DEFAULT_HAIKU_MODEL = 'my-haiku'
+ expect(resolveOpenAIModel('claude-haiku-4-5-20251001')).toBe('my-haiku')
+ })
+
+ test('ANTHROPIC_DEFAULT_OPUS_MODEL overrides default map', () => {
+ process.env.ANTHROPIC_DEFAULT_OPUS_MODEL = 'my-opus'
+ expect(resolveOpenAIModel('claude-opus-4-6')).toBe('my-opus')
+ })
+
+ test('maps known Anthropic model via DEFAULT_MODEL_MAP', () => {
+ expect(resolveOpenAIModel('claude-sonnet-4-6')).toBe('gpt-4o')
+ })
+
+ test('maps haiku model', () => {
+ expect(resolveOpenAIModel('claude-haiku-4-5-20251001')).toBe('gpt-4o-mini')
+ })
+
+ test('maps opus model', () => {
+ expect(resolveOpenAIModel('claude-opus-4-6')).toBe('o3')
+ })
+
+ test('passes through unknown model name', () => {
+ expect(resolveOpenAIModel('some-random-model')).toBe('some-random-model')
+ })
+
+ test('strips [1m] suffix', () => {
+ expect(resolveOpenAIModel('claude-sonnet-4-6[1m]')).toBe('gpt-4o')
+ })
+})
diff --git a/src/services/api/openai/__tests__/streamAdapter.test.ts b/src/services/api/openai/__tests__/streamAdapter.test.ts
new file mode 100644
index 000000000..bf3b9278d
--- /dev/null
+++ b/src/services/api/openai/__tests__/streamAdapter.test.ts
@@ -0,0 +1,434 @@
+import { describe, expect, test } from 'bun:test'
+import { adaptOpenAIStreamToAnthropic } from '../streamAdapter.js'
+import type { ChatCompletionChunk } from 'openai/resources/chat/completions/completions.mjs'
+
+/** Helper to create a mock async iterable from chunk array */
+function mockStream(chunks: ChatCompletionChunk[]): AsyncIterable<ChatCompletionChunk> {
+ return {
+ [Symbol.asyncIterator]() {
+ let i = 0
+ return {
+ async next() {
+ if (i >= chunks.length) return { done: true, value: undefined }
+ return { done: false, value: chunks[i++] }
+ },
+ }
+ },
+ }
+}
+
+/** Create a minimal ChatCompletionChunk */
+function makeChunk(overrides: Partial<ChatCompletionChunk> & any = {}): ChatCompletionChunk {
+ return {
+ id: 'chatcmpl-test',
+ object: 'chat.completion.chunk',
+ created: 1234567890,
+ model: 'gpt-4o',
+ choices: [],
+ ...overrides,
+ } as ChatCompletionChunk
+}
+
+async function collectEvents(chunks: ChatCompletionChunk[]) {
+ const events: any[] = []
+ for await (const event of adaptOpenAIStreamToAnthropic(mockStream(chunks), 'gpt-4o')) {
+ events.push(event)
+ }
+ return events
+}
+
+describe('adaptOpenAIStreamToAnthropic', () => {
+ test('emits message_start on first chunk', async () => {
+ const events = await collectEvents([
+ makeChunk({
+ choices: [{
+ index: 0,
+ delta: { role: 'assistant', content: '' },
+ finish_reason: null,
+ }],
+ }),
+ makeChunk({
+ choices: [{
+ index: 0,
+ delta: { content: 'hello' },
+ finish_reason: null,
+ }],
+ }),
+ makeChunk({
+ choices: [{
+ index: 0,
+ delta: {},
+ finish_reason: 'stop',
+ }],
+ usage: { prompt_tokens: 10, completion_tokens: 5, total_tokens: 15 },
+ }),
+ ])
+
+ expect(events[0].type).toBe('message_start')
+ expect(events[0].message.role).toBe('assistant')
+ expect(events[0].message.model).toBe('gpt-4o')
+ })
+
+ test('converts text content stream', async () => {
+ const events = await collectEvents([
+ makeChunk({
+ choices: [{ index: 0, delta: { content: 'Hello' }, finish_reason: null }],
+ }),
+ makeChunk({
+ choices: [{ index: 0, delta: { content: ' world' }, finish_reason: null }],
+ }),
+ makeChunk({
+ choices: [{ index: 0, delta: {}, finish_reason: 'stop' }],
+ }),
+ ])
+
+ const types = events.map(e => e.type)
+ expect(types).toContain('message_start')
+ expect(types).toContain('content_block_start')
+ expect(types.filter(t => t === 'content_block_delta').length).toBe(2)
+ expect(types).toContain('content_block_stop')
+ expect(types).toContain('message_delta')
+ expect(types).toContain('message_stop')
+
+ const textDeltas = events.filter(e => e.type === 'content_block_delta') as any[]
+ expect(textDeltas[0].delta.text).toBe('Hello')
+ expect(textDeltas[1].delta.text).toBe(' world')
+ })
+
+ test('converts tool_calls stream', async () => {
+ const events = await collectEvents([
+ makeChunk({
+ choices: [{
+ index: 0,
+ delta: {
+ tool_calls: [{
+ index: 0,
+ id: 'call_abc',
+ type: 'function',
+ function: { name: 'bash', arguments: '' },
+ }],
+ },
+ finish_reason: null,
+ }],
+ }),
+ makeChunk({
+ choices: [{
+ index: 0,
+ delta: {
+ tool_calls: [{
+ index: 0,
+ function: { arguments: '{"comm' },
+ }],
+ },
+ finish_reason: null,
+ }],
+ }),
+ makeChunk({
+ choices: [{
+ index: 0,
+ delta: {
+ tool_calls: [{
+ index: 0,
+ function: { arguments: 'and":"ls"}' },
+ }],
+ },
+ finish_reason: null,
+ }],
+ }),
+ makeChunk({
+ choices: [{ index: 0, delta: {}, finish_reason: 'tool_calls' }],
+ }),
+ ])
+
+ const blockStart = events.find(e => e.type === 'content_block_start') as any
+ expect(blockStart.content_block.type).toBe('tool_use')
+ expect(blockStart.content_block.name).toBe('bash')
+
+ const jsonDeltas = events.filter(
+ e => e.type === 'content_block_delta' && e.delta.type === 'input_json_delta',
+ ) as any[]
+ const fullArgs = jsonDeltas.map(d => d.delta.partial_json).join('')
+ expect(fullArgs).toBe('{"command":"ls"}')
+ })
+
+ test('maps finish_reason stop to end_turn', async () => {
+ const events = await collectEvents([
+ makeChunk({
+ choices: [{ index: 0, delta: { content: 'hi' }, finish_reason: null }],
+ }),
+ makeChunk({
+ choices: [{ index: 0, delta: {}, finish_reason: 'stop' }],
+ }),
+ ])
+
+ const msgDelta = events.find(e => e.type === 'message_delta') as any
+ expect(msgDelta.delta.stop_reason).toBe('end_turn')
+ })
+
+ test('maps finish_reason tool_calls to tool_use', async () => {
+ const events = await collectEvents([
+ makeChunk({
+ choices: [{
+ index: 0,
+ delta: {
+ tool_calls: [{ index: 0, id: 'call_1', function: { name: 'bash', arguments: '{}' } }],
+ },
+ finish_reason: null,
+ }],
+ }),
+ makeChunk({
+ choices: [{ index: 0, delta: {}, finish_reason: 'tool_calls' }],
+ }),
+ ])
+
+ const msgDelta = events.find(e => e.type === 'message_delta') as any
+ expect(msgDelta.delta.stop_reason).toBe('tool_use')
+ })
+
+ test('maps finish_reason length to max_tokens', async () => {
+ const events = await collectEvents([
+ makeChunk({
+ choices: [{ index: 0, delta: { content: 'truncated' }, finish_reason: null }],
+ }),
+ makeChunk({
+ choices: [{ index: 0, delta: {}, finish_reason: 'length' }],
+ }),
+ ])
+
+ const msgDelta = events.find(e => e.type === 'message_delta') as any
+ expect(msgDelta.delta.stop_reason).toBe('max_tokens')
+ })
+
+ test('handles mixed text and tool_calls', async () => {
+ const events = await collectEvents([
+ makeChunk({
+ choices: [{ index: 0, delta: { content: 'Thinking...' }, finish_reason: null }],
+ }),
+ makeChunk({
+ choices: [{
+ index: 0,
+ delta: {
+ tool_calls: [{ index: 0, id: 'call_1', function: { name: 'grep', arguments: '{"p":"test"}' } }],
+ },
+ finish_reason: null,
+ }],
+ }),
+ makeChunk({
+ choices: [{ index: 0, delta: {}, finish_reason: 'tool_calls' }],
+ }),
+ ])
+
+ const blockStarts = events.filter(e => e.type === 'content_block_start') as any[]
+ expect(blockStarts.length).toBe(2)
+ expect(blockStarts[0].content_block.type).toBe('text')
+ expect(blockStarts[1].content_block.type).toBe('tool_use')
+ })
+})
+
+describe('thinking support (reasoning_content)', () => {
+ test('converts reasoning_content to thinking block', async () => {
+ const events = await collectEvents([
+ makeChunk({
+ choices: [{
+ index: 0,
+ delta: { reasoning_content: 'Let me analyze this...' },
+ finish_reason: null,
+ }],
+ }),
+ makeChunk({
+ choices: [{
+ index: 0,
+ delta: { reasoning_content: ' step by step.' },
+ finish_reason: null,
+ }],
+ }),
+ makeChunk({
+ choices: [{ index: 0, delta: {}, finish_reason: 'stop' }],
+ }),
+ ])
+
+ // Should have a thinking content block
+ const blockStart = events.find(e => e.type === 'content_block_start') as any
+ expect(blockStart.content_block.type).toBe('thinking')
+ expect(blockStart.content_block.signature).toBe('')
+
+ // Should have thinking_delta events
+ const thinkingDeltas = events.filter(
+ e => e.type === 'content_block_delta' && e.delta.type === 'thinking_delta',
+ ) as any[]
+ expect(thinkingDeltas.length).toBe(2)
+ expect(thinkingDeltas[0].delta.thinking).toBe('Let me analyze this...')
+ expect(thinkingDeltas[1].delta.thinking).toBe(' step by step.')
+ })
+
+ test('converts reasoning then content (DeepSeek-style)', async () => {
+ const events = await collectEvents([
+ makeChunk({
+ choices: [{
+ index: 0,
+ delta: { reasoning_content: 'Thinking about the answer...' },
+ finish_reason: null,
+ }],
+ }),
+ makeChunk({
+ choices: [{
+ index: 0,
+ delta: { content: 'Here is my answer.' },
+ finish_reason: null,
+ }],
+ }),
+ makeChunk({
+ choices: [{ index: 0, delta: {}, finish_reason: 'stop' }],
+ }),
+ ])
+
+ // Should have two content blocks: thinking + text
+ const blockStarts = events.filter(e => e.type === 'content_block_start') as any[]
+ expect(blockStarts.length).toBe(2)
+ expect(blockStarts[0].content_block.type).toBe('thinking')
+ expect(blockStarts[1].content_block.type).toBe('text')
+
+ // Thinking block should be closed before text block starts
+ const blockStops = events.filter(e => e.type === 'content_block_stop') as any[]
+ expect(blockStops[0].index).toBe(0) // thinking block closed at index 0
+ expect(blockStarts[1].index).toBe(1) // text block starts at index 1
+
+ // Verify text delta
+ const textDelta = events.find(
+ e => e.type === 'content_block_delta' && e.delta.type === 'text_delta',
+ ) as any
+ expect(textDelta.delta.text).toBe('Here is my answer.')
+ })
+
+ test('handles reasoning then tool_calls', async () => {
+ const events = await collectEvents([
+ makeChunk({
+ choices: [{
+ index: 0,
+ delta: { reasoning_content: 'I need to run a command.' },
+ finish_reason: null,
+ }],
+ }),
+ makeChunk({
+ choices: [{
+ index: 0,
+ delta: {
+ tool_calls: [{ index: 0, id: 'call_1', function: { name: 'bash', arguments: '{"c":"ls"}' } }],
+ },
+ finish_reason: null,
+ }],
+ }),
+ makeChunk({
+ choices: [{ index: 0, delta: {}, finish_reason: 'tool_calls' }],
+ }),
+ ])
+
+ const blockStarts = events.filter(e => e.type === 'content_block_start') as any[]
+ expect(blockStarts.length).toBe(2)
+ expect(blockStarts[0].content_block.type).toBe('thinking')
+ expect(blockStarts[1].content_block.type).toBe('tool_use')
+ })
+
+ test('thinking block index is 0, text block index is 1', async () => {
+ const events = await collectEvents([
+ makeChunk({
+ choices: [{
+ index: 0,
+ delta: { reasoning_content: 'reason' },
+ finish_reason: null,
+ }],
+ }),
+ makeChunk({
+ choices: [{
+ index: 0,
+ delta: { content: 'answer' },
+ finish_reason: null,
+ }],
+ }),
+ makeChunk({
+ choices: [{ index: 0, delta: {}, finish_reason: 'stop' }],
+ }),
+ ])
+
+ const blockStarts = events.filter(e => e.type === 'content_block_start') as any[]
+ expect(blockStarts[0].index).toBe(0)
+ expect(blockStarts[1].index).toBe(1)
+ })
+})
+
+describe('prompt caching support', () => {
+ test('maps cached_tokens to cache_read_input_tokens', async () => {
+ const events = await collectEvents([
+ makeChunk({
+ choices: [{
+ index: 0,
+ delta: { content: 'hi' },
+ finish_reason: null,
+ }],
+ usage: {
+ prompt_tokens: 1000,
+ completion_tokens: 0,
+ total_tokens: 1000,
+ prompt_tokens_details: { cached_tokens: 800 },
+ } as any,
+ }),
+ makeChunk({
+ choices: [{ index: 0, delta: {}, finish_reason: 'stop' }],
+ usage: {
+ prompt_tokens: 1000,
+ completion_tokens: 50,
+ total_tokens: 1050,
+ prompt_tokens_details: { cached_tokens: 800 },
+ } as any,
+ }),
+ ])
+
+ const msgStart = events.find(e => e.type === 'message_start') as any
+ expect(msgStart.message.usage.cache_read_input_tokens).toBe(800)
+ expect(msgStart.message.usage.input_tokens).toBe(1000)
+ })
+
+ test('defaults cache_read_input_tokens to 0 when no cached_tokens', async () => {
+ const events = await collectEvents([
+ makeChunk({
+ choices: [{ index: 0, delta: { content: 'hi' }, finish_reason: null }],
+ usage: { prompt_tokens: 100, completion_tokens: 0, total_tokens: 100 },
+ }),
+ makeChunk({
+ choices: [{ index: 0, delta: {}, finish_reason: 'stop' }],
+ }),
+ ])
+
+ const msgStart = events.find(e => e.type === 'message_start') as any
+ expect(msgStart.message.usage.cache_read_input_tokens).toBe(0)
+ expect(msgStart.message.usage.cache_creation_input_tokens).toBe(0)
+ })
+
+  test('message_start usage excludes cached_tokens that only appear in later chunks', async () => {
+ const events = await collectEvents([
+ makeChunk({
+ choices: [{ index: 0, delta: { content: 'hi' }, finish_reason: null }],
+ usage: {
+ prompt_tokens: 500,
+ completion_tokens: 0,
+ total_tokens: 500,
+ } as any,
+ }),
+ makeChunk({
+ choices: [{ index: 0, delta: {}, finish_reason: 'stop' }],
+ usage: {
+ prompt_tokens: 500,
+ completion_tokens: 10,
+ total_tokens: 510,
+ prompt_tokens_details: { cached_tokens: 300 },
+ } as any,
+ }),
+ ])
+
+ const msgStart = events.find(e => e.type === 'message_start') as any
+    // message_start is emitted on the first chunk, which carried no
+    // cached_tokens, so its usage snapshot reports 0 (not the later 300)
+ expect(msgStart.message.usage.cache_read_input_tokens).toBe(0)
+ expect(msgStart.message.usage.input_tokens).toBe(500)
+ })
+})
diff --git a/src/services/api/openai/client.ts b/src/services/api/openai/client.ts
new file mode 100644
index 000000000..111e8a330
--- /dev/null
+++ b/src/services/api/openai/client.ts
@@ -0,0 +1,53 @@
+import OpenAI from 'openai'
+import { getProxyFetchOptions } from '../../../utils/proxy.js'
+
+/**
+ * Environment variables:
+ *
+ * OPENAI_API_KEY: Required. API key for the OpenAI-compatible endpoint.
+ * OPENAI_BASE_URL: Recommended. Base URL for the endpoint (e.g. http://localhost:11434/v1).
+ * OPENAI_ORG_ID: Optional. Organization ID.
+ * OPENAI_PROJECT_ID: Optional. Project ID.
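+ *
+ * Example (illustrative Ollama setup, as in the DEV-LOG entry above):
+ *   OPENAI_BASE_URL=http://localhost:11434/v1
+ *   OPENAI_API_KEY=ollama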
+ */
+
+let cachedClient: OpenAI | null = null
+
+export function getOpenAIClient(options?: {
+ maxRetries?: number
+ fetchOverride?: typeof fetch
+ source?: string
+}): OpenAI {
+ if (cachedClient) return cachedClient
+
+ const apiKey = process.env.OPENAI_API_KEY || ''
+ const baseURL = process.env.OPENAI_BASE_URL
+
+ const client = new OpenAI({
+ apiKey,
+ ...(baseURL && { baseURL }),
+ maxRetries: options?.maxRetries ?? 0,
+ timeout: parseInt(process.env.API_TIMEOUT_MS || String(600 * 1000), 10),
+ dangerouslyAllowBrowser: true,
+ ...(process.env.OPENAI_ORG_ID && { organization: process.env.OPENAI_ORG_ID }),
+ ...(process.env.OPENAI_PROJECT_ID && { project: process.env.OPENAI_PROJECT_ID }),
+ fetchOptions: getProxyFetchOptions({ forAnthropicAPI: false }) as RequestInit,
+ ...(options?.fetchOverride && { fetch: options.fetchOverride }),
+ })
+
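+  // Cache only the default client; a client built with a custom fetch
+  // (e.g. in tests) must not leak into later calls.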
+ if (!options?.fetchOverride) {
+ cachedClient = client
+ }
+
+ return client
+}
+
+/** Clear the cached client (useful when env vars change). */
+export function clearOpenAIClientCache(): void {
+ cachedClient = null
+}
diff --git a/src/services/api/openai/convertMessages.ts b/src/services/api/openai/convertMessages.ts
new file mode 100644
index 000000000..63fe6c719
--- /dev/null
+++ b/src/services/api/openai/convertMessages.ts
@@ -0,0 +1,192 @@
+import type {
+ BetaContentBlockParam,
+ BetaToolResultBlockParam,
+ BetaToolUseBlock,
+} from '@anthropic-ai/sdk/resources/beta/messages/messages.mjs'
+import type {
+ ChatCompletionAssistantMessageParam,
+ ChatCompletionMessageParam,
+ ChatCompletionSystemMessageParam,
+ ChatCompletionToolMessageParam,
+ ChatCompletionUserMessageParam,
+} from 'openai/resources/chat/completions/completions.mjs'
+import type { AssistantMessage, UserMessage } from '../../../types/message.js'
+import type { SystemPrompt } from '../../../utils/systemPromptType.js'
+
+/**
+ * Convert internal (UserMessage | AssistantMessage)[] to OpenAI-format messages.
+ *
+ * Key conversions:
+ * - system prompt → role: "system" message prepended
+ * - tool_use blocks → tool_calls[] on assistant message
+ * - tool_result blocks → role: "tool" messages
+ * - thinking blocks → silently dropped
+ * - cache_control → stripped
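+ *
+ * Example (illustrative): an assistant message whose content is
+ *   [{ type: 'text', text: 'Running ls' },
+ *    { type: 'tool_use', id: 'toolu_1', name: 'bash', input: { command: 'ls' } }]
+ * becomes
+ *   { role: 'assistant', content: 'Running ls',
+ *     tool_calls: [{ id: 'toolu_1', type: 'function',
+ *                    function: { name: 'bash', arguments: '{"command":"ls"}' } }] }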
+ */
+export function anthropicMessagesToOpenAI(
+ messages: (UserMessage | AssistantMessage)[],
+ systemPrompt: SystemPrompt,
+): ChatCompletionMessageParam[] {
+ const result: ChatCompletionMessageParam[] = []
+
+ // Prepend system prompt as system message
+ const systemText = systemPromptToText(systemPrompt)
+ if (systemText) {
+ result.push({
+ role: 'system',
+ content: systemText,
+ } satisfies ChatCompletionSystemMessageParam)
+ }
+
+ for (const msg of messages) {
+ switch (msg.type) {
+ case 'user':
+ result.push(...convertInternalUserMessage(msg))
+ break
+ case 'assistant':
+ result.push(...convertInternalAssistantMessage(msg))
+ break
+ default:
+ break
+ }
+ }
+
+ return result
+}
+
+function systemPromptToText(systemPrompt: SystemPrompt): string {
+ if (!systemPrompt || systemPrompt.length === 0) return ''
+ return systemPrompt
+ .filter(Boolean)
+ .join('\n\n')
+}
+
+function convertInternalUserMessage(
+ msg: UserMessage,
+): ChatCompletionMessageParam[] {
+ const result: ChatCompletionMessageParam[] = []
+ const content = msg.message.content
+
+ if (typeof content === 'string') {
+ result.push({
+ role: 'user',
+ content,
+ } satisfies ChatCompletionUserMessageParam)
+ } else if (Array.isArray(content)) {
+ const textParts: string[] = []
+ const toolResults: BetaToolResultBlockParam[] = []
+
+ for (const block of content) {
+ if (typeof block === 'string') {
+ textParts.push(block)
+ } else if (block.type === 'text') {
+ textParts.push(block.text)
+ } else if (block.type === 'tool_result') {
+ toolResults.push(block as BetaToolResultBlockParam)
+ }
+ // Skip image, document, thinking, cache_edits, etc.
+ }
+
+ if (textParts.length > 0) {
+ result.push({
+ role: 'user',
+ content: textParts.join('\n'),
+ } satisfies ChatCompletionUserMessageParam)
+ }
+
+ for (const tr of toolResults) {
+ result.push(convertToolResult(tr))
+ }
+ }
+
+ return result
+}
+
+function convertToolResult(
+ block: BetaToolResultBlockParam,
+): ChatCompletionToolMessageParam {
+ let content: string
+ if (typeof block.content === 'string') {
+ content = block.content
+ } else if (Array.isArray(block.content)) {
+ content = block.content
+ .map(c => {
+ if (typeof c === 'string') return c
+ if ('text' in c) return c.text
+ return ''
+ })
+ .filter(Boolean)
+ .join('\n')
+ } else {
+ content = ''
+ }
+
+ return {
+ role: 'tool',
+ tool_call_id: block.tool_use_id,
+ content,
+ } satisfies ChatCompletionToolMessageParam
+}
+
+function convertInternalAssistantMessage(
+ msg: AssistantMessage,
+): ChatCompletionMessageParam[] {
+ const content = msg.message.content
+
+ if (typeof content === 'string') {
+ return [
+ {
+ role: 'assistant',
+ content,
+ } satisfies ChatCompletionAssistantMessageParam,
+ ]
+ }
+
+ if (!Array.isArray(content)) {
+ return [
+ {
+ role: 'assistant',
+ content: '',
+ } satisfies ChatCompletionAssistantMessageParam,
+ ]
+ }
+
+ const textParts: string[] = []
+  const toolCalls: NonNullable<ChatCompletionAssistantMessageParam['tool_calls']> = []
+
+ for (const block of content) {
+ if (typeof block === 'string') {
+ textParts.push(block)
+ } else if (block.type === 'text') {
+ textParts.push(block.text)
+ } else if (block.type === 'tool_use') {
+ const tu = block as BetaToolUseBlock
+ toolCalls.push({
+ id: tu.id,
+ type: 'function',
+ function: {
+ name: tu.name,
+ arguments:
+ typeof tu.input === 'string' ? tu.input : JSON.stringify(tu.input),
+ },
+ })
+ }
+ // Skip thinking, redacted_thinking, server_tool_use, etc.
+ }
+
+ const result: ChatCompletionAssistantMessageParam = {
+ role: 'assistant',
+ content: textParts.length > 0 ? textParts.join('\n') : null,
+ ...(toolCalls.length > 0 && { tool_calls: toolCalls }),
+ }
+
+ return [result]
+}
diff --git a/src/services/api/openai/convertTools.ts b/src/services/api/openai/convertTools.ts
new file mode 100644
index 000000000..4e7d4864f
--- /dev/null
+++ b/src/services/api/openai/convertTools.ts
@@ -0,0 +1,75 @@
+import type { BetaToolUnion } from '@anthropic-ai/sdk/resources/beta/messages/messages.mjs'
+import type { ChatCompletionTool } from 'openai/resources/chat/completions/completions.mjs'
+
+/**
+ * Convert Anthropic tool schemas to OpenAI function calling format.
+ *
+ * Anthropic: { name, description, input_schema }
+ * OpenAI: { type: "function", function: { name, description, parameters } }
+ *
+ * Anthropic-specific fields (cache_control, defer_loading, etc.) are stripped.
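+ *
+ * Example (illustrative):
+ *   { name: 'bash', description: 'Run a shell command',
+ *     input_schema: { type: 'object', properties: { command: { type: 'string' } } } }
+ * becomes
+ *   { type: 'function', function: { name: 'bash', description: 'Run a shell command',
+ *     parameters: { type: 'object', properties: { command: { type: 'string' } } } } }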
+ */
+export function anthropicToolsToOpenAI(
+ tools: BetaToolUnion[],
+): ChatCompletionTool[] {
+ return tools
+ .filter(tool => {
+ // Only convert standard tools (skip server tools like computer_use, etc.)
+ return tool.type === 'custom' || !('type' in tool) || tool.type !== 'server'
+ })
+ .map(tool => {
+ // Handle the various tool shapes from Anthropic SDK
+      const anyTool = tool as Record<string, unknown>
+ const name = (anyTool.name as string) || ''
+ const description = (anyTool.description as string) || ''
+      const inputSchema = anyTool.input_schema as Record<string, unknown> | undefined
+
+ return {
+ type: 'function' as const,
+ function: {
+ name,
+ description,
+ parameters: inputSchema || { type: 'object', properties: {} },
+ },
+ } satisfies ChatCompletionTool
+ })
+}
+
+/**
+ * Map Anthropic tool_choice to OpenAI tool_choice format.
+ *
+ * Anthropic → OpenAI:
+ * - { type: "auto" } → "auto"
+ * - { type: "any" } → "required"
+ * - { type: "tool", name } → { type: "function", function: { name } }
+ * - undefined → undefined (use provider default)
+ */
+export function anthropicToolChoiceToOpenAI(
+ toolChoice: unknown,
+): string | { type: 'function'; function: { name: string } } | undefined {
+ if (!toolChoice || typeof toolChoice !== 'object') return undefined
+
+  const tc = toolChoice as Record<string, unknown>
+ const type = tc.type as string
+
+ switch (type) {
+ case 'auto':
+ return 'auto'
+ case 'any':
+ return 'required'
+ case 'tool':
+ return {
+ type: 'function',
+ function: { name: tc.name as string },
+ }
+ default:
+ return undefined
+ }
+}
diff --git a/src/services/api/openai/index.ts b/src/services/api/openai/index.ts
new file mode 100644
index 000000000..9195b3d5b
--- /dev/null
+++ b/src/services/api/openai/index.ts
@@ -0,0 +1,208 @@
+import type { BetaToolUnion } from '@anthropic-ai/sdk/resources/beta/messages/messages.mjs'
+import type { SystemPrompt } from '../../../utils/systemPromptType.js'
+import type { Message, StreamEvent, SystemAPIErrorMessage, AssistantMessage } from '../../../types/message.js'
+import type { Tools } from '../../../Tool.js'
+import { getOpenAIClient } from './client.js'
+import { anthropicMessagesToOpenAI } from './convertMessages.js'
+import { anthropicToolsToOpenAI, anthropicToolChoiceToOpenAI } from './convertTools.js'
+import { adaptOpenAIStreamToAnthropic } from './streamAdapter.js'
+import { resolveOpenAIModel } from './modelMapping.js'
+import { normalizeMessagesForAPI, normalizeContentFromAPI, createAssistantAPIErrorMessage } from '../../../utils/messages.js'
+import { toolToAPISchema } from '../../../utils/api.js'
+import { logForDebugging } from '../../../utils/debug.js'
+import type { Options } from '../claude.js'
+import { randomUUID } from 'crypto'
+
+/**
+ * OpenAI-compatible query path. Converts Anthropic-format messages/tools to
+ * OpenAI format, calls the OpenAI-compatible endpoint, and converts the
+ * SSE stream back to Anthropic BetaRawMessageStreamEvent for consumption
+ * by the existing query pipeline.
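+ *
+ * Pipeline per request: resolve model → normalize messages → convert
+ * messages/tools to OpenAI format → stream → adapt events → yield both
+ * AssistantMessage (tool handling) and StreamEvent (real-time rendering).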
+ */
+export async function* queryModelOpenAI(
+ messages: Message[],
+ systemPrompt: SystemPrompt,
+ tools: Tools,
+ signal: AbortSignal,
+ options: Options,
+): AsyncGenerator<
+ StreamEvent | AssistantMessage | SystemAPIErrorMessage,
+ void
+> {
+ try {
+ // 1. Resolve model name
+ const openaiModel = resolveOpenAIModel(options.model)
+
+ // 2. Normalize messages using shared preprocessing
+ const messagesForAPI = normalizeMessagesForAPI(messages, tools)
+
+ // 3. Build tool schemas
+ const toolSchemas = await Promise.all(
+ tools.map(tool =>
+ toolToAPISchema(tool, {
+ getToolPermissionContext: options.getToolPermissionContext,
+ tools,
+ agents: options.agents,
+ allowedAgentTypes: options.allowedAgentTypes,
+ model: options.model,
+ }),
+ ),
+ )
+ // Filter out non-standard tools (server tools like advisor)
+ const standardTools = toolSchemas.filter(
+ (t): t is BetaToolUnion & { type: string } => {
+        const anyT = t as Record<string, unknown>
+ return anyT.type !== 'advisor_20260301' && anyT.type !== 'computer_20250124'
+ },
+ )
+
+ // 4. Convert messages and tools to OpenAI format
+ const openaiMessages = anthropicMessagesToOpenAI(messagesForAPI, systemPrompt)
+ const openaiTools = anthropicToolsToOpenAI(standardTools)
+ const openaiToolChoice = anthropicToolChoiceToOpenAI(options.toolChoice)
+
+ // 5. Get client and make streaming request
+ const client = getOpenAIClient({
+ maxRetries: 0,
+ fetchOverride: options.fetchOverride,
+ source: options.querySource,
+ })
+
+ logForDebugging(`[OpenAI] Calling model=${openaiModel}, messages=${openaiMessages.length}, tools=${openaiTools.length}`)
+
+ // 6. Call OpenAI API with streaming
+ const stream = await client.chat.completions.create(
+ {
+ model: openaiModel,
+ messages: openaiMessages,
+ ...(openaiTools.length > 0 && {
+ tools: openaiTools,
+ ...(openaiToolChoice && { tool_choice: openaiToolChoice }),
+ }),
+ stream: true,
+ stream_options: { include_usage: true },
+ ...(options.temperatureOverride !== undefined && {
+ temperature: options.temperatureOverride,
+ }),
+ },
+ {
+ signal,
+ },
+ )
+
+ // 7. Convert OpenAI stream to Anthropic events, then process into
+ // AssistantMessage + StreamEvent (matching the Anthropic path behavior)
+ const adaptedStream = adaptOpenAIStreamToAnthropic(stream, openaiModel)
+
+ // Accumulate content blocks and usage, same as the Anthropic path in claude.ts
+    const contentBlocks: Record<number, any> = {}
+ let partialMessage: any = undefined
+ let usage = {
+ input_tokens: 0,
+ output_tokens: 0,
+ cache_creation_input_tokens: 0,
+ cache_read_input_tokens: 0,
+ }
+ let ttftMs = 0
+ const start = Date.now()
+
+ for await (const event of adaptedStream) {
+ switch (event.type) {
+ case 'message_start': {
+ partialMessage = (event as any).message
+ ttftMs = Date.now() - start
+ if ((event as any).message?.usage) {
+ usage = {
+ ...usage,
+ ...((event as any).message.usage),
+ }
+ }
+ break
+ }
+ case 'content_block_start': {
+ const idx = (event as any).index
+ const cb = (event as any).content_block
+ if (cb.type === 'tool_use') {
+ contentBlocks[idx] = { ...cb, input: '' }
+ } else if (cb.type === 'text') {
+ contentBlocks[idx] = { ...cb, text: '' }
+ } else if (cb.type === 'thinking') {
+ contentBlocks[idx] = { ...cb, thinking: '', signature: '' }
+ } else {
+ contentBlocks[idx] = { ...cb }
+ }
+ break
+ }
+ case 'content_block_delta': {
+ const idx = (event as any).index
+ const delta = (event as any).delta
+ const block = contentBlocks[idx]
+ if (!block) break
+ if (delta.type === 'text_delta') {
+ block.text = (block.text || '') + delta.text
+ } else if (delta.type === 'input_json_delta') {
+ block.input = (block.input || '') + delta.partial_json
+ } else if (delta.type === 'thinking_delta') {
+ block.thinking = (block.thinking || '') + delta.thinking
+ } else if (delta.type === 'signature_delta') {
+ block.signature = delta.signature
+ }
+ break
+ }
+ case 'content_block_stop': {
+ const idx = (event as any).index
+ const block = contentBlocks[idx]
+ if (!block || !partialMessage) break
+
+ const m: AssistantMessage = {
+ message: {
+ ...partialMessage,
+ content: normalizeContentFromAPI(
+ [block],
+ tools,
+ options.agentId,
+ ),
+ },
+ requestId: undefined,
+ type: 'assistant',
+ uuid: randomUUID(),
+ timestamp: new Date().toISOString(),
+ }
+ yield m
+ break
+ }
+ case 'message_delta': {
+ const deltaUsage = (event as any).usage
+ if (deltaUsage) {
+ usage = { ...usage, ...deltaUsage }
+ }
+          // The stop_reason arrives after the last AssistantMessage has
+          // already been yielded; consumers pick it up from the StreamEvent
+          // yielded below instead.
+ break
+ }
+ case 'message_stop':
+ break
+ }
+
+ // Also yield as StreamEvent for real-time display (matching Anthropic path)
+ yield {
+ type: 'stream_event',
+ event,
+ ...(event.type === 'message_start' ? { ttftMs } : undefined),
+ } as StreamEvent
+ }
+ } catch (error) {
+ const errorMessage = error instanceof Error ? error.message : String(error)
+ logForDebugging(`[OpenAI] Error: ${errorMessage}`, { level: 'error' })
+ yield createAssistantAPIErrorMessage({
+ content: `API Error: ${errorMessage}`,
+ apiError: 'api_error',
+ error: error instanceof Error ? error : new Error(String(error)),
+ })
+ }
+}
diff --git a/src/services/api/openai/modelMapping.ts b/src/services/api/openai/modelMapping.ts
new file mode 100644
index 000000000..ba546fe48
--- /dev/null
+++ b/src/services/api/openai/modelMapping.ts
@@ -0,0 +1,60 @@
+/**
+ * Default mapping from Anthropic model names to OpenAI model names.
+ * Used only when ANTHROPIC_DEFAULT_*_MODEL env vars are not set.
+ */
+const DEFAULT_MODEL_MAP: Record<string, string> = {
+ 'claude-sonnet-4-20250514': 'gpt-4o',
+ 'claude-sonnet-4-5-20250929': 'gpt-4o',
+ 'claude-sonnet-4-6': 'gpt-4o',
+ 'claude-opus-4-20250514': 'o3',
+ 'claude-opus-4-1-20250805': 'o3',
+ 'claude-opus-4-5-20251101': 'o3',
+ 'claude-opus-4-6': 'o3',
+ 'claude-haiku-4-5-20251001': 'gpt-4o-mini',
+ 'claude-3-5-haiku-20241022': 'gpt-4o-mini',
+ 'claude-3-7-sonnet-20250219': 'gpt-4o',
+ 'claude-3-5-sonnet-20241022': 'gpt-4o',
+}
+
+/**
+ * Determine the model family (haiku / sonnet / opus) from an Anthropic model ID.
+ */
+function getModelFamily(model: string): 'haiku' | 'sonnet' | 'opus' | null {
+ if (/haiku/i.test(model)) return 'haiku'
+ if (/opus/i.test(model)) return 'opus'
+ if (/sonnet/i.test(model)) return 'sonnet'
+ return null
+}
+
+/**
+ * Resolve the OpenAI model name for a given Anthropic model.
+ *
+ * Priority:
+ * 1. OPENAI_MODEL env var (override all)
+ * 2. ANTHROPIC_DEFAULT_{FAMILY}_MODEL env var (e.g. ANTHROPIC_DEFAULT_SONNET_MODEL)
+ * 3. DEFAULT_MODEL_MAP lookup
+ * 4. Pass through original model name
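+ *
+ * Example (illustrative): with ANTHROPIC_DEFAULT_SONNET_MODEL=qwen3:32b set,
+ * resolveOpenAIModel('claude-sonnet-4-5-20250929') → 'qwen3:32b'; with no env
+ * vars set, the DEFAULT_MODEL_MAP fallback yields 'gpt-4o'.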
+ */
+export function resolveOpenAIModel(anthropicModel: string): string {
+ // Highest priority: explicit override
+ if (process.env.OPENAI_MODEL) {
+ return process.env.OPENAI_MODEL
+ }
+
+ // Strip [1m] suffix if present (Claude-specific modifier)
+ const cleanModel = anthropicModel.replace(/\[1m\]$/, '')
+
+ // Check ANTHROPIC_DEFAULT_*_MODEL env vars based on model family
+ const family = getModelFamily(cleanModel)
+ if (family) {
+ const envVar = `ANTHROPIC_DEFAULT_${family.toUpperCase()}_MODEL`
+ const override = process.env[envVar]
+ if (override) return override
+ }
+
+ return DEFAULT_MODEL_MAP[cleanModel] ?? cleanModel
+}
diff --git a/src/services/api/openai/streamAdapter.ts b/src/services/api/openai/streamAdapter.ts
new file mode 100644
index 000000000..0c925fa7d
--- /dev/null
+++ b/src/services/api/openai/streamAdapter.ts
@@ -0,0 +1,314 @@
+import type { BetaRawMessageStreamEvent } from '@anthropic-ai/sdk/resources/beta/messages/messages.mjs'
+import type { ChatCompletionChunk } from 'openai/resources/chat/completions/completions.mjs'
+import { randomUUID } from 'crypto'
+
+/**
+ * Adapt an OpenAI streaming response into Anthropic BetaRawMessageStreamEvent.
+ *
+ * Mapping:
+ * First chunk → message_start
+ * delta.reasoning_content → content_block_start(thinking) + thinking_delta + content_block_stop
+ * delta.content → content_block_start(text) + text_delta + content_block_stop
+ * delta.tool_calls → content_block_start(tool_use) + input_json_delta + content_block_stop
+ * finish_reason → message_delta(stop_reason) + message_stop
+ * usage.cached_tokens → cache_read_input_tokens in message_start usage
+ *
+ * Thinking support:
+ * DeepSeek and compatible providers send `delta.reasoning_content` for chain-of-thought.
+ * This is mapped to Anthropic's `thinking` content blocks:
+ * content_block_start: { type: 'thinking', thinking: '', signature: '' }
+ * content_block_delta: { type: 'thinking_delta', thinking: '...' }
+ *
+ * Prompt caching:
+ * OpenAI reports cached tokens in usage.prompt_tokens_details.cached_tokens.
+ * This is mapped to Anthropic's cache_read_input_tokens.
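+ *
+ * Example (illustrative) for a minimal two-chunk text stream:
+ *   { delta: { content: 'Hi' } } → message_start, content_block_start(text), text_delta('Hi')
+ *   { finish_reason: 'stop' }    → content_block_stop, message_delta(end_turn), message_stop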
+ */
+export async function* adaptOpenAIStreamToAnthropic(
+  stream: AsyncIterable<ChatCompletionChunk>,
+ model: string,
+): AsyncGenerator<BetaRawMessageStreamEvent> {
+ const messageId = `msg_${randomUUID().replace(/-/g, '').slice(0, 24)}`
+
+ let started = false
+ let currentContentIndex = -1
+
+ // Track tool_use blocks: tool_calls index → { contentIndex, id, name, arguments }
+  const toolBlocks = new Map<number, { contentIndex: number; id: string; name: string; arguments: string }>()
+
+ // Track thinking block state
+ let thinkingBlockOpen = false
+
+ // Track text block state
+ let textBlockOpen = false
+
+ // Track usage
+ let inputTokens = 0
+ let outputTokens = 0
+ let cachedTokens = 0
+
+ // Track all open content block indices (for cleanup)
+  const openBlockIndices = new Set<number>()
+
+ for await (const chunk of stream) {
+ const choice = chunk.choices?.[0]
+ const delta = choice?.delta
+
+ // Extract usage from any chunk that carries it
+ if (chunk.usage) {
+ inputTokens = chunk.usage.prompt_tokens ?? inputTokens
+ outputTokens = chunk.usage.completion_tokens ?? outputTokens
+ // OpenAI prompt caching: prompt_tokens_details.cached_tokens
+ const details = (chunk.usage as any).prompt_tokens_details
+ if (details?.cached_tokens) {
+ cachedTokens = details.cached_tokens
+ }
+ }
+
+ // Emit message_start on first chunk
+ if (!started) {
+ started = true
+
+ yield {
+ type: 'message_start',
+ message: {
+ id: messageId,
+ type: 'message',
+ role: 'assistant',
+ content: [],
+ model,
+ stop_reason: null,
+ stop_sequence: null,
+ usage: {
+ input_tokens: inputTokens,
+ output_tokens: 0,
+ cache_creation_input_tokens: 0,
+ cache_read_input_tokens: cachedTokens,
+ },
+ },
+ } as BetaRawMessageStreamEvent
+ }
+
+ if (!delta) continue
+
+ // Handle reasoning_content → Anthropic thinking block
+ // DeepSeek and compatible providers send delta.reasoning_content
+ const reasoningContent = (delta as any).reasoning_content
+ if (reasoningContent != null && reasoningContent !== '') {
+ if (!thinkingBlockOpen) {
+ currentContentIndex++
+ thinkingBlockOpen = true
+ openBlockIndices.add(currentContentIndex)
+
+ yield {
+ type: 'content_block_start',
+ index: currentContentIndex,
+ content_block: {
+ type: 'thinking',
+ thinking: '',
+ signature: '',
+ },
+ } as BetaRawMessageStreamEvent
+ }
+
+ yield {
+ type: 'content_block_delta',
+ index: currentContentIndex,
+ delta: {
+ type: 'thinking_delta',
+ thinking: reasoningContent,
+ },
+ } as BetaRawMessageStreamEvent
+ }
+
+ // Handle text content
+ if (delta.content != null && delta.content !== '') {
+ if (!textBlockOpen) {
+ // Close thinking block if still open (reasoning done, now generating answer)
+ if (thinkingBlockOpen) {
+ yield {
+ type: 'content_block_stop',
+ index: currentContentIndex,
+ } as BetaRawMessageStreamEvent
+ openBlockIndices.delete(currentContentIndex)
+ thinkingBlockOpen = false
+ }
+
+ currentContentIndex++
+ textBlockOpen = true
+ openBlockIndices.add(currentContentIndex)
+
+ yield {
+ type: 'content_block_start',
+ index: currentContentIndex,
+ content_block: {
+ type: 'text',
+ text: '',
+ },
+ } as BetaRawMessageStreamEvent
+ }
+
+ yield {
+ type: 'content_block_delta',
+ index: currentContentIndex,
+ delta: {
+ type: 'text_delta',
+ text: delta.content,
+ },
+ } as BetaRawMessageStreamEvent
+ }
+
+ // Handle tool calls
+ if (delta.tool_calls) {
+ for (const tc of delta.tool_calls) {
+ const tcIndex = tc.index
+
+ if (!toolBlocks.has(tcIndex)) {
+ // Close thinking block if open
+ if (thinkingBlockOpen) {
+ yield {
+ type: 'content_block_stop',
+ index: currentContentIndex,
+ } as BetaRawMessageStreamEvent
+ openBlockIndices.delete(currentContentIndex)
+ thinkingBlockOpen = false
+ }
+
+ // Close text block if open
+ if (textBlockOpen) {
+ yield {
+ type: 'content_block_stop',
+ index: currentContentIndex,
+ } as BetaRawMessageStreamEvent
+ openBlockIndices.delete(currentContentIndex)
+ textBlockOpen = false
+ }
+
+ // Start new tool_use block
+ currentContentIndex++
+ const toolId = tc.id || `toolu_${randomUUID().replace(/-/g, '').slice(0, 24)}`
+ const toolName = tc.function?.name || ''
+
+ toolBlocks.set(tcIndex, {
+ contentIndex: currentContentIndex,
+ id: toolId,
+ name: toolName,
+ arguments: '',
+ })
+ openBlockIndices.add(currentContentIndex)
+
+ yield {
+ type: 'content_block_start',
+ index: currentContentIndex,
+ content_block: {
+ type: 'tool_use',
+ id: toolId,
+ name: toolName,
+ input: {},
+ },
+ } as BetaRawMessageStreamEvent
+ }
+
+ // Stream argument fragments
+ const argFragment = tc.function?.arguments
+ if (argFragment) {
+ toolBlocks.get(tcIndex)!.arguments += argFragment
+ yield {
+ type: 'content_block_delta',
+ index: toolBlocks.get(tcIndex)!.contentIndex,
+ delta: {
+ type: 'input_json_delta',
+ partial_json: argFragment,
+ },
+ } as BetaRawMessageStreamEvent
+ }
+ }
+ }
+
+ // Handle finish
+ if (choice?.finish_reason) {
+ // Close thinking block if still open
+ if (thinkingBlockOpen) {
+ yield {
+ type: 'content_block_stop',
+ index: currentContentIndex,
+ } as BetaRawMessageStreamEvent
+ openBlockIndices.delete(currentContentIndex)
+ thinkingBlockOpen = false
+ }
+
+ // Close text block if still open
+ if (textBlockOpen) {
+ yield {
+ type: 'content_block_stop',
+ index: currentContentIndex,
+ } as BetaRawMessageStreamEvent
+ openBlockIndices.delete(currentContentIndex)
+ textBlockOpen = false
+ }
+
+ // Close all tool blocks that haven't been closed yet
+ for (const [, block] of toolBlocks) {
+ if (openBlockIndices.has(block.contentIndex)) {
+ yield {
+ type: 'content_block_stop',
+ index: block.contentIndex,
+ } as BetaRawMessageStreamEvent
+ openBlockIndices.delete(block.contentIndex)
+ }
+ }
+
+ // Map finish_reason to Anthropic stop_reason
+ const stopReason = mapFinishReason(choice.finish_reason)
+
+ yield {
+ type: 'message_delta',
+ delta: {
+ stop_reason: stopReason,
+ stop_sequence: null,
+ },
+ usage: {
+ output_tokens: outputTokens,
+ },
+ } as BetaRawMessageStreamEvent
+
+ yield {
+ type: 'message_stop',
+ } as BetaRawMessageStreamEvent
+ }
+ }
+
+ // Safety: close any remaining open blocks if stream ended without finish_reason
+ for (const idx of openBlockIndices) {
+ yield {
+ type: 'content_block_stop',
+ index: idx,
+ } as BetaRawMessageStreamEvent
+ }
+}
+
+/**
+ * Map OpenAI finish_reason to Anthropic stop_reason.
+ *
+ * stop → end_turn
+ * tool_calls → tool_use
+ * length → max_tokens
+ * content_filter → end_turn
+ */
+function mapFinishReason(reason: string): string {
+ switch (reason) {
+ case 'stop':
+ return 'end_turn'
+ case 'tool_calls':
+ return 'tool_use'
+ case 'length':
+ return 'max_tokens'
+ case 'content_filter':
+ return 'end_turn'
+ default:
+ return 'end_turn'
+ }
+}
diff --git a/src/utils/model/configs.ts b/src/utils/model/configs.ts
index 89f243d87..d9bfae0f9 100644
--- a/src/utils/model/configs.ts
+++ b/src/utils/model/configs.ts
@@ -11,6 +11,7 @@ export const CLAUDE_3_7_SONNET_CONFIG = {
bedrock: 'us.anthropic.claude-3-7-sonnet-20250219-v1:0',
vertex: 'claude-3-7-sonnet@20250219',
foundry: 'claude-3-7-sonnet',
+ openai: 'claude-3-7-sonnet-20250219',
} as const satisfies ModelConfig
export const CLAUDE_3_5_V2_SONNET_CONFIG = {
@@ -18,6 +19,7 @@ export const CLAUDE_3_5_V2_SONNET_CONFIG = {
bedrock: 'anthropic.claude-3-5-sonnet-20241022-v2:0',
vertex: 'claude-3-5-sonnet-v2@20241022',
foundry: 'claude-3-5-sonnet',
+ openai: 'claude-3-5-sonnet-20241022',
} as const satisfies ModelConfig
export const CLAUDE_3_5_HAIKU_CONFIG = {
@@ -25,6 +27,7 @@ export const CLAUDE_3_5_HAIKU_CONFIG = {
bedrock: 'us.anthropic.claude-3-5-haiku-20241022-v1:0',
vertex: 'claude-3-5-haiku@20241022',
foundry: 'claude-3-5-haiku',
+ openai: 'claude-3-5-haiku-20241022',
} as const satisfies ModelConfig
export const CLAUDE_HAIKU_4_5_CONFIG = {
@@ -32,6 +35,7 @@ export const CLAUDE_HAIKU_4_5_CONFIG = {
bedrock: 'us.anthropic.claude-haiku-4-5-20251001-v1:0',
vertex: 'claude-haiku-4-5@20251001',
foundry: 'claude-haiku-4-5',
+ openai: 'claude-haiku-4-5-20251001',
} as const satisfies ModelConfig
export const CLAUDE_SONNET_4_CONFIG = {
@@ -39,6 +43,7 @@ export const CLAUDE_SONNET_4_CONFIG = {
bedrock: 'us.anthropic.claude-sonnet-4-20250514-v1:0',
vertex: 'claude-sonnet-4@20250514',
foundry: 'claude-sonnet-4',
+ openai: 'claude-sonnet-4-20250514',
} as const satisfies ModelConfig
export const CLAUDE_SONNET_4_5_CONFIG = {
@@ -46,6 +51,7 @@ export const CLAUDE_SONNET_4_5_CONFIG = {
bedrock: 'us.anthropic.claude-sonnet-4-5-20250929-v1:0',
vertex: 'claude-sonnet-4-5@20250929',
foundry: 'claude-sonnet-4-5',
+ openai: 'claude-sonnet-4-5-20250929',
} as const satisfies ModelConfig
export const CLAUDE_OPUS_4_CONFIG = {
@@ -53,6 +59,7 @@ export const CLAUDE_OPUS_4_CONFIG = {
bedrock: 'us.anthropic.claude-opus-4-20250514-v1:0',
vertex: 'claude-opus-4@20250514',
foundry: 'claude-opus-4',
+ openai: 'claude-opus-4-20250514',
} as const satisfies ModelConfig
export const CLAUDE_OPUS_4_1_CONFIG = {
@@ -60,6 +67,7 @@ export const CLAUDE_OPUS_4_1_CONFIG = {
bedrock: 'us.anthropic.claude-opus-4-1-20250805-v1:0',
vertex: 'claude-opus-4-1@20250805',
foundry: 'claude-opus-4-1',
+ openai: 'claude-opus-4-1-20250805',
} as const satisfies ModelConfig
export const CLAUDE_OPUS_4_5_CONFIG = {
@@ -67,6 +75,7 @@ export const CLAUDE_OPUS_4_5_CONFIG = {
bedrock: 'us.anthropic.claude-opus-4-5-20251101-v1:0',
vertex: 'claude-opus-4-5@20251101',
foundry: 'claude-opus-4-5',
+ openai: 'claude-opus-4-5-20251101',
} as const satisfies ModelConfig
export const CLAUDE_OPUS_4_6_CONFIG = {
@@ -74,6 +83,7 @@ export const CLAUDE_OPUS_4_6_CONFIG = {
bedrock: 'us.anthropic.claude-opus-4-6-v1',
vertex: 'claude-opus-4-6',
foundry: 'claude-opus-4-6',
+ openai: 'claude-opus-4-6',
} as const satisfies ModelConfig
export const CLAUDE_SONNET_4_6_CONFIG = {
@@ -81,6 +91,7 @@ export const CLAUDE_SONNET_4_6_CONFIG = {
bedrock: 'us.anthropic.claude-sonnet-4-6',
vertex: 'claude-sonnet-4-6',
foundry: 'claude-sonnet-4-6',
+ openai: 'claude-sonnet-4-6',
} as const satisfies ModelConfig
// @[MODEL LAUNCH]: Register the new config here.
diff --git a/src/utils/model/providers.ts b/src/utils/model/providers.ts
index aba9b7d7f..a082cd298 100644
--- a/src/utils/model/providers.ts
+++ b/src/utils/model/providers.ts
@@ -1,16 +1,24 @@
import type { AnalyticsMetadata_I_VERIFIED_THIS_IS_NOT_CODE_OR_FILEPATHS } from '../../services/analytics/index.js'
+import { getInitialSettings } from '../settings/settings.js'
import { isEnvTruthy } from '../envUtils.js'
-export type APIProvider = 'firstParty' | 'bedrock' | 'vertex' | 'foundry'
+export type APIProvider = 'firstParty' | 'bedrock' | 'vertex' | 'foundry' | 'openai'
export function getAPIProvider(): APIProvider {
- return isEnvTruthy(process.env.CLAUDE_CODE_USE_BEDROCK)
- ? 'bedrock'
- : isEnvTruthy(process.env.CLAUDE_CODE_USE_VERTEX)
- ? 'vertex'
- : isEnvTruthy(process.env.CLAUDE_CODE_USE_FOUNDRY)
- ? 'foundry'
- : 'firstParty'
+ // 1. Check settings.json modelType field (highest priority)
+ const modelType = getInitialSettings().modelType
+ if (modelType === 'openai') return 'openai'
+
+ // 2. Check environment variables (backward compatibility)
+ return isEnvTruthy(process.env.CLAUDE_CODE_USE_OPENAI)
+ ? 'openai'
+ : isEnvTruthy(process.env.CLAUDE_CODE_USE_BEDROCK)
+ ? 'bedrock'
+ : isEnvTruthy(process.env.CLAUDE_CODE_USE_VERTEX)
+ ? 'vertex'
+ : isEnvTruthy(process.env.CLAUDE_CODE_USE_FOUNDRY)
+ ? 'foundry'
+ : 'firstParty'
}
export function getAPIProviderForStatsig(): AnalyticsMetadata_I_VERIFIED_THIS_IS_NOT_CODE_OR_FILEPATHS {
diff --git a/src/utils/settings/types.ts b/src/utils/settings/types.ts
index ba89edd8e..9c55a7f45 100644
--- a/src/utils/settings/types.ts
+++ b/src/utils/settings/types.ts
@@ -372,6 +372,13 @@ export const SettingsSchema = lazySchema(() =>
permissions: PermissionsSchema()
.optional()
.describe('Tool usage permissions configuration'),
+ modelType: z
+ .enum(['anthropic', 'openai'])
+ .optional()
+ .describe(
+ 'API provider type. "anthropic" uses the Anthropic API (default), "openai" uses the OpenAI Chat Completions API (/v1/chat/completions). ' +
+ 'When set to "openai", configure OPENAI_API_KEY, OPENAI_BASE_URL, and OPENAI_MODEL in env.',
+ ),
model: z
.string()
.optional()
diff --git a/tests/integration/cli-arguments.test.ts b/tests/integration/cli-arguments.test.ts
index 7099ec044..6b0e23e9d 100644
--- a/tests/integration/cli-arguments.test.ts
+++ b/tests/integration/cli-arguments.test.ts
@@ -1,11 +1,10 @@
import { describe, expect, test } from "bun:test";
+import { Command } from "@commander-js/extra-typings";
// Test Commander.js option parsing independently from main.tsx initialization.
// main.tsx has heavy bootstrap dependencies; we test the CLI argument parsing
// patterns it uses to ensure correct behavior.
-const { Command } = require("/Users/konghayao/code/ai/claude-code/node_modules/.old_modules-13e6b62a502cda34/commander/index.js");
-
function createTestProgram(): Command {
const program = new Command();
program