diff --git a/crates/forge_domain/src/provider.rs b/crates/forge_domain/src/provider.rs
index 3a978baf59..c77ce1b28f 100644
--- a/crates/forge_domain/src/provider.rs
+++ b/crates/forge_domain/src/provider.rs
@@ -70,6 +70,7 @@ impl ProviderId {
     pub const MINIMAX: ProviderId = ProviderId(Cow::Borrowed("minimax"));
     pub const CODEX: ProviderId = ProviderId(Cow::Borrowed("codex"));
     pub const OPENCODE_ZEN: ProviderId = ProviderId(Cow::Borrowed("opencode_zen"));
+    pub const NOVITA: ProviderId = ProviderId(Cow::Borrowed("novita"));
 
     /// Returns all built-in provider IDs
     ///
@@ -100,6 +101,7 @@ impl ProviderId {
             ProviderId::MINIMAX,
             ProviderId::CODEX,
             ProviderId::OPENCODE_ZEN,
+            ProviderId::NOVITA,
         ]
     }
 
@@ -123,6 +125,7 @@ impl ProviderId {
             "io_intelligence" => "IOIntelligence".to_string(),
             "minimax" => "MiniMax".to_string(),
             "codex" => "Codex".to_string(),
+            "novita" => "Novita".to_string(),
             _ => {
                 // For other providers, use UpperCamelCase conversion
                 use convert_case::{Case, Casing};
@@ -165,6 +168,7 @@ impl std::str::FromStr for ProviderId {
             "io_intelligence" => ProviderId::IO_INTELLIGENCE,
             "minimax" => ProviderId::MINIMAX,
             "codex" => ProviderId::CODEX,
+            "novita" => ProviderId::NOVITA,
             // For custom providers, use Cow::Owned to avoid memory leaks
             custom => ProviderId(Cow::Owned(custom.to_string())),
         };
diff --git a/crates/forge_repo/src/provider/provider.json b/crates/forge_repo/src/provider/provider.json
index 1ba9dc8822..4b03b23caa 100644
--- a/crates/forge_repo/src/provider/provider.json
+++ b/crates/forge_repo/src/provider/provider.json
@@ -2383,5 +2383,53 @@
       }
     ],
     "auth_methods": ["api_key"]
-  }
+  },
+  {
+    "id": "novita",
+    "api_key_vars": "NOVITA_API_KEY",
+    "url_param_vars": [],
+    "response_type": "OpenAI",
+    "url": "https://api.novita.ai/openai/v1/chat/completions",
+    "models": [
+      {
+        "id": "moonshotai/kimi-k2.5",
+        "name": "Kimi K2.5",
+        "description": "Moonshot AI's flagship MoE model with 262K context, function calling, structured output, reasoning, and vision capabilities",
+        "context_length": 262144,
+        "tools_supported": true,
+        "supports_parallel_tool_calls": true,
+        "supports_reasoning": true,
+        "input_modalities": ["text", "image"]
+      },
+      {
+        "id": "zai-org/glm-5",
+        "name": "GLM-5",
+        "description": "Zhipu AI's flagship MoE model with 202K context, function calling, structured output, and reasoning capabilities",
+        "context_length": 202800,
+        "tools_supported": true,
+        "supports_parallel_tool_calls": true,
+        "supports_reasoning": true,
+        "input_modalities": ["text"]
+      },
+      {
+        "id": "minimax/minimax-m2.5",
+        "name": "MiniMax M2.5",
+        "description": "MiniMax's MoE model with 204K context, function calling, structured output, and reasoning capabilities",
+        "context_length": 204800,
+        "tools_supported": true,
+        "supports_parallel_tool_calls": true,
+        "supports_reasoning": true,
+        "input_modalities": ["text"]
+      },
+      {
+        "id": "qwen/qwen3-embedding-0.6b",
+        "name": "Qwen3 Embedding 0.6B",
+        "description": "Qwen3 embedding model with 1024 dimensions and 8K max input",
+        "context_length": 8192,
+        "tools_supported": false,
+        "input_modalities": ["text"]
+      }
+    ],
+    "auth_methods": ["api_key"]
+  }
 ]