28 changes: 28 additions & 0 deletions README.md
@@ -69,6 +69,34 @@ Codex CLI supports [MCP servers](./docs/advanced.md#model-context-protocol-mcp).

Codex CLI supports a rich set of configuration options, with preferences stored in `~/.codex/config.toml`. For full configuration options, see [Configuration](./docs/config.md).

### Using Codex with LM Studio

Codex can run fully locally by delegating inference to [LM Studio](https://lmstudio.ai/).

1. Launch LM Studio and enable the **Local Inference Server** (Preferences → Developer).
2. Load any model from the **My Models** tab. Codex refers to it by the model identifier the LM Studio server exposes.
3. Run Codex with the LM Studio backend:

```shell
# Interactive session using the default LLaMA 3.1 8B Instruct model
codex --backend lmstudio

# Explicitly pick one of the supported architectures
codex --backend lmstudio --model qwen3
codex exec --backend lmstudio --model qwen3-moe "summarize this repo"
```

Codex understands the following architecture aliases when `--backend lmstudio` is selected:

| Alias       | LM Studio model identifier              |
| ----------- | --------------------------------------- |
| `llama`     | `meta-llama/Meta-Llama-3.1-8B-Instruct` |
| `qwen2`     | `Qwen/Qwen2-7B-Instruct`                |
| `qwen3`     | `Qwen/Qwen3-7B-Instruct`                |
| `qwen3-moe` | `Qwen/Qwen3-MoE-A2.7B-Instruct`         |

You can also pass the exact LM Studio identifier (for example `my-org/custom-model`) if you are running a different checkpoint. Codex verifies that the requested model is available from LM Studio and surfaces clear errors when it is not.
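
If Codex reports that a model is unavailable, you can check what the local server is actually exposing before retrying. A minimal sketch, assuming the default port `1234` and LM Studio's standard OpenAI-compatible `/v1/models` listing:

```shell
# List the model identifiers the LM Studio server currently serves
curl http://localhost:1234/v1/models

# Then pass one of them to Codex verbatim
codex --backend lmstudio --model my-org/custom-model
```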

---

### Docs & FAQ
16 changes: 16 additions & 0 deletions codex-rs/Cargo.lock

Some generated files are not rendered by default.

2 changes: 2 additions & 0 deletions codex-rs/Cargo.toml
@@ -14,6 +14,7 @@ members = [
"login",
"mcp-client",
"mcp-server",
"lmstudio",
"mcp-types",
"ollama",
"protocol",
@@ -50,6 +51,7 @@ codex-mcp-client = { path = "mcp-client" }
codex-mcp-server = { path = "mcp-server" }
codex-ollama = { path = "ollama" }
codex-protocol = { path = "protocol" }
codex-lmstudio = { path = "lmstudio" }
codex-rmcp-client = { path = "rmcp-client" }
codex-protocol-ts = { path = "protocol-ts" }
codex-responses-api-proxy = { path = "responses-api-proxy" }
36 changes: 36 additions & 0 deletions codex-rs/common/src/backend_cli_arg.rs
@@ -0,0 +1,36 @@
use clap::ValueEnum;

/// CLI flag values for selecting the Codex runtime backend.
#[derive(Debug, Clone, Copy, PartialEq, Eq, ValueEnum)]
#[value(rename_all = "kebab-case")]
pub enum BackendCliArg {
    /// Use the default OpenAI backend.
    Openai,
    /// Use the bundled open-source Ollama integration.
    Oss,
    /// Use a local LM Studio instance.
    Lmstudio,
}

impl BackendCliArg {
    /// Returns the model provider key associated with this backend, if any.
    pub fn provider_key(self) -> Option<&'static str> {
        match self {
            BackendCliArg::Openai => None,
            BackendCliArg::Oss => Some(codex_core::BUILT_IN_OSS_MODEL_PROVIDER_ID),
            BackendCliArg::Lmstudio => Some(codex_core::BUILT_IN_LM_STUDIO_MODEL_PROVIDER_ID),
        }
    }

    pub fn is_oss(self) -> bool {
        matches!(self, BackendCliArg::Oss)
    }

    pub fn is_lmstudio(self) -> bool {
        matches!(self, BackendCliArg::Lmstudio)
    }

    pub fn is_local(self) -> bool {
        matches!(self, BackendCliArg::Oss | BackendCliArg::Lmstudio)
    }
}
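
Because the enum derives `ValueEnum` with `rename_all = "kebab-case"`, clap should accept the lower-cased variant names directly on the command line. A usage sketch (the exact error wording is clap's, not defined in this diff):

```shell
# The three values BackendCliArg is expected to accept
codex --backend openai    # default OpenAI backend; no provider override
codex --backend oss       # bundled Ollama integration
codex --backend lmstudio  # local LM Studio instance

# Anything else is rejected by clap's value parser with an "invalid value" error
codex --backend llamacpp
```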
6 changes: 6 additions & 0 deletions codex-rs/common/src/lib.rs
@@ -1,3 +1,9 @@
#[cfg(feature = "cli")]
mod backend_cli_arg;

#[cfg(feature = "cli")]
pub use backend_cli_arg::BackendCliArg;

#[cfg(feature = "cli")]
mod approval_mode_cli_arg;

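
`BackendCliArg` is only compiled when the crate's `cli` feature is enabled, matching how `approval_mode_cli_arg` is already gated. A quick way to exercise both configurations (a sketch; assumes `cli` is an optional feature of the `codex-common` package, as the gating suggests):

```shell
# With the feature, the new module and re-export are built
cargo check -p codex-common --features cli

# Without it, backend_cli_arg is skipped entirely
cargo check -p codex-common
```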
2 changes: 2 additions & 0 deletions codex-rs/core/src/lib.rs
@@ -39,10 +39,12 @@ pub mod parse_command;
mod truncate;
mod unified_exec;
mod user_instructions;
pub use model_provider_info::BUILT_IN_LM_STUDIO_MODEL_PROVIDER_ID;
pub use model_provider_info::BUILT_IN_OSS_MODEL_PROVIDER_ID;
pub use model_provider_info::ModelProviderInfo;
pub use model_provider_info::WireApi;
pub use model_provider_info::built_in_model_providers;
pub use model_provider_info::create_lmstudio_provider_with_base_url;
pub use model_provider_info::create_oss_provider_with_base_url;
mod conversation_manager;
mod event_mapping;
87 changes: 85 additions & 2 deletions codex-rs/core/src/model_provider_info.rs
@@ -248,17 +248,20 @@ impl ModelProviderInfo {
}

const DEFAULT_OLLAMA_PORT: u32 = 11434;
const DEFAULT_LM_STUDIO_PORT: u32 = 1234;

pub const BUILT_IN_OSS_MODEL_PROVIDER_ID: &str = "oss";
pub const BUILT_IN_LM_STUDIO_MODEL_PROVIDER_ID: &str = "lmstudio";

/// Built-in default provider list.
pub fn built_in_model_providers() -> HashMap<String, ModelProviderInfo> {
    use ModelProviderInfo as P;

    // We do not want to be in the business of adjudicating which third-party
    // providers are bundled with Codex CLI, so we only include the OpenAI and
    // open source ("oss") providers by default. Users are encouraged to add to
    // `model_providers` in config.toml to add their own providers.
    // local open source providers (Ollama "oss" and LM Studio) by default. Users
    // are encouraged to add to `model_providers` in config.toml to add their own
    // providers.
    [
        (
            "openai",
@@ -300,6 +303,10 @@ pub fn built_in_model_providers() -> HashMap<String, ModelProviderInfo> {
            },
        ),
        (BUILT_IN_OSS_MODEL_PROVIDER_ID, create_oss_provider()),
        (
            BUILT_IN_LM_STUDIO_MODEL_PROVIDER_ID,
            create_lmstudio_provider(),
        ),
    ]
    .into_iter()
    .map(|(k, v)| (k.to_string(), v))
@@ -344,6 +351,45 @@ pub fn create_oss_provider_with_base_url(base_url: &str) -> ModelProviderInfo {
    }
}

pub fn create_lmstudio_provider() -> ModelProviderInfo {
    let base_url = match std::env::var("CODEX_LM_STUDIO_BASE_URL")
        .ok()
        .filter(|v| !v.trim().is_empty())
    {
        Some(url) => url,
        None => format!(
            "http://localhost:{port}/v1",
            port = std::env::var("CODEX_LM_STUDIO_PORT")
                .ok()
                .filter(|v| !v.trim().is_empty())
                .and_then(|v| v.parse::<u32>().ok())
                .unwrap_or(DEFAULT_LM_STUDIO_PORT)
        ),
    };

    create_lmstudio_provider_with_base_url(&base_url)
}

pub fn create_lmstudio_provider_with_base_url(base_url: &str) -> ModelProviderInfo {
    ModelProviderInfo {
        name: "LM Studio".into(),
        base_url: Some(base_url.into()),
        env_key: None,
        env_key_instructions: Some(
            "Launch LM Studio and enable the local inference server (Preferences → Developer → Enable local server)."
                .into(),
        ),
        wire_api: WireApi::Chat,
        query_params: None,
        http_headers: None,
        env_http_headers: None,
        request_max_retries: None,
        stream_max_retries: None,
        stream_idle_timeout_ms: None,
        requires_openai_auth: false,
    }
}

fn matches_azure_responses_base_url(base_url: &str) -> bool {
    let base = base_url.to_ascii_lowercase();
    const AZURE_MARKERS: [&str; 5] = [
@@ -386,6 +432,43 @@ base_url = "http://localhost:11434/v1"
    assert_eq!(expected_provider, provider);
}

#[test]
fn test_deserialize_lmstudio_model_provider_toml() {
    let provider_toml = r#"
name = "LM Studio"
base_url = "http://localhost:1234/v1"
"#;
    let expected_provider = ModelProviderInfo {
        name: "LM Studio".into(),
        base_url: Some("http://localhost:1234/v1".into()),
        env_key: None,
        env_key_instructions: None,
        wire_api: WireApi::Chat,
        query_params: None,
        http_headers: None,
        env_http_headers: None,
        request_max_retries: None,
        stream_max_retries: None,
        stream_idle_timeout_ms: None,
        requires_openai_auth: false,
    };

    let provider: ModelProviderInfo = toml::from_str(provider_toml).unwrap();
    assert_eq!(expected_provider, provider);
}

#[test]
fn test_create_lmstudio_provider_with_base_url() {
    let provider = create_lmstudio_provider_with_base_url("http://localhost:9999/v1");
    assert_eq!(provider.name, "LM Studio");
    assert_eq!(
        provider.base_url.as_deref(),
        Some("http://localhost:9999/v1")
    );
    assert_eq!(provider.wire_api, WireApi::Chat);
    assert!(!provider.requires_openai_auth);
}

#[test]
fn test_deserialize_azure_model_provider_toml() {
    let azure_provider_toml = r#"
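`create_lmstudio_provider` resolves its base URL from `CODEX_LM_STUDIO_BASE_URL`, falling back to `CODEX_LM_STUDIO_PORT` on localhost and finally to the default port `1234`. A sketch of exercising those overrides from a shell (the remote address is only an example):

```shell
# Full override: point Codex at an LM Studio server on another machine
CODEX_LM_STUDIO_BASE_URL="http://192.168.1.50:1234/v1" codex --backend lmstudio

# Port-only override: the base URL stays on localhost
CODEX_LM_STUDIO_PORT=4321 codex --backend lmstudio

# If both are set, a non-empty CODEX_LM_STUDIO_BASE_URL wins and the port is ignored
```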
1 change: 1 addition & 0 deletions codex-rs/exec/Cargo.toml
@@ -25,6 +25,7 @@ codex-common = { workspace = true, features = [
    "sandbox_summary",
] }
codex-core = { workspace = true }
codex-lmstudio = { workspace = true }
codex-ollama = { workspace = true }
codex-protocol = { workspace = true }
owo-colors = { workspace = true }
7 changes: 6 additions & 1 deletion codex-rs/exec/src/cli.rs
@@ -1,5 +1,6 @@
use clap::Parser;
use clap::ValueEnum;
use codex_common::BackendCliArg;
use codex_common::CliConfigOverrides;
use std::path::PathBuf;

@@ -18,7 +19,11 @@ pub struct Cli {
    #[arg(long, short = 'm')]
    pub model: Option<String>,

    #[arg(long = "oss", default_value_t = false)]
    /// Select the runtime backend Codex should connect to.
    #[arg(long = "backend", value_enum, conflicts_with = "oss")]
    pub backend: Option<BackendCliArg>,

    #[arg(long = "oss", default_value_t = false, conflicts_with = "backend")]
    pub oss: bool,

    /// Select the sandbox policy to use when executing model-generated shell
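With `conflicts_with` declared on both flags, clap should reject any invocation that combines the legacy `--oss` switch with the new `--backend` selector. A usage sketch (error text is clap's own):

```shell
# Either spelling of the local Ollama backend works on its own
codex exec --oss "summarize this repo"
codex exec --backend oss "summarize this repo"

# Combining them is rejected because the flags conflict
codex exec --backend lmstudio --oss "summarize this repo"
```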