Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion README.md
Original file line number Diff line number Diff line change
Expand Up @@ -96,7 +96,7 @@ Codex understands the following architecture aliases when `--backend lmstudio` i
| `qwen3-moe` | `qwen/qwen3-coder-30b` |
| `qwen3-moe-a3b`| `qwen/qwen3-30b-a3b-2507` |

You can also pass the exact LM Studio identifier (for example `my-org/custom-model`) if you are running a different checkpoint. Codex verifies that the requested model is available from LM Studio and surfaces clear errors when it is not.
Aliases are case-insensitive and you can mix spaces, hyphens, or underscores (for example, `qwen3 coder 30b a3b`). You can also pass the exact LM Studio identifier (for example `my-org/custom-model`) if you are running a different checkpoint. Codex verifies that the requested model is available from LM Studio and surfaces clear errors when it is not.

When you select the LM Studio backend Codex automatically enables structured JSON output so the agent can reliably capture command results. No extra flags are required.

Expand Down
33 changes: 33 additions & 0 deletions codex-rs/core/src/model_family.rs
Original file line number Diff line number Diff line change
Expand Up @@ -112,6 +112,15 @@ pub fn find_family_for_model(slug: &str) -> Option<ModelFamily> {
supports_reasoning_summaries: true,
needs_special_apply_patch_instructions: true,
)
} else if slug.starts_with("mistralai/devstral")
|| slug.starts_with("qwen/qwen2")
|| slug.starts_with("qwen/qwen3")
{
model_family!(
slug,
slug,
apply_patch_tool_type: Some(ApplyPatchToolType::Function),
)
} else {
None
}
Expand All @@ -129,3 +138,27 @@ pub fn derive_default_model_family(model: &str) -> ModelFamily {
base_instructions: BASE_INSTRUCTIONS.to_string(),
}
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::tool_apply_patch::ApplyPatchToolType;

    /// Every LM Studio-hosted slug we support must resolve to a model family
    /// that advertises the function-style `apply_patch` tool, since LM Studio
    /// backends rely on function calling rather than freeform patches.
    #[test]
    fn lmstudio_models_use_function_apply_patch_tool() {
        let slugs = [
            "mistralai/devstral-small-2507",
            "qwen/qwen2.5-coder-14b",
            "qwen/qwen3-coder-30b",
            "qwen/qwen3-30b-a3b-2507",
        ];

        for slug in slugs {
            let Some(family) = find_family_for_model(slug) else {
                panic!("expected lmstudio slug {slug:?} to map");
            };
            assert_eq!(
                family.apply_patch_tool_type,
                Some(ApplyPatchToolType::Function),
                "LM Studio slug {slug} should expose the function-style apply_patch tool"
            );
        }
    }
}
49 changes: 49 additions & 0 deletions codex-rs/core/src/openai_tools.rs
Original file line number Diff line number Diff line change
Expand Up @@ -1059,6 +1059,55 @@ mod tests {
);
}

/// LM Studio model families should surface `apply_patch` as a function tool
/// even when the caller has not explicitly opted in via the config flag.
#[test]
fn lmstudio_models_include_apply_patch_tool() {
    let model_family = find_family_for_model("qwen/qwen3-coder-30b")
        .expect("LM Studio slugs should map to a model family");

    // All opt-in flags stay off: the tool must come from the family itself.
    let params = ToolsConfigParams {
        model_family: &model_family,
        include_plan_tool: false,
        include_apply_patch_tool: false,
        include_web_search_request: false,
        use_streamable_shell_tool: false,
        include_view_image_tool: false,
        experimental_unified_exec_tool: false,
    };
    let config = ToolsConfig::new(&params);

    let tools = get_openai_tools(&config, None);
    let has_apply_patch = tools.iter().any(|tool| {
        matches!(tool, OpenAiTool::Function(ResponsesApiTool { name, .. }) if name == "apply_patch")
    });
    assert!(has_apply_patch);
}

/// The Chat Completions tool-JSON conversion must carry the `apply_patch`
/// function tool through for LM Studio model families, so the agent can still
/// apply patches over the chat API.
#[test]
fn chat_completions_tools_include_apply_patch_for_lmstudio() {
    let model_family = find_family_for_model("qwen/qwen3-coder-30b")
        .expect("LM Studio slugs should map to a model family");

    // No explicit opt-ins: apply_patch must be implied by the family alone.
    let config = ToolsConfig::new(&ToolsConfigParams {
        model_family: &model_family,
        include_plan_tool: false,
        include_apply_patch_tool: false,
        include_web_search_request: false,
        use_streamable_shell_tool: false,
        include_view_image_tool: false,
        experimental_unified_exec_tool: false,
    });

    let tools = get_openai_tools(&config, None);
    let chat_tools = create_tools_json_for_chat_completions_api(&tools)
        .expect("conversion to chat tools should succeed");

    // Look for a `{"type": "function", "function": {"name": "apply_patch", ...}}` entry.
    let is_apply_patch = |tool: &JsonValue| {
        let is_function = tool.get("type").and_then(JsonValue::as_str) == Some("function");
        let name = tool
            .get("function")
            .and_then(|fn_value| fn_value.get("name"))
            .and_then(JsonValue::as_str);
        is_function && name == Some("apply_patch")
    };
    assert!(chat_tools.iter().any(|tool| is_apply_patch(tool)));
}

#[test]
fn test_shell_tool() {
let tool = super::create_shell_tool();
Expand Down
79 changes: 79 additions & 0 deletions codex-rs/exec/tests/suite/lmstudio.rs
Original file line number Diff line number Diff line change
Expand Up @@ -23,6 +23,8 @@ async fn exec_resolves_lmstudio_model_aliases() -> anyhow::Result<()> {
("qwen3-moe", "qwen/qwen3-coder-30b"),
("qwen3moe", "qwen/qwen3-coder-30b"),
("qwen3-moe-a3b", "qwen/qwen3-30b-a3b-2507"),
("qwen3 coder 30b a3b", "qwen/qwen3-30b-a3b-2507"),
("Qwen3 Coder 30B", "qwen/qwen3-coder-30b"),
];

for (alias, expected_model) in cases {
Expand Down Expand Up @@ -109,3 +111,80 @@ async fn exec_resolves_lmstudio_model_aliases() -> anyhow::Result<()> {

Ok(())
}

/// End-to-end check: running `codex exec --backend lmstudio` must send a
/// chat-completions request whose `tools` array contains the `apply_patch`
/// function tool. Uses a wiremock server standing in for LM Studio.
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn exec_enables_apply_patch_tool_for_lmstudio() -> anyhow::Result<()> {
    let harness = test_codex_exec();
    let mock_server = responses::start_mock_server().await;

    // LM Studio's model listing endpoint: advertise exactly the default model.
    let model_listing = serde_json::json!({
        "data": [
            { "id": DEFAULT_LM_STUDIO_MODEL }
        ]
    });
    Mock::given(method("GET"))
        .and(path("/v1/models"))
        .respond_with(ResponseTemplate::new(200).set_body_json(model_listing))
        .expect(1)
        .mount(&mock_server)
        .await;

    // Minimal SSE stream: one content delta, an empty delta, then the
    // terminator — enough for the exec run to complete successfully.
    let sse_body = "data: {\"choices\":[{\"delta\":{\"content\":\"ok\"}}]}\n\ndata: {\"choices\":[{\"delta\":{}}]}\n\ndata: [DONE]\n\n";
    Mock::given(method("POST"))
        .and(path("/v1/chat/completions"))
        .respond_with(
            ResponseTemplate::new(200)
                .insert_header("content-type", "text/event-stream")
                .set_body_raw(sse_body, "text/event-stream"),
        )
        .expect(1)
        .mount(&mock_server)
        .await;

    harness
        .cmd()
        .env(
            "CODEX_LM_STUDIO_BASE_URL",
            format!("{}/v1", mock_server.uri()),
        )
        .arg("--skip-git-repo-check")
        .arg("--backend")
        .arg("lmstudio")
        .arg(DEFAULT_LM_STUDIO_MODEL)
        .assert()
        .success();

    // Pull the recorded chat request back out of the mock server and inspect
    // its JSON body for the apply_patch function tool.
    let recorded = mock_server
        .received_requests()
        .await
        .expect("failed to capture requests");
    let chat_request = recorded
        .iter()
        .find(|req| req.method == Method::POST && req.url.path() == "/v1/chat/completions")
        .context("LM Studio chat completion request missing")?;

    let payload: Value = serde_json::from_slice(&chat_request.body)
        .context("LM Studio chat completion request should be valid JSON")?;
    let tools = payload
        .get("tools")
        .and_then(Value::as_array)
        .context("LM Studio request missing tools array")?;

    let found_apply_patch = tools.iter().any(|tool| {
        let is_function = tool.get("type").and_then(Value::as_str) == Some("function");
        let name = tool
            .get("function")
            .and_then(|fn_value| fn_value.get("name"))
            .and_then(Value::as_str);
        is_function && name == Some("apply_patch")
    });
    assert!(
        found_apply_patch,
        "LM Studio chat request should include the apply_patch tool: {tools:?}"
    );

    mock_server.verify().await;
    Ok(())
}
31 changes: 26 additions & 5 deletions codex-rs/lmstudio/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -37,10 +37,14 @@ const MODEL_ALIAS_TABLE: &[(&str, &str)] = &[
("qwen3_moe", "qwen/qwen3-coder-30b"),
("qwen-3-moe", "qwen/qwen3-coder-30b"),
("qwen3-coder", "qwen/qwen3-coder-30b"),
("qwen3 coder", "qwen/qwen3-coder-30b"),
("qwen3-30b", "qwen/qwen3-coder-30b"),
("qwen3 coder 30b", "qwen/qwen3-coder-30b"),
("qwen3-moe-a3b", "qwen/qwen3-30b-a3b-2507"),
("qwen3-moe-a3b-2507", "qwen/qwen3-30b-a3b-2507"),
("qwen3-30b-a3b", "qwen/qwen3-30b-a3b-2507"),
("qwen3 coder a3b", "qwen/qwen3-30b-a3b-2507"),
("qwen3 coder 30b a3b", "qwen/qwen3-30b-a3b-2507"),
];

const MODEL_ALIAS_HINTS: &[(&str, &str)] = &[
Expand All @@ -58,6 +62,15 @@ fn alias_examples() -> String {
.join(", ")
}

/// Build the two comparison forms used for alias matching: the input
/// lowercased (ASCII only), and a "compact" variant of that lowercase form
/// with the separator characters `-`, `_`, and space stripped out. Matching
/// against both forms lets users mix separators freely in aliases.
fn normalized_alias_forms(value: &str) -> (String, String) {
    let lowercase = value.to_ascii_lowercase();
    let compact: String = lowercase
        .chars()
        .filter(|&c| c != '-' && c != '_' && c != ' ')
        .collect();
    (lowercase, compact)
}

/// Error returned when a provided LM Studio model alias cannot be resolved.
#[derive(Debug, Clone)]
pub struct UnsupportedModelAliasError {
Expand Down Expand Up @@ -114,11 +127,11 @@ pub fn resolve_model_identifier(model: Option<&str>) -> Result<String, Unsupport
if trimmed.is_empty() {
return Err(UnsupportedModelAliasError::new(trimmed));
}
let normalized = trimmed.to_ascii_lowercase();
if let Some((_, canonical)) = MODEL_ALIAS_TABLE
.iter()
.find(|(alias, _)| *alias == normalized)
{
let (normalized, normalized_compact) = normalized_alias_forms(trimmed);
if let Some((_, canonical)) = MODEL_ALIAS_TABLE.iter().find(|(alias, _)| {
let (alias_normalized, alias_compact) = normalized_alias_forms(alias);
alias_normalized == normalized || alias_compact == normalized_compact
}) {
return Ok((*canonical).to_string());
}
if trimmed.contains('/') || trimmed.contains(':') {
Expand Down Expand Up @@ -272,6 +285,14 @@ mod tests {
resolve_model_identifier(Some("qwen3-moe-a3b")).unwrap(),
"qwen/qwen3-30b-a3b-2507"
);
assert_eq!(
resolve_model_identifier(Some("qwen3 coder 30b a3b")).unwrap(),
"qwen/qwen3-30b-a3b-2507"
);
assert_eq!(
resolve_model_identifier(Some("Qwen3 Coder 30B")).unwrap(),
"qwen/qwen3-coder-30b"
);
}

#[test]
Expand Down
Loading