diff --git a/codex-rs/core/src/config/mod.rs b/codex-rs/core/src/config/mod.rs index 185584115b9..def63df845f 100644 --- a/codex-rs/core/src/config/mod.rs +++ b/codex-rs/core/src/config/mod.rs @@ -61,9 +61,8 @@ pub mod edit; pub mod profile; pub mod types; -pub const OPENAI_DEFAULT_MODEL: &str = "gpt-5.1-codex"; -const OPENAI_DEFAULT_REVIEW_MODEL: &str = "gpt-5.1-codex"; -pub const GPT_5_CODEX_MEDIUM_MODEL: &str = "gpt-5.1-codex"; +pub const OPENAI_DEFAULT_MODEL: &str = "gpt-5.1-codex-max"; +const OPENAI_DEFAULT_REVIEW_MODEL: &str = "gpt-5.1-codex-max"; /// Maximum number of bytes of the documentation that will be embedded. Larger /// files are *silently truncated* to this size so we do not take up too much of diff --git a/codex-rs/core/src/openai_models/model_presets.rs b/codex-rs/core/src/openai_models/model_presets.rs index 3d46c695cc6..6c004656948 100644 --- a/codex-rs/core/src/openai_models/model_presets.rs +++ b/codex-rs/core/src/openai_models/model_presets.rs @@ -209,13 +209,10 @@ static PRESETS: Lazy<Vec<ModelPreset>> = Lazy::new(|| { ] }); -pub(crate) fn builtin_model_presets(auth_mode: Option<AuthMode>) -> Vec<ModelPreset> { +pub(crate) fn builtin_model_presets(_auth_mode: Option<AuthMode>) -> Vec<ModelPreset> { PRESETS .iter() - .filter(|preset| match auth_mode { - Some(AuthMode::ApiKey) => preset.show_in_picker && preset.id != "gpt-5.1-codex-max", - _ => preset.show_in_picker, - }) + .filter(|preset| preset.show_in_picker) .cloned() .collect() } @@ -228,21 +225,10 @@ pub fn all_model_presets() -> &'static Vec<ModelPreset> { #[cfg(test)] mod tests { use super::*; - use codex_app_server_protocol::AuthMode; #[test] fn only_one_default_model_is_configured() { let default_models = PRESETS.iter().filter(|preset| preset.is_default).count(); assert!(default_models == 1); } - - #[test] - fn gpt_5_1_codex_max_hidden_for_api_key_auth() { - let presets = builtin_model_presets(Some(AuthMode::ApiKey)); - assert!( - presets - .iter() - .all(|preset| preset.id != "gpt-5.1-codex-max") - ); - } } diff --git 
a/codex-rs/core/tests/suite/list_models.rs b/codex-rs/core/tests/suite/list_models.rs index 9303820163f..6348841c6fb 100644 --- a/codex-rs/core/tests/suite/list_models.rs +++ b/codex-rs/core/tests/suite/list_models.rs @@ -30,7 +30,12 @@ async fn list_models_returns_chatgpt_models() -> Result<()> { } fn expected_models_for_api_key() -> Vec<Model> { - vec![gpt_5_1_codex(), gpt_5_1_codex_mini(), gpt_5_1()] + vec![ + gpt_5_1_codex_max(), + gpt_5_1_codex(), + gpt_5_1_codex_mini(), + gpt_5_1(), + ] } fn expected_models_for_chatgpt() -> Vec<Model> { diff --git a/codex-rs/tui/src/app.rs b/codex-rs/tui/src/app.rs index ff5fe9a5405..6120c7978d4 100644 --- a/codex-rs/tui/src/app.rs +++ b/codex-rs/tui/src/app.rs @@ -63,7 +63,7 @@ use tokio::sync::mpsc::unbounded_channel; use crate::history_cell::UpdateAvailableHistoryCell; const GPT_5_1_MIGRATION_AUTH_MODES: [AuthMode; 2] = [AuthMode::ChatGPT, AuthMode::ApiKey]; -const GPT_5_1_CODEX_MIGRATION_AUTH_MODES: [AuthMode; 1] = [AuthMode::ChatGPT]; +const GPT_5_1_CODEX_MIGRATION_AUTH_MODES: [AuthMode; 2] = [AuthMode::ChatGPT, AuthMode::ApiKey]; #[derive(Debug, Clone)] pub struct AppExitInfo { @@ -1438,7 +1438,7 @@ mod tests { Some(AuthMode::ChatGPT), HIDE_GPT_5_1_CODEX_MAX_MIGRATION_PROMPT_CONFIG, )); - assert!(!migration_prompt_allows_auth_mode( + assert!(migration_prompt_allows_auth_mode( Some(AuthMode::ApiKey), HIDE_GPT_5_1_CODEX_MAX_MIGRATION_PROMPT_CONFIG, )); diff --git a/codex-rs/tui/src/chatwidget.rs b/codex-rs/tui/src/chatwidget.rs index f956ef5c8ab..c257f7c1bdd 100644 --- a/codex-rs/tui/src/chatwidget.rs +++ b/codex-rs/tui/src/chatwidget.rs @@ -2031,7 +2031,7 @@ impl ChatWidget { } fn lower_cost_preset(&self) -> Option<ModelPreset> { - let models = self.models_manager.available_models.blocking_read(); + let models = self.models_manager.available_models.try_read().ok()?; models .iter() .find(|preset| preset.model == NUDGE_MODEL_SLUG) @@ -2138,13 +2138,17 @@ impl ChatWidget { /// a second popup is shown to choose the reasoning effort. 
pub(crate) fn open_model_popup(&mut self) { let current_model = self.config.model.clone(); - let presets: Vec<ModelPreset> = self - .models_manager - .available_models - .blocking_read() - .iter() - .cloned() - .collect(); + let presets: Vec<ModelPreset> = + // todo(aibrahim): make this async function + if let Ok(models) = self.models_manager.available_models.try_read() { + models.clone() + } else { + self.add_info_message( + "Models are being updated; please try /model again in a moment.".to_string(), + None, + ); + return; + }; let mut items: Vec<SelectionItem> = Vec::new(); for preset in presets.into_iter() { diff --git a/codex-rs/tui/src/chatwidget/snapshots/codex_tui__chatwidget__tests__model_selection_popup.snap b/codex-rs/tui/src/chatwidget/snapshots/codex_tui__chatwidget__tests__model_selection_popup.snap index 6cfce48b895..56a209ef73a 100644 --- a/codex-rs/tui/src/chatwidget/snapshots/codex_tui__chatwidget__tests__model_selection_popup.snap +++ b/codex-rs/tui/src/chatwidget/snapshots/codex_tui__chatwidget__tests__model_selection_popup.snap @@ -5,9 +5,11 @@ expression: popup Select Model and Effort Access legacy models by running codex -m or in your config.toml -› 1. gpt-5.1-codex Optimized for codex. - 2. gpt-5.1-codex-mini Optimized for codex. Cheaper, faster, but less +› 1. gpt-5.1-codex-max Latest Codex-optimized flagship for deep and fast + reasoning. + 2. gpt-5.1-codex Optimized for codex. + 3. gpt-5.1-codex-mini Optimized for codex. Cheaper, faster, but less capable. - 3. gpt-5.1 Broad world knowledge with strong general reasoning. + 4. gpt-5.1 Broad world knowledge with strong general reasoning. Press enter to select reasoning effort, or esc to dismiss. 
diff --git a/codex-rs/tui/src/history_cell.rs b/codex-rs/tui/src/history_cell.rs index c4fd31f5481..de8c06488ea 100644 --- a/codex-rs/tui/src/history_cell.rs +++ b/codex-rs/tui/src/history_cell.rs @@ -573,18 +573,19 @@ impl TooltipHistoryCell { impl HistoryCell for TooltipHistoryCell { fn display_lines(&self, width: u16) -> Vec<Line<'static>> { - let indent: Line<'static> = " ".into(); - let mut lines = Vec::new(); - let tooltip_line: Line<'static> = vec!["Tip: ".cyan(), self.tip.into()].into(); - let wrap_opts = RtOptions::new(usize::from(width.max(1))) - .initial_indent(indent.clone()) - .subsequent_indent(indent.clone()); - lines.extend( - word_wrap_line(&tooltip_line, wrap_opts.clone()) - .into_iter() - .map(|line| line_to_static(&line)), + let indent = " "; + let indent_width = UnicodeWidthStr::width(indent); + let wrap_width = usize::from(width.max(1)) + .saturating_sub(indent_width) + .max(1); + let mut lines: Vec<Line<'static>> = Vec::new(); + append_markdown( + &format!("**Tip:** {}", self.tip), + Some(wrap_width), + &mut lines, ); - lines + + prefix_lines(lines, indent.into(), indent.into()) } } diff --git a/codex-rs/tui/tooltips.txt b/codex-rs/tui/tooltips.txt index 09167eb4fcb..70c254d2935 100644 --- a/codex-rs/tui/tooltips.txt +++ b/codex-rs/tui/tooltips.txt @@ -8,4 +8,4 @@ Type / to open the command popup; Tab autocompletes slash commands and saved pro Use /prompts: key=value to expand a saved prompt with placeholders before sending. With the composer empty, press Esc to step back and edit your last message; Enter confirms. Paste an image with Ctrl+V to attach it to your next message. -You can resume a previous conversation by doing `codex resume` \ No newline at end of file +You can resume a previous conversation by running `codex resume`