Skip to content
Merged

PR #4

Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
5 changes: 2 additions & 3 deletions codex-rs/core/src/config/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -61,9 +61,8 @@ pub mod edit;
pub mod profile;
pub mod types;

pub const OPENAI_DEFAULT_MODEL: &str = "gpt-5.1-codex";
const OPENAI_DEFAULT_REVIEW_MODEL: &str = "gpt-5.1-codex";
pub const GPT_5_CODEX_MEDIUM_MODEL: &str = "gpt-5.1-codex";
pub const OPENAI_DEFAULT_MODEL: &str = "gpt-5.1-codex-max";
const OPENAI_DEFAULT_REVIEW_MODEL: &str = "gpt-5.1-codex-max";

/// Maximum number of bytes of the documentation that will be embedded. Larger
/// files are *silently truncated* to this size so we do not take up too much of
Expand Down
18 changes: 2 additions & 16 deletions codex-rs/core/src/openai_models/model_presets.rs
Original file line number Diff line number Diff line change
Expand Up @@ -209,13 +209,10 @@ static PRESETS: Lazy<Vec<ModelPreset>> = Lazy::new(|| {
]
});

pub(crate) fn builtin_model_presets(auth_mode: Option<AuthMode>) -> Vec<ModelPreset> {
pub(crate) fn builtin_model_presets(_auth_mode: Option<AuthMode>) -> Vec<ModelPreset> {
PRESETS
.iter()
.filter(|preset| match auth_mode {
Some(AuthMode::ApiKey) => preset.show_in_picker && preset.id != "gpt-5.1-codex-max",
_ => preset.show_in_picker,
})
.filter(|preset| preset.show_in_picker)
.cloned()
.collect()
}
Expand All @@ -228,21 +225,10 @@ pub fn all_model_presets() -> &'static Vec<ModelPreset> {
#[cfg(test)]
mod tests {
use super::*;
use codex_app_server_protocol::AuthMode;

#[test]
fn only_one_default_model_is_configured() {
let default_models = PRESETS.iter().filter(|preset| preset.is_default).count();
assert!(default_models == 1);
}

#[test]
fn gpt_5_1_codex_max_hidden_for_api_key_auth() {
let presets = builtin_model_presets(Some(AuthMode::ApiKey));
assert!(
presets
.iter()
.all(|preset| preset.id != "gpt-5.1-codex-max")
);
}
}
7 changes: 6 additions & 1 deletion codex-rs/core/tests/suite/list_models.rs
Original file line number Diff line number Diff line change
Expand Up @@ -30,7 +30,12 @@ async fn list_models_returns_chatgpt_models() -> Result<()> {
}

fn expected_models_for_api_key() -> Vec<ModelPreset> {
vec![gpt_5_1_codex(), gpt_5_1_codex_mini(), gpt_5_1()]
vec![
gpt_5_1_codex_max(),
gpt_5_1_codex(),
gpt_5_1_codex_mini(),
gpt_5_1(),
]
}

fn expected_models_for_chatgpt() -> Vec<ModelPreset> {
Expand Down
4 changes: 2 additions & 2 deletions codex-rs/tui/src/app.rs
Original file line number Diff line number Diff line change
Expand Up @@ -63,7 +63,7 @@ use tokio::sync::mpsc::unbounded_channel;
use crate::history_cell::UpdateAvailableHistoryCell;

const GPT_5_1_MIGRATION_AUTH_MODES: [AuthMode; 2] = [AuthMode::ChatGPT, AuthMode::ApiKey];
const GPT_5_1_CODEX_MIGRATION_AUTH_MODES: [AuthMode; 1] = [AuthMode::ChatGPT];
const GPT_5_1_CODEX_MIGRATION_AUTH_MODES: [AuthMode; 2] = [AuthMode::ChatGPT, AuthMode::ApiKey];

#[derive(Debug, Clone)]
pub struct AppExitInfo {
Expand Down Expand Up @@ -1438,7 +1438,7 @@ mod tests {
Some(AuthMode::ChatGPT),
HIDE_GPT_5_1_CODEX_MAX_MIGRATION_PROMPT_CONFIG,
));
assert!(!migration_prompt_allows_auth_mode(
assert!(migration_prompt_allows_auth_mode(
Some(AuthMode::ApiKey),
HIDE_GPT_5_1_CODEX_MAX_MIGRATION_PROMPT_CONFIG,
));
Expand Down
20 changes: 12 additions & 8 deletions codex-rs/tui/src/chatwidget.rs
Original file line number Diff line number Diff line change
Expand Up @@ -2031,7 +2031,7 @@ impl ChatWidget {
}

fn lower_cost_preset(&self) -> Option<ModelPreset> {
let models = self.models_manager.available_models.blocking_read();
let models = self.models_manager.available_models.try_read().ok()?;
models
.iter()
.find(|preset| preset.model == NUDGE_MODEL_SLUG)
Expand Down Expand Up @@ -2138,13 +2138,17 @@ impl ChatWidget {
/// a second popup is shown to choose the reasoning effort.
pub(crate) fn open_model_popup(&mut self) {
let current_model = self.config.model.clone();
let presets: Vec<ModelPreset> = self
.models_manager
.available_models
.blocking_read()
.iter()
.cloned()
.collect();
let presets: Vec<ModelPreset> =
// todo(aibrahim): make this async function
if let Ok(models) = self.models_manager.available_models.try_read() {
models.clone()
} else {
self.add_info_message(
"Models are being updated; please try /model again in a moment.".to_string(),
None,
);
return;
};

let mut items: Vec<SelectionItem> = Vec::new();
for preset in presets.into_iter() {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -5,9 +5,11 @@ expression: popup
Select Model and Effort
Access legacy models by running codex -m <model_name> or in your config.toml

› 1. gpt-5.1-codex Optimized for codex.
2. gpt-5.1-codex-mini Optimized for codex. Cheaper, faster, but less
› 1. gpt-5.1-codex-max Latest Codex-optimized flagship for deep and fast
reasoning.
2. gpt-5.1-codex Optimized for codex.
3. gpt-5.1-codex-mini Optimized for codex. Cheaper, faster, but less
capable.
3. gpt-5.1 Broad world knowledge with strong general reasoning.
4. gpt-5.1 Broad world knowledge with strong general reasoning.

Press enter to select reasoning effort, or esc to dismiss.
23 changes: 12 additions & 11 deletions codex-rs/tui/src/history_cell.rs
Original file line number Diff line number Diff line change
Expand Up @@ -573,18 +573,19 @@ impl TooltipHistoryCell {

impl HistoryCell for TooltipHistoryCell {
fn display_lines(&self, width: u16) -> Vec<Line<'static>> {
let indent: Line<'static> = " ".into();
let mut lines = Vec::new();
let tooltip_line: Line<'static> = vec!["Tip: ".cyan(), self.tip.into()].into();
let wrap_opts = RtOptions::new(usize::from(width.max(1)))
.initial_indent(indent.clone())
.subsequent_indent(indent.clone());
lines.extend(
word_wrap_line(&tooltip_line, wrap_opts.clone())
.into_iter()
.map(|line| line_to_static(&line)),
let indent = " ";
let indent_width = UnicodeWidthStr::width(indent);
let wrap_width = usize::from(width.max(1))
.saturating_sub(indent_width)
.max(1);
let mut lines: Vec<Line<'static>> = Vec::new();
append_markdown(
&format!("**Tip:** {}", self.tip),
Some(wrap_width),
&mut lines,
);
lines

prefix_lines(lines, indent.into(), indent.into())
}
}

Expand Down
2 changes: 1 addition & 1 deletion codex-rs/tui/tooltips.txt
Original file line number Diff line number Diff line change
Expand Up @@ -8,4 +8,4 @@ Type / to open the command popup; Tab autocompletes slash commands and saved pro
Use /prompts:<name> key=value to expand a saved prompt with placeholders before sending.
With the composer empty, press Esc to step back and edit your last message; Enter confirms.
Paste an image with Ctrl+V to attach it to your next message.
You can resume a previous conversation by doing `codex resume`
You can resume a previous conversation by running `codex resume`