Compare commits


18 Commits

Author  SHA1  Message  Date
Ahmed Ibrahim  c7e6666337  auto-picker  2025-11-18 15:37:25 -08:00
Ahmed Ibrahim  e7227ffec8  tests  2025-11-18 14:49:32 -08:00
Ahmed Ibrahim  f0cc2db4c3  tests  2025-11-18 14:48:57 -08:00
Ahmed Ibrahim  c03d43be6f  all models  2025-11-18 14:47:12 -08:00
Ahmed Ibrahim  44058a2d27  all models  2025-11-18 14:43:03 -08:00
Ahmed Ibrahim  90fdb118f4  Merge branch 'auto-picker' of github.com:openai/codex into auto-picker  2025-11-18 14:38:42 -08:00
Ahmed Ibrahim  0afeea3b5e  tests  2025-11-18 14:34:14 -08:00
Ahmed Ibrahim  e3809caaaf  Merge branch 'main' into auto-picker  2025-11-18 13:50:14 -08:00
Ahmed Ibrahim  1fafbb0fb8  tests  2025-11-18 12:33:54 -08:00
Ahmed Ibrahim  c10bcc6780  tests  2025-11-18 12:29:51 -08:00
Ahmed Ibrahim  77bc873059  tests  2025-11-18 12:28:56 -08:00
Ahmed Ibrahim  c1b7e6f9a8  auto-picker  2025-11-18 12:13:12 -08:00
Ahmed Ibrahim  0ce1ed78e4  tests  2025-11-18 12:00:05 -08:00
Ahmed Ibrahim  c36291f233  tests  2025-11-18 11:43:59 -08:00
Ahmed Ibrahim  9435d39d4c  tests  2025-11-18 11:34:19 -08:00
Ahmed Ibrahim  c025309d32  tests  2025-11-18 11:19:38 -08:00
Ahmed Ibrahim  093cd9cf04  function  2025-11-18 10:54:47 -08:00
Ahmed Ibrahim  9923f76831  auto  2025-11-18 00:17:19 -08:00
14 changed files with 504 additions and 63 deletions

View File

@@ -45,6 +45,29 @@ async fn list_models_returns_all_models_with_large_limit() -> Result<()> {
} = to_response::<ModelListResponse>(response)?;
let expected_models = vec![
Model {
id: "codex-auto".to_string(),
model: "codex-auto".to_string(),
display_name: "codex-auto".to_string(),
description: "Automatically chooses the best Codex model configuration for your task."
.to_string(),
supported_reasoning_efforts: vec![
ReasoningEffortOption {
reasoning_effort: ReasoningEffort::Low,
description: "Works faster".to_string(),
},
ReasoningEffortOption {
reasoning_effort: ReasoningEffort::Medium,
description: "Balances speed with intelligence".to_string(),
},
ReasoningEffortOption {
reasoning_effort: ReasoningEffort::High,
description: "Works longer for harder tasks".to_string(),
},
],
default_reasoning_effort: ReasoningEffort::Medium,
is_default: true,
},
Model {
id: "gpt-5.1-codex".to_string(),
model: "gpt-5.1-codex".to_string(),
@@ -66,7 +89,7 @@ async fn list_models_returns_all_models_with_large_limit() -> Result<()> {
},
],
default_reasoning_effort: ReasoningEffort::Medium,
-is_default: true,
+is_default: false,
},
Model {
id: "gpt-5.1-codex-mini".to_string(),
@@ -147,7 +170,7 @@ async fn list_models_pagination_works() -> Result<()> {
} = to_response::<ModelListResponse>(first_response)?;
assert_eq!(first_items.len(), 1);
-assert_eq!(first_items[0].id, "gpt-5.1-codex");
+assert_eq!(first_items[0].id, "codex-auto");
let next_cursor = first_cursor.ok_or_else(|| anyhow!("cursor for second page"))?;
let second_request = mcp
@@ -169,7 +192,7 @@ async fn list_models_pagination_works() -> Result<()> {
} = to_response::<ModelListResponse>(second_response)?;
assert_eq!(second_items.len(), 1);
-assert_eq!(second_items[0].id, "gpt-5.1-codex-mini");
+assert_eq!(second_items[0].id, "gpt-5.1-codex");
let third_cursor = second_cursor.ok_or_else(|| anyhow!("cursor for third page"))?;
let third_request = mcp
@@ -191,8 +214,30 @@ async fn list_models_pagination_works() -> Result<()> {
} = to_response::<ModelListResponse>(third_response)?;
assert_eq!(third_items.len(), 1);
-assert_eq!(third_items[0].id, "gpt-5.1");
-assert!(third_cursor.is_none());
+assert_eq!(third_items[0].id, "gpt-5.1-codex-mini");
+let fourth_cursor = third_cursor.ok_or_else(|| anyhow!("cursor for fourth page"))?;
+let fourth_request = mcp
+.send_list_models_request(ModelListParams {
+limit: Some(1),
+cursor: Some(fourth_cursor.clone()),
+})
+.await?;
+let fourth_response: JSONRPCResponse = timeout(
+DEFAULT_TIMEOUT,
+mcp.read_stream_until_response_message(RequestId::Integer(fourth_request)),
+)
+.await??;
+let ModelListResponse {
+data: fourth_items,
+next_cursor: fourth_cursor,
+} = to_response::<ModelListResponse>(fourth_response)?;
+assert_eq!(fourth_items.len(), 1);
+assert_eq!(fourth_items[0].id, "gpt-5.1");
+assert!(fourth_cursor.is_none());
Ok(())
}
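
With `codex-auto` prepended to the presets, the pagination walk above now spans four single-item pages. A minimal sketch of the cursor loop, assuming a simplified in-memory pager in place of the real MCP harness (the types here are illustrative stand-ins):

```rust
// Minimal cursor-pagination sketch; `Model`, `ModelListResponse`, and
// `list_models` are simplified stand-ins for the real MCP types.
#[derive(Debug, Clone)]
struct Model {
    id: &'static str,
}

struct ModelListResponse {
    data: Vec<Model>,
    next_cursor: Option<usize>,
}

fn list_models(all: &[Model], limit: usize, cursor: Option<usize>) -> ModelListResponse {
    let start = cursor.unwrap_or(0);
    let end = (start + limit).min(all.len());
    ModelListResponse {
        data: all[start..end].to_vec(),
        // A cursor is only returned while more pages remain.
        next_cursor: (end < all.len()).then_some(end),
    }
}

fn main() {
    // Order matches the test's expectation after this change.
    let all = [
        Model { id: "codex-auto" },
        Model { id: "gpt-5.1-codex" },
        Model { id: "gpt-5.1-codex-mini" },
        Model { id: "gpt-5.1" },
    ];
    let mut cursor = None;
    loop {
        let page = list_models(&all, 1, cursor);
        for model in &page.data {
            println!("{}", model.id);
        }
        match page.next_cursor {
            Some(next) => cursor = Some(next),
            None => break, // the fourth page ends the walk, as the test asserts
        }
    }
}
```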

View File

@@ -1,6 +1,7 @@
use codex_core::WireApi;
use codex_core::config::Config;
use crate::model_presets::reasoning_effort_label_for_model;
use crate::sandbox_summary::summarize_sandbox_policy;
/// Build a list of key/value pairs summarizing the effective configuration.
@@ -19,7 +20,7 @@ pub fn create_config_summary_entries(config: &Config) -> Vec<(&'static str, Stri
"reasoning effort",
config
.model_reasoning_effort
-.map(|effort| effort.to_string())
+.map(|effort| reasoning_effort_label_for_model(&config.model, effort))
.unwrap_or_else(|| "none".to_string()),
));
entries.push((
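
The summary entry now resolves the configured effort through the per-model label table before falling back to the plain effort name. A compact sketch of that chain, with a hypothetical `label_for` standing in for `reasoning_effort_label_for_model`:

```rust
// Sketch of the summary's fallback chain; `label_for` is a hypothetical
// stand-in for reasoning_effort_label_for_model.
fn label_for(model: &str, effort: &str) -> String {
    match (model, effort) {
        ("codex-auto", "low") => "Fast".to_string(),
        ("codex-auto", "medium") => "Balanced".to_string(),
        ("codex-auto", "high") => "Thorough".to_string(),
        // Models without friendly labels keep the raw effort name.
        _ => effort.to_string(),
    }
}

fn reasoning_effort_entry(model: &str, effort: Option<&str>) -> (&'static str, String) {
    let value = effort
        .map(|e| label_for(model, e))
        // An unset effort still renders as "none", as before this change.
        .unwrap_or_else(|| "none".to_string());
    ("reasoning effort", value)
}

fn main() {
    assert_eq!(reasoning_effort_entry("codex-auto", Some("low")).1, "Fast");
    assert_eq!(reasoning_effort_entry("gpt-5.1", Some("high")).1, "high");
    assert_eq!(reasoning_effort_entry("gpt-5.1", None).1, "none");
}
```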

View File

@@ -11,6 +11,16 @@ pub struct ReasoningEffortPreset {
pub effort: ReasoningEffort,
/// Short human description shown next to the effort in UIs.
pub description: &'static str,
/// Optional friendly label shown in featured pickers.
pub label: Option<&'static str>,
}
impl ReasoningEffortPreset {
pub fn label(&self) -> String {
self.label
.map(ToString::to_string)
.unwrap_or_else(|| self.effort.to_string())
}
}
#[derive(Debug, Clone)]
@@ -42,6 +52,32 @@ pub struct ModelPreset {
static PRESETS: Lazy<Vec<ModelPreset>> = Lazy::new(|| {
vec![
ModelPreset {
id: "codex-auto",
model: "codex-auto",
display_name: "codex-auto",
description: "Automatically chooses the best Codex model configuration for your task.",
default_reasoning_effort: ReasoningEffort::Medium,
supported_reasoning_efforts: &[
ReasoningEffortPreset {
effort: ReasoningEffort::Low,
description: "Works faster",
label: Some("Fast"),
},
ReasoningEffortPreset {
effort: ReasoningEffort::Medium,
description: "Balances speed with intelligence",
label: Some("Balanced"),
},
ReasoningEffortPreset {
effort: ReasoningEffort::High,
description: "Works longer for harder tasks",
label: Some("Thorough"),
},
],
is_default: true,
upgrade: None,
},
ModelPreset {
id: "gpt-5.1-codex",
model: "gpt-5.1-codex",
@@ -52,17 +88,20 @@ static PRESETS: Lazy<Vec<ModelPreset>> = Lazy::new(|| {
ReasoningEffortPreset {
effort: ReasoningEffort::Low,
description: "Fastest responses with limited reasoning",
label: None,
},
ReasoningEffortPreset {
effort: ReasoningEffort::Medium,
description: "Dynamically adjusts reasoning based on the task",
label: None,
},
ReasoningEffortPreset {
effort: ReasoningEffort::High,
description: "Maximizes reasoning depth for complex or ambiguous problems",
label: None,
},
],
-is_default: true,
+is_default: false,
upgrade: None,
},
ModelPreset {
@@ -75,10 +114,12 @@ static PRESETS: Lazy<Vec<ModelPreset>> = Lazy::new(|| {
ReasoningEffortPreset {
effort: ReasoningEffort::Medium,
description: "Dynamically adjusts reasoning based on the task",
label: None,
},
ReasoningEffortPreset {
effort: ReasoningEffort::High,
description: "Maximizes reasoning depth for complex or ambiguous problems",
label: None,
},
],
is_default: false,
@@ -94,14 +135,17 @@ static PRESETS: Lazy<Vec<ModelPreset>> = Lazy::new(|| {
ReasoningEffortPreset {
effort: ReasoningEffort::Low,
description: "Balances speed with some reasoning; useful for straightforward queries and short explanations",
label: None,
},
ReasoningEffortPreset {
effort: ReasoningEffort::Medium,
description: "Provides a solid balance of reasoning depth and latency for general-purpose tasks",
label: None,
},
ReasoningEffortPreset {
effort: ReasoningEffort::High,
description: "Maximizes reasoning depth for complex or ambiguous problems",
label: None,
},
],
is_default: false,
@@ -118,14 +162,17 @@ static PRESETS: Lazy<Vec<ModelPreset>> = Lazy::new(|| {
ReasoningEffortPreset {
effort: ReasoningEffort::Low,
description: "Fastest responses with limited reasoning",
label: None,
},
ReasoningEffortPreset {
effort: ReasoningEffort::Medium,
description: "Dynamically adjusts reasoning based on the task",
label: None,
},
ReasoningEffortPreset {
effort: ReasoningEffort::High,
description: "Maximizes reasoning depth for complex or ambiguous problems",
label: None,
},
],
is_default: false,
@@ -144,10 +191,12 @@ static PRESETS: Lazy<Vec<ModelPreset>> = Lazy::new(|| {
ReasoningEffortPreset {
effort: ReasoningEffort::Medium,
description: "Dynamically adjusts reasoning based on the task",
label: None,
},
ReasoningEffortPreset {
effort: ReasoningEffort::High,
description: "Maximizes reasoning depth for complex or ambiguous problems",
label: None,
},
],
is_default: false,
@@ -166,18 +215,22 @@ static PRESETS: Lazy<Vec<ModelPreset>> = Lazy::new(|| {
ReasoningEffortPreset {
effort: ReasoningEffort::Minimal,
description: "Fastest responses with little reasoning",
label: None,
},
ReasoningEffortPreset {
effort: ReasoningEffort::Low,
description: "Balances speed with some reasoning; useful for straightforward queries and short explanations",
label: None,
},
ReasoningEffortPreset {
effort: ReasoningEffort::Medium,
description: "Provides a solid balance of reasoning depth and latency for general-purpose tasks",
label: None,
},
ReasoningEffortPreset {
effort: ReasoningEffort::High,
description: "Maximizes reasoning depth for complex or ambiguous problems",
label: None,
},
],
is_default: false,
@@ -192,11 +245,15 @@ static PRESETS: Lazy<Vec<ModelPreset>> = Lazy::new(|| {
]
});
-pub fn builtin_model_presets(_auth_mode: Option<AuthMode>) -> Vec<ModelPreset> {
-// leave auth mode for later use
+pub fn builtin_model_presets(auth_mode: Option<AuthMode>) -> Vec<ModelPreset> {
PRESETS
.iter()
.filter(|preset| preset.upgrade.is_none())
.filter(|preset| match auth_mode {
// `codex-auto` is only available when using ChatGPT-style auth.
Some(AuthMode::ApiKey) => preset.id != "codex-auto",
_ => true,
})
.cloned()
.collect()
}
@@ -205,6 +262,25 @@ pub fn all_model_presets() -> &'static Vec<ModelPreset> {
&PRESETS
}
impl ModelPreset {
pub fn reasoning_effort_label(&self, effort: ReasoningEffort) -> String {
self.supported_reasoning_efforts
.iter()
.find(|option| option.effort == effort)
.map(ReasoningEffortPreset::label)
.unwrap_or_else(|| effort.to_string())
}
}
/// Return the display label for a reasoning effort on a given model, falling back to the effort
/// name when no label is provided.
pub fn reasoning_effort_label_for_model(model: &str, effort: ReasoningEffort) -> String {
all_model_presets()
.iter()
.find(|preset| preset.model == model)
.map(|preset| preset.reasoning_effort_label(effort))
.unwrap_or_else(|| effort.to_string())
}
#[cfg(test)]
mod tests {
use super::*;
@@ -214,4 +290,31 @@ mod tests {
let default_models = PRESETS.iter().filter(|preset| preset.is_default).count();
assert!(default_models == 1);
}
#[test]
fn codex_auto_is_included_for_non_api_auth() {
let presets_no_auth = builtin_model_presets(None);
assert!(
presets_no_auth
.iter()
.any(|preset| preset.id == "codex-auto")
);
let presets_chatgpt = builtin_model_presets(Some(AuthMode::ChatGPT));
assert!(
presets_chatgpt
.iter()
.any(|preset| preset.id == "codex-auto")
);
}
#[test]
fn codex_auto_is_excluded_for_api_key_auth() {
let presets_api_key = builtin_model_presets(Some(AuthMode::ApiKey));
assert!(
!presets_api_key
.iter()
.any(|preset| preset.id == "codex-auto")
);
}
}
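
Because the new `label` field is optional, only featured presets opt into friendly names while every legacy preset keeps rendering the raw effort. A minimal sketch of that fallback with pared-down types (the real `ReasoningEffortPreset` carries more fields):

```rust
// Pared-down sketch of the Option<&'static str> label fallback used above.
#[derive(Clone, Copy)]
struct EffortPreset {
    effort: &'static str, // stands in for the ReasoningEffort enum's Display
    label: Option<&'static str>,
}

impl EffortPreset {
    fn label(&self) -> String {
        self.label
            .map(ToString::to_string)
            .unwrap_or_else(|| self.effort.to_string())
    }
}

fn main() {
    let featured = EffortPreset { effort: "low", label: Some("Fast") };
    let legacy = EffortPreset { effort: "high", label: None };
    assert_eq!(featured.label(), "Fast"); // featured picker shows the friendly name
    assert_eq!(legacy.label(), "high");   // everything else falls back to the effort
}
```

`reasoning_effort_label_for_model` layers one more fallback on top: a model with no matching preset skips the lookup entirely and prints the plain effort name.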

View File

@@ -62,9 +62,9 @@ pub mod profile;
pub mod types;
#[cfg(target_os = "windows")]
-pub const OPENAI_DEFAULT_MODEL: &str = "gpt-5.1";
+pub const OPENAI_DEFAULT_MODEL: &str = "codex-auto";
#[cfg(not(target_os = "windows"))]
-pub const OPENAI_DEFAULT_MODEL: &str = "gpt-5.1-codex";
+pub const OPENAI_DEFAULT_MODEL: &str = "codex-auto";
const OPENAI_DEFAULT_REVIEW_MODEL: &str = "gpt-5.1-codex";
pub const GPT_5_CODEX_MEDIUM_MODEL: &str = "gpt-5.1-codex";
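
With both cfg branches now assigning the same string, the platform split is vestigial; presumably it was kept to minimize the diff. An equivalent collapsed form (an assumption, not part of this change):

```rust
// Equivalent single definition once both cfg branches agree (assumption;
// the diff keeps the cfg(target_os = "windows") split intact).
pub const OPENAI_DEFAULT_MODEL: &str = "codex-auto";
```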

View File

@@ -194,6 +194,18 @@ async fn prompt_tools_are_consistent_across_requests() -> anyhow::Result<()> {
"view_image",
],
),
(
"codex-auto",
vec![
"shell",
"list_mcp_resources",
"list_mcp_resource_templates",
"read_mcp_resource",
"update_plan",
"apply_patch",
"view_image",
],
),
(
"gpt-5.1-codex",
vec![

View File

@@ -19,6 +19,7 @@ use crate::update_action::UpdateAction;
use codex_ansi_escape::ansi_escape_line;
use codex_common::model_presets::ModelUpgrade;
use codex_common::model_presets::all_model_presets;
use codex_common::model_presets::reasoning_effort_label_for_model;
use codex_core::AuthManager;
use codex_core::ConversationManager;
use codex_core::config::Config;
@@ -97,6 +98,15 @@ fn should_show_model_migration_prompt(
.any(|preset| preset.model == current_model)
}
fn format_model_change_target(model: &str, effort: Option<ReasoningEffortConfig>) -> String {
if let Some(effort) = effort {
let effort_label = reasoning_effort_label_for_model(model, effort);
format!("{model} ({effort_label})")
} else {
format!("{model} with default reasoning")
}
}
async fn handle_model_migration_prompt_if_needed(
tui: &mut tui::Tui,
config: &mut Config,
@@ -515,8 +525,18 @@ impl App {
self.config.model_family = family;
}
}
-AppEvent::OpenReasoningPopup { model } => {
-self.chat_widget.open_reasoning_popup(model);
+AppEvent::OpenReasoningPopup {
+model,
+preferred_effort,
+} => {
+self.chat_widget
+.open_reasoning_popup(model, preferred_effort);
+}
+AppEvent::OpenAllModelsPopup => {
+self.chat_widget.open_all_models_popup();
+}
+AppEvent::ApplyModelAndEffort { model, effort } => {
+self.chat_widget.apply_model_and_effort(model, effort);
+}
AppEvent::OpenFullAccessConfirmation { preset } => {
self.chat_widget.open_full_access_confirmation(preset);
@@ -618,21 +638,15 @@ impl App {
.await
{
Ok(()) => {
-let effort_label = effort
-.map(|eff| format!(" with {eff} reasoning"))
-.unwrap_or_else(|| " with default reasoning".to_string());
+let target = format_model_change_target(&model, effort);
if let Some(profile) = profile {
self.chat_widget.add_info_message(
-format!(
-"Model changed to {model}{effort_label} for {profile} profile"
-),
+format!("Model changed to {target} for {profile} profile"),
None,
);
} else {
-self.chat_widget.add_info_message(
-format!("Model changed to {model}{effort_label}"),
-None,
-);
+self.chat_widget
+.add_info_message(format!("Model changed to {target}"), None);
}
}
Err(err) => {
@@ -1111,4 +1125,24 @@ mod tests {
Some("codex resume 123e4567-e89b-12d3-a456-426614174000".to_string())
);
}
#[test]
fn format_model_change_target_prefers_featured_label() {
let formatted =
super::format_model_change_target("codex-auto", Some(ReasoningEffortConfig::Low));
assert_eq!(formatted, "codex-auto (Fast)");
}
#[test]
fn format_model_change_target_falls_back_to_effort_name_when_no_label() {
let formatted =
super::format_model_change_target("gpt-5.1-codex", Some(ReasoningEffortConfig::High));
assert_eq!(formatted, "gpt-5.1-codex (high)");
}
#[test]
fn format_model_change_target_handles_default_reasoning() {
let formatted = super::format_model_change_target("gpt-5.1-codex", None);
assert_eq!(formatted, "gpt-5.1-codex with default reasoning");
}
}

View File

@@ -69,8 +69,18 @@ pub(crate) enum AppEvent {
/// Open the reasoning selection popup after picking a model.
OpenReasoningPopup {
model: ModelPreset,
+preferred_effort: Option<ReasoningEffort>,
},
/// Apply a model + reasoning effort combination directly.
ApplyModelAndEffort {
model: String,
effort: Option<ReasoningEffort>,
},
/// Open the full model list picker.
OpenAllModelsPopup,
/// Open the confirmation prompt before enabling full access mode.
OpenFullAccessConfirmation {
preset: ApprovalPreset,
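
The three variants split the picker into two paths: featured rows apply a model/effort pair in one step, while the full list still goes through the two-stage reasoning popup, now with an optional preselected effort. A simplified dispatch sketch (string payloads stand in for the real `ModelPreset` and `ReasoningEffort` types):

```rust
// Simplified sketch of the new event flow; the real handlers live on
// ChatWidget and take richer types than these string stand-ins.
enum AppEvent {
    OpenReasoningPopup { model: String, preferred_effort: Option<String> },
    ApplyModelAndEffort { model: String, effort: Option<String> },
    OpenAllModelsPopup,
}

fn handle(event: AppEvent) {
    match event {
        // Stage 2 of the two-step picker, optionally highlighting an effort.
        AppEvent::OpenReasoningPopup { model, preferred_effort } => {
            println!("open reasoning popup for {model}, highlight {preferred_effort:?}");
        }
        // Featured rows skip stage 2 and apply the pair directly.
        AppEvent::ApplyModelAndEffort { model, effort } => {
            println!("apply {model} with effort {effort:?}");
        }
        // Escape hatch from the featured picker into the full model list.
        AppEvent::OpenAllModelsPopup => println!("open full model list"),
    }
}

fn main() {
    handle(AppEvent::ApplyModelAndEffort {
        model: "codex-auto".into(),
        effort: Some("low".into()),
    });
}
```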

View File

@@ -1966,10 +1966,92 @@ impl ChatWidget {
/// Open a popup to choose the model (stage 1). After selecting a model,
/// a second popup is shown to choose the reasoning effort.
pub(crate) fn open_model_popup(&mut self) {
let current_model = self.config.model.clone();
let auth_mode = self.auth_manager.auth().map(|auth| auth.mode);
-let presets: Vec<ModelPreset> = builtin_model_presets(auth_mode);
+let presets = builtin_model_presets(auth_mode);
let (featured, all_models): (Vec<_>, Vec<_>) =
presets.into_iter().partition(|preset| preset.is_default);
if featured.is_empty() {
self.show_model_list(
all_models,
"Select Model and Effort",
Some(
"Access legacy models by running codex -m <model_name> or in your config.toml"
.to_string(),
),
);
return;
}
let Some(featured_model) = featured.into_iter().next() else {
unreachable!("featured presets checked to be non-empty");
};
let mut items = self.featured_model_items(&featured_model);
items.push(SelectionItem {
name: "All models".to_string(),
description: Some(
"Choose and configure what model and reasoning level to use".to_string(),
),
selected_description: None,
is_current: false,
actions: vec![Box::new(|tx| {
tx.send(AppEvent::OpenAllModelsPopup);
})],
dismiss_on_select: false,
..Default::default()
});
self.bottom_pane.show_selection_view(SelectionViewParams {
title: Some("Select Model".to_string()),
subtitle: Some(
"Quickly pick Codex Auto or open the full list of models for more options."
.to_string(),
),
footer_hint: Some("Press enter to apply selection, or esc to dismiss.".into()),
items,
..Default::default()
});
}
pub(crate) fn open_all_models_popup(&mut self) {
let auth_mode = self.auth_manager.auth().map(|auth| auth.mode);
let presets = builtin_model_presets(auth_mode);
let (_, all_models): (Vec<_>, Vec<_>) =
presets.into_iter().partition(|preset| preset.is_default);
self.show_model_list(
all_models,
"Select Model",
Some(
"Access the full list of models or use codex -m <model_name> in your config.toml"
.to_string(),
),
);
}
fn show_model_list(
&mut self,
presets: Vec<ModelPreset>,
title: &str,
subtitle: Option<String>,
) {
if presets.is_empty() {
let items = vec![SelectionItem {
name: "No models available".to_string(),
description: Some("No matching model presets are configured.".to_string()),
is_current: false,
dismiss_on_select: true,
..Default::default()
}];
self.bottom_pane.show_selection_view(SelectionViewParams {
title: Some(title.to_string()),
subtitle,
footer_hint: Some(standard_popup_hint_line()),
items,
..Default::default()
});
return;
}
let current_model = self.config.model.clone();
let mut items: Vec<SelectionItem> = Vec::new();
for preset in presets.into_iter() {
let description = if preset.description.is_empty() {
@@ -1984,6 +2066,7 @@ impl ChatWidget {
let preset_for_event = preset_for_action.clone();
tx.send(AppEvent::OpenReasoningPopup {
model: preset_for_event,
preferred_effort: None,
});
})];
items.push(SelectionItem {
@@ -1997,11 +2080,8 @@ impl ChatWidget {
}
self.bottom_pane.show_selection_view(SelectionViewParams {
title: Some("Select Model and Effort".to_string()),
subtitle: Some(
"Access legacy models by running codex -m <model_name> or in your config.toml"
.to_string(),
),
title: Some(title.to_string()),
subtitle,
footer_hint: Some("Press enter to select reasoning effort, or esc to dismiss.".into()),
items,
..Default::default()
@@ -2009,7 +2089,11 @@ impl ChatWidget {
}
/// Open a popup to choose the reasoning effort (stage 2) for the given model.
-pub(crate) fn open_reasoning_popup(&mut self, preset: ModelPreset) {
+pub(crate) fn open_reasoning_popup(
+&mut self,
+preset: ModelPreset,
+preferred_effort: Option<ReasoningEffortConfig>,
+) {
let default_effort: ReasoningEffortConfig = preset.default_reasoning_effort;
let supported = preset.supported_reasoning_efforts;
@@ -2052,7 +2136,11 @@ impl ChatWidget {
let model_slug = preset.model.to_string();
let is_current_model = self.config.model == preset.model;
-let highlight_choice = if is_current_model {
+let preferred_choice = preferred_effort
+.filter(|effort| choices.iter().any(|choice| choice.stored == Some(*effort)));
+let highlight_choice = if let Some(effort) = preferred_choice {
+Some(effort)
+} else if is_current_model {
self.config.model_reasoning_effort
} else {
default_choice
@@ -2060,11 +2148,13 @@ impl ChatWidget {
let mut items: Vec<SelectionItem> = Vec::new();
for choice in choices.iter() {
let effort = choice.display;
-let mut effort_label = effort.to_string();
-if let Some(first) = effort_label.get_mut(0..1) {
-first.make_ascii_uppercase();
-}
-if choice.stored == default_choice {
+let mut effort_label = supported
+.iter()
+.find(|option| option.effort == effort)
+.map(codex_common::model_presets::ReasoningEffortPreset::label)
+.unwrap_or_else(|| effort.to_string());
+let is_current_choice = is_current_model && choice.stored == highlight_choice;
+if choice.stored == default_choice && !is_current_choice {
effort_label.push_str(" (default)");
}
@@ -2117,7 +2207,7 @@ impl ChatWidget {
name: effort_label,
description,
selected_description,
-is_current: is_current_model && choice.stored == highlight_choice,
+is_current: is_current_choice,
actions,
dismiss_on_select: true,
..Default::default()
@@ -2137,7 +2227,54 @@ impl ChatWidget {
});
}
-fn apply_model_and_effort(&self, model: String, effort: Option<ReasoningEffortConfig>) {
+fn featured_model_items(&self, preset: &ModelPreset) -> Vec<SelectionItem> {
+let default_effort: ReasoningEffortConfig = preset.default_reasoning_effort;
+let is_current = self.config.model == preset.model;
+let current_effort = if is_current {
+self.config.model_reasoning_effort.or(Some(default_effort))
+} else {
+None
+};
+let mut items = Vec::new();
+let model_slug = preset.model.to_string();
+for option in preset.supported_reasoning_efforts.iter() {
+let effort = option.effort;
+let model_for_action = model_slug.clone();
+let actions: Vec<SelectionAction> = vec![Box::new(move |tx| {
+tx.send(AppEvent::ApplyModelAndEffort {
+model: model_for_action.clone(),
+effort: Some(effort),
+});
+})];
+let mut name = option.label().to_string();
+let is_current_option = current_effort == Some(effort);
+if effort == default_effort && !is_current_option {
+name.push_str(" (default)");
+}
+let description =
+(!option.description.is_empty()).then(|| option.description.to_string());
+items.push(SelectionItem {
+name,
+description,
+selected_description: None,
+is_current: is_current_option,
+actions,
+dismiss_on_select: true,
+..Default::default()
+});
+}
+items
+}
+pub(crate) fn apply_model_and_effort(
+&self,
+model: String,
+effort: Option<ReasoningEffortConfig>,
+) {
self.app_event_tx
.send(AppEvent::CodexOp(Op::OverrideTurnContext {
cwd: None,
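
The reworked `open_model_popup` hinges on partitioning the presets by `is_default`: the single featured preset (currently `codex-auto`) becomes the quick picker, everything else feeds the "All models" list, and an empty featured set falls back to the legacy full list. A minimal sketch of that split with a pared-down preset type:

```rust
// Pared-down sketch of the featured/full-list partition in open_model_popup.
#[derive(Clone)]
struct Preset {
    id: &'static str,
    is_default: bool,
}

fn split(presets: Vec<Preset>) -> (Vec<Preset>, Vec<Preset>) {
    // Featured presets (is_default) drive the quick picker; the rest
    // populate the "All models" list.
    presets.into_iter().partition(|preset| preset.is_default)
}

fn main() {
    let presets = vec![
        Preset { id: "codex-auto", is_default: true },
        Preset { id: "gpt-5.1-codex", is_default: false },
        Preset { id: "gpt-5.1", is_default: false },
    ];
    let (featured, all_models) = split(presets);
    assert_eq!(featured[0].id, "codex-auto");
    assert_eq!(all_models.len(), 2);
    // An empty `featured` would route straight to the full list with the
    // legacy "Select Model and Effort" title, as in the diff above.
}
```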

View File

@@ -4,9 +4,9 @@ expression: popup
---
Select Reasoning Level for gpt-5.1-codex
-1. Low Fastest responses with limited reasoning
-2. Medium (default) Dynamically adjusts reasoning based on the task
-3. High (current) Maximizes reasoning depth for complex or ambiguous
+1. low Fastest responses with limited reasoning
+2. medium (default) Dynamically adjusts reasoning based on the task
+3. high (current) Maximizes reasoning depth for complex or ambiguous
problems
⚠ High reasoning effort can quickly consume Plus plan
rate limits.

View File

@@ -0,0 +1,14 @@
---
source: tui/src/chatwidget/tests.rs
expression: popup
---
Select Model
Quickly pick Codex Auto or open the full list of models for more options.
1. Fast Works faster
2. Balanced (default) Balances speed with intelligence
3. Thorough Works longer for harder tasks
4. All models Choose and configure what model and reasoning level
to use
Press enter to apply selection, or esc to dismiss.

View File

@@ -1445,6 +1445,73 @@ fn model_selection_popup_snapshot() {
assert_snapshot!("model_selection_popup", popup);
}
#[test]
fn model_selection_popup_chatgpt_auth_snapshot() {
let (mut chat, _rx, _op_rx) = make_chatwidget_manual();
chat.auth_manager =
AuthManager::from_auth_for_testing(CodexAuth::create_dummy_chatgpt_auth_for_testing());
chat.config.model = "gpt-5.1-codex".to_string();
chat.open_model_popup();
let popup = render_bottom_popup(&chat, 80);
insta::with_settings!({ snapshot_suffix => "chatgpt_auth" }, {
assert_snapshot!("model_selection_popup", popup);
});
}
#[test]
fn featured_model_popup_hides_default_label_when_option_is_current() {
let (mut chat, _rx, _op_rx) = make_chatwidget_manual();
chat.auth_manager =
AuthManager::from_auth_for_testing(CodexAuth::create_dummy_chatgpt_auth_for_testing());
let preset = builtin_model_presets(None)
.into_iter()
.find(|preset| preset.is_default)
.expect("default preset");
chat.config.model = preset.model.to_string();
chat.config.model_reasoning_effort = Some(preset.default_reasoning_effort);
chat.open_model_popup();
let popup = render_bottom_popup(&chat, 80);
let current_line = popup
.lines()
.find(|line| line.contains("(current)"))
.expect("current featured option line");
assert!(
!current_line.contains("(default)"),
"expected current featured option to omit redundant default tag: {current_line}"
);
}
#[test]
fn esc_on_all_models_returns_to_featured_picker() {
let (mut chat, _rx, _op_rx) = make_chatwidget_manual();
chat.auth_manager =
AuthManager::from_auth_for_testing(CodexAuth::create_dummy_chatgpt_auth_for_testing());
chat.open_model_popup();
// Select "All models" from the featured picker (option 4) and open the full list.
chat.handle_key_event(KeyEvent::new(KeyCode::Char('4'), KeyModifiers::NONE));
chat.open_all_models_popup();
assert!(
!chat.bottom_pane.is_normal_backtrack_mode(),
"all models popup should be stacked on top of the featured picker"
);
// Esc should close the all-models list but leave the featured picker visible.
chat.handle_key_event(KeyEvent::new(KeyCode::Esc, KeyModifiers::NONE));
assert!(
!chat.bottom_pane.is_normal_backtrack_mode(),
"esc should return to the featured picker instead of dismissing the model picker"
);
}
#[test]
fn approvals_selection_popup_snapshot() {
let (mut chat, _rx, _op_rx) = make_chatwidget_manual();
@@ -1523,31 +1590,56 @@ fn model_reasoning_selection_popup_snapshot() {
.into_iter()
.find(|preset| preset.model == "gpt-5.1-codex")
.expect("gpt-5.1-codex preset");
-chat.open_reasoning_popup(preset);
+chat.open_reasoning_popup(preset, None);
let popup = render_bottom_popup(&chat, 80);
assert_snapshot!("model_reasoning_selection_popup", popup);
}
#[test]
fn reasoning_popup_hides_default_label_when_option_is_current() {
let (mut chat, _rx, _op_rx) = make_chatwidget_manual();
let preset = builtin_model_presets(None)
.into_iter()
.find(|preset| preset.is_default)
.expect("default preset");
chat.config.model = preset.model.to_string();
chat.config.model_reasoning_effort = Some(preset.default_reasoning_effort);
chat.open_reasoning_popup(preset, None);
let popup = render_bottom_popup(&chat, 80);
let current_line = popup
.lines()
.find(|line| line.contains("(current)"))
.expect("current reasoning option line");
assert!(
!current_line.contains("(default)"),
"expected current reasoning option to omit redundant default tag: {current_line}"
);
}
#[test]
fn single_reasoning_option_skips_selection() {
let (mut chat, mut rx, _op_rx) = make_chatwidget_manual();
-static SINGLE_EFFORT: [ReasoningEffortPreset; 1] = [ReasoningEffortPreset {
+const SINGLE_HIGH_REASONING: &[ReasoningEffortPreset] = &[ReasoningEffortPreset {
effort: ReasoningEffortConfig::High,
description: "Maximizes reasoning depth for complex or ambiguous problems",
label: None,
}];
let preset = ModelPreset {
id: "model-with-single-reasoning",
model: "model-with-single-reasoning",
display_name: "model-with-single-reasoning",
description: "",
default_reasoning_effort: ReasoningEffortConfig::High,
-supported_reasoning_efforts: &SINGLE_EFFORT,
+supported_reasoning_efforts: SINGLE_HIGH_REASONING,
is_default: false,
upgrade: None,
};
-chat.open_reasoning_popup(preset);
+chat.open_reasoning_popup(preset, None);
let popup = render_bottom_popup(&chat, 80);
assert!(
@@ -1601,7 +1693,7 @@ fn reasoning_popup_escape_returns_to_model_popup() {
.into_iter()
.find(|preset| preset.model == "gpt-5.1-codex")
.expect("gpt-5.1-codex preset");
-chat.open_reasoning_popup(presets);
+chat.open_reasoning_popup(presets, None);
let before_escape = render_bottom_popup(&chat, 80);
assert!(before_escape.contains("Select Reasoning Level"));
@@ -1609,7 +1701,7 @@ fn reasoning_popup_escape_returns_to_model_popup() {
chat.handle_key_event(KeyEvent::new(KeyCode::Esc, KeyModifiers::NONE));
let after_escape = render_bottom_popup(&chat, 80);
assert!(after_escape.contains("Select Model and Effort"));
assert!(after_escape.contains("Select Model"));
assert!(!after_escape.contains("Select Reasoning Level"));
}
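
One small detail above: the test fixture switches from a named `static` array to a `const` slice. Constant promotion gives the `&[...]` literal a `'static` lifetime, so it can be stored directly in a field expecting `&'static [ReasoningEffortPreset]`. A tiny sketch of the two spellings:

```rust
// Sketch of the static-array vs const-slice spellings from the test above.
#[derive(Debug)]
struct EffortPreset {
    effort: &'static str,
}

// Before: a named static array, used as &SINGLE_EFFORT at the call site.
static SINGLE_EFFORT: [EffortPreset; 1] = [EffortPreset { effort: "high" }];

// After: a const slice; the &[...] literal is promoted to 'static memory.
const SINGLE_HIGH_REASONING: &[EffortPreset] = &[EffortPreset { effort: "high" }];

fn main() {
    let via_static: &'static [EffortPreset] = &SINGLE_EFFORT;
    assert_eq!(via_static.len(), SINGLE_HIGH_REASONING.len());
}
```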

View File

@@ -706,14 +706,8 @@ impl SessionHeaderHistoryCell {
formatted
}
-fn reasoning_label(&self) -> Option<&'static str> {
-self.reasoning_effort.map(|effort| match effort {
-ReasoningEffortConfig::Minimal => "minimal",
-ReasoningEffortConfig::Low => "low",
-ReasoningEffortConfig::Medium => "medium",
-ReasoningEffortConfig::High => "high",
-ReasoningEffortConfig::None => "none",
-})
+fn reasoning_label(&self) -> Option<String> {
+self.reasoning_effort.map(|effort| effort.to_string())
}
}
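
The refactor assumes `ReasoningEffortConfig`'s `Display` impl yields the same lowercase names the deleted match hardcoded, which also lines up with the lowercased snapshot earlier. A self-contained sketch of that assumption:

```rust
use std::fmt;

// Sketch of why the hand-written match can go away: assuming the effort
// enum's Display already produces the same lowercase names.
#[derive(Clone, Copy)]
enum Effort {
    Minimal,
    Low,
    Medium,
    High,
    None,
}

impl fmt::Display for Effort {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.write_str(match self {
            Effort::Minimal => "minimal",
            Effort::Low => "low",
            Effort::Medium => "medium",
            Effort::High => "high",
            Effort::None => "none",
        })
    }
}

// Matches the new signature: Option<String> instead of Option<&'static str>.
fn reasoning_label(effort: Option<Effort>) -> Option<String> {
    effort.map(|effort| effort.to_string())
}

fn main() {
    assert_eq!(reasoning_label(Some(Effort::High)).as_deref(), Some("high"));
    assert_eq!(reasoning_label(None), None);
}
```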

View File

@@ -64,7 +64,7 @@ Notes:
The model that Codex should use.
```toml
model = "gpt-5.1" # overrides the default ("gpt-5.1-codex" on macOS/Linux, "gpt-5.1" on Windows)
model = "gpt-5.1" # overrides the default ("codex-auto")
```
### model_providers
@@ -818,7 +818,7 @@ Users can specify config values at multiple levels. Order of precedence is as fo
1. custom command-line argument, e.g., `--model o3`
2. as part of a profile, where the `--profile` is specified via a CLI (or in the config file itself)
3. as an entry in `config.toml`, e.g., `model = "o3"`
-4. the default value that comes with Codex CLI (i.e., Codex CLI defaults to `gpt-5.1-codex`)
+4. the default value that comes with Codex CLI (i.e., Codex CLI defaults to `codex-auto`)
### history
@@ -921,7 +921,7 @@ Valid values:
| Key | Type / Values | Notes |
| ------------------------------------------------ | ----------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------- |
-| `model` | string | Model to use (e.g., `gpt-5.1-codex`). |
+| `model` | string | Model to use (e.g., `codex-auto`). |
| `model_provider` | string | Provider id from `model_providers` (default: `openai`). |
| `model_context_window` | number | Context window tokens. |
| `model_max_output_tokens` | number | Max output tokens. |

View File

@@ -18,9 +18,8 @@ Use this example configuration as a starting point. For an explanation of each f
# Core Model Selection
################################################################################
-# Primary model used by Codex. Default differs by OS; non-Windows defaults here.
-# Linux/macOS default: "gpt-5.1-codex"; Windows default: "gpt-5.1".
-model = "gpt-5.1-codex"
+# Primary model used by Codex. Default: "codex-auto" on all platforms.
+model = "codex-auto"
# Model used by the /review feature (code reviews). Default: "gpt-5.1-codex".
review_model = "gpt-5.1-codex"
@@ -316,7 +315,7 @@ mcp_oauth_credentials_store = "auto"
[profiles]
# [profiles.default]
# model = "gpt-5.1-codex"
# model = "codex-auto"
# model_provider = "openai"
# approval_policy = "on-request"
# sandbox_mode = "read-only"