fix: typos in model picker (#6859)

# External (non-OpenAI) Pull Request Requirements

Before opening this Pull Request, please read the dedicated
"Contributing" markdown file or your PR may be closed:
https://github.com/openai/codex/blob/main/docs/contributing.md

If your PR conforms to our contribution guidelines, replace this text
with a detailed and high-quality description of your changes.

Include a link to a bug report or enhancement request.
This commit is contained in:
Ahmed Ibrahim
2025-11-18 22:29:02 -08:00
committed by GitHub
parent 030d1d5b1c
commit 793063070b
33 changed files with 563 additions and 180 deletions

View File

@@ -528,7 +528,7 @@ mod tests {
let request = ClientRequest::NewConversation {
request_id: RequestId::Integer(42),
params: v1::NewConversationParams {
model: Some("gpt-5.1-codex".to_string()),
model: Some("arcticfox".to_string()),
model_provider: None,
profile: None,
cwd: None,
@@ -546,7 +546,7 @@ mod tests {
"method": "newConversation",
"id": 42,
"params": {
"model": "gpt-5.1-codex",
"model": "arcticfox",
"modelProvider": null,
"profile": null,
"cwd": null,

View File

@@ -27,7 +27,7 @@ fn create_config_toml(codex_home: &Path) -> std::io::Result<()> {
std::fs::write(
config_toml,
r#"
model = "gpt-5.1-codex"
model = "arcticfox"
approval_policy = "on-request"
sandbox_mode = "workspace-write"
model_reasoning_summary = "detailed"
@@ -87,7 +87,7 @@ async fn get_config_toml_parses_all_fields() -> Result<()> {
}),
forced_chatgpt_workspace_id: Some("12345678-0000-0000-0000-000000000000".into()),
forced_login_method: Some(ForcedLoginMethod::Chatgpt),
model: Some("gpt-5.1-codex".into()),
model: Some("arcticfox".into()),
model_reasoning_effort: Some(ReasoningEffort::High),
model_reasoning_summary: Some(ReasoningSummary::Detailed),
model_verbosity: Some(Verbosity::Medium),

View File

@@ -57,7 +57,7 @@ fn create_config_toml(codex_home: &Path) -> std::io::Result<()> {
std::fs::write(
config_toml,
r#"
model = "gpt-5.1-codex"
model = "arcticfox"
model_reasoning_effort = "medium"
"#,
)

View File

@@ -45,6 +45,33 @@ async fn list_models_returns_all_models_with_large_limit() -> Result<()> {
} = to_response::<ModelListResponse>(response)?;
let expected_models = vec![
Model {
id: "arcticfox".to_string(),
model: "arcticfox".to_string(),
display_name: "arcticfox".to_string(),
description: "Latest Codex-optimized flagship for deep and fast reasoning.".to_string(),
supported_reasoning_efforts: vec![
ReasoningEffortOption {
reasoning_effort: ReasoningEffort::Low,
description: "Fast responses with lighter reasoning".to_string(),
},
ReasoningEffortOption {
reasoning_effort: ReasoningEffort::Medium,
description: "Balances speed and reasoning depth for everyday tasks"
.to_string(),
},
ReasoningEffortOption {
reasoning_effort: ReasoningEffort::High,
description: "Maximizes reasoning depth for complex problems".to_string(),
},
ReasoningEffortOption {
reasoning_effort: ReasoningEffort::XHigh,
description: "Extra high reasoning depth for complex problems".to_string(),
},
],
default_reasoning_effort: ReasoningEffort::Medium,
is_default: true,
},
Model {
id: "gpt-5.1-codex".to_string(),
model: "gpt-5.1-codex".to_string(),
@@ -66,7 +93,7 @@ async fn list_models_returns_all_models_with_large_limit() -> Result<()> {
},
],
default_reasoning_effort: ReasoningEffort::Medium,
is_default: true,
is_default: false,
},
Model {
id: "gpt-5.1-codex-mini".to_string(),
@@ -147,7 +174,7 @@ async fn list_models_pagination_works() -> Result<()> {
} = to_response::<ModelListResponse>(first_response)?;
assert_eq!(first_items.len(), 1);
assert_eq!(first_items[0].id, "gpt-5.1-codex");
assert_eq!(first_items[0].id, "arcticfox");
let next_cursor = first_cursor.ok_or_else(|| anyhow!("cursor for second page"))?;
let second_request = mcp
@@ -169,7 +196,7 @@ async fn list_models_pagination_works() -> Result<()> {
} = to_response::<ModelListResponse>(second_response)?;
assert_eq!(second_items.len(), 1);
assert_eq!(second_items[0].id, "gpt-5.1-codex-mini");
assert_eq!(second_items[0].id, "gpt-5.1-codex");
let third_cursor = second_cursor.ok_or_else(|| anyhow!("cursor for third page"))?;
let third_request = mcp
@@ -191,8 +218,30 @@ async fn list_models_pagination_works() -> Result<()> {
} = to_response::<ModelListResponse>(third_response)?;
assert_eq!(third_items.len(), 1);
assert_eq!(third_items[0].id, "gpt-5.1");
assert!(third_cursor.is_none());
assert_eq!(third_items[0].id, "gpt-5.1-codex-mini");
let fourth_cursor = third_cursor.ok_or_else(|| anyhow!("cursor for fourth page"))?;
let fourth_request = mcp
.send_list_models_request(ModelListParams {
limit: Some(1),
cursor: Some(fourth_cursor.clone()),
})
.await?;
let fourth_response: JSONRPCResponse = timeout(
DEFAULT_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(fourth_request)),
)
.await??;
let ModelListResponse {
data: fourth_items,
next_cursor: fourth_cursor,
} = to_response::<ModelListResponse>(fourth_response)?;
assert_eq!(fourth_items.len(), 1);
assert_eq!(fourth_items[0].id, "gpt-5.1");
assert!(fourth_cursor.is_none());
Ok(())
}

View File

@@ -27,7 +27,7 @@ async fn thread_resume_returns_original_thread() -> Result<()> {
// Start a thread.
let start_id = mcp
.send_thread_start_request(ThreadStartParams {
model: Some("gpt-5.1-codex".to_string()),
model: Some("arcticfox".to_string()),
..Default::default()
})
.await?;
@@ -69,7 +69,7 @@ async fn thread_resume_prefers_path_over_thread_id() -> Result<()> {
let start_id = mcp
.send_thread_start_request(ThreadStartParams {
model: Some("gpt-5.1-codex".to_string()),
model: Some("arcticfox".to_string()),
..Default::default()
})
.await?;
@@ -114,7 +114,7 @@ async fn thread_resume_supports_history_and_overrides() -> Result<()> {
// Start a thread.
let start_id = mcp
.send_thread_start_request(ThreadStartParams {
model: Some("gpt-5.1-codex".to_string()),
model: Some("arcticfox".to_string()),
..Default::default()
})
.await?;

View File

@@ -4,6 +4,9 @@ use codex_app_server_protocol::AuthMode;
use codex_core::protocol_config_types::ReasoningEffort;
use once_cell::sync::Lazy;
pub const HIDE_GPT5_1_MIGRATION_PROMPT_CONFIG: &str = "hide_gpt5_1_migration_prompt";
pub const HIDE_ARCTICFOX_MIGRATION_PROMPT_CONFIG: &str = "hide_arcticfox_migration_prompt";
/// A reasoning effort option that can be surfaced for a model.
#[derive(Debug, Clone, Copy)]
pub struct ReasoningEffortPreset {
@@ -17,6 +20,7 @@ pub struct ReasoningEffortPreset {
pub struct ModelUpgrade {
pub id: &'static str,
pub reasoning_effort_mapping: Option<HashMap<ReasoningEffort, ReasoningEffort>>,
pub migration_config_key: &'static str,
}
/// Metadata describing a Codex-supported model.
@@ -38,10 +42,40 @@ pub struct ModelPreset {
pub is_default: bool,
/// recommended upgrade model
pub upgrade: Option<ModelUpgrade>,
/// Whether this preset should appear in the picker UI.
pub show_in_picker: bool,
}
static PRESETS: Lazy<Vec<ModelPreset>> = Lazy::new(|| {
vec![
ModelPreset {
id: "arcticfox",
model: "arcticfox",
display_name: "arcticfox",
description: "Latest Codex-optimized flagship for deep and fast reasoning.",
default_reasoning_effort: ReasoningEffort::Medium,
supported_reasoning_efforts: &[
ReasoningEffortPreset {
effort: ReasoningEffort::Low,
description: "Fast responses with lighter reasoning",
},
ReasoningEffortPreset {
effort: ReasoningEffort::Medium,
description: "Balances speed and reasoning depth for everyday tasks",
},
ReasoningEffortPreset {
effort: ReasoningEffort::High,
description: "Maximizes reasoning depth for complex problems",
},
ReasoningEffortPreset {
effort: ReasoningEffort::XHigh,
description: "Extra high reasoning depth for complex problems",
},
],
is_default: true,
upgrade: None,
show_in_picker: true,
},
ModelPreset {
id: "gpt-5.1-codex",
model: "gpt-5.1-codex",
@@ -62,8 +96,13 @@ static PRESETS: Lazy<Vec<ModelPreset>> = Lazy::new(|| {
description: "Maximizes reasoning depth for complex or ambiguous problems",
},
],
is_default: true,
upgrade: None,
is_default: false,
upgrade: Some(ModelUpgrade {
id: "arcticfox",
reasoning_effort_mapping: None,
migration_config_key: HIDE_ARCTICFOX_MIGRATION_PROMPT_CONFIG,
}),
show_in_picker: true,
},
ModelPreset {
id: "gpt-5.1-codex-mini",
@@ -83,6 +122,7 @@ static PRESETS: Lazy<Vec<ModelPreset>> = Lazy::new(|| {
],
is_default: false,
upgrade: None,
show_in_picker: true,
},
ModelPreset {
id: "gpt-5.1",
@@ -106,6 +146,7 @@ static PRESETS: Lazy<Vec<ModelPreset>> = Lazy::new(|| {
],
is_default: false,
upgrade: None,
show_in_picker: true,
},
// Deprecated models.
ModelPreset {
@@ -130,9 +171,11 @@ static PRESETS: Lazy<Vec<ModelPreset>> = Lazy::new(|| {
],
is_default: false,
upgrade: Some(ModelUpgrade {
id: "gpt-5.1-codex",
id: "arcticfox",
reasoning_effort_mapping: None,
migration_config_key: HIDE_ARCTICFOX_MIGRATION_PROMPT_CONFIG,
}),
show_in_picker: false,
},
ModelPreset {
id: "gpt-5-codex-mini",
@@ -154,7 +197,9 @@ static PRESETS: Lazy<Vec<ModelPreset>> = Lazy::new(|| {
upgrade: Some(ModelUpgrade {
id: "gpt-5.1-codex-mini",
reasoning_effort_mapping: None,
migration_config_key: HIDE_GPT5_1_MIGRATION_PROMPT_CONFIG,
}),
show_in_picker: false,
},
ModelPreset {
id: "gpt-5",
@@ -187,16 +232,20 @@ static PRESETS: Lazy<Vec<ModelPreset>> = Lazy::new(|| {
ReasoningEffort::Minimal,
ReasoningEffort::Low,
)])),
migration_config_key: HIDE_GPT5_1_MIGRATION_PROMPT_CONFIG,
}),
show_in_picker: false,
},
]
});
pub fn builtin_model_presets(_auth_mode: Option<AuthMode>) -> Vec<ModelPreset> {
// leave auth mode for later use
pub fn builtin_model_presets(auth_mode: Option<AuthMode>) -> Vec<ModelPreset> {
PRESETS
.iter()
.filter(|preset| preset.upgrade.is_none())
.filter(|preset| match auth_mode {
Some(AuthMode::ApiKey) => preset.show_in_picker && preset.id != "arcticfox",
_ => preset.show_in_picker,
})
.cloned()
.collect()
}
@@ -208,10 +257,17 @@ pub fn all_model_presets() -> &'static Vec<ModelPreset> {
#[cfg(test)]
mod tests {
use super::*;
use codex_app_server_protocol::AuthMode;
#[test]
fn only_one_default_model_is_configured() {
let default_models = PRESETS.iter().filter(|preset| preset.is_default).count();
assert!(default_models == 1);
}
#[test]
fn arcticfox_hidden_for_api_key_auth() {
let presets = builtin_model_presets(Some(AuthMode::ApiKey));
assert!(presets.iter().all(|preset| preset.id != "arcticfox"));
}
}

View File

@@ -431,7 +431,7 @@ mod tests {
expects_apply_patch_instructions: false,
},
InstructionsTestCase {
slug: "gpt-5.1-codex",
slug: "arcticfox",
expects_apply_patch_instructions: false,
},
];

View File

@@ -845,6 +845,36 @@ hide_gpt5_1_migration_prompt = true
assert_eq!(contents, expected);
}
#[test]
fn blocking_set_hide_arcticfox_migration_prompt_preserves_table() {
let tmp = tempdir().expect("tmpdir");
let codex_home = tmp.path();
std::fs::write(
codex_home.join(CONFIG_TOML_FILE),
r#"[notice]
existing = "value"
"#,
)
.expect("seed");
apply_blocking(
codex_home,
None,
&[ConfigEdit::SetNoticeHideModelMigrationPrompt(
"hide_arcticfox_migration_prompt".to_string(),
true,
)],
)
.expect("persist");
let contents =
std::fs::read_to_string(codex_home.join(CONFIG_TOML_FILE)).expect("read config");
let expected = r#"[notice]
existing = "value"
hide_arcticfox_migration_prompt = true
"#;
assert_eq!(contents, expected);
}
#[test]
fn blocking_replace_mcp_servers_round_trips() {
let tmp = tempdir().expect("tmpdir");

View File

@@ -62,11 +62,11 @@ pub mod profile;
pub mod types;
#[cfg(target_os = "windows")]
pub const OPENAI_DEFAULT_MODEL: &str = "gpt-5.1";
pub const OPENAI_DEFAULT_MODEL: &str = "arcticfox";
#[cfg(not(target_os = "windows"))]
pub const OPENAI_DEFAULT_MODEL: &str = "gpt-5.1-codex";
const OPENAI_DEFAULT_REVIEW_MODEL: &str = "gpt-5.1-codex";
pub const GPT_5_CODEX_MEDIUM_MODEL: &str = "gpt-5.1-codex";
pub const OPENAI_DEFAULT_MODEL: &str = "arcticfox";
const OPENAI_DEFAULT_REVIEW_MODEL: &str = "arcticfox";
pub const GPT_5_CODEX_MEDIUM_MODEL: &str = "arcticfox";
/// Maximum number of bytes of the documentation that will be embedded. Larger
/// files are *silently truncated* to this size so we do not take up too much of
@@ -81,7 +81,7 @@ pub struct Config {
/// Optional override of model selection.
pub model: String,
/// Model used specifically for review sessions. Defaults to "gpt-5.1-codex".
/// Model used specifically for review sessions. Defaults to "arcticfox".
pub review_model: String,
pub model_family: ModelFamily,

View File

@@ -378,6 +378,8 @@ pub struct Notice {
pub hide_rate_limit_model_nudge: Option<bool>,
/// Tracks whether the user has seen the model migration prompt
pub hide_gpt5_1_migration_prompt: Option<bool>,
/// Tracks whether the user has seen the arcticfox migration prompt
pub hide_arcticfox_migration_prompt: Option<bool>,
}
impl Notice {

View File

@@ -189,6 +189,18 @@ pub fn find_family_for_model(slug: &str) -> Option<ModelFamily> {
support_verbosity: false,
truncation_policy: TruncationPolicy::Tokens(10_000),
)
} else if slug.starts_with("arcticfox") {
model_family!(
slug, slug,
supports_reasoning_summaries: true,
reasoning_summary_format: ReasoningSummaryFormat::Experimental,
base_instructions: BASE_INSTRUCTIONS.to_string(),
apply_patch_tool_type: Some(ApplyPatchToolType::Freeform),
shell_type: ConfigShellToolType::ShellCommand,
supports_parallel_tool_calls: true,
support_verbosity: false,
truncation_policy: TruncationPolicy::Tokens(10_000),
)
} else if slug.starts_with("gpt-5.1") {
model_family!(
slug, "gpt-5.1",

View File

@@ -70,7 +70,10 @@ pub(crate) fn get_model_info(model_family: &ModelFamily) -> Option<ModelInfo> {
// https://platform.openai.com/docs/models/gpt-3.5-turbo
"gpt-3.5-turbo" => Some(ModelInfo::new(16_385, 4_096)),
_ if slug.starts_with("gpt-5-codex") || slug.starts_with("gpt-5.1-codex") => {
_ if slug.starts_with("gpt-5-codex")
|| slug.starts_with("gpt-5.1-codex")
|| slug.starts_with("arcticfox") =>
{
Some(ModelInfo::new(CONTEXT_WINDOW_272K, MAX_OUTPUT_TOKENS_128K))
}

View File

@@ -1155,7 +1155,7 @@ async fn token_count_includes_rate_limits_snapshot() {
"reasoning_output_tokens": 0,
"total_tokens": 123
},
// Default model is gpt-5.1-codex in tests → 95% usable context window
// Default model is arcticfox in tests → 95% usable context window
"model_context_window": 258400
},
"rate_limits": {

View File

@@ -167,11 +167,12 @@ async fn prompt_tools_are_consistent_across_requests() -> anyhow::Result<()> {
"list_mcp_resource_templates",
"read_mcp_resource",
"update_plan",
"apply_patch",
"view_image",
],
),
(
"gpt-5.1",
"arcticfox",
vec![
"shell_command",
"list_mcp_resources",

View File

@@ -30,6 +30,7 @@ pub enum ReasoningEffort {
#[default]
Medium,
High,
XHigh,
}
/// A summary of the reasoning performed by the model. This can be useful for

View File

@@ -8,6 +8,7 @@ use crate::exec_command::strip_bash_lc_and_escape;
use crate::file_search::FileSearchManager;
use crate::history_cell::HistoryCell;
use crate::model_migration::ModelMigrationOutcome;
use crate::model_migration::migration_copy_for_config;
use crate::model_migration::run_model_migration_prompt;
use crate::pager_overlay::Overlay;
use crate::render::highlight::highlight_bash_to_lines;
@@ -17,6 +18,9 @@ use crate::tui;
use crate::tui::TuiEvent;
use crate::update_action::UpdateAction;
use codex_ansi_escape::ansi_escape_line;
use codex_app_server_protocol::AuthMode;
use codex_common::model_presets::HIDE_ARCTICFOX_MIGRATION_PROMPT_CONFIG;
use codex_common::model_presets::HIDE_GPT5_1_MIGRATION_PROMPT_CONFIG;
use codex_common::model_presets::ModelUpgrade;
use codex_common::model_presets::all_model_presets;
use codex_core::AuthManager;
@@ -52,6 +56,9 @@ use tokio::sync::mpsc::unbounded_channel;
#[cfg(not(debug_assertions))]
use crate::history_cell::UpdateAvailableHistoryCell;
const GPT_5_1_MIGRATION_AUTH_MODES: [AuthMode; 2] = [AuthMode::ChatGPT, AuthMode::ApiKey];
const ARCTICFOX_MIGRATION_AUTH_MODES: [AuthMode; 1] = [AuthMode::ChatGPT];
#[derive(Debug, Clone)]
pub struct AppExitInfo {
pub token_usage: TokenUsage,
@@ -97,10 +104,19 @@ fn should_show_model_migration_prompt(
.any(|preset| preset.model == current_model)
}
fn migration_prompt_hidden(config: &Config, migration_config_key: &str) -> Option<bool> {
match migration_config_key {
HIDE_ARCTICFOX_MIGRATION_PROMPT_CONFIG => config.notices.hide_arcticfox_migration_prompt,
HIDE_GPT5_1_MIGRATION_PROMPT_CONFIG => config.notices.hide_gpt5_1_migration_prompt,
_ => None,
}
}
async fn handle_model_migration_prompt_if_needed(
tui: &mut tui::Tui,
config: &mut Config,
app_event_tx: &AppEventSender,
auth_mode: Option<AuthMode>,
) -> Option<AppExitInfo> {
let upgrade = all_model_presets()
.iter()
@@ -110,18 +126,24 @@ async fn handle_model_migration_prompt_if_needed(
if let Some(ModelUpgrade {
id: target_model,
reasoning_effort_mapping,
migration_config_key,
}) = upgrade
{
if !migration_prompt_allows_auth_mode(auth_mode, migration_config_key) {
return None;
}
let target_model = target_model.to_string();
let hide_prompt_flag = config.notices.hide_gpt5_1_migration_prompt;
let hide_prompt_flag = migration_prompt_hidden(config, migration_config_key);
if !should_show_model_migration_prompt(&config.model, &target_model, hide_prompt_flag) {
return None;
}
match run_model_migration_prompt(tui).await {
let prompt_copy = migration_copy_for_config(migration_config_key);
match run_model_migration_prompt(tui, prompt_copy).await {
ModelMigrationOutcome::Accepted => {
app_event_tx.send(AppEvent::PersistModelMigrationPromptAcknowledged {
migration_config: "hide_gpt5_1_migration_prompt".to_string(),
migration_config: migration_config_key.to_string(),
});
config.model = target_model.to_string();
if let Some(family) = find_family_for_model(&target_model) {
@@ -211,8 +233,10 @@ impl App {
let (app_event_tx, mut app_event_rx) = unbounded_channel();
let app_event_tx = AppEventSender::new(app_event_tx);
let auth_mode = auth_manager.auth().map(|auth| auth.mode);
let exit_info =
handle_model_migration_prompt_if_needed(tui, &mut config, &app_event_tx).await;
handle_model_migration_prompt_if_needed(tui, &mut config, &app_event_tx, auth_mode)
.await;
if let Some(exit_info) = exit_info {
return Ok(exit_info);
}
@@ -919,6 +943,28 @@ impl App {
}
}
fn migration_prompt_allowed_auth_modes(migration_config_key: &str) -> Option<&'static [AuthMode]> {
match migration_config_key {
HIDE_GPT5_1_MIGRATION_PROMPT_CONFIG => Some(&GPT_5_1_MIGRATION_AUTH_MODES),
HIDE_ARCTICFOX_MIGRATION_PROMPT_CONFIG => Some(&ARCTICFOX_MIGRATION_AUTH_MODES),
_ => None,
}
}
fn migration_prompt_allows_auth_mode(
auth_mode: Option<AuthMode>,
migration_config_key: &str,
) -> bool {
if let Some(allowed_modes) = migration_prompt_allowed_auth_modes(migration_config_key) {
match auth_mode {
None => true,
Some(mode) => allowed_modes.contains(&mode),
}
} else {
auth_mode != Some(AuthMode::ApiKey)
}
}
#[cfg(test)]
mod tests {
use super::*;
@@ -986,6 +1032,11 @@ mod tests {
"gpt-5.1-codex-mini",
None
));
assert!(should_show_model_migration_prompt(
"gpt-5.1-codex",
"arcticfox",
None
));
assert!(!should_show_model_migration_prompt(
"gpt-5.1-codex",
"gpt-5.1-codex",
@@ -1116,4 +1167,40 @@ mod tests {
Some("codex resume 123e4567-e89b-12d3-a456-426614174000".to_string())
);
}
#[test]
fn gpt5_migration_allows_api_key_and_chatgpt() {
assert!(migration_prompt_allows_auth_mode(
Some(AuthMode::ApiKey),
HIDE_GPT5_1_MIGRATION_PROMPT_CONFIG,
));
assert!(migration_prompt_allows_auth_mode(
Some(AuthMode::ChatGPT),
HIDE_GPT5_1_MIGRATION_PROMPT_CONFIG,
));
}
#[test]
fn arcticfox_migration_limits_to_chatgpt() {
assert!(migration_prompt_allows_auth_mode(
Some(AuthMode::ChatGPT),
HIDE_ARCTICFOX_MIGRATION_PROMPT_CONFIG,
));
assert!(!migration_prompt_allows_auth_mode(
Some(AuthMode::ApiKey),
HIDE_ARCTICFOX_MIGRATION_PROMPT_CONFIG,
));
}
#[test]
fn other_migrations_block_api_key() {
assert!(!migration_prompt_allows_auth_mode(
Some(AuthMode::ApiKey),
"unknown"
));
assert!(migration_prompt_allows_auth_mode(
Some(AuthMode::ChatGPT),
"unknown"
));
}
}

View File

@@ -2013,6 +2013,26 @@ impl ChatWidget {
let default_effort: ReasoningEffortConfig = preset.default_reasoning_effort;
let supported = preset.supported_reasoning_efforts;
let warn_effort = if supported
.iter()
.any(|option| option.effort == ReasoningEffortConfig::XHigh)
{
Some(ReasoningEffortConfig::XHigh)
} else if supported
.iter()
.any(|option| option.effort == ReasoningEffortConfig::High)
{
Some(ReasoningEffortConfig::High)
} else {
None
};
let warning_text = warn_effort.map(|effort| {
let effort_label = Self::reasoning_effort_label(effort);
format!("{effort_label} reasoning effort can quickly consume Plus plan rate limits.")
});
let warn_for_model =
preset.model.starts_with("gpt-5.1-codex") || preset.model.starts_with("arcticfox");
struct EffortChoice {
stored: Option<ReasoningEffortConfig>,
display: ReasoningEffortConfig,
@@ -2060,10 +2080,7 @@ impl ChatWidget {
let mut items: Vec<SelectionItem> = Vec::new();
for choice in choices.iter() {
let effort = choice.display;
let mut effort_label = effort.to_string();
if let Some(first) = effort_label.get_mut(0..1) {
first.make_ascii_uppercase();
}
let mut effort_label = Self::reasoning_effort_label(effort).to_string();
if choice.stored == default_choice {
effort_label.push_str(" (default)");
}
@@ -2078,14 +2095,17 @@ impl ChatWidget {
})
.filter(|text| !text.is_empty());
let warning = "⚠ High reasoning effort can quickly consume Plus plan rate limits.";
let show_warning =
preset.model.starts_with("gpt-5.1-codex") && effort == ReasoningEffortConfig::High;
let selected_description = show_warning.then(|| {
description
.as_ref()
.map_or(warning.to_string(), |d| format!("{d}\n{warning}"))
});
let show_warning = warn_for_model && warn_effort == Some(effort);
let selected_description = if show_warning {
warning_text.as_ref().map(|warning_message| {
description.as_ref().map_or_else(
|| warning_message.clone(),
|d| format!("{d}\n{warning_message}"),
)
})
} else {
None
};
let model_for_action = model_slug.clone();
let effort_for_action = choice.stored;
@@ -2137,6 +2157,17 @@ impl ChatWidget {
});
}
fn reasoning_effort_label(effort: ReasoningEffortConfig) -> &'static str {
match effort {
ReasoningEffortConfig::None => "None",
ReasoningEffortConfig::Minimal => "Minimal",
ReasoningEffortConfig::Low => "Low",
ReasoningEffortConfig::Medium => "Medium",
ReasoningEffortConfig::High => "High",
ReasoningEffortConfig::XHigh => "Extra high",
}
}
fn apply_model_and_effort(&self, model: String, effort: Option<ReasoningEffortConfig>) {
self.app_event_tx
.send(AppEvent::CodexOp(Op::OverrideTurnContext {

View File

@@ -2,13 +2,11 @@
source: tui/src/chatwidget/tests.rs
expression: popup
---
Select Reasoning Level for gpt-5.1-codex
Select Reasoning Level for arcticfox
1. Low Fastest responses with limited reasoning
2. Medium (default) Dynamically adjusts reasoning based on the task
3. High (current) Maximizes reasoning depth for complex or ambiguous
problems
⚠ High reasoning effort can quickly consume Plus plan
rate limits.
1. Low Fast responses with lighter reasoning
2. Medium (default) Balances speed and reasoning depth for everyday tasks
3. High (current) Maximizes reasoning depth for complex problems
4. Extra high Extra high reasoning depth for complex problems
Press enter to confirm or esc to go back

View File

@@ -0,0 +1,16 @@
---
source: tui/src/chatwidget/tests.rs
assertion_line: 1548
expression: popup
---
Select Reasoning Level for arcticfox
1. Low Fast responses with lighter reasoning
2. Medium (default) Balances speed and reasoning depth for everyday
tasks
3. High Maximizes reasoning depth for complex problems
4. Extra high (current) Extra high reasoning depth for complex problems
⚠ Extra high reasoning effort can quickly consume
Plus plan rate limits.
Press enter to confirm or esc to go back

View File

@@ -1526,19 +1526,59 @@ fn startup_prompts_for_windows_sandbox_when_agent_requested() {
fn model_reasoning_selection_popup_snapshot() {
let (mut chat, _rx, _op_rx) = make_chatwidget_manual();
chat.config.model = "gpt-5.1-codex".to_string();
chat.config.model = "arcticfox".to_string();
chat.config.model_reasoning_effort = Some(ReasoningEffortConfig::High);
let preset = builtin_model_presets(None)
.into_iter()
.find(|preset| preset.model == "gpt-5.1-codex")
.expect("gpt-5.1-codex preset");
.find(|preset| preset.model == "arcticfox")
.expect("arcticfox preset");
chat.open_reasoning_popup(preset);
let popup = render_bottom_popup(&chat, 80);
assert_snapshot!("model_reasoning_selection_popup", popup);
}
#[test]
fn model_reasoning_selection_popup_extra_high_warning_snapshot() {
let (mut chat, _rx, _op_rx) = make_chatwidget_manual();
chat.config.model = "arcticfox".to_string();
chat.config.model_reasoning_effort = Some(ReasoningEffortConfig::XHigh);
let preset = builtin_model_presets(None)
.into_iter()
.find(|preset| preset.model == "arcticfox")
.expect("arcticfox preset");
chat.open_reasoning_popup(preset);
let popup = render_bottom_popup(&chat, 80);
assert_snapshot!("model_reasoning_selection_popup_extra_high_warning", popup);
}
#[test]
fn reasoning_popup_shows_extra_high_with_space() {
let (mut chat, _rx, _op_rx) = make_chatwidget_manual();
chat.config.model = "arcticfox".to_string();
let preset = builtin_model_presets(None)
.into_iter()
.find(|preset| preset.model == "arcticfox")
.expect("arcticfox preset");
chat.open_reasoning_popup(preset);
let popup = render_bottom_popup(&chat, 120);
assert!(
popup.contains("Extra high"),
"expected popup to include 'Extra high'; popup: {popup}"
);
assert!(
!popup.contains("Extrahigh"),
"expected popup not to include 'Extrahigh'; popup: {popup}"
);
}
#[test]
fn single_reasoning_option_skips_selection() {
let (mut chat, mut rx, _op_rx) = make_chatwidget_manual();
@@ -1556,6 +1596,7 @@ fn single_reasoning_option_skips_selection() {
supported_reasoning_efforts: &SINGLE_EFFORT,
is_default: false,
upgrade: None,
show_in_picker: true,
};
chat.open_reasoning_popup(preset);

View File

@@ -708,6 +708,7 @@ impl SessionHeaderHistoryCell {
ReasoningEffortConfig::Low => "low",
ReasoningEffortConfig::Medium => "medium",
ReasoningEffortConfig::High => "high",
ReasoningEffortConfig::XHigh => "extra high",
ReasoningEffortConfig::None => "none",
})
}

View File

@@ -5,6 +5,8 @@ use crate::render::renderable::RenderableExt as _;
use crate::tui::FrameRequester;
use crate::tui::Tui;
use crate::tui::TuiEvent;
use codex_common::model_presets::HIDE_ARCTICFOX_MIGRATION_PROMPT_CONFIG;
use codex_common::model_presets::HIDE_GPT5_1_MIGRATION_PROMPT_CONFIG;
use crossterm::event::KeyCode;
use crossterm::event::KeyEvent;
use crossterm::event::KeyEventKind;
@@ -12,6 +14,7 @@ use crossterm::event::KeyModifiers;
use ratatui::prelude::Stylize as _;
use ratatui::prelude::Widget;
use ratatui::text::Line;
use ratatui::text::Span;
use ratatui::widgets::Clear;
use ratatui::widgets::Paragraph;
use ratatui::widgets::WidgetRef;
@@ -24,7 +27,24 @@ pub(crate) enum ModelMigrationOutcome {
Exit,
}
pub(crate) async fn run_model_migration_prompt(tui: &mut Tui) -> ModelMigrationOutcome {
#[derive(Clone)]
pub(crate) struct ModelMigrationCopy {
pub heading: Vec<Span<'static>>,
pub content: Vec<Line<'static>>,
}
pub(crate) fn migration_copy_for_config(migration_config_key: &str) -> ModelMigrationCopy {
match migration_config_key {
HIDE_GPT5_1_MIGRATION_PROMPT_CONFIG => gpt5_migration_copy(),
HIDE_ARCTICFOX_MIGRATION_PROMPT_CONFIG => arcticfox_migration_copy(),
_ => arcticfox_migration_copy(),
}
}
pub(crate) async fn run_model_migration_prompt(
tui: &mut Tui,
copy: ModelMigrationCopy,
) -> ModelMigrationOutcome {
// Render the prompt on the terminal's alternate screen so exiting or cancelling
// does not leave a large blank region in the normal scrollback. This does not
// change the prompt's appearance, only where it is drawn.
@@ -45,7 +65,7 @@ pub(crate) async fn run_model_migration_prompt(tui: &mut Tui) -> ModelMigrationO
let alt = AltScreenGuard::enter(tui);
let mut screen = ModelMigrationScreen::new(alt.tui.frame_requester());
let mut screen = ModelMigrationScreen::new(alt.tui.frame_requester(), copy);
let _ = alt.tui.draw(u16::MAX, |frame| {
frame.render_widget_ref(&screen, frame.area());
@@ -76,14 +96,16 @@ pub(crate) async fn run_model_migration_prompt(tui: &mut Tui) -> ModelMigrationO
struct ModelMigrationScreen {
request_frame: FrameRequester,
copy: ModelMigrationCopy,
done: bool,
should_exit: bool,
}
impl ModelMigrationScreen {
fn new(request_frame: FrameRequester) -> Self {
fn new(request_frame: FrameRequester, copy: ModelMigrationCopy) -> Self {
Self {
request_frame,
copy,
done: false,
should_exit: false,
}
@@ -133,51 +155,74 @@ impl WidgetRef for &ModelMigrationScreen {
let mut column = ColumnRenderable::new();
column.push("");
column.push(Line::from(vec![
"> ".into(),
"Introducing our gpt-5.1 models".bold(),
]));
let mut heading = vec![Span::raw("> ")];
heading.extend(self.copy.heading.clone());
column.push(Line::from(heading));
column.push(Line::from(""));
column.push(
Paragraph::new(Line::from(
"We've upgraded our family of models supported in Codex to gpt-5.1, gpt-5.1-codex and gpt-5.1-codex-mini.",
))
.wrap(Wrap { trim: false })
.inset(Insets::tlbr(0, 2, 0, 0)),
);
column.push(Line::from(""));
column.push(
Paragraph::new(Line::from(
for (idx, line) in self.copy.content.iter().enumerate() {
if idx != 0 {
column.push(Line::from(""));
}
column.push(
Paragraph::new(line.clone())
.wrap(Wrap { trim: false })
.inset(Insets::tlbr(0, 2, 0, 0)),
);
}
column.render(area, buf);
}
}
fn arcticfox_migration_copy() -> ModelMigrationCopy {
ModelMigrationCopy {
heading: vec!["Introducing arcticfox".bold()],
content: vec![
Line::from("We've upgraded our family of models supported in Codex to arcticfox."),
Line::from(
"You can continue using legacy models by specifying them directly with the -m option or in your config.toml.",
))
.wrap(Wrap { trim: false })
.inset(Insets::tlbr(0, 2, 0, 0)),
);
column.push(Line::from(""));
column.push(
),
Line::from(vec![
"Learn more at ".into(),
"www.openai.com/index/arcticfox".cyan().underlined(),
".".into(),
]),
Line::from(vec!["Press enter to continue".dim()]),
],
}
}
fn gpt5_migration_copy() -> ModelMigrationCopy {
ModelMigrationCopy {
heading: vec!["Introducing our gpt-5.1 models".bold()],
content: vec![
Line::from(
"We've upgraded our family of models supported in Codex to gpt-5.1, gpt-5.1-codex and gpt-5.1-codex-mini.",
),
Line::from(
"You can continue using legacy models by specifying them directly with the -m option or in your config.toml.",
),
Line::from(vec![
"Learn more at ".into(),
"www.openai.com/index/gpt-5-1".cyan().underlined(),
".".into(),
])
.inset(Insets::tlbr(0, 2, 0, 0)),
);
column.push(Line::from(""));
column.push(
Line::from(vec!["Press enter to continue".dim()]).inset(Insets::tlbr(0, 2, 0, 0)),
);
column.render(area, buf);
]),
Line::from(vec!["Press enter to continue".dim()]),
],
}
}
#[cfg(test)]
mod tests {
use super::ModelMigrationScreen;
use super::arcticfox_migration_copy;
use super::migration_copy_for_config;
use crate::custom_terminal::Terminal;
use crate::test_backend::VT100Backend;
use crate::tui::FrameRequester;
use codex_common::model_presets::HIDE_GPT5_1_MIGRATION_PROMPT_CONFIG;
use crossterm::event::KeyCode;
use crossterm::event::KeyEvent;
use insta::assert_snapshot;
@@ -191,7 +236,8 @@ mod tests {
let mut terminal = Terminal::with_options(backend).expect("terminal");
terminal.set_viewport_area(Rect::new(0, 0, width, height));
let screen = ModelMigrationScreen::new(FrameRequester::test_dummy());
let screen =
ModelMigrationScreen::new(FrameRequester::test_dummy(), arcticfox_migration_copy());
{
let mut frame = terminal.get_frame();
@@ -208,7 +254,10 @@ mod tests {
let mut terminal = Terminal::with_options(backend).expect("terminal");
terminal.set_viewport_area(Rect::new(0, 0, 65, 12));
let screen = ModelMigrationScreen::new(FrameRequester::test_dummy());
let screen = ModelMigrationScreen::new(
FrameRequester::test_dummy(),
migration_copy_for_config(HIDE_GPT5_1_MIGRATION_PROMPT_CONFIG),
);
{
let mut frame = terminal.get_frame();
frame.render_widget_ref(&screen, frame.area());
@@ -223,7 +272,10 @@ mod tests {
let mut terminal = Terminal::with_options(backend).expect("terminal");
terminal.set_viewport_area(Rect::new(0, 0, 60, 12));
let screen = ModelMigrationScreen::new(FrameRequester::test_dummy());
let screen = ModelMigrationScreen::new(
FrameRequester::test_dummy(),
migration_copy_for_config(HIDE_GPT5_1_MIGRATION_PROMPT_CONFIG),
);
{
let mut frame = terminal.get_frame();
frame.render_widget_ref(&screen, frame.area());
@@ -238,7 +290,10 @@ mod tests {
let mut terminal = Terminal::with_options(backend).expect("terminal");
terminal.set_viewport_area(Rect::new(0, 0, 60, 12));
let screen = ModelMigrationScreen::new(FrameRequester::test_dummy());
let screen = ModelMigrationScreen::new(
FrameRequester::test_dummy(),
migration_copy_for_config(HIDE_GPT5_1_MIGRATION_PROMPT_CONFIG),
);
{
let mut frame = terminal.get_frame();
frame.render_widget_ref(&screen, frame.area());
@@ -249,7 +304,8 @@ mod tests {
#[test]
fn escape_key_accepts_prompt() {
let mut screen = ModelMigrationScreen::new(FrameRequester::test_dummy());
let mut screen =
ModelMigrationScreen::new(FrameRequester::test_dummy(), arcticfox_migration_copy());
// Simulate pressing Escape
screen.handle_key(KeyEvent::new(

View File

@@ -2,14 +2,14 @@
source: tui/src/model_migration.rs
expression: terminal.backend()
---
> Introducing our gpt-5.1 models
> Introducing arcticfox
We've upgraded our family of models supported in Codex to
gpt-5.1, gpt-5.1-codex and gpt-5.1-codex-mini.
arcticfox.
You can continue using legacy models by specifying them
directly with the -m option or in your config.toml.
Learn more at www.openai.com/index/gpt-5-1.
Learn more at www.openai.com/index/arcticfox.
Press enter to continue

View File

@@ -10,7 +10,7 @@ expression: sanitized
│ Visit https://chatgpt.com/codex/settings/usage for up-to-date │
│ information on rate limits and credits │
│ │
│ Model: gpt-5.1-codex (reasoning none, summaries auto) │
│ Model: arcticfox (reasoning none, summaries auto)
│ Directory: [[workspace]] │
│ Approval: on-request │
│ Sandbox: read-only │

View File

@@ -4,20 +4,20 @@ expression: sanitized
---
/status
╭───────────────────────────────────────────────────────────────────────
│ >_ OpenAI Codex (v0.0.0)
│ Visit https://chatgpt.com/codex/settings/usage for up-to-date
│ information on rate limits and credits
│ Model: gpt-5.1-codex (reasoning high, summaries detailed) │
│ Directory: [[workspace]]
│ Approval: on-request
│ Sandbox: workspace-write
│ Agents.md: <none>
│ Token usage: 1.9K total (1K input + 900 output)
│ Context window: 100% left (2.25K used / 272K)
│ 5h limit: [██████░░░░░░░░░░░░░░] 28% left (resets 03:14)
│ Weekly limit: [███████████░░░░░░░░░] 55% left (resets 03:24)
╰───────────────────────────────────────────────────────────────────────
╭───────────────────────────────────────────────────────────────────╮
│ >_ OpenAI Codex (v0.0.0) │
│ │
│ Visit https://chatgpt.com/codex/settings/usage for up-to-date │
│ information on rate limits and credits │
│ │
│ Model: arcticfox (reasoning high, summaries detailed) │
│ Directory: [[workspace]] │
│ Approval: on-request │
│ Sandbox: workspace-write │
│ Agents.md: <none> │
│ │
│ Token usage: 1.9K total (1K input + 900 output) │
│ Context window: 100% left (2.25K used / 272K) │
│ 5h limit: [██████░░░░░░░░░░░░░░] 28% left (resets 03:14) │
│ Weekly limit: [███████████░░░░░░░░░] 55% left (resets 03:24) │
╰───────────────────────────────────────────────────────────────────╯

View File

@@ -4,19 +4,19 @@ expression: sanitized
---
/status
╭───────────────────────────────────────────────────────────────────
│ >_ OpenAI Codex (v0.0.0)
│ Visit https://chatgpt.com/codex/settings/usage for up-to-date
│ information on rate limits and credits
│ Model: gpt-5.1-codex (reasoning none, summaries auto) │
│ Directory: [[workspace]]
│ Approval: on-request
│ Sandbox: read-only
│ Agents.md: <none>
│ Token usage: 750 total (500 input + 250 output)
│ Context window: 100% left (750 used / 272K)
│ Limits: data not available yet
╰───────────────────────────────────────────────────────────────────
╭───────────────────────────────────────────────────────────────╮
│ >_ OpenAI Codex (v0.0.0) │
│ │
│ Visit https://chatgpt.com/codex/settings/usage for up-to-date │
│ information on rate limits and credits │
│ │
│ Model: arcticfox (reasoning none, summaries auto) │
│ Directory: [[workspace]] │
│ Approval: on-request │
│ Sandbox: read-only │
│ Agents.md: <none> │
│ │
│ Token usage: 750 total (500 input + 250 output) │
│ Context window: 100% left (750 used / 272K) │
│ Limits: data not available yet │
╰───────────────────────────────────────────────────────────────╯

View File

@@ -4,19 +4,19 @@ expression: sanitized
---
/status
╭───────────────────────────────────────────────────────────────────
│ >_ OpenAI Codex (v0.0.0)
│ Visit https://chatgpt.com/codex/settings/usage for up-to-date
│ information on rate limits and credits
│ Model: gpt-5.1-codex (reasoning none, summaries auto) │
│ Directory: [[workspace]]
│ Approval: on-request
│ Sandbox: read-only
│ Agents.md: <none>
│ Token usage: 750 total (500 input + 250 output)
│ Context window: 100% left (750 used / 272K)
│ Limits: data not available yet
╰───────────────────────────────────────────────────────────────────
╭───────────────────────────────────────────────────────────────╮
│ >_ OpenAI Codex (v0.0.0) │
│ │
│ Visit https://chatgpt.com/codex/settings/usage for up-to-date │
│ information on rate limits and credits │
│ │
│ Model: arcticfox (reasoning none, summaries auto) │
│ Directory: [[workspace]] │
│ Approval: on-request │
│ Sandbox: read-only │
│ Agents.md: <none> │
│ │
│ Token usage: 750 total (500 input + 250 output) │
│ Context window: 100% left (750 used / 272K) │
│ Limits: data not available yet │
╰───────────────────────────────────────────────────────────────╯

View File

@@ -10,7 +10,7 @@ expression: sanitized
│ Visit https://chatgpt.com/codex/settings/usage for up-to-date │
│ information on rate limits and credits │
│ │
│ Model: gpt-5.1-codex (reasoning none, summaries auto) │
│ Model: arcticfox (reasoning none, summaries auto)
│ Directory: [[workspace]] │
│ Approval: on-request │
│ Sandbox: read-only │

View File

@@ -4,19 +4,19 @@ expression: sanitized
---
/status
╭───────────────────────────────────────────────────────────────────
│ >_ OpenAI Codex (v0.0.0)
│ Visit https://chatgpt.com/codex/settings/usage for up-to-date
│ information on rate limits and credits
│ Model: gpt-5.1-codex (reasoning high, summaries detail │
│ Directory: [[workspace]]
│ Approval: on-request
│ Sandbox: read-only
│ Agents.md: <none>
│ Token usage: 1.9K total (1K input + 900 output)
│ Context window: 100% left (2.25K used / 272K)
│ 5h limit: [██████░░░░░░░░░░░░░░] 28% left (resets 03:14)
╰───────────────────────────────────────────────────────────────────
╭───────────────────────────────────────────────────────────────────╮
│ >_ OpenAI Codex (v0.0.0) │
│ │
│ Visit https://chatgpt.com/codex/settings/usage for up-to-date │
│ information on rate limits and credits │
│ │
│ Model: arcticfox (reasoning high, summaries detailed)
│ Directory: [[workspace]] │
│ Approval: on-request │
│ Sandbox: read-only │
│ Agents.md: <none> │
│ │
│ Token usage: 1.9K total (1K input + 900 output) │
│ Context window: 100% left (2.25K used / 272K) │
│ 5h limit: [██████░░░░░░░░░░░░░░] 28% left (resets 03:14) │
╰───────────────────────────────────────────────────────────────────╯

View File

@@ -81,7 +81,7 @@ fn reset_at_from(captured_at: &chrono::DateTime<chrono::Local>, seconds: i64) ->
fn status_snapshot_includes_reasoning_details() {
let temp_home = TempDir::new().expect("temp home");
let mut config = test_config(&temp_home);
config.model = "gpt-5.1-codex".to_string();
config.model = "arcticfox".to_string();
config.model_provider_id = "openai".to_string();
config.model_reasoning_effort = Some(ReasoningEffort::High);
config.model_reasoning_summary = ReasoningSummary::Detailed;
@@ -144,7 +144,7 @@ fn status_snapshot_includes_reasoning_details() {
fn status_snapshot_includes_monthly_limit() {
let temp_home = TempDir::new().expect("temp home");
let mut config = test_config(&temp_home);
config.model = "gpt-5.1-codex".to_string();
config.model = "arcticfox".to_string();
config.model_provider_id = "openai".to_string();
config.cwd = PathBuf::from("/workspace/tests");
@@ -194,7 +194,7 @@ fn status_snapshot_includes_monthly_limit() {
fn status_card_token_usage_excludes_cached_tokens() {
let temp_home = TempDir::new().expect("temp home");
let mut config = test_config(&temp_home);
config.model = "gpt-5.1-codex".to_string();
config.model = "arcticfox".to_string();
config.cwd = PathBuf::from("/workspace/tests");
let auth_manager = test_auth_manager(&config);
@@ -232,7 +232,7 @@ fn status_card_token_usage_excludes_cached_tokens() {
fn status_snapshot_truncates_in_narrow_terminal() {
let temp_home = TempDir::new().expect("temp home");
let mut config = test_config(&temp_home);
config.model = "gpt-5.1-codex".to_string();
config.model = "arcticfox".to_string();
config.model_provider_id = "openai".to_string();
config.model_reasoning_effort = Some(ReasoningEffort::High);
config.model_reasoning_summary = ReasoningSummary::Detailed;
@@ -285,7 +285,7 @@ fn status_snapshot_truncates_in_narrow_terminal() {
fn status_snapshot_shows_missing_limits_message() {
let temp_home = TempDir::new().expect("temp home");
let mut config = test_config(&temp_home);
config.model = "gpt-5.1-codex".to_string();
config.model = "arcticfox".to_string();
config.cwd = PathBuf::from("/workspace/tests");
let auth_manager = test_auth_manager(&config);
@@ -325,7 +325,7 @@ fn status_snapshot_shows_missing_limits_message() {
fn status_snapshot_shows_empty_limits_message() {
let temp_home = TempDir::new().expect("temp home");
let mut config = test_config(&temp_home);
config.model = "gpt-5.1-codex".to_string();
config.model = "arcticfox".to_string();
config.cwd = PathBuf::from("/workspace/tests");
let auth_manager = test_auth_manager(&config);
@@ -370,7 +370,7 @@ fn status_snapshot_shows_empty_limits_message() {
fn status_snapshot_shows_stale_limits_message() {
let temp_home = TempDir::new().expect("temp home");
let mut config = test_config(&temp_home);
config.model = "gpt-5.1-codex".to_string();
config.model = "arcticfox".to_string();
config.cwd = PathBuf::from("/workspace/tests");
let auth_manager = test_auth_manager(&config);

View File

@@ -64,7 +64,7 @@ Notes:
The model that Codex should use.
```toml
model = "gpt-5.1" # overrides the default ("gpt-5.1-codex" on macOS/Linux, "gpt-5.1" on Windows)
model = "gpt-5.1" # overrides the default ("arcticfox" across platforms)
```
### model_providers
@@ -191,7 +191,7 @@ model = "mistral"
### model_reasoning_effort
If the selected model is known to support reasoning (for example: `o3`, `o4-mini`, `codex-*`, `gpt-5.1`, `gpt-5.1-codex`), reasoning is enabled by default when using the Responses API. As explained in the [OpenAI Platform documentation](https://platform.openai.com/docs/guides/reasoning?api-mode=responses#get-started-with-reasoning), this can be set to:
If the selected model is known to support reasoning (for example: `o3`, `o4-mini`, `codex-*`, `arcticfox`, `gpt-5.1`, `gpt-5.1-codex`), reasoning is enabled by default when using the Responses API. As explained in the [OpenAI Platform documentation](https://platform.openai.com/docs/guides/reasoning?api-mode=responses#get-started-with-reasoning), this can be set to:
- `"minimal"`
- `"low"`
@@ -835,7 +835,7 @@ Users can specify config values at multiple levels. Order of precedence is as fo
1. custom command-line argument, e.g., `--model o3`
2. as part of a profile, where the `--profile` is specified via a CLI (or in the config file itself)
3. as an entry in `config.toml`, e.g., `model = "o3"`
4. the default value that comes with Codex CLI (i.e., Codex CLI defaults to `gpt-5.1-codex`)
4. the default value that comes with Codex CLI (i.e., Codex CLI defaults to `arcticfox`)
### history
@@ -938,7 +938,7 @@ Valid values:
| Key | Type / Values | Notes |
| ------------------------------------------------ | ----------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------- |
| `model` | string | Model to use (e.g., `gpt-5.1-codex`). |
| `model` | string | Model to use (e.g., `arcticfox`). |
| `model_provider` | string | Provider id from `model_providers` (default: `openai`). |
| `model_context_window` | number | Context window tokens. |
| `model_max_output_tokens` | number | Max output tokens. |

View File

@@ -18,12 +18,11 @@ Use this example configuration as a starting point. For an explanation of each f
# Core Model Selection
################################################################################
# Primary model used by Codex. Default differs by OS; non-Windows defaults here.
# Linux/macOS default: "gpt-5.1-codex"; Windows default: "gpt-5.1".
model = "gpt-5.1-codex"
# Primary model used by Codex. Default: "arcticfox" on all platforms.
model = "arcticfox"
# Model used by the /review feature (code reviews). Default: "gpt-5.1-codex".
review_model = "gpt-5.1-codex"
# Model used by the /review feature (code reviews). Default: "arcticfox".
review_model = "arcticfox"
# Provider id selected from [model_providers]. Default: "openai".
model_provider = "openai"
@@ -33,7 +32,7 @@ model_provider = "openai"
# model_context_window = 128000 # tokens; default: auto for model
# model_max_output_tokens = 8192 # tokens; default: auto for model
# model_auto_compact_token_limit = 0 # disable/override auto; default: model family specific
# tool_output_token_limit = 10000 # tokens stored per tool output; default: 10000 for gpt-5.1-codex
# tool_output_token_limit = 10000 # tokens stored per tool output; default: 10000 for arcticfox
################################################################################
# Reasoning & Verbosity (Responses API capable models)
@@ -316,7 +315,7 @@ mcp_oauth_credentials_store = "auto"
[profiles]
# [profiles.default]
# model = "gpt-5.1-codex"
# model = "arcticfox"
# model_provider = "openai"
# approval_policy = "on-request"
# sandbox_mode = "read-only"

View File

@@ -99,7 +99,7 @@ codex exec resume --last "Fix use-after-free issues"
Only the conversation context is preserved; you must still provide flags to customize Codex behavior.
```shell
codex exec --model gpt-5.1-codex --json "Review the change, look for use-after-free issues"
codex exec --model arcticfox --json "Review the change, look for use-after-free issues"
codex exec --model gpt-5.1 --json resume --last "Fix use-after-free issues"
```