diff --git a/codex-rs/core/src/features.rs b/codex-rs/core/src/features.rs
index 0b1563bd5e..94610f0b75 100644
--- a/codex-rs/core/src/features.rs
+++ b/codex-rs/core/src/features.rs
@@ -405,7 +405,7 @@ pub const FEATURES: &[FeatureSpec] = &[
         id: Feature::RemoteModels,
         key: "remote_models",
         stage: Stage::Beta,
-        default_enabled: false,
+        default_enabled: true,
     },
     FeatureSpec {
         id: Feature::PowershellUtf8,
diff --git a/codex-rs/core/tests/suite/cli_stream.rs b/codex-rs/core/tests/suite/cli_stream.rs
index 9108f263f9..12be98723c 100644
--- a/codex-rs/core/tests/suite/cli_stream.rs
+++ b/codex-rs/core/tests/suite/cli_stream.rs
@@ -1,5 +1,6 @@
 use assert_cmd::Command as AssertCommand;
 use codex_core::RolloutRecorder;
+use codex_core::auth::CODEX_API_KEY_ENV_VAR;
 use codex_core::protocol::GitInfo;
 use codex_utils_cargo_bin::find_resource;
 use core_test_support::fs_wait;
@@ -239,7 +240,7 @@ async fn integration_creates_and_checks_session_file() -> anyhow::Result<()> {
         .arg(&repo_root)
         .arg(&prompt);
     cmd.env("CODEX_HOME", home.path())
-        .env("OPENAI_API_KEY", "dummy")
+        .env(CODEX_API_KEY_ENV_VAR, "dummy")
         .env("CODEX_RS_SSE_FIXTURE", &fixture)
         // Required for CLI arg parsing even though fixture short-circuits network usage.
         .env("OPENAI_BASE_URL", "http://unused.local");
diff --git a/codex-rs/core/tests/suite/client.rs b/codex-rs/core/tests/suite/client.rs
index cfae930d95..1b43f7932f 100644
--- a/codex-rs/core/tests/suite/client.rs
+++ b/codex-rs/core/tests/suite/client.rs
@@ -826,7 +826,7 @@ async fn includes_no_effort_in_request() -> anyhow::Result<()> {
             .get("reasoning")
             .and_then(|t| t.get("effort"))
             .and_then(|v| v.as_str()),
-        None
+        Some("medium")
     );
 
     Ok(())
diff --git a/codex-rs/core/tests/suite/compact_resume_fork.rs b/codex-rs/core/tests/suite/compact_resume_fork.rs
index e054908aec..b7ce49abbe 100644
--- a/codex-rs/core/tests/suite/compact_resume_fork.rs
+++ b/codex-rs/core/tests/suite/compact_resume_fork.rs
@@ -278,6 +278,7 @@ async fn compact_resume_and_fork_preserve_model_history_view() {
         "tool_choice": "auto",
         "parallel_tool_calls": false,
         "reasoning": {
+            "effort": "medium",
             "summary": "auto"
         },
         "store": false,
@@ -348,6 +349,7 @@ async fn compact_resume_and_fork_preserve_model_history_view() {
         "tool_choice": "auto",
         "parallel_tool_calls": false,
         "reasoning": {
+            "effort": "medium",
             "summary": "auto"
         },
         "store": false,
@@ -409,6 +411,7 @@ async fn compact_resume_and_fork_preserve_model_history_view() {
         "tool_choice": "auto",
         "parallel_tool_calls": false,
         "reasoning": {
+            "effort": "medium",
             "summary": "auto"
         },
         "store": false,
@@ -511,6 +514,7 @@ async fn compact_resume_and_fork_preserve_model_history_view() {
         "tool_choice": "auto",
         "parallel_tool_calls": false,
         "reasoning": {
+            "effort": "medium",
             "summary": "auto"
         },
         "store": false,
@@ -634,6 +638,7 @@ async fn compact_resume_and_fork_preserve_model_history_view() {
         "tool_choice": "auto",
         "parallel_tool_calls": false,
         "reasoning": {
+            "effort": "medium",
             "summary": "auto"
         },
         "store": false,
diff --git a/codex-rs/core/tests/suite/list_models.rs b/codex-rs/core/tests/suite/list_models.rs
index 5f21e94537..1791d28b01 100644
--- a/codex-rs/core/tests/suite/list_models.rs
+++ b/codex-rs/core/tests/suite/list_models.rs
@@ -4,11 +4,13 @@ use codex_core::ThreadManager;
 use codex_core::built_in_model_providers;
 use codex_core::models_manager::manager::RefreshStrategy;
 use codex_protocol::openai_models::ModelPreset;
+use codex_protocol::openai_models::ModelUpgrade;
 use codex_protocol::openai_models::ReasoningEffort;
 use codex_protocol::openai_models::ReasoningEffortPreset;
 use core_test_support::load_default_config_for_test;
 use indoc::indoc;
 use pretty_assertions::assert_eq;
+use std::collections::HashMap;
 use tempfile::tempdir;
 
 #[tokio::test(flavor = "multi_thread", worker_threads = 2)]
@@ -50,35 +52,21 @@ async fn list_models_returns_chatgpt_models() -> Result<()> {
 fn expected_models_for_api_key() -> Vec<ModelPreset> {
     vec![
         gpt_52_codex(),
-        gpt_5_1_codex_max(),
-        gpt_5_1_codex_mini(),
         gpt_5_2(),
+        gpt_5_1_codex_max(),
+        gpt_5_1_codex(),
+        gpt_5_1_codex_mini(),
+        gpt_5_1(),
+        gpt_5_codex(),
+        gpt_5(),
+        gpt_5_codex_mini(),
         bengalfox(),
         boomslang(),
-        gpt_5_codex(),
-        gpt_5_codex_mini(),
-        gpt_5_1_codex(),
-        gpt_5(),
-        gpt_5_1(),
     ]
 }
 
 fn expected_models_for_chatgpt() -> Vec<ModelPreset> {
-    let mut gpt_5_1_codex_max = gpt_5_1_codex_max();
-    gpt_5_1_codex_max.is_default = false;
-    vec![
-        gpt_52_codex(),
-        gpt_5_1_codex_max,
-        gpt_5_1_codex_mini(),
-        gpt_5_2(),
-        bengalfox(),
-        boomslang(),
-        gpt_5_codex(),
-        gpt_5_codex_mini(),
-        gpt_5_1_codex(),
-        gpt_5(),
-        gpt_5_1(),
-    ]
+    expected_models_for_api_key()
 }
 
 fn gpt_52_codex() -> ModelPreset {
@@ -139,7 +127,17 @@ fn gpt_5_1_codex_max() -> ModelPreset {
             ),
         ],
         is_default: false,
-        upgrade: Some(gpt52_codex_upgrade()),
+        upgrade: Some(gpt52_codex_upgrade(
+            "gpt-5.1-codex-max",
+            HashMap::from([
+                (ReasoningEffort::Low, ReasoningEffort::Low),
+                (ReasoningEffort::None, ReasoningEffort::Low),
+                (ReasoningEffort::Medium, ReasoningEffort::Medium),
+                (ReasoningEffort::High, ReasoningEffort::High),
+                (ReasoningEffort::Minimal, ReasoningEffort::Low),
+                (ReasoningEffort::XHigh, ReasoningEffort::XHigh),
+            ]),
+        )),
         show_in_picker: true,
         supported_in_api: true,
     }
@@ -163,7 +161,17 @@ fn gpt_5_1_codex_mini() -> ModelPreset {
             ),
         ],
         is_default: false,
-        upgrade: Some(gpt52_codex_upgrade()),
+        upgrade: Some(gpt52_codex_upgrade(
+            "gpt-5.1-codex-mini",
+            HashMap::from([
+                (ReasoningEffort::High, ReasoningEffort::High),
+                (ReasoningEffort::XHigh, ReasoningEffort::High),
+                (ReasoningEffort::Minimal, ReasoningEffort::Medium),
+                (ReasoningEffort::None, ReasoningEffort::Medium),
+                (ReasoningEffort::Low, ReasoningEffort::Medium),
+                (ReasoningEffort::Medium, ReasoningEffort::Medium),
+            ]),
+        )),
         show_in_picker: true,
         supported_in_api: true,
     }
@@ -193,11 +201,21 @@ fn gpt_5_2() -> ModelPreset {
             ),
             effort(
                 ReasoningEffort::XHigh,
-                "Extra high reasoning depth for complex problems",
+                "Extra high reasoning for complex problems",
             ),
         ],
         is_default: false,
-        upgrade: Some(gpt52_codex_upgrade()),
+        upgrade: Some(gpt52_codex_upgrade(
+            "gpt-5.2",
+            HashMap::from([
+                (ReasoningEffort::High, ReasoningEffort::High),
+                (ReasoningEffort::None, ReasoningEffort::Low),
+                (ReasoningEffort::Minimal, ReasoningEffort::Low),
+                (ReasoningEffort::Low, ReasoningEffort::Low),
+                (ReasoningEffort::Medium, ReasoningEffort::Medium),
+                (ReasoningEffort::XHigh, ReasoningEffort::XHigh),
+            ]),
+        )),
         show_in_picker: true,
         supported_in_api: true,
     }
@@ -289,7 +307,17 @@ fn gpt_5_codex() -> ModelPreset {
             ),
         ],
         is_default: false,
-        upgrade: Some(gpt52_codex_upgrade()),
+        upgrade: Some(gpt52_codex_upgrade(
+            "gpt-5-codex",
+            HashMap::from([
+                (ReasoningEffort::Minimal, ReasoningEffort::Low),
+                (ReasoningEffort::High, ReasoningEffort::High),
+                (ReasoningEffort::Medium, ReasoningEffort::Medium),
+                (ReasoningEffort::XHigh, ReasoningEffort::High),
+                (ReasoningEffort::None, ReasoningEffort::Low),
+                (ReasoningEffort::Low, ReasoningEffort::Low),
+            ]),
+        )),
         show_in_picker: false,
         supported_in_api: true,
     }
@@ -313,7 +341,17 @@ fn gpt_5_codex_mini() -> ModelPreset {
             ),
         ],
         is_default: false,
-        upgrade: Some(gpt52_codex_upgrade()),
+        upgrade: Some(gpt52_codex_upgrade(
+            "gpt-5-codex-mini",
+            HashMap::from([
+                (ReasoningEffort::None, ReasoningEffort::Medium),
+                (ReasoningEffort::XHigh, ReasoningEffort::High),
+                (ReasoningEffort::High, ReasoningEffort::High),
+                (ReasoningEffort::Low, ReasoningEffort::Medium),
+                (ReasoningEffort::Medium, ReasoningEffort::Medium),
+                (ReasoningEffort::Minimal, ReasoningEffort::Medium),
+            ]),
+        )),
         show_in_picker: false,
         supported_in_api: true,
     }
@@ -341,7 +379,17 @@ fn gpt_5_1_codex() -> ModelPreset {
             ),
         ],
         is_default: false,
-        upgrade: Some(gpt52_codex_upgrade()),
+        upgrade: Some(gpt52_codex_upgrade(
+            "gpt-5.1-codex",
+            HashMap::from([
+                (ReasoningEffort::Minimal, ReasoningEffort::Low),
+                (ReasoningEffort::Low, ReasoningEffort::Low),
+                (ReasoningEffort::Medium, ReasoningEffort::Medium),
+                (ReasoningEffort::None, ReasoningEffort::Low),
+                (ReasoningEffort::High, ReasoningEffort::High),
+                (ReasoningEffort::XHigh, ReasoningEffort::High),
+            ]),
+        )),
         show_in_picker: false,
         supported_in_api: true,
     }
@@ -373,7 +421,17 @@ fn gpt_5() -> ModelPreset {
             ),
         ],
         is_default: false,
-        upgrade: Some(gpt52_codex_upgrade()),
+        upgrade: Some(gpt52_codex_upgrade(
+            "gpt-5",
+            HashMap::from([
+                (ReasoningEffort::XHigh, ReasoningEffort::High),
+                (ReasoningEffort::Minimal, ReasoningEffort::Minimal),
+                (ReasoningEffort::Low, ReasoningEffort::Low),
+                (ReasoningEffort::None, ReasoningEffort::Minimal),
+                (ReasoningEffort::High, ReasoningEffort::High),
+                (ReasoningEffort::Medium, ReasoningEffort::Medium),
+            ]),
+        )),
         show_in_picker: false,
         supported_in_api: true,
     }
@@ -401,27 +459,37 @@ fn gpt_5_1() -> ModelPreset {
             ),
         ],
         is_default: false,
-        upgrade: Some(gpt52_codex_upgrade()),
+        upgrade: Some(gpt52_codex_upgrade(
+            "gpt-5.1",
+            HashMap::from([
+                (ReasoningEffort::None, ReasoningEffort::Low),
+                (ReasoningEffort::Medium, ReasoningEffort::Medium),
+                (ReasoningEffort::High, ReasoningEffort::High),
+                (ReasoningEffort::XHigh, ReasoningEffort::High),
+                (ReasoningEffort::Low, ReasoningEffort::Low),
+                (ReasoningEffort::Minimal, ReasoningEffort::Low),
+            ]),
+        )),
         show_in_picker: false,
         supported_in_api: true,
     }
 }
 
-fn gpt52_codex_upgrade() -> codex_protocol::openai_models::ModelUpgrade {
-    codex_protocol::openai_models::ModelUpgrade {
+fn gpt52_codex_upgrade(
+    migration_config_key: &str,
+    reasoning_effort_mapping: HashMap<ReasoningEffort, ReasoningEffort>,
+) -> ModelUpgrade {
+    ModelUpgrade {
         id: "gpt-5.2-codex".to_string(),
-        reasoning_effort_mapping: None,
-        migration_config_key: "gpt-5.2-codex".to_string(),
-        model_link: Some("https://openai.com/index/introducing-gpt-5-2-codex".to_string()),
-        upgrade_copy: Some(
-            "Codex is now powered by gpt-5.2-codex, our latest frontier agentic coding model. It is smarter and faster than its predecessors and capable of long-running project-scale work."
-                .to_string(),
-        ),
+        reasoning_effort_mapping: Some(reasoning_effort_mapping),
+        migration_config_key: migration_config_key.to_string(),
+        model_link: None,
+        upgrade_copy: None,
         migration_markdown: Some(
             indoc! {r#"
                 **Codex just got an upgrade. Introducing {model_to}.**
 
-                Codex is now powered by gpt-5.2-codex, our latest frontier agentic coding model. It is smarter and faster than its predecessors and capable of long-running project-scale work. Learn more about {model_to} at https://openai.com/index/introducing-gpt-5-2-codex
+                Codex is now powered by {model_to}, our latest frontier agentic coding model. It is smarter and faster than its predecessors and capable of long-running project-scale work. Learn more about {model_to} at https://openai.com/index/introducing-gpt-5-2-codex
 
                 You can continue using {model_from} if you prefer.
             "#}
diff --git a/codex-rs/tui/src/chatwidget/snapshots/codex_tui__chatwidget__tests__model_selection_popup.snap b/codex-rs/tui/src/chatwidget/snapshots/codex_tui__chatwidget__tests__model_selection_popup.snap
index 905925709e..d2676235a2 100644
--- a/codex-rs/tui/src/chatwidget/snapshots/codex_tui__chatwidget__tests__model_selection_popup.snap
+++ b/codex-rs/tui/src/chatwidget/snapshots/codex_tui__chatwidget__tests__model_selection_popup.snap
@@ -6,11 +6,11 @@ expression: popup
 Access legacy models by running codex -m or in your config.toml
 
 › 1. gpt-5.2-codex (default)  Latest frontier agentic coding model.
-  2. gpt-5.1-codex-max        Codex-optimized flagship for deep and fast
-                              reasoning.
-  3. gpt-5.1-codex-mini       Optimized for codex. Cheaper, faster, but less
-                              capable.
-  4. gpt-5.2                  Latest frontier model with improvements across
+  2. gpt-5.2                  Latest frontier model with improvements across
                               knowledge, reasoning and coding
+  3. gpt-5.1-codex-max        Codex-optimized flagship for deep and fast
+                              reasoning.
+  4. gpt-5.1-codex-mini       Optimized for codex. Cheaper, faster, but less
+                              capable.
 
 Press enter to select reasoning effort, or esc to dismiss.
diff --git a/codex-rs/tui2/src/chatwidget/snapshots/codex_tui2__chatwidget__tests__model_selection_popup.snap b/codex-rs/tui2/src/chatwidget/snapshots/codex_tui2__chatwidget__tests__model_selection_popup.snap
index 190f9c2933..27479f97f0 100644
--- a/codex-rs/tui2/src/chatwidget/snapshots/codex_tui2__chatwidget__tests__model_selection_popup.snap
+++ b/codex-rs/tui2/src/chatwidget/snapshots/codex_tui2__chatwidget__tests__model_selection_popup.snap
@@ -6,11 +6,11 @@ expression: popup
 Access legacy models by running codex -m or in your config.toml
 
 › 1. gpt-5.2-codex (default)  Latest frontier agentic coding model.
-  2. gpt-5.1-codex-max        Codex-optimized flagship for deep and fast
-                              reasoning.
-  3. gpt-5.1-codex-mini       Optimized for codex. Cheaper, faster, but less
-                              capable.
-  4. gpt-5.2                  Latest frontier model with improvements across
+  2. gpt-5.2                  Latest frontier model with improvements across
                               knowledge, reasoning and coding
+  3. gpt-5.1-codex-max        Codex-optimized flagship for deep and fast
+                              reasoning.
+  4. gpt-5.1-codex-mini       Optimized for codex. Cheaper, faster, but less
+                              capable.
 
 Press enter to select reasoning effort, or esc to dismiss.