feat: model picker (#8209)

# External (non-OpenAI) Pull Request Requirements

Before opening this Pull Request, please read the dedicated
"Contributing" markdown file or your PR may be closed:
https://github.com/openai/codex/blob/main/docs/contributing.md

If your PR conforms to our contribution guidelines, replace this text
with a detailed and high quality description of your changes.

Include a link to a bug report or enhancement request.
Authored by Ahmed Ibrahim on 2025-12-17 16:12:35 -08:00; committed by GitHub
parent 25ecd0c2e4
commit 774bd9e432
14 changed files with 222 additions and 110 deletions

View File

@@ -762,7 +762,7 @@ async fn includes_configured_effort_in_request() -> anyhow::Result<()> {
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
-async fn includes_default_effort_in_request() -> anyhow::Result<()> {
+async fn includes_no_effort_in_request() -> anyhow::Result<()> {
skip_if_no_network!(Ok(()));
let server = MockServer::start().await;
@@ -791,7 +791,7 @@ async fn includes_default_effort_in_request() -> anyhow::Result<()> {
.get("reasoning")
.and_then(|t| t.get("effort"))
.and_then(|v| v.as_str()),
-Some("medium")
+None
);
Ok(())

View File

@@ -276,7 +276,6 @@ async fn compact_resume_and_fork_preserve_model_history_view() {
"tool_choice": "auto",
"parallel_tool_calls": false,
"reasoning": {
-"effort": "medium",
"summary": "auto"
},
"store": false,
@@ -346,7 +345,6 @@ async fn compact_resume_and_fork_preserve_model_history_view() {
"tool_choice": "auto",
"parallel_tool_calls": false,
"reasoning": {
-"effort": "medium",
"summary": "auto"
},
"store": false,
@@ -407,7 +405,6 @@ async fn compact_resume_and_fork_preserve_model_history_view() {
"tool_choice": "auto",
"parallel_tool_calls": false,
"reasoning": {
-"effort": "medium",
"summary": "auto"
},
"store": false,
@@ -488,7 +485,6 @@ async fn compact_resume_and_fork_preserve_model_history_view() {
"tool_choice": "auto",
"parallel_tool_calls": false,
"reasoning": {
-"effort": "medium",
"summary": "auto"
},
"store": false,
@@ -569,7 +565,6 @@ async fn compact_resume_and_fork_preserve_model_history_view() {
"tool_choice": "auto",
"parallel_tool_calls": false,
"reasoning": {
-"effort": "medium",
"summary": "auto"
},
"store": false,

View File

@@ -52,8 +52,11 @@ fn expected_models_for_api_key() -> Vec<ModelPreset> {
}
fn expected_models_for_chatgpt() -> Vec<ModelPreset> {
+let mut gpt_5_1_codex_max = gpt_5_1_codex_max();
+gpt_5_1_codex_max.is_default = false;
vec![
-gpt_5_1_codex_max(),
+caribou(),
+gpt_5_1_codex_max,
gpt_5_1_codex(),
gpt_5_1_codex_mini(),
gpt_5_2(),
@@ -61,6 +64,38 @@ fn expected_models_for_chatgpt() -> Vec<ModelPreset> {
]
}
+fn caribou() -> ModelPreset {
+ModelPreset {
+id: "caribou".to_string(),
+model: "caribou".to_string(),
+display_name: "caribou".to_string(),
+description: "Latest Codex-optimized flagship for deep and fast reasoning.".to_string(),
+default_reasoning_effort: ReasoningEffort::Medium,
+supported_reasoning_efforts: vec![
+effort(
+ReasoningEffort::Low,
+"Fast responses with lighter reasoning",
+),
+effort(
+ReasoningEffort::Medium,
+"Balances speed and reasoning depth for everyday tasks",
+),
+effort(
+ReasoningEffort::High,
+"Greater reasoning depth for complex problems",
+),
+effort(
+ReasoningEffort::XHigh,
+"Extra high reasoning depth for complex problems",
+),
+],
+is_default: true,
+upgrade: None,
+show_in_picker: true,
+supported_in_api: false,
+}
+}
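Reviewer note: the new preset sets supported_in_api: false alongside show_in_picker: true, which is presumably why caribou appears in expected_models_for_chatgpt but not in expected_models_for_api_key. Below is a minimal, hypothetical sketch of that kind of auth-gated filtering; the AuthMode enum, the trimmed-down ModelPreset, and list_models_for are invented for illustration and are not the actual picker code.

// Hypothetical sketch: drop presets that an API-key session cannot use.
#[derive(Clone, Copy, PartialEq)]
enum AuthMode {
    ApiKey,
    ChatGpt,
}

struct ModelPreset {
    id: String,
    show_in_picker: bool,
    supported_in_api: bool,
}

fn list_models_for(auth: AuthMode, presets: Vec<ModelPreset>) -> Vec<ModelPreset> {
    presets
        .into_iter()
        .filter(|p| p.show_in_picker)
        .filter(|p| auth == AuthMode::ChatGpt || p.supported_in_api)
        .collect()
}

fn main() {
    let presets = vec![
        ModelPreset { id: "caribou".into(), show_in_picker: true, supported_in_api: false },
        ModelPreset { id: "gpt-5.1-codex-max".into(), show_in_picker: true, supported_in_api: true },
    ];
    let visible = list_models_for(AuthMode::ApiKey, presets);
    // caribou is dropped for API-key auth; gpt-5.1-codex-max remains.
    assert_eq!(visible.len(), 1);
    assert_eq!(visible[0].id, "gpt-5.1-codex-max");
}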
fn gpt_5_1_codex_max() -> ModelPreset {
ModelPreset {
id: "gpt-5.1-codex-max".to_string(),
@@ -87,8 +122,9 @@ fn gpt_5_1_codex_max() -> ModelPreset {
),
],
is_default: true,
-upgrade: None,
+upgrade: Some(caribou_upgrade()),
show_in_picker: true,
+supported_in_api: true,
}
}
@@ -114,15 +150,9 @@ fn gpt_5_1_codex() -> ModelPreset {
),
],
is_default: false,
-upgrade: Some(gpt_5_1_codex_max_upgrade(
-"gpt-5.1-codex",
-vec![
-ReasoningEffort::Low,
-ReasoningEffort::Medium,
-ReasoningEffort::High,
-],
-)),
+upgrade: Some(caribou_upgrade()),
show_in_picker: true,
+supported_in_api: true,
}
}
@@ -144,11 +174,9 @@ fn gpt_5_1_codex_mini() -> ModelPreset {
),
],
is_default: false,
-upgrade: Some(gpt_5_1_codex_max_upgrade(
-"gpt-5.1-codex-mini",
-vec![ReasoningEffort::Medium, ReasoningEffort::High],
-)),
+upgrade: None,
show_in_picker: true,
+supported_in_api: true,
}
}
@@ -180,8 +208,9 @@ fn gpt_5_2() -> ModelPreset {
),
],
is_default: false,
-upgrade: None,
+upgrade: Some(caribou_upgrade()),
show_in_picker: true,
+supported_in_api: true,
}
}
@@ -207,59 +236,17 @@ fn gpt_5_1() -> ModelPreset {
),
],
is_default: false,
-upgrade: Some(gpt_5_1_codex_max_upgrade(
-"gpt-5.1",
-vec![
-ReasoningEffort::Low,
-ReasoningEffort::Medium,
-ReasoningEffort::High,
-],
-)),
+upgrade: Some(caribou_upgrade()),
show_in_picker: true,
+supported_in_api: true,
}
}
-fn gpt_5_1_codex_max_upgrade(
-migration_config_key: &str,
-supported_efforts: Vec<ReasoningEffort>,
-) -> codex_protocol::openai_models::ModelUpgrade {
-use std::collections::HashMap;
-fn nearest_effort(effort: ReasoningEffort, supported: &[ReasoningEffort]) -> ReasoningEffort {
-supported
-.iter()
-.min_by_key(|candidate| (effort_rank(effort) - effort_rank(**candidate)).abs())
-.copied()
-.unwrap_or(ReasoningEffort::Low)
-}
-fn effort_rank(effort: ReasoningEffort) -> i32 {
-match effort {
-ReasoningEffort::None => 0,
-ReasoningEffort::Minimal => 1,
-ReasoningEffort::Low => 2,
-ReasoningEffort::Medium => 3,
-ReasoningEffort::High => 4,
-ReasoningEffort::XHigh => 5,
-}
-}
-let mut mapping = HashMap::new();
-for effort in [
-ReasoningEffort::None,
-ReasoningEffort::Minimal,
-ReasoningEffort::Low,
-ReasoningEffort::Medium,
-ReasoningEffort::High,
-ReasoningEffort::XHigh,
-] {
-mapping.insert(effort, nearest_effort(effort, &supported_efforts));
-}
+fn caribou_upgrade() -> codex_protocol::openai_models::ModelUpgrade {
codex_protocol::openai_models::ModelUpgrade {
-id: "gpt-5.1-codex-max".to_string(),
-reasoning_effort_mapping: Some(mapping),
-migration_config_key: migration_config_key.to_string(),
+id: "caribou".to_string(),
+reasoning_effort_mapping: None,
+migration_config_key: "caribou".to_string(),
}
}
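Reviewer note: the removed gpt_5_1_codex_max_upgrade built an explicit nearest-effort mapping per source model, while caribou_upgrade ships reasoning_effort_mapping: None. The sketch below shows one way a consumer could resolve the effort when no mapping is present, assuming a fallback to the target preset's default; resolve_upgraded_effort and the simplified types are hypothetical stand-ins, not the codex_protocol definitions.

use std::collections::HashMap;

// Simplified stand-ins for the protocol types (illustration only).
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
enum ReasoningEffort {
    Low,
    Medium,
    High,
    XHigh,
}

struct ModelUpgrade {
    id: String,
    // Some(map): translate the user's current effort onto the target model.
    // None (as in caribou_upgrade): no translation table is provided.
    reasoning_effort_mapping: Option<HashMap<ReasoningEffort, ReasoningEffort>>,
}

// Assumed resolution rule: use the mapping when present, otherwise fall back
// to the target preset's default effort.
fn resolve_upgraded_effort(
    current: ReasoningEffort,
    upgrade: &ModelUpgrade,
    target_default: ReasoningEffort,
) -> ReasoningEffort {
    match &upgrade.reasoning_effort_mapping {
        Some(map) => map.get(&current).copied().unwrap_or(target_default),
        None => target_default,
    }
}

fn main() {
    let caribou_upgrade = ModelUpgrade {
        id: "caribou".to_string(),
        reasoning_effort_mapping: None,
    };
    // With no mapping, an upgrade from XHigh lands on the target default (Medium).
    let effort = resolve_upgraded_effort(
        ReasoningEffort::XHigh,
        &caribou_upgrade,
        ReasoningEffort::Medium,
    );
    println!("upgrading to {} -> {:?}", caribou_upgrade.id, effort);
}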

View File

@@ -388,7 +388,7 @@ async fn remote_models_hide_picker_only_models() -> Result<()> {
);
let selected = manager.get_model(&None, &config).await;
-assert_eq!(selected, "gpt-5.1-codex-max");
+assert_eq!(selected, "caribou");
let available = manager.list_models(&config).await;
assert!(