This commit is contained in:
Ahmed Ibrahim
2026-02-09 15:22:29 -08:00
parent 481145e959
commit 88d9bf2cb3
2 changed files with 157 additions and 743 deletions

View File

@@ -1,135 +1,137 @@
use crate::auth::AuthMode;
use codex_protocol::openai_models::ModelPreset;
use codex_protocol::openai_models::ModelUpgrade;
use codex_protocol::openai_models::ModelsResponse;
use codex_protocol::openai_models::ReasoningEffort;
use codex_protocol::openai_models::ReasoningEffortPreset;
use codex_protocol::openai_models::default_input_modalities;
use indoc::indoc;
use once_cell::sync::Lazy;
use std::collections::HashMap;
/// Config key that, when set, suppresses the gpt-5.1 migration prompt.
pub const HIDE_GPT5_1_MIGRATION_PROMPT_CONFIG: &str = "hide_gpt5_1_migration_prompt";
/// Config key that, when set, suppresses the gpt-5.1-codex-max migration prompt.
pub const HIDE_GPT_5_1_CODEX_MAX_MIGRATION_PROMPT_CONFIG: &str =
    "hide_gpt-5.1-codex-max_migration_prompt";
static PRESETS: Lazy<Vec<ModelPreset>> = Lazy::new(|| {
/// Built-in model presets, computed once on first access from the bundled
/// models.json plus the hand-maintained overrides and internal presets.
static BUILTIN_PRESETS: Lazy<Vec<ModelPreset>> = Lazy::new(builtin_model_presets_from_models_json);
fn builtin_model_presets_from_models_json() -> Vec<ModelPreset> {
let mut presets = serde_json::from_str::<ModelsResponse>(include_str!("../../models.json"))
.expect("bundled models.json must parse")
.models
.into_iter()
.map(ModelPreset::from)
.collect::<Vec<_>>();
for preset in &mut presets {
override_builtin_preset_properties(preset);
}
presets.extend(internal_model_presets());
presets
}
fn override_builtin_preset_properties(preset: &mut ModelPreset) {
match preset.model.as_str() {
"gpt-5.2-codex" => {
preset.is_default = true;
preset.supports_personality = true;
}
"gpt-5.1-codex-max" => {
preset.upgrade = Some(gpt_52_codex_upgrade(
"gpt-5.1-codex-max",
HashMap::from([
(ReasoningEffort::Low, ReasoningEffort::Low),
(ReasoningEffort::None, ReasoningEffort::Low),
(ReasoningEffort::Medium, ReasoningEffort::Medium),
(ReasoningEffort::High, ReasoningEffort::High),
(ReasoningEffort::Minimal, ReasoningEffort::Low),
(ReasoningEffort::XHigh, ReasoningEffort::XHigh),
]),
));
}
"gpt-5.1-codex-mini" => {
preset.upgrade = Some(gpt_52_codex_upgrade(
"gpt-5.1-codex-mini",
HashMap::from([
(ReasoningEffort::High, ReasoningEffort::High),
(ReasoningEffort::XHigh, ReasoningEffort::High),
(ReasoningEffort::Minimal, ReasoningEffort::Medium),
(ReasoningEffort::None, ReasoningEffort::Medium),
(ReasoningEffort::Low, ReasoningEffort::Medium),
(ReasoningEffort::Medium, ReasoningEffort::Medium),
]),
));
}
"gpt-5.2" => {
preset.upgrade = Some(gpt_52_codex_upgrade(
"gpt-5.2",
HashMap::from([
(ReasoningEffort::High, ReasoningEffort::High),
(ReasoningEffort::None, ReasoningEffort::Low),
(ReasoningEffort::Minimal, ReasoningEffort::Low),
(ReasoningEffort::Low, ReasoningEffort::Low),
(ReasoningEffort::Medium, ReasoningEffort::Medium),
(ReasoningEffort::XHigh, ReasoningEffort::XHigh),
]),
));
}
"gpt-5.1-codex" => {
preset.upgrade = Some(gpt_52_codex_upgrade(
"gpt-5.1-codex",
HashMap::from([
(ReasoningEffort::Minimal, ReasoningEffort::Low),
(ReasoningEffort::Low, ReasoningEffort::Low),
(ReasoningEffort::Medium, ReasoningEffort::Medium),
(ReasoningEffort::None, ReasoningEffort::Low),
(ReasoningEffort::High, ReasoningEffort::High),
(ReasoningEffort::XHigh, ReasoningEffort::High),
]),
));
}
"gpt-5-codex" => {
preset.upgrade = Some(gpt_52_codex_upgrade(
"gpt-5-codex",
HashMap::from([
(ReasoningEffort::Minimal, ReasoningEffort::Low),
(ReasoningEffort::High, ReasoningEffort::High),
(ReasoningEffort::Medium, ReasoningEffort::Medium),
(ReasoningEffort::XHigh, ReasoningEffort::High),
(ReasoningEffort::None, ReasoningEffort::Low),
(ReasoningEffort::Low, ReasoningEffort::Low),
]),
));
}
"gpt-5" => {
preset.upgrade = Some(gpt_52_codex_upgrade(
"gpt-5",
HashMap::from([
(ReasoningEffort::XHigh, ReasoningEffort::High),
(ReasoningEffort::Minimal, ReasoningEffort::Minimal),
(ReasoningEffort::Low, ReasoningEffort::Low),
(ReasoningEffort::None, ReasoningEffort::Minimal),
(ReasoningEffort::High, ReasoningEffort::High),
(ReasoningEffort::Medium, ReasoningEffort::Medium),
]),
));
}
"gpt-5-codex-mini" => {
preset.upgrade = Some(gpt_52_codex_upgrade(
"gpt-5-codex-mini",
HashMap::from([
(ReasoningEffort::Minimal, ReasoningEffort::Medium),
(ReasoningEffort::XHigh, ReasoningEffort::High),
(ReasoningEffort::High, ReasoningEffort::High),
(ReasoningEffort::Low, ReasoningEffort::Medium),
(ReasoningEffort::Medium, ReasoningEffort::Medium),
(ReasoningEffort::None, ReasoningEffort::Medium),
]),
));
}
_ => {}
}
}
fn internal_model_presets() -> Vec<ModelPreset> {
vec![
ModelPreset {
id: "gpt-5.2-codex".to_string(),
model: "gpt-5.2-codex".to_string(),
display_name: "gpt-5.2-codex".to_string(),
description: "Latest frontier agentic coding model.".to_string(),
default_reasoning_effort: ReasoningEffort::Medium,
supported_reasoning_efforts: vec![
ReasoningEffortPreset {
effort: ReasoningEffort::Low,
description: "Fast responses with lighter reasoning".to_string(),
},
ReasoningEffortPreset {
effort: ReasoningEffort::Medium,
description: "Balances speed and reasoning depth for everyday tasks".to_string(),
},
ReasoningEffortPreset {
effort: ReasoningEffort::High,
description: "Greater reasoning depth for complex problems".to_string(),
},
ReasoningEffortPreset {
effort: ReasoningEffort::XHigh,
description: "Extra high reasoning depth for complex problems".to_string(),
},
],
supports_personality: true,
is_default: true,
upgrade: None,
show_in_picker: true,
supported_in_api: true,
input_modalities: default_input_modalities(),
},
ModelPreset {
id: "gpt-5.1-codex-max".to_string(),
model: "gpt-5.1-codex-max".to_string(),
display_name: "gpt-5.1-codex-max".to_string(),
description: "Codex-optimized flagship for deep and fast reasoning.".to_string(),
default_reasoning_effort: ReasoningEffort::Medium,
supported_reasoning_efforts: vec![
ReasoningEffortPreset {
effort: ReasoningEffort::Low,
description: "Fast responses with lighter reasoning".to_string(),
},
ReasoningEffortPreset {
effort: ReasoningEffort::Medium,
description: "Balances speed and reasoning depth for everyday tasks".to_string(),
},
ReasoningEffortPreset {
effort: ReasoningEffort::High,
description: "Greater reasoning depth for complex problems".to_string(),
},
ReasoningEffortPreset {
effort: ReasoningEffort::XHigh,
description: "Extra high reasoning depth for complex problems".to_string(),
},
],
supports_personality: false,
is_default: false,
upgrade: Some(gpt_52_codex_upgrade()),
show_in_picker: true,
supported_in_api: true,
input_modalities: default_input_modalities(),
},
ModelPreset {
id: "gpt-5.1-codex-mini".to_string(),
model: "gpt-5.1-codex-mini".to_string(),
display_name: "gpt-5.1-codex-mini".to_string(),
description: "Optimized for codex. Cheaper, faster, but less capable.".to_string(),
default_reasoning_effort: ReasoningEffort::Medium,
supported_reasoning_efforts: vec![
ReasoningEffortPreset {
effort: ReasoningEffort::Medium,
description: "Dynamically adjusts reasoning based on the task".to_string(),
},
ReasoningEffortPreset {
effort: ReasoningEffort::High,
description: "Maximizes reasoning depth for complex or ambiguous problems"
.to_string(),
},
],
supports_personality: false,
is_default: false,
upgrade: Some(gpt_52_codex_upgrade()),
show_in_picker: true,
supported_in_api: true,
input_modalities: default_input_modalities(),
},
ModelPreset {
id: "gpt-5.2".to_string(),
model: "gpt-5.2".to_string(),
display_name: "gpt-5.2".to_string(),
description: "Latest frontier model with improvements across knowledge, reasoning and coding".to_string(),
default_reasoning_effort: ReasoningEffort::Medium,
supported_reasoning_efforts: vec![
ReasoningEffortPreset {
effort: ReasoningEffort::Low,
description: "Balances speed with some reasoning; useful for straightforward queries and short explanations".to_string(),
},
ReasoningEffortPreset {
effort: ReasoningEffort::Medium,
description: "Provides a solid balance of reasoning depth and latency for general-purpose tasks".to_string(),
},
ReasoningEffortPreset {
effort: ReasoningEffort::High,
description: "Maximizes reasoning depth for complex or ambiguous problems".to_string(),
},
ReasoningEffortPreset {
effort: ReasoningEffort::XHigh,
description: "Extra high reasoning depth for complex problems".to_string(),
},
],
supports_personality: false,
is_default: false,
upgrade: Some(gpt_52_codex_upgrade()),
show_in_picker: true,
supported_in_api: true,
input_modalities: default_input_modalities(),
},
ModelPreset {
id: "bengalfox".to_string(),
model: "bengalfox".to_string(),
@@ -192,151 +194,17 @@ static PRESETS: Lazy<Vec<ModelPreset>> = Lazy::new(|| {
supported_in_api: true,
input_modalities: default_input_modalities(),
},
// Deprecated models.
ModelPreset {
id: "gpt-5-codex".to_string(),
model: "gpt-5-codex".to_string(),
display_name: "gpt-5-codex".to_string(),
description: "Optimized for codex.".to_string(),
default_reasoning_effort: ReasoningEffort::Medium,
supported_reasoning_efforts: vec![
ReasoningEffortPreset {
effort: ReasoningEffort::Low,
description: "Fastest responses with limited reasoning".to_string(),
},
ReasoningEffortPreset {
effort: ReasoningEffort::Medium,
description: "Dynamically adjusts reasoning based on the task".to_string(),
},
ReasoningEffortPreset {
effort: ReasoningEffort::High,
description: "Maximizes reasoning depth for complex or ambiguous problems".to_string(),
},
],
supports_personality: false,
is_default: false,
upgrade: Some(gpt_52_codex_upgrade()),
show_in_picker: false,
supported_in_api: true,
input_modalities: default_input_modalities(),
},
ModelPreset {
id: "gpt-5-codex-mini".to_string(),
model: "gpt-5-codex-mini".to_string(),
display_name: "gpt-5-codex-mini".to_string(),
description: "Optimized for codex. Cheaper, faster, but less capable.".to_string(),
default_reasoning_effort: ReasoningEffort::Medium,
supported_reasoning_efforts: vec![
ReasoningEffortPreset {
effort: ReasoningEffort::Medium,
description: "Dynamically adjusts reasoning based on the task".to_string(),
},
ReasoningEffortPreset {
effort: ReasoningEffort::High,
description: "Maximizes reasoning depth for complex or ambiguous problems".to_string(),
},
],
supports_personality: false,
is_default: false,
upgrade: Some(gpt_52_codex_upgrade()),
show_in_picker: false,
supported_in_api: true,
input_modalities: default_input_modalities(),
},
ModelPreset {
id: "gpt-5.1-codex".to_string(),
model: "gpt-5.1-codex".to_string(),
display_name: "gpt-5.1-codex".to_string(),
description: "Optimized for codex.".to_string(),
default_reasoning_effort: ReasoningEffort::Medium,
supported_reasoning_efforts: vec![
ReasoningEffortPreset {
effort: ReasoningEffort::Low,
description: "Fastest responses with limited reasoning".to_string(),
},
ReasoningEffortPreset {
effort: ReasoningEffort::Medium,
description: "Dynamically adjusts reasoning based on the task".to_string(),
},
ReasoningEffortPreset {
effort: ReasoningEffort::High,
description: "Maximizes reasoning depth for complex or ambiguous problems"
.to_string(),
},
],
supports_personality: false,
is_default: false,
upgrade: Some(gpt_52_codex_upgrade()),
show_in_picker: false,
supported_in_api: true,
input_modalities: default_input_modalities(),
},
ModelPreset {
id: "gpt-5".to_string(),
model: "gpt-5".to_string(),
display_name: "gpt-5".to_string(),
description: "Broad world knowledge with strong general reasoning.".to_string(),
default_reasoning_effort: ReasoningEffort::Medium,
supported_reasoning_efforts: vec![
ReasoningEffortPreset {
effort: ReasoningEffort::Minimal,
description: "Fastest responses with little reasoning".to_string(),
},
ReasoningEffortPreset {
effort: ReasoningEffort::Low,
description: "Balances speed with some reasoning; useful for straightforward queries and short explanations".to_string(),
},
ReasoningEffortPreset {
effort: ReasoningEffort::Medium,
description: "Provides a solid balance of reasoning depth and latency for general-purpose tasks".to_string(),
},
ReasoningEffortPreset {
effort: ReasoningEffort::High,
description: "Maximizes reasoning depth for complex or ambiguous problems".to_string(),
},
],
supports_personality: false,
is_default: false,
upgrade: Some(gpt_52_codex_upgrade()),
show_in_picker: false,
supported_in_api: true,
input_modalities: default_input_modalities(),
},
ModelPreset {
id: "gpt-5.1".to_string(),
model: "gpt-5.1".to_string(),
display_name: "gpt-5.1".to_string(),
description: "Broad world knowledge with strong general reasoning.".to_string(),
default_reasoning_effort: ReasoningEffort::Medium,
supported_reasoning_efforts: vec![
ReasoningEffortPreset {
effort: ReasoningEffort::Low,
description: "Balances speed with some reasoning; useful for straightforward queries and short explanations".to_string(),
},
ReasoningEffortPreset {
effort: ReasoningEffort::Medium,
description: "Provides a solid balance of reasoning depth and latency for general-purpose tasks".to_string(),
},
ReasoningEffortPreset {
effort: ReasoningEffort::High,
description: "Maximizes reasoning depth for complex or ambiguous problems".to_string(),
},
],
supports_personality: false,
is_default: false,
upgrade: Some(gpt_52_codex_upgrade()),
show_in_picker: false,
supported_in_api: true,
input_modalities: default_input_modalities(),
},
]
});
}
fn gpt_52_codex_upgrade() -> ModelUpgrade {
fn gpt_52_codex_upgrade(
migration_config_key: &str,
reasoning_effort_mapping: HashMap<ReasoningEffort, ReasoningEffort>,
) -> ModelUpgrade {
ModelUpgrade {
id: "gpt-5.2-codex".to_string(),
reasoning_effort_mapping: None,
migration_config_key: "gpt-5.2-codex".to_string(),
reasoning_effort_mapping: Some(reasoning_effort_mapping),
migration_config_key: migration_config_key.to_string(),
model_link: Some("https://openai.com/index/introducing-gpt-5-2-codex".to_string()),
upgrade_copy: Some(
"Codex is now powered by gpt-5.2-codex, our latest frontier agentic coding model. It is smarter and faster than its predecessors and capable of long-running project-scale work."
@@ -356,12 +224,12 @@ fn gpt_52_codex_upgrade() -> ModelUpgrade {
}
pub(super) fn builtin_model_presets(_auth_mode: Option<AuthMode>) -> Vec<ModelPreset> {
PRESETS.iter().cloned().collect()
BUILTIN_PRESETS.iter().cloned().collect()
}
#[cfg(any(test, feature = "test-support"))]
pub fn all_model_presets() -> &'static Vec<ModelPreset> {
&PRESETS
&BUILTIN_PRESETS
}
#[cfg(test)]
@@ -370,7 +238,10 @@ mod tests {
#[test]
fn only_one_default_model_is_configured() {
let default_models = PRESETS.iter().filter(|preset| preset.is_default).count();
assert!(default_models == 1);
let default_models = builtin_model_presets(None)
.iter()
.filter(|preset| preset.is_default)
.count();
assert_eq!(default_models, 1);
}
}

View File

@@ -3,15 +3,8 @@ use codex_core::CodexAuth;
use codex_core::ThreadManager;
use codex_core::built_in_model_providers;
use codex_core::models_manager::manager::RefreshStrategy;
use codex_protocol::openai_models::ModelPreset;
use codex_protocol::openai_models::ModelUpgrade;
use codex_protocol::openai_models::ReasoningEffort;
use codex_protocol::openai_models::ReasoningEffortPreset;
use codex_protocol::openai_models::default_input_modalities;
use core_test_support::load_default_config_for_test;
use indoc::indoc;
use pretty_assertions::assert_eq;
use std::collections::HashMap;
use tempfile::tempdir;
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
@@ -27,7 +20,11 @@ async fn list_models_returns_api_key_models() -> Result<()> {
.await;
let expected_models = expected_models_for_api_key();
assert_eq!(expected_models, models);
let actual_models = models
.iter()
.map(|model| model.model.as_str())
.collect::<Vec<_>>();
assert_eq!(expected_models, actual_models);
Ok(())
}
@@ -45,485 +42,31 @@ async fn list_models_returns_chatgpt_models() -> Result<()> {
.await;
let expected_models = expected_models_for_chatgpt();
assert_eq!(expected_models, models);
let actual_models = models
.iter()
.map(|model| model.model.as_str())
.collect::<Vec<_>>();
assert_eq!(expected_models, actual_models);
Ok(())
}
fn expected_models_for_api_key() -> Vec<ModelPreset> {
fn expected_models_for_api_key() -> Vec<&'static str> {
vec![
gpt_52_codex(),
gpt_5_2(),
gpt_5_1_codex_max(),
gpt_5_1_codex(),
gpt_5_1_codex_mini(),
gpt_5_1(),
gpt_5_codex(),
gpt_5(),
gpt_5_codex_mini(),
bengalfox(),
boomslang(),
"gpt-5.2-codex",
"gpt-5.2",
"gpt-5.1-codex-max",
"gpt-5.1-codex",
"gpt-5.1-codex-mini",
"gpt-5.1",
"gpt-5-codex",
"gpt-5",
"gpt-5-codex-mini",
"bengalfox",
"boomslang",
]
}
fn expected_models_for_chatgpt() -> Vec<ModelPreset> {
/// ChatGPT auth is expected to surface the same model list as API-key auth.
fn expected_models_for_chatgpt() -> Vec<&'static str> {
    expected_models_for_api_key()
}
/// Expected preset fixture for `gpt-5.2-codex`, the default model.
/// NOTE(review): `supports_personality` is `false` here, but the production
/// preset for gpt-5.2-codex sets it to `true` — confirm which is intended.
fn gpt_52_codex() -> ModelPreset {
    ModelPreset {
        id: "gpt-5.2-codex".to_string(),
        model: "gpt-5.2-codex".to_string(),
        display_name: "gpt-5.2-codex".to_string(),
        description: "Latest frontier agentic coding model.".to_string(),
        default_reasoning_effort: ReasoningEffort::Medium,
        supported_reasoning_efforts: vec![
            effort(
                ReasoningEffort::Low,
                "Fast responses with lighter reasoning",
            ),
            effort(
                ReasoningEffort::Medium,
                "Balances speed and reasoning depth for everyday tasks",
            ),
            effort(
                ReasoningEffort::High,
                "Greater reasoning depth for complex problems",
            ),
            effort(
                ReasoningEffort::XHigh,
                "Extra high reasoning depth for complex problems",
            ),
        ],
        supports_personality: false,
        is_default: true,
        upgrade: None,
        show_in_picker: true,
        supported_in_api: true,
        input_modalities: default_input_modalities(),
    }
}
/// Expected preset fixture for `gpt-5.1-codex-max`: shown in the picker and
/// carrying an upgrade path onto gpt-5.2-codex.
fn gpt_5_1_codex_max() -> ModelPreset {
    ModelPreset {
        id: "gpt-5.1-codex-max".to_string(),
        model: "gpt-5.1-codex-max".to_string(),
        display_name: "gpt-5.1-codex-max".to_string(),
        description: "Codex-optimized flagship for deep and fast reasoning.".to_string(),
        default_reasoning_effort: ReasoningEffort::Medium,
        supported_reasoning_efforts: vec![
            effort(
                ReasoningEffort::Low,
                "Fast responses with lighter reasoning",
            ),
            effort(
                ReasoningEffort::Medium,
                "Balances speed and reasoning depth for everyday tasks",
            ),
            effort(
                ReasoningEffort::High,
                "Greater reasoning depth for complex problems",
            ),
            effort(
                ReasoningEffort::XHigh,
                "Extra high reasoning depth for complex problems",
            ),
        ],
        supports_personality: false,
        is_default: false,
        // XHigh is the only legacy effort that stays XHigh after upgrade.
        upgrade: Some(gpt52_codex_upgrade(
            "gpt-5.1-codex-max",
            HashMap::from([
                (ReasoningEffort::Low, ReasoningEffort::Low),
                (ReasoningEffort::None, ReasoningEffort::Low),
                (ReasoningEffort::Medium, ReasoningEffort::Medium),
                (ReasoningEffort::High, ReasoningEffort::High),
                (ReasoningEffort::Minimal, ReasoningEffort::Low),
                (ReasoningEffort::XHigh, ReasoningEffort::XHigh),
            ]),
        )),
        show_in_picker: true,
        supported_in_api: true,
        input_modalities: default_input_modalities(),
    }
}
/// Expected preset fixture for `gpt-5.1-codex-mini`: picker-visible, with all
/// lower efforts collapsing to Medium on upgrade to gpt-5.2-codex.
fn gpt_5_1_codex_mini() -> ModelPreset {
    ModelPreset {
        id: "gpt-5.1-codex-mini".to_string(),
        model: "gpt-5.1-codex-mini".to_string(),
        display_name: "gpt-5.1-codex-mini".to_string(),
        description: "Optimized for codex. Cheaper, faster, but less capable.".to_string(),
        default_reasoning_effort: ReasoningEffort::Medium,
        supported_reasoning_efforts: vec![
            effort(
                ReasoningEffort::Medium,
                "Dynamically adjusts reasoning based on the task",
            ),
            effort(
                ReasoningEffort::High,
                "Maximizes reasoning depth for complex or ambiguous problems",
            ),
        ],
        supports_personality: false,
        is_default: false,
        upgrade: Some(gpt52_codex_upgrade(
            "gpt-5.1-codex-mini",
            HashMap::from([
                (ReasoningEffort::High, ReasoningEffort::High),
                (ReasoningEffort::XHigh, ReasoningEffort::High),
                (ReasoningEffort::Minimal, ReasoningEffort::Medium),
                (ReasoningEffort::None, ReasoningEffort::Medium),
                (ReasoningEffort::Low, ReasoningEffort::Medium),
                (ReasoningEffort::Medium, ReasoningEffort::Medium),
            ]),
        )),
        show_in_picker: true,
        supported_in_api: true,
        input_modalities: default_input_modalities(),
    }
}
/// Expected preset fixture for `gpt-5.2` (general-purpose, picker-visible).
fn gpt_5_2() -> ModelPreset {
    ModelPreset {
        id: "gpt-5.2".to_string(),
        model: "gpt-5.2".to_string(),
        display_name: "gpt-5.2".to_string(),
        description:
            "Latest frontier model with improvements across knowledge, reasoning and coding"
                .to_string(),
        default_reasoning_effort: ReasoningEffort::Medium,
        supported_reasoning_efforts: vec![
            effort(
                ReasoningEffort::Low,
                "Balances speed with some reasoning; useful for straightforward queries and short explanations",
            ),
            effort(
                ReasoningEffort::Medium,
                "Provides a solid balance of reasoning depth and latency for general-purpose tasks",
            ),
            effort(
                ReasoningEffort::High,
                "Maximizes reasoning depth for complex or ambiguous problems",
            ),
            // NOTE(review): this blurb omits "depth" ("Extra high reasoning for
            // complex problems") unlike the production preset's wording
            // ("Extra high reasoning depth for complex problems") — confirm
            // whether the fixture is intentionally different.
            effort(
                ReasoningEffort::XHigh,
                "Extra high reasoning for complex problems",
            ),
        ],
        supports_personality: false,
        is_default: false,
        upgrade: Some(gpt52_codex_upgrade(
            "gpt-5.2",
            HashMap::from([
                (ReasoningEffort::High, ReasoningEffort::High),
                (ReasoningEffort::None, ReasoningEffort::Low),
                (ReasoningEffort::Minimal, ReasoningEffort::Low),
                (ReasoningEffort::Low, ReasoningEffort::Low),
                (ReasoningEffort::Medium, ReasoningEffort::Medium),
                (ReasoningEffort::XHigh, ReasoningEffort::XHigh),
            ]),
        )),
        show_in_picker: true,
        supported_in_api: true,
        input_modalities: default_input_modalities(),
    }
}
/// Expected preset fixture for `bengalfox` — presumably an internal model
/// codename (hidden from the picker, no upgrade path); confirm with owners.
fn bengalfox() -> ModelPreset {
    ModelPreset {
        id: "bengalfox".to_string(),
        model: "bengalfox".to_string(),
        display_name: "bengalfox".to_string(),
        description: "bengalfox".to_string(),
        default_reasoning_effort: ReasoningEffort::Medium,
        supported_reasoning_efforts: vec![
            effort(
                ReasoningEffort::Low,
                "Fast responses with lighter reasoning",
            ),
            effort(
                ReasoningEffort::Medium,
                "Balances speed and reasoning depth for everyday tasks",
            ),
            effort(
                ReasoningEffort::High,
                "Greater reasoning depth for complex problems",
            ),
            effort(
                ReasoningEffort::XHigh,
                "Extra high reasoning depth for complex problems",
            ),
        ],
        supports_personality: true,
        is_default: false,
        upgrade: None,
        show_in_picker: false,
        supported_in_api: true,
        input_modalities: default_input_modalities(),
    }
}
/// Expected preset fixture for `boomslang` — presumably an internal model
/// codename (hidden from the picker, no upgrade path); confirm with owners.
fn boomslang() -> ModelPreset {
    ModelPreset {
        id: "boomslang".to_string(),
        model: "boomslang".to_string(),
        display_name: "boomslang".to_string(),
        description: "boomslang".to_string(),
        default_reasoning_effort: ReasoningEffort::Medium,
        supported_reasoning_efforts: vec![
            effort(
                ReasoningEffort::Low,
                "Balances speed with some reasoning; useful for straightforward queries and short explanations",
            ),
            effort(
                ReasoningEffort::Medium,
                "Provides a solid balance of reasoning depth and latency for general-purpose tasks",
            ),
            effort(
                ReasoningEffort::High,
                "Maximizes reasoning depth for complex or ambiguous problems",
            ),
            effort(
                ReasoningEffort::XHigh,
                "Extra high reasoning depth for complex problems",
            ),
        ],
        supports_personality: false,
        is_default: false,
        upgrade: None,
        show_in_picker: false,
        supported_in_api: true,
        input_modalities: default_input_modalities(),
    }
}
/// Expected preset fixture for the deprecated `gpt-5-codex` (hidden from the
/// picker, upgrades to gpt-5.2-codex).
fn gpt_5_codex() -> ModelPreset {
    ModelPreset {
        id: "gpt-5-codex".to_string(),
        model: "gpt-5-codex".to_string(),
        display_name: "gpt-5-codex".to_string(),
        description: "Optimized for codex.".to_string(),
        default_reasoning_effort: ReasoningEffort::Medium,
        supported_reasoning_efforts: vec![
            effort(
                ReasoningEffort::Low,
                "Fastest responses with limited reasoning",
            ),
            effort(
                ReasoningEffort::Medium,
                "Dynamically adjusts reasoning based on the task",
            ),
            effort(
                ReasoningEffort::High,
                "Maximizes reasoning depth for complex or ambiguous problems",
            ),
        ],
        supports_personality: false,
        is_default: false,
        upgrade: Some(gpt52_codex_upgrade(
            "gpt-5-codex",
            HashMap::from([
                (ReasoningEffort::Minimal, ReasoningEffort::Low),
                (ReasoningEffort::High, ReasoningEffort::High),
                (ReasoningEffort::Medium, ReasoningEffort::Medium),
                (ReasoningEffort::XHigh, ReasoningEffort::High),
                (ReasoningEffort::None, ReasoningEffort::Low),
                (ReasoningEffort::Low, ReasoningEffort::Low),
            ]),
        )),
        show_in_picker: false,
        supported_in_api: true,
        input_modalities: default_input_modalities(),
    }
}
/// Expected preset fixture for the deprecated `gpt-5-codex-mini` (hidden from
/// the picker; all lower efforts collapse to Medium on upgrade).
fn gpt_5_codex_mini() -> ModelPreset {
    ModelPreset {
        id: "gpt-5-codex-mini".to_string(),
        model: "gpt-5-codex-mini".to_string(),
        display_name: "gpt-5-codex-mini".to_string(),
        description: "Optimized for codex. Cheaper, faster, but less capable.".to_string(),
        default_reasoning_effort: ReasoningEffort::Medium,
        supported_reasoning_efforts: vec![
            effort(
                ReasoningEffort::Medium,
                "Dynamically adjusts reasoning based on the task",
            ),
            effort(
                ReasoningEffort::High,
                "Maximizes reasoning depth for complex or ambiguous problems",
            ),
        ],
        supports_personality: false,
        is_default: false,
        upgrade: Some(gpt52_codex_upgrade(
            "gpt-5-codex-mini",
            HashMap::from([
                (ReasoningEffort::None, ReasoningEffort::Medium),
                (ReasoningEffort::XHigh, ReasoningEffort::High),
                (ReasoningEffort::High, ReasoningEffort::High),
                (ReasoningEffort::Low, ReasoningEffort::Medium),
                (ReasoningEffort::Medium, ReasoningEffort::Medium),
                (ReasoningEffort::Minimal, ReasoningEffort::Medium),
            ]),
        )),
        show_in_picker: false,
        supported_in_api: true,
        input_modalities: default_input_modalities(),
    }
}
/// Expected preset fixture for the deprecated `gpt-5.1-codex` (hidden from the
/// picker, upgrades to gpt-5.2-codex).
fn gpt_5_1_codex() -> ModelPreset {
    ModelPreset {
        id: "gpt-5.1-codex".to_string(),
        model: "gpt-5.1-codex".to_string(),
        display_name: "gpt-5.1-codex".to_string(),
        description: "Optimized for codex.".to_string(),
        default_reasoning_effort: ReasoningEffort::Medium,
        supported_reasoning_efforts: vec![
            effort(
                ReasoningEffort::Low,
                "Fastest responses with limited reasoning",
            ),
            effort(
                ReasoningEffort::Medium,
                "Dynamically adjusts reasoning based on the task",
            ),
            effort(
                ReasoningEffort::High,
                "Maximizes reasoning depth for complex or ambiguous problems",
            ),
        ],
        supports_personality: false,
        is_default: false,
        upgrade: Some(gpt52_codex_upgrade(
            "gpt-5.1-codex",
            HashMap::from([
                (ReasoningEffort::Minimal, ReasoningEffort::Low),
                (ReasoningEffort::Low, ReasoningEffort::Low),
                (ReasoningEffort::Medium, ReasoningEffort::Medium),
                (ReasoningEffort::None, ReasoningEffort::Low),
                (ReasoningEffort::High, ReasoningEffort::High),
                (ReasoningEffort::XHigh, ReasoningEffort::High),
            ]),
        )),
        show_in_picker: false,
        supported_in_api: true,
        input_modalities: default_input_modalities(),
    }
}
/// Expected preset fixture for the deprecated `gpt-5` (hidden from the picker;
/// the only legacy model whose Minimal/None efforts map to Minimal on upgrade).
fn gpt_5() -> ModelPreset {
    ModelPreset {
        id: "gpt-5".to_string(),
        model: "gpt-5".to_string(),
        display_name: "gpt-5".to_string(),
        description: "Broad world knowledge with strong general reasoning.".to_string(),
        default_reasoning_effort: ReasoningEffort::Medium,
        supported_reasoning_efforts: vec![
            effort(
                ReasoningEffort::Minimal,
                "Fastest responses with little reasoning",
            ),
            effort(
                ReasoningEffort::Low,
                "Balances speed with some reasoning; useful for straightforward queries and short explanations",
            ),
            effort(
                ReasoningEffort::Medium,
                "Provides a solid balance of reasoning depth and latency for general-purpose tasks",
            ),
            effort(
                ReasoningEffort::High,
                "Maximizes reasoning depth for complex or ambiguous problems",
            ),
        ],
        supports_personality: false,
        is_default: false,
        upgrade: Some(gpt52_codex_upgrade(
            "gpt-5",
            HashMap::from([
                (ReasoningEffort::XHigh, ReasoningEffort::High),
                (ReasoningEffort::Minimal, ReasoningEffort::Minimal),
                (ReasoningEffort::Low, ReasoningEffort::Low),
                (ReasoningEffort::None, ReasoningEffort::Minimal),
                (ReasoningEffort::High, ReasoningEffort::High),
                (ReasoningEffort::Medium, ReasoningEffort::Medium),
            ]),
        )),
        show_in_picker: false,
        supported_in_api: true,
        input_modalities: default_input_modalities(),
    }
}
/// Expected preset fixture for the deprecated `gpt-5.1` (hidden from the
/// picker, upgrades to gpt-5.2-codex).
fn gpt_5_1() -> ModelPreset {
    ModelPreset {
        id: "gpt-5.1".to_string(),
        model: "gpt-5.1".to_string(),
        display_name: "gpt-5.1".to_string(),
        description: "Broad world knowledge with strong general reasoning.".to_string(),
        default_reasoning_effort: ReasoningEffort::Medium,
        supported_reasoning_efforts: vec![
            effort(
                ReasoningEffort::Low,
                "Balances speed with some reasoning; useful for straightforward queries and short explanations",
            ),
            effort(
                ReasoningEffort::Medium,
                "Provides a solid balance of reasoning depth and latency for general-purpose tasks",
            ),
            effort(
                ReasoningEffort::High,
                "Maximizes reasoning depth for complex or ambiguous problems",
            ),
        ],
        supports_personality: false,
        is_default: false,
        upgrade: Some(gpt52_codex_upgrade(
            "gpt-5.1",
            HashMap::from([
                (ReasoningEffort::None, ReasoningEffort::Low),
                (ReasoningEffort::Medium, ReasoningEffort::Medium),
                (ReasoningEffort::High, ReasoningEffort::High),
                (ReasoningEffort::XHigh, ReasoningEffort::High),
                (ReasoningEffort::Low, ReasoningEffort::Low),
                (ReasoningEffort::Minimal, ReasoningEffort::Low),
            ]),
        )),
        show_in_picker: false,
        supported_in_api: true,
        input_modalities: default_input_modalities(),
    }
}
/// Builds the expected `ModelUpgrade` pointing a legacy model at gpt-5.2-codex.
///
/// `migration_config_key` is the per-model config key; `reasoning_effort_mapping`
/// maps each legacy effort to the effort used after upgrading. The `{model_to}`
/// and `{model_from}` placeholders in the markdown are substituted elsewhere —
/// they are part of the runtime string and must not be edited here.
fn gpt52_codex_upgrade(
    migration_config_key: &str,
    reasoning_effort_mapping: HashMap<ReasoningEffort, ReasoningEffort>,
) -> ModelUpgrade {
    ModelUpgrade {
        id: "gpt-5.2-codex".to_string(),
        reasoning_effort_mapping: Some(reasoning_effort_mapping),
        migration_config_key: migration_config_key.to_string(),
        model_link: None,
        upgrade_copy: None,
        migration_markdown: Some(
            indoc! {r#"
                **Codex just got an upgrade. Introducing {model_to}.**
                Codex is now powered by {model_to}, our latest frontier agentic coding model. It is smarter and faster than its predecessors and capable of long-running project-scale work. Learn more about {model_to} at https://openai.com/index/introducing-gpt-5-2-codex
                You can continue using {model_from} if you prefer.
            "#}
            .to_string(),
        ),
    }
}
/// Convenience constructor pairing an effort level with its description blurb.
fn effort(reasoning_effort: ReasoningEffort, description: &str) -> ReasoningEffortPreset {
    let description = description.to_owned();
    ReasoningEffortPreset {
        description,
        effort: reasoning_effort,
    }
}