Enable model upgrade popup even when selected model is no longer in picker (#8802)

With `config.toml`:
```
model = "gpt-5.1-codex"
```
(where `gpt-5.1-codex` has `show_in_picker: false` in
[`model_presets.rs`](https://github.com/openai/codex/blob/main/codex-rs/core/src/models_manager/model_presets.rs);
this happens if the user hasn't used codex in a while so they didn't see
the popup before their model was changed to `show_in_picker: false`)

Previously, the upgrade popup did not appear because `gpt-5.1-codex` was
filtered out of the model list in core. The filtering is now done
downstream in the TUI and app-server, so the model upgrade popup is shown:

<img width="1503" height="227" alt="Screenshot 2026-01-06 at 5 04 37 PM"
src="https://github.com/user-attachments/assets/26144cc2-0b3f-4674-ac17-e476781ec548"
/>
This commit is contained in:
charley-oai
2026-01-06 19:32:27 -08:00
committed by GitHub
parent 8b4d27dfcd
commit 3389465c8d
16 changed files with 535 additions and 32 deletions

View File

@@ -42,7 +42,18 @@ async fn list_models_returns_chatgpt_models() -> Result<()> {
}
/// Models expected from `list_models` when authenticated with an API key.
///
/// Hidden (`show_in_picker: false`) presets are included: picker filtering
/// now happens downstream in the TUI/app-server, so the manager returns the
/// full list here.
fn expected_models_for_api_key() -> Vec<ModelPreset> {
    vec![
        gpt_5_1_codex_max(),
        gpt_5_1_codex_mini(),
        gpt_5_2(),
        bengalfox(),
        boomslang(),
        gpt_5_codex(),
        gpt_5_codex_mini(),
        gpt_5_1_codex(),
        gpt_5(),
        gpt_5_1(),
    ]
}
fn expected_models_for_chatgpt() -> Vec<ModelPreset> {
@@ -53,6 +64,13 @@ fn expected_models_for_chatgpt() -> Vec<ModelPreset> {
gpt_5_1_codex_max,
gpt_5_1_codex_mini(),
gpt_5_2(),
bengalfox(),
boomslang(),
gpt_5_codex(),
gpt_5_codex_mini(),
gpt_5_1_codex(),
gpt_5(),
gpt_5_1(),
]
}
@@ -168,7 +186,7 @@ fn gpt_5_2() -> ModelPreset {
),
effort(
ReasoningEffort::XHigh,
"Extra high reasoning for complex problems",
"Extra high reasoning depth for complex problems",
),
],
is_default: false,
@@ -178,6 +196,210 @@ fn gpt_5_2() -> ModelPreset {
}
}
/// Preset for the `bengalfox` model, which is excluded from the picker
/// (`show_in_picker: false`) but still listed by the manager.
fn bengalfox() -> ModelPreset {
    // Every identifier-like field of this stub preset uses the same name.
    let name = "bengalfox";
    let efforts = vec![
        effort(ReasoningEffort::Low, "Fast responses with lighter reasoning"),
        effort(
            ReasoningEffort::Medium,
            "Balances speed and reasoning depth for everyday tasks",
        ),
        effort(
            ReasoningEffort::High,
            "Greater reasoning depth for complex problems",
        ),
        effort(
            ReasoningEffort::XHigh,
            "Extra high reasoning depth for complex problems",
        ),
    ];
    ModelPreset {
        id: name.to_string(),
        model: name.to_string(),
        display_name: name.to_string(),
        description: name.to_string(),
        default_reasoning_effort: ReasoningEffort::Medium,
        supported_reasoning_efforts: efforts,
        is_default: false,
        upgrade: None,
        show_in_picker: false,
        supported_in_api: true,
    }
}
/// Preset for the `boomslang` model, which is excluded from the picker
/// (`show_in_picker: false`) but still listed by the manager.
fn boomslang() -> ModelPreset {
    // Every identifier-like field of this stub preset uses the same name.
    let name = "boomslang";
    let efforts = vec![
        effort(
            ReasoningEffort::Low,
            "Balances speed with some reasoning; useful for straightforward queries and short explanations",
        ),
        effort(
            ReasoningEffort::Medium,
            "Provides a solid balance of reasoning depth and latency for general-purpose tasks",
        ),
        effort(
            ReasoningEffort::High,
            "Maximizes reasoning depth for complex or ambiguous problems",
        ),
        effort(
            ReasoningEffort::XHigh,
            "Extra high reasoning depth for complex problems",
        ),
    ];
    ModelPreset {
        id: name.to_string(),
        model: name.to_string(),
        display_name: name.to_string(),
        description: name.to_string(),
        default_reasoning_effort: ReasoningEffort::Medium,
        supported_reasoning_efforts: efforts,
        is_default: false,
        upgrade: None,
        show_in_picker: false,
        supported_in_api: true,
    }
}
/// Preset for `gpt-5-codex`: hidden from the picker but still listed,
/// with an upgrade pointing at `gpt-5.2-codex`.
fn gpt_5_codex() -> ModelPreset {
    let id = "gpt-5-codex";
    let efforts = vec![
        effort(
            ReasoningEffort::Low,
            "Fastest responses with limited reasoning",
        ),
        effort(
            ReasoningEffort::Medium,
            "Dynamically adjusts reasoning based on the task",
        ),
        effort(
            ReasoningEffort::High,
            "Maximizes reasoning depth for complex or ambiguous problems",
        ),
    ];
    ModelPreset {
        id: id.to_string(),
        model: id.to_string(),
        display_name: id.to_string(),
        description: "Optimized for codex.".to_string(),
        default_reasoning_effort: ReasoningEffort::Medium,
        supported_reasoning_efforts: efforts,
        is_default: false,
        upgrade: Some(gpt52_codex_upgrade()),
        show_in_picker: false,
        supported_in_api: true,
    }
}
/// Preset for `gpt-5-codex-mini`: hidden from the picker but still listed,
/// with an upgrade pointing at `gpt-5.2-codex`.
fn gpt_5_codex_mini() -> ModelPreset {
    let id = "gpt-5-codex-mini";
    let efforts = vec![
        effort(
            ReasoningEffort::Medium,
            "Dynamically adjusts reasoning based on the task",
        ),
        effort(
            ReasoningEffort::High,
            "Maximizes reasoning depth for complex or ambiguous problems",
        ),
    ];
    ModelPreset {
        id: id.to_string(),
        model: id.to_string(),
        display_name: id.to_string(),
        description: "Optimized for codex. Cheaper, faster, but less capable.".to_string(),
        default_reasoning_effort: ReasoningEffort::Medium,
        supported_reasoning_efforts: efforts,
        is_default: false,
        upgrade: Some(gpt52_codex_upgrade()),
        show_in_picker: false,
        supported_in_api: true,
    }
}
/// Preset for `gpt-5.1-codex`: hidden from the picker but still listed,
/// with an upgrade pointing at `gpt-5.2-codex`.
fn gpt_5_1_codex() -> ModelPreset {
    let id = "gpt-5.1-codex";
    let efforts = vec![
        effort(
            ReasoningEffort::Low,
            "Fastest responses with limited reasoning",
        ),
        effort(
            ReasoningEffort::Medium,
            "Dynamically adjusts reasoning based on the task",
        ),
        effort(
            ReasoningEffort::High,
            "Maximizes reasoning depth for complex or ambiguous problems",
        ),
    ];
    ModelPreset {
        id: id.to_string(),
        model: id.to_string(),
        display_name: id.to_string(),
        description: "Optimized for codex.".to_string(),
        default_reasoning_effort: ReasoningEffort::Medium,
        supported_reasoning_efforts: efforts,
        is_default: false,
        upgrade: Some(gpt52_codex_upgrade()),
        show_in_picker: false,
        supported_in_api: true,
    }
}
/// Preset for `gpt-5`: hidden from the picker but still listed,
/// with an upgrade pointing at `gpt-5.2-codex`.
fn gpt_5() -> ModelPreset {
    let id = "gpt-5";
    let efforts = vec![
        effort(
            ReasoningEffort::Minimal,
            "Fastest responses with little reasoning",
        ),
        effort(
            ReasoningEffort::Low,
            "Balances speed with some reasoning; useful for straightforward queries and short explanations",
        ),
        effort(
            ReasoningEffort::Medium,
            "Provides a solid balance of reasoning depth and latency for general-purpose tasks",
        ),
        effort(
            ReasoningEffort::High,
            "Maximizes reasoning depth for complex or ambiguous problems",
        ),
    ];
    ModelPreset {
        id: id.to_string(),
        model: id.to_string(),
        display_name: id.to_string(),
        description: "Broad world knowledge with strong general reasoning.".to_string(),
        default_reasoning_effort: ReasoningEffort::Medium,
        supported_reasoning_efforts: efforts,
        is_default: false,
        upgrade: Some(gpt52_codex_upgrade()),
        show_in_picker: false,
        supported_in_api: true,
    }
}
/// Preset for `gpt-5.1`: hidden from the picker but still listed,
/// with an upgrade pointing at `gpt-5.2-codex`.
fn gpt_5_1() -> ModelPreset {
    let id = "gpt-5.1";
    let efforts = vec![
        effort(
            ReasoningEffort::Low,
            "Balances speed with some reasoning; useful for straightforward queries and short explanations",
        ),
        effort(
            ReasoningEffort::Medium,
            "Provides a solid balance of reasoning depth and latency for general-purpose tasks",
        ),
        effort(
            ReasoningEffort::High,
            "Maximizes reasoning depth for complex or ambiguous problems",
        ),
    ];
    ModelPreset {
        id: id.to_string(),
        model: id.to_string(),
        display_name: id.to_string(),
        description: "Broad world knowledge with strong general reasoning.".to_string(),
        default_reasoning_effort: ReasoningEffort::Medium,
        supported_reasoning_efforts: efforts,
        is_default: false,
        upgrade: Some(gpt52_codex_upgrade()),
        show_in_picker: false,
        supported_in_api: true,
    }
}
fn gpt52_codex_upgrade() -> codex_protocol::openai_models::ModelUpgrade {
codex_protocol::openai_models::ModelUpgrade {
id: "gpt-5.2-codex".to_string(),

View File

@@ -381,12 +381,11 @@ async fn remote_models_hide_picker_only_models() -> Result<()> {
assert_eq!(selected, "gpt-5.2-codex");
let available = manager.list_models(&config).await;
assert!(
available
.iter()
.all(|model| model.model != "codex-auto-balanced"),
"hidden models should not appear in the picker list"
);
let hidden = available
.iter()
.find(|model| model.model == "codex-auto-balanced")
.expect("hidden remote model should be listed");
assert!(!hidden.show_in_picker, "hidden models should remain hidden");
Ok(())
}