fix: typos in model picker (#6859)

# External (non-OpenAI) Pull Request Requirements

Before opening this Pull Request, please read the dedicated
"Contributing" markdown file or your PR may be closed:
https://github.com/openai/codex/blob/main/docs/contributing.md

If your PR conforms to our contribution guidelines, replace this text
with a detailed and high-quality description of your changes.

Include a link to a bug report or enhancement request.
This commit is contained in:
Ahmed Ibrahim
2025-11-18 22:29:02 -08:00
committed by GitHub
parent 030d1d5b1c
commit 793063070b
33 changed files with 563 additions and 180 deletions

View File

@@ -2,13 +2,11 @@
source: tui/src/chatwidget/tests.rs
expression: popup
---
Select Reasoning Level for gpt-5.1-codex
Select Reasoning Level for arcticfox
1. Low Fastest responses with limited reasoning
2. Medium (default) Dynamically adjusts reasoning based on the task
3. High (current) Maximizes reasoning depth for complex or ambiguous
problems
⚠ High reasoning effort can quickly consume Plus plan
rate limits.
1. Low Fast responses with lighter reasoning
2. Medium (default) Balances speed and reasoning depth for everyday tasks
3. High (current) Maximizes reasoning depth for complex problems
4. Extra high Extra high reasoning depth for complex problems
Press enter to confirm or esc to go back

View File

@@ -0,0 +1,16 @@
---
source: tui/src/chatwidget/tests.rs
assertion_line: 1548
expression: popup
---
Select Reasoning Level for arcticfox
1. Low Fast responses with lighter reasoning
2. Medium (default) Balances speed and reasoning depth for everyday
tasks
3. High Maximizes reasoning depth for complex problems
4. Extra high (current) Extra high reasoning depth for complex problems
⚠ Extra high reasoning effort can quickly consume
Plus plan rate limits.
Press enter to confirm or esc to go back

View File

@@ -1526,19 +1526,59 @@ fn startup_prompts_for_windows_sandbox_when_agent_requested() {
// NOTE(review): this fragment is a rendered diff with the +/- markers stripped,
// so BOTH the removed lines (referencing "gpt-5.1-codex") and the added lines
// (referencing "arcticfox") appear interleaved below. As written it is not
// valid Rust; in the post-change file only the "arcticfox" lines remain.
// Snapshot test: opens the reasoning-level popup for the configured model and
// snapshots the rendered bottom popup at width 80.
fn model_reasoning_selection_popup_snapshot() {
let (mut chat, _rx, _op_rx) = make_chatwidget_manual();
chat.config.model = "gpt-5.1-codex".to_string();
chat.config.model = "arcticfox".to_string();
chat.config.model_reasoning_effort = Some(ReasoningEffortConfig::High);
let preset = builtin_model_presets(None)
.into_iter()
// NOTE(review): the next two lines are the pre-change (removed) predicate…
.find(|preset| preset.model == "gpt-5.1-codex")
.expect("gpt-5.1-codex preset");
// …and these two are the post-change (added) replacement.
.find(|preset| preset.model == "arcticfox")
.expect("arcticfox preset");
chat.open_reasoning_popup(preset);
let popup = render_bottom_popup(&chat, 80);
assert_snapshot!("model_reasoning_selection_popup", popup);
}
#[test]
fn model_reasoning_selection_popup_extra_high_warning_snapshot() {
    // When the current reasoning effort is XHigh, the picker should surface
    // the extra-high rate-limit warning; pin the rendered output as a snapshot.
    let (mut chat, _rx, _op_rx) = make_chatwidget_manual();
    chat.config.model = "arcticfox".to_string();
    chat.config.model_reasoning_effort = Some(ReasoningEffortConfig::XHigh);

    // Resolve the built-in preset for the configured model.
    let arcticfox = builtin_model_presets(None)
        .into_iter()
        .find(|p| p.model == "arcticfox")
        .expect("arcticfox preset");
    chat.open_reasoning_popup(arcticfox);

    let rendered = render_bottom_popup(&chat, 80);
    assert_snapshot!("model_reasoning_selection_popup_extra_high_warning", rendered);
}
#[test]
fn reasoning_popup_shows_extra_high_with_space() {
    // Regression check for the label typo: the popup must say "Extra high"
    // (two words), never the fused "Extrahigh".
    let (mut chat, _rx, _op_rx) = make_chatwidget_manual();
    chat.config.model = "arcticfox".to_string();

    let selected = builtin_model_presets(None)
        .into_iter()
        .find(|candidate| candidate.model == "arcticfox")
        .expect("arcticfox preset");
    chat.open_reasoning_popup(selected);

    // Render wide enough (120 cols) that the label is not wrapped mid-word.
    let popup = render_bottom_popup(&chat, 120);
    assert!(
        popup.contains("Extra high"),
        "expected popup to include 'Extra high'; popup: {popup}"
    );
    assert!(
        !popup.contains("Extrahigh"),
        "expected popup not to include 'Extrahigh'; popup: {popup}"
    );
}
#[test]
// NOTE(review): this definition is truncated in the scraped diff — the hunk
// header below elides the original lines between the function opening and the
// preset-literal tail, so the body shown here is incomplete.
fn single_reasoning_option_skips_selection() {
let (mut chat, mut rx, _op_rx) = make_chatwidget_manual();
@@ -1556,6 +1596,7 @@ fn single_reasoning_option_skips_selection() {
supported_reasoning_efforts: &SINGLE_EFFORT,
is_default: false,
upgrade: None,
// NOTE(review): `show_in_picker: true` is the field added by this diff;
// presumably a new required field on the preset struct — confirm upstream.
show_in_picker: true,
};
chat.open_reasoning_popup(preset);