NUX for gpt5.1 (#6561)

- Introduces a screen to inform users of model changes.
- The config key name is passed in so this component can be reused for
future model migrations.
This commit is contained in:
Ahmed Ibrahim
2025-11-12 17:24:21 -08:00
committed by GitHub
parent 964220ac94
commit e63ab0dd65
12 changed files with 512 additions and 1 deletions

View File

@@ -27,6 +27,8 @@ pub struct ModelPreset {
pub supported_reasoning_efforts: &'static [ReasoningEffortPreset],
/// Whether this is the default model for new users.
pub is_default: bool,
/// recommended upgrade model
pub recommended_upgrade_model: Option<&'static str>,
}
const PRESETS: &[ModelPreset] = &[
@@ -51,6 +53,7 @@ const PRESETS: &[ModelPreset] = &[
},
],
is_default: true,
recommended_upgrade_model: Some("gpt-5.1-codex"),
},
ModelPreset {
id: "gpt-5-codex-mini",
@@ -69,6 +72,7 @@ const PRESETS: &[ModelPreset] = &[
},
],
is_default: false,
recommended_upgrade_model: Some("gpt-5.1-codex-mini"),
},
ModelPreset {
id: "gpt-5",
@@ -95,6 +99,7 @@ const PRESETS: &[ModelPreset] = &[
},
],
is_default: false,
recommended_upgrade_model: Some("gpt-5.1"),
},
];
@@ -107,6 +112,10 @@ pub fn builtin_model_presets(auth_mode: Option<AuthMode>) -> Vec<ModelPreset> {
.collect()
}
/// Returns the full, unfiltered list of built-in model presets.
pub fn all_model_presets() -> &'static [ModelPreset] {
    PRESETS
}
#[cfg(test)]
mod tests {
use super::*;

View File

@@ -29,6 +29,8 @@ pub enum ConfigEdit {
SetNoticeHideRateLimitModelNudge(bool),
/// Toggle the Windows onboarding acknowledgement flag.
SetWindowsWslSetupAcknowledged(bool),
/// Toggle the model migration prompt acknowledgement flag.
SetNoticeHideModelMigrationPrompt(String, bool),
/// Replace the entire `[mcp_servers]` table.
ReplaceMcpServers(BTreeMap<String, McpServerConfig>),
/// Set trust_level = "trusted" under `[projects."<path>"]`,
@@ -253,6 +255,13 @@ impl ConfigDocument {
&[Notice::TABLE_KEY, "hide_rate_limit_model_nudge"],
value(*acknowledged),
)),
ConfigEdit::SetNoticeHideModelMigrationPrompt(migration_config, acknowledged) => {
Ok(self.write_value(
Scope::Global,
&[Notice::TABLE_KEY, migration_config.as_str()],
value(*acknowledged),
))
}
ConfigEdit::SetWindowsWslSetupAcknowledged(acknowledged) => Ok(self.write_value(
Scope::Global,
&["windows_wsl_setup_acknowledged"],
@@ -499,6 +508,15 @@ impl ConfigEditsBuilder {
self
}
/// Queues an edit that sets the given `[notice]` key (the migration-prompt
/// flag name, e.g. `hide_gpt5_1_migration_prompt`) to `acknowledged`.
pub fn set_hide_model_migration_prompt(mut self, model: &str, acknowledged: bool) -> Self {
    let edit = ConfigEdit::SetNoticeHideModelMigrationPrompt(model.to_string(), acknowledged);
    self.edits.push(edit);
    self
}
pub fn set_windows_wsl_setup_acknowledged(mut self, acknowledged: bool) -> Self {
self.edits
.push(ConfigEdit::SetWindowsWslSetupAcknowledged(acknowledged));
@@ -770,6 +788,35 @@ existing = "value"
let expected = r#"[notice]
existing = "value"
hide_rate_limit_model_nudge = true
"#;
assert_eq!(contents, expected);
}
#[test]
fn blocking_set_hide_gpt5_1_migration_prompt_preserves_table() {
    let tmp = tempdir().expect("tmpdir");
    let codex_home = tmp.path();
    // Seed a config.toml whose [notice] table already holds an unrelated key.
    std::fs::write(
        codex_home.join(CONFIG_TOML_FILE),
        r#"[notice]
existing = "value"
"#,
    )
    .expect("seed");
    // Persist the migration-prompt acknowledgement for the gpt-5.1 flag.
    apply_blocking(
        codex_home,
        None,
        &[ConfigEdit::SetNoticeHideModelMigrationPrompt(
            "hide_gpt5_1_migration_prompt".to_string(),
            true,
        )],
    )
    .expect("persist");
    let contents =
        std::fs::read_to_string(codex_home.join(CONFIG_TOML_FILE)).expect("read config");
    // The pre-existing key must survive and the new flag is appended after it.
    let expected = r#"[notice]
existing = "value"
hide_gpt5_1_migration_prompt = true
"#;
    assert_eq!(contents, expected);
}

View File

@@ -362,6 +362,8 @@ pub struct Notice {
pub hide_world_writable_warning: Option<bool>,
/// Tracks whether the user opted out of the rate limit model switch reminder.
pub hide_rate_limit_model_nudge: Option<bool>,
/// Tracks whether the user has seen the model migration prompt
pub hide_gpt5_1_migration_prompt: Option<bool>,
}
impl Notice {

View File

@@ -7,6 +7,8 @@ use crate::diff_render::DiffSummary;
use crate::exec_command::strip_bash_lc_and_escape;
use crate::file_search::FileSearchManager;
use crate::history_cell::HistoryCell;
use crate::model_migration::ModelMigrationOutcome;
use crate::model_migration::run_model_migration_prompt;
use crate::pager_overlay::Overlay;
use crate::render::highlight::highlight_bash_to_lines;
use crate::render::renderable::Renderable;
@@ -15,6 +17,7 @@ use crate::tui;
use crate::tui::TuiEvent;
use crate::update_action::UpdateAction;
use codex_ansi_escape::ansi_escape_line;
use codex_common::model_presets::all_model_presets;
use codex_core::AuthManager;
use codex_core::ConversationManager;
use codex_core::config::Config;
@@ -50,6 +53,64 @@ pub struct AppExitInfo {
pub update_action: Option<UpdateAction>,
}
/// Decides whether the one-time migration prompt should appear for
/// `current_model`.
///
/// The prompt is suppressed when the user is already on the target model or
/// has opted out via the hide flag; otherwise it appears only when the
/// current model matches a preset that recommends an upgrade.
fn should_show_model_migration_prompt(
    current_model: &str,
    target_model: &str,
    hide_prompt_flag: Option<bool>,
) -> bool {
    let already_on_target = target_model == current_model;
    let opted_out = hide_prompt_flag.unwrap_or(false);
    if already_on_target || opted_out {
        return false;
    }
    // Only presets that actually recommend an upgrade trigger the prompt.
    all_model_presets().iter().any(|preset| {
        preset.model == current_model && preset.recommended_upgrade_model.is_some()
    })
}
/// Shows the model migration prompt when the configured model has a
/// recommended upgrade, switching the in-memory config to the upgrade and
/// persisting both the acknowledgement and the new model selection on accept.
///
/// Returns `Some(AppExitInfo)` when the user exited from the prompt
/// (Ctrl-C/Ctrl-D), signalling the caller to terminate; `None` to continue
/// normal startup.
async fn handle_model_migration_prompt_if_needed(
    tui: &mut tui::Tui,
    config: &mut Config,
    app_event_tx: &AppEventSender,
) -> Option<AppExitInfo> {
    // Resolve the recommended upgrade for the current model; fall back to the
    // current model itself so the target == current check below suppresses the
    // prompt when no upgrade is recommended.
    let target_model = all_model_presets()
        .iter()
        .find(|preset| preset.model == config.model)
        .and_then(|preset| preset.recommended_upgrade_model)
        .unwrap_or(&config.model)
        .to_string();
    let hide_prompt_flag = config.notices.hide_gpt5_1_migration_prompt;
    if !should_show_model_migration_prompt(&config.model, &target_model, hide_prompt_flag) {
        return None;
    }
    match run_model_migration_prompt(tui, &target_model).await {
        ModelMigrationOutcome::Accepted => {
            // Record the acknowledgement so the prompt is shown only once.
            app_event_tx.send(AppEvent::PersistModelMigrationPromptAcknowledged {
                migration_config: "hide_gpt5_1_migration_prompt".to_string(),
            });
            // Switch the in-memory config over to the upgraded model.
            config.model = target_model.clone();
            if let Some(family) = find_family_for_model(&target_model) {
                config.model_family = family;
            }
            app_event_tx.send(AppEvent::UpdateModel(target_model.clone()));
            // Persist the new model (and current effort) to the config file.
            app_event_tx.send(AppEvent::PersistModelSelection {
                model: target_model,
                effort: config.model_reasoning_effort,
            });
        }
        ModelMigrationOutcome::Exit => {
            // User chose to quit from the prompt: exit before any
            // conversation is created.
            return Some(AppExitInfo {
                token_usage: TokenUsage::default(),
                conversation_id: None,
                update_action: None,
            });
        }
    }
    None
}
pub(crate) struct App {
pub(crate) server: Arc<ConversationManager>,
pub(crate) app_event_tx: AppEventSender,
@@ -89,7 +150,7 @@ impl App {
pub async fn run(
tui: &mut tui::Tui,
auth_manager: Arc<AuthManager>,
config: Config,
mut config: Config,
active_profile: Option<String>,
initial_prompt: Option<String>,
initial_images: Vec<PathBuf>,
@@ -100,6 +161,12 @@ impl App {
let (app_event_tx, mut app_event_rx) = unbounded_channel();
let app_event_tx = AppEventSender::new(app_event_tx);
let exit_info =
handle_model_migration_prompt_if_needed(tui, &mut config, &app_event_tx).await;
if let Some(exit_info) = exit_info {
return Ok(exit_info);
}
let conversation_manager = Arc::new(ConversationManager::new(
auth_manager.clone(),
SessionSource::Cli,
@@ -547,6 +614,18 @@ impl App {
));
}
}
AppEvent::PersistModelMigrationPromptAcknowledged { migration_config } => {
if let Err(err) = ConfigEditsBuilder::new(&self.config.codex_home)
.set_hide_model_migration_prompt(&migration_config, true)
.apply()
.await
{
tracing::error!(error = %err, "failed to persist model migration prompt acknowledgement");
self.chat_widget.add_error_message(format!(
"Failed to save model migration prompt preference: {err}"
));
}
}
AppEvent::OpenApprovalsPopup => {
self.chat_widget.open_approvals_popup();
}
@@ -757,6 +836,38 @@ mod tests {
}
}
#[test]
fn model_migration_prompt_only_shows_for_deprecated_models() {
    // Each deprecated gpt-5-era model should trigger the migration prompt.
    for (current, target) in [
        ("gpt-5", "gpt-5.1"),
        ("gpt-5-codex", "gpt-5.1-codex"),
        ("gpt-5-codex-mini", "gpt-5.1-codex-mini"),
    ] {
        assert!(should_show_model_migration_prompt(current, target, None));
    }
    // A model that is already current gets no prompt.
    assert!(!should_show_model_migration_prompt(
        "gpt-5.1-codex",
        "gpt-5.1-codex",
        None
    ));
}
#[test]
fn model_migration_prompt_respects_hide_flag_and_self_target() {
    // The opt-out flag suppresses the prompt even for a deprecated model.
    assert!(!should_show_model_migration_prompt(
        "gpt-5",
        "gpt-5.1",
        Some(true)
    ));
    // Targeting the model the user already runs never prompts.
    assert!(!should_show_model_migration_prompt(
        "gpt-5.1", "gpt-5.1", None
    ));
}
#[test]
fn update_reasoning_effort_updates_config() {
let mut app = make_test_app();

View File

@@ -117,6 +117,11 @@ pub(crate) enum AppEvent {
/// Persist the acknowledgement flag for the rate limit switch prompt.
PersistRateLimitSwitchPromptHidden,
/// Persist the acknowledgement flag for the model migration prompt.
PersistModelMigrationPromptAcknowledged {
migration_config: String,
},
/// Skip the next world-writable scan (one-shot) after a user-confirmed continue.
#[cfg_attr(not(target_os = "windows"), allow(dead_code))]
SkipNextWorldWritableScan,

View File

@@ -1533,6 +1533,7 @@ fn single_reasoning_option_skips_selection() {
default_reasoning_effort: ReasoningEffortConfig::High,
supported_reasoning_efforts: &SINGLE_EFFORT,
is_default: false,
recommended_upgrade_model: None,
};
chat.open_reasoning_popup(preset);

View File

@@ -54,6 +54,7 @@ pub mod live_wrap;
mod markdown;
mod markdown_render;
mod markdown_stream;
mod model_migration;
pub mod onboarding;
mod pager_overlay;
pub mod public_widgets;

View File

@@ -0,0 +1,275 @@
use crate::render::Insets;
use crate::render::renderable::ColumnRenderable;
use crate::render::renderable::Renderable;
use crate::render::renderable::RenderableExt as _;
use crate::tui::FrameRequester;
use crate::tui::Tui;
use crate::tui::TuiEvent;
use crossterm::event::KeyCode;
use crossterm::event::KeyEvent;
use crossterm::event::KeyEventKind;
use crossterm::event::KeyModifiers;
use ratatui::prelude::Stylize as _;
use ratatui::prelude::Widget;
use ratatui::text::Line;
use ratatui::widgets::Clear;
use ratatui::widgets::Paragraph;
use ratatui::widgets::WidgetRef;
use ratatui::widgets::Wrap;
use tokio_stream::StreamExt;
/// Outcome of the migration prompt.
pub(crate) enum ModelMigrationOutcome {
    /// The user acknowledged the prompt (Enter/Esc), or the event stream
    /// ended before a response.
    Accepted,
    /// The user pressed Ctrl-C/Ctrl-D; the application should exit.
    Exit,
}
/// Displays the model migration prompt full-screen and blocks until the user
/// responds.
///
/// Returns [`ModelMigrationOutcome::Exit`] only for Ctrl-C/Ctrl-D; Enter,
/// Esc, or a closed event stream all count as acceptance.
pub(crate) async fn run_model_migration_prompt(
    tui: &mut Tui,
    target_model: &str,
) -> ModelMigrationOutcome {
    // Render the prompt on the terminal's alternate screen so exiting or cancelling
    // does not leave a large blank region in the normal scrollback. This does not
    // change the prompt's appearance, only where it is drawn.
    struct AltScreenGuard<'a> {
        tui: &'a mut Tui,
    }
    impl<'a> AltScreenGuard<'a> {
        fn enter(tui: &'a mut Tui) -> Self {
            let _ = tui.enter_alt_screen();
            Self { tui }
        }
    }
    // Leave the alternate screen on every exit path, including early returns.
    impl Drop for AltScreenGuard<'_> {
        fn drop(&mut self) {
            let _ = self.tui.leave_alt_screen();
        }
    }
    let alt = AltScreenGuard::enter(tui);
    let mut screen = ModelMigrationScreen::new(alt.tui.frame_requester(), target_model);
    // Draw once up front so the prompt is visible before the first event arrives.
    let _ = alt.tui.draw(u16::MAX, |frame| {
        frame.render_widget_ref(&screen, frame.area());
    });
    let events = alt.tui.event_stream();
    tokio::pin!(events);
    while !screen.is_done() {
        if let Some(event) = events.next().await {
            match event {
                TuiEvent::Key(key_event) => screen.handle_key(key_event),
                TuiEvent::Paste(_) => {}
                TuiEvent::Draw => {
                    let _ = alt.tui.draw(u16::MAX, |frame| {
                        frame.render_widget_ref(&screen, frame.area());
                    });
                }
            }
        } else {
            // Event stream ended (e.g. terminal closed): treat as acceptance
            // so startup can proceed instead of looping forever.
            screen.accept();
            break;
        }
    }
    screen.outcome()
}
/// State for the full-screen model migration prompt.
struct ModelMigrationScreen {
    // Used to schedule a redraw after state changes.
    request_frame: FrameRequester,
    // Model name displayed in the headline.
    target_model: String,
    // True once the user has dismissed the prompt (accept or exit).
    done: bool,
    // True when the user chose to exit the app (Ctrl-C/Ctrl-D).
    should_exit: bool,
}
impl ModelMigrationScreen {
    /// Builds a fresh, not-yet-dismissed screen advertising `target_model`.
    fn new(request_frame: FrameRequester, target_model: &str) -> Self {
        Self {
            request_frame,
            target_model: target_model.to_string(),
            done: false,
            should_exit: false,
        }
    }

    /// Marks the prompt as acknowledged and requests a redraw.
    fn accept(&mut self) {
        self.done = true;
        self.request_frame.schedule_frame();
    }

    /// Routes a key press: Ctrl-C/Ctrl-D exit the app, Enter/Esc acknowledge.
    fn handle_key(&mut self, key_event: KeyEvent) {
        // Ignore key-release events so one press is not handled twice.
        if key_event.kind == KeyEventKind::Release {
            return;
        }
        let ctrl_held = key_event.modifiers.contains(KeyModifiers::CONTROL);
        let interrupt =
            ctrl_held && matches!(key_event.code, KeyCode::Char('c') | KeyCode::Char('d'));
        if interrupt {
            self.should_exit = true;
            self.done = true;
            self.request_frame.schedule_frame();
        } else if matches!(key_event.code, KeyCode::Esc | KeyCode::Enter) {
            self.accept();
        }
    }

    /// Whether the event loop should stop polling for input.
    fn is_done(&self) -> bool {
        self.done
    }

    /// Final disposition once the prompt is done.
    fn outcome(&self) -> ModelMigrationOutcome {
        match self.should_exit {
            true => ModelMigrationOutcome::Exit,
            false => ModelMigrationOutcome::Accepted,
        }
    }
}
impl WidgetRef for &ModelMigrationScreen {
    fn render_ref(&self, area: ratatui::layout::Rect, buf: &mut ratatui::buffer::Buffer) {
        // Wipe whatever was underneath; the prompt owns the whole area.
        Clear.render(area, buf);

        // Body copy is wrapped and indented two columns under the headline.
        let indented = |text: &'static str| {
            Paragraph::new(Line::from(text))
                .wrap(Wrap { trim: false })
                .inset(Insets::tlbr(0, 2, 0, 0))
        };

        let headline = Line::from(vec![
            "> ".into(),
            "Introducing ".bold(),
            "our ".bold(),
            self.target_model.clone().bold(),
            " models".bold(),
        ]);

        let mut column = ColumnRenderable::new();
        column.push("");
        column.push(headline);
        column.push(Line::from(""));
        column.push(indented(
            "We've upgraded our family of models supported in Codex to gpt-5.1, gpt-5.1-codex and gpt-5.1-codex-mini.",
        ));
        column.push(Line::from(""));
        column.push(indented(
            "You can continue using legacy models by specifying them directly with the -m option or in your config.toml.",
        ));
        column.push(Line::from(""));
        column.push(
            Line::from(vec![
                "Learn more at ".into(),
                "www.openai.com/index/gpt-5-1".cyan().underlined(),
                ".".into(),
            ])
            .inset(Insets::tlbr(0, 2, 0, 0)),
        );
        column.push(Line::from(""));
        column.push(
            Line::from(vec!["Press enter to continue".dim()]).inset(Insets::tlbr(0, 2, 0, 0)),
        );
        column.render(area, buf);
    }
}
#[cfg(test)]
mod tests {
    use super::ModelMigrationScreen;
    use crate::custom_terminal::Terminal;
    use crate::test_backend::VT100Backend;
    use crate::tui::FrameRequester;
    use crossterm::event::KeyCode;
    use crossterm::event::KeyEvent;
    use insta::assert_snapshot;
    use ratatui::layout::Rect;

    /// Renders the prompt for `target_model` at the given terminal size and
    /// compares the result against the named insta snapshot. Shared by all
    /// snapshot tests below to avoid duplicating the render boilerplate.
    fn assert_prompt_snapshot(name: &str, width: u16, height: u16, target_model: &str) {
        let backend = VT100Backend::new(width, height);
        let mut terminal = Terminal::with_options(backend).expect("terminal");
        terminal.set_viewport_area(Rect::new(0, 0, width, height));
        let screen = ModelMigrationScreen::new(FrameRequester::test_dummy(), target_model);
        {
            let mut frame = terminal.get_frame();
            frame.render_widget_ref(&screen, frame.area());
        }
        terminal.flush().expect("flush");
        assert_snapshot!(name, terminal.backend());
    }

    #[test]
    fn prompt_snapshot() {
        assert_prompt_snapshot("model_migration_prompt", 60, 12, "gpt-5.1-codex");
    }

    #[test]
    fn prompt_snapshot_gpt5_family() {
        assert_prompt_snapshot("model_migration_prompt_gpt5_family", 65, 12, "gpt-5.1");
    }

    #[test]
    fn prompt_snapshot_gpt5_codex() {
        assert_prompt_snapshot("model_migration_prompt_gpt5_codex", 60, 12, "gpt-5.1-codex");
    }

    #[test]
    fn prompt_snapshot_gpt5_codex_mini() {
        assert_prompt_snapshot(
            "model_migration_prompt_gpt5_codex_mini",
            60,
            12,
            "gpt-5.1-codex-mini",
        );
    }

    #[test]
    fn escape_key_accepts_prompt() {
        let screen_target = "gpt-5.1-codex";
        let mut screen = ModelMigrationScreen::new(FrameRequester::test_dummy(), screen_target);
        // Simulate pressing Escape.
        screen.handle_key(KeyEvent::new(
            KeyCode::Esc,
            crossterm::event::KeyModifiers::NONE,
        ));
        assert!(screen.is_done());
        // Esc should not be treated as Exit; it accepts like Enter.
        assert!(matches!(
            screen.outcome(),
            super::ModelMigrationOutcome::Accepted
        ));
    }
}

View File

@@ -0,0 +1,15 @@
---
source: tui/src/model_migration.rs
expression: terminal.backend()
---
> Introducing our gpt-5.1-codex models
We've upgraded our family of models supported in Codex to
gpt-5.1, gpt-5.1-codex and gpt-5.1-codex-mini.
You can continue using legacy models by specifying them
directly with the -m option or in your config.toml.
Learn more at www.openai.com/index/gpt-5-1.
Press enter to continue

View File

@@ -0,0 +1,15 @@
---
source: tui/src/model_migration.rs
expression: terminal.backend()
---
> Introducing our gpt-5.1-codex models
We've upgraded our family of models supported in Codex to
gpt-5.1, gpt-5.1-codex and gpt-5.1-codex-mini.
You can continue using legacy models by specifying them
directly with the -m option or in your config.toml.
Learn more at www.openai.com/index/gpt-5-1.
Press enter to continue

View File

@@ -0,0 +1,15 @@
---
source: tui/src/model_migration.rs
expression: terminal.backend()
---
> Introducing our gpt-5.1-codex-mini models
We've upgraded our family of models supported in Codex to
gpt-5.1, gpt-5.1-codex and gpt-5.1-codex-mini.
You can continue using legacy models by specifying them
directly with the -m option or in your config.toml.
Learn more at www.openai.com/index/gpt-5-1.
Press enter to continue

View File

@@ -0,0 +1,15 @@
---
source: tui/src/model_migration.rs
expression: terminal.backend()
---
> Introducing our gpt-5.1 models
We've upgraded our family of models supported in Codex to
gpt-5.1, gpt-5.1-codex and gpt-5.1-codex-mini.
You can continue using legacy models by specifying them
directly with the -m option or in your config.toml.
Learn more at www.openai.com/index/gpt-5-1.
Press enter to continue