Compare commits

...

19 Commits

Author         SHA1        Message                                             Date
Ahmed Ibrahim  7fe7b7bf06  snap                                                2026-01-06 15:02:42 -08:00
Ahmed Ibrahim  bf37fe670d  tui2                                                2026-01-05 19:39:16 -08:00
Ahmed Ibrahim  91bc7fc5fd  tui2                                                2026-01-05 19:38:23 -08:00
Ahmed Ibrahim  284dc5c724  models                                              2026-01-05 13:29:41 -08:00
Ahmed Ibrahim  0f3c232541  tests                                               2026-01-05 12:47:00 -08:00
Ahmed Ibrahim  706e92fc7e  tests                                               2026-01-05 12:46:01 -08:00
Ahmed Ibrahim  19075571ba  Apply reasoning-effort mapping on migration accept  2026-01-05 12:41:30 -08:00
Ahmed Ibrahim  a32c06fc58  Merge branch 'main' into async-tui-1                2026-01-05 12:22:20 -08:00
Ahmed Ibrahim  083d8d856d  models                                              2026-01-05 11:00:17 -08:00
Ahmed Ibrahim  dbd5c6a557  models                                              2026-01-05 10:54:56 -08:00
Ahmed Ibrahim  0239053ad9  models                                              2026-01-05 10:50:38 -08:00
Ahmed Ibrahim  5c79085dae  models                                              2026-01-05 10:44:23 -08:00
Ahmed Ibrahim  d7b0997d32  models                                              2026-01-05 10:10:32 -08:00
Ahmed Ibrahim  88d090a2b7  models                                              2026-01-05 10:02:44 -08:00
Ahmed Ibrahim  5f56a2328b  models                                              2026-01-05 09:41:38 -08:00
Ahmed Ibrahim  ff5f74b6bf  models                                              2026-01-05 09:22:04 -08:00
Ahmed Ibrahim  a944b0ebf1  files                                               2026-01-05 08:17:34 -08:00
Ahmed Ibrahim  c982cbd6e1  files                                               2026-01-04 14:29:05 -08:00
Ahmed Ibrahim  9cd4d895ba  async                                               2026-01-03 22:12:55 -08:00
15 changed files with 2008 additions and 1182 deletions

View File

@@ -10,6 +10,7 @@ use std::collections::HashSet;
use std::path::PathBuf;
use std::sync::Arc;
use std::time::Duration;
use tokio::sync::Mutex;
use tokio::sync::RwLock;
use tokio::sync::TryLockError;
use tracing::error;
@@ -39,6 +40,7 @@ pub struct ModelsManager {
// todo(aibrahim) merge available_models and model family creation into one struct
local_models: Vec<ModelPreset>,
remote_models: RwLock<Vec<ModelInfo>>,
refresh_lock: Mutex<()>,
auth_manager: Arc<AuthManager>,
etag: RwLock<Option<String>>,
codex_home: PathBuf,
@@ -53,6 +55,7 @@ impl ModelsManager {
Self {
local_models: builtin_model_presets(auth_manager.get_auth_mode()),
remote_models: RwLock::new(Self::load_remote_models_from_file().unwrap_or_default()),
refresh_lock: Mutex::new(()),
auth_manager,
etag: RwLock::new(None),
codex_home,
@@ -68,6 +71,7 @@ impl ModelsManager {
Self {
local_models: builtin_model_presets(auth_manager.get_auth_mode()),
remote_models: RwLock::new(Self::load_remote_models_from_file().unwrap_or_default()),
refresh_lock: Mutex::new(()),
auth_manager,
etag: RwLock::new(None),
codex_home,
@@ -78,6 +82,7 @@ impl ModelsManager {
/// Fetch the latest remote models, using the on-disk cache when still fresh.
pub async fn refresh_available_models_with_cache(&self, config: &Config) -> CoreResult<()> {
let _refresh_guard = self.refresh_lock.lock().await;
if !config.features.enabled(Feature::RemoteModels)
|| self.auth_manager.get_auth_mode() == Some(AuthMode::ApiKey)
{
@@ -104,10 +109,23 @@ impl ModelsManager {
let client = ModelsClient::new(transport, api_provider, api_auth);
let client_version = format_client_version_to_whole();
let (models, etag) = client
.list_models(&client_version, HeaderMap::new())
.await
.map_err(map_api_error)?;
let remote_models = tokio::time::timeout(
Duration::from_secs(5),
client.list_models(&client_version, HeaderMap::new()),
)
.await;
let (models, etag) = match remote_models {
Ok(Ok((models, etag))) => (models, etag),
Ok(Err(err)) => {
error!("failed to refresh remote models: {}", map_api_error(err));
return Ok(());
}
Err(_) => {
error!("timed out refreshing remote models after 5s");
return Ok(());
}
};
self.apply_remote_models(models.clone()).await;
*self.etag.write().await = etag.clone();
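
Reviewer note on the hunk above: the new refresh path serializes concurrent refreshes behind a tokio::sync::Mutex and bounds the network call with tokio::time::timeout, so a slow or failing models endpoint degrades to the cached list instead of blocking startup. A minimal standalone sketch of that shape, with simplified types and a hypothetical fetch_models helper (assumes the tokio crate with its rt, macros, sync, and time features; this is not the crate's actual API):

use std::time::Duration;
use tokio::sync::Mutex;
use tokio::sync::RwLock;

struct Manager {
    // Serializes refreshes: concurrent callers wait here instead of
    // issuing duplicate fetches.
    refresh_lock: Mutex<()>,
    // Stand-in for the cached remote model list.
    cached: RwLock<Vec<String>>,
}

impl Manager {
    async fn refresh(&self) {
        let _guard = self.refresh_lock.lock().await;
        // Bound the fetch at 5s, mirroring the diff above.
        let fetched = tokio::time::timeout(Duration::from_secs(5), fetch_models()).await;
        let models = match fetched {
            Ok(Ok(models)) => models,
            Ok(Err(err)) => {
                // Non-fatal: log and keep serving the stale cache.
                eprintln!("failed to refresh remote models: {err}");
                return;
            }
            Err(_) => {
                eprintln!("timed out refreshing remote models after 5s");
                return;
            }
        };
        *self.cached.write().await = models;
    }
}

// Hypothetical stand-in for ModelsClient::list_models.
async fn fetch_models() -> Result<Vec<String>, String> {
    Ok(vec!["gpt-5.1-codex".to_string()])
}

#[tokio::main]
async fn main() {
    let manager = Manager {
        refresh_lock: Mutex::new(()),
        cached: RwLock::new(Vec::new()),
    };
    manager.refresh().await;
    println!("{:?}", manager.cached.read().await);
}

The real diff returns Ok(()) from both failure arms for the same reason the sketch returns early: a failed or slow refresh is treated as non-fatal and the previously cached models keep serving.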

View File

@@ -10,9 +10,8 @@ use crate::external_editor;
use crate::file_search::FileSearchManager;
use crate::history_cell;
use crate::history_cell::HistoryCell;
use crate::model_migration::ModelMigrationOutcome;
use crate::model_migration::migration_copy_for_models;
use crate::model_migration::run_model_migration_prompt;
use crate::model_migration::StartupModelMigrationAction;
use crate::model_migration::maybe_run_startup_model_migration_prompt;
use crate::pager_overlay::Overlay;
use crate::render::highlight::highlight_bash_to_lines;
use crate::render::renderable::Renderable;
@@ -28,9 +27,6 @@ use codex_core::config::edit::ConfigEdit;
use codex_core::config::edit::ConfigEditsBuilder;
#[cfg(target_os = "windows")]
use codex_core::features::Feature;
use codex_core::models_manager::manager::ModelsManager;
use codex_core::models_manager::model_presets::HIDE_GPT_5_1_CODEX_MAX_MIGRATION_PROMPT_CONFIG;
use codex_core::models_manager::model_presets::HIDE_GPT5_1_MIGRATION_PROMPT_CONFIG;
use codex_core::protocol::EventMsg;
use codex_core::protocol::FinalOutput;
use codex_core::protocol::ListSkillsResponseEvent;
@@ -39,8 +35,6 @@ use codex_core::protocol::SessionSource;
use codex_core::protocol::SkillErrorInfo;
use codex_core::protocol::TokenUsage;
use codex_protocol::ConversationId;
use codex_protocol::openai_models::ModelPreset;
use codex_protocol::openai_models::ModelUpgrade;
use codex_protocol::openai_models::ReasoningEffort as ReasoningEffortConfig;
use color_eyre::eyre::Result;
use color_eyre::eyre::WrapErr;
@@ -51,7 +45,6 @@ use ratatui::style::Stylize;
use ratatui::text::Line;
use ratatui::widgets::Paragraph;
use ratatui::widgets::Wrap;
use std::collections::BTreeMap;
use std::path::Path;
use std::path::PathBuf;
use std::sync::Arc;
@@ -127,164 +120,6 @@ struct SessionSummary {
resume_command: Option<String>,
}
fn should_show_model_migration_prompt(
current_model: &str,
target_model: &str,
seen_migrations: &BTreeMap<String, String>,
available_models: &[ModelPreset],
) -> bool {
if target_model == current_model {
return false;
}
if let Some(seen_target) = seen_migrations.get(current_model)
&& seen_target == target_model
{
return false;
}
if available_models
.iter()
.any(|preset| preset.model == current_model && preset.upgrade.is_some())
{
return true;
}
if available_models
.iter()
.any(|preset| preset.upgrade.as_ref().map(|u| u.id.as_str()) == Some(target_model))
{
return true;
}
false
}
fn migration_prompt_hidden(config: &Config, migration_config_key: &str) -> bool {
match migration_config_key {
HIDE_GPT_5_1_CODEX_MAX_MIGRATION_PROMPT_CONFIG => config
.notices
.hide_gpt_5_1_codex_max_migration_prompt
.unwrap_or(false),
HIDE_GPT5_1_MIGRATION_PROMPT_CONFIG => {
config.notices.hide_gpt5_1_migration_prompt.unwrap_or(false)
}
_ => false,
}
}
fn target_preset_for_upgrade<'a>(
available_models: &'a [ModelPreset],
target_model: &str,
) -> Option<&'a ModelPreset> {
available_models
.iter()
.find(|preset| preset.model == target_model)
}
async fn handle_model_migration_prompt_if_needed(
tui: &mut tui::Tui,
config: &mut Config,
model: &str,
app_event_tx: &AppEventSender,
models_manager: Arc<ModelsManager>,
) -> Option<AppExitInfo> {
let available_models = models_manager.list_models(config).await;
let upgrade = available_models
.iter()
.find(|preset| preset.model == model)
.and_then(|preset| preset.upgrade.as_ref());
if let Some(ModelUpgrade {
id: target_model,
reasoning_effort_mapping,
migration_config_key,
model_link,
upgrade_copy,
}) = upgrade
{
if migration_prompt_hidden(config, migration_config_key.as_str()) {
return None;
}
let target_model = target_model.to_string();
if !should_show_model_migration_prompt(
model,
&target_model,
&config.notices.model_migrations,
&available_models,
) {
return None;
}
let current_preset = available_models.iter().find(|preset| preset.model == model);
let target_preset = target_preset_for_upgrade(&available_models, &target_model);
let target_preset = target_preset?;
let target_display_name = target_preset.display_name.clone();
let heading_label = if target_display_name == model {
target_model.clone()
} else {
target_display_name.clone()
};
let target_description =
(!target_preset.description.is_empty()).then(|| target_preset.description.clone());
let can_opt_out = current_preset.is_some();
let prompt_copy = migration_copy_for_models(
model,
&target_model,
model_link.clone(),
upgrade_copy.clone(),
heading_label,
target_description,
can_opt_out,
);
match run_model_migration_prompt(tui, prompt_copy).await {
ModelMigrationOutcome::Accepted => {
app_event_tx.send(AppEvent::PersistModelMigrationPromptAcknowledged {
from_model: model.to_string(),
to_model: target_model.clone(),
});
config.model = Some(target_model.clone());
let mapped_effort = if let Some(reasoning_effort_mapping) = reasoning_effort_mapping
&& let Some(reasoning_effort) = config.model_reasoning_effort
{
reasoning_effort_mapping
.get(&reasoning_effort)
.cloned()
.or(config.model_reasoning_effort)
} else {
config.model_reasoning_effort
};
config.model_reasoning_effort = mapped_effort;
app_event_tx.send(AppEvent::UpdateModel(target_model.clone()));
app_event_tx.send(AppEvent::UpdateReasoningEffort(mapped_effort));
app_event_tx.send(AppEvent::PersistModelSelection {
model: target_model.clone(),
effort: mapped_effort,
});
}
ModelMigrationOutcome::Rejected => {
app_event_tx.send(AppEvent::PersistModelMigrationPromptAcknowledged {
from_model: model.to_string(),
to_model: target_model.clone(),
});
}
ModelMigrationOutcome::Exit => {
return Some(AppExitInfo {
token_usage: TokenUsage::default(),
conversation_id: None,
update_action: None,
});
}
}
}
None
}
pub(crate) struct App {
pub(crate) server: Arc<ConversationManager>,
pub(crate) app_event_tx: AppEventSender,
@@ -336,7 +171,7 @@ impl App {
pub async fn run(
tui: &mut tui::Tui,
auth_manager: Arc<AuthManager>,
mut config: Config,
config: Config,
active_profile: Option<String>,
initial_prompt: Option<String>,
initial_images: Vec<PathBuf>,
@@ -348,27 +183,27 @@ impl App {
let (app_event_tx, mut app_event_rx) = unbounded_channel();
let app_event_tx = AppEventSender::new(app_event_tx);
let mut config = config;
let conversation_manager = Arc::new(ConversationManager::new(
auth_manager.clone(),
SessionSource::Cli,
));
let mut model = conversation_manager
.get_models_manager()
.get_model(&config.model, &config)
.await;
let exit_info = handle_model_migration_prompt_if_needed(
tui,
&mut config,
model.as_str(),
&app_event_tx,
conversation_manager.get_models_manager(),
)
.await;
if let Some(exit_info) = exit_info {
return Ok(exit_info);
}
if let Some(updated_model) = config.model.clone() {
model = updated_model;
if matches!(
maybe_run_startup_model_migration_prompt(
tui,
&mut config,
conversation_manager.get_models_manager().as_ref(),
)
.await?,
StartupModelMigrationAction::Exit
) {
return Ok(AppExitInfo {
token_usage: TokenUsage::default(),
conversation_id: None,
update_action: None,
});
}
let enhanced_keys_supported = tui.enhanced_keys_supported();
@@ -385,7 +220,8 @@ impl App {
models_manager: conversation_manager.get_models_manager(),
feedback: feedback.clone(),
is_first_run,
model: model.clone(),
// The only truthful model is the one we get back on SessionConfigured.
model: String::new(),
};
ChatWidget::new(init, conversation_manager.clone())
}
@@ -411,7 +247,8 @@ impl App {
models_manager: conversation_manager.get_models_manager(),
feedback: feedback.clone(),
is_first_run,
model: model.clone(),
// The only truthful model is the one we get back on SessionConfigured.
model: String::new(),
};
ChatWidget::new_from_existing(
init,
@@ -433,7 +270,7 @@ impl App {
chat_widget,
auth_manager: auth_manager.clone(),
config,
current_model: model.clone(),
current_model: String::new(),
active_profile,
file_search,
enhanced_keys_supported,
@@ -555,11 +392,6 @@ impl App {
}
async fn handle_event(&mut self, tui: &mut tui::Tui, event: AppEvent) -> Result<bool> {
let model_family = self
.server
.get_models_manager()
.construct_model_family(self.current_model.as_str(), &self.config)
.await;
match event {
AppEvent::NewSession => {
let summary = session_summary(
@@ -581,7 +413,6 @@ impl App {
model: self.current_model.clone(),
};
self.chat_widget = ChatWidget::new(init, self.server.clone());
self.current_model = model_family.get_model_slug().to_string();
if let Some(summary) = summary {
let mut lines: Vec<Line<'static>> = vec![summary.usage_line.clone().into()];
if let Some(command) = summary.resume_command {
@@ -635,7 +466,6 @@ impl App {
resumed.conversation,
resumed.session_configured,
);
self.current_model = model_family.get_model_slug().to_string();
if let Some(summary) = summary {
let mut lines: Vec<Line<'static>> =
vec![summary.usage_line.clone().into()];
@@ -1057,24 +887,6 @@ impl App {
));
}
}
AppEvent::PersistModelMigrationPromptAcknowledged {
from_model,
to_model,
} => {
if let Err(err) = ConfigEditsBuilder::new(&self.config.codex_home)
.record_model_migration_seen(from_model.as_str(), to_model.as_str())
.apply()
.await
{
tracing::error!(
error = %err,
"failed to persist model migration prompt acknowledgement"
);
self.chat_widget.add_error_message(format!(
"Failed to save model migration prompt preference: {err}"
));
}
}
AppEvent::OpenApprovalsPopup => {
self.chat_widget.open_approvals_popup();
}
@@ -1423,91 +1235,6 @@ mod tests {
)
}
fn all_model_presets() -> Vec<ModelPreset> {
codex_core::models_manager::model_presets::all_model_presets().clone()
}
#[tokio::test]
async fn model_migration_prompt_only_shows_for_deprecated_models() {
let seen = BTreeMap::new();
assert!(should_show_model_migration_prompt(
"gpt-5",
"gpt-5.1",
&seen,
&all_model_presets()
));
assert!(should_show_model_migration_prompt(
"gpt-5-codex",
"gpt-5.1-codex",
&seen,
&all_model_presets()
));
assert!(should_show_model_migration_prompt(
"gpt-5-codex-mini",
"gpt-5.1-codex-mini",
&seen,
&all_model_presets()
));
assert!(should_show_model_migration_prompt(
"gpt-5.1-codex",
"gpt-5.1-codex-max",
&seen,
&all_model_presets()
));
assert!(!should_show_model_migration_prompt(
"gpt-5.1-codex",
"gpt-5.1-codex",
&seen,
&all_model_presets()
));
}
#[tokio::test]
async fn model_migration_prompt_respects_hide_flag_and_self_target() {
let mut seen = BTreeMap::new();
seen.insert("gpt-5".to_string(), "gpt-5.1".to_string());
assert!(!should_show_model_migration_prompt(
"gpt-5",
"gpt-5.1",
&seen,
&all_model_presets()
));
assert!(!should_show_model_migration_prompt(
"gpt-5.1",
"gpt-5.1",
&seen,
&all_model_presets()
));
}
#[tokio::test]
async fn model_migration_prompt_skips_when_target_missing() {
let mut available = all_model_presets();
let mut current = available
.iter()
.find(|preset| preset.model == "gpt-5-codex")
.cloned()
.expect("preset present");
current.upgrade = Some(ModelUpgrade {
id: "missing-target".to_string(),
reasoning_effort_mapping: None,
migration_config_key: HIDE_GPT5_1_MIGRATION_PROMPT_CONFIG.to_string(),
model_link: None,
upgrade_copy: None,
});
available.retain(|preset| preset.model != "gpt-5-codex");
available.push(current.clone());
assert!(should_show_model_migration_prompt(
&current.model,
"missing-target",
&BTreeMap::new(),
&available,
));
assert!(target_preset_for_upgrade(&available, "missing-target").is_none());
}
#[tokio::test]
async fn update_reasoning_effort_updates_config() {
let mut app = make_test_app().await;
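
Reviewer note: the migration-prompt block removed from app.rs above (presumably folded into maybe_run_startup_model_migration_prompt) applies the upgrade's reasoning_effort_mapping when the user accepts a migration: look up the current effort in the map and fall back to the existing effort when there is no entry, per commit 19075571ba. A minimal sketch of that rule with a simplified, illustrative Effort enum (not the protocol's real type):

use std::collections::BTreeMap;

// Illustrative stand-in for codex_protocol's ReasoningEffort.
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord)]
enum Effort {
    Low,
    Medium,
    High,
}

// Apply the upgrade's effort mapping, keeping the current effort when the
// mapping is absent or has no entry for it.
fn map_effort(
    mapping: Option<&BTreeMap<Effort, Effort>>,
    current: Option<Effort>,
) -> Option<Effort> {
    match (mapping, current) {
        (Some(mapping), Some(effort)) => mapping.get(&effort).copied().or(current),
        _ => current,
    }
}

fn main() {
    let mut mapping = BTreeMap::new();
    mapping.insert(Effort::High, Effort::Medium);
    assert_eq!(map_effort(Some(&mapping), Some(Effort::High)), Some(Effort::Medium));
    // No entry for Low: fall back to the current effort.
    assert_eq!(map_effort(Some(&mapping), Some(Effort::Low)), Some(Effort::Low));
    assert_eq!(map_effort(None, Some(Effort::High)), Some(Effort::High));
}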

View File

@@ -143,12 +143,6 @@ pub(crate) enum AppEvent {
/// Persist the acknowledgement flag for the rate limit switch prompt.
PersistRateLimitSwitchPromptHidden,
/// Persist the acknowledgement flag for the model migration prompt.
PersistModelMigrationPromptAcknowledged {
from_model: String,
to_model: String,
},
/// Skip the next world-writable scan (one-shot) after a user-confirmed continue.
#[cfg_attr(not(target_os = "windows"), allow(dead_code))]
SkipNextWorldWritableScan,

View File

@@ -78,6 +78,7 @@ use ratatui::layout::Rect;
use ratatui::style::Color;
use ratatui::style::Stylize;
use ratatui::text::Line;
use ratatui::widgets::Paragraph;
use ratatui::widgets::Wrap;
use tokio::sync::mpsc::UnboundedSender;
@@ -147,6 +148,8 @@ use strum::IntoEnumIterator;
const USER_SHELL_COMMAND_HELP_TITLE: &str = "Prefix a command with ! to run it locally";
const USER_SHELL_COMMAND_HELP_HINT: &str = "Example: !ls";
use crate::model_migration;
// Track information about an in-flight exec command.
struct RunningCommand {
command: Vec<String>,
@@ -402,6 +405,10 @@ fn create_initial_user_message(text: String, image_paths: Vec<PathBuf>) -> Optio
}
impl ChatWidget {
fn is_session_configured(&self) -> bool {
self.conversation_id.is_some()
}
fn flush_answer_stream_with_separator(&mut self) {
if let Some(mut controller) = self.stream_controller.take()
&& let Some(cell) = controller.finalize()
@@ -439,13 +446,25 @@ impl ChatWidget {
self.current_rollout_path = Some(event.rollout_path.clone());
let initial_messages = event.initial_messages.clone();
let model_for_header = event.model.clone();
self.session_header.set_model(&model_for_header);
self.add_to_history(history_cell::new_session_info(
self.set_model(&model_for_header);
let session_info = history_cell::new_session_info(
&self.config,
&model_for_header,
event,
self.show_welcome_banner,
));
);
if self.active_cell.as_ref().is_some_and(|cell| {
cell.as_any()
.is::<history_cell::StartupSessionHeaderHistoryCell>()
}) {
// Replace the startup placeholder header ("model: loading") with the configured
// session header now that we know the selected model.
self.active_cell = Some(Box::new(session_info));
self.flush_active_cell();
} else {
self.add_to_history(session_info);
}
if let Some(messages) = initial_messages {
self.replay_initial_messages(messages);
}
@@ -457,7 +476,12 @@ impl ChatWidget {
});
if let Some(user_message) = self.initial_user_message.take() {
self.submit_user_message(user_message);
} else {
// If the user queued messages while startup was still in progress, kick off the first
// turn now that we know the session is configured.
self.maybe_send_next_queued_input();
}
self.refresh_pending_model_migration_notice();
if !self.suppress_session_configured_redraw {
self.request_redraw();
}
@@ -795,6 +819,15 @@ impl ChatWidget {
self.request_redraw();
}
fn refresh_pending_model_migration_notice(&self) {
let available_models = match self.models_manager.try_list_models(&self.config) {
Ok(models) => models,
Err(_) => return,
};
model_migration::refresh_pending_model_migration_notice(&self.config, &available_models);
}
/// Handle a turn aborted due to user interrupt (Esc).
/// When there are queued user messages, restore them into the composer
/// separated by newlines rather than autosubmitting the next one.
@@ -908,6 +941,9 @@ impl ChatWidget {
.as_any()
.downcast_ref::<history_cell::UnifiedExecWaitCell>()
.is_none()
&& !active
.as_any()
.is::<history_cell::StartupSessionHeaderHistoryCell>()
);
if has_non_wait_active {
// Do not preempt non-wait active cells with a wait entry.
@@ -1417,10 +1453,16 @@ impl ChatWidget {
model,
} = common;
let mut config = config;
config.model = Some(model.clone());
// `model` is an optional override provided by the app. Avoid clobbering the configured
// model with an empty string during startup; that would propagate to core and render as a
// blank model in the session header (/model current label, etc).
if !model.is_empty() {
config.model = Some(model.clone());
}
let mut rng = rand::rng();
let placeholder = EXAMPLE_PROMPTS[rng.random_range(0..EXAMPLE_PROMPTS.len())].to_string();
let codex_op_tx = spawn_agent(config.clone(), app_event_tx.clone(), conversation_manager);
let startup_dir = config.cwd.clone();
let mut widget = Self {
app_event_tx: app_event_tx.clone(),
@@ -1436,7 +1478,9 @@ impl ChatWidget {
animations_enabled: config.animations,
skills: None,
}),
active_cell: None,
active_cell: Some(Box::new(
history_cell::StartupSessionHeaderHistoryCell::new(startup_dir),
)),
config,
model: model.clone(),
auth_manager,
@@ -1719,7 +1763,7 @@ impl ChatWidget {
return;
}
const INIT_PROMPT: &str = include_str!("../prompt_for_init_command.md");
self.submit_user_message(INIT_PROMPT.to_string().into());
self.queue_user_message(INIT_PROMPT.to_string().into());
}
SlashCommand::Compact => {
self.clear_token_usage();
@@ -1860,6 +1904,15 @@ impl ChatWidget {
fn flush_active_cell(&mut self) {
self.flush_wait_cell();
if let Some(active) = self.active_cell.take() {
if active
.as_any()
.is::<history_cell::StartupSessionHeaderHistoryCell>()
{
// Startup header is a transient placeholder and should not be flushed into
// history. It will be replaced once SessionConfigured arrives.
self.active_cell = Some(active);
return;
}
self.needs_final_message_separator = true;
self.app_event_tx.send(AppEvent::InsertHistoryCell(active));
}
@@ -1900,7 +1953,7 @@ impl ChatWidget {
}
fn queue_user_message(&mut self, user_message: UserMessage) {
if self.bottom_pane.is_task_running() {
if !self.is_session_configured() || self.bottom_pane.is_task_running() {
self.queued_user_messages.push_back(user_message);
self.refresh_queued_user_messages();
} else {
@@ -1909,6 +1962,12 @@ impl ChatWidget {
}
fn submit_user_message(&mut self, user_message: UserMessage) {
if !self.is_session_configured() {
self.queued_user_messages.push_back(user_message);
self.refresh_queued_user_messages();
return;
}
let UserMessage { text, image_paths } = user_message;
if text.is_empty() && image_paths.is_empty() {
return;
@@ -2214,6 +2273,9 @@ impl ChatWidget {
// If idle and there are queued inputs, submit exactly one to start the next turn.
fn maybe_send_next_queued_input(&mut self) {
if !self.is_session_configured() {
return;
}
if self.bottom_pane.is_task_running() {
return;
}
@@ -2410,6 +2472,14 @@ impl ChatWidget {
/// Open a popup to choose a quick auto model. Selecting "All models"
/// opens the full picker with every available preset.
pub(crate) fn open_model_popup(&mut self) {
if !self.is_session_configured() {
self.add_info_message(
"Model selection is disabled until startup completes.".to_string(),
None,
);
return;
}
let presets: Vec<ModelPreset> =
// todo(aibrahim): make this async function
match self.models_manager.try_list_models(&self.config) {
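
Reviewer note on the gating introduced above: every submission path now checks is_session_configured(); input that arrives before SessionConfigured is queued, and the SessionConfigured handler drains one queued message to start the first turn. A standalone sketch of that state machine (field and type names are simplified and illustrative, not the widget's real ones):

use std::collections::VecDeque;

struct Widget {
    // Set once SessionConfigured arrives.
    conversation_id: Option<u64>,
    task_running: bool,
    queued: VecDeque<String>,
}

impl Widget {
    fn is_session_configured(&self) -> bool {
        self.conversation_id.is_some()
    }

    fn queue_user_message(&mut self, msg: String) {
        // Before the session is configured, or while a task runs, queue.
        if !self.is_session_configured() || self.task_running {
            self.queued.push_back(msg);
        } else {
            self.submit(msg);
        }
    }

    fn on_session_configured(&mut self, id: u64) {
        self.conversation_id = Some(id);
        // Kick off the first queued turn now that the session is configured.
        if !self.task_running {
            if let Some(msg) = self.queued.pop_front() {
                self.submit(msg);
            }
        }
    }

    fn submit(&mut self, msg: String) {
        self.task_running = true;
        println!("submitting: {msg}");
    }
}

fn main() {
    let mut widget = Widget {
        conversation_id: None,
        task_running: false,
        queued: VecDeque::new(),
    };
    widget.queue_user_message("hello".into()); // queued: session not ready yet
    widget.on_session_configured(1); // drains the queue and submits "hello"
}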

View File

@@ -0,0 +1,11 @@
---
source: tui/src/chatwidget/tests.rs
assertion_line: 2612
expression: active_blob(&chat)
---
╭───────────────────────────────────────╮
│ >_ OpenAI Codex (v0.0.0) │
│ │
│ model: loading /model to change │
│ directory: /home/user/project │
╰───────────────────────────────────────╯

View File

@@ -1021,6 +1021,7 @@ async fn alt_up_edits_most_recent_queued_message() {
#[tokio::test]
async fn enqueueing_history_prompt_multiple_times_is_stable() {
let (mut chat, _rx, _op_rx) = make_chatwidget_manual(None).await;
chat.conversation_id = Some(ConversationId::new());
// Submit an initial prompt to seed history.
chat.bottom_pane.set_composer_text("repeat me".to_string());
@@ -1955,6 +1956,7 @@ async fn experimental_features_toggle_saves_on_exit() {
#[tokio::test]
async fn model_selection_popup_snapshot() {
let (mut chat, _rx, _op_rx) = make_chatwidget_manual(Some("gpt-5-codex")).await;
chat.conversation_id = Some(ConversationId::new());
chat.open_model_popup();
let popup = render_bottom_popup(&chat, 80);
@@ -2169,6 +2171,7 @@ async fn feedback_upload_consent_popup_snapshot() {
#[tokio::test]
async fn reasoning_popup_escape_returns_to_model_popup() {
let (mut chat, _rx, _op_rx) = make_chatwidget_manual(Some("gpt-5.1-codex-max")).await;
chat.conversation_id = Some(ConversationId::new());
chat.open_model_popup();
let preset = get_available_model(&chat, "gpt-5.1-codex-max");
@@ -2597,6 +2600,67 @@ async fn ui_snapshots_small_heights_idle() {
}
}
#[tokio::test]
async fn startup_header_renders_in_active_cell_before_session_configured_snapshot() {
let (mut chat, _rx, _op_rx) = make_chatwidget_manual(None).await;
chat.active_cell = Some(Box::new(
// Use a path that's not under $HOME so the rendered directory is stable across CI/dev.
crate::history_cell::StartupSessionHeaderHistoryCell::new(PathBuf::from(
"/home/user/project",
)),
));
assert_snapshot!("startup_header_active_cell", active_blob(&chat));
}
#[tokio::test]
async fn startup_header_is_replaced_on_session_configured() {
let (mut chat, mut rx, mut op_rx) = make_chatwidget_manual(None).await;
chat.active_cell = Some(Box::new(
crate::history_cell::StartupSessionHeaderHistoryCell::new(chat.config.cwd.clone()),
));
chat.handle_codex_event(Event {
id: "session-1".into(),
msg: EventMsg::SessionConfigured(codex_core::protocol::SessionConfiguredEvent {
session_id: ConversationId::new(),
model: "gpt-5.2-codex".to_string(),
model_provider_id: "test-provider".to_string(),
approval_policy: codex_core::protocol::AskForApproval::OnRequest,
sandbox_policy: codex_core::protocol::SandboxPolicy::WorkspaceWrite {
writable_roots: Vec::new(),
network_access: false,
exclude_tmpdir_env_var: false,
exclude_slash_tmp: false,
},
cwd: chat.config.cwd.clone(),
reasoning_effort: None,
history_log_id: 0,
history_entry_count: 0,
initial_messages: None,
rollout_path: std::env::temp_dir(),
}),
});
assert!(
chat.active_cell.is_none(),
"startup header should be replaced and flushed into history"
);
let history = drain_insert_history(&mut rx);
let rendered = history
.iter()
.map(|lines| lines_to_single_string(lines))
.collect::<String>();
assert!(
rendered.contains("model:") && rendered.contains("gpt-5.2-codex"),
"expected configured model in history: {rendered}"
);
// Drain rx to avoid unused warnings.
let _ = drain_insert_history(&mut rx);
let _ = op_rx.try_recv();
}
// Snapshot test: ChatWidget at very small heights (task running)
// Validates how status + composer are presented within tight space.
#[tokio::test]

View File

@@ -874,6 +874,7 @@ pub(crate) fn new_session_info(
// Header box rendered as history (so it appears at the very top)
let header = SessionHeaderHistoryCell::new(
model.clone(),
Style::default(),
reasoning_effort,
config.cwd.clone(),
CODEX_CLI_VERSION,
@@ -942,6 +943,7 @@ pub(crate) fn new_user_prompt(message: String) -> UserHistoryCell {
struct SessionHeaderHistoryCell {
version: &'static str,
model: String,
model_style: Style,
reasoning_effort: Option<ReasoningEffortConfig>,
directory: PathBuf,
}
@@ -949,6 +951,7 @@ struct SessionHeaderHistoryCell {
impl SessionHeaderHistoryCell {
fn new(
model: String,
model_style: Style,
reasoning_effort: Option<ReasoningEffortConfig>,
directory: PathBuf,
version: &'static str,
@@ -956,6 +959,7 @@ impl SessionHeaderHistoryCell {
Self {
version,
model,
model_style,
reasoning_effort,
directory,
}
@@ -1028,7 +1032,7 @@ impl HistoryCell for SessionHeaderHistoryCell {
let reasoning_label = self.reasoning_label();
let mut model_spans: Vec<Span<'static>> = vec![
Span::from(format!("{model_label} ")).dim(),
Span::from(self.model.clone()),
Span::styled(self.model.clone(), self.model_style),
];
if let Some(reasoning) = reasoning_label {
model_spans.push(Span::from(" "));
@@ -1056,6 +1060,43 @@ impl HistoryCell for SessionHeaderHistoryCell {
}
}
#[derive(Debug)]
pub(crate) struct StartupSessionHeaderHistoryCell(SessionHeaderHistoryCell);
impl StartupSessionHeaderHistoryCell {
pub(crate) fn new(directory: PathBuf) -> Self {
// Render a session header before we know the configured session details.
// This is a transient placeholder until the SessionConfigured event arrives.
let inner = SessionHeaderHistoryCell::new(
"loading".to_string(),
Style::default().dim().italic(),
None,
directory,
CODEX_CLI_VERSION,
);
Self(inner)
}
}
impl HistoryCell for StartupSessionHeaderHistoryCell {
fn display_lines(&self, width: u16) -> Vec<Line<'static>> {
self.0.display_lines(width)
}
fn desired_height(&self, width: u16) -> u16 {
self.0.desired_height(width)
}
fn transcript_lines(&self, _width: u16) -> Vec<Line<'static>> {
// Do not include this placeholder in transcript/log output.
Vec::new()
}
fn desired_transcript_height(&self, _width: u16) -> u16 {
0
}
}
#[derive(Debug)]
pub(crate) struct CompositeHistoryCell {
parts: Vec<Box<dyn HistoryCell>>,
@@ -2200,6 +2241,7 @@ mod tests {
fn session_header_includes_reasoning_level_when_present() {
let cell = SessionHeaderHistoryCell::new(
"gpt-4o".to_string(),
Style::default(),
Some(ReasoningEffortConfig::High),
std::env::temp_dir(),
"test",

File diff suppressed because it is too large.

View File

@@ -9,9 +9,8 @@ use crate::exec_command::strip_bash_lc_and_escape;
use crate::file_search::FileSearchManager;
use crate::history_cell::HistoryCell;
use crate::history_cell::UserHistoryCell;
use crate::model_migration::ModelMigrationOutcome;
use crate::model_migration::migration_copy_for_models;
use crate::model_migration::run_model_migration_prompt;
use crate::model_migration::StartupModelMigrationAction;
use crate::model_migration::maybe_run_startup_model_migration_prompt;
use crate::pager_overlay::Overlay;
use crate::render::highlight::highlight_bash_to_lines;
use crate::render::renderable::Renderable;
@@ -45,9 +44,6 @@ use codex_core::config::Config;
use codex_core::config::edit::ConfigEditsBuilder;
#[cfg(target_os = "windows")]
use codex_core::features::Feature;
use codex_core::models_manager::manager::ModelsManager;
use codex_core::models_manager::model_presets::HIDE_GPT_5_1_CODEX_MAX_MIGRATION_PROMPT_CONFIG;
use codex_core::models_manager::model_presets::HIDE_GPT5_1_MIGRATION_PROMPT_CONFIG;
use codex_core::protocol::EventMsg;
use codex_core::protocol::FinalOutput;
use codex_core::protocol::ListSkillsResponseEvent;
@@ -57,8 +53,6 @@ use codex_core::protocol::SkillErrorInfo;
use codex_core::protocol::TokenUsage;
use codex_core::terminal::terminal_info;
use codex_protocol::ConversationId;
use codex_protocol::openai_models::ModelPreset;
use codex_protocol::openai_models::ModelUpgrade;
use codex_protocol::openai_models::ReasoningEffort as ReasoningEffortConfig;
use color_eyre::eyre::Result;
use color_eyre::eyre::WrapErr;
@@ -74,7 +68,6 @@ use ratatui::widgets::Clear;
use ratatui::widgets::Paragraph;
use ratatui::widgets::WidgetRef;
use ratatui::widgets::Wrap;
use std::collections::BTreeMap;
use std::path::Path;
use std::path::PathBuf;
use std::sync::Arc;
@@ -164,161 +157,6 @@ struct SessionSummary {
resume_command: Option<String>,
}
fn should_show_model_migration_prompt(
current_model: &str,
target_model: &str,
seen_migrations: &BTreeMap<String, String>,
available_models: &[ModelPreset],
) -> bool {
if target_model == current_model {
return false;
}
if let Some(seen_target) = seen_migrations.get(current_model)
&& seen_target == target_model
{
return false;
}
if available_models
.iter()
.any(|preset| preset.model == current_model && preset.upgrade.is_some())
{
return true;
}
if available_models
.iter()
.any(|preset| preset.upgrade.as_ref().map(|u| u.id.as_str()) == Some(target_model))
{
return true;
}
false
}
fn migration_prompt_hidden(config: &Config, migration_config_key: &str) -> bool {
match migration_config_key {
HIDE_GPT_5_1_CODEX_MAX_MIGRATION_PROMPT_CONFIG => config
.notices
.hide_gpt_5_1_codex_max_migration_prompt
.unwrap_or(false),
HIDE_GPT5_1_MIGRATION_PROMPT_CONFIG => {
config.notices.hide_gpt5_1_migration_prompt.unwrap_or(false)
}
_ => false,
}
}
async fn handle_model_migration_prompt_if_needed(
tui: &mut tui::Tui,
config: &mut Config,
model: &str,
app_event_tx: &AppEventSender,
models_manager: Arc<ModelsManager>,
) -> Option<AppExitInfo> {
let available_models = models_manager.list_models(config).await;
let upgrade = available_models
.iter()
.find(|preset| preset.model == model)
.and_then(|preset| preset.upgrade.as_ref());
if let Some(ModelUpgrade {
id: target_model,
reasoning_effort_mapping,
migration_config_key,
..
}) = upgrade
{
if migration_prompt_hidden(config, migration_config_key.as_str()) {
return None;
}
let target_model = target_model.to_string();
if !should_show_model_migration_prompt(
model,
&target_model,
&config.notices.model_migrations,
&available_models,
) {
return None;
}
let current_preset = available_models.iter().find(|preset| preset.model == model);
let target_preset = available_models
.iter()
.find(|preset| preset.model == target_model);
let target_display_name = target_preset
.map(|preset| preset.display_name.clone())
.unwrap_or_else(|| target_model.clone());
let heading_label = if target_display_name == model {
target_model.clone()
} else {
target_display_name.clone()
};
let target_description = target_preset.and_then(|preset| {
if preset.description.is_empty() {
None
} else {
Some(preset.description.clone())
}
});
let can_opt_out = current_preset.is_some();
let prompt_copy = migration_copy_for_models(
model,
&target_model,
heading_label,
target_description,
can_opt_out,
);
match run_model_migration_prompt(tui, prompt_copy).await {
ModelMigrationOutcome::Accepted => {
app_event_tx.send(AppEvent::PersistModelMigrationPromptAcknowledged {
from_model: model.to_string(),
to_model: target_model.clone(),
});
config.model = Some(target_model.clone());
let mapped_effort = if let Some(reasoning_effort_mapping) = reasoning_effort_mapping
&& let Some(reasoning_effort) = config.model_reasoning_effort
{
reasoning_effort_mapping
.get(&reasoning_effort)
.cloned()
.or(config.model_reasoning_effort)
} else {
config.model_reasoning_effort
};
config.model_reasoning_effort = mapped_effort;
app_event_tx.send(AppEvent::UpdateModel(target_model.clone()));
app_event_tx.send(AppEvent::UpdateReasoningEffort(mapped_effort));
app_event_tx.send(AppEvent::PersistModelSelection {
model: target_model.clone(),
effort: mapped_effort,
});
}
ModelMigrationOutcome::Rejected => {
app_event_tx.send(AppEvent::PersistModelMigrationPromptAcknowledged {
from_model: model.to_string(),
to_model: target_model.clone(),
});
}
ModelMigrationOutcome::Exit => {
return Some(AppExitInfo {
token_usage: TokenUsage::default(),
conversation_id: None,
update_action: None,
session_lines: Vec::new(),
});
}
}
}
None
}
pub(crate) struct App {
pub(crate) server: Arc<ConversationManager>,
pub(crate) app_event_tx: AppEventSender,
@@ -383,7 +221,7 @@ impl App {
pub async fn run(
tui: &mut tui::Tui,
auth_manager: Arc<AuthManager>,
mut config: Config,
config: Config,
active_profile: Option<String>,
initial_prompt: Option<String>,
initial_images: Vec<PathBuf>,
@@ -395,27 +233,28 @@ impl App {
let (app_event_tx, mut app_event_rx) = unbounded_channel();
let app_event_tx = AppEventSender::new(app_event_tx);
let mut config = config;
let conversation_manager = Arc::new(ConversationManager::new(
auth_manager.clone(),
SessionSource::Cli,
));
let mut model = conversation_manager
.get_models_manager()
.get_model(&config.model, &config)
.await;
let exit_info = handle_model_migration_prompt_if_needed(
tui,
&mut config,
model.as_str(),
&app_event_tx,
conversation_manager.get_models_manager(),
)
.await;
if let Some(exit_info) = exit_info {
return Ok(exit_info);
}
if let Some(updated_model) = config.model.clone() {
model = updated_model;
if matches!(
maybe_run_startup_model_migration_prompt(
tui,
&mut config,
conversation_manager.get_models_manager().as_ref(),
)
.await?,
StartupModelMigrationAction::Exit
) {
return Ok(AppExitInfo {
token_usage: TokenUsage::default(),
conversation_id: None,
update_action: None,
session_lines: Vec::new(),
});
}
let enhanced_keys_supported = tui.enhanced_keys_supported();
@@ -432,7 +271,8 @@ impl App {
models_manager: conversation_manager.get_models_manager(),
feedback: feedback.clone(),
is_first_run,
model: model.clone(),
// The only truthful model is the one we get back on SessionConfigured.
model: String::new(),
};
ChatWidget::new(init, conversation_manager.clone())
}
@@ -458,7 +298,8 @@ impl App {
models_manager: conversation_manager.get_models_manager(),
feedback: feedback.clone(),
is_first_run,
model: model.clone(),
// The only truthful model is the one we get back on SessionConfigured.
model: String::new(),
};
ChatWidget::new_from_existing(
init,
@@ -496,7 +337,7 @@ impl App {
chat_widget,
auth_manager: auth_manager.clone(),
config,
current_model: model.clone(),
current_model: String::new(),
active_profile,
file_search,
enhanced_keys_supported,
@@ -1793,24 +1634,6 @@ impl App {
));
}
}
AppEvent::PersistModelMigrationPromptAcknowledged {
from_model,
to_model,
} => {
if let Err(err) = ConfigEditsBuilder::new(&self.config.codex_home)
.record_model_migration_seen(from_model.as_str(), to_model.as_str())
.apply()
.await
{
tracing::error!(
error = %err,
"failed to persist model migration prompt acknowledgement"
);
self.chat_widget.add_error_message(format!(
"Failed to save model migration prompt preference: {err}"
));
}
}
AppEvent::OpenApprovalsPopup => {
self.chat_widget.open_approvals_popup();
}
@@ -2205,45 +2028,6 @@ mod tests {
)
}
fn all_model_presets() -> Vec<ModelPreset> {
codex_core::models_manager::model_presets::all_model_presets().clone()
}
#[tokio::test]
async fn model_migration_prompt_only_shows_for_deprecated_models() {
let seen = BTreeMap::new();
assert!(should_show_model_migration_prompt(
"gpt-5",
"gpt-5.1",
&seen,
&all_model_presets()
));
assert!(should_show_model_migration_prompt(
"gpt-5-codex",
"gpt-5.1-codex",
&seen,
&all_model_presets()
));
assert!(should_show_model_migration_prompt(
"gpt-5-codex-mini",
"gpt-5.1-codex-mini",
&seen,
&all_model_presets()
));
assert!(should_show_model_migration_prompt(
"gpt-5.1-codex",
"gpt-5.1-codex-max",
&seen,
&all_model_presets()
));
assert!(!should_show_model_migration_prompt(
"gpt-5.1-codex",
"gpt-5.1-codex",
&seen,
&all_model_presets()
));
}
#[tokio::test]
async fn transcript_selection_copy_includes_offscreen_lines() {
let mut app = make_test_app().await;
@@ -2276,24 +2060,6 @@ mod tests {
assert_eq!(text, "one\ntwo\nthree\nfour");
}
#[tokio::test]
async fn model_migration_prompt_respects_hide_flag_and_self_target() {
let mut seen = BTreeMap::new();
seen.insert("gpt-5".to_string(), "gpt-5.1".to_string());
assert!(!should_show_model_migration_prompt(
"gpt-5",
"gpt-5.1",
&seen,
&all_model_presets()
));
assert!(!should_show_model_migration_prompt(
"gpt-5.1",
"gpt-5.1",
&seen,
&all_model_presets()
));
}
#[tokio::test]
async fn update_reasoning_effort_updates_config() {
let mut app = make_test_app().await;

View File

@@ -137,12 +137,6 @@ pub(crate) enum AppEvent {
/// Persist the acknowledgement flag for the rate limit switch prompt.
PersistRateLimitSwitchPromptHidden,
/// Persist the acknowledgement flag for the model migration prompt.
PersistModelMigrationPromptAcknowledged {
from_model: String,
to_model: String,
},
/// Skip the next world-writable scan (one-shot) after a user-confirmed continue.
#[cfg_attr(not(target_os = "windows"), allow(dead_code))]
SkipNextWorldWritableScan,

View File

@@ -142,6 +142,8 @@ use strum::IntoEnumIterator;
const USER_SHELL_COMMAND_HELP_TITLE: &str = "Prefix a command with ! to run it locally";
const USER_SHELL_COMMAND_HELP_HINT: &str = "Example: !ls";
use crate::model_migration;
// Track information about an in-flight exec command.
struct RunningCommand {
command: Vec<String>,
@@ -368,6 +370,10 @@ fn create_initial_user_message(text: String, image_paths: Vec<PathBuf>) -> Optio
}
impl ChatWidget {
fn is_session_configured(&self) -> bool {
self.conversation_id.is_some()
}
fn flush_answer_stream_with_separator(&mut self) {
if let Some(mut controller) = self.stream_controller.take()
&& let Some(cell) = controller.finalize()
@@ -405,13 +411,25 @@ impl ChatWidget {
self.current_rollout_path = Some(event.rollout_path.clone());
let initial_messages = event.initial_messages.clone();
let model_for_header = event.model.clone();
self.session_header.set_model(&model_for_header);
self.add_to_history(history_cell::new_session_info(
self.set_model(&model_for_header);
let session_info = history_cell::new_session_info(
&self.config,
&model_for_header,
event,
self.show_welcome_banner,
));
);
let is_startup_header_active = self.active_cell.as_ref().is_some_and(|cell| {
cell.as_any()
.downcast_ref::<history_cell::StartupSessionHeaderHistoryCell>()
.is_some()
});
if is_startup_header_active {
// Replace the startup placeholder header instead of duplicating it.
self.active_cell = Some(Box::new(session_info));
self.flush_active_cell();
} else {
self.add_to_history(session_info);
}
if let Some(messages) = initial_messages {
self.replay_initial_messages(messages);
}
@@ -423,7 +441,12 @@ impl ChatWidget {
});
if let Some(user_message) = self.initial_user_message.take() {
self.submit_user_message(user_message);
} else {
// If the user queued messages while startup was still in progress, kick off the first
// turn now that we know the session is configured.
self.maybe_send_next_queued_input();
}
self.refresh_pending_model_migration_notice();
if !self.suppress_session_configured_redraw {
self.request_redraw();
}
@@ -738,6 +761,15 @@ impl ChatWidget {
self.request_redraw();
}
fn refresh_pending_model_migration_notice(&self) {
let available_models = match self.models_manager.try_list_models(&self.config) {
Ok(models) => models,
Err(_) => return,
};
model_migration::refresh_pending_model_migration_notice(&self.config, &available_models);
}
fn on_mcp_startup_complete(&mut self, ev: McpStartupCompleteEvent) {
let mut parts = Vec::new();
if !ev.failed.is_empty() {
@@ -1282,7 +1314,13 @@ impl ChatWidget {
model,
} = common;
let mut config = config;
config.model = Some(model.clone());
let startup_dir = config.cwd.clone();
// `model` is an optional override provided by the app. Avoid clobbering the configured
// model with an empty string during startup; that would propagate to core and render as a
// blank model in the session header (/model current label, etc).
if !model.is_empty() {
config.model = Some(model.clone());
}
let mut rng = rand::rng();
let placeholder = EXAMPLE_PROMPTS[rng.random_range(0..EXAMPLE_PROMPTS.len())].to_string();
let codex_op_tx = spawn_agent(config.clone(), app_event_tx.clone(), conversation_manager);
@@ -1301,7 +1339,9 @@ impl ChatWidget {
animations_enabled: config.animations,
skills: None,
}),
active_cell: None,
active_cell: Some(Box::new(
history_cell::StartupSessionHeaderHistoryCell::new(startup_dir),
)),
config,
model: model.clone(),
auth_manager,
@@ -1555,7 +1595,7 @@ impl ChatWidget {
return;
}
const INIT_PROMPT: &str = include_str!("../prompt_for_init_command.md");
self.submit_user_message(INIT_PROMPT.to_string().into());
self.queue_user_message(INIT_PROMPT.to_string().into());
}
SlashCommand::Compact => {
self.clear_token_usage();
@@ -1689,6 +1729,14 @@ impl ChatWidget {
fn flush_active_cell(&mut self) {
if let Some(active) = self.active_cell.take() {
if active
.as_ref()
.as_any()
.downcast_ref::<history_cell::StartupSessionHeaderHistoryCell>()
.is_some()
{
// Keep the startup placeholder as the active cell; it is replaced,
// not flushed into history, once SessionConfigured arrives.
self.active_cell = Some(active);
return;
}
self.needs_final_message_separator = true;
self.app_event_tx.send(AppEvent::InsertHistoryCell(active));
}
@@ -1708,7 +1756,7 @@ impl ChatWidget {
}
fn queue_user_message(&mut self, user_message: UserMessage) {
if self.bottom_pane.is_task_running() {
if !self.is_session_configured() || self.bottom_pane.is_task_running() {
self.queued_user_messages.push_back(user_message);
self.refresh_queued_user_messages();
} else {
@@ -1717,6 +1765,12 @@ impl ChatWidget {
}
fn submit_user_message(&mut self, user_message: UserMessage) {
if !self.is_session_configured() {
self.queued_user_messages.push_back(user_message);
self.refresh_queued_user_messages();
return;
}
let UserMessage { text, image_paths } = user_message;
if text.is_empty() && image_paths.is_empty() {
return;
@@ -2022,6 +2076,9 @@ impl ChatWidget {
// If idle and there are queued inputs, submit exactly one to start the next turn.
fn maybe_send_next_queued_input(&mut self) {
if !self.is_session_configured() {
return;
}
if self.bottom_pane.is_task_running() {
return;
}
@@ -2208,6 +2265,14 @@ impl ChatWidget {
/// Open a popup to choose a quick auto model. Selecting "All models"
/// opens the full picker with every available preset.
pub(crate) fn open_model_popup(&mut self) {
if !self.is_session_configured() {
self.add_info_message(
"Model selection is disabled until startup completes.".to_string(),
None,
);
return;
}
let presets: Vec<ModelPreset> =
// todo(aibrahim): make this async function
match self.models_manager.try_list_models(&self.config) {

View File

@@ -0,0 +1,11 @@
---
source: tui2/src/chatwidget/tests.rs
assertion_line: 161
expression: active_blob(&chat)
---
╭───────────────────────────────────────╮
│ >_ OpenAI Codex (v0.0.0) │
│ │
│ model: loading /model to change │
│ directory: /home/user/project │
╰───────────────────────────────────────╯

View File

@@ -150,6 +150,64 @@ async fn resumed_initial_messages_render_history() {
);
}
#[tokio::test]
async fn startup_header_renders_in_active_cell_before_session_configured_snapshot() {
let (mut chat, _rx, _op_rx) = make_chatwidget_manual(None).await;
// Use a path that's not under $HOME so the rendered directory is stable across CI/dev.
let startup_dir = PathBuf::from("/home/user/project");
chat.active_cell = Some(Box::new(
crate::history_cell::StartupSessionHeaderHistoryCell::new(startup_dir),
));
assert_snapshot!("startup_header_active_cell", active_blob(&chat));
}
#[tokio::test]
async fn startup_header_is_replaced_on_session_configured() {
let (mut chat, mut rx, _ops) = make_chatwidget_manual(None).await;
let startup_dir = chat.config.cwd.clone();
chat.active_cell = Some(Box::new(
crate::history_cell::StartupSessionHeaderHistoryCell::new(startup_dir),
));
let conversation_id = ConversationId::new();
let rollout_file = NamedTempFile::new().unwrap();
let configured = codex_core::protocol::SessionConfiguredEvent {
session_id: conversation_id,
model: "test-model".to_string(),
model_provider_id: "test-provider".to_string(),
approval_policy: AskForApproval::Never,
sandbox_policy: SandboxPolicy::ReadOnly,
cwd: PathBuf::from("/home/user/project"),
reasoning_effort: Some(ReasoningEffortConfig::default()),
history_log_id: 0,
history_entry_count: 0,
initial_messages: None,
rollout_path: rollout_file.path().to_path_buf(),
};
chat.handle_codex_event(Event {
id: "initial".into(),
msg: EventMsg::SessionConfigured(configured),
});
assert!(chat.active_cell.is_none());
let cells = drain_insert_history(&mut rx);
let merged_lines = cells
.iter()
.flat_map(|lines| {
lines
.iter()
.flat_map(|line| line.spans.iter())
.map(|span| span.content.clone())
})
.collect::<String>();
assert!(
merged_lines.contains("test-model"),
"expected resolved model to be recorded in history cell, got:\n{merged_lines}",
);
}
/// Entering review mode uses the hint provided by the review request.
#[tokio::test]
async fn entered_review_mode_uses_request_hint() {
@@ -981,6 +1039,7 @@ async fn alt_up_edits_most_recent_queued_message() {
#[tokio::test]
async fn enqueueing_history_prompt_multiple_times_is_stable() {
let (mut chat, _rx, _op_rx) = make_chatwidget_manual(None).await;
chat.conversation_id = Some(ConversationId::new());
// Submit an initial prompt to seed history.
chat.bottom_pane.set_composer_text("repeat me".to_string());
@@ -1725,6 +1784,7 @@ fn render_bottom_popup(chat: &ChatWidget, width: u16) -> String {
#[tokio::test]
async fn model_selection_popup_snapshot() {
let (mut chat, _rx, _op_rx) = make_chatwidget_manual(Some("gpt-5-codex")).await;
chat.conversation_id = Some(ConversationId::new());
chat.open_model_popup();
let popup = render_bottom_popup(&chat, 80);
@@ -1939,6 +1999,7 @@ async fn feedback_upload_consent_popup_snapshot() {
#[tokio::test]
async fn reasoning_popup_escape_returns_to_model_popup() {
let (mut chat, _rx, _op_rx) = make_chatwidget_manual(Some("gpt-5.1-codex-max")).await;
chat.conversation_id = Some(ConversationId::new());
chat.open_model_popup();
let preset = get_available_model(&chat, "gpt-5.1-codex-max");

View File

@@ -757,6 +757,7 @@ pub(crate) fn new_session_info(
// Header box rendered as history (so it appears at the very top)
let header = SessionHeaderHistoryCell::new(
model.clone(),
Style::default(),
reasoning_effort,
config.cwd.clone(),
CODEX_CLI_VERSION,
@@ -825,6 +826,7 @@ pub(crate) fn new_user_prompt(message: String) -> UserHistoryCell {
struct SessionHeaderHistoryCell {
version: &'static str,
model: String,
model_style: Style,
reasoning_effort: Option<ReasoningEffortConfig>,
directory: PathBuf,
}
@@ -832,6 +834,7 @@ struct SessionHeaderHistoryCell {
impl SessionHeaderHistoryCell {
fn new(
model: String,
model_style: Style,
reasoning_effort: Option<ReasoningEffortConfig>,
directory: PathBuf,
version: &'static str,
@@ -839,6 +842,7 @@ impl SessionHeaderHistoryCell {
Self {
version,
model,
model_style,
reasoning_effort,
directory,
}
@@ -911,7 +915,7 @@ impl HistoryCell for SessionHeaderHistoryCell {
let reasoning_label = self.reasoning_label();
let mut model_spans: Vec<Span<'static>> = vec![
Span::from(format!("{model_label} ")).dim(),
Span::from(self.model.clone()),
Span::from(self.model.clone()).set_style(self.model_style),
];
if let Some(reasoning) = reasoning_label {
model_spans.push(Span::from(" "));
@@ -939,6 +943,43 @@ impl HistoryCell for SessionHeaderHistoryCell {
}
}
/// Startup-only session header shown in the active cell before SessionConfigured arrives.
///
/// This is a transient UI element and is excluded from transcripts/copy.
#[derive(Debug)]
pub(crate) struct StartupSessionHeaderHistoryCell(SessionHeaderHistoryCell);
impl StartupSessionHeaderHistoryCell {
pub(crate) fn new(directory: PathBuf) -> Self {
let inner = SessionHeaderHistoryCell::new(
"loading".to_string(),
Style::default().dim().italic(),
None,
directory,
CODEX_CLI_VERSION,
);
Self(inner)
}
}
impl HistoryCell for StartupSessionHeaderHistoryCell {
fn display_lines(&self, width: u16) -> Vec<Line<'static>> {
self.0.display_lines(width)
}
fn desired_height(&self, width: u16) -> u16 {
self.0.desired_height(width)
}
fn transcript_lines(&self, _width: u16) -> Vec<Line<'static>> {
Vec::new()
}
fn desired_transcript_height(&self, _width: u16) -> u16 {
0
}
}
#[derive(Debug)]
pub(crate) struct CompositeHistoryCell {
parts: Vec<Box<dyn HistoryCell>>,
@@ -2023,6 +2064,7 @@ mod tests {
fn session_header_includes_reasoning_level_when_present() {
let cell = SessionHeaderHistoryCell::new(
"gpt-4o".to_string(),
Style::default(),
Some(ReasoningEffortConfig::High),
std::env::temp_dir(),
"test",

File diff suppressed because it is too large.