Compare commits

...

2 Commits

Author SHA1 Message Date
Felipe Coury
88396ab6ea docs(tui): clarify scoped model flow
Document the scoped model-selection flow in the TUI and the config
helpers added for `/model plan` and `/model default`.
2026-04-03 16:18:06 -03:00
Felipe Coury
f39b433264 feat(tui): add scoped /model targets
Add `/model plan` and `/model default` so users can update
mode-specific models without switching modes first. The picker,
reasoning popup, and app events now target Default or Plan
state explicitly.

Persist `plan_mode_model` in config and clear accepted
inline slash-command drafts so the composer does not keep
`/model plan` or `/model default` after opening the picker.
2026-04-03 15:44:10 -03:00
14 changed files with 609 additions and 151 deletions
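
In practice, the flow these commits add means a `/model plan` selection ends up as scoped keys in `config.toml`. A sketch of the resulting file, using the illustrative model slug from the test fixtures in this diff:

# After confirming a selection opened via `/model plan`
plan_mode_model = "gpt-5.4-pro"
plan_mode_reasoning_effort = "high"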

View File

@@ -569,6 +569,9 @@
"personality": {
"$ref": "#/definitions/Personality"
},
"plan_mode_model": {
"type": "string"
},
"plan_mode_reasoning_effort": {
"$ref": "#/definitions/ReasoningEffort"
},
@@ -2439,6 +2442,9 @@
],
"description": "Optionally specify a personality for the model"
},
"plan_mode_model": {
"type": "string"
},
"plan_mode_reasoning_effort": {
"$ref": "#/definitions/ReasoningEffort"
},

View File

@@ -4476,6 +4476,7 @@ fn test_precedence_fixture_with_o3_profile() -> std::io::Result<()> {
hide_agent_reasoning: false,
show_raw_agent_reasoning: false,
model_reasoning_effort: Some(ReasoningEffort::High),
plan_mode_model: None,
plan_mode_reasoning_effort: None,
model_reasoning_summary: Some(ReasoningSummary::Detailed),
model_supports_reasoning_summaries: None,
@@ -4618,6 +4619,7 @@ fn test_precedence_fixture_with_gpt3_profile() -> std::io::Result<()> {
hide_agent_reasoning: false,
show_raw_agent_reasoning: false,
model_reasoning_effort: None,
plan_mode_model: None,
plan_mode_reasoning_effort: None,
model_reasoning_summary: None,
model_supports_reasoning_summaries: None,
@@ -4758,6 +4760,7 @@ fn test_precedence_fixture_with_zdr_profile() -> std::io::Result<()> {
hide_agent_reasoning: false,
show_raw_agent_reasoning: false,
model_reasoning_effort: None,
plan_mode_model: None,
plan_mode_reasoning_effort: None,
model_reasoning_summary: None,
model_supports_reasoning_summaries: None,
@@ -4884,6 +4887,7 @@ fn test_precedence_fixture_with_gpt5_profile() -> std::io::Result<()> {
hide_agent_reasoning: false,
show_raw_agent_reasoning: false,
model_reasoning_effort: Some(ReasoningEffort::High),
plan_mode_model: None,
plan_mode_reasoning_effort: None,
model_reasoning_summary: Some(ReasoningSummary::Detailed),
model_supports_reasoning_summaries: None,

View File

@@ -868,6 +868,31 @@ impl ConfigEditsBuilder {
self
}
/// Set or clear the Plan-mode-specific model override in `config.toml`.
///
/// When `model` is `Some`, the `plan_mode_model` key is written at the top
/// level (or under the active profile). When `None`, the key is removed so
/// Plan mode falls back to the global `model` default.
pub fn set_plan_mode_model(mut self, model: Option<&str>) -> Self {
let segments = if let Some(profile) = self.profile.as_ref() {
vec![
"profiles".to_string(),
profile.clone(),
"plan_mode_model".to_string(),
]
} else {
vec!["plan_mode_model".to_string()]
};
match model {
Some(model) => self.edits.push(ConfigEdit::SetPath {
segments,
value: value(model),
}),
None => self.edits.push(ConfigEdit::ClearPath { segments }),
}
self
}
pub fn set_service_tier(mut self, service_tier: Option<ServiceTier>) -> Self {
self.edits.push(ConfigEdit::SetServiceTier { service_tier });
self
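
As a sketch of the two edit paths above (slug illustrative): `Some` writes the key, `None` clears it so Plan mode falls back to the global `model`.

# set_plan_mode_model(Some("gpt-5.4-pro")) yields:
plan_mode_model = "gpt-5.4-pro"

# set_plan_mode_model(None) removes the key; with an active profile the same
# key is written or cleared under [profiles.<name>] instead.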

View File

@@ -967,6 +967,23 @@ model_reasoning_effort = "high"
assert_eq!(contents, expected);
}
#[tokio::test]
async fn async_builder_set_plan_mode_model_persists() {
let tmp = tempdir().expect("tmpdir");
let codex_home = tmp.path().to_path_buf();
ConfigEditsBuilder::new(&codex_home)
.set_plan_mode_model(Some("gpt-5.4-pro"))
.apply()
.await
.expect("persist");
let contents = std::fs::read_to_string(codex_home.join(CONFIG_TOML_FILE)).expect("read config");
let expected = r#"plan_mode_model = "gpt-5.4-pro"
"#;
assert_eq!(contents, expected);
}
#[test]
fn blocking_builder_set_model_round_trips_back_and_forth() {
let tmp = tempdir().expect("tmpdir");

View File

@@ -463,6 +463,10 @@ pub struct Config {
/// Value to use for `reasoning.effort` when making a request using the
/// Responses API.
pub model_reasoning_effort: Option<ReasoningEffort>,
/// Optional Plan-mode-specific model override used by the TUI.
///
/// When unset, Plan mode inherits the global `model` default.
pub plan_mode_model: Option<String>,
/// Optional Plan-mode-specific reasoning effort override used by the TUI.
///
/// When unset, Plan mode uses the built-in Plan preset default (currently
@@ -1299,6 +1303,7 @@ pub struct ConfigToml {
pub show_raw_agent_reasoning: Option<bool>,
pub model_reasoning_effort: Option<ReasoningEffort>,
pub plan_mode_model: Option<String>,
pub plan_mode_reasoning_effort: Option<ReasoningEffort>,
pub model_reasoning_summary: Option<ReasoningSummary>,
/// Optional verbosity control for GPT-5 models (Responses API `text.verbosity`).
@@ -2428,6 +2433,7 @@ impl Config {
let forced_login_method = cfg.forced_login_method;
let model = model.or(config_profile.model).or(cfg.model);
let plan_mode_model = config_profile.plan_mode_model.or(cfg.plan_mode_model);
let service_tier = service_tier_override
.unwrap_or_else(|| config_profile.service_tier.or(cfg.service_tier));
let service_tier = match service_tier {
@@ -2679,6 +2685,7 @@ impl Config {
model_reasoning_effort: config_profile
.model_reasoning_effort
.or(cfg.model_reasoning_effort),
plan_mode_model,
plan_mode_reasoning_effort: config_profile
.plan_mode_reasoning_effort
.or(cfg.plan_mode_reasoning_effort),
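
The `.or()` chain gives the profile-scoped key precedence over the top-level one. A sketch with illustrative values (profile name borrowed from the precedence tests above):

# Top-level Plan override
plan_mode_model = "gpt-5"

[profiles.o3]
plan_mode_model = "gpt-5.4-pro"   # wins while profile `o3` is active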

View File

@@ -30,6 +30,7 @@ pub struct ConfigProfile {
pub approvals_reviewer: Option<ApprovalsReviewer>,
pub sandbox_mode: Option<SandboxMode>,
pub model_reasoning_effort: Option<ReasoningEffort>,
pub plan_mode_model: Option<String>,
pub plan_mode_reasoning_effort: Option<ReasoningEffort>,
pub model_reasoning_summary: Option<ReasoningSummary>,
pub model_verbosity: Option<Verbosity>,

View File

@@ -4453,6 +4453,13 @@ impl App {
AppEvent::UpdateModel(model) => {
self.chat_widget.set_model(&model);
}
AppEvent::UpdateDefaultModel(model) => {
self.chat_widget.set_default_mode_model(&model);
}
AppEvent::UpdatePlanModeModel(model) => {
self.config.plan_mode_model = Some(model.clone());
self.chat_widget.set_plan_mode_model(&model);
}
AppEvent::UpdateCollaborationMode(mask) => {
self.chat_widget.set_collaboration_mask(mask);
}
@@ -4462,15 +4469,15 @@ impl App {
AppEvent::OpenRealtimeAudioDeviceSelection { kind } => {
self.chat_widget.open_realtime_audio_device_selection(kind);
}
AppEvent::OpenReasoningPopup { model } => {
self.chat_widget.open_reasoning_popup(model);
AppEvent::OpenReasoningPopup { model, target } => {
self.chat_widget.open_reasoning_popup(model, target);
}
AppEvent::OpenPlanReasoningScopePrompt { model, effort } => {
self.chat_widget
.open_plan_reasoning_scope_prompt(model, effort);
}
AppEvent::OpenAllModelsPopup { models } => {
self.chat_widget.open_all_models_popup(models);
AppEvent::OpenAllModelsPopup { models, target } => {
self.chat_widget.open_all_models_popup(models, target);
}
AppEvent::OpenFullAccessConfirmation {
preset,
@@ -4881,6 +4888,63 @@ impl App {
}
}
}
AppEvent::PersistPlanModeModelSelection { model, effort } => {
let profile = self.active_profile.as_deref();
let persist_result = ConfigEditsBuilder::new(&self.config.codex_home)
.with_profile(profile)
.set_plan_mode_model(Some(model.as_str()))
.with_edits([{
let segments = if let Some(profile) = profile {
vec![
"profiles".to_string(),
profile.to_string(),
"plan_mode_reasoning_effort".to_string(),
]
} else {
vec!["plan_mode_reasoning_effort".to_string()]
};
if let Some(effort) = effort {
ConfigEdit::SetPath {
segments,
value: effort.to_string().into(),
}
} else {
ConfigEdit::ClearPath { segments }
}
}])
.apply()
.await;
match persist_result {
Ok(()) => {
let mut message = format!("Plan mode model changed to {model}");
if let Some(label) = Self::reasoning_label_for(&model, effort) {
message.push(' ');
message.push_str(label);
}
if let Some(profile) = profile {
message.push_str(" for ");
message.push_str(profile);
message.push_str(" profile");
}
self.chat_widget.add_info_message(message, /*hint*/ None);
}
Err(err) => {
tracing::error!(
error = %err,
"failed to persist Plan mode model selection"
);
if let Some(profile) = profile {
self.chat_widget.add_error_message(format!(
"Failed to save Plan mode model for profile `{profile}`: {err}"
));
} else {
self.chat_widget.add_error_message(format!(
"Failed to save Plan mode model: {err}"
));
}
}
}
}
AppEvent::PluginUninstallLoaded {
cwd,
plugin_id: _plugin_id,
@@ -5196,45 +5260,6 @@ impl App {
));
}
}
AppEvent::PersistPlanModeReasoningEffort(effort) => {
let profile = self.active_profile.as_deref();
let segments = if let Some(profile) = profile {
vec![
"profiles".to_string(),
profile.to_string(),
"plan_mode_reasoning_effort".to_string(),
]
} else {
vec!["plan_mode_reasoning_effort".to_string()]
};
let edit = if let Some(effort) = effort {
ConfigEdit::SetPath {
segments,
value: effort.to_string().into(),
}
} else {
ConfigEdit::ClearPath { segments }
};
if let Err(err) = ConfigEditsBuilder::new(&self.config.codex_home)
.with_edits([edit])
.apply()
.await
{
tracing::error!(
error = %err,
"failed to persist plan mode reasoning effort"
);
if let Some(profile) = profile {
self.chat_widget.add_error_message(format!(
"Failed to save Plan mode reasoning effort for profile `{profile}`: {err}"
));
} else {
self.chat_widget.add_error_message(format!(
"Failed to save Plan mode reasoning effort: {err}"
));
}
}
}
AppEvent::PersistModelMigrationPromptAcknowledged {
from_model,
to_model,

View File

@@ -34,6 +34,7 @@ use crate::history_cell::HistoryCell;
use codex_config::types::ApprovalsReviewer;
use codex_features::Feature;
use codex_protocol::config_types::CollaborationModeMask;
use codex_protocol::config_types::ModeKind;
use codex_protocol::config_types::Personality;
use codex_protocol::config_types::ServiceTier;
use codex_protocol::openai_models::ReasoningEffort;
@@ -69,6 +70,35 @@ pub(crate) enum WindowsSandboxEnableMode {
Legacy,
}
/// Where a model selection from the picker should be applied.
///
/// The TUI model picker can target a specific collaboration mode without
/// requiring the user to switch into that mode first. `Active` preserves the
/// legacy behavior (mutate whatever is current); `Default` and `Plan` allow
/// cross-mode selection, e.g. `/model plan` while in Default mode.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub(crate) enum ModelSelectionTarget {
/// Apply a model selection to whichever collaboration mode is active now.
Active,
/// Apply a model selection to Default mode, even while Plan mode is active.
Default,
/// Apply a model selection to Plan mode, even while Default mode is active.
Plan,
}
impl ModelSelectionTarget {
/// Resolve the abstract target into a concrete [`ModeKind`] given the
/// currently active mode. `Active` is the only variant that depends on
/// runtime state; the others are constant.
pub(crate) fn mode_kind(self, active_mode: ModeKind) -> ModeKind {
match self {
Self::Active => active_mode,
Self::Default => ModeKind::Default,
Self::Plan => ModeKind::Plan,
}
}
}
#[derive(Debug, Clone)]
#[cfg_attr(not(target_os = "windows"), allow(dead_code))]
pub(crate) struct ConnectorsSnapshot {
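
A minimal illustrative check of the resolution rule (assumes `ModeKind` is `Copy`, `PartialEq`, and `Debug`, as derived protocol config types typically are):

#[test]
fn active_target_follows_runtime_mode() {
    // `Active` defers to whatever mode is live; the other variants are fixed.
    let active = ModeKind::Plan;
    assert_eq!(ModelSelectionTarget::Active.mode_kind(active), ModeKind::Plan);
    assert_eq!(ModelSelectionTarget::Default.mode_kind(active), ModeKind::Default);
    assert_eq!(ModelSelectionTarget::Plan.mode_kind(active), ModeKind::Plan);
}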
@@ -285,6 +315,12 @@ pub(crate) enum AppEvent {
/// Update the current model slug in the running app and widget.
UpdateModel(String),
/// Update the Default-mode model slug without touching the active Plan mask.
UpdateDefaultModel(String),
/// Update the Plan-mode model override in memory.
UpdatePlanModeModel(String),
/// Update the active collaboration mask in the running app and widget.
UpdateCollaborationMode(CollaborationModeMask),
@@ -297,6 +333,12 @@ pub(crate) enum AppEvent {
effort: Option<ReasoningEffort>,
},
/// Persist the selected Plan-mode model override and reasoning effort.
PersistPlanModeModelSelection {
model: String,
effort: Option<ReasoningEffort>,
},
/// Persist the selected personality to the appropriate config.
PersistPersonalitySelection {
personality: Personality,
@@ -327,6 +369,7 @@ pub(crate) enum AppEvent {
/// Open the reasoning selection popup after picking a model.
OpenReasoningPopup {
model: ModelPreset,
target: ModelSelectionTarget,
},
/// Open the Plan-mode reasoning scope prompt for the selected model/effort.
@@ -338,6 +381,7 @@ pub(crate) enum AppEvent {
/// Open the full model picker (non-auto models).
OpenAllModelsPopup {
models: Vec<ModelPreset>,
target: ModelSelectionTarget,
},
/// Open the confirmation prompt before enabling full access mode.
@@ -445,9 +489,6 @@ pub(crate) enum AppEvent {
/// Persist the acknowledgement flag for the rate limit switch prompt.
PersistRateLimitSwitchPromptHidden,
/// Persist the Plan-mode-specific reasoning effort.
PersistPlanModeReasoningEffort(Option<ReasoningEffort>),
/// Persist the acknowledgement flag for the model migration prompt.
PersistModelMigrationPromptAcknowledged {
from_model: String,

View File

@@ -255,6 +255,23 @@ const PLAN_MODE_REASONING_SCOPE_ALL_MODES: &str = "Apply to global default and P
const CONNECTORS_SELECTION_VIEW_ID: &str = "connectors-selection";
const TUI_STUB_MESSAGE: &str = "Not available in TUI yet.";
/// The outcome of resolving a [`ModelSelectionTarget`] against the current
/// widget state.
///
/// This sits between the user-facing target (which mode they *want* to
/// change) and the event dispatch (which `AppEvent`s to send). The
/// `PromptPlanScope` variant triggers the existing "apply to plan only vs.
/// all modes" confirmation dialog before committing the selection.
#[derive(Clone, Copy)]
enum ModelSelectionResolution {
/// Persist as the Default-mode model and reasoning effort.
UpdateDefault,
/// Persist as the Plan-mode model override and reasoning effort.
UpdatePlan,
/// Show the Plan-mode scope prompt before persisting.
PromptPlanScope,
}
/// Choose the keybinding used to edit the most-recently queued message.
///
/// Apple Terminal, Warp, and VSCode integrated terminals intercept or silently
@@ -293,6 +310,7 @@ fn queued_message_edit_binding_for_terminal(terminal_info: TerminalInfo) -> KeyB
use crate::app_event::AppEvent;
use crate::app_event::ConnectorsSnapshot;
use crate::app_event::ExitMode;
use crate::app_event::ModelSelectionTarget;
#[cfg(target_os = "windows")]
use crate::app_event::WindowsSandboxEnableMode;
use crate::app_event_sender::AppEventSender;
@@ -5366,6 +5384,20 @@ impl ChatWidget {
let trimmed = args.trim();
match cmd {
SlashCommand::Model => match trimmed.to_ascii_lowercase().as_str() {
"" => self.open_model_popup(),
"plan" => {
self.open_model_popup_for_target(ModelSelectionTarget::Plan);
self.consume_accepted_inline_args_command();
}
"default" => {
self.open_model_popup_for_target(ModelSelectionTarget::Default);
self.consume_accepted_inline_args_command();
}
_ => {
self.add_error_message("Usage: /model [plan|default]".to_string());
}
},
SlashCommand::Fast => {
if trimmed.is_empty() {
self.dispatch_command(cmd);
@@ -5473,6 +5505,21 @@ impl ChatWidget {
}
}
/// Drain the composer's pending submission state after a slash command
/// with inline args has been accepted and the popup opened.
///
/// Without this, the composer would still hold the `/model plan` text and
/// treat the next Enter as a duplicate submission.
fn consume_accepted_inline_args_command(&mut self) {
let Some((_prepared_args, _prepared_elements)) = self
.bottom_pane
.prepare_inline_args_submission(/*record_history*/ false)
else {
return;
};
self.bottom_pane.drain_pending_submission_state();
}
fn show_rename_prompt(&mut self) {
let tx = self.app_event_tx.clone();
let has_name = self
@@ -7672,6 +7719,14 @@ impl ChatWidget {
/// Open a popup to choose a quick auto model. Selecting "All models"
/// opens the full picker with every available preset.
pub(crate) fn open_model_popup(&mut self) {
self.open_model_popup_for_target(ModelSelectionTarget::Active);
}
/// Open the model picker popup scoped to a specific selection target.
///
/// The target determines which mode's current model is highlighted and
/// which persistence path is used when the user confirms a selection.
pub(crate) fn open_model_popup_for_target(&mut self, target: ModelSelectionTarget) {
if !self.is_session_configured() {
self.add_info_message(
"Model selection is disabled until startup completes.".to_string(),
@@ -7690,7 +7745,7 @@ impl ChatWidget {
return;
}
};
self.open_model_popup_with_presets(presets);
self.open_model_popup_with_presets(presets, target);
}
pub(crate) fn open_personality_popup(&mut self) {
@@ -7932,6 +7987,29 @@ impl ChatWidget {
Box::new(header)
}
/// Append a mode-scope suffix to the popup title when the selection targets
/// a specific mode (e.g. "Select Model for Plan Mode").
fn model_menu_title(&self, target: ModelSelectionTarget, base_title: &str) -> String {
match target {
ModelSelectionTarget::Active => base_title.to_string(),
ModelSelectionTarget::Default => format!("{base_title} for Default Mode"),
ModelSelectionTarget::Plan => format!("{base_title} for Plan Mode"),
}
}
/// Append a mode-scope clarification to the popup subtitle.
fn model_menu_subtitle(&self, target: ModelSelectionTarget, base_subtitle: &str) -> String {
match target {
ModelSelectionTarget::Active => base_subtitle.to_string(),
ModelSelectionTarget::Default => {
format!("{base_subtitle} Selection applies to Default mode.")
}
ModelSelectionTarget::Plan => {
format!("{base_subtitle} Selection applies to Plan mode.")
}
}
}
fn model_menu_warning_line(&self) -> Option<Line<'static>> {
let base_url = self.custom_openai_base_url()?;
let warning = format!(
@@ -7959,25 +8037,29 @@ impl ChatWidget {
Some(trimmed.to_string())
}
pub(crate) fn open_model_popup_with_presets(&mut self, presets: Vec<ModelPreset>) {
pub(crate) fn open_model_popup_with_presets(
&mut self,
presets: Vec<ModelPreset>,
target: ModelSelectionTarget,
) {
let presets: Vec<ModelPreset> = presets
.into_iter()
.filter(|preset| preset.show_in_picker)
.collect();
let current_model = self.current_model();
let current_model = self.model_for_selection_target(target);
let current_label = presets
.iter()
.find(|preset| preset.model.as_str() == current_model)
.map(|preset| preset.model.to_string())
.unwrap_or_else(|| self.model_display_name().to_string());
.unwrap_or_else(|| self.model_display_name_for_selection_target(target));
let (mut auto_presets, other_presets): (Vec<ModelPreset>, Vec<ModelPreset>) = presets
.into_iter()
.partition(|preset| Self::is_auto_model(&preset.model));
if auto_presets.is_empty() {
self.open_all_models_popup(other_presets);
self.open_all_models_popup(other_presets, target);
return;
}
@@ -7988,14 +8070,15 @@ impl ChatWidget {
let description =
(!preset.description.is_empty()).then_some(preset.description.clone());
let model = preset.model.clone();
let should_prompt_plan_mode_scope = self.should_prompt_plan_mode_reasoning_scope(
let resolution = self.model_selection_resolution(
target,
model.as_str(),
Some(preset.default_reasoning_effort),
);
let actions = Self::model_selection_actions(
model.clone(),
Some(preset.default_reasoning_effort),
should_prompt_plan_mode_scope,
resolution,
);
SelectionItem {
name: model.clone(),
@@ -8014,6 +8097,7 @@ impl ChatWidget {
let actions: Vec<SelectionAction> = vec![Box::new(move |tx| {
tx.send(AppEvent::OpenAllModelsPopup {
models: all_models.clone(),
target,
});
})];
@@ -8033,8 +8117,8 @@ impl ChatWidget {
}
let header = self.model_menu_header(
"Select Model",
"Pick a quick auto mode or browse all models.",
&self.model_menu_title(target, "Select Model"),
&self.model_menu_subtitle(target, "Pick a quick auto mode or browse all models."),
);
self.bottom_pane.show_selection_view(SelectionViewParams {
footer_hint: Some(standard_popup_hint_line()),
@@ -8057,7 +8141,11 @@ impl ChatWidget {
}
}
pub(crate) fn open_all_models_popup(&mut self, presets: Vec<ModelPreset>) {
pub(crate) fn open_all_models_popup(
&mut self,
presets: Vec<ModelPreset>,
target: ModelSelectionTarget,
) {
if presets.is_empty() {
self.add_info_message(
"No additional models are available right now.".to_string(),
@@ -8070,13 +8158,14 @@ impl ChatWidget {
for preset in presets.into_iter() {
let description =
(!preset.description.is_empty()).then_some(preset.description.to_string());
let is_current = preset.model.as_str() == self.current_model();
let is_current = preset.model.as_str() == self.model_for_selection_target(target);
let single_supported_effort = preset.supported_reasoning_efforts.len() == 1;
let preset_for_action = preset.clone();
let actions: Vec<SelectionAction> = vec![Box::new(move |tx| {
let preset_for_event = preset_for_action.clone();
tx.send(AppEvent::OpenReasoningPopup {
model: preset_for_event,
target,
});
})];
items.push(SelectionItem {
@@ -8091,8 +8180,11 @@ impl ChatWidget {
}
let header = self.model_menu_header(
"Select Model and Effort",
"Access legacy models by running codex -m <model_name> or in your config.toml",
&self.model_menu_title(target, "Select Model and Effort"),
&self.model_menu_subtitle(
target,
"Access legacy models by running codex -m <model_name> or in your config.toml",
),
);
self.bottom_pane.show_selection_view(SelectionViewParams {
footer_hint: Some("Press enter to select reasoning effort, or esc to dismiss.".into()),
@@ -8147,29 +8239,71 @@ impl ChatWidget {
});
}
/// Build the closure(s) that fire when the user confirms a model selection.
///
/// The returned actions send the appropriate `AppEvent` sequence for the
/// given [`ModelSelectionResolution`]. This is a pure factory — it captures
/// the resolution at popup-creation time so the popup callback doesn't need
/// access to widget state.
fn model_selection_actions(
model_for_action: String,
effort_for_action: Option<ReasoningEffortConfig>,
should_prompt_plan_mode_scope: bool,
resolution: ModelSelectionResolution,
) -> Vec<SelectionAction> {
vec![Box::new(move |tx| {
if should_prompt_plan_mode_scope {
vec![Box::new(move |tx| match resolution {
ModelSelectionResolution::PromptPlanScope => {
tx.send(AppEvent::OpenPlanReasoningScopePrompt {
model: model_for_action.clone(),
effort: effort_for_action,
});
return;
}
tx.send(AppEvent::UpdateModel(model_for_action.clone()));
tx.send(AppEvent::UpdateReasoningEffort(effort_for_action));
tx.send(AppEvent::PersistModelSelection {
model: model_for_action.clone(),
effort: effort_for_action,
});
ModelSelectionResolution::UpdateDefault => {
tx.send(AppEvent::UpdateDefaultModel(model_for_action.clone()));
tx.send(AppEvent::UpdateReasoningEffort(effort_for_action));
tx.send(AppEvent::PersistModelSelection {
model: model_for_action.clone(),
effort: effort_for_action,
});
}
ModelSelectionResolution::UpdatePlan => {
tx.send(AppEvent::UpdatePlanModeModel(model_for_action.clone()));
tx.send(AppEvent::UpdatePlanModeReasoningEffort(effort_for_action));
tx.send(AppEvent::PersistPlanModeModelSelection {
model: model_for_action.clone(),
effort: effort_for_action,
});
}
})]
}
/// Decide how a model selection should be applied given the target and the
/// current widget state.
///
/// For explicit `Default`/`Plan` targets the mapping is trivial. For
/// `Active`, the function consults `should_prompt_plan_mode_reasoning_scope`
/// (which checks whether the effort differs from the Plan preset default)
/// and falls back to whichever mode is currently active.
fn model_selection_resolution(
&self,
target: ModelSelectionTarget,
selected_model: &str,
selected_effort: Option<ReasoningEffortConfig>,
) -> ModelSelectionResolution {
match target {
ModelSelectionTarget::Active => {
if self.should_prompt_plan_mode_reasoning_scope(selected_model, selected_effort) {
ModelSelectionResolution::PromptPlanScope
} else if self.active_mode_kind() == ModeKind::Plan {
ModelSelectionResolution::UpdatePlan
} else {
ModelSelectionResolution::UpdateDefault
}
}
ModelSelectionTarget::Default => ModelSelectionResolution::UpdateDefault,
ModelSelectionTarget::Plan => ModelSelectionResolution::UpdatePlan,
}
}
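
For reference, the decision table implied by `model_selection_resolution`, together with the event sequences `model_selection_actions` dispatches for each outcome (a summary of the code above, not new behavior):

// target  | state consulted (Active only)      | resolution
// Default | -                                  | UpdateDefault
// Plan    | -                                  | UpdatePlan
// Active  | effort differs from Plan preset    | PromptPlanScope
// Active  | active mode is Plan                | UpdatePlan
// Active  | otherwise                          | UpdateDefault
//
// resolution      -> events sent on confirm
// PromptPlanScope -> OpenPlanReasoningScopePrompt { model, effort }
// UpdateDefault   -> UpdateDefaultModel, UpdateReasoningEffort,
//                    PersistModelSelection { model, effort }
// UpdatePlan      -> UpdatePlanModeModel, UpdatePlanModeReasoningEffort,
//                    PersistPlanModeModelSelection { model, effort }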
fn should_prompt_plan_mode_reasoning_scope(
&self,
selected_model: &str,
@@ -8233,16 +8367,23 @@ impl ChatWidget {
let plan_only_actions: Vec<SelectionAction> = vec![Box::new({
let model = model.clone();
move |tx| {
tx.send(AppEvent::UpdateModel(model.clone()));
tx.send(AppEvent::UpdatePlanModeModel(model.clone()));
tx.send(AppEvent::UpdatePlanModeReasoningEffort(effort));
tx.send(AppEvent::PersistPlanModeReasoningEffort(effort));
tx.send(AppEvent::PersistPlanModeModelSelection {
model: model.clone(),
effort,
});
}
})];
let all_modes_actions: Vec<SelectionAction> = vec![Box::new(move |tx| {
tx.send(AppEvent::UpdateModel(model.clone()));
tx.send(AppEvent::UpdateDefaultModel(model.clone()));
tx.send(AppEvent::UpdatePlanModeModel(model.clone()));
tx.send(AppEvent::UpdateReasoningEffort(effort));
tx.send(AppEvent::UpdatePlanModeReasoningEffort(effort));
tx.send(AppEvent::PersistPlanModeReasoningEffort(effort));
tx.send(AppEvent::PersistPlanModeModelSelection {
model: model.clone(),
effort,
});
tx.send(AppEvent::PersistModelSelection {
model: model.clone(),
effort,
@@ -8277,11 +8418,15 @@ impl ChatWidget {
}
/// Open a popup to choose the reasoning effort (stage 2) for the given model.
pub(crate) fn open_reasoning_popup(&mut self, preset: ModelPreset) {
pub(crate) fn open_reasoning_popup(
&mut self,
preset: ModelPreset,
target: ModelSelectionTarget,
) {
let default_effort: ReasoningEffortConfig = preset.default_reasoning_effort;
let supported = preset.supported_reasoning_efforts;
let in_plan_mode =
self.collaboration_modes_enabled() && self.active_mode_kind() == ModeKind::Plan;
let target_mode = target.mode_kind(self.active_mode_kind());
let in_plan_mode = self.collaboration_modes_enabled() && target_mode == ModeKind::Plan;
let warn_effort = if supported
.iter()
@@ -8327,15 +8472,12 @@ impl ChatWidget {
if choices.len() == 1 {
let selected_effort = choices.first().and_then(|c| c.stored);
let selected_model = preset.model;
if self.should_prompt_plan_mode_reasoning_scope(&selected_model, selected_effort) {
self.app_event_tx
.send(AppEvent::OpenPlanReasoningScopePrompt {
model: selected_model,
effort: selected_effort,
});
} else {
self.apply_model_and_effort(selected_model, selected_effort);
}
self.apply_model_selection(
target,
selected_model.clone().as_str(),
selected_model,
selected_effort,
);
return;
}
@@ -8348,14 +8490,14 @@ impl ChatWidget {
.or(Some(default_effort));
let model_slug = preset.model.to_string();
let is_current_model = self.current_model() == preset.model.as_str();
let is_current_model = self.model_for_selection_target(target) == preset.model.as_str();
let highlight_choice = if is_current_model {
if in_plan_mode {
self.config
.plan_mode_reasoning_effort
.or(self.effective_reasoning_effort())
.or(self.plan_mode_reasoning_effort_for_picker())
} else {
self.effective_reasoning_effort()
self.current_collaboration_mode.reasoning_effort()
}
} else {
default_choice
@@ -8400,23 +8542,10 @@ impl ChatWidget {
let model_for_action = model_slug.clone();
let choice_effort = choice.stored;
let should_prompt_plan_mode_scope =
self.should_prompt_plan_mode_reasoning_scope(model_slug.as_str(), choice_effort);
let actions: Vec<SelectionAction> = vec![Box::new(move |tx| {
if should_prompt_plan_mode_scope {
tx.send(AppEvent::OpenPlanReasoningScopePrompt {
model: model_for_action.clone(),
effort: choice_effort,
});
} else {
tx.send(AppEvent::UpdateModel(model_for_action.clone()));
tx.send(AppEvent::UpdateReasoningEffort(choice_effort));
tx.send(AppEvent::PersistModelSelection {
model: model_for_action.clone(),
effort: choice_effort,
});
}
})];
let resolution =
self.model_selection_resolution(target, model_slug.as_str(), choice_effort);
let actions =
Self::model_selection_actions(model_for_action, choice_effort, resolution);
items.push(SelectionItem {
name: effort_label,
@@ -8431,7 +8560,8 @@ impl ChatWidget {
let mut header = ColumnRenderable::new();
header.push(Line::from(
format!("Select Reasoning Level for {model_slug}").bold(),
self.model_menu_title(target, &format!("Select Reasoning Level for {model_slug}"))
.bold(),
));
self.bottom_pane.show_selection_view(SelectionViewParams {
@@ -8454,20 +8584,41 @@ impl ChatWidget {
}
}
fn apply_model_and_effort_without_persist(
/// Dispatch the appropriate update + persist events for a confirmed model
/// selection.
///
/// This is the non-closure counterpart of [`model_selection_actions`] —
/// used when the widget can send events directly (e.g. single-effort
/// models that skip the reasoning popup).
fn apply_model_selection(
&self,
target: ModelSelectionTarget,
selected_model: &str,
model: String,
effort: Option<ReasoningEffortConfig>,
) {
self.app_event_tx.send(AppEvent::UpdateModel(model));
self.app_event_tx
.send(AppEvent::UpdateReasoningEffort(effort));
}
fn apply_model_and_effort(&self, model: String, effort: Option<ReasoningEffortConfig>) {
self.apply_model_and_effort_without_persist(model.clone(), effort);
self.app_event_tx
.send(AppEvent::PersistModelSelection { model, effort });
match self.model_selection_resolution(target, selected_model, effort) {
ModelSelectionResolution::PromptPlanScope => {
self.app_event_tx
.send(AppEvent::OpenPlanReasoningScopePrompt { model, effort });
}
ModelSelectionResolution::UpdateDefault => {
self.app_event_tx
.send(AppEvent::UpdateDefaultModel(model.clone()));
self.app_event_tx
.send(AppEvent::UpdateReasoningEffort(effort));
self.app_event_tx
.send(AppEvent::PersistModelSelection { model, effort });
}
ModelSelectionResolution::UpdatePlan => {
self.app_event_tx
.send(AppEvent::UpdatePlanModeModel(model.clone()));
self.app_event_tx
.send(AppEvent::UpdatePlanModeReasoningEffort(effort));
self.app_event_tx
.send(AppEvent::PersistPlanModeModelSelection { model, effort });
}
}
}
/// Open the permissions popup (alias for /permissions).
@@ -9404,6 +9555,23 @@ impl ChatWidget {
self.refresh_model_dependent_surfaces();
}
/// Override the model used when Plan mode is active.
///
/// When unset, Plan mode inherits the Default-mode model. The picker UI
/// only ever persists a concrete Plan model value (the config schema has no
/// clearable sentinel), so this setter likewise applies only a concrete
/// override.
pub(crate) fn set_plan_mode_model(&mut self, model: &str) {
self.config.plan_mode_model = Some(model.to_string());
if self.collaboration_modes_enabled()
&& let Some(mask) = self.active_collaboration_mask.as_mut()
&& mask.mode == Some(ModeKind::Plan)
{
mask.model = Some(model.to_string());
}
self.refresh_model_dependent_surfaces();
}
/// Set the reasoning effort for the non-Plan collaboration mode.
///
/// Does not touch the active Plan mask — Plan reasoning is controlled
@@ -9500,7 +9668,7 @@ impl ChatWidget {
}
/// Set the model in the widget's config copy and stored collaboration mode.
pub(crate) fn set_model(&mut self, model: &str) {
pub(crate) fn set_default_mode_model(&mut self, model: &str) {
self.current_collaboration_mode = self.current_collaboration_mode.with_updates(
Some(model.to_string()),
/*effort*/ None,
@@ -9508,12 +9676,22 @@ impl ChatWidget {
);
if self.collaboration_modes_enabled()
&& let Some(mask) = self.active_collaboration_mask.as_mut()
&& mask.mode != Some(ModeKind::Plan)
{
mask.model = Some(model.to_string());
}
self.refresh_model_dependent_surfaces();
}
/// Set the model for whichever collaboration mode is active.
pub(crate) fn set_model(&mut self, model: &str) {
if self.collaboration_modes_enabled() && self.active_mode_kind() == ModeKind::Plan {
self.set_plan_mode_model(model);
} else {
self.set_default_mode_model(model);
}
}
fn set_service_tier_selection(&mut self, service_tier: Option<ServiceTier>) {
self.set_service_tier(service_tier);
self.app_event_tx.send(AppEvent::CodexOp(
@@ -9537,13 +9715,42 @@ impl ChatWidget {
}
pub(crate) fn current_model(&self) -> &str {
self.model_for_selection_target(ModelSelectionTarget::Active)
}
/// Resolve the "current" model for the given selection target.
///
/// For Plan mode the precedence is:
/// 1. `config.plan_mode_model` (explicit user override),
/// 2. the active collaboration mask's model (only when the mask is
/// currently Plan — ignored if the user targeted Plan from Default
/// mode, since the mask belongs to Default in that case),
/// 3. the global default model.
///
/// For all other modes the global default model is used directly.
fn model_for_selection_target(&self, target: ModelSelectionTarget) -> &str {
if !self.collaboration_modes_enabled() {
return self.current_collaboration_mode.model();
}
self.active_collaboration_mask
.as_ref()
.and_then(|mask| mask.model.as_deref())
.unwrap_or_else(|| self.current_collaboration_mode.model())
match target.mode_kind(self.active_mode_kind()) {
ModeKind::Plan => self
.config
.plan_mode_model
.as_deref()
.or_else(|| {
// Only consult the active mask when it *is* Plan mode.
// If the target is Plan but the active mask is Default,
// the mask's model belongs to Default, not Plan.
self.active_collaboration_mask
.as_ref()
.filter(|mask| mask.mode == Some(ModeKind::Plan))
.and_then(|mask| mask.model.as_deref())
})
.unwrap_or_else(|| self.current_collaboration_mode.model()),
ModeKind::Default | ModeKind::PairProgramming | ModeKind::Execute => {
self.current_collaboration_mode.model()
}
}
}
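
A self-contained sketch of the Plan-target precedence above, with simplified stand-ins for the widget's fields (all names in this sketch are ours, not the codebase's):

// Simplified mirror of model_for_selection_target for the Plan case.
fn plan_model<'a>(
    plan_override: Option<&'a str>,   // config.plan_mode_model
    plan_mask_model: Option<&'a str>, // mask model, Some only when the mask IS Plan
    global_model: &'a str,            // current_collaboration_mode.model()
) -> &'a str {
    plan_override.or(plan_mask_model).unwrap_or(global_model)
}

fn main() {
    // The explicit override beats everything.
    assert_eq!(
        plan_model(Some("gpt-5.4-pro"), Some("gpt-5"), "gpt-5.1-codex-max"),
        "gpt-5.4-pro"
    );
    // No override: an active Plan mask supplies the model.
    assert_eq!(plan_model(None, Some("gpt-5"), "gpt-5.1-codex-max"), "gpt-5");
    // Neither set: fall back to the global default.
    assert_eq!(plan_model(None, None, "gpt-5.1-codex-max"), "gpt-5.1-codex-max");
}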
pub(crate) fn realtime_conversation_is_live(&self) -> bool {
@@ -9644,12 +9851,18 @@ impl ChatWidget {
}
fn initial_collaboration_mask(
_config: &Config,
config: &Config,
model_catalog: &ModelCatalog,
model_override: Option<&str>,
) -> Option<CollaborationModeMask> {
let mut mask = collaboration_modes::default_mask(model_catalog)?;
if let Some(model_override) = model_override {
if mask.mode == Some(ModeKind::Plan) {
if let Some(plan_mode_model) = config.plan_mode_model.as_ref() {
mask.model = Some(plan_mode_model.clone());
} else if let Some(model_override) = model_override {
mask.model = Some(model_override.to_string());
}
} else if let Some(model_override) = model_override {
mask.model = Some(model_override.to_string());
}
Some(mask)
@@ -9663,14 +9876,35 @@ impl ChatWidget {
}
fn effective_reasoning_effort(&self) -> Option<ReasoningEffortConfig> {
match self.active_mode_kind() {
ModeKind::Plan => self.plan_mode_reasoning_effort_for_picker(),
ModeKind::Default | ModeKind::PairProgramming | ModeKind::Execute => {
self.current_collaboration_mode.reasoning_effort()
}
}
}
/// Resolve the Plan-mode reasoning effort that the picker should treat as
/// "current."
///
/// Precedence: active Plan mask effort > `config.plan_mode_reasoning_effort`
/// > catalog Plan preset default.
fn plan_mode_reasoning_effort_for_picker(&self) -> Option<ReasoningEffortConfig> {
if !self.collaboration_modes_enabled() {
return self.current_collaboration_mode.reasoning_effort();
}
let current_effort = self.current_collaboration_mode.reasoning_effort();
self.active_collaboration_mask
if let Some(mask) = self
.active_collaboration_mask
.as_ref()
.and_then(|mask| mask.reasoning_effort)
.unwrap_or(current_effort)
.filter(|mask| mask.mode == Some(ModeKind::Plan))
&& let Some(effort) = mask.reasoning_effort
{
return effort;
}
self.config.plan_mode_reasoning_effort.or_else(|| {
collaboration_modes::plan_mask(self.model_catalog.as_ref())
.and_then(|mask| mask.reasoning_effort.flatten())
})
}
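
The effort precedence has the same shape; a compact sketch (names ours):

// mask effort (only when the active mask is Plan)
//   > config.plan_mode_reasoning_effort
//   > catalog Plan preset default
fn picker_plan_effort<E: Copy>(
    plan_mask_effort: Option<E>,
    config_override: Option<E>,
    preset_default: Option<E>,
) -> Option<E> {
    plan_mask_effort.or(config_override).or(preset_default)
}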
fn effective_collaboration_mode(&self) -> CollaborationMode {
@@ -9704,6 +9938,15 @@ impl ChatWidget {
self.refresh_status_line();
}
fn model_display_name_for_selection_target(&self, target: ModelSelectionTarget) -> String {
let model = self.model_for_selection_target(target);
if model.is_empty() {
DEFAULT_MODEL_DISPLAY_NAME.to_string()
} else {
model.to_string()
}
}
fn model_display_name(&self) -> &str {
let model = self.current_model();
if model.is_empty() {
@@ -9780,10 +10023,13 @@ impl ChatWidget {
let previous_mode = self.active_mode_kind();
let previous_model = self.current_model().to_string();
let previous_effort = self.effective_reasoning_effort();
if mask.mode == Some(ModeKind::Plan)
&& let Some(effort) = self.config.plan_mode_reasoning_effort
{
mask.reasoning_effort = Some(Some(effort));
if mask.mode == Some(ModeKind::Plan) {
if let Some(model) = self.config.plan_mode_model.as_ref() {
mask.model = Some(model.clone());
}
if let Some(effort) = self.config.plan_mode_reasoning_effort {
mask.reasoning_effort = Some(Some(effort));
}
}
self.active_collaboration_mask = Some(mask);
self.update_collaboration_mode_indicator();

View File

@@ -7,6 +7,7 @@
pub(super) use super::*;
pub(super) use crate::app_event::AppEvent;
pub(super) use crate::app_event::ExitMode;
pub(super) use crate::app_event::ModelSelectionTarget;
#[cfg(not(target_os = "linux"))]
pub(super) use crate::app_event::RealtimeAudioDeviceKind;
pub(super) use crate::app_event_sender::AppEventSender;

View File

@@ -78,7 +78,7 @@ async fn reasoning_selection_in_plan_mode_opens_scope_prompt_event() {
chat.set_reasoning_effort(Some(ReasoningEffortConfig::High));
let preset = get_available_model(&chat, "gpt-5.1-codex-max");
chat.open_reasoning_popup(preset);
chat.open_reasoning_popup(preset, ModelSelectionTarget::Active);
chat.handle_key_event(KeyEvent::from(KeyCode::Down));
chat.handle_key_event(KeyEvent::from(KeyCode::Enter));
@@ -107,21 +107,21 @@ async fn reasoning_selection_in_plan_mode_without_effort_change_does_not_open_sc
chat.set_reasoning_effort(Some(current_preset.default_reasoning_effort));
let preset = get_available_model(&chat, "gpt-5.1-codex-max");
chat.open_reasoning_popup(preset);
chat.open_reasoning_popup(preset, ModelSelectionTarget::Active);
chat.handle_key_event(KeyEvent::from(KeyCode::Enter));
let events = std::iter::from_fn(|| rx.try_recv().ok()).collect::<Vec<_>>();
assert!(
events.iter().any(|event| matches!(
event,
AppEvent::UpdateModel(model) if model == "gpt-5.1-codex-max"
AppEvent::UpdatePlanModeModel(model) if model == "gpt-5.1-codex-max"
)),
"expected model update event; events: {events:?}"
);
assert!(
events
.iter()
.any(|event| matches!(event, AppEvent::UpdateReasoningEffort(Some(_)))),
.any(|event| matches!(event, AppEvent::UpdatePlanModeReasoningEffort(Some(_)))),
"expected reasoning update event; events: {events:?}"
);
}
@@ -144,7 +144,7 @@ async fn reasoning_selection_in_plan_mode_matching_plan_effort_but_different_glo
chat.set_reasoning_effort(Some(ReasoningEffortConfig::High));
let preset = get_available_model(&chat, "gpt-5.1-codex-max");
chat.open_reasoning_popup(preset);
chat.open_reasoning_popup(preset, ModelSelectionTarget::Active);
chat.handle_key_event(KeyEvent::from(KeyCode::Enter));
let event = rx.try_recv().expect("expected AppEvent");
@@ -170,7 +170,7 @@ async fn plan_mode_reasoning_override_is_marked_current_in_reasoning_popup() {
chat.set_collaboration_mask(plan_mask);
let preset = get_available_model(&chat, "gpt-5.1-codex-max");
chat.open_reasoning_popup(preset);
chat.open_reasoning_popup(preset, ModelSelectionTarget::Active);
let popup = render_bottom_popup(&chat, /*width*/ 100);
assert!(popup.contains("Low (current)"));
@@ -192,21 +192,21 @@ async fn reasoning_selection_in_plan_mode_model_switch_does_not_open_scope_promp
set_chatgpt_auth(&mut chat);
let preset = get_available_model(&chat, "gpt-5");
chat.open_reasoning_popup(preset);
chat.open_reasoning_popup(preset, ModelSelectionTarget::Active);
chat.handle_key_event(KeyEvent::from(KeyCode::Enter));
let events = std::iter::from_fn(|| rx.try_recv().ok()).collect::<Vec<_>>();
assert!(
events.iter().any(|event| matches!(
event,
AppEvent::UpdateModel(model) if model == "gpt-5"
AppEvent::UpdatePlanModeModel(model) if model == "gpt-5"
)),
"expected model update event; events: {events:?}"
);
assert!(
events
.iter()
.any(|event| matches!(event, AppEvent::UpdateReasoningEffort(Some(_)))),
.any(|event| matches!(event, AppEvent::UpdatePlanModeReasoningEffort(Some(_)))),
"expected reasoning update event; events: {events:?}"
);
}
@@ -233,7 +233,10 @@ async fn plan_reasoning_scope_popup_all_modes_persists_global_and_plan_override(
assert!(
events.iter().any(|event| matches!(
event,
AppEvent::PersistPlanModeReasoningEffort(Some(ReasoningEffortConfig::High))
AppEvent::PersistPlanModeModelSelection {
model,
effort: Some(ReasoningEffortConfig::High),
} if model == "gpt-5.1-codex-max"
)),
"expected updated plan override to be persisted; events: {events:?}"
);
@@ -1156,6 +1159,81 @@ async fn plan_slash_command_switches_to_plan_mode() {
assert_eq!(chat.current_collaboration_mode(), &initial);
}
#[tokio::test]
async fn model_slash_command_plan_arg_opens_plan_scoped_picker_without_switching_modes() {
let (mut chat, _rx, _op_rx) = make_chatwidget_manual(Some("gpt-5.1-codex-max")).await;
chat.thread_id = Some(ThreadId::new());
chat.set_feature_enabled(Feature::CollaborationModes, /*enabled*/ true);
assert_eq!(chat.active_collaboration_mode_kind(), ModeKind::Default);
chat.bottom_pane
.set_composer_text("/model plan".to_string(), Vec::new(), Vec::new());
chat.handle_key_event(KeyEvent::from(KeyCode::Enter));
let popup = render_bottom_popup(&chat, /*width*/ 120);
assert!(
popup.contains("for Plan Mode"),
"expected Plan-scoped model picker, got: {popup}"
);
assert_eq!(chat.bottom_pane.composer_text(), "");
assert_eq!(chat.active_collaboration_mode_kind(), ModeKind::Default);
}
#[tokio::test]
async fn model_slash_command_default_arg_updates_default_model_while_in_plan_mode() {
let (mut chat, mut rx, _op_rx) = make_chatwidget_manual(Some("gpt-5.1-codex-max")).await;
chat.thread_id = Some(ThreadId::new());
chat.set_feature_enabled(Feature::CollaborationModes, /*enabled*/ true);
let plan_mask = collaboration_modes::plan_mask(chat.model_catalog.as_ref())
.expect("expected plan collaboration mode");
chat.set_collaboration_mask(plan_mask);
let _ = drain_insert_history(&mut rx);
chat.bottom_pane
.set_composer_text("/model default".to_string(), Vec::new(), Vec::new());
chat.handle_key_event(KeyEvent::from(KeyCode::Enter));
let popup = render_bottom_popup(&chat, /*width*/ 120);
assert!(
popup.contains("for Default Mode"),
"expected Default-scoped model picker, got: {popup}"
);
assert_eq!(chat.bottom_pane.composer_text(), "");
assert_eq!(chat.active_collaboration_mode_kind(), ModeKind::Plan);
}
#[tokio::test]
async fn plan_scoped_model_picker_selection_updates_plan_model_override() {
let (mut chat, mut rx, _op_rx) = make_chatwidget_manual(Some("gpt-5.1-codex-max")).await;
chat.thread_id = Some(ThreadId::new());
chat.set_feature_enabled(Feature::CollaborationModes, /*enabled*/ true);
chat.open_model_popup_for_target(ModelSelectionTarget::Plan);
chat.handle_key_event(KeyEvent::from(KeyCode::Enter));
let event = rx.try_recv().expect("expected OpenReasoningPopup event");
let AppEvent::OpenReasoningPopup { model, target } = event else {
panic!("expected OpenReasoningPopup, got {event:?}");
};
chat.open_reasoning_popup(model, target);
chat.handle_key_event(KeyEvent::from(KeyCode::Enter));
let events = std::iter::from_fn(|| rx.try_recv().ok()).collect::<Vec<_>>();
assert!(
events
.iter()
.any(|event| matches!(event, AppEvent::UpdatePlanModeModel(_))),
"expected Plan model update event; events: {events:?}"
);
assert!(
events
.iter()
.any(|event| matches!(event, AppEvent::PersistPlanModeModelSelection { .. })),
"expected Plan model persistence event; events: {events:?}"
);
assert_eq!(chat.active_collaboration_mode_kind(), ModeKind::Default);
}
#[tokio::test]
async fn plan_slash_command_with_args_submits_prompt_in_plan_mode() {
let (mut chat, _rx, mut op_rx) = make_chatwidget_manual(/*model_override*/ None).await;

View File

@@ -1600,10 +1600,13 @@ async fn model_picker_hides_show_in_picker_false_models_from_cache() {
input_modalities: default_input_modalities(),
};
chat.open_model_popup_with_presets(vec![
preset("test-visible-model", true),
preset("test-hidden-model", false),
]);
chat.open_model_popup_with_presets(
vec![
preset("test-visible-model", true),
preset("test-hidden-model", false),
],
ModelSelectionTarget::Active,
);
let popup = render_bottom_popup(&chat, /*width*/ 80);
assert_chatwidget_snapshot!("model_picker_filters_hidden_models", popup);
assert!(
@@ -1658,7 +1661,7 @@ async fn model_reasoning_selection_popup_snapshot() {
chat.set_reasoning_effort(Some(ReasoningEffortConfig::High));
let preset = get_available_model(&chat, "gpt-5.1-codex-max");
chat.open_reasoning_popup(preset);
chat.open_reasoning_popup(preset, ModelSelectionTarget::Active);
let popup = render_bottom_popup(&chat, /*width*/ 80);
assert_chatwidget_snapshot!("model_reasoning_selection_popup", popup);
@@ -1672,7 +1675,7 @@ async fn model_reasoning_selection_popup_extra_high_warning_snapshot() {
chat.set_reasoning_effort(Some(ReasoningEffortConfig::XHigh));
let preset = get_available_model(&chat, "gpt-5.1-codex-max");
chat.open_reasoning_popup(preset);
chat.open_reasoning_popup(preset, ModelSelectionTarget::Active);
let popup = render_bottom_popup(&chat, /*width*/ 80);
assert_chatwidget_snapshot!("model_reasoning_selection_popup_extra_high_warning", popup);
@@ -1685,7 +1688,7 @@ async fn reasoning_popup_shows_extra_high_with_space() {
set_chatgpt_auth(&mut chat);
let preset = get_available_model(&chat, "gpt-5.1-codex-max");
chat.open_reasoning_popup(preset);
chat.open_reasoning_popup(preset, ModelSelectionTarget::Active);
let popup = render_bottom_popup(&chat, /*width*/ 120);
assert!(
@@ -1721,7 +1724,7 @@ async fn single_reasoning_option_skips_selection() {
supported_in_api: true,
input_modalities: default_input_modalities(),
};
chat.open_reasoning_popup(preset);
chat.open_reasoning_popup(preset, ModelSelectionTarget::Active);
let popup = render_bottom_popup(&chat, /*width*/ 80);
assert!(
@@ -1800,7 +1803,7 @@ async fn reasoning_popup_escape_returns_to_model_popup() {
chat.open_model_popup();
let preset = get_available_model(&chat, "gpt-5.1-codex-max");
chat.open_reasoning_popup(preset);
chat.open_reasoning_popup(preset, ModelSelectionTarget::Active);
let before_escape = render_bottom_popup(&chat, /*width*/ 80);
assert!(before_escape.contains("Select Reasoning Level"));

View File

@@ -130,6 +130,7 @@ impl SlashCommand {
self,
SlashCommand::Review
| SlashCommand::Rename
| SlashCommand::Model
| SlashCommand::Plan
| SlashCommand::Fast
| SlashCommand::SandboxReadRoot

View File

@@ -74,6 +74,9 @@ Codex stores "do not show again" flags for some UI prompts under the `[notice]`
## Plan mode defaults
`plan_mode_model` lets you set a Plan-mode-specific model override. When unset,
Plan mode inherits the global `model` default.
`plan_mode_reasoning_effort` lets you set a Plan-mode-specific default reasoning
effort override. When unset, Plan mode uses the built-in Plan preset default
(currently `medium`). When explicitly set (including `none`), it overrides the
preset default.
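
For example (slugs illustrative):

model = "gpt-5.1-codex-max"            # used by Default and other modes
plan_mode_model = "gpt-5.4-pro"        # Plan mode only
plan_mode_reasoning_effort = "high"    # Plan mode only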