TUI: prompt to implement plan and switch to Execute (#9712)

## Summary
- Replace the plan‑implementation prompt with a standard selection
popup.
- “Yes” submits a user turn in Execute via a dedicated app event to
preserve normal transcript behavior.
- “No” simply dismisses the popup.

<img width="977" height="433" alt="Screenshot 2026-01-22 at 2 00 54 PM"
src="https://github.com/user-attachments/assets/91fad06f-7b7a-4cd8-9051-f28a19b750b2"
/>

## Changes
- Add a plan‑implementation popup using `SelectionViewParams`.
- Add `SubmitUserMessageWithMode` so “Yes” routes through
`submit_user_message`, preserving user history and separator state; a
sketch of the round‑trip follows this list.
- Track `saw_plan_update_this_turn` so the prompt appears even when only
`update_plan` is emitted.
- Suppress the plan popup on replayed turns, when messages are queued,
or when a rate‑limit prompt is pending.
- Add `execute_mode` helper for collaboration modes.
- Add tests for replay/queued/rate‑limit guards and plan update without
final message.
- Add snapshots for both the default and “No”‑selected popup states.
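
The “Yes” path is the interesting piece: the popup action sends an app event rather than mutating the widget directly, and the widget switches mode before submitting. Below is a minimal, self‑contained sketch of that round‑trip using toy stand‑ins for `AppEvent`, `ChatWidget`, `CollaborationMode`, and the event channel (a plain `mpsc` channel here) — illustrative only; the real definitions are in the diff that follows.

```rust
use std::sync::mpsc;

// Toy stand-ins; only the shape of the flow matters here.
#[derive(Clone, Debug)]
enum CollaborationMode {
    Plan,
    Execute,
}

#[derive(Debug)]
enum AppEvent {
    SubmitUserMessageWithMode {
        text: String,
        collaboration_mode: CollaborationMode,
    },
}

struct ChatWidget {
    mode: CollaborationMode,
}

impl ChatWidget {
    // Switch mode first, then go through the ordinary submission path so the
    // turn records history and separator state like any typed message.
    fn submit_user_message_with_mode(&mut self, text: String, mode: CollaborationMode) {
        self.mode = mode;
        self.submit_user_message(text);
    }

    fn submit_user_message(&mut self, text: String) {
        println!("submitting {text:?} in {:?} mode", self.mode);
    }
}

fn main() {
    let (tx, rx) = mpsc::channel();

    // The "Yes" popup action sends an event instead of touching the widget.
    tx.send(AppEvent::SubmitUserMessageWithMode {
        text: "Implement the plan.".into(),
        collaboration_mode: CollaborationMode::Execute,
    })
    .unwrap();

    // The app event loop forwards the event to the chat widget.
    let mut chat = ChatWidget {
        mode: CollaborationMode::Plan,
    };
    match rx.recv().unwrap() {
        AppEvent::SubmitUserMessageWithMode {
            text,
            collaboration_mode,
        } => chat.submit_user_message_with_mode(text, collaboration_mode),
    }
}
```

Routing through `submit_user_message` is what keeps history and separator bookkeeping identical to a typed message.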
Commit 0e79d239ed (parent e117a3ff33), authored by charley-oai on 2026-01-22 16:25:50 -08:00 and committed via GitHub. 7 changed files with 307 additions and 4 deletions.


@@ -1520,6 +1520,13 @@ impl App {
            AppEvent::OpenReviewCustomPrompt => {
                self.chat_widget.show_review_custom_prompt();
            }
            AppEvent::SubmitUserMessageWithMode {
                text,
                collaboration_mode,
            } => {
                self.chat_widget
                    .submit_user_message_with_mode(text, collaboration_mode);
            }
            AppEvent::ManageSkillsClosed => {
                self.chat_widget.handle_manage_skills_closed();
            }


@@ -240,6 +240,12 @@ pub(crate) enum AppEvent {
    /// Open the custom prompt option from the review popup.
    OpenReviewCustomPrompt,
    /// Submit a user message with an explicit collaboration mode.
    SubmitUserMessageWithMode {
        text: String,
        collaboration_mode: CollaborationMode,
    },
    /// Open the approval popup.
    FullScreenApprovalRequest(ApprovalRequest),


@@ -119,6 +119,10 @@ use tokio::task::JoinHandle;
use tracing::debug;
const DEFAULT_MODEL_DISPLAY_NAME: &str = "loading";
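// User-facing copy for the plan-implementation popup.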
const PLAN_IMPLEMENTATION_TITLE: &str = "Implement this plan?";
const PLAN_IMPLEMENTATION_YES: &str = "Yes, implement this plan";
const PLAN_IMPLEMENTATION_NO: &str = "No, stay in Plan mode";
const PLAN_IMPLEMENTATION_EXECUTE_MESSAGE: &str = "Implement the plan.";
use crate::app_event::AppEvent;
use crate::app_event::ExitMode;
@@ -493,6 +497,8 @@ pub(crate) struct ChatWidget {
    // This gates rendering of the "Worked for …" separator so purely conversational turns don't
    // show an empty divider. It is reset when the separator is emitted.
    had_work_activity: bool,
    // Whether the current turn emitted a plan update.
    saw_plan_update_this_turn: bool,
    // Status-indicator elapsed seconds captured at the last emitted final-message separator.
    //
    // This lets the separator show per-chunk work time (since the previous separator) rather than
@@ -858,6 +864,7 @@ impl ChatWidget {
    fn on_task_started(&mut self) {
        self.agent_turn_running = true;
        self.saw_plan_update_this_turn = false;
        self.bottom_pane.clear_quit_shortcut_hint();
        self.quit_shortcut_expires_at = None;
        self.quit_shortcut_key = None;
@@ -870,7 +877,7 @@ impl ChatWidget {
        self.request_redraw();
    }
-    fn on_task_complete(&mut self, last_agent_message: Option<String>) {
+    fn on_task_complete(&mut self, last_agent_message: Option<String>, from_replay: bool) {
        // If a stream is currently active, finalize it.
        self.flush_answer_stream_with_separator();
        self.flush_unified_exec_wait_streak();
@@ -884,6 +891,9 @@ impl ChatWidget {
        self.clear_unified_exec_processes();
        self.request_redraw();
        if !from_replay && self.queued_user_messages.is_empty() {
            self.maybe_prompt_plan_implementation(last_agent_message.as_deref());
        }
        // If there is a queued user message, send exactly one now to begin the next turn.
        self.maybe_send_next_queued_input();
        // Emit a notification when the turn completes (suppressed if focused).
@@ -894,6 +904,81 @@ impl ChatWidget {
        self.maybe_show_pending_rate_limit_prompt();
    }
    // Decide whether to offer the plan-implementation popup at the end of a turn.
    fn maybe_prompt_plan_implementation(&mut self, last_agent_message: Option<&str>) {
        if !self.collaboration_modes_enabled() {
            return;
        }
        if !self.queued_user_messages.is_empty() {
            return;
        }
        if !matches!(self.stored_collaboration_mode, CollaborationMode::Plan(_)) {
            return;
        }
        // Require something to implement: a non-empty final message or a plan
        // update seen this turn.
        let has_message = last_agent_message.is_some_and(|message| !message.trim().is_empty());
        if !has_message && !self.saw_plan_update_this_turn {
            return;
        }
        if !self.bottom_pane.no_modal_or_popup_active() {
            return;
        }
        // Defer to a pending rate-limit prompt rather than stacking popups.
        if matches!(
            self.rate_limit_switch_prompt,
            RateLimitSwitchPromptState::Pending
        ) {
            return;
        }
        self.open_plan_implementation_prompt();
    }
    fn open_plan_implementation_prompt(&mut self) {
        let execute_mode = collaboration_modes::execute_mode(self.models_manager.as_ref());
        // "Yes" is only actionable when an Execute preset exists; otherwise the
        // item is rendered disabled with a reason.
        let (implement_actions, implement_disabled_reason) = match execute_mode {
            Some(collaboration_mode) => {
                let user_text = PLAN_IMPLEMENTATION_EXECUTE_MESSAGE.to_string();
                let actions: Vec<SelectionAction> = vec![Box::new(move |tx| {
                    tx.send(AppEvent::SubmitUserMessageWithMode {
                        text: user_text.clone(),
                        collaboration_mode: collaboration_mode.clone(),
                    });
                })];
                (actions, None)
            }
            None => (Vec::new(), Some("Execute mode unavailable".to_string())),
        };
        let items = vec![
            SelectionItem {
                name: PLAN_IMPLEMENTATION_YES.to_string(),
                description: Some("Switch to Execute and start coding.".to_string()),
                selected_description: None,
                is_current: false,
                actions: implement_actions,
                disabled_reason: implement_disabled_reason,
                dismiss_on_select: true,
                ..Default::default()
            },
            SelectionItem {
                name: PLAN_IMPLEMENTATION_NO.to_string(),
                description: Some("Continue planning with the model.".to_string()),
                selected_description: None,
                is_current: false,
                actions: Vec::new(),
                dismiss_on_select: true,
                ..Default::default()
            },
        ];
        self.bottom_pane.show_selection_view(SelectionViewParams {
            title: Some(PLAN_IMPLEMENTATION_TITLE.to_string()),
            subtitle: None,
            footer_hint: Some(standard_popup_hint_line()),
            items,
            ..Default::default()
        });
    }
    pub(crate) fn set_token_info(&mut self, info: Option<TokenUsageInfo>) {
        match info {
            Some(info) => self.apply_token_info(info),
@@ -1190,6 +1275,7 @@ impl ChatWidget {
    }
    fn on_plan_update(&mut self, update: UpdatePlanArgs) {
        self.saw_plan_update_this_turn = true;
        self.add_to_history(history_cell::new_plan_update(update));
    }
@@ -1924,6 +2010,7 @@ impl ChatWidget {
            pre_review_token_info: None,
            needs_final_message_separator: false,
            had_work_activity: false,
            saw_plan_update_this_turn: false,
            last_separator_elapsed_secs: None,
            last_rendered_width: std::cell::Cell::new(None),
            feedback,
@@ -2043,6 +2130,7 @@ impl ChatWidget {
            pre_review_token_info: None,
            needs_final_message_separator: false,
            had_work_activity: false,
            saw_plan_update_this_turn: false,
            last_separator_elapsed_secs: None,
            last_rendered_width: std::cell::Cell::new(None),
            feedback,
@@ -2699,7 +2787,7 @@ impl ChatWidget {
            EventMsg::AgentReasoningSectionBreak(_) => self.on_reasoning_section_break(),
            EventMsg::TurnStarted(_) => self.on_task_started(),
            EventMsg::TurnComplete(TurnCompleteEvent { last_agent_message }) => {
-                self.on_task_complete(last_agent_message)
+                self.on_task_complete(last_agent_message, from_replay)
            }
            EventMsg::TokenCount(ev) => {
                self.set_token_info(ev.info);
@@ -4653,6 +4741,17 @@ impl ChatWidget {
        self.bottom_pane.composer_is_empty()
    }
    pub(crate) fn submit_user_message_with_mode(
        &mut self,
        text: String,
        collaboration_mode: CollaborationMode,
    ) {
        // Switch the collaboration mode (and its model) before submitting so the
        // turn is sent under the requested mode via the normal submission path.
        let model = collaboration_mode.model().to_string();
        self.set_collaboration_mode(collaboration_mode);
        self.set_model(&model);
        self.submit_user_message(text.into());
    }
    /// True when the UI is in the regular composer state with no running task,
    /// no modal overlay (e.g. approvals or status indicator), and no composer popups.
    /// In this state Esc-Esc backtracking is enabled.


@@ -0,0 +1,10 @@
---
source: tui/src/chatwidget/tests.rs
expression: popup
---
Implement this plan?
1. Yes, implement this plan Switch to Execute and start coding.
2. No, stay in Plan mode Continue planning with the model.
Press enter to confirm or esc to go back


@@ -0,0 +1,10 @@
---
source: tui/src/chatwidget/tests.rs
expression: popup
---
Implement this plan?
1. Yes, implement this plan Switch to Execute and start coding.
2. No, stay in Plan mode Continue planning with the model.
Press enter to confirm or esc to go back


@@ -840,6 +840,7 @@ async fn make_chatwidget_manual(
        pre_review_token_info: None,
        needs_final_message_separator: false,
        had_work_activity: false,
        saw_plan_update_this_turn: false,
        last_separator_elapsed_secs: None,
        last_rendered_width: std::cell::Cell::new(None),
        feedback: codex_feedback::CodexFeedback::new(),
@@ -1170,6 +1171,169 @@ async fn rate_limit_switch_prompt_popup_snapshot() {
assert_snapshot!("rate_limit_switch_prompt_popup", popup);
}
#[tokio::test]
async fn plan_implementation_popup_snapshot() {
let (mut chat, _rx, _op_rx) = make_chatwidget_manual(Some("gpt-5")).await;
chat.open_plan_implementation_prompt();
let popup = render_bottom_popup(&chat, 80);
assert_snapshot!("plan_implementation_popup", popup);
}
#[tokio::test]
async fn plan_implementation_popup_no_selected_snapshot() {
let (mut chat, _rx, _op_rx) = make_chatwidget_manual(Some("gpt-5")).await;
chat.open_plan_implementation_prompt();
chat.handle_key_event(KeyEvent::from(KeyCode::Down));
let popup = render_bottom_popup(&chat, 80);
assert_snapshot!("plan_implementation_popup_no_selected", popup);
}
#[tokio::test]
async fn plan_implementation_popup_yes_emits_submit_message_event() {
let (mut chat, mut rx, _op_rx) = make_chatwidget_manual(Some("gpt-5")).await;
chat.open_plan_implementation_prompt();
chat.handle_key_event(KeyEvent::from(KeyCode::Enter));
let event = rx.try_recv().expect("expected AppEvent");
let AppEvent::SubmitUserMessageWithMode {
text,
collaboration_mode,
} = event
else {
panic!("expected SubmitUserMessageWithMode, got {event:?}");
};
assert_eq!(text, PLAN_IMPLEMENTATION_EXECUTE_MESSAGE);
assert!(matches!(collaboration_mode, CollaborationMode::Execute(_)));
}
#[tokio::test]
async fn submit_user_message_with_mode_sets_execute_collaboration_mode() {
let (mut chat, _rx, mut op_rx) = make_chatwidget_manual(Some("gpt-5")).await;
chat.thread_id = Some(ThreadId::new());
chat.set_feature_enabled(Feature::CollaborationModes, true);
let execute_mode = collaboration_modes::execute_mode(chat.models_manager.as_ref())
.expect("expected execute collaboration mode");
chat.submit_user_message_with_mode("Implement the plan.".to_string(), execute_mode);
match next_submit_op(&mut op_rx) {
Op::UserTurn {
collaboration_mode: Some(CollaborationMode::Execute(_)),
personality: None,
..
} => {}
other => {
panic!("expected Op::UserTurn with execute collab mode, got {other:?}")
}
}
}
#[tokio::test]
async fn plan_implementation_popup_skips_replayed_turn_complete() {
let (mut chat, _rx, _op_rx) = make_chatwidget_manual(Some("gpt-5")).await;
chat.set_feature_enabled(Feature::CollaborationModes, true);
chat.stored_collaboration_mode = CollaborationMode::Plan(Settings {
model: chat.current_model().to_string(),
reasoning_effort: None,
developer_instructions: None,
});
chat.replay_initial_messages(vec![EventMsg::TurnComplete(TurnCompleteEvent {
last_agent_message: Some("Plan details".to_string()),
})]);
let popup = render_bottom_popup(&chat, 80);
assert!(
!popup.contains(PLAN_IMPLEMENTATION_TITLE),
"expected no plan popup for replayed turn, got {popup:?}"
);
}
#[tokio::test]
async fn plan_implementation_popup_skips_when_messages_queued() {
let (mut chat, _rx, _op_rx) = make_chatwidget_manual(Some("gpt-5")).await;
chat.set_feature_enabled(Feature::CollaborationModes, true);
chat.stored_collaboration_mode = CollaborationMode::Plan(Settings {
model: chat.current_model().to_string(),
reasoning_effort: None,
developer_instructions: None,
});
chat.bottom_pane.set_task_running(true);
chat.queue_user_message("Queued message".into());
chat.on_task_complete(Some("Plan details".to_string()), false);
let popup = render_bottom_popup(&chat, 80);
assert!(
!popup.contains(PLAN_IMPLEMENTATION_TITLE),
"expected no plan popup with queued messages, got {popup:?}"
);
}
#[tokio::test]
async fn plan_implementation_popup_shows_on_plan_update_without_message() {
let (mut chat, _rx, _op_rx) = make_chatwidget_manual(Some("gpt-5")).await;
chat.set_feature_enabled(Feature::CollaborationModes, true);
chat.stored_collaboration_mode = CollaborationMode::Plan(Settings {
model: chat.current_model().to_string(),
reasoning_effort: None,
developer_instructions: None,
});
chat.on_task_started();
chat.on_plan_update(UpdatePlanArgs {
explanation: None,
plan: vec![PlanItemArg {
step: "First".to_string(),
status: StepStatus::Pending,
}],
});
chat.on_task_complete(None, false);
let popup = render_bottom_popup(&chat, 80);
assert!(
popup.contains(PLAN_IMPLEMENTATION_TITLE),
"expected plan popup after plan update, got {popup:?}"
);
}
#[tokio::test]
async fn plan_implementation_popup_skips_when_rate_limit_prompt_pending() {
let (mut chat, _rx, _op_rx) = make_chatwidget_manual(Some("gpt-5")).await;
chat.auth_manager =
AuthManager::from_auth_for_testing(CodexAuth::create_dummy_chatgpt_auth_for_testing());
chat.set_feature_enabled(Feature::CollaborationModes, true);
chat.stored_collaboration_mode = CollaborationMode::Plan(Settings {
model: chat.current_model().to_string(),
reasoning_effort: None,
developer_instructions: None,
});
chat.on_task_started();
chat.on_plan_update(UpdatePlanArgs {
explanation: None,
plan: vec![PlanItemArg {
step: "First".to_string(),
status: StepStatus::Pending,
}],
});
chat.on_rate_limit_snapshot(Some(snapshot(92.0)));
chat.on_task_complete(None, false);
let popup = render_bottom_popup(&chat, 80);
assert!(
popup.contains("Approaching rate limits"),
"expected rate limit popup, got {popup:?}"
);
assert!(
!popup.contains(PLAN_IMPLEMENTATION_TITLE),
"expected plan popup to be skipped, got {popup:?}"
);
}
// (removed experimental resize snapshot test)
#[tokio::test]
@@ -1756,7 +1920,7 @@ async fn unified_exec_end_after_task_complete_is_suppressed() {
    );
    drain_insert_history(&mut rx);
-    chat.on_task_complete(None);
+    chat.on_task_complete(None, false);
    end_exec(&mut chat, begin, "", "", 0);
    let cells = drain_insert_history(&mut rx);
@@ -1770,7 +1934,7 @@ async fn unified_exec_end_after_task_complete_is_suppressed() {
async fn unified_exec_interaction_after_task_complete_is_suppressed() {
    let (mut chat, mut rx, _op_rx) = make_chatwidget_manual(None).await;
    chat.on_task_started();
-    chat.on_task_complete(None);
+    chat.on_task_complete(None, false);
    chat.handle_codex_event(Event {
        id: "call-1".to_string(),


@@ -48,3 +48,10 @@ pub(crate) fn next_mode(
        .map_or(0, |idx| (idx + 1) % presets.len());
    presets.get(next_index).cloned()
}
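/// Return the Execute preset from the available collaboration modes, if any.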
pub(crate) fn execute_mode(models_manager: &ModelsManager) -> Option<CollaborationMode> {
    models_manager
        .list_collaboration_modes()
        .into_iter()
        .find(|preset| mode_kind(preset) == ModeKind::Execute)
}