TUI: prompt to implement plan and switch to Execute (#9712)

## Summary
- Replace the plan‑implementation prompt with a standard selection
popup.
- “Yes” submits a user turn in Execute via a dedicated app event to
preserve normal transcript behavior.
- “No” simply dismisses the popup.

<img width="977" height="433" alt="Screenshot 2026-01-22 at 2 00 54 PM"
src="https://github.com/user-attachments/assets/91fad06f-7b7a-4cd8-9051-f28a19b750b2"
/>

## Changes
- Add a plan‑implementation popup using `SelectionViewParams`.
- Add `SubmitUserMessageWithMode` so “Yes” routes through
`submit_user_message` (ensures user history + separator state).
- Track `saw_plan_update_this_turn` so the prompt appears even when only
`update_plan` is emitted.
- Suppress the plan popup on replayed turns, when messages are queued,
or when a rate‑limit prompt is pending.
- Add `execute_mode` helper for collaboration modes.
- Add tests for replay/queued/rate‑limit guards and plan update without
final message.
- Add snapshots for both the default and “No”‑selected popup states.
This commit is contained in:
charley-oai
2026-01-22 16:25:50 -08:00
committed by GitHub
parent e117a3ff33
commit 0e79d239ed
7 changed files with 307 additions and 4 deletions

View File

@@ -840,6 +840,7 @@ async fn make_chatwidget_manual(
pre_review_token_info: None,
needs_final_message_separator: false,
had_work_activity: false,
saw_plan_update_this_turn: false,
last_separator_elapsed_secs: None,
last_rendered_width: std::cell::Cell::new(None),
feedback: codex_feedback::CodexFeedback::new(),
@@ -1170,6 +1171,169 @@ async fn rate_limit_switch_prompt_popup_snapshot() {
assert_snapshot!("rate_limit_switch_prompt_popup", popup);
}
#[tokio::test]
async fn plan_implementation_popup_snapshot() {
    // Opening the plan-implementation prompt should render the selection
    // popup in its default state; pin that rendering with a snapshot.
    let (mut chat, _rx, _op_rx) = make_chatwidget_manual(Some("gpt-5")).await;
    chat.open_plan_implementation_prompt();
    let rendered = render_bottom_popup(&chat, 80);
    assert_snapshot!("plan_implementation_popup", rendered);
}
#[tokio::test]
async fn plan_implementation_popup_no_selected_snapshot() {
    // After opening the prompt, a Down key moves the selection to the
    // second option; snapshot the popup in that state.
    let (mut chat, _rx, _op_rx) = make_chatwidget_manual(Some("gpt-5")).await;
    chat.open_plan_implementation_prompt();
    chat.handle_key_event(KeyEvent::from(KeyCode::Down));
    let rendered = render_bottom_popup(&chat, 80);
    assert_snapshot!("plan_implementation_popup_no_selected", rendered);
}
#[tokio::test]
async fn plan_implementation_popup_yes_emits_submit_message_event() {
    // Confirming the prompt with Enter must emit a SubmitUserMessageWithMode
    // app event carrying the canned implementation message and an Execute
    // collaboration mode.
    let (mut chat, mut rx, _op_rx) = make_chatwidget_manual(Some("gpt-5")).await;
    chat.open_plan_implementation_prompt();
    chat.handle_key_event(KeyEvent::from(KeyCode::Enter));
    match rx.try_recv().expect("expected AppEvent") {
        AppEvent::SubmitUserMessageWithMode {
            text,
            collaboration_mode,
        } => {
            assert_eq!(text, PLAN_IMPLEMENTATION_EXECUTE_MESSAGE);
            assert!(matches!(collaboration_mode, CollaborationMode::Execute(_)));
        }
        event => panic!("expected SubmitUserMessageWithMode, got {event:?}"),
    }
}
#[tokio::test]
async fn submit_user_message_with_mode_sets_execute_collaboration_mode() {
    // submit_user_message_with_mode must forward the requested collaboration
    // mode into the Op::UserTurn that is submitted to the agent.
    let (mut chat, _rx, mut op_rx) = make_chatwidget_manual(Some("gpt-5")).await;
    chat.thread_id = Some(ThreadId::new());
    chat.set_feature_enabled(Feature::CollaborationModes, true);
    let execute_mode = collaboration_modes::execute_mode(chat.models_manager.as_ref())
        .expect("expected execute collaboration mode");
    chat.submit_user_message_with_mode("Implement the plan.".to_string(), execute_mode);
    let op = next_submit_op(&mut op_rx);
    match op {
        Op::UserTurn {
            collaboration_mode: Some(CollaborationMode::Execute(_)),
            personality: None,
            ..
        } => {}
        other => panic!("expected Op::UserTurn with execute collab mode, got {other:?}"),
    }
}
#[tokio::test]
async fn plan_implementation_popup_skips_replayed_turn_complete() {
    // A TurnComplete replayed from history carries a final agent message,
    // but replayed turns must not re-trigger the plan-implementation prompt.
    let (mut chat, _rx, _op_rx) = make_chatwidget_manual(Some("gpt-5")).await;
    chat.set_feature_enabled(Feature::CollaborationModes, true);
    let plan_settings = Settings {
        model: chat.current_model().to_string(),
        reasoning_effort: None,
        developer_instructions: None,
    };
    chat.stored_collaboration_mode = CollaborationMode::Plan(plan_settings);
    chat.replay_initial_messages(vec![EventMsg::TurnComplete(TurnCompleteEvent {
        last_agent_message: Some("Plan details".to_string()),
    })]);
    let popup = render_bottom_popup(&chat, 80);
    assert!(
        !popup.contains(PLAN_IMPLEMENTATION_TITLE),
        "expected no plan popup for replayed turn, got {popup:?}"
    );
}
#[tokio::test]
async fn plan_implementation_popup_skips_when_messages_queued() {
    // When the user has queued a message during the running task, finishing
    // the task must not show the plan-implementation prompt.
    let (mut chat, _rx, _op_rx) = make_chatwidget_manual(Some("gpt-5")).await;
    chat.set_feature_enabled(Feature::CollaborationModes, true);
    let plan_settings = Settings {
        model: chat.current_model().to_string(),
        reasoning_effort: None,
        developer_instructions: None,
    };
    chat.stored_collaboration_mode = CollaborationMode::Plan(plan_settings);
    chat.bottom_pane.set_task_running(true);
    chat.queue_user_message("Queued message".into());
    chat.on_task_complete(Some("Plan details".to_string()), false);
    let popup = render_bottom_popup(&chat, 80);
    assert!(
        !popup.contains(PLAN_IMPLEMENTATION_TITLE),
        "expected no plan popup with queued messages, got {popup:?}"
    );
}
#[tokio::test]
async fn plan_implementation_popup_shows_on_plan_update_without_message() {
    // Even when the turn ends with no final agent message, a plan update seen
    // during the turn is enough to trigger the plan-implementation prompt.
    let (mut chat, _rx, _op_rx) = make_chatwidget_manual(Some("gpt-5")).await;
    chat.set_feature_enabled(Feature::CollaborationModes, true);
    let plan_settings = Settings {
        model: chat.current_model().to_string(),
        reasoning_effort: None,
        developer_instructions: None,
    };
    chat.stored_collaboration_mode = CollaborationMode::Plan(plan_settings);
    chat.on_task_started();
    let first_step = PlanItemArg {
        step: "First".to_string(),
        status: StepStatus::Pending,
    };
    chat.on_plan_update(UpdatePlanArgs {
        explanation: None,
        plan: vec![first_step],
    });
    chat.on_task_complete(None, false);
    let popup = render_bottom_popup(&chat, 80);
    assert!(
        popup.contains(PLAN_IMPLEMENTATION_TITLE),
        "expected plan popup after plan update, got {popup:?}"
    );
}
#[tokio::test]
async fn plan_implementation_popup_skips_when_rate_limit_prompt_pending() {
    // If a rate-limit prompt is pending when the turn completes, it wins:
    // the rate-limit popup is shown and the plan prompt is suppressed.
    let (mut chat, _rx, _op_rx) = make_chatwidget_manual(Some("gpt-5")).await;
    chat.auth_manager =
        AuthManager::from_auth_for_testing(CodexAuth::create_dummy_chatgpt_auth_for_testing());
    chat.set_feature_enabled(Feature::CollaborationModes, true);
    let plan_settings = Settings {
        model: chat.current_model().to_string(),
        reasoning_effort: None,
        developer_instructions: None,
    };
    chat.stored_collaboration_mode = CollaborationMode::Plan(plan_settings);
    chat.on_task_started();
    let first_step = PlanItemArg {
        step: "First".to_string(),
        status: StepStatus::Pending,
    };
    chat.on_plan_update(UpdatePlanArgs {
        explanation: None,
        plan: vec![first_step],
    });
    chat.on_rate_limit_snapshot(Some(snapshot(92.0)));
    chat.on_task_complete(None, false);
    let popup = render_bottom_popup(&chat, 80);
    assert!(
        popup.contains("Approaching rate limits"),
        "expected rate limit popup, got {popup:?}"
    );
    assert!(
        !popup.contains(PLAN_IMPLEMENTATION_TITLE),
        "expected plan popup to be skipped, got {popup:?}"
    );
}
// (removed experimental resize snapshot test)
#[tokio::test]
@@ -1756,7 +1920,7 @@ async fn unified_exec_end_after_task_complete_is_suppressed() {
);
drain_insert_history(&mut rx);
chat.on_task_complete(None);
chat.on_task_complete(None, false);
end_exec(&mut chat, begin, "", "", 0);
let cells = drain_insert_history(&mut rx);
@@ -1770,7 +1934,7 @@ async fn unified_exec_end_after_task_complete_is_suppressed() {
async fn unified_exec_interaction_after_task_complete_is_suppressed() {
let (mut chat, mut rx, _op_rx) = make_chatwidget_manual(None).await;
chat.on_task_started();
chat.on_task_complete(None);
chat.on_task_complete(None, false);
chat.handle_codex_event(Event {
id: "call-1".to_string(),