Compare commits


4 Commits

Author SHA1 Message Date
jif-oai
6c5471feb2 feat: /resume per ID/name (#17222)
Support `/resume 00000-0000-0000-00000000` from the TUI (and the
equivalent for resuming by thread name)
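For example, `/resume my-saved-thread` resumes by name; both forms are
routed through the new `ResumeSessionByIdOrName` app event (see the TUI
diffs below).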
2026-04-09 14:21:27 +01:00
jgershen-oai
8f705b0702 [codex] Defer steering until after sampling the model post-compaction (#17163)
## Summary
- keep pending steered input buffered until the active user prompt has
received a model response
- keep steering pending across auto-compact when there is real
model/tool continuation to resume
- allow queued steering to follow compaction immediately when the prior
model response was already final
- keep pending-input follow-up owned by `run_turn` instead of folding it
into `SamplingRequestResult`
- add regression coverage for mid-turn compaction, final-response
compaction, and compaction triggered before the next request after tool
output

## Root Cause
Steered input was drained at the top of every `run_turn` loop. After
auto-compaction, the loop continued and immediately appended any pending
steer after the compact summary, making a queued prompt look like the
newest task instead of letting the model first resume interrupted
model/tool work.

## Implementation Notes
This patch keeps the follow-up signals separated:

- `SamplingRequestResult.needs_follow_up` means model/tool continuation
is needed
- `sess.has_pending_input().await` means queued user steering exists
- `run_turn` computes the combined loop condition from those two signals

In `run_turn`:

```rust
let has_pending_input = sess.has_pending_input().await;
let needs_follow_up = model_needs_follow_up || has_pending_input;
```

After auto-compact we choose whether the next request may drain
steering:

```rust
can_drain_pending_input = !model_needs_follow_up;
```

That means:

- model/tool continuation + pending steer: compact -> resume once
without draining steer
- completed model answer + pending steer: compact -> drain/send the
steer immediately
- fresh user prompt: do not drain steering before the model has answered
the prompt once

The drain is still only `sess.get_pending_input().await`; when
`can_drain_pending_input` is false, core uses an empty local vec and
leaves the steer pending in session state.
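
A minimal, self-contained sketch of how these signals combine. The names
mirror the PR description; the standalone struct is illustrative only (in
core these are locals on `run_turn`'s loop, not a struct):

```rust
// Illustrative sketch only: names mirror the PR, the struct is a stand-in
// for locals in `run_turn`.
struct LoopSignals {
    model_needs_follow_up: bool, // model/tool continuation is needed
    has_pending_input: bool,     // queued user steering exists
}

impl LoopSignals {
    // Combined loop condition computed by `run_turn` after sampling.
    fn needs_follow_up(&self) -> bool {
        self.model_needs_follow_up || self.has_pending_input
    }

    // After auto-compact: the next request may drain steering only when
    // the prior model response was already final.
    fn can_drain_pending_input(&self) -> bool {
        !self.model_needs_follow_up
    }
}

fn main() {
    // Mid-turn compact: resume the interrupted work once without draining.
    let mid_turn = LoopSignals { model_needs_follow_up: true, has_pending_input: true };
    assert!(mid_turn.needs_follow_up());
    assert!(!mid_turn.can_drain_pending_input());

    // Final answer + pending steer: drain and send the steer immediately.
    let final_answer = LoopSignals { model_needs_follow_up: false, has_pending_input: true };
    assert!(final_answer.needs_follow_up());
    assert!(final_answer.can_drain_pending_input());
}
```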

## Validation
- PASS `cargo test -p codex-core --test all steered_user_input --
--nocapture`
- PASS `just fmt`
- PASS `git diff --check`
- NOT PASSING HERE: `just fix -p codex-core` currently stops before
linting this change due to an unrelated mainline test-build error:
`core/src/tools/spec_tests.rs` initializes `ToolsConfigParams` without
`image_generation_tool_auth_allowed`; this PR does not touch that file.
2026-04-09 02:08:41 -07:00
Ahmed Ibrahim
84a24fe333 make webrtc the default experience (#17188)
## Summary
- make realtime default to the v2 WebRTC path
- keep partial realtime config tables inheriting
`RealtimeConfig::default()`
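
As a rough illustration of what that inheritance means (types stubbed out
here so the example is runnable; the real definitions live in core's
config module):

```rust
// Stub types standing in for the real config structs; only the shape matters.
#[derive(Debug, Clone, Copy, Default, PartialEq, Eq)]
enum RealtimeTransport {
    #[default]
    WebRtc, // the new default
    #[allow(dead_code)]
    Websocket,
}

#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum RealtimeVoice {
    Marin,
}

#[derive(Debug, Default, PartialEq)]
struct RealtimeConfig {
    transport: RealtimeTransport,
    voice: Option<RealtimeVoice>,
}

fn main() {
    // A partial config table that only sets `voice` still inherits the rest
    // of `RealtimeConfig::default()`, so transport falls back to WebRTC.
    let cfg = RealtimeConfig {
        voice: Some(RealtimeVoice::Marin),
        ..RealtimeConfig::default()
    };
    assert_eq!(cfg.transport, RealtimeTransport::WebRtc);
}
```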

## Validation
- CI found a stale config-test expectation; fixed in 974ba51bb3
- just fmt
- git diff --check

---------

Co-authored-by: Codex <noreply@openai.com>
2026-04-08 23:52:32 -07:00
Eric Traut
23f4cd8459 Skip update prompts for source builds (#17186)
Addresses #17166

Problem: Source builds report version 0.0.0, so the TUI update path can
treat any released Codex version as upgradeable and show startup or
popup prompts.

Solution: Skip both TUI update prompt entry points when the running CLI
version is the source-build sentinel 0.0.0.
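
A condensed, runnable sketch of the guard. `parse_version` and
`is_source_build_version` match the diff below; the constant and `main`
are illustrative stand-ins for the real version string and call sites:

```rust
// `CODEX_CLI_VERSION` is the compiled-in version string; source builds
// report the 0.0.0 sentinel. The constant here is a stand-in.
const CODEX_CLI_VERSION: &str = "0.0.0";

fn parse_version(v: &str) -> Option<(u64, u64, u64)> {
    let mut it = v.split('.').map(str::parse::<u64>);
    match (it.next()?, it.next()?, it.next()?) {
        (Ok(maj), Ok(min), Ok(pat)) => Some((maj, min, pat)),
        _ => None,
    }
}

fn is_source_build_version(version: &str) -> bool {
    parse_version(version) == Some((0, 0, 0))
}

fn main() {
    // Source builds skip both update prompt entry points; releases do not.
    assert!(is_source_build_version(CODEX_CLI_VERSION));
    assert!(!is_source_build_version("0.57.0"));
}
```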
2026-04-08 22:26:05 -07:00
10 changed files with 507 additions and 99 deletions

View File

@@ -448,9 +448,9 @@ pub enum RealtimeWsMode {
 #[derive(Serialize, Deserialize, Debug, Clone, Copy, Default, PartialEq, Eq, JsonSchema)]
 #[serde(rename_all = "snake_case")]
 pub enum RealtimeTransport {
+    #[default]
     #[serde(rename = "webrtc")]
     WebRtc,
-    #[default]
     Websocket,
 }

View File

@@ -6144,6 +6144,11 @@ pub(crate) async fn run_turn(
     // one instance across retries within this turn.
     let mut client_session =
         prewarmed_client_session.unwrap_or_else(|| sess.services.model_client.new_session());
+    // Pending input is drained into history before building the next model request.
+    // However, we defer that drain until after sampling in two cases:
+    // 1. At the start of a turn, so the fresh user prompt in `input` gets sampled first.
+    // 2. After auto-compact, when model/tool continuation needs to resume before any steer.
+    let mut can_drain_pending_input = input.is_empty();
     loop {
         if run_pending_session_start_hooks(&sess, &turn_context).await {
@@ -6153,7 +6158,11 @@ pub(crate) async fn run_turn(
         // Note that pending_input would be something like a message the user
         // submitted through the UI while the model was running. Though the UI
         // may support this, the model might not.
-        let pending_input = sess.get_pending_input().await;
+        let pending_input = if can_drain_pending_input {
+            sess.get_pending_input().await
+        } else {
+            Vec::new()
+        };
         let mut blocked_pending_input = false;
         let mut blocked_pending_input_contexts = Vec::new();
@@ -6227,9 +6236,12 @@
         {
             Ok(sampling_request_output) => {
                 let SamplingRequestResult {
-                    needs_follow_up,
+                    needs_follow_up: model_needs_follow_up,
                     last_agent_message: sampling_request_last_agent_message,
                 } = sampling_request_output;
+                can_drain_pending_input = true;
+                let has_pending_input = sess.has_pending_input().await;
+                let needs_follow_up = model_needs_follow_up || has_pending_input;
                 let total_usage_tokens = sess.get_total_token_usage().await;
                 let token_limit_reached = total_usage_tokens >= auto_compact_limit;
@@ -6242,6 +6254,8 @@
                     estimated_token_count = ?estimated_token_count,
                     auto_compact_limit,
                     token_limit_reached,
+                    model_needs_follow_up,
+                    has_pending_input,
                     needs_follow_up,
                     "post sampling token usage"
                 );
@@ -6259,6 +6273,7 @@
                         return None;
                     }
                     client_session.reset_websocket_session();
+                    can_drain_pending_input = !model_needs_follow_up;
                     continue;
                 }
@@ -7767,8 +7782,6 @@ async fn try_run_sampling_request(
             .await;
         should_emit_turn_diff = true;
-        needs_follow_up |= sess.has_pending_input().await;
         break Ok(SamplingRequestResult {
             needs_follow_up,
             last_agent_message,

View File

@@ -6498,10 +6498,8 @@ voice = "marin"
     assert_eq!(
         config.realtime,
         RealtimeConfig {
-            version: RealtimeWsVersion::V2,
-            session_type: RealtimeWsMode::Conversational,
             transport: RealtimeTransport::Websocket,
             voice: Some(RealtimeVoice::Marin),
+            ..RealtimeConfig::default()
         }
     );
     Ok(())

View File

@@ -3,14 +3,17 @@ use std::sync::Arc;
use codex_core::CodexThread;
use codex_protocol::AgentPath;
use codex_protocol::items::TurnItem;
use codex_protocol::protocol::AskForApproval;
use codex_protocol::protocol::EventMsg;
use codex_protocol::protocol::InterAgentCommunication;
use codex_protocol::protocol::Op;
use codex_protocol::protocol::SandboxPolicy;
use codex_protocol::user_input::UserInput;
use core_test_support::context_snapshot;
use core_test_support::context_snapshot::ContextSnapshotOptions;
use core_test_support::responses;
use core_test_support::responses::ev_completed;
use core_test_support::responses::ev_completed_with_tokens;
use core_test_support::responses::ev_function_call;
use core_test_support::responses::ev_message_item_added;
use core_test_support::responses::ev_output_text_delta;
@@ -20,6 +23,7 @@ use core_test_support::responses::ev_response_created;
use core_test_support::streaming_sse::StreamingSseChunk;
use core_test_support::streaming_sse::StreamingSseServer;
use core_test_support::streaming_sse::start_streaming_sse_server;
use core_test_support::test_codex::TestCodex;
use core_test_support::test_codex::test_codex;
use core_test_support::wait_for_event;
use pretty_assertions::assert_eq;
@@ -101,6 +105,29 @@ async fn submit_user_input(codex: &CodexThread, text: &str) {
.unwrap_or_else(|err| panic!("submit user input: {err}"));
}
async fn submit_danger_full_access_user_turn(test: &TestCodex, text: &str) {
test.codex
.submit(Op::UserTurn {
items: vec![UserInput::Text {
text: text.to_string(),
text_elements: Vec::new(),
}],
final_output_json_schema: None,
cwd: test.config.cwd.to_path_buf(),
approval_policy: AskForApproval::Never,
approvals_reviewer: None,
sandbox_policy: SandboxPolicy::DangerFullAccess,
model: test.session_configured.model.clone(),
effort: None,
summary: None,
service_tier: None,
collaboration_mode: None,
personality: None,
})
.await
.unwrap_or_else(|err| panic!("submit user turn: {err}"));
}
async fn steer_user_input(codex: &CodexThread, text: &str) {
codex
.steer_input(
@@ -439,3 +466,303 @@ async fn user_input_does_not_preempt_after_reasoning_item() {
server.shutdown().await;
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn steered_user_input_waits_for_model_continuation_after_mid_turn_compact() {
let first_chunks = vec![
chunk(ev_response_created("resp-1")),
chunk(ev_function_call("call-1", "test_tool", "{}")),
chunk(ev_completed_with_tokens(
"resp-1", /*total_tokens*/ 500,
)),
];
let compact_chunks = vec![
chunk(ev_response_created("resp-compact")),
chunk(ev_message_item_done("msg-compact", "AUTO_COMPACT_SUMMARY")),
chunk(ev_completed_with_tokens(
"resp-compact",
/*total_tokens*/ 50,
)),
];
let post_compact_continuation_chunks = vec![
chunk(ev_response_created("resp-post-compact")),
chunk(ev_message_item_added("msg-post-compact", "")),
chunk(ev_output_text_delta("resumed old task")),
chunk(ev_message_item_done("msg-post-compact", "resumed old task")),
chunk(ev_completed_with_tokens(
"resp-post-compact",
/*total_tokens*/ 60,
)),
];
let steered_follow_up_chunks = vec![
chunk(ev_response_created("resp-steered")),
chunk(ev_message_item_done(
"msg-steered",
"processed steered prompt",
)),
chunk(ev_completed_with_tokens(
"resp-steered",
/*total_tokens*/ 70,
)),
];
let (server, _completions) = start_streaming_sse_server(vec![
first_chunks,
compact_chunks,
post_compact_continuation_chunks,
steered_follow_up_chunks,
])
.await;
let codex = test_codex()
.with_model("gpt-5.1")
.with_config(|config| {
config.model_provider.name = "OpenAI (test)".to_string();
config.model_provider.supports_websockets = false;
config.model_auto_compact_token_limit = Some(200);
})
.build_with_streaming_server(&server)
.await
.unwrap_or_else(|err| panic!("build streaming Codex test session: {err}"))
.codex;
submit_user_input(&codex, "first prompt").await;
submit_user_input(&codex, "second prompt").await;
wait_for_agent_message(&codex, "resumed old task").await;
wait_for_turn_complete(&codex).await;
let requests = server.requests().await;
assert_eq!(requests.len(), 4);
let post_compact_body: Value =
from_slice(&requests[2]).unwrap_or_else(|err| panic!("parse post-compact request: {err}"));
let steered_body: Value =
from_slice(&requests[3]).unwrap_or_else(|err| panic!("parse steered request: {err}"));
let post_compact_user_texts = message_input_texts(&post_compact_body, "user");
assert!(
!post_compact_user_texts
.iter()
.any(|text| text == "second prompt"),
"steered input should stay pending until the model resumes after compaction"
);
let steered_user_texts = message_input_texts(&steered_body, "user");
assert!(
steered_user_texts
.iter()
.any(|text| text == "second prompt"),
"steered input should be recorded on the request after the post-compact continuation"
);
server.shutdown().await;
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn steered_user_input_follows_compact_when_only_the_steer_needs_follow_up() {
let (gate_first_completed_tx, gate_first_completed_rx) = oneshot::channel();
let first_chunks = vec![
chunk(ev_response_created("resp-1")),
chunk(ev_message_item_added("msg-1", "")),
chunk(ev_output_text_delta("first answer")),
chunk(ev_message_item_done("msg-1", "first answer")),
gated_chunk(
gate_first_completed_rx,
vec![ev_completed_with_tokens(
"resp-1", /*total_tokens*/ 500,
)],
),
];
let compact_chunks = vec![
chunk(ev_response_created("resp-compact")),
chunk(ev_message_item_done("msg-compact", "AUTO_COMPACT_SUMMARY")),
chunk(ev_completed_with_tokens(
"resp-compact",
/*total_tokens*/ 50,
)),
];
let steered_follow_up_chunks = vec![
chunk(ev_response_created("resp-steered")),
chunk(ev_message_item_done(
"msg-steered",
"processed steered prompt",
)),
chunk(ev_completed_with_tokens(
"resp-steered",
/*total_tokens*/ 70,
)),
];
let (server, _completions) =
start_streaming_sse_server(vec![first_chunks, compact_chunks, steered_follow_up_chunks])
.await;
let codex = test_codex()
.with_model("gpt-5.1")
.with_config(|config| {
config.model_provider.name = "OpenAI (test)".to_string();
config.model_provider.supports_websockets = false;
config.model_auto_compact_token_limit = Some(200);
})
.build_with_streaming_server(&server)
.await
.unwrap_or_else(|err| panic!("build streaming Codex test session: {err}"))
.codex;
submit_user_input(&codex, "first prompt").await;
wait_for_agent_message(&codex, "first answer").await;
steer_user_input(&codex, "second prompt").await;
let _ = gate_first_completed_tx.send(());
wait_for_agent_message(&codex, "processed steered prompt").await;
wait_for_turn_complete(&codex).await;
let requests = server.requests().await;
assert_eq!(requests.len(), 3);
let compact_body: Value =
from_slice(&requests[1]).unwrap_or_else(|err| panic!("parse compact request: {err}"));
let steered_body: Value =
from_slice(&requests[2]).unwrap_or_else(|err| panic!("parse steered request: {err}"));
let compact_user_texts = message_input_texts(&compact_body, "user");
assert!(
!compact_user_texts
.iter()
.any(|text| text == "second prompt"),
"steered input should not be included in the compaction request"
);
let steered_user_texts = message_input_texts(&steered_body, "user");
assert!(
steered_user_texts
.iter()
.any(|text| text == "second prompt"),
"steered input should follow compaction without an empty resume request when the model was already done"
);
server.shutdown().await;
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn steered_user_input_waits_when_tool_output_triggers_compact_before_next_request() {
let (gate_first_completed_tx, gate_first_completed_rx) = oneshot::channel();
let first_chunks = vec![
chunk(ev_response_created("resp-1")),
chunk(ev_function_call(
"call-1",
"shell_command",
r#"{"command":"printf '%04000d' 0","login":false,"timeout_ms":2000}"#,
)),
gated_chunk(
gate_first_completed_rx,
vec![ev_completed_with_tokens(
"resp-1", /*total_tokens*/ 100,
)],
),
];
let compact_chunks = vec![
chunk(ev_response_created("resp-compact")),
chunk(ev_message_item_done("msg-compact", "TOOL_OUTPUT_SUMMARY")),
chunk(ev_completed_with_tokens(
"resp-compact",
/*total_tokens*/ 50,
)),
];
let post_compact_continuation_chunks = vec![
chunk(ev_response_created("resp-post-compact")),
chunk(ev_message_item_done(
"msg-post-compact",
"resumed after compacting tool output",
)),
chunk(ev_completed_with_tokens(
"resp-post-compact",
/*total_tokens*/ 60,
)),
];
let steered_follow_up_chunks = vec![
chunk(ev_response_created("resp-steered")),
chunk(ev_message_item_done(
"msg-steered",
"processed steered prompt",
)),
chunk(ev_completed_with_tokens(
"resp-steered",
/*total_tokens*/ 70,
)),
];
let (server, _completions) = start_streaming_sse_server(vec![
first_chunks,
compact_chunks,
post_compact_continuation_chunks,
steered_follow_up_chunks,
])
.await;
let test = test_codex()
.with_model("gpt-5.1")
.with_config(|config| {
config.model_provider.name = "OpenAI (test)".to_string();
config.model_provider.supports_websockets = false;
config.model_auto_compact_token_limit = Some(200);
})
.build_with_streaming_server(&server)
.await
.unwrap_or_else(|err| panic!("build streaming Codex test session: {err}"));
let codex = test.codex.clone();
submit_danger_full_access_user_turn(&test, "first prompt").await;
wait_for_event(&codex, |event| matches!(event, EventMsg::TurnStarted(_))).await;
steer_user_input(&codex, "second prompt").await;
let _ = gate_first_completed_tx.send(());
wait_for_turn_complete(&codex).await;
let requests = server.requests().await;
assert_eq!(requests.len(), 4);
let compact_body: Value =
from_slice(&requests[1]).unwrap_or_else(|err| panic!("parse compact request: {err}"));
let post_compact_body: Value =
from_slice(&requests[2]).unwrap_or_else(|err| panic!("parse post-compact request: {err}"));
let steered_body: Value =
from_slice(&requests[3]).unwrap_or_else(|err| panic!("parse steered request: {err}"));
let compact_user_texts = message_input_texts(&compact_body, "user");
assert!(
!compact_user_texts
.iter()
.any(|text| text == "second prompt"),
"steered input should not be included in the compaction request"
);
let post_compact_user_texts = message_input_texts(&post_compact_body, "user");
assert!(
!post_compact_user_texts
.iter()
.any(|text| text == "second prompt"),
"steered input should stay pending until after the compacted continuation"
);
let steered_user_texts = message_input_texts(&steered_body, "user");
assert!(
steered_user_texts
.iter()
.any(|text| text == "second prompt"),
"steered input should be recorded on the request after the post-compact continuation"
);
server.shutdown().await;
}

View File

@@ -47,6 +47,7 @@ use crate::read_session_model;
use crate::render::highlight::highlight_bash_to_lines;
use crate::render::renderable::Renderable;
use crate::resume_picker::SessionSelection;
use crate::resume_picker::SessionTarget;
#[cfg(test)]
use crate::test_support::PathBufExt;
use crate::tui;
@@ -4047,6 +4048,108 @@ impl App {
Ok(AppRunControl::Continue)
}
async fn resume_target_session(
&mut self,
tui: &mut tui::Tui,
app_server: &mut AppServerSession,
target_session: SessionTarget,
) -> Result<AppRunControl> {
if self.ignore_same_thread_resume(&target_session) {
tui.frame_requester().schedule_frame();
return Ok(AppRunControl::Continue);
}
let current_cwd = self.config.cwd.to_path_buf();
let resume_cwd = if self.remote_app_server_url.is_some() {
current_cwd.clone()
} else {
match crate::resolve_cwd_for_resume_or_fork(
tui,
&self.config,
&current_cwd,
target_session.thread_id,
target_session.path.as_deref(),
CwdPromptAction::Resume,
/*allow_prompt*/ true,
)
.await?
{
crate::ResolveCwdOutcome::Continue(Some(cwd)) => cwd,
crate::ResolveCwdOutcome::Continue(None) => current_cwd.clone(),
crate::ResolveCwdOutcome::Exit => {
return Ok(AppRunControl::Exit(ExitReason::UserRequested));
}
}
};
let mut resume_config = match self
.rebuild_config_for_resume_or_fallback(&current_cwd, resume_cwd)
.await
{
Ok(cfg) => cfg,
Err(err) => {
self.chat_widget.add_error_message(format!(
"Failed to rebuild configuration for resume: {err}"
));
return Ok(AppRunControl::Continue);
}
};
self.apply_runtime_policy_overrides(&mut resume_config);
let summary = session_summary(
self.chat_widget.token_usage(),
self.chat_widget.thread_id(),
self.chat_widget.thread_name(),
);
match app_server
.resume_thread(resume_config.clone(), target_session.thread_id)
.await
{
Ok(resumed) => {
self.shutdown_current_thread(app_server).await;
self.config = resume_config;
tui.set_notification_settings(
self.config.tui_notifications.method,
self.config.tui_notifications.condition,
);
self.file_search
.update_search_dir(self.config.cwd.to_path_buf());
match self
.replace_chat_widget_with_app_server_thread(tui, app_server, resumed)
.await
{
Ok(()) => {
if let Some(summary) = summary {
let mut lines: Vec<Line<'static>> = Vec::new();
if let Some(usage_line) = summary.usage_line {
lines.push(usage_line.into());
}
if let Some(command) = summary.resume_command {
let spans =
vec!["To continue this session, run ".into(), command.cyan()];
lines.push(spans.into());
}
self.chat_widget.add_plain_history_lines(lines);
}
}
Err(err) => {
self.chat_widget.add_error_message(format!(
"Failed to attach to resumed app-server thread: {err}"
));
}
}
}
Err(err) => {
let path_display = target_session.display_label();
self.chat_widget.add_error_message(format!(
"Failed to resume session from {path_display}: {err}"
));
}
}
Ok(AppRunControl::Continue)
}
async fn handle_event(
&mut self,
tui: &mut tui::Tui,
@@ -4097,97 +4200,13 @@ impl App {
                 .await?
             {
                 SessionSelection::Resume(target_session) => {
-                    if self.ignore_same_thread_resume(&target_session) {
-                        tui.frame_requester().schedule_frame();
-                        return Ok(AppRunControl::Continue);
-                    }
-                    let current_cwd = self.config.cwd.to_path_buf();
-                    let resume_cwd = if self.remote_app_server_url.is_some() {
-                        current_cwd.clone()
-                    } else {
-                        match crate::resolve_cwd_for_resume_or_fork(
-                            tui,
-                            &self.config,
-                            &current_cwd,
-                            target_session.thread_id,
-                            target_session.path.as_deref(),
-                            CwdPromptAction::Resume,
-                            /*allow_prompt*/ true,
-                        )
-                        .await?
-                        {
-                            crate::ResolveCwdOutcome::Continue(Some(cwd)) => cwd,
-                            crate::ResolveCwdOutcome::Continue(None) => current_cwd.clone(),
-                            crate::ResolveCwdOutcome::Exit => {
-                                return Ok(AppRunControl::Exit(ExitReason::UserRequested));
-                            }
-                        }
-                    };
-                    let mut resume_config = match self
-                        .rebuild_config_for_resume_or_fallback(&current_cwd, resume_cwd)
-                        .await
-                    {
-                        Ok(cfg) => cfg,
-                        Err(err) => {
-                            self.chat_widget.add_error_message(format!(
-                                "Failed to rebuild configuration for resume: {err}"
-                            ));
-                            return Ok(AppRunControl::Continue);
-                        }
-                    };
-                    self.apply_runtime_policy_overrides(&mut resume_config);
-                    let summary = session_summary(
-                        self.chat_widget.token_usage(),
-                        self.chat_widget.thread_id(),
-                        self.chat_widget.thread_name(),
-                    );
-                    match app_server
-                        .resume_thread(resume_config.clone(), target_session.thread_id)
-                        .await
-                    {
-                        Ok(resumed) => {
-                            self.shutdown_current_thread(app_server).await;
-                            self.config = resume_config;
-                            tui.set_notification_settings(
-                                self.config.tui_notifications.method,
-                                self.config.tui_notifications.condition,
-                            );
-                            self.file_search
-                                .update_search_dir(self.config.cwd.to_path_buf());
-                            match self
-                                .replace_chat_widget_with_app_server_thread(
-                                    tui, app_server, resumed,
-                                )
-                                .await
-                            {
-                                Ok(()) => {
-                                    if let Some(summary) = summary {
-                                        let mut lines: Vec<Line<'static>> = Vec::new();
-                                        if let Some(usage_line) = summary.usage_line {
-                                            lines.push(usage_line.into());
-                                        }
-                                        if let Some(command) = summary.resume_command {
-                                            let spans = vec![
-                                                "To continue this session, run ".into(),
-                                                command.cyan(),
-                                            ];
-                                            lines.push(spans.into());
-                                        }
-                                        self.chat_widget.add_plain_history_lines(lines);
-                                    }
-                                }
-                                Err(err) => {
-                                    self.chat_widget.add_error_message(format!(
-                                        "Failed to attach to resumed app-server thread: {err}"
-                                    ));
-                                }
-                            }
-                        }
-                        Err(err) => {
-                            let path_display = target_session.display_label();
-                            self.chat_widget.add_error_message(format!(
-                                "Failed to resume session from {path_display}: {err}"
-                            ));
-                        }
-                    }
+                    match self
+                        .resume_target_session(tui, app_server, target_session)
+                        .await?
+                    {
+                        AppRunControl::Continue => {}
+                        AppRunControl::Exit(reason) => {
+                            return Ok(AppRunControl::Exit(reason));
+                        }
+                    }
                 }
             }
@@ -4199,6 +4218,20 @@ impl App {
// Leaving alt-screen may blank the inline viewport; force a redraw either way.
tui.frame_requester().schedule_frame();
}
AppEvent::ResumeSessionByIdOrName(id_or_name) => {
match crate::lookup_session_target_with_app_server(app_server, &id_or_name).await? {
Some(target_session) => {
return self
.resume_target_session(tui, app_server, target_session)
.await;
}
None => {
self.chat_widget.add_error_message(format!(
"No saved chat found matching '{id_or_name}'."
));
}
}
}
AppEvent::ForkCurrentSession => {
self.session_telemetry.counter(
"codex.thread.fork",

View File

@@ -124,6 +124,9 @@ pub(crate) enum AppEvent {
/// Open the resume picker inside the running TUI session.
OpenResumePicker,
/// Resume a thread by UUID or thread name inside the running TUI session.
ResumeSessionByIdOrName(String),
/// Fork the current session into a new thread.
ForkCurrentSession,

View File

@@ -5452,6 +5452,17 @@ impl ChatWidget {
}));
self.bottom_pane.drain_pending_submission_state();
}
SlashCommand::Resume if !trimmed.is_empty() => {
let Some((prepared_args, _prepared_elements)) = self
.bottom_pane
.prepare_inline_args_submission(/*record_history*/ false)
else {
return;
};
self.app_event_tx
.send(AppEvent::ResumeSessionByIdOrName(prepared_args));
self.bottom_pane.drain_pending_submission_state();
}
SlashCommand::SandboxReadRoot if !trimmed.is_empty() => {
let Some((prepared_args, _prepared_elements)) = self
.bottom_pane

View File

@@ -447,6 +447,24 @@ async fn slash_resume_opens_picker() {
assert_matches!(rx.try_recv(), Ok(AppEvent::OpenResumePicker));
}
#[tokio::test]
async fn slash_resume_with_arg_requests_named_session() {
let (mut chat, mut rx, mut op_rx) = make_chatwidget_manual(/*model_override*/ None).await;
chat.bottom_pane.set_composer_text(
"/resume my-saved-thread".to_string(),
Vec::new(),
Vec::new(),
);
chat.handle_key_event(KeyEvent::from(KeyCode::Enter));
assert_matches!(
rx.try_recv(),
Ok(AppEvent::ResumeSessionByIdOrName(id_or_name)) if id_or_name == "my-saved-thread"
);
assert_matches!(op_rx.try_recv(), Err(TryRecvError::Empty));
}
#[tokio::test]
async fn slash_fork_requests_current_fork() {
let (mut chat, mut rx, _op_rx) = make_chatwidget_manual(/*model_override*/ None).await;

View File

@@ -132,6 +132,7 @@ impl SlashCommand {
| SlashCommand::Rename
| SlashCommand::Plan
| SlashCommand::Fast
| SlashCommand::Resume
| SlashCommand::SandboxReadRoot
)
}

View File

@@ -15,7 +15,7 @@ use std::path::PathBuf;
 use crate::version::CODEX_CLI_VERSION;
 
 pub fn get_upgrade_version(config: &Config) -> Option<String> {
-    if !config.check_for_update_on_startup {
+    if !config.check_for_update_on_startup || is_source_build_version(CODEX_CLI_VERSION) {
         return None;
     }
@@ -137,7 +137,7 @@ fn extract_version_from_latest_tag(latest_tag_name: &str) -> anyhow::Result<String>
 /// Returns the latest version to show in a popup, if it should be shown.
 /// This respects the user's dismissal choice for the current latest version.
 pub fn get_upgrade_version_for_popup(config: &Config) -> Option<String> {
-    if !config.check_for_update_on_startup {
+    if !config.check_for_update_on_startup || is_source_build_version(CODEX_CLI_VERSION) {
         return None;
     }
@@ -177,6 +177,10 @@ fn parse_version(v: &str) -> Option<(u64, u64, u64)> {
     Some((maj, min, pat))
 }
 
+fn is_source_build_version(version: &str) -> bool {
+    parse_version(version) == Some((0, 0, 0))
+}
+
 #[cfg(test)]
 mod tests {
     use super::*;
#[cfg(test)]
mod tests {
use super::*;