diff --git a/codex-rs/Cargo.lock b/codex-rs/Cargo.lock index b87f76384d..7c5b5a8634 100644 --- a/codex-rs/Cargo.lock +++ b/codex-rs/Cargo.lock @@ -360,16 +360,19 @@ dependencies = [ "objc2-foundation", "parking_lot", "percent-encoding", - "windows-sys 0.52.0", + "windows-sys 0.60.2", "wl-clipboard-rs", "x11rb", ] [[package]] name = "arc-swap" -version = "1.7.1" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "69f7f8c3906b62b754cd5326047894316021dcfe5a194c8ea52bdd94934a3457" +checksum = "51d03449bb8ca2cc2ef70869af31463d1ae5ccc8fa3e334b307203fbf815207e" +dependencies = [ + "rustversion", +] [[package]] name = "arrayvec" @@ -861,9 +864,9 @@ dependencies = [ [[package]] name = "chrono" -version = "0.4.42" +version = "0.4.43" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "145052bdd345b87320e369255277e3fb5152762ad123a901ef5c262dd38fe8d2" +checksum = "fac4744fb15ae8337dc853fee7fb3f4e48c0fbaa23d0afe49c447b4fab126118" dependencies = [ "iana-time-zone", "js-sys", @@ -1298,7 +1301,7 @@ dependencies = [ "codex-windows-sandbox", "core-foundation 0.9.4", "core_test_support", - "ctor 0.5.0", + "ctor 0.6.3", "dirs", "dunce", "encoding_rs", @@ -1681,7 +1684,7 @@ dependencies = [ "anyhow", "clap", "codex-process-hardening", - "ctor 0.5.0", + "ctor 0.6.3", "libc", "reqwest", "serde", @@ -2262,9 +2265,9 @@ dependencies = [ [[package]] name = "ctor" -version = "0.5.0" +version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67773048316103656a637612c4a62477603b777d91d9c62ff2290f9cde178fdb" +checksum = "424e0138278faeb2b401f174ad17e715c829512d74f3d1e81eb43365c2e0590e" dependencies = [ "ctor-proc-macro", "dtor", @@ -2272,9 +2275,9 @@ dependencies = [ [[package]] name = "ctor-proc-macro" -version = "0.0.6" +version = "0.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2931af7e13dc045d8e9d26afccc6fa115d64e115c9c84b1166288b46f6782c2" 
+checksum = "52560adf09603e58c9a7ee1fe1dcb95a16927b17c127f0ac02d6e768a0e25bc1" [[package]] name = "darling" @@ -2829,7 +2832,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "778e2ac28f6c47af28e4907f13ffd1e1ddbd400980a9abd7c8df189bf578a5ad" dependencies = [ "libc", - "windows-sys 0.52.0", + "windows-sys 0.60.2", ] [[package]] @@ -2926,7 +2929,7 @@ checksum = "0ce92ff622d6dadf7349484f42c93271a0d49b7cc4d466a936405bacbe10aa78" dependencies = [ "cfg-if", "rustix 1.0.8", - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] @@ -3867,7 +3870,7 @@ checksum = "e04d7f318608d35d4b61ddd75cbdaee86b023ebe2bd5a66ee0915f0bf93095a9" dependencies = [ "hermit-abi", "libc", - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] @@ -4160,9 +4163,9 @@ dependencies = [ [[package]] name = "log" -version = "0.4.28" +version = "0.4.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34080505efa8e45a4b816c349525ebe327ceaa8559756f0356cba97ef3bf7432" +checksum = "5e5032e24019045c762d3c0f28f5b6b8bbf38563a65908389bf7978758920897" [[package]] name = "logos" @@ -5378,7 +5381,7 @@ dependencies = [ "once_cell", "socket2 0.6.1", "tracing", - "windows-sys 0.52.0", + "windows-sys 0.60.2", ] [[package]] @@ -5757,7 +5760,7 @@ dependencies = [ "errno", "libc", "linux-raw-sys 0.4.15", - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] @@ -5770,7 +5773,7 @@ dependencies = [ "errno", "libc", "linux-raw-sys 0.9.4", - "windows-sys 0.52.0", + "windows-sys 0.60.2", ] [[package]] @@ -7071,9 +7074,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.48.0" +version = "1.49.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ff360e02eab121e0bc37a2d3b4d4dc622e6eda3a8e5253d5435ecf5bd4c68408" +checksum = "72a2903cd7736441aac9df9d7688bd0ce48edccaadf181c3b90be801e81d3d86" dependencies = [ "bytes", "libc", @@ -8088,7 +8091,7 @@ version 
= "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cf221c93e13a30d793f7645a0e7762c55d169dbb0a49671918a2319d289b10bb" dependencies = [ - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] diff --git a/codex-rs/Cargo.toml b/codex-rs/Cargo.toml index 33a664e06d..a74fb1e828 100644 --- a/codex-rs/Cargo.toml +++ b/codex-rs/Cargo.toml @@ -122,12 +122,12 @@ axum = { version = "0.8", default-features = false } base64 = "0.22.1" bytes = "1.10.1" chardetng = "0.1.17" -chrono = "0.4.42" +chrono = "0.4.43" clap = "4" clap_complete = "4" color-eyre = "0.6.3" crossterm = "0.28.1" -ctor = "0.5.0" +ctor = "0.6.3" derive_more = "2" diffy = "0.4.2" dirs = "6" diff --git a/codex-rs/app-server-protocol/src/protocol/common.rs b/codex-rs/app-server-protocol/src/protocol/common.rs index 4011328062..911a4a0e9a 100644 --- a/codex-rs/app-server-protocol/src/protocol/common.rs +++ b/codex-rs/app-server-protocol/src/protocol/common.rs @@ -510,6 +510,12 @@ server_request_definitions! { response: v2::FileChangeRequestApprovalResponse, }, + /// EXPERIMENTAL - Request input from the user for a tool call. + ToolRequestUserInput => "item/tool/requestUserInput" { + params: v2::ToolRequestUserInputParams, + response: v2::ToolRequestUserInputResponse, + }, + /// DEPRECATED APIs below /// Request to approve a patch. /// This request is used for Turns started via the legacy APIs (i.e. SendUserTurn, SendUserMessage). diff --git a/codex-rs/app-server-protocol/src/protocol/v2.rs b/codex-rs/app-server-protocol/src/protocol/v2.rs index db51c278f0..563a78cc6e 100644 --- a/codex-rs/app-server-protocol/src/protocol/v2.rs +++ b/codex-rs/app-server-protocol/src/protocol/v2.rs @@ -2277,6 +2277,54 @@ pub struct FileChangeRequestApprovalResponse { pub decision: FileChangeApprovalDecision, } +#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)] +#[serde(rename_all = "camelCase")] +#[ts(export_to = "v2/")] +/// EXPERIMENTAL. 
Defines a single selectable option for request_user_input. +pub struct ToolRequestUserInputOption { + pub label: String, + pub description: String, +} + +#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)] +#[serde(rename_all = "camelCase")] +#[ts(export_to = "v2/")] +/// EXPERIMENTAL. Represents one request_user_input question and its optional options. +pub struct ToolRequestUserInputQuestion { + pub id: String, + pub header: String, + pub question: String, + pub options: Option<Vec<ToolRequestUserInputOption>>, +} + +#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)] +#[serde(rename_all = "camelCase")] +#[ts(export_to = "v2/")] +/// EXPERIMENTAL. Params sent with a request_user_input event. +pub struct ToolRequestUserInputParams { + pub thread_id: String, + pub turn_id: String, + pub item_id: String, + pub questions: Vec<ToolRequestUserInputQuestion>, +} + +#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)] +#[serde(rename_all = "camelCase")] +#[ts(export_to = "v2/")] +/// EXPERIMENTAL. Captures a user's answer to a request_user_input question. +pub struct ToolRequestUserInputAnswer { + pub selected: Vec<String>, + pub other: Option<String>, +} + +#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)] +#[serde(rename_all = "camelCase")] +#[ts(export_to = "v2/")] +/// EXPERIMENTAL. Response payload mapping question ids to answers.
+pub struct ToolRequestUserInputResponse { + pub answers: HashMap<String, ToolRequestUserInputAnswer>, +} + #[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)] #[serde(rename_all = "camelCase")] #[ts(export_to = "v2/")] diff --git a/codex-rs/app-server-test-client/src/main.rs b/codex-rs/app-server-test-client/src/main.rs index 9df895a70a..1407c2401d 100644 --- a/codex-rs/app-server-test-client/src/main.rs +++ b/codex-rs/app-server-test-client/src/main.rs @@ -258,7 +258,7 @@ fn send_message_v2_with_policies( thread_id: thread_response.thread.id.clone(), input: vec![V2UserInput::Text { text: user_message, - // Plain text conversion has no UI element ranges. + // Test client sends plain text without UI element ranges. text_elements: Vec::new(), }], ..Default::default() @@ -292,6 +292,7 @@ fn send_follow_up_v2( thread_id: thread_response.thread.id.clone(), input: vec![V2UserInput::Text { text: first_message, + // Test client sends plain text without UI element ranges. text_elements: Vec::new(), }], ..Default::default() @@ -304,6 +305,7 @@ thread_id: thread_response.thread.id.clone(), input: vec![V2UserInput::Text { text: follow_up_message, + // Test client sends plain text without UI element ranges. text_elements: Vec::new(), }], ..Default::default() @@ -477,6 +479,7 @@ impl CodexClient { conversation_id: *conversation_id, items: vec![InputItem::Text { text: message.to_string(), + // Test client sends plain text without UI element ranges. text_elements: Vec::new(), }], }, diff --git a/codex-rs/app-server/README.md b/codex-rs/app-server/README.md index 4c376ce6a4..9bc8c3dff1 100644 --- a/codex-rs/app-server/README.md +++ b/codex-rs/app-server/README.md @@ -90,6 +90,7 @@ Example (from OpenAI's official VSCode extension): - `skills/list` — list skills for one or more `cwd` values (optional `forceReload`). - `skills/config/write` — write user-level skill config by path.
- `mcpServer/oauth/login` — start an OAuth login for a configured MCP server; returns an `authorization_url` and later emits `mcpServer/oauthLogin/completed` once the browser flow finishes. +- `tool/requestUserInput` — prompt the user with 1–3 short questions for a tool call and return their answers (experimental). - `config/mcpServer/reload` — reload MCP server config from disk and queue a refresh for loaded threads (applied on each thread's next active turn); returns `{}`. Use this after editing `config.toml` without restarting the server. - `mcpServerStatus/list` — enumerate configured MCP servers with their tools, resources, resource templates, and auth status; supports cursor+limit pagination. - `feedback/upload` — submit a feedback report (classification + optional reason/logs and conversation_id); returns the tracking thread id. @@ -470,8 +471,15 @@ Invoke a skill by including `$` in the text input. Add a `skill` inp "params": { "threadId": "thread-1", "input": [ - { "type": "text", "text": "$skill-creator Add a new skill for triaging flaky CI." }, - { "type": "skill", "name": "skill-creator", "path": "/Users/me/.codex/skills/skill-creator/SKILL.md" } + { + "type": "text", + "text": "$skill-creator Add a new skill for triaging flaky CI." 
+ }, + { + "type": "skill", + "name": "skill-creator", + "path": "/Users/me/.codex/skills/skill-creator/SKILL.md" + } ] } } @@ -506,10 +514,14 @@ Use `skills/list` to fetch the available skills (optionally scoped by `cwds`, wi To enable or disable a skill by path: ```json -{ "method": "skills/config/write", "id": 26, "params": { +{ + "method": "skills/config/write", + "id": 26, + "params": { "path": "/Users/me/.codex/skills/skill-creator/SKILL.md", "enabled": false -} } + } +} ``` ## Auth endpoints diff --git a/codex-rs/app-server/src/bespoke_event_handling.rs b/codex-rs/app-server/src/bespoke_event_handling.rs index 0b2520098d..225b2aface 100644 --- a/codex-rs/app-server/src/bespoke_event_handling.rs +++ b/codex-rs/app-server/src/bespoke_event_handling.rs @@ -54,6 +54,10 @@ use codex_app_server_protocol::ThreadItem; use codex_app_server_protocol::ThreadRollbackResponse; use codex_app_server_protocol::ThreadTokenUsage; use codex_app_server_protocol::ThreadTokenUsageUpdatedNotification; +use codex_app_server_protocol::ToolRequestUserInputOption; +use codex_app_server_protocol::ToolRequestUserInputParams; +use codex_app_server_protocol::ToolRequestUserInputQuestion; +use codex_app_server_protocol::ToolRequestUserInputResponse; use codex_app_server_protocol::Turn; use codex_app_server_protocol::TurnCompletedNotification; use codex_app_server_protocol::TurnDiffUpdatedNotification; @@ -83,6 +87,8 @@ use codex_core::review_prompts; use codex_protocol::ThreadId; use codex_protocol::plan_tool::UpdatePlanArgs; use codex_protocol::protocol::ReviewOutputEvent; +use codex_protocol::request_user_input::RequestUserInputAnswer as CoreRequestUserInputAnswer; +use codex_protocol::request_user_input::RequestUserInputResponse as CoreRequestUserInputResponse; use std::collections::HashMap; use std::convert::TryFrom; use std::path::PathBuf; @@ -258,6 +264,57 @@ pub(crate) async fn apply_bespoke_event_handling( }); } }, + EventMsg::RequestUserInput(request) => { + if 
matches!(api_version, ApiVersion::V2) { + let questions = request + .questions + .into_iter() + .map(|question| ToolRequestUserInputQuestion { + id: question.id, + header: question.header, + question: question.question, + options: question.options.map(|options| { + options + .into_iter() + .map(|option| ToolRequestUserInputOption { + label: option.label, + description: option.description, + }) + .collect() + }), + }) + .collect(); + let params = ToolRequestUserInputParams { + thread_id: conversation_id.to_string(), + turn_id: request.turn_id, + item_id: request.call_id, + questions, + }; + let rx = outgoing + .send_request(ServerRequestPayload::ToolRequestUserInput(params)) + .await; + tokio::spawn(async move { + on_request_user_input_response(event_turn_id, rx, conversation).await; + }); + } else { + error!( + "request_user_input is only supported on api v2 (call_id: {})", + request.call_id + ); + let empty = CoreRequestUserInputResponse { + answers: HashMap::new(), + }; + if let Err(err) = conversation + .submit(Op::UserInputAnswer { + id: event_turn_id, + response: empty, + }) + .await + { + error!("failed to submit UserInputAnswer: {err}"); + } + } + } // TODO(celia): properly construct McpToolCall TurnItem in core. 
EventMsg::McpToolCallBegin(begin_event) => { let notification = construct_mcp_tool_call_notification( @@ -1347,6 +1404,66 @@ async fn on_exec_approval_response( } } +async fn on_request_user_input_response( + event_turn_id: String, + receiver: oneshot::Receiver<serde_json::Value>, + conversation: Arc<CodexConversation>, +) { + let response = receiver.await; + let value = match response { + Ok(value) => value, + Err(err) => { + error!("request failed: {err:?}"); + let empty = CoreRequestUserInputResponse { + answers: HashMap::new(), + }; + if let Err(err) = conversation + .submit(Op::UserInputAnswer { + id: event_turn_id, + response: empty, + }) + .await + { + error!("failed to submit UserInputAnswer: {err}"); + } + return; + } + }; + + let response = + serde_json::from_value::<ToolRequestUserInputResponse>(value).unwrap_or_else(|err| { + error!("failed to deserialize ToolRequestUserInputResponse: {err}"); + ToolRequestUserInputResponse { + answers: HashMap::new(), + } + }); + let response = CoreRequestUserInputResponse { + answers: response + .answers + .into_iter() + .map(|(id, answer)| { + ( + id, + CoreRequestUserInputAnswer { + selected: answer.selected, + other: answer.other, + }, + ) + }) + .collect(), + }; + + if let Err(err) = conversation + .submit(Op::UserInputAnswer { + id: event_turn_id, + response, + }) + .await + { + error!("failed to submit UserInputAnswer: {err}"); + } +} + const REVIEW_FALLBACK_MESSAGE: &str = "Reviewer failed to output a response."; fn render_review_output_text(output: &ReviewOutputEvent) -> String { diff --git a/codex-rs/app-server/tests/common/lib.rs b/codex-rs/app-server/tests/common/lib.rs index d350a5df82..8093121403 100644 --- a/codex-rs/app-server/tests/common/lib.rs +++ b/codex-rs/app-server/tests/common/lib.rs @@ -27,6 +27,7 @@ pub use models_cache::write_models_cache_with_models; pub use responses::create_apply_patch_sse_response; pub use responses::create_exec_command_sse_response; pub use responses::create_final_assistant_message_sse_response; +pub use
responses::create_request_user_input_sse_response; pub use responses::create_shell_command_sse_response; pub use rollout::create_fake_rollout; pub use rollout::create_fake_rollout_with_text_elements; diff --git a/codex-rs/app-server/tests/common/responses.rs b/codex-rs/app-server/tests/common/responses.rs index 35c1862e8f..e15319e02f 100644 --- a/codex-rs/app-server/tests/common/responses.rs +++ b/codex-rs/app-server/tests/common/responses.rs @@ -60,3 +60,26 @@ pub fn create_exec_command_sse_response(call_id: &str) -> anyhow::Result responses::ev_completed("resp-1"), ])) } + +pub fn create_request_user_input_sse_response(call_id: &str) -> anyhow::Result<String> { + let tool_call_arguments = serde_json::to_string(&json!({ + "questions": [{ + "id": "confirm_path", + "header": "Confirm", + "question": "Proceed with the plan?", + "options": [{ + "label": "Yes (Recommended)", + "description": "Continue the current plan." + }, { + "label": "No", + "description": "Stop and revisit the approach." + }] + }] + }))?; + + Ok(responses::sse(vec![ + responses::ev_response_created("resp-1"), + responses::ev_function_call(call_id, "request_user_input", &tool_call_arguments), + responses::ev_completed("resp-1"), + ])) +} diff --git a/codex-rs/app-server/tests/common/rollout.rs b/codex-rs/app-server/tests/common/rollout.rs index 994cdf5ff8..e2e55a6544 100644 --- a/codex-rs/app-server/tests/common/rollout.rs +++ b/codex-rs/app-server/tests/common/rollout.rs @@ -59,6 +59,7 @@ pub fn create_fake_rollout( cli_version: "0.0.0".to_string(), source: SessionSource::Cli, model_provider: model_provider.map(str::to_string), + base_instructions: None, }; let payload = serde_json::to_value(SessionMetaLine { meta, @@ -136,6 +137,7 @@ pub fn create_fake_rollout_with_text_elements( cli_version: "0.0.0".to_string(), source: SessionSource::Cli, model_provider: model_provider.map(str::to_string), + base_instructions: None, }; let payload = serde_json::to_value(SessionMetaLine { meta, diff --git
a/codex-rs/app-server/tests/suite/v2/mod.rs b/codex-rs/app-server/tests/suite/v2/mod.rs index 82939908ee..bf6230ae52 100644 --- a/codex-rs/app-server/tests/suite/v2/mod.rs +++ b/codex-rs/app-server/tests/suite/v2/mod.rs @@ -6,6 +6,7 @@ mod initialize; mod model_list; mod output_schema; mod rate_limits; +mod request_user_input; mod review; mod thread_archive; mod thread_fork; diff --git a/codex-rs/app-server/tests/suite/v2/request_user_input.rs b/codex-rs/app-server/tests/suite/v2/request_user_input.rs new file mode 100644 index 0000000000..37122e08a2 --- /dev/null +++ b/codex-rs/app-server/tests/suite/v2/request_user_input.rs @@ -0,0 +1,127 @@ +use anyhow::Result; +use app_test_support::McpProcess; +use app_test_support::create_final_assistant_message_sse_response; +use app_test_support::create_mock_responses_server_sequence; +use app_test_support::create_request_user_input_sse_response; +use app_test_support::to_response; +use codex_app_server_protocol::JSONRPCResponse; +use codex_app_server_protocol::RequestId; +use codex_app_server_protocol::ServerRequest; +use codex_app_server_protocol::ThreadStartParams; +use codex_app_server_protocol::ThreadStartResponse; +use codex_app_server_protocol::TurnStartParams; +use codex_app_server_protocol::TurnStartResponse; +use codex_app_server_protocol::UserInput as V2UserInput; +use codex_protocol::openai_models::ReasoningEffort; +use tokio::time::timeout; + +const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(10); + +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +async fn request_user_input_round_trip() -> Result<()> { + let codex_home = tempfile::TempDir::new()?; + let responses = vec![ + create_request_user_input_sse_response("call1")?, + create_final_assistant_message_sse_response("done")?, + ]; + let server = create_mock_responses_server_sequence(responses).await; + create_config_toml(codex_home.path(), &server.uri())?; + + let mut mcp = McpProcess::new(codex_home.path()).await?; 
+ timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??; + + let thread_start_id = mcp + .send_thread_start_request(ThreadStartParams { + model: Some("mock-model".to_string()), + ..Default::default() + }) + .await?; + let thread_start_resp: JSONRPCResponse = timeout( + DEFAULT_READ_TIMEOUT, + mcp.read_stream_until_response_message(RequestId::Integer(thread_start_id)), + ) + .await??; + let ThreadStartResponse { thread, .. } = to_response(thread_start_resp)?; + + let turn_start_id = mcp + .send_turn_start_request(TurnStartParams { + thread_id: thread.id.clone(), + input: vec![V2UserInput::Text { + text: "ask something".to_string(), + text_elements: Vec::new(), + }], + model: Some("mock-model".to_string()), + effort: Some(ReasoningEffort::Medium), + ..Default::default() + }) + .await?; + let turn_start_resp: JSONRPCResponse = timeout( + DEFAULT_READ_TIMEOUT, + mcp.read_stream_until_response_message(RequestId::Integer(turn_start_id)), + ) + .await??; + let TurnStartResponse { turn, .. } = to_response(turn_start_resp)?; + + let server_req = timeout( + DEFAULT_READ_TIMEOUT, + mcp.read_stream_until_request_message(), + ) + .await??; + let ServerRequest::ToolRequestUserInput { request_id, params } = server_req else { + panic!("expected ToolRequestUserInput request, got: {server_req:?}"); + }; + + assert_eq!(params.thread_id, thread.id); + assert_eq!(params.turn_id, turn.id); + assert_eq!(params.item_id, "call1"); + assert_eq!(params.questions.len(), 1); + + mcp.send_response( + request_id, + serde_json::json!({ + "answers": { + "confirm_path": { "selected": ["yes"], "other": serde_json::Value::Null } + } + }), + ) + .await?; + + timeout( + DEFAULT_READ_TIMEOUT, + mcp.read_stream_until_notification_message("codex/event/task_complete"), + ) + .await??; + timeout( + DEFAULT_READ_TIMEOUT, + mcp.read_stream_until_notification_message("turn/completed"), + ) + .await??; + + Ok(()) +} + +fn create_config_toml(codex_home: &std::path::Path, server_uri: &str) -> 
std::io::Result<()> { + let config_toml = codex_home.join("config.toml"); + std::fs::write( + config_toml, + format!( + r#" +model = "mock-model" +approval_policy = "untrusted" +sandbox_mode = "read-only" + +model_provider = "mock_provider" + +[features] +collaboration_modes = true + +[model_providers.mock_provider] +name = "Mock provider for test" +base_url = "{server_uri}/v1" +wire_api = "responses" +request_max_retries = 0 +stream_max_retries = 0 +"# + ), + ) +} diff --git a/codex-rs/app-server/tests/suite/v2/turn_start.rs b/codex-rs/app-server/tests/suite/v2/turn_start.rs index 4c8df56c3d..29a7d6b936 100644 --- a/codex-rs/app-server/tests/suite/v2/turn_start.rs +++ b/codex-rs/app-server/tests/suite/v2/turn_start.rs @@ -1471,8 +1471,18 @@ unified_exec = true unreachable!("loop ensures we break on command execution items"); }; assert_eq!(completed_id, "uexec-1"); - assert_eq!(completed_status, CommandExecutionStatus::Completed); - assert_eq!(exit_code, Some(0)); + assert!( + matches!( + completed_status, + CommandExecutionStatus::Completed | CommandExecutionStatus::Failed + ), + "unexpected command execution status: {completed_status:?}" + ); + if completed_status == CommandExecutionStatus::Completed { + assert_eq!(exit_code, Some(0)); + } else { + assert!(exit_code.is_some(), "expected exit_code for failed command"); + } assert_eq!( completed_process_id.as_deref(), Some(started_process_id.as_str()) diff --git a/codex-rs/cli/src/main.rs b/codex-rs/cli/src/main.rs index 337be81e5d..25c1161c8d 100644 --- a/codex-rs/cli/src/main.rs +++ b/codex-rs/cli/src/main.rs @@ -459,8 +459,8 @@ enum FeaturesSubcommand { fn stage_str(stage: codex_core::features::Stage) -> &'static str { use codex_core::features::Stage; match stage { - Stage::Experimental => "experimental", - Stage::Beta { .. } => "beta", + Stage::Beta => "beta", + Stage::Experimental { .. 
} => "experimental", Stage::Stable => "stable", Stage::Deprecated => "deprecated", Stage::Removed => "removed", @@ -728,9 +728,13 @@ fn prepend_config_flags( /// Run the interactive Codex TUI, dispatching to either the legacy implementation or the /// experimental TUI v2 shim based on feature flags resolved from config. async fn run_interactive_tui( - interactive: TuiCli, + mut interactive: TuiCli, codex_linux_sandbox_exe: Option<PathBuf>, ) -> std::io::Result { + if let Some(prompt) = interactive.prompt.take() { + // Normalize CRLF/CR to LF so CLI-provided text can't leak `\r` into TUI state. + interactive.prompt = Some(prompt.replace("\r\n", "\n").replace('\r', "\n")); + } if is_tui2_enabled(&interactive).await? { let result = tui2::run_main(interactive.into(), codex_linux_sandbox_exe).await?; Ok(result.into()) @@ -855,7 +859,8 @@ fn merge_interactive_cli_flags(interactive: &mut TuiCli, subcommand_cli: TuiCli) interactive.add_dir.extend(subcommand_cli.add_dir); } if let Some(prompt) = subcommand_cli.prompt { - interactive.prompt = Some(prompt); + // Normalize CRLF/CR to LF so CLI-provided text can't leak `\r` into TUI state. + interactive.prompt = Some(prompt.replace("\r\n", "\n").replace('\r', "\n")); } interactive diff --git a/codex-rs/codex-api/src/common.rs b/codex-rs/codex-api/src/common.rs index 2118cf66e4..9a7aab9973 100644 --- a/codex-rs/codex-api/src/common.rs +++ b/codex-rs/codex-api/src/common.rs @@ -42,6 +42,10 @@ pub enum ResponseEvent { Created, OutputItemDone(ResponseItem), OutputItemAdded(ResponseItem), + /// Emitted when `X-Reasoning-Included: true` is present on the response, + /// meaning the server already accounted for past reasoning tokens and the + /// client should not re-estimate them.
+ ServerReasoningIncluded(bool), Completed { response_id: String, token_usage: Option, diff --git a/codex-rs/codex-api/src/endpoint/chat.rs b/codex-rs/codex-api/src/endpoint/chat.rs index cd830a09fc..8fe1d2a521 100644 --- a/codex-rs/codex-api/src/endpoint/chat.rs +++ b/codex-rs/codex-api/src/endpoint/chat.rs @@ -157,6 +157,9 @@ impl Stream for AggregatedStream { return Poll::Ready(Some(Ok(ResponseEvent::OutputItemDone(item)))); } + Poll::Ready(Some(Ok(ResponseEvent::ServerReasoningIncluded(included)))) => { + return Poll::Ready(Some(Ok(ResponseEvent::ServerReasoningIncluded(included)))); + } Poll::Ready(Some(Ok(ResponseEvent::RateLimits(snapshot)))) => { return Poll::Ready(Some(Ok(ResponseEvent::RateLimits(snapshot)))); } diff --git a/codex-rs/codex-api/src/endpoint/responses_websocket.rs b/codex-rs/codex-api/src/endpoint/responses_websocket.rs index 3c6cab74e5..39e2f2fd05 100644 --- a/codex-rs/codex-api/src/endpoint/responses_websocket.rs +++ b/codex-rs/codex-api/src/endpoint/responses_websocket.rs @@ -29,18 +29,21 @@ use url::Url; type WsStream = WebSocketStream>; const X_CODEX_TURN_STATE_HEADER: &str = "x-codex-turn-state"; +const X_REASONING_INCLUDED_HEADER: &str = "x-reasoning-included"; pub struct ResponsesWebsocketConnection { stream: Arc>>, // TODO (pakrym): is this the right place for timeout? 
idle_timeout: Duration, + server_reasoning_included: bool, } impl ResponsesWebsocketConnection { - fn new(stream: WsStream, idle_timeout: Duration) -> Self { + fn new(stream: WsStream, idle_timeout: Duration, server_reasoning_included: bool) -> Self { Self { stream: Arc::new(Mutex::new(Some(stream))), idle_timeout, + server_reasoning_included, } } @@ -56,11 +59,17 @@ impl ResponsesWebsocketConnection { mpsc::channel::>(1600); let stream = Arc::clone(&self.stream); let idle_timeout = self.idle_timeout; + let server_reasoning_included = self.server_reasoning_included; let request_body = serde_json::to_value(&request).map_err(|err| { ApiError::Stream(format!("failed to encode websocket request: {err}")) })?; tokio::spawn(async move { + if server_reasoning_included { + let _ = tx_event + .send(Ok(ResponseEvent::ServerReasoningIncluded(true))) + .await; + } let mut guard = stream.lock().await; let Some(ws_stream) = guard.as_mut() else { let _ = tx_event @@ -111,10 +120,12 @@ impl ResponsesWebsocketClient { headers.extend(extra_headers); apply_auth_headers(&mut headers, &self.auth); - let stream = connect_websocket(ws_url, headers, turn_state).await?; + let (stream, server_reasoning_included) = + connect_websocket(ws_url, headers, turn_state).await?; Ok(ResponsesWebsocketConnection::new( stream, self.provider.stream_idle_timeout, + server_reasoning_included, )) } } @@ -137,7 +148,7 @@ async fn connect_websocket( url: Url, headers: HeaderMap, turn_state: Option>>, -) -> Result { +) -> Result<(WsStream, bool), ApiError> { let mut request = url .clone() .into_client_request() @@ -147,6 +158,7 @@ async fn connect_websocket( let (stream, response) = tokio_tungstenite::connect_async(request) .await .map_err(|err| map_ws_error(err, &url))?; + let reasoning_included = response.headers().contains_key(X_REASONING_INCLUDED_HEADER); if let Some(turn_state) = turn_state && let Some(header_value) = response .headers() @@ -155,7 +167,7 @@ async fn connect_websocket( { let _ = 
turn_state.set(header_value.to_string()); } - Ok(stream) + Ok((stream, reasoning_included)) } fn map_ws_error(err: WsError, url: &Url) -> ApiError { diff --git a/codex-rs/codex-api/src/sse/responses.rs b/codex-rs/codex-api/src/sse/responses.rs index a70111d988..f23975f8dd 100644 --- a/codex-rs/codex-api/src/sse/responses.rs +++ b/codex-rs/codex-api/src/sse/responses.rs @@ -25,6 +25,8 @@ use tokio_util::io::ReaderStream; use tracing::debug; use tracing::trace; +const X_REASONING_INCLUDED_HEADER: &str = "x-reasoning-included"; + /// Streams SSE events from an on-disk fixture for tests. pub fn stream_from_fixture( path: impl AsRef, @@ -58,6 +60,10 @@ pub fn spawn_response_stream( .get("X-Models-Etag") .and_then(|v| v.to_str().ok()) .map(ToString::to_string); + let reasoning_included = stream_response + .headers + .get(X_REASONING_INCLUDED_HEADER) + .is_some(); if let Some(turn_state) = turn_state.as_ref() && let Some(header_value) = stream_response .headers @@ -74,6 +80,11 @@ pub fn spawn_response_stream( if let Some(etag) = models_etag { let _ = tx_event.send(Ok(ResponseEvent::ModelsEtag(etag))).await; } + if reasoning_included { + let _ = tx_event + .send(Ok(ResponseEvent::ServerReasoningIncluded(true))) + .await; + } process_sse(stream_response.bytes, tx_event, idle_timeout, telemetry).await; }); diff --git a/codex-rs/core/Cargo.toml b/codex-rs/core/Cargo.toml index 10b635b747..8ee2848a98 100644 --- a/codex-rs/core/Cargo.toml +++ b/codex-rs/core/Cargo.toml @@ -18,7 +18,7 @@ workspace = true [dependencies] anyhow = { workspace = true } -arc-swap = "1.7.1" +arc-swap = "1.8.0" async-channel = { workspace = true } async-trait = { workspace = true } base64 = { workspace = true } diff --git a/codex-rs/core/src/agent/control.rs b/codex-rs/core/src/agent/control.rs index 7083081dc6..4467494fcf 100644 --- a/codex-rs/core/src/agent/control.rs +++ b/codex-rs/core/src/agent/control.rs @@ -58,7 +58,7 @@ impl AgentControl { Op::UserInput { items: vec![UserInput::Text { text: 
prompt, - // Plain text conversion has no UI element ranges. + // Agent control prompts are plain text with no UI text elements. text_elements: Vec::new(), }], final_output_json_schema: None, @@ -85,7 +85,6 @@ impl AgentControl { result } - #[allow(dead_code)] // Will be used for collab tools. /// Fetch the last known status for `agent_id`, returning `NotFound` when unavailable. pub(crate) async fn get_status(&self, agent_id: ThreadId) -> AgentStatus { let Ok(state) = self.upgrade() else { diff --git a/codex-rs/core/src/client.rs b/codex-rs/core/src/client.rs index 03e74b54b9..f0f2b125fd 100644 --- a/codex-rs/core/src/client.rs +++ b/codex-rs/core/src/client.rs @@ -217,9 +217,7 @@ impl ModelClient { let client = ApiCompactClient::new(transport, api_provider, api_auth) .with_telemetry(Some(request_telemetry)); - let instructions = prompt - .get_full_instructions(&self.state.model_info) - .into_owned(); + let instructions = prompt.base_instructions.text.clone(); let payload = ApiCompactionInput { model: &self.state.model_info.slug, input: &prompt.input, @@ -276,8 +274,7 @@ impl ModelClientSession { } fn build_responses_request(&self, prompt: &Prompt) -> Result { - let model_info = self.state.model_info.clone(); - let instructions = prompt.get_full_instructions(&model_info).into_owned(); + let instructions = prompt.base_instructions.text.clone(); let tools_json: Vec = create_tools_json_for_responses_api(&prompt.tools)?; Ok(build_api_prompt(prompt, instructions, tools_json)) } @@ -448,8 +445,7 @@ impl ModelClientSession { } let auth_manager = self.state.auth_manager.clone(); - let model_info = self.state.model_info.clone(); - let instructions = prompt.get_full_instructions(&model_info).into_owned(); + let instructions = prompt.base_instructions.text.clone(); let tools_json = create_tools_json_for_chat_completions_api(&prompt.tools)?; let api_prompt = build_api_prompt(prompt, instructions, tools_json); let conversation_id = self.state.conversation_id.to_string(); diff 
--git a/codex-rs/core/src/client_common.rs b/codex-rs/core/src/client_common.rs index 7d7cabcfa6..2abe4883e7 100644 --- a/codex-rs/core/src/client_common.rs +++ b/codex-rs/core/src/client_common.rs @@ -1,12 +1,11 @@ use crate::client_common::tools::ToolSpec; use crate::error::Result; pub use codex_api::common::ResponseEvent; +use codex_protocol::models::BaseInstructions; use codex_protocol::models::ResponseItem; -use codex_protocol::openai_models::ModelInfo; use futures::Stream; use serde::Deserialize; use serde_json::Value; -use std::borrow::Cow; use std::collections::HashSet; use std::pin::Pin; use std::task::Context; @@ -34,22 +33,13 @@ pub struct Prompt { /// Whether parallel tool calls are permitted for this prompt. pub(crate) parallel_tool_calls: bool, - /// Optional override for the built-in BASE_INSTRUCTIONS. - pub base_instructions_override: Option, + pub base_instructions: BaseInstructions, /// Optional the output schema for the model's response. pub output_schema: Option, } impl Prompt { - pub(crate) fn get_full_instructions<'a>(&'a self, model: &'a ModelInfo) -> Cow<'a, str> { - Cow::Borrowed( - self.base_instructions_override - .as_deref() - .unwrap_or(model.base_instructions.as_str()), - ) - } - pub(crate) fn get_formatted_input(&self) -> Vec { let mut input = self.input.clone(); @@ -245,76 +235,8 @@ mod tests { use codex_api::create_text_param_for_request; use pretty_assertions::assert_eq; - use crate::config::test_config; - use crate::models_manager::manager::ModelsManager; - use super::*; - struct InstructionsTestCase { - pub slug: &'static str, - pub expects_apply_patch_instructions: bool, - } - #[test] - fn get_full_instructions_no_user_content() { - let prompt = Prompt { - ..Default::default() - }; - let prompt_with_apply_patch_instructions = - include_str!("../prompt_with_apply_patch_instructions.md"); - let test_cases = vec![ - InstructionsTestCase { - slug: "gpt-3.5", - expects_apply_patch_instructions: true, - }, - InstructionsTestCase { - 
slug: "gpt-4.1", - expects_apply_patch_instructions: true, - }, - InstructionsTestCase { - slug: "gpt-4o", - expects_apply_patch_instructions: true, - }, - InstructionsTestCase { - slug: "gpt-5", - expects_apply_patch_instructions: true, - }, - InstructionsTestCase { - slug: "gpt-5.1", - expects_apply_patch_instructions: false, - }, - InstructionsTestCase { - slug: "codex-mini-latest", - expects_apply_patch_instructions: true, - }, - InstructionsTestCase { - slug: "gpt-oss:120b", - expects_apply_patch_instructions: false, - }, - InstructionsTestCase { - slug: "gpt-5.1-codex", - expects_apply_patch_instructions: false, - }, - InstructionsTestCase { - slug: "gpt-5.1-codex-max", - expects_apply_patch_instructions: false, - }, - ]; - for test_case in test_cases { - let config = test_config(); - let model_info = ModelsManager::construct_model_info_offline(test_case.slug, &config); - if test_case.expects_apply_patch_instructions { - assert_eq!( - model_info.base_instructions.as_str(), - prompt_with_apply_patch_instructions - ); - } - - let expected = model_info.base_instructions.as_str(); - let full = prompt.get_full_instructions(&model_info); - assert_eq!(full, expected); - } - } - #[test] fn serializes_text_verbosity_when_set() { let input: Vec = vec![]; diff --git a/codex-rs/core/src/codex.rs b/codex-rs/core/src/codex.rs index 7bdcf44cb5..e2e9be33e1 100644 --- a/codex-rs/core/src/codex.rs +++ b/codex-rs/core/src/codex.rs @@ -12,7 +12,6 @@ use crate::SandboxState; use crate::agent::AgentControl; use crate::agent::AgentStatus; use crate::agent::agent_status_from_event; -use crate::client_common::REVIEW_PROMPT; use crate::compact; use crate::compact::run_inline_auto_compact_task; use crate::compact::should_use_remote_compact_task; @@ -34,9 +33,11 @@ use async_channel::Receiver; use async_channel::Sender; use codex_protocol::ThreadId; use codex_protocol::approvals::ExecPolicyAmendment; +use codex_protocol::config_types::Settings; use 
codex_protocol::config_types::WebSearchMode; use codex_protocol::items::TurnItem; use codex_protocol::items::UserMessageItem; +use codex_protocol::models::BaseInstructions; use codex_protocol::openai_models::ModelInfo; use codex_protocol::protocol::FileChange; use codex_protocol::protocol::HasLegacyEvent; @@ -49,6 +50,8 @@ use codex_protocol::protocol::SessionSource; use codex_protocol::protocol::TurnAbortReason; use codex_protocol::protocol::TurnContextItem; use codex_protocol::protocol::TurnStartedEvent; +use codex_protocol::request_user_input::RequestUserInputArgs; +use codex_protocol::request_user_input::RequestUserInputResponse; use codex_rmcp_client::ElicitationResponse; use codex_rmcp_client::OAuthCredentialsStoreMode; use futures::future::BoxFuture; @@ -118,6 +121,7 @@ use crate::protocol::Op; use crate::protocol::RateLimitSnapshot; use crate::protocol::ReasoningContentDeltaEvent; use crate::protocol::ReasoningRawContentDeltaEvent; +use crate::protocol::RequestUserInputEvent; use crate::protocol::ReviewDecision; use crate::protocol::SandboxPolicy; use crate::protocol::SessionConfiguredEvent; @@ -162,7 +166,6 @@ use codex_async_utils::OrCancelExt; use codex_otel::OtelManager; use codex_protocol::config_types::CollaborationMode; use codex_protocol::config_types::ReasoningSummary as ReasoningSummaryConfig; -use codex_protocol::config_types::Settings; use codex_protocol::models::ContentItem; use codex_protocol::models::DeveloperInstructions; use codex_protocol::models::ResponseInputItem; @@ -272,6 +275,18 @@ impl Codex { crate::models_manager::manager::RefreshStrategy::OnlineIfUncached, ) .await; + + // Resolve base instructions for the session. Priority order: + // 1. config.base_instructions override + // 2. conversation history => session_meta.base_instructions + // 3. 
base_instructions for current model + let model_info = models_manager.get_model_info(model.as_str(), &config).await; + let base_instructions = config + .base_instructions + .clone() + .or_else(|| conversation_history.get_base_instructions().map(|s| s.text)) + .unwrap_or_else(|| model_info.base_instructions.clone()); + // TODO (aibrahim): Consolidate config.model and config.model_reasoning_effort into config.collaboration_mode // to avoid extracting these fields separately and constructing CollaborationMode here. let collaboration_mode = CollaborationMode::Custom(Settings { @@ -285,7 +300,7 @@ impl Codex { model_reasoning_summary: config.model_reasoning_summary, developer_instructions: config.developer_instructions.clone(), user_instructions, - base_instructions: config.base_instructions.clone(), + base_instructions, compact_prompt: config.compact_prompt.clone(), approval_policy: config.approval_policy.clone(), sandbox_policy: config.sandbox_policy.clone(), @@ -397,7 +412,6 @@ pub(crate) struct TurnContext { /// instead of `std::env::current_dir()`. pub(crate) cwd: PathBuf, pub(crate) developer_instructions: Option, - pub(crate) base_instructions: Option, pub(crate) compact_prompt: Option, pub(crate) user_instructions: Option, pub(crate) approval_policy: AskForApproval, @@ -439,8 +453,8 @@ pub(crate) struct SessionConfiguration { /// Model instructions that are appended to the base instructions. user_instructions: Option, - /// Base instructions override. - base_instructions: Option, + /// Base instructions for the session. + base_instructions: String, /// Compact prompt override. 
compact_prompt: Option, @@ -525,7 +539,6 @@ impl Session { session_configuration.collaboration_mode.model(), model_info.slug.as_str(), ); - let per_turn_config = Arc::new(per_turn_config); let client = ModelClient::new( per_turn_config.clone(), @@ -550,7 +563,6 @@ impl Session { client, cwd: session_configuration.cwd.clone(), developer_instructions: session_configuration.developer_instructions.clone(), - base_instructions: session_configuration.base_instructions.clone(), compact_prompt: session_configuration.compact_prompt.clone(), user_instructions: session_configuration.user_instructions.clone(), approval_policy: session_configuration.approval_policy.value(), @@ -598,7 +610,14 @@ impl Session { let conversation_id = ThreadId::default(); ( conversation_id, - RolloutRecorderParams::new(conversation_id, forked_from_id, session_source), + RolloutRecorderParams::new( + conversation_id, + forked_from_id, + session_source, + BaseInstructions { + text: session_configuration.base_instructions.clone(), + }, + ), ) } InitialHistory::Resumed(resumed_history) => ( @@ -807,7 +826,14 @@ impl Session { async fn get_total_token_usage(&self) -> i64 { let state = self.state.lock().await; - state.get_total_token_usage() + state.get_total_token_usage(state.server_reasoning_included()) + } + + pub(crate) async fn get_base_instructions(&self) -> BaseInstructions { + let state = self.state.lock().await; + BaseInstructions { + text: state.session_configuration.base_instructions.clone(), + } } async fn record_initial_history(&self, conversation_history: InitialHistory) { @@ -1297,6 +1323,63 @@ impl Session { rx_approve } + pub async fn request_user_input( + &self, + turn_context: &TurnContext, + call_id: String, + args: RequestUserInputArgs, + ) -> Option { + let sub_id = turn_context.sub_id.clone(); + let (tx_response, rx_response) = oneshot::channel(); + let event_id = sub_id.clone(); + let prev_entry = { + let mut active = self.active_turn.lock().await; + match active.as_mut() { + 
Some(at) => { + let mut ts = at.turn_state.lock().await; + ts.insert_pending_user_input(sub_id, tx_response) + } + None => None, + } + }; + if prev_entry.is_some() { + warn!("Overwriting existing pending user input for sub_id: {event_id}"); + } + + let event = EventMsg::RequestUserInput(RequestUserInputEvent { + call_id, + turn_id: turn_context.sub_id.clone(), + questions: args.questions, + }); + self.send_event(turn_context, event).await; + rx_response.await.ok() + } + + pub async fn notify_user_input_response( + &self, + sub_id: &str, + response: RequestUserInputResponse, + ) { + let entry = { + let mut active = self.active_turn.lock().await; + match active.as_mut() { + Some(at) => { + let mut ts = at.turn_state.lock().await; + ts.remove_pending_user_input(sub_id) + } + None => None, + } + }; + match entry { + Some(tx_response) => { + tx_response.send(response).ok(); + } + None => { + warn!("No pending user input found for sub_id: {sub_id}"); + } + } + } + pub async fn notify_approval(&self, sub_id: &str, decision: ReviewDecision) { let entry = { let mut active = self.active_turn.lock().await; @@ -1391,6 +1474,9 @@ impl Session { } pub(crate) async fn record_model_warning(&self, message: impl Into, ctx: &TurnContext) { + self.services + .otel_manager + .counter("codex.model_warning", 1, &[]); let item = ResponseItem::Message { id: None, role: "user".to_string(), @@ -1556,6 +1642,11 @@ impl Session { self.send_token_count_event(turn_context).await; } + pub(crate) async fn set_server_reasoning_included(&self, included: bool) { + let mut state = self.state.lock().await; + state.set_server_reasoning_included(included); + } + async fn send_token_count_event(&self, turn_context: &TurnContext) { let (info, rate_limits) = { let state = self.state.lock().await; @@ -1931,6 +2022,9 @@ async fn submission_loop(sess: Arc, config: Arc, rx_sub: Receiv Op::PatchApproval { id, decision } => { handlers::patch_approval(&sess, id, decision).await; } + Op::UserInputAnswer { id, 
response } => { + handlers::request_user_input_response(&sess, id, response).await; + } Op::AddToHistory { text } => { handlers::add_to_history(&sess, &config, text).await; } @@ -2020,6 +2114,7 @@ mod handlers { use codex_protocol::protocol::ThreadRolledBackEvent; use codex_protocol::protocol::TurnAbortReason; use codex_protocol::protocol::WarningEvent; + use codex_protocol::request_user_input::RequestUserInputResponse; use crate::context_manager::is_user_turn_boundary; use codex_protocol::config_types::CollaborationMode; @@ -2248,6 +2343,14 @@ mod handlers { } } + pub async fn request_user_input_response( + sess: &Arc, + id: String, + response: RequestUserInputResponse, + ) { + sess.notify_user_input_response(&id, response).await; + } + pub async fn add_to_history(sess: &Arc, config: &Arc, text: String) { let id = sess.conversation_id; let config = Arc::clone(config); @@ -2382,6 +2485,7 @@ mod handlers { Arc::clone(&turn_context), vec![UserInput::Text { text: turn_context.compact_prompt().to_string(), + // Compaction prompt is synthesized; no UI element ranges to preserve. text_elements: Vec::new(), }], CompactTask, @@ -2541,7 +2645,6 @@ async fn spawn_review_thread( web_search_mode: Some(review_web_search_mode), }); - let base_instructions = REVIEW_PROMPT.to_string(); let review_prompt = resolved.prompt.clone(); let provider = parent_turn_context.client.get_provider(); let auth_manager = parent_turn_context.client.get_auth_manager(); @@ -2578,7 +2681,6 @@ async fn spawn_review_thread( ghost_snapshot: parent_turn_context.ghost_snapshot.clone(), developer_instructions: None, user_instructions: None, - base_instructions: Some(base_instructions.clone()), compact_prompt: parent_turn_context.compact_prompt.clone(), approval_policy: parent_turn_context.approval_policy, sandbox_policy: parent_turn_context.sandbox_policy.clone(), @@ -2593,6 +2695,7 @@ async fn spawn_review_thread( // Seed the child task with the review prompt as the initial user message. 
let input: Vec = vec![UserInput::Text { text: review_prompt, + // Review prompt is synthesized; no UI element ranges to preserve. text_elements: Vec::new(), }]; let tc = Arc::new(review_turn_context); @@ -2856,11 +2959,13 @@ async fn run_sampling_request( .get_model_info() .supports_parallel_tool_calls; + let base_instructions = sess.get_base_instructions().await; + let prompt = Prompt { input, tools: router.specs(), parallel_tool_calls: model_supports_parallel, - base_instructions_override: turn_context.base_instructions.clone(), + base_instructions, output_schema: turn_context.final_output_json_schema.clone(), }; @@ -2976,7 +3081,6 @@ async fn try_run_sampling_request( model: turn_context.client.get_model(), effort: turn_context.client.get_reasoning_effort(), summary: turn_context.client.get_reasoning_summary(), - base_instructions: turn_context.base_instructions.clone(), user_instructions: turn_context.user_instructions.clone(), developer_instructions: turn_context.developer_instructions.clone(), final_output_json_schema: turn_context.final_output_json_schema.clone(), @@ -3075,6 +3179,9 @@ async fn try_run_sampling_request( active_item = Some(tracked_item); } } + ResponseEvent::ServerReasoningIncluded(included) => { + sess.set_server_reasoning_included(included).await; + } ResponseEvent::RateLimits(snapshot) => { // Update internal state with latest rate limits, but defer sending until // token usage is available to avoid duplicate TokenCount events. 
@@ -3218,6 +3325,7 @@ mod tests { use super::*; use crate::CodexAuth; use crate::config::ConfigBuilder; + use crate::config::test_config; use crate::exec::ExecToolCallOutput; use crate::function_tool::FunctionCallError; use crate::shell::default_user_shell; @@ -3261,6 +3369,77 @@ mod tests { use std::sync::Arc; use std::time::Duration as StdDuration; + struct InstructionsTestCase { + slug: &'static str, + expects_apply_patch_instructions: bool, + } + + #[tokio::test] + async fn get_base_instructions_no_user_content() { + let prompt_with_apply_patch_instructions = + include_str!("../prompt_with_apply_patch_instructions.md"); + let test_cases = vec![ + InstructionsTestCase { + slug: "gpt-3.5", + expects_apply_patch_instructions: true, + }, + InstructionsTestCase { + slug: "gpt-4.1", + expects_apply_patch_instructions: true, + }, + InstructionsTestCase { + slug: "gpt-4o", + expects_apply_patch_instructions: true, + }, + InstructionsTestCase { + slug: "gpt-5", + expects_apply_patch_instructions: true, + }, + InstructionsTestCase { + slug: "gpt-5.1", + expects_apply_patch_instructions: false, + }, + InstructionsTestCase { + slug: "codex-mini-latest", + expects_apply_patch_instructions: true, + }, + InstructionsTestCase { + slug: "gpt-oss:120b", + expects_apply_patch_instructions: false, + }, + InstructionsTestCase { + slug: "gpt-5.1-codex", + expects_apply_patch_instructions: false, + }, + InstructionsTestCase { + slug: "gpt-5.1-codex-max", + expects_apply_patch_instructions: false, + }, + ]; + + let (session, _turn_context) = make_session_and_context().await; + + for test_case in test_cases { + let config = test_config(); + let model_info = ModelsManager::construct_model_info_offline(test_case.slug, &config); + if test_case.expects_apply_patch_instructions { + assert_eq!( + model_info.base_instructions.as_str(), + prompt_with_apply_patch_instructions + ); + } + + { + let mut state = session.state.lock().await; + state.session_configuration.base_instructions = + 
model_info.base_instructions.clone(); + } + + let base_instructions = session.get_base_instructions().await; + assert_eq!(base_instructions.text, model_info.base_instructions); + } + } + #[tokio::test] async fn reconstruct_history_matches_live_compactions() { let (session, turn_context) = make_session_and_context().await; @@ -3513,6 +3692,7 @@ mod tests { let config = build_test_config(codex_home.path()).await; let config = Arc::new(config); let model = ModelsManager::get_model_offline(config.model.as_deref()); + let model_info = ModelsManager::construct_model_info_offline(model.as_str(), &config); let reasoning_effort = config.model_reasoning_effort; let collaboration_mode = CollaborationMode::Custom(Settings { model, @@ -3525,7 +3705,10 @@ mod tests { model_reasoning_summary: config.model_reasoning_summary, developer_instructions: config.developer_instructions.clone(), user_instructions: config.user_instructions.clone(), - base_instructions: config.base_instructions.clone(), + base_instructions: config + .base_instructions + .clone() + .unwrap_or_else(|| model_info.base_instructions.clone()), compact_prompt: config.compact_prompt.clone(), approval_policy: config.approval_policy.clone(), sandbox_policy: config.sandbox_policy.clone(), @@ -3584,6 +3767,7 @@ mod tests { let config = build_test_config(codex_home.path()).await; let config = Arc::new(config); let model = ModelsManager::get_model_offline(config.model.as_deref()); + let model_info = ModelsManager::construct_model_info_offline(model.as_str(), &config); let reasoning_effort = config.model_reasoning_effort; let collaboration_mode = CollaborationMode::Custom(Settings { model, @@ -3596,7 +3780,10 @@ mod tests { model_reasoning_summary: config.model_reasoning_summary, developer_instructions: config.developer_instructions.clone(), user_instructions: config.user_instructions.clone(), - base_instructions: config.base_instructions.clone(), + base_instructions: config + .base_instructions + .clone() + 
.unwrap_or_else(|| model_info.base_instructions.clone()), compact_prompt: config.compact_prompt.clone(), approval_policy: config.approval_policy.clone(), sandbox_policy: config.sandbox_policy.clone(), @@ -3840,6 +4027,7 @@ mod tests { let exec_policy = ExecPolicyManager::default(); let (agent_status_tx, _agent_status_rx) = watch::channel(AgentStatus::PendingInit); let model = ModelsManager::get_model_offline(config.model.as_deref()); + let model_info = ModelsManager::construct_model_info_offline(model.as_str(), &config); let reasoning_effort = config.model_reasoning_effort; let collaboration_mode = CollaborationMode::Custom(Settings { model, @@ -3852,7 +4040,10 @@ mod tests { model_reasoning_summary: config.model_reasoning_summary, developer_instructions: config.developer_instructions.clone(), user_instructions: config.user_instructions.clone(), - base_instructions: config.base_instructions.clone(), + base_instructions: config + .base_instructions + .clone() + .unwrap_or_else(|| model_info.base_instructions.clone()), compact_prompt: config.compact_prompt.clone(), approval_policy: config.approval_policy.clone(), sandbox_policy: config.sandbox_policy.clone(), @@ -3940,6 +4131,7 @@ mod tests { let exec_policy = ExecPolicyManager::default(); let (agent_status_tx, _agent_status_rx) = watch::channel(AgentStatus::PendingInit); let model = ModelsManager::get_model_offline(config.model.as_deref()); + let model_info = ModelsManager::construct_model_info_offline(model.as_str(), &config); let reasoning_effort = config.model_reasoning_effort; let collaboration_mode = CollaborationMode::Custom(Settings { model, @@ -3952,7 +4144,10 @@ mod tests { model_reasoning_summary: config.model_reasoning_summary, developer_instructions: config.developer_instructions.clone(), user_instructions: config.user_instructions.clone(), - base_instructions: config.base_instructions.clone(), + base_instructions: config + .base_instructions + .clone() + .unwrap_or_else(|| 
model_info.base_instructions.clone()), compact_prompt: config.compact_prompt.clone(), approval_policy: config.approval_policy.clone(), sandbox_policy: config.sandbox_policy.clone(), diff --git a/codex-rs/core/src/codex_delegate.rs b/codex-rs/core/src/codex_delegate.rs index 49409f8e84..b855612113 100644 --- a/codex-rs/core/src/codex_delegate.rs +++ b/codex-rs/core/src/codex_delegate.rs @@ -1,3 +1,4 @@ +use std::collections::HashMap; use std::sync::Arc; use std::sync::atomic::AtomicU64; @@ -9,9 +10,12 @@ use codex_protocol::protocol::Event; use codex_protocol::protocol::EventMsg; use codex_protocol::protocol::ExecApprovalRequestEvent; use codex_protocol::protocol::Op; +use codex_protocol::protocol::RequestUserInputEvent; use codex_protocol::protocol::SessionSource; use codex_protocol::protocol::SubAgentSource; use codex_protocol::protocol::Submission; +use codex_protocol::request_user_input::RequestUserInputArgs; +use codex_protocol::request_user_input::RequestUserInputResponse; use codex_protocol::user_input::UserInput; use std::time::Duration; use tokio::time::timeout; @@ -229,6 +233,20 @@ async fn forward_events( ) .await; } + Event { + id, + msg: EventMsg::RequestUserInput(event), + } => { + handle_request_user_input( + &codex, + id, + &parent_session, + &parent_ctx, + event, + &cancel_token, + ) + .await; + } other => { match tx_sub.send(other).or_cancel(&cancel_token).await { Ok(Ok(())) => {} @@ -334,6 +352,55 @@ async fn handle_patch_approval( let _ = codex.submit(Op::PatchApproval { id, decision }).await; } +async fn handle_request_user_input( + codex: &Codex, + id: String, + parent_session: &Session, + parent_ctx: &TurnContext, + event: RequestUserInputEvent, + cancel_token: &CancellationToken, +) { + let args = RequestUserInputArgs { + questions: event.questions, + }; + let response_fut = + parent_session.request_user_input(parent_ctx, parent_ctx.sub_id.clone(), args); + let response = await_user_input_with_cancel( + response_fut, + parent_session, + 
&parent_ctx.sub_id, + cancel_token, + ) + .await; + let _ = codex.submit(Op::UserInputAnswer { id, response }).await; +} + +async fn await_user_input_with_cancel( + fut: F, + parent_session: &Session, + sub_id: &str, + cancel_token: &CancellationToken, +) -> RequestUserInputResponse +where + F: core::future::Future>, +{ + tokio::select! { + biased; + _ = cancel_token.cancelled() => { + let empty = RequestUserInputResponse { + answers: HashMap::new(), + }; + parent_session + .notify_user_input_response(sub_id, empty.clone()) + .await; + empty + } + response = fut => response.unwrap_or_else(|| RequestUserInputResponse { + answers: HashMap::new(), + }), + } +} + /// Await an approval decision, aborting on cancellation. async fn await_approval_with_cancel( fut: F, diff --git a/codex-rs/core/src/compact.rs b/codex-rs/core/src/compact.rs index 250b91415d..bde807154d 100644 --- a/codex-rs/core/src/compact.rs +++ b/codex-rs/core/src/compact.rs @@ -46,7 +46,7 @@ pub(crate) async fn run_inline_auto_compact_task( let prompt = turn_context.compact_prompt().to_string(); let input = vec![UserInput::Text { text: prompt, - // Plain text conversion has no UI element ranges. + // Compaction prompt is synthesized; no UI element ranges to preserve. 
text_elements: Vec::new(), }]; @@ -90,7 +90,6 @@ async fn run_compact_task_inner( model: turn_context.client.get_model(), effort: turn_context.client.get_reasoning_effort(), summary: turn_context.client.get_reasoning_summary(), - base_instructions: turn_context.base_instructions.clone(), user_instructions: turn_context.user_instructions.clone(), developer_instructions: turn_context.developer_instructions.clone(), final_output_json_schema: turn_context.final_output_json_schema.clone(), @@ -104,6 +103,7 @@ async fn run_compact_task_inner( let turn_input_len = turn_input.len(); let prompt = Prompt { input: turn_input, + base_instructions: sess.get_base_instructions().await, ..Default::default() }; let attempt_result = drain_to_completed(&sess, turn_context.as_ref(), &prompt).await; @@ -316,6 +316,9 @@ async fn drain_to_completed( sess.record_into_history(std::slice::from_ref(&item), turn_context) .await; } + Ok(ResponseEvent::ServerReasoningIncluded(included)) => { + sess.set_server_reasoning_included(included).await; + } Ok(ResponseEvent::RateLimits(snapshot)) => { sess.update_rate_limits(turn_context, snapshot).await; } diff --git a/codex-rs/core/src/compact_remote.rs b/codex-rs/core/src/compact_remote.rs index dd038c5287..b8dae3ceab 100644 --- a/codex-rs/core/src/compact_remote.rs +++ b/codex-rs/core/src/compact_remote.rs @@ -54,7 +54,7 @@ async fn run_remote_compact_task_inner_impl( input: history.for_prompt(), tools: vec![], parallel_tool_calls: false, - base_instructions_override: turn_context.base_instructions.clone(), + base_instructions: sess.get_base_instructions().await, output_schema: None, }; diff --git a/codex-rs/core/src/config/edit.rs b/codex-rs/core/src/config/edit.rs index f97903b31a..9c12272d94 100644 --- a/codex-rs/core/src/config/edit.rs +++ b/codex-rs/core/src/config/edit.rs @@ -1,13 +1,14 @@ use crate::config::CONFIG_TOML_FILE; use crate::config::types::McpServerConfig; use crate::config::types::Notice; +use 
crate::path_utils::resolve_symlink_write_paths; +use crate::path_utils::write_atomically; use anyhow::Context; use codex_protocol::config_types::TrustLevel; use codex_protocol::openai_models::ReasoningEffort; use std::collections::BTreeMap; use std::path::Path; use std::path::PathBuf; -use tempfile::NamedTempFile; use tokio::task; use toml_edit::ArrayOfTables; use toml_edit::DocumentMut; @@ -625,10 +626,14 @@ pub fn apply_blocking( } let config_path = codex_home.join(CONFIG_TOML_FILE); - let serialized = match std::fs::read_to_string(&config_path) { - Ok(contents) => contents, - Err(err) if err.kind() == std::io::ErrorKind::NotFound => String::new(), - Err(err) => return Err(err.into()), + let write_paths = resolve_symlink_write_paths(&config_path)?; + let serialized = match write_paths.read_path { + Some(path) => match std::fs::read_to_string(&path) { + Ok(contents) => contents, + Err(err) if err.kind() == std::io::ErrorKind::NotFound => String::new(), + Err(err) => return Err(err.into()), + }, + None => String::new(), }; let doc = if serialized.is_empty() { @@ -654,22 +659,13 @@ pub fn apply_blocking( return Ok(()); } - std::fs::create_dir_all(codex_home).with_context(|| { + write_atomically(&write_paths.write_path, &document.doc.to_string()).with_context(|| { format!( - "failed to create Codex home directory at {}", - codex_home.display() + "failed to persist config.toml at {}", + write_paths.write_path.display() ) })?; - let tmp = NamedTempFile::new_in(codex_home)?; - std::fs::write(tmp.path(), document.doc.to_string()).with_context(|| { - format!( - "failed to write temporary config file at {}", - tmp.path().display() - ) - })?; - tmp.persist(config_path)?; - Ok(()) } @@ -813,6 +809,8 @@ mod tests { use crate::config::types::McpServerTransportConfig; use codex_protocol::openai_models::ReasoningEffort; use pretty_assertions::assert_eq; + #[cfg(unix)] + use std::os::unix::fs::symlink; use tempfile::tempdir; use toml::Value as TomlValue; @@ -952,6 +950,71 @@ 
profiles = { fast = { model = "gpt-4o", sandbox_mode = "strict" } } ); } + #[cfg(unix)] + #[test] + fn blocking_set_model_writes_through_symlink_chain() { + let tmp = tempdir().expect("tmpdir"); + let codex_home = tmp.path(); + let target_dir = tempdir().expect("target dir"); + let target_path = target_dir.path().join(CONFIG_TOML_FILE); + let link_path = codex_home.join("config-link.toml"); + let config_path = codex_home.join(CONFIG_TOML_FILE); + + symlink(&target_path, &link_path).expect("symlink link"); + symlink("config-link.toml", &config_path).expect("symlink config"); + + apply_blocking( + codex_home, + None, + &[ConfigEdit::SetModel { + model: Some("gpt-5.1-codex".to_string()), + effort: Some(ReasoningEffort::High), + }], + ) + .expect("persist"); + + let meta = std::fs::symlink_metadata(&config_path).expect("config metadata"); + assert!(meta.file_type().is_symlink()); + + let contents = std::fs::read_to_string(&target_path).expect("read target"); + let expected = r#"model = "gpt-5.1-codex" +model_reasoning_effort = "high" +"#; + assert_eq!(contents, expected); + } + + #[cfg(unix)] + #[test] + fn blocking_set_model_replaces_symlink_on_cycle() { + let tmp = tempdir().expect("tmpdir"); + let codex_home = tmp.path(); + let link_a = codex_home.join("a.toml"); + let link_b = codex_home.join("b.toml"); + let config_path = codex_home.join(CONFIG_TOML_FILE); + + symlink("b.toml", &link_a).expect("symlink a"); + symlink("a.toml", &link_b).expect("symlink b"); + symlink("a.toml", &config_path).expect("symlink config"); + + apply_blocking( + codex_home, + None, + &[ConfigEdit::SetModel { + model: Some("gpt-5.1-codex".to_string()), + effort: None, + }], + ) + .expect("persist"); + + let meta = std::fs::symlink_metadata(&config_path).expect("config metadata"); + assert!(!meta.file_type().is_symlink()); + + let contents = std::fs::read_to_string(&config_path).expect("read config"); + let expected = r#"model = "gpt-5.1-codex" +"#; + assert_eq!(contents, expected); + } + 
#[test] fn batch_write_table_upsert_preserves_inline_comments() { let tmp = tempdir().expect("tmpdir"); diff --git a/codex-rs/core/src/config/mod.rs b/codex-rs/core/src/config/mod.rs index 956df12e9c..970ec7c4df 100644 --- a/codex-rs/core/src/config/mod.rs +++ b/codex-rs/core/src/config/mod.rs @@ -1,4 +1,6 @@ use crate::auth::AuthCredentialsStoreMode; +use crate::config::edit::ConfigEdit; +use crate::config::edit::ConfigEditsBuilder; use crate::config::types::DEFAULT_OTEL_ENVIRONMENT; use crate::config::types::History; use crate::config::types::McpServerConfig; @@ -751,30 +753,17 @@ pub fn set_default_oss_provider(codex_home: &Path, provider: &str) -> std::io::R )); } } - let config_path = codex_home.join(CONFIG_TOML_FILE); - - // Read existing config or create empty string if file doesn't exist - let content = match std::fs::read_to_string(&config_path) { - Ok(content) => content, - Err(e) if e.kind() == std::io::ErrorKind::NotFound => String::new(), - Err(e) => return Err(e), - }; - - // Parse as DocumentMut for editing while preserving structure - let mut doc = content.parse::().map_err(|e| { - std::io::Error::new( - std::io::ErrorKind::InvalidData, - format!("failed to parse config.toml: {e}"), - ) - })?; - - // Set the default_oss_provider at root level use toml_edit::value; - doc["oss_provider"] = value(provider); - // Write the modified document back - std::fs::write(&config_path, doc.to_string())?; - Ok(()) + let edits = [ConfigEdit::SetPath { + segments: vec!["oss_provider".to_string()], + value: value(provider), + }]; + + ConfigEditsBuilder::new(codex_home) + .with_edits(edits) + .apply_blocking() + .map_err(|err| std::io::Error::other(format!("failed to persist config.toml: {err}"))) } /// Base config deserialized from ~/.codex/config.toml. 
diff --git a/codex-rs/core/src/config/service.rs b/codex-rs/core/src/config/service.rs index 913c02df1d..b6ed9f617e 100644 --- a/codex-rs/core/src/config/service.rs +++ b/codex-rs/core/src/config/service.rs @@ -9,6 +9,9 @@ use crate::config_loader::LoaderOverrides; use crate::config_loader::load_config_layers_state; use crate::config_loader::merge_toml_values; use crate::path_utils; +use crate::path_utils::SymlinkWritePaths; +use crate::path_utils::resolve_symlink_write_paths; +use crate::path_utils::write_atomically; use codex_app_server_protocol::Config as ApiConfig; use codex_app_server_protocol::ConfigBatchWriteParams; use codex_app_server_protocol::ConfigLayerMetadata; @@ -27,6 +30,7 @@ use std::borrow::Cow; use std::path::Path; use std::path::PathBuf; use thiserror::Error; +use tokio::task; use toml::Value as TomlValue; use toml_edit::Item as TomlItem; @@ -362,19 +366,30 @@ impl ConfigService { async fn create_empty_user_layer( config_toml: &AbsolutePathBuf, ) -> Result { - let toml_value = match tokio::fs::read_to_string(config_toml).await { - Ok(contents) => toml::from_str(&contents).map_err(|e| { - ConfigServiceError::toml("failed to parse existing user config.toml", e) - })?, - Err(e) => { - if e.kind() == std::io::ErrorKind::NotFound { - tokio::fs::write(config_toml, "").await.map_err(|e| { - ConfigServiceError::io("failed to create empty user config.toml", e) - })?; + let SymlinkWritePaths { + read_path, + write_path, + } = resolve_symlink_write_paths(config_toml.as_path()) + .map_err(|err| ConfigServiceError::io("failed to resolve user config path", err))?; + let toml_value = match read_path { + Some(path) => match tokio::fs::read_to_string(&path).await { + Ok(contents) => toml::from_str(&contents).map_err(|e| { + ConfigServiceError::toml("failed to parse existing user config.toml", e) + })?, + Err(err) if err.kind() == std::io::ErrorKind::NotFound => { + write_empty_user_config(write_path.clone()).await?; TomlValue::Table(toml::map::Map::new()) - } 
else { - return Err(ConfigServiceError::io("failed to read user config.toml", e)); } + Err(err) => { + return Err(ConfigServiceError::io( + "failed to read user config.toml", + err, + )); + } + }, + None => { + write_empty_user_config(write_path).await?; + TomlValue::Table(toml::map::Map::new()) } }; Ok(ConfigLayerEntry::new( @@ -385,6 +400,13 @@ async fn create_empty_user_layer( )) } +async fn write_empty_user_config(write_path: PathBuf) -> Result<(), ConfigServiceError> { + task::spawn_blocking(move || write_atomically(&write_path, "")) + .await + .map_err(|err| ConfigServiceError::anyhow("config persistence task panicked", err.into()))? + .map_err(|err| ConfigServiceError::io("failed to create empty user config.toml", err)) +} + fn parse_value(value: JsonValue) -> Result, String> { if value.is_null() { return Ok(None); diff --git a/codex-rs/core/src/context_manager/history.rs b/codex-rs/core/src/context_manager/history.rs index 0c133bdc28..4feeddc29f 100644 --- a/codex-rs/core/src/context_manager/history.rs +++ b/codex-rs/core/src/context_manager/history.rs @@ -235,12 +235,19 @@ impl ContextManager { token_estimate as usize } - pub(crate) fn get_total_token_usage(&self) -> i64 { - self.token_info + /// When true, the server already accounted for past reasoning tokens and + /// the client should not re-estimate them. 
+ pub(crate) fn get_total_token_usage(&self, server_reasoning_included: bool) -> i64 { + let last_tokens = self + .token_info .as_ref() .map(|info| info.last_token_usage.total_tokens) - .unwrap_or(0) - .saturating_add(self.get_non_last_reasoning_items_tokens() as i64) + .unwrap_or(0); + if server_reasoning_included { + last_tokens + } else { + last_tokens.saturating_add(self.get_non_last_reasoning_items_tokens() as i64) + } } /// This function enforces a couple of invariants on the in-memory history: diff --git a/codex-rs/core/src/event_mapping.rs b/codex-rs/core/src/event_mapping.rs index eaa477b085..493a159593 100644 --- a/codex-rs/core/src/event_mapping.rs +++ b/codex-rs/core/src/event_mapping.rs @@ -52,7 +52,7 @@ fn parse_user_message(message: &[ContentItem]) -> Option { } content.push(UserInput::Text { text: text.clone(), - // Plain text conversion has no UI element ranges. + // Model input content does not carry UI element ranges. text_elements: Vec::new(), }); } diff --git a/codex-rs/core/src/features.rs b/codex-rs/core/src/features.rs index 66f6a08602..3e95c566bd 100644 --- a/codex-rs/core/src/features.rs +++ b/codex-rs/core/src/features.rs @@ -21,28 +21,33 @@ pub(crate) use legacy::legacy_feature_keys; /// High-level lifecycle stage for a feature. #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub enum Stage { - Experimental, - Beta { + /// Closed beta features to be used while developing or within the company. + Beta, + /// Experimental features made available to users through the `/experimental` menu + Experimental { name: &'static str, menu_description: &'static str, announcement: &'static str, }, + /// Stable features. The feature flag is kept for ad-hoc enabling/disabling Stable, + /// Deprecated feature that should not be used anymore. Deprecated, + /// The feature flag is useless but kept for backward compatibility reason. Removed, } impl Stage { pub fn beta_menu_name(self) -> Option<&'static str> { match self { - Stage::Beta { name, .. 
} => Some(name), + Stage::Experimental { name, .. } => Some(name), _ => None, } } pub fn beta_menu_description(self) -> Option<&'static str> { match self { - Stage::Beta { + Stage::Experimental { menu_description, .. } => Some(menu_description), _ => None, @@ -51,7 +56,7 @@ impl Stage { pub fn beta_announcement(self) -> Option<&'static str> { match self { - Stage::Beta { announcement, .. } => Some(announcement), + Stage::Experimental { announcement, .. } => Some(announcement), _ => None, } } @@ -336,14 +341,14 @@ pub const FEATURES: &[FeatureSpec] = &[ FeatureSpec { id: Feature::WebSearchCached, key: "web_search_cached", - stage: Stage::Experimental, + stage: Stage::Beta, default_enabled: false, }, // Beta program. Rendered in the `/experimental` menu for users. FeatureSpec { id: Feature::UnifiedExec, key: "unified_exec", - stage: Stage::Beta { + stage: Stage::Experimental { name: "Background terminal", menu_description: "Run long-running terminal commands in the background.", announcement: "NEW! Try Background terminals for long-running commands. Enable in /experimental!", @@ -353,7 +358,7 @@ pub const FEATURES: &[FeatureSpec] = &[ FeatureSpec { id: Feature::ShellSnapshot, key: "shell_snapshot", - stage: Stage::Beta { + stage: Stage::Experimental { name: "Shell snapshot", menu_description: "Snapshot your shell environment to avoid re-running login scripts for every command.", announcement: "NEW! Try shell snapshotting to make your Codex faster. 
Enable in /experimental!", @@ -363,50 +368,50 @@ pub const FEATURES: &[FeatureSpec] = &[ FeatureSpec { id: Feature::ChildAgentsMd, key: "child_agents_md", - stage: Stage::Experimental, + stage: Stage::Beta, default_enabled: false, }, FeatureSpec { id: Feature::ApplyPatchFreeform, key: "apply_patch_freeform", - stage: Stage::Experimental, + stage: Stage::Beta, default_enabled: false, }, FeatureSpec { id: Feature::ExecPolicy, key: "exec_policy", - stage: Stage::Experimental, + stage: Stage::Beta, default_enabled: true, }, FeatureSpec { id: Feature::WindowsSandbox, key: "experimental_windows_sandbox", - stage: Stage::Experimental, + stage: Stage::Beta, default_enabled: false, }, FeatureSpec { id: Feature::WindowsSandboxElevated, key: "elevated_windows_sandbox", - stage: Stage::Experimental, + stage: Stage::Beta, default_enabled: false, }, FeatureSpec { id: Feature::RemoteCompaction, key: "remote_compaction", - stage: Stage::Experimental, + stage: Stage::Beta, default_enabled: true, }, FeatureSpec { id: Feature::RemoteModels, key: "remote_models", - stage: Stage::Experimental, + stage: Stage::Beta, default_enabled: false, }, FeatureSpec { id: Feature::PowershellUtf8, key: "powershell_utf8", #[cfg(windows)] - stage: Stage::Beta { + stage: Stage::Experimental { name: "Powershell UTF-8 support", menu_description: "Enable UTF-8 output in Powershell.", announcement: "Codex now supports UTF-8 output in Powershell. 
If you are seeing problems, disable in /experimental.", @@ -414,32 +419,32 @@ pub const FEATURES: &[FeatureSpec] = &[ #[cfg(windows)] default_enabled: true, #[cfg(not(windows))] - stage: Stage::Experimental, + stage: Stage::Beta, #[cfg(not(windows))] default_enabled: false, }, FeatureSpec { id: Feature::EnableRequestCompression, key: "enable_request_compression", - stage: Stage::Experimental, + stage: Stage::Beta, default_enabled: false, }, FeatureSpec { id: Feature::Collab, key: "collab", - stage: Stage::Experimental, + stage: Stage::Beta, default_enabled: false, }, FeatureSpec { id: Feature::Tui2, key: "tui2", - stage: Stage::Experimental, + stage: Stage::Beta, default_enabled: false, }, FeatureSpec { id: Feature::Steer, key: "steer", - stage: Stage::Beta { + stage: Stage::Experimental { name: "Steer conversation", menu_description: "Enter submits immediately; Tab queues messages when a task is running.", announcement: "NEW! Try Steer mode: Enter submits immediately, Tab queues. Enable in /experimental!", @@ -449,7 +454,7 @@ pub const FEATURES: &[FeatureSpec] = &[ FeatureSpec { id: Feature::CollaborationModes, key: "collaboration_modes", - stage: Stage::Experimental, + stage: Stage::Beta, default_enabled: false, }, ]; diff --git a/codex-rs/core/src/models_manager/model_info.rs b/codex-rs/core/src/models_manager/model_info.rs index 5feeb6e12a..b6e7ef91af 100644 --- a/codex-rs/core/src/models_manager/model_info.rs +++ b/codex-rs/core/src/models_manager/model_info.rs @@ -86,6 +86,9 @@ pub(crate) fn with_config_overrides(mut model: ModelInfo, config: &Config) -> Mo } }; } + if let Some(base_instructions) = &config.base_instructions { + model.base_instructions = base_instructions.clone(); + } model } diff --git a/codex-rs/core/src/path_utils.rs b/codex-rs/core/src/path_utils.rs index 65b3db0f6d..4885679e52 100644 --- a/codex-rs/core/src/path_utils.rs +++ b/codex-rs/core/src/path_utils.rs @@ -1,5 +1,9 @@ +use codex_utils_absolute_path::AbsolutePathBuf; +use 
std::collections::HashSet; +use std::io; use std::path::Path; use std::path::PathBuf; +use tempfile::NamedTempFile; use crate::env; @@ -8,6 +12,106 @@ pub fn normalize_for_path_comparison(path: impl AsRef) -> std::io::Result< Ok(normalize_for_wsl(canonical)) } +pub struct SymlinkWritePaths { + pub read_path: Option, + pub write_path: PathBuf, +} + +/// Resolve the final filesystem target for `path` while retaining a safe write path. +/// +/// This follows symlink chains (including relative symlink targets) until it reaches a +/// non-symlink path. If the chain cycles or any metadata/link resolution fails, it +/// returns `read_path: None` and uses the original absolute path as `write_path`. +/// There is no fixed max-resolution count; cycles are detected via a visited set. +pub fn resolve_symlink_write_paths(path: &Path) -> io::Result { + let root = AbsolutePathBuf::from_absolute_path(path) + .map(AbsolutePathBuf::into_path_buf) + .unwrap_or_else(|_| path.to_path_buf()); + let mut current = root.clone(); + let mut visited = HashSet::new(); + + // Follow symlink chains while guarding against cycles. + loop { + let meta = match std::fs::symlink_metadata(¤t) { + Ok(meta) => meta, + Err(err) if err.kind() == io::ErrorKind::NotFound => { + return Ok(SymlinkWritePaths { + read_path: Some(current.clone()), + write_path: current, + }); + } + Err(_) => { + return Ok(SymlinkWritePaths { + read_path: None, + write_path: root, + }); + } + }; + + if !meta.file_type().is_symlink() { + return Ok(SymlinkWritePaths { + read_path: Some(current.clone()), + write_path: current, + }); + } + + // If we've already seen this path, the chain cycles. 
+ if !visited.insert(current.clone()) { + return Ok(SymlinkWritePaths { + read_path: None, + write_path: root, + }); + } + + let target = match std::fs::read_link(¤t) { + Ok(target) => target, + Err(_) => { + return Ok(SymlinkWritePaths { + read_path: None, + write_path: root, + }); + } + }; + + let next = if target.is_absolute() { + AbsolutePathBuf::from_absolute_path(&target) + } else if let Some(parent) = current.parent() { + AbsolutePathBuf::resolve_path_against_base(&target, parent) + } else { + return Ok(SymlinkWritePaths { + read_path: None, + write_path: root, + }); + }; + + let next = match next { + Ok(path) => path.into_path_buf(), + Err(_) => { + return Ok(SymlinkWritePaths { + read_path: None, + write_path: root, + }); + } + }; + + current = next; + } +} + +pub fn write_atomically(write_path: &Path, contents: &str) -> io::Result<()> { + let parent = write_path.parent().ok_or_else(|| { + io::Error::new( + io::ErrorKind::InvalidInput, + format!("path {} has no parent directory", write_path.display()), + ) + })?; + std::fs::create_dir_all(parent)?; + let tmp = NamedTempFile::new_in(parent)?; + std::fs::write(tmp.path(), contents)?; + tmp.persist(write_path)?; + Ok(()) +} + fn normalize_for_wsl(path: PathBuf) -> PathBuf { normalize_for_wsl_with_flag(path, env::is_wsl()) } @@ -84,6 +188,29 @@ fn lower_ascii_path(path: PathBuf) -> PathBuf { #[cfg(test)] mod tests { + #[cfg(unix)] + mod symlinks { + use super::super::resolve_symlink_write_paths; + use pretty_assertions::assert_eq; + use std::os::unix::fs::symlink; + + #[test] + fn symlink_cycles_fall_back_to_root_write_path() -> std::io::Result<()> { + let dir = tempfile::tempdir()?; + let a = dir.path().join("a"); + let b = dir.path().join("b"); + + symlink(&b, &a)?; + symlink(&a, &b)?; + + let resolved = resolve_symlink_write_paths(&a)?; + + assert_eq!(resolved.read_path, None); + assert_eq!(resolved.write_path, a); + Ok(()) + } + } + #[cfg(target_os = "linux")] mod wsl { use 
super::super::normalize_for_wsl_with_flag; diff --git a/codex-rs/core/src/rollout/policy.rs b/codex-rs/core/src/rollout/policy.rs index fafdc83102..2d4a79cc22 100644 --- a/codex-rs/core/src/rollout/policy.rs +++ b/codex-rs/core/src/rollout/policy.rs @@ -67,6 +67,7 @@ pub(crate) fn should_persist_event_msg(ev: &EventMsg) -> bool { | EventMsg::ExecCommandOutputDelta(_) | EventMsg::ExecCommandEnd(_) | EventMsg::ExecApprovalRequest(_) + | EventMsg::RequestUserInput(_) | EventMsg::ElicitationRequest(_) | EventMsg::ApplyPatchApprovalRequest(_) | EventMsg::BackgroundEvent(_) diff --git a/codex-rs/core/src/rollout/recorder.rs b/codex-rs/core/src/rollout/recorder.rs index 1c6503f318..8c2efcf4e1 100644 --- a/codex-rs/core/src/rollout/recorder.rs +++ b/codex-rs/core/src/rollout/recorder.rs @@ -9,6 +9,7 @@ use std::path::PathBuf; use std::time::SystemTime; use codex_protocol::ThreadId; +use codex_protocol::models::BaseInstructions; use serde_json::Value; use time::OffsetDateTime; use time::format_description::FormatItem; @@ -59,6 +60,7 @@ pub enum RolloutRecorderParams { conversation_id: ThreadId, forked_from_id: Option, source: SessionSource, + base_instructions: BaseInstructions, }, Resume { path: PathBuf, @@ -81,11 +83,13 @@ impl RolloutRecorderParams { conversation_id: ThreadId, forked_from_id: Option, source: SessionSource, + base_instructions: BaseInstructions, ) -> Self { Self::Create { conversation_id, forked_from_id, source, + base_instructions, } } @@ -160,6 +164,7 @@ impl RolloutRecorder { conversation_id, forked_from_id, source, + base_instructions, } => { let LogFileInfo { file, @@ -188,6 +193,7 @@ impl RolloutRecorder { cli_version: env!("CARGO_PKG_VERSION").to_string(), source, model_provider: Some(config.model_provider_id.clone()), + base_instructions: Some(base_instructions), }), ) } diff --git a/codex-rs/core/src/rollout/tests.rs b/codex-rs/core/src/rollout/tests.rs index c519c0c70c..08d4a1e2c9 100644 --- a/codex-rs/core/src/rollout/tests.rs +++ 
b/codex-rs/core/src/rollout/tests.rs @@ -6,6 +6,7 @@ use std::fs::{self}; use std::io::Write; use std::path::Path; +use pretty_assertions::assert_eq; use tempfile::TempDir; use time::Duration; use time::OffsetDateTime; @@ -89,6 +90,7 @@ fn write_session_file_with_provider( "cwd": ".", "originator": "test_originator", "cli_version": "test_version", + "base_instructions": null, }); if let Some(source) = source { @@ -129,6 +131,48 @@ fn write_session_file_with_provider( Ok((dt, uuid)) } +fn write_session_file_with_meta_payload( + root: &Path, + ts_str: &str, + uuid: Uuid, + payload: serde_json::Value, +) -> std::io::Result<()> { + let format: &[FormatItem] = + format_description!("[year]-[month]-[day]T[hour]-[minute]-[second]"); + let dt = PrimitiveDateTime::parse(ts_str, format) + .unwrap() + .assume_utc(); + let dir = root + .join("sessions") + .join(format!("{:04}", dt.year())) + .join(format!("{:02}", u8::from(dt.month()))) + .join(format!("{:02}", dt.day())); + fs::create_dir_all(&dir)?; + + let filename = format!("rollout-{ts_str}-{uuid}.jsonl"); + let file_path = dir.join(filename); + let mut file = File::create(file_path)?; + + let meta = serde_json::json!({ + "timestamp": ts_str, + "type": "session_meta", + "payload": payload, + }); + writeln!(file, "{meta}")?; + + let user_event = serde_json::json!({ + "timestamp": ts_str, + "type": "event_msg", + "payload": {"type": "user_message", "message": "Hello from user", "kind": "plain"} + }); + writeln!(file, "{user_event}")?; + + let times = FileTimes::new().set_modified(dt.into()); + file.set_times(times)?; + + Ok(()) +} + #[tokio::test] async fn test_list_conversations_latest_first() { let temp = TempDir::new().unwrap(); @@ -206,6 +250,7 @@ async fn test_list_conversations_latest_first() { "cli_version": "test_version", "source": "vscode", "model_provider": "test-provider", + "base_instructions": null, })]; let head_2 = vec![serde_json::json!({ "id": u2, @@ -215,6 +260,7 @@ async fn 
test_list_conversations_latest_first() { "cli_version": "test_version", "source": "vscode", "model_provider": "test-provider", + "base_instructions": null, })]; let head_1 = vec![serde_json::json!({ "id": u1, @@ -224,6 +270,7 @@ async fn test_list_conversations_latest_first() { "cli_version": "test_version", "source": "vscode", "model_provider": "test-provider", + "base_instructions": null, })]; let updated_times: Vec> = @@ -344,6 +391,7 @@ async fn test_pagination_cursor() { "cli_version": "test_version", "source": "vscode", "model_provider": "test-provider", + "base_instructions": null, })]; let head_4 = vec![serde_json::json!({ "id": u4, @@ -353,6 +401,7 @@ async fn test_pagination_cursor() { "cli_version": "test_version", "source": "vscode", "model_provider": "test-provider", + "base_instructions": null, })]; let updated_page1: Vec> = page1.items.iter().map(|i| i.updated_at.clone()).collect(); @@ -410,6 +459,7 @@ async fn test_pagination_cursor() { "cli_version": "test_version", "source": "vscode", "model_provider": "test-provider", + "base_instructions": null, })]; let head_2 = vec![serde_json::json!({ "id": u2, @@ -419,6 +469,7 @@ async fn test_pagination_cursor() { "cli_version": "test_version", "source": "vscode", "model_provider": "test-provider", + "base_instructions": null, })]; let updated_page2: Vec> = page2.items.iter().map(|i| i.updated_at.clone()).collect(); @@ -470,6 +521,7 @@ async fn test_pagination_cursor() { "cli_version": "test_version", "source": "vscode", "model_provider": "test-provider", + "base_instructions": null, })]; let updated_page3: Vec> = page3.items.iter().map(|i| i.updated_at.clone()).collect(); @@ -527,6 +579,7 @@ async fn test_get_thread_contents() { "cli_version": "test_version", "source": "vscode", "model_provider": "test-provider", + "base_instructions": null, })]; let expected_page = ThreadsPage { items: vec![ThreadItem { @@ -551,6 +604,7 @@ async fn test_get_thread_contents() { "cwd": ".", "originator": "test_originator", 
"cli_version": "test_version", + "base_instructions": null, "source": "vscode", "model_provider": "test-provider", } @@ -566,6 +620,93 @@ async fn test_get_thread_contents() { assert_eq!(content, expected_content); } +#[tokio::test] +async fn test_base_instructions_missing_in_meta_defaults_to_null() { + let temp = TempDir::new().unwrap(); + let home = temp.path(); + + let ts = "2025-04-02T10-30-00"; + let uuid = Uuid::from_u128(101); + let payload = serde_json::json!({ + "id": uuid, + "timestamp": ts, + "cwd": ".", + "originator": "test_originator", + "cli_version": "test_version", + "source": "vscode", + "model_provider": "test-provider", + }); + write_session_file_with_meta_payload(home, ts, uuid, payload).unwrap(); + + let provider_filter = provider_vec(&[TEST_PROVIDER]); + let page = get_threads( + home, + 1, + None, + ThreadSortKey::CreatedAt, + INTERACTIVE_SESSION_SOURCES, + Some(provider_filter.as_slice()), + TEST_PROVIDER, + ) + .await + .unwrap(); + + let head = page + .items + .first() + .and_then(|item| item.head.first()) + .expect("session meta head"); + assert_eq!( + head.get("base_instructions"), + Some(&serde_json::Value::Null) + ); +} + +#[tokio::test] +async fn test_base_instructions_present_in_meta_is_preserved() { + let temp = TempDir::new().unwrap(); + let home = temp.path(); + + let ts = "2025-04-03T10-30-00"; + let uuid = Uuid::from_u128(102); + let base_text = "Custom base instructions"; + let payload = serde_json::json!({ + "id": uuid, + "timestamp": ts, + "cwd": ".", + "originator": "test_originator", + "cli_version": "test_version", + "source": "vscode", + "model_provider": "test-provider", + "base_instructions": {"text": base_text}, + }); + write_session_file_with_meta_payload(home, ts, uuid, payload).unwrap(); + + let provider_filter = provider_vec(&[TEST_PROVIDER]); + let page = get_threads( + home, + 1, + None, + ThreadSortKey::CreatedAt, + INTERACTIVE_SESSION_SOURCES, + Some(provider_filter.as_slice()), + TEST_PROVIDER, + ) + .await + 
.unwrap(); + + let head = page + .items + .first() + .and_then(|item| item.head.first()) + .expect("session meta head"); + let base = head + .get("base_instructions") + .and_then(|value| value.get("text")) + .and_then(serde_json::Value::as_str); + assert_eq!(base, Some(base_text)); +} + #[tokio::test] async fn test_created_at_sort_uses_file_mtime_for_updated_at() -> Result<()> { let temp = TempDir::new().unwrap(); @@ -637,6 +778,7 @@ async fn test_updated_at_uses_file_mtime() -> Result<()> { cli_version: "test_version".into(), source: SessionSource::VSCode, model_provider: Some("test-provider".into()), + base_instructions: None, }, git: None, }), @@ -744,6 +886,7 @@ async fn test_stable_ordering_same_second_pagination() { "cli_version": "test_version", "source": "vscode", "model_provider": "test-provider", + "base_instructions": null, })] }; let updated_page1: Vec> = diff --git a/codex-rs/core/src/shell_snapshot.rs b/codex-rs/core/src/shell_snapshot.rs index 9c10eedf42..be316afa5b 100644 --- a/codex-rs/core/src/shell_snapshot.rs +++ b/codex-rs/core/src/shell_snapshot.rs @@ -135,6 +135,13 @@ async fn run_shell_script_with_timeout( // returns a ref of handler. 
let mut handler = Command::new(&args[0]); handler.args(&args[1..]); + #[cfg(unix)] + unsafe { + handler.pre_exec(|| { + codex_utils_pty::process_group::detach_from_tty()?; + Ok(()) + }); + } handler.kill_on_drop(true); let output = timeout(snapshot_timeout, handler.output()) .await diff --git a/codex-rs/core/src/spawn.rs b/codex-rs/core/src/spawn.rs index c1a8d4457a..b2a507fda7 100644 --- a/codex-rs/core/src/spawn.rs +++ b/codex-rs/core/src/spawn.rs @@ -66,12 +66,12 @@ pub(crate) async fn spawn_child_async( #[cfg(unix)] unsafe { - let set_process_group = matches!(stdio_policy, StdioPolicy::RedirectForShellTool); + let detach_from_tty = matches!(stdio_policy, StdioPolicy::RedirectForShellTool); #[cfg(target_os = "linux")] let parent_pid = libc::getpid(); cmd.pre_exec(move || { - if set_process_group { - codex_utils_pty::process_group::set_process_group()?; + if detach_from_tty { + codex_utils_pty::process_group::detach_from_tty()?; } // This relies on prctl(2), so it only works on Linux. 
diff --git a/codex-rs/core/src/state/session.rs b/codex-rs/core/src/state/session.rs index c61d188373..746396949e 100644 --- a/codex-rs/core/src/state/session.rs +++ b/codex-rs/core/src/state/session.rs @@ -14,6 +14,7 @@ pub(crate) struct SessionState { pub(crate) session_configuration: SessionConfiguration, pub(crate) history: ContextManager, pub(crate) latest_rate_limits: Option, + pub(crate) server_reasoning_included: bool, } impl SessionState { @@ -24,6 +25,7 @@ impl SessionState { session_configuration, history, latest_rate_limits: None, + server_reasoning_included: false, } } @@ -78,8 +80,17 @@ impl SessionState { self.history.set_token_usage_full(context_window); } - pub(crate) fn get_total_token_usage(&self) -> i64 { - self.history.get_total_token_usage() + pub(crate) fn get_total_token_usage(&self, server_reasoning_included: bool) -> i64 { + self.history + .get_total_token_usage(server_reasoning_included) + } + + pub(crate) fn set_server_reasoning_included(&mut self, included: bool) { + self.server_reasoning_included = included; + } + + pub(crate) fn server_reasoning_included(&self) -> bool { + self.server_reasoning_included } } diff --git a/codex-rs/core/src/state/turn.rs b/codex-rs/core/src/state/turn.rs index ddc9efafea..66e2f694e9 100644 --- a/codex-rs/core/src/state/turn.rs +++ b/codex-rs/core/src/state/turn.rs @@ -9,6 +9,7 @@ use tokio_util::sync::CancellationToken; use tokio_util::task::AbortOnDropHandle; use codex_protocol::models::ResponseInputItem; +use codex_protocol::request_user_input::RequestUserInputResponse; use tokio::sync::oneshot; use crate::codex::TurnContext; @@ -37,7 +38,6 @@ pub(crate) enum TaskKind { Compact, } -#[derive(Clone)] pub(crate) struct RunningTask { pub(crate) done: Arc, pub(crate) kind: TaskKind, @@ -45,6 +45,8 @@ pub(crate) struct RunningTask { pub(crate) cancellation_token: CancellationToken, pub(crate) handle: Arc>, pub(crate) turn_context: Arc, + // Timer recorded when the task drops to capture the full turn 
duration. + pub(crate) _timer: Option, } impl ActiveTurn { @@ -67,6 +69,7 @@ impl ActiveTurn { #[derive(Default)] pub(crate) struct TurnState { pending_approvals: HashMap>, + pending_user_input: HashMap>, pending_input: Vec, } @@ -88,9 +91,25 @@ impl TurnState { pub(crate) fn clear_pending(&mut self) { self.pending_approvals.clear(); + self.pending_user_input.clear(); self.pending_input.clear(); } + pub(crate) fn insert_pending_user_input( + &mut self, + key: String, + tx: oneshot::Sender, + ) -> Option> { + self.pending_user_input.insert(key, tx) + } + + pub(crate) fn remove_pending_user_input( + &mut self, + key: &str, + ) -> Option> { + self.pending_user_input.remove(key) + } + pub(crate) fn push_pending_input(&mut self, input: ResponseInputItem) { self.pending_input.push(input); } diff --git a/codex-rs/core/src/tasks/mod.rs b/codex-rs/core/src/tasks/mod.rs index d6754c23c8..83326b1f05 100644 --- a/codex-rs/core/src/tasks/mod.rs +++ b/codex-rs/core/src/tasks/mod.rs @@ -144,6 +144,12 @@ impl Session { }) }; + let timer = turn_context + .client + .get_otel_manager() + .start_timer("codex.turn.e2e_duration_ms", &[]) + .ok(); + let running_task = RunningTask { done, handle: Arc::new(AbortOnDropHandle::new(handle)), @@ -151,6 +157,7 @@ impl Session { task, cancellation_token, turn_context: Arc::clone(&turn_context), + _timer: timer, }; self.register_new_active_task(running_task).await; } diff --git a/codex-rs/core/src/tasks/regular.rs b/codex-rs/core/src/tasks/regular.rs index f897d3ce87..cac0cd5da0 100644 --- a/codex-rs/core/src/tasks/regular.rs +++ b/codex-rs/core/src/tasks/regular.rs @@ -30,6 +30,7 @@ impl SessionTask for RegularTask { ) -> Option { let sess = session.clone_session(); let run_turn_span = trace_span!("run_turn"); + sess.set_server_reasoning_included(false).await; sess.services .otel_manager .apply_traceparent_parent(&run_turn_span); diff --git a/codex-rs/core/src/tasks/review.rs b/codex-rs/core/src/tasks/review.rs index 9157e922ec..5b0d0bebe9 
100644 --- a/codex-rs/core/src/tasks/review.rs +++ b/codex-rs/core/src/tasks/review.rs @@ -190,8 +190,8 @@ pub(crate) async fn exit_review_mode( review_output: Option, ctx: Arc, ) { - const REVIEW_USER_MESSAGE_ID: &str = "review:rollout:user"; - const REVIEW_ASSISTANT_MESSAGE_ID: &str = "review:rollout:assistant"; + const REVIEW_USER_MESSAGE_ID: &str = "review_rollout_user"; + const REVIEW_ASSISTANT_MESSAGE_ID: &str = "review_rollout_assistant"; let (user_message, assistant_message) = if let Some(out) = review_output.clone() { let mut findings_str = String::new(); let text = out.overall_explanation.trim(); diff --git a/codex-rs/core/src/thread_manager.rs b/codex-rs/core/src/thread_manager.rs index f533e608f7..01dc99d91a 100644 --- a/codex-rs/core/src/thread_manager.rs +++ b/codex-rs/core/src/thread_manager.rs @@ -235,6 +235,15 @@ impl ThreadManager { self.state.threads.write().await.remove(thread_id) } + /// Closes all threads open in this ThreadManager + pub async fn remove_and_close_all_threads(&self) -> CodexResult<()> { + for thread in self.state.threads.read().await.values() { + thread.submit(Op::Shutdown).await?; + } + self.state.threads.write().await.clear(); + Ok(()) + } + /// Fork an existing thread by taking messages up to the given position (not including /// the message at the given position) and starting a new thread with identical /// configuration (unless overridden by the caller's `config`). 
The new thread will have diff --git a/codex-rs/core/src/tools/handlers/collab.rs b/codex-rs/core/src/tools/handlers/collab.rs index 6117211757..56463e644a 100644 --- a/codex-rs/core/src/tools/handlers/collab.rs +++ b/codex-rs/core/src/tools/handlers/collab.rs @@ -12,6 +12,7 @@ use crate::tools::registry::ToolHandler; use crate::tools::registry::ToolKind; use async_trait::async_trait; use codex_protocol::ThreadId; +use codex_protocol::models::BaseInstructions; use codex_protocol::protocol::CollabAgentInteractionBeginEvent; use codex_protocol::protocol::CollabAgentInteractionEndEvent; use codex_protocol::protocol::CollabAgentSpawnBeginEvent; @@ -115,10 +116,12 @@ mod spawn { .into(), ) .await; - let mut config = build_agent_spawn_config(turn.as_ref())?; + let mut config = + build_agent_spawn_config(&session.get_base_instructions().await, turn.as_ref())?; agent_role .apply_to_config(&mut config) .map_err(FunctionCallError::RespondToModel)?; + let result = session .services .agent_control @@ -557,15 +560,18 @@ fn collab_agent_error(agent_id: ThreadId, err: CodexErr) -> FunctionCallError { } } -fn build_agent_spawn_config(turn: &TurnContext) -> Result { +fn build_agent_spawn_config( + base_instructions: &BaseInstructions, + turn: &TurnContext, +) -> Result { let base_config = turn.client.config(); let mut config = (*base_config).clone(); + config.base_instructions = Some(base_instructions.text.clone()); config.model = Some(turn.client.get_model()); config.model_provider = turn.client.get_provider(); config.model_reasoning_effort = turn.client.get_reasoning_effort(); config.model_reasoning_summary = turn.client.get_reasoning_summary(); config.developer_instructions = turn.developer_instructions.clone(); - config.base_instructions = turn.base_instructions.clone(); config.compact_prompt = turn.compact_prompt.clone(); config.user_instructions = turn.user_instructions.clone(); config.shell_environment_policy = turn.shell_environment_policy.clone(); @@ -1062,8 +1068,10 @@ mod 
tests { #[tokio::test] async fn build_agent_spawn_config_uses_turn_context_values() { let (_session, mut turn) = make_session_and_context().await; + let base_instructions = BaseInstructions { + text: "base".to_string(), + }; turn.developer_instructions = Some("dev".to_string()); - turn.base_instructions = Some("base".to_string()); turn.compact_prompt = Some("compact".to_string()); turn.user_instructions = Some("user".to_string()); turn.shell_environment_policy = ShellEnvironmentPolicy { @@ -1076,14 +1084,14 @@ mod tests { turn.approval_policy = AskForApproval::Never; turn.sandbox_policy = SandboxPolicy::DangerFullAccess; - let config = build_agent_spawn_config(&turn).expect("spawn config"); + let config = build_agent_spawn_config(&base_instructions, &turn).expect("spawn config"); let mut expected = (*turn.client.config()).clone(); + expected.base_instructions = Some(base_instructions.text); expected.model = Some(turn.client.get_model()); expected.model_provider = turn.client.get_provider(); expected.model_reasoning_effort = turn.client.get_reasoning_effort(); expected.model_reasoning_summary = turn.client.get_reasoning_summary(); expected.developer_instructions = turn.developer_instructions.clone(); - expected.base_instructions = turn.base_instructions.clone(); expected.compact_prompt = turn.compact_prompt.clone(); expected.user_instructions = turn.user_instructions.clone(); expected.shell_environment_policy = turn.shell_environment_policy.clone(); diff --git a/codex-rs/core/src/tools/handlers/mod.rs b/codex-rs/core/src/tools/handlers/mod.rs index ab8123df1e..8b63c9567f 100644 --- a/codex-rs/core/src/tools/handlers/mod.rs +++ b/codex-rs/core/src/tools/handlers/mod.rs @@ -6,6 +6,7 @@ mod mcp; mod mcp_resource; mod plan; mod read_file; +mod request_user_input; mod shell; mod test_sync; mod unified_exec; @@ -23,6 +24,7 @@ pub use mcp::McpHandler; pub use mcp_resource::McpResourceHandler; pub use plan::PlanHandler; pub use read_file::ReadFileHandler; +pub use 
request_user_input::RequestUserInputHandler; pub use shell::ShellCommandHandler; pub use shell::ShellHandler; pub use test_sync::TestSyncHandler; diff --git a/codex-rs/core/src/tools/handlers/request_user_input.rs b/codex-rs/core/src/tools/handlers/request_user_input.rs new file mode 100644 index 0000000000..f63b11d235 --- /dev/null +++ b/codex-rs/core/src/tools/handlers/request_user_input.rs @@ -0,0 +1,60 @@ +use async_trait::async_trait; + +use crate::function_tool::FunctionCallError; +use crate::tools::context::ToolInvocation; +use crate::tools::context::ToolOutput; +use crate::tools::context::ToolPayload; +use crate::tools::handlers::parse_arguments; +use crate::tools::registry::ToolHandler; +use crate::tools::registry::ToolKind; +use codex_protocol::request_user_input::RequestUserInputArgs; + +pub struct RequestUserInputHandler; + +#[async_trait] +impl ToolHandler for RequestUserInputHandler { + fn kind(&self) -> ToolKind { + ToolKind::Function + } + + async fn handle(&self, invocation: ToolInvocation) -> Result { + let ToolInvocation { + session, + turn, + call_id, + payload, + .. 
+ } = invocation; + + let arguments = match payload { + ToolPayload::Function { arguments } => arguments, + _ => { + return Err(FunctionCallError::RespondToModel( + "request_user_input handler received unsupported payload".to_string(), + )); + } + }; + + let args: RequestUserInputArgs = parse_arguments(&arguments)?; + let response = session + .request_user_input(turn.as_ref(), call_id, args) + .await + .ok_or_else(|| { + FunctionCallError::RespondToModel( + "request_user_input was cancelled before receiving a response".to_string(), + ) + })?; + + let content = serde_json::to_string(&response).map_err(|err| { + FunctionCallError::Fatal(format!( + "failed to serialize request_user_input response: {err}" + )) + })?; + + Ok(ToolOutput::Function { + content, + content_items: None, + success: Some(true), + }) + } +} diff --git a/codex-rs/core/src/tools/spec.rs b/codex-rs/core/src/tools/spec.rs index 9d272a5e6f..e330d93a03 100644 --- a/codex-rs/core/src/tools/spec.rs +++ b/codex-rs/core/src/tools/spec.rs @@ -27,6 +27,7 @@ pub(crate) struct ToolsConfig { pub apply_patch_tool_type: Option, pub web_search_mode: Option, pub collab_tools: bool, + pub collaboration_modes_tools: bool, pub experimental_supported_tools: Vec, } @@ -45,6 +46,7 @@ impl ToolsConfig { } = params; let include_apply_patch_tool = features.enabled(Feature::ApplyPatchFreeform); let include_collab_tools = features.enabled(Feature::Collab); + let include_collaboration_modes_tools = features.enabled(Feature::CollaborationModes); let shell_type = if !features.enabled(Feature::ShellTool) { ConfigShellToolType::Disabled @@ -76,6 +78,7 @@ impl ToolsConfig { apply_patch_tool_type, web_search_mode: *web_search_mode, collab_tools: include_collab_tools, + collaboration_modes_tools: include_collaboration_modes_tools, experimental_supported_tools: model_info.experimental_supported_tools.clone(), } } @@ -532,6 +535,88 @@ fn create_wait_tool() -> ToolSpec { }) } +fn create_request_user_input_tool() -> ToolSpec { + let mut 
option_props = BTreeMap::new(); + option_props.insert( + "label".to_string(), + JsonSchema::String { + description: Some("User-facing label (1-5 words).".to_string()), + }, + ); + option_props.insert( + "description".to_string(), + JsonSchema::String { + description: Some( + "One short sentence explaining impact/tradeoff if selected.".to_string(), + ), + }, + ); + + let options_schema = JsonSchema::Array { + description: Some( + "Optional 2-3 mutually exclusive choices. Put the recommended option first and suffix its label with \"(Recommended)\". Only include \"Other\" option if we want to include a free form option. If the question is free form in nature, please do not have any option." + .to_string(), + ), + items: Box::new(JsonSchema::Object { + properties: option_props, + required: Some(vec!["label".to_string(), "description".to_string()]), + additional_properties: Some(false.into()), + }), + }; + + let mut question_props = BTreeMap::new(); + question_props.insert( + "id".to_string(), + JsonSchema::String { + description: Some("Stable identifier for mapping answers (snake_case).".to_string()), + }, + ); + question_props.insert( + "header".to_string(), + JsonSchema::String { + description: Some( + "Short header label shown in the UI (12 or fewer chars).".to_string(), + ), + }, + ); + question_props.insert( + "question".to_string(), + JsonSchema::String { + description: Some("Single-sentence prompt shown to the user.".to_string()), + }, + ); + question_props.insert("options".to_string(), options_schema); + + let questions_schema = JsonSchema::Array { + description: Some("Questions to show the user. 
Prefer 1 and do not exceed 3".to_string()), + items: Box::new(JsonSchema::Object { + properties: question_props, + required: Some(vec![ + "id".to_string(), + "header".to_string(), + "question".to_string(), + ]), + additional_properties: Some(false.into()), + }), + }; + + let mut properties = BTreeMap::new(); + properties.insert("questions".to_string(), questions_schema); + + ToolSpec::Function(ResponsesApiTool { + name: "request_user_input".to_string(), + description: + "Request user input for one to three short questions and wait for the response." + .to_string(), + strict: false, + parameters: JsonSchema::Object { + properties, + required: Some(vec!["questions".to_string()]), + additional_properties: Some(false.into()), + }, + }) +} + fn create_close_agent_tool() -> ToolSpec { let mut properties = BTreeMap::new(); properties.insert( @@ -1140,6 +1225,7 @@ pub(crate) fn build_specs( use crate::tools::handlers::McpResourceHandler; use crate::tools::handlers::PlanHandler; use crate::tools::handlers::ReadFileHandler; + use crate::tools::handlers::RequestUserInputHandler; use crate::tools::handlers::ShellCommandHandler; use crate::tools::handlers::ShellHandler; use crate::tools::handlers::TestSyncHandler; @@ -1157,6 +1243,7 @@ pub(crate) fn build_specs( let mcp_handler = Arc::new(McpHandler); let mcp_resource_handler = Arc::new(McpResourceHandler); let shell_command_handler = Arc::new(ShellCommandHandler); + let request_user_input_handler = Arc::new(RequestUserInputHandler); match &config.shell_type { ConfigShellToolType::Default => { @@ -1197,6 +1284,11 @@ pub(crate) fn build_specs( builder.push_spec(PLAN_TOOL.clone()); builder.register_handler("update_plan", plan_handler); + if config.collaboration_modes_tools { + builder.push_spec(create_request_user_input_tool()); + builder.register_handler("request_user_input", request_user_input_handler); + } + if let Some(apply_patch_tool_type) = &config.apply_patch_tool_type { match apply_patch_tool_type { 
ApplyPatchToolType::Freeform => { @@ -1398,6 +1490,7 @@ mod tests { let model_info = ModelsManager::construct_model_info_offline("gpt-5-codex", &config); let mut features = Features::with_defaults(); features.enable(Feature::UnifiedExec); + features.enable(Feature::CollaborationModes); let config = ToolsConfig::new(&ToolsConfigParams { model_info: &model_info, features: &features, @@ -1430,6 +1523,7 @@ mod tests { create_list_mcp_resource_templates_tool(), create_read_mcp_resource_tool(), PLAN_TOOL.clone(), + create_request_user_input_tool(), create_apply_patch_freeform_tool(), ToolSpec::WebSearch { external_web_access: Some(true), @@ -1460,6 +1554,7 @@ mod tests { let model_info = ModelsManager::construct_model_info_offline("gpt-5-codex", &config); let mut features = Features::with_defaults(); features.enable(Feature::Collab); + features.enable(Feature::CollaborationModes); let tools_config = ToolsConfig::new(&ToolsConfigParams { model_info: &model_info, features: &features, @@ -1472,6 +1567,33 @@ mod tests { ); } + #[test] + fn request_user_input_requires_collaboration_modes_feature() { + let config = test_config(); + let model_info = ModelsManager::construct_model_info_offline("gpt-5-codex", &config); + let mut features = Features::with_defaults(); + features.disable(Feature::CollaborationModes); + let tools_config = ToolsConfig::new(&ToolsConfigParams { + model_info: &model_info, + features: &features, + web_search_mode: Some(WebSearchMode::Cached), + }); + let (tools, _) = build_specs(&tools_config, None).build(); + assert!( + !tools.iter().any(|t| t.spec.name() == "request_user_input"), + "request_user_input should be disabled when collaboration_modes feature is off" + ); + + features.enable(Feature::CollaborationModes); + let tools_config = ToolsConfig::new(&ToolsConfigParams { + model_info: &model_info, + features: &features, + web_search_mode: Some(WebSearchMode::Cached), + }); + let (tools, _) = build_specs(&tools_config, None).build(); + 
assert_contains_tool_names(&tools, &["request_user_input"]); + } + fn assert_model_tools( model_slug: &str, features: &Features, @@ -1536,9 +1658,11 @@ mod tests { #[test] fn test_build_specs_gpt5_codex_default() { + let mut features = Features::with_defaults(); + features.enable(Feature::CollaborationModes); assert_model_tools( "gpt-5-codex", - &Features::with_defaults(), + &features, Some(WebSearchMode::Cached), &[ "shell_command", @@ -1546,6 +1670,7 @@ mod tests { "list_mcp_resource_templates", "read_mcp_resource", "update_plan", + "request_user_input", "apply_patch", "web_search", "view_image", @@ -1555,9 +1680,11 @@ mod tests { #[test] fn test_build_specs_gpt51_codex_default() { + let mut features = Features::with_defaults(); + features.enable(Feature::CollaborationModes); assert_model_tools( "gpt-5.1-codex", - &Features::with_defaults(), + &features, Some(WebSearchMode::Cached), &[ "shell_command", @@ -1565,6 +1692,7 @@ mod tests { "list_mcp_resource_templates", "read_mcp_resource", "update_plan", + "request_user_input", "apply_patch", "web_search", "view_image", @@ -1574,9 +1702,12 @@ mod tests { #[test] fn test_build_specs_gpt5_codex_unified_exec_web_search() { + let mut features = Features::with_defaults(); + features.enable(Feature::UnifiedExec); + features.enable(Feature::CollaborationModes); assert_model_tools( "gpt-5-codex", - Features::with_defaults().enable(Feature::UnifiedExec), + &features, Some(WebSearchMode::Live), &[ "exec_command", @@ -1585,6 +1716,7 @@ mod tests { "list_mcp_resource_templates", "read_mcp_resource", "update_plan", + "request_user_input", "apply_patch", "web_search", "view_image", @@ -1594,9 +1726,12 @@ mod tests { #[test] fn test_build_specs_gpt51_codex_unified_exec_web_search() { + let mut features = Features::with_defaults(); + features.enable(Feature::UnifiedExec); + features.enable(Feature::CollaborationModes); assert_model_tools( "gpt-5.1-codex", - Features::with_defaults().enable(Feature::UnifiedExec), + &features, 
Some(WebSearchMode::Live), &[ "exec_command", @@ -1605,6 +1740,7 @@ mod tests { "list_mcp_resource_templates", "read_mcp_resource", "update_plan", + "request_user_input", "apply_patch", "web_search", "view_image", @@ -1614,9 +1750,11 @@ mod tests { #[test] fn test_codex_mini_defaults() { + let mut features = Features::with_defaults(); + features.enable(Feature::CollaborationModes); assert_model_tools( "codex-mini-latest", - &Features::with_defaults(), + &features, Some(WebSearchMode::Cached), &[ "local_shell", @@ -1624,6 +1762,7 @@ mod tests { "list_mcp_resource_templates", "read_mcp_resource", "update_plan", + "request_user_input", "web_search", "view_image", ], @@ -1632,9 +1771,11 @@ mod tests { #[test] fn test_codex_5_1_mini_defaults() { + let mut features = Features::with_defaults(); + features.enable(Feature::CollaborationModes); assert_model_tools( "gpt-5.1-codex-mini", - &Features::with_defaults(), + &features, Some(WebSearchMode::Cached), &[ "shell_command", @@ -1642,6 +1783,7 @@ mod tests { "list_mcp_resource_templates", "read_mcp_resource", "update_plan", + "request_user_input", "apply_patch", "web_search", "view_image", @@ -1651,9 +1793,11 @@ mod tests { #[test] fn test_gpt_5_defaults() { + let mut features = Features::with_defaults(); + features.enable(Feature::CollaborationModes); assert_model_tools( "gpt-5", - &Features::with_defaults(), + &features, Some(WebSearchMode::Cached), &[ "shell", @@ -1661,6 +1805,7 @@ mod tests { "list_mcp_resource_templates", "read_mcp_resource", "update_plan", + "request_user_input", "web_search", "view_image", ], @@ -1669,9 +1814,11 @@ mod tests { #[test] fn test_gpt_5_1_defaults() { + let mut features = Features::with_defaults(); + features.enable(Feature::CollaborationModes); assert_model_tools( "gpt-5.1", - &Features::with_defaults(), + &features, Some(WebSearchMode::Cached), &[ "shell_command", @@ -1679,6 +1826,7 @@ mod tests { "list_mcp_resource_templates", "read_mcp_resource", "update_plan", + "request_user_input", 
"apply_patch", "web_search", "view_image", @@ -1688,9 +1836,11 @@ mod tests { #[test] fn test_exp_5_1_defaults() { + let mut features = Features::with_defaults(); + features.enable(Feature::CollaborationModes); assert_model_tools( "exp-5.1", - &Features::with_defaults(), + &features, Some(WebSearchMode::Cached), &[ "exec_command", @@ -1699,6 +1849,7 @@ mod tests { "list_mcp_resource_templates", "read_mcp_resource", "update_plan", + "request_user_input", "apply_patch", "web_search", "view_image", @@ -1708,9 +1859,12 @@ mod tests { #[test] fn test_codex_mini_unified_exec_web_search() { + let mut features = Features::with_defaults(); + features.enable(Feature::UnifiedExec); + features.enable(Feature::CollaborationModes); assert_model_tools( "codex-mini-latest", - Features::with_defaults().enable(Feature::UnifiedExec), + &features, Some(WebSearchMode::Live), &[ "exec_command", @@ -1719,6 +1873,7 @@ mod tests { "list_mcp_resource_templates", "read_mcp_resource", "update_plan", + "request_user_input", "web_search", "view_image", ], diff --git a/codex-rs/core/templates/agents/orchestrator.md b/codex-rs/core/templates/agents/orchestrator.md index 19be2a1afe..09b6ea0b4e 100644 --- a/codex-rs/core/templates/agents/orchestrator.md +++ b/codex-rs/core/templates/agents/orchestrator.md @@ -14,10 +14,12 @@ You are Codex Orchestrator, based on GPT-5. You are running as an orchestration * **Never stop monitoring workers.** * **Do not rush workers. Be patient.** * The orchestrator must not return unless the task is fully accomplished. +* If the user ask you a question/status while you are working, always answer him before continuing your work. ## Worker execution semantics * While a worker is running, you cannot observe intermediate state. +* Workers are able to run commands, update/create/delete files etc. They can be considered as fully autonomous agents * Messages sent with `send_input` are queued and processed only after the worker finishes, unless interrupted. 
* Therefore: * Do not send messages to “check status” or “ask for progress” unless being asked. @@ -40,7 +42,7 @@ You are Codex Orchestrator, based on GPT-5. You are running as an orchestration * verify correctness, * check integration with other work, * assess whether the global task is closer to completion. -5. If issues remain, assign fixes to the appropriate worker(s) and repeat steps 3–5. +5. If issues remain, assign fixes to the appropriate worker(s) and repeat steps 3–5. Do not fix yourself unless the fixes are very small. 6. Close agents only when no further work is required from them. 7. Return to the user only when the task is fully completed and verified. diff --git a/codex-rs/core/templates/collaboration_mode/plan.md b/codex-rs/core/templates/collaboration_mode/plan.md index 70b710c420..9985ba1139 100644 --- a/codex-rs/core/templates/collaboration_mode/plan.md +++ b/codex-rs/core/templates/collaboration_mode/plan.md @@ -1,40 +1,133 @@ # Collaboration Style: Plan + You work in 2 distinct modes: + 1. Brainstorming: You collaboratively align with the user on what to do or build and how to do it or build it. -2. Writing and confirming a plan: After you've gathered all the information you write up a plan and verify it with the user. -You usually start with the planning step. Skip step 1 if the user provides you with a detailed plan or a small, unambiguous task or plan OR if the user asks you to plan by yourself. +2. Generating a plan: After you've gathered all the information you write up a plan. + You usually start with the brainstorming step. Skip step 1 if the user provides you with a detailed plan or a small, unambiguous task or plan OR if the user asks you to plan by yourself. ## Brainstorming principles + The point of brainstorming with the user is to align on what to do and how to do it. This phase is iterative and conversational. You can interact with the environment and read files if it is helpful, but be mindful of the time. 
You MUST follow the principles below. Think about them carefully as you work with the user. Follow the structure and tone of the examples. -*State what you think the user cares about.* Actively infer what matters most (robustness, clean abstractions, quick lovable interfaces, scalability) and reflect this back to the user to confirm. +_State what you think the user cares about._ Actively infer what matters most (robustness, clean abstractions, quick lovable interfaces, scalability) and reflect this back to the user to confirm. Example: "It seems like you might be prototyping a design for an app, and scalability or performance isn't a concern right now - is that accurate?" -*Think out loud.* Share reasoning when it helps the user evaluate tradeoffs. Keep explanations short and grounded in consequences. Avoid design lectures or exhaustive option lists. +_Think out loud._ Share reasoning when it helps the user evaluate tradeoffs. Keep explanations short and grounded in consequences. Avoid design lectures or exhaustive option lists. -*Use reasonable suggestions.* When the user hasn't specified something, suggest a sensible choice instead of asking an open-ended question. Group your assumptions logically, for example architecture/frameworks/implementation, features/behavior, design/themes/feel. Clearly label suggestions as provisional. Share reasoning when it helps the user evaluate tradeoffs. Keep explanations short and grounded in consequences. They should be easy to accept or override. If the user does not react to a proposed suggestion, consider it accepted. +_Use reasonable suggestions._ When the user hasn't specified something, suggest a sensible choice instead of asking an open-ended question. Group your assumptions logically, for example architecture/frameworks/implementation, features/behavior, design/themes/feel. Clearly label suggestions as provisional. Share reasoning when it helps the user evaluate tradeoffs. 
Keep explanations short and grounded in consequences. They should be easy to accept or override. If the user does not react to a proposed suggestion, consider it accepted. Example: "There are a few viable ways to structure this. A plugin model gives flexibility but adds complexity; a simpler core with extension points is easier to reason about. Given what you've said about your team's size, I'd lean towards the latter - does that resonate?" Example: "If this is a shared internal library, I'll assume API stability matters more than rapid iteration - we can relax that if this is exploratory." -*Ask fewer, better questions.* Prefer making a concrete proposal with stated assumptions over asking questions. Only ask questions when different reasonable suggestions would materially change the plan, you cannot safely proceed, or if you think the user would really want to give input directly. Never ask a question if you already provided a suggestion. +_Ask fewer, better questions._ Prefer making a concrete proposal with stated assumptions over asking questions. Only ask questions when different reasonable suggestions would materially change the plan, you cannot safely proceed, or if you think the user would really want to give input directly. Never ask a question if you already provided a suggestion. You can use `request_user_input` tool to ask questions. -*Think ahead.* What else might the user need? How will the user test and understand what you did? Think about ways to support them and propose things they might need BEFORE you build. Offer at least one suggestion you came up with by thinking ahead. +_Think ahead._ What else might the user need? How will the user test and understand what you did? Think about ways to support them and propose things they might need BEFORE you build. Offer at least one suggestion you came up with by thinking ahead. Example: "This feature changes as time passes but you probably want to test it without waiting for a full hour to pass. 
Would you like a debug mode where you can move through states without just waiting?" -*Be mindful of time.* The user is right here with you. Any time you spend reading files or searching for information is time that the user is waiting for you. Do make use of these tools if helpful, but minimize the time the user is waiting for you. As a rule of thumb, spend only a few seconds on most turns and no more than 60 seconds when doing research. If you are missing information and think you need to do longer research, ask the user whether they want you to research, or want to give you a tip. +_Be mindful of time._ The user is right here with you. Any time you spend reading files or searching for information is time that the user is waiting for you. Do make use of these tools if helpful, but minimize the time the user is waiting for you. As a rule of thumb, spend only a few seconds on most turns and no more than 60 seconds when doing research. If you are missing information and think you need to do longer research, ask the user whether they want you to research, or want to give you a tip. Example: "I checked the readme and searched for the feature you mentioned, but didn't find it immediately. If it's ok, I'll go and spend a bit more time exploring the code base?" +## Using `request_user_input` in Plan Mode + +Use `request_user_input` only when you are genuinely blocked on a decision that materially changes the plan (requirements, trade-offs, rollout or risk posture).The maximum number of `request_user_input` tool calls should be **5**. + +Only include an "Other" option when a free-form answer is truly useful. If the question is purely free-form, leave `options` unset entirely. + +Do **not** use `request_user_input` to ask "is my plan ready?" or "should I proceed?". 
+ +### Examples (technical, schema-populated) + +**1 Boolean (yes/no), no free-form** + +```json +{ + "questions": [ + { + "id": "enable_migration", + "header": "Migrate", + "question": "Enable the database migration in this release?", + "options": [ + { + "label": "Yes (Recommended)", + "description": "Ship the migration with this rollout." + }, + { + "label": "No", + "description": "Defer the migration to a later release." + } + ] + } + ] +} +``` + +**2 Choice with free-form** + +```json +{ + "questions": [ + { + "id": "cache_strategy", + "header": "Cache", + "question": "Which cache strategy should we implement?", + "options": [ + { + "label": "Write-through (Recommended)", + "description": "Simpler consistency with predictable latency." + }, + { + "label": "Write-back", + "description": "Lower write latency but higher complexity." + }, + { + "label": "Other", + "description": "Provide a custom strategy or constraints." + } + ] + } + ] +} +``` + +**3 Free-form only (no options)** + +```json +{ + "questions": [ + { + "id": "rollout_constraints", + "header": "Rollout", + "question": "Any rollout constraints or compliance requirements we must follow?" + } + ] +} +``` + ## Iterating on the plan -Only AFTER you have all the information, write up the full plan. + +Only AFTER you have all the information, write up the full plan. A well written and informative plan should be as detailed as a design doc or PRD and reflect your discussion with the user, at minimum that's one full page! If handed to a different agent, the agent would know exactly what to build without asking questions and arrive at a similar implementation to yours. 
At minimum it should include: + - tools and frameworks you use, any dependencies you need to install - functions, files, or directories you're likely going to edit +- QUestions that were asked and the responses from users - architecture if the code changes are significant - if developing features, describe the features you are going to build in detail like a PM in a PRD - if you are developing a frontend, describe the design in detail +- include a list of todos in markdown format if needed. Please do not include a **plan** step given that we are planning here already -`plan.md`: For long, detailed plans, it makes sense to write them in a separate file. If the changes are substantial and the plan is longer than a full page, ask the user if it's ok to write the plan in `plan.md`. If plan.md is used, ALWAYS update the file rather than outputting the plan in your final answer. +### Output schema - — MUST MATCH _exactly_ -ALWAYS confirm the plan with the user before ending. If the user requests changes or additions to the plan update the plan. Iterate until the user confirms the plan. +When you present the plan, format the final response as a JSON object with a single key, `plan`, whose value is the full plan text. + +Example: + +```json +{ + "plan": "Title: Schema migration rollout\n\n1. Validate the current schema on staging...\n2. Add the new columns with nullable defaults...\n3. Backfill in batches with feature-flagged writes...\n4. Flip reads to the new fields and monitor...\n5. Remove legacy columns after one full release cycle..." +} +``` + +PLEASE DO NOT confirm the plan with the user before ending. The user will be responsible for telling us to update, iterate or execute the plan. 
diff --git a/codex-rs/core/tests/suite/client_websockets.rs b/codex-rs/core/tests/suite/client_websockets.rs index 9d39fb2402..1532ac74d1 100644 --- a/codex-rs/core/tests/suite/client_websockets.rs +++ b/codex-rs/core/tests/suite/client_websockets.rs @@ -15,10 +15,12 @@ use codex_otel::OtelManager; use codex_protocol::ThreadId; use codex_protocol::config_types::ReasoningSummary; use core_test_support::load_default_config_for_test; +use core_test_support::responses::WebSocketConnectionConfig; use core_test_support::responses::WebSocketTestServer; use core_test_support::responses::ev_completed; use core_test_support::responses::ev_response_created; use core_test_support::responses::start_websocket_server; +use core_test_support::responses::start_websocket_server_with_headers; use core_test_support::skip_if_no_network; use futures::StreamExt; use pretty_assertions::assert_eq; @@ -60,6 +62,40 @@ async fn responses_websocket_streams_request() { server.shutdown().await; } +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn responses_websocket_emits_reasoning_included_event() { + skip_if_no_network!(); + + let server = start_websocket_server_with_headers(vec![WebSocketConnectionConfig { + requests: vec![vec![ev_response_created("resp-1"), ev_completed("resp-1")]], + response_headers: vec![("X-Reasoning-Included".to_string(), "true".to_string())], + }]) + .await; + + let harness = websocket_harness(&server).await; + let mut session = harness.client.new_session(); + let prompt = prompt_with_input(vec![message_item("hello")]); + + let mut stream = session + .stream(&prompt) + .await + .expect("websocket stream failed"); + + let mut saw_reasoning_included = false; + while let Some(event) = stream.next().await { + match event.expect("event") { + ResponseEvent::ServerReasoningIncluded(true) => { + saw_reasoning_included = true; + } + ResponseEvent::Completed { .. 
} => break, + _ => {} + } + } + + assert!(saw_reasoning_included); + server.shutdown().await; +} + #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn responses_websocket_appends_on_prefix() { skip_if_no_network!(); diff --git a/codex-rs/core/tests/suite/compact.rs b/codex-rs/core/tests/suite/compact.rs index d033c66644..b06f5ef14b 100644 --- a/codex-rs/core/tests/suite/compact.rs +++ b/codex-rs/core/tests/suite/compact.rs @@ -32,11 +32,13 @@ use core_test_support::responses::ev_completed; use core_test_support::responses::ev_completed_with_tokens; use core_test_support::responses::ev_function_call; use core_test_support::responses::mount_compact_json_once; +use core_test_support::responses::mount_response_sequence; use core_test_support::responses::mount_sse_once; use core_test_support::responses::mount_sse_once_match; use core_test_support::responses::mount_sse_sequence; use core_test_support::responses::sse; use core_test_support::responses::sse_failed; +use core_test_support::responses::sse_response; use core_test_support::responses::start_mock_server; use pretty_assertions::assert_eq; use serde_json::json; @@ -2147,3 +2149,85 @@ async fn auto_compact_counts_encrypted_reasoning_before_last_user() { "third turn should include compaction summary item" ); } + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn auto_compact_runs_when_reasoning_header_clears_between_turns() { + skip_if_no_network!(); + + let server = start_mock_server().await; + + let first_user = "SERVER_INCLUDED_FIRST"; + let second_user = "SERVER_INCLUDED_SECOND"; + let third_user = "SERVER_INCLUDED_THIRD"; + + let pre_last_reasoning_content = "a".repeat(2_400); + let post_last_reasoning_content = "b".repeat(4_000); + + let first_turn = sse(vec![ + ev_reasoning_item("pre-reasoning", &["pre"], &[&pre_last_reasoning_content]), + ev_completed_with_tokens("r1", 10), + ]); + let second_turn = sse(vec![ + ev_reasoning_item("post-reasoning", &["post"], 
&[&post_last_reasoning_content]), + ev_completed_with_tokens("r2", 80), + ]); + let third_turn = sse(vec![ + ev_assistant_message("m4", FINAL_REPLY), + ev_completed_with_tokens("r4", 1), + ]); + + let responses = vec![ + sse_response(first_turn).insert_header("X-Reasoning-Included", "true"), + sse_response(second_turn), + sse_response(third_turn), + ]; + mount_response_sequence(&server, responses).await; + + let compacted_history = vec![ + codex_protocol::models::ResponseItem::Message { + id: None, + role: "assistant".to_string(), + content: vec![codex_protocol::models::ContentItem::OutputText { + text: "REMOTE_COMPACT_SUMMARY".to_string(), + }], + }, + codex_protocol::models::ResponseItem::Compaction { + encrypted_content: "ENCRYPTED_COMPACTION_SUMMARY".to_string(), + }, + ]; + let compact_mock = + mount_compact_json_once(&server, serde_json::json!({ "output": compacted_history })).await; + + let codex = test_codex() + .with_auth(CodexAuth::create_dummy_chatgpt_auth_for_testing()) + .with_config(|config| { + set_test_compact_prompt(config); + config.model_auto_compact_token_limit = Some(300); + config.features.enable(Feature::RemoteCompaction); + }) + .build(&server) + .await + .expect("build codex") + .codex; + + for user in [first_user, second_user, third_user] { + codex + .submit(Op::UserInput { + items: vec![UserInput::Text { + text: user.into(), + text_elements: Vec::new(), + }], + final_output_json_schema: None, + }) + .await + .unwrap(); + wait_for_event(&codex, |ev| matches!(ev, EventMsg::TurnComplete(_))).await; + } + + let compact_requests = compact_mock.requests(); + assert_eq!( + compact_requests.len(), + 1, + "remote compaction should run once after the reasoning header clears" + ); +} diff --git a/codex-rs/core/tests/suite/mod.rs b/codex-rs/core/tests/suite/mod.rs index b8066db387..5cdc440c53 100644 --- a/codex-rs/core/tests/suite/mod.rs +++ b/codex-rs/core/tests/suite/mod.rs @@ -53,6 +53,7 @@ mod quota_exceeded; mod read_file; mod remote_models; mod 
request_compression; +mod request_user_input; mod resume; mod resume_warning; mod review; diff --git a/codex-rs/core/tests/suite/model_tools.rs b/codex-rs/core/tests/suite/model_tools.rs index 8a4d0a7717..b08b5281c8 100644 --- a/codex-rs/core/tests/suite/model_tools.rs +++ b/codex-rs/core/tests/suite/model_tools.rs @@ -1,5 +1,6 @@ #![allow(clippy::unwrap_used)] +use codex_core::features::Feature; use codex_protocol::config_types::WebSearchMode; use core_test_support::load_sse_fixture_with_id; use core_test_support::responses; @@ -36,7 +37,10 @@ async fn collect_tool_identifiers_for_model(model: &str) -> Vec { let mut builder = test_codex() .with_model(model) // Keep tool expectations stable when the default web_search mode changes. - .with_config(|config| config.web_search_mode = Some(WebSearchMode::Cached)); + .with_config(|config| { + config.web_search_mode = Some(WebSearchMode::Cached); + config.features.enable(Feature::CollaborationModes); + }); let test = builder .build(&server) .await @@ -62,6 +66,7 @@ async fn model_selects_expected_tools() { "list_mcp_resource_templates".to_string(), "read_mcp_resource".to_string(), "update_plan".to_string(), + "request_user_input".to_string(), "web_search".to_string(), "view_image".to_string() ], @@ -77,6 +82,7 @@ async fn model_selects_expected_tools() { "list_mcp_resource_templates".to_string(), "read_mcp_resource".to_string(), "update_plan".to_string(), + "request_user_input".to_string(), "apply_patch".to_string(), "web_search".to_string(), "view_image".to_string() @@ -93,6 +99,7 @@ async fn model_selects_expected_tools() { "list_mcp_resource_templates".to_string(), "read_mcp_resource".to_string(), "update_plan".to_string(), + "request_user_input".to_string(), "apply_patch".to_string(), "web_search".to_string(), "view_image".to_string() @@ -109,6 +116,7 @@ async fn model_selects_expected_tools() { "list_mcp_resource_templates".to_string(), "read_mcp_resource".to_string(), "update_plan".to_string(), + 
"request_user_input".to_string(), "web_search".to_string(), "view_image".to_string() ], @@ -124,6 +132,7 @@ async fn model_selects_expected_tools() { "list_mcp_resource_templates".to_string(), "read_mcp_resource".to_string(), "update_plan".to_string(), + "request_user_input".to_string(), "apply_patch".to_string(), "web_search".to_string(), "view_image".to_string() @@ -140,6 +149,7 @@ async fn model_selects_expected_tools() { "list_mcp_resource_templates".to_string(), "read_mcp_resource".to_string(), "update_plan".to_string(), + "request_user_input".to_string(), "apply_patch".to_string(), "web_search".to_string(), "view_image".to_string() diff --git a/codex-rs/core/tests/suite/prompt_caching.rs b/codex-rs/core/tests/suite/prompt_caching.rs index 4e87a43640..4b04364120 100644 --- a/codex-rs/core/tests/suite/prompt_caching.rs +++ b/codex-rs/core/tests/suite/prompt_caching.rs @@ -92,6 +92,7 @@ async fn prompt_tools_are_consistent_across_requests() -> anyhow::Result<()> { config.model = Some("gpt-5.1-codex-max".to_string()); // Keep tool expectations stable when the default web_search mode changes. config.web_search_mode = Some(WebSearchMode::Cached); + config.features.enable(Feature::CollaborationModes); }) .build(&server) .await?; @@ -135,6 +136,7 @@ async fn prompt_tools_are_consistent_across_requests() -> anyhow::Result<()> { "list_mcp_resource_templates", "read_mcp_resource", "update_plan", + "request_user_input", "apply_patch", "web_search", "view_image", @@ -176,6 +178,7 @@ async fn codex_mini_latest_tools() -> anyhow::Result<()> { .with_config(|config| { config.user_instructions = Some("be consistent and helpful".to_string()); config.features.disable(Feature::ApplyPatchFreeform); + config.features.enable(Feature::CollaborationModes); config.model = Some("codex-mini-latest".to_string()); }) .build(&server) @@ -240,6 +243,7 @@ async fn prefixes_context_and_instructions_once_and_consistently_across_requests let TestCodex { codex, config, .. 
} = test_codex() .with_config(|config| { config.user_instructions = Some("be consistent and helpful".to_string()); + config.features.enable(Feature::CollaborationModes); }) .build(&server) .await?; @@ -316,6 +320,7 @@ async fn overrides_turn_context_but_keeps_cached_prefix_and_key_constant() -> an let TestCodex { codex, .. } = test_codex() .with_config(|config| { config.user_instructions = Some("be consistent and helpful".to_string()); + config.features.enable(Feature::CollaborationModes); }) .build(&server) .await?; @@ -538,6 +543,7 @@ async fn per_turn_overrides_keep_cached_prefix_and_key_constant() -> anyhow::Res let TestCodex { codex, .. } = test_codex() .with_config(|config| { config.user_instructions = Some("be consistent and helpful".to_string()); + config.features.enable(Feature::CollaborationModes); }) .build(&server) .await?; @@ -645,6 +651,7 @@ async fn send_user_turn_with_no_changes_does_not_send_environment_context() -> a } = test_codex() .with_config(|config| { config.user_instructions = Some("be consistent and helpful".to_string()); + config.features.enable(Feature::CollaborationModes); }) .build(&server) .await?; @@ -742,6 +749,7 @@ async fn send_user_turn_with_changes_sends_environment_context() -> anyhow::Resu } = test_codex() .with_config(|config| { config.user_instructions = Some("be consistent and helpful".to_string()); + config.features.enable(Feature::CollaborationModes); }) .build(&server) .await?; diff --git a/codex-rs/core/tests/suite/remote_models.rs b/codex-rs/core/tests/suite/remote_models.rs index e7123c4305..8a6beb6e24 100644 --- a/codex-rs/core/tests/suite/remote_models.rs +++ b/codex-rs/core/tests/suite/remote_models.rs @@ -388,9 +388,10 @@ async fn remote_models_apply_remote_base_instructions() -> Result<()> { wait_for_event(&codex, |event| matches!(event, EventMsg::TurnComplete(_))).await; + let base_model_info = models_manager.get_model_info("gpt-5.1", &config).await; let body = response_mock.single_request().body_json(); let 
instructions = body["instructions"].as_str().unwrap(); - assert_eq!(instructions, remote_base); + assert_eq!(instructions, base_model_info.base_instructions); Ok(()) } diff --git a/codex-rs/core/tests/suite/request_user_input.rs b/codex-rs/core/tests/suite/request_user_input.rs new file mode 100644 index 0000000000..3c4405eeb3 --- /dev/null +++ b/codex-rs/core/tests/suite/request_user_input.rs @@ -0,0 +1,155 @@ +#![allow(clippy::unwrap_used)] + +use std::collections::HashMap; + +use codex_core::features::Feature; +use codex_core::protocol::AskForApproval; +use codex_core::protocol::EventMsg; +use codex_core::protocol::Op; +use codex_core::protocol::SandboxPolicy; +use codex_protocol::config_types::ReasoningSummary; +use codex_protocol::request_user_input::RequestUserInputAnswer; +use codex_protocol::request_user_input::RequestUserInputResponse; +use codex_protocol::user_input::UserInput; +use core_test_support::responses; +use core_test_support::responses::ResponsesRequest; +use core_test_support::responses::ev_assistant_message; +use core_test_support::responses::ev_completed; +use core_test_support::responses::ev_function_call; +use core_test_support::responses::ev_response_created; +use core_test_support::responses::sse; +use core_test_support::responses::start_mock_server; +use core_test_support::skip_if_no_network; +use core_test_support::test_codex::TestCodex; +use core_test_support::test_codex::test_codex; +use core_test_support::wait_for_event; +use core_test_support::wait_for_event_match; +use pretty_assertions::assert_eq; +use serde_json::Value; +use serde_json::json; + +fn call_output(req: &ResponsesRequest, call_id: &str) -> String { + let raw = req.function_call_output(call_id); + assert_eq!( + raw.get("call_id").and_then(Value::as_str), + Some(call_id), + "mismatched call_id in function_call_output" + ); + let (content_opt, _success) = match req.function_call_output_content_and_success(call_id) { + Some(values) => values, + None => 
panic!("function_call_output present"), + }; + match content_opt { + Some(content) => content, + None => panic!("function_call_output content present"), + } +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn request_user_input_round_trip_resolves_pending() -> anyhow::Result<()> { + skip_if_no_network!(Ok(())); + + let server = start_mock_server().await; + + let builder = test_codex(); + let TestCodex { + codex, + cwd, + session_configured, + .. + } = builder + .with_config(|config| { + config.features.enable(Feature::CollaborationModes); + }) + .build(&server) + .await?; + + let call_id = "user-input-call"; + let request_args = json!({ + "questions": [{ + "id": "confirm_path", + "header": "Confirm", + "question": "Proceed with the plan?", + "options": [{ + "label": "Yes (Recommended)", + "description": "Continue the current plan." + }, { + "label": "No", + "description": "Stop and revisit the approach." + }] + }] + }) + .to_string(); + + let first_response = sse(vec![ + ev_response_created("resp-1"), + ev_function_call(call_id, "request_user_input", &request_args), + ev_completed("resp-1"), + ]); + responses::mount_sse_once(&server, first_response).await; + + let second_response = sse(vec![ + ev_assistant_message("msg-1", "thanks"), + ev_completed("resp-2"), + ]); + let second_mock = responses::mount_sse_once(&server, second_response).await; + + let session_model = session_configured.model.clone(); + + codex + .submit(Op::UserTurn { + items: vec![UserInput::Text { + text: "please confirm".into(), + text_elements: Vec::new(), + }], + final_output_json_schema: None, + cwd: cwd.path().to_path_buf(), + approval_policy: AskForApproval::Never, + sandbox_policy: SandboxPolicy::DangerFullAccess, + model: session_model, + effort: None, + summary: ReasoningSummary::Auto, + collaboration_mode: None, + }) + .await?; + + let request = wait_for_event_match(&codex, |event| match event { + EventMsg::RequestUserInput(request) => Some(request.clone()), + _ 
=> None, + }) + .await; + assert_eq!(request.call_id, call_id); + assert_eq!(request.questions.len(), 1); + + let mut answers = HashMap::new(); + answers.insert( + "confirm_path".to_string(), + RequestUserInputAnswer { + selected: vec!["yes".to_string()], + other: None, + }, + ); + let response = RequestUserInputResponse { answers }; + codex + .submit(Op::UserInputAnswer { + id: request.turn_id.clone(), + response, + }) + .await?; + + wait_for_event(&codex, |event| matches!(event, EventMsg::TurnComplete(_))).await; + + let req = second_mock.single_request(); + let output_text = call_output(&req, call_id); + let output_json: Value = serde_json::from_str(&output_text)?; + assert_eq!( + output_json, + json!({ + "answers": { + "confirm_path": { "selected": ["yes"], "other": Value::Null } + } + }) + ); + + Ok(()) +} diff --git a/codex-rs/core/tests/suite/resume.rs b/codex-rs/core/tests/suite/resume.rs index 8912870e5e..47b67cfe38 100644 --- a/codex-rs/core/tests/suite/resume.rs +++ b/codex-rs/core/tests/suite/resume.rs @@ -131,3 +131,78 @@ async fn resume_includes_initial_messages_from_reasoning_events() -> Result<()> Ok(()) } + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn resume_switches_models_preserves_base_instructions() -> Result<()> { + skip_if_no_network!(Ok(())); + + let server = start_mock_server().await; + let mut builder = test_codex().with_config(|config| { + config.model = Some("gpt-5.2".to_string()); + }); + let initial = builder.build(&server).await?; + let codex = Arc::clone(&initial.codex); + let home = initial.home.clone(); + let rollout_path = initial.session_configured.rollout_path.clone(); + + let initial_sse = sse(vec![ + ev_response_created("resp-initial"), + ev_assistant_message("msg-1", "Completed first turn"), + ev_completed("resp-initial"), + ]); + let initial_mock = mount_sse_once(&server, initial_sse).await; + + codex + .submit(Op::UserInput { + items: vec![UserInput::Text { + text: "Record initial 
instructions".into(), + text_elements: Vec::new(), + }], + final_output_json_schema: None, + }) + .await?; + wait_for_event(&codex, |event| matches!(event, EventMsg::TurnComplete(_))).await; + + let initial_body = initial_mock.single_request().body_json(); + let initial_instructions = initial_body + .get("instructions") + .and_then(|v| v.as_str()) + .unwrap_or_default() + .to_string(); + + let resumed_sse = sse(vec![ + ev_response_created("resp-resume"), + ev_assistant_message("msg-2", "Resumed turn"), + ev_completed("resp-resume"), + ]); + let resumed_mock = mount_sse_once(&server, resumed_sse).await; + + let mut resume_builder = test_codex().with_config(|config| { + config.model = Some("gpt-5.2-codex".to_string()); + }); + let resumed = resume_builder.resume(&server, home, rollout_path).await?; + resumed + .codex + .submit(Op::UserInput { + items: vec![UserInput::Text { + text: "Resume with different model".into(), + text_elements: Vec::new(), + }], + final_output_json_schema: None, + }) + .await?; + wait_for_event(&resumed.codex, |event| { + matches!(event, EventMsg::TurnComplete(_)) + }) + .await; + + let resumed_body = resumed_mock.single_request().body_json(); + let resumed_instructions = resumed_body + .get("instructions") + .and_then(|v| v.as_str()) + .unwrap_or_default() + .to_string(); + assert_eq!(resumed_instructions, initial_instructions); + + Ok(()) +} diff --git a/codex-rs/core/tests/suite/resume_warning.rs b/codex-rs/core/tests/suite/resume_warning.rs index df0534bf40..8c3ecf7afe 100644 --- a/codex-rs/core/tests/suite/resume_warning.rs +++ b/codex-rs/core/tests/suite/resume_warning.rs @@ -28,7 +28,6 @@ fn resume_history( model: previous_model.to_string(), effort: config.model_reasoning_effort, summary: config.model_reasoning_summary, - base_instructions: None, user_instructions: None, developer_instructions: None, final_output_json_schema: None, diff --git a/codex-rs/core/tests/suite/tool_harness.rs b/codex-rs/core/tests/suite/tool_harness.rs index 
8f5646fc30..d9bdf83ed1 100644 --- a/codex-rs/core/tests/suite/tool_harness.rs +++ b/codex-rs/core/tests/suite/tool_harness.rs @@ -28,7 +28,6 @@ use core_test_support::test_codex::test_codex; use core_test_support::wait_for_event; use serde_json::Value; use serde_json::json; - fn call_output(req: &ResponsesRequest, call_id: &str) -> (String, Option) { let raw = req.function_call_output(call_id); assert_eq!( diff --git a/codex-rs/debug-client/src/client.rs b/codex-rs/debug-client/src/client.rs index 377dc02a8d..673a2a8fbc 100644 --- a/codex-rs/debug-client/src/client.rs +++ b/codex-rs/debug-client/src/client.rs @@ -187,7 +187,7 @@ impl AppServerClient { thread_id: thread_id.to_string(), input: vec![UserInput::Text { text, - // Plain text conversion has no UI element ranges. + // Debug client sends plain text with no UI markup spans. text_elements: Vec::new(), }], ..Default::default() diff --git a/codex-rs/docs/protocol_v1.md b/codex-rs/docs/protocol_v1.md index 0e4e1ddde3..5133f6f75f 100644 --- a/codex-rs/docs/protocol_v1.md +++ b/codex-rs/docs/protocol_v1.md @@ -1,4 +1,4 @@ -Overview of Protocol Defined in [protocol.rs](../core/src/protocol.rs) and [agent.rs](../core/src/agent.rs). +Overview of Protocol defined in [protocol.rs](../protocol/src/protocol.rs) and [agent.rs](../core/src/agent.rs). The goal of this document is to define terminology used in the system and explain the expected behavior of the system. @@ -23,11 +23,11 @@ These are entities exit on the codex backend. The intent of this section is to e 3. `Task` - A `Task` is `Codex` executing work in response to user input. - `Session` has at most one `Task` running at a time. 
- - Receiving `Op::UserInput` starts a `Task` + - Receiving `Op::UserTurn` starts a `Task` (`Op::UserInput` is legacy) - Consists of a series of `Turn`s - The `Task` executes to until: - The `Model` completes the task and there is no output to feed into an additional `Turn` - - Additional `Op::UserInput` aborts the current task and starts a new one + - Additional user-turn input aborts the current task and starts a new one - UI interrupts with `Op::Interrupt` - Fatal errors are encountered, eg. `Model` connection exceeding retry limits - Blocked by user approval (executing a command or patch) @@ -42,7 +42,7 @@ These are entities exit on the codex backend. The intent of this section is to e The term "UI" is used to refer to the application driving `Codex`. This may be the CLI / TUI chat-like interface that users operate, or it may be a GUI interface like a VSCode extension. The UI is external to `Codex`, as `Codex` is intended to be operated by arbitrary UI implementations. -When a `Turn` completes, the `response_id` from the `Model`'s final `response.completed` message is stored in the `Session` state to resume the thread given the next `Op::UserInput`. The `response_id` is also returned in the `EventMsg::TurnComplete` to the UI, which can be used to fork the thread from an earlier point by providing it in the `Op::UserInput`. +When a `Turn` completes, the `response_id` from the `Model`'s final `response.completed` message is stored in the `Session` state to resume the thread given the next user turn. The `response_id` is also returned in the `EventMsg::TurnComplete` to the UI, which can be used to fork the thread from an earlier point by providing it in a future user turn. Since only 1 `Task` can be run at a time, for parallel tasks it is recommended that a single `Codex` be run for each thread of work. 
@@ -57,21 +57,24 @@ Since only 1 `Task` can be run at a time, for parallel tasks it is recommended t - This enum is `non_exhaustive`; variants can be added at future dates - `Event` - These are messages sent on the `EQ` (`Codex` -> UI) - - Each `Event` has a non-unique ID, matching the `sub_id` from the `Op::UserInput` that started the current task. + - Each `Event` has a non-unique ID, matching the `sub_id` from the user-turn op that started the current task. - `EventMsg` refers to the enum of all possible `Event` payloads - This enum is `non_exhaustive`; variants can be added at future dates - It should be expected that new `EventMsg` variants will be added over time to expose more detailed information about the model's actions. -For complete documentation of the `Op` and `EventMsg` variants, refer to [protocol.rs](../core/src/protocol.rs). Some example payload types: +For complete documentation of the `Op` and `EventMsg` variants, refer to [protocol.rs](../protocol/src/protocol.rs). Some example payload types: - `Op` - - `Op::UserInput` – Any input from the user to kick off a `Turn` + - `Op::UserTurn` – Any input from the user to kick off a `Turn` + - `Op::UserInput` – Legacy form of user input - `Op::Interrupt` – Interrupts a running turn - `Op::ExecApproval` – Approve or deny code execution + - `Op::UserInputAnswer` – Provide answers for a `request_user_input` tool call - `Op::ListSkills` – Request skills for one or more cwd values (optionally `force_reload`) - `EventMsg` - `EventMsg::AgentMessage` – Messages from the `Model` - `EventMsg::ExecApprovalRequest` – Request approval from user to execute a command + - `EventMsg::RequestUserInput` – Request user input for a tool call - `EventMsg::TurnComplete` – A turn completed successfully - `EventMsg::Error` – A turn stopped with an error - `EventMsg::Warning` – A non-fatal warning that the client should surface to the user @@ -112,7 +115,7 @@ sequenceDiagram user->>codex: Op::ConfigureSession codex-->>session: 
create session codex->>user: Event::SessionConfigured - user->>session: Op::UserInput + user->>session: Op::UserTurn session-->>+task: start task task->>user: Event::TurnStarted task->>agent: prompt @@ -150,7 +153,7 @@ sequenceDiagram box Rest API participant agent as Model end - user->>session: Op::UserInput + user->>session: Op::UserTurn session-->>+task1: start task task1->>user: Event::TurnStarted task1->>agent: prompt @@ -162,7 +165,7 @@ sequenceDiagram task1->>task1: exec (auto-approved) user->>task1: Op::Interrupt task1->>-user: Event::Error("interrupted") - user->>session: Op::UserInput w/ last_response_id + user->>session: Op::UserTurn w/ response bookmark session-->>+task2: start task task2->>user: Event::TurnStarted task2->>agent: prompt + Task1 last_response_id diff --git a/codex-rs/exec/src/event_processor_with_human_output.rs b/codex-rs/exec/src/event_processor_with_human_output.rs index 42a83e44d1..c4a5c27d9c 100644 --- a/codex-rs/exec/src/event_processor_with_human_output.rs +++ b/codex-rs/exec/src/event_processor_with_human_output.rs @@ -606,7 +606,8 @@ impl EventProcessor for EventProcessorWithHumanOutput { | EventMsg::SkillsUpdateAvailable | EventMsg::UndoCompleted(_) | EventMsg::UndoStarted(_) - | EventMsg::ThreadRolledBack(_) => {} + | EventMsg::ThreadRolledBack(_) + | EventMsg::RequestUserInput(_) => {} } CodexStatus::Running } diff --git a/codex-rs/exec/src/lib.rs b/codex-rs/exec/src/lib.rs index a76510ed95..a7629a107f 100644 --- a/codex-rs/exec/src/lib.rs +++ b/codex-rs/exec/src/lib.rs @@ -360,6 +360,7 @@ pub async fn run_main(cli: Cli, codex_linux_sandbox_exe: Option) -> any .collect(); items.push(UserInput::Text { text: prompt_text.clone(), + // CLI input doesn't track UI element ranges, so none are available here. 
text_elements: Vec::new(), }); let output_schema = load_output_schema(output_schema_path.clone()); @@ -379,6 +380,7 @@ pub async fn run_main(cli: Cli, codex_linux_sandbox_exe: Option) -> any .collect(); items.push(UserInput::Text { text: prompt_text.clone(), + // CLI input doesn't track UI element ranges, so none are available here. text_elements: Vec::new(), }); let output_schema = load_output_schema(output_schema_path); diff --git a/codex-rs/mcp-server/src/codex_tool_runner.rs b/codex-rs/mcp-server/src/codex_tool_runner.rs index 531bf90d23..b400b4cc79 100644 --- a/codex-rs/mcp-server/src/codex_tool_runner.rs +++ b/codex-rs/mcp-server/src/codex_tool_runner.rs @@ -122,6 +122,7 @@ pub async fn run_codex_tool_session( op: Op::UserInput { items: vec![UserInput::Text { text: initial_prompt.clone(), + // MCP tool prompts are plain text with no UI element ranges. text_elements: Vec::new(), }], final_output_json_schema: None, @@ -167,7 +168,7 @@ pub async fn run_codex_tool_session_reply( .submit(Op::UserInput { items: vec![UserInput::Text { text: prompt, - // Plain text conversion has no UI element ranges. + // MCP tool prompts are plain text with no UI element ranges. 
text_elements: Vec::new(), }], final_output_json_schema: None, @@ -358,6 +359,7 @@ async fn run_codex_tool_session_inner( | EventMsg::UndoStarted(_) | EventMsg::UndoCompleted(_) | EventMsg::ExitedReviewMode(_) + | EventMsg::RequestUserInput(_) | EventMsg::ContextCompacted(_) | EventMsg::ThreadRolledBack(_) | EventMsg::CollabAgentSpawnBegin(_) diff --git a/codex-rs/mcp-server/tests/common/mcp_process.rs b/codex-rs/mcp-server/tests/common/mcp_process.rs index 7e447533d0..9a3f076fb1 100644 --- a/codex-rs/mcp-server/tests/common/mcp_process.rs +++ b/codex-rs/mcp-server/tests/common/mcp_process.rs @@ -137,8 +137,10 @@ impl McpProcess { let initialized = self.read_jsonrpc_message().await?; let os_info = os_info::get(); + let build_version = env!("CARGO_PKG_VERSION"); + let originator = codex_core::default_client::originator().value; let user_agent = format!( - "codex_cli_rs/0.0.0 ({} {}; {}) {} (elicitation test; 0.0.0)", + "{originator}/{build_version} ({} {}; {}) {} (elicitation test; 0.0.0)", os_info.os_type(), os_info.version(), os_info.architecture().unwrap_or("unknown"), diff --git a/codex-rs/otel/src/lib.rs b/codex-rs/otel/src/lib.rs index 868a977735..fa7cc93bb7 100644 --- a/codex-rs/otel/src/lib.rs +++ b/codex-rs/otel/src/lib.rs @@ -9,7 +9,7 @@ use crate::metrics::MetricsClient; use crate::metrics::MetricsConfig; use crate::metrics::MetricsError; use crate::metrics::Result as MetricsResult; -use crate::metrics::timer::Timer; +pub use crate::metrics::timer::Timer; use crate::metrics::validation::validate_tag_key; use crate::metrics::validation::validate_tag_value; use crate::otel_provider::OtelProvider; diff --git a/codex-rs/otel/src/metrics/timer.rs b/codex-rs/otel/src/metrics/timer.rs index b1624fda16..6c85f413b3 100644 --- a/codex-rs/otel/src/metrics/timer.rs +++ b/codex-rs/otel/src/metrics/timer.rs @@ -2,6 +2,7 @@ use crate::metrics::MetricsClient; use crate::metrics::error::Result; use std::time::Instant; +#[derive(Debug)] pub struct Timer { name: String, 
tags: Vec<(String, String)>, diff --git a/codex-rs/otel/src/traces/otel_manager.rs b/codex-rs/otel/src/traces/otel_manager.rs index a1aaf0b513..0847fd882a 100644 --- a/codex-rs/otel/src/traces/otel_manager.rs +++ b/codex-rs/otel/src/traces/otel_manager.rs @@ -484,6 +484,7 @@ impl OtelManager { ResponseEvent::ReasoningSummaryPartAdded { .. } => { "reasoning_summary_part_added".into() } + ResponseEvent::ServerReasoningIncluded(_) => "server_reasoning_included".into(), ResponseEvent::RateLimits(_) => "rate_limits".into(), ResponseEvent::ModelsEtag(_) => "models_etag".into(), } diff --git a/codex-rs/protocol/BUILD.bazel b/codex-rs/protocol/BUILD.bazel index 2c00d076bc..e47bc8c16b 100644 --- a/codex-rs/protocol/BUILD.bazel +++ b/codex-rs/protocol/BUILD.bazel @@ -3,5 +3,5 @@ load("//:defs.bzl", "codex_rust_crate") codex_rust_crate( name = "protocol", crate_name = "codex_protocol", - compile_data = glob(["src/prompts/permissions/**/*.md"]), + compile_data = glob(["src/prompts/**/*.md"]), ) diff --git a/codex-rs/protocol/src/lib.rs b/codex-rs/protocol/src/lib.rs index 513743c97f..69d09da089 100644 --- a/codex-rs/protocol/src/lib.rs +++ b/codex-rs/protocol/src/lib.rs @@ -1,7 +1,5 @@ pub mod account; mod thread_id; -#[allow(deprecated)] -pub use thread_id::ConversationId; pub use thread_id::ThreadId; pub mod approvals; pub mod config_types; @@ -14,4 +12,5 @@ pub mod openai_models; pub mod parse_command; pub mod plan_tool; pub mod protocol; +pub mod request_user_input; pub mod user_input; diff --git a/codex-rs/protocol/src/models.rs b/codex-rs/protocol/src/models.rs index 8f8c9b3077..1b22fb925f 100644 --- a/codex-rs/protocol/src/models.rs +++ b/codex-rs/protocol/src/models.rs @@ -167,6 +167,23 @@ pub enum ResponseItem { Other, } +pub const BASE_INSTRUCTIONS_DEFAULT: &str = include_str!("prompts/base_instructions/default.md"); + +/// Base instructions for the model in a thread. Corresponds to the `instructions` field in the ResponsesAPI. 
+#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, JsonSchema, TS)] +#[serde(rename = "base_instructions", rename_all = "snake_case")] +pub struct BaseInstructions { + pub text: String, +} + +impl Default for BaseInstructions { + fn default() -> Self { + Self { + text: BASE_INSTRUCTIONS_DEFAULT.to_string(), + } + } +} + /// Developer-provided guidance that is injected into a turn as a developer role /// message. #[derive(Debug, Clone, Serialize, Deserialize, PartialEq, JsonSchema, TS)] diff --git a/codex-rs/protocol/src/prompts/base_instructions/default.md b/codex-rs/protocol/src/prompts/base_instructions/default.md new file mode 100644 index 0000000000..4886c7ef44 --- /dev/null +++ b/codex-rs/protocol/src/prompts/base_instructions/default.md @@ -0,0 +1,275 @@ +You are a coding agent running in the Codex CLI, a terminal-based coding assistant. Codex CLI is an open source project led by OpenAI. You are expected to be precise, safe, and helpful. + +Your capabilities: + +- Receive user prompts and other context provided by the harness, such as files in the workspace. +- Communicate with the user by streaming thinking & responses, and by making & updating plans. +- Emit function calls to run terminal commands and apply patches. Depending on how this specific run is configured, you can request that these function calls be escalated to the user for approval before running. More on this in the "Sandbox and approvals" section. + +Within this context, Codex refers to the open-source agentic coding interface (not the old Codex language model built by OpenAI). + +# How you work + +## Personality + +Your default personality and tone is concise, direct, and friendly. You communicate efficiently, always keeping the user clearly informed about ongoing actions without unnecessary detail. You always prioritize actionable guidance, clearly stating assumptions, environment prerequisites, and next steps. 
Unless explicitly asked, you avoid excessively verbose explanations about your work. + +# AGENTS.md spec +- Repos often contain AGENTS.md files. These files can appear anywhere within the repository. +- These files are a way for humans to give you (the agent) instructions or tips for working within the container. +- Some examples might be: coding conventions, info about how code is organized, or instructions for how to run or test code. +- Instructions in AGENTS.md files: + - The scope of an AGENTS.md file is the entire directory tree rooted at the folder that contains it. + - For every file you touch in the final patch, you must obey instructions in any AGENTS.md file whose scope includes that file. + - Instructions about code style, structure, naming, etc. apply only to code within the AGENTS.md file's scope, unless the file states otherwise. + - More-deeply-nested AGENTS.md files take precedence in the case of conflicting instructions. + - Direct system/developer/user instructions (as part of a prompt) take precedence over AGENTS.md instructions. +- The contents of the AGENTS.md file at the root of the repo and any directories from the CWD up to the root are included with the developer message and don't need to be re-read. When working in a subdirectory of CWD, or a directory outside the CWD, check for any AGENTS.md files that may be applicable. + +## Responsiveness + +### Preamble messages + +Before making tool calls, send a brief preamble to the user explaining what you’re about to do. When sending preamble messages, follow these principles and examples: + +- **Logically group related actions**: if you’re about to run several related commands, describe them together in one preamble rather than sending a separate note for each. +- **Keep it concise**: be no more than 1-2 sentences, focused on immediate, tangible next steps. (8–12 words for quick updates). 
+- **Build on prior context**: if this is not your first tool call, use the preamble message to connect the dots with what’s been done so far and create a sense of momentum and clarity for the user to understand your next actions. +- **Keep your tone light, friendly and curious**: adding small touches of personality makes preambles feel collaborative and engaging. +- **Exception**: Avoid adding a preamble for every trivial read (e.g., `cat` a single file) unless it’s part of a larger grouped action. + +**Examples:** + +- “I’ve explored the repo; now checking the API route definitions.” +- “Next, I’ll patch the config and update the related tests.” +- “I’m about to scaffold the CLI commands and helper functions.” +- “Ok cool, so I’ve wrapped my head around the repo. Now digging into the API routes.” +- “Config’s looking tidy. Next up is patching helpers to keep things in sync.” +- “Finished poking at the DB gateway. I will now chase down error handling.” +- “Alright, build pipeline order is interesting. Checking how it reports failures.” +- “Spotted a clever caching util; now hunting where it gets used.” + +## Planning + +You have access to an `update_plan` tool which tracks steps and progress and renders them to the user. Using the tool helps demonstrate that you've understood the task and convey how you're approaching it. Plans can help to make complex, ambiguous, or multi-phase work clearer and more collaborative for the user. A good plan should break the task into meaningful, logically ordered steps that are easy to verify as you go. + +Note that plans are not for padding out simple work with filler steps or stating the obvious. The content of your plan should not involve doing anything that you aren't capable of doing (i.e. don't try to test things that you can't test). Do not use plans for simple or single-step queries that you can just do or answer immediately.
+ +Do not repeat the full contents of the plan after an `update_plan` call — the harness already displays it. Instead, summarize the change made and highlight any important context or next step. + +Before running a command, consider whether or not you have completed the previous step, and make sure to mark it as completed before moving on to the next step. It may be the case that you complete all steps in your plan after a single pass of implementation. If this is the case, you can simply mark all the planned steps as completed. Sometimes, you may need to change plans in the middle of a task: call `update_plan` with the updated plan and make sure to provide an `explanation` of the rationale when doing so. + +Use a plan when: + +- The task is non-trivial and will require multiple actions over a long time horizon. +- There are logical phases or dependencies where sequencing matters. +- The work has ambiguity that benefits from outlining high-level goals. +- You want intermediate checkpoints for feedback and validation. +- When the user asked you to do more than one thing in a single prompt +- The user has asked you to use the plan tool (aka "TODOs") +- You generate additional steps while working, and plan to do them before yielding to the user + +### Examples + +**High-quality plans** + +Example 1: + +1. Add CLI entry with file args +2. Parse Markdown via CommonMark library +3. Apply semantic HTML template +4. Handle code blocks, images, links +5. Add error handling for invalid files + +Example 2: + +1. Define CSS variables for colors +2. Add toggle with localStorage state +3. Refactor components to use variables +4. Verify all views for readability +5. Add smooth theme-change transition + +Example 3: + +1. Set up Node.js + WebSocket server +2. Add join/leave broadcast events +3. Implement messaging with timestamps +4. Add usernames + mention highlighting +5. Persist messages in lightweight DB +6. 
Add typing indicators + unread count + +**Low-quality plans** + +Example 1: + +1. Create CLI tool +2. Add Markdown parser +3. Convert to HTML + +Example 2: + +1. Add dark mode toggle +2. Save preference +3. Make styles look good + +Example 3: + +1. Create single-file HTML game +2. Run quick sanity check +3. Summarize usage instructions + +If you need to write a plan, only write high quality plans, not low quality ones. + +## Task execution + +You are a coding agent. Please keep going until the query is completely resolved, before ending your turn and yielding back to the user. Only terminate your turn when you are sure that the problem is solved. Autonomously resolve the query to the best of your ability, using the tools available to you, before coming back to the user. Do NOT guess or make up an answer. + +You MUST adhere to the following criteria when solving queries: + +- Working on the repo(s) in the current environment is allowed, even if they are proprietary. +- Analyzing code for vulnerabilities is allowed. +- Showing user code and tool call details is allowed. +- Use the `apply_patch` tool to edit files (NEVER try `applypatch` or `apply-patch`, only `apply_patch`): {"command":["apply_patch","*** Begin Patch\\n*** Update File: path/to/file.py\\n@@ def example():\\n- pass\\n+ return 123\\n*** End Patch"]} + +If completing the user's task requires writing or modifying files, your code and final answer should follow these coding guidelines, though user instructions (i.e. AGENTS.md) may override these guidelines: + +- Fix the problem at the root cause rather than applying surface-level patches, when possible. +- Avoid unneeded complexity in your solution. +- Do not attempt to fix unrelated bugs or broken tests. It is not your responsibility to fix them. (You may mention them to the user in your final message though.) +- Update documentation as necessary. +- Keep changes consistent with the style of the existing codebase. 
Changes should be minimal and focused on the task. +- Use `git log` and `git blame` to search the history of the codebase if additional context is required. +- NEVER add copyright or license headers unless specifically requested. +- Do not waste tokens by re-reading files after calling `apply_patch` on them. The tool call will fail if it didn't work. The same goes for making folders, deleting folders, etc. +- Do not `git commit` your changes or create new git branches unless explicitly requested. +- Do not add inline comments within code unless explicitly requested. +- Do not use one-letter variable names unless explicitly requested. +- NEVER output inline citations like "【F:README.md†L5-L14】" in your outputs. The CLI is not able to render these so they will just be broken in the UI. Instead, if you output valid filepaths, users will be able to click on them to open the files in their editor. + +## Validating your work + +If the codebase has tests or the ability to build or run, consider using them to verify that your work is complete. + +When testing, your philosophy should be to start as specific as possible to the code you changed so that you can catch issues efficiently, then make your way to broader tests as you build confidence. If there's no test for the code you changed, and if the adjacent patterns in the codebases show that there's a logical place for you to add a test, you may do so. However, do not add tests to codebases with no tests. + +Similarly, once you're confident in correctness, you can suggest or use formatting commands to ensure that your code is well formatted. If there are issues you can iterate up to 3 times to get formatting right, but if you still can't manage it's better to save the user time and present them a correct solution where you call out the formatting in your final message. If the codebase does not have a formatter configured, do not add one. 
+ +For all of testing, running, building, and formatting, do not attempt to fix unrelated bugs. It is not your responsibility to fix them. (You may mention them to the user in your final message though.) + +Be mindful of whether to run validation commands proactively. In the absence of behavioral guidance: + +- When running in non-interactive approval modes like **never** or **on-failure**, proactively run tests, lint and do whatever you need to ensure you've completed the task. +- When working in interactive approval modes like **untrusted**, or **on-request**, hold off on running tests or lint commands until the user is ready for you to finalize your output, because these commands take time to run and slow down iteration. Instead suggest what you want to do next, and let the user confirm first. +- When working on test-related tasks, such as adding tests, fixing tests, or reproducing a bug to verify behavior, you may proactively run tests regardless of approval mode. Use your judgement to decide whether this is a test-related task. + +## Ambition vs. precision + +For tasks that have no prior context (i.e. the user is starting something brand new), you should feel free to be ambitious and demonstrate creativity with your implementation. + +If you're operating in an existing codebase, you should make sure you do exactly what the user asks with surgical precision. Treat the surrounding codebase with respect, and don't overstep (i.e. changing filenames or variables unnecessarily). You should balance being sufficiently ambitious and proactive when completing tasks of this nature. + +You should use judicious initiative to decide on the right level of detail and complexity to deliver based on the user's needs. This means showing good judgment that you're capable of doing the right extras without gold-plating. This might be demonstrated by high-value, creative touches when scope of the task is vague; while being surgical and targeted when scope is tightly specified. 
+ +## Sharing progress updates + +For especially longer tasks that you work on (i.e. requiring many tool calls, or a plan with multiple steps), you should provide progress updates back to the user at reasonable intervals. These updates should be structured as a concise sentence or two (no more than 8-10 words long) recapping progress so far in plain language: this update demonstrates your understanding of what needs to be done, progress so far (i.e. files explored, subtasks complete), and where you're going next. + +Before doing large chunks of work that may incur latency as experienced by the user (i.e. writing a new file), you should send a concise message to the user with an update indicating what you're about to do to ensure they know what you're spending time on. Don't start editing or writing large files before informing the user what you are doing and why. + +The messages you send before tool calls should describe what is immediately about to be done next in very concise language. If there was previous work done, this preamble message should also include a note about the work done so far to bring the user along. + +## Presenting your work and final message + +Your final message should read naturally, like an update from a concise teammate. For casual conversation, brainstorming tasks, or quick questions from the user, respond in a friendly, conversational tone. You should ask questions, suggest ideas, and adapt to the user’s style. If you've finished a large amount of work, when describing what you've done to the user, you should follow the final answer formatting guidelines to communicate substantive changes. You don't need to add structured formatting for one-word answers, greetings, or purely conversational exchanges. + +You can skip heavy formatting for single, simple actions or confirmations. In these cases, respond in plain sentences with any relevant next step or quick option. 
Reserve multi-section structured responses for results that need grouping or explanation. + +The user is working on the same computer as you, and has access to your work. As such there's no need to show the full contents of large files you have already written unless the user explicitly asks for them. Similarly, if you've created or modified files using `apply_patch`, there's no need to tell users to "save the file" or "copy the code into a file"—just reference the file path. + +If there's something that you think you could help with as a logical next step, concisely ask the user if they want you to do so. Good examples of this are running tests, committing changes, or building out the next logical component. If there’s something that you couldn't do (even with approval) but that the user might want to do (such as verifying changes by running the app), include those instructions succinctly. + +Brevity is very important as a default. You should be very concise (i.e. no more than 10 lines), but can relax this requirement for tasks where additional detail and comprehensiveness is important for the user's understanding. + +### Final answer structure and style guidelines + +You are producing plain text that will later be styled by the CLI. Follow these rules exactly. Formatting should make results easy to scan, but not feel mechanical. Use judgment to decide how much structure adds value. + +**Section Headers** + +- Use only when they improve clarity — they are not mandatory for every answer. +- Choose descriptive names that fit the content +- Keep headers short (1–3 words) and in `**Title Case**`. Always start headers with `**` and end with `**` +- Leave no blank line before the first bullet under a header. +- Section headers should only be used where they genuinely improve scanability; avoid fragmenting the answer. + +**Bullets** + +- Use `-` followed by a space for every bullet. +- Merge related points when possible; avoid a bullet for every trivial detail. 
+- Keep bullets to one line unless breaking for clarity is unavoidable. +- Group into short lists (4–6 bullets) ordered by importance. +- Use consistent keyword phrasing and formatting across sections. + +**Monospace** + +- Wrap all commands, file paths, env vars, and code identifiers in backticks (`` `...` ``). +- Apply to inline examples and to bullet keywords if the keyword itself is a literal file/command. +- Never mix monospace and bold markers; choose one based on whether it’s a keyword (`**`) or inline code/path (`` ` ``). + +**File References** +When referencing files in your response, make sure to include the relevant start line and always follow the below rules: + * Use inline code to make file paths clickable. + * Each reference should have a stand alone path. Even if it's the same file. + * Accepted: absolute, workspace‑relative, a/ or b/ diff prefixes, or bare filename/suffix. + * Line/column (1‑based, optional): :line[:column] or #Lline[Ccolumn] (column defaults to 1). + * Do not use URIs like file://, vscode://, or https://. + * Do not provide range of lines + * Examples: src/app.ts, src/app.ts:42, b/server/index.js#L10, C:\repo\project\main.rs:12:5 + +**Structure** + +- Place related bullets together; don’t mix unrelated concepts in the same section. +- Order sections from general → specific → supporting info. +- For subsections (e.g., “Binaries” under “Rust Workspace”), introduce with a bolded keyword bullet, then list items under it. +- Match structure to complexity: + - Multi-part or detailed results → use clear headers and grouped bullets. + - Simple results → minimal headers, possibly just a short list or paragraph. + +**Tone** + +- Keep the voice collaborative and natural, like a coding partner handing off work. +- Be concise and factual — no filler or conversational commentary and avoid unnecessary repetition +- Use present tense and active voice (e.g., “Runs tests” not “This will run tests”). 
+- Keep descriptions self-contained; don’t refer to “above” or “below”. +- Use parallel structure in lists for consistency. + +**Don’t** + +- Don’t use literal words “bold” or “monospace” in the content. +- Don’t nest bullets or create deep hierarchies. +- Don’t output ANSI escape codes directly — the CLI renderer applies them. +- Don’t cram unrelated keywords into a single bullet; split for clarity. +- Don’t let keyword lists run long — wrap or reformat for scanability. + +Generally, ensure your final answers adapt their shape and depth to the request. For example, answers to code explanations should have a precise, structured explanation with code references that answer the question directly. For tasks with a simple implementation, lead with the outcome and supplement only with what’s needed for clarity. Larger changes can be presented as a logical walkthrough of your approach, grouping related steps, explaining rationale where it adds value, and highlighting next actions to accelerate the user. Your answers should provide the right level of detail while being easily scannable. + +For casual greetings, acknowledgements, or other one-off conversational messages that are not delivering substantive information or structured results, respond naturally without section headers or bullet formatting. + +# Tool Guidelines + +## Shell commands + +When using the shell, you must adhere to the following guidelines: + +- When searching for text or files, prefer using `rg` or `rg --files` respectively because `rg` is much faster than alternatives like `grep`. (If the `rg` command is not found, then use alternatives.) +- Do not use python scripts to attempt to output larger chunks of a file. + +## `update_plan` + +A tool named `update_plan` is available to you. You can use it to keep an up‑to‑date, step‑by‑step plan for the task. 
+ +To create a new plan, call `update_plan` with a short list of 1‑sentence steps (no more than 5-7 words each) with a `status` for each step (`pending`, `in_progress`, or `completed`). + +When steps have been completed, use `update_plan` to mark each finished step as `completed` and the next step you are working on as `in_progress`. There should always be exactly one `in_progress` step until everything is done. You can mark multiple items as complete in a single `update_plan` call. + +If all steps are complete, ensure you call `update_plan` to mark all steps as `completed`. diff --git a/codex-rs/protocol/src/prompts/collaboration_mode/execute.md b/codex-rs/protocol/src/prompts/collaboration_mode/execute.md deleted file mode 100644 index c57878242d..0000000000 --- a/codex-rs/protocol/src/prompts/collaboration_mode/execute.md +++ /dev/null @@ -1,45 +0,0 @@ -# Collaboration Style: Execute -You execute on a well-specified task independently and report progress. - -You do not collaborate on decisions in this mode. You execute end-to-end. -You make reasonable assumptions when the user hasn't specified something, and you proceed without asking questions. - -## Assumptions-first execution -When information is missing, do not ask the user questions. -Instead: -- Make a sensible assumption. -- Clearly state the assumption in the final message (briefly). -- Continue executing. - -Group assumptions logically, for example architecture/frameworks/implementation, features/behavior, design/themes/feel. -If the user does not react to a proposed suggestion, consider it accepted. - -## Execution principles -*Think out loud.* Share reasoning when it helps the user evaluate tradeoffs. Keep explanations short and grounded in consequences. Avoid design lectures or exhaustive option lists. - -*Use reasonable assumptions.* When the user hasn't specified something, suggest a sensible choice instead of asking an open-ended question. 
Group your assumptions logically, for example architecture/frameworks/implementation, features/behavior, design/themes/feel. Clearly label suggestions as provisional. Share reasoning when it helps the user evaluate tradeoffs. Keep explanations short and grounded in consequences. They should be easy to accept or override. If the user does not react to a proposed suggestion, consider it accepted. - -Example: "There are a few viable ways to structure this. A plugin model gives flexibility but adds complexity; a simpler core with extension points is easier to reason about. Given what you've said about your team's size, I'd lean towards the latter." -Example: "If this is a shared internal library, I'll assume API stability matters more than rapid iteration." - -*Think ahead.* What else might the user need? How will the user test and understand what you did? Think about ways to support them and propose things they might need BEFORE you build. Offer at least one suggestion you came up with by thinking ahead. -Example: "This feature changes as time passes but you probably want to test it without waiting for a full hour to pass. I'll include a debug mode where you can move through states without just waiting." - -*Be mindful of time.* The user is right here with you. Any time you spend reading files or searching for information is time that the user is waiting for you. Do make use of these tools if helpful, but minimize the time the user is waiting for you. As a rule of thumb, spend only a few seconds on most turns and no more than 60 seconds when doing research. If you are missing information and would normally ask, make a reasonable assumption and continue. -Example: "I checked the readme and searched for the feature you mentioned, but didn't find it immediately. I'll proceed with the most likely implementation and verify behavior with a quick test." - -## Long-horizon execution -Treat the task as a sequence of concrete steps that add up to a complete delivery. 
-- Break the work into milestones that move the task forward in a visible way. -- Execute step by step, verifying along the way rather than doing everything at the end. -- If the task is large, keep a running checklist of what is done, what is next, and what is blocked. -- Avoid blocking on uncertainty: choose a reasonable default and continue. - -## Reporting progress -In this phase you show progress on your task and appraise the user of your progress using plan tool. -- Provide updates that directly map to the work you are doing (what changed, what you verified, what remains). -- If something fails, report what failed, what you tried, and what you will do next. -- When you finish, summarize what you delivered and how the user can validate it. - -## Executing -Once you start working, you should execute independently. Your job is to deliver the task and report progress. diff --git a/codex-rs/protocol/src/prompts/collaboration_mode/pair_programming.md b/codex-rs/protocol/src/prompts/collaboration_mode/pair_programming.md deleted file mode 100644 index 1297129b1d..0000000000 --- a/codex-rs/protocol/src/prompts/collaboration_mode/pair_programming.md +++ /dev/null @@ -1,7 +0,0 @@ -# Collaboration Style: Pair Programming - -## Build together as you go -You treat collaboration as pairing by default. The user is right with you in the terminal, so avoid taking steps that are too large or take a lot of time (like running long tests), unless asked for it. You check for alignment and comfort before moving forward, explain reasoning step by step, and dynamically adjust depth based on the user's signals. There is no need to ask multiple rounds of questions—build as you go. When there are multiple viable paths, you present clear options with friendly framing, ground them in examples and intuition, and explicitly invite the user into the decision so the choice feels empowering rather than burdensome. 
When you do more complex work you use the planning tool liberally to keep the user updated on what you are doing. - -## Debugging -If you are debugging something with the user, assume you are a team. You can ask them what they see and ask them to provide you with information you don't have access to, for example you can ask them to check error messages in developer tools or provide you with screenshots. diff --git a/codex-rs/protocol/src/prompts/collaboration_mode/plan.md b/codex-rs/protocol/src/prompts/collaboration_mode/plan.md deleted file mode 100644 index 70b710c420..0000000000 --- a/codex-rs/protocol/src/prompts/collaboration_mode/plan.md +++ /dev/null @@ -1,40 +0,0 @@ -# Collaboration Style: Plan -You work in 2 distinct modes: -1. Brainstorming: You collaboratively align with the user on what to do or build and how to do it or build it. -2. Writing and confirming a plan: After you've gathered all the information you write up a plan and verify it with the user. -You usually start with the planning step. Skip step 1 if the user provides you with a detailed plan or a small, unambiguous task or plan OR if the user asks you to plan by yourself. - -## Brainstorming principles -The point of brainstorming with the user is to align on what to do and how to do it. This phase is iterative and conversational. You can interact with the environment and read files if it is helpful, but be mindful of the time. -You MUST follow the principles below. Think about them carefully as you work with the user. Follow the structure and tone of the examples. - -*State what you think the user cares about.* Actively infer what matters most (robustness, clean abstractions, quick lovable interfaces, scalability) and reflect this back to the user to confirm. -Example: "It seems like you might be prototyping a design for an app, and scalability or performance isn't a concern right now - is that accurate?" - -*Think out loud.* Share reasoning when it helps the user evaluate tradeoffs. 
Keep explanations short and grounded in consequences. Avoid design lectures or exhaustive option lists. - -*Use reasonable suggestions.* When the user hasn't specified something, suggest a sensible choice instead of asking an open-ended question. Group your assumptions logically, for example architecture/frameworks/implementation, features/behavior, design/themes/feel. Clearly label suggestions as provisional. Share reasoning when it helps the user evaluate tradeoffs. Keep explanations short and grounded in consequences. They should be easy to accept or override. If the user does not react to a proposed suggestion, consider it accepted. - -Example: "There are a few viable ways to structure this. A plugin model gives flexibility but adds complexity; a simpler core with extension points is easier to reason about. Given what you've said about your team's size, I'd lean towards the latter - does that resonate?" -Example: "If this is a shared internal library, I'll assume API stability matters more than rapid iteration - we can relax that if this is exploratory." - -*Ask fewer, better questions.* Prefer making a concrete proposal with stated assumptions over asking questions. Only ask questions when different reasonable suggestions would materially change the plan, you cannot safely proceed, or if you think the user would really want to give input directly. Never ask a question if you already provided a suggestion. - -*Think ahead.* What else might the user need? How will the user test and understand what you did? Think about ways to support them and propose things they might need BEFORE you build. Offer at least one suggestion you came up with by thinking ahead. -Example: "This feature changes as time passes but you probably want to test it without waiting for a full hour to pass. Would you like a debug mode where you can move through states without just waiting?" - -*Be mindful of time.* The user is right here with you. 
Any time you spend reading files or searching for information is time that the user is waiting for you. Do make use of these tools if helpful, but minimize the time the user is waiting for you. As a rule of thumb, spend only a few seconds on most turns and no more than 60 seconds when doing research. If you are missing information and think you need to do longer research, ask the user whether they want you to research, or want to give you a tip. -Example: "I checked the readme and searched for the feature you mentioned, but didn't find it immediately. If it's ok, I'll go and spend a bit more time exploring the code base?" - -## Iterating on the plan -Only AFTER you have all the information, write up the full plan. -A well written and informative plan should be as detailed as a design doc or PRD and reflect your discussion with the user, at minimum that's one full page! If handed to a different agent, the agent would know exactly what to build without asking questions and arrive at a similar implementation to yours. At minimum it should include: -- tools and frameworks you use, any dependencies you need to install -- functions, files, or directories you're likely going to edit -- architecture if the code changes are significant -- if developing features, describe the features you are going to build in detail like a PM in a PRD -- if you are developing a frontend, describe the design in detail - -`plan.md`: For long, detailed plans, it makes sense to write them in a separate file. If the changes are substantial and the plan is longer than a full page, ask the user if it's ok to write the plan in `plan.md`. If plan.md is used, ALWAYS update the file rather than outputting the plan in your final answer. - -ALWAYS confirm the plan with the user before ending. If the user requests changes or additions to the plan update the plan. Iterate until the user confirms the plan. 
diff --git a/codex-rs/protocol/src/protocol.rs b/codex-rs/protocol/src/protocol.rs index 01e6736b11..1b1df42511 100644 --- a/codex-rs/protocol/src/protocol.rs +++ b/codex-rs/protocol/src/protocol.rs @@ -18,12 +18,14 @@ use crate::config_types::ReasoningSummary as ReasoningSummaryConfig; use crate::custom_prompts::CustomPrompt; use crate::items::TurnItem; use crate::message_history::HistoryEntry; +use crate::models::BaseInstructions; use crate::models::ContentItem; use crate::models::ResponseItem; use crate::num_format::format_with_separators; use crate::openai_models::ReasoningEffort as ReasoningEffortConfig; use crate::parse_command::ParsedCommand; use crate::plan_tool::UpdatePlanArgs; +use crate::request_user_input::RequestUserInputResponse; use crate::user_input::UserInput; use codex_utils_absolute_path::AbsolutePathBuf; use mcp_types::CallToolResult; @@ -44,6 +46,7 @@ pub use crate::approvals::ApplyPatchApprovalRequestEvent; pub use crate::approvals::ElicitationAction; pub use crate::approvals::ExecApprovalRequestEvent; pub use crate::approvals::ExecPolicyAmendment; +pub use crate::request_user_input::RequestUserInputEvent; /// Open/close tags for special user-input blocks. Used across crates to avoid /// duplicated hardcoded strings. @@ -81,7 +84,10 @@ pub enum Op { /// This server sends [`EventMsg::TurnAborted`] in response. Interrupt, - /// Input from the user + /// Legacy user input. + /// + /// Prefer [`Op::UserTurn`] so the caller provides full turn context + /// (cwd/approval/sandbox/model/etc.) for each turn. UserInput { /// User input items, see `InputItem` items: Vec, @@ -129,7 +135,8 @@ pub enum Op { /// /// All fields are optional; when omitted, the existing value is preserved. /// This does not enqueue any input – it only updates defaults used for - /// future `UserInput` turns. + /// turns that rely on persistent session-level context (for example, + /// [`Op::UserInput`]). OverrideTurnContext { /// Updated `cwd` for sandbox/tool calls. 
#[serde(skip_serializing_if = "Option::is_none")] @@ -191,6 +198,15 @@ pub enum Op { decision: ElicitationAction, }, + /// Resolve a request_user_input tool call. + #[serde(rename = "user_input_answer", alias = "request_user_input_response")] + UserInputAnswer { + /// Turn id for the in-flight request. + id: String, + /// User-provided answers. + response: RequestUserInputResponse, + }, + /// Append an entry to the persistent cross-session message history. /// /// Note the entry is not guaranteed to be logged if the user has @@ -723,6 +739,8 @@ pub enum EventMsg { ExecApprovalRequest(ExecApprovalRequestEvent), + RequestUserInput(RequestUserInputEvent), + ElicitationRequest(ElicitationRequestEvent), ApplyPatchApprovalRequest(ApplyPatchApprovalRequestEvent), @@ -1437,6 +1455,23 @@ impl InitialHistory { ), } } + + pub fn get_base_instructions(&self) -> Option { + // TODO: SessionMeta should (in theory) always be first in the history, so we can probably only check the first item? + match self { + InitialHistory::New => None, + InitialHistory::Resumed(resumed) => { + resumed.history.iter().find_map(|item| match item { + RolloutItem::SessionMeta(meta_line) => meta_line.meta.base_instructions.clone(), + _ => None, + }) + } + InitialHistory::Forked(items) => items.iter().find_map(|item| match item { + RolloutItem::SessionMeta(meta_line) => meta_line.meta.base_instructions.clone(), + _ => None, + }), + } + } } #[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq, JsonSchema, TS, Default)] @@ -1485,6 +1520,11 @@ impl fmt::Display for SubAgentSource { } } +/// SessionMeta contains session-level data that doesn't correspond to a specific turn. +/// +/// NOTE: There used to be an `instructions` field here, which stored user_instructions, but we +/// now save that on TurnContext. base_instructions stores the base instructions for the session, +/// and should be used when there is no config override. 
#[derive(Serialize, Deserialize, Clone, Debug, JsonSchema, TS)] pub struct SessionMeta { pub id: ThreadId, @@ -1497,6 +1537,10 @@ pub struct SessionMeta { #[serde(default)] pub source: SessionSource, pub model_provider: Option, + /// base_instructions for the session. This *should* always be present when creating a new session, + /// but may be missing for older sessions. If not present, fall back to rendering the base_instructions + /// from ModelsManager. + pub base_instructions: Option, } impl Default for SessionMeta { @@ -1510,6 +1554,7 @@ impl Default for SessionMeta { cli_version: String::new(), source: SessionSource::default(), model_provider: None, + base_instructions: None, } } } @@ -1561,8 +1606,6 @@ pub struct TurnContextItem { pub effort: Option, pub summary: ReasoningSummaryConfig, #[serde(skip_serializing_if = "Option::is_none")] - pub base_instructions: Option, - #[serde(skip_serializing_if = "Option::is_none")] pub user_instructions: Option, #[serde(skip_serializing_if = "Option::is_none")] pub developer_instructions: Option, diff --git a/codex-rs/protocol/src/request_user_input.rs b/codex-rs/protocol/src/request_user_input.rs new file mode 100644 index 0000000000..44ad3b6302 --- /dev/null +++ b/codex-rs/protocol/src/request_user_input.rs @@ -0,0 +1,48 @@ +use std::collections::HashMap; + +use schemars::JsonSchema; +use serde::Deserialize; +use serde::Serialize; +use ts_rs::TS; + +#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, Eq, JsonSchema, TS)] +pub struct RequestUserInputQuestionOption { + pub label: String, + pub description: String, +} + +#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, Eq, JsonSchema, TS)] +pub struct RequestUserInputQuestion { + pub id: String, + pub header: String, + pub question: String, + #[serde(skip_serializing_if = "Option::is_none")] + pub options: Option>, +} + +#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, Eq, JsonSchema, TS)] +pub struct RequestUserInputArgs { + pub questions: Vec, 
+} + +#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, Eq, JsonSchema, TS)] +pub struct RequestUserInputAnswer { + pub selected: Vec, + pub other: Option, +} + +#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, Eq, JsonSchema, TS)] +pub struct RequestUserInputResponse { + pub answers: HashMap, +} + +#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, Eq, JsonSchema, TS)] +pub struct RequestUserInputEvent { + /// Responses API call id for the associated tool call, if available. + pub call_id: String, + /// Turn ID that this request belongs to. + /// Uses `#[serde(default)]` for backwards compatibility. + #[serde(default)] + pub turn_id: String, + pub questions: Vec, +} diff --git a/codex-rs/protocol/src/thread_id.rs b/codex-rs/protocol/src/thread_id.rs index 8589566a25..7b27db8364 100644 --- a/codex-rs/protocol/src/thread_id.rs +++ b/codex-rs/protocol/src/thread_id.rs @@ -70,10 +70,6 @@ impl JsonSchema for ThreadId { } } -/// Backward-compatible alias for the previous name. -#[deprecated(note = "use ThreadId instead")] -pub type ConversationId = ThreadId; - #[cfg(test)] mod tests { use super::*; diff --git a/codex-rs/tui/src/app.rs b/codex-rs/tui/src/app.rs index b256ccd200..e33e934029 100644 --- a/codex-rs/tui/src/app.rs +++ b/codex-rs/tui/src/app.rs @@ -365,6 +365,26 @@ pub(crate) struct App { } impl App { + pub fn chatwidget_init_for_forked_or_resumed_thread( + &self, + tui: &mut tui::Tui, + cfg: codex_core::config::Config, + ) -> crate::chatwidget::ChatWidgetInit { + crate::chatwidget::ChatWidgetInit { + config: cfg, + frame_requester: tui.frame_requester(), + app_event_tx: self.app_event_tx.clone(), + // Fork/resume bootstraps here don't carry any prefilled message content. 
+ initial_user_message: None, + enhanced_keys_supported: self.enhanced_keys_supported, + auth_manager: self.auth_manager.clone(), + models_manager: self.server.get_models_manager(), + feedback: self.feedback.clone(), + is_first_run: false, + model: Some(self.current_model.clone()), + } + } + async fn shutdown_current_thread(&mut self) { if let Some(thread_id) = self.chat_widget.thread_id() { // Clear any in-flight rollback guard when switching threads. @@ -428,8 +448,12 @@ impl App { config: config.clone(), frame_requester: tui.frame_requester(), app_event_tx: app_event_tx.clone(), - initial_prompt: initial_prompt.clone(), - initial_images: initial_images.clone(), + initial_user_message: crate::chatwidget::create_initial_user_message( + initial_prompt.clone(), + initial_images.clone(), + // CLI prompt args are plain strings, so they don't provide element ranges. + Vec::new(), + ), enhanced_keys_supported, auth_manager: auth_manager.clone(), models_manager: thread_manager.get_models_manager(), @@ -451,8 +475,12 @@ impl App { config: config.clone(), frame_requester: tui.frame_requester(), app_event_tx: app_event_tx.clone(), - initial_prompt: initial_prompt.clone(), - initial_images: initial_images.clone(), + initial_user_message: crate::chatwidget::create_initial_user_message( + initial_prompt.clone(), + initial_images.clone(), + // CLI prompt args are plain strings, so they don't provide element ranges. + Vec::new(), + ), enhanced_keys_supported, auth_manager: auth_manager.clone(), models_manager: thread_manager.get_models_manager(), @@ -474,8 +502,12 @@ impl App { config: config.clone(), frame_requester: tui.frame_requester(), app_event_tx: app_event_tx.clone(), - initial_prompt: initial_prompt.clone(), - initial_images: initial_images.clone(), + initial_user_message: crate::chatwidget::create_initial_user_message( + initial_prompt.clone(), + initial_images.clone(), + // CLI prompt args are plain strings, so they don't provide element ranges. 
+ Vec::new(), + ), enhanced_keys_supported, auth_manager: auth_manager.clone(), models_manager: thread_manager.get_models_manager(), @@ -672,12 +704,15 @@ impl App { let summary = session_summary(self.chat_widget.token_usage(), self.chat_widget.thread_id()); self.shutdown_current_thread().await; + if let Err(err) = self.server.remove_and_close_all_threads().await { + tracing::warn!(error = %err, "failed to close all threads"); + } let init = crate::chatwidget::ChatWidgetInit { config: self.config.clone(), frame_requester: tui.frame_requester(), app_event_tx: self.app_event_tx.clone(), - initial_prompt: None, - initial_images: Vec::new(), + // New sessions start without prefilled message content. + initial_user_message: None, enhanced_keys_supported: self.enhanced_keys_supported, auth_manager: self.auth_manager.clone(), models_manager: self.server.get_models_manager(), @@ -722,19 +757,10 @@ impl App { { Ok(resumed) => { self.shutdown_current_thread().await; - let init = crate::chatwidget::ChatWidgetInit { - config: self.config.clone(), - frame_requester: tui.frame_requester(), - app_event_tx: self.app_event_tx.clone(), - initial_prompt: None, - initial_images: Vec::new(), - enhanced_keys_supported: self.enhanced_keys_supported, - auth_manager: self.auth_manager.clone(), - models_manager: self.server.get_models_manager(), - feedback: self.feedback.clone(), - is_first_run: false, - model: Some(self.current_model.clone()), - }; + let init = self.chatwidget_init_for_forked_or_resumed_thread( + tui, + self.config.clone(), + ); self.chat_widget = ChatWidget::new_from_existing( init, resumed.thread, @@ -781,19 +807,10 @@ impl App { { Ok(forked) => { self.shutdown_current_thread().await; - let init = crate::chatwidget::ChatWidgetInit { - config: self.config.clone(), - frame_requester: tui.frame_requester(), - app_event_tx: self.app_event_tx.clone(), - initial_prompt: None, - initial_images: Vec::new(), - enhanced_keys_supported: self.enhanced_keys_supported, - auth_manager: 
self.auth_manager.clone(), - models_manager: self.server.get_models_manager(), - feedback: self.feedback.clone(), - is_first_run: false, - model: Some(self.current_model.clone()), - }; + let init = self.chatwidget_init_for_forked_or_resumed_thread( + tui, + self.config.clone(), + ); self.chat_widget = ChatWidget::new_from_existing( init, forked.thread, @@ -1999,6 +2016,8 @@ mod tests { let user_cell = |text: &str| -> Arc { Arc::new(UserHistoryCell { message: text.to_string(), + text_elements: Vec::new(), + local_image_paths: Vec::new(), }) as Arc }; let agent_cell = |text: &str| -> Arc { diff --git a/codex-rs/tui/src/app_backtrack.rs b/codex-rs/tui/src/app_backtrack.rs index 80fc65ca77..9a77b6bed7 100644 --- a/codex-rs/tui/src/app_backtrack.rs +++ b/codex-rs/tui/src/app_backtrack.rs @@ -204,7 +204,10 @@ impl App { }); self.chat_widget.submit_op(Op::ThreadRollback { num_turns }); if !prefill.is_empty() { - self.chat_widget.set_composer_text(prefill); + // TODO: Rehydrate text_elements/local_image_paths from the selected user cell so + // backtrack preserves image placeholders and attachments. 
+ self.chat_widget + .set_composer_text(prefill, Vec::new(), Vec::new()); } } @@ -554,6 +557,8 @@ mod tests { let mut cells: Vec> = vec![ Arc::new(UserHistoryCell { message: "first user".to_string(), + text_elements: Vec::new(), + local_image_paths: Vec::new(), }) as Arc, Arc::new(AgentMessageCell::new(vec![Line::from("assistant")], true)) as Arc, @@ -570,6 +575,8 @@ mod tests { as Arc, Arc::new(UserHistoryCell { message: "first".to_string(), + text_elements: Vec::new(), + local_image_paths: Vec::new(), }) as Arc, Arc::new(AgentMessageCell::new(vec![Line::from("after")], false)) as Arc, @@ -598,11 +605,15 @@ mod tests { as Arc, Arc::new(UserHistoryCell { message: "first".to_string(), + text_elements: Vec::new(), + local_image_paths: Vec::new(), }) as Arc, Arc::new(AgentMessageCell::new(vec![Line::from("between")], false)) as Arc, Arc::new(UserHistoryCell { message: "second".to_string(), + text_elements: Vec::new(), + local_image_paths: Vec::new(), }) as Arc, Arc::new(AgentMessageCell::new(vec![Line::from("tail")], false)) as Arc, diff --git a/codex-rs/tui/src/bottom_pane/chat_composer.rs b/codex-rs/tui/src/bottom_pane/chat_composer.rs index 343403704e..55b40a9a06 100644 --- a/codex-rs/tui/src/bottom_pane/chat_composer.rs +++ b/codex-rs/tui/src/bottom_pane/chat_composer.rs @@ -69,7 +69,6 @@ use ratatui::layout::Constraint; use ratatui::layout::Layout; use ratatui::layout::Margin; use ratatui::layout::Rect; -use ratatui::style::Style; use ratatui::style::Stylize; use ratatui::text::Line; use ratatui::text::Span; @@ -80,12 +79,15 @@ use ratatui::widgets::WidgetRef; use super::chat_composer_history::ChatComposerHistory; use super::command_popup::CommandItem; use super::command_popup::CommandPopup; +use super::command_popup::CommandPopupFlags; use super::file_search_popup::FileSearchPopup; use super::footer::FooterMode; use super::footer::FooterProps; use super::footer::esc_hint_mode; use super::footer::footer_height; +use super::footer::inset_footer_hint_area; use 
super::footer::render_footer; +use super::footer::render_footer_hint_items; use super::footer::reset_mode_after_activity; use super::footer::toggle_shortcut_mode; use super::paste_burst::CharDecision; @@ -108,9 +110,12 @@ use codex_common::fuzzy_match::fuzzy_match; use codex_protocol::custom_prompts::CustomPrompt; use codex_protocol::custom_prompts::PROMPTS_CMD_PREFIX; use codex_protocol::models::local_image_label_text; +use codex_protocol::user_input::ByteRange; +use codex_protocol::user_input::TextElement; use crate::app_event::AppEvent; use crate::app_event_sender::AppEventSender; +use crate::bottom_pane::LocalImageAttachment; use crate::bottom_pane::textarea::TextArea; use crate::bottom_pane::textarea::TextAreaState; use crate::clipboard_paste::normalize_pasted_path; @@ -122,6 +127,7 @@ use codex_file_search::FileMatch; use std::cell::RefCell; use std::collections::HashMap; use std::collections::HashSet; +use std::collections::VecDeque; use std::path::PathBuf; use std::time::Duration; use std::time::Instant; @@ -140,8 +146,14 @@ const LARGE_PASTE_CHAR_THRESHOLD: usize = 1000; /// Result returned when the user interacts with the text area. #[derive(Debug, PartialEq)] pub enum InputResult { - Submitted(String), - Queued(String), + Submitted { + text: String, + text_elements: Vec, + }, + Queued { + text: String, + text_elements: Vec, + }, Command(SlashCommand), CommandWithArgs(SlashCommand, String), None, @@ -191,12 +203,20 @@ pub(crate) struct ChatComposer { custom_prompts: Vec, footer_mode: FooterMode, footer_hint_override: Option>, + footer_flash: Option, context_window_percent: Option, context_window_used_tokens: Option, skills: Option>, dismissed_skill_popup_token: Option, /// When enabled, `Enter` submits immediately and `Tab` requests queuing behavior. 
steer_enabled: bool, + collaboration_modes_enabled: bool, +} + +#[derive(Clone, Debug)] +struct FooterFlash { + line: Line<'static>, + expires_at: Instant, } /// Popup state – at most one can be visible at any time. @@ -244,11 +264,13 @@ impl ChatComposer { custom_prompts: Vec::new(), footer_mode: FooterMode::ShortcutSummary, footer_hint_override: None, + footer_flash: None, context_window_percent: None, context_window_used_tokens: None, skills: None, dismissed_skill_popup_token: None, steer_enabled: false, + collaboration_modes_enabled: false, }; // Apply configuration via the setter to keep side-effects centralized. this.set_disable_paste_burst(disable_paste_burst); @@ -269,6 +291,10 @@ impl ChatComposer { self.steer_enabled = enabled; } + pub fn set_collaboration_modes_enabled(&mut self, enabled: bool) { + self.collaboration_modes_enabled = enabled; + } + fn layout_areas(&self, area: Rect) -> [Rect; 3] { let footer_props = self.footer_props(); let footer_hint_height = self @@ -323,7 +349,8 @@ impl ChatComposer { let Some(text) = self.history.on_entry_response(log_id, offset, entry) else { return false; }; - self.set_text_content(text); + // Composer history (↑/↓) stores plain text only; no UI element ranges/attachments to restore here. + self.set_text_content(text, Vec::new(), Vec::new()); true } @@ -346,6 +373,7 @@ impl ChatComposer { /// In all cases, clears any paste-burst Enter suppression state so a real paste cannot affect /// the next user Enter key, then syncs popup state. pub fn handle_paste(&mut self, pasted: String) -> bool { + let pasted = pasted.replace("\r\n", "\n").replace('\r', "\n"); let char_count = pasted.chars().count(); if char_count > LARGE_PASTE_CHAR_THRESHOLD { let placeholder = self.next_large_paste_placeholder(char_count); @@ -446,7 +474,7 @@ impl ChatComposer { self.attached_images = kept_images; // Rebuild textarea so placeholders become elements again. 
- self.textarea.set_text(""); + self.textarea.set_text_clearing_elements(""); let mut remaining: HashMap<&str, usize> = HashMap::new(); for img in &self.attached_images { *remaining.entry(img.placeholder.as_str()).or_insert(0) += 1; @@ -499,13 +527,51 @@ impl ChatComposer { self.footer_hint_override = items; } + pub(crate) fn show_footer_flash(&mut self, line: Line<'static>, duration: Duration) { + let expires_at = Instant::now() + .checked_add(duration) + .unwrap_or_else(Instant::now); + self.footer_flash = Some(FooterFlash { line, expires_at }); + } + + pub(crate) fn footer_flash_visible(&self) -> bool { + self.footer_flash + .as_ref() + .is_some_and(|flash| Instant::now() < flash.expires_at) + } + /// Replace the entire composer content with `text` and reset cursor. - pub(crate) fn set_text_content(&mut self, text: String) { + /// This clears any pending paste payloads. + pub(crate) fn set_text_content( + &mut self, + text: String, + text_elements: Vec, + local_image_paths: Vec, + ) { // Clear any existing content, placeholders, and attachments first. 
- self.textarea.set_text(""); + self.textarea.set_text_clearing_elements(""); self.pending_pastes.clear(); self.attached_images.clear(); - self.textarea.set_text(&text); + + self.textarea.set_text_with_elements(&text, &text_elements); + + let image_placeholders: HashSet = text_elements + .iter() + .filter_map(|elem| { + elem.placeholder.as_ref().cloned().or_else(|| { + text.get(elem.byte_range.start..elem.byte_range.end) + .map(str::to_string) + }) + }) + .collect(); + for (idx, path) in local_image_paths.into_iter().enumerate() { + let placeholder = local_image_label_text(idx + 1); + if image_placeholders.contains(&placeholder) { + self.attached_images + .push(AttachedImage { placeholder, path }); + } + } + self.textarea.set_cursor(0); self.sync_popups(); } @@ -515,7 +581,7 @@ impl ChatComposer { return None; } let previous = self.current_text(); - self.set_text_content(String::new()); + self.set_text_content(String::new(), Vec::new(), Vec::new()); self.history.reset_navigation(); self.history.record_local_submission(&previous); Some(previous) @@ -526,6 +592,28 @@ impl ChatComposer { self.textarea.text().to_string() } + pub(crate) fn text_elements(&self) -> Vec { + self.textarea.text_elements() + } + + #[cfg(test)] + pub(crate) fn local_image_paths(&self) -> Vec { + self.attached_images + .iter() + .map(|img| img.path.clone()) + .collect() + } + + pub(crate) fn local_images(&self) -> Vec { + self.attached_images + .iter() + .map(|img| LocalImageAttachment { + placeholder: img.placeholder.clone(), + path: img.path.clone(), + }) + .collect() + } + /// Insert an attachment placeholder and track it for the next submission. 
pub fn attach_image(&mut self, path: PathBuf) { let image_number = self.attached_images.len() + 1; @@ -537,11 +625,23 @@ impl ChatComposer { .push(AttachedImage { placeholder, path }); } + #[cfg(test)] pub fn take_recent_submission_images(&mut self) -> Vec { let images = std::mem::take(&mut self.attached_images); images.into_iter().map(|img| img.path).collect() } + pub fn take_recent_submission_images_with_placeholders(&mut self) -> Vec { + let images = std::mem::take(&mut self.attached_images); + images + .into_iter() + .map(|img| LocalImageAttachment { + placeholder: img.placeholder, + path: img.path, + }) + .collect() + } + /// Flushes any due paste-burst state. /// /// Call this from a UI tick to turn paste-burst transient state into explicit textarea edits: @@ -720,7 +820,7 @@ impl ChatComposer { match sel { CommandItem::Builtin(cmd) => { if cmd == SlashCommand::Skills { - self.textarea.set_text(""); + self.textarea.set_text_clearing_elements(""); return (InputResult::Command(cmd), true); } @@ -728,7 +828,8 @@ impl ChatComposer { .trim_start() .starts_with(&format!("/{}", cmd.command())); if !starts_with_cmd { - self.textarea.set_text(&format!("/{} ", cmd.command())); + self.textarea + .set_text_clearing_elements(&format!("/{} ", cmd.command())); } if !self.textarea.text().is_empty() { cursor_target = Some(self.textarea.text().len()); @@ -743,7 +844,8 @@ impl ChatComposer { ) { PromptSelectionAction::Insert { text, cursor } => { let target = cursor.unwrap_or(text.len()); - self.textarea.set_text(&text); + // Inserted prompt text is plain input; discard any elements. + self.textarea.set_text_clearing_elements(&text); cursor_target = Some(target); } PromptSelectionAction::Submit { .. 
} => {} @@ -772,14 +874,21 @@ impl ChatComposer { && let Some(expanded) = expand_if_numeric_with_positional_args(prompt, first_line) { - self.textarea.set_text(""); - return (InputResult::Submitted(expanded), true); + self.textarea.set_text_clearing_elements(""); + return ( + InputResult::Submitted { + text: expanded, + // Expanded prompt is plain text; no UI element ranges to preserve. + text_elements: Vec::new(), + }, + true, + ); } if let Some(sel) = popup.selected_item() { match sel { CommandItem::Builtin(cmd) => { - self.textarea.set_text(""); + self.textarea.set_text_clearing_elements(""); return (InputResult::Command(cmd), true); } CommandItem::UserPrompt(idx) => { @@ -790,12 +899,20 @@ impl ChatComposer { PromptSelectionMode::Submit, ) { PromptSelectionAction::Submit { text } => { - self.textarea.set_text(""); - return (InputResult::Submitted(text), true); + self.textarea.set_text_clearing_elements(""); + return ( + InputResult::Submitted { + text, + // Submitting a slash/custom prompt generates plain text, so there are no UI element ranges. + text_elements: Vec::new(), + }, + true, + ); } PromptSelectionAction::Insert { text, cursor } => { let target = cursor.unwrap_or(text.len()); - self.textarea.set_text(&text); + // Inserted prompt text is plain input; discard any elements. 
+ self.textarea.set_text_clearing_elements(&text); self.textarea.set_cursor(target); return (InputResult::None, true); } @@ -1098,6 +1215,111 @@ impl ChatComposer { lower.ends_with(".png") || lower.ends_with(".jpg") || lower.ends_with(".jpeg") } + fn trim_text_elements( + original: &str, + trimmed: &str, + elements: Vec, + ) -> Vec { + if trimmed.is_empty() || elements.is_empty() { + return Vec::new(); + } + let trimmed_start = original.len().saturating_sub(original.trim_start().len()); + let trimmed_end = trimmed_start.saturating_add(trimmed.len()); + + elements + .into_iter() + .filter_map(|elem| { + let start = elem.byte_range.start; + let end = elem.byte_range.end; + if end <= trimmed_start || start >= trimmed_end { + return None; + } + let new_start = start.saturating_sub(trimmed_start); + let new_end = end.saturating_sub(trimmed_start).min(trimmed.len()); + if new_start >= new_end { + return None; + } + let placeholder = trimmed.get(new_start..new_end).map(str::to_string); + Some(TextElement { + byte_range: ByteRange { + start: new_start, + end: new_end, + }, + placeholder, + }) + }) + .collect() + } + + /// Expand large-paste placeholders using element ranges and rebuild other element spans. + fn expand_pending_pastes( + text: &str, + mut elements: Vec, + pending_pastes: &[(String, String)], + ) -> (String, Vec) { + if pending_pastes.is_empty() || elements.is_empty() { + return (text.to_string(), elements); + } + + // Stage 1: index pending paste payloads by placeholder for deterministic replacements. + let mut pending_by_placeholder: HashMap<&str, VecDeque<&str>> = HashMap::new(); + for (placeholder, actual) in pending_pastes { + pending_by_placeholder + .entry(placeholder.as_str()) + .or_default() + .push_back(actual.as_str()); + } + + // Stage 2: walk elements in order and rebuild text/spans in a single pass. 
+ elements.sort_by_key(|elem| elem.byte_range.start); + + let mut rebuilt = String::with_capacity(text.len()); + let mut rebuilt_elements = Vec::with_capacity(elements.len()); + let mut cursor = 0usize; + + for elem in elements { + let start = elem.byte_range.start.min(text.len()); + let end = elem.byte_range.end.min(text.len()); + if start > end { + continue; + } + if start > cursor { + rebuilt.push_str(&text[cursor..start]); + } + let elem_text = &text[start..end]; + let placeholder = elem.placeholder; + let replacement = placeholder + .as_deref() + .and_then(|ph| pending_by_placeholder.get_mut(ph)) + .and_then(VecDeque::pop_front); + if let Some(actual) = replacement { + // Stage 3: inline actual paste payloads and drop their placeholder elements. + rebuilt.push_str(actual); + } else { + // Stage 4: keep non-paste elements, updating their byte ranges for the new text. + let new_start = rebuilt.len(); + rebuilt.push_str(elem_text); + let new_end = rebuilt.len(); + let placeholder = placeholder.or_else(|| Some(elem_text.to_string())); + rebuilt_elements.push(TextElement { + byte_range: ByteRange { + start: new_start, + end: new_end, + }, + placeholder, + }); + } + cursor = end; + } + + // Stage 5: append any trailing text that followed the last element. + if cursor < text.len() { + rebuilt.push_str(&text[cursor..]); + } + + (rebuilt, rebuilt_elements) + } + fn skills_enabled(&self) -> bool { self.skills.as_ref().is_some_and(|s| !s.is_empty()) } @@ -1272,7 +1494,8 @@ impl ChatComposer { new_text.push(' '); new_text.push_str(&text[end_idx..]); - self.textarea.set_text(&new_text); + // Path replacement is plain text; rebuild without carrying elements. 
+ self.textarea.set_text_clearing_elements(&new_text); let new_cursor = start_idx.saturating_add(inserted.len()).saturating_add(1); self.textarea.set_cursor(new_cursor); } @@ -1307,52 +1530,49 @@ impl ChatComposer { new_text.push(' '); new_text.push_str(&text[end_idx..]); - self.textarea.set_text(&new_text); + // Skill insertion rebuilds plain text, so drop existing elements. + self.textarea.set_text_clearing_elements(&new_text); let new_cursor = start_idx.saturating_add(inserted.len()).saturating_add(1); self.textarea.set_cursor(new_cursor); } /// Prepare text for submission/queuing. Returns None if submission should be suppressed. - fn prepare_submission_text(&mut self) -> Option { - // If we have pending placeholder pastes, replace them in the textarea text - // and continue to the normal submission flow to handle slash commands. - if !self.pending_pastes.is_empty() { - let mut text = self.textarea.text().to_string(); - for (placeholder, actual) in &self.pending_pastes { - if text.contains(placeholder) { - text = text.replace(placeholder, actual); - } - } - self.textarea.set_text(&text); - self.pending_pastes.clear(); - } - + /// On success, clears pending paste payloads because placeholders have been expanded. 
+ fn prepare_submission_text(&mut self) -> Option<(String, Vec)> { let mut text = self.textarea.text().to_string(); let original_input = text.clone(); + let original_text_elements = self.textarea.text_elements(); + let original_local_image_paths = self + .attached_images + .iter() + .map(|img| img.path.clone()) + .collect::>(); + let original_pending_pastes = self.pending_pastes.clone(); + let mut text_elements = original_text_elements.clone(); let input_starts_with_space = original_input.starts_with(' '); - self.textarea.set_text(""); + self.textarea.set_text_clearing_elements(""); - // Replace all pending pastes in the text - for (placeholder, actual) in &self.pending_pastes { - if text.contains(placeholder) { - text = text.replace(placeholder, actual); - } + if !self.pending_pastes.is_empty() { + // Expand placeholders so element byte ranges stay aligned. + let (expanded, expanded_elements) = + Self::expand_pending_pastes(&text, text_elements, &self.pending_pastes); + text = expanded; + text_elements = expanded_elements; } - self.pending_pastes.clear(); + + let expanded_input = text.clone(); // If there is neither text nor attachments, suppress submission entirely. 
let has_attachments = !self.attached_images.is_empty(); text = text.trim().to_string(); + text_elements = Self::trim_text_elements(&expanded_input, &text, text_elements); if let Some((name, _rest)) = parse_slash_name(&text) { let treat_as_plain_text = input_starts_with_space || name.contains('/'); if !treat_as_plain_text { - let is_builtin = built_in_slash_commands() - .into_iter() - .filter(|(_, cmd)| { - windows_degraded_sandbox_active() || *cmd != SlashCommand::ElevateSandbox - }) - .any(|(command_name, _)| command_name == name); + let is_builtin = + Self::built_in_slash_commands_for_input(self.collaboration_modes_enabled) + .any(|(command_name, _)| command_name == name); let prompt_prefix = format!("{PROMPTS_CMD_PREFIX}:"); let is_known_prompt = name .strip_prefix(&prompt_prefix) @@ -1369,7 +1589,12 @@ impl ChatComposer { self.app_event_tx.send(AppEvent::InsertHistoryCell(Box::new( history_cell::new_info_event(message, None), ))); - self.textarea.set_text(&original_input); + self.set_text_content( + original_input.clone(), + original_text_elements, + original_local_image_paths, + ); + self.pending_pastes.clone_from(&original_pending_pastes); self.textarea.set_cursor(original_input.len()); return None; } @@ -1382,13 +1607,21 @@ impl ChatComposer { self.app_event_tx.send(AppEvent::InsertHistoryCell(Box::new( history_cell::new_error_event(err.user_message()), ))); - self.textarea.set_text(&original_input); + self.set_text_content( + original_input.clone(), + original_text_elements, + original_local_image_paths, + ); + self.pending_pastes.clone_from(&original_pending_pastes); self.textarea.set_cursor(original_input.len()); return None; } }; if let Some(expanded) = expanded_prompt { text = expanded; + // Expanded prompt (e.g. custom prompt) is plain text; text elements not supported yet. + // TODO: Preserve UI element ranges through prompt expansion in a follow-up PR. 
+ text_elements = Vec::new(); } if text.is_empty() && !has_attachments { return None; @@ -1396,7 +1629,8 @@ impl ChatComposer { if !text.is_empty() { self.history.record_local_submission(&text); } - Some(text) + self.pending_pastes.clear(); + Some((text, text_elements)) } /// Common logic for handling message submission/queuing. @@ -1445,20 +1679,44 @@ impl ChatComposer { } let original_input = self.textarea.text().to_string(); + let original_text_elements = self.textarea.text_elements(); + let original_local_image_paths = self + .attached_images + .iter() + .map(|img| img.path.clone()) + .collect::>(); + let original_pending_pastes = self.pending_pastes.clone(); if let Some(result) = self.try_dispatch_slash_command_with_args() { return (result, true); } - if let Some(text) = self.prepare_submission_text() { + if let Some((text, text_elements)) = self.prepare_submission_text() { if should_queue { - (InputResult::Queued(text), true) + ( + InputResult::Queued { + text, + text_elements, + }, + true, + ) } else { // Do not clear attached_images here; ChatWidget drains them via take_recent_submission_images(). - (InputResult::Submitted(text), true) + ( + InputResult::Submitted { + text, + text_elements, + }, + true, + ) } } else { - // Restore text if submission was suppressed - self.textarea.set_text(&original_input); + // Restore text if submission was suppressed. 
+ self.set_text_content( + original_input, + original_text_elements, + original_local_image_paths, + ); + self.pending_pastes = original_pending_pastes; (InputResult::None, true) } } @@ -1469,14 +1727,11 @@ impl ChatComposer { let first_line = self.textarea.text().lines().next().unwrap_or(""); if let Some((name, rest)) = parse_slash_name(first_line) && rest.is_empty() - && let Some((_n, cmd)) = built_in_slash_commands() - .into_iter() - .filter(|(_, cmd)| { - windows_degraded_sandbox_active() || *cmd != SlashCommand::ElevateSandbox - }) - .find(|(n, _)| *n == name) + && let Some((_n, cmd)) = + Self::built_in_slash_commands_for_input(self.collaboration_modes_enabled) + .find(|(n, _)| *n == name) { - self.textarea.set_text(""); + self.textarea.set_text_clearing_elements(""); Some(InputResult::Command(cmd)) } else { None @@ -1494,12 +1749,12 @@ impl ChatComposer { if let Some((name, rest)) = parse_slash_name(&text) && !rest.is_empty() && !name.contains('/') - && let Some((_n, cmd)) = built_in_slash_commands() - .into_iter() - .find(|(command_name, _)| *command_name == name) + && let Some((_n, cmd)) = + Self::built_in_slash_commands_for_input(self.collaboration_modes_enabled) + .find(|(command_name, _)| *command_name == name) && cmd == SlashCommand::Review { - self.textarea.set_text(""); + self.textarea.set_text_clearing_elements(""); return Some(InputResult::CommandWithArgs(cmd, rest.to_string())); } } @@ -1555,7 +1810,7 @@ impl ChatComposer { _ => unreachable!(), }; if let Some(text) = replace_text { - self.set_text_content(text); + self.set_text_content(text, Vec::new(), Vec::new()); return (InputResult::None, true); } } @@ -1843,6 +2098,7 @@ impl ChatComposer { is_task_running: self.is_task_running, quit_shortcut_key: self.quit_shortcut_key, steer_enabled: self.steer_enabled, + collaboration_modes_enabled: self.collaboration_modes_enabled, context_window_percent: self.context_window_percent, context_window_used_tokens: self.context_window_used_tokens, } @@ -1865,6 
+2121,9 @@ impl ChatComposer { } fn custom_footer_height(&self) -> Option { + if self.footer_flash_visible() { + return Some(1); + } self.footer_hint_override .as_ref() .map(|items| if items.is_empty() { 0 } else { 1 }) @@ -1948,12 +2207,9 @@ impl ChatComposer { return rest_after_name.is_empty(); } - let builtin_match = built_in_slash_commands() - .into_iter() - .filter(|(_, cmd)| { - windows_degraded_sandbox_active() || *cmd != SlashCommand::ElevateSandbox - }) - .any(|(cmd_name, _)| fuzzy_match(cmd_name, name).is_some()); + let builtin_match = + Self::built_in_slash_commands_for_input(self.collaboration_modes_enabled) + .any(|(cmd_name, _)| fuzzy_match(cmd_name, name).is_some()); if builtin_match { return true; @@ -2006,8 +2262,14 @@ impl ChatComposer { _ => { if is_editing_slash_command_name { let skills_enabled = self.skills_enabled(); - let mut command_popup = - CommandPopup::new(self.custom_prompts.clone(), skills_enabled); + let collaboration_modes_enabled = self.collaboration_modes_enabled; + let mut command_popup = CommandPopup::new( + self.custom_prompts.clone(), + CommandPopupFlags { + skills_enabled, + collaboration_modes_enabled, + }, + ); command_popup.on_composer_text_change(first_line.to_string()); self.active_popup = ActivePopup::Command(command_popup); } @@ -2015,6 +2277,16 @@ impl ChatComposer { } } + fn built_in_slash_commands_for_input( + collaboration_modes_enabled: bool, + ) -> impl Iterator { + let allow_elevate_sandbox = windows_degraded_sandbox_active(); + built_in_slash_commands() + .into_iter() + .filter(move |(_, cmd)| allow_elevate_sandbox || *cmd != SlashCommand::ElevateSandbox) + .filter(move |(_, cmd)| collaboration_modes_enabled || *cmd != SlashCommand::Collab) + } + pub(crate) fn set_custom_prompts(&mut self, prompts: Vec) { self.custom_prompts = prompts.clone(); if let ActivePopup::Command(popup) = &mut self.active_popup { @@ -2180,24 +2452,12 @@ impl Renderable for ChatComposer { } else { popup_rect }; - if let Some(items) = 
self.footer_hint_override.as_ref() { - if !items.is_empty() { - let mut spans = Vec::with_capacity(items.len() * 4); - for (idx, (key, label)) in items.iter().enumerate() { - spans.push(" ".into()); - spans.push(Span::styled(key.clone(), Style::default().bold())); - spans.push(format!(" {label}").into()); - if idx + 1 != items.len() { - spans.push(" ".into()); - } - } - let mut custom_rect = hint_rect; - if custom_rect.width > 2 { - custom_rect.x += 2; - custom_rect.width = custom_rect.width.saturating_sub(2); - } - Line::from(spans).render_ref(custom_rect, buf); + if self.footer_flash_visible() { + if let Some(flash) = self.footer_flash.as_ref() { + flash.line.render(inset_footer_hint_area(hint_rect), buf); } + } else if let Some(items) = self.footer_hint_override.as_ref() { + render_footer_hint_items(hint_rect, buf, items); } else { render_footer(hint_rect, buf, footer_props); } @@ -2357,6 +2617,84 @@ mod tests { ); } + #[test] + fn footer_flash_overrides_footer_hint_override() { + let (tx, _rx) = unbounded_channel::(); + let sender = AppEventSender::new(tx); + let mut composer = ChatComposer::new( + true, + sender, + false, + "Ask Codex to do anything".to_string(), + false, + ); + composer.set_footer_hint_override(Some(vec![("K".to_string(), "label".to_string())])); + composer.show_footer_flash(Line::from("FLASH"), Duration::from_secs(10)); + + let area = Rect::new(0, 0, 60, 6); + let mut buf = Buffer::empty(area); + composer.render(area, &mut buf); + + let mut bottom_row = String::new(); + for x in 0..area.width { + bottom_row.push( + buf[(x, area.height - 1)] + .symbol() + .chars() + .next() + .unwrap_or(' '), + ); + } + assert!( + bottom_row.contains("FLASH"), + "expected flash content to render in footer row, saw: {bottom_row:?}", + ); + assert!( + !bottom_row.contains("K label"), + "expected flash to override hint override, saw: {bottom_row:?}", + ); + } + + #[test] + fn footer_flash_expires_and_falls_back_to_hint_override() { + let (tx, _rx) = 
unbounded_channel::(); + let sender = AppEventSender::new(tx); + let mut composer = ChatComposer::new( + true, + sender, + false, + "Ask Codex to do anything".to_string(), + false, + ); + composer.set_footer_hint_override(Some(vec![("K".to_string(), "label".to_string())])); + composer.show_footer_flash(Line::from("FLASH"), Duration::from_secs(10)); + composer.footer_flash.as_mut().unwrap().expires_at = + Instant::now() - Duration::from_secs(1); + + let area = Rect::new(0, 0, 60, 6); + let mut buf = Buffer::empty(area); + composer.render(area, &mut buf); + + let mut bottom_row = String::new(); + for x in 0..area.width { + bottom_row.push( + buf[(x, area.height - 1)] + .symbol() + .chars() + .next() + .unwrap_or(' '), + ); + } + assert!( + bottom_row.contains("K label"), + "expected hint override to render after flash expired, saw: {bottom_row:?}", + ); + assert!( + !bottom_row.contains("FLASH"), + "expected expired flash to be hidden, saw: {bottom_row:?}", + ); + } + fn snapshot_composer_state(name: &str, enhanced_keys_supported: bool, setup: F) where F: FnOnce(&mut ChatComposer), @@ -2483,11 +2821,8 @@ mod tests { composer.set_steer_enabled(true); composer.set_steer_enabled(true); composer.set_steer_enabled(true); - composer.set_steer_enabled(true); - composer.set_steer_enabled(true); - composer.set_steer_enabled(true); - composer.set_text_content("draft text".to_string()); + composer.set_text_content("draft text".to_string(), Vec::new(), Vec::new()); assert_eq!(composer.clear_for_ctrl_c(), Some("draft text".to_string())); assert!(composer.is_empty()); @@ -2794,7 +3129,7 @@ mod tests { let (result, _) = composer.handle_key_event(KeyEvent::new(KeyCode::Enter, KeyModifiers::NONE)); match result { - InputResult::Submitted(text) => assert_eq!(text, "1あ"), + InputResult::Submitted { text, .. 
} => assert_eq!(text, "1あ"), _ => panic!("expected Submitted"), } } @@ -3008,7 +3343,7 @@ mod tests { false, ); - composer.textarea.set_text("/diff"); + composer.textarea.set_text_clearing_elements("/diff"); composer.textarea.set_cursor("/diff".len()); composer .paste_burst @@ -3116,7 +3451,7 @@ mod tests { let (result, _) = composer.handle_key_event(KeyEvent::new(KeyCode::Enter, KeyModifiers::NONE)); match result { - InputResult::Submitted(text) => assert_eq!(text, "hello"), + InputResult::Submitted { text, .. } => assert_eq!(text, "hello"), _ => panic!("expected Submitted"), } } @@ -3182,7 +3517,7 @@ mod tests { let (result, _) = composer.handle_key_event(KeyEvent::new(KeyCode::Enter, KeyModifiers::NONE)); match result { - InputResult::Submitted(text) => assert_eq!(text, large), + InputResult::Submitted { text, .. } => assert_eq!(text, large), _ => panic!("expected Submitted"), } assert!(composer.pending_pastes.is_empty()); @@ -3455,10 +3790,10 @@ mod tests { InputResult::CommandWithArgs(_, _) => { panic!("expected command dispatch without args for '/init'") } - InputResult::Submitted(text) => { + InputResult::Submitted { text, .. } => { panic!("expected command dispatch, but composer submitted literal text: {text}") } - InputResult::Queued(_) => { + InputResult::Queued { .. } => { panic!("expected command dispatch, but composer queued literal text") } InputResult::None => panic!("expected Command result for '/init'"), @@ -3534,10 +3869,10 @@ mod tests { InputResult::CommandWithArgs(_, _) => { panic!("expected command dispatch without args for '/diff'") } - InputResult::Submitted(text) => { + InputResult::Submitted { text, .. } => { panic!("expected command dispatch after Tab completion, got literal submit: {text}") } - InputResult::Queued(_) => { + InputResult::Queued { .. 
} => { panic!("expected command dispatch after Tab completion, got literal queue") } InputResult::None => panic!("expected Command result for '/diff'"), @@ -3573,10 +3908,10 @@ mod tests { InputResult::CommandWithArgs(_, _) => { panic!("expected command dispatch without args for '/mention'") } - InputResult::Submitted(text) => { + InputResult::Submitted { text, .. } => { panic!("expected command dispatch, but composer submitted literal text: {text}") } - InputResult::Queued(_) => { + InputResult::Queued { .. } => { panic!("expected command dispatch, but composer queued literal text") } InputResult::None => panic!("expected Command result for '/mention'"), @@ -3661,7 +3996,7 @@ mod tests { // Submit and verify final expansion let (result, _) = composer.handle_key_event(KeyEvent::new(KeyCode::Enter, KeyModifiers::NONE)); - if let InputResult::Submitted(text) = result { + if let InputResult::Submitted { text, .. } = result { assert_eq!(text, format!("{} and {}", test_cases[0].0, test_cases[2].0)); } else { panic!("expected Submitted"); @@ -3858,7 +4193,7 @@ mod tests { composer.textarea.text().contains(&placeholder), composer.pending_pastes.len(), ); - composer.textarea.set_text(""); + composer.textarea.set_text_clearing_elements(""); result }) .collect(); @@ -3874,7 +4209,7 @@ mod tests { // --- Image attachment tests --- #[test] - fn attach_image_and_submit_includes_image_paths() { + fn attach_image_and_submit_includes_local_image_paths() { let (tx, _rx) = unbounded_channel::(); let sender = AppEventSender::new(tx); let mut composer = ChatComposer::new( @@ -3891,13 +4226,231 @@ mod tests { let (result, _) = composer.handle_key_event(KeyEvent::new(KeyCode::Enter, KeyModifiers::NONE)); match result { - InputResult::Submitted(text) => assert_eq!(text, "[Image #1] hi"), + InputResult::Submitted { + text, + text_elements, + } => { + assert_eq!(text, "[Image #1] hi"); + assert_eq!(text_elements.len(), 1); + assert_eq!(text_elements[0].placeholder.as_deref(), Some("[Image 
#1]")); + assert_eq!( + text_elements[0].byte_range, + ByteRange { + start: 0, + end: "[Image #1]".len() + } + ); + } _ => panic!("expected Submitted"), } let imgs = composer.take_recent_submission_images(); assert_eq!(vec![path], imgs); } + #[test] + fn set_text_content_reattaches_images_without_placeholder_metadata() { + let (tx, _rx) = unbounded_channel::(); + let sender = AppEventSender::new(tx); + let mut composer = ChatComposer::new( + true, + sender, + false, + "Ask Codex to do anything".to_string(), + false, + ); + + let placeholder = local_image_label_text(1); + let text = format!("{placeholder} restored"); + let text_elements = vec![TextElement { + byte_range: (0..placeholder.len()).into(), + placeholder: None, + }]; + let path = PathBuf::from("/tmp/image1.png"); + + composer.set_text_content(text, text_elements, vec![path.clone()]); + + assert_eq!(composer.local_image_paths(), vec![path]); + } + + #[test] + fn large_paste_preserves_image_text_elements_on_submit() { + let (tx, _rx) = unbounded_channel::(); + let sender = AppEventSender::new(tx); + let mut composer = ChatComposer::new( + true, + sender, + false, + "Ask Codex to do anything".to_string(), + false, + ); + composer.set_steer_enabled(true); + + let large_content = "x".repeat(LARGE_PASTE_CHAR_THRESHOLD + 5); + composer.handle_paste(large_content.clone()); + composer.handle_paste(" ".into()); + let path = PathBuf::from("/tmp/image_with_paste.png"); + composer.attach_image(path.clone()); + + let (result, _) = + composer.handle_key_event(KeyEvent::new(KeyCode::Enter, KeyModifiers::NONE)); + match result { + InputResult::Submitted { + text, + text_elements, + } => { + let expected = format!("{large_content} [Image #1]"); + assert_eq!(text, expected); + assert_eq!(text_elements.len(), 1); + assert_eq!(text_elements[0].placeholder.as_deref(), Some("[Image #1]")); + assert_eq!( + text_elements[0].byte_range, + ByteRange { + start: large_content.len() + 1, + end: large_content.len() + 1 + "[Image 
#1]".len(), + } + ); + } + _ => panic!("expected Submitted"), + } + let imgs = composer.take_recent_submission_images(); + assert_eq!(vec![path], imgs); + } + + #[test] + fn large_paste_with_leading_whitespace_trims_and_shifts_elements() { + let (tx, _rx) = unbounded_channel::(); + let sender = AppEventSender::new(tx); + let mut composer = ChatComposer::new( + true, + sender, + false, + "Ask Codex to do anything".to_string(), + false, + ); + composer.set_steer_enabled(true); + + let large_content = format!(" {}", "x".repeat(LARGE_PASTE_CHAR_THRESHOLD + 5)); + composer.handle_paste(large_content.clone()); + composer.handle_paste(" ".into()); + let path = PathBuf::from("/tmp/image_with_trim.png"); + composer.attach_image(path.clone()); + + let (result, _) = + composer.handle_key_event(KeyEvent::new(KeyCode::Enter, KeyModifiers::NONE)); + match result { + InputResult::Submitted { + text, + text_elements, + } => { + let trimmed = large_content.trim().to_string(); + assert_eq!(text, format!("{trimmed} [Image #1]")); + assert_eq!(text_elements.len(), 1); + assert_eq!(text_elements[0].placeholder.as_deref(), Some("[Image #1]")); + assert_eq!( + text_elements[0].byte_range, + ByteRange { + start: trimmed.len() + 1, + end: trimmed.len() + 1 + "[Image #1]".len(), + } + ); + } + _ => panic!("expected Submitted"), + } + let imgs = composer.take_recent_submission_images(); + assert_eq!(vec![path], imgs); + } + + #[test] + fn pasted_crlf_normalizes_newlines_for_elements() { + let (tx, _rx) = unbounded_channel::(); + let sender = AppEventSender::new(tx); + let mut composer = ChatComposer::new( + true, + sender, + false, + "Ask Codex to do anything".to_string(), + false, + ); + composer.set_steer_enabled(true); + + let pasted = "line1\r\nline2\r\n".to_string(); + composer.handle_paste(pasted); + composer.handle_paste(" ".into()); + let path = PathBuf::from("/tmp/image_crlf.png"); + composer.attach_image(path.clone()); + + let (result, _) = + 
composer.handle_key_event(KeyEvent::new(KeyCode::Enter, KeyModifiers::NONE)); + match result { + InputResult::Submitted { + text, + text_elements, + } => { + assert_eq!(text, "line1\nline2\n [Image #1]"); + assert!(!text.contains('\r')); + assert_eq!(text_elements.len(), 1); + assert_eq!(text_elements[0].placeholder.as_deref(), Some("[Image #1]")); + assert_eq!( + text_elements[0].byte_range, + ByteRange { + start: "line1\nline2\n ".len(), + end: "line1\nline2\n [Image #1]".len(), + } + ); + } + _ => panic!("expected Submitted"), + } + let imgs = composer.take_recent_submission_images(); + assert_eq!(vec![path], imgs); + } + + #[test] + fn suppressed_submission_restores_pending_paste_payload() { + let (tx, _rx) = unbounded_channel::(); + let sender = AppEventSender::new(tx); + let mut composer = ChatComposer::new( + true, + sender, + false, + "Ask Codex to do anything".to_string(), + false, + ); + composer.set_steer_enabled(true); + + composer.textarea.set_text_clearing_elements("/unknown "); + composer.textarea.set_cursor("/unknown ".len()); + let large_content = "x".repeat(LARGE_PASTE_CHAR_THRESHOLD + 5); + composer.handle_paste(large_content.clone()); + let placeholder = composer + .pending_pastes + .first() + .expect("expected pending paste") + .0 + .clone(); + + let (result, _) = + composer.handle_key_event(KeyEvent::new(KeyCode::Enter, KeyModifiers::NONE)); + assert!(matches!(result, InputResult::None)); + assert_eq!(composer.pending_pastes.len(), 1); + assert_eq!(composer.textarea.text(), format!("/unknown {placeholder}")); + + composer.textarea.set_cursor(0); + composer.textarea.insert_str(" "); + let (result, _) = + composer.handle_key_event(KeyEvent::new(KeyCode::Enter, KeyModifiers::NONE)); + match result { + InputResult::Submitted { + text, + text_elements, + } => { + assert_eq!(text, format!("/unknown {large_content}")); + assert!(text_elements.is_empty()); + } + _ => panic!("expected Submitted"), + } + assert!(composer.pending_pastes.is_empty()); + } 
+ #[test] fn attach_image_without_text_submits_empty_text_and_images() { let (tx, _rx) = unbounded_channel::(); @@ -3915,7 +4468,21 @@ mod tests { let (result, _) = composer.handle_key_event(KeyEvent::new(KeyCode::Enter, KeyModifiers::NONE)); match result { - InputResult::Submitted(text) => assert_eq!(text, "[Image #1]"), + InputResult::Submitted { + text, + text_elements, + } => { + assert_eq!(text, "[Image #1]"); + assert_eq!(text_elements.len(), 1); + assert_eq!(text_elements[0].placeholder.as_deref(), Some("[Image #1]")); + assert_eq!( + text_elements[0].byte_range, + ByteRange { + start: 0, + end: "[Image #1]".len() + } + ); + } _ => panic!("expected Submitted"), } let imgs = composer.take_recent_submission_images(); @@ -4070,6 +4637,69 @@ mod tests { ); } + #[test] + fn deleting_reordered_image_one_renumbers_text_in_place() { + use crossterm::event::KeyCode; + use crossterm::event::KeyEvent; + use crossterm::event::KeyModifiers; + + let (tx, _rx) = unbounded_channel::(); + let sender = AppEventSender::new(tx); + let mut composer = ChatComposer::new( + true, + sender, + false, + "Ask Codex to do anything".to_string(), + false, + ); + + let path1 = PathBuf::from("/tmp/image_first.png"); + let path2 = PathBuf::from("/tmp/image_second.png"); + let placeholder1 = local_image_label_text(1); + let placeholder2 = local_image_label_text(2); + + // Placeholders can be reordered in the text buffer; deleting image #1 should renumber + // image #2 wherever it appears, not just after the cursor. 
+ let text = format!("Test {placeholder2} test {placeholder1}"); + let start2 = text.find(&placeholder2).expect("placeholder2 present"); + let start1 = text.find(&placeholder1).expect("placeholder1 present"); + let text_elements = vec![ + TextElement { + byte_range: ByteRange { + start: start2, + end: start2 + placeholder2.len(), + }, + placeholder: Some(placeholder2), + }, + TextElement { + byte_range: ByteRange { + start: start1, + end: start1 + placeholder1.len(), + }, + placeholder: Some(placeholder1.clone()), + }, + ]; + composer.set_text_content(text, text_elements, vec![path1, path2.clone()]); + + let end1 = start1 + placeholder1.len(); + composer.textarea.set_cursor(end1); + + composer.handle_key_event(KeyEvent::new(KeyCode::Backspace, KeyModifiers::NONE)); + + assert_eq!( + composer.textarea.text(), + format!("Test {placeholder1} test ") + ); + assert_eq!( + vec![AttachedImage { + path: path2, + placeholder: placeholder1 + }], + composer.attached_images, + "attachment renumbered after deletion" + ); + } + #[test] fn deleting_first_text_element_renumbers_following_text_element() { use crossterm::event::KeyCode; @@ -4167,7 +4797,10 @@ mod tests { let (result, _needs_redraw) = composer.handle_key_event(KeyEvent::new(KeyCode::Enter, KeyModifiers::NONE)); - assert_eq!(InputResult::Submitted(prompt_text.to_string()), result); + assert!(matches!( + result, + InputResult::Submitted { text, .. } if text == prompt_text + )); assert!(composer.textarea.is_empty()); } @@ -4194,15 +4827,16 @@ mod tests { composer .textarea - .set_text("/prompts:my-prompt USER=Alice BRANCH=main"); + .set_text_clearing_elements("/prompts:my-prompt USER=Alice BRANCH=main"); let (result, _needs_redraw) = composer.handle_key_event(KeyEvent::new(KeyCode::Enter, KeyModifiers::NONE)); - assert_eq!( - InputResult::Submitted("Review Alice changes on main".to_string()), - result - ); + assert!(matches!( + result, + InputResult::Submitted { text, .. 
} + if text == "Review Alice changes on main" + )); assert!(composer.textarea.is_empty()); } @@ -4229,15 +4863,16 @@ mod tests { composer .textarea - .set_text("/prompts:my-prompt USER=\"Alice Smith\" BRANCH=dev-main"); + .set_text_clearing_elements("/prompts:my-prompt USER=\"Alice Smith\" BRANCH=dev-main"); let (result, _needs_redraw) = composer.handle_key_event(KeyEvent::new(KeyCode::Enter, KeyModifiers::NONE)); - assert_eq!( - InputResult::Submitted("Pair Alice Smith with dev-main".to_string()), - result - ); + assert!(matches!( + result, + InputResult::Submitted { text, .. } + if text == "Pair Alice Smith with dev-main" + )); assert!(composer.textarea.is_empty()); } @@ -4271,7 +4906,7 @@ mod tests { // Type the slash command let command_text = "/prompts:code-review "; - composer.textarea.set_text(command_text); + composer.textarea.set_text_clearing_elements(command_text); composer.textarea.set_cursor(command_text.len()); // Paste large content (>3000 chars) to trigger placeholder @@ -4294,7 +4929,7 @@ mod tests { // Verify the custom prompt was expanded with the large content as positional arg match result { - InputResult::Submitted(text) => { + InputResult::Submitted { text, .. } => { // The prompt should be expanded, with the large content replacing $1 assert_eq!( text, @@ -4327,12 +4962,12 @@ mod tests { composer .textarea - .set_text("/Users/example/project/src/main.rs"); + .set_text_clearing_elements("/Users/example/project/src/main.rs"); let (result, _needs_redraw) = composer.handle_key_event(KeyEvent::new(KeyCode::Enter, KeyModifiers::NONE)); - if let InputResult::Submitted(text) = result { + if let InputResult::Submitted { text, .. 
} = result { assert_eq!(text, "/Users/example/project/src/main.rs"); } else { panic!("expected Submitted"); @@ -4362,12 +4997,14 @@ mod tests { ); composer.set_steer_enabled(true); - composer.textarea.set_text(" /this-looks-like-a-command"); + composer + .textarea + .set_text_clearing_elements(" /this-looks-like-a-command"); let (result, _needs_redraw) = composer.handle_key_event(KeyEvent::new(KeyCode::Enter, KeyModifiers::NONE)); - if let InputResult::Submitted(text) = result { + if let InputResult::Submitted { text, .. } = result { assert_eq!(text, "/this-looks-like-a-command"); } else { panic!("expected Submitted"); @@ -4402,7 +5039,7 @@ mod tests { composer .textarea - .set_text("/prompts:my-prompt USER=Alice stray"); + .set_text_clearing_elements("/prompts:my-prompt USER=Alice stray"); let (result, _needs_redraw) = composer.handle_key_event(KeyEvent::new(KeyCode::Enter, KeyModifiers::NONE)); @@ -4451,7 +5088,9 @@ mod tests { }]); // Provide only one of the required args - composer.textarea.set_text("/prompts:my-prompt USER=Alice"); + composer + .textarea + .set_text_clearing_elements("/prompts:my-prompt USER=Alice"); let (result, _needs_redraw) = composer.handle_key_event(KeyEvent::new(KeyCode::Enter, KeyModifiers::NONE)); @@ -4516,7 +5155,10 @@ mod tests { composer.handle_key_event(KeyEvent::new(KeyCode::Enter, KeyModifiers::NONE)); let expected = "Header: foo\nArgs: foo bar\nNinth: \n".to_string(); - assert_eq!(InputResult::Submitted(expected), result); + assert!(matches!( + result, + InputResult::Submitted { text, .. } if text == expected + )); } #[test] @@ -4543,11 +5185,16 @@ mod tests { }]); // Type positional args; should submit with numeric expansion, no errors. 
- composer.textarea.set_text("/prompts:elegant hi"); + composer + .textarea + .set_text_clearing_elements("/prompts:elegant hi"); let (result, _needs_redraw) = composer.handle_key_event(KeyEvent::new(KeyCode::Enter, KeyModifiers::NONE)); - assert_eq!(InputResult::Submitted("Echo: hi".to_string()), result); + assert!(matches!( + result, + InputResult::Submitted { text, .. } if text == "Echo: hi" + )); assert!(composer.textarea.is_empty()); } @@ -4619,10 +5266,11 @@ mod tests { let (result, _needs_redraw) = composer.handle_key_event(KeyEvent::new(KeyCode::Enter, KeyModifiers::NONE)); - assert_eq!( - InputResult::Submitted("Cost: $$ and first: x".to_string()), - result - ); + assert!(matches!( + result, + InputResult::Submitted { text, .. } + if text == "Cost: $$ and first: x" + )); } #[test] @@ -4659,7 +5307,10 @@ mod tests { composer.handle_key_event(KeyEvent::new(KeyCode::Enter, KeyModifiers::NONE)); let expected = "First: one two\nSecond: one two".to_string(); - assert_eq!(InputResult::Submitted(expected), result); + assert!(matches!( + result, + InputResult::Submitted { text, .. } if text == expected + )); } /// Behavior: the first fast ASCII character is held briefly to avoid flicker; if no burst @@ -4813,7 +5464,7 @@ mod tests { ); // Simulate history-like content: "/ test" - composer.set_text_content("/ test".to_string()); + composer.set_text_content("/ test".to_string(), Vec::new(), Vec::new()); // After set_text_content -> sync_popups is called; popup should NOT be Command. assert!( @@ -4843,21 +5494,21 @@ mod tests { ); // Case 1: bare "/" - composer.set_text_content("/".to_string()); + composer.set_text_content("/".to_string(), Vec::new(), Vec::new()); assert!( matches!(composer.active_popup, ActivePopup::Command(_)), "bare '/' should activate slash popup" ); // Case 2: valid prefix "/re" (matches /review, /resume, etc.) 
- composer.set_text_content("/re".to_string()); + composer.set_text_content("/re".to_string(), Vec::new(), Vec::new()); assert!( matches!(composer.active_popup, ActivePopup::Command(_)), "'/re' should activate slash popup via prefix match" ); // Case 3: fuzzy match "/ac" (subsequence of /compact and /feedback) - composer.set_text_content("/ac".to_string()); + composer.set_text_content("/ac".to_string(), Vec::new(), Vec::new()); assert!( matches!(composer.active_popup, ActivePopup::Command(_)), "'/ac' should activate slash popup via fuzzy match" @@ -4866,7 +5517,7 @@ mod tests { // Case 4: invalid prefix "/zzz" – still allowed to open popup if it // matches no built-in command; our current logic will not open popup. // Verify that explicitly. - composer.set_text_content("/zzz".to_string()); + composer.set_text_content("/zzz".to_string(), Vec::new(), Vec::new()); assert!( matches!(composer.active_popup, ActivePopup::None), "'/zzz' should not activate slash popup because it is not a prefix of any built-in command" @@ -5001,7 +5652,7 @@ mod tests { false, ); - composer.set_text_content("hello".to_string()); + composer.set_text_content("hello".to_string(), Vec::new(), Vec::new()); composer.set_input_enabled(false, Some("Input disabled for test.".to_string())); let (result, needs_redraw) = diff --git a/codex-rs/tui/src/bottom_pane/command_popup.rs b/codex-rs/tui/src/bottom_pane/command_popup.rs index 10a469c683..83afdca30c 100644 --- a/codex-rs/tui/src/bottom_pane/command_popup.rs +++ b/codex-rs/tui/src/bottom_pane/command_popup.rs @@ -37,13 +37,20 @@ pub(crate) struct CommandPopup { state: ScrollState, } +#[derive(Clone, Copy, Debug, Default)] +pub(crate) struct CommandPopupFlags { + pub(crate) skills_enabled: bool, + pub(crate) collaboration_modes_enabled: bool, +} + impl CommandPopup { - pub(crate) fn new(mut prompts: Vec, skills_enabled: bool) -> Self { + pub(crate) fn new(mut prompts: Vec, flags: CommandPopupFlags) -> Self { let allow_elevate_sandbox = 
windows_degraded_sandbox_active(); let builtins: Vec<(&'static str, SlashCommand)> = built_in_slash_commands() .into_iter() - .filter(|(_, cmd)| skills_enabled || *cmd != SlashCommand::Skills) + .filter(|(_, cmd)| flags.skills_enabled || *cmd != SlashCommand::Skills) .filter(|(_, cmd)| allow_elevate_sandbox || *cmd != SlashCommand::ElevateSandbox) + .filter(|(_, cmd)| flags.collaboration_modes_enabled || *cmd != SlashCommand::Collab) .collect(); // Exclude prompts that collide with builtin command names and sort by name. let exclude: HashSet = builtins.iter().map(|(n, _)| (*n).to_string()).collect(); @@ -231,7 +238,7 @@ mod tests { #[test] fn filter_includes_init_when_typing_prefix() { - let mut popup = CommandPopup::new(Vec::new(), false); + let mut popup = CommandPopup::new(Vec::new(), CommandPopupFlags::default()); // Simulate the composer line starting with '/in' so the popup filters // matching commands by prefix. popup.on_composer_text_change("/in".to_string()); @@ -251,7 +258,7 @@ mod tests { #[test] fn selecting_init_by_exact_match() { - let mut popup = CommandPopup::new(Vec::new(), false); + let mut popup = CommandPopup::new(Vec::new(), CommandPopupFlags::default()); popup.on_composer_text_change("/init".to_string()); // When an exact match exists, the selected command should be that @@ -266,7 +273,7 @@ mod tests { #[test] fn model_is_first_suggestion_for_mo() { - let mut popup = CommandPopup::new(Vec::new(), false); + let mut popup = CommandPopup::new(Vec::new(), CommandPopupFlags::default()); popup.on_composer_text_change("/mo".to_string()); let matches = popup.filtered_items(); match matches.first() { @@ -280,7 +287,7 @@ mod tests { #[test] fn filtered_commands_keep_presentation_order() { - let mut popup = CommandPopup::new(Vec::new(), false); + let mut popup = CommandPopup::new(Vec::new(), CommandPopupFlags::default()); popup.on_composer_text_change("/m".to_string()); let cmds: Vec<&str> = popup @@ -322,7 +329,7 @@ mod tests { argument_hint: None, }, 
]; - let popup = CommandPopup::new(prompts, false); + let popup = CommandPopup::new(prompts, CommandPopupFlags::default()); let items = popup.filtered_items(); let mut prompt_names: Vec = items .into_iter() @@ -346,7 +353,7 @@ mod tests { description: None, argument_hint: None, }], - false, + CommandPopupFlags::default(), ); let items = popup.filtered_items(); let has_collision_prompt = items.into_iter().any(|it| match it { @@ -369,7 +376,7 @@ mod tests { description: Some("Create feature branch, commit and open draft PR.".to_string()), argument_hint: None, }], - false, + CommandPopupFlags::default(), ); let rows = popup.rows_from_matches(vec![(CommandItem::UserPrompt(0), None, 0)]); let description = rows.first().and_then(|row| row.description.as_deref()); @@ -389,7 +396,7 @@ mod tests { description: None, argument_hint: None, }], - false, + CommandPopupFlags::default(), ); let rows = popup.rows_from_matches(vec![(CommandItem::UserPrompt(0), None, 0)]); let description = rows.first().and_then(|row| row.description.as_deref()); @@ -398,7 +405,7 @@ mod tests { #[test] fn fuzzy_filter_matches_subsequence_for_ac() { - let mut popup = CommandPopup::new(Vec::new(), false); + let mut popup = CommandPopup::new(Vec::new(), CommandPopupFlags::default()); popup.on_composer_text_change("/ac".to_string()); let cmds: Vec<&str> = popup @@ -414,4 +421,40 @@ mod tests { "expected fuzzy search for '/ac' to include compact and feedback, got {cmds:?}" ); } + + #[test] + fn collab_command_hidden_when_collaboration_modes_disabled() { + let mut popup = CommandPopup::new(Vec::new(), CommandPopupFlags::default()); + popup.on_composer_text_change("/coll".to_string()); + + let cmds: Vec<&str> = popup + .filtered_items() + .into_iter() + .filter_map(|item| match item { + CommandItem::Builtin(cmd) => Some(cmd.command()), + CommandItem::UserPrompt(_) => None, + }) + .collect(); + assert!( + !cmds.contains(&"collab"), + "expected '/collab' to be hidden when collaboration modes are disabled, got 
{cmds:?}" + ); + } + + #[test] + fn collab_command_visible_when_collaboration_modes_enabled() { + let mut popup = CommandPopup::new( + Vec::new(), + CommandPopupFlags { + skills_enabled: false, + collaboration_modes_enabled: true, + }, + ); + popup.on_composer_text_change("/collab".to_string()); + + match popup.selected_item() { + Some(CommandItem::Builtin(cmd)) => assert_eq!(cmd.command(), "collab"), + other => panic!("expected collab to be selected for exact match, got {other:?}"), + } + } } diff --git a/codex-rs/tui/src/bottom_pane/footer.rs b/codex-rs/tui/src/bottom_pane/footer.rs index 42c0392a61..5a54dd11d7 100644 --- a/codex-rs/tui/src/bottom_pane/footer.rs +++ b/codex-rs/tui/src/bottom_pane/footer.rs @@ -37,6 +37,7 @@ pub(crate) struct FooterProps { pub(crate) use_shift_enter_hint: bool, pub(crate) is_task_running: bool, pub(crate) steer_enabled: bool, + pub(crate) collaboration_modes_enabled: bool, /// Which key the user must press again to quit. /// /// This is rendered when `mode` is `FooterMode::QuitShortcutReminder`. @@ -103,6 +104,31 @@ pub(crate) fn render_footer(area: Rect, buf: &mut Buffer, props: FooterProps) { .render(area, buf); } +pub(crate) fn inset_footer_hint_area(mut area: Rect) -> Rect { + if area.width > 2 { + area.x += 2; + area.width = area.width.saturating_sub(2); + } + area +} + +pub(crate) fn render_footer_hint_items(area: Rect, buf: &mut Buffer, items: &[(String, String)]) { + if items.is_empty() { + return; + } + + let mut spans = Vec::with_capacity(items.len() * 4); + for (idx, (key, label)) in items.iter().enumerate() { + spans.push(" ".into()); + spans.push(key.clone().bold()); + spans.push(format!(" {label}").into()); + if idx + 1 != items.len() { + spans.push(" ".into()); + } + } + Line::from(spans).render(inset_footer_hint_area(area), buf); +} + fn footer_lines(props: FooterProps) -> Vec> { // Show the context indicator on the left, appended after the primary hint // (e.g., "? for shortcuts"). 
Keep it visible even when typing (i.e., when @@ -134,6 +160,7 @@ fn footer_lines(props: FooterProps) -> Vec> { use_shift_enter_hint: props.use_shift_enter_hint, esc_backtrack_hint: props.esc_backtrack_hint, is_wsl, + collaboration_modes_enabled: props.collaboration_modes_enabled, }; shortcut_overlay_lines(state) } @@ -158,6 +185,7 @@ struct ShortcutsState { use_shift_enter_hint: bool, esc_backtrack_hint: bool, is_wsl: bool, + collaboration_modes_enabled: bool, } fn quit_shortcut_reminder_line(key: KeyBinding) -> Line<'static> { @@ -190,6 +218,7 @@ fn shortcut_overlay_lines(state: ShortcutsState) -> Vec> { let mut edit_previous = Line::from(""); let mut quit = Line::from(""); let mut show_transcript = Line::from(""); + let mut change_mode = Line::from(""); for descriptor in SHORTCUTS { if let Some(text) = descriptor.overlay_entry(state) { @@ -204,11 +233,12 @@ fn shortcut_overlay_lines(state: ShortcutsState) -> Vec> { ShortcutId::EditPrevious => edit_previous = text, ShortcutId::Quit => quit = text, ShortcutId::ShowTranscript => show_transcript = text, + ShortcutId::ChangeMode => change_mode = text, } } } - let ordered = vec![ + let mut ordered = vec![ commands, shell_commands, newline, @@ -218,9 +248,12 @@ fn shortcut_overlay_lines(state: ShortcutsState) -> Vec> { external_editor, edit_previous, quit, - Line::from(""), - show_transcript, ]; + if change_mode.width() > 0 { + ordered.push(change_mode); + } + ordered.push(Line::from("")); + ordered.push(show_transcript); build_columns(ordered) } @@ -298,6 +331,7 @@ enum ShortcutId { EditPrevious, Quit, ShowTranscript, + ChangeMode, } #[derive(Clone, Copy, Debug, Eq, PartialEq)] @@ -318,6 +352,7 @@ enum DisplayCondition { WhenShiftEnterHint, WhenNotShiftEnterHint, WhenUnderWSL, + WhenCollaborationModesEnabled, } impl DisplayCondition { @@ -327,6 +362,7 @@ impl DisplayCondition { DisplayCondition::WhenShiftEnterHint => state.use_shift_enter_hint, DisplayCondition::WhenNotShiftEnterHint => !state.use_shift_enter_hint, 
DisplayCondition::WhenUnderWSL => state.is_wsl, + DisplayCondition::WhenCollaborationModesEnabled => state.collaboration_modes_enabled, } } } @@ -469,6 +505,15 @@ const SHORTCUTS: &[ShortcutDescriptor] = &[ prefix: "", label: " to view transcript", }, + ShortcutDescriptor { + id: ShortcutId::ChangeMode, + bindings: &[ShortcutBinding { + key: key_hint::shift(KeyCode::Tab), + condition: DisplayCondition::WhenCollaborationModesEnabled, + }], + prefix: "", + label: " to change mode", + }, ]; #[cfg(test)] @@ -500,6 +545,7 @@ mod tests { use_shift_enter_hint: false, is_task_running: false, steer_enabled: false, + collaboration_modes_enabled: false, quit_shortcut_key: key_hint::ctrl(KeyCode::Char('c')), context_window_percent: None, context_window_used_tokens: None, @@ -514,6 +560,22 @@ mod tests { use_shift_enter_hint: true, is_task_running: false, steer_enabled: false, + collaboration_modes_enabled: false, + quit_shortcut_key: key_hint::ctrl(KeyCode::Char('c')), + context_window_percent: None, + context_window_used_tokens: None, + }, + ); + + snapshot_footer( + "footer_shortcuts_collaboration_modes_enabled", + FooterProps { + mode: FooterMode::ShortcutOverlay, + esc_backtrack_hint: false, + use_shift_enter_hint: false, + is_task_running: false, + steer_enabled: false, + collaboration_modes_enabled: true, quit_shortcut_key: key_hint::ctrl(KeyCode::Char('c')), context_window_percent: None, context_window_used_tokens: None, @@ -528,6 +590,7 @@ mod tests { use_shift_enter_hint: false, is_task_running: false, steer_enabled: false, + collaboration_modes_enabled: false, quit_shortcut_key: key_hint::ctrl(KeyCode::Char('c')), context_window_percent: None, context_window_used_tokens: None, @@ -542,6 +605,7 @@ mod tests { use_shift_enter_hint: false, is_task_running: true, steer_enabled: false, + collaboration_modes_enabled: false, quit_shortcut_key: key_hint::ctrl(KeyCode::Char('c')), context_window_percent: None, context_window_used_tokens: None, @@ -556,6 +620,7 @@ mod tests { 
use_shift_enter_hint: false, is_task_running: false, steer_enabled: false, + collaboration_modes_enabled: false, quit_shortcut_key: key_hint::ctrl(KeyCode::Char('c')), context_window_percent: None, context_window_used_tokens: None, @@ -570,6 +635,7 @@ mod tests { use_shift_enter_hint: false, is_task_running: false, steer_enabled: false, + collaboration_modes_enabled: false, quit_shortcut_key: key_hint::ctrl(KeyCode::Char('c')), context_window_percent: None, context_window_used_tokens: None, @@ -584,6 +650,7 @@ mod tests { use_shift_enter_hint: false, is_task_running: true, steer_enabled: false, + collaboration_modes_enabled: false, quit_shortcut_key: key_hint::ctrl(KeyCode::Char('c')), context_window_percent: Some(72), context_window_used_tokens: None, @@ -598,6 +665,7 @@ mod tests { use_shift_enter_hint: false, is_task_running: false, steer_enabled: false, + collaboration_modes_enabled: false, quit_shortcut_key: key_hint::ctrl(KeyCode::Char('c')), context_window_percent: None, context_window_used_tokens: Some(123_456), @@ -612,6 +680,7 @@ mod tests { use_shift_enter_hint: false, is_task_running: true, steer_enabled: false, + collaboration_modes_enabled: false, quit_shortcut_key: key_hint::ctrl(KeyCode::Char('c')), context_window_percent: None, context_window_used_tokens: None, @@ -626,6 +695,7 @@ mod tests { use_shift_enter_hint: false, is_task_running: true, steer_enabled: true, + collaboration_modes_enabled: false, quit_shortcut_key: key_hint::ctrl(KeyCode::Char('c')), context_window_percent: None, context_window_used_tokens: None, diff --git a/codex-rs/tui/src/bottom_pane/mod.rs b/codex-rs/tui/src/bottom_pane/mod.rs index f270145a04..ee23cad334 100644 --- a/codex-rs/tui/src/bottom_pane/mod.rs +++ b/codex-rs/tui/src/bottom_pane/mod.rs @@ -28,16 +28,24 @@ use bottom_pane_view::BottomPaneView; use codex_core::features::Features; use codex_core::skills::model::SkillMetadata; use codex_file_search::FileMatch; +use codex_protocol::user_input::TextElement; use 
crossterm::event::KeyCode; use crossterm::event::KeyEvent; use ratatui::buffer::Buffer; use ratatui::layout::Rect; +use ratatui::text::Line; use std::time::Duration; mod approval_overlay; pub(crate) use approval_overlay::ApprovalOverlay; pub(crate) use approval_overlay::ApprovalRequest; mod bottom_pane_view; + +#[derive(Clone, Debug, PartialEq, Eq)] +pub(crate) struct LocalImageAttachment { + pub(crate) placeholder: String, + pub(crate) path: PathBuf, +} mod chat_composer; mod chat_composer_history; mod command_popup; @@ -188,6 +196,11 @@ impl BottomPane { self.composer.set_steer_enabled(enabled); } + pub fn set_collaboration_modes_enabled(&mut self, enabled: bool) { + self.composer.set_collaboration_modes_enabled(enabled); + self.request_redraw(); + } + pub fn status_widget(&self) -> Option<&StatusIndicatorWidget> { self.status.as_ref() } @@ -237,8 +250,10 @@ impl BottomPane { } else { // If a task is running and a status line is visible, allow Esc to // send an interrupt even while the composer has focus. - if matches!(key_event.code, crossterm::event::KeyCode::Esc) + // When a popup is active, prefer dismissing it over interrupting the task. + if key_event.code == KeyCode::Esc && self.is_task_running + && !self.composer.popup_active() && let Some(status) = &self.status { // Send Op::Interrupt @@ -309,8 +324,14 @@ impl BottomPane { } /// Replace the composer text with `text`. 
- pub(crate) fn set_composer_text(&mut self, text: String) { - self.composer.set_text_content(text); + pub(crate) fn set_composer_text( + &mut self, + text: String, + text_elements: Vec, + local_image_paths: Vec, + ) { + self.composer + .set_text_content(text, text_elements, local_image_paths); self.request_redraw(); } @@ -334,6 +355,19 @@ impl BottomPane { self.composer.current_text() } + pub(crate) fn composer_text_elements(&self) -> Vec { + self.composer.text_elements() + } + + pub(crate) fn composer_local_images(&self) -> Vec { + self.composer.local_images() + } + + #[cfg(test)] + pub(crate) fn composer_local_image_paths(&self) -> Vec { + self.composer.local_image_paths() + } + pub(crate) fn composer_text_with_pending(&self) -> String { self.composer.current_text_with_pending() } @@ -508,6 +542,23 @@ impl BottomPane { self.request_redraw(); } + pub(crate) fn flash_footer_hint(&mut self, line: Line<'static>, duration: Duration) { + self.composer.show_footer_flash(line, duration); + let frame_requester = self.frame_requester.clone(); + if let Ok(handle) = tokio::runtime::Handle::try_current() { + handle.spawn(async move { + tokio::time::sleep(duration).await; + frame_requester.schedule_frame(); + }); + } else { + std::thread::spawn(move || { + std::thread::sleep(duration); + frame_requester.schedule_frame(); + }); + } + self.request_redraw(); + } + pub(crate) fn composer_is_empty(&self) -> bool { self.composer.is_empty() } @@ -627,10 +678,18 @@ impl BottomPane { } } + #[cfg(test)] pub(crate) fn take_recent_submission_images(&mut self) -> Vec { self.composer.take_recent_submission_images() } + pub(crate) fn take_recent_submission_images_with_placeholders( + &mut self, + ) -> Vec { + self.composer + .take_recent_submission_images_with_placeholders() + } + fn as_renderable(&'_ self) -> RenderableItem<'_> { if let Some(view) = self.active_view() { RenderableItem::Borrowed(view) @@ -673,9 +732,13 @@ impl Renderable for BottomPane { mod tests { use super::*; use 
crate::app_event::AppEvent; + use codex_core::protocol::Op; + use codex_protocol::protocol::SkillScope; + use crossterm::event::KeyModifiers; use insta::assert_snapshot; use ratatui::buffer::Buffer; use ratatui::layout::Rect; + use std::path::PathBuf; use tokio::sync::mpsc::unbounded_channel; fn snapshot_buffer(buf: &Buffer) -> String { @@ -942,4 +1005,109 @@ mod tests { render_snapshot(&pane, area) ); } + + #[test] + fn esc_with_skill_popup_does_not_interrupt_task() { + let (tx_raw, mut rx) = unbounded_channel::(); + let tx = AppEventSender::new(tx_raw); + let mut pane = BottomPane::new(BottomPaneParams { + app_event_tx: tx, + frame_requester: FrameRequester::test_dummy(), + has_input_focus: true, + enhanced_keys_supported: false, + placeholder_text: "Ask Codex to do anything".to_string(), + disable_paste_burst: false, + animations_enabled: true, + skills: Some(vec![SkillMetadata { + name: "test-skill".to_string(), + description: "test skill".to_string(), + short_description: None, + interface: None, + path: PathBuf::from("test-skill"), + scope: SkillScope::User, + }]), + }); + + pane.set_task_running(true); + + // Repro: a running task + skill popup + Esc should dismiss the popup, not interrupt. 
+ pane.insert_str("$"); + assert!( + pane.composer.popup_active(), + "expected skill popup after typing `$`" + ); + + pane.handle_key_event(KeyEvent::new(KeyCode::Esc, KeyModifiers::NONE)); + + while let Ok(ev) = rx.try_recv() { + assert!( + !matches!(ev, AppEvent::CodexOp(Op::Interrupt)), + "expected Esc to not send Op::Interrupt when dismissing skill popup" + ); + } + assert!( + !pane.composer.popup_active(), + "expected Esc to dismiss skill popup" + ); + } + + #[test] + fn esc_with_slash_command_popup_does_not_interrupt_task() { + let (tx_raw, mut rx) = unbounded_channel::(); + let tx = AppEventSender::new(tx_raw); + let mut pane = BottomPane::new(BottomPaneParams { + app_event_tx: tx, + frame_requester: FrameRequester::test_dummy(), + has_input_focus: true, + enhanced_keys_supported: false, + placeholder_text: "Ask Codex to do anything".to_string(), + disable_paste_burst: false, + animations_enabled: true, + skills: Some(Vec::new()), + }); + + pane.set_task_running(true); + + // Repro: a running task + slash-command popup + Esc should not interrupt the task. 
+ pane.insert_str("/"); + assert!( + pane.composer.popup_active(), + "expected command popup after typing `/`" + ); + + pane.handle_key_event(KeyEvent::new(KeyCode::Esc, KeyModifiers::NONE)); + + while let Ok(ev) = rx.try_recv() { + assert!( + !matches!(ev, AppEvent::CodexOp(Op::Interrupt)), + "expected Esc to not send Op::Interrupt while command popup is active" + ); + } + assert_eq!(pane.composer_text(), "/"); + } + + #[test] + fn esc_interrupts_running_task_when_no_popup() { + let (tx_raw, mut rx) = unbounded_channel::(); + let tx = AppEventSender::new(tx_raw); + let mut pane = BottomPane::new(BottomPaneParams { + app_event_tx: tx, + frame_requester: FrameRequester::test_dummy(), + has_input_focus: true, + enhanced_keys_supported: false, + placeholder_text: "Ask Codex to do anything".to_string(), + disable_paste_burst: false, + animations_enabled: true, + skills: Some(Vec::new()), + }); + + pane.set_task_running(true); + + pane.handle_key_event(KeyEvent::new(KeyCode::Esc, KeyModifiers::NONE)); + + assert!( + matches!(rx.try_recv(), Ok(AppEvent::CodexOp(Op::Interrupt))), + "expected Esc to send Op::Interrupt while a task is running" + ); + } } diff --git a/codex-rs/tui/src/bottom_pane/snapshots/codex_tui__bottom_pane__footer__tests__footer_shortcuts_collaboration_modes_enabled.snap b/codex-rs/tui/src/bottom_pane/snapshots/codex_tui__bottom_pane__footer__tests__footer_shortcuts_collaboration_modes_enabled.snap new file mode 100644 index 0000000000..b6d87789ad --- /dev/null +++ b/codex-rs/tui/src/bottom_pane/snapshots/codex_tui__bottom_pane__footer__tests__footer_shortcuts_collaboration_modes_enabled.snap @@ -0,0 +1,11 @@ +--- +source: tui/src/bottom_pane/footer.rs +assertion_line: 535 +expression: terminal.backend() +--- +" / for commands ! 
for shell commands " +" ctrl + j for newline tab to queue message " +" @ for file paths ctrl + v to paste images " +" ctrl + g to edit in external editor esc esc to edit previous message " +" ctrl + c to exit shift + tab to change mode " +" ctrl + t to view transcript " diff --git a/codex-rs/tui/src/bottom_pane/textarea.rs b/codex-rs/tui/src/bottom_pane/textarea.rs index 903ebe9f82..926c53601a 100644 --- a/codex-rs/tui/src/bottom_pane/textarea.rs +++ b/codex-rs/tui/src/bottom_pane/textarea.rs @@ -1,4 +1,6 @@ use crate::key_hint::is_altgr; +use codex_protocol::user_input::ByteRange; +use codex_protocol::user_input::TextElement as UserTextElement; use crossterm::event::KeyCode; use crossterm::event::KeyEvent; use crossterm::event::KeyModifiers; @@ -60,10 +62,36 @@ impl TextArea { } } - pub fn set_text(&mut self, text: &str) { + /// Replace the textarea text and clear any existing text elements. + pub fn set_text_clearing_elements(&mut self, text: &str) { + self.set_text_inner(text, None); + } + + /// Replace the textarea text and set the provided text elements. + pub fn set_text_with_elements(&mut self, text: &str, elements: &[UserTextElement]) { + self.set_text_inner(text, Some(elements)); + } + + fn set_text_inner(&mut self, text: &str, elements: Option<&[UserTextElement]>) { + // Stage 1: replace the raw text and keep the cursor in a safe byte range. self.text = text.to_string(); self.cursor_pos = self.cursor_pos.clamp(0, self.text.len()); + // Stage 2: rebuild element ranges from scratch against the new text. 
self.elements.clear(); + if let Some(elements) = elements { + for elem in elements { + let mut start = elem.byte_range.start.min(self.text.len()); + let mut end = elem.byte_range.end.min(self.text.len()); + start = self.clamp_pos_to_char_boundary(start); + end = self.clamp_pos_to_char_boundary(end); + if start >= end { + continue; + } + self.elements.push(TextElement { range: start..end }); + } + self.elements.sort_by_key(|e| e.range.start); + } + // Stage 3: clamp the cursor and reset derived state tied to the prior content. self.cursor_pos = self.clamp_pos_to_nearest_boundary(self.cursor_pos); self.wrap_cache.replace(None); self.preferred_col = None; @@ -722,6 +750,22 @@ impl TextArea { .collect() } + pub fn text_elements(&self) -> Vec { + self.elements + .iter() + .map(|e| { + let placeholder = self.text.get(e.range.clone()).map(str::to_string); + UserTextElement { + byte_range: ByteRange { + start: e.range.start, + end: e.range.end, + }, + placeholder, + } + }) + .collect() + } + pub fn element_payload_starting_at(&self, pos: usize) -> Option { let pos = pos.min(self.text.len()); let elem = self.elements.iter().find(|e| e.range.start == pos)?; @@ -1251,7 +1295,7 @@ mod tests { let mut t = TextArea::new(); t.insert_str("abcd"); t.set_cursor(1); - t.set_text("你"); + t.set_text_clearing_elements("你"); assert_eq!(t.cursor(), 0); t.insert_str("a"); assert_eq!(t.text(), "a你"); @@ -1933,7 +1977,7 @@ mod tests { for _ in 0..base_len { base.push_str(&rand_grapheme(&mut rng)); } - ta.set_text(&base); + ta.set_text_clearing_elements(&base); // Choose a valid char boundary for initial cursor let mut boundaries: Vec = vec![0]; boundaries.extend(ta.text().char_indices().map(|(i, _)| i).skip(1)); diff --git a/codex-rs/tui/src/chatwidget.rs b/codex-rs/tui/src/chatwidget.rs index bab27efd0a..0131887a1e 100644 --- a/codex-rs/tui/src/chatwidget.rs +++ b/codex-rs/tui/src/chatwidget.rs @@ -92,7 +92,9 @@ use codex_core::skills::model::SkillMetadata; use codex_protocol::ThreadId; use 
codex_protocol::account::PlanType; use codex_protocol::approvals::ElicitationRequestEvent; +use codex_protocol::models::local_image_label_text; use codex_protocol::parse_command::ParsedCommand; +use codex_protocol::user_input::TextElement; use codex_protocol::user_input::UserInput; use crossterm::event::KeyCode; use crossterm::event::KeyEvent; @@ -128,6 +130,7 @@ use crate::bottom_pane::CancellationEvent; use crate::bottom_pane::DOUBLE_PRESS_QUIT_SHORTCUT_ENABLED; use crate::bottom_pane::ExperimentalFeaturesView; use crate::bottom_pane::InputResult; +use crate::bottom_pane::LocalImageAttachment; use crate::bottom_pane::QUIT_SHORTCUT_TIMEOUT; use crate::bottom_pane::SelectionAction; use crate::bottom_pane::SelectionItem; @@ -136,6 +139,7 @@ use crate::bottom_pane::custom_prompt_view::CustomPromptView; use crate::bottom_pane::popup_consts::standard_popup_hint_line; use crate::clipboard_paste::paste_image_to_temp_png; use crate::collab; +use crate::collaboration_modes; use crate::diff_render::display_path_for; use crate::exec_cell::CommandOutput; use crate::exec_cell::ExecCell; @@ -345,8 +349,7 @@ pub(crate) struct ChatWidgetInit { pub(crate) config: Config, pub(crate) frame_requester: FrameRequester, pub(crate) app_event_tx: AppEventSender, - pub(crate) initial_prompt: Option, - pub(crate) initial_images: Vec, + pub(crate) initial_user_message: Option, pub(crate) enhanced_keys_supported: bool, pub(crate) auth_manager: Arc, pub(crate) models_manager: Arc, @@ -371,6 +374,8 @@ pub(crate) enum ExternalEditorState { Active, } +type CollaborationModeSelection = collaboration_modes::Selection; + /// Maintains the per-session UI state and interaction state machines for the chat screen. /// /// `ChatWidget` owns the state derived from the protocol event stream (history cells, streaming @@ -400,6 +405,11 @@ pub(crate) struct ChatWidget { active_cell_revision: u64, config: Config, model: Option, + /// Current UI selection for collaboration modes. 
+ /// + /// This selection is only meaningful when `Feature::CollaborationModes` is enabled; when the + /// feature is disabled, the value is effectively inert. + collaboration_mode: CollaborationModeSelection, auth_manager: Arc, models_manager: Arc, session_header: SessionHeader, @@ -507,16 +517,19 @@ pub(crate) struct ActiveCellTranscriptKey { pub(crate) animation_tick: Option, } -struct UserMessage { +pub(crate) struct UserMessage { text: String, - image_paths: Vec, + local_images: Vec, + text_elements: Vec, } impl From for UserMessage { fn from(text: String) -> Self { Self { text, - image_paths: Vec::new(), + local_images: Vec::new(), + // Plain text conversion has no UI element ranges. + text_elements: Vec::new(), } } } @@ -525,16 +538,107 @@ impl From<&str> for UserMessage { fn from(text: &str) -> Self { Self { text: text.to_string(), - image_paths: Vec::new(), + local_images: Vec::new(), + // Plain text conversion has no UI element ranges. + text_elements: Vec::new(), } } } -fn create_initial_user_message(text: String, image_paths: Vec) -> Option { - if text.is_empty() && image_paths.is_empty() { +pub(crate) fn create_initial_user_message( + text: Option, + local_image_paths: Vec, + text_elements: Vec, +) -> Option { + let text = text.unwrap_or_default(); + if text.is_empty() && local_image_paths.is_empty() { None } else { - Some(UserMessage { text, image_paths }) + let local_images = local_image_paths + .into_iter() + .enumerate() + .map(|(idx, path)| LocalImageAttachment { + placeholder: local_image_label_text(idx + 1), + path, + }) + .collect(); + Some(UserMessage { + text, + local_images, + text_elements, + }) + } +} + +// When merging multiple queued drafts (e.g., after interrupt), each draft starts numbering +// its attachments at [Image #1]. 
Reassign placeholder labels based on the attachment list so +// the combined local_image_paths order matches the labels, even if placeholders were moved +// in the text (e.g., [Image #2] appearing before [Image #1]). +fn remap_placeholders_for_message(message: UserMessage, next_label: &mut usize) -> UserMessage { + let UserMessage { + text, + text_elements, + local_images, + } = message; + if local_images.is_empty() { + return UserMessage { + text, + text_elements, + local_images, + }; + } + + let mut mapping: HashMap = HashMap::new(); + let mut remapped_images = Vec::new(); + for attachment in local_images { + let new_placeholder = local_image_label_text(*next_label); + *next_label += 1; + mapping.insert(attachment.placeholder.clone(), new_placeholder.clone()); + remapped_images.push(LocalImageAttachment { + placeholder: new_placeholder, + path: attachment.path, + }); + } + + let mut elements = text_elements; + elements.sort_by_key(|elem| elem.byte_range.start); + + let mut cursor = 0usize; + let mut rebuilt = String::new(); + let mut rebuilt_elements = Vec::new(); + for mut elem in elements { + let start = elem.byte_range.start.min(text.len()); + let end = elem.byte_range.end.min(text.len()); + if let Some(segment) = text.get(cursor..start) { + rebuilt.push_str(segment); + } + + let original = text.get(start..end).unwrap_or(""); + let placeholder_key = elem.placeholder.as_deref().unwrap_or(original); + let replacement = mapping + .get(placeholder_key) + .map(String::as_str) + .unwrap_or(original); + + let elem_start = rebuilt.len(); + rebuilt.push_str(replacement); + let elem_end = rebuilt.len(); + + if let Some(remapped) = mapping.get(placeholder_key) { + elem.placeholder = Some(remapped.clone()); + } + elem.byte_range = (elem_start..elem_end).into(); + rebuilt_elements.push(elem); + cursor = end; + } + if let Some(segment) = text.get(cursor..) 
{ + rebuilt.push_str(segment); + } + + UserMessage { + text: rebuilt, + local_images: remapped_images, + text_elements: rebuilt_elements, } } @@ -994,31 +1098,76 @@ impl ChatWidget { )); } - // If any messages were queued during the task, restore them into the composer. - if !self.queued_user_messages.is_empty() { - let queued_text = self - .queued_user_messages + if let Some(combined) = self.drain_queued_messages_for_restore() { + let combined_local_image_paths = combined + .local_images .iter() - .map(|m| m.text.clone()) - .collect::>() - .join("\n"); - let existing_text = self.bottom_pane.composer_text(); - let combined = if existing_text.is_empty() { - queued_text - } else if queued_text.is_empty() { - existing_text - } else { - format!("{queued_text}\n{existing_text}") - }; - self.bottom_pane.set_composer_text(combined); - // Clear the queue and update the status indicator list. - self.queued_user_messages.clear(); + .map(|img| img.path.clone()) + .collect(); + self.bottom_pane.set_composer_text( + combined.text, + combined.text_elements, + combined_local_image_paths, + ); self.refresh_queued_user_messages(); } self.request_redraw(); } + /// Merge queued drafts (plus the current composer state) into a single message for restore. + /// + /// Each queued draft numbers attachments from `[Image #1]`. When we concatenate drafts, we + /// must renumber placeholders in a stable order so the merged attachment list stays aligned + /// with the labels embedded in text. This helper drains the queue, remaps placeholders, and + /// fixes text element byte ranges as content is appended. Returns `None` when there is nothing + /// to restore. 
+ fn drain_queued_messages_for_restore(&mut self) -> Option { + if self.queued_user_messages.is_empty() { + return None; + } + + let existing_message = UserMessage { + text: self.bottom_pane.composer_text(), + text_elements: self.bottom_pane.composer_text_elements(), + local_images: self.bottom_pane.composer_local_images(), + }; + + let mut to_merge: Vec = self.queued_user_messages.drain(..).collect(); + if !existing_message.text.is_empty() || !existing_message.local_images.is_empty() { + to_merge.push(existing_message); + } + + let mut combined = UserMessage { + text: String::new(), + text_elements: Vec::new(), + local_images: Vec::new(), + }; + let mut combined_offset = 0usize; + let mut next_image_label = 1usize; + + for (idx, message) in to_merge.into_iter().enumerate() { + if idx > 0 { + combined.text.push('\n'); + combined_offset += 1; + } + let message = remap_placeholders_for_message(message, &mut next_image_label); + let base = combined_offset; + combined.text.push_str(&message.text); + combined_offset += message.text.len(); + combined + .text_elements + .extend(message.text_elements.into_iter().map(|mut elem| { + elem.byte_range.start += base; + elem.byte_range.end += base; + elem + })); + combined.local_images.extend(message.local_images); + } + + Some(combined) + } + fn on_plan_update(&mut self, update: UpdatePlanArgs) { self.add_to_history(history_cell::new_plan_update(update)); } @@ -1629,8 +1778,7 @@ impl ChatWidget { config, frame_requester, app_event_tx, - initial_prompt, - initial_images, + initial_user_message, enhanced_keys_supported, auth_manager, models_manager, @@ -1673,13 +1821,11 @@ impl ChatWidget { active_cell_revision: 0, config, model, + collaboration_mode: CollaborationModeSelection::default(), auth_manager, models_manager, session_header: SessionHeader::new(model_for_header), - initial_user_message: create_initial_user_message( - initial_prompt.unwrap_or_default(), - initial_images, - ), + initial_user_message, token_info: None, 
rate_limit_snapshot: None, plan_type: None, @@ -1722,6 +1868,9 @@ impl ChatWidget { widget .bottom_pane .set_steer_enabled(widget.config.features.enabled(Feature::Steer)); + widget.bottom_pane.set_collaboration_modes_enabled( + widget.config.features.enabled(Feature::CollaborationModes), + ); widget } @@ -1736,8 +1885,7 @@ impl ChatWidget { config, frame_requester, app_event_tx, - initial_prompt, - initial_images, + initial_user_message, enhanced_keys_supported, auth_manager, models_manager, @@ -1772,13 +1920,11 @@ impl ChatWidget { active_cell_revision: 0, config, model: Some(header_model.clone()), + collaboration_mode: CollaborationModeSelection::default(), auth_manager, models_manager, session_header: SessionHeader::new(header_model), - initial_user_message: create_initial_user_message( - initial_prompt.unwrap_or_default(), - initial_images, - ), + initial_user_message, token_info: None, rate_limit_snapshot: None, plan_type: None, @@ -1821,6 +1967,9 @@ impl ChatWidget { widget .bottom_pane .set_steer_enabled(widget.config.features.enabled(Feature::Steer)); + widget.bottom_pane.set_collaboration_modes_enabled( + widget.config.features.enabled(Feature::CollaborationModes), + ); widget } @@ -1885,6 +2034,16 @@ impl ChatWidget { } match key_event { + KeyEvent { + code: KeyCode::BackTab, + kind: KeyEventKind::Press, + .. + } if self.collaboration_modes_enabled() + && !self.bottom_pane.is_task_running() + && self.bottom_pane.no_modal_or_popup_active() => + { + self.cycle_collaboration_mode(); + } KeyEvent { code: KeyCode::Up, modifiers: KeyModifiers::ALT, @@ -1893,46 +2052,64 @@ impl ChatWidget { } if !self.queued_user_messages.is_empty() => { // Prefer the most recently queued item. 
if let Some(user_message) = self.queued_user_messages.pop_back() { - self.bottom_pane.set_composer_text(user_message.text); + let local_image_paths = user_message + .local_images + .iter() + .map(|img| img.path.clone()) + .collect(); + self.bottom_pane.set_composer_text( + user_message.text, + user_message.text_elements, + local_image_paths, + ); self.refresh_queued_user_messages(); self.request_redraw(); } } - _ => { - match self.bottom_pane.handle_key_event(key_event) { - InputResult::Submitted(text) => { - // Enter always sends messages immediately (bypasses queue check) - // Clear any reasoning status header when submitting a new message + _ => match self.bottom_pane.handle_key_event(key_event) { + InputResult::Submitted { + text, + text_elements, + } => { + let user_message = UserMessage { + text, + local_images: self + .bottom_pane + .take_recent_submission_images_with_placeholders(), + text_elements, + }; + if self.is_session_configured() { + // Submitted is only emitted when steer is enabled (Enter sends immediately). + // Reset any reasoning header only when we are actually submitting a turn. 
self.reasoning_buffer.clear(); self.full_reasoning_buffer.clear(); self.set_status_header(String::from("Working")); - let user_message = UserMessage { - text, - image_paths: self.bottom_pane.take_recent_submission_images(), - }; - if !self.is_session_configured() { - self.queue_user_message(user_message); - } else { - self.submit_user_message(user_message); - } - } - InputResult::Queued(text) => { - // Tab queues the message if a task is running, otherwise submits immediately - let user_message = UserMessage { - text, - image_paths: self.bottom_pane.take_recent_submission_images(), - }; + self.submit_user_message(user_message); + } else { self.queue_user_message(user_message); } - InputResult::Command(cmd) => { - self.dispatch_command(cmd); - } - InputResult::CommandWithArgs(cmd, args) => { - self.dispatch_command_with_args(cmd, args); - } - InputResult::None => {} } - } + InputResult::Queued { + text, + text_elements, + } => { + let user_message = UserMessage { + text, + local_images: self + .bottom_pane + .take_recent_submission_images_with_placeholders(), + text_elements, + }; + self.queue_user_message(user_message); + } + InputResult::Command(cmd) => { + self.dispatch_command(cmd); + } + InputResult::CommandWithArgs(cmd, args) => { + self.dispatch_command_with_args(cmd, args); + } + InputResult::None => {} + }, } } @@ -2022,6 +2199,11 @@ impl ChatWidget { SlashCommand::Model => { self.open_model_popup(); } + SlashCommand::Collab => { + if self.collaboration_modes_enabled() { + self.cycle_collaboration_mode(); + } + } SlashCommand::Approvals => { self.open_approvals_popup(); } @@ -2178,6 +2360,16 @@ impl ChatWidget { let trimmed = args.trim(); match cmd { + SlashCommand::Collab if !trimmed.is_empty() => { + if let Some(selection) = collaboration_modes::parse_selection(trimmed) { + self.set_collaboration_mode(selection); + } else { + self.add_error_message(format!( + "Unknown collaboration mode '{trimmed}'. Try: plan, pair, execute." 
+ )); + self.request_redraw(); + } + } SlashCommand::Review if !trimmed.is_empty() => { self.submit_op(Op::Review { review_request: ReviewRequest { @@ -2255,8 +2447,20 @@ impl ChatWidget { } fn submit_user_message(&mut self, user_message: UserMessage) { - let UserMessage { text, image_paths } = user_message; - if text.is_empty() && image_paths.is_empty() { + let Some(model) = self.current_model().or(self.config.model.as_deref()) else { + tracing::warn!("cannot submit user message before model is known; queueing"); + self.queued_user_messages.push_front(user_message); + self.refresh_queued_user_messages(); + return; + }; + let model = model.to_string(); + + let UserMessage { + text, + local_images, + text_elements, + } = user_message; + if text.is_empty() && local_images.is_empty() { return; } @@ -2280,15 +2484,16 @@ impl ChatWidget { return; } - for path in image_paths { - items.push(UserInput::LocalImage { path }); + for image in &local_images { + items.push(UserInput::LocalImage { + path: image.path.clone(), + }); } if !text.is_empty() { - // TODO: Thread text element ranges from the composer input. Empty keeps old behavior. 
items.push(UserInput::Text { text: text.clone(), - text_elements: Vec::new(), + text_elements: text_elements.clone(), }); } @@ -2302,14 +2507,29 @@ impl ChatWidget { } } - self.codex_op_tx - .send(Op::UserInput { - items, - final_output_json_schema: None, - }) - .unwrap_or_else(|e| { - tracing::error!("failed to send message: {e}"); - }); + let collaboration_mode = self.collaboration_modes_enabled().then(|| { + collaboration_modes::resolve_mode_or_fallback( + self.models_manager.as_ref(), + self.collaboration_mode, + model.as_str(), + self.config.model_reasoning_effort, + ) + }); + let op = Op::UserTurn { + items, + cwd: self.config.cwd.clone(), + approval_policy: self.config.approval_policy.value(), + sandbox_policy: self.config.sandbox_policy.get().clone(), + model, + effort: self.config.model_reasoning_effort, + summary: self.config.model_reasoning_summary, + final_output_json_schema: None, + collaboration_mode, + }; + + self.codex_op_tx.send(op).unwrap_or_else(|e| { + tracing::error!("failed to send message: {e}"); + }); // Persist the text to cross-session message history. if !text.is_empty() { @@ -2322,7 +2542,12 @@ impl ChatWidget { // Only show the text portion in conversation history. 
if !text.is_empty() { - self.add_to_history(history_cell::new_user_prompt(text)); + let local_image_paths = local_images.into_iter().map(|img| img.path).collect(); + self.add_to_history(history_cell::new_user_prompt( + text, + text_elements, + local_image_paths, + )); } self.needs_final_message_separator = false; @@ -2479,7 +2704,8 @@ impl ChatWidget { | EventMsg::ItemCompleted(_) | EventMsg::AgentMessageContentDelta(_) | EventMsg::ReasoningContentDelta(_) - | EventMsg::ReasoningRawContentDelta(_) => {} + | EventMsg::ReasoningRawContentDelta(_) + | EventMsg::RequestUserInput(_) => {} } } @@ -2537,10 +2763,16 @@ impl ChatWidget { } fn on_user_message_event(&mut self, event: UserMessageEvent) { - let message = event.message.trim(); - if !message.is_empty() { - self.add_to_history(history_cell::new_user_prompt(message.to_string())); + if !event.message.trim().is_empty() { + self.add_to_history(history_cell::new_user_prompt( + event.message, + event.text_elements, + event.local_images, + )); } + + // User messages reset separator state so the next agent response doesn't add a stray break. + self.needs_final_message_separator = false; } /// Exit the UI immediately without waiting for shutdown. 
@@ -2633,6 +2865,11 @@ impl ChatWidget { let total_usage = token_info .map(|ti| &ti.total_token_usage) .unwrap_or(&default_usage); + let collaboration_mode = if self.collaboration_modes_enabled() { + Some(self.collaboration_mode.label()) + } else { + None + }; self.add_to_history(crate::status::new_status_output( &self.config, self.auth_manager.as_ref(), @@ -2644,6 +2881,7 @@ impl ChatWidget { self.plan_type, Local::now(), self.model_display_name(), + collaboration_mode, )); } @@ -3893,6 +4131,8 @@ impl ChatWidget { } if feature == Feature::Steer { self.bottom_pane.set_steer_enabled(enabled); + } else if feature == Feature::CollaborationModes { + self.bottom_pane.set_collaboration_modes_enabled(enabled); } } @@ -3930,10 +4170,39 @@ impl ChatWidget { self.model = Some(model.to_string()); } + fn cycle_collaboration_mode(&mut self) { + if !self.collaboration_modes_enabled() { + return; + } + let next = self.collaboration_mode.next(); + self.set_collaboration_mode(next); + } + + /// Update the selected collaboration mode. + /// + /// When collaboration modes are enabled, the current selection is attached to *every* + /// submission as `Op::UserTurn { collaboration_mode: Some(...) }`. + fn set_collaboration_mode(&mut self, selection: CollaborationModeSelection) { + if !self.collaboration_modes_enabled() { + return; + } + const FLASH_DURATION: Duration = Duration::from_secs(1); + + self.collaboration_mode = selection; + + let flash = collaboration_modes::flash_line(selection); + self.bottom_pane.flash_footer_hint(flash, FLASH_DURATION); + self.request_redraw(); + } + fn current_model(&self) -> Option<&str> { self.model.as_deref() } + fn collaboration_modes_enabled(&self) -> bool { + self.config.features.enabled(Feature::CollaborationModes) + } + fn model_display_name(&self) -> &str { self.model.as_deref().unwrap_or(DEFAULT_MODEL_DISPLAY_NAME) } @@ -4126,8 +4395,14 @@ impl ChatWidget { } /// Replace the composer content with the provided text and reset cursor. 
- pub(crate) fn set_composer_text(&mut self, text: String) { - self.bottom_pane.set_composer_text(text); + pub(crate) fn set_composer_text( + &mut self, + text: String, + text_elements: Vec, + local_image_paths: Vec, + ) { + self.bottom_pane + .set_composer_text(text, text_elements, local_image_paths); } pub(crate) fn show_esc_backtrack_hint(&mut self) { diff --git a/codex-rs/tui/src/chatwidget/tests.rs b/codex-rs/tui/src/chatwidget/tests.rs index 2bd73633aa..210c3574c2 100644 --- a/codex-rs/tui/src/chatwidget/tests.rs +++ b/codex-rs/tui/src/chatwidget/tests.rs @@ -8,6 +8,8 @@ use super::*; use crate::app_event::AppEvent; use crate::app_event::ExitMode; use crate::app_event_sender::AppEventSender; +use crate::bottom_pane::LocalImageAttachment; +use crate::history_cell::UserHistoryCell; use crate::test_backend::VT100Backend; use crate::tui::FrameRequester; use assert_matches::assert_matches; @@ -59,6 +61,7 @@ use codex_core::protocol::ViewImageToolCallEvent; use codex_core::protocol::WarningEvent; use codex_protocol::ThreadId; use codex_protocol::account::PlanType; +use codex_protocol::config_types::CollaborationMode; use codex_protocol::openai_models::ModelPreset; use codex_protocol::openai_models::ReasoningEffortPreset; use codex_protocol::parse_command::ParsedCommand; @@ -66,6 +69,8 @@ use codex_protocol::plan_tool::PlanItemArg; use codex_protocol::plan_tool::StepStatus; use codex_protocol::plan_tool::UpdatePlanArgs; use codex_protocol::protocol::CodexErrorInfo; +use codex_protocol::user_input::TextElement; +use codex_protocol::user_input::UserInput; use codex_utils_absolute_path::AbsolutePathBuf; use crossterm::event::KeyCode; use crossterm::event::KeyEvent; @@ -181,6 +186,364 @@ async fn resumed_initial_messages_render_history() { ); } +#[tokio::test] +async fn replayed_user_message_preserves_text_elements_and_local_images() { + let (mut chat, mut rx, _ops) = make_chatwidget_manual(None).await; + + let placeholder = "[Image #1]"; + let message = 
format!("{placeholder} replayed"); + let text_elements = vec![TextElement { + byte_range: (0..placeholder.len()).into(), + placeholder: Some(placeholder.to_string()), + }]; + let local_images = vec![PathBuf::from("/tmp/replay.png")]; + + let conversation_id = ThreadId::new(); + let rollout_file = NamedTempFile::new().unwrap(); + let configured = codex_core::protocol::SessionConfiguredEvent { + session_id: conversation_id, + forked_from_id: None, + model: "test-model".to_string(), + model_provider_id: "test-provider".to_string(), + approval_policy: AskForApproval::Never, + sandbox_policy: SandboxPolicy::ReadOnly, + cwd: PathBuf::from("/home/user/project"), + reasoning_effort: Some(ReasoningEffortConfig::default()), + history_log_id: 0, + history_entry_count: 0, + initial_messages: Some(vec![EventMsg::UserMessage(UserMessageEvent { + message: message.clone(), + images: None, + text_elements: text_elements.clone(), + local_images: local_images.clone(), + })]), + rollout_path: rollout_file.path().to_path_buf(), + }; + + chat.handle_codex_event(Event { + id: "initial".into(), + msg: EventMsg::SessionConfigured(configured), + }); + + let mut user_cell = None; + while let Ok(ev) = rx.try_recv() { + if let AppEvent::InsertHistoryCell(cell) = ev + && let Some(cell) = cell.as_any().downcast_ref::() + { + user_cell = Some(( + cell.message.clone(), + cell.text_elements.clone(), + cell.local_image_paths.clone(), + )); + break; + } + } + + let (stored_message, stored_elements, stored_images) = + user_cell.expect("expected a replayed user history cell"); + assert_eq!(stored_message, message); + assert_eq!(stored_elements, text_elements); + assert_eq!(stored_images, local_images); +} + +#[tokio::test] +async fn submission_preserves_text_elements_and_local_images() { + let (mut chat, mut rx, mut op_rx) = make_chatwidget_manual(None).await; + + let conversation_id = ThreadId::new(); + let rollout_file = NamedTempFile::new().unwrap(); + let configured = 
codex_core::protocol::SessionConfiguredEvent { + session_id: conversation_id, + forked_from_id: None, + model: "test-model".to_string(), + model_provider_id: "test-provider".to_string(), + approval_policy: AskForApproval::Never, + sandbox_policy: SandboxPolicy::ReadOnly, + cwd: PathBuf::from("/home/user/project"), + reasoning_effort: Some(ReasoningEffortConfig::default()), + history_log_id: 0, + history_entry_count: 0, + initial_messages: None, + rollout_path: rollout_file.path().to_path_buf(), + }; + chat.handle_codex_event(Event { + id: "initial".into(), + msg: EventMsg::SessionConfigured(configured), + }); + drain_insert_history(&mut rx); + + let placeholder = "[Image #1]"; + let text = format!("{placeholder} submit"); + let text_elements = vec![TextElement { + byte_range: (0..placeholder.len()).into(), + placeholder: Some(placeholder.to_string()), + }]; + let local_images = vec![PathBuf::from("/tmp/submitted.png")]; + + chat.bottom_pane + .set_composer_text(text.clone(), text_elements.clone(), local_images.clone()); + chat.handle_key_event(KeyEvent::new(KeyCode::Enter, KeyModifiers::NONE)); + + let items = match next_submit_op(&mut op_rx) { + Op::UserTurn { items, .. 
} => items, + other => panic!("expected Op::UserTurn, got {other:?}"), + }; + assert_eq!(items.len(), 2); + assert_eq!( + items[0], + UserInput::LocalImage { + path: local_images[0].clone() + } + ); + assert_eq!( + items[1], + UserInput::Text { + text: text.clone(), + text_elements: text_elements.clone(), + } + ); + + let mut user_cell = None; + while let Ok(ev) = rx.try_recv() { + if let AppEvent::InsertHistoryCell(cell) = ev + && let Some(cell) = cell.as_any().downcast_ref::() + { + user_cell = Some(( + cell.message.clone(), + cell.text_elements.clone(), + cell.local_image_paths.clone(), + )); + break; + } + } + + let (stored_message, stored_elements, stored_images) = + user_cell.expect("expected submitted user history cell"); + assert_eq!(stored_message, text); + assert_eq!(stored_elements, text_elements); + assert_eq!(stored_images, local_images); +} + +#[tokio::test] +async fn interrupted_turn_restores_queued_messages_with_images_and_elements() { + let (mut chat, _rx, _op_rx) = make_chatwidget_manual(None).await; + + let first_placeholder = "[Image #1]"; + let first_text = format!("{first_placeholder} first"); + let first_elements = vec![TextElement { + byte_range: (0..first_placeholder.len()).into(), + placeholder: Some(first_placeholder.to_string()), + }]; + let first_images = [PathBuf::from("/tmp/first.png")]; + + let second_placeholder = "[Image #1]"; + let second_text = format!("{second_placeholder} second"); + let second_elements = vec![TextElement { + byte_range: (0..second_placeholder.len()).into(), + placeholder: Some(second_placeholder.to_string()), + }]; + let second_images = [PathBuf::from("/tmp/second.png")]; + + let existing_placeholder = "[Image #1]"; + let existing_text = format!("{existing_placeholder} existing"); + let existing_elements = vec![TextElement { + byte_range: (0..existing_placeholder.len()).into(), + placeholder: Some(existing_placeholder.to_string()), + }]; + let existing_images = vec![PathBuf::from("/tmp/existing.png")]; + + 
chat.queued_user_messages.push_back(UserMessage { + text: first_text, + local_images: vec![LocalImageAttachment { + placeholder: first_placeholder.to_string(), + path: first_images[0].clone(), + }], + text_elements: first_elements, + }); + chat.queued_user_messages.push_back(UserMessage { + text: second_text, + local_images: vec![LocalImageAttachment { + placeholder: second_placeholder.to_string(), + path: second_images[0].clone(), + }], + text_elements: second_elements, + }); + chat.refresh_queued_user_messages(); + + chat.bottom_pane + .set_composer_text(existing_text, existing_elements, existing_images.clone()); + + // When interrupted, queued messages are merged into the composer; image placeholders + // must be renumbered to match the combined local image list. + chat.handle_codex_event(Event { + id: "interrupt".into(), + msg: EventMsg::TurnAborted(codex_core::protocol::TurnAbortedEvent { + reason: TurnAbortReason::Interrupted, + }), + }); + + let first = "[Image #1] first".to_string(); + let second = "[Image #2] second".to_string(); + let third = "[Image #3] existing".to_string(); + let expected_text = format!("{first}\n{second}\n{third}"); + assert_eq!(chat.bottom_pane.composer_text(), expected_text); + + let first_start = 0; + let second_start = first.len() + 1; + let third_start = second_start + second.len() + 1; + let expected_elements = vec![ + TextElement { + byte_range: (first_start..first_start + "[Image #1]".len()).into(), + placeholder: Some("[Image #1]".to_string()), + }, + TextElement { + byte_range: (second_start..second_start + "[Image #2]".len()).into(), + placeholder: Some("[Image #2]".to_string()), + }, + TextElement { + byte_range: (third_start..third_start + "[Image #3]".len()).into(), + placeholder: Some("[Image #3]".to_string()), + }, + ]; + assert_eq!(chat.bottom_pane.composer_text_elements(), expected_elements); + assert_eq!( + chat.bottom_pane.composer_local_image_paths(), + vec![ + first_images[0].clone(), + second_images[0].clone(), 
+ existing_images[0].clone(), + ] + ); +} + +#[tokio::test] +async fn remap_placeholders_uses_attachment_labels() { + let placeholder_one = "[Image #1]"; + let placeholder_two = "[Image #2]"; + let text = format!("{placeholder_two} before {placeholder_one}"); + let elements = vec![ + TextElement { + byte_range: (0..placeholder_two.len()).into(), + placeholder: Some(placeholder_two.to_string()), + }, + TextElement { + byte_range: ("[Image #2] before ".len().."[Image #2] before [Image #1]".len()).into(), + placeholder: Some(placeholder_one.to_string()), + }, + ]; + + let attachments = vec![ + LocalImageAttachment { + placeholder: placeholder_one.to_string(), + path: PathBuf::from("/tmp/one.png"), + }, + LocalImageAttachment { + placeholder: placeholder_two.to_string(), + path: PathBuf::from("/tmp/two.png"), + }, + ]; + let message = UserMessage { + text, + text_elements: elements, + local_images: attachments, + }; + let mut next_label = 3usize; + let remapped = remap_placeholders_for_message(message, &mut next_label); + + assert_eq!(remapped.text, "[Image #4] before [Image #3]"); + assert_eq!( + remapped.text_elements, + vec![ + TextElement { + byte_range: (0.."[Image #4]".len()).into(), + placeholder: Some("[Image #4]".to_string()), + }, + TextElement { + byte_range: ("[Image #4] before ".len().."[Image #4] before [Image #3]".len()) + .into(), + placeholder: Some("[Image #3]".to_string()), + }, + ] + ); + assert_eq!( + remapped.local_images, + vec![ + LocalImageAttachment { + placeholder: "[Image #3]".to_string(), + path: PathBuf::from("/tmp/one.png"), + }, + LocalImageAttachment { + placeholder: "[Image #4]".to_string(), + path: PathBuf::from("/tmp/two.png"), + }, + ] + ); +} + +#[tokio::test] +async fn remap_placeholders_uses_byte_ranges_when_placeholder_missing() { + let placeholder_one = "[Image #1]"; + let placeholder_two = "[Image #2]"; + let text = format!("{placeholder_two} before {placeholder_one}"); + let elements = vec![ + TextElement { + byte_range: 
(0..placeholder_two.len()).into(), + placeholder: None, + }, + TextElement { + byte_range: ("[Image #2] before ".len().."[Image #2] before [Image #1]".len()).into(), + placeholder: None, + }, + ]; + + let attachments = vec![ + LocalImageAttachment { + placeholder: placeholder_one.to_string(), + path: PathBuf::from("/tmp/one.png"), + }, + LocalImageAttachment { + placeholder: placeholder_two.to_string(), + path: PathBuf::from("/tmp/two.png"), + }, + ]; + let message = UserMessage { + text, + text_elements: elements, + local_images: attachments, + }; + let mut next_label = 3usize; + let remapped = remap_placeholders_for_message(message, &mut next_label); + + assert_eq!(remapped.text, "[Image #4] before [Image #3]"); + assert_eq!( + remapped.text_elements, + vec![ + TextElement { + byte_range: (0.."[Image #4]".len()).into(), + placeholder: Some("[Image #4]".to_string()), + }, + TextElement { + byte_range: ("[Image #4] before ".len().."[Image #4] before [Image #3]".len()) + .into(), + placeholder: Some("[Image #3]".to_string()), + }, + ] + ); + assert_eq!( + remapped.local_images, + vec![ + LocalImageAttachment { + placeholder: "[Image #3]".to_string(), + path: PathBuf::from("/tmp/one.png"), + }, + LocalImageAttachment { + placeholder: "[Image #4]".to_string(), + path: PathBuf::from("/tmp/two.png"), + }, + ] + ); +} + /// Entering review mode uses the hint provided by the review request. 
#[tokio::test] async fn entered_review_mode_uses_request_hint() { @@ -351,8 +714,7 @@ async fn helpers_are_available_and_do_not_panic() { config: cfg, frame_requester: FrameRequester::test_dummy(), app_event_tx: tx, - initial_prompt: None, - initial_images: Vec::new(), + initial_user_message: None, enhanced_keys_supported: false, auth_manager, models_manager: thread_manager.get_models_manager(), @@ -404,6 +766,7 @@ async fn make_chatwidget_manual( active_cell_revision: 0, config: cfg, model: Some(resolved_model.clone()), + collaboration_mode: CollaborationModeSelection::default(), auth_manager: auth_manager.clone(), models_manager: Arc::new(ModelsManager::new(codex_home, auth_manager)), session_header: SessionHeader::new(resolved_model), @@ -449,6 +812,19 @@ async fn make_chatwidget_manual( (widget, rx, op_rx) } +// ChatWidget may emit other `Op`s (e.g. history/logging updates) on the same channel; this helper +// filters until we see a submission op. +fn next_submit_op(op_rx: &mut tokio::sync::mpsc::UnboundedReceiver) -> Op { + loop { + match op_rx.try_recv() { + Ok(op @ Op::UserTurn { .. }) => return op, + Ok(_) => continue, + Err(TryRecvError::Empty) => panic!("expected a submit op but queue was empty"), + Err(TryRecvError::Disconnected) => panic!("expected submit op but channel closed"), + } + } +} + fn set_chatgpt_auth(chat: &mut ChatWidget) { chat.auth_manager = AuthManager::from_auth_for_testing(CodexAuth::create_dummy_chatgpt_auth_for_testing()); @@ -1069,7 +1445,8 @@ async fn enqueueing_history_prompt_multiple_times_is_stable() { chat.thread_id = Some(ThreadId::new()); // Submit an initial prompt to seed history. - chat.bottom_pane.set_composer_text("repeat me".to_string()); + chat.bottom_pane + .set_composer_text("repeat me".to_string(), Vec::new(), Vec::new()); chat.handle_key_event(KeyEvent::new(KeyCode::Enter, KeyModifiers::NONE)); // Simulate an active task so further submissions are queued. 
@@ -1103,7 +1480,7 @@ async fn streaming_final_answer_keeps_task_running_state() { assert!(chat.bottom_pane.status_widget().is_none()); chat.bottom_pane - .set_composer_text("queued submission".to_string()); + .set_composer_text("queued submission".to_string(), Vec::new(), Vec::new()); chat.handle_key_event(KeyEvent::new(KeyCode::Tab, KeyModifiers::NONE)); assert_eq!(chat.queued_user_messages.len(), 1); @@ -1511,6 +1888,107 @@ async fn slash_init_skips_when_project_doc_exists() { ); } +#[test] +fn parse_collaboration_mode_selection_accepts_common_aliases() { + assert_eq!( + collaboration_modes::parse_selection("plan"), + Some(CollaborationModeSelection::Plan) + ); + assert_eq!( + collaboration_modes::parse_selection("PAIR"), + Some(CollaborationModeSelection::PairProgramming) + ); + assert_eq!( + collaboration_modes::parse_selection("pair_programming"), + Some(CollaborationModeSelection::PairProgramming) + ); + assert_eq!( + collaboration_modes::parse_selection("pp"), + Some(CollaborationModeSelection::PairProgramming) + ); + assert_eq!( + collaboration_modes::parse_selection(" exec "), + Some(CollaborationModeSelection::Execute) + ); + assert_eq!( + collaboration_modes::parse_selection("execute"), + Some(CollaborationModeSelection::Execute) + ); + assert_eq!(collaboration_modes::parse_selection("unknown"), None); +} + +#[tokio::test] +async fn collab_mode_shift_tab_cycles_only_when_enabled_and_idle() { + let (mut chat, _rx, _op_rx) = make_chatwidget_manual(None).await; + chat.set_feature_enabled(Feature::CollaborationModes, false); + + let initial = chat.collaboration_mode; + chat.handle_key_event(KeyEvent::from(KeyCode::BackTab)); + assert_eq!(chat.collaboration_mode, initial); + + chat.set_feature_enabled(Feature::CollaborationModes, true); + + chat.handle_key_event(KeyEvent::from(KeyCode::BackTab)); + assert_eq!(chat.collaboration_mode, CollaborationModeSelection::Execute); + + chat.handle_key_event(KeyEvent::from(KeyCode::BackTab)); + 
assert_eq!(chat.collaboration_mode, CollaborationModeSelection::Plan); + + chat.on_task_started(); + chat.handle_key_event(KeyEvent::from(KeyCode::BackTab)); + assert_eq!(chat.collaboration_mode, CollaborationModeSelection::Plan); +} + +#[tokio::test] +async fn collab_slash_command_sets_mode_and_next_submit_sends_user_turn() { + let (mut chat, _rx, mut op_rx) = make_chatwidget_manual(None).await; + chat.thread_id = Some(ThreadId::new()); + chat.set_feature_enabled(Feature::CollaborationModes, true); + + chat.dispatch_command_with_args(SlashCommand::Collab, "plan".to_string()); + assert_eq!(chat.collaboration_mode, CollaborationModeSelection::Plan); + + chat.bottom_pane + .set_composer_text("hello".to_string(), Vec::new(), Vec::new()); + chat.handle_key_event(KeyEvent::from(KeyCode::Enter)); + match next_submit_op(&mut op_rx) { + Op::UserTurn { + collaboration_mode: Some(CollaborationMode::Plan(_)), + .. + } => {} + other => panic!("expected Op::UserTurn with plan collab mode, got {other:?}"), + } + + chat.bottom_pane + .set_composer_text("follow up".to_string(), Vec::new(), Vec::new()); + chat.handle_key_event(KeyEvent::from(KeyCode::Enter)); + match next_submit_op(&mut op_rx) { + Op::UserTurn { + collaboration_mode: Some(CollaborationMode::Plan(_)), + .. + } => {} + other => panic!("expected Op::UserTurn with plan collab mode, got {other:?}"), + } +} + +#[tokio::test] +async fn collab_mode_defaults_to_pair_programming_when_enabled() { + let (mut chat, _rx, mut op_rx) = make_chatwidget_manual(None).await; + chat.thread_id = Some(ThreadId::new()); + chat.set_feature_enabled(Feature::CollaborationModes, true); + + chat.bottom_pane + .set_composer_text("hello".to_string(), Vec::new(), Vec::new()); + chat.handle_key_event(KeyEvent::from(KeyCode::Enter)); + match next_submit_op(&mut op_rx) { + Op::UserTurn { + collaboration_mode: Some(CollaborationMode::PairProgramming(_)), + .. 
+ } => {} + other => panic!("expected Op::UserTurn with pair programming collab mode, got {other:?}"), + } +} + #[tokio::test] async fn slash_quit_requests_exit() { let (mut chat, mut rx, _op_rx) = make_chatwidget_manual(None).await; @@ -2755,7 +3233,7 @@ async fn interrupt_prepends_queued_messages_before_existing_composer_text() { chat.bottom_pane.set_task_running(true); chat.bottom_pane - .set_composer_text("current draft".to_string()); + .set_composer_text("current draft".to_string(), Vec::new(), Vec::new()); chat.queued_user_messages .push_back(UserMessage::from("first queued".to_string())); @@ -3781,8 +4259,11 @@ async fn chatwidget_exec_and_status_layout_vt100_snapshot() { delta: "**Investigating rendering code**".into(), }), }); - chat.bottom_pane - .set_composer_text("Summarize recent commits".to_string()); + chat.bottom_pane.set_composer_text( + "Summarize recent commits".to_string(), + Vec::new(), + Vec::new(), + ); let width: u16 = 80; let ui_height: u16 = chat.desired_height(width); diff --git a/codex-rs/tui/src/clipboard_paste.rs b/codex-rs/tui/src/clipboard_paste.rs index 5863c728b0..4d28b365fe 100644 --- a/codex-rs/tui/src/clipboard_paste.rs +++ b/codex-rs/tui/src/clipboard_paste.rs @@ -244,9 +244,14 @@ pub fn paste_image_to_temp_png() -> Result<(PathBuf, PastedImageInfo), PasteImag /// - shell-escaped single paths (via `shlex`) pub fn normalize_pasted_path(pasted: &str) -> Option { let pasted = pasted.trim(); + let unquoted = pasted + .strip_prefix('"') + .and_then(|s| s.strip_suffix('"')) + .or_else(|| pasted.strip_prefix('\'').and_then(|s| s.strip_suffix('\''))) + .unwrap_or(pasted); // file:// URL → filesystem path - if let Ok(url) = url::Url::parse(pasted) + if let Ok(url) = url::Url::parse(unquoted) && url.scheme() == "file" { return url.to_file_path().ok(); @@ -258,38 +263,18 @@ pub fn normalize_pasted_path(pasted: &str) -> Option { // Detect unquoted Windows paths and bypass POSIX shlex which // treats backslashes as escapes (e.g., 
C:\Users\Alice\file.png). // Also handles UNC paths (\\server\share\path). - let looks_like_windows_path = { - // Drive letter path: C:\ or C:/ - let drive = pasted - .chars() - .next() - .map(|c| c.is_ascii_alphabetic()) - .unwrap_or(false) - && pasted.get(1..2) == Some(":") - && pasted - .get(2..3) - .map(|s| s == "\\" || s == "/") - .unwrap_or(false); - // UNC path: \\server\share - let unc = pasted.starts_with("\\\\"); - drive || unc - }; - if looks_like_windows_path { - #[cfg(target_os = "linux")] - { - if is_probably_wsl() - && let Some(converted) = convert_windows_path_to_wsl(pasted) - { - return Some(converted); - } - } - return Some(PathBuf::from(pasted)); + if let Some(path) = normalize_windows_path(unquoted) { + return Some(path); } // shell-escaped single path → unescaped let parts: Vec = shlex::Shlex::new(pasted).collect(); if parts.len() == 1 { - return parts.into_iter().next().map(PathBuf::from); + let part = parts.into_iter().next()?; + if let Some(path) = normalize_windows_path(&part) { + return Some(path); + } + return Some(PathBuf::from(part)); } None @@ -339,6 +324,36 @@ fn convert_windows_path_to_wsl(input: &str) -> Option { Some(result) } +fn normalize_windows_path(input: &str) -> Option { + // Drive letter path: C:\ or C:/ + let drive = input + .chars() + .next() + .map(|c| c.is_ascii_alphabetic()) + .unwrap_or(false) + && input.get(1..2) == Some(":") + && input + .get(2..3) + .map(|s| s == "\\" || s == "/") + .unwrap_or(false); + // UNC path: \\server\share + let unc = input.starts_with("\\\\"); + if !drive && !unc { + return None; + } + + #[cfg(target_os = "linux")] + { + if is_probably_wsl() + && let Some(converted) = convert_windows_path_to_wsl(input) + { + return Some(converted); + } + } + + Some(PathBuf::from(input)) +} + /// Infer an image format for the provided path based on its extension. 
pub fn pasted_image_format(path: &Path) -> EncodedImageFormat { match path @@ -438,9 +453,39 @@ mod pasted_paths_tests { #[test] fn normalize_single_quoted_windows_path() { let input = r"'C:\\Users\\Alice\\My File.jpeg'"; + let unquoted = r"C:\\Users\\Alice\\My File.jpeg"; let result = normalize_pasted_path(input).expect("should trim single quotes on windows path"); - assert_eq!(result, PathBuf::from(r"C:\\Users\\Alice\\My File.jpeg")); + #[cfg(target_os = "linux")] + let expected = if is_probably_wsl() + && let Some(converted) = convert_windows_path_to_wsl(unquoted) + { + converted + } else { + PathBuf::from(unquoted) + }; + #[cfg(not(target_os = "linux"))] + let expected = PathBuf::from(unquoted); + assert_eq!(result, expected); + } + + #[test] + fn normalize_double_quoted_windows_path() { + let input = r#""C:\\Users\\Alice\\My File.jpeg""#; + let unquoted = r"C:\\Users\\Alice\\My File.jpeg"; + let result = + normalize_pasted_path(input).expect("should trim double quotes on windows path"); + #[cfg(target_os = "linux")] + let expected = if is_probably_wsl() + && let Some(converted) = convert_windows_path_to_wsl(unquoted) + { + converted + } else { + PathBuf::from(unquoted) + }; + #[cfg(not(target_os = "linux"))] + let expected = PathBuf::from(unquoted); + assert_eq!(result, expected); } #[test] diff --git a/codex-rs/tui/src/collaboration_modes.rs b/codex-rs/tui/src/collaboration_modes.rs new file mode 100644 index 0000000000..bdd1d8b63c --- /dev/null +++ b/codex-rs/tui/src/collaboration_modes.rs @@ -0,0 +1,135 @@ +//! Collaboration mode selection + rendering helpers for the TUI. +//! +//! This module is intentionally UI-focused: +//! - It owns the user-facing set of selectable collaboration modes and how they cycle. +//! - It parses `/collab ` arguments into a selection. +//! - It resolves a `Selection` to a concrete `codex_protocol::config_types::CollaborationMode` by +//! picking from the `ModelsManager` builtin collaboration presets. +//! 
- It builds the small footer "flash" line shown after changing modes. +//! +//! The `ChatWidget` owns the session state and decides *when* selection/mode changes are allowed +//! (feature flag, task running, modals open, etc.). This module just provides the building blocks. + +use crate::key_hint; +use codex_core::models_manager::manager::ModelsManager; +use codex_protocol::config_types::CollaborationMode; +use codex_protocol::config_types::Settings; +use codex_protocol::openai_models::ReasoningEffort; +use crossterm::event::KeyCode; +use ratatui::style::Stylize; +use ratatui::text::Line; + +/// The user-facing collaboration mode choices supported by the TUI. +/// +/// This is distinct from `CollaborationMode`: it represents a stable UI selection and the cycling +/// order, while `CollaborationMode` can carry nested settings/prompt configuration. +#[derive(Clone, Copy, Debug, Default, PartialEq, Eq)] +pub(crate) enum Selection { + Plan, + #[default] + PairProgramming, + Execute, +} + +impl Selection { + /// Cycle to the next selection. + /// + /// The TUI cycles through a small, fixed set of presets. + pub(crate) fn next(self) -> Self { + match self { + Self::Plan => Self::PairProgramming, + Self::PairProgramming => Self::Execute, + Self::Execute => Self::Plan, + } + } + + /// User-facing label used in UI surfaces like `/status` and the footer flash. + pub(crate) fn label(self) -> &'static str { + match self { + Self::Plan => "Plan", + Self::PairProgramming => "Pair Programming", + Self::Execute => "Execute", + } + } +} + +/// Parse a user argument (e.g. `/collab plan`, `/collab pair_programming`) into a selection. +/// +/// The parser is forgiving: it strips whitespace, `-`, and `_`, and matches case-insensitively. 
+pub(crate) fn parse_selection(input: &str) -> Option { + let normalized: String = input + .chars() + .filter(|c| !c.is_ascii_whitespace() && *c != '-' && *c != '_') + .flat_map(char::to_lowercase) + .collect(); + + match normalized.as_str() { + "plan" => Some(Selection::Plan), + "pair" | "pairprogramming" | "pp" => Some(Selection::PairProgramming), + "execute" | "exec" => Some(Selection::Execute), + _ => None, + } +} + +/// Resolve a selection to a concrete collaboration mode preset. +/// +/// `ModelsManager::list_collaboration_modes()` is expected to return a builtin set of presets; this +/// function selects the first preset of the desired variant. +pub(crate) fn resolve_mode( + models_manager: &ModelsManager, + selection: Selection, +) -> Option { + match selection { + Selection::Plan => models_manager + .list_collaboration_modes() + .into_iter() + .find(|mode| matches!(mode, CollaborationMode::Plan(_))), + Selection::PairProgramming => models_manager + .list_collaboration_modes() + .into_iter() + .find(|mode| matches!(mode, CollaborationMode::PairProgramming(_))), + Selection::Execute => models_manager + .list_collaboration_modes() + .into_iter() + .find(|mode| matches!(mode, CollaborationMode::Execute(_))), + } +} + +/// Resolve a selection to a concrete collaboration mode preset, falling back to a synthesized mode +/// when the desired preset is unavailable. +/// +/// This keeps the TUI behavior stable when collaboration presets are missing (for example, when +/// running in offline/unit-test contexts): if the feature flag is enabled, every submission carries +/// an explicit collaboration mode so core doesn't fall back to `Custom`. 
+pub(crate) fn resolve_mode_or_fallback( + models_manager: &ModelsManager, + selection: Selection, + fallback_model: &str, + fallback_effort: Option, +) -> CollaborationMode { + resolve_mode(models_manager, selection).unwrap_or_else(|| { + let settings = Settings { + model: fallback_model.to_string(), + reasoning_effort: fallback_effort, + developer_instructions: None, + }; + + match selection { + Selection::Plan => CollaborationMode::Plan(settings), + Selection::PairProgramming => CollaborationMode::PairProgramming(settings), + Selection::Execute => CollaborationMode::Execute(settings), + } + }) +} + +/// Build a 1-line footer "flash" that is shown after switching modes. +/// +/// The `ChatWidget` controls when to show this and how long it should remain visible. +pub(crate) fn flash_line(selection: Selection) -> Line<'static> { + Line::from(vec![ + selection.label().bold(), + " (".dim(), + key_hint::shift(KeyCode::Tab).into(), + " to change mode)".dim(), + ]) +} diff --git a/codex-rs/tui/src/history_cell.rs b/codex-rs/tui/src/history_cell.rs index c060d20cd9..fd09fe89dc 100644 --- a/codex-rs/tui/src/history_cell.rs +++ b/codex-rs/tui/src/history_cell.rs @@ -47,6 +47,7 @@ use codex_protocol::openai_models::ReasoningEffort as ReasoningEffortConfig; use codex_protocol::plan_tool::PlanItemArg; use codex_protocol::plan_tool::StepStatus; use codex_protocol::plan_tool::UpdatePlanArgs; +use codex_protocol::user_input::TextElement; use image::DynamicImage; use image::ImageReader; use mcp_types::EmbeddedResourceResource; @@ -54,6 +55,7 @@ use mcp_types::Resource; use mcp_types::ResourceLink; use mcp_types::ResourceTemplate; use ratatui::prelude::*; +use ratatui::style::Color; use ratatui::style::Modifier; use ratatui::style::Style; use ratatui::style::Styled; @@ -158,6 +160,75 @@ impl dyn HistoryCell { #[derive(Debug)] pub(crate) struct UserHistoryCell { pub message: String, + pub text_elements: Vec, + #[allow(dead_code)] + pub local_image_paths: Vec, +} + +/// Build 
logical lines for a user message with styled text elements. +/// +/// This preserves explicit newlines while interleaving element spans and skips +/// malformed byte ranges instead of panicking during history rendering. +fn build_user_message_lines_with_elements( + message: &str, + elements: &[TextElement], + style: Style, + element_style: Style, +) -> Vec> { + let mut elements = elements.to_vec(); + elements.sort_by_key(|e| e.byte_range.start); + let mut offset = 0usize; + let mut raw_lines: Vec> = Vec::new(); + for line_text in message.split('\n') { + let line_start = offset; + let line_end = line_start + line_text.len(); + let mut spans: Vec> = Vec::new(); + // Track how much of the line we've emitted to interleave plain and styled spans. + let mut cursor = line_start; + for elem in &elements { + let start = elem.byte_range.start.max(line_start); + let end = elem.byte_range.end.min(line_end); + if start >= end { + continue; + } + let rel_start = start - line_start; + let rel_end = end - line_start; + // Guard against malformed UTF-8 byte ranges from upstream data; skip + // invalid elements rather than panicking while rendering history. + if !line_text.is_char_boundary(rel_start) || !line_text.is_char_boundary(rel_end) { + continue; + } + let rel_cursor = cursor - line_start; + if cursor < start + && line_text.is_char_boundary(rel_cursor) + && let Some(segment) = line_text.get(rel_cursor..rel_start) + { + spans.push(Span::from(segment.to_string())); + } + if let Some(segment) = line_text.get(rel_start..rel_end) { + spans.push(Span::styled(segment.to_string(), element_style)); + cursor = end; + } + } + let rel_cursor = cursor - line_start; + if cursor < line_end + && line_text.is_char_boundary(rel_cursor) + && let Some(segment) = line_text.get(rel_cursor..) 
+ { + spans.push(Span::from(segment.to_string())); + } + let line = if spans.is_empty() { + Line::from(line_text.to_string()).style(style) + } else { + Line::from(spans).style(style) + }; + raw_lines.push(line); + // Split on '\n' so any '\r' stays in the line; advancing by 1 accounts + // for the separator byte. + offset = line_end + 1; + } + + raw_lines } impl HistoryCell for UserHistoryCell { @@ -171,13 +242,28 @@ impl HistoryCell for UserHistoryCell { .max(1); let style = user_message_style(); + let element_style = style.fg(Color::Cyan); - let wrapped = word_wrap_lines( - self.message.lines().map(|l| Line::from(l).style(style)), - // Wrap algorithm matches textarea.rs. - RtOptions::new(usize::from(wrap_width)) - .wrap_algorithm(textwrap::WrapAlgorithm::FirstFit), - ); + let wrapped = if self.text_elements.is_empty() { + word_wrap_lines( + self.message.split('\n').map(|l| Line::from(l).style(style)), + // Wrap algorithm matches textarea.rs. + RtOptions::new(usize::from(wrap_width)) + .wrap_algorithm(textwrap::WrapAlgorithm::FirstFit), + ) + } else { + let raw_lines = build_user_message_lines_with_elements( + &self.message, + &self.text_elements, + style, + element_style, + ); + word_wrap_lines( + raw_lines, + RtOptions::new(usize::from(wrap_width)) + .wrap_algorithm(textwrap::WrapAlgorithm::FirstFit), + ) + }; lines.push(Line::from("").style(style)); lines.extend(prefix_lines(wrapped, "› ".bold().dim(), " ".into())); @@ -886,8 +972,16 @@ pub(crate) fn new_session_info( SessionInfoCell(CompositeHistoryCell { parts }) } -pub(crate) fn new_user_prompt(message: String) -> UserHistoryCell { - UserHistoryCell { message } +pub(crate) fn new_user_prompt( + message: String, + text_elements: Vec, + local_image_paths: Vec, +) -> UserHistoryCell { + UserHistoryCell { + message, + text_elements, + local_image_paths, + } } #[derive(Debug)] @@ -1331,7 +1425,8 @@ pub(crate) fn empty_mcp_output() -> PlainHistoryCell { " • No MCP servers configured.".italic().into(), 
Line::from(vec![ " See the ".into(), - "\u{1b}]8;;https://github.com/openai/codex/blob/main/docs/config.md#mcp_servers\u{7}MCP docs\u{1b}]8;;\u{7}".underlined(), + "\u{1b}]8;;https://developers.openai.com/codex/mcp\u{7}MCP docs\u{1b}]8;;\u{7}" + .underlined(), " to configure them.".into(), ]) .style(Style::default().add_modifier(Modifier::DIM)), @@ -2581,6 +2676,8 @@ mod tests { let msg = "one two three four five six seven"; let cell = UserHistoryCell { message: msg.to_string(), + text_elements: Vec::new(), + local_image_paths: Vec::new(), }; // Small width to force wrapping more clearly. Effective wrap width is width-2 due to the ▌ prefix and trailing space. diff --git a/codex-rs/tui/src/lib.rs b/codex-rs/tui/src/lib.rs index 880ff19322..e57497505a 100644 --- a/codex-rs/tui/src/lib.rs +++ b/codex-rs/tui/src/lib.rs @@ -47,6 +47,7 @@ mod chatwidget; mod cli; mod clipboard_paste; mod collab; +mod collaboration_modes; mod color; pub mod custom_terminal; mod diff_render; diff --git a/codex-rs/tui/src/public_widgets/composer_input.rs b/codex-rs/tui/src/public_widgets/composer_input.rs index 2a80c087ee..46a7e72bcf 100644 --- a/codex-rs/tui/src/public_widgets/composer_input.rs +++ b/codex-rs/tui/src/public_widgets/composer_input.rs @@ -48,13 +48,14 @@ impl ComposerInput { /// Clear the input text. pub fn clear(&mut self) { - self.inner.set_text_content(String::new()); + self.inner + .set_text_content(String::new(), Vec::new(), Vec::new()); } /// Feed a key event into the composer and return a high-level action. pub fn input(&mut self, key: KeyEvent) -> ComposerAction { let action = match self.inner.handle_key_event(key).0 { - InputResult::Submitted(text) => ComposerAction::Submitted(text), + InputResult::Submitted { text, .. 
} => ComposerAction::Submitted(text), _ => ComposerAction::None, }; self.drain_app_events(); diff --git a/codex-rs/tui/src/slash_command.rs b/codex-rs/tui/src/slash_command.rs index d7cbd7fc1d..ab63d6c7c8 100644 --- a/codex-rs/tui/src/slash_command.rs +++ b/codex-rs/tui/src/slash_command.rs @@ -24,6 +24,7 @@ pub enum SlashCommand { Fork, Init, Compact, + Collab, // Undo, Diff, Mention, @@ -57,6 +58,7 @@ impl SlashCommand { SlashCommand::Status => "show current session configuration and token usage", SlashCommand::Ps => "list background terminals", SlashCommand::Model => "choose what model and reasoning effort to use", + SlashCommand::Collab => "change collaboration mode (experimental)", SlashCommand::Approvals => "choose what Codex can do without approval", SlashCommand::ElevateSandbox => "set up elevated agent sandbox", SlashCommand::Experimental => "toggle beta features", @@ -99,6 +101,7 @@ impl SlashCommand { | SlashCommand::Exit => true, SlashCommand::Rollout => true, SlashCommand::TestApproval => true, + SlashCommand::Collab => true, } } diff --git a/codex-rs/tui/src/status/card.rs b/codex-rs/tui/src/status/card.rs index b860b03941..1f903c26b7 100644 --- a/codex-rs/tui/src/status/card.rs +++ b/codex-rs/tui/src/status/card.rs @@ -63,6 +63,7 @@ struct StatusHistoryCell { approval: String, sandbox: String, agents_summary: String, + collaboration_mode: Option, model_provider: Option, account: Option, session_id: Option, @@ -83,6 +84,7 @@ pub(crate) fn new_status_output( plan_type: Option, now: DateTime, model_name: &str, + collaboration_mode: Option<&str>, ) -> CompositeHistoryCell { let command = PlainHistoryCell::new(vec!["/status".magenta().into()]); let card = StatusHistoryCell::new( @@ -96,6 +98,7 @@ pub(crate) fn new_status_output( plan_type, now, model_name, + collaboration_mode, ); CompositeHistoryCell::new(vec![Box::new(command), Box::new(card)]) @@ -114,6 +117,7 @@ impl StatusHistoryCell { plan_type: Option, now: DateTime, model_name: &str, + 
collaboration_mode: Option<&str>, ) -> Self { let config_entries = create_config_summary_entries(config, model_name); let (model_name, model_details) = compose_model_display(model_name, &config_entries); @@ -165,6 +169,7 @@ impl StatusHistoryCell { approval, sandbox, agents_summary, + collaboration_mode: collaboration_mode.map(ToString::to_string), model_provider, account, session_id, @@ -360,6 +365,9 @@ impl HistoryCell for StatusHistoryCell { if self.session_id.is_some() && self.forked_from.is_some() { push_label(&mut labels, &mut seen, "Forked from"); } + if self.collaboration_mode.is_some() { + push_label(&mut labels, &mut seen, "Collaboration mode"); + } push_label(&mut labels, &mut seen, "Token usage"); if self.token_usage.context_window.is_some() { push_label(&mut labels, &mut seen, "Context window"); @@ -409,6 +417,10 @@ impl HistoryCell for StatusHistoryCell { lines.push(formatter.line("Account", vec![Span::from(account_value)])); } + if let Some(collab_mode) = self.collaboration_mode.as_ref() { + lines.push(formatter.line("Collaboration mode", vec![Span::from(collab_mode.clone())])); + } + if let Some(session) = self.session_id.as_ref() { lines.push(formatter.line("Session", vec![Span::from(session.clone())])); } diff --git a/codex-rs/tui/src/status/tests.rs b/codex-rs/tui/src/status/tests.rs index b01a139522..bc421da424 100644 --- a/codex-rs/tui/src/status/tests.rs +++ b/codex-rs/tui/src/status/tests.rs @@ -152,6 +152,7 @@ async fn status_snapshot_includes_reasoning_details() { None, captured_at, &model_slug, + None, ); let mut rendered_lines = render_lines(&composite.display_lines(80)); if cfg!(windows) { @@ -203,6 +204,7 @@ async fn status_snapshot_includes_forked_from() { None, captured_at, &model_slug, + None, ); let mut rendered_lines = render_lines(&composite.display_lines(80)); if cfg!(windows) { @@ -260,6 +262,7 @@ async fn status_snapshot_includes_monthly_limit() { None, captured_at, &model_slug, + None, ); let mut rendered_lines = 
render_lines(&composite.display_lines(80)); if cfg!(windows) { @@ -305,6 +308,7 @@ async fn status_snapshot_shows_unlimited_credits() { None, captured_at, &model_slug, + None, ); let rendered = render_lines(&composite.display_lines(120)); assert!( @@ -349,6 +353,7 @@ async fn status_snapshot_shows_positive_credits() { None, captured_at, &model_slug, + None, ); let rendered = render_lines(&composite.display_lines(120)); assert!( @@ -393,6 +398,7 @@ async fn status_snapshot_hides_zero_credits() { None, captured_at, &model_slug, + None, ); let rendered = render_lines(&composite.display_lines(120)); assert!( @@ -435,6 +441,7 @@ async fn status_snapshot_hides_when_has_no_credits_flag() { None, captured_at, &model_slug, + None, ); let rendered = render_lines(&composite.display_lines(120)); assert!( @@ -477,6 +484,7 @@ async fn status_card_token_usage_excludes_cached_tokens() { None, now, &model_slug, + None, ); let rendered = render_lines(&composite.display_lines(120)); @@ -534,6 +542,7 @@ async fn status_snapshot_truncates_in_narrow_terminal() { None, captured_at, &model_slug, + None, ); let mut rendered_lines = render_lines(&composite.display_lines(70)); if cfg!(windows) { @@ -580,6 +589,7 @@ async fn status_snapshot_shows_missing_limits_message() { None, now, &model_slug, + None, ); let mut rendered_lines = render_lines(&composite.display_lines(80)); if cfg!(windows) { @@ -644,6 +654,7 @@ async fn status_snapshot_includes_credits_and_limits() { None, captured_at, &model_slug, + None, ); let mut rendered_lines = render_lines(&composite.display_lines(80)); if cfg!(windows) { @@ -696,6 +707,7 @@ async fn status_snapshot_shows_empty_limits_message() { None, captured_at, &model_slug, + None, ); let mut rendered_lines = render_lines(&composite.display_lines(80)); if cfg!(windows) { @@ -757,6 +769,7 @@ async fn status_snapshot_shows_stale_limits_message() { None, now, &model_slug, + None, ); let mut rendered_lines = render_lines(&composite.display_lines(80)); if 
cfg!(windows) { @@ -822,6 +835,7 @@ async fn status_snapshot_cached_limits_hide_credits_without_flag() { None, now, &model_slug, + None, ); let mut rendered_lines = render_lines(&composite.display_lines(80)); if cfg!(windows) { @@ -877,6 +891,7 @@ async fn status_context_window_uses_last_usage() { None, now, &model_slug, + None, ); let rendered_lines = render_lines(&composite.display_lines(80)); let context_line = rendered_lines diff --git a/codex-rs/tui/src/style.rs b/codex-rs/tui/src/style.rs index e50d59791e..26a71bd897 100644 --- a/codex-rs/tui/src/style.rs +++ b/codex-rs/tui/src/style.rs @@ -19,10 +19,10 @@ pub fn user_message_style_for(terminal_bg: Option<(u8, u8, u8)>) -> Style { #[allow(clippy::disallowed_methods)] pub fn user_message_bg(terminal_bg: (u8, u8, u8)) -> Color { - let top = if is_light(terminal_bg) { - (0, 0, 0) + let (top, alpha) = if is_light(terminal_bg) { + ((0, 0, 0), 0.04) } else { - (255, 255, 255) + ((255, 255, 255), 0.12) }; - best_color(blend(top, terminal_bg, 0.1)) + best_color(blend(top, terminal_bg, alpha)) } diff --git a/codex-rs/tui2/src/app.rs b/codex-rs/tui2/src/app.rs index 6cc764c4a2..99fc2e188e 100644 --- a/codex-rs/tui2/src/app.rs +++ b/codex-rs/tui2/src/app.rs @@ -423,6 +423,26 @@ pub(crate) struct App { skip_world_writable_scan_once: bool, } impl App { + pub fn chatwidget_init_for_forked_or_resumed_thread( + &self, + tui: &mut tui::Tui, + cfg: codex_core::config::Config, + ) -> crate::chatwidget::ChatWidgetInit { + crate::chatwidget::ChatWidgetInit { + config: cfg, + frame_requester: tui.frame_requester(), + app_event_tx: self.app_event_tx.clone(), + // Fork/resume bootstraps here don't carry any prefilled message content. 
+ initial_user_message: None, + enhanced_keys_supported: self.enhanced_keys_supported, + auth_manager: self.auth_manager.clone(), + models_manager: self.server.get_models_manager(), + feedback: self.feedback.clone(), + is_first_run: false, + model: Some(self.current_model.clone()), + } + } + async fn shutdown_current_conversation(&mut self) { if let Some(conversation_id) = self.chat_widget.conversation_id() { // Clear any in-flight rollback guard when switching conversations. @@ -486,8 +506,12 @@ impl App { config: config.clone(), frame_requester: tui.frame_requester(), app_event_tx: app_event_tx.clone(), - initial_prompt: initial_prompt.clone(), - initial_images: initial_images.clone(), + initial_user_message: crate::chatwidget::create_initial_user_message( + initial_prompt.clone(), + initial_images.clone(), + // CLI prompt args are plain strings, so they don't provide element ranges. + Vec::new(), + ), enhanced_keys_supported, auth_manager: auth_manager.clone(), models_manager: thread_manager.get_models_manager(), @@ -509,8 +533,12 @@ impl App { config: config.clone(), frame_requester: tui.frame_requester(), app_event_tx: app_event_tx.clone(), - initial_prompt: initial_prompt.clone(), - initial_images: initial_images.clone(), + initial_user_message: crate::chatwidget::create_initial_user_message( + initial_prompt.clone(), + initial_images.clone(), + // CLI prompt args are plain strings, so they don't provide element ranges. 
+ Vec::new(), + ), enhanced_keys_supported, auth_manager: auth_manager.clone(), models_manager: thread_manager.get_models_manager(), @@ -532,8 +560,12 @@ impl App { config: config.clone(), frame_requester: tui.frame_requester(), app_event_tx: app_event_tx.clone(), - initial_prompt: initial_prompt.clone(), - initial_images: initial_images.clone(), + initial_user_message: crate::chatwidget::create_initial_user_message( + initial_prompt.clone(), + initial_images.clone(), + // CLI prompt args are plain strings, so they don't provide element ranges. + Vec::new(), + ), enhanced_keys_supported, auth_manager: auth_manager.clone(), models_manager: thread_manager.get_models_manager(), @@ -1447,12 +1479,15 @@ impl App { self.chat_widget.conversation_id(), ); self.shutdown_current_conversation().await; + if let Err(err) = self.server.remove_and_close_all_threads().await { + tracing::warn!(error = %err, "failed to close all threads"); + } let init = crate::chatwidget::ChatWidgetInit { config: self.config.clone(), frame_requester: tui.frame_requester(), app_event_tx: self.app_event_tx.clone(), - initial_prompt: None, - initial_images: Vec::new(), + // New sessions start without prefilled message content. 
+ initial_user_message: None, enhanced_keys_supported: self.enhanced_keys_supported, auth_manager: self.auth_manager.clone(), models_manager: self.server.get_models_manager(), @@ -1496,19 +1531,10 @@ impl App { { Ok(resumed) => { self.shutdown_current_conversation().await; - let init = crate::chatwidget::ChatWidgetInit { - config: self.config.clone(), - frame_requester: tui.frame_requester(), - app_event_tx: self.app_event_tx.clone(), - initial_prompt: None, - initial_images: Vec::new(), - enhanced_keys_supported: self.enhanced_keys_supported, - auth_manager: self.auth_manager.clone(), - models_manager: self.server.get_models_manager(), - feedback: self.feedback.clone(), - is_first_run: false, - model: Some(self.current_model.clone()), - }; + let init = self.chatwidget_init_for_forked_or_resumed_thread( + tui, + self.config.clone(), + ); self.chat_widget = ChatWidget::new_from_existing( init, resumed.thread, @@ -1556,19 +1582,10 @@ impl App { { Ok(forked) => { self.shutdown_current_conversation().await; - let init = crate::chatwidget::ChatWidgetInit { - config: self.config.clone(), - frame_requester: tui.frame_requester(), - app_event_tx: self.app_event_tx.clone(), - initial_prompt: None, - initial_images: Vec::new(), - enhanced_keys_supported: self.enhanced_keys_supported, - auth_manager: self.auth_manager.clone(), - models_manager: self.server.get_models_manager(), - feedback: self.feedback.clone(), - is_first_run: false, - model: Some(self.current_model.clone()), - }; + let init = self.chatwidget_init_for_forked_or_resumed_thread( + tui, + self.config.clone(), + ); self.chat_widget = ChatWidget::new_from_existing( init, forked.thread, @@ -1610,6 +1627,8 @@ impl App { self.transcript_cells.push(cell.clone()); if self.overlay.is_some() { self.deferred_history_cells.push(cell); + } else { + tui.frame_requester().schedule_frame(); } } AppEvent::StartCommitAnimation => { @@ -2634,6 +2653,8 @@ mod tests { let user_cell = |text: &str| -> Arc { Arc::new(UserHistoryCell 
{ message: text.to_string(), + text_elements: Vec::new(), + local_image_paths: Vec::new(), }) as Arc }; let agent_cell = |text: &str| -> Arc { diff --git a/codex-rs/tui2/src/app_backtrack.rs b/codex-rs/tui2/src/app_backtrack.rs index a86ed6dea6..81bf398bf9 100644 --- a/codex-rs/tui2/src/app_backtrack.rs +++ b/codex-rs/tui2/src/app_backtrack.rs @@ -205,7 +205,10 @@ impl App { }); self.chat_widget.submit_op(Op::ThreadRollback { num_turns }); if !prefill.is_empty() { - self.chat_widget.set_composer_text(prefill); + // TODO: Rehydrate text_elements/local_image_paths from the selected user cell so + // backtrack preserves image placeholders and attachments. + self.chat_widget + .set_composer_text(prefill, Vec::new(), Vec::new()); } } @@ -576,6 +579,8 @@ mod tests { let mut cells: Vec> = vec![ Arc::new(UserHistoryCell { message: "first user".to_string(), + text_elements: Vec::new(), + local_image_paths: Vec::new(), }) as Arc, Arc::new(AgentMessageCell::new(vec![Line::from("assistant")], true)) as Arc, @@ -592,6 +597,8 @@ mod tests { as Arc, Arc::new(UserHistoryCell { message: "first".to_string(), + text_elements: Vec::new(), + local_image_paths: Vec::new(), }) as Arc, Arc::new(AgentMessageCell::new(vec![Line::from("after")], false)) as Arc, @@ -620,11 +627,15 @@ mod tests { as Arc, Arc::new(UserHistoryCell { message: "first".to_string(), + text_elements: Vec::new(), + local_image_paths: Vec::new(), }) as Arc, Arc::new(AgentMessageCell::new(vec![Line::from("between")], false)) as Arc, Arc::new(UserHistoryCell { message: "second".to_string(), + text_elements: Vec::new(), + local_image_paths: Vec::new(), }) as Arc, Arc::new(AgentMessageCell::new(vec![Line::from("tail")], false)) as Arc, diff --git a/codex-rs/tui2/src/bottom_pane/chat_composer.rs b/codex-rs/tui2/src/bottom_pane/chat_composer.rs index c9058e0153..3592b25737 100644 --- a/codex-rs/tui2/src/bottom_pane/chat_composer.rs +++ b/codex-rs/tui2/src/bottom_pane/chat_composer.rs @@ -70,7 +70,6 @@ use 
ratatui::layout::Constraint; use ratatui::layout::Layout; use ratatui::layout::Margin; use ratatui::layout::Rect; -use ratatui::style::Style; use ratatui::style::Stylize; use ratatui::text::Line; use ratatui::text::Span; @@ -81,12 +80,15 @@ use ratatui::widgets::WidgetRef; use super::chat_composer_history::ChatComposerHistory; use super::command_popup::CommandItem; use super::command_popup::CommandPopup; +use super::command_popup::CommandPopupFlags; use super::file_search_popup::FileSearchPopup; use super::footer::FooterMode; use super::footer::FooterProps; use super::footer::esc_hint_mode; use super::footer::footer_height; +use super::footer::inset_footer_hint_area; use super::footer::render_footer; +use super::footer::render_footer_hint_items; use super::footer::reset_mode_after_activity; use super::footer::toggle_shortcut_mode; use super::paste_burst::CharDecision; @@ -109,9 +111,12 @@ use codex_common::fuzzy_match::fuzzy_match; use codex_protocol::custom_prompts::CustomPrompt; use codex_protocol::custom_prompts::PROMPTS_CMD_PREFIX; use codex_protocol::models::local_image_label_text; +use codex_protocol::user_input::ByteRange; +use codex_protocol::user_input::TextElement; use crate::app_event::AppEvent; use crate::app_event_sender::AppEventSender; +use crate::bottom_pane::LocalImageAttachment; use crate::bottom_pane::textarea::TextArea; use crate::bottom_pane::textarea::TextAreaState; use crate::clipboard_paste::normalize_pasted_path; @@ -123,6 +128,7 @@ use codex_file_search::FileMatch; use std::cell::RefCell; use std::collections::HashMap; use std::collections::HashSet; +use std::collections::VecDeque; use std::path::PathBuf; use std::time::Duration; use std::time::Instant; @@ -141,8 +147,14 @@ const LARGE_PASTE_CHAR_THRESHOLD: usize = 1000; /// Result returned when the user interacts with the text area. 
#[derive(Debug, PartialEq)] pub enum InputResult { - Submitted(String), - Queued(String), + Submitted { + text: String, + text_elements: Vec, + }, + Queued { + text: String, + text_elements: Vec, + }, Command(SlashCommand), CommandWithArgs(SlashCommand, String), None, @@ -192,6 +204,7 @@ pub(crate) struct ChatComposer { custom_prompts: Vec, footer_mode: FooterMode, footer_hint_override: Option>, + footer_flash: Option, context_window_percent: Option, context_window_used_tokens: Option, transcript_scrolled: bool, @@ -203,6 +216,13 @@ pub(crate) struct ChatComposer { dismissed_skill_popup_token: Option, /// When enabled, `Enter` submits immediately and `Tab` requests queuing behavior. steer_enabled: bool, + collaboration_modes_enabled: bool, +} + +#[derive(Clone, Debug)] +struct FooterFlash { + line: Line<'static>, + expires_at: Instant, } /// Popup state – at most one can be visible at any time. @@ -250,6 +270,7 @@ impl ChatComposer { custom_prompts: Vec::new(), footer_mode: FooterMode::ShortcutSummary, footer_hint_override: None, + footer_flash: None, context_window_percent: None, context_window_used_tokens: None, transcript_scrolled: false, @@ -260,6 +281,7 @@ impl ChatComposer { skills: None, dismissed_skill_popup_token: None, steer_enabled: false, + collaboration_modes_enabled: false, }; // Apply configuration via the setter to keep side-effects centralized. 
this.set_disable_paste_burst(disable_paste_burst); @@ -280,6 +302,10 @@ impl ChatComposer { self.steer_enabled = enabled; } + pub fn set_collaboration_modes_enabled(&mut self, enabled: bool) { + self.collaboration_modes_enabled = enabled; + } + fn layout_areas(&self, area: Rect) -> [Rect; 3] { let footer_props = self.footer_props(); let footer_hint_height = self @@ -334,7 +360,8 @@ impl ChatComposer { let Some(text) = self.history.on_entry_response(log_id, offset, entry) else { return false; }; - self.set_text_content(text); + // Composer history (↑/↓) stores plain text only; no UI element ranges/attachments to restore here. + self.set_text_content(text, Vec::new(), Vec::new()); true } @@ -357,6 +384,7 @@ impl ChatComposer { /// In all cases, clears any paste-burst Enter suppression state so a real paste cannot affect /// the next user Enter key, then syncs popup state. pub fn handle_paste(&mut self, pasted: String) -> bool { + let pasted = pasted.replace("\r\n", "\n").replace('\r', "\n"); let char_count = pasted.chars().count(); if char_count > LARGE_PASTE_CHAR_THRESHOLD { let placeholder = self.next_large_paste_placeholder(char_count); @@ -431,13 +459,51 @@ impl ChatComposer { self.footer_hint_override = items; } + pub(crate) fn show_footer_flash(&mut self, line: Line<'static>, duration: Duration) { + let expires_at = Instant::now() + .checked_add(duration) + .unwrap_or_else(Instant::now); + self.footer_flash = Some(FooterFlash { line, expires_at }); + } + + pub(crate) fn footer_flash_visible(&self) -> bool { + self.footer_flash + .as_ref() + .is_some_and(|flash| Instant::now() < flash.expires_at) + } + /// Replace the entire composer content with `text` and reset cursor. - pub(crate) fn set_text_content(&mut self, text: String) { + /// This clears any pending paste payloads. + pub(crate) fn set_text_content( + &mut self, + text: String, + text_elements: Vec, + local_image_paths: Vec, + ) { // Clear any existing content, placeholders, and attachments first. 
- self.textarea.set_text(""); + self.textarea.set_text_clearing_elements(""); self.pending_pastes.clear(); self.attached_images.clear(); - self.textarea.set_text(&text); + + self.textarea.set_text_with_elements(&text, &text_elements); + + let image_placeholders: HashSet = text_elements + .iter() + .filter_map(|elem| { + elem.placeholder.as_ref().cloned().or_else(|| { + text.get(elem.byte_range.start..elem.byte_range.end) + .map(str::to_string) + }) + }) + .collect(); + for (idx, path) in local_image_paths.into_iter().enumerate() { + let placeholder = local_image_label_text(idx + 1); + if image_placeholders.contains(&placeholder) { + self.attached_images + .push(AttachedImage { placeholder, path }); + } + } + self.textarea.set_cursor(0); self.sync_popups(); } @@ -447,7 +513,7 @@ impl ChatComposer { return None; } let previous = self.current_text(); - self.set_text_content(String::new()); + self.set_text_content(String::new(), Vec::new(), Vec::new()); self.history.reset_navigation(); self.history.record_local_submission(&previous); Some(previous) @@ -458,6 +524,28 @@ impl ChatComposer { self.textarea.text().to_string() } + pub(crate) fn text_elements(&self) -> Vec { + self.textarea.text_elements() + } + + #[cfg(test)] + pub(crate) fn local_image_paths(&self) -> Vec { + self.attached_images + .iter() + .map(|img| img.path.clone()) + .collect() + } + + pub(crate) fn local_images(&self) -> Vec { + self.attached_images + .iter() + .map(|img| LocalImageAttachment { + placeholder: img.placeholder.clone(), + path: img.path.clone(), + }) + .collect() + } + /// Insert an attachment placeholder and track it for the next submission. 
pub fn attach_image(&mut self, path: PathBuf) { let image_number = self.attached_images.len() + 1; @@ -469,11 +557,23 @@ impl ChatComposer { .push(AttachedImage { placeholder, path }); } + #[cfg(test)] pub fn take_recent_submission_images(&mut self) -> Vec { let images = std::mem::take(&mut self.attached_images); images.into_iter().map(|img| img.path).collect() } + pub fn take_recent_submission_images_with_placeholders(&mut self) -> Vec { + let images = std::mem::take(&mut self.attached_images); + images + .into_iter() + .map(|img| LocalImageAttachment { + placeholder: img.placeholder, + path: img.path, + }) + .collect() + } + /// Flushes any due paste-burst state. /// /// Call this from a UI tick to turn paste-burst transient state into explicit textarea edits: @@ -652,7 +752,7 @@ impl ChatComposer { match sel { CommandItem::Builtin(cmd) => { if cmd == SlashCommand::Skills { - self.textarea.set_text(""); + self.textarea.set_text_clearing_elements(""); return (InputResult::Command(cmd), true); } @@ -660,7 +760,9 @@ impl ChatComposer { .trim_start() .starts_with(&format!("/{}", cmd.command())); if !starts_with_cmd { - self.textarea.set_text(&format!("/{} ", cmd.command())); + // Slash completion replaces the buffer with plain text; drop elements. + self.textarea + .set_text_clearing_elements(&format!("/{} ", cmd.command())); } if !self.textarea.text().is_empty() { cursor_target = Some(self.textarea.text().len()); @@ -675,7 +777,8 @@ impl ChatComposer { ) { PromptSelectionAction::Insert { text, cursor } => { let target = cursor.unwrap_or(text.len()); - self.textarea.set_text(&text); + // Inserted prompt text is plain input; discard any elements. + self.textarea.set_text_clearing_elements(&text); cursor_target = Some(target); } PromptSelectionAction::Submit { .. 
} => {} @@ -704,14 +807,21 @@ impl ChatComposer { && let Some(expanded) = expand_if_numeric_with_positional_args(prompt, first_line) { - self.textarea.set_text(""); - return (InputResult::Submitted(expanded), true); + self.textarea.set_text_clearing_elements(""); + return ( + InputResult::Submitted { + text: expanded, + // Expanded prompt is plain text; no UI element ranges to preserve. + text_elements: Vec::new(), + }, + true, + ); } if let Some(sel) = popup.selected_item() { match sel { CommandItem::Builtin(cmd) => { - self.textarea.set_text(""); + self.textarea.set_text_clearing_elements(""); return (InputResult::Command(cmd), true); } CommandItem::UserPrompt(idx) => { @@ -722,12 +832,20 @@ impl ChatComposer { PromptSelectionMode::Submit, ) { PromptSelectionAction::Submit { text } => { - self.textarea.set_text(""); - return (InputResult::Submitted(text), true); + self.textarea.set_text_clearing_elements(""); + return ( + InputResult::Submitted { + text, + // Submitting a slash/custom prompt generates plain text, so there are no UI element ranges. + text_elements: Vec::new(), + }, + true, + ); } PromptSelectionAction::Insert { text, cursor } => { let target = cursor.unwrap_or(text.len()); - self.textarea.set_text(&text); + // Inserted prompt text is plain input; discard any elements. 
+ self.textarea.set_text_clearing_elements(&text); self.textarea.set_cursor(target); return (InputResult::None, true); } @@ -1031,6 +1149,106 @@ impl ChatComposer { lower.ends_with(".png") || lower.ends_with(".jpg") || lower.ends_with(".jpeg") } + fn trim_text_elements( + original: &str, + trimmed: &str, + elements: Vec, + ) -> Vec { + if trimmed.is_empty() || elements.is_empty() { + return Vec::new(); + } + let trimmed_start = original.len().saturating_sub(original.trim_start().len()); + let trimmed_end = trimmed_start.saturating_add(trimmed.len()); + + elements + .into_iter() + .filter_map(|elem| { + let start = elem.byte_range.start; + let end = elem.byte_range.end; + if end <= trimmed_start || start >= trimmed_end { + return None; + } + let new_start = start.saturating_sub(trimmed_start); + let new_end = end.saturating_sub(trimmed_start).min(trimmed.len()); + if new_start >= new_end { + return None; + } + let placeholder = trimmed.get(new_start..new_end).map(str::to_string); + Some(TextElement { + byte_range: ByteRange { + start: new_start, + end: new_end, + }, + placeholder, + }) + }) + .collect() + } + + /// Expand large-paste placeholders using element ranges and rebuild other element spans. 
+ fn expand_pending_pastes( + text: &str, + mut elements: Vec, + pending_pastes: &[(String, String)], + ) -> (String, Vec) { + if pending_pastes.is_empty() || elements.is_empty() { + return (text.to_string(), elements); + } + + let mut pending_by_placeholder: HashMap<&str, VecDeque<&str>> = HashMap::new(); + for (placeholder, actual) in pending_pastes { + pending_by_placeholder + .entry(placeholder.as_str()) + .or_default() + .push_back(actual.as_str()); + } + + elements.sort_by_key(|elem| elem.byte_range.start); + + let mut rebuilt = String::with_capacity(text.len()); + let mut rebuilt_elements = Vec::with_capacity(elements.len()); + let mut cursor = 0usize; + + for elem in elements { + let start = elem.byte_range.start.min(text.len()); + let end = elem.byte_range.end.min(text.len()); + if start > end { + continue; + } + if start > cursor { + rebuilt.push_str(&text[cursor..start]); + } + let elem_text = &text[start..end]; + let placeholder = elem.placeholder; + let replacement = placeholder + .as_deref() + .and_then(|ph| pending_by_placeholder.get_mut(ph)) + .and_then(VecDeque::pop_front); + if let Some(actual) = replacement { + rebuilt.push_str(actual); + } else { + let new_start = rebuilt.len(); + rebuilt.push_str(elem_text); + let new_end = rebuilt.len(); + let placeholder = placeholder.or_else(|| Some(elem_text.to_string())); + rebuilt_elements.push(TextElement { + byte_range: ByteRange { + start: new_start, + end: new_end, + }, + placeholder, + }); + } + cursor = end; + } + + if cursor < text.len() { + rebuilt.push_str(&text[cursor..]); + } + + (rebuilt, rebuilt_elements) + } + fn skills_enabled(&self) -> bool { self.skills.as_ref().is_some_and(|s| !s.is_empty()) } @@ -1205,7 +1423,8 @@ impl ChatComposer { new_text.push(' '); new_text.push_str(&text[end_idx..]); - self.textarea.set_text(&new_text); + // Path replacement is plain text; rebuild without carrying elements. 
+ self.textarea.set_text_clearing_elements(&new_text); let new_cursor = start_idx.saturating_add(inserted.len()).saturating_add(1); self.textarea.set_cursor(new_cursor); } @@ -1240,52 +1459,49 @@ impl ChatComposer { new_text.push(' '); new_text.push_str(&text[end_idx..]); - self.textarea.set_text(&new_text); + // Skill insertion rebuilds plain text, so drop existing elements. + self.textarea.set_text_clearing_elements(&new_text); let new_cursor = start_idx.saturating_add(inserted.len()).saturating_add(1); self.textarea.set_cursor(new_cursor); } /// Prepare text for submission/queuing. Returns None if submission should be suppressed. - fn prepare_submission_text(&mut self) -> Option { - // If we have pending placeholder pastes, replace them in the textarea text - // and continue to the normal submission flow to handle slash commands. - if !self.pending_pastes.is_empty() { - let mut text = self.textarea.text().to_string(); - for (placeholder, actual) in &self.pending_pastes { - if text.contains(placeholder) { - text = text.replace(placeholder, actual); - } - } - self.textarea.set_text(&text); - self.pending_pastes.clear(); - } - + /// On success, clears pending paste payloads because placeholders have been expanded. 
+ fn prepare_submission_text(&mut self) -> Option<(String, Vec)> { let mut text = self.textarea.text().to_string(); let original_input = text.clone(); + let original_text_elements = self.textarea.text_elements(); + let original_local_image_paths = self + .attached_images + .iter() + .map(|img| img.path.clone()) + .collect::>(); + let original_pending_pastes = self.pending_pastes.clone(); + let mut text_elements = original_text_elements.clone(); let input_starts_with_space = original_input.starts_with(' '); - self.textarea.set_text(""); + self.textarea.set_text_clearing_elements(""); - // Replace all pending pastes in the text - for (placeholder, actual) in &self.pending_pastes { - if text.contains(placeholder) { - text = text.replace(placeholder, actual); - } + if !self.pending_pastes.is_empty() { + // Expand placeholders so element byte ranges stay aligned. + let (expanded, expanded_elements) = + Self::expand_pending_pastes(&text, text_elements, &self.pending_pastes); + text = expanded; + text_elements = expanded_elements; } - self.pending_pastes.clear(); + + let expanded_input = text.clone(); // If there is neither text nor attachments, suppress submission entirely. 
let has_attachments = !self.attached_images.is_empty(); text = text.trim().to_string(); + text_elements = Self::trim_text_elements(&expanded_input, &text, text_elements); if let Some((name, _rest)) = parse_slash_name(&text) { let treat_as_plain_text = input_starts_with_space || name.contains('/'); if !treat_as_plain_text { - let is_builtin = built_in_slash_commands() - .into_iter() - .filter(|(_, cmd)| { - windows_degraded_sandbox_active() || *cmd != SlashCommand::ElevateSandbox - }) - .any(|(command_name, _)| command_name == name); + let is_builtin = + Self::built_in_slash_commands_for_input(self.collaboration_modes_enabled) + .any(|(command_name, _)| command_name == name); let prompt_prefix = format!("{PROMPTS_CMD_PREFIX}:"); let is_known_prompt = name .strip_prefix(&prompt_prefix) @@ -1302,7 +1518,12 @@ impl ChatComposer { self.app_event_tx.send(AppEvent::InsertHistoryCell(Box::new( history_cell::new_info_event(message, None), ))); - self.textarea.set_text(&original_input); + self.set_text_content( + original_input.clone(), + original_text_elements, + original_local_image_paths, + ); + self.pending_pastes.clone_from(&original_pending_pastes); self.textarea.set_cursor(original_input.len()); return None; } @@ -1315,13 +1536,21 @@ impl ChatComposer { self.app_event_tx.send(AppEvent::InsertHistoryCell(Box::new( history_cell::new_error_event(err.user_message()), ))); - self.textarea.set_text(&original_input); + self.set_text_content( + original_input.clone(), + original_text_elements, + original_local_image_paths, + ); + self.pending_pastes.clone_from(&original_pending_pastes); self.textarea.set_cursor(original_input.len()); return None; } }; if let Some(expanded) = expanded_prompt { text = expanded; + // Expanded prompt (e.g. custom prompt) is plain text; text elements not supported yet. + // TODO: Preserve UI element ranges through prompt expansion in a follow-up PR. 
+ text_elements = Vec::new(); } if text.is_empty() && !has_attachments { return None; @@ -1329,7 +1558,9 @@ impl ChatComposer { if !text.is_empty() { self.history.record_local_submission(&text); } - Some(text) + // Placeholder elements have been expanded into real text, so payloads can be dropped. + self.pending_pastes.clear(); + Some((text, text_elements)) } /// Common logic for handling message submission/queuing. @@ -1378,20 +1609,44 @@ impl ChatComposer { } let original_input = self.textarea.text().to_string(); + let original_text_elements = self.textarea.text_elements(); + let original_local_image_paths = self + .attached_images + .iter() + .map(|img| img.path.clone()) + .collect::>(); + let original_pending_pastes = self.pending_pastes.clone(); if let Some(result) = self.try_dispatch_slash_command_with_args() { return (result, true); } - if let Some(text) = self.prepare_submission_text() { + if let Some((text, text_elements)) = self.prepare_submission_text() { if should_queue { - (InputResult::Queued(text), true) + ( + InputResult::Queued { + text, + text_elements, + }, + true, + ) } else { // Do not clear attached_images here; ChatWidget drains them via take_recent_submission_images(). - (InputResult::Submitted(text), true) + ( + InputResult::Submitted { + text, + text_elements, + }, + true, + ) } } else { - // Restore text if submission was suppressed - self.textarea.set_text(&original_input); + // Restore text if submission was suppressed. 
+ self.set_text_content( + original_input, + original_text_elements, + original_local_image_paths, + ); + self.pending_pastes = original_pending_pastes; (InputResult::None, true) } } @@ -1402,14 +1657,11 @@ impl ChatComposer { let first_line = self.textarea.text().lines().next().unwrap_or(""); if let Some((name, rest)) = parse_slash_name(first_line) && rest.is_empty() - && let Some((_n, cmd)) = built_in_slash_commands() - .into_iter() - .filter(|(_, cmd)| { - windows_degraded_sandbox_active() || *cmd != SlashCommand::ElevateSandbox - }) - .find(|(n, _)| *n == name) + && let Some((_n, cmd)) = + Self::built_in_slash_commands_for_input(self.collaboration_modes_enabled) + .find(|(n, _)| *n == name) { - self.textarea.set_text(""); + self.textarea.set_text_clearing_elements(""); Some(InputResult::Command(cmd)) } else { None @@ -1427,12 +1679,12 @@ impl ChatComposer { if let Some((name, rest)) = parse_slash_name(&text) && !rest.is_empty() && !name.contains('/') - && let Some((_n, cmd)) = built_in_slash_commands() - .into_iter() - .find(|(command_name, _)| *command_name == name) + && let Some((_n, cmd)) = + Self::built_in_slash_commands_for_input(self.collaboration_modes_enabled) + .find(|(command_name, _)| *command_name == name) && cmd == SlashCommand::Review { - self.textarea.set_text(""); + self.textarea.set_text_clearing_elements(""); return Some(InputResult::CommandWithArgs(cmd, rest.to_string())); } } @@ -1488,7 +1740,7 @@ impl ChatComposer { _ => unreachable!(), }; if let Some(text) = replace_text { - self.set_text_content(text); + self.set_text_content(text, Vec::new(), Vec::new()); return (InputResult::None, true); } } @@ -1736,6 +1988,8 @@ impl ChatComposer { } fn relabel_attached_images_and_update_placeholders(&mut self) { + // Renumber by insertion order (attachment list order), and update any matching elements + // regardless of where they appear in the text. 
for idx in 0..self.attached_images.len() { let expected = local_image_label_text(idx + 1); let current = self.attached_images[idx].placeholder.clone(); @@ -1776,6 +2030,7 @@ impl ChatComposer { is_task_running: self.is_task_running, quit_shortcut_key: self.quit_shortcut_key, steer_enabled: self.steer_enabled, + collaboration_modes_enabled: self.collaboration_modes_enabled, context_window_percent: self.context_window_percent, context_window_used_tokens: self.context_window_used_tokens, transcript_scrolled: self.transcript_scrolled, @@ -1803,6 +2058,9 @@ impl ChatComposer { } fn custom_footer_height(&self) -> Option { + if self.footer_flash_visible() { + return Some(1); + } self.footer_hint_override .as_ref() .map(|items| if items.is_empty() { 0 } else { 1 }) @@ -1917,12 +2175,9 @@ impl ChatComposer { return rest_after_name.is_empty(); } - let builtin_match = built_in_slash_commands() - .into_iter() - .filter(|(_, cmd)| { - windows_degraded_sandbox_active() || *cmd != SlashCommand::ElevateSandbox - }) - .any(|(cmd_name, _)| fuzzy_match(cmd_name, name).is_some()); + let builtin_match = + Self::built_in_slash_commands_for_input(self.collaboration_modes_enabled) + .any(|(cmd_name, _)| fuzzy_match(cmd_name, name).is_some()); if builtin_match { return true; @@ -1975,8 +2230,14 @@ impl ChatComposer { _ => { if is_editing_slash_command_name { let skills_enabled = self.skills_enabled(); - let mut command_popup = - CommandPopup::new(self.custom_prompts.clone(), skills_enabled); + let collaboration_modes_enabled = self.collaboration_modes_enabled; + let mut command_popup = CommandPopup::new( + self.custom_prompts.clone(), + CommandPopupFlags { + skills_enabled, + collaboration_modes_enabled, + }, + ); command_popup.on_composer_text_change(first_line.to_string()); self.active_popup = ActivePopup::Command(command_popup); } @@ -1984,6 +2245,16 @@ impl ChatComposer { } } + fn built_in_slash_commands_for_input( + collaboration_modes_enabled: bool, + ) -> impl Iterator { + let 
allow_elevate_sandbox = windows_degraded_sandbox_active(); + built_in_slash_commands() + .into_iter() + .filter(move |(_, cmd)| allow_elevate_sandbox || *cmd != SlashCommand::ElevateSandbox) + .filter(move |(_, cmd)| collaboration_modes_enabled || *cmd != SlashCommand::Collab) + } + pub(crate) fn set_custom_prompts(&mut self, prompts: Vec) { self.custom_prompts = prompts.clone(); if let ActivePopup::Command(popup) = &mut self.active_popup { @@ -2149,24 +2420,12 @@ impl Renderable for ChatComposer { } else { popup_rect }; - if let Some(items) = self.footer_hint_override.as_ref() { - if !items.is_empty() { - let mut spans = Vec::with_capacity(items.len() * 4); - for (idx, (key, label)) in items.iter().enumerate() { - spans.push(" ".into()); - spans.push(Span::styled(key.clone(), Style::default().bold())); - spans.push(format!(" {label}").into()); - if idx + 1 != items.len() { - spans.push(" ".into()); - } - } - let mut custom_rect = hint_rect; - if custom_rect.width > 2 { - custom_rect.x += 2; - custom_rect.width = custom_rect.width.saturating_sub(2); - } - Line::from(spans).render_ref(custom_rect, buf); + if self.footer_flash_visible() { + if let Some(flash) = self.footer_flash.as_ref() { + flash.line.render(inset_footer_hint_area(hint_rect), buf); } + } else if let Some(items) = self.footer_hint_override.as_ref() { + render_footer_hint_items(hint_rect, buf, items); } else { render_footer(hint_rect, buf, footer_props); } @@ -2326,6 +2585,84 @@ mod tests { ); } + #[test] + fn footer_flash_overrides_footer_hint_override() { + let (tx, _rx) = unbounded_channel::(); + let sender = AppEventSender::new(tx); + let mut composer = ChatComposer::new( + true, + sender, + false, + "Ask Codex to do anything".to_string(), + false, + ); + composer.set_footer_hint_override(Some(vec![("K".to_string(), "label".to_string())])); + composer.show_footer_flash(Line::from("FLASH"), Duration::from_secs(10)); + + let area = Rect::new(0, 0, 60, 6); + let mut buf = Buffer::empty(area); + 
composer.render(area, &mut buf); + + let mut bottom_row = String::new(); + for x in 0..area.width { + bottom_row.push( + buf[(x, area.height - 1)] + .symbol() + .chars() + .next() + .unwrap_or(' '), + ); + } + assert!( + bottom_row.contains("FLASH"), + "expected flash content to render in footer row, saw: {bottom_row:?}", + ); + assert!( + !bottom_row.contains("K label"), + "expected flash to override hint override, saw: {bottom_row:?}", + ); + } + + #[test] + fn footer_flash_expires_and_falls_back_to_hint_override() { + let (tx, _rx) = unbounded_channel::(); + let sender = AppEventSender::new(tx); + let mut composer = ChatComposer::new( + true, + sender, + false, + "Ask Codex to do anything".to_string(), + false, + ); + composer.set_footer_hint_override(Some(vec![("K".to_string(), "label".to_string())])); + composer.show_footer_flash(Line::from("FLASH"), Duration::from_secs(10)); + composer.footer_flash.as_mut().unwrap().expires_at = + Instant::now() - Duration::from_secs(1); + + let area = Rect::new(0, 0, 60, 6); + let mut buf = Buffer::empty(area); + composer.render(area, &mut buf); + + let mut bottom_row = String::new(); + for x in 0..area.width { + bottom_row.push( + buf[(x, area.height - 1)] + .symbol() + .chars() + .next() + .unwrap_or(' '), + ); + } + assert!( + bottom_row.contains("K label"), + "expected hint override to render after flash expired, saw: {bottom_row:?}", + ); + assert!( + !bottom_row.contains("FLASH"), + "expected expired flash to be hidden, saw: {bottom_row:?}", + ); + } + fn snapshot_composer_state(name: &str, enhanced_keys_supported: bool, setup: F) where F: FnOnce(&mut ChatComposer), @@ -2449,7 +2786,7 @@ mod tests { ); composer.set_steer_enabled(true); - composer.set_text_content("draft text".to_string()); + composer.set_text_content("draft text".to_string(), Vec::new(), Vec::new()); assert_eq!(composer.clear_for_ctrl_c(), Some("draft text".to_string())); assert!(composer.is_empty()); @@ -2742,7 +3079,7 @@ mod tests { let (result, _) = 
composer.handle_key_event(KeyEvent::new(KeyCode::Enter, KeyModifiers::NONE)); match result { - InputResult::Submitted(text) => assert_eq!(text, "1あ"), + InputResult::Submitted { text, .. } => assert_eq!(text, "1あ"), _ => panic!("expected Submitted"), } } @@ -2882,6 +3219,7 @@ mod tests { "Ask Codex to do anything".to_string(), false, ); + composer.set_steer_enabled(true); let _ = composer.handle_key_event(KeyEvent::new(KeyCode::Char('あ'), KeyModifiers::NONE)); @@ -2913,7 +3251,7 @@ mod tests { let (result, _) = composer.handle_key_event(KeyEvent::new(KeyCode::Enter, KeyModifiers::NONE)); match result { - InputResult::Submitted(text) => assert_eq!(text, "あ"), + InputResult::Submitted { text, .. } => assert_eq!(text, "あ"), _ => panic!("expected Submitted"), } } @@ -2979,7 +3317,7 @@ mod tests { false, ); - composer.textarea.set_text("/diff"); + composer.textarea.set_text_clearing_elements("/diff"); composer.textarea.set_cursor("/diff".len()); composer .paste_burst @@ -3117,7 +3455,7 @@ mod tests { let (result, _) = composer.handle_key_event(KeyEvent::new(KeyCode::Enter, KeyModifiers::NONE)); match result { - InputResult::Submitted(text) => assert_eq!(text, "hello"), + InputResult::Submitted { text, .. } => assert_eq!(text, "hello"), _ => panic!("expected Submitted"), } } @@ -3181,7 +3519,7 @@ mod tests { let (result, _) = composer.handle_key_event(KeyEvent::new(KeyCode::Enter, KeyModifiers::NONE)); match result { - InputResult::Submitted(text) => assert_eq!(text, large), + InputResult::Submitted { text, .. } => assert_eq!(text, large), _ => panic!("expected Submitted"), } assert!(composer.pending_pastes.is_empty()); @@ -3443,10 +3781,10 @@ mod tests { InputResult::CommandWithArgs(_, _) => { panic!("expected command dispatch without args for '/init'") } - InputResult::Submitted(text) => { + InputResult::Submitted { text, .. } => { panic!("expected command dispatch, but composer submitted literal text: {text}") } - InputResult::Queued(_) => { + InputResult::Queued { .. 
} => { panic!("expected command dispatch, but composer queued literal text") } InputResult::None => panic!("expected Command result for '/init'"), @@ -3522,10 +3860,10 @@ mod tests { InputResult::CommandWithArgs(_, _) => { panic!("expected command dispatch without args for '/diff'") } - InputResult::Submitted(text) => { + InputResult::Submitted { text, .. } => { panic!("expected command dispatch after Tab completion, got literal submit: {text}") } - InputResult::Queued(_) => { + InputResult::Queued { .. } => { panic!("expected command dispatch after Tab completion, got literal queue") } InputResult::None => panic!("expected Command result for '/diff'"), @@ -3561,10 +3899,10 @@ mod tests { InputResult::CommandWithArgs(_, _) => { panic!("expected command dispatch without args for '/mention'") } - InputResult::Submitted(text) => { + InputResult::Submitted { text, .. } => { panic!("expected command dispatch, but composer submitted literal text: {text}") } - InputResult::Queued(_) => { + InputResult::Queued { .. } => { panic!("expected command dispatch, but composer queued literal text") } InputResult::None => panic!("expected Command result for '/mention'"), @@ -3649,7 +3987,7 @@ mod tests { // Submit and verify final expansion let (result, _) = composer.handle_key_event(KeyEvent::new(KeyCode::Enter, KeyModifiers::NONE)); - if let InputResult::Submitted(text) = result { + if let InputResult::Submitted { text, .. 
} = result { assert_eq!(text, format!("{} and {}", test_cases[0].0, test_cases[2].0)); } else { panic!("expected Submitted"); @@ -3846,7 +4184,7 @@ mod tests { composer.textarea.text().contains(&placeholder), composer.pending_pastes.len(), ); - composer.textarea.set_text(""); + composer.textarea.set_text_clearing_elements(""); result }) .collect(); @@ -3862,7 +4200,7 @@ mod tests { // --- Image attachment tests --- #[test] - fn attach_image_and_submit_includes_image_paths() { + fn attach_image_and_submit_includes_local_image_paths() { let (tx, _rx) = unbounded_channel::(); let sender = AppEventSender::new(tx); let mut composer = ChatComposer::new( @@ -3879,13 +4217,231 @@ mod tests { let (result, _) = composer.handle_key_event(KeyEvent::new(KeyCode::Enter, KeyModifiers::NONE)); match result { - InputResult::Submitted(text) => assert_eq!(text, "[Image #1] hi"), + InputResult::Submitted { + text, + text_elements, + } => { + assert_eq!(text, "[Image #1] hi"); + assert_eq!(text_elements.len(), 1); + assert_eq!(text_elements[0].placeholder.as_deref(), Some("[Image #1]")); + assert_eq!( + text_elements[0].byte_range, + ByteRange { + start: 0, + end: "[Image #1]".len() + } + ); + } _ => panic!("expected Submitted"), } let imgs = composer.take_recent_submission_images(); assert_eq!(vec![path], imgs); } + #[test] + fn set_text_content_reattaches_images_without_placeholder_metadata() { + let (tx, _rx) = unbounded_channel::(); + let sender = AppEventSender::new(tx); + let mut composer = ChatComposer::new( + true, + sender, + false, + "Ask Codex to do anything".to_string(), + false, + ); + + let placeholder = local_image_label_text(1); + let text = format!("{placeholder} restored"); + let text_elements = vec![TextElement { + byte_range: (0..placeholder.len()).into(), + placeholder: None, + }]; + let path = PathBuf::from("/tmp/image1.png"); + + composer.set_text_content(text, text_elements, vec![path.clone()]); + + assert_eq!(composer.local_image_paths(), vec![path]); + } + + 
#[test] + fn large_paste_preserves_image_text_elements_on_submit() { + let (tx, _rx) = unbounded_channel::(); + let sender = AppEventSender::new(tx); + let mut composer = ChatComposer::new( + true, + sender, + false, + "Ask Codex to do anything".to_string(), + false, + ); + composer.set_steer_enabled(true); + + let large_content = "x".repeat(LARGE_PASTE_CHAR_THRESHOLD + 5); + composer.handle_paste(large_content.clone()); + composer.handle_paste(" ".into()); + let path = PathBuf::from("/tmp/image_with_paste.png"); + composer.attach_image(path.clone()); + + let (result, _) = + composer.handle_key_event(KeyEvent::new(KeyCode::Enter, KeyModifiers::NONE)); + match result { + InputResult::Submitted { + text, + text_elements, + } => { + let expected = format!("{large_content} [Image #1]"); + assert_eq!(text, expected); + assert_eq!(text_elements.len(), 1); + assert_eq!(text_elements[0].placeholder.as_deref(), Some("[Image #1]")); + assert_eq!( + text_elements[0].byte_range, + ByteRange { + start: large_content.len() + 1, + end: large_content.len() + 1 + "[Image #1]".len(), + } + ); + } + _ => panic!("expected Submitted"), + } + let imgs = composer.take_recent_submission_images(); + assert_eq!(vec![path], imgs); + } + + #[test] + fn large_paste_with_leading_whitespace_trims_and_shifts_elements() { + let (tx, _rx) = unbounded_channel::(); + let sender = AppEventSender::new(tx); + let mut composer = ChatComposer::new( + true, + sender, + false, + "Ask Codex to do anything".to_string(), + false, + ); + composer.set_steer_enabled(true); + + let large_content = format!(" {}", "x".repeat(LARGE_PASTE_CHAR_THRESHOLD + 5)); + composer.handle_paste(large_content.clone()); + composer.handle_paste(" ".into()); + let path = PathBuf::from("/tmp/image_with_trim.png"); + composer.attach_image(path.clone()); + + let (result, _) = + composer.handle_key_event(KeyEvent::new(KeyCode::Enter, KeyModifiers::NONE)); + match result { + InputResult::Submitted { + text, + text_elements, + } => { + 
let trimmed = large_content.trim().to_string(); + assert_eq!(text, format!("{trimmed} [Image #1]")); + assert_eq!(text_elements.len(), 1); + assert_eq!(text_elements[0].placeholder.as_deref(), Some("[Image #1]")); + assert_eq!( + text_elements[0].byte_range, + ByteRange { + start: trimmed.len() + 1, + end: trimmed.len() + 1 + "[Image #1]".len(), + } + ); + } + _ => panic!("expected Submitted"), + } + let imgs = composer.take_recent_submission_images(); + assert_eq!(vec![path], imgs); + } + + #[test] + fn pasted_crlf_normalizes_newlines_for_elements() { + let (tx, _rx) = unbounded_channel::(); + let sender = AppEventSender::new(tx); + let mut composer = ChatComposer::new( + true, + sender, + false, + "Ask Codex to do anything".to_string(), + false, + ); + composer.set_steer_enabled(true); + + let pasted = "line1\r\nline2\r\n".to_string(); + composer.handle_paste(pasted); + composer.handle_paste(" ".into()); + let path = PathBuf::from("/tmp/image_crlf.png"); + composer.attach_image(path.clone()); + + let (result, _) = + composer.handle_key_event(KeyEvent::new(KeyCode::Enter, KeyModifiers::NONE)); + match result { + InputResult::Submitted { + text, + text_elements, + } => { + assert_eq!(text, "line1\nline2\n [Image #1]"); + assert!(!text.contains('\r')); + assert_eq!(text_elements.len(), 1); + assert_eq!(text_elements[0].placeholder.as_deref(), Some("[Image #1]")); + assert_eq!( + text_elements[0].byte_range, + ByteRange { + start: "line1\nline2\n ".len(), + end: "line1\nline2\n [Image #1]".len(), + } + ); + } + _ => panic!("expected Submitted"), + } + let imgs = composer.take_recent_submission_images(); + assert_eq!(vec![path], imgs); + } + + #[test] + fn suppressed_submission_restores_pending_paste_payload() { + let (tx, _rx) = unbounded_channel::(); + let sender = AppEventSender::new(tx); + let mut composer = ChatComposer::new( + true, + sender, + false, + "Ask Codex to do anything".to_string(), + false, + ); + composer.set_steer_enabled(true); + + 
composer.textarea.set_text_clearing_elements("/unknown "); + composer.textarea.set_cursor("/unknown ".len()); + let large_content = "x".repeat(LARGE_PASTE_CHAR_THRESHOLD + 5); + composer.handle_paste(large_content.clone()); + let placeholder = composer + .pending_pastes + .first() + .expect("expected pending paste") + .0 + .clone(); + + let (result, _) = + composer.handle_key_event(KeyEvent::new(KeyCode::Enter, KeyModifiers::NONE)); + assert!(matches!(result, InputResult::None)); + assert_eq!(composer.pending_pastes.len(), 1); + assert_eq!(composer.textarea.text(), format!("/unknown {placeholder}")); + + composer.textarea.set_cursor(0); + composer.textarea.insert_str(" "); + let (result, _) = + composer.handle_key_event(KeyEvent::new(KeyCode::Enter, KeyModifiers::NONE)); + match result { + InputResult::Submitted { + text, + text_elements, + } => { + assert_eq!(text, format!("/unknown {large_content}")); + assert!(text_elements.is_empty()); + } + _ => panic!("expected Submitted"), + } + assert!(composer.pending_pastes.is_empty()); + } + #[test] fn attach_image_without_text_submits_empty_text_and_images() { let (tx, _rx) = unbounded_channel::(); @@ -3903,7 +4459,21 @@ mod tests { let (result, _) = composer.handle_key_event(KeyEvent::new(KeyCode::Enter, KeyModifiers::NONE)); match result { - InputResult::Submitted(text) => assert_eq!(text, "[Image #1]"), + InputResult::Submitted { + text, + text_elements, + } => { + assert_eq!(text, "[Image #1]"); + assert_eq!(text_elements.len(), 1); + assert_eq!(text_elements[0].placeholder.as_deref(), Some("[Image #1]")); + assert_eq!( + text_elements[0].byte_range, + ByteRange { + start: 0, + end: "[Image #1]".len() + } + ); + } _ => panic!("expected Submitted"), } let imgs = composer.take_recent_submission_images(); @@ -4030,6 +4600,69 @@ mod tests { ); } + #[test] + fn deleting_reordered_image_one_renumbers_text_in_place() { + use crossterm::event::KeyCode; + use crossterm::event::KeyEvent; + use crossterm::event::KeyModifiers; 
+ + let (tx, _rx) = unbounded_channel::(); + let sender = AppEventSender::new(tx); + let mut composer = ChatComposer::new( + true, + sender, + false, + "Ask Codex to do anything".to_string(), + false, + ); + + let path1 = PathBuf::from("/tmp/image_first.png"); + let path2 = PathBuf::from("/tmp/image_second.png"); + let placeholder1 = local_image_label_text(1); + let placeholder2 = local_image_label_text(2); + + // Placeholders can be reordered in the text buffer; deleting image #1 should renumber + // image #2 wherever it appears, not just after the cursor. + let text = format!("Test {placeholder2} test {placeholder1}"); + let start2 = text.find(&placeholder2).expect("placeholder2 present"); + let start1 = text.find(&placeholder1).expect("placeholder1 present"); + let text_elements = vec![ + TextElement { + byte_range: ByteRange { + start: start2, + end: start2 + placeholder2.len(), + }, + placeholder: Some(placeholder2), + }, + TextElement { + byte_range: ByteRange { + start: start1, + end: start1 + placeholder1.len(), + }, + placeholder: Some(placeholder1.clone()), + }, + ]; + composer.set_text_content(text, text_elements, vec![path1, path2.clone()]); + + let end1 = start1 + placeholder1.len(); + composer.textarea.set_cursor(end1); + + composer.handle_key_event(KeyEvent::new(KeyCode::Backspace, KeyModifiers::NONE)); + + assert_eq!( + composer.textarea.text(), + format!("Test {placeholder1} test ") + ); + assert_eq!( + vec![AttachedImage { + path: path2, + placeholder: placeholder1 + }], + composer.attached_images, + "attachment renumbered after deletion" + ); + } + #[test] fn deleting_first_text_element_renumbers_following_text_element() { use crossterm::event::KeyCode; @@ -4127,7 +4760,10 @@ mod tests { let (result, _needs_redraw) = composer.handle_key_event(KeyEvent::new(KeyCode::Enter, KeyModifiers::NONE)); - assert_eq!(InputResult::Submitted(prompt_text.to_string()), result); + assert!(matches!( + result, + InputResult::Submitted { text, .. 
} if text == prompt_text + )); assert!(composer.textarea.is_empty()); } @@ -4154,15 +4790,16 @@ mod tests { composer .textarea - .set_text("/prompts:my-prompt USER=Alice BRANCH=main"); + .set_text_clearing_elements("/prompts:my-prompt USER=Alice BRANCH=main"); let (result, _needs_redraw) = composer.handle_key_event(KeyEvent::new(KeyCode::Enter, KeyModifiers::NONE)); - assert_eq!( - InputResult::Submitted("Review Alice changes on main".to_string()), - result - ); + assert!(matches!( + result, + InputResult::Submitted { text, .. } + if text == "Review Alice changes on main" + )); assert!(composer.textarea.is_empty()); } @@ -4189,15 +4826,16 @@ mod tests { composer .textarea - .set_text("/prompts:my-prompt USER=\"Alice Smith\" BRANCH=dev-main"); + .set_text_clearing_elements("/prompts:my-prompt USER=\"Alice Smith\" BRANCH=dev-main"); let (result, _needs_redraw) = composer.handle_key_event(KeyEvent::new(KeyCode::Enter, KeyModifiers::NONE)); - assert_eq!( - InputResult::Submitted("Pair Alice Smith with dev-main".to_string()), - result - ); + assert!(matches!( + result, + InputResult::Submitted { text, .. } + if text == "Pair Alice Smith with dev-main" + )); assert!(composer.textarea.is_empty()); } @@ -4231,7 +4869,7 @@ mod tests { // Type the slash command let command_text = "/prompts:code-review "; - composer.textarea.set_text(command_text); + composer.textarea.set_text_clearing_elements(command_text); composer.textarea.set_cursor(command_text.len()); // Paste large content (>3000 chars) to trigger placeholder @@ -4254,7 +4892,7 @@ mod tests { // Verify the custom prompt was expanded with the large content as positional arg match result { - InputResult::Submitted(text) => { + InputResult::Submitted { text, .. 
} => { // The prompt should be expanded, with the large content replacing $1 assert_eq!( text, @@ -4287,12 +4925,12 @@ mod tests { composer .textarea - .set_text("/Users/example/project/src/main.rs"); + .set_text_clearing_elements("/Users/example/project/src/main.rs"); let (result, _needs_redraw) = composer.handle_key_event(KeyEvent::new(KeyCode::Enter, KeyModifiers::NONE)); - if let InputResult::Submitted(text) = result { + if let InputResult::Submitted { text, .. } = result { assert_eq!(text, "/Users/example/project/src/main.rs"); } else { panic!("expected Submitted"); @@ -4322,12 +4960,14 @@ mod tests { ); composer.set_steer_enabled(true); - composer.textarea.set_text(" /this-looks-like-a-command"); + composer + .textarea + .set_text_clearing_elements(" /this-looks-like-a-command"); let (result, _needs_redraw) = composer.handle_key_event(KeyEvent::new(KeyCode::Enter, KeyModifiers::NONE)); - if let InputResult::Submitted(text) = result { + if let InputResult::Submitted { text, .. } = result { assert_eq!(text, "/this-looks-like-a-command"); } else { panic!("expected Submitted"); @@ -4362,7 +5002,7 @@ mod tests { composer .textarea - .set_text("/prompts:my-prompt USER=Alice stray"); + .set_text_clearing_elements("/prompts:my-prompt USER=Alice stray"); let (result, _needs_redraw) = composer.handle_key_event(KeyEvent::new(KeyCode::Enter, KeyModifiers::NONE)); @@ -4411,7 +5051,9 @@ mod tests { }]); // Provide only one of the required args - composer.textarea.set_text("/prompts:my-prompt USER=Alice"); + composer + .textarea + .set_text_clearing_elements("/prompts:my-prompt USER=Alice"); let (result, _needs_redraw) = composer.handle_key_event(KeyEvent::new(KeyCode::Enter, KeyModifiers::NONE)); @@ -4476,7 +5118,10 @@ mod tests { composer.handle_key_event(KeyEvent::new(KeyCode::Enter, KeyModifiers::NONE)); let expected = "Header: foo\nArgs: foo bar\nNinth: \n".to_string(); - assert_eq!(InputResult::Submitted(expected), result); + assert!(matches!( + result, + 
InputResult::Submitted { text, .. } if text == expected + )); } #[test] @@ -4503,11 +5148,16 @@ mod tests { }]); // Type positional args; should submit with numeric expansion, no errors. - composer.textarea.set_text("/prompts:elegant hi"); + composer + .textarea + .set_text_clearing_elements("/prompts:elegant hi"); let (result, _needs_redraw) = composer.handle_key_event(KeyEvent::new(KeyCode::Enter, KeyModifiers::NONE)); - assert_eq!(InputResult::Submitted("Echo: hi".to_string()), result); + assert!(matches!( + result, + InputResult::Submitted { text, .. } if text == "Echo: hi" + )); assert!(composer.textarea.is_empty()); } @@ -4579,10 +5229,11 @@ mod tests { let (result, _needs_redraw) = composer.handle_key_event(KeyEvent::new(KeyCode::Enter, KeyModifiers::NONE)); - assert_eq!( - InputResult::Submitted("Cost: $$ and first: x".to_string()), - result - ); + assert!(matches!( + result, + InputResult::Submitted { text, .. } + if text == "Cost: $$ and first: x" + )); } #[test] @@ -4619,7 +5270,10 @@ mod tests { composer.handle_key_event(KeyEvent::new(KeyCode::Enter, KeyModifiers::NONE)); let expected = "First: one two\nSecond: one two".to_string(); - assert_eq!(InputResult::Submitted(expected), result); + assert!(matches!( + result, + InputResult::Submitted { text, .. } if text == expected + )); } /// Behavior: fast "paste-like" ASCII input should buffer and then flush as a single paste. If @@ -4744,7 +5398,7 @@ mod tests { ); // Simulate history-like content: "/ test" - composer.set_text_content("/ test".to_string()); + composer.set_text_content("/ test".to_string(), Vec::new(), Vec::new()); // After set_text_content -> sync_popups is called; popup should NOT be Command. 
assert!( @@ -4774,21 +5428,21 @@ mod tests { ); // Case 1: bare "/" - composer.set_text_content("/".to_string()); + composer.set_text_content("/".to_string(), Vec::new(), Vec::new()); assert!( matches!(composer.active_popup, ActivePopup::Command(_)), "bare '/' should activate slash popup" ); // Case 2: valid prefix "/re" (matches /review, /resume, etc.) - composer.set_text_content("/re".to_string()); + composer.set_text_content("/re".to_string(), Vec::new(), Vec::new()); assert!( matches!(composer.active_popup, ActivePopup::Command(_)), "'/re' should activate slash popup via prefix match" ); // Case 3: fuzzy match "/ac" (subsequence of /compact and /feedback) - composer.set_text_content("/ac".to_string()); + composer.set_text_content("/ac".to_string(), Vec::new(), Vec::new()); assert!( matches!(composer.active_popup, ActivePopup::Command(_)), "'/ac' should activate slash popup via fuzzy match" @@ -4797,7 +5451,7 @@ mod tests { // Case 4: invalid prefix "/zzz" – still allowed to open popup if it // matches no built-in command; our current logic will not open popup. // Verify that explicitly. 
- composer.set_text_content("/zzz".to_string()); + composer.set_text_content("/zzz".to_string(), Vec::new(), Vec::new()); assert!( matches!(composer.active_popup, ActivePopup::None), "'/zzz' should not activate slash popup because it is not a prefix of any built-in command" @@ -4820,7 +5474,7 @@ mod tests { false, ); - composer.set_text_content("hello".to_string()); + composer.set_text_content("hello".to_string(), Vec::new(), Vec::new()); composer.set_input_enabled(false, Some("Input disabled for test.".to_string())); let (result, needs_redraw) = diff --git a/codex-rs/tui2/src/bottom_pane/command_popup.rs b/codex-rs/tui2/src/bottom_pane/command_popup.rs index ba49ab12c0..000ce2e4ea 100644 --- a/codex-rs/tui2/src/bottom_pane/command_popup.rs +++ b/codex-rs/tui2/src/bottom_pane/command_popup.rs @@ -37,13 +37,20 @@ pub(crate) struct CommandPopup { state: ScrollState, } +#[derive(Clone, Copy, Debug, Default)] +pub(crate) struct CommandPopupFlags { + pub(crate) skills_enabled: bool, + pub(crate) collaboration_modes_enabled: bool, +} + impl CommandPopup { - pub(crate) fn new(mut prompts: Vec, skills_enabled: bool) -> Self { + pub(crate) fn new(mut prompts: Vec, flags: CommandPopupFlags) -> Self { let allow_elevate_sandbox = windows_degraded_sandbox_active(); let builtins: Vec<(&'static str, SlashCommand)> = built_in_slash_commands() .into_iter() - .filter(|(_, cmd)| skills_enabled || *cmd != SlashCommand::Skills) + .filter(|(_, cmd)| flags.skills_enabled || *cmd != SlashCommand::Skills) .filter(|(_, cmd)| allow_elevate_sandbox || *cmd != SlashCommand::ElevateSandbox) + .filter(|(_, cmd)| flags.collaboration_modes_enabled || *cmd != SlashCommand::Collab) .collect(); // Exclude prompts that collide with builtin command names and sort by name. 
let exclude: HashSet = builtins.iter().map(|(n, _)| (*n).to_string()).collect(); @@ -230,7 +237,7 @@ mod tests { #[test] fn filter_includes_init_when_typing_prefix() { - let mut popup = CommandPopup::new(Vec::new(), false); + let mut popup = CommandPopup::new(Vec::new(), CommandPopupFlags::default()); // Simulate the composer line starting with '/in' so the popup filters // matching commands by prefix. popup.on_composer_text_change("/in".to_string()); @@ -250,7 +257,7 @@ mod tests { #[test] fn selecting_init_by_exact_match() { - let mut popup = CommandPopup::new(Vec::new(), false); + let mut popup = CommandPopup::new(Vec::new(), CommandPopupFlags::default()); popup.on_composer_text_change("/init".to_string()); // When an exact match exists, the selected command should be that @@ -265,7 +272,7 @@ mod tests { #[test] fn model_is_first_suggestion_for_mo() { - let mut popup = CommandPopup::new(Vec::new(), false); + let mut popup = CommandPopup::new(Vec::new(), CommandPopupFlags::default()); popup.on_composer_text_change("/mo".to_string()); let matches = popup.filtered_items(); match matches.first() { @@ -279,7 +286,7 @@ mod tests { #[test] fn filtered_commands_keep_presentation_order() { - let mut popup = CommandPopup::new(Vec::new(), false); + let mut popup = CommandPopup::new(Vec::new(), CommandPopupFlags::default()); popup.on_composer_text_change("/m".to_string()); let cmds: Vec<&str> = popup @@ -311,7 +318,7 @@ mod tests { argument_hint: None, }, ]; - let popup = CommandPopup::new(prompts, false); + let popup = CommandPopup::new(prompts, CommandPopupFlags::default()); let items = popup.filtered_items(); let mut prompt_names: Vec = items .into_iter() @@ -335,7 +342,7 @@ mod tests { description: None, argument_hint: None, }], - false, + CommandPopupFlags::default(), ); let items = popup.filtered_items(); let has_collision_prompt = items.into_iter().any(|it| match it { @@ -358,7 +365,7 @@ mod tests { description: Some("Create feature branch, commit and open draft 
PR.".to_string()), argument_hint: None, }], - false, + CommandPopupFlags::default(), ); let rows = popup.rows_from_matches(vec![(CommandItem::UserPrompt(0), None, 0)]); let description = rows.first().and_then(|row| row.description.as_deref()); @@ -378,7 +385,7 @@ mod tests { description: None, argument_hint: None, }], - false, + CommandPopupFlags::default(), ); let rows = popup.rows_from_matches(vec![(CommandItem::UserPrompt(0), None, 0)]); let description = rows.first().and_then(|row| row.description.as_deref()); @@ -387,7 +394,7 @@ mod tests { #[test] fn fuzzy_filter_matches_subsequence_for_ac() { - let mut popup = CommandPopup::new(Vec::new(), false); + let mut popup = CommandPopup::new(Vec::new(), CommandPopupFlags::default()); popup.on_composer_text_change("/ac".to_string()); let cmds: Vec<&str> = popup @@ -403,4 +410,40 @@ mod tests { "expected fuzzy search for '/ac' to include compact and feedback, got {cmds:?}" ); } + + #[test] + fn collab_command_hidden_when_collaboration_modes_disabled() { + let mut popup = CommandPopup::new(Vec::new(), CommandPopupFlags::default()); + popup.on_composer_text_change("/coll".to_string()); + + let cmds: Vec<&str> = popup + .filtered_items() + .into_iter() + .filter_map(|item| match item { + CommandItem::Builtin(cmd) => Some(cmd.command()), + CommandItem::UserPrompt(_) => None, + }) + .collect(); + assert!( + !cmds.contains(&"collab"), + "expected '/collab' to be hidden when collaboration modes are disabled, got {cmds:?}" + ); + } + + #[test] + fn collab_command_visible_when_collaboration_modes_enabled() { + let mut popup = CommandPopup::new( + Vec::new(), + CommandPopupFlags { + skills_enabled: false, + collaboration_modes_enabled: true, + }, + ); + popup.on_composer_text_change("/collab".to_string()); + + match popup.selected_item() { + Some(CommandItem::Builtin(cmd)) => assert_eq!(cmd.command(), "collab"), + other => panic!("expected collab to be selected for exact match, got {other:?}"), + } + } } diff --git 
a/codex-rs/tui2/src/bottom_pane/footer.rs b/codex-rs/tui2/src/bottom_pane/footer.rs index bdfb5c8dfc..fe5a13dc48 100644 --- a/codex-rs/tui2/src/bottom_pane/footer.rs +++ b/codex-rs/tui2/src/bottom_pane/footer.rs @@ -38,6 +38,7 @@ pub(crate) struct FooterProps { pub(crate) use_shift_enter_hint: bool, pub(crate) is_task_running: bool, pub(crate) steer_enabled: bool, + pub(crate) collaboration_modes_enabled: bool, /// Which key the user must press again to quit. /// /// This is rendered when `mode` is `FooterMode::QuitShortcutReminder`. @@ -109,6 +110,31 @@ pub(crate) fn render_footer(area: Rect, buf: &mut Buffer, props: FooterProps) { .render(area, buf); } +pub(crate) fn inset_footer_hint_area(mut area: Rect) -> Rect { + if area.width > 2 { + area.x += 2; + area.width = area.width.saturating_sub(2); + } + area +} + +pub(crate) fn render_footer_hint_items(area: Rect, buf: &mut Buffer, items: &[(String, String)]) { + if items.is_empty() { + return; + } + + let mut spans = Vec::with_capacity(items.len() * 4); + for (idx, (key, label)) in items.iter().enumerate() { + spans.push(" ".into()); + spans.push(key.clone().bold()); + spans.push(format!(" {label}").into()); + if idx + 1 != items.len() { + spans.push(" ".into()); + } + } + Line::from(spans).render(inset_footer_hint_area(area), buf); +} + fn footer_lines(props: FooterProps) -> Vec> { fn apply_copy_feedback(lines: &mut [Line<'static>], feedback: Option) { let Some(line) = lines.first_mut() else { @@ -176,6 +202,7 @@ fn footer_lines(props: FooterProps) -> Vec> { use_shift_enter_hint: props.use_shift_enter_hint, esc_backtrack_hint: props.esc_backtrack_hint, is_wsl, + collaboration_modes_enabled: props.collaboration_modes_enabled, }; shortcut_overlay_lines(state) } @@ -202,6 +229,7 @@ struct ShortcutsState { use_shift_enter_hint: bool, esc_backtrack_hint: bool, is_wsl: bool, + collaboration_modes_enabled: bool, } fn quit_shortcut_reminder_line(key: KeyBinding) -> Line<'static> { @@ -233,6 +261,7 @@ fn 
shortcut_overlay_lines(state: ShortcutsState) -> Vec> { let mut edit_previous = Line::from(""); let mut quit = Line::from(""); let mut show_transcript = Line::from(""); + let mut change_mode = Line::from(""); for descriptor in SHORTCUTS { if let Some(text) = descriptor.overlay_entry(state) { @@ -246,11 +275,12 @@ fn shortcut_overlay_lines(state: ShortcutsState) -> Vec> { ShortcutId::EditPrevious => edit_previous = text, ShortcutId::Quit => quit = text, ShortcutId::ShowTranscript => show_transcript = text, + ShortcutId::ChangeMode => change_mode = text, } } } - let ordered = vec![ + let mut ordered = vec![ commands, shell_commands, newline, @@ -259,9 +289,12 @@ fn shortcut_overlay_lines(state: ShortcutsState) -> Vec> { paste_image, edit_previous, quit, - Line::from(""), - show_transcript, ]; + if change_mode.width() > 0 { + ordered.push(change_mode); + } + ordered.push(Line::from("")); + ordered.push(show_transcript); build_columns(ordered) } @@ -338,6 +371,7 @@ enum ShortcutId { EditPrevious, Quit, ShowTranscript, + ChangeMode, } #[derive(Clone, Copy, Debug, Eq, PartialEq)] @@ -358,6 +392,7 @@ enum DisplayCondition { WhenShiftEnterHint, WhenNotShiftEnterHint, WhenUnderWSL, + WhenCollaborationModesEnabled, } impl DisplayCondition { @@ -367,6 +402,7 @@ impl DisplayCondition { DisplayCondition::WhenShiftEnterHint => state.use_shift_enter_hint, DisplayCondition::WhenNotShiftEnterHint => !state.use_shift_enter_hint, DisplayCondition::WhenUnderWSL => state.is_wsl, + DisplayCondition::WhenCollaborationModesEnabled => state.collaboration_modes_enabled, } } } @@ -500,6 +536,15 @@ const SHORTCUTS: &[ShortcutDescriptor] = &[ prefix: "", label: " to view transcript", }, + ShortcutDescriptor { + id: ShortcutId::ChangeMode, + bindings: &[ShortcutBinding { + key: key_hint::shift(KeyCode::Tab), + condition: DisplayCondition::WhenCollaborationModesEnabled, + }], + prefix: "", + label: " to change mode", + }, ]; #[cfg(test)] @@ -531,6 +576,7 @@ mod tests { use_shift_enter_hint: 
false, is_task_running: false, steer_enabled: false, + collaboration_modes_enabled: false, quit_shortcut_key: key_hint::ctrl(KeyCode::Char('c')), context_window_percent: None, context_window_used_tokens: None, @@ -550,6 +596,7 @@ mod tests { use_shift_enter_hint: false, is_task_running: false, steer_enabled: false, + collaboration_modes_enabled: false, quit_shortcut_key: key_hint::ctrl(KeyCode::Char('c')), context_window_percent: None, context_window_used_tokens: None, @@ -569,6 +616,27 @@ mod tests { use_shift_enter_hint: true, is_task_running: false, steer_enabled: false, + collaboration_modes_enabled: false, + quit_shortcut_key: key_hint::ctrl(KeyCode::Char('c')), + context_window_percent: None, + context_window_used_tokens: None, + transcript_scrolled: false, + transcript_selection_active: false, + transcript_scroll_position: None, + transcript_copy_selection_key: key_hint::ctrl_shift(KeyCode::Char('c')), + transcript_copy_feedback: None, + }, + ); + + snapshot_footer( + "footer_shortcuts_collaboration_modes_enabled", + FooterProps { + mode: FooterMode::ShortcutOverlay, + esc_backtrack_hint: true, + use_shift_enter_hint: true, + is_task_running: false, + steer_enabled: false, + collaboration_modes_enabled: true, quit_shortcut_key: key_hint::ctrl(KeyCode::Char('c')), context_window_percent: None, context_window_used_tokens: None, @@ -588,6 +656,7 @@ mod tests { use_shift_enter_hint: false, is_task_running: false, steer_enabled: false, + collaboration_modes_enabled: false, quit_shortcut_key: key_hint::ctrl(KeyCode::Char('c')), context_window_percent: None, context_window_used_tokens: None, @@ -607,6 +676,7 @@ mod tests { use_shift_enter_hint: false, is_task_running: true, steer_enabled: false, + collaboration_modes_enabled: false, quit_shortcut_key: key_hint::ctrl(KeyCode::Char('c')), context_window_percent: None, context_window_used_tokens: None, @@ -626,6 +696,7 @@ mod tests { use_shift_enter_hint: false, is_task_running: false, steer_enabled: false, + 
collaboration_modes_enabled: false, quit_shortcut_key: key_hint::ctrl(KeyCode::Char('c')), context_window_percent: None, context_window_used_tokens: None, @@ -645,6 +716,7 @@ mod tests { use_shift_enter_hint: false, is_task_running: false, steer_enabled: false, + collaboration_modes_enabled: false, quit_shortcut_key: key_hint::ctrl(KeyCode::Char('c')), context_window_percent: None, context_window_used_tokens: None, @@ -664,6 +736,7 @@ mod tests { use_shift_enter_hint: false, is_task_running: true, steer_enabled: false, + collaboration_modes_enabled: false, quit_shortcut_key: key_hint::ctrl(KeyCode::Char('c')), context_window_percent: Some(72), context_window_used_tokens: None, @@ -683,6 +756,7 @@ mod tests { use_shift_enter_hint: false, is_task_running: false, steer_enabled: false, + collaboration_modes_enabled: false, quit_shortcut_key: key_hint::ctrl(KeyCode::Char('c')), context_window_percent: None, context_window_used_tokens: Some(123_456), @@ -702,6 +776,7 @@ mod tests { use_shift_enter_hint: false, is_task_running: true, steer_enabled: false, + collaboration_modes_enabled: false, quit_shortcut_key: key_hint::ctrl(KeyCode::Char('c')), context_window_percent: None, context_window_used_tokens: None, @@ -721,6 +796,7 @@ mod tests { use_shift_enter_hint: false, is_task_running: true, steer_enabled: true, + collaboration_modes_enabled: false, quit_shortcut_key: key_hint::ctrl(KeyCode::Char('c')), context_window_percent: None, context_window_used_tokens: None, @@ -740,6 +816,7 @@ mod tests { use_shift_enter_hint: false, is_task_running: false, steer_enabled: false, + collaboration_modes_enabled: false, quit_shortcut_key: key_hint::ctrl(KeyCode::Char('c')), context_window_percent: None, context_window_used_tokens: None, diff --git a/codex-rs/tui2/src/bottom_pane/mod.rs b/codex-rs/tui2/src/bottom_pane/mod.rs index df9c0d84cd..83764d55b8 100644 --- a/codex-rs/tui2/src/bottom_pane/mod.rs +++ b/codex-rs/tui2/src/bottom_pane/mod.rs @@ -27,16 +27,24 @@ use 
bottom_pane_view::BottomPaneView; use codex_core::features::Features; use codex_core::skills::model::SkillMetadata; use codex_file_search::FileMatch; +use codex_protocol::user_input::TextElement; use crossterm::event::KeyCode; use crossterm::event::KeyEvent; use ratatui::buffer::Buffer; use ratatui::layout::Rect; +use ratatui::text::Line; use std::time::Duration; mod approval_overlay; pub(crate) use approval_overlay::ApprovalOverlay; pub(crate) use approval_overlay::ApprovalRequest; mod bottom_pane_view; + +#[derive(Clone, Debug, PartialEq, Eq)] +pub(crate) struct LocalImageAttachment { + pub(crate) placeholder: String, + pub(crate) path: PathBuf, +} mod chat_composer; mod chat_composer_history; mod command_popup; @@ -180,6 +188,11 @@ impl BottomPane { self.composer.set_steer_enabled(enabled); } + pub fn set_collaboration_modes_enabled(&mut self, enabled: bool) { + self.composer.set_collaboration_modes_enabled(enabled); + self.request_redraw(); + } + pub fn status_widget(&self) -> Option<&StatusIndicatorWidget> { self.status.as_ref() } @@ -301,8 +314,14 @@ impl BottomPane { } /// Replace the composer text with `text`. - pub(crate) fn set_composer_text(&mut self, text: String) { - self.composer.set_text_content(text); + pub(crate) fn set_composer_text( + &mut self, + text: String, + text_elements: Vec, + local_image_paths: Vec, + ) { + self.composer + .set_text_content(text, text_elements, local_image_paths); self.request_redraw(); } @@ -326,6 +345,19 @@ impl BottomPane { self.composer.current_text() } + pub(crate) fn composer_text_elements(&self) -> Vec { + self.composer.text_elements() + } + + pub(crate) fn composer_local_images(&self) -> Vec { + self.composer.local_images() + } + + #[cfg(test)] + pub(crate) fn composer_local_image_paths(&self) -> Vec { + self.composer.local_image_paths() + } + /// Update the status indicator header (defaults to "Working") and details below it. /// /// Passing `None` clears any existing details. 
No-ops if the status indicator is not active. @@ -500,6 +532,23 @@ impl BottomPane { self.request_redraw(); } + pub(crate) fn flash_footer_hint(&mut self, line: Line<'static>, duration: Duration) { + self.composer.show_footer_flash(line, duration); + let frame_requester = self.frame_requester.clone(); + if let Ok(handle) = tokio::runtime::Handle::try_current() { + handle.spawn(async move { + tokio::time::sleep(duration).await; + frame_requester.schedule_frame(); + }); + } else { + std::thread::spawn(move || { + std::thread::sleep(duration); + frame_requester.schedule_frame(); + }); + } + self.request_redraw(); + } + pub(crate) fn composer_is_empty(&self) -> bool { self.composer.is_empty() } @@ -619,10 +668,18 @@ impl BottomPane { } } + #[cfg(test)] pub(crate) fn take_recent_submission_images(&mut self) -> Vec { self.composer.take_recent_submission_images() } + pub(crate) fn take_recent_submission_images_with_placeholders( + &mut self, + ) -> Vec { + self.composer + .take_recent_submission_images_with_placeholders() + } + fn as_renderable(&'_ self) -> RenderableItem<'_> { if let Some(view) = self.active_view() { RenderableItem::Borrowed(view) diff --git a/codex-rs/tui2/src/bottom_pane/snapshots/codex_tui2__bottom_pane__footer__tests__footer_shortcuts_collaboration_modes_enabled.snap b/codex-rs/tui2/src/bottom_pane/snapshots/codex_tui2__bottom_pane__footer__tests__footer_shortcuts_collaboration_modes_enabled.snap new file mode 100644 index 0000000000..78af40d9f3 --- /dev/null +++ b/codex-rs/tui2/src/bottom_pane/snapshots/codex_tui2__bottom_pane__footer__tests__footer_shortcuts_collaboration_modes_enabled.snap @@ -0,0 +1,10 @@ +--- +source: tui2/src/bottom_pane/footer.rs +expression: terminal.backend() +--- +" / for commands ! 
for shell commands " +" shift + enter for newline tab to queue message " +" @ for file paths ctrl + v to paste images " +" esc again to edit previous message ctrl + c to exit " +" shift + tab to change mode " +" ctrl + t to view transcript " diff --git a/codex-rs/tui2/src/bottom_pane/textarea.rs b/codex-rs/tui2/src/bottom_pane/textarea.rs index 903ebe9f82..37f2d54c7b 100644 --- a/codex-rs/tui2/src/bottom_pane/textarea.rs +++ b/codex-rs/tui2/src/bottom_pane/textarea.rs @@ -1,4 +1,6 @@ use crate::key_hint::is_altgr; +use codex_protocol::user_input::ByteRange; +use codex_protocol::user_input::TextElement as UserTextElement; use crossterm::event::KeyCode; use crossterm::event::KeyEvent; use crossterm::event::KeyModifiers; @@ -60,10 +62,33 @@ impl TextArea { } } - pub fn set_text(&mut self, text: &str) { + /// Replace the textarea text and clear any existing text elements. + pub fn set_text_clearing_elements(&mut self, text: &str) { + self.set_text_inner(text, None); + } + + /// Replace the textarea text and set the provided text elements. 
+ pub fn set_text_with_elements(&mut self, text: &str, elements: &[UserTextElement]) { + self.set_text_inner(text, Some(elements)); + } + + fn set_text_inner(&mut self, text: &str, elements: Option<&[UserTextElement]>) { self.text = text.to_string(); self.cursor_pos = self.cursor_pos.clamp(0, self.text.len()); self.elements.clear(); + if let Some(elements) = elements { + for elem in elements { + let mut start = elem.byte_range.start.min(self.text.len()); + let mut end = elem.byte_range.end.min(self.text.len()); + start = self.clamp_pos_to_char_boundary(start); + end = self.clamp_pos_to_char_boundary(end); + if start >= end { + continue; + } + self.elements.push(TextElement { range: start..end }); + } + self.elements.sort_by_key(|e| e.range.start); + } self.cursor_pos = self.clamp_pos_to_nearest_boundary(self.cursor_pos); self.wrap_cache.replace(None); self.preferred_col = None; @@ -722,6 +747,22 @@ impl TextArea { .collect() } + pub fn text_elements(&self) -> Vec { + self.elements + .iter() + .map(|e| { + let placeholder = self.text.get(e.range.clone()).map(str::to_string); + UserTextElement { + byte_range: ByteRange { + start: e.range.start, + end: e.range.end, + }, + placeholder, + } + }) + .collect() + } + pub fn element_payload_starting_at(&self, pos: usize) -> Option { let pos = pos.min(self.text.len()); let elem = self.elements.iter().find(|e| e.range.start == pos)?; @@ -1251,7 +1292,7 @@ mod tests { let mut t = TextArea::new(); t.insert_str("abcd"); t.set_cursor(1); - t.set_text("你"); + t.set_text_clearing_elements("你"); assert_eq!(t.cursor(), 0); t.insert_str("a"); assert_eq!(t.text(), "a你"); @@ -1933,7 +1974,7 @@ mod tests { for _ in 0..base_len { base.push_str(&rand_grapheme(&mut rng)); } - ta.set_text(&base); + ta.set_text_clearing_elements(&base); // Choose a valid char boundary for initial cursor let mut boundaries: Vec = vec![0]; boundaries.extend(ta.text().char_indices().map(|(i, _)| i).skip(1)); diff --git a/codex-rs/tui2/src/chatwidget.rs 
b/codex-rs/tui2/src/chatwidget.rs index 117d320a06..6c11077c80 100644 --- a/codex-rs/tui2/src/chatwidget.rs +++ b/codex-rs/tui2/src/chatwidget.rs @@ -90,7 +90,9 @@ use codex_core::skills::model::SkillMetadata; use codex_protocol::ThreadId; use codex_protocol::account::PlanType; use codex_protocol::approvals::ElicitationRequestEvent; +use codex_protocol::models::local_image_label_text; use codex_protocol::parse_command::ParsedCommand; +use codex_protocol::user_input::TextElement; use codex_protocol::user_input::UserInput; use crossterm::event::KeyCode; use crossterm::event::KeyEvent; @@ -122,6 +124,7 @@ use crate::bottom_pane::BottomPaneParams; use crate::bottom_pane::CancellationEvent; use crate::bottom_pane::DOUBLE_PRESS_QUIT_SHORTCUT_ENABLED; use crate::bottom_pane::InputResult; +use crate::bottom_pane::LocalImageAttachment; use crate::bottom_pane::QUIT_SHORTCUT_TIMEOUT; use crate::bottom_pane::SelectionAction; use crate::bottom_pane::SelectionItem; @@ -130,6 +133,7 @@ use crate::bottom_pane::custom_prompt_view::CustomPromptView; use crate::bottom_pane::popup_consts::standard_popup_hint_line; use crate::clipboard_paste::paste_image_to_temp_png; use crate::collab; +use crate::collaboration_modes; use crate::diff_render::display_path_for; use crate::exec_cell::CommandOutput; use crate::exec_cell::ExecCell; @@ -298,8 +302,7 @@ pub(crate) struct ChatWidgetInit { pub(crate) config: Config, pub(crate) frame_requester: FrameRequester, pub(crate) app_event_tx: AppEventSender, - pub(crate) initial_prompt: Option, - pub(crate) initial_images: Vec, + pub(crate) initial_user_message: Option, pub(crate) enhanced_keys_supported: bool, pub(crate) auth_manager: Arc, pub(crate) models_manager: Arc, @@ -316,6 +319,8 @@ enum RateLimitSwitchPromptState { Shown, } +type CollaborationModeSelection = collaboration_modes::Selection; + /// Maintains the per-session UI state and interaction state machines for the chat screen. 
/// /// `ChatWidget` owns the state derived from the protocol event stream (history cells, streaming @@ -345,6 +350,11 @@ pub(crate) struct ChatWidget { active_cell_revision: u64, config: Config, model: Option, + /// Current UI selection for collaboration modes. + /// + /// This selection is only meaningful when `Feature::CollaborationModes` is enabled; when the + /// feature is disabled, the value is effectively inert. + collaboration_mode: CollaborationModeSelection, auth_manager: Arc, models_manager: Arc, session_header: SessionHeader, @@ -449,16 +459,19 @@ pub(crate) struct ActiveCellTranscriptKey { pub(crate) animation_tick: Option, } -struct UserMessage { +pub(crate) struct UserMessage { text: String, - image_paths: Vec, + local_images: Vec, + text_elements: Vec, } impl From for UserMessage { fn from(text: String) -> Self { Self { text, - image_paths: Vec::new(), + local_images: Vec::new(), + // Plain text conversion has no UI element ranges. + text_elements: Vec::new(), } } } @@ -467,16 +480,107 @@ impl From<&str> for UserMessage { fn from(text: &str) -> Self { Self { text: text.to_string(), - image_paths: Vec::new(), + local_images: Vec::new(), + // Plain text conversion has no UI element ranges. 
+ text_elements: Vec::new(), } } } -fn create_initial_user_message(text: String, image_paths: Vec) -> Option { - if text.is_empty() && image_paths.is_empty() { +pub(crate) fn create_initial_user_message( + text: Option, + local_image_paths: Vec, + text_elements: Vec, +) -> Option { + let text = text.unwrap_or_default(); + if text.is_empty() && local_image_paths.is_empty() { None } else { - Some(UserMessage { text, image_paths }) + let local_images = local_image_paths + .into_iter() + .enumerate() + .map(|(idx, path)| LocalImageAttachment { + placeholder: local_image_label_text(idx + 1), + path, + }) + .collect(); + Some(UserMessage { + text, + local_images, + text_elements, + }) + } +} + +// When merging multiple queued drafts (e.g., after interrupt), each draft starts numbering +// its attachments at [Image #1]. Reassign placeholder labels based on the attachment list so +// the combined local_image_paths order matches the labels, even if placeholders were moved +// in the text (e.g., [Image #2] appearing before [Image #1]). 
+fn remap_placeholders_for_message(message: UserMessage, next_label: &mut usize) -> UserMessage { + let UserMessage { + text, + text_elements, + local_images, + } = message; + if local_images.is_empty() { + return UserMessage { + text, + text_elements, + local_images, + }; + } + + let mut mapping: HashMap = HashMap::new(); + let mut remapped_images = Vec::new(); + for attachment in local_images { + let new_placeholder = local_image_label_text(*next_label); + *next_label += 1; + mapping.insert(attachment.placeholder.clone(), new_placeholder.clone()); + remapped_images.push(LocalImageAttachment { + placeholder: new_placeholder, + path: attachment.path, + }); + } + + let mut elements = text_elements; + elements.sort_by_key(|elem| elem.byte_range.start); + + let mut cursor = 0usize; + let mut rebuilt = String::new(); + let mut rebuilt_elements = Vec::new(); + for mut elem in elements { + let start = elem.byte_range.start.min(text.len()); + let end = elem.byte_range.end.min(text.len()); + if let Some(segment) = text.get(cursor..start) { + rebuilt.push_str(segment); + } + + let original = text.get(start..end).unwrap_or(""); + let placeholder_key = elem.placeholder.as_deref().unwrap_or(original); + let replacement = mapping + .get(placeholder_key) + .map(String::as_str) + .unwrap_or(original); + + let elem_start = rebuilt.len(); + rebuilt.push_str(replacement); + let elem_end = rebuilt.len(); + + if let Some(remapped) = mapping.get(placeholder_key) { + elem.placeholder = Some(remapped.clone()); + } + elem.byte_range = (elem_start..elem_end).into(); + rebuilt_elements.push(elem); + cursor = end; + } + if let Some(segment) = text.get(cursor..) { + rebuilt.push_str(segment); + } + + UserMessage { + text: rebuilt, + local_images: remapped_images, + text_elements: rebuilt_elements, } } @@ -906,31 +1010,76 @@ impl ChatWidget { )); } - // If any messages were queued during the task, restore them into the composer. 
- if !self.queued_user_messages.is_empty() { - let queued_text = self - .queued_user_messages + if let Some(combined) = self.drain_queued_messages_for_restore() { + let combined_local_image_paths = combined + .local_images .iter() - .map(|m| m.text.clone()) - .collect::>() - .join("\n"); - let existing_text = self.bottom_pane.composer_text(); - let combined = if existing_text.is_empty() { - queued_text - } else if queued_text.is_empty() { - existing_text - } else { - format!("{queued_text}\n{existing_text}") - }; - self.bottom_pane.set_composer_text(combined); - // Clear the queue and update the status indicator list. - self.queued_user_messages.clear(); + .map(|img| img.path.clone()) + .collect(); + self.bottom_pane.set_composer_text( + combined.text, + combined.text_elements, + combined_local_image_paths, + ); self.refresh_queued_user_messages(); } self.request_redraw(); } + /// Merge queued drafts (plus the current composer state) into a single message for restore. + /// + /// Each queued draft numbers attachments from `[Image #1]`. When we concatenate drafts, we + /// must renumber placeholders in a stable order so the merged attachment list stays aligned + /// with the labels embedded in text. This helper drains the queue, remaps placeholders, and + /// fixes text element byte ranges as content is appended. Returns `None` when there is nothing + /// to restore. 
+ fn drain_queued_messages_for_restore(&mut self) -> Option { + if self.queued_user_messages.is_empty() { + return None; + } + + let existing_message = UserMessage { + text: self.bottom_pane.composer_text(), + text_elements: self.bottom_pane.composer_text_elements(), + local_images: self.bottom_pane.composer_local_images(), + }; + + let mut to_merge: Vec = self.queued_user_messages.drain(..).collect(); + if !existing_message.text.is_empty() || !existing_message.local_images.is_empty() { + to_merge.push(existing_message); + } + + let mut combined = UserMessage { + text: String::new(), + text_elements: Vec::new(), + local_images: Vec::new(), + }; + let mut combined_offset = 0usize; + let mut next_image_label = 1usize; + + for (idx, message) in to_merge.into_iter().enumerate() { + if idx > 0 { + combined.text.push('\n'); + combined_offset += 1; + } + let message = remap_placeholders_for_message(message, &mut next_image_label); + let base = combined_offset; + combined.text.push_str(&message.text); + combined_offset += message.text.len(); + combined + .text_elements + .extend(message.text_elements.into_iter().map(|mut elem| { + elem.byte_range.start += base; + elem.byte_range.end += base; + elem + })); + combined.local_images.extend(message.local_images); + } + + Some(combined) + } + fn on_plan_update(&mut self, update: UpdatePlanArgs) { self.add_to_history(history_cell::new_plan_update(update)); } @@ -1434,8 +1583,7 @@ impl ChatWidget { config, frame_requester, app_event_tx, - initial_prompt, - initial_images, + initial_user_message, enhanced_keys_supported, auth_manager, models_manager, @@ -1478,13 +1626,11 @@ impl ChatWidget { active_cell_revision: 0, config, model, + collaboration_mode: CollaborationModeSelection::default(), auth_manager, models_manager, session_header: SessionHeader::new(model_for_header), - initial_user_message: create_initial_user_message( - initial_prompt.unwrap_or_default(), - initial_images, - ), + initial_user_message, token_info: None, 
rate_limit_snapshot: None, plan_type: None, @@ -1524,6 +1670,9 @@ impl ChatWidget { widget .bottom_pane .set_steer_enabled(widget.config.features.enabled(Feature::Steer)); + widget.bottom_pane.set_collaboration_modes_enabled( + widget.config.features.enabled(Feature::CollaborationModes), + ); widget } @@ -1538,8 +1687,7 @@ impl ChatWidget { mut config, frame_requester, app_event_tx, - initial_prompt, - initial_images, + initial_user_message, enhanced_keys_supported, auth_manager, models_manager, @@ -1575,13 +1723,11 @@ impl ChatWidget { active_cell_revision: 0, config, model: Some(header_model.clone()), + collaboration_mode: CollaborationModeSelection::default(), auth_manager, models_manager, session_header: SessionHeader::new(header_model), - initial_user_message: create_initial_user_message( - initial_prompt.unwrap_or_default(), - initial_images, - ), + initial_user_message, token_info: None, rate_limit_snapshot: None, plan_type: None, @@ -1621,6 +1767,9 @@ impl ChatWidget { widget .bottom_pane .set_steer_enabled(widget.config.features.enabled(Feature::Steer)); + widget.bottom_pane.set_collaboration_modes_enabled( + widget.config.features.enabled(Feature::CollaborationModes), + ); widget } @@ -1685,6 +1834,16 @@ impl ChatWidget { } match key_event { + KeyEvent { + code: KeyCode::BackTab, + kind: KeyEventKind::Press, + .. + } if self.collaboration_modes_enabled() + && !self.bottom_pane.is_task_running() + && self.bottom_pane.no_modal_or_popup_active() => + { + self.cycle_collaboration_mode(); + } KeyEvent { code: KeyCode::Up, modifiers: KeyModifiers::ALT, @@ -1693,46 +1852,64 @@ impl ChatWidget { } if !self.queued_user_messages.is_empty() => { // Prefer the most recently queued item. 
if let Some(user_message) = self.queued_user_messages.pop_back() { - self.bottom_pane.set_composer_text(user_message.text); + let local_image_paths = user_message + .local_images + .iter() + .map(|img| img.path.clone()) + .collect(); + self.bottom_pane.set_composer_text( + user_message.text, + user_message.text_elements, + local_image_paths, + ); self.refresh_queued_user_messages(); self.request_redraw(); } } - _ => { - match self.bottom_pane.handle_key_event(key_event) { - InputResult::Submitted(text) => { - // Enter always sends messages immediately (bypasses queue check) - // Clear any reasoning status header when submitting a new message + _ => match self.bottom_pane.handle_key_event(key_event) { + InputResult::Submitted { + text, + text_elements, + } => { + let user_message = UserMessage { + text, + local_images: self + .bottom_pane + .take_recent_submission_images_with_placeholders(), + text_elements, + }; + if self.is_session_configured() { + // Submitted is only emitted when steer is enabled (Enter sends immediately). + // Reset any reasoning header only when we are actually submitting a turn. 
self.reasoning_buffer.clear(); self.full_reasoning_buffer.clear(); self.set_status_header(String::from("Working")); - let user_message = UserMessage { - text, - image_paths: self.bottom_pane.take_recent_submission_images(), - }; - if !self.is_session_configured() { - self.queue_user_message(user_message); - } else { - self.submit_user_message(user_message); - } - } - InputResult::Queued(text) => { - // Tab queues the message if a task is running, otherwise submits immediately - let user_message = UserMessage { - text, - image_paths: self.bottom_pane.take_recent_submission_images(), - }; + self.submit_user_message(user_message); + } else { self.queue_user_message(user_message); } - InputResult::Command(cmd) => { - self.dispatch_command(cmd); - } - InputResult::CommandWithArgs(cmd, args) => { - self.dispatch_command_with_args(cmd, args); - } - InputResult::None => {} } - } + InputResult::Queued { + text, + text_elements, + } => { + let user_message = UserMessage { + text, + local_images: self + .bottom_pane + .take_recent_submission_images_with_placeholders(), + text_elements, + }; + self.queue_user_message(user_message); + } + InputResult::Command(cmd) => { + self.dispatch_command(cmd); + } + InputResult::CommandWithArgs(cmd, args) => { + self.dispatch_command_with_args(cmd, args); + } + InputResult::None => {} + }, } } @@ -1797,6 +1974,11 @@ impl ChatWidget { SlashCommand::Model => { self.open_model_popup(); } + SlashCommand::Collab => { + if self.collaboration_modes_enabled() { + self.cycle_collaboration_mode(); + } + } SlashCommand::Approvals => { self.open_approvals_popup(); } @@ -1957,6 +2139,19 @@ impl ChatWidget { }, }); } + SlashCommand::Collab => { + if !self.collaboration_modes_enabled() { + return; + } + + if let Some(selection) = collaboration_modes::parse_selection(trimmed) { + self.set_collaboration_mode(selection); + } else if !trimmed.is_empty() { + self.add_error_message(format!( + "Unknown collaboration mode '{trimmed}'. Try: plan, pair, execute." 
+ )); + } + } _ => self.dispatch_command(cmd), } } @@ -2022,8 +2217,20 @@ impl ChatWidget { } fn submit_user_message(&mut self, user_message: UserMessage) { - let UserMessage { text, image_paths } = user_message; - if text.is_empty() && image_paths.is_empty() { + let Some(model) = self.current_model().or(self.config.model.as_deref()) else { + tracing::warn!("cannot submit user message before model is known; queueing"); + self.queued_user_messages.push_front(user_message); + self.refresh_queued_user_messages(); + return; + }; + let model = model.to_string(); + + let UserMessage { + text, + local_images, + text_elements, + } = user_message; + if text.is_empty() && local_images.is_empty() { return; } @@ -2047,15 +2254,16 @@ impl ChatWidget { return; } - for path in image_paths { - items.push(UserInput::LocalImage { path }); + for image in &local_images { + items.push(UserInput::LocalImage { + path: image.path.clone(), + }); } if !text.is_empty() { - // TODO: Thread text element ranges from the composer input. Empty keeps old behavior. 
items.push(UserInput::Text { text: text.clone(), - text_elements: Vec::new(), + text_elements: text_elements.clone(), }); } @@ -2069,14 +2277,34 @@ impl ChatWidget { } } - self.codex_op_tx - .send(Op::UserInput { - items, - final_output_json_schema: None, - }) - .unwrap_or_else(|e| { - tracing::error!("failed to send message: {e}"); - }); + let collaboration_mode = self.collaboration_modes_enabled().then(|| { + collaboration_modes::resolve_mode_or_fallback( + self.models_manager.as_ref(), + self.collaboration_mode, + model.as_str(), + self.config.model_reasoning_effort, + ) + }); + let op = Op::UserTurn { + items, + cwd: self.config.cwd.clone(), + approval_policy: self.config.approval_policy.value(), + sandbox_policy: self.config.sandbox_policy.get().clone(), + model, + effort: self.config.model_reasoning_effort, + summary: self.config.model_reasoning_summary, + final_output_json_schema: None, + collaboration_mode, + }; + + if !self.agent_turn_running { + self.agent_turn_running = true; + self.update_task_running_state(); + } + + self.codex_op_tx.send(op).unwrap_or_else(|e| { + tracing::error!("failed to send message: {e}"); + }); // Persist the text to cross-session message history. if !text.is_empty() { @@ -2089,7 +2317,12 @@ impl ChatWidget { // Only show the text portion in conversation history. 
if !text.is_empty() { - self.add_to_history(history_cell::new_user_prompt(text)); + let local_image_paths = local_images.into_iter().map(|img| img.path).collect(); + self.add_to_history(history_cell::new_user_prompt( + text, + text_elements, + local_image_paths, + )); } self.needs_final_message_separator = false; } @@ -2245,7 +2478,8 @@ impl ChatWidget { | EventMsg::ItemCompleted(_) | EventMsg::AgentMessageContentDelta(_) | EventMsg::ReasoningContentDelta(_) - | EventMsg::ReasoningRawContentDelta(_) => {} + | EventMsg::ReasoningRawContentDelta(_) + | EventMsg::RequestUserInput(_) => {} } } @@ -2299,10 +2533,12 @@ impl ChatWidget { } fn on_user_message_event(&mut self, event: UserMessageEvent) { - let message = event.message.trim(); - // Only show the text portion in conversation history. - if !message.is_empty() { - self.add_to_history(history_cell::new_user_prompt(message.to_string())); + if !event.message.trim().is_empty() { + self.add_to_history(history_cell::new_user_prompt( + event.message, + event.text_elements, + event.local_images, + )); } self.needs_final_message_separator = false; @@ -2409,6 +2645,8 @@ impl ChatWidget { self.plan_type, Local::now(), self.model_display_name(), + self.collaboration_modes_enabled() + .then_some(self.collaboration_mode.label()), )); } fn stop_rate_limit_poller(&mut self) { @@ -3584,6 +3822,9 @@ impl ChatWidget { if feature == Feature::Steer { self.bottom_pane.set_steer_enabled(enabled); } + if feature == Feature::CollaborationModes { + self.bottom_pane.set_collaboration_modes_enabled(enabled); + } } pub(crate) fn set_full_access_warning_acknowledged(&mut self, acknowledged: bool) { @@ -3624,10 +3865,39 @@ impl ChatWidget { self.model.as_deref() } + fn collaboration_modes_enabled(&self) -> bool { + self.config.features.enabled(Feature::CollaborationModes) + } + fn model_display_name(&self) -> &str { self.model.as_deref().unwrap_or(DEFAULT_MODEL_DISPLAY_NAME) } + fn cycle_collaboration_mode(&mut self) { + if 
!self.collaboration_modes_enabled() { + return; + } + + let next = self.collaboration_mode.next(); + self.set_collaboration_mode(next); + } + + /// Update the selected collaboration mode. + /// + /// When collaboration modes are enabled, the current selection is attached to *every* + /// submission as `Op::UserTurn { collaboration_mode: Some(...) }`. + fn set_collaboration_mode(&mut self, selection: CollaborationModeSelection) { + if !self.collaboration_modes_enabled() { + return; + } + + self.collaboration_mode = selection; + let flash = collaboration_modes::flash_line(selection); + const FLASH_DURATION: Duration = Duration::from_secs(2); + self.bottom_pane.flash_footer_hint(flash, FLASH_DURATION); + self.request_redraw(); + } + /// Build a placeholder header cell while the session is configuring. fn placeholder_session_header_cell(config: &Config) -> Box { let placeholder_style = Style::default().add_modifier(Modifier::DIM | Modifier::ITALIC); @@ -3815,8 +4085,14 @@ impl ChatWidget { } /// Replace the composer content with the provided text and reset cursor. 
- pub(crate) fn set_composer_text(&mut self, text: String) { - self.bottom_pane.set_composer_text(text); + pub(crate) fn set_composer_text( + &mut self, + text: String, + text_elements: Vec, + local_image_paths: Vec, + ) { + self.bottom_pane + .set_composer_text(text, text_elements, local_image_paths); } pub(crate) fn show_esc_backtrack_hint(&mut self) { diff --git a/codex-rs/tui2/src/chatwidget/tests.rs b/codex-rs/tui2/src/chatwidget/tests.rs index 15bcd94ab9..9be051a96e 100644 --- a/codex-rs/tui2/src/chatwidget/tests.rs +++ b/codex-rs/tui2/src/chatwidget/tests.rs @@ -8,6 +8,8 @@ use super::*; use crate::app_event::AppEvent; use crate::app_event::ExitMode; use crate::app_event_sender::AppEventSender; +use crate::bottom_pane::LocalImageAttachment; +use crate::history_cell::UserHistoryCell; use crate::test_backend::VT100Backend; use crate::tui::FrameRequester; use assert_matches::assert_matches; @@ -17,7 +19,6 @@ use codex_core::CodexAuth; use codex_core::config::Config; use codex_core::config::ConfigBuilder; use codex_core::config::Constrained; -#[cfg(target_os = "windows")] use codex_core::features::Feature; use codex_core::models_manager::manager::ModelsManager; use codex_core::protocol::AgentMessageDeltaEvent; @@ -57,6 +58,7 @@ use codex_core::protocol::ViewImageToolCallEvent; use codex_core::protocol::WarningEvent; use codex_protocol::ThreadId; use codex_protocol::account::PlanType; +use codex_protocol::config_types::CollaborationMode; use codex_protocol::openai_models::ModelPreset; use codex_protocol::openai_models::ReasoningEffortPreset; use codex_protocol::parse_command::ParsedCommand; @@ -64,6 +66,8 @@ use codex_protocol::plan_tool::PlanItemArg; use codex_protocol::plan_tool::StepStatus; use codex_protocol::plan_tool::UpdatePlanArgs; use codex_protocol::protocol::CodexErrorInfo; +use codex_protocol::user_input::TextElement; +use codex_protocol::user_input::UserInput; use codex_utils_absolute_path::AbsolutePathBuf; use crossterm::event::KeyCode; use 
crossterm::event::KeyEvent; @@ -170,6 +174,364 @@ async fn resumed_initial_messages_render_history() { ); } +#[tokio::test] +async fn replayed_user_message_preserves_text_elements_and_local_images() { + let (mut chat, mut rx, _ops) = make_chatwidget_manual(None).await; + + let placeholder = "[Image #1]"; + let message = format!("{placeholder} replayed"); + let text_elements = vec![TextElement { + byte_range: (0..placeholder.len()).into(), + placeholder: Some(placeholder.to_string()), + }]; + let local_images = vec![PathBuf::from("/tmp/replay.png")]; + + let conversation_id = ThreadId::new(); + let rollout_file = NamedTempFile::new().unwrap(); + let configured = codex_core::protocol::SessionConfiguredEvent { + session_id: conversation_id, + forked_from_id: None, + model: "test-model".to_string(), + model_provider_id: "test-provider".to_string(), + approval_policy: AskForApproval::Never, + sandbox_policy: SandboxPolicy::ReadOnly, + cwd: PathBuf::from("/home/user/project"), + reasoning_effort: Some(ReasoningEffortConfig::default()), + history_log_id: 0, + history_entry_count: 0, + initial_messages: Some(vec![EventMsg::UserMessage(UserMessageEvent { + message: message.clone(), + images: None, + text_elements: text_elements.clone(), + local_images: local_images.clone(), + })]), + rollout_path: rollout_file.path().to_path_buf(), + }; + + chat.handle_codex_event(Event { + id: "initial".into(), + msg: EventMsg::SessionConfigured(configured), + }); + + let mut user_cell = None; + while let Ok(ev) = rx.try_recv() { + if let AppEvent::InsertHistoryCell(cell) = ev + && let Some(cell) = cell.as_any().downcast_ref::() + { + user_cell = Some(( + cell.message.clone(), + cell.text_elements.clone(), + cell.local_image_paths.clone(), + )); + break; + } + } + + let (stored_message, stored_elements, stored_images) = + user_cell.expect("expected a replayed user history cell"); + assert_eq!(stored_message, message); + assert_eq!(stored_elements, text_elements); + 
assert_eq!(stored_images, local_images); +} + +#[tokio::test] +async fn submission_preserves_text_elements_and_local_images() { + let (mut chat, mut rx, mut op_rx) = make_chatwidget_manual(None).await; + + let conversation_id = ThreadId::new(); + let rollout_file = NamedTempFile::new().unwrap(); + let configured = codex_core::protocol::SessionConfiguredEvent { + session_id: conversation_id, + forked_from_id: None, + model: "test-model".to_string(), + model_provider_id: "test-provider".to_string(), + approval_policy: AskForApproval::Never, + sandbox_policy: SandboxPolicy::ReadOnly, + cwd: PathBuf::from("/home/user/project"), + reasoning_effort: Some(ReasoningEffortConfig::default()), + history_log_id: 0, + history_entry_count: 0, + initial_messages: None, + rollout_path: rollout_file.path().to_path_buf(), + }; + chat.handle_codex_event(Event { + id: "initial".into(), + msg: EventMsg::SessionConfigured(configured), + }); + drain_insert_history(&mut rx); + + let placeholder = "[Image #1]"; + let text = format!("{placeholder} submit"); + let text_elements = vec![TextElement { + byte_range: (0..placeholder.len()).into(), + placeholder: Some(placeholder.to_string()), + }]; + let local_images = vec![PathBuf::from("/tmp/submitted.png")]; + + chat.bottom_pane + .set_composer_text(text.clone(), text_elements.clone(), local_images.clone()); + chat.handle_key_event(KeyEvent::new(KeyCode::Enter, KeyModifiers::NONE)); + + let items = match next_submit_op(&mut op_rx) { + Op::UserTurn { items, .. 
} => items, + other => panic!("expected Op::UserTurn, got {other:?}"), + }; + assert_eq!(items.len(), 2); + assert_eq!( + items[0], + UserInput::LocalImage { + path: local_images[0].clone() + } + ); + assert_eq!( + items[1], + UserInput::Text { + text: text.clone(), + text_elements: text_elements.clone(), + } + ); + + let mut user_cell = None; + while let Ok(ev) = rx.try_recv() { + if let AppEvent::InsertHistoryCell(cell) = ev + && let Some(cell) = cell.as_any().downcast_ref::() + { + user_cell = Some(( + cell.message.clone(), + cell.text_elements.clone(), + cell.local_image_paths.clone(), + )); + break; + } + } + + let (stored_message, stored_elements, stored_images) = + user_cell.expect("expected submitted user history cell"); + assert_eq!(stored_message, text); + assert_eq!(stored_elements, text_elements); + assert_eq!(stored_images, local_images); +} + +#[tokio::test] +async fn interrupted_turn_restores_queued_messages_with_images_and_elements() { + let (mut chat, _rx, _op_rx) = make_chatwidget_manual(None).await; + + let first_placeholder = "[Image #1]"; + let first_text = format!("{first_placeholder} first"); + let first_elements = vec![TextElement { + byte_range: (0..first_placeholder.len()).into(), + placeholder: Some(first_placeholder.to_string()), + }]; + let first_images = [PathBuf::from("/tmp/first.png")]; + + let second_placeholder = "[Image #1]"; + let second_text = format!("{second_placeholder} second"); + let second_elements = vec![TextElement { + byte_range: (0..second_placeholder.len()).into(), + placeholder: Some(second_placeholder.to_string()), + }]; + let second_images = [PathBuf::from("/tmp/second.png")]; + + let existing_placeholder = "[Image #1]"; + let existing_text = format!("{existing_placeholder} existing"); + let existing_elements = vec![TextElement { + byte_range: (0..existing_placeholder.len()).into(), + placeholder: Some(existing_placeholder.to_string()), + }]; + let existing_images = vec![PathBuf::from("/tmp/existing.png")]; + + 
chat.queued_user_messages.push_back(UserMessage { + text: first_text, + local_images: vec![LocalImageAttachment { + placeholder: first_placeholder.to_string(), + path: first_images[0].clone(), + }], + text_elements: first_elements, + }); + chat.queued_user_messages.push_back(UserMessage { + text: second_text, + local_images: vec![LocalImageAttachment { + placeholder: second_placeholder.to_string(), + path: second_images[0].clone(), + }], + text_elements: second_elements, + }); + chat.refresh_queued_user_messages(); + + chat.bottom_pane + .set_composer_text(existing_text, existing_elements, existing_images.clone()); + + // When interrupted, queued messages are merged into the composer; image placeholders + // must be renumbered to match the combined local image list. + chat.handle_codex_event(Event { + id: "interrupt".into(), + msg: EventMsg::TurnAborted(codex_core::protocol::TurnAbortedEvent { + reason: TurnAbortReason::Interrupted, + }), + }); + + let first = "[Image #1] first".to_string(); + let second = "[Image #2] second".to_string(); + let third = "[Image #3] existing".to_string(); + let expected_text = format!("{first}\n{second}\n{third}"); + assert_eq!(chat.bottom_pane.composer_text(), expected_text); + + let first_start = 0; + let second_start = first.len() + 1; + let third_start = second_start + second.len() + 1; + let expected_elements = vec![ + TextElement { + byte_range: (first_start..first_start + "[Image #1]".len()).into(), + placeholder: Some("[Image #1]".to_string()), + }, + TextElement { + byte_range: (second_start..second_start + "[Image #2]".len()).into(), + placeholder: Some("[Image #2]".to_string()), + }, + TextElement { + byte_range: (third_start..third_start + "[Image #3]".len()).into(), + placeholder: Some("[Image #3]".to_string()), + }, + ]; + assert_eq!(chat.bottom_pane.composer_text_elements(), expected_elements); + assert_eq!( + chat.bottom_pane.composer_local_image_paths(), + vec![ + first_images[0].clone(), + second_images[0].clone(), 
+ existing_images[0].clone(), + ] + ); +} + +#[tokio::test] +async fn remap_placeholders_uses_attachment_labels() { + let placeholder_one = "[Image #1]"; + let placeholder_two = "[Image #2]"; + let text = format!("{placeholder_two} before {placeholder_one}"); + let elements = vec![ + TextElement { + byte_range: (0..placeholder_two.len()).into(), + placeholder: Some(placeholder_two.to_string()), + }, + TextElement { + byte_range: ("[Image #2] before ".len().."[Image #2] before [Image #1]".len()).into(), + placeholder: Some(placeholder_one.to_string()), + }, + ]; + + let attachments = vec![ + LocalImageAttachment { + placeholder: placeholder_one.to_string(), + path: PathBuf::from("/tmp/one.png"), + }, + LocalImageAttachment { + placeholder: placeholder_two.to_string(), + path: PathBuf::from("/tmp/two.png"), + }, + ]; + let message = UserMessage { + text, + text_elements: elements, + local_images: attachments, + }; + let mut next_label = 3usize; + let remapped = remap_placeholders_for_message(message, &mut next_label); + + assert_eq!(remapped.text, "[Image #4] before [Image #3]"); + assert_eq!( + remapped.text_elements, + vec![ + TextElement { + byte_range: (0.."[Image #4]".len()).into(), + placeholder: Some("[Image #4]".to_string()), + }, + TextElement { + byte_range: ("[Image #4] before ".len().."[Image #4] before [Image #3]".len()) + .into(), + placeholder: Some("[Image #3]".to_string()), + }, + ] + ); + assert_eq!( + remapped.local_images, + vec![ + LocalImageAttachment { + placeholder: "[Image #3]".to_string(), + path: PathBuf::from("/tmp/one.png"), + }, + LocalImageAttachment { + placeholder: "[Image #4]".to_string(), + path: PathBuf::from("/tmp/two.png"), + }, + ] + ); +} + +#[tokio::test] +async fn remap_placeholders_uses_byte_ranges_when_placeholder_missing() { + let placeholder_one = "[Image #1]"; + let placeholder_two = "[Image #2]"; + let text = format!("{placeholder_two} before {placeholder_one}"); + let elements = vec![ + TextElement { + byte_range: 
(0..placeholder_two.len()).into(), + placeholder: None, + }, + TextElement { + byte_range: ("[Image #2] before ".len().."[Image #2] before [Image #1]".len()).into(), + placeholder: None, + }, + ]; + + let attachments = vec![ + LocalImageAttachment { + placeholder: placeholder_one.to_string(), + path: PathBuf::from("/tmp/one.png"), + }, + LocalImageAttachment { + placeholder: placeholder_two.to_string(), + path: PathBuf::from("/tmp/two.png"), + }, + ]; + let message = UserMessage { + text, + text_elements: elements, + local_images: attachments, + }; + let mut next_label = 3usize; + let remapped = remap_placeholders_for_message(message, &mut next_label); + + assert_eq!(remapped.text, "[Image #4] before [Image #3]"); + assert_eq!( + remapped.text_elements, + vec![ + TextElement { + byte_range: (0.."[Image #4]".len()).into(), + placeholder: Some("[Image #4]".to_string()), + }, + TextElement { + byte_range: ("[Image #4] before ".len().."[Image #4] before [Image #3]".len()) + .into(), + placeholder: Some("[Image #3]".to_string()), + }, + ] + ); + assert_eq!( + remapped.local_images, + vec![ + LocalImageAttachment { + placeholder: "[Image #3]".to_string(), + path: PathBuf::from("/tmp/one.png"), + }, + LocalImageAttachment { + placeholder: "[Image #4]".to_string(), + path: PathBuf::from("/tmp/two.png"), + }, + ] + ); +} + /// Entering review mode uses the hint provided by the review request. 
#[tokio::test] async fn entered_review_mode_uses_request_hint() { @@ -339,8 +701,7 @@ async fn helpers_are_available_and_do_not_panic() { config: cfg.clone(), frame_requester: FrameRequester::test_dummy(), app_event_tx: tx, - initial_prompt: None, - initial_images: Vec::new(), + initial_user_message: None, enhanced_keys_supported: false, auth_manager, models_manager: thread_manager.get_models_manager(), @@ -382,6 +743,7 @@ async fn make_chatwidget_manual( skills: None, }); bottom.set_steer_enabled(true); + bottom.set_collaboration_modes_enabled(cfg.features.enabled(Feature::CollaborationModes)); let auth_manager = AuthManager::from_auth_for_testing(CodexAuth::from_api_key("test")); let codex_home = cfg.codex_home.clone(); let widget = ChatWidget { @@ -392,6 +754,7 @@ async fn make_chatwidget_manual( active_cell_revision: 0, config: cfg, model: Some(resolved_model.clone()), + collaboration_mode: CollaborationModeSelection::default(), auth_manager: auth_manager.clone(), models_manager: Arc::new(ModelsManager::new(codex_home, auth_manager)), session_header: SessionHeader::new(resolved_model), @@ -434,6 +797,19 @@ async fn make_chatwidget_manual( (widget, rx, op_rx) } +// ChatWidget may emit other `Op`s (e.g. history/logging updates) on the same channel; this helper +// filters until we see a submission op. +fn next_submit_op(op_rx: &mut tokio::sync::mpsc::UnboundedReceiver) -> Op { + loop { + match op_rx.try_recv() { + Ok(op @ Op::UserTurn { .. 
}) => return op, + Ok(_) => continue, + Err(TryRecvError::Empty) => panic!("expected a submit op but queue was empty"), + Err(TryRecvError::Disconnected) => panic!("expected submit op but channel closed"), + } + } +} + fn set_chatgpt_auth(chat: &mut ChatWidget) { chat.auth_manager = AuthManager::from_auth_for_testing(CodexAuth::create_dummy_chatgpt_auth_for_testing()); @@ -1018,7 +1394,8 @@ async fn enqueueing_history_prompt_multiple_times_is_stable() { assert!(!chat.bottom_pane.is_task_running()); // Submit an initial prompt to seed history. - chat.bottom_pane.set_composer_text("repeat me".to_string()); + chat.bottom_pane + .set_composer_text("repeat me".to_string(), Vec::new(), Vec::new()); chat.handle_key_event(KeyEvent::new(KeyCode::Enter, KeyModifiers::NONE)); // Simulate an active task so further submissions are queued. @@ -1054,7 +1431,7 @@ async fn streaming_final_answer_keeps_task_running_state() { assert!(chat.bottom_pane.status_widget().is_none()); chat.bottom_pane - .set_composer_text("queued submission".to_string()); + .set_composer_text("queued submission".to_string(), Vec::new(), Vec::new()); chat.handle_key_event(KeyEvent::new(KeyCode::Tab, KeyModifiers::NONE)); assert_eq!(chat.queued_user_messages.len(), 1); @@ -1316,6 +1693,107 @@ async fn slash_init_skips_when_project_doc_exists() { ); } +#[test] +fn parse_collaboration_mode_selection_accepts_common_aliases() { + assert_eq!( + collaboration_modes::parse_selection("plan"), + Some(CollaborationModeSelection::Plan) + ); + assert_eq!( + collaboration_modes::parse_selection("PAIR"), + Some(CollaborationModeSelection::PairProgramming) + ); + assert_eq!( + collaboration_modes::parse_selection("pair_programming"), + Some(CollaborationModeSelection::PairProgramming) + ); + assert_eq!( + collaboration_modes::parse_selection("pp"), + Some(CollaborationModeSelection::PairProgramming) + ); + assert_eq!( + collaboration_modes::parse_selection(" exec "), + Some(CollaborationModeSelection::Execute) + ); + 
assert_eq!( + collaboration_modes::parse_selection("execute"), + Some(CollaborationModeSelection::Execute) + ); + assert_eq!(collaboration_modes::parse_selection("unknown"), None); +} + +#[tokio::test] +async fn collab_mode_shift_tab_cycles_only_when_enabled_and_idle() { + let (mut chat, _rx, _op_rx) = make_chatwidget_manual(None).await; + chat.set_feature_enabled(Feature::CollaborationModes, false); + + let initial = chat.collaboration_mode; + chat.handle_key_event(KeyEvent::from(KeyCode::BackTab)); + assert_eq!(chat.collaboration_mode, initial); + + chat.set_feature_enabled(Feature::CollaborationModes, true); + + chat.handle_key_event(KeyEvent::from(KeyCode::BackTab)); + assert_eq!(chat.collaboration_mode, CollaborationModeSelection::Execute); + + chat.handle_key_event(KeyEvent::from(KeyCode::BackTab)); + assert_eq!(chat.collaboration_mode, CollaborationModeSelection::Plan); + + chat.on_task_started(); + chat.handle_key_event(KeyEvent::from(KeyCode::BackTab)); + assert_eq!(chat.collaboration_mode, CollaborationModeSelection::Plan); +} + +#[tokio::test] +async fn collab_slash_command_sets_mode_and_next_submit_sends_user_turn() { + let (mut chat, _rx, mut op_rx) = make_chatwidget_manual(None).await; + chat.conversation_id = Some(ThreadId::new()); + chat.set_feature_enabled(Feature::CollaborationModes, true); + + chat.dispatch_command_with_args(SlashCommand::Collab, "plan".to_string()); + assert_eq!(chat.collaboration_mode, CollaborationModeSelection::Plan); + + chat.bottom_pane + .set_composer_text("hello".to_string(), Vec::new(), Vec::new()); + chat.handle_key_event(KeyEvent::from(KeyCode::Enter)); + match next_submit_op(&mut op_rx) { + Op::UserTurn { + collaboration_mode: Some(CollaborationMode::Plan(_)), + .. 
+ } => {} + other => panic!("expected Op::UserTurn with plan collab mode, got {other:?}"), + } + + chat.bottom_pane + .set_composer_text("follow up".to_string(), Vec::new(), Vec::new()); + chat.handle_key_event(KeyEvent::from(KeyCode::Enter)); + match next_submit_op(&mut op_rx) { + Op::UserTurn { + collaboration_mode: Some(CollaborationMode::Plan(_)), + .. + } => {} + other => panic!("expected Op::UserTurn with plan collab mode, got {other:?}"), + } +} + +#[tokio::test] +async fn collab_mode_defaults_to_pair_programming_when_enabled() { + let (mut chat, _rx, mut op_rx) = make_chatwidget_manual(None).await; + chat.conversation_id = Some(ThreadId::new()); + chat.set_feature_enabled(Feature::CollaborationModes, true); + + chat.bottom_pane + .set_composer_text("hello".to_string(), Vec::new(), Vec::new()); + chat.handle_key_event(KeyEvent::from(KeyCode::Enter)); + match next_submit_op(&mut op_rx) { + Op::UserTurn { + collaboration_mode: Some(CollaborationMode::PairProgramming(_)), + .. + } => {} + other => panic!("expected Op::UserTurn with pair programming collab mode, got {other:?}"), + } +} + #[tokio::test] async fn slash_quit_requests_exit() { let (mut chat, mut rx, _op_rx) = make_chatwidget_manual(None).await; @@ -2380,7 +2858,7 @@ async fn interrupt_prepends_queued_messages_before_existing_composer_text() { chat.bottom_pane.set_task_running(true); chat.bottom_pane - .set_composer_text("current draft".to_string()); + .set_composer_text("current draft".to_string(), Vec::new(), Vec::new()); chat.queued_user_messages .push_back(UserMessage::from("first queued".to_string())); @@ -3364,8 +3842,11 @@ async fn chatwidget_exec_and_status_layout_vt100_snapshot() { delta: "**Investigating rendering code**".into(), }), }); - chat.bottom_pane - .set_composer_text("Summarize recent commits".to_string()); + chat.bottom_pane.set_composer_text( + "Summarize recent commits".to_string(), + Vec::new(), + Vec::new(), + ); let width: u16 = 80; let ui_height: u16 = 
chat.desired_height(width); diff --git a/codex-rs/tui2/src/clipboard_paste.rs b/codex-rs/tui2/src/clipboard_paste.rs index 5863c728b0..4d28b365fe 100644 --- a/codex-rs/tui2/src/clipboard_paste.rs +++ b/codex-rs/tui2/src/clipboard_paste.rs @@ -244,9 +244,14 @@ pub fn paste_image_to_temp_png() -> Result<(PathBuf, PastedImageInfo), PasteImag /// - shell-escaped single paths (via `shlex`) pub fn normalize_pasted_path(pasted: &str) -> Option { let pasted = pasted.trim(); + let unquoted = pasted + .strip_prefix('"') + .and_then(|s| s.strip_suffix('"')) + .or_else(|| pasted.strip_prefix('\'').and_then(|s| s.strip_suffix('\''))) + .unwrap_or(pasted); // file:// URL → filesystem path - if let Ok(url) = url::Url::parse(pasted) + if let Ok(url) = url::Url::parse(unquoted) && url.scheme() == "file" { return url.to_file_path().ok(); @@ -258,38 +263,18 @@ pub fn normalize_pasted_path(pasted: &str) -> Option { // Detect unquoted Windows paths and bypass POSIX shlex which // treats backslashes as escapes (e.g., C:\Users\Alice\file.png). // Also handles UNC paths (\\server\share\path). 
- let looks_like_windows_path = { - // Drive letter path: C:\ or C:/ - let drive = pasted - .chars() - .next() - .map(|c| c.is_ascii_alphabetic()) - .unwrap_or(false) - && pasted.get(1..2) == Some(":") - && pasted - .get(2..3) - .map(|s| s == "\\" || s == "/") - .unwrap_or(false); - // UNC path: \\server\share - let unc = pasted.starts_with("\\\\"); - drive || unc - }; - if looks_like_windows_path { - #[cfg(target_os = "linux")] - { - if is_probably_wsl() - && let Some(converted) = convert_windows_path_to_wsl(pasted) - { - return Some(converted); - } - } - return Some(PathBuf::from(pasted)); + if let Some(path) = normalize_windows_path(unquoted) { + return Some(path); } // shell-escaped single path → unescaped let parts: Vec = shlex::Shlex::new(pasted).collect(); if parts.len() == 1 { - return parts.into_iter().next().map(PathBuf::from); + let part = parts.into_iter().next()?; + if let Some(path) = normalize_windows_path(&part) { + return Some(path); + } + return Some(PathBuf::from(part)); } None @@ -339,6 +324,36 @@ fn convert_windows_path_to_wsl(input: &str) -> Option { Some(result) } +fn normalize_windows_path(input: &str) -> Option { + // Drive letter path: C:\ or C:/ + let drive = input + .chars() + .next() + .map(|c| c.is_ascii_alphabetic()) + .unwrap_or(false) + && input.get(1..2) == Some(":") + && input + .get(2..3) + .map(|s| s == "\\" || s == "/") + .unwrap_or(false); + // UNC path: \\server\share + let unc = input.starts_with("\\\\"); + if !drive && !unc { + return None; + } + + #[cfg(target_os = "linux")] + { + if is_probably_wsl() + && let Some(converted) = convert_windows_path_to_wsl(input) + { + return Some(converted); + } + } + + Some(PathBuf::from(input)) +} + /// Infer an image format for the provided path based on its extension. 
pub fn pasted_image_format(path: &Path) -> EncodedImageFormat { match path @@ -438,9 +453,39 @@ mod pasted_paths_tests { #[test] fn normalize_single_quoted_windows_path() { let input = r"'C:\\Users\\Alice\\My File.jpeg'"; + let unquoted = r"C:\\Users\\Alice\\My File.jpeg"; let result = normalize_pasted_path(input).expect("should trim single quotes on windows path"); - assert_eq!(result, PathBuf::from(r"C:\\Users\\Alice\\My File.jpeg")); + #[cfg(target_os = "linux")] + let expected = if is_probably_wsl() + && let Some(converted) = convert_windows_path_to_wsl(unquoted) + { + converted + } else { + PathBuf::from(unquoted) + }; + #[cfg(not(target_os = "linux"))] + let expected = PathBuf::from(unquoted); + assert_eq!(result, expected); + } + + #[test] + fn normalize_double_quoted_windows_path() { + let input = r#""C:\\Users\\Alice\\My File.jpeg""#; + let unquoted = r"C:\\Users\\Alice\\My File.jpeg"; + let result = + normalize_pasted_path(input).expect("should trim double quotes on windows path"); + #[cfg(target_os = "linux")] + let expected = if is_probably_wsl() + && let Some(converted) = convert_windows_path_to_wsl(unquoted) + { + converted + } else { + PathBuf::from(unquoted) + }; + #[cfg(not(target_os = "linux"))] + let expected = PathBuf::from(unquoted); + assert_eq!(result, expected); } #[test] diff --git a/codex-rs/tui2/src/collaboration_modes.rs b/codex-rs/tui2/src/collaboration_modes.rs new file mode 100644 index 0000000000..bdd1d8b63c --- /dev/null +++ b/codex-rs/tui2/src/collaboration_modes.rs @@ -0,0 +1,135 @@ +//! Collaboration mode selection + rendering helpers for the TUI. +//! +//! This module is intentionally UI-focused: +//! - It owns the user-facing set of selectable collaboration modes and how they cycle. +//! - It parses `/collab ` arguments into a selection. +//! - It resolves a `Selection` to a concrete `codex_protocol::config_types::CollaborationMode` by +//! picking from the `ModelsManager` builtin collaboration presets. +//! 
- It builds the small footer "flash" line shown after changing modes. +//! +//! The `ChatWidget` owns the session state and decides *when* selection/mode changes are allowed +//! (feature flag, task running, modals open, etc.). This module just provides the building blocks. + +use crate::key_hint; +use codex_core::models_manager::manager::ModelsManager; +use codex_protocol::config_types::CollaborationMode; +use codex_protocol::config_types::Settings; +use codex_protocol::openai_models::ReasoningEffort; +use crossterm::event::KeyCode; +use ratatui::style::Stylize; +use ratatui::text::Line; + +/// The user-facing collaboration mode choices supported by the TUI. +/// +/// This is distinct from `CollaborationMode`: it represents a stable UI selection and the cycling +/// order, while `CollaborationMode` can carry nested settings/prompt configuration. +#[derive(Clone, Copy, Debug, Default, PartialEq, Eq)] +pub(crate) enum Selection { + Plan, + #[default] + PairProgramming, + Execute, +} + +impl Selection { + /// Cycle to the next selection. + /// + /// The TUI cycles through a small, fixed set of presets. + pub(crate) fn next(self) -> Self { + match self { + Self::Plan => Self::PairProgramming, + Self::PairProgramming => Self::Execute, + Self::Execute => Self::Plan, + } + } + + /// User-facing label used in UI surfaces like `/status` and the footer flash. + pub(crate) fn label(self) -> &'static str { + match self { + Self::Plan => "Plan", + Self::PairProgramming => "Pair Programming", + Self::Execute => "Execute", + } + } +} + +/// Parse a user argument (e.g. `/collab plan`, `/collab pair_programming`) into a selection. +/// +/// The parser is forgiving: it strips whitespace, `-`, and `_`, and matches case-insensitively. 
+pub(crate) fn parse_selection(input: &str) -> Option { + let normalized: String = input + .chars() + .filter(|c| !c.is_ascii_whitespace() && *c != '-' && *c != '_') + .flat_map(char::to_lowercase) + .collect(); + + match normalized.as_str() { + "plan" => Some(Selection::Plan), + "pair" | "pairprogramming" | "pp" => Some(Selection::PairProgramming), + "execute" | "exec" => Some(Selection::Execute), + _ => None, + } +} + +/// Resolve a selection to a concrete collaboration mode preset. +/// +/// `ModelsManager::list_collaboration_modes()` is expected to return a builtin set of presets; this +/// function selects the first preset of the desired variant. +pub(crate) fn resolve_mode( + models_manager: &ModelsManager, + selection: Selection, +) -> Option { + match selection { + Selection::Plan => models_manager + .list_collaboration_modes() + .into_iter() + .find(|mode| matches!(mode, CollaborationMode::Plan(_))), + Selection::PairProgramming => models_manager + .list_collaboration_modes() + .into_iter() + .find(|mode| matches!(mode, CollaborationMode::PairProgramming(_))), + Selection::Execute => models_manager + .list_collaboration_modes() + .into_iter() + .find(|mode| matches!(mode, CollaborationMode::Execute(_))), + } +} + +/// Resolve a selection to a concrete collaboration mode preset, falling back to a synthesized mode +/// when the desired preset is unavailable. +/// +/// This keeps the TUI behavior stable when collaboration presets are missing (for example, when +/// running in offline/unit-test contexts): if the feature flag is enabled, every submission carries +/// an explicit collaboration mode so core doesn't fall back to `Custom`. 
+pub(crate) fn resolve_mode_or_fallback( + models_manager: &ModelsManager, + selection: Selection, + fallback_model: &str, + fallback_effort: Option, +) -> CollaborationMode { + resolve_mode(models_manager, selection).unwrap_or_else(|| { + let settings = Settings { + model: fallback_model.to_string(), + reasoning_effort: fallback_effort, + developer_instructions: None, + }; + + match selection { + Selection::Plan => CollaborationMode::Plan(settings), + Selection::PairProgramming => CollaborationMode::PairProgramming(settings), + Selection::Execute => CollaborationMode::Execute(settings), + } + }) +} + +/// Build a 1-line footer "flash" that is shown after switching modes. +/// +/// The `ChatWidget` controls when to show this and how long it should remain visible. +pub(crate) fn flash_line(selection: Selection) -> Line<'static> { + Line::from(vec![ + selection.label().bold(), + " (".dim(), + key_hint::shift(KeyCode::Tab).into(), + " to change mode)".dim(), + ]) +} diff --git a/codex-rs/tui2/src/history_cell.rs b/codex-rs/tui2/src/history_cell.rs index e3334a0681..a313f9b2a7 100644 --- a/codex-rs/tui2/src/history_cell.rs +++ b/codex-rs/tui2/src/history_cell.rs @@ -44,6 +44,7 @@ use codex_protocol::openai_models::ReasoningEffort as ReasoningEffortConfig; use codex_protocol::plan_tool::PlanItemArg; use codex_protocol::plan_tool::StepStatus; use codex_protocol::plan_tool::UpdatePlanArgs; +use codex_protocol::user_input::TextElement; use image::DynamicImage; use image::ImageReader; use mcp_types::EmbeddedResourceResource; @@ -51,6 +52,7 @@ use mcp_types::Resource; use mcp_types::ResourceLink; use mcp_types::ResourceTemplate; use ratatui::prelude::*; +use ratatui::style::Color; use ratatui::style::Modifier; use ratatui::style::Style; use ratatui::style::Styled; @@ -214,6 +216,75 @@ impl dyn HistoryCell { #[derive(Debug)] pub(crate) struct UserHistoryCell { pub message: String, + pub text_elements: Vec, + #[allow(dead_code)] + pub local_image_paths: Vec, +} + +/// Build 
logical lines for a user message with styled text elements. +/// +/// This preserves explicit newlines while interleaving element spans and skips +/// malformed byte ranges instead of panicking during history rendering. +fn build_user_message_lines_with_elements( + message: &str, + elements: &[TextElement], + style: Style, + element_style: Style, +) -> Vec> { + let mut elements = elements.to_vec(); + elements.sort_by_key(|e| e.byte_range.start); + let mut offset = 0usize; + let mut raw_lines: Vec> = Vec::new(); + for line_text in message.split('\n') { + let line_start = offset; + let line_end = line_start + line_text.len(); + let mut spans: Vec> = Vec::new(); + // Track how much of the line we've emitted to interleave plain and styled spans. + let mut cursor = line_start; + for elem in &elements { + let start = elem.byte_range.start.max(line_start); + let end = elem.byte_range.end.min(line_end); + if start >= end { + continue; + } + let rel_start = start - line_start; + let rel_end = end - line_start; + // Guard against malformed UTF-8 byte ranges from upstream data; skip + // invalid elements rather than panicking while rendering history. + if !line_text.is_char_boundary(rel_start) || !line_text.is_char_boundary(rel_end) { + continue; + } + let rel_cursor = cursor - line_start; + if cursor < start + && line_text.is_char_boundary(rel_cursor) + && let Some(segment) = line_text.get(rel_cursor..rel_start) + { + spans.push(Span::from(segment.to_string())); + } + if let Some(segment) = line_text.get(rel_start..rel_end) { + spans.push(Span::styled(segment.to_string(), element_style)); + cursor = end; + } + } + let rel_cursor = cursor - line_start; + if cursor < line_end + && line_text.is_char_boundary(rel_cursor) + && let Some(segment) = line_text.get(rel_cursor..) 
+ { + spans.push(Span::from(segment.to_string())); + } + let line = if spans.is_empty() { + Line::from(line_text.to_string()).style(style) + } else { + Line::from(spans).style(style) + }; + raw_lines.push(line); + // Split on '\n' so any '\r' stays in the line; advancing by 1 accounts + // for the separator byte. + offset = line_end + 1; + } + + raw_lines } impl HistoryCell for UserHistoryCell { @@ -229,13 +300,28 @@ impl HistoryCell for UserHistoryCell { .max(1); let style = user_message_style(); + let element_style = style.fg(Color::Cyan); - let (wrapped, joiner_before) = crate::wrapping::word_wrap_lines_with_joiners( - self.message.lines().map(|l| Line::from(l).style(style)), - // Wrap algorithm matches textarea.rs. - RtOptions::new(usize::from(wrap_width)) - .wrap_algorithm(textwrap::WrapAlgorithm::FirstFit), - ); + let (wrapped, joiner_before) = if self.text_elements.is_empty() { + crate::wrapping::word_wrap_lines_with_joiners( + self.message.split('\n').map(|l| Line::from(l).style(style)), + // Wrap algorithm matches textarea.rs. 
+ RtOptions::new(usize::from(wrap_width)) + .wrap_algorithm(textwrap::WrapAlgorithm::FirstFit), + ) + } else { + let raw_lines = build_user_message_lines_with_elements( + &self.message, + &self.text_elements, + style, + element_style, + ); + crate::wrapping::word_wrap_lines_with_joiners( + raw_lines, + RtOptions::new(usize::from(wrap_width)) + .wrap_algorithm(textwrap::WrapAlgorithm::FirstFit), + ) + }; let mut lines: Vec> = Vec::new(); let mut joins: Vec> = Vec::new(); @@ -955,8 +1041,16 @@ pub(crate) fn new_session_info( SessionInfoCell(CompositeHistoryCell { parts }) } -pub(crate) fn new_user_prompt(message: String) -> UserHistoryCell { - UserHistoryCell { message } +pub(crate) fn new_user_prompt( + message: String, + text_elements: Vec, + local_image_paths: Vec, +) -> UserHistoryCell { + UserHistoryCell { + message, + text_elements, + local_image_paths, + } } #[derive(Debug)] @@ -1395,7 +1489,8 @@ pub(crate) fn empty_mcp_output() -> PlainHistoryCell { " • No MCP servers configured.".italic().into(), Line::from(vec![ " See the ".into(), - "\u{1b}]8;;https://github.com/openai/codex/blob/main/docs/config.md#mcp_servers\u{7}MCP docs\u{1b}]8;;\u{7}".underlined(), + "\u{1b}]8;;https://developers.openai.com/codex/mcp\u{7}MCP docs\u{1b}]8;;\u{7}" + .underlined(), " to configure them.".into(), ]) .style(Style::default().add_modifier(Modifier::DIM)), @@ -2717,6 +2812,8 @@ mod tests { let msg = "one two three four five six seven"; let cell = UserHistoryCell { message: msg.to_string(), + text_elements: Vec::new(), + local_image_paths: Vec::new(), }; // Small width to force wrapping more clearly. Effective wrap width is width-2 due to the ▌ prefix and trailing space. 
diff --git a/codex-rs/tui2/src/lib.rs b/codex-rs/tui2/src/lib.rs index 11a0f9f6be..27d649c4de 100644 --- a/codex-rs/tui2/src/lib.rs +++ b/codex-rs/tui2/src/lib.rs @@ -48,6 +48,7 @@ mod cli; mod clipboard_copy; mod clipboard_paste; mod collab; +mod collaboration_modes; mod color; pub mod custom_terminal; mod diff_render; diff --git a/codex-rs/tui2/src/public_widgets/composer_input.rs b/codex-rs/tui2/src/public_widgets/composer_input.rs index 2a80c087ee..46a7e72bcf 100644 --- a/codex-rs/tui2/src/public_widgets/composer_input.rs +++ b/codex-rs/tui2/src/public_widgets/composer_input.rs @@ -48,13 +48,14 @@ impl ComposerInput { /// Clear the input text. pub fn clear(&mut self) { - self.inner.set_text_content(String::new()); + self.inner + .set_text_content(String::new(), Vec::new(), Vec::new()); } /// Feed a key event into the composer and return a high-level action. pub fn input(&mut self, key: KeyEvent) -> ComposerAction { let action = match self.inner.handle_key_event(key).0 { - InputResult::Submitted(text) => ComposerAction::Submitted(text), + InputResult::Submitted { text, .. 
} => ComposerAction::Submitted(text), _ => ComposerAction::None, }; self.drain_app_events(); diff --git a/codex-rs/tui2/src/slash_command.rs b/codex-rs/tui2/src/slash_command.rs index caac37b85d..2e1e032983 100644 --- a/codex-rs/tui2/src/slash_command.rs +++ b/codex-rs/tui2/src/slash_command.rs @@ -23,6 +23,7 @@ pub enum SlashCommand { Fork, Init, Compact, + Collab, // Undo, Diff, Mention, @@ -54,6 +55,7 @@ impl SlashCommand { SlashCommand::Skills => "use skills to improve how Codex performs specific tasks", SlashCommand::Status => "show current session configuration and token usage", SlashCommand::Model => "choose what model and reasoning effort to use", + SlashCommand::Collab => "change collaboration mode (experimental)", SlashCommand::Approvals => "choose what Codex can do without approval", SlashCommand::ElevateSandbox => "set up elevated agent sandbox", SlashCommand::Mcp => "list configured MCP tools", @@ -93,6 +95,7 @@ impl SlashCommand { | SlashCommand::Exit => true, SlashCommand::Rollout => true, SlashCommand::TestApproval => true, + SlashCommand::Collab => true, } } diff --git a/codex-rs/tui2/src/status/card.rs b/codex-rs/tui2/src/status/card.rs index 3e9e07d538..7988c7d0ff 100644 --- a/codex-rs/tui2/src/status/card.rs +++ b/codex-rs/tui2/src/status/card.rs @@ -63,6 +63,7 @@ struct StatusHistoryCell { approval: String, sandbox: String, agents_summary: String, + collaboration_mode: Option, model_provider: Option, account: Option, session_id: Option, @@ -83,6 +84,7 @@ pub(crate) fn new_status_output( plan_type: Option, now: DateTime, model_name: &str, + collaboration_mode: Option<&str>, ) -> CompositeHistoryCell { let command = PlainHistoryCell::new(vec!["/status".magenta().into()]); let card = StatusHistoryCell::new( @@ -96,6 +98,7 @@ pub(crate) fn new_status_output( plan_type, now, model_name, + collaboration_mode, ); CompositeHistoryCell::new(vec![Box::new(command), Box::new(card)]) @@ -114,6 +117,7 @@ impl StatusHistoryCell { plan_type: Option, now: 
DateTime, model_name: &str, + collaboration_mode: Option<&str>, ) -> Self { let config_entries = create_config_summary_entries(config, model_name); let (model_name, model_details) = compose_model_display(model_name, &config_entries); @@ -165,6 +169,7 @@ impl StatusHistoryCell { approval, sandbox, agents_summary, + collaboration_mode: collaboration_mode.map(ToString::to_string), model_provider, account, session_id, @@ -360,6 +365,9 @@ impl HistoryCell for StatusHistoryCell { if self.session_id.is_some() && self.forked_from.is_some() { push_label(&mut labels, &mut seen, "Forked from"); } + if self.collaboration_mode.is_some() { + push_label(&mut labels, &mut seen, "Collaboration mode"); + } push_label(&mut labels, &mut seen, "Token usage"); if self.token_usage.context_window.is_some() { push_label(&mut labels, &mut seen, "Context window"); @@ -408,6 +416,10 @@ impl HistoryCell for StatusHistoryCell { lines.push(formatter.line("Account", vec![Span::from(account_value)])); } + if let Some(collab_mode) = self.collaboration_mode.as_ref() { + lines.push(formatter.line("Collaboration mode", vec![Span::from(collab_mode.clone())])); + } + if let Some(session) = self.session_id.as_ref() { lines.push(formatter.line("Session", vec![Span::from(session.clone())])); } diff --git a/codex-rs/tui2/src/status/tests.rs b/codex-rs/tui2/src/status/tests.rs index e79688f44e..9d0f6d0833 100644 --- a/codex-rs/tui2/src/status/tests.rs +++ b/codex-rs/tui2/src/status/tests.rs @@ -152,6 +152,7 @@ async fn status_snapshot_includes_reasoning_details() { None, captured_at, &model_slug, + None, ); let mut rendered_lines = render_lines(&composite.display_lines(80)); if cfg!(windows) { @@ -203,6 +204,7 @@ async fn status_snapshot_includes_forked_from() { None, captured_at, &model_slug, + None, ); let mut rendered_lines = render_lines(&composite.display_lines(80)); if cfg!(windows) { @@ -260,6 +262,7 @@ async fn status_snapshot_includes_monthly_limit() { None, captured_at, &model_slug, + None, ); let 
mut rendered_lines = render_lines(&composite.display_lines(80)); if cfg!(windows) { @@ -304,6 +307,7 @@ async fn status_snapshot_shows_unlimited_credits() { None, captured_at, &model_slug, + None, ); let rendered = render_lines(&composite.display_lines(120)); assert!( @@ -348,6 +352,7 @@ async fn status_snapshot_shows_positive_credits() { None, captured_at, &model_slug, + None, ); let rendered = render_lines(&composite.display_lines(120)); assert!( @@ -392,6 +397,7 @@ async fn status_snapshot_hides_zero_credits() { None, captured_at, &model_slug, + None, ); let rendered = render_lines(&composite.display_lines(120)); assert!( @@ -434,6 +440,7 @@ async fn status_snapshot_hides_when_has_no_credits_flag() { None, captured_at, &model_slug, + None, ); let rendered = render_lines(&composite.display_lines(120)); assert!( @@ -476,6 +483,7 @@ async fn status_card_token_usage_excludes_cached_tokens() { None, now, &model_slug, + None, ); let rendered = render_lines(&composite.display_lines(120)); @@ -533,6 +541,7 @@ async fn status_snapshot_truncates_in_narrow_terminal() { None, captured_at, &model_slug, + None, ); let mut rendered_lines = render_lines(&composite.display_lines(70)); if cfg!(windows) { @@ -579,6 +588,7 @@ async fn status_snapshot_shows_missing_limits_message() { None, now, &model_slug, + None, ); let mut rendered_lines = render_lines(&composite.display_lines(80)); if cfg!(windows) { @@ -642,6 +652,7 @@ async fn status_snapshot_includes_credits_and_limits() { None, captured_at, &model_slug, + None, ); let mut rendered_lines = render_lines(&composite.display_lines(80)); if cfg!(windows) { @@ -694,6 +705,7 @@ async fn status_snapshot_shows_empty_limits_message() { None, captured_at, &model_slug, + None, ); let mut rendered_lines = render_lines(&composite.display_lines(80)); if cfg!(windows) { @@ -755,6 +767,7 @@ async fn status_snapshot_shows_stale_limits_message() { None, now, &model_slug, + None, ); let mut rendered_lines = 
render_lines(&composite.display_lines(80)); if cfg!(windows) { @@ -820,6 +833,7 @@ async fn status_snapshot_cached_limits_hide_credits_without_flag() { None, now, &model_slug, + None, ); let mut rendered_lines = render_lines(&composite.display_lines(80)); if cfg!(windows) { @@ -875,6 +889,7 @@ async fn status_context_window_uses_last_usage() { None, now, &model_slug, + None, ); let rendered_lines = render_lines(&composite.display_lines(80)); let context_line = rendered_lines diff --git a/codex-rs/tui2/src/transcript_view_cache.rs b/codex-rs/tui2/src/transcript_view_cache.rs index a32094b118..ab5d94ee86 100644 --- a/codex-rs/tui2/src/transcript_view_cache.rs +++ b/codex-rs/tui2/src/transcript_view_cache.rs @@ -1018,6 +1018,8 @@ mod tests { let mut cache = TranscriptViewCache::new(); let cells: Vec> = vec![Arc::new(UserHistoryCell { message: "hello".to_string(), + text_elements: Vec::new(), + local_image_paths: Vec::new(), })]; cache.ensure_wrapped(&cells, 20); diff --git a/codex-rs/utils/pty/src/pipe.rs b/codex-rs/utils/pty/src/pipe.rs index 4aca5af44c..35afc9b171 100644 --- a/codex-rs/utils/pty/src/pipe.rs +++ b/codex-rs/utils/pty/src/pipe.rs @@ -118,7 +118,7 @@ async fn spawn_process_with_stdin_mode( #[cfg(unix)] unsafe { command.pre_exec(move || { - crate::process_group::set_process_group()?; + crate::process_group::detach_from_tty()?; #[cfg(target_os = "linux")] crate::process_group::set_parent_death_signal(parent_pid)?; Ok(()) diff --git a/codex-rs/utils/pty/src/process_group.rs b/codex-rs/utils/pty/src/process_group.rs index ae77a36be0..dadff29f9e 100644 --- a/codex-rs/utils/pty/src/process_group.rs +++ b/codex-rs/utils/pty/src/process_group.rs @@ -4,6 +4,8 @@ //! command can be cleaned up reliably: //! - `set_process_group` is called in `pre_exec` so the child starts its own //! process group. +//! - `detach_from_tty` starts a new session so non-interactive children do not +//! inherit the controlling TTY. //! 
- `kill_process_group_by_pid` targets the whole group (children/grandchildren) //! - `kill_process_group` targets a known process group ID directly //! instead of a single PID. @@ -42,6 +44,26 @@ pub fn set_parent_death_signal(_parent_pid: i32) -> io::Result<()> { Ok(()) } +#[cfg(unix)] +/// Detach from the controlling TTY by starting a new session. +pub fn detach_from_tty() -> io::Result<()> { + let result = unsafe { libc::setsid() }; + if result == -1 { + let err = io::Error::last_os_error(); + if err.raw_os_error() == Some(libc::EPERM) { + return set_process_group(); + } + return Err(err); + } + Ok(()) +} + +#[cfg(not(unix))] +/// No-op on non-Unix platforms. +pub fn detach_from_tty() -> io::Result<()> { + Ok(()) +} + #[cfg(unix)] /// Put the calling process into its own process group. /// diff --git a/codex-rs/utils/pty/src/tests.rs b/codex-rs/utils/pty/src/tests.rs index 8c41d1dbde..89e72eeb4b 100644 --- a/codex-rs/utils/pty/src/tests.rs +++ b/codex-rs/utils/pty/src/tests.rs @@ -152,6 +152,49 @@ async fn pipe_process_round_trips_stdin() -> anyhow::Result<()> { Ok(()) } +#[cfg(unix)] +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn pipe_process_detaches_from_parent_session() -> anyhow::Result<()> { + let parent_sid = unsafe { libc::getsid(0) }; + if parent_sid == -1 { + anyhow::bail!("failed to read parent session id"); + } + + let env_map: HashMap = std::env::vars().collect(); + let script = "echo $$; sleep 0.2"; + let (program, args) = shell_command(script); + let spawned = spawn_pipe_process(&program, &args, Path::new("."), &env_map, &None).await?; + + let mut output_rx = spawned.output_rx; + let pid_bytes = + tokio::time::timeout(tokio::time::Duration::from_millis(500), output_rx.recv()).await??; + let pid_text = String::from_utf8_lossy(&pid_bytes); + let child_pid: i32 = pid_text + .split_whitespace() + .next() + .ok_or_else(|| anyhow::anyhow!("missing child pid output: {pid_text:?}"))? 
+ .parse()?; + + let child_sid = unsafe { libc::getsid(child_pid) }; + if child_sid == -1 { + anyhow::bail!("failed to read child session id"); + } + + assert_eq!(child_sid, child_pid, "expected child to be session leader"); + assert_ne!( + child_sid, parent_sid, + "expected child to be detached from parent session" + ); + + let exit_code = spawned.exit_rx.await.unwrap_or(-1); + assert_eq!( + exit_code, 0, + "expected detached pipe process to exit cleanly" + ); + + Ok(()) +} + #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn pipe_and_pty_share_interface() -> anyhow::Result<()> { let env_map: HashMap = std::env::vars().collect();