Compare commits

...

26 Commits

Author SHA1 Message Date
Ahmed Ibrahim
5bbf94bd93 subagents 2025-08-23 11:31:52 -07:00
Ahmed Ibrahim
76c209d78c progress 2025-08-23 11:07:02 -07:00
Ahmed Ibrahim
d2fe780280 Merge branch 'aggregate-out-err' of https://github.com/openai/codex into aggregate-out-err 2025-08-23 09:47:51 -07:00
Ahmed Ibrahim
e172014062 commit 2025-08-23 09:46:54 -07:00
Ahmed Ibrahim
d4cb5fcdbd Update codex-rs/core/src/codex.rs
Co-authored-by: Gabriel Peal <gpeal@users.noreply.github.com>
2025-08-23 09:01:44 -07:00
Ahmed Ibrahim
549a5de99a Update codex-rs/core/src/codex.rs
Co-authored-by: Gabriel Peal <gpeal@users.noreply.github.com>
2025-08-23 09:01:15 -07:00
Ahmed Ibrahim
e0418bf4b9 rust 2025-08-22 20:08:51 -07:00
Ahmed Ibrahim
07c8dbc94d rust 2025-08-22 20:05:52 -07:00
Ahmed Ibrahim
bb9be76328 rust 2025-08-22 20:02:28 -07:00
Ahmed Ibrahim
b277a654fa rust 2025-08-22 19:54:58 -07:00
Ahmed Ibrahim
8752a9b049 rust 2025-08-22 19:51:45 -07:00
Ahmed Ibrahim
5af5856848 rust 2025-08-22 19:41:13 -07:00
Ahmed Ibrahim
16882fa090 rust 2025-08-22 19:38:32 -07:00
Ahmed Ibrahim
366d0738a4 rust 2025-08-22 19:35:02 -07:00
Ahmed Ibrahim
032f14aec8 aggregate-out-err 2025-08-22 18:13:37 -07:00
Ahmed Ibrahim
6ef0c2e8e7 aggregate-out-err 2025-08-22 18:13:03 -07:00
Ahmed Ibrahim
5db76dc66e dead code 2025-08-22 18:12:12 -07:00
Ahmed Ibrahim
1a04fa0379 cap to full 2025-08-22 18:08:52 -07:00
Ahmed Ibrahim
a5c14eb8c0 test 2025-08-22 18:01:06 -07:00
Ahmed Ibrahim
cd610fd409 tests 2025-08-22 18:00:11 -07:00
Ahmed Ibrahim
35130cf21b send-aggregated output 2025-08-22 17:58:33 -07:00
Ahmed Ibrahim
311ad0ce26 fork conversation from a previous message (#2575)
This can be the underlying logic for starting a conversation from a
previous message. It will need some love in the UI.

Base for building this: #2588
2025-08-22 17:06:09 -07:00
Jeremy Rose
5fa7d46ddf tui: fix resize on wezterm (#2600)
WezTerm doesn't respond to cursor queries during a synchronized update,
so resizing was broken there.
2025-08-22 16:33:18 -07:00
Jeremy Rose
d994019f3f tui: coalesce command output; show unabridged commands in transcript (#2590)
https://github.com/user-attachments/assets/effec7c7-732a-4b61-a2ae-3cb297b6b19b
2025-08-22 16:32:31 -07:00
Jeremy Rose
6de9541f0a tui: open transcript mode at the bottom (#2592)
this got lost when we switched /diff to use the pager.
2025-08-22 16:06:41 -07:00
wkrettek
85099017fd Fix typo in AGENTS.md (#2518)
- Change `examole` to `example`
2025-08-22 16:05:39 -07:00
35 changed files with 1486 additions and 294 deletions

View File

@@ -2,7 +2,7 @@
In the codex-rs folder where the rust code lives:
- Crate names are prefixed with `codex-`. For examole, the `core` folder's crate is named `codex-core`
- Crate names are prefixed with `codex-`. For example, the `core` folder's crate is named `codex-core`
- When using format! and you can inline variables into {}, always do that.
- Never add or modify any code related to `CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR` or `CODEX_SANDBOX_ENV_VAR`.
- You operate in a sandbox where `CODEX_SANDBOX_NETWORK_DISABLED=1` will be set whenever you use the `shell` tool. Any existing code that uses `CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR` was authored with this fact in mind. It is often used to early exit out of tests that the author knew you would not be able to run given your sandbox limitations.

View File

@@ -14,6 +14,7 @@ use codex_apply_patch::ApplyPatchAction;
use codex_apply_patch::MaybeApplyPatchVerified;
use codex_apply_patch::maybe_parse_apply_patch_verified;
use codex_login::AuthManager;
use codex_protocol::protocol::ConversationHistoryResponseEvent;
use codex_protocol::protocol::TurnAbortReason;
use codex_protocol::protocol::TurnAbortedEvent;
use futures::prelude::*;
@@ -141,14 +142,23 @@ pub struct CodexSpawnOk {
}
pub(crate) const INITIAL_SUBMIT_ID: &str = "";
pub(crate) const SUBMISSION_CHANNEL_CAPACITY: usize = 64;
// Model-formatting limits: clients get full streams; only content sent to the model is truncated.
pub(crate) const MODEL_FORMAT_MAX_BYTES: usize = 10 * 1024; // 10 KiB
pub(crate) const MODEL_FORMAT_MAX_LINES: usize = 256; // lines
pub(crate) const MODEL_FORMAT_HEAD_LINES: usize = MODEL_FORMAT_MAX_LINES / 2;
pub(crate) const MODEL_FORMAT_TAIL_LINES: usize = MODEL_FORMAT_MAX_LINES - MODEL_FORMAT_HEAD_LINES; // 128
pub(crate) const MODEL_FORMAT_HEAD_BYTES: usize = MODEL_FORMAT_MAX_BYTES / 2;
impl Codex {
/// Spawn a new [`Codex`] and initialize the session.
pub async fn spawn(
config: Config,
auth_manager: Arc<AuthManager>,
initial_history: Option<Vec<ResponseItem>>,
) -> CodexResult<CodexSpawnOk> {
let (tx_sub, rx_sub) = async_channel::bounded(64);
let (tx_sub, rx_sub) = async_channel::bounded(SUBMISSION_CHANNEL_CAPACITY);
let (tx_event, rx_event) = async_channel::unbounded();
let user_instructions = get_user_instructions(&config).await;
@@ -177,6 +187,7 @@ impl Codex {
config.clone(),
auth_manager.clone(),
tx_event.clone(),
initial_history,
)
.await
.map_err(|e| {
@@ -186,7 +197,12 @@ impl Codex {
let session_id = session.session_id;
// This task will run until Op::Shutdown is received.
tokio::spawn(submission_loop(session, turn_context, config, rx_sub));
tokio::spawn(submission_loop(
session.clone(),
turn_context,
config,
rx_sub,
));
let codex = Codex {
next_id: AtomicU64::new(0),
tx_sub,
@@ -247,6 +263,15 @@ pub(crate) struct Session {
/// Manager for external MCP servers/tools.
mcp_connection_manager: McpConnectionManager,
/// Loaded subagent definitions from project and user scope.
subagents_registry: crate::subagents::registry::SubagentRegistry,
/// Auth manager used to spawn nested sessions (e.g., subagents).
auth_manager: Arc<AuthManager>,
/// Base configuration used to derive nested session configs.
base_config: Arc<Config>,
/// External notifier command (will be passed as args to exec()). When
/// `None` this feature is disabled.
notify: Option<Vec<String>>,
@@ -332,6 +357,7 @@ impl Session {
config: Arc<Config>,
auth_manager: Arc<AuthManager>,
tx_event: Sender<Event>,
initial_history: Option<Vec<ResponseItem>>,
) -> anyhow::Result<(Arc<Self>, TurnContext)> {
let ConfigureSession {
provider,
@@ -391,14 +417,15 @@ impl Session {
}
let rollout_result = match rollout_res {
Ok((session_id, maybe_saved, recorder)) => {
let restored_items: Option<Vec<ResponseItem>> =
let restored_items: Option<Vec<ResponseItem>> = initial_history.or_else(|| {
maybe_saved.and_then(|saved_session| {
if saved_session.items.is_empty() {
None
} else {
Some(saved_session.items)
}
});
})
});
RolloutResult {
session_id,
rollout_recorder: Some(recorder),
@@ -480,6 +507,30 @@ impl Session {
model_reasoning_summary,
session_id,
);
// Build subagent registry paths and load once per session
let project_agents_dir = {
let mut p = cwd.clone();
p.push(".codex");
p.push("agents");
if p.exists() { Some(p) } else { None }
};
let user_agents_dir = {
let mut p = config.codex_home.clone();
p.push("agents");
if p.exists() { Some(p) } else { None }
};
let mut subagents_registry =
crate::subagents::registry::SubagentRegistry::new(project_agents_dir, user_agents_dir);
subagents_registry.load();
// Log discovered subagents for visibility in clients (e.g., TUI) after
// SessionConfigured so the first event contract remains intact.
post_session_configured_error_events.push(Event {
id: INITIAL_SUBMIT_ID.to_string(),
msg: EventMsg::BackgroundEvent(BackgroundEventEvent {
message: format!("subagents discovered: {:?}", subagents_registry.all_names()),
}),
});
let turn_context = TurnContext {
client,
tools_config: ToolsConfig::new(
@@ -488,6 +539,7 @@ impl Session {
sandbox_policy.clone(),
config.include_plan_tool,
config.include_apply_patch_tool,
config.include_subagent_tool,
),
user_instructions,
base_instructions,
@@ -501,6 +553,9 @@ impl Session {
session_id,
tx_event: tx_event.clone(),
mcp_connection_manager,
subagents_registry,
auth_manager: auth_manager.clone(),
base_config: config.clone(),
notify,
state: Mutex::new(state),
rollout: Mutex::new(rollout_recorder),
@@ -560,6 +615,16 @@ impl Session {
}
}
/// Access auth manager for nested sessions.
pub(crate) fn auth_manager(&self) -> Arc<AuthManager> {
self.auth_manager.clone()
}
/// Access base config for nested sessions.
pub(crate) fn base_config(&self) -> Arc<Config> {
self.base_config.clone()
}
/// Sends the given event to the client and swallows the send error, if
/// any, logging it as an error.
pub(crate) async fn send_event(&self, event: Event) {
@@ -701,7 +766,6 @@ impl Session {
let _ = self.tx_event.send(event).await;
}
#[allow(clippy::too_many_arguments)]
async fn on_exec_command_end(
&self,
turn_diff_tracker: &mut TurnDiffTracker,
@@ -713,14 +777,15 @@ impl Session {
let ExecToolCallOutput {
stdout,
stderr,
aggregated_output,
duration,
exit_code,
} = output;
// Because stdout and stderr could each be up to 100 KiB, we send
// truncated versions.
const MAX_STREAM_OUTPUT: usize = 5 * 1024; // 5KiB
let stdout = stdout.text.chars().take(MAX_STREAM_OUTPUT).collect();
let stderr = stderr.text.chars().take(MAX_STREAM_OUTPUT).collect();
// Send full stdout/stderr to clients; do not truncate.
let stdout = stdout.text.clone();
let stderr = stderr.text.clone();
let formatted_output = format_exec_output_str(output);
let aggregated_output: String = aggregated_output.text.clone();
let msg = if is_apply_patch {
EventMsg::PatchApplyEnd(PatchApplyEndEvent {
@@ -734,8 +799,10 @@ impl Session {
call_id: call_id.to_string(),
stdout,
stderr,
duration: *duration,
aggregated_output,
exit_code: *exit_code,
duration: *duration,
formatted_output,
})
};
@@ -793,6 +860,7 @@ impl Session {
exit_code: -1,
stdout: StreamOutput::new(String::new()),
stderr: StreamOutput::new(get_error_message_ui(e)),
aggregated_output: StreamOutput::new(get_error_message_ui(e)),
duration: Duration::default(),
};
&output_stderr
@@ -1069,6 +1137,7 @@ async fn submission_loop(
new_sandbox_policy.clone(),
config.include_plan_tool,
config.include_apply_patch_tool,
config.include_subagent_tool,
);
let new_turn_context = TurnContext {
@@ -1147,6 +1216,7 @@ async fn submission_loop(
sandbox_policy.clone(),
config.include_plan_tool,
config.include_apply_patch_tool,
config.include_subagent_tool,
),
user_instructions: turn_context.user_instructions.clone(),
base_instructions: turn_context.base_instructions.clone(),
@@ -1285,6 +1355,21 @@ async fn submission_loop(
}
break;
}
Op::GetHistory => {
let tx_event = sess.tx_event.clone();
let sub_id = sub.id.clone();
let event = Event {
id: sub_id.clone(),
msg: EventMsg::ConversationHistory(ConversationHistoryResponseEvent {
conversation_id: sess.session_id,
entries: sess.state.lock_unchecked().history.contents(),
}),
};
if let Err(e) = tx_event.send(event).await {
warn!("failed to send ConversationHistory event: {e}");
}
}
_ => {
// Ignore unknown ops; enum is non_exhaustive to allow extensions.
}
@@ -1518,6 +1603,27 @@ async fn run_turn(
&turn_context.tools_config,
Some(sess.mcp_connection_manager.list_all_tools()),
);
tracing::trace!("Tools: {tools:?}");
// Log tool names for visibility in the TUI/debug logs.
#[allow(clippy::match_same_arms)]
let tool_names: Vec<String> = tools
.iter()
.map(|t| match t {
crate::openai_tools::OpenAiTool::Function(f) => f.name.clone(),
crate::openai_tools::OpenAiTool::LocalShell {} => "local_shell".to_string(),
crate::openai_tools::OpenAiTool::Freeform(f) => f.name.clone(),
})
.collect();
let _ = sess
.tx_event
.send(Event {
id: sub_id.clone(),
msg: EventMsg::BackgroundEvent(BackgroundEventEvent {
message: format!("tools available: {:?}", tool_names),
}),
})
.await;
let prompt = Prompt {
input,
@@ -2037,6 +2143,84 @@ async fn handle_function_call(
.await
}
"update_plan" => handle_update_plan(sess, arguments, sub_id, call_id).await,
"subagent_run" => {
#[derive(serde::Deserialize)]
struct Args {
name: String,
input: String,
#[serde(default)]
context: Option<String>,
}
let args = match serde_json::from_str::<Args>(&arguments) {
Ok(a) => a,
Err(e) => {
return ResponseInputItem::FunctionCallOutput {
call_id,
output: FunctionCallOutputPayload {
content: format!("failed to parse function arguments: {e}"),
success: Some(false),
},
};
}
};
let result = crate::subagents::runner::run(
sess,
turn_context,
&sess.subagents_registry,
crate::subagents::runner::RunSubagentArgs {
name: args.name,
input: args.input,
context: args.context,
},
&sub_id,
)
.await;
match result {
Ok(message) => ResponseInputItem::FunctionCallOutput {
call_id,
output: FunctionCallOutputPayload {
content: message,
success: Some(true),
},
},
Err(e) => ResponseInputItem::FunctionCallOutput {
call_id,
output: FunctionCallOutputPayload {
content: format!("subagent failed: {e}"),
success: Some(false),
},
},
}
}
"subagent_list" => {
#[derive(serde::Serialize)]
struct SubagentBrief<'a> {
name: &'a str,
description: &'a str,
}
let mut list = Vec::new();
for name in sess.subagents_registry.all_names() {
if let Some(def) = sess.subagents_registry.get(&name) {
list.push(SubagentBrief {
name: &def.name,
description: &def.description,
});
}
}
let payload = match serde_json::to_string(&list) {
Ok(s) => s,
Err(e) => format!("failed to serialize subagent list: {e}"),
};
ResponseInputItem::FunctionCallOutput {
call_id,
output: FunctionCallOutputPayload {
content: payload,
success: Some(true),
},
}
}
_ => {
match sess.mcp_connection_manager.parse_tool_name(&name) {
Some((server, tool_name)) => {
@@ -2147,6 +2331,8 @@ fn parse_container_exec_arguments(
}
}
// (helper run_one_turn_collect removed as unused)
pub struct ExecInvokeArgs<'a> {
pub params: ExecParams,
pub sandbox_type: SandboxType,
@@ -2357,7 +2543,7 @@ async fn handle_container_exec_with_params(
let ExecToolCallOutput { exit_code, .. } = &output;
let is_success = *exit_code == 0;
let content = format_exec_output(output);
let content = format_exec_output(&output);
ResponseInputItem::FunctionCallOutput {
call_id: call_id.clone(),
output: FunctionCallOutputPayload {
@@ -2490,7 +2676,7 @@ async fn handle_sandbox_error(
let ExecToolCallOutput { exit_code, .. } = &retry_output;
let is_success = *exit_code == 0;
let content = format_exec_output(retry_output);
let content = format_exec_output(&retry_output);
ResponseInputItem::FunctionCallOutput {
call_id: call_id.clone(),
@@ -2522,13 +2708,113 @@ async fn handle_sandbox_error(
}
}
fn format_exec_output_str(exec_output: &ExecToolCallOutput) -> String {
let ExecToolCallOutput {
aggregated_output, ..
} = exec_output;
// Head+tail truncation for the model: show the beginning and end with an elision.
// Clients still receive full streams; only this formatted summary is capped.
let s = aggregated_output.text.as_str();
let total_lines = s.lines().count();
if s.len() <= MODEL_FORMAT_MAX_BYTES && total_lines <= MODEL_FORMAT_MAX_LINES {
return s.to_string();
}
let lines: Vec<&str> = s.lines().collect();
let head_take = MODEL_FORMAT_HEAD_LINES.min(lines.len());
let tail_take = MODEL_FORMAT_TAIL_LINES.min(lines.len().saturating_sub(head_take));
let omitted = lines.len().saturating_sub(head_take + tail_take);
// Join head and tail blocks (lines() strips newlines; reinsert them)
let head_block = lines
.iter()
.take(head_take)
.cloned()
.collect::<Vec<_>>()
.join("\n");
let tail_block = if tail_take > 0 {
lines[lines.len() - tail_take..].join("\n")
} else {
String::new()
};
let marker = format!("\n[... omitted {omitted} of {total_lines} lines ...]\n\n");
// Byte budgets for head/tail around the marker
let mut head_budget = MODEL_FORMAT_HEAD_BYTES.min(MODEL_FORMAT_MAX_BYTES);
let tail_budget = MODEL_FORMAT_MAX_BYTES.saturating_sub(head_budget + marker.len());
if tail_budget == 0 && marker.len() >= MODEL_FORMAT_MAX_BYTES {
// Degenerate case: marker alone exceeds budget; return a clipped marker
return take_bytes_at_char_boundary(&marker, MODEL_FORMAT_MAX_BYTES).to_string();
}
if tail_budget == 0 {
// Make room for the marker by shrinking head
head_budget = MODEL_FORMAT_MAX_BYTES.saturating_sub(marker.len());
}
// Enforce line-count cap by trimming head/tail lines
let head_lines_text = head_block;
let tail_lines_text = tail_block;
// Build final string respecting byte budgets
let head_part = take_bytes_at_char_boundary(&head_lines_text, head_budget);
let mut result = String::with_capacity(MODEL_FORMAT_MAX_BYTES.min(s.len()));
result.push_str(head_part);
result.push_str(&marker);
let remaining = MODEL_FORMAT_MAX_BYTES.saturating_sub(result.len());
let tail_budget_final = remaining;
let tail_part = take_last_bytes_at_char_boundary(&tail_lines_text, tail_budget_final);
result.push_str(tail_part);
result
}
// Truncate a &str to a byte budget at a char boundary (prefix)
#[inline]
fn take_bytes_at_char_boundary(s: &str, maxb: usize) -> &str {
if s.len() <= maxb {
return s;
}
let mut last_ok = 0;
for (i, ch) in s.char_indices() {
let nb = i + ch.len_utf8();
if nb > maxb {
break;
}
last_ok = nb;
}
&s[..last_ok]
}
// Take a suffix of a &str within a byte budget at a char boundary
#[inline]
fn take_last_bytes_at_char_boundary(s: &str, maxb: usize) -> &str {
if s.len() <= maxb {
return s;
}
let mut start = s.len();
let mut used = 0usize;
for (i, ch) in s.char_indices().rev() {
let nb = ch.len_utf8();
if used + nb > maxb {
break;
}
start = i;
used += nb;
if start == 0 {
break;
}
}
&s[start..]
}
/// Exec output is a pre-serialized JSON payload
fn format_exec_output(exec_output: ExecToolCallOutput) -> String {
fn format_exec_output(exec_output: &ExecToolCallOutput) -> String {
let ExecToolCallOutput {
exit_code,
stdout,
stderr,
duration,
..
} = exec_output;
#[derive(Serialize)]
@@ -2546,20 +2832,12 @@ fn format_exec_output(exec_output: ExecToolCallOutput) -> String {
// round to 1 decimal place
let duration_seconds = ((duration.as_secs_f32()) * 10.0).round() / 10.0;
let is_success = exit_code == 0;
let output = if is_success { stdout } else { stderr };
let mut formatted_output = output.text;
if let Some(truncated_after_lines) = output.truncated_after_lines {
formatted_output.push_str(&format!(
"\n\n[Output truncated after {truncated_after_lines} lines: too many lines or bytes.]",
));
}
let formatted_output = format_exec_output_str(exec_output);
let payload = ExecOutput {
output: &formatted_output,
metadata: ExecMetadata {
exit_code,
exit_code: *exit_code,
duration_seconds,
},
};
@@ -2679,6 +2957,7 @@ mod tests {
use mcp_types::TextContent;
use pretty_assertions::assert_eq;
use serde_json::json;
use std::time::Duration as StdDuration;
fn text_block(s: &str) -> ContentBlock {
ContentBlock::TextContent(TextContent {
@@ -2713,6 +2992,82 @@ mod tests {
assert_eq!(expected, got);
}
#[test]
fn model_truncation_head_tail_by_lines() {
// Build 400 short lines so line-count limit, not byte budget, triggers truncation
let lines: Vec<String> = (1..=400).map(|i| format!("line{i}")).collect();
let full = lines.join("\n");
let exec = ExecToolCallOutput {
exit_code: 0,
stdout: StreamOutput::new(String::new()),
stderr: StreamOutput::new(String::new()),
aggregated_output: StreamOutput::new(full.clone()),
duration: StdDuration::from_secs(1),
};
let out = format_exec_output_str(&exec);
// Expect elision marker with correct counts
let omitted = 400 - MODEL_FORMAT_MAX_LINES; // 144
let marker = format!("\n[... omitted {omitted} of 400 lines ...]\n\n");
assert!(out.contains(&marker), "missing marker: {out}");
// Validate head and tail
let parts: Vec<&str> = out.split(&marker).collect();
assert_eq!(parts.len(), 2, "expected one marker split");
let head = parts[0];
let tail = parts[1];
let expected_head: String = (1..=MODEL_FORMAT_HEAD_LINES)
.map(|i| format!("line{i}"))
.collect::<Vec<_>>()
.join("\n");
assert!(head.starts_with(&expected_head), "head mismatch");
let expected_tail: String = ((400 - MODEL_FORMAT_TAIL_LINES + 1)..=400)
.map(|i| format!("line{i}"))
.collect::<Vec<_>>()
.join("\n");
assert!(tail.ends_with(&expected_tail), "tail mismatch");
}
#[test]
fn model_truncation_respects_byte_budget() {
// Construct a large output (about 100kB) so byte budget dominates
let big_line = "x".repeat(100);
let full = std::iter::repeat_n(big_line.clone(), 1000)
.collect::<Vec<_>>()
.join("\n");
let exec = ExecToolCallOutput {
exit_code: 0,
stdout: StreamOutput::new(String::new()),
stderr: StreamOutput::new(String::new()),
aggregated_output: StreamOutput::new(full.clone()),
duration: StdDuration::from_secs(1),
};
let out = format_exec_output_str(&exec);
assert!(out.len() <= MODEL_FORMAT_MAX_BYTES, "exceeds byte budget");
assert!(out.contains("omitted"), "should contain elision marker");
// Ensure head and tail are drawn from the original
assert!(full.starts_with(out.chars().take(8).collect::<String>().as_str()));
assert!(
full.ends_with(
out.chars()
.rev()
.take(8)
.collect::<String>()
.chars()
.rev()
.collect::<String>()
.as_str()
)
);
}
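A minimal, standalone sketch of the head+tail idea that `format_exec_output_str` implements above, using small made-up limits in place of the real `MODEL_FORMAT_*` constants and ignoring the byte budget; it is an illustration of the technique, not the crate's implementation.

// Illustrative limits only; the real code also enforces a byte budget.
const MAX_LINES: usize = 8;
const HEAD_LINES: usize = MAX_LINES / 2;
const TAIL_LINES: usize = MAX_LINES - HEAD_LINES;

fn head_tail_truncate(s: &str) -> String {
    let lines: Vec<&str> = s.lines().collect();
    let total = lines.len();
    if total <= MAX_LINES {
        return s.to_string();
    }
    let omitted = total - HEAD_LINES - TAIL_LINES;
    let head = lines[..HEAD_LINES].join("\n");
    let tail = lines[total - TAIL_LINES..].join("\n");
    format!("{head}\n[... omitted {omitted} of {total} lines ...]\n\n{tail}")
}

fn main() {
    let output: String = (1..=20).map(|i| format!("line{i}\n")).collect();
    // Keeps line1..line4 and line17..line20, eliding the 12 lines in between.
    println!("{}", head_tail_truncate(&output));
}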
#[test]
fn falls_back_to_content_when_structured_is_null() {
let ctr = CallToolResult {

View File

@@ -169,6 +169,9 @@ pub struct Config {
/// model family's default preference.
pub include_apply_patch_tool: bool,
/// Include the `subagent.run` tool allowing the model to invoke configured subagents.
pub include_subagent_tool: bool,
/// The value for the `originator` header included with Responses API requests.
pub responses_originator_header: String,
@@ -476,6 +479,9 @@ pub struct ConfigToml {
/// If set to `true`, the API key will be signed with the `originator` header.
pub preferred_auth_method: Option<AuthMode>,
/// Include the `subagent.run` tool allowing the model to invoke configured subagents.
pub include_subagent_tool: Option<bool>,
}
#[derive(Deserialize, Debug, Clone, PartialEq, Eq)]
@@ -570,6 +576,7 @@ pub struct ConfigOverrides {
pub base_instructions: Option<String>,
pub include_plan_tool: Option<bool>,
pub include_apply_patch_tool: Option<bool>,
pub include_subagent_tool: Option<bool>,
pub disable_response_storage: Option<bool>,
pub show_raw_agent_reasoning: Option<bool>,
}
@@ -596,6 +603,7 @@ impl Config {
base_instructions,
include_plan_tool,
include_apply_patch_tool,
include_subagent_tool,
disable_response_storage,
show_raw_agent_reasoning,
} = overrides;
@@ -756,6 +764,11 @@ impl Config {
experimental_resume,
include_plan_tool: include_plan_tool.unwrap_or(false),
include_apply_patch_tool: include_apply_patch_tool.unwrap_or(false),
include_subagent_tool: config_profile
.include_subagent_tool
.or(cfg.include_subagent_tool)
.or(include_subagent_tool)
.unwrap_or(false),
responses_originator_header,
preferred_auth_method: cfg.preferred_auth_method.unwrap_or(AuthMode::ChatGPT),
};
@@ -1122,6 +1135,7 @@ disable_response_storage = true
base_instructions: None,
include_plan_tool: false,
include_apply_patch_tool: false,
include_subagent_tool: false,
responses_originator_header: "codex_cli_rs".to_string(),
preferred_auth_method: AuthMode::ChatGPT,
},
@@ -1176,6 +1190,7 @@ disable_response_storage = true
base_instructions: None,
include_plan_tool: false,
include_apply_patch_tool: false,
include_subagent_tool: false,
responses_originator_header: "codex_cli_rs".to_string(),
preferred_auth_method: AuthMode::ChatGPT,
};
@@ -1245,6 +1260,7 @@ disable_response_storage = true
base_instructions: None,
include_plan_tool: false,
include_apply_patch_tool: false,
include_subagent_tool: false,
responses_originator_header: "codex_cli_rs".to_string(),
preferred_auth_method: AuthMode::ChatGPT,
};

View File

@@ -21,4 +21,6 @@ pub struct ConfigProfile {
pub model_verbosity: Option<Verbosity>,
pub chatgpt_base_url: Option<String>,
pub experimental_instructions_file: Option<PathBuf>,
/// Include the `subagent.run` tool allowing the model to invoke configured subagents.
pub include_subagent_tool: Option<bool>,
}

View File

@@ -16,6 +16,7 @@ use crate::error::Result as CodexResult;
use crate::protocol::Event;
use crate::protocol::EventMsg;
use crate::protocol::SessionConfiguredEvent;
use codex_protocol::models::ResponseItem;
/// Represents a newly created Codex conversation, including the first event
/// (which is [`EventMsg::SessionConfigured`]).
@@ -59,8 +60,18 @@ impl ConversationManager {
let CodexSpawnOk {
codex,
session_id: conversation_id,
} = Codex::spawn(config, auth_manager).await?;
} = {
let initial_history = None;
Codex::spawn(config, auth_manager, initial_history).await?
};
self.finalize_spawn(codex, conversation_id).await
}
async fn finalize_spawn(
&self,
codex: Codex,
conversation_id: Uuid,
) -> CodexResult<NewConversation> {
// The first event must be `SessionInitialized`. Validate and forward it
// to the caller so that they can display it in the conversation
// history.
@@ -98,4 +109,120 @@ impl ConversationManager {
.cloned()
.ok_or_else(|| CodexErr::ConversationNotFound(conversation_id))
}
/// Fork an existing conversation by dropping the last `drop_last_messages`
/// user/assistant messages from its transcript and starting a new
/// conversation with identical configuration (unless overridden by the
/// caller's `config`). The new conversation will have a fresh id.
pub async fn fork_conversation(
&self,
conversation_history: Vec<ResponseItem>,
num_messages_to_drop: usize,
config: Config,
) -> CodexResult<NewConversation> {
// Compute the prefix up to the cut point.
let truncated_history =
truncate_after_dropping_last_messages(conversation_history, num_messages_to_drop);
// Spawn a new conversation with the computed initial history.
let auth_manager = self.auth_manager.clone();
let CodexSpawnOk {
codex,
session_id: conversation_id,
} = Codex::spawn(config, auth_manager, Some(truncated_history)).await?;
self.finalize_spawn(codex, conversation_id).await
}
}
/// Return a prefix of `items` obtained by dropping the last `n` user messages
/// and all items that follow them.
fn truncate_after_dropping_last_messages(items: Vec<ResponseItem>, n: usize) -> Vec<ResponseItem> {
if n == 0 || items.is_empty() {
return items;
}
// Walk backwards counting only `user` Message items, find cut index.
let mut count = 0usize;
let mut cut_index = 0usize;
for (idx, item) in items.iter().enumerate().rev() {
if let ResponseItem::Message { role, .. } = item
&& role == "user"
{
count += 1;
if count == n {
// Cut everything from this user message to the end.
cut_index = idx;
break;
}
}
}
if count < n {
// If fewer than n messages exist, drop everything.
Vec::new()
} else {
items.into_iter().take(cut_index).collect()
}
}
#[cfg(test)]
mod tests {
use super::*;
use codex_protocol::models::ContentItem;
use codex_protocol::models::ReasoningItemReasoningSummary;
use codex_protocol::models::ResponseItem;
fn user_msg(text: &str) -> ResponseItem {
ResponseItem::Message {
id: None,
role: "user".to_string(),
content: vec![ContentItem::OutputText {
text: text.to_string(),
}],
}
}
fn assistant_msg(text: &str) -> ResponseItem {
ResponseItem::Message {
id: None,
role: "assistant".to_string(),
content: vec![ContentItem::OutputText {
text: text.to_string(),
}],
}
}
#[test]
fn drops_from_last_user_only() {
let items = vec![
user_msg("u1"),
assistant_msg("a1"),
assistant_msg("a2"),
user_msg("u2"),
assistant_msg("a3"),
ResponseItem::Reasoning {
id: "r1".to_string(),
summary: vec![ReasoningItemReasoningSummary::SummaryText {
text: "s".to_string(),
}],
content: None,
encrypted_content: None,
},
ResponseItem::FunctionCall {
id: None,
name: "tool".to_string(),
arguments: "{}".to_string(),
call_id: "c1".to_string(),
},
assistant_msg("a4"),
];
let truncated = truncate_after_dropping_last_messages(items.clone(), 1);
assert_eq!(
truncated,
vec![items[0].clone(), items[1].clone(), items[2].clone()]
);
let truncated2 = truncate_after_dropping_last_messages(items, 2);
assert!(truncated2.is_empty());
}
}

View File

@@ -28,18 +28,17 @@ use crate::spawn::StdioPolicy;
use crate::spawn::spawn_child_async;
use serde_bytes::ByteBuf;
// Maximum we send for each stream, which is either:
// - 10KiB OR
// - 256 lines
const MAX_STREAM_OUTPUT: usize = 10 * 1024;
const MAX_STREAM_OUTPUT_LINES: usize = 256;
const DEFAULT_TIMEOUT_MS: u64 = 10_000;
// Hardcode these since it does not seem worth including the libc crate just
// for these.
const SIGKILL_CODE: i32 = 9;
const TIMEOUT_CODE: i32 = 64;
const EXIT_CODE_SIGNAL_BASE: i32 = 128; // conventional shell: 128 + signal
// I/O buffer sizing
const READ_CHUNK_SIZE: usize = 8192; // bytes per read
const AGGREGATE_BUFFER_INITIAL_CAPACITY: usize = 8 * 1024; // 8 KiB
#[derive(Debug, Clone)]
pub struct ExecParams {
@@ -153,6 +152,7 @@ pub async fn process_exec_tool_call(
exit_code,
stdout,
stderr,
aggregated_output: raw_output.aggregated_output.from_utf8_lossy(),
duration,
})
}
@@ -189,10 +189,11 @@ pub struct StreamOutput<T> {
pub truncated_after_lines: Option<u32>,
}
#[derive(Debug)]
pub struct RawExecToolCallOutput {
struct RawExecToolCallOutput {
pub exit_status: ExitStatus,
pub stdout: StreamOutput<Vec<u8>>,
pub stderr: StreamOutput<Vec<u8>>,
pub aggregated_output: StreamOutput<Vec<u8>>,
}
impl StreamOutput<String> {
@@ -213,11 +214,17 @@ impl StreamOutput<Vec<u8>> {
}
}
#[inline]
fn append_all(dst: &mut Vec<u8>, src: &[u8]) {
dst.extend_from_slice(src);
}
#[derive(Debug)]
pub struct ExecToolCallOutput {
pub exit_code: i32,
pub stdout: StreamOutput<String>,
pub stderr: StreamOutput<String>,
pub aggregated_output: StreamOutput<String>,
pub duration: Duration,
}
@@ -253,7 +260,7 @@ async fn exec(
/// Consumes the output of a child process, truncating it so it is suitable for
/// use as the output of a `shell` tool call. Also enforces specified timeout.
pub(crate) async fn consume_truncated_output(
async fn consume_truncated_output(
mut child: Child,
timeout: Duration,
stdout_stream: Option<StdoutStream>,
@@ -273,19 +280,19 @@ pub(crate) async fn consume_truncated_output(
))
})?;
let (agg_tx, agg_rx) = async_channel::unbounded::<Vec<u8>>();
let stdout_handle = tokio::spawn(read_capped(
BufReader::new(stdout_reader),
MAX_STREAM_OUTPUT,
MAX_STREAM_OUTPUT_LINES,
stdout_stream.clone(),
false,
Some(agg_tx.clone()),
));
let stderr_handle = tokio::spawn(read_capped(
BufReader::new(stderr_reader),
MAX_STREAM_OUTPUT,
MAX_STREAM_OUTPUT_LINES,
stdout_stream.clone(),
true,
Some(agg_tx.clone()),
));
let exit_status = tokio::select! {
@@ -297,38 +304,48 @@ pub(crate) async fn consume_truncated_output(
// timeout
child.start_kill()?;
// Debatable whether `child.wait().await` should be called here.
synthetic_exit_status(128 + TIMEOUT_CODE)
synthetic_exit_status(EXIT_CODE_SIGNAL_BASE + TIMEOUT_CODE)
}
}
}
_ = tokio::signal::ctrl_c() => {
child.start_kill()?;
synthetic_exit_status(128 + SIGKILL_CODE)
synthetic_exit_status(EXIT_CODE_SIGNAL_BASE + SIGKILL_CODE)
}
};
let stdout = stdout_handle.await??;
let stderr = stderr_handle.await??;
drop(agg_tx);
let mut combined_buf = Vec::with_capacity(AGGREGATE_BUFFER_INITIAL_CAPACITY);
while let Ok(chunk) = agg_rx.recv().await {
append_all(&mut combined_buf, &chunk);
}
let aggregated_output = StreamOutput {
text: combined_buf,
truncated_after_lines: None,
};
Ok(RawExecToolCallOutput {
exit_status,
stdout,
stderr,
aggregated_output,
})
}
async fn read_capped<R: AsyncRead + Unpin + Send + 'static>(
mut reader: R,
max_output: usize,
max_lines: usize,
stream: Option<StdoutStream>,
is_stderr: bool,
aggregate_tx: Option<Sender<Vec<u8>>>,
) -> io::Result<StreamOutput<Vec<u8>>> {
let mut buf = Vec::with_capacity(max_output.min(8 * 1024));
let mut tmp = [0u8; 8192];
let mut buf = Vec::with_capacity(AGGREGATE_BUFFER_INITIAL_CAPACITY);
let mut tmp = [0u8; READ_CHUNK_SIZE];
let mut remaining_bytes = max_output;
let mut remaining_lines = max_lines;
// No caps: append all bytes
loop {
let n = reader.read(&mut tmp).await?;
@@ -355,33 +372,17 @@ async fn read_capped<R: AsyncRead + Unpin + Send + 'static>(
let _ = stream.tx_event.send(event).await;
}
// Copy into the buffer only while we still have byte and line budget.
if remaining_bytes > 0 && remaining_lines > 0 {
let mut copy_len = 0;
for &b in &tmp[..n] {
if remaining_bytes == 0 || remaining_lines == 0 {
break;
}
copy_len += 1;
remaining_bytes -= 1;
if b == b'\n' {
remaining_lines -= 1;
}
}
buf.extend_from_slice(&tmp[..copy_len]);
if let Some(tx) = &aggregate_tx {
let _ = tx.send(tmp[..n].to_vec()).await;
}
// Continue reading to EOF to avoid back-pressure, but discard once caps are hit.
}
let truncated = remaining_lines == 0 || remaining_bytes == 0;
append_all(&mut buf, &tmp[..n]);
// Continue reading to EOF to avoid back-pressure
}
Ok(StreamOutput {
text: buf,
truncated_after_lines: if truncated {
Some((max_lines - remaining_lines) as u32)
} else {
None
},
truncated_after_lines: None,
})
}

View File

@@ -62,3 +62,4 @@ pub use codex_protocol::protocol;
// Re-export protocol config enums to ensure call sites can use the same types
// as those in the protocol crate when constructing protocol messages.
pub use codex_protocol::config_types as protocol_config_types;
pub mod subagents;

View File

@@ -63,6 +63,7 @@ pub struct ToolsConfig {
pub shell_type: ConfigShellToolType,
pub plan_tool: bool,
pub apply_patch_tool_type: Option<ApplyPatchToolType>,
pub subagent_tool: bool,
}
impl ToolsConfig {
@@ -72,6 +73,7 @@ impl ToolsConfig {
sandbox_policy: SandboxPolicy,
include_plan_tool: bool,
include_apply_patch_tool: bool,
include_subagent_tool: bool,
) -> Self {
let mut shell_type = if model_family.uses_local_shell_tool {
ConfigShellToolType::LocalShell
@@ -100,6 +102,7 @@ impl ToolsConfig {
shell_type,
plan_tool: include_plan_tool,
apply_patch_tool_type,
subagent_tool: include_subagent_tool,
}
}
}
@@ -509,6 +512,12 @@ pub(crate) fn get_openai_tools(
}
}
if config.subagent_tool {
tracing::trace!("Adding subagent tool");
tools.push(crate::subagents::SUBAGENT_TOOL.clone());
tools.push(crate::subagents::SUBAGENT_LIST_TOOL.clone());
}
if let Some(mcp_tools) = mcp_tools {
for (name, tool) in mcp_tools {
match mcp_tool_to_openai_tool(name.clone(), tool.clone()) {
@@ -520,6 +529,7 @@ pub(crate) fn get_openai_tools(
}
}
tracing::trace!("Tools: {tools:?}");
tools
}
@@ -564,6 +574,7 @@ mod tests {
SandboxPolicy::ReadOnly,
true,
false,
false,
);
let tools = get_openai_tools(&config, Some(HashMap::new()));
@@ -579,6 +590,7 @@ mod tests {
SandboxPolicy::ReadOnly,
true,
false,
false,
);
let tools = get_openai_tools(&config, Some(HashMap::new()));
@@ -594,6 +606,7 @@ mod tests {
SandboxPolicy::ReadOnly,
false,
false,
false,
);
let tools = get_openai_tools(
&config,
@@ -688,6 +701,7 @@ mod tests {
SandboxPolicy::ReadOnly,
false,
false,
false,
);
let tools = get_openai_tools(
@@ -744,6 +758,7 @@ mod tests {
SandboxPolicy::ReadOnly,
false,
false,
false,
);
let tools = get_openai_tools(
@@ -795,6 +810,7 @@ mod tests {
SandboxPolicy::ReadOnly,
false,
false,
false,
);
let tools = get_openai_tools(
@@ -849,6 +865,7 @@ mod tests {
SandboxPolicy::ReadOnly,
false,
false,
false,
);
let tools = get_openai_tools(

View File

@@ -0,0 +1,32 @@
use serde::Deserialize;
use std::fs;
use std::path::Path;
#[derive(Debug, Clone, Deserialize)]
pub struct SubagentDefinition {
pub name: String,
pub description: String,
/// Base instructions for this subagent.
pub instructions: String,
/// When not set, inherits the parent agent's tool set. When set to an
/// empty list, no tools are available to the subagent.
#[serde(default)]
pub tools: Option<Vec<String>>, // None => inherit; Some(vec) => allow-list
}
impl SubagentDefinition {
pub fn from_json_str(s: &str) -> Result<Self, serde_json::Error> {
serde_json::from_str::<Self>(s)
}
pub fn from_file(path: &Path) -> std::io::Result<Self> {
let contents = fs::read_to_string(path)?;
// Surface JSON parsing error with file context
serde_json::from_str::<Self>(&contents).map_err(|e| {
std::io::Error::new(
std::io::ErrorKind::InvalidData,
format!("invalid subagent JSON at {}: {e}", path.display()),
)
})
}
}
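For reference, a hedged sketch of what a definition file under `.codex/agents/` could contain; the file name and field values are hypothetical, and the fields mirror `SubagentDefinition` above (`"tools": []` disallows all tools, while omitting `tools` inherits the parent agent's tool set). The snippet parses into a generic `serde_json::Value` so it stands alone.

// Hypothetical contents of e.g. .codex/agents/reviewer.json; values are illustrative.
fn main() {
    let example = r#"{
        "name": "reviewer",
        "description": "Reviews a diff and lists potential issues",
        "instructions": "You are a focused code reviewer. Reply with a bullet list of issues.",
        "tools": []
    }"#;
    // Parse into a generic JSON value so this sketch does not depend on crate internals.
    let def: serde_json::Value = serde_json::from_str(example).expect("valid subagent JSON");
    assert_eq!(def["name"], "reviewer");
    assert_eq!(def["tools"].as_array().map(|tools| tools.len()), Some(0));
    println!("parsed subagent definition: {}", def["name"]);
}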

View File

@@ -0,0 +1,6 @@
pub mod definition;
pub mod registry;
pub mod runner;
pub mod tool;
pub(crate) use tool::{SUBAGENT_LIST_TOOL, SUBAGENT_TOOL};

View File

@@ -0,0 +1,92 @@
use super::definition::SubagentDefinition;
use std::collections::HashMap;
use std::fs;
use std::path::Path;
use std::path::PathBuf;
#[derive(Debug, Default, Clone)]
pub struct SubagentRegistry {
/// Directory under the project (cwd/.codex/agents).
project_dir: Option<PathBuf>,
/// Directory under CODEX_HOME (~/.codex/agents).
user_dir: Option<PathBuf>,
/// Merged map: project definitions override user ones.
map: HashMap<String, SubagentDefinition>,
}
impl SubagentRegistry {
pub fn new(project_dir: Option<PathBuf>, user_dir: Option<PathBuf>) -> Self {
Self {
project_dir,
user_dir,
map: HashMap::new(),
}
}
/// Loads JSON files from user_dir then project_dir (project wins on conflict).
pub fn load(&mut self) {
let mut map: HashMap<String, SubagentDefinition> = HashMap::new();
// Load user definitions first
if let Some(dir) = &self.user_dir {
Self::load_from_dir_into(dir, &mut map);
}
// Then load project definitions which override on conflicts
if let Some(dir) = &self.project_dir {
Self::load_from_dir_into(dir, &mut map);
}
// Ensure a simple builtin test subagent exists to validate wiring end-to-end.

// Users can override this by providing their own definition named "hello".
if !map.contains_key("hello") {
map.insert(
"hello".to_string(),
SubagentDefinition {
name: "hello".to_string(),
description: "Builtin test subagent that replies with a greeting".to_string(),
// Keep instructions narrow so models reliably output the intended text.
instructions:
"Reply with exactly this text and nothing else: Hello from subagent"
.to_string(),
// Disallow tool usage for the hello subagent.
tools: Some(Vec::new()),
},
);
}
self.map = map;
}
pub fn get(&self, name: &str) -> Option<&SubagentDefinition> {
self.map.get(name)
}
pub fn all_names(&self) -> Vec<String> {
self.map.keys().cloned().collect()
}
fn load_from_dir_into(dir: &Path, out: &mut HashMap<String, SubagentDefinition>) {
let Ok(iter) = fs::read_dir(dir) else {
return;
};
for entry in iter.flatten() {
let path = entry.path();
if path.is_file()
&& path
.extension()
.and_then(|e| e.to_str())
.map(|e| e.eq_ignore_ascii_case("json"))
.unwrap_or(false)
{
match SubagentDefinition::from_file(&path) {
Ok(def) => {
out.insert(def.name.clone(), def);
}
Err(e) => {
tracing::warn!("Failed to load subagent from {}: {}", path.display(), e);
}
}
}
}
}
}
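A tiny illustration of the precedence that `load` implements above: user-scope entries are inserted first, then project-scope entries with the same name overwrite them. Plain strings stand in for full `SubagentDefinition` values.

use std::collections::HashMap;

fn main() {
    let mut map: HashMap<String, String> = HashMap::new();
    // Loaded first: ~/.codex/agents/reviewer.json (user scope).
    map.insert("reviewer".to_string(), "user-scope reviewer".to_string());
    // Loaded second: <cwd>/.codex/agents/reviewer.json (project scope) wins on conflict.
    map.insert("reviewer".to_string(), "project-scope reviewer".to_string());
    assert_eq!(map["reviewer"], "project-scope reviewer");
    println!("effective definition: {}", map["reviewer"]);
}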

View File

@@ -0,0 +1,142 @@
use crate::codex::Codex;
use crate::error::Result as CodexResult;
use super::definition::SubagentDefinition;
use super::registry::SubagentRegistry;
/// Arguments expected for the `subagent.run` tool.
#[derive(serde::Deserialize)]
pub struct RunSubagentArgs {
pub name: String,
pub input: String,
#[serde(default)]
pub context: Option<String>,
}
/// Run a subagent in a nested Codex session and return the final message.
pub(crate) async fn run(
sess: &crate::codex::Session,
turn_context: &crate::codex::TurnContext,
registry: &SubagentRegistry,
args: RunSubagentArgs,
_parent_sub_id: &str,
) -> CodexResult<String> {
let def: &SubagentDefinition = registry.get(&args.name).ok_or_else(|| {
crate::error::CodexErr::Stream(format!("unknown subagent: {}", args.name), None)
})?;
let mut nested_cfg = (*sess.base_config()).clone();
nested_cfg.base_instructions = Some(def.instructions.clone());
nested_cfg.user_instructions = None;
nested_cfg.approval_policy = turn_context.approval_policy;
nested_cfg.sandbox_policy = turn_context.sandbox_policy.clone();
nested_cfg.cwd = turn_context.cwd.clone();
nested_cfg.include_subagent_tool = false;
let nested = Codex::spawn(nested_cfg, sess.auth_manager(), None).await?;
let nested_codex = nested.codex;
let subagent_id = uuid::Uuid::new_v4().to_string();
forward_begin(sess, _parent_sub_id, &subagent_id, &def.name).await;
let text = match args.context {
Some(ctx) if !ctx.trim().is_empty() => format!("{ctx}\n\n{input}", input = args.input),
_ => args.input,
};
nested_codex
.submit(crate::protocol::Op::UserInput {
items: vec![crate::protocol::InputItem::Text { text }],
})
.await
.map_err(|e| {
crate::error::CodexErr::Stream(format!("failed to submit to subagent: {e}"), None)
})?;
let mut last_message: Option<String> = None;
loop {
let ev = nested_codex.next_event().await?;
match ev.msg.clone() {
crate::protocol::EventMsg::AgentMessage(m) => {
last_message = Some(m.message);
}
crate::protocol::EventMsg::TaskComplete(t) => {
let _ = nested_codex.submit(crate::protocol::Op::Shutdown).await;
forward_forwarded(sess, _parent_sub_id, &subagent_id, &def.name, ev.msg).await;
forward_end(
sess,
_parent_sub_id,
&subagent_id,
&def.name,
true,
t.last_agent_message.clone(),
)
.await;
return Ok(t
.last_agent_message
.unwrap_or_else(|| last_message.unwrap_or_default()));
}
_ => {}
}
forward_forwarded(sess, _parent_sub_id, &subagent_id, &def.name, ev.msg).await;
}
}
async fn forward_begin(
sess: &crate::codex::Session,
parent_sub_id: &str,
subagent_id: &str,
name: &str,
) {
sess
.send_event(crate::protocol::Event {
id: parent_sub_id.to_string(),
msg: crate::protocol::EventMsg::SubagentBegin(crate::protocol::SubagentBeginEvent {
subagent_id: subagent_id.to_string(),
name: name.to_string(),
}),
})
.await;
}
async fn forward_forwarded(
sess: &crate::codex::Session,
parent_sub_id: &str,
subagent_id: &str,
name: &str,
msg: crate::protocol::EventMsg,
) {
sess
.send_event(crate::protocol::Event {
id: parent_sub_id.to_string(),
msg: crate::protocol::EventMsg::SubagentForwarded(
crate::protocol::SubagentForwardedEvent {
subagent_id: subagent_id.to_string(),
name: name.to_string(),
event: Box::new(msg),
},
),
})
.await;
}
async fn forward_end(
sess: &crate::codex::Session,
parent_sub_id: &str,
subagent_id: &str,
name: &str,
success: bool,
last_agent_message: Option<String>,
) {
sess
.send_event(crate::protocol::Event {
id: parent_sub_id.to_string(),
msg: crate::protocol::EventMsg::SubagentEnd(crate::protocol::SubagentEndEvent {
subagent_id: subagent_id.to_string(),
name: name.to_string(),
success,
last_agent_message,
}),
})
.await;
}

View File

@@ -0,0 +1,54 @@
use std::collections::BTreeMap;
use std::sync::LazyLock;
use crate::openai_tools::JsonSchema;
use crate::openai_tools::OpenAiTool;
use crate::openai_tools::ResponsesApiTool;
pub(crate) static SUBAGENT_TOOL: LazyLock<OpenAiTool> = LazyLock::new(|| {
let mut properties = BTreeMap::new();
properties.insert(
"name".to_string(),
JsonSchema::String {
description: Some("Registered subagent name".to_string()),
},
);
properties.insert(
"input".to_string(),
JsonSchema::String {
description: Some("Task or instruction for the subagent".to_string()),
},
);
properties.insert(
"context".to_string(),
JsonSchema::String {
description: Some("Optional extra context to aid the task".to_string()),
},
);
OpenAiTool::Function(ResponsesApiTool {
name: "subagent_run".to_string(),
description: "Invoke a named subagent with isolated context and return its result"
.to_string(),
strict: false,
parameters: JsonSchema::Object {
properties,
required: Some(vec!["name".to_string(), "input".to_string()]),
additional_properties: Some(false),
},
})
});
pub(crate) static SUBAGENT_LIST_TOOL: LazyLock<OpenAiTool> = LazyLock::new(|| {
let properties = BTreeMap::new();
OpenAiTool::Function(ResponsesApiTool {
name: "subagent_list".to_string(),
description: "List available subagents (name and description). Call before subagent_run if unsure.".to_string(),
strict: false,
parameters: JsonSchema::Object {
properties,
required: None,
additional_properties: Some(false),
},
})
});
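To make the schema above concrete, a hedged sketch of an arguments payload the model might send for `subagent_run`, parsed with a local mirror of the handler's `Args` struct from codex.rs; the field values are made up.

use serde::Deserialize;

// Local mirror of the `Args` struct in the `subagent_run` handler, redeclared
// so this sketch is self-contained.
#[derive(Deserialize, Debug)]
struct Args {
    name: String,
    input: String,
    #[serde(default)]
    context: Option<String>,
}

fn main() {
    // Example payload; `name` and `input` are required, `context` is optional.
    let arguments = r#"{
        "name": "hello",
        "input": "Say hi to the user",
        "context": "The user prefers short answers"
    }"#;
    let args: Args = serde_json::from_str(arguments).expect("valid arguments");
    assert_eq!(args.name, "hello");
    assert!(args.context.is_some());
    println!("would run subagent {} with input {:?}", args.name, args.input);
}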

View File

@@ -70,12 +70,12 @@ async fn truncates_output_lines() {
let output = run_test_cmd(tmp, cmd).await.unwrap();
let expected_output = (1..=256)
let expected_output = (1..=300)
.map(|i| format!("{i}\n"))
.collect::<Vec<_>>()
.join("");
assert_eq!(output.stdout.text, expected_output);
assert_eq!(output.stdout.truncated_after_lines, Some(256));
assert_eq!(output.stdout.truncated_after_lines, None);
}
/// Command succeeds with exit code 0 normally
@@ -91,8 +91,8 @@ async fn truncates_output_bytes() {
let output = run_test_cmd(tmp, cmd).await.unwrap();
assert_eq!(output.stdout.text.len(), 10240);
assert_eq!(output.stdout.truncated_after_lines, Some(10));
assert!(output.stdout.text.len() >= 15000);
assert_eq!(output.stdout.truncated_after_lines, None);
}
/// Command not found returns exit code 127, this is not considered a sandbox error

View File

@@ -139,3 +139,34 @@ async fn test_exec_stderr_stream_events_echo() {
}
assert_eq!(String::from_utf8_lossy(&err), "oops\n");
}
#[tokio::test]
async fn test_aggregated_output_interleaves_in_order() {
// Spawn a shell that alternates stdout and stderr with sleeps to enforce order.
let cmd = vec![
"/bin/sh".to_string(),
"-c".to_string(),
"printf 'O1\\n'; sleep 0.01; printf 'E1\\n' 1>&2; sleep 0.01; printf 'O2\\n'; sleep 0.01; printf 'E2\\n' 1>&2".to_string(),
];
let params = ExecParams {
command: cmd,
cwd: std::env::current_dir().unwrap_or_else(|_| PathBuf::from(".")),
timeout_ms: Some(5_000),
env: HashMap::new(),
with_escalated_permissions: None,
justification: None,
};
let policy = SandboxPolicy::new_read_only_policy();
let result = process_exec_tool_call(params, SandboxType::None, &policy, &None, None)
.await
.expect("process_exec_tool_call");
assert_eq!(result.exit_code, 0);
assert_eq!(result.stdout.text, "O1\nO2\n");
assert_eq!(result.stderr.text, "E1\nE2\n");
assert_eq!(result.aggregated_output.text, "O1\nE1\nO2\nE2\n");
assert_eq!(result.aggregated_output.truncated_after_lines, None);
}

View File

@@ -168,6 +168,15 @@ impl EventProcessor for EventProcessorWithHumanOutput {
fn process_event(&mut self, event: Event) -> CodexStatus {
let Event { id: _, msg } = event;
match msg {
EventMsg::SubagentBegin(_) => {
// Ignore in human output for now.
}
EventMsg::SubagentForwarded(_) => {
// Ignore; TUI will render forwarded events.
}
EventMsg::SubagentEnd(_) => {
// Ignore in human output for now.
}
EventMsg::Error(ErrorEvent { message }) => {
let prefix = "ERROR:".style(self.red);
ts_println!(self, "{prefix} {message}");
@@ -287,10 +296,10 @@ impl EventProcessor for EventProcessorWithHumanOutput {
EventMsg::ExecCommandOutputDelta(_) => {}
EventMsg::ExecCommandEnd(ExecCommandEndEvent {
call_id,
stdout,
stderr,
aggregated_output,
duration,
exit_code,
..
}) => {
let exec_command = self.call_id_to_command.remove(&call_id);
let (duration, call) = if let Some(ExecCommandBegin { command, .. }) = exec_command
@@ -303,8 +312,7 @@ impl EventProcessor for EventProcessorWithHumanOutput {
("".to_string(), format!("exec('{call_id}')"))
};
let output = if exit_code == 0 { stdout } else { stderr };
let truncated_output = output
let truncated_output = aggregated_output
.lines()
.take(MAX_OUTPUT_LINES_FOR_EXEC_TOOL_CALL)
.collect::<Vec<_>>()
@@ -539,6 +547,7 @@ impl EventProcessor for EventProcessorWithHumanOutput {
}
},
EventMsg::ShutdownComplete => return CodexStatus::Shutdown,
EventMsg::ConversationHistory(_) => {}
}
CodexStatus::Running
}

View File

@@ -41,6 +41,12 @@ impl EventProcessor for EventProcessorWithJsonOutput {
fn process_event(&mut self, event: Event) -> CodexStatus {
match event.msg {
EventMsg::SubagentBegin(_)
| EventMsg::SubagentForwarded(_)
| EventMsg::SubagentEnd(_) => {
// Ignored for JSON output in exec for now.
CodexStatus::Running
}
EventMsg::AgentMessageDelta(_) | EventMsg::AgentReasoningDelta(_) => {
// Suppress streaming events in JSON mode.
CodexStatus::Running

View File

@@ -146,6 +146,7 @@ pub async fn run_main(cli: Cli, codex_linux_sandbox_exe: Option<PathBuf>) -> any
model_provider,
codex_linux_sandbox_exe,
base_instructions: None,
include_subagent_tool: None,
include_plan_tool: None,
include_apply_patch_tool: None,
disable_response_storage: oss.then_some(true),

View File

@@ -736,6 +736,7 @@ fn derive_config_from_params(
base_instructions,
include_plan_tool,
include_apply_patch_tool,
include_subagent_tool: None,
disable_response_storage: None,
show_raw_agent_reasoning: None,
};

View File

@@ -161,6 +161,7 @@ impl CodexToolCallParam {
base_instructions,
include_plan_tool,
include_apply_patch_tool: None,
include_subagent_tool: None,
disable_response_storage: None,
show_raw_agent_reasoning: None,
};

View File

@@ -174,6 +174,11 @@ async fn run_codex_tool_session_inner(
.await;
match event.msg {
EventMsg::SubagentBegin(_)
| EventMsg::SubagentForwarded(_)
| EventMsg::SubagentEnd(_) => {
// Ignore subagent orchestration for MCP echoing.
}
EventMsg::ExecApprovalRequest(ExecApprovalRequestEvent {
command,
cwd,
@@ -275,6 +280,7 @@ async fn run_codex_tool_session_inner(
| EventMsg::GetHistoryEntryResponse(_)
| EventMsg::PlanUpdate(_)
| EventMsg::TurnAborted(_)
| EventMsg::ConversationHistory(_)
| EventMsg::ShutdownComplete => {
// For now, we do not do anything extra for these
// events. Note that

View File

@@ -22,6 +22,7 @@ use uuid::Uuid;
use crate::config_types::ReasoningEffort as ReasoningEffortConfig;
use crate::config_types::ReasoningSummary as ReasoningSummaryConfig;
use crate::message_history::HistoryEntry;
use crate::models::ResponseItem;
use crate::parse_command::ParsedCommand;
use crate::plan_tool::UpdatePlanArgs;
@@ -137,6 +138,10 @@ pub enum Op {
/// Request a single history entry identified by `log_id` + `offset`.
GetHistoryEntryRequest { offset: usize, log_id: u64 },
/// Request the full in-memory conversation transcript for the current session.
/// Reply is delivered via `EventMsg::ConversationHistory`.
GetHistory,
/// Request the list of MCP tools available across all configured servers.
/// Reply is delivered via `EventMsg::McpListToolsResponse`.
ListMcpTools,
@@ -471,6 +476,16 @@ pub enum EventMsg {
/// Notification that the agent is shutting down.
ShutdownComplete,
ConversationHistory(ConversationHistoryResponseEvent),
// --- Subagent orchestration events ---
/// Emitted when a subagent starts.
SubagentBegin(SubagentBeginEvent),
/// Forwards a nested event produced by a running subagent.
SubagentForwarded(SubagentForwardedEvent),
/// Emitted when a subagent finishes.
SubagentEnd(SubagentEndEvent),
}
// Individual event payload types matching each `EventMsg` variant.
@@ -494,6 +509,28 @@ pub struct TokenUsage {
pub total_tokens: u64,
}
#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct SubagentBeginEvent {
pub subagent_id: String,
pub name: String,
}
#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct SubagentEndEvent {
pub subagent_id: String,
pub name: String,
pub success: bool,
#[serde(skip_serializing_if = "Option::is_none")]
pub last_agent_message: Option<String>,
}
#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct SubagentForwardedEvent {
pub subagent_id: String,
pub name: String,
pub event: Box<EventMsg>,
}
impl TokenUsage {
pub fn is_zero(&self) -> bool {
self.total_tokens == 0
@@ -651,6 +688,14 @@ impl McpToolCallEndEvent {
}
}
/// Response payload for `Op::GetHistory` containing the current session's
/// in-memory transcript.
#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct ConversationHistoryResponseEvent {
pub conversation_id: Uuid,
pub entries: Vec<ResponseItem>,
}
#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct ExecCommandBeginEvent {
/// Identifier so this can be paired with the ExecCommandEnd event.
@@ -670,10 +715,15 @@ pub struct ExecCommandEndEvent {
pub stdout: String,
/// Captured stderr
pub stderr: String,
/// Captured aggregated output
#[serde(default)]
pub aggregated_output: String,
/// The command's exit code.
pub exit_code: i32,
/// The duration of the command execution.
pub duration: Duration,
/// Formatted output from the command, as seen by the model.
pub formatted_output: String,
}
#[derive(Debug, Clone, Deserialize, Serialize)]

View File

@@ -9,6 +9,8 @@ use codex_core::protocol::TokenUsage;
use codex_file_search::FileMatch;
use crossterm::event::KeyEvent;
use ratatui::buffer::Buffer;
use ratatui::layout::Constraint;
use ratatui::layout::Layout;
use ratatui::layout::Rect;
use ratatui::widgets::WidgetRef;
@@ -95,8 +97,31 @@ impl BottomPane {
} else {
self.composer.desired_height(width)
};
let top_pad = if self.active_view.is_none() || self.status_view_active {
1
} else {
0
};
view_height
.saturating_add(Self::BOTTOM_PAD_LINES)
.saturating_add(top_pad)
}
view_height.saturating_add(Self::BOTTOM_PAD_LINES)
fn layout(&self, area: Rect) -> Rect {
let top = if self.active_view.is_none() || self.status_view_active {
1
} else {
0
};
let [_, content, _] = Layout::vertical([
Constraint::Max(top),
Constraint::Min(1),
Constraint::Max(BottomPane::BOTTOM_PAD_LINES),
])
.areas(area);
content
}
pub fn cursor_pos(&self, area: Rect) -> Option<(u16, u16)> {
@@ -104,10 +129,11 @@ impl BottomPane {
// status indicator shown while a task is running, or approval modal).
// In these states the textarea is not interactable, so we should not
// show its caret.
if self.active_view.is_some() {
if self.active_view.is_some() || self.status_view_active {
None
} else {
self.composer.cursor_pos(area)
let content = self.layout(area);
self.composer.cursor_pos(content)
}
}
@@ -365,31 +391,12 @@ impl BottomPane {
impl WidgetRef for &BottomPane {
fn render_ref(&self, area: Rect, buf: &mut Buffer) {
let content = self.layout(area);
if let Some(view) = &self.active_view {
// Reserve bottom padding lines; keep at least 1 line for the view.
let avail = area.height;
if avail > 0 {
let pad = BottomPane::BOTTOM_PAD_LINES.min(avail.saturating_sub(1));
let view_rect = Rect {
x: area.x,
y: area.y,
width: area.width,
height: avail - pad,
};
view.render(view_rect, buf);
}
view.render(content, buf);
} else {
let avail = area.height;
if avail > 0 {
let composer_rect = Rect {
x: area.x,
y: area.y,
width: area.width,
// Reserve bottom padding
height: avail - BottomPane::BOTTOM_PAD_LINES.min(avail.saturating_sub(1)),
};
(&self.composer).render_ref(composer_rect, buf);
}
(&self.composer).render_ref(content, buf);
}
}
}
@@ -495,13 +502,13 @@ mod tests {
let area = Rect::new(0, 0, 40, 3);
let mut buf = Buffer::empty(area);
(&pane).render_ref(area, &mut buf);
let mut row0 = String::new();
let mut row1 = String::new();
for x in 0..area.width {
row0.push(buf[(x, 0)].symbol().chars().next().unwrap_or(' '));
row1.push(buf[(x, 1)].symbol().chars().next().unwrap_or(' '));
}
assert!(
row0.contains("Working"),
"expected Working header after denial: {row0:?}"
row1.contains("Working"),
"expected Working header after denial on row 1: {row1:?}"
);
// Drain the channel to avoid unused warnings.
@@ -523,14 +530,13 @@ mod tests {
// Begin a task: show initial status.
pane.set_task_running(true);
// Render and confirm the line contains the "Working" header.
let area = Rect::new(0, 0, 40, 3);
let mut buf = Buffer::empty(area);
(&pane).render_ref(area, &mut buf);
let mut row0 = String::new();
for x in 0..area.width {
row0.push(buf[(x, 0)].symbol().chars().next().unwrap_or(' '));
row0.push(buf[(x, 1)].symbol().chars().next().unwrap_or(' '));
}
assert!(
row0.contains("Working"),
@@ -563,12 +569,12 @@ mod tests {
let mut buf = Buffer::empty(area);
(&pane).render_ref(area, &mut buf);
// Top row contains the status header
// Row 1 contains the status header (row 0 is the spacer)
let mut top = String::new();
for x in 0..area.width {
top.push(buf[(x, 0)].symbol().chars().next().unwrap_or(' '));
top.push(buf[(x, 1)].symbol().chars().next().unwrap_or(' '));
}
assert_eq!(buf[(0, 0)].symbol().chars().next().unwrap_or(' '), '▌');
assert_eq!(buf[(0, 1)].symbol().chars().next().unwrap_or(' '), '▌');
assert!(
top.contains("Working"),
"expected Working header on top row: {top:?}"
@@ -605,7 +611,7 @@ mod tests {
pane.set_task_running(true);
// Height=2 → pad shrinks to 1; bottom row is blank, top row has spinner.
// Height=2 → with spacer, spinner on row 1; no bottom padding.
let area2 = Rect::new(0, 0, 20, 2);
let mut buf2 = Buffer::empty(area2);
(&pane).render_ref(area2, &mut buf2);
@@ -615,13 +621,10 @@ mod tests {
row0.push(buf2[(x, 0)].symbol().chars().next().unwrap_or(' '));
row1.push(buf2[(x, 1)].symbol().chars().next().unwrap_or(' '));
}
assert!(row0.trim().is_empty(), "expected spacer on row 0: {row0:?}");
assert!(
row0.contains("Working"),
"expected Working header on row 0: {row0:?}"
);
assert!(
row1.trim().is_empty(),
"expected bottom padding on row 1: {row1:?}"
row1.contains("Working"),
"expected Working on row 1: {row1:?}"
);
// Height=1 → no padding; single row is the spinner.

View File

@@ -105,6 +105,7 @@ pub(crate) struct ChatWidget {
full_reasoning_buffer: String,
session_id: Option<Uuid>,
frame_requester: FrameRequester,
last_history_was_exec: bool,
}
struct UserMessage {
@@ -376,6 +377,9 @@ impl ChatWidget {
self.bottom_pane.set_task_running(false);
self.task_complete_pending = false;
}
// A completed stream indicates non-exec content was just inserted.
// Reset the exec header grouping so the next exec shows its header.
self.last_history_was_exec = false;
self.flush_interrupt_queue();
}
}
@@ -401,6 +405,7 @@ impl ChatWidget {
exit_code: ev.exit_code,
stdout: ev.stdout.clone(),
stderr: ev.stderr.clone(),
formatted_output: ev.formatted_output.clone(),
},
));
@@ -408,9 +413,16 @@ impl ChatWidget {
self.active_exec_cell = None;
let pending = std::mem::take(&mut self.pending_exec_completions);
for (command, parsed, output) in pending {
self.add_to_history(history_cell::new_completed_exec_command(
command, parsed, output,
));
let include_header = !self.last_history_was_exec;
let cell = history_cell::new_completed_exec_command(
command,
parsed,
output,
include_header,
ev.duration,
);
self.add_to_history(cell);
self.last_history_was_exec = true;
}
}
}
@@ -473,9 +485,11 @@ impl ChatWidget {
exec.parsed.extend(ev.parsed_cmd);
}
_ => {
let include_header = !self.last_history_was_exec;
self.active_exec_cell = Some(history_cell::new_active_exec_command(
ev.command,
ev.parsed_cmd,
include_header,
));
}
}
@@ -565,6 +579,7 @@ impl ChatWidget {
reasoning_buffer: String::new(),
full_reasoning_buffer: String::new(),
session_id: None,
last_history_was_exec: false,
}
}
@@ -713,13 +728,19 @@ impl ChatWidget {
fn flush_active_exec_cell(&mut self) {
if let Some(active) = self.active_exec_cell.take() {
self.last_history_was_exec = true;
self.app_event_tx
.send(AppEvent::InsertHistoryCell(Box::new(active)));
}
}
fn add_to_history(&mut self, cell: impl HistoryCell + 'static) {
// Only break exec grouping if the cell renders visible lines.
let has_display_lines = !cell.display_lines().is_empty();
self.flush_active_exec_cell();
if has_display_lines {
self.last_history_was_exec = false;
}
self.app_event_tx
.send(AppEvent::InsertHistoryCell(Box::new(cell)));
}
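The `last_history_was_exec` bookkeeping above is spread across several hunks, so here is a minimal std-only sketch of just that rule: the ">_" header is emitted only for the first exec cell after any non-exec history content. The type and method names are illustrative stand-ins, not ChatWidget's real API.

// Minimal sketch of the exec-header grouping described in the hunks above.
struct ExecGrouping {
    last_history_was_exec: bool,
}

impl ExecGrouping {
    // Called when an exec cell is inserted; returns whether to include the ">_" header.
    fn on_exec_cell(&mut self) -> bool {
        let include_header = !self.last_history_was_exec;
        self.last_history_was_exec = true;
        include_header
    }

    // Called when a cell with visible display lines (non-exec content) is inserted.
    fn on_visible_non_exec_cell(&mut self) {
        self.last_history_was_exec = false;
    }
}

fn main() {
    let mut grouping = ExecGrouping { last_history_was_exec: false };
    assert!(grouping.on_exec_cell());      // first exec after non-exec content: header shown
    assert!(!grouping.on_exec_cell());     // consecutive exec: header suppressed
    grouping.on_visible_non_exec_cell();   // e.g. an agent message ends the group
    assert!(grouping.on_exec_cell());      // next exec shows the header again
    println!("grouping ok");
}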
@@ -815,9 +836,41 @@ impl ChatWidget {
EventMsg::ShutdownComplete => self.on_shutdown_complete(),
EventMsg::TurnDiff(TurnDiffEvent { unified_diff }) => self.on_turn_diff(unified_diff),
EventMsg::BackgroundEvent(BackgroundEventEvent { message }) => {
// Also show background logs in the transcript for visibility.
self.add_to_history(history_cell::new_log_line(message.clone()));
self.on_background_event(message)
}
EventMsg::SubagentBegin(ev) => {
let msg = format!("subagent begin: {} ({})", ev.name, ev.subagent_id);
self.add_to_history(history_cell::new_log_line(msg));
}
EventMsg::SubagentForwarded(ev) => {
// Summarize forwarded event type; include message text when it is AgentMessage.
match *ev.event {
EventMsg::AgentMessage(AgentMessageEvent { message }) => {
let msg = format!("subagent {}: {}", ev.name, message);
self.add_to_history(history_cell::new_log_line(msg));
}
EventMsg::AgentMessageDelta(AgentMessageDeltaEvent { ref delta }) => {
let msg = format!("subagent {}: {}", ev.name, delta);
self.add_to_history(history_cell::new_log_line(msg));
}
ref other => {
let msg = format!("subagent {} forwarded: {:?}", ev.name, other);
self.add_to_history(history_cell::new_log_line(msg));
}
}
}
EventMsg::SubagentEnd(ev) => {
let summary = ev.last_agent_message.as_deref().unwrap_or("");
let msg = format!(
"subagent end: {} ({}) success={} {}",
ev.name, ev.subagent_id, ev.success, summary
);
self.add_to_history(history_cell::new_log_line(msg));
}
EventMsg::StreamError(StreamErrorEvent { message }) => self.on_stream_error(message),
EventMsg::ConversationHistory(_) => {}
}
// Coalesce redraws: issue at most one after handling the event
if self.needs_redraw {


@@ -44,6 +44,31 @@ fn test_config() -> Config {
.expect("config")
}
// Backward-compat shim for older session logs that predate the
// `formatted_output` field on ExecCommandEnd events.
fn upgrade_event_payload_for_tests(mut payload: serde_json::Value) -> serde_json::Value {
if let Some(obj) = payload.as_object_mut()
&& let Some(msg) = obj.get_mut("msg")
&& let Some(m) = msg.as_object_mut()
{
let ty = m.get("type").and_then(|v| v.as_str()).unwrap_or("");
if ty == "exec_command_end" && !m.contains_key("formatted_output") {
let stdout = m.get("stdout").and_then(|v| v.as_str()).unwrap_or("");
let stderr = m.get("stderr").and_then(|v| v.as_str()).unwrap_or("");
let formatted = if stderr.is_empty() {
stdout.to_string()
} else {
format!("{stdout}{stderr}")
};
m.insert(
"formatted_output".to_string(),
serde_json::Value::String(formatted),
);
}
}
payload
}
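As a quick illustration of the fallback the shim applies, here is a standalone sketch assuming `serde_json` is available (the surrounding tests already use it): old `exec_command_end` payloads gain a `formatted_output` field synthesized as stdout followed by stderr. The function name is illustrative, not the test helper above.

use serde_json::{json, Value};

// Mirrors the upgrade rule above: if an exec_command_end payload lacks
// `formatted_output`, synthesize it from stdout followed by stderr.
fn add_formatted_output(mut payload: Value) -> Value {
    if let Some(m) = payload.pointer_mut("/msg").and_then(Value::as_object_mut) {
        let is_exec_end = m.get("type").and_then(Value::as_str) == Some("exec_command_end");
        if is_exec_end && !m.contains_key("formatted_output") {
            let stdout = m.get("stdout").and_then(Value::as_str).unwrap_or("").to_string();
            let stderr = m.get("stderr").and_then(Value::as_str).unwrap_or("").to_string();
            m.insert("formatted_output".into(), Value::String(format!("{stdout}{stderr}")));
        }
    }
    payload
}

fn main() {
    let old = json!({
        "id": "call-1",
        "msg": { "type": "exec_command_end", "stdout": "ok\n", "stderr": "warn\n" }
    });
    let upgraded = add_formatted_output(old);
    assert_eq!(upgraded["msg"]["formatted_output"], "ok\nwarn\n");
    println!("upgraded payload ok");
}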
#[test]
fn final_answer_without_newline_is_flushed_immediately() {
let (mut chat, mut rx, _op_rx) = make_chatwidget_manual();
@@ -157,6 +182,7 @@ fn make_chatwidget_manual() -> (
full_reasoning_buffer: String::new(),
session_id: None,
frame_requester: crate::tui::FrameRequester::test_dummy(),
last_history_was_exec: false,
};
(widget, rx, op_rx)
}
@@ -237,8 +263,10 @@ fn exec_history_cell_shows_working_then_completed() {
call_id: "call-1".into(),
stdout: "done".into(),
stderr: String::new(),
aggregated_output: "done".into(),
exit_code: 0,
duration: std::time::Duration::from_millis(5),
formatted_output: "done".into(),
}),
});
@@ -250,8 +278,12 @@ fn exec_history_cell_shows_working_then_completed() {
);
let blob = lines_to_single_string(&cells[0]);
assert!(
blob.contains("Completed"),
"expected completed exec cell to show Completed header: {blob:?}"
blob.contains('✓'),
"expected completed exec cell to show success marker: {blob:?}"
);
assert!(
blob.contains("echo done"),
"expected command text to be present: {blob:?}"
);
}
@@ -282,8 +314,10 @@ fn exec_history_cell_shows_working_then_failed() {
call_id: "call-2".into(),
stdout: String::new(),
stderr: "error".into(),
aggregated_output: "error".into(),
exit_code: 2,
duration: std::time::Duration::from_millis(7),
formatted_output: "".into(),
}),
});
@@ -295,9 +329,82 @@ fn exec_history_cell_shows_working_then_failed() {
);
let blob = lines_to_single_string(&cells[0]);
assert!(
blob.contains("Failed (exit 2)"),
"expected completed exec cell to show Failed header with exit code: {blob:?}"
blob.contains('✗'),
"expected failure marker present: {blob:?}"
);
assert!(
blob.contains("false"),
"expected command text present: {blob:?}"
);
}
#[test]
fn exec_history_extends_previous_when_consecutive() {
let (mut chat, mut rx, _op_rx) = make_chatwidget_manual();
// First command
chat.handle_codex_event(Event {
id: "call-a".into(),
msg: EventMsg::ExecCommandBegin(ExecCommandBeginEvent {
call_id: "call-a".into(),
command: vec!["bash".into(), "-lc".into(), "echo one".into()],
cwd: std::env::current_dir().unwrap_or_else(|_| PathBuf::from(".")),
parsed_cmd: vec![
codex_core::parse_command::ParsedCommand::Unknown {
cmd: "echo one".into(),
}
.into(),
],
}),
});
chat.handle_codex_event(Event {
id: "call-a".into(),
msg: EventMsg::ExecCommandEnd(ExecCommandEndEvent {
call_id: "call-a".into(),
stdout: "one".into(),
stderr: String::new(),
aggregated_output: "one".into(),
exit_code: 0,
duration: std::time::Duration::from_millis(5),
formatted_output: "one".into(),
}),
});
let first_cells = drain_insert_history(&mut rx);
assert_eq!(first_cells.len(), 1, "first exec should insert history");
// Second command
chat.handle_codex_event(Event {
id: "call-b".into(),
msg: EventMsg::ExecCommandBegin(ExecCommandBeginEvent {
call_id: "call-b".into(),
command: vec!["bash".into(), "-lc".into(), "echo two".into()],
cwd: std::env::current_dir().unwrap_or_else(|_| PathBuf::from(".")),
parsed_cmd: vec![
codex_core::parse_command::ParsedCommand::Unknown {
cmd: "echo two".into(),
}
.into(),
],
}),
});
chat.handle_codex_event(Event {
id: "call-b".into(),
msg: EventMsg::ExecCommandEnd(ExecCommandEndEvent {
call_id: "call-b".into(),
stdout: "two".into(),
stderr: String::new(),
aggregated_output: "two".into(),
exit_code: 0,
duration: std::time::Duration::from_millis(5),
formatted_output: "two".into(),
}),
});
let second_cells = drain_insert_history(&mut rx);
assert_eq!(second_cells.len(), 1, "second exec should extend history");
let first_blob = lines_to_single_string(&first_cells[0]);
let second_blob = lines_to_single_string(&second_cells[0]);
assert!(first_blob.contains('✓'));
assert!(second_blob.contains("echo two"));
}
#[tokio::test(flavor = "current_thread")]
@@ -340,7 +447,9 @@ async fn binary_size_transcript_matches_ideal_fixture() {
match kind {
"codex_event" => {
if let Some(payload) = v.get("payload") {
let ev: Event = serde_json::from_value(payload.clone()).expect("parse");
let ev: Event =
serde_json::from_value(upgrade_event_payload_for_tests(payload.clone()))
.expect("parse");
chat.handle_codex_event(ev);
while let Ok(app_ev) = rx.try_recv() {
match app_ev {


@@ -9,13 +9,7 @@ pub(crate) fn escape_command(command: &[String]) -> String {
pub(crate) fn strip_bash_lc_and_escape(command: &[String]) -> String {
match command {
// exactly three items
[first, second, third]
// first two must be "bash", "-lc"
if first == "bash" && second == "-lc" =>
{
third.clone() // borrow `third`
}
[first, second, third] if first == "bash" && second == "-lc" => third.clone(),
_ => escape_command(command),
}
}
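For reference, a standalone sketch of the rule this hunk condenses: a three-element `["bash", "-lc", script]` command is displayed as the script itself, anything else falls back to escaping. `escape_command` below is a simplified placeholder (the real one shell-escapes arguments); only the pattern match mirrors the code above.

fn escape_command(command: &[String]) -> String {
    // Simplified stand-in for the real shell-escaping helper.
    command.join(" ")
}

fn strip_bash_lc_and_escape(command: &[String]) -> String {
    match command {
        [first, second, third] if first == "bash" && second == "-lc" => third.clone(),
        _ => escape_command(command),
    }
}

fn main() {
    let wrapped: Vec<String> = ["bash", "-lc", "echo one && echo two"]
        .iter()
        .map(|s| s.to_string())
        .collect();
    assert_eq!(strip_bash_lc_and_escape(&wrapped), "echo one && echo two");

    let plain: Vec<String> = ["ls", "-la"].iter().map(|s| s.to_string()).collect();
    assert_eq!(strip_bash_lc_and_escape(&plain), "ls -la");
    println!("strip ok");
}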


@@ -29,10 +29,10 @@ use ratatui::prelude::*;
use ratatui::style::Color;
use ratatui::style::Modifier;
use ratatui::style::Style;
use ratatui::style::Stylize;
use ratatui::widgets::Paragraph;
use ratatui::widgets::WidgetRef;
use ratatui::widgets::Wrap;
use shlex::try_join as shlex_try_join;
use std::collections::HashMap;
use std::io::Cursor;
use std::path::PathBuf;
@@ -46,6 +46,7 @@ pub(crate) struct CommandOutput {
pub(crate) exit_code: i32,
pub(crate) stdout: String,
pub(crate) stderr: String,
pub(crate) formatted_output: String,
}
pub(crate) enum PatchEventType {
@@ -104,6 +105,8 @@ pub(crate) struct ExecCell {
pub(crate) parsed: Vec<ParsedCommand>,
pub(crate) output: Option<CommandOutput>,
start_time: Option<Instant>,
duration: Option<Duration>,
include_header: bool,
}
impl HistoryCell for ExecCell {
fn display_lines(&self) -> Vec<Line<'static>> {
@@ -112,15 +115,63 @@ impl HistoryCell for ExecCell {
&self.parsed,
self.output.as_ref(),
self.start_time,
self.include_header,
)
}
fn transcript_lines(&self) -> Vec<Line<'static>> {
let mut lines: Vec<Line<'static>> = vec!["".into()];
let cmd_display = strip_bash_lc_and_escape(&self.command);
for (i, part) in cmd_display.lines().enumerate() {
if i == 0 {
lines.push(Line::from(vec!["$ ".magenta(), part.to_string().into()]));
} else {
lines.push(Line::from(vec![" ".into(), part.to_string().into()]));
}
}
// Command output: include full stdout and stderr (no truncation)
if let Some(output) = self.output.as_ref() {
lines.extend(output.formatted_output.lines().map(ansi_escape_line));
}
if let Some(output) = self.output.as_ref() {
let duration = self
.duration
.map(format_duration)
.unwrap_or_else(|| "unknown".to_string());
let mut result = if output.exit_code == 0 {
Line::from("".green().bold())
} else {
Line::from(vec![
"".red().bold(),
format!(" ({})", output.exit_code).into(),
])
};
result.push_span(format!("{duration}").dim());
lines.push(result);
}
lines
}
}
impl WidgetRef for &ExecCell {
fn render_ref(&self, area: Rect, buf: &mut Buffer) {
if area.height == 0 {
return;
}
let content_area = Rect {
x: area.x,
y: area.y,
width: area.width,
height: area.height,
};
Paragraph::new(Text::from(self.display_lines()))
.wrap(Wrap { trim: false })
.render(area, buf);
.render(content_area, buf);
}
}
@@ -131,8 +182,8 @@ struct CompletedMcpToolCallWithImageOutput {
impl HistoryCell for CompletedMcpToolCallWithImageOutput {
fn display_lines(&self) -> Vec<Line<'static>> {
vec![
Line::from("tool result (image output omitted)"),
Line::from(""),
Line::from("tool result (image output omitted)"),
]
}
}
@@ -179,6 +230,7 @@ pub(crate) fn new_session_info(
};
let lines: Vec<Line<'static>> = vec![
Line::from(Span::from("")),
Line::from(vec![
Span::raw(">_ ").dim(),
Span::styled(
@@ -194,17 +246,16 @@ pub(crate) fn new_session_info(
Line::from(format!(" /status - {}", SlashCommand::Status.description()).dim()),
Line::from(format!(" /approvals - {}", SlashCommand::Approvals.description()).dim()),
Line::from(format!(" /model - {}", SlashCommand::Model.description()).dim()),
Line::from("".dim()),
];
PlainHistoryCell { lines }
} else if config.model == model {
PlainHistoryCell { lines: Vec::new() }
} else {
let lines = vec![
Line::from(""),
Line::from("model changed:".magenta().bold()),
Line::from(format!("requested: {}", config.model)),
Line::from(format!("used: {model}")),
Line::from(""),
];
PlainHistoryCell { lines }
}
@@ -212,9 +263,9 @@ pub(crate) fn new_session_info(
pub(crate) fn new_user_prompt(message: String) -> PlainHistoryCell {
let mut lines: Vec<Line<'static>> = Vec::new();
lines.push(Line::from(""));
lines.push(Line::from("user".cyan().bold()));
lines.extend(message.lines().map(|l| Line::from(l.to_string())));
lines.push(Line::from(""));
PlainHistoryCell { lines }
}
@@ -222,12 +273,15 @@ pub(crate) fn new_user_prompt(message: String) -> PlainHistoryCell {
pub(crate) fn new_active_exec_command(
command: Vec<String>,
parsed: Vec<ParsedCommand>,
include_header: bool,
) -> ExecCell {
ExecCell {
command,
parsed,
output: None,
start_time: Some(Instant::now()),
duration: None,
include_header,
}
}
@@ -235,76 +289,61 @@ pub(crate) fn new_completed_exec_command(
command: Vec<String>,
parsed: Vec<ParsedCommand>,
output: CommandOutput,
include_header: bool,
duration: Duration,
) -> ExecCell {
ExecCell {
command,
parsed,
output: Some(output),
start_time: None,
duration: Some(duration),
include_header,
}
}
fn exec_duration(start: Instant) -> String {
format!("{}s", start.elapsed().as_secs())
}
fn exec_command_lines(
command: &[String],
parsed: &[ParsedCommand],
output: Option<&CommandOutput>,
start_time: Option<Instant>,
include_header: bool,
) -> Vec<Line<'static>> {
match parsed.is_empty() {
true => new_exec_command_generic(command, output, start_time),
false => new_parsed_command(command, parsed, output, start_time),
true => new_exec_command_generic(command, output, start_time, include_header),
false => new_parsed_command(command, parsed, output, start_time, include_header),
}
}
fn new_parsed_command(
command: &[String],
_command: &[String],
parsed_commands: &[ParsedCommand],
output: Option<&CommandOutput>,
start_time: Option<Instant>,
include_header: bool,
) -> Vec<Line<'static>> {
let mut lines: Vec<Line> = Vec::new();
match output {
None => {
let mut spans = vec!["⚙︎ Working".magenta().bold()];
if let Some(st) = start_time {
let dur = exec_duration(st);
spans.push(format!("{dur}").dim());
}
lines.push(Line::from(spans));
}
Some(o) if o.exit_code == 0 => {
lines.push(Line::from(vec!["".green(), " Completed".into()]));
}
Some(o) => {
lines.push(Line::from(vec![
"".red(),
format!(" Failed (exit {})", o.exit_code).into(),
]));
}
};
// Optionally include the complete, unaltered command from the model.
if std::env::var("SHOW_FULL_COMMANDS")
.map(|v| !v.is_empty())
.unwrap_or(false)
{
let full_cmd = shlex_try_join(command.iter().map(|s| s.as_str()))
.unwrap_or_else(|_| command.join(" "));
lines.push(Line::from(vec![
Span::styled("", Style::default().add_modifier(Modifier::DIM)),
Span::styled(
full_cmd,
Style::default()
.add_modifier(Modifier::DIM)
.add_modifier(Modifier::ITALIC),
),
]));
// Leading spacer and header line above command list
if include_header {
lines.push(Line::from(""));
lines.push(Line::from(">_".magenta()));
}
for (i, parsed) in parsed_commands.iter().enumerate() {
// Determine the leading status marker: spinner while running, ✓ on success, ✗ on failure.
let status_marker: Span<'static> = match output {
None => {
// Animated braille spinner: choose the frame based on elapsed time.
const FRAMES: &[char] = &['⠋', '⠙', '⠹', '⠸', '⠼', '⠴', '⠦', '⠧', '⠇', '⠏'];
let idx = start_time
.map(|st| ((st.elapsed().as_millis() / 100) as usize) % FRAMES.len())
.unwrap_or(0);
let ch = FRAMES[idx];
Span::raw(format!("{ch}"))
}
Some(o) if o.exit_code == 0 => Span::styled("✓", Style::default().fg(Color::Green)),
Some(_) => Span::styled("✗", Style::default().fg(Color::Red)),
};
for parsed in parsed_commands.iter() {
let text = match parsed {
ParsedCommand::Read { name, .. } => format!("📖 {name}"),
ParsedCommand::ListFiles { cmd, path } => match path {
@@ -323,19 +362,25 @@ fn new_parsed_command(
ParsedCommand::Unknown { cmd } => format!("⌨️ {cmd}"),
ParsedCommand::Noop { cmd } => format!("🔄 {cmd}"),
};
let first_prefix = if i == 0 { "" } else { " " };
// Prefix: two spaces, marker, space. Continuations align under the text block.
for (j, line_text) in text.lines().enumerate() {
let prefix = if j == 0 { first_prefix } else { " " };
lines.push(Line::from(vec![
Span::styled(prefix, Style::default().add_modifier(Modifier::DIM)),
line_text.to_string().dim(),
]));
if j == 0 {
lines.push(Line::from(vec![
" ".into(),
status_marker.clone(),
" ".into(),
line_text.to_string().light_blue(),
]));
} else {
lines.push(Line::from(vec![
" ".into(),
line_text.to_string().light_blue(),
]));
}
}
}
lines.extend(output_lines(output, true, false));
lines.push(Line::from(""));
lines
}
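The spinner arithmetic above is easy to gloss over; here is a deterministic, std-only sketch of the frame selection (one braille frame per 100 ms, wrapping around the frame set). The helper name is illustrative only.

// One frame per 100 ms of elapsed time, wrapping around the ten braille frames.
const FRAMES: &[char] = &['⠋', '⠙', '⠹', '⠸', '⠼', '⠴', '⠦', '⠧', '⠇', '⠏'];

fn frame_for_elapsed_ms(elapsed_ms: u128) -> char {
    FRAMES[((elapsed_ms / 100) as usize) % FRAMES.len()]
}

fn main() {
    assert_eq!(frame_for_elapsed_ms(0), '⠋');
    assert_eq!(frame_for_elapsed_ms(350), '⠸');    // fourth frame after 350 ms
    assert_eq!(frame_for_elapsed_ms(1_000), '⠋');  // wraps after one full cycle
    println!("spinner frames ok");
}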
@@ -344,29 +389,44 @@ fn new_exec_command_generic(
command: &[String],
output: Option<&CommandOutput>,
start_time: Option<Instant>,
include_header: bool,
) -> Vec<Line<'static>> {
let mut lines: Vec<Line<'static>> = Vec::new();
let command_escaped = strip_bash_lc_and_escape(command);
let mut cmd_lines = command_escaped.lines();
if let Some(first) = cmd_lines.next() {
let mut spans: Vec<Span> = vec!["⚡ Running".magenta()];
if let Some(st) = start_time {
let dur = exec_duration(st);
spans.push(format!("{dur}").dim());
}
spans.push(" ".into());
spans.push(first.to_string().into());
lines.push(Line::from(spans));
} else {
let mut spans: Vec<Span> = vec!["⚡ Running".magenta()];
if let Some(st) = start_time {
let dur = exec_duration(st);
spans.push(format!("{dur}").dim());
}
lines.push(Line::from(spans));
// Leading spacer and header line above command list
if include_header {
lines.push(Line::from(""));
lines.push(Line::from(">_".magenta()));
}
for cont in cmd_lines {
lines.push(Line::from(cont.to_string()));
let command_escaped = strip_bash_lc_and_escape(command);
// Determine marker: spinner while running, ✓/✗ when completed
let status_marker: Span<'static> = match output {
None => {
const FRAMES: &[char] = &['⠋', '⠙', '⠹', '⠸', '⠼', '⠴', '⠦', '⠧', '⠇', '⠏'];
let idx = start_time
.map(|st| ((st.elapsed().as_millis() / 100) as usize) % FRAMES.len())
.unwrap_or(0);
let ch = FRAMES[idx];
Span::raw(format!("{ch}"))
}
Some(o) if o.exit_code == 0 => Span::styled("✓", Style::default().fg(Color::Green)),
Some(_) => Span::styled("✗", Style::default().fg(Color::Red)),
};
for (i, line) in command_escaped.lines().enumerate() {
if i == 0 {
lines.push(Line::from(vec![
Span::raw(" "),
status_marker.clone(),
Span::raw(" "),
Span::raw(line.to_string()),
]));
} else {
lines.push(Line::from(vec![
Span::styled(" ", Style::default().add_modifier(Modifier::DIM)),
Span::raw(line.to_string()),
]));
}
}
lines.extend(output_lines(output, false, true));
@@ -377,9 +437,9 @@ fn new_exec_command_generic(
pub(crate) fn new_active_mcp_tool_call(invocation: McpInvocation) -> PlainHistoryCell {
let title_line = Line::from(vec!["tool".magenta(), " running...".dim()]);
let lines: Vec<Line> = vec![
Line::from(""),
title_line,
format_mcp_invocation(invocation.clone()),
Line::from(""),
];
PlainHistoryCell { lines }
@@ -489,8 +549,6 @@ pub(crate) fn new_completed_mcp_tool_call(
));
}
}
lines.push(Line::from(""));
}
Err(e) => {
lines.push(Line::from(vec![
@@ -503,6 +561,8 @@ pub(crate) fn new_completed_mcp_tool_call(
}
};
// Leading blank separator at the start of this cell
lines.insert(0, Line::from(""));
Box::new(PlainHistoryCell { lines })
}
@@ -512,6 +572,7 @@ pub(crate) fn new_status_output(
session_id: &Option<Uuid>,
) -> PlainHistoryCell {
let mut lines: Vec<Line<'static>> = Vec::new();
lines.push(Line::from(""));
lines.push(Line::from("/status".magenta()));
let config_entries = create_config_summary_entries(config);
@@ -596,8 +657,6 @@ pub(crate) fn new_status_output(
]));
}
lines.push(Line::from(""));
// 👤 Account (only if ChatGPT tokens exist), shown under the first block
let auth_file = get_auth_file(&config.codex_home);
if let Ok(auth) = try_read_auth_json(&auth_file)
@@ -688,13 +747,19 @@ pub(crate) fn new_status_output(
usage.blended_total().to_string().into(),
]));
lines.push(Line::from(""));
PlainHistoryCell { lines }
}
/// Simple one-line log entry (dim) to surface traces and diagnostics in the transcript.
pub(crate) fn new_log_line(message: String) -> TranscriptOnlyHistoryCell {
let lines: Vec<Line<'static>> = vec![Line::from(""), Line::from(message).dim()];
TranscriptOnlyHistoryCell { lines }
}
/// Render a summary of configured MCP servers from the current `Config`.
pub(crate) fn empty_mcp_output() -> PlainHistoryCell {
let lines: Vec<Line<'static>> = vec![
Line::from(""),
Line::from("/mcp".magenta()),
Line::from(""),
Line::from(vec!["🔌 ".into(), "MCP Tools".bold()]),
@@ -709,7 +774,6 @@ pub(crate) fn empty_mcp_output() -> PlainHistoryCell {
" to configure them.".into(),
])
.style(Style::default().add_modifier(Modifier::DIM)),
Line::from(""),
];
PlainHistoryCell { lines }
@@ -782,7 +846,7 @@ pub(crate) fn new_mcp_tools_output(
}
pub(crate) fn new_error_event(message: String) -> PlainHistoryCell {
let lines: Vec<Line<'static>> = vec![vec!["🖐 ".red().bold(), message.into()].into(), "".into()];
let lines: Vec<Line<'static>> = vec!["".into(), vec!["🖐 ".red().bold(), message.into()].into()];
PlainHistoryCell { lines }
}
@@ -797,6 +861,8 @@ pub(crate) fn new_plan_update(update: UpdatePlanArgs) -> PlainHistoryCell {
let UpdatePlanArgs { explanation, plan } = update;
let mut lines: Vec<Line<'static>> = Vec::new();
// Leading blank for separation
lines.push(Line::from(""));
// Header with progress summary
let total = plan.len();
let completed = plan
@@ -887,8 +953,6 @@ pub(crate) fn new_plan_update(update: UpdatePlanArgs) -> PlainHistoryCell {
}
}
lines.push(Line::from(""));
PlainHistoryCell { lines }
}
@@ -908,16 +972,16 @@ pub(crate) fn new_patch_event(
auto_approved: false,
} => {
let lines: Vec<Line<'static>> = vec![
Line::from("✏️ Applying patch".magenta().bold()),
Line::from(""),
Line::from("✏️ Applying patch".magenta().bold()),
];
return PlainHistoryCell { lines };
}
};
let mut lines: Vec<Line<'static>> = create_diff_summary(title, &changes, event_type);
lines.push(Line::from(""));
// Add leading blank separator for the cell
lines.insert(0, Line::from(""));
PlainHistoryCell { lines }
}
@@ -934,14 +998,15 @@ pub(crate) fn new_patch_apply_failure(stderr: String) -> PlainHistoryCell {
exit_code: 1,
stdout: String::new(),
stderr,
formatted_output: String::new(),
}),
true,
true,
));
}
lines.push(Line::from(""));
// Leading blank separator
lines.insert(0, Line::from(""));
PlainHistoryCell { lines }
}
@@ -988,9 +1053,8 @@ pub(crate) fn new_patch_apply_success(stdout: String) -> PlainHistoryCell {
lines.push(Line::from(format!("... +{remaining} lines")).dim());
}
}
lines.push(Line::from(""));
// Leading blank separator
lines.insert(0, Line::from(""));
PlainHistoryCell { lines }
}
@@ -999,9 +1063,9 @@ pub(crate) fn new_reasoning_block(
config: &Config,
) -> TranscriptOnlyHistoryCell {
let mut lines: Vec<Line<'static>> = Vec::new();
lines.push(Line::from(""));
lines.push(Line::from("thinking".magenta().italic()));
append_markdown(&full_reasoning_buffer, &mut lines, config);
lines.push(Line::from(""));
TranscriptOnlyHistoryCell { lines }
}
@@ -1014,6 +1078,7 @@ fn output_lines(
exit_code,
stdout,
stderr,
..
} = match output {
Some(output) if only_err && output.exit_code == 0 => return vec![],
Some(output) => output,
@@ -1096,9 +1161,14 @@ mod tests {
let parsed = vec![ParsedCommand::Unknown {
cmd: "printf 'foo\nbar'".to_string(),
}];
let lines = exec_command_lines(&[], &parsed, None, None);
assert!(lines.len() >= 3);
assert_eq!(lines[1].spans[0].content, "");
assert_eq!(lines[2].spans[0].content, " ");
let lines = exec_command_lines(&[], &parsed, None, None, true);
assert!(lines.len() >= 4);
// Leading spacer then header line
assert!(lines[0].spans.is_empty() || lines[0].spans[0].content.is_empty());
assert_eq!(lines[1].spans[0].content, ">_");
// First rendered command line starts with two-space + marker.
assert_eq!(lines[2].spans[0].content, " ");
// Continuation lines align under the text block.
assert_eq!(lines[3].spans[0].content, " ");
}
}


@@ -124,6 +124,7 @@ pub async fn run_main(
config_profile: cli.config_profile.clone(),
codex_linux_sandbox_exe,
base_instructions: None,
include_subagent_tool: None,
include_plan_tool: Some(true),
include_apply_patch_tool: None,
disable_response_storage: cli.oss.then_some(true),


@@ -34,12 +34,3 @@ pub fn is_blank_line_spaces_only(line: &Line<'_>) -> bool {
.iter()
.all(|s| s.content.is_empty() || s.content.chars().all(|c| c == ' '))
}
/// Consider a line blank if its spans are empty or all span contents are
/// whitespace when trimmed.
pub fn is_blank_line_trim(line: &Line<'_>) -> bool {
if line.spans.is_empty() {
return true;
}
line.spans.iter().all(|s| s.content.trim().is_empty())
}


@@ -70,17 +70,6 @@ impl StreamController {
self.header.maybe_emit(out_lines)
}
#[inline]
fn ensure_single_trailing_blank(lines: &mut Lines) {
if lines
.last()
.map(|l| !crate::render::line_utils::is_blank_line_trim(l))
.unwrap_or(true)
{
lines.push(Line::from(""));
}
}
/// Begin an answer stream. Does not emit header yet; it is emitted on first commit.
pub(crate) fn begin(&mut self, _sink: &impl HistorySink) {
// Starting a new stream cancels any pending finish-from-previous-stream animation.
@@ -138,7 +127,6 @@ impl StreamController {
let mut lines_with_header: Lines = Vec::new();
self.emit_header_if_needed(&mut lines_with_header);
lines_with_header.extend(out_lines);
Self::ensure_single_trailing_blank(&mut lines_with_header);
sink.insert_history(lines_with_header);
}


@@ -64,6 +64,8 @@ impl HeaderEmitter {
pub(crate) fn maybe_emit(&mut self, out_lines: &mut Vec<ratatui::text::Line<'static>>) -> bool {
if !self.emitted_in_stream && !self.emitted_this_turn {
// Add a leading blank line before the header for visual spacing
out_lines.push(ratatui::text::Line::from(""));
out_lines.push(render_header_line());
self.emitted_in_stream = true;
self.emitted_this_turn = true;


@@ -28,7 +28,7 @@ impl TranscriptApp {
pub(crate) fn new(transcript_lines: Vec<Line<'static>>) -> Self {
Self {
transcript_lines,
scroll_offset: 0,
scroll_offset: usize::MAX,
is_done: false,
title: "T R A N S C R I P T".to_string(),
}
@@ -105,7 +105,7 @@ impl TranscriptApp {
self.scroll_offset = self.scroll_offset.saturating_sub(area.height as usize);
}
KeyEvent {
code: KeyCode::PageDown,
code: KeyCode::PageDown | KeyCode::Char(' '),
kind: KeyEventKind::Press | KeyEventKind::Repeat,
..
} => {

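Setting `scroll_offset` to `usize::MAX` opens the transcript at the bottom only if the renderer clamps the offset to the last scrollable position, which this hunk does not show; the std-only sketch below is illustrative under that assumption, and the helper name is hypothetical.

// Why an offset of usize::MAX lands at the bottom, assuming the renderer clamps it.
fn clamped_offset(scroll_offset: usize, total_lines: usize, viewport_height: usize) -> usize {
    let max_offset = total_lines.saturating_sub(viewport_height);
    scroll_offset.min(max_offset)
}

fn main() {
    // 100 transcript lines in a 20-row viewport: a fresh pager starts at line 80.
    assert_eq!(clamped_offset(usize::MAX, 100, 20), 80);
    // Page Up from the bottom then scrolls normally via saturating_sub.
    assert_eq!(80usize.saturating_sub(20), 60);
    println!("scroll ok");
}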

@@ -138,6 +138,12 @@ enum ResumeAction {
RestoreAlt = 2,
}
#[cfg(unix)]
enum PreparedResumeAction {
RestoreAltScreen,
RealignViewport(ratatui::layout::Rect),
}
#[cfg(unix)]
fn take_resume_action(pending: &AtomicU8) -> ResumeAction {
match pending.swap(ResumeAction::None as u8, Ordering::Relaxed) {
@@ -333,23 +339,37 @@ impl Tui {
}
#[cfg(unix)]
fn apply_resume_action(&mut self, action: ResumeAction) -> Result<()> {
fn prepare_resume_action(
&mut self,
action: ResumeAction,
) -> Result<Option<PreparedResumeAction>> {
match action {
ResumeAction::RealignInline => {
let cursor_pos = self.terminal.get_cursor_position()?;
self.terminal
.set_viewport_area(ratatui::layout::Rect::new(0, cursor_pos.y, 0, 0));
Ok(Some(PreparedResumeAction::RealignViewport(
ratatui::layout::Rect::new(0, cursor_pos.y, 0, 0),
)))
}
ResumeAction::RestoreAlt => {
// When we're resuming from alt screen, we need to save what the cursor position
// _was_ when we resumed. That way, when we leave the alt screen, we can restore
// the cursor to the new position.
if let Ok((_x, y)) = crossterm::cursor::position()
&& let Some(saved) = self.alt_saved_viewport.as_mut()
{
saved.y = y;
}
let _ = execute!(self.terminal.backend_mut(), EnterAlternateScreen);
Ok(Some(PreparedResumeAction::RestoreAltScreen))
}
ResumeAction::None => Ok(None),
}
}
#[cfg(unix)]
fn apply_prepared_resume_action(&mut self, prepared: PreparedResumeAction) -> Result<()> {
match prepared {
PreparedResumeAction::RealignViewport(area) => {
self.terminal.set_viewport_area(area);
}
PreparedResumeAction::RestoreAltScreen => {
execute!(self.terminal.backend_mut(), EnterAlternateScreen)?;
if let Ok(size) = self.terminal.size() {
self.terminal.set_viewport_area(ratatui::layout::Rect::new(
0,
@@ -360,13 +380,10 @@ impl Tui {
self.terminal.clear()?;
}
}
ResumeAction::None => {}
}
Ok(())
}
// Public suspend() removed; Ctrl+Z is handled internally via event_stream + draw.
/// Enter alternate screen and expand the viewport to full terminal size, saving the current
/// inline viewport for restoration when leaving.
pub fn enter_alt_screen(&mut self) -> Result<()> {
@@ -405,12 +422,13 @@ impl Tui {
height: u16,
draw_fn: impl FnOnce(&mut custom_terminal::Frame),
) -> Result<()> {
std::io::stdout().sync_update(|_| {
#[cfg(unix)]
{
// Apply any post-resume action before layout/clear/draw.
self.apply_resume_action(take_resume_action(&self.resume_pending))?;
}
// Precompute any viewport updates that need a cursor-position query before entering
// the synchronized update, to avoid racing with the event reader.
let mut pending_viewport_area: Option<ratatui::layout::Rect> = None;
#[cfg(unix)]
let mut prepared_resume =
self.prepare_resume_action(take_resume_action(&self.resume_pending))?;
{
let terminal = &mut self.terminal;
let screen_size = terminal.size()?;
let last_known_screen_size = terminal.last_known_screen_size;
@@ -419,15 +437,27 @@ impl Tui {
let last_known_cursor_pos = terminal.last_known_cursor_pos;
if cursor_pos.y != last_known_cursor_pos.y {
let cursor_delta = cursor_pos.y as i32 - last_known_cursor_pos.y as i32;
let new_viewport_area = terminal.viewport_area.offset(Offset {
x: 0,
y: cursor_delta,
});
terminal.set_viewport_area(new_viewport_area);
terminal.clear()?;
pending_viewport_area = Some(new_viewport_area);
}
}
}
std::io::stdout().sync_update(|_| {
#[cfg(unix)]
{
if let Some(prepared) = prepared_resume.take() {
self.apply_prepared_resume_action(prepared)?;
}
}
let terminal = &mut self.terminal;
if let Some(new_area) = pending_viewport_area.take() {
terminal.set_viewport_area(new_area);
terminal.clear()?;
}
let size = terminal.size()?;
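A std-only sketch of the two-phase pattern these hunks introduce: anything that needs a terminal query (such as a cursor-position read answered through the event reader) runs before the synchronized update, and the update itself only applies precomputed results. Every name here is a stand-in, not the real Tui or crossterm API.

enum PreparedAction {
    RealignViewport { row: u16 },
    RestoreAltScreen,
}

// Phase 1: may round-trip through the terminal; runs before the synchronized update.
fn prepare(resuming_from_alt: bool, cursor_row_from_query: u16) -> Option<PreparedAction> {
    if resuming_from_alt {
        Some(PreparedAction::RestoreAltScreen)
    } else {
        Some(PreparedAction::RealignViewport { row: cursor_row_from_query })
    }
}

// Phase 2: runs inside the synchronized update; no terminal queries in here.
fn apply_inside_sync_update(prepared: Option<PreparedAction>) {
    match prepared {
        Some(PreparedAction::RealignViewport { row }) => println!("set viewport to row {row}"),
        Some(PreparedAction::RestoreAltScreen) => println!("re-enter the alternate screen"),
        None => {}
    }
}

fn main() {
    apply_inside_sync_update(prepare(false, 12));
    apply_inside_sync_update(prepare(true, 0));
}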


@@ -258,7 +258,7 @@ impl UserApprovalWidget {
}
fn send_decision_with_feedback(&mut self, decision: ReviewDecision, feedback: String) {
let mut lines: Vec<Line<'static>> = Vec::new();
let mut lines: Vec<Line<'static>> = vec![Line::from("")];
match &self.approval_request {
ApprovalRequest::Exec { command, .. } => {
let cmd = strip_bash_lc_and_escape(command);
@@ -327,7 +327,6 @@ impl UserApprovalWidget {
lines.push(Line::from(l.to_string()));
}
}
lines.push(Line::from(""));
self.app_event_tx.send(AppEvent::InsertHistoryLines(lines));
let op = match &self.approval_request {


@@ -9,7 +9,8 @@ codex
I'm going to scan the workspace and Cargo manifests to see build profiles and
dependencies that impact binary size. Then I'll summarize the main causes.
⚡ Running ls -la
>_
✓ ls -la
└ total 6696
drwxr-xr-x@ 39 easong staff 1248 Aug 9 08:49 .
drwxr-xr-x@ 29 easong staff 928 Aug 9 08:50 ..
@@ -21,7 +22,7 @@ dependencies that impact binary size. Then I'll summarize the main causes.
drwxr-xr-x@ 6 easong staff 192 Aug 7 19:20 tui
-rw-r--r--@ 1 easong staff 8639 Aug 6 23:30 typewriter.md
-rw-r--r--@ 1 easong staff 7673 Aug 6 21:58 wrap-plan.md
Running sed -n '1,200p' Cargo.toml
sed -n '1,200p' Cargo.toml
└ [workspace]
members = [
"ansi-escape",
@@ -34,7 +35,7 @@ dependencies that impact binary size. Then I'll summarize the main causes.
# ratatui = { path = "../../ratatui" }
ratatui = { git = "https://github.com/nornagon/ratatui", branch =
"nornagon-v0.29.0-patch" }
Running for d in ansi-escape apply-patch arg0 cli common core exec execpolicy
for d in ansi-escape apply-patch arg0 cli common core exec execpolicy
file-search linux-sandbox login mcp-client mcp-server mcp-types ollama tui; do
echo "--- $d/Cargo.toml"; sed -n '1,200p' $d/Cargo.toml; echo; done
└ --- ansi-escape/Cargo.toml
@@ -48,7 +49,7 @@ echo "--- $d/Cargo.toml"; sed -n '1,200p' $d/Cargo.toml; echo; done
"signal",
] }
tracing = { version
Running sed -n '1,200p' execpolicy/Cargo.toml
sed -n '1,200p' execpolicy/Cargo.toml
└ [package]
name = "codex-execpolicy"
version = { workspace = true }
@@ -60,7 +61,7 @@ echo "--- $d/Cargo.toml"; sed -n '1,200p' $d/Cargo.toml; echo; done
[dev-dependencies]
tempfile = "3.13.0"
Running sed -n '1,200p' file-search/Cargo.toml
sed -n '1,200p' file-search/Cargo.toml
└ [package]
edition = "2024"
name = "codex-file-search"
@@ -72,7 +73,7 @@ echo "--- $d/Cargo.toml"; sed -n '1,200p' $d/Cargo.toml; echo; done
serde = { version = "1", features = ["derive"] }
serde_json = "1.0.142"
tokio = { version = "1", features = ["full"] }
Running sed -n '1,200p' linux-sandbox/Cargo.toml
sed -n '1,200p' linux-sandbox/Cargo.toml
└ [package]
edition = "2024"
name = "codex-linux-sandbox"
@@ -84,7 +85,7 @@ echo "--- $d/Cargo.toml"; sed -n '1,200p' $d/Cargo.toml; echo; done
"rt-multi-thread",
"signal",
] }
Running sed -n '1,200p' login/Cargo.toml
sed -n '1,200p' login/Cargo.toml
└ [package]
edition = "2024"
name = "codex-login"
@@ -96,7 +97,7 @@ echo "--- $d/Cargo.toml"; sed -n '1,200p' $d/Cargo.toml; echo; done
[dev-dependencies]
pretty_assertions = "1.4.1"
tempfile = "3"
Running sed -n '1,200p' mcp-client/Cargo.toml
sed -n '1,200p' mcp-client/Cargo.toml
└ [package]
name = "codex-mcp-client"
version = { workspace = true }
@@ -108,7 +109,7 @@ echo "--- $d/Cargo.toml"; sed -n '1,200p' $d/Cargo.toml; echo; done
"sync",
"time",
] }
Running sed -n '1,200p' mcp-server/Cargo.toml
sed -n '1,200p' mcp-server/Cargo.toml
└ [package]
edition = "2024"
name = "codex-mcp-server"
@@ -120,7 +121,7 @@ echo "--- $d/Cargo.toml"; sed -n '1,200p' $d/Cargo.toml; echo; done
tempfile = "3"
tokio-test = "0.4"
wiremock = "0.6"
Running sed -n '1,200p' mcp-types/Cargo.toml
sed -n '1,200p' mcp-types/Cargo.toml
└ [package]
edition = "2024"
name = "mcp-types"
@@ -132,7 +133,7 @@ echo "--- $d/Cargo.toml"; sed -n '1,200p' $d/Cargo.toml; echo; done
[dependencies]
serde = { version = "1", features = ["derive"] }
serde_json = "1"
Running sed -n '1,200p' ollama/Cargo.toml
sed -n '1,200p' ollama/Cargo.toml
└ [package]
edition = "2024"
name = "codex-ollama"
@@ -144,7 +145,7 @@ echo "--- $d/Cargo.toml"; sed -n '1,200p' $d/Cargo.toml; echo; done
[dev-dependencies]
tempfile = "3"
Running sed -n '1,200p' tui/Cargo.toml
sed -n '1,200p' tui/Cargo.toml
└ [package]
edition = "2024"
name = "codex-tui"
@@ -156,7 +157,7 @@ echo "--- $d/Cargo.toml"; sed -n '1,200p' $d/Cargo.toml; echo; done
pretty_assertions = "1"
rand = "0.8"
vt100 = "0.16.2"
Running sed -n '1,200p' chatgpt/Cargo.toml
sed -n '1,200p' chatgpt/Cargo.toml
└ [package]
edition = "2024"
name = "codex-chatgpt"
@@ -168,6 +169,7 @@ echo "--- $d/Cargo.toml"; sed -n '1,200p' $d/Cargo.toml; echo; done
[dev-dependencies]
tempfile = "3"
codex
Here's what's driving size in this workspace's binaries.
@@ -203,4 +205,4 @@ assertions—outputs are much larger than cargo build --release.
If you want, I can outline targeted trims (e.g., strip = "debuginfo", opt-level
= "z", panic abort, tighter tokio/reqwest features) and estimate impact per
binary.
binary.