Compare commits


21 Commits

Author SHA1 Message Date
Ahmed Ibrahim 5bbf94bd93 subagents 2025-08-23 11:31:52 -07:00
Ahmed Ibrahim 76c209d78c progress 2025-08-23 11:07:02 -07:00
Ahmed Ibrahim d2fe780280 Merge branch 'aggregate-out-err' of https://github.com/openai/codex into aggregate-out-err 2025-08-23 09:47:51 -07:00
Ahmed Ibrahim e172014062 commit 2025-08-23 09:46:54 -07:00
Ahmed Ibrahim d4cb5fcdbd Update codex-rs/core/src/codex.rs (Co-authored-by: Gabriel Peal <gpeal@users.noreply.github.com>) 2025-08-23 09:01:44 -07:00
Ahmed Ibrahim 549a5de99a Update codex-rs/core/src/codex.rs (Co-authored-by: Gabriel Peal <gpeal@users.noreply.github.com>) 2025-08-23 09:01:15 -07:00
Ahmed Ibrahim e0418bf4b9 rust 2025-08-22 20:08:51 -07:00
Ahmed Ibrahim 07c8dbc94d rust 2025-08-22 20:05:52 -07:00
Ahmed Ibrahim bb9be76328 rust 2025-08-22 20:02:28 -07:00
Ahmed Ibrahim b277a654fa rust 2025-08-22 19:54:58 -07:00
Ahmed Ibrahim 8752a9b049 rust 2025-08-22 19:51:45 -07:00
Ahmed Ibrahim 5af5856848 rust 2025-08-22 19:41:13 -07:00
Ahmed Ibrahim 16882fa090 rust 2025-08-22 19:38:32 -07:00
Ahmed Ibrahim 366d0738a4 rust 2025-08-22 19:35:02 -07:00
Ahmed Ibrahim 032f14aec8 aggregate-out-err 2025-08-22 18:13:37 -07:00
Ahmed Ibrahim 6ef0c2e8e7 aggregate-out-err 2025-08-22 18:13:03 -07:00
Ahmed Ibrahim 5db76dc66e dead code 2025-08-22 18:12:12 -07:00
Ahmed Ibrahim 1a04fa0379 cap to full 2025-08-22 18:08:52 -07:00
Ahmed Ibrahim a5c14eb8c0 test 2025-08-22 18:01:06 -07:00
Ahmed Ibrahim cd610fd409 tests 2025-08-22 18:00:11 -07:00
Ahmed Ibrahim 35130cf21b send-aggregated output 2025-08-22 17:58:33 -07:00
46 changed files with 871 additions and 1331 deletions

View File

@@ -95,7 +95,7 @@ jobs:
sudo apt install -y musl-tools pkg-config
- name: Cargo build
run: cargo build --target ${{ matrix.target }} --release --bin codex --bin apply-patch
run: cargo build --target ${{ matrix.target }} --release --bin codex
- name: Stage artifacts
shell: bash
@@ -105,10 +105,8 @@ jobs:
if [[ "${{ matrix.runner }}" == windows* ]]; then
cp target/${{ matrix.target }}/release/codex.exe "$dest/codex-${{ matrix.target }}.exe"
cp target/${{ matrix.target }}/release/apply-patch.exe "$dest/apply-patch-${{ matrix.target }}.exe"
else
cp target/${{ matrix.target }}/release/codex "$dest/codex-${{ matrix.target }}"
cp target/${{ matrix.target }}/release/apply-patch "$dest/apply-patch-${{ matrix.target }}"
fi
- name: Compress artifacts

View File

@@ -2,9 +2,6 @@
// Unified entry point for the Codex CLI.
import path from "path";
import os from "os";
import fs from "fs";
import { createRequire } from "module";
import { fileURLToPath } from "url";
// __dirname equivalent in ESM
@@ -59,9 +56,7 @@ if (!targetTriple) {
throw new Error(`Unsupported platform: ${platform} (${arch})`);
}
const pkgRoot = path.join(__dirname, "..");
const pkgBinDir = path.join(pkgRoot, "bin");
const binaryPath = path.join(pkgBinDir, `codex-${targetTriple}`);
const binaryPath = path.join(__dirname, "..", "bin", `codex-${targetTriple}`);
// Use an asynchronous spawn instead of spawnSync so that Node is able to
// respond to signals (e.g. Ctrl-C / SIGINT) while the native binary is
@@ -98,35 +93,10 @@ function getUpdatedPath(newDirs) {
}
const additionalDirs = [];
// 1) Make packaged bin directory available on PATH for any helper binaries.
additionalDirs.push(pkgBinDir);
const rgDir = await resolveRgDir();
if (rgDir) {
additionalDirs.push(rgDir);
}
// 2) Ensure an `apply_patch` helper exists in $CODEX_HOME/<version>/ and add that directory to PATH.
try {
const require = createRequire(import.meta.url);
// Load package.json to read the version string.
const { version } = require("../package.json");
const codexHome = process.env.CODEX_HOME || path.join(os.homedir(), ".codex");
const versionDir = path.join(codexHome, version);
fs.mkdirSync(versionDir, { recursive: true });
const isWindows = platform === "win32";
const destName = isWindows ? "apply_patch.exe" : "apply_patch";
const destPath = path.join(versionDir, destName);
const srcPath = path.join(pkgBinDir, `apply-patch-${targetTriple}`);
// Only copy if missing; keep it simple and fast.
if (!fs.existsSync(destPath)) {
fs.copyFileSync(srcPath, destPath);
if (!isWindows) {
fs.chmodSync(destPath, 0o755);
}
}
additionalDirs.push(versionDir);
} catch {
// Best-effort: if anything fails, continue without the helper.
}
const updatedPath = getUpdatedPath(additionalDirs);
const child = spawn(binaryPath, process.argv.slice(2), {

View File

@@ -75,32 +75,17 @@ gh run download --dir "$ARTIFACTS_DIR" --repo openai/codex "$WORKFLOW_ID"
# x64 Linux
zstd -d "$ARTIFACTS_DIR/x86_64-unknown-linux-musl/codex-x86_64-unknown-linux-musl.zst" \
-o "$BIN_DIR/codex-x86_64-unknown-linux-musl"
if [ -f "$ARTIFACTS_DIR/x86_64-unknown-linux-musl/apply-patch-x86_64-unknown-linux-musl.zst" ]; then
zstd -d "$ARTIFACTS_DIR/x86_64-unknown-linux-musl/apply-patch-x86_64-unknown-linux-musl.zst" -o "$BIN_DIR/apply-patch-x86_64-unknown-linux-musl"
fi
# ARM64 Linux
zstd -d "$ARTIFACTS_DIR/aarch64-unknown-linux-musl/codex-aarch64-unknown-linux-musl.zst" \
-o "$BIN_DIR/codex-aarch64-unknown-linux-musl"
if [ -f "$ARTIFACTS_DIR/aarch64-unknown-linux-musl/apply-patch-aarch64-unknown-linux-musl.zst" ]; then
zstd -d "$ARTIFACTS_DIR/aarch64-unknown-linux-musl/apply-patch-aarch64-unknown-linux-musl.zst" -o "$BIN_DIR/apply-patch-aarch64-unknown-linux-musl"
fi
# x64 macOS
zstd -d "$ARTIFACTS_DIR/x86_64-apple-darwin/codex-x86_64-apple-darwin.zst" \
-o "$BIN_DIR/codex-x86_64-apple-darwin"
if [ -f "$ARTIFACTS_DIR/x86_64-apple-darwin/apply-patch-x86_64-apple-darwin.zst" ]; then
zstd -d "$ARTIFACTS_DIR/x86_64-apple-darwin/apply-patch-x86_64-apple-darwin.zst" -o "$BIN_DIR/apply-patch-x86_64-apple-darwin"
fi
# ARM64 macOS
zstd -d "$ARTIFACTS_DIR/aarch64-apple-darwin/codex-aarch64-apple-darwin.zst" \
-o "$BIN_DIR/codex-aarch64-apple-darwin"
if [ -f "$ARTIFACTS_DIR/aarch64-apple-darwin/apply-patch-aarch64-apple-darwin.zst" ]; then
zstd -d "$ARTIFACTS_DIR/aarch64-apple-darwin/apply-patch-aarch64-apple-darwin.zst" -o "$BIN_DIR/apply-patch-aarch64-apple-darwin"
fi
# x64 Windows
zstd -d "$ARTIFACTS_DIR/x86_64-pc-windows-msvc/codex-x86_64-pc-windows-msvc.exe.zst" \
-o "$BIN_DIR/codex-x86_64-pc-windows-msvc.exe"
if [ -f "$ARTIFACTS_DIR/x86_64-pc-windows-msvc/apply-patch-x86_64-pc-windows-msvc.exe.zst" ]; then
zstd -d "$ARTIFACTS_DIR/x86_64-pc-windows-msvc/apply-patch-x86_64-pc-windows-msvc.exe.zst" -o "$BIN_DIR/apply-patch-x86_64-pc-windows-msvc.exe"
fi
echo "Installed native dependencies into $BIN_DIR"

codex-rs/Cargo.lock generated
View File

@@ -635,7 +635,6 @@ name = "codex-apply-patch"
version = "0.0.0"
dependencies = [
"anyhow",
"assert_cmd",
"pretty_assertions",
"similar",
"tempfile",
@@ -732,7 +731,6 @@ dependencies = [
"mime_guess",
"openssl-sys",
"os_info",
"portable-pty",
"predicates",
"pretty_assertions",
"rand 0.9.2",
@@ -1481,12 +1479,6 @@ version = "0.15.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1aaf95b3e5c8f23aa320147307562d361db0ae0d51242340f558153b4eb2439b"
[[package]]
name = "downcast-rs"
version = "1.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "75b325c5dbd37f80359721ad39aca5a29fb04c89279657cffdda8736d0c0b9d2"
[[package]]
name = "dupe"
version = "0.9.1"
@@ -1732,17 +1724,6 @@ dependencies = [
"simd-adler32",
]
[[package]]
name = "filedescriptor"
version = "0.8.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e40758ed24c9b2eeb76c35fb0aebc66c626084edd827e07e1552279814c6682d"
dependencies = [
"libc",
"thiserror 1.0.69",
"winapi",
]
[[package]]
name = "fixedbitset"
version = "0.4.2"
@@ -3458,27 +3439,6 @@ dependencies = [
"portable-atomic",
]
[[package]]
name = "portable-pty"
version = "0.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b4a596a2b3d2752d94f51fac2d4a96737b8705dddd311a32b9af47211f08671e"
dependencies = [
"anyhow",
"bitflags 1.3.2",
"downcast-rs",
"filedescriptor",
"lazy_static",
"libc",
"log",
"nix",
"serial2",
"shared_library",
"shell-words",
"winapi",
"winreg",
]
[[package]]
name = "potential_utf"
version = "0.1.2"
@@ -4406,17 +4366,6 @@ dependencies = [
"syn 2.0.104",
]
[[package]]
name = "serial2"
version = "0.2.31"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "26e1e5956803a69ddd72ce2de337b577898801528749565def03515f82bad5bb"
dependencies = [
"cfg-if",
"libc",
"winapi",
]
[[package]]
name = "sha1"
version = "0.10.6"
@@ -4448,22 +4397,6 @@ dependencies = [
"lazy_static",
]
[[package]]
name = "shared_library"
version = "0.1.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5a9e7e0f2bfae24d8a5b5a66c5b257a83c7412304311512a0c054cd5e619da11"
dependencies = [
"lazy_static",
"libc",
]
[[package]]
name = "shell-words"
version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "24188a676b6ae68c3b2cb3a01be17fbf7240ce009799bb56d5b1409051e78fde"
[[package]]
name = "shlex"
version = "1.3.0"
@@ -6243,15 +6176,6 @@ dependencies = [
"memchr",
]
[[package]]
name = "winreg"
version = "0.10.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "80d0f4e272c85def139476380b12f9ac60926689dd2e01d4923222f40580869d"
dependencies = [
"winapi",
]
[[package]]
name = "winsafe"
version = "0.0.19"

View File

@@ -6,10 +6,6 @@ version = { workspace = true }
[lib]
name = "codex_apply_patch"
path = "src/lib.rs"
[[bin]]
name = "apply-patch"
path = "src/main.rs"
[lints]
workspace = true
@@ -24,4 +20,3 @@ tree-sitter-bash = "0.25.0"
[dev-dependencies]
pretty_assertions = "1.4.1"
tempfile = "3.13.0"
assert_cmd = "2"

View File

@@ -1,40 +0,0 @@
use std::io::Write;
use std::process::ExitCode;
fn main() -> ExitCode {
// Expect exactly one argument: the full apply_patch payload.
let mut args = std::env::args_os();
// argv[0]
let _argv0 = args.next();
let patch_arg = match args.next() {
Some(arg) => match arg.into_string() {
Ok(s) => s,
Err(_) => {
eprintln!("Error: apply-patch requires a UTF-8 PATCH argument.");
return ExitCode::from(1);
}
},
None => {
eprintln!("Usage: apply-patch '<apply_patch_payload>'");
return ExitCode::from(2);
}
};
// Refuse extra args to avoid ambiguity.
if args.next().is_some() {
eprintln!("Error: apply-patch accepts exactly one argument.");
return ExitCode::from(2);
}
let mut stdout = std::io::stdout();
let mut stderr = std::io::stderr();
match codex_apply_patch::apply_patch(&patch_arg, &mut stdout, &mut stderr) {
Ok(()) => {
// Flush to ensure output ordering when used in pipelines.
let _ = stdout.flush();
ExitCode::from(0)
}
Err(_) => ExitCode::from(1),
}
}

View File

@@ -1,48 +0,0 @@
#![allow(clippy::expect_used, clippy::unwrap_used)]
use assert_cmd::prelude::*;
use std::fs;
use std::process::Command;
use tempfile::tempdir;
#[test]
fn test_apply_patch_cli_add_and_update() -> anyhow::Result<()> {
let tmp = tempdir()?;
let file = "cli_test.txt";
let absolute_path = tmp.path().join(file);
// 1) Add a file
let add_patch = format!(
r#"*** Begin Patch
*** Add File: {file}
+hello
*** End Patch"#
);
Command::cargo_bin("apply-patch")
.expect("should find apply-patch binary")
.arg(add_patch)
.current_dir(tmp.path())
.assert()
.success()
.stdout(format!("Success. Updated the following files:\nA {file}\n"));
assert_eq!(fs::read_to_string(&absolute_path)?, "hello\n");
// 2) Update the file
let update_patch = format!(
r#"*** Begin Patch
*** Update File: {file}
@@
-hello
+world
*** End Patch"#
);
Command::cargo_bin("apply-patch")
.expect("should find apply-patch binary")
.arg(update_patch)
.current_dir(tmp.path())
.assert()
.success()
.stdout(format!("Success. Updated the following files:\nM {file}\n"));
assert_eq!(fs::read_to_string(&absolute_path)?, "world\n");
Ok(())
}

View File

@@ -28,7 +28,6 @@ libc = "0.2.175"
mcp-types = { path = "../mcp-types" }
mime_guess = "2.0"
os_info = "3.12.0"
portable-pty = "0.9.0"
rand = "0.9"
regex-lite = "0.1.6"
reqwest = { version = "0.12", features = ["json", "stream"] }

View File

@@ -53,11 +53,6 @@ use crate::exec::SandboxType;
use crate::exec::StdoutStream;
use crate::exec::StreamOutput;
use crate::exec::process_exec_tool_call;
use crate::exec_command::EXEC_COMMAND_TOOL_NAME;
use crate::exec_command::ExecCommandParams;
use crate::exec_command::SESSION_MANAGER;
use crate::exec_command::WRITE_STDIN_TOOL_NAME;
use crate::exec_command::WriteStdinParams;
use crate::exec_env::create_env;
use crate::mcp_connection_manager::McpConnectionManager;
use crate::mcp_tool_call::handle_mcp_tool_call;
@@ -147,6 +142,14 @@ pub struct CodexSpawnOk {
}
pub(crate) const INITIAL_SUBMIT_ID: &str = "";
pub(crate) const SUBMISSION_CHANNEL_CAPACITY: usize = 64;
// Model-formatting limits: clients get full streams; only content sent to the model is truncated.
pub(crate) const MODEL_FORMAT_MAX_BYTES: usize = 10 * 1024; // 10 KiB
pub(crate) const MODEL_FORMAT_MAX_LINES: usize = 256; // lines
pub(crate) const MODEL_FORMAT_HEAD_LINES: usize = MODEL_FORMAT_MAX_LINES / 2;
pub(crate) const MODEL_FORMAT_TAIL_LINES: usize = MODEL_FORMAT_MAX_LINES - MODEL_FORMAT_HEAD_LINES; // 128
pub(crate) const MODEL_FORMAT_HEAD_BYTES: usize = MODEL_FORMAT_MAX_BYTES / 2;
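// Worked out, these budgets mean: at most 256 lines kept as 128 head lines plus
// 128 tail lines, and at most 10 KiB kept as roughly 5 KiB of head bytes with the
// remainder (minus the elision marker) spent on the tail.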
impl Codex {
/// Spawn a new [`Codex`] and initialize the session.
@@ -155,7 +158,7 @@ impl Codex {
auth_manager: Arc<AuthManager>,
initial_history: Option<Vec<ResponseItem>>,
) -> CodexResult<CodexSpawnOk> {
let (tx_sub, rx_sub) = async_channel::bounded(64);
let (tx_sub, rx_sub) = async_channel::bounded(SUBMISSION_CHANNEL_CAPACITY);
let (tx_event, rx_event) = async_channel::unbounded();
let user_instructions = get_user_instructions(&config).await;
@@ -260,6 +263,15 @@ pub(crate) struct Session {
/// Manager for external MCP servers/tools.
mcp_connection_manager: McpConnectionManager,
/// Loaded subagent definitions from project and user scope.
subagents_registry: crate::subagents::registry::SubagentRegistry,
/// Auth manager used to spawn nested sessions (e.g., subagents).
auth_manager: Arc<AuthManager>,
/// Base configuration used to derive nested session configs.
base_config: Arc<Config>,
/// External notifier command (will be passed as args to exec()). When
/// `None` this feature is disabled.
notify: Option<Vec<String>>,
@@ -495,6 +507,30 @@ impl Session {
model_reasoning_summary,
session_id,
);
// Build subagent registry paths and load once per session
let project_agents_dir = {
let mut p = cwd.clone();
p.push(".codex");
p.push("agents");
if p.exists() { Some(p) } else { None }
};
let user_agents_dir = {
let mut p = config.codex_home.clone();
p.push("agents");
if p.exists() { Some(p) } else { None }
};
let mut subagents_registry =
crate::subagents::registry::SubagentRegistry::new(project_agents_dir, user_agents_dir);
subagents_registry.load();
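// Lookup order per the construction above: the project-scoped `<cwd>/.codex/agents`
// directory first, then the user-scoped `<codex_home>/agents` directory; directories
// that do not exist are skipped.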
// Log discovered subagents for visibility in clients (e.g., TUI) after
// SessionConfigured so the first event contract remains intact.
post_session_configured_error_events.push(Event {
id: INITIAL_SUBMIT_ID.to_string(),
msg: EventMsg::BackgroundEvent(BackgroundEventEvent {
message: format!("subagents discovered: {:?}", subagents_registry.all_names()),
}),
});
let turn_context = TurnContext {
client,
tools_config: ToolsConfig::new(
@@ -503,7 +539,7 @@ impl Session {
sandbox_policy.clone(),
config.include_plan_tool,
config.include_apply_patch_tool,
config.use_experimental_streamable_shell_tool,
config.include_subagent_tool,
),
user_instructions,
base_instructions,
@@ -517,6 +553,9 @@ impl Session {
session_id,
tx_event: tx_event.clone(),
mcp_connection_manager,
subagents_registry,
auth_manager: auth_manager.clone(),
base_config: config.clone(),
notify,
state: Mutex::new(state),
rollout: Mutex::new(rollout_recorder),
@@ -576,6 +615,16 @@ impl Session {
}
}
/// Access auth manager for nested sessions.
pub(crate) fn auth_manager(&self) -> Arc<AuthManager> {
self.auth_manager.clone()
}
/// Access base config for nested sessions.
pub(crate) fn base_config(&self) -> Arc<Config> {
self.base_config.clone()
}
/// Sends the given event to the client and swallows the send error, if
/// any, logging it as an error.
pub(crate) async fn send_event(&self, event: Event) {
@@ -728,15 +777,15 @@ impl Session {
let ExecToolCallOutput {
stdout,
stderr,
aggregated_output,
duration,
exit_code,
} = output;
// Because stdout and stderr could each be up to 100 KiB, we send
// truncated versions.
const MAX_STREAM_OUTPUT: usize = 5 * 1024; // 5KiB
let stdout = stdout.text.chars().take(MAX_STREAM_OUTPUT).collect();
let stderr = stderr.text.chars().take(MAX_STREAM_OUTPUT).collect();
// Send full stdout/stderr to clients; do not truncate.
let stdout = stdout.text.clone();
let stderr = stderr.text.clone();
let formatted_output = format_exec_output_str(output);
let aggregated_output: String = aggregated_output.text.clone();
let msg = if is_apply_patch {
EventMsg::PatchApplyEnd(PatchApplyEndEvent {
@@ -750,9 +799,10 @@ impl Session {
call_id: call_id.to_string(),
stdout,
stderr,
formatted_output,
duration: *duration,
aggregated_output,
exit_code: *exit_code,
duration: *duration,
formatted_output,
})
};
@@ -810,6 +860,7 @@ impl Session {
exit_code: -1,
stdout: StreamOutput::new(String::new()),
stderr: StreamOutput::new(get_error_message_ui(e)),
aggregated_output: StreamOutput::new(get_error_message_ui(e)),
duration: Duration::default(),
};
&output_stderr
@@ -1086,7 +1137,7 @@ async fn submission_loop(
new_sandbox_policy.clone(),
config.include_plan_tool,
config.include_apply_patch_tool,
config.use_experimental_streamable_shell_tool,
config.include_subagent_tool,
);
let new_turn_context = TurnContext {
@@ -1165,7 +1216,7 @@ async fn submission_loop(
sandbox_policy.clone(),
config.include_plan_tool,
config.include_apply_patch_tool,
config.use_experimental_streamable_shell_tool,
config.include_subagent_tool,
),
user_instructions: turn_context.user_instructions.clone(),
base_instructions: turn_context.base_instructions.clone(),
@@ -1552,6 +1603,27 @@ async fn run_turn(
&turn_context.tools_config,
Some(sess.mcp_connection_manager.list_all_tools()),
);
tracing::trace!("Tools: {tools:?}");
// Log tool names for visibility in the TUI/debug logs.
#[allow(clippy::match_same_arms)]
let tool_names: Vec<String> = tools
.iter()
.map(|t| match t {
crate::openai_tools::OpenAiTool::Function(f) => f.name.clone(),
crate::openai_tools::OpenAiTool::LocalShell {} => "local_shell".to_string(),
crate::openai_tools::OpenAiTool::Freeform(f) => f.name.clone(),
})
.collect();
let _ = sess
.tx_event
.send(Event {
id: sub_id.clone(),
msg: EventMsg::BackgroundEvent(BackgroundEventEvent {
message: format!("tools available: {:?}", tool_names),
}),
})
.await;
let prompt = Prompt {
input,
@@ -2071,10 +2143,16 @@ async fn handle_function_call(
.await
}
"update_plan" => handle_update_plan(sess, arguments, sub_id, call_id).await,
EXEC_COMMAND_TOOL_NAME => {
// TODO(mbolin): Sandbox check.
let exec_params = match serde_json::from_str::<ExecCommandParams>(&arguments) {
Ok(params) => params,
"subagent_run" => {
#[derive(serde::Deserialize)]
struct Args {
name: String,
input: String,
#[serde(default)]
context: Option<String>,
}
let args = match serde_json::from_str::<Args>(&arguments) {
Ok(a) => a,
Err(e) => {
return ResponseInputItem::FunctionCallOutput {
call_id,
@@ -2085,36 +2163,62 @@ async fn handle_function_call(
};
}
};
let result = SESSION_MANAGER
.handle_exec_command_request(exec_params)
.await;
let function_call_output = crate::exec_command::result_into_payload(result);
ResponseInputItem::FunctionCallOutput {
call_id,
output: function_call_output,
let result = crate::subagents::runner::run(
sess,
turn_context,
&sess.subagents_registry,
crate::subagents::runner::RunSubagentArgs {
name: args.name,
input: args.input,
context: args.context,
},
&sub_id,
)
.await;
match result {
Ok(message) => ResponseInputItem::FunctionCallOutput {
call_id,
output: FunctionCallOutputPayload {
content: message,
success: Some(true),
},
},
Err(e) => ResponseInputItem::FunctionCallOutput {
call_id,
output: FunctionCallOutputPayload {
content: format!("subagent failed: {e}"),
success: Some(false),
},
},
}
}
WRITE_STDIN_TOOL_NAME => {
let write_stdin_params = match serde_json::from_str::<WriteStdinParams>(&arguments) {
Ok(params) => params,
Err(e) => {
return ResponseInputItem::FunctionCallOutput {
call_id,
output: FunctionCallOutputPayload {
content: format!("failed to parse function arguments: {e}"),
success: Some(false),
},
};
"subagent_list" => {
#[derive(serde::Serialize)]
struct SubagentBrief<'a> {
name: &'a str,
description: &'a str,
}
let mut list = Vec::new();
for name in sess.subagents_registry.all_names() {
if let Some(def) = sess.subagents_registry.get(&name) {
list.push(SubagentBrief {
name: &def.name,
description: &def.description,
});
}
}
let payload = match serde_json::to_string(&list) {
Ok(s) => s,
Err(e) => format!("failed to serialize subagent list: {e}"),
};
let result = SESSION_MANAGER
.handle_write_stdin_request(write_stdin_params)
.await;
let function_call_output: FunctionCallOutputPayload =
crate::exec_command::result_into_payload(result);
ResponseInputItem::FunctionCallOutput {
call_id,
output: function_call_output,
output: FunctionCallOutputPayload {
content: payload,
success: Some(true),
},
}
}
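// Illustrative payloads for the two subagent tools above (shapes follow the
// `Args` and `SubagentBrief` structs; the concrete values are made up):
//   subagent_run arguments: {"name":"reviewer","input":"summarize the diff","context":null}
//   subagent_list output:   [{"name":"reviewer","description":"..."}]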
_ => {
@@ -2227,6 +2331,8 @@ fn parse_container_exec_arguments(
}
}
// (helper run_one_turn_collect removed as unused)
pub struct ExecInvokeArgs<'a> {
pub params: ExecParams,
pub sandbox_type: SandboxType,
@@ -2604,23 +2710,103 @@ async fn handle_sandbox_error(
fn format_exec_output_str(exec_output: &ExecToolCallOutput) -> String {
let ExecToolCallOutput {
exit_code,
stdout,
stderr,
..
aggregated_output, ..
} = exec_output;
let is_success = *exit_code == 0;
let output = if is_success { stdout } else { stderr };
// Head+tail truncation for the model: show the beginning and end with an elision.
// Clients still receive full streams; only this formatted summary is capped.
let mut formatted_output = output.text.clone();
if let Some(truncated_after_lines) = output.truncated_after_lines {
formatted_output.push_str(&format!(
"\n\n[Output truncated after {truncated_after_lines} lines: too many lines or bytes.]",
));
let s = aggregated_output.text.as_str();
let total_lines = s.lines().count();
if s.len() <= MODEL_FORMAT_MAX_BYTES && total_lines <= MODEL_FORMAT_MAX_LINES {
return s.to_string();
}
formatted_output
let lines: Vec<&str> = s.lines().collect();
let head_take = MODEL_FORMAT_HEAD_LINES.min(lines.len());
let tail_take = MODEL_FORMAT_TAIL_LINES.min(lines.len().saturating_sub(head_take));
let omitted = lines.len().saturating_sub(head_take + tail_take);
// Join head and tail blocks (lines() strips newlines; reinsert them)
let head_block = lines
.iter()
.take(head_take)
.cloned()
.collect::<Vec<_>>()
.join("\n");
let tail_block = if tail_take > 0 {
lines[lines.len() - tail_take..].join("\n")
} else {
String::new()
};
let marker = format!("\n[... omitted {omitted} of {total_lines} lines ...]\n\n");
// Byte budgets for head/tail around the marker
let mut head_budget = MODEL_FORMAT_HEAD_BYTES.min(MODEL_FORMAT_MAX_BYTES);
let tail_budget = MODEL_FORMAT_MAX_BYTES.saturating_sub(head_budget + marker.len());
if tail_budget == 0 && marker.len() >= MODEL_FORMAT_MAX_BYTES {
// Degenerate case: marker alone exceeds budget; return a clipped marker
return take_bytes_at_char_boundary(&marker, MODEL_FORMAT_MAX_BYTES).to_string();
}
if tail_budget == 0 {
// Make room for the marker by shrinking head
head_budget = MODEL_FORMAT_MAX_BYTES.saturating_sub(marker.len());
}
// Enforce line-count cap by trimming head/tail lines
let head_lines_text = head_block;
let tail_lines_text = tail_block;
// Build final string respecting byte budgets
let head_part = take_bytes_at_char_boundary(&head_lines_text, head_budget);
let mut result = String::with_capacity(MODEL_FORMAT_MAX_BYTES.min(s.len()));
result.push_str(head_part);
result.push_str(&marker);
let remaining = MODEL_FORMAT_MAX_BYTES.saturating_sub(result.len());
let tail_budget_final = remaining;
let tail_part = take_last_bytes_at_char_boundary(&tail_lines_text, tail_budget_final);
result.push_str(tail_part);
result
}
// Truncate a &str to a byte budget at a char boundary (prefix)
#[inline]
fn take_bytes_at_char_boundary(s: &str, maxb: usize) -> &str {
if s.len() <= maxb {
return s;
}
let mut last_ok = 0;
for (i, ch) in s.char_indices() {
let nb = i + ch.len_utf8();
if nb > maxb {
break;
}
last_ok = nb;
}
&s[..last_ok]
}
// Take a suffix of a &str within a byte budget at a char boundary
#[inline]
fn take_last_bytes_at_char_boundary(s: &str, maxb: usize) -> &str {
if s.len() <= maxb {
return s;
}
let mut start = s.len();
let mut used = 0usize;
for (i, ch) in s.char_indices().rev() {
let nb = ch.len_utf8();
if used + nb > maxb {
break;
}
start = i;
used += nb;
if start == 0 {
break;
}
}
&s[start..]
}
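// A minimal sketch of how the two boundary helpers behave; the module name and
// the sample string are illustrative, not taken from the surrounding code.
#[cfg(test)]
mod byte_boundary_sketch {
    use super::*;
    #[test]
    fn clips_at_utf8_char_boundaries() {
        // "é" is two bytes, so a 2-byte prefix budget cannot include it.
        assert_eq!(take_bytes_at_char_boundary("héllo", 2), "h");
        // A 3-byte suffix budget keeps the last three single-byte chars.
        assert_eq!(take_last_bytes_at_char_boundary("héllo", 3), "llo");
        // Budgets at or above the string length return it unchanged.
        assert_eq!(take_bytes_at_char_boundary("héllo", 6), "héllo");
    }
}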
/// Exec output is a pre-serialized JSON payload
@@ -2771,6 +2957,7 @@ mod tests {
use mcp_types::TextContent;
use pretty_assertions::assert_eq;
use serde_json::json;
use std::time::Duration as StdDuration;
fn text_block(s: &str) -> ContentBlock {
ContentBlock::TextContent(TextContent {
@@ -2805,6 +2992,82 @@ mod tests {
assert_eq!(expected, got);
}
#[test]
fn model_truncation_head_tail_by_lines() {
// Build 400 short lines so line-count limit, not byte budget, triggers truncation
let lines: Vec<String> = (1..=400).map(|i| format!("line{i}")).collect();
let full = lines.join("\n");
let exec = ExecToolCallOutput {
exit_code: 0,
stdout: StreamOutput::new(String::new()),
stderr: StreamOutput::new(String::new()),
aggregated_output: StreamOutput::new(full.clone()),
duration: StdDuration::from_secs(1),
};
let out = format_exec_output_str(&exec);
// Expect elision marker with correct counts
let omitted = 400 - MODEL_FORMAT_MAX_LINES; // 144
let marker = format!("\n[... omitted {omitted} of 400 lines ...]\n\n");
assert!(out.contains(&marker), "missing marker: {out}");
// Validate head and tail
let parts: Vec<&str> = out.split(&marker).collect();
assert_eq!(parts.len(), 2, "expected one marker split");
let head = parts[0];
let tail = parts[1];
let expected_head: String = (1..=MODEL_FORMAT_HEAD_LINES)
.map(|i| format!("line{i}"))
.collect::<Vec<_>>()
.join("\n");
assert!(head.starts_with(&expected_head), "head mismatch");
let expected_tail: String = ((400 - MODEL_FORMAT_TAIL_LINES + 1)..=400)
.map(|i| format!("line{i}"))
.collect::<Vec<_>>()
.join("\n");
assert!(tail.ends_with(&expected_tail), "tail mismatch");
}
#[test]
fn model_truncation_respects_byte_budget() {
// Construct a large output (about 100kB) so byte budget dominates
let big_line = "x".repeat(100);
let full = std::iter::repeat_n(big_line.clone(), 1000)
.collect::<Vec<_>>()
.join("\n");
let exec = ExecToolCallOutput {
exit_code: 0,
stdout: StreamOutput::new(String::new()),
stderr: StreamOutput::new(String::new()),
aggregated_output: StreamOutput::new(full.clone()),
duration: StdDuration::from_secs(1),
};
let out = format_exec_output_str(&exec);
assert!(out.len() <= MODEL_FORMAT_MAX_BYTES, "exceeds byte budget");
assert!(out.contains("omitted"), "should contain elision marker");
// Ensure head and tail are drawn from the original
assert!(full.starts_with(out.chars().take(8).collect::<String>().as_str()));
assert!(
full.ends_with(
out.chars()
.rev()
.take(8)
.collect::<String>()
.chars()
.rev()
.collect::<String>()
.as_str()
)
);
}
#[test]
fn falls_back_to_content_when_structured_is_null() {
let ctr = CallToolResult {

View File

@@ -169,13 +169,14 @@ pub struct Config {
/// model family's default preference.
pub include_apply_patch_tool: bool,
/// Include the `subagent.run` tool allowing the model to invoke configured subagents.
pub include_subagent_tool: bool,
/// The value for the `originator` header included with Responses API requests.
pub responses_originator_header: String,
/// If set to `true`, the API key will be signed with the `originator` header.
pub preferred_auth_method: AuthMode,
pub use_experimental_streamable_shell_tool: bool,
}
impl Config {
@@ -471,8 +472,6 @@ pub struct ConfigToml {
/// Experimental path to a file whose contents replace the built-in BASE_INSTRUCTIONS.
pub experimental_instructions_file: Option<PathBuf>,
pub experimental_use_exec_command_tool: Option<bool>,
/// The value for the `originator` header included with Responses API requests.
pub responses_originator_header_internal_override: Option<String>,
@@ -480,6 +479,9 @@ pub struct ConfigToml {
/// If set to `true`, the API key will be signed with the `originator` header.
pub preferred_auth_method: Option<AuthMode>,
/// Include the `subagent.run` tool allowing the model to invoke configured subagents.
pub include_subagent_tool: Option<bool>,
}
#[derive(Deserialize, Debug, Clone, PartialEq, Eq)]
@@ -574,6 +576,7 @@ pub struct ConfigOverrides {
pub base_instructions: Option<String>,
pub include_plan_tool: Option<bool>,
pub include_apply_patch_tool: Option<bool>,
pub include_subagent_tool: Option<bool>,
pub disable_response_storage: Option<bool>,
pub show_raw_agent_reasoning: Option<bool>,
}
@@ -600,6 +603,7 @@ impl Config {
base_instructions,
include_plan_tool,
include_apply_patch_tool,
include_subagent_tool,
disable_response_storage,
show_raw_agent_reasoning,
} = overrides;
@@ -760,11 +764,13 @@ impl Config {
experimental_resume,
include_plan_tool: include_plan_tool.unwrap_or(false),
include_apply_patch_tool: include_apply_patch_tool.unwrap_or(false),
include_subagent_tool: config_profile
.include_subagent_tool
.or(cfg.include_subagent_tool)
.or(include_subagent_tool)
.unwrap_or(false),
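// Resolution order as written: profile setting, then the value from
// config.toml, then the caller-supplied override, else false.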
responses_originator_header,
preferred_auth_method: cfg.preferred_auth_method.unwrap_or(AuthMode::ChatGPT),
use_experimental_streamable_shell_tool: cfg
.experimental_use_exec_command_tool
.unwrap_or(false),
};
Ok(config)
}
@@ -1129,9 +1135,9 @@ disable_response_storage = true
base_instructions: None,
include_plan_tool: false,
include_apply_patch_tool: false,
include_subagent_tool: false,
responses_originator_header: "codex_cli_rs".to_string(),
preferred_auth_method: AuthMode::ChatGPT,
use_experimental_streamable_shell_tool: false,
},
o3_profile_config
);
@@ -1184,9 +1190,9 @@ disable_response_storage = true
base_instructions: None,
include_plan_tool: false,
include_apply_patch_tool: false,
include_subagent_tool: false,
responses_originator_header: "codex_cli_rs".to_string(),
preferred_auth_method: AuthMode::ChatGPT,
use_experimental_streamable_shell_tool: false,
};
assert_eq!(expected_gpt3_profile_config, gpt3_profile_config);
@@ -1254,9 +1260,9 @@ disable_response_storage = true
base_instructions: None,
include_plan_tool: false,
include_apply_patch_tool: false,
include_subagent_tool: false,
responses_originator_header: "codex_cli_rs".to_string(),
preferred_auth_method: AuthMode::ChatGPT,
use_experimental_streamable_shell_tool: false,
};
assert_eq!(expected_zdr_profile_config, zdr_profile_config);

View File

@@ -21,4 +21,6 @@ pub struct ConfigProfile {
pub model_verbosity: Option<Verbosity>,
pub chatgpt_base_url: Option<String>,
pub experimental_instructions_file: Option<PathBuf>,
/// Include the `subagent.run` tool allowing the model to invoke configured subagents.
pub include_subagent_tool: Option<bool>,
}

View File

@@ -28,18 +28,17 @@ use crate::spawn::StdioPolicy;
use crate::spawn::spawn_child_async;
use serde_bytes::ByteBuf;
// Maximum we send for each stream, which is either:
// - 10KiB OR
// - 256 lines
const MAX_STREAM_OUTPUT: usize = 10 * 1024;
const MAX_STREAM_OUTPUT_LINES: usize = 256;
const DEFAULT_TIMEOUT_MS: u64 = 10_000;
// Hardcode these since it does not seem worth including the libc crate just
// for these.
const SIGKILL_CODE: i32 = 9;
const TIMEOUT_CODE: i32 = 64;
const EXIT_CODE_SIGNAL_BASE: i32 = 128; // conventional shell: 128 + signal
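// e.g. a timeout maps to 128 + 64 = 192 and a Ctrl-C/SIGKILL to 128 + 9 = 137.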
// I/O buffer sizing
const READ_CHUNK_SIZE: usize = 8192; // bytes per read
const AGGREGATE_BUFFER_INITIAL_CAPACITY: usize = 8 * 1024; // 8 KiB
#[derive(Debug, Clone)]
pub struct ExecParams {
@@ -153,6 +152,7 @@ pub async fn process_exec_tool_call(
exit_code,
stdout,
stderr,
aggregated_output: raw_output.aggregated_output.from_utf8_lossy(),
duration,
})
}
@@ -189,10 +189,11 @@ pub struct StreamOutput<T> {
pub truncated_after_lines: Option<u32>,
}
#[derive(Debug)]
pub struct RawExecToolCallOutput {
struct RawExecToolCallOutput {
pub exit_status: ExitStatus,
pub stdout: StreamOutput<Vec<u8>>,
pub stderr: StreamOutput<Vec<u8>>,
pub aggregated_output: StreamOutput<Vec<u8>>,
}
impl StreamOutput<String> {
@@ -213,11 +214,17 @@ impl StreamOutput<Vec<u8>> {
}
}
#[inline]
fn append_all(dst: &mut Vec<u8>, src: &[u8]) {
dst.extend_from_slice(src);
}
#[derive(Debug)]
pub struct ExecToolCallOutput {
pub exit_code: i32,
pub stdout: StreamOutput<String>,
pub stderr: StreamOutput<String>,
pub aggregated_output: StreamOutput<String>,
pub duration: Duration,
}
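// `aggregated_output` carries stdout and stderr interleaved in arrival order,
// while the separate `stdout` and `stderr` fields remain full, untruncated streams.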
@@ -253,7 +260,7 @@ async fn exec(
/// Consumes the output of a child process, truncating it so it is suitable for
/// use as the output of a `shell` tool call. Also enforces specified timeout.
pub(crate) async fn consume_truncated_output(
async fn consume_truncated_output(
mut child: Child,
timeout: Duration,
stdout_stream: Option<StdoutStream>,
@@ -273,19 +280,19 @@ pub(crate) async fn consume_truncated_output(
))
})?;
let (agg_tx, agg_rx) = async_channel::unbounded::<Vec<u8>>();
let stdout_handle = tokio::spawn(read_capped(
BufReader::new(stdout_reader),
MAX_STREAM_OUTPUT,
MAX_STREAM_OUTPUT_LINES,
stdout_stream.clone(),
false,
Some(agg_tx.clone()),
));
let stderr_handle = tokio::spawn(read_capped(
BufReader::new(stderr_reader),
MAX_STREAM_OUTPUT,
MAX_STREAM_OUTPUT_LINES,
stdout_stream.clone(),
true,
Some(agg_tx.clone()),
));
let exit_status = tokio::select! {
@@ -297,38 +304,48 @@ pub(crate) async fn consume_truncated_output(
// timeout
child.start_kill()?;
// Debatable whether `child.wait().await` should be called here.
synthetic_exit_status(128 + TIMEOUT_CODE)
synthetic_exit_status(EXIT_CODE_SIGNAL_BASE + TIMEOUT_CODE)
}
}
}
_ = tokio::signal::ctrl_c() => {
child.start_kill()?;
synthetic_exit_status(128 + SIGKILL_CODE)
synthetic_exit_status(EXIT_CODE_SIGNAL_BASE + SIGKILL_CODE)
}
};
let stdout = stdout_handle.await??;
let stderr = stderr_handle.await??;
drop(agg_tx);
let mut combined_buf = Vec::with_capacity(AGGREGATE_BUFFER_INITIAL_CAPACITY);
while let Ok(chunk) = agg_rx.recv().await {
append_all(&mut combined_buf, &chunk);
}
let aggregated_output = StreamOutput {
text: combined_buf,
truncated_after_lines: None,
};
Ok(RawExecToolCallOutput {
exit_status,
stdout,
stderr,
aggregated_output,
})
}
async fn read_capped<R: AsyncRead + Unpin + Send + 'static>(
mut reader: R,
max_output: usize,
max_lines: usize,
stream: Option<StdoutStream>,
is_stderr: bool,
aggregate_tx: Option<Sender<Vec<u8>>>,
) -> io::Result<StreamOutput<Vec<u8>>> {
let mut buf = Vec::with_capacity(max_output.min(8 * 1024));
let mut tmp = [0u8; 8192];
let mut buf = Vec::with_capacity(AGGREGATE_BUFFER_INITIAL_CAPACITY);
let mut tmp = [0u8; READ_CHUNK_SIZE];
let mut remaining_bytes = max_output;
let mut remaining_lines = max_lines;
// No caps: append all bytes
loop {
let n = reader.read(&mut tmp).await?;
@@ -355,33 +372,17 @@ async fn read_capped<R: AsyncRead + Unpin + Send + 'static>(
let _ = stream.tx_event.send(event).await;
}
// Copy into the buffer only while we still have byte and line budget.
if remaining_bytes > 0 && remaining_lines > 0 {
let mut copy_len = 0;
for &b in &tmp[..n] {
if remaining_bytes == 0 || remaining_lines == 0 {
break;
}
copy_len += 1;
remaining_bytes -= 1;
if b == b'\n' {
remaining_lines -= 1;
}
}
buf.extend_from_slice(&tmp[..copy_len]);
if let Some(tx) = &aggregate_tx {
let _ = tx.send(tmp[..n].to_vec()).await;
}
// Continue reading to EOF to avoid back-pressure, but discard once caps are hit.
}
let truncated = remaining_lines == 0 || remaining_bytes == 0;
append_all(&mut buf, &tmp[..n]);
// Continue reading to EOF to avoid back-pressure
}
Ok(StreamOutput {
text: buf,
truncated_after_lines: if truncated {
Some((max_lines - remaining_lines) as u32)
} else {
None
},
truncated_after_lines: None,
})
}
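// A minimal sketch of the aggregation pattern used above, assuming the same
// `async_channel` crate: both stream readers forward raw chunks into one
// unbounded channel and the parent concatenates them in arrival order, which
// is what interleaves stdout and stderr.
#[cfg(test)]
mod aggregation_sketch {
    #[tokio::test]
    async fn chunks_concatenate_in_arrival_order() {
        let (tx, rx) = async_channel::unbounded::<Vec<u8>>();
        tx.send(b"out: hello\n".to_vec()).await.unwrap();
        tx.send(b"err: oops\n".to_vec()).await.unwrap();
        drop(tx);
        let mut combined: Vec<u8> = Vec::new();
        while let Ok(chunk) = rx.recv().await {
            combined.extend_from_slice(&chunk);
        }
        assert_eq!(combined, b"out: hello\nerr: oops\n".to_vec());
    }
}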

View File

@@ -1,57 +0,0 @@
use serde::Deserialize;
use serde::Serialize;
use crate::exec_command::session_id::SessionId;
#[derive(Debug, Clone, Deserialize)]
pub struct ExecCommandParams {
pub(crate) cmd: String,
#[serde(default = "default_yield_time")]
pub(crate) yield_time_ms: u64,
#[serde(default = "max_output_tokens")]
pub(crate) max_output_tokens: u64,
#[serde(default = "default_shell")]
pub(crate) shell: String,
#[serde(default = "default_login")]
pub(crate) login: bool,
}
fn default_yield_time() -> u64 {
10_000
}
fn max_output_tokens() -> u64 {
10_000
}
fn default_login() -> bool {
true
}
fn default_shell() -> String {
"/bin/bash".to_string()
}
#[derive(Debug, Deserialize, Serialize)]
pub struct WriteStdinParams {
pub(crate) session_id: SessionId,
pub(crate) chars: String,
#[serde(default = "write_stdin_default_yield_time_ms")]
pub(crate) yield_time_ms: u64,
#[serde(default = "write_stdin_default_max_output_tokens")]
pub(crate) max_output_tokens: u64,
}
fn write_stdin_default_yield_time_ms() -> u64 {
250
}
fn write_stdin_default_max_output_tokens() -> u64 {
10_000
}

View File

@@ -1,83 +0,0 @@
use std::sync::Mutex as StdMutex;
use tokio::sync::broadcast;
use tokio::sync::mpsc;
use tokio::task::JoinHandle;
#[derive(Debug)]
pub(crate) struct ExecCommandSession {
/// Queue for writing bytes to the process stdin (PTY master write side).
writer_tx: mpsc::Sender<Vec<u8>>,
/// Broadcast stream of output chunks read from the PTY. New subscribers
/// receive only chunks emitted after they subscribe.
output_tx: broadcast::Sender<Vec<u8>>,
/// Child killer handle for termination on drop (can signal independently
/// of a thread blocked in `.wait()`).
killer: StdMutex<Option<Box<dyn portable_pty::ChildKiller + Send + Sync>>>,
/// JoinHandle for the blocking PTY reader task.
reader_handle: StdMutex<Option<JoinHandle<()>>>,
/// JoinHandle for the stdin writer task.
writer_handle: StdMutex<Option<JoinHandle<()>>>,
/// JoinHandle for the child wait task.
wait_handle: StdMutex<Option<JoinHandle<()>>>,
}
impl ExecCommandSession {
pub(crate) fn new(
writer_tx: mpsc::Sender<Vec<u8>>,
output_tx: broadcast::Sender<Vec<u8>>,
killer: Box<dyn portable_pty::ChildKiller + Send + Sync>,
reader_handle: JoinHandle<()>,
writer_handle: JoinHandle<()>,
wait_handle: JoinHandle<()>,
) -> Self {
Self {
writer_tx,
output_tx,
killer: StdMutex::new(Some(killer)),
reader_handle: StdMutex::new(Some(reader_handle)),
writer_handle: StdMutex::new(Some(writer_handle)),
wait_handle: StdMutex::new(Some(wait_handle)),
}
}
pub(crate) fn writer_sender(&self) -> mpsc::Sender<Vec<u8>> {
self.writer_tx.clone()
}
pub(crate) fn output_receiver(&self) -> broadcast::Receiver<Vec<u8>> {
self.output_tx.subscribe()
}
}
impl Drop for ExecCommandSession {
fn drop(&mut self) {
// Best-effort: terminate child first so blocking tasks can complete.
if let Ok(mut killer_opt) = self.killer.lock()
&& let Some(mut killer) = killer_opt.take()
{
let _ = killer.kill();
}
// Abort background tasks; they may already have exited after kill.
if let Ok(mut h) = self.reader_handle.lock()
&& let Some(handle) = h.take()
{
handle.abort();
}
if let Ok(mut h) = self.writer_handle.lock()
&& let Some(handle) = h.take()
{
handle.abort();
}
if let Ok(mut h) = self.wait_handle.lock()
&& let Some(handle) = h.take()
{
handle.abort();
}
}
}

View File

@@ -1,14 +0,0 @@
mod exec_command_params;
mod exec_command_session;
mod responses_api;
mod session_id;
mod session_manager;
pub use exec_command_params::ExecCommandParams;
pub use exec_command_params::WriteStdinParams;
pub use responses_api::EXEC_COMMAND_TOOL_NAME;
pub use responses_api::WRITE_STDIN_TOOL_NAME;
pub use responses_api::create_exec_command_tool_for_responses_api;
pub use responses_api::create_write_stdin_tool_for_responses_api;
pub use session_manager::SESSION_MANAGER;
pub use session_manager::result_into_payload;

View File

@@ -1,98 +0,0 @@
use std::collections::BTreeMap;
use crate::openai_tools::JsonSchema;
use crate::openai_tools::ResponsesApiTool;
pub const EXEC_COMMAND_TOOL_NAME: &str = "exec_command";
pub const WRITE_STDIN_TOOL_NAME: &str = "write_stdin";
pub fn create_exec_command_tool_for_responses_api() -> ResponsesApiTool {
let mut properties = BTreeMap::<String, JsonSchema>::new();
properties.insert(
"cmd".to_string(),
JsonSchema::String {
description: Some("The shell command to execute.".to_string()),
},
);
properties.insert(
"yield_time_ms".to_string(),
JsonSchema::Number {
description: Some("The maximum time in milliseconds to wait for output.".to_string()),
},
);
properties.insert(
"max_output_tokens".to_string(),
JsonSchema::Number {
description: Some("The maximum number of tokens to output.".to_string()),
},
);
properties.insert(
"shell".to_string(),
JsonSchema::String {
description: Some("The shell to use. Defaults to \"/bin/bash\".".to_string()),
},
);
properties.insert(
"login".to_string(),
JsonSchema::Boolean {
description: Some(
"Whether to run the command as a login shell. Defaults to true.".to_string(),
),
},
);
ResponsesApiTool {
name: EXEC_COMMAND_TOOL_NAME.to_owned(),
description: r#"Execute shell commands on the local machine with streaming output."#
.to_string(),
strict: false,
parameters: JsonSchema::Object {
properties,
required: Some(vec!["cmd".to_string()]),
additional_properties: Some(false),
},
}
}
pub fn create_write_stdin_tool_for_responses_api() -> ResponsesApiTool {
let mut properties = BTreeMap::<String, JsonSchema>::new();
properties.insert(
"session_id".to_string(),
JsonSchema::Number {
description: Some("The ID of the exec_command session.".to_string()),
},
);
properties.insert(
"chars".to_string(),
JsonSchema::String {
description: Some("The characters to write to stdin.".to_string()),
},
);
properties.insert(
"yield_time_ms".to_string(),
JsonSchema::Number {
description: Some(
"The maximum time in milliseconds to wait for output after writing.".to_string(),
),
},
);
properties.insert(
"max_output_tokens".to_string(),
JsonSchema::Number {
description: Some("The maximum number of tokens to output.".to_string()),
},
);
ResponsesApiTool {
name: WRITE_STDIN_TOOL_NAME.to_owned(),
description: r#"Write characters to an exec session's stdin. Returns all stdout+stderr received within yield_time_ms.
Can write control characters (\u0003 for Ctrl-C), or an empty string to just poll stdout+stderr."#
.to_string(),
strict: false,
parameters: JsonSchema::Object {
properties,
required: Some(vec!["session_id".to_string(), "chars".to_string()]),
additional_properties: Some(false),
},
}
}

View File

@@ -1,5 +0,0 @@
use serde::Deserialize;
use serde::Serialize;
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)]
pub(crate) struct SessionId(pub u32);

View File

@@ -1,677 +0,0 @@
use std::collections::HashMap;
use std::io::ErrorKind;
use std::io::Read;
use std::sync::Arc;
use std::sync::LazyLock;
use std::sync::Mutex as StdMutex;
use std::sync::atomic::AtomicU32;
use portable_pty::CommandBuilder;
use portable_pty::PtySize;
use portable_pty::native_pty_system;
use tokio::sync::Mutex;
use tokio::sync::mpsc;
use tokio::sync::oneshot;
use tokio::time::Duration;
use tokio::time::Instant;
use tokio::time::timeout;
use crate::exec_command::exec_command_params::ExecCommandParams;
use crate::exec_command::exec_command_params::WriteStdinParams;
use crate::exec_command::exec_command_session::ExecCommandSession;
use crate::exec_command::session_id::SessionId;
use codex_protocol::models::FunctionCallOutputPayload;
pub static SESSION_MANAGER: LazyLock<SessionManager> = LazyLock::new(SessionManager::default);
#[derive(Debug, Default)]
pub struct SessionManager {
next_session_id: AtomicU32,
sessions: Mutex<HashMap<SessionId, ExecCommandSession>>,
}
#[derive(Debug)]
pub struct ExecCommandOutput {
wall_time: Duration,
exit_status: ExitStatus,
original_token_count: Option<u64>,
output: String,
}
impl ExecCommandOutput {
fn to_text_output(&self) -> String {
let wall_time_secs = self.wall_time.as_secs_f32();
let termination_status = match self.exit_status {
ExitStatus::Exited(code) => format!("Process exited with code {code}"),
ExitStatus::Ongoing(session_id) => {
format!("Process running with session ID {}", session_id.0)
}
};
let truncation_status = match self.original_token_count {
Some(tokens) => {
format!("\nWarning: truncated output (original token count: {tokens})")
}
None => "".to_string(),
};
format!(
r#"Wall time: {wall_time_secs:.3} seconds
{termination_status}{truncation_status}
Output:
{output}"#,
output = self.output
)
}
}
#[derive(Debug)]
pub enum ExitStatus {
Exited(i32),
Ongoing(SessionId),
}
pub fn result_into_payload(result: Result<ExecCommandOutput, String>) -> FunctionCallOutputPayload {
match result {
Ok(output) => FunctionCallOutputPayload {
content: output.to_text_output(),
success: Some(true),
},
Err(err) => FunctionCallOutputPayload {
content: err,
success: Some(false),
},
}
}
impl SessionManager {
/// Processes the request and is required to send a response via `outgoing`.
pub async fn handle_exec_command_request(
&self,
params: ExecCommandParams,
) -> Result<ExecCommandOutput, String> {
// Allocate a session id.
let session_id = SessionId(
self.next_session_id
.fetch_add(1, std::sync::atomic::Ordering::SeqCst),
);
let (session, mut exit_rx) =
create_exec_command_session(params.clone())
.await
.map_err(|err| {
format!(
"failed to create exec command session for session id {}: {err}",
session_id.0
)
})?;
// Insert into session map.
let mut output_rx = session.output_receiver();
self.sessions.lock().await.insert(session_id, session);
// Collect output until either timeout expires or process exits.
// Do not cap during collection; truncate at the end if needed.
// Use a modest initial capacity to avoid large preallocation.
let cap_bytes_u64 = params.max_output_tokens.saturating_mul(4);
let cap_bytes: usize = cap_bytes_u64.min(usize::MAX as u64) as usize;
let mut collected: Vec<u8> = Vec::with_capacity(4096);
let start_time = Instant::now();
let deadline = start_time + Duration::from_millis(params.yield_time_ms);
let mut exit_code: Option<i32> = None;
loop {
if Instant::now() >= deadline {
break;
}
let remaining = deadline.saturating_duration_since(Instant::now());
tokio::select! {
biased;
exit = &mut exit_rx => {
exit_code = exit.ok();
// Small grace period to pull remaining buffered output
let grace_deadline = Instant::now() + Duration::from_millis(25);
while Instant::now() < grace_deadline {
match timeout(Duration::from_millis(1), output_rx.recv()).await {
Ok(Ok(chunk)) => {
collected.extend_from_slice(&chunk);
}
Ok(Err(tokio::sync::broadcast::error::RecvError::Lagged(_))) => {
// Skip missed messages; keep trying within grace period.
continue;
}
Ok(Err(tokio::sync::broadcast::error::RecvError::Closed)) => break,
Err(_) => break,
}
}
break;
}
chunk = timeout(remaining, output_rx.recv()) => {
match chunk {
Ok(Ok(chunk)) => {
collected.extend_from_slice(&chunk);
}
Ok(Err(tokio::sync::broadcast::error::RecvError::Lagged(_))) => {
// Skip missed messages; continue collecting fresh output.
}
Ok(Err(tokio::sync::broadcast::error::RecvError::Closed)) => { break; }
Err(_) => { break; }
}
}
}
}
let output = String::from_utf8_lossy(&collected).to_string();
let exit_status = if let Some(code) = exit_code {
ExitStatus::Exited(code)
} else {
ExitStatus::Ongoing(session_id)
};
// If output exceeds cap, truncate the middle and record original token estimate.
let (output, original_token_count) = truncate_middle(&output, cap_bytes);
Ok(ExecCommandOutput {
wall_time: Instant::now().duration_since(start_time),
exit_status,
original_token_count,
output,
})
}
/// Write characters to a session's stdin and collect combined output for up to `yield_time_ms`.
pub async fn handle_write_stdin_request(
&self,
params: WriteStdinParams,
) -> Result<ExecCommandOutput, String> {
let WriteStdinParams {
session_id,
chars,
yield_time_ms,
max_output_tokens,
} = params;
// Grab handles without holding the sessions lock across await points.
let (writer_tx, mut output_rx) = {
let sessions = self.sessions.lock().await;
match sessions.get(&session_id) {
Some(session) => (session.writer_sender(), session.output_receiver()),
None => {
return Err(format!("unknown session id {}", session_id.0));
}
}
};
// Write stdin if provided.
if !chars.is_empty() && writer_tx.send(chars.into_bytes()).await.is_err() {
return Err("failed to write to stdin".to_string());
}
// Collect output up to yield_time_ms, truncating to max_output_tokens bytes.
let mut collected: Vec<u8> = Vec::with_capacity(4096);
let start_time = Instant::now();
let deadline = start_time + Duration::from_millis(yield_time_ms);
loop {
let now = Instant::now();
if now >= deadline {
break;
}
let remaining = deadline - now;
match timeout(remaining, output_rx.recv()).await {
Ok(Ok(chunk)) => {
// Collect all output within the time budget; truncate at the end.
collected.extend_from_slice(&chunk);
}
Ok(Err(tokio::sync::broadcast::error::RecvError::Lagged(_))) => {
// Skip missed messages; continue collecting fresh output.
}
Ok(Err(tokio::sync::broadcast::error::RecvError::Closed)) => break,
Err(_) => break, // timeout
}
}
// Return structured output, truncating middle if over cap.
let output = String::from_utf8_lossy(&collected).to_string();
let cap_bytes_u64 = max_output_tokens.saturating_mul(4);
let cap_bytes: usize = cap_bytes_u64.min(usize::MAX as u64) as usize;
let (output, original_token_count) = truncate_middle(&output, cap_bytes);
Ok(ExecCommandOutput {
wall_time: Instant::now().duration_since(start_time),
exit_status: ExitStatus::Ongoing(session_id),
original_token_count,
output,
})
}
}
/// Spawn PTY and child process per spawn_exec_command_session logic.
async fn create_exec_command_session(
params: ExecCommandParams,
) -> anyhow::Result<(ExecCommandSession, oneshot::Receiver<i32>)> {
let ExecCommandParams {
cmd,
yield_time_ms: _,
max_output_tokens: _,
shell,
login,
} = params;
// Use the native pty implementation for the system
let pty_system = native_pty_system();
// Create a new pty
let pair = pty_system.openpty(PtySize {
rows: 24,
cols: 80,
pixel_width: 0,
pixel_height: 0,
})?;
// Spawn a shell into the pty
let mut command_builder = CommandBuilder::new(shell);
let shell_mode_opt = if login { "-lc" } else { "-c" };
command_builder.arg(shell_mode_opt);
command_builder.arg(cmd);
let mut child = pair.slave.spawn_command(command_builder)?;
// Obtain a killer that can signal the process independently of `.wait()`.
let killer = child.clone_killer();
// Channel to forward write requests to the PTY writer.
let (writer_tx, mut writer_rx) = mpsc::channel::<Vec<u8>>(128);
// Broadcast for streaming PTY output to readers: subscribers receive from subscription time.
let (output_tx, _) = tokio::sync::broadcast::channel::<Vec<u8>>(256);
// Reader task: drain PTY and forward chunks to output channel.
let mut reader = pair.master.try_clone_reader()?;
let output_tx_clone = output_tx.clone();
let reader_handle = tokio::task::spawn_blocking(move || {
let mut buf = [0u8; 8192];
loop {
match reader.read(&mut buf) {
Ok(0) => break, // EOF
Ok(n) => {
// Forward to broadcast; best-effort if there are subscribers.
let _ = output_tx_clone.send(buf[..n].to_vec());
}
Err(ref e) if e.kind() == ErrorKind::Interrupted => {
// Retry on EINTR
continue;
}
Err(ref e) if e.kind() == ErrorKind::WouldBlock => {
// We're in a blocking thread; back off briefly and retry.
std::thread::sleep(Duration::from_millis(5));
continue;
}
Err(_) => break,
}
}
});
// Writer task: apply stdin writes to the PTY writer.
let writer = pair.master.take_writer()?;
let writer = Arc::new(StdMutex::new(writer));
let writer_handle = tokio::spawn({
let writer = writer.clone();
async move {
while let Some(bytes) = writer_rx.recv().await {
let writer = writer.clone();
// Perform blocking write on a blocking thread.
let _ = tokio::task::spawn_blocking(move || {
if let Ok(mut guard) = writer.lock() {
use std::io::Write;
let _ = guard.write_all(&bytes);
let _ = guard.flush();
}
})
.await;
}
}
});
// Keep the child alive until it exits, then signal exit code.
let (exit_tx, exit_rx) = oneshot::channel::<i32>();
let wait_handle = tokio::task::spawn_blocking(move || {
let code = match child.wait() {
Ok(status) => status.exit_code() as i32,
Err(_) => -1,
};
let _ = exit_tx.send(code);
});
// Create and store the session with channels.
let session = ExecCommandSession::new(
writer_tx,
output_tx,
killer,
reader_handle,
writer_handle,
wait_handle,
);
Ok((session, exit_rx))
}
/// Truncate the middle of a UTF-8 string to at most `max_bytes` bytes,
/// preserving the beginning and the end. Returns the possibly truncated
/// string and `Some(original_token_count)` (estimated at 4 bytes/token)
/// if truncation occurred; otherwise returns the original string and `None`.
fn truncate_middle(s: &str, max_bytes: usize) -> (String, Option<u64>) {
// No truncation needed
if s.len() <= max_bytes {
return (s.to_string(), None);
}
let est_tokens = (s.len() as u64).div_ceil(4);
if max_bytes == 0 {
// Cannot keep any content; still return a full marker (never truncated).
return (
format!("{} tokens truncated…", est_tokens),
Some(est_tokens),
);
}
// Helper to truncate a string to a given byte length on a char boundary.
fn truncate_on_boundary(input: &str, max_len: usize) -> &str {
if input.len() <= max_len {
return input;
}
let mut end = max_len;
while end > 0 && !input.is_char_boundary(end) {
end -= 1;
}
&input[..end]
}
// Given a left/right budget, prefer newline boundaries; otherwise fall back
// to UTF-8 char boundaries.
fn pick_prefix_end(s: &str, left_budget: usize) -> usize {
if let Some(head) = s.get(..left_budget)
&& let Some(i) = head.rfind('\n')
{
return i + 1; // keep the newline so suffix starts on a fresh line
}
truncate_on_boundary(s, left_budget).len()
}
fn pick_suffix_start(s: &str, right_budget: usize) -> usize {
let start_tail = s.len().saturating_sub(right_budget);
if let Some(tail) = s.get(start_tail..)
&& let Some(i) = tail.find('\n')
{
return start_tail + i + 1; // start after newline
}
// Fall back to a char boundary at or after start_tail.
let mut idx = start_tail.min(s.len());
while idx < s.len() && !s.is_char_boundary(idx) {
idx += 1;
}
idx
}
// Refine marker length and budgets until stable. Marker is never truncated.
let mut guess_tokens = est_tokens; // worst-case: everything truncated
for _ in 0..4 {
let marker = format!("{} tokens truncated…", guess_tokens);
let marker_len = marker.len();
let keep_budget = max_bytes.saturating_sub(marker_len);
if keep_budget == 0 {
// No room for any content within the cap; return a full, untruncated marker
// that reflects the entire truncated content.
return (
format!("{} tokens truncated…", est_tokens),
Some(est_tokens),
);
}
let left_budget = keep_budget / 2;
let right_budget = keep_budget - left_budget;
let prefix_end = pick_prefix_end(s, left_budget);
let mut suffix_start = pick_suffix_start(s, right_budget);
if suffix_start < prefix_end {
suffix_start = prefix_end;
}
let kept_content_bytes = prefix_end + (s.len() - suffix_start);
let truncated_content_bytes = s.len().saturating_sub(kept_content_bytes);
let new_tokens = (truncated_content_bytes as u64).div_ceil(4);
if new_tokens == guess_tokens {
let mut out = String::with_capacity(marker_len + kept_content_bytes + 1);
out.push_str(&s[..prefix_end]);
out.push_str(&marker);
// Place marker on its own line for symmetry when we keep line boundaries.
out.push('\n');
out.push_str(&s[suffix_start..]);
return (out, Some(est_tokens));
}
guess_tokens = new_tokens;
}
// Fallback: use last guess to build output.
let marker = format!("{} tokens truncated…", guess_tokens);
let marker_len = marker.len();
let keep_budget = max_bytes.saturating_sub(marker_len);
if keep_budget == 0 {
return (
format!("{} tokens truncated…", est_tokens),
Some(est_tokens),
);
}
let left_budget = keep_budget / 2;
let right_budget = keep_budget - left_budget;
let prefix_end = pick_prefix_end(s, left_budget);
let suffix_start = pick_suffix_start(s, right_budget);
let mut out = String::with_capacity(marker_len + prefix_end + (s.len() - suffix_start) + 1);
out.push_str(&s[..prefix_end]);
out.push_str(&marker);
out.push('\n');
out.push_str(&s[suffix_start..]);
(out, Some(est_tokens))
}
#[cfg(test)]
mod tests {
use super::*;
use crate::exec_command::session_id::SessionId;
/// Test that verifies that [`SessionManager::handle_exec_command_request()`]
/// and [`SessionManager::handle_write_stdin_request()`] work as expected
/// in the presence of a process that never terminates (but produces
/// output continuously).
#[cfg(unix)]
#[allow(clippy::print_stderr)]
#[tokio::test(flavor = "multi_thread", worker_threads = 4)]
async fn session_manager_streams_and_truncates_from_now() {
use crate::exec_command::exec_command_params::ExecCommandParams;
use crate::exec_command::exec_command_params::WriteStdinParams;
use tokio::time::sleep;
let session_manager = SessionManager::default();
// Long-running loop that prints an increasing counter every ~100ms.
// Use Python for a portable, reliable sleep across shells/PTYs.
let cmd = r#"python3 - <<'PY'
import sys, time
count = 0
while True:
print(count)
sys.stdout.flush()
count += 100
time.sleep(0.1)
PY"#
.to_string();
// Start the session and collect ~3s of output.
let params = ExecCommandParams {
cmd,
yield_time_ms: 3_000,
max_output_tokens: 1_000, // large enough to avoid truncation here
shell: "/bin/bash".to_string(),
login: false,
};
let initial_output = match session_manager
.handle_exec_command_request(params.clone())
.await
{
Ok(v) => v,
Err(e) => {
// PTY may be restricted in some sandboxes; skip in that case.
if e.contains("openpty") || e.contains("Operation not permitted") {
eprintln!("skipping test due to restricted PTY: {e}");
return;
}
panic!("exec request failed unexpectedly: {e}");
}
};
eprintln!("initial output: {initial_output:?}");
// Should be ongoing (we launched a never-ending loop).
let session_id = match initial_output.exit_status {
ExitStatus::Ongoing(id) => id,
_ => panic!("expected ongoing session"),
};
// Parse the numeric lines and get the max observed value in the first window.
let first_nums = extract_monotonic_numbers(&initial_output.output);
assert!(
!first_nums.is_empty(),
"expected some output from first window"
);
let first_max = *first_nums.iter().max().unwrap();
// Wait ~4s so counters progress while we're not reading.
sleep(Duration::from_millis(4_000)).await;
// Now read ~3s of output "from now" only.
// Use a small token cap so truncation occurs and we test middle truncation.
let write_params = WriteStdinParams {
session_id,
chars: String::new(),
yield_time_ms: 3_000,
max_output_tokens: 16, // 16 tokens ~= 64 bytes -> likely truncation
};
let second = session_manager
.handle_write_stdin_request(write_params)
.await
.expect("write stdin should succeed");
// Verify truncation metadata and size bound (cap is tokens*4 bytes).
assert!(second.original_token_count.is_some());
let cap_bytes = (16u64 * 4) as usize;
assert!(second.output.len() <= cap_bytes);
// New middle marker should be present.
assert!(
second.output.contains("tokens truncated") && second.output.contains('…'),
"expected truncation marker in output, got: {}",
second.output
);
// Minimal freshness check: the earliest number we see in the second window
// should be significantly larger than the last from the first window.
let second_nums = extract_monotonic_numbers(&second.output);
assert!(
!second_nums.is_empty(),
"expected some numeric output from second window"
);
let second_min = *second_nums.iter().min().unwrap();
// We slept 4 seconds (~40 ticks at 100ms/tick, each +100), so expect
// an increase of roughly 4000 or more. Allow a generous margin.
assert!(
second_min >= first_max + 2000,
"second_min={second_min} first_max={first_max}",
);
}
#[cfg(unix)]
fn extract_monotonic_numbers(s: &str) -> Vec<i64> {
s.lines()
.filter_map(|line| {
if !line.is_empty()
&& line.chars().all(|c| c.is_ascii_digit())
&& let Ok(n) = line.parse::<i64>()
{
// Our generator increments by 100; ignore spurious fragments.
if n % 100 == 0 {
return Some(n);
}
}
None
})
.collect()
}
#[test]
fn to_text_output_exited_no_truncation() {
let out = ExecCommandOutput {
wall_time: Duration::from_millis(1234),
exit_status: ExitStatus::Exited(0),
original_token_count: None,
output: "hello".to_string(),
};
let text = out.to_text_output();
let expected = r#"Wall time: 1.234 seconds
Process exited with code 0
Output:
hello"#;
assert_eq!(expected, text);
}
#[test]
fn to_text_output_ongoing_with_truncation() {
let out = ExecCommandOutput {
wall_time: Duration::from_millis(500),
exit_status: ExitStatus::Ongoing(SessionId(42)),
original_token_count: Some(1000),
output: "abc".to_string(),
};
let text = out.to_text_output();
let expected = r#"Wall time: 0.500 seconds
Process running with session ID 42
Warning: truncated output (original token count: 1000)
Output:
abc"#;
assert_eq!(expected, text);
}
#[test]
fn truncate_middle_no_newlines_fallback() {
// A long string with no newlines that exceeds the cap.
let s = "abcdefghijklmnopqrstuvwxyz0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ";
let max_bytes = 16; // force truncation
let (out, original) = truncate_middle(s, max_bytes);
// For very small caps, we return the full, untruncated marker,
// even if it exceeds the cap.
assert_eq!(out, "…16 tokens truncated…");
// Original string length is 62 bytes => ceil(62/4) = 16 tokens.
assert_eq!(original, Some(16));
}
#[test]
fn truncate_middle_prefers_newline_boundaries() {
// Build a multi-line string of 20 numbered lines (each "NNN\n").
let mut s = String::new();
for i in 1..=20 {
s.push_str(&format!("{i:03}\n"));
}
// Total length: 20 lines * 4 bytes per line = 80 bytes.
assert_eq!(s.len(), 80);
// Choose a cap that forces truncation while leaving room for
// a few lines on each side after accounting for the marker.
let max_bytes = 64;
// Expect exact output: first 4 lines, marker, last 4 lines, and correct token estimate (80/4 = 20).
assert_eq!(
truncate_middle(&s, max_bytes),
(
r#"001
002
003
004
…12 tokens truncated…
017
018
019
020
"#
.to_string(),
Some(20)
)
);
}
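// Illustrative sketch, not part of this change set: a hypothetical extra test
// exercising the UTF-8 char-boundary fallback when the input has no newlines.
// The exact split point is an implementation detail, so only coarse properties
// are asserted here.
#[test]
fn truncate_middle_multibyte_no_newlines_sketch() {
    // 40 copies of a 2-byte character => 80 bytes, no newline boundaries.
    let s = "é".repeat(40);
    let max_bytes = 64;
    let (out, original) = truncate_middle(&s, max_bytes);
    // Whole-string estimate: ceil(80 / 4) = 20 tokens.
    assert_eq!(original, Some(20));
    // The middle marker is present and both kept ends are intact text.
    assert!(out.contains("tokens truncated…"));
    assert!(out.starts_with('é') && out.ends_with('é'));
    assert!(out.len() <= max_bytes);
}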
}

View File

@@ -20,7 +20,6 @@ mod conversation_history;
mod environment_context;
pub mod error;
pub mod exec;
mod exec_command;
pub mod exec_env;
mod flags;
pub mod git_info;
@@ -63,3 +62,4 @@ pub use codex_protocol::protocol;
// Re-export protocol config enums to ensure call sites can use the same types
// as those in the protocol crate when constructing protocol messages.
pub use codex_protocol::config_types as protocol_config_types;
pub mod subagents;

View File

@@ -56,7 +56,6 @@ pub enum ConfigShellToolType {
DefaultShell,
ShellWithRequest { sandbox_policy: SandboxPolicy },
LocalShell,
StreamableShell,
}
#[derive(Debug, Clone)]
@@ -64,6 +63,7 @@ pub struct ToolsConfig {
pub shell_type: ConfigShellToolType,
pub plan_tool: bool,
pub apply_patch_tool_type: Option<ApplyPatchToolType>,
pub subagent_tool: bool,
}
impl ToolsConfig {
@@ -73,16 +73,14 @@ impl ToolsConfig {
sandbox_policy: SandboxPolicy,
include_plan_tool: bool,
include_apply_patch_tool: bool,
use_streamable_shell_tool: bool,
include_subagent_tool: bool,
) -> Self {
let mut shell_type = if use_streamable_shell_tool {
ConfigShellToolType::StreamableShell
} else if model_family.uses_local_shell_tool {
let mut shell_type = if model_family.uses_local_shell_tool {
ConfigShellToolType::LocalShell
} else {
ConfigShellToolType::DefaultShell
};
if matches!(approval_policy, AskForApproval::OnRequest) && !use_streamable_shell_tool {
if matches!(approval_policy, AskForApproval::OnRequest) {
shell_type = ConfigShellToolType::ShellWithRequest {
sandbox_policy: sandbox_policy.clone(),
}
@@ -104,6 +102,7 @@ impl ToolsConfig {
shell_type,
plan_tool: include_plan_tool,
apply_patch_tool_type,
subagent_tool: include_subagent_tool,
}
}
}
@@ -496,14 +495,6 @@ pub(crate) fn get_openai_tools(
ConfigShellToolType::LocalShell => {
tools.push(OpenAiTool::LocalShell {});
}
ConfigShellToolType::StreamableShell => {
tools.push(OpenAiTool::Function(
crate::exec_command::create_exec_command_tool_for_responses_api(),
));
tools.push(OpenAiTool::Function(
crate::exec_command::create_write_stdin_tool_for_responses_api(),
));
}
}
if config.plan_tool {
@@ -521,6 +512,12 @@ pub(crate) fn get_openai_tools(
}
}
if config.subagent_tool {
tracing::trace!("Adding subagent tool");
tools.push(crate::subagents::SUBAGENT_TOOL.clone());
tools.push(crate::subagents::SUBAGENT_LIST_TOOL.clone());
}
if let Some(mcp_tools) = mcp_tools {
for (name, tool) in mcp_tools {
match mcp_tool_to_openai_tool(name.clone(), tool.clone()) {
@@ -532,6 +529,7 @@ pub(crate) fn get_openai_tools(
}
}
tracing::trace!("Tools: {tools:?}");
tools
}
@@ -576,7 +574,7 @@ mod tests {
SandboxPolicy::ReadOnly,
true,
false,
/*use_experimental_streamable_shell_tool*/ false,
false,
);
let tools = get_openai_tools(&config, Some(HashMap::new()));
@@ -592,7 +590,7 @@ mod tests {
SandboxPolicy::ReadOnly,
true,
false,
/*use_experimental_streamable_shell_tool*/ false,
false,
);
let tools = get_openai_tools(&config, Some(HashMap::new()));
@@ -608,7 +606,7 @@ mod tests {
SandboxPolicy::ReadOnly,
false,
false,
/*use_experimental_streamable_shell_tool*/ false,
false,
);
let tools = get_openai_tools(
&config,
@@ -703,7 +701,7 @@ mod tests {
SandboxPolicy::ReadOnly,
false,
false,
/*use_experimental_streamable_shell_tool*/ false,
false,
);
let tools = get_openai_tools(
@@ -760,7 +758,7 @@ mod tests {
SandboxPolicy::ReadOnly,
false,
false,
/*use_experimental_streamable_shell_tool*/ false,
false,
);
let tools = get_openai_tools(
@@ -812,7 +810,7 @@ mod tests {
SandboxPolicy::ReadOnly,
false,
false,
/*use_experimental_streamable_shell_tool*/ false,
false,
);
let tools = get_openai_tools(
@@ -867,7 +865,7 @@ mod tests {
SandboxPolicy::ReadOnly,
false,
false,
/*use_experimental_streamable_shell_tool*/ false,
false,
);
let tools = get_openai_tools(

View File

@@ -0,0 +1,32 @@
use serde::Deserialize;
use std::fs;
use std::path::Path;
#[derive(Debug, Clone, Deserialize)]
pub struct SubagentDefinition {
pub name: String,
pub description: String,
/// Base instructions for this subagent.
pub instructions: String,
/// When not set, inherits the parent agent's tool set. When set to an
/// empty list, no tools are available to the subagent.
#[serde(default)]
pub tools: Option<Vec<String>>, // None => inherit; Some(vec) => allow-list
}
impl SubagentDefinition {
pub fn from_json_str(s: &str) -> Result<Self, serde_json::Error> {
serde_json::from_str::<Self>(s)
}
pub fn from_file(path: &Path) -> std::io::Result<Self> {
let contents = fs::read_to_string(path)?;
// Surface JSON parsing error with file context
serde_json::from_str::<Self>(&contents).map_err(|e| {
std::io::Error::new(
std::io::ErrorKind::InvalidData,
format!("invalid subagent JSON at {}: {e}", path.display()),
)
})
}
}
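// Illustrative sketch, not part of this change set: a hypothetical test showing
// the expected JSON shape of a definition. `tools` is optional and defaults to
// `None` (inherit the parent's tool set) when omitted.
#[cfg(test)]
mod definition_sketch_tests {
    use super::SubagentDefinition;

    #[test]
    fn parses_minimal_definition_without_tools() {
        let json = r#"{
            "name": "docs",
            "description": "Writes documentation",
            "instructions": "Summarize the requested module"
        }"#;
        let def = SubagentDefinition::from_json_str(json).expect("valid definition JSON");
        assert_eq!(def.name, "docs");
        assert!(def.tools.is_none());
    }
}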

View File

@@ -0,0 +1,6 @@
pub mod definition;
pub mod registry;
pub mod runner;
pub mod tool;
pub(crate) use tool::{SUBAGENT_LIST_TOOL, SUBAGENT_TOOL};

View File

@@ -0,0 +1,92 @@
use super::definition::SubagentDefinition;
use std::collections::HashMap;
use std::fs;
use std::path::Path;
use std::path::PathBuf;
#[derive(Debug, Default, Clone)]
pub struct SubagentRegistry {
/// Directory under the project (cwd/.codex/agents).
project_dir: Option<PathBuf>,
/// Directory under CODEX_HOME (~/.codex/agents).
user_dir: Option<PathBuf>,
/// Merged map: project definitions override user ones.
map: HashMap<String, SubagentDefinition>,
}
impl SubagentRegistry {
pub fn new(project_dir: Option<PathBuf>, user_dir: Option<PathBuf>) -> Self {
Self {
project_dir,
user_dir,
map: HashMap::new(),
}
}
/// Loads JSON files from user_dir then project_dir (project wins on conflict).
pub fn load(&mut self) {
let mut map: HashMap<String, SubagentDefinition> = HashMap::new();
// Load user definitions first
if let Some(dir) = &self.user_dir {
Self::load_from_dir_into(dir, &mut map);
}
// Then load project definitions which override on conflicts
if let Some(dir) = &self.project_dir {
Self::load_from_dir_into(dir, &mut map);
}
// Ensure a simple builtin test subagent exists to validate wiring end-to-end.
// Users can override this by providing their own definition named "hello".
if !map.contains_key("hello") {
map.insert(
"hello".to_string(),
SubagentDefinition {
name: "hello".to_string(),
description: "Builtin test subagent that replies with a greeting".to_string(),
// Keep instructions narrow so models reliably output the intended text.
instructions:
"Reply with exactly this text and nothing else: Hello from subagent"
.to_string(),
// Disallow tool usage for the hello subagent.
tools: Some(Vec::new()),
},
);
}
self.map = map;
}
pub fn get(&self, name: &str) -> Option<&SubagentDefinition> {
self.map.get(name)
}
pub fn all_names(&self) -> Vec<String> {
self.map.keys().cloned().collect()
}
fn load_from_dir_into(dir: &Path, out: &mut HashMap<String, SubagentDefinition>) {
let Ok(iter) = fs::read_dir(dir) else {
return;
};
for entry in iter.flatten() {
let path = entry.path();
if path.is_file()
&& path
.extension()
.and_then(|e| e.to_str())
.map(|e| e.eq_ignore_ascii_case("json"))
.unwrap_or(false)
{
match SubagentDefinition::from_file(&path) {
Ok(def) => {
out.insert(def.name.clone(), def);
}
Err(e) => {
tracing::warn!("Failed to load subagent from {}: {}", path.display(), e);
}
}
}
}
}
}
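// Illustrative sketch, not part of this change set: a hypothetical test showing
// that `load()` tolerates missing directories and always registers the builtin
// "hello" subagent unless a user/project definition overrides it.
#[cfg(test)]
mod registry_sketch_tests {
    use super::SubagentRegistry;
    use std::path::PathBuf;

    #[test]
    fn builtin_hello_is_registered_even_without_agent_dirs() {
        let mut registry = SubagentRegistry::new(
            Some(PathBuf::from("/nonexistent/.codex/agents")),
            None,
        );
        registry.load();
        assert!(registry.get("hello").is_some());
        assert!(registry.all_names().contains(&"hello".to_string()));
    }
}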

View File

@@ -0,0 +1,142 @@
use crate::codex::Codex;
use crate::error::Result as CodexResult;
use super::definition::SubagentDefinition;
use super::registry::SubagentRegistry;
/// Arguments expected for the `subagent.run` tool.
#[derive(serde::Deserialize)]
pub struct RunSubagentArgs {
pub name: String,
pub input: String,
#[serde(default)]
pub context: Option<String>,
}
/// Run a subagent in a nested Codex session and return the final message.
pub(crate) async fn run(
sess: &crate::codex::Session,
turn_context: &crate::codex::TurnContext,
registry: &SubagentRegistry,
args: RunSubagentArgs,
_parent_sub_id: &str,
) -> CodexResult<String> {
let def: &SubagentDefinition = registry.get(&args.name).ok_or_else(|| {
crate::error::CodexErr::Stream(format!("unknown subagent: {}", args.name), None)
})?;
let mut nested_cfg = (*sess.base_config()).clone();
nested_cfg.base_instructions = Some(def.instructions.clone());
nested_cfg.user_instructions = None;
nested_cfg.approval_policy = turn_context.approval_policy;
nested_cfg.sandbox_policy = turn_context.sandbox_policy.clone();
nested_cfg.cwd = turn_context.cwd.clone();
nested_cfg.include_subagent_tool = false;
let nested = Codex::spawn(nested_cfg, sess.auth_manager(), None).await?;
let nested_codex = nested.codex;
let subagent_id = uuid::Uuid::new_v4().to_string();
forward_begin(sess, _parent_sub_id, &subagent_id, &def.name).await;
let text = match args.context {
Some(ctx) if !ctx.trim().is_empty() => format!("{ctx}\n\n{input}", input = args.input),
_ => args.input,
};
nested_codex
.submit(crate::protocol::Op::UserInput {
items: vec![crate::protocol::InputItem::Text { text }],
})
.await
.map_err(|e| {
crate::error::CodexErr::Stream(format!("failed to submit to subagent: {e}"), None)
})?;
let mut last_message: Option<String> = None;
loop {
let ev = nested_codex.next_event().await?;
match ev.msg.clone() {
crate::protocol::EventMsg::AgentMessage(m) => {
last_message = Some(m.message);
}
crate::protocol::EventMsg::TaskComplete(t) => {
let _ = nested_codex.submit(crate::protocol::Op::Shutdown).await;
forward_forwarded(sess, _parent_sub_id, &subagent_id, &def.name, ev.msg).await;
forward_end(
sess,
_parent_sub_id,
&subagent_id,
&def.name,
true,
t.last_agent_message.clone(),
)
.await;
return Ok(t
.last_agent_message
.unwrap_or_else(|| last_message.unwrap_or_default()));
}
_ => {}
}
forward_forwarded(sess, _parent_sub_id, &subagent_id, &def.name, ev.msg).await;
}
}
async fn forward_begin(
sess: &crate::codex::Session,
parent_sub_id: &str,
subagent_id: &str,
name: &str,
) {
sess
.send_event(crate::protocol::Event {
id: parent_sub_id.to_string(),
msg: crate::protocol::EventMsg::SubagentBegin(crate::protocol::SubagentBeginEvent {
subagent_id: subagent_id.to_string(),
name: name.to_string(),
}),
})
.await;
}
async fn forward_forwarded(
sess: &crate::codex::Session,
parent_sub_id: &str,
subagent_id: &str,
name: &str,
msg: crate::protocol::EventMsg,
) {
sess
.send_event(crate::protocol::Event {
id: parent_sub_id.to_string(),
msg: crate::protocol::EventMsg::SubagentForwarded(
crate::protocol::SubagentForwardedEvent {
subagent_id: subagent_id.to_string(),
name: name.to_string(),
event: Box::new(msg),
},
),
})
.await;
}
async fn forward_end(
sess: &crate::codex::Session,
parent_sub_id: &str,
subagent_id: &str,
name: &str,
success: bool,
last_agent_message: Option<String>,
) {
sess
.send_event(crate::protocol::Event {
id: parent_sub_id.to_string(),
msg: crate::protocol::EventMsg::SubagentEnd(crate::protocol::SubagentEndEvent {
subagent_id: subagent_id.to_string(),
name: name.to_string(),
success,
last_agent_message,
}),
})
.await;
}
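// Illustrative sketch, not part of this change set: a hypothetical test showing
// how `subagent_run` tool-call arguments map onto `RunSubagentArgs`; `context`
// is optional and defaults to `None`.
#[cfg(test)]
mod runner_args_sketch_tests {
    use super::RunSubagentArgs;

    #[test]
    fn run_args_parse_without_context() {
        let args: RunSubagentArgs =
            serde_json::from_str(r#"{"name": "hello", "input": "say hi"}"#)
                .expect("arguments matching the subagent_run schema");
        assert_eq!(args.name, "hello");
        assert_eq!(args.input, "say hi");
        assert!(args.context.is_none());
    }
}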

View File

@@ -0,0 +1,54 @@
use std::collections::BTreeMap;
use std::sync::LazyLock;
use crate::openai_tools::JsonSchema;
use crate::openai_tools::OpenAiTool;
use crate::openai_tools::ResponsesApiTool;
pub(crate) static SUBAGENT_TOOL: LazyLock<OpenAiTool> = LazyLock::new(|| {
let mut properties = BTreeMap::new();
properties.insert(
"name".to_string(),
JsonSchema::String {
description: Some("Registered subagent name".to_string()),
},
);
properties.insert(
"input".to_string(),
JsonSchema::String {
description: Some("Task or instruction for the subagent".to_string()),
},
);
properties.insert(
"context".to_string(),
JsonSchema::String {
description: Some("Optional extra context to aid the task".to_string()),
},
);
OpenAiTool::Function(ResponsesApiTool {
name: "subagent_run".to_string(),
description: "Invoke a named subagent with isolated context and return its result"
.to_string(),
strict: false,
parameters: JsonSchema::Object {
properties,
required: Some(vec!["name".to_string(), "input".to_string()]),
additional_properties: Some(false),
},
})
});
pub(crate) static SUBAGENT_LIST_TOOL: LazyLock<OpenAiTool> = LazyLock::new(|| {
let properties = BTreeMap::new();
OpenAiTool::Function(ResponsesApiTool {
name: "subagent_list".to_string(),
description: "List available subagents (name and description). Call before subagent_run if unsure.".to_string(),
strict: false,
parameters: JsonSchema::Object {
properties,
required: None,
additional_properties: Some(false),
},
})
});

View File

@@ -70,12 +70,12 @@ async fn truncates_output_lines() {
let output = run_test_cmd(tmp, cmd).await.unwrap();
let expected_output = (1..=256)
let expected_output = (1..=300)
.map(|i| format!("{i}\n"))
.collect::<Vec<_>>()
.join("");
assert_eq!(output.stdout.text, expected_output);
assert_eq!(output.stdout.truncated_after_lines, Some(256));
assert_eq!(output.stdout.truncated_after_lines, None);
}
/// Command succeeds with exit code 0 normally
@@ -91,8 +91,8 @@ async fn truncates_output_bytes() {
let output = run_test_cmd(tmp, cmd).await.unwrap();
assert_eq!(output.stdout.text.len(), 10240);
assert_eq!(output.stdout.truncated_after_lines, Some(10));
assert!(output.stdout.text.len() >= 15000);
assert_eq!(output.stdout.truncated_after_lines, None);
}
/// Command not found returns exit code 127, this is not considered a sandbox error

View File

@@ -139,3 +139,34 @@ async fn test_exec_stderr_stream_events_echo() {
}
assert_eq!(String::from_utf8_lossy(&err), "oops\n");
}
#[tokio::test]
async fn test_aggregated_output_interleaves_in_order() {
// Spawn a shell that alternates stdout and stderr with sleeps to enforce order.
let cmd = vec![
"/bin/sh".to_string(),
"-c".to_string(),
"printf 'O1\\n'; sleep 0.01; printf 'E1\\n' 1>&2; sleep 0.01; printf 'O2\\n'; sleep 0.01; printf 'E2\\n' 1>&2".to_string(),
];
let params = ExecParams {
command: cmd,
cwd: std::env::current_dir().unwrap_or_else(|_| PathBuf::from(".")),
timeout_ms: Some(5_000),
env: HashMap::new(),
with_escalated_permissions: None,
justification: None,
};
let policy = SandboxPolicy::new_read_only_policy();
let result = process_exec_tool_call(params, SandboxType::None, &policy, &None, None)
.await
.expect("process_exec_tool_call");
assert_eq!(result.exit_code, 0);
assert_eq!(result.stdout.text, "O1\nO2\n");
assert_eq!(result.stderr.text, "E1\nE2\n");
assert_eq!(result.aggregated_output.text, "O1\nE1\nO2\nE2\n");
assert_eq!(result.aggregated_output.truncated_after_lines, None);
}

View File

@@ -168,6 +168,15 @@ impl EventProcessor for EventProcessorWithHumanOutput {
fn process_event(&mut self, event: Event) -> CodexStatus {
let Event { id: _, msg } = event;
match msg {
EventMsg::SubagentBegin(_) => {
// Ignore in human output for now.
}
EventMsg::SubagentForwarded(_) => {
// Ignore; TUI will render forwarded events.
}
EventMsg::SubagentEnd(_) => {
// Ignore in human output for now.
}
EventMsg::Error(ErrorEvent { message }) => {
let prefix = "ERROR:".style(self.red);
ts_println!(self, "{prefix} {message}");
@@ -287,8 +296,7 @@ impl EventProcessor for EventProcessorWithHumanOutput {
EventMsg::ExecCommandOutputDelta(_) => {}
EventMsg::ExecCommandEnd(ExecCommandEndEvent {
call_id,
stdout,
stderr,
aggregated_output,
duration,
exit_code,
..
@@ -304,8 +312,7 @@ impl EventProcessor for EventProcessorWithHumanOutput {
("".to_string(), format!("exec('{call_id}')"))
};
let output = if exit_code == 0 { stdout } else { stderr };
let truncated_output = output
let truncated_output = aggregated_output
.lines()
.take(MAX_OUTPUT_LINES_FOR_EXEC_TOOL_CALL)
.collect::<Vec<_>>()

View File

@@ -41,6 +41,12 @@ impl EventProcessor for EventProcessorWithJsonOutput {
fn process_event(&mut self, event: Event) -> CodexStatus {
match event.msg {
EventMsg::SubagentBegin(_)
| EventMsg::SubagentForwarded(_)
| EventMsg::SubagentEnd(_) => {
// Ignored for JSON output in exec for now.
CodexStatus::Running
}
EventMsg::AgentMessageDelta(_) | EventMsg::AgentReasoningDelta(_) => {
// Suppress streaming events in JSON mode.
CodexStatus::Running

View File

@@ -146,6 +146,7 @@ pub async fn run_main(cli: Cli, codex_linux_sandbox_exe: Option<PathBuf>) -> any
model_provider,
codex_linux_sandbox_exe,
base_instructions: None,
include_subagent_tool: None,
include_plan_tool: None,
include_apply_patch_tool: None,
disable_response_storage: oss.then_some(true),

View File

@@ -736,6 +736,7 @@ fn derive_config_from_params(
base_instructions,
include_plan_tool,
include_apply_patch_tool,
include_subagent_tool: None,
disable_response_storage: None,
show_raw_agent_reasoning: None,
};

View File

@@ -161,6 +161,7 @@ impl CodexToolCallParam {
base_instructions,
include_plan_tool,
include_apply_patch_tool: None,
include_subagent_tool: None,
disable_response_storage: None,
show_raw_agent_reasoning: None,
};

View File

@@ -174,6 +174,11 @@ async fn run_codex_tool_session_inner(
.await;
match event.msg {
EventMsg::SubagentBegin(_)
| EventMsg::SubagentForwarded(_)
| EventMsg::SubagentEnd(_) => {
// Ignore subagent orchestration for MCP echoing.
}
EventMsg::ExecApprovalRequest(ExecApprovalRequestEvent {
command,
cwd,

View File

@@ -478,6 +478,14 @@ pub enum EventMsg {
ShutdownComplete,
ConversationHistory(ConversationHistoryResponseEvent),
// --- Subagent orchestration events ---
/// Emitted when a subagent starts.
SubagentBegin(SubagentBeginEvent),
/// Forwards a nested event produced by a running subagent.
SubagentForwarded(SubagentForwardedEvent),
/// Emitted when a subagent finishes.
SubagentEnd(SubagentEndEvent),
}
// Individual event payload types matching each `EventMsg` variant.
@@ -501,6 +509,28 @@ pub struct TokenUsage {
pub total_tokens: u64,
}
#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct SubagentBeginEvent {
pub subagent_id: String,
pub name: String,
}
#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct SubagentEndEvent {
pub subagent_id: String,
pub name: String,
pub success: bool,
#[serde(skip_serializing_if = "Option::is_none")]
pub last_agent_message: Option<String>,
}
#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct SubagentForwardedEvent {
pub subagent_id: String,
pub name: String,
pub event: Box<EventMsg>,
}
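// Illustrative sketch, not part of this change set (and assuming `serde_json`
// is available to this crate's tests): `last_agent_message` is skipped when it
// is `None`, so consumers never see a `null` field for it.
#[cfg(test)]
mod subagent_event_serde_sketch {
    use super::SubagentEndEvent;

    #[test]
    fn subagent_end_omits_missing_last_message() {
        let ev = SubagentEndEvent {
            subagent_id: "sa-1".to_string(),
            name: "hello".to_string(),
            success: true,
            last_agent_message: None,
        };
        let value = serde_json::to_value(&ev).expect("serialize");
        assert!(value.get("last_agent_message").is_none());
        assert_eq!(value["name"], "hello");
    }
}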
impl TokenUsage {
pub fn is_zero(&self) -> bool {
self.total_tokens == 0
@@ -685,6 +715,9 @@ pub struct ExecCommandEndEvent {
pub stdout: String,
/// Captured stderr
pub stderr: String,
/// Captured aggregated output
#[serde(default)]
pub aggregated_output: String,
/// The command's exit code.
pub exit_code: i32,
/// The duration of the command execution.

View File

@@ -1130,8 +1130,6 @@ impl WidgetRef for &ChatComposer {
Span::from(" send "),
newline_hint_key.set_style(key_hint_style),
Span::from(" newline "),
"Ctrl+T".set_style(key_hint_style),
Span::from(" transcript "),
"Ctrl+C".set_style(key_hint_style),
Span::from(" quit"),
]

View File

@@ -11,4 +11,4 @@ expression: terminal.backend()
"▌ "
"▌ "
"▌ "
" ⏎ send Ctrl+J newline Ctrl+T transcript Ctrl+C quit "
" ⏎ send Ctrl+J newline Ctrl+C quit "

View File

@@ -11,4 +11,4 @@ expression: terminal.backend()
"▌ "
"▌ "
"▌ "
" ⏎ send Ctrl+J newline Ctrl+T transcript Ctrl+C quit "
" ⏎ send Ctrl+J newline Ctrl+C quit "

View File

@@ -11,4 +11,4 @@ expression: terminal.backend()
"▌ "
"▌ "
"▌ "
" ⏎ send Ctrl+J newline Ctrl+T transcript Ctrl+C quit "
" ⏎ send Ctrl+J newline Ctrl+C quit "

View File

@@ -11,4 +11,4 @@ expression: terminal.backend()
"▌ "
"▌ "
"▌ "
" ⏎ send Ctrl+J newline Ctrl+T transcript Ctrl+C quit "
" ⏎ send Ctrl+J newline Ctrl+C quit "

View File

@@ -11,4 +11,4 @@ expression: terminal.backend()
"▌ "
"▌ "
"▌ "
" ⏎ send Ctrl+J newline Ctrl+T transcript Ctrl+C quit "
" ⏎ send Ctrl+J newline Ctrl+C quit "

View File

@@ -26,7 +26,6 @@ use codex_core::protocol::PatchApplyBeginEvent;
use codex_core::protocol::StreamErrorEvent;
use codex_core::protocol::TaskCompleteEvent;
use codex_core::protocol::TokenUsage;
use codex_core::protocol::TurnAbortReason;
use codex_core::protocol::TurnDiffEvent;
use codex_protocol::parse_command::ParsedCommand;
use crossterm::event::KeyEvent;
@@ -821,14 +820,7 @@ impl ChatWidget {
EventMsg::TaskComplete(TaskCompleteEvent { .. }) => self.on_task_complete(),
EventMsg::TokenCount(token_usage) => self.on_token_count(token_usage),
EventMsg::Error(ErrorEvent { message }) => self.on_error(message),
EventMsg::TurnAborted(ev) => match ev.reason {
TurnAbortReason::Interrupted => {
self.on_error("Tell the model what to do differently".to_owned())
}
TurnAbortReason::Replaced => {
self.on_error("Turn aborted: replaced by a new task".to_owned())
}
},
EventMsg::TurnAborted(_) => self.on_error("Turn interrupted".to_owned()),
EventMsg::PlanUpdate(update) => self.on_plan_update(update),
EventMsg::ExecApprovalRequest(ev) => self.on_exec_approval_request(id, ev),
EventMsg::ApplyPatchApprovalRequest(ev) => self.on_apply_patch_approval_request(id, ev),
@@ -844,8 +836,39 @@ impl ChatWidget {
EventMsg::ShutdownComplete => self.on_shutdown_complete(),
EventMsg::TurnDiff(TurnDiffEvent { unified_diff }) => self.on_turn_diff(unified_diff),
EventMsg::BackgroundEvent(BackgroundEventEvent { message }) => {
// Also show background logs in the transcript for visibility.
self.add_to_history(history_cell::new_log_line(message.clone()));
self.on_background_event(message)
}
EventMsg::SubagentBegin(ev) => {
let msg = format!("subagent begin: {} ({})", ev.name, ev.subagent_id);
self.add_to_history(history_cell::new_log_line(msg));
}
EventMsg::SubagentForwarded(ev) => {
// Summarize forwarded event type; include message text when it is AgentMessage.
match *ev.event {
EventMsg::AgentMessage(AgentMessageEvent { message }) => {
let msg = format!("subagent {}: {}", ev.name, message);
self.add_to_history(history_cell::new_log_line(msg));
}
EventMsg::AgentMessageDelta(AgentMessageDeltaEvent { ref delta }) => {
let msg = format!("subagent {}: {}", ev.name, delta);
self.add_to_history(history_cell::new_log_line(msg));
}
ref other => {
let msg = format!("subagent {} forwarded: {:?}", ev.name, other);
self.add_to_history(history_cell::new_log_line(msg));
}
}
}
EventMsg::SubagentEnd(ev) => {
let summary = ev.last_agent_message.as_deref().unwrap_or("");
let msg = format!(
"subagent end: {} ({}) success={} {}",
ev.name, ev.subagent_id, ev.success, summary
);
self.add_to_history(history_cell::new_log_line(msg));
}
EventMsg::StreamError(StreamErrorEvent { message }) => self.on_stream_error(message),
EventMsg::ConversationHistory(_) => {}
}

View File

@@ -263,6 +263,7 @@ fn exec_history_cell_shows_working_then_completed() {
call_id: "call-1".into(),
stdout: "done".into(),
stderr: String::new(),
aggregated_output: "done".into(),
exit_code: 0,
duration: std::time::Duration::from_millis(5),
formatted_output: "done".into(),
@@ -313,6 +314,7 @@ fn exec_history_cell_shows_working_then_failed() {
call_id: "call-2".into(),
stdout: String::new(),
stderr: "error".into(),
aggregated_output: "error".into(),
exit_code: 2,
duration: std::time::Duration::from_millis(7),
formatted_output: "".into(),
@@ -361,6 +363,7 @@ fn exec_history_extends_previous_when_consecutive() {
call_id: "call-a".into(),
stdout: "one".into(),
stderr: String::new(),
aggregated_output: "one".into(),
exit_code: 0,
duration: std::time::Duration::from_millis(5),
formatted_output: "one".into(),
@@ -390,6 +393,7 @@ fn exec_history_extends_previous_when_consecutive() {
call_id: "call-b".into(),
stdout: "two".into(),
stderr: String::new(),
aggregated_output: "two".into(),
exit_code: 0,
duration: std::time::Duration::from_millis(5),
formatted_output: "two".into(),

View File

@@ -750,6 +750,12 @@ pub(crate) fn new_status_output(
PlainHistoryCell { lines }
}
/// Simple one-line log entry (dim) to surface traces and diagnostics in the transcript.
pub(crate) fn new_log_line(message: String) -> TranscriptOnlyHistoryCell {
let lines: Vec<Line<'static>> = vec![Line::from(""), Line::from(message).dim()];
TranscriptOnlyHistoryCell { lines }
}
/// Render a summary of configured MCP servers from the current `Config`.
pub(crate) fn empty_mcp_output() -> PlainHistoryCell {
let lines: Vec<Line<'static>> = vec![

View File

@@ -124,6 +124,7 @@ pub async fn run_main(
config_profile: cli.config_profile.clone(),
codex_linux_sandbox_exe,
base_instructions: None,
include_subagent_tool: None,
include_plan_tool: Some(true),
include_apply_patch_tool: None,
disable_response_storage: cli.oss.then_some(true),

View File

@@ -74,17 +74,6 @@ static COMMAND_SELECT_OPTIONS: LazyLock<Vec<SelectOption>> = LazyLock::new(|| {
key: KeyCode::Char('n'),
decision: ReviewDecision::Denied,
},
SelectOption {
label: Line::from(vec![
"No, ".into(),
"provide ".into(),
"f".underlined(),
"eedback".into(),
]),
description: "Do not run the command; provide feedback",
key: KeyCode::Char('f'),
decision: ReviewDecision::Abort,
},
]
});
@@ -102,17 +91,6 @@ static PATCH_SELECT_OPTIONS: LazyLock<Vec<SelectOption>> = LazyLock::new(|| {
key: KeyCode::Char('n'),
decision: ReviewDecision::Denied,
},
SelectOption {
label: Line::from(vec![
"No, ".into(),
"provide ".into(),
"f".underlined(),
"eedback".into(),
]),
description: "Do not apply the changes; provide feedback",
key: KeyCode::Char('f'),
decision: ReviewDecision::Abort,
},
]
});

View File

@@ -9,7 +9,7 @@ codex
I'm going to scan the workspace and Cargo manifests to see build profiles and
dependencies that impact binary size. Then I'll summarize the main causes.
>_
_
✓ ls -la
└ total 6696
drwxr-xr-x@ 39 easong staff 1248 Aug 9 08:49 .
@@ -205,4 +205,4 @@ assertions—outputs are much larger than cargo build --release.
If you want, I can outline targeted trims (e.g., strip = "debuginfo", opt-level
= "z", panic abort, tighter tokio/reqwest features) and estimate impact per
binary.
binary.