Merge remote-tracking branch 'origin/main' into multiple-slash-commands

This commit is contained in:
easong-openai
2025-08-02 12:05:29 -07:00
34 changed files with 1515 additions and 69 deletions

View File

@@ -2,7 +2,9 @@
In the codex-rs folder where the rust code lives:
- Never add or modify any code related to `CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR`. You operate in a sandbox where `CODEX_SANDBOX_NETWORK_DISABLED=1` will be set whenever you use the `shell` tool. Any existing code that uses `CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR` was authored with this fact in mind. It is often used to early exit out of tests that the author knew you would not be able to run given your sandbox limitations.
- Never add or modify any code related to `CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR` or `CODEX_SANDBOX_ENV_VAR`.
- You operate in a sandbox where `CODEX_SANDBOX_NETWORK_DISABLED=1` will be set whenever you use the `shell` tool. Any existing code that uses `CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR` was authored with this fact in mind. It is often used to early exit out of tests that the author knew you would not be able to run given your sandbox limitations.
- Similarly, when you spawn a process using Seatbelt (`/usr/bin/sandbox-exec`), `CODEX_SANDBOX=seatbelt` will be set on the child process. Integration tests that want to run Seatbelt themselves cannot be run under Seatbelt, so checks for `CODEX_SANDBOX=seatbelt` are also often used to early exit out of tests, as appropriate.
Before creating a pull request with changes to `codex-rs`, run `just fmt` (in the `codex-rs` directory) to format the code, run `just fix` (in the `codex-rs` directory) to fix any linter issues, and ensure the test suite passes by running `cargo test --all-features` in the `codex-rs` directory.

View File

@@ -83,6 +83,7 @@ if (wantsNative && process.platform !== 'win32') {
const child = spawn(binaryPath, process.argv.slice(2), {
stdio: "inherit",
env: { ...process.env, CODEX_MANAGED_BY_NPM: "1" },
});
child.on("error", (err) => {

13
codex-rs/Cargo.lock generated
View File

@@ -695,6 +695,7 @@ dependencies = [
"reqwest",
"seccompiler",
"serde",
"serde_bytes",
"serde_json",
"sha1",
"shlex",
@@ -842,6 +843,7 @@ version = "0.0.0"
dependencies = [
"anyhow",
"base64 0.22.1",
"chrono",
"clap",
"codex-ansi-escape",
"codex-arg0",
@@ -860,6 +862,8 @@ dependencies = [
"ratatui",
"ratatui-image",
"regex-lite",
"reqwest",
"serde",
"serde_json",
"shlex",
"strum 0.27.2",
@@ -3942,6 +3946,15 @@ dependencies = [
"serde_derive",
]
[[package]]
name = "serde_bytes"
version = "0.11.17"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8437fd221bde2d4ca316d61b90e337e9e702b3820b87d63caa9ba6c02bd06d96"
dependencies = [
"serde",
]
[[package]]
name = "serde_derive"
version = "1.0.219"

View File

@@ -7,6 +7,7 @@ pub fn summarize_sandbox_policy(sandbox_policy: &SandboxPolicy) -> String {
SandboxPolicy::WorkspaceWrite {
writable_roots,
network_access,
include_default_writable_roots,
} => {
let mut summary = "workspace-write".to_string();
if !writable_roots.is_empty() {
@@ -19,6 +20,9 @@ pub fn summarize_sandbox_policy(sandbox_policy: &SandboxPolicy) -> String {
.join(", ")
));
}
if !*include_default_writable_roots {
summary.push_str(" (exact writable roots)");
}
if *network_access {
summary.push_str(" (network access enabled)");
}

View File

@@ -259,6 +259,8 @@ disk, but attempts to write a file or access the network will be blocked.
A more relaxed policy is `workspace-write`. When specified, the current working directory for the Codex task will be writable (as well as `$TMPDIR` on macOS). Note that the CLI defaults to using the directory where it was spawned as `cwd`, though this can be overridden using `--cwd/-C`.
On macOS (and soon Linux), all writable roots (including `cwd`) that contain a `.git/` folder _as an immediate child_ will configure the `.git/` folder to be read-only while the rest of the Git repository will be writable. This means that commands like `git commit` will fail by default (as they entail writing to `.git/`) and will require Codex to ask for permission.
```toml
# same as `--sandbox workspace-write`
sandbox_mode = "workspace-write"

View File

@@ -31,6 +31,7 @@ rand = "0.9"
reqwest = { version = "0.12", features = ["json", "stream"] }
serde = { version = "1", features = ["derive"] }
serde_json = "1"
serde_bytes = "0.11"
sha1 = "0.10.6"
shlex = "1.3.0"
strum_macros = "0.27.2"

View File

@@ -48,6 +48,7 @@ use crate::error::SandboxErr;
use crate::exec::ExecParams;
use crate::exec::ExecToolCallOutput;
use crate::exec::SandboxType;
use crate::exec::StdoutStream;
use crate::exec::process_exec_tool_call;
use crate::exec_env::create_env;
use crate::mcp_connection_manager::McpConnectionManager;
@@ -1366,7 +1367,7 @@ async fn run_compact_task(
let mut retries = 0;
loop {
let attempt_result = drain_to_completed(&sess, &prompt).await;
let attempt_result = drain_to_completed(&sess, &sub_id, &prompt).await;
match attempt_result {
Ok(()) => break,
@@ -1753,6 +1754,11 @@ async fn handle_container_exec_with_params(
sess.ctrl_c.clone(),
&sess.sandbox_policy,
&sess.codex_linux_sandbox_exe,
Some(StdoutStream {
sub_id: sub_id.clone(),
call_id: call_id.clone(),
tx_event: sess.tx_event.clone(),
}),
)
.await;
@@ -1873,6 +1879,11 @@ async fn handle_sandbox_error(
sess.ctrl_c.clone(),
&sess.sandbox_policy,
&sess.codex_linux_sandbox_exe,
Some(StdoutStream {
sub_id: sub_id.clone(),
call_id: call_id.clone(),
tx_event: sess.tx_event.clone(),
}),
)
.await;
@@ -1984,7 +1995,7 @@ fn get_last_assistant_message_from_turn(responses: &[ResponseItem]) -> Option<St
})
}
async fn drain_to_completed(sess: &Session, prompt: &Prompt) -> CodexResult<()> {
async fn drain_to_completed(sess: &Session, sub_id: &str, prompt: &Prompt) -> CodexResult<()> {
let mut stream = sess.client.clone().stream(prompt).await?;
loop {
let maybe_event = stream.next().await;
@@ -1994,7 +2005,32 @@ async fn drain_to_completed(sess: &Session, prompt: &Prompt) -> CodexResult<()>
));
};
match event {
Ok(ResponseEvent::Completed { .. }) => return Ok(()),
Ok(ResponseEvent::OutputItemDone(item)) => {
// Record only to in-memory conversation history; avoid state snapshot.
let mut state = sess.state.lock().unwrap();
state.history.record_items(std::slice::from_ref(&item));
}
Ok(ResponseEvent::Completed {
response_id: _,
token_usage,
}) => {
let token_usage = match token_usage {
Some(usage) => usage,
None => {
return Err(CodexErr::Stream(
"token_usage was None in ResponseEvent::Completed".into(),
));
}
};
sess.tx_event
.send(Event {
id: sub_id.to_string(),
msg: EventMsg::TokenCount(token_usage),
})
.await
.ok();
return Ok(());
}
Ok(_) => continue,
Err(e) => return Err(e),
}

View File

@@ -383,6 +383,7 @@ impl ConfigToml {
Some(s) => SandboxPolicy::WorkspaceWrite {
writable_roots: s.writable_roots.clone(),
network_access: s.network_access,
include_default_writable_roots: true,
},
None => SandboxPolicy::new_workspace_write_policy(),
},
@@ -754,6 +755,7 @@ writable_roots = [
SandboxPolicy::WorkspaceWrite {
writable_roots: vec![PathBuf::from("/tmp")],
network_access: false,
include_default_writable_roots: true,
},
sandbox_workspace_write_cfg.derive_sandbox_policy(sandbox_mode_override)
);

View File

@@ -10,6 +10,7 @@ use std::sync::Arc;
use std::time::Duration;
use std::time::Instant;
use async_channel::Sender;
use tokio::io::AsyncRead;
use tokio::io::AsyncReadExt;
use tokio::io::BufReader;
@@ -19,10 +20,15 @@ use tokio::sync::Notify;
use crate::error::CodexErr;
use crate::error::Result;
use crate::error::SandboxErr;
use crate::protocol::Event;
use crate::protocol::EventMsg;
use crate::protocol::ExecCommandOutputDeltaEvent;
use crate::protocol::ExecOutputStream;
use crate::protocol::SandboxPolicy;
use crate::seatbelt::spawn_command_under_seatbelt;
use crate::spawn::StdioPolicy;
use crate::spawn::spawn_child_async;
use serde_bytes::ByteBuf;
// Maximum we send for each stream, which is either:
// - 10KiB OR
@@ -56,18 +62,26 @@ pub enum SandboxType {
LinuxSeccomp,
}
/// Destination for streaming a command's incremental output back to the
/// client while the command is still running: `sub_id`/`call_id` identify
/// the submission and tool call the chunks belong to, and `tx_event` is the
/// channel the resulting `ExecCommandOutputDelta` events are sent on.
#[derive(Clone)]
pub struct StdoutStream {
    /// Submission id used as the `Event.id` on emitted delta events.
    pub sub_id: String,
    /// Tool-call id copied into each `ExecCommandOutputDeltaEvent`.
    pub call_id: String,
    /// Channel on which delta events are sent.
    pub tx_event: Sender<Event>,
}
pub async fn process_exec_tool_call(
params: ExecParams,
sandbox_type: SandboxType,
ctrl_c: Arc<Notify>,
sandbox_policy: &SandboxPolicy,
codex_linux_sandbox_exe: &Option<PathBuf>,
stdout_stream: Option<StdoutStream>,
) -> Result<ExecToolCallOutput> {
let start = Instant::now();
let raw_output_result: std::result::Result<RawExecToolCallOutput, CodexErr> = match sandbox_type
{
SandboxType::None => exec(params, sandbox_policy, ctrl_c).await,
SandboxType::None => exec(params, sandbox_policy, ctrl_c, stdout_stream.clone()).await,
SandboxType::MacosSeatbelt => {
let ExecParams {
command,
@@ -83,7 +97,7 @@ pub async fn process_exec_tool_call(
env,
)
.await?;
consume_truncated_output(child, ctrl_c, timeout_ms).await
consume_truncated_output(child, ctrl_c, timeout_ms, stdout_stream.clone()).await
}
SandboxType::LinuxSeccomp => {
let ExecParams {
@@ -106,7 +120,7 @@ pub async fn process_exec_tool_call(
)
.await?;
consume_truncated_output(child, ctrl_c, timeout_ms).await
consume_truncated_output(child, ctrl_c, timeout_ms, stdout_stream).await
}
};
let duration = start.elapsed();
@@ -233,6 +247,7 @@ async fn exec(
}: ExecParams,
sandbox_policy: &SandboxPolicy,
ctrl_c: Arc<Notify>,
stdout_stream: Option<StdoutStream>,
) -> Result<RawExecToolCallOutput> {
let (program, args) = command.split_first().ok_or_else(|| {
CodexErr::Io(io::Error::new(
@@ -251,7 +266,7 @@ async fn exec(
env,
)
.await?;
consume_truncated_output(child, ctrl_c, timeout_ms).await
consume_truncated_output(child, ctrl_c, timeout_ms, stdout_stream).await
}
/// Consumes the output of a child process, truncating it so it is suitable for
@@ -260,6 +275,7 @@ pub(crate) async fn consume_truncated_output(
mut child: Child,
ctrl_c: Arc<Notify>,
timeout_ms: Option<u64>,
stdout_stream: Option<StdoutStream>,
) -> Result<RawExecToolCallOutput> {
// Both stdout and stderr were configured with `Stdio::piped()`
// above, therefore `take()` should normally return `Some`. If it doesn't
@@ -280,11 +296,15 @@ pub(crate) async fn consume_truncated_output(
BufReader::new(stdout_reader),
MAX_STREAM_OUTPUT,
MAX_STREAM_OUTPUT_LINES,
stdout_stream.clone(),
false,
));
let stderr_handle = tokio::spawn(read_capped(
BufReader::new(stderr_reader),
MAX_STREAM_OUTPUT,
MAX_STREAM_OUTPUT_LINES,
stdout_stream.clone(),
true,
));
let interrupted = ctrl_c.notified();
@@ -318,10 +338,12 @@ pub(crate) async fn consume_truncated_output(
})
}
async fn read_capped<R: AsyncRead + Unpin>(
async fn read_capped<R: AsyncRead + Unpin + Send + 'static>(
mut reader: R,
max_output: usize,
max_lines: usize,
stream: Option<StdoutStream>,
is_stderr: bool,
) -> io::Result<Vec<u8>> {
let mut buf = Vec::with_capacity(max_output.min(8 * 1024));
let mut tmp = [0u8; 8192];
@@ -335,6 +357,25 @@ async fn read_capped<R: AsyncRead + Unpin>(
break;
}
if let Some(stream) = &stream {
let chunk = tmp[..n].to_vec();
let msg = EventMsg::ExecCommandOutputDelta(ExecCommandOutputDeltaEvent {
call_id: stream.call_id.clone(),
stream: if is_stderr {
ExecOutputStream::Stderr
} else {
ExecOutputStream::Stdout
},
chunk: ByteBuf::from(chunk),
});
let event = Event {
id: stream.sub_id.clone(),
msg,
};
#[allow(clippy::let_unit_value)]
let _ = stream.tx_event.send(event).await;
}
// Copy into the buffer only while we still have byte and line budget.
if remaining_bytes > 0 && remaining_lines > 0 {
let mut copy_len = 0;

View File

@@ -13,6 +13,7 @@ use std::time::Duration;
use mcp_types::CallToolResult;
use serde::Deserialize;
use serde::Serialize;
use serde_bytes::ByteBuf;
use strum_macros::Display;
use uuid::Uuid;
@@ -179,9 +180,29 @@ pub enum SandboxPolicy {
/// default.
#[serde(default)]
network_access: bool,
/// When set to `true`, will include defaults like the current working
/// directory and TMPDIR (on macOS). When `false`, only `writable_roots`
/// are used. (Mainly used for testing.)
#[serde(default = "default_true")]
include_default_writable_roots: bool,
},
}
/// A writable root path accompanied by a list of subpaths that should remain
/// readonly even when the root is writable. This is primarily used to ensure
/// toplevel VCS metadata directories (e.g. `.git`) under a writable root are
/// not modified by the agent.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct WritableRoot {
    /// Directory the sandbox should allow writes beneath.
    pub root: PathBuf,
    /// Subpaths of `root` (e.g. a top-level `.git`) that must stay read-only.
    pub read_only_subpaths: Vec<PathBuf>,
}
/// Serde default used by `include_default_writable_roots` (see the
/// `#[serde(default = "default_true")]` attribute above): when the field is
/// omitted from serialized input, it deserializes to `true`.
fn default_true() -> bool {
    true
}
impl FromStr for SandboxPolicy {
type Err = serde_json::Error;
@@ -203,6 +224,7 @@ impl SandboxPolicy {
SandboxPolicy::WorkspaceWrite {
writable_roots: vec![],
network_access: false,
include_default_writable_roots: true,
}
}
@@ -228,27 +250,51 @@ impl SandboxPolicy {
}
}
/// Returns the list of writable roots that should be passed down to the
/// Landlock rules installer, tailored to the current working directory.
pub fn get_writable_roots_with_cwd(&self, cwd: &Path) -> Vec<PathBuf> {
/// Returns the list of writable roots (tailored to the current working
/// directory) together with subpaths that should remain readonly under
/// each writable root.
pub fn get_writable_roots_with_cwd(&self, cwd: &Path) -> Vec<WritableRoot> {
match self {
SandboxPolicy::DangerFullAccess => Vec::new(),
SandboxPolicy::ReadOnly => Vec::new(),
SandboxPolicy::WorkspaceWrite { writable_roots, .. } => {
let mut roots = writable_roots.clone();
roots.push(cwd.to_path_buf());
SandboxPolicy::WorkspaceWrite {
writable_roots,
include_default_writable_roots,
..
} => {
// Start from explicitly configured writable roots.
let mut roots: Vec<PathBuf> = writable_roots.clone();
// Also include the per-user tmp dir on macOS.
// Note this is added dynamically rather than storing it in
// writable_roots because writable_roots contains only static
// values deserialized from the config file.
if cfg!(target_os = "macos") {
if let Some(tmpdir) = std::env::var_os("TMPDIR") {
roots.push(PathBuf::from(tmpdir));
// Optionally include defaults (cwd and TMPDIR on macOS).
if *include_default_writable_roots {
roots.push(cwd.to_path_buf());
// Also include the per-user tmp dir on macOS.
// Note this is added dynamically rather than storing it in
// `writable_roots` because `writable_roots` contains only static
// values deserialized from the config file.
if cfg!(target_os = "macos") {
if let Some(tmpdir) = std::env::var_os("TMPDIR") {
roots.push(PathBuf::from(tmpdir));
}
}
}
// For each root, compute subpaths that should remain read-only.
roots
.into_iter()
.map(|writable_root| {
let mut subpaths = Vec::new();
let top_level_git = writable_root.join(".git");
if top_level_git.is_dir() {
subpaths.push(top_level_git);
}
WritableRoot {
root: writable_root,
read_only_subpaths: subpaths,
}
})
.collect()
}
}
}
@@ -323,6 +369,9 @@ pub enum EventMsg {
/// Notification that the server is about to execute a command.
ExecCommandBegin(ExecCommandBeginEvent),
/// Incremental chunk of output from a running command.
ExecCommandOutputDelta(ExecCommandOutputDeltaEvent),
ExecCommandEnd(ExecCommandEndEvent),
ExecApprovalRequest(ExecApprovalRequestEvent),
@@ -476,6 +525,24 @@ pub struct ExecCommandEndEvent {
pub exit_code: i32,
}
/// Which output stream of a spawned command produced a given chunk.
/// Serialized in snake_case (`stdout` / `stderr`).
#[derive(Debug, Clone, Deserialize, Serialize)]
#[serde(rename_all = "snake_case")]
pub enum ExecOutputStream {
    Stdout,
    Stderr,
}
/// Incremental chunk of output captured from a running exec command, emitted
/// while the command is still executing.
#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct ExecCommandOutputDeltaEvent {
    /// Identifier for the ExecCommandBegin that produced this chunk.
    pub call_id: String,
    /// Which stream produced this chunk.
    pub stream: ExecOutputStream,
    /// Raw bytes from the stream (may not be valid UTF-8).
    #[serde(with = "serde_bytes")]
    pub chunk: ByteBuf,
}
#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct ExecApprovalRequestEvent {
/// Identifier for the associated exec call, if available.

View File

@@ -4,6 +4,7 @@ use std::path::PathBuf;
use tokio::process::Child;
use crate::protocol::SandboxPolicy;
use crate::spawn::CODEX_SANDBOX_ENV_VAR;
use crate::spawn::StdioPolicy;
use crate::spawn::spawn_child_async;
@@ -20,10 +21,11 @@ pub async fn spawn_command_under_seatbelt(
sandbox_policy: &SandboxPolicy,
cwd: PathBuf,
stdio_policy: StdioPolicy,
env: HashMap<String, String>,
mut env: HashMap<String, String>,
) -> std::io::Result<Child> {
let args = create_seatbelt_command_args(command, sandbox_policy, &cwd);
let arg0 = None;
env.insert(CODEX_SANDBOX_ENV_VAR.to_string(), "seatbelt".to_string());
spawn_child_async(
PathBuf::from(MACOS_PATH_TO_SEATBELT_EXECUTABLE),
args,
@@ -50,16 +52,38 @@ fn create_seatbelt_command_args(
)
} else {
let writable_roots = sandbox_policy.get_writable_roots_with_cwd(cwd);
let (writable_folder_policies, cli_args): (Vec<String>, Vec<String>) = writable_roots
.iter()
.enumerate()
.map(|(index, root)| {
let param_name = format!("WRITABLE_ROOT_{index}");
let policy: String = format!("(subpath (param \"{param_name}\"))");
let cli_arg = format!("-D{param_name}={}", root.to_string_lossy());
(policy, cli_arg)
})
.unzip();
let mut writable_folder_policies: Vec<String> = Vec::new();
let mut cli_args: Vec<String> = Vec::new();
for (index, wr) in writable_roots.iter().enumerate() {
// Canonicalize to avoid mismatches like /var vs /private/var on macOS.
let canonical_root = wr.root.canonicalize().unwrap_or_else(|_| wr.root.clone());
let root_param = format!("WRITABLE_ROOT_{index}");
cli_args.push(format!(
"-D{root_param}={}",
canonical_root.to_string_lossy()
));
if wr.read_only_subpaths.is_empty() {
writable_folder_policies.push(format!("(subpath (param \"{root_param}\"))"));
} else {
// Add parameters for each read-only subpath and generate
// the `(require-not ...)` clauses.
let mut require_parts: Vec<String> = Vec::new();
require_parts.push(format!("(subpath (param \"{root_param}\"))"));
for (subpath_index, ro) in wr.read_only_subpaths.iter().enumerate() {
let canonical_ro = ro.canonicalize().unwrap_or_else(|_| ro.clone());
let ro_param = format!("WRITABLE_ROOT_{index}_RO_{subpath_index}");
cli_args.push(format!("-D{ro_param}={}", canonical_ro.to_string_lossy()));
require_parts
.push(format!("(require-not (subpath (param \"{ro_param}\")))"));
}
let policy_component = format!("(require-all {} )", require_parts.join(" "));
writable_folder_policies.push(policy_component);
}
}
if writable_folder_policies.is_empty() {
("".to_string(), Vec::<String>::new())
} else {
@@ -88,9 +112,201 @@ fn create_seatbelt_command_args(
let full_policy = format!(
"{MACOS_SEATBELT_BASE_POLICY}\n{file_read_policy}\n{file_write_policy}\n{network_policy}"
);
let mut seatbelt_args: Vec<String> = vec!["-p".to_string(), full_policy];
seatbelt_args.extend(extra_cli_args);
seatbelt_args.push("--".to_string());
seatbelt_args.extend(command);
seatbelt_args
}
// Unit tests for the Seatbelt policy/argument generation. The expected
// policies below are spelled out as raw string literals whose whitespace is
// significant, so they are compared byte-for-byte against the generated SBPL.
#[cfg(test)]
mod tests {
#![expect(clippy::expect_used)]
use super::MACOS_SEATBELT_BASE_POLICY;
use super::create_seatbelt_command_args;
use crate::protocol::SandboxPolicy;
use pretty_assertions::assert_eq;
use std::fs;
use std::path::Path;
use std::path::PathBuf;
use tempfile::TempDir;
// Verifies that a writable root containing a top-level `.git` directory gets
// a `(require-not ...)` carve-out for that `.git`, while a root without one
// is granted a plain `(subpath ...)` rule.
#[test]
fn create_seatbelt_args_with_read_only_git_subpath() {
// Create a temporary workspace with two writable roots: one containing
// a top-level .git directory and one without it.
let tmp = TempDir::new().expect("tempdir");
let PopulatedTmp {
root_with_git,
root_without_git,
root_with_git_canon,
root_with_git_git_canon,
root_without_git_canon,
} = populate_tmpdir(tmp.path());
// Build a policy that only includes the two test roots as writable and
// does not automatically include defaults like cwd or TMPDIR.
let policy = SandboxPolicy::WorkspaceWrite {
writable_roots: vec![root_with_git.clone(), root_without_git.clone()],
network_access: false,
include_default_writable_roots: false,
};
let args = create_seatbelt_command_args(
vec!["/bin/echo".to_string(), "hello".to_string()],
&policy,
tmp.path(),
);
// Build the expected policy text using a raw string for readability.
// Note that the policy includes:
// - the base policy,
// - read-only access to the filesystem,
// - write access to WRITABLE_ROOT_0 (but not its .git) and WRITABLE_ROOT_1.
let expected_policy = format!(
r#"{MACOS_SEATBELT_BASE_POLICY}
; allow read-only file operations
(allow file-read*)
(allow file-write*
(require-all (subpath (param "WRITABLE_ROOT_0")) (require-not (subpath (param "WRITABLE_ROOT_0_RO_0"))) ) (subpath (param "WRITABLE_ROOT_1"))
)
"#,
);
// Expected argv: `-p <policy>` followed by one `-D` definition per
// parameter referenced in the policy, then `--` and the user command.
let expected_args = vec![
"-p".to_string(),
expected_policy,
format!(
"-DWRITABLE_ROOT_0={}",
root_with_git_canon.to_string_lossy()
),
format!(
"-DWRITABLE_ROOT_0_RO_0={}",
root_with_git_git_canon.to_string_lossy()
),
format!(
"-DWRITABLE_ROOT_1={}",
root_without_git_canon.to_string_lossy()
),
"--".to_string(),
"/bin/echo".to_string(),
"hello".to_string(),
];
assert_eq!(args, expected_args);
}
// Verifies that when no explicit writable_roots are given but defaults are
// enabled, cwd is included as WRITABLE_ROOT_0 with its `.git` carved out,
// and (on macOS, when TMPDIR is set) TMPDIR appears as WRITABLE_ROOT_1.
#[test]
fn create_seatbelt_args_for_cwd_as_git_repo() {
// Create a temporary workspace with two writable roots: one containing
// a top-level .git directory and one without it.
let tmp = TempDir::new().expect("tempdir");
let PopulatedTmp {
root_with_git,
root_with_git_canon,
root_with_git_git_canon,
..
} = populate_tmpdir(tmp.path());
// Build a policy that does not specify any writable_roots, but does
// use the default ones (cwd and TMPDIR) and verifies the `.git` check
// is done properly for cwd.
let policy = SandboxPolicy::WorkspaceWrite {
writable_roots: vec![],
network_access: false,
include_default_writable_roots: true,
};
let args = create_seatbelt_command_args(
vec!["/bin/echo".to_string(), "hello".to_string()],
&policy,
root_with_git.as_path(),
);
// Canonicalized TMPDIR (macOS only); None elsewhere or when unset.
let tmpdir_env_var = if cfg!(target_os = "macos") {
std::env::var("TMPDIR")
.ok()
.map(PathBuf::from)
.and_then(|p| p.canonicalize().ok())
.map(|p| p.to_string_lossy().to_string())
} else {
None
};
let tempdir_policy_entry = if tmpdir_env_var.is_some() {
" (subpath (param \"WRITABLE_ROOT_1\"))"
} else {
""
};
// Build the expected policy text using a raw string for readability.
// Note that the policy includes:
// - the base policy,
// - read-only access to the filesystem,
// - write access to WRITABLE_ROOT_0 (but not its .git) and WRITABLE_ROOT_1.
let expected_policy = format!(
r#"{MACOS_SEATBELT_BASE_POLICY}
; allow read-only file operations
(allow file-read*)
(allow file-write*
(require-all (subpath (param "WRITABLE_ROOT_0")) (require-not (subpath (param "WRITABLE_ROOT_0_RO_0"))) ){tempdir_policy_entry}
)
"#,
);
let mut expected_args = vec![
"-p".to_string(),
expected_policy,
format!(
"-DWRITABLE_ROOT_0={}",
root_with_git_canon.to_string_lossy()
),
format!(
"-DWRITABLE_ROOT_0_RO_0={}",
root_with_git_git_canon.to_string_lossy()
),
];
if let Some(p) = tmpdir_env_var {
expected_args.push(format!("-DWRITABLE_ROOT_1={p}"));
}
expected_args.extend(vec![
"--".to_string(),
"/bin/echo".to_string(),
"hello".to_string(),
]);
assert_eq!(args, expected_args);
}
// Paths created by `populate_tmpdir`: both the as-created roots and their
// canonicalized forms (used to match the `-D` args, which are canonicalized
// by `create_seatbelt_command_args`).
struct PopulatedTmp {
root_with_git: PathBuf,
root_without_git: PathBuf,
root_with_git_canon: PathBuf,
root_with_git_git_canon: PathBuf,
root_without_git_canon: PathBuf,
}
// Creates `<tmp>/with_git/.git` and `<tmp>/no_git` and returns both the raw
// and canonicalized paths.
fn populate_tmpdir(tmp: &Path) -> PopulatedTmp {
let root_with_git = tmp.join("with_git");
let root_without_git = tmp.join("no_git");
fs::create_dir_all(&root_with_git).expect("create with_git");
fs::create_dir_all(&root_without_git).expect("create no_git");
fs::create_dir_all(root_with_git.join(".git")).expect("create .git");
// Ensure we have canonical paths for -D parameter matching.
let root_with_git_canon = root_with_git.canonicalize().expect("canonicalize with_git");
let root_with_git_git_canon = root_with_git_canon.join(".git");
let root_without_git_canon = root_without_git
.canonicalize()
.expect("canonicalize no_git");
PopulatedTmp {
root_with_git,
root_without_git,
root_with_git_canon,
root_with_git_git_canon,
root_without_git_canon,
}
}
}

View File

@@ -220,6 +220,7 @@ mod tests {
Arc::new(Notify::new()),
&SandboxPolicy::DangerFullAccess,
&None,
None,
)
.await
.unwrap();

View File

@@ -17,6 +17,11 @@ use crate::protocol::SandboxPolicy;
/// attributes, so this may change in the future.
pub const CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR: &str = "CODEX_SANDBOX_NETWORK_DISABLED";
/// Should be set when the process is spawned under a sandbox. Currently, the
/// value is "seatbelt" for macOS, but it may change in the future to
/// accommodate sandboxing configuration and other sandboxing mechanisms.
pub const CODEX_SANDBOX_ENV_VAR: &str = "CODEX_SANDBOX";
#[derive(Debug, Clone, Copy)]
pub enum StdioPolicy {
RedirectForShellTool,

View File

@@ -0,0 +1,143 @@
#![cfg(unix)]
use std::collections::HashMap;
use std::path::PathBuf;
use std::sync::Arc;
use async_channel::Receiver;
use codex_core::exec::ExecParams;
use codex_core::exec::SandboxType;
use codex_core::exec::StdoutStream;
use codex_core::exec::process_exec_tool_call;
use codex_core::protocol::Event;
use codex_core::protocol::EventMsg;
use codex_core::protocol::ExecCommandOutputDeltaEvent;
use codex_core::protocol::ExecOutputStream;
use codex_core::protocol::SandboxPolicy;
use tokio::sync::Notify;
/// Drain every event currently queued on `rx` and concatenate the bytes of
/// all stdout delta events, ignoring stderr deltas and other event kinds.
fn collect_stdout_events(rx: Receiver<Event>) -> Vec<u8> {
    let mut collected: Vec<u8> = Vec::new();
    // `try_recv` errors once the queue is empty, which ends the drain.
    loop {
        let event = match rx.try_recv() {
            Ok(event) => event,
            Err(_) => break,
        };
        if let EventMsg::ExecCommandOutputDelta(ExecCommandOutputDeltaEvent {
            stream: ExecOutputStream::Stdout,
            chunk,
            ..
        }) = event.msg
        {
            collected.extend_from_slice(&chunk);
        }
    }
    collected
}
/// End-to-end check that a command's stdout is streamed as
/// `ExecCommandOutputDelta` events and that the concatenated chunks match
/// the final captured stdout.
#[tokio::test]
async fn test_exec_stdout_stream_events_echo() {
    // Channel on which streamed output events will arrive.
    let (tx, rx) = async_channel::unbounded::<Event>();

    let params = ExecParams {
        command: vec![
            "/bin/sh".to_string(),
            "-c".to_string(),
            // Use printf for predictable behavior across shells.
            "printf 'hello-world\n'".to_string(),
        ],
        cwd: std::env::current_dir().unwrap_or_else(|_| PathBuf::from(".")),
        timeout_ms: Some(5_000),
        env: HashMap::new(),
    };

    let result = process_exec_tool_call(
        params,
        SandboxType::None,
        Arc::new(Notify::new()),
        &SandboxPolicy::new_read_only_policy(),
        &None,
        Some(StdoutStream {
            sub_id: "test-sub".to_string(),
            call_id: "call-1".to_string(),
            tx_event: tx,
        }),
    )
    .await
    .unwrap_or_else(|e| panic!("process_exec_tool_call failed: {e}"));

    assert_eq!(result.exit_code, 0);
    assert_eq!(result.stdout, "hello-world\n");

    // The streamed chunks, concatenated, must reproduce the full stdout
    // (they may arrive as a single chunk or several).
    let streamed = collect_stdout_events(rx);
    assert_eq!(String::from_utf8_lossy(&streamed), "hello-world\n");
}
/// End-to-end check that a command's stderr is streamed as
/// `ExecCommandOutputDelta` events tagged `Stderr`, and that stdout stays
/// empty for a command that writes only to stderr.
#[tokio::test]
async fn test_exec_stderr_stream_events_echo() {
    let (tx, rx) = async_channel::unbounded::<Event>();

    let params = ExecParams {
        command: vec![
            "/bin/sh".to_string(),
            "-c".to_string(),
            // Write to stderr explicitly.
            "printf 'oops\n' 1>&2".to_string(),
        ],
        cwd: std::env::current_dir().unwrap_or_else(|_| PathBuf::from(".")),
        timeout_ms: Some(5_000),
        env: HashMap::new(),
    };

    let result = process_exec_tool_call(
        params,
        SandboxType::None,
        Arc::new(Notify::new()),
        &SandboxPolicy::new_read_only_policy(),
        &None,
        Some(StdoutStream {
            sub_id: "test-sub".to_string(),
            call_id: "call-2".to_string(),
            tx_event: tx,
        }),
    )
    .await
    .unwrap_or_else(|e| panic!("process_exec_tool_call failed: {e}"));

    assert_eq!(result.exit_code, 0);
    assert_eq!(result.stdout, "");
    assert_eq!(result.stderr, "oops\n");

    // Concatenate only the stderr delta events and compare to captured stderr.
    let mut streamed_stderr = Vec::new();
    while let Ok(event) = rx.try_recv() {
        if let EventMsg::ExecCommandOutputDelta(ExecCommandOutputDeltaEvent {
            stream: ExecOutputStream::Stderr,
            chunk,
            ..
        }) = event.msg
        {
            streamed_stderr.extend_from_slice(&chunk);
        }
    }
    assert_eq!(String::from_utf8_lossy(&streamed_stderr), "oops\n");
}

View File

@@ -0,0 +1,195 @@
#![cfg(target_os = "macos")]
#![expect(clippy::expect_used)]
use std::collections::HashMap;
use std::path::Path;
use std::path::PathBuf;
use codex_core::protocol::SandboxPolicy;
use codex_core::seatbelt::spawn_command_under_seatbelt;
use codex_core::spawn::CODEX_SANDBOX_ENV_VAR;
use codex_core::spawn::StdioPolicy;
use tempfile::TempDir;
/// Filesystem layout shared by the Seatbelt sandbox tests: a parent
/// directory holding a fake Git repository (`repo_root` with a `.git`
/// child — see `create_test_scenario`) plus the probe-file paths whose
/// writability each test asserts via `run_test`.
struct TestScenario {
    // Directory containing the repo (the TempDir root).
    repo_parent: PathBuf,
    // Probe file in `repo_parent`, i.e. outside the repo.
    file_outside_repo: PathBuf,
    // Root of the fake Git repository (contains a `.git` directory).
    repo_root: PathBuf,
    // Probe file directly inside the repo root.
    file_in_repo_root: PathBuf,
    // Probe file inside the repo's `.git` directory.
    file_in_dot_git_dir: PathBuf,
}
/// Expected writability of each probe file in a `TestScenario` under the
/// sandbox policy being exercised.
struct TestExpectations {
    file_outside_repo_is_writable: bool,
    file_in_repo_root_is_writable: bool,
    file_in_dot_git_dir_is_writable: bool,
}
impl TestScenario {
    /// Attempt to `touch` each probe file under `policy` via Seatbelt and
    /// verify both the reported success of the command and the resulting
    /// existence of the file against `expectations`.
    async fn run_test(&self, policy: &SandboxPolicy, expectations: TestExpectations) {
        // Seatbelt cannot be nested: when this test process is itself
        // running under Seatbelt, skip instead of failing.
        if std::env::var(CODEX_SANDBOX_ENV_VAR) == Ok("seatbelt".to_string()) {
            eprintln!("{CODEX_SANDBOX_ENV_VAR} is set to 'seatbelt', skipping test.");
            return;
        }
        // Probe files paired with their expected writability, checked in the
        // same order as the expectations are declared.
        let checks = [
            (
                &self.file_outside_repo,
                expectations.file_outside_repo_is_writable,
            ),
            (
                &self.file_in_repo_root,
                expectations.file_in_repo_root_is_writable,
            ),
            (
                &self.file_in_dot_git_dir,
                expectations.file_in_dot_git_dir_is_writable,
            ),
        ];
        for (path, should_be_writable) in checks {
            assert_eq!(touch(path, policy).await, should_be_writable);
            assert_eq!(path.exists(), should_be_writable);
        }
    }
}
/// If the user has added a workspace root that is not a Git repo root, then
/// the user has to specify `--skip-git-repo-check` or go through some
/// interstitial that indicates they are taking on some risk because Git
/// cannot be used to backup their work before the agent begins.
///
/// Because the user has agreed to this risk, we do not try to find all .git
/// folders in the workspace and block them (though we could change our
/// position on this in the future).
#[tokio::test]
async fn if_parent_of_repo_is_writable_then_dot_git_folder_is_writable() {
    let temp_dir = TempDir::new().expect("should be able to create temp dir");
    let scenario = create_test_scenario(&temp_dir);
    // The writable root is the *parent* of the repo, so nothing — including
    // the repo's `.git` — is carved out.
    let policy = SandboxPolicy::WorkspaceWrite {
        writable_roots: vec![scenario.repo_parent.clone()],
        network_access: false,
        include_default_writable_roots: false,
    };
    let expectations = TestExpectations {
        file_outside_repo_is_writable: true,
        file_in_repo_root_is_writable: true,
        file_in_dot_git_dir_is_writable: true,
    };
    scenario.run_test(&policy, expectations).await;
}
/// When the writable root is the root of a Git repository (as evidenced by the
/// presence of a .git folder), then the .git folder should be read-only if
/// the policy is `WorkspaceWrite`.
#[tokio::test]
async fn if_git_repo_is_writable_root_then_dot_git_folder_is_read_only() {
    let temp_dir = TempDir::new().expect("should be able to create temp dir");
    let scenario = create_test_scenario(&temp_dir);
    // The writable root is the repo root itself, so its top-level `.git`
    // child is carved out as read-only; the parent stays unwritable.
    let policy = SandboxPolicy::WorkspaceWrite {
        writable_roots: vec![scenario.repo_root.clone()],
        network_access: false,
        include_default_writable_roots: false,
    };
    let expectations = TestExpectations {
        file_outside_repo_is_writable: false,
        file_in_repo_root_is_writable: true,
        file_in_dot_git_dir_is_writable: false,
    };
    scenario.run_test(&policy, expectations).await;
}
/// Under DangerFullAccess, all writes should be permitted anywhere on disk,
/// including inside the .git folder.
#[tokio::test]
async fn danger_full_access_allows_all_writes() {
    let temp_dir = TempDir::new().expect("should be able to create temp dir");
    let scenario = create_test_scenario(&temp_dir);
    let expectations = TestExpectations {
        file_outside_repo_is_writable: true,
        file_in_repo_root_is_writable: true,
        file_in_dot_git_dir_is_writable: true,
    };
    scenario
        .run_test(&SandboxPolicy::DangerFullAccess, expectations)
        .await;
}
/// Under ReadOnly, writes should not be permitted anywhere on disk.
#[tokio::test]
async fn read_only_forbids_all_writes() {
    let temp_dir = TempDir::new().expect("should be able to create temp dir");
    let scenario = create_test_scenario(&temp_dir);
    let expectations = TestExpectations {
        file_outside_repo_is_writable: false,
        file_in_repo_root_is_writable: false,
        file_in_dot_git_dir_is_writable: false,
    };
    scenario
        .run_test(&SandboxPolicy::ReadOnly, expectations)
        .await;
}
/// Build the on-disk layout for a test: `<tmp>/repo/.git`, plus the probe
/// file paths used by `TestScenario::run_test`. Only the directories are
/// created here; the probe files themselves are created by `touch` during
/// the test.
fn create_test_scenario(tmp: &TempDir) -> TestScenario {
    let repo_parent = tmp.path().to_path_buf();
    let repo_root = repo_parent.join("repo");
    let dot_git_dir = repo_root.join(".git");
    std::fs::create_dir(&repo_root).expect("should be able to create repo root");
    std::fs::create_dir(&dot_git_dir).expect("should be able to create .git dir");
    let file_outside_repo = repo_parent.join("outside.txt");
    let file_in_repo_root = repo_root.join("repo_file.txt");
    let file_in_dot_git_dir = dot_git_dir.join("dot_git_file.txt");
    TestScenario {
        repo_parent,
        file_outside_repo,
        repo_root,
        file_in_repo_root,
        file_in_dot_git_dir,
    }
}
/// Runs `/usr/bin/touch <path>` under a Seatbelt sandbox configured with
/// `policy` and reports whether the command exited successfully (i.e.
/// whether the sandbox permitted the write). Note that `path` must be
/// absolute.
async fn touch(path: &Path, policy: &SandboxPolicy) -> bool {
    assert!(path.is_absolute(), "Path must be absolute: {path:?}");

    let command = vec![
        "/usr/bin/touch".to_string(),
        path.to_string_lossy().to_string(),
    ];
    let cwd = std::env::current_dir().expect("should be able to get current dir");

    let mut child = spawn_command_under_seatbelt(
        command,
        policy,
        cwd,
        StdioPolicy::RedirectForShellTool,
        HashMap::new(),
    )
    .await
    .expect("should be able to spawn command under seatbelt");

    let status = child
        .wait()
        .await
        .expect("should be able to wait for child process");
    status.success()
}

View File

@@ -239,6 +239,7 @@ impl EventProcessor for EventProcessorWithHumanOutput {
cwd.to_string_lossy(),
);
}
EventMsg::ExecCommandOutputDelta(_) => {}
EventMsg::ExecCommandEnd(ExecCommandEndEvent {
call_id,
stdout,

View File

@@ -36,7 +36,11 @@ pub(crate) fn apply_sandbox_policy_to_current_thread(
}
if !sandbox_policy.has_full_disk_write_access() {
let writable_roots = sandbox_policy.get_writable_roots_with_cwd(cwd);
let writable_roots = sandbox_policy
.get_writable_roots_with_cwd(cwd)
.into_iter()
.map(|writable_root| writable_root.root)
.collect();
install_filesystem_landlock_rules_on_current_thread(writable_roots)?;
}

View File

@@ -49,6 +49,7 @@ async fn run_cmd(cmd: &[&str], writable_roots: &[PathBuf], timeout_ms: u64) {
let sandbox_policy = SandboxPolicy::WorkspaceWrite {
writable_roots: writable_roots.to_vec(),
network_access: false,
include_default_writable_roots: true,
};
let sandbox_program = env!("CARGO_BIN_EXE_codex-linux-sandbox");
let codex_linux_sandbox_exe = Some(PathBuf::from(sandbox_program));
@@ -59,6 +60,7 @@ async fn run_cmd(cmd: &[&str], writable_roots: &[PathBuf], timeout_ms: u64) {
ctrl_c,
&sandbox_policy,
&codex_linux_sandbox_exe,
None,
)
.await
.unwrap();
@@ -149,6 +151,7 @@ async fn assert_network_blocked(cmd: &[&str]) {
ctrl_c,
&sandbox_policy,
&codex_linux_sandbox_exe,
None,
)
.await;

View File

@@ -686,6 +686,7 @@ LOGIN_SUCCESS_HTML = """<!DOCTYPE html>
justify-content: center;
position: relative;
background: white;
font-family: system-ui, -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, Oxygen, Ubuntu, Cantarell, 'Open Sans', 'Helvetica Neue', sans-serif;
}
.inner-container {
@@ -703,6 +704,7 @@ LOGIN_SUCCESS_HTML = """<!DOCTYPE html>
align-items: center;
gap: 20px;
display: flex;
margin-top: 15vh;
}
.svg-wrapper {
position: relative;
@@ -710,9 +712,9 @@ LOGIN_SUCCESS_HTML = """<!DOCTYPE html>
.title {
text-align: center;
color: var(--text-primary, #0D0D0D);
font-size: 28px;
font-size: 32px;
font-weight: 400;
line-height: 36.40px;
line-height: 40px;
word-wrap: break-word;
}
.setup-box {
@@ -785,16 +787,26 @@ LOGIN_SUCCESS_HTML = """<!DOCTYPE html>
word-wrap: break-word;
text-decoration: none;
}
.logo {
display: flex;
align-items: center;
justify-content: center;
width: 4rem;
height: 4rem;
border-radius: 16px;
border: .5px solid rgba(0, 0, 0, 0.1);
box-shadow: rgba(0, 0, 0, 0.1) 0px 4px 16px 0px;
box-sizing: border-box;
background-color: rgb(255, 255, 255);
}
</style>
</head>
<body>
<div class="container">
<div class="inner-container">
<div class="content">
<div data-svg-wrapper class="svg-wrapper">
<svg width="56" height="56" viewBox="0 0 56 56" fill="none" xmlns="http://www.w3.org/2000/svg">
<path d="M4.6665 28.0003C4.6665 15.1137 15.1132 4.66699 27.9998 4.66699C40.8865 4.66699 51.3332 15.1137 51.3332 28.0003C51.3332 40.887 40.8865 51.3337 27.9998 51.3337C15.1132 51.3337 4.6665 40.887 4.6665 28.0003ZM37.5093 18.5088C36.4554 17.7672 34.9999 18.0203 34.2583 19.0742L24.8508 32.4427L20.9764 28.1808C20.1095 27.2272 18.6338 27.1569 17.6803 28.0238C16.7267 28.8906 16.6565 30.3664 17.5233 31.3199L23.3566 37.7366C23.833 38.2606 24.5216 38.5399 25.2284 38.4958C25.9353 38.4517 26.5838 38.089 26.9914 37.5098L38.0747 21.7598C38.8163 20.7059 38.5632 19.2504 37.5093 18.5088Z" fill="var(--green-400, #04B84C)"/>
</svg>
<div class="logo">
<svg xmlns="http://www.w3.org/2000/svg" width="32" height="32" fill="none" viewBox="0 0 32 32"><path stroke="#000" stroke-linecap="round" stroke-width="2.484" d="M22.356 19.797H17.17M9.662 12.29l1.979 3.576a.511.511 0 0 1-.005.504l-1.974 3.409M30.758 16c0 8.15-6.607 14.758-14.758 14.758-8.15 0-14.758-6.607-14.758-14.758C1.242 7.85 7.85 1.242 16 1.242c8.15 0 14.758 6.608 14.758 14.758Z"></path></svg>
</div>
<div class="title">Signed in to Codex CLI</div>
</div>

View File

@@ -258,6 +258,7 @@ async fn run_codex_tool_session_inner(
| EventMsg::McpToolCallBegin(_)
| EventMsg::McpToolCallEnd(_)
| EventMsg::ExecCommandBegin(_)
| EventMsg::ExecCommandOutputDelta(_)
| EventMsg::ExecCommandEnd(_)
| EventMsg::BackgroundEvent(_)
| EventMsg::PatchApplyBegin(_)

View File

@@ -0,0 +1,121 @@
use std::sync::Arc;
use crate::exec_approval::handle_exec_approval_request;
use crate::outgoing_message::OutgoingMessageSender;
use crate::outgoing_message::OutgoingNotificationMeta;
use crate::patch_approval::handle_patch_approval_request;
use codex_core::Codex;
use codex_core::protocol::AgentMessageEvent;
use codex_core::protocol::ApplyPatchApprovalRequestEvent;
use codex_core::protocol::EventMsg;
use codex_core::protocol::ExecApprovalRequestEvent;
use mcp_types::RequestId;
use tracing::error;
/// Drives a single Codex conversation for the MCP server: every event pulled
/// from `codex` is forwarded to the client as a notification tagged with
/// `request_id`, and the two event kinds that require a client round-trip
/// (exec and patch approval requests) are additionally dispatched to their
/// approval handlers.
///
/// NOTE(review): the loop has no `break`/`return`; an `Err` from
/// `next_event()` is logged and the loop continues — confirm this cannot
/// busy-loop if the underlying event stream is closed.
pub async fn run_conversation_loop(
    codex: Arc<Codex>,
    outgoing: Arc<OutgoingMessageSender>,
    request_id: RequestId,
) {
    // String form of the request id, passed through to the approval handlers.
    let request_id_str = match &request_id {
        RequestId::String(s) => s.clone(),
        RequestId::Integer(n) => n.to_string(),
    };
    // Stream events until the task needs to pause for user interaction or
    // completes.
    loop {
        match codex.next_event().await {
            Ok(event) => {
                // Unconditionally forward the event to the client as a
                // notification; any handling below is in addition to this.
                outgoing
                    .send_event_as_notification(
                        &event,
                        Some(OutgoingNotificationMeta::new(Some(request_id.clone()))),
                    )
                    .await;
                match event.msg {
                    // Exec approval: ask the client whether the command may run.
                    EventMsg::ExecApprovalRequest(ExecApprovalRequestEvent {
                        command,
                        cwd,
                        call_id,
                        reason: _,
                    }) => {
                        handle_exec_approval_request(
                            command,
                            cwd,
                            outgoing.clone(),
                            codex.clone(),
                            request_id.clone(),
                            request_id_str.clone(),
                            event.id.clone(),
                            call_id,
                        )
                        .await;
                        continue;
                    }
                    // NOTE(review): the error payload is discarded here, so
                    // only a generic message is logged.
                    EventMsg::Error(_) => {
                        error!("Codex runtime error");
                    }
                    // Patch approval: ask the client whether the patch may be applied.
                    EventMsg::ApplyPatchApprovalRequest(ApplyPatchApprovalRequestEvent {
                        call_id,
                        reason,
                        grant_root,
                        changes,
                    }) => {
                        handle_patch_approval_request(
                            call_id,
                            reason,
                            grant_root,
                            changes,
                            outgoing.clone(),
                            codex.clone(),
                            request_id.clone(),
                            request_id_str.clone(),
                            event.id.clone(),
                        )
                        .await;
                        continue;
                    }
                    // Already forwarded as a notification above; nothing extra to do.
                    EventMsg::TaskComplete(_) => {}
                    // SessionConfigured is consumed during session creation, so
                    // seeing it here indicates a protocol bug.
                    EventMsg::SessionConfigured(_) => {
                        tracing::error!("unexpected SessionConfigured event");
                    }
                    EventMsg::AgentMessageDelta(_) => {
                        // TODO: think how we want to support this in the MCP
                    }
                    EventMsg::AgentReasoningDelta(_) => {
                        // TODO: think how we want to support this in the MCP
                    }
                    EventMsg::AgentMessage(AgentMessageEvent { .. }) => {
                        // TODO: think how we want to support this in the MCP
                    }
                    EventMsg::TaskStarted
                    | EventMsg::TokenCount(_)
                    | EventMsg::AgentReasoning(_)
                    | EventMsg::McpToolCallBegin(_)
                    | EventMsg::McpToolCallEnd(_)
                    | EventMsg::ExecCommandBegin(_)
                    | EventMsg::ExecCommandEnd(_)
                    | EventMsg::BackgroundEvent(_)
                    | EventMsg::ExecCommandOutputDelta(_)
                    | EventMsg::PatchApplyBegin(_)
                    | EventMsg::PatchApplyEnd(_)
                    | EventMsg::GetHistoryEntryResponse(_)
                    | EventMsg::PlanUpdate(_)
                    | EventMsg::ShutdownComplete => {
                        // For now, we do not do anything extra for these
                        // events. Note that
                        // send(codex_event_to_notification(&event)) above has
                        // already dispatched these events as notifications,
                        // though we may want to do give different treatment to
                        // individual events in the future.
                    }
                }
            }
            Err(e) => {
                error!("Codex runtime error: {e}");
            }
        }
    }
}

View File

@@ -17,6 +17,7 @@ use tracing_subscriber::EnvFilter;
mod codex_tool_config;
mod codex_tool_runner;
mod conversation_loop;
mod exec_approval;
mod json_to_toml;
pub mod mcp_protocol;

View File

@@ -172,15 +172,22 @@ pub enum ToolCallResponseResult {
}
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct ConversationCreateResult {
pub conversation_id: ConversationId,
pub model: String,
#[serde(untagged)]
pub enum ConversationCreateResult {
Ok {
conversation_id: ConversationId,
model: String,
},
Error {
message: String,
},
}
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct ConversationStreamResult {}
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
// TODO: remove this status because we have is_error field in the response.
#[serde(tag = "status", rename_all = "camelCase")]
pub enum ConversationSendMessageResult {
Ok,
@@ -491,7 +498,7 @@ mod tests {
request_id: RequestId::Integer(1),
is_error: None,
result: Some(ToolCallResponseResult::ConversationCreate(
ConversationCreateResult {
ConversationCreateResult::Ok {
conversation_id: ConversationId(uuid!("d0f6ecbe-84a2-41c1-b23d-b20473b25eab")),
model: "o3".into(),
},
@@ -515,6 +522,35 @@ mod tests {
assert_eq!(req_id, RequestId::Integer(1));
}
/// An error `ConversationCreate` response must serialize with
/// `isError: true` and carry the message both as serialized JSON text in
/// `content` and as structured JSON in `structuredContent`.
#[test]
fn response_error_conversation_create_full_schema() {
    let env = ToolCallResponse {
        request_id: RequestId::Integer(2),
        is_error: Some(true),
        result: Some(ToolCallResponseResult::ConversationCreate(
            ConversationCreateResult::Error {
                message: "Failed to initialize session".into(),
            },
        )),
    };
    let req_id = env.request_id.clone();
    let observed = to_val(&CallToolResult::from(env));
    let expected = json!({
        "content": [
            { "type": "text", "text": "{\"message\":\"Failed to initialize session\"}" }
        ],
        "isError": true,
        "structuredContent": {
            "message": "Failed to initialize session"
        }
    });
    assert_eq!(
        observed, expected,
        "error response (ConversationCreate) must match"
    );
    assert_eq!(req_id, RequestId::Integer(2));
}
#[test]
fn response_success_conversation_stream_empty_result_object() {
let env = ToolCallResponse {

View File

@@ -11,6 +11,7 @@ use crate::mcp_protocol::ToolCallRequestParams;
use crate::mcp_protocol::ToolCallResponse;
use crate::mcp_protocol::ToolCallResponseResult;
use crate::outgoing_message::OutgoingMessageSender;
use crate::tool_handlers::create_conversation::handle_create_conversation;
use crate::tool_handlers::send_message::handle_send_message;
use codex_core::Codex;
@@ -67,6 +68,10 @@ impl MessageProcessor {
self.session_map.clone()
}
pub(crate) fn outgoing(&self) -> Arc<OutgoingMessageSender> {
self.outgoing.clone()
}
pub(crate) fn running_session_ids(&self) -> Arc<Mutex<HashSet<Uuid>>> {
self.running_session_ids.clone()
}
@@ -349,6 +354,9 @@ impl MessageProcessor {
}
async fn handle_new_tool_calls(&self, request_id: RequestId, params: ToolCallRequestParams) {
match params {
ToolCallRequestParams::ConversationCreate(args) => {
handle_create_conversation(self, request_id, args).await;
}
ToolCallRequestParams::ConversationSendMessage(args) => {
handle_send_message(self, request_id, args).await;
}

View File

@@ -0,0 +1,160 @@
use std::collections::HashMap;
use std::path::PathBuf;
use std::sync::Arc;
use codex_core::Codex;
use codex_core::codex_wrapper::init_codex;
use codex_core::config::Config as CodexConfig;
use codex_core::config::ConfigOverrides;
use codex_core::protocol::EventMsg;
use codex_core::protocol::SessionConfiguredEvent;
use mcp_types::RequestId;
use tokio::sync::Mutex;
use uuid::Uuid;
use crate::conversation_loop::run_conversation_loop;
use crate::json_to_toml::json_to_toml;
use crate::mcp_protocol::ConversationCreateArgs;
use crate::mcp_protocol::ConversationCreateResult;
use crate::mcp_protocol::ConversationId;
use crate::mcp_protocol::ToolCallResponseResult;
use crate::message_processor::MessageProcessor;
pub(crate) async fn handle_create_conversation(
message_processor: &MessageProcessor,
id: RequestId,
args: ConversationCreateArgs,
) {
// Build ConfigOverrides from args
let ConversationCreateArgs {
prompt: _, // not used here; creation only establishes the session
model,
cwd,
approval_policy,
sandbox,
config,
profile,
base_instructions,
} = args;
// Convert config overrides JSON into CLI-style TOML overrides
let cli_overrides: Vec<(String, toml::Value)> = match config {
Some(v) => match v.as_object() {
Some(map) => map
.into_iter()
.map(|(k, v)| (k.clone(), json_to_toml(v.clone())))
.collect(),
None => Vec::new(),
},
None => Vec::new(),
};
let overrides = ConfigOverrides {
model: Some(model.clone()),
cwd: Some(PathBuf::from(cwd)),
approval_policy,
sandbox_mode: sandbox,
model_provider: None,
config_profile: profile,
codex_linux_sandbox_exe: None,
base_instructions,
include_plan_tool: None,
};
let cfg: CodexConfig = match CodexConfig::load_with_cli_overrides(cli_overrides, overrides) {
Ok(cfg) => cfg,
Err(e) => {
message_processor
.send_response_with_optional_error(
id,
Some(ToolCallResponseResult::ConversationCreate(
ConversationCreateResult::Error {
message: format!("Failed to load config: {e}"),
},
)),
Some(true),
)
.await;
return;
}
};
// Initialize Codex session
let codex_conversation = match init_codex(cfg).await {
Ok(conv) => conv,
Err(e) => {
message_processor
.send_response_with_optional_error(
id,
Some(ToolCallResponseResult::ConversationCreate(
ConversationCreateResult::Error {
message: format!("Failed to initialize session: {e}"),
},
)),
Some(true),
)
.await;
return;
}
};
// Expect SessionConfigured; if not, return error.
let EventMsg::SessionConfigured(SessionConfiguredEvent { model, .. }) =
&codex_conversation.session_configured.msg
else {
message_processor
.send_response_with_optional_error(
id,
Some(ToolCallResponseResult::ConversationCreate(
ConversationCreateResult::Error {
message: "Expected SessionConfigured event".to_string(),
},
)),
Some(true),
)
.await;
return;
};
let effective_model = model.clone();
let session_id = codex_conversation.session_id;
let codex_arc = Arc::new(codex_conversation.codex);
// Store session for future calls
insert_session(
session_id,
codex_arc.clone(),
message_processor.session_map(),
)
.await;
// Run the conversation loop in the background so this request can return immediately.
let outgoing = message_processor.outgoing();
let spawn_id = id.clone();
tokio::spawn(async move {
run_conversation_loop(codex_arc.clone(), outgoing, spawn_id).await;
});
// Reply with the new conversation id and effective model
message_processor
.send_response_with_optional_error(
id,
Some(ToolCallResponseResult::ConversationCreate(
ConversationCreateResult::Ok {
conversation_id: ConversationId(session_id),
model: effective_model,
},
)),
Some(false),
)
.await;
}
/// Registers `codex` under `session_id` in the shared session map so that
/// subsequent tool calls can address the running conversation.
async fn insert_session(
    session_id: Uuid,
    codex: Arc<Codex>,
    session_map: Arc<Mutex<HashMap<Uuid, Arc<Codex>>>>,
) {
    session_map.lock().await.insert(session_id, codex);
}

View File

@@ -1 +1,2 @@
pub(crate) mod create_conversation;
pub(crate) mod send_message;

View File

@@ -14,6 +14,7 @@ use assert_cmd::prelude::*;
use codex_core::protocol::InputItem;
use codex_mcp_server::CodexToolCallParam;
use codex_mcp_server::CodexToolCallReplyParam;
use codex_mcp_server::mcp_protocol::ConversationCreateArgs;
use codex_mcp_server::mcp_protocol::ConversationId;
use codex_mcp_server::mcp_protocol::ConversationSendMessageArgs;
use codex_mcp_server::mcp_protocol::ToolCallRequestParams;
@@ -200,6 +201,41 @@ impl McpProcess {
.await
}
/// Sends a `conversationCreate` tool call built from the given prompt,
/// model, and working directory, leaving every optional setting unset.
/// Delegates to [`Self::send_conversation_create_with_args`].
pub async fn send_conversation_create_tool_call(
    &mut self,
    prompt: &str,
    model: &str,
    cwd: &str,
) -> anyhow::Result<i64> {
    let args = ConversationCreateArgs {
        prompt: prompt.to_string(),
        model: model.to_string(),
        cwd: cwd.to_string(),
        approval_policy: None,
        sandbox: None,
        config: None,
        profile: None,
        base_instructions: None,
    };
    self.send_conversation_create_with_args(args).await
}
/// Sends a `conversationCreate` tool call with fully caller-specified
/// arguments, forwarding the serialized params to `send_request`.
pub async fn send_conversation_create_with_args(
    &mut self,
    args: ConversationCreateArgs,
) -> anyhow::Result<i64> {
    let params = ToolCallRequestParams::ConversationCreate(args);
    self.send_request(
        mcp_types::CallToolRequest::METHOD,
        Some(serde_json::to_value(params)?),
    )
    .await
}
async fn send_request(
&mut self,
method: &str,

View File

@@ -0,0 +1,128 @@
#![allow(clippy::expect_used, clippy::unwrap_used)]
use std::path::Path;
use mcp_test_support::McpProcess;
use mcp_test_support::create_final_assistant_message_sse_response;
use mcp_test_support::create_mock_chat_completions_server;
use mcp_types::JSONRPCResponse;
use mcp_types::RequestId;
use pretty_assertions::assert_eq;
use serde_json::json;
use tempfile::TempDir;
use tokio::time::timeout;
const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(10);
/// End-to-end happy path: create a conversation via the `conversationCreate`
/// tool, send it a user message, and verify the outbound Chat Completions
/// request that the mock model server receives.
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn test_conversation_create_and_send_message_ok() {
    // Mock server; we won't strictly rely on it, but provide one to satisfy any model wiring.
    let responses = vec![
        create_final_assistant_message_sse_response("Done").expect("build mock assistant message"),
    ];
    let server = create_mock_chat_completions_server(responses).await;

    // Temporary Codex home with config pointing at the mock server.
    let codex_home = TempDir::new().expect("create temp dir");
    create_config_toml(codex_home.path(), &server.uri()).expect("write config.toml");

    // Start MCP server process and initialize.
    let mut mcp = McpProcess::new(codex_home.path())
        .await
        .expect("spawn mcp process");
    timeout(DEFAULT_READ_TIMEOUT, mcp.initialize())
        .await
        .expect("init timeout")
        .expect("init failed");

    // Create a conversation via the new tool.
    let req_id = mcp
        .send_conversation_create_tool_call("", "o3", "/repo")
        .await
        .expect("send conversationCreate");
    let resp: JSONRPCResponse = timeout(
        DEFAULT_READ_TIMEOUT,
        mcp.read_stream_until_response_message(RequestId::Integer(req_id)),
    )
    .await
    .expect("create response timeout")
    .expect("create response error");

    // Structured content must include status=ok, a UUID conversation_id and the model we passed.
    let sc = &resp.result["structuredContent"];
    let conv_id = sc["conversation_id"].as_str().expect("uuid string");
    assert!(!conv_id.is_empty());
    assert_eq!(sc["model"], json!("o3"));

    // Now send a message to the created conversation and expect an OK result.
    let send_id = mcp
        .send_user_message_tool_call("Hello", conv_id)
        .await
        .expect("send message");
    let send_resp: JSONRPCResponse = timeout(
        DEFAULT_READ_TIMEOUT,
        mcp.read_stream_until_response_message(RequestId::Integer(send_id)),
    )
    .await
    .expect("send response timeout")
    .expect("send response error");
    assert_eq!(
        send_resp.result["structuredContent"],
        json!({ "status": "ok" })
    );

    // Avoid a race condition: poll (bounded by DEFAULT_READ_TIMEOUT) until the
    // mock server has received the chat.completions request.
    let deadline = std::time::Instant::now() + DEFAULT_READ_TIMEOUT;
    loop {
        let requests = server.received_requests().await.unwrap_or_default();
        if !requests.is_empty() {
            break;
        }
        if std::time::Instant::now() >= deadline {
            panic!("mock server did not receive the chat.completions request in time");
        }
        tokio::time::sleep(std::time::Duration::from_millis(10)).await;
    }

    // Verify the outbound request body matches expectations for Chat Completions.
    let request = &server.received_requests().await.unwrap()[0];
    let body = request
        .body_json::<serde_json::Value>()
        .expect("parse request body as JSON");
    assert_eq!(body["model"], json!("o3"));
    assert!(body["stream"].as_bool().unwrap_or(false));
    let messages = body["messages"]
        .as_array()
        .expect("messages should be array");
    let last = messages.last().expect("at least one message");
    assert_eq!(last["role"], json!("user"));
    assert_eq!(last["content"], json!("Hello"));
    drop(server);
}
/// Writes a `config.toml` into `codex_home` that routes model traffic to the
/// mock chat-completions server at `server_uri` (Chat wire API, retries
/// disabled, approvals/sandboxing turned off for the test).
fn create_config_toml(codex_home: &Path, server_uri: &str) -> std::io::Result<()> {
    let contents = format!(
        r#"
model = "mock-model"
approval_policy = "never"
sandbox_mode = "danger-full-access"
model_provider = "mock_provider"
[model_providers.mock_provider]
name = "Mock provider for test"
base_url = "{server_uri}/v1"
wire_api = "chat"
request_max_retries = 0
stream_max_retries = 0
"#
    );
    std::fs::write(codex_home.join("config.toml"), contents)
}

View File

@@ -17,6 +17,7 @@ workspace = true
[dependencies]
anyhow = "1"
base64 = "0.22.1"
chrono = { version = "0.4", features = ["serde"] }
clap = { version = "4", features = ["derive"] }
codex-ansi-escape = { path = "../ansi-escape" }
codex-arg0 = { path = "../arg0" }
@@ -41,6 +42,8 @@ ratatui = { version = "0.29.0", features = [
] }
ratatui-image = "8.0.0"
regex-lite = "0.1"
reqwest = { version = "0.12", features = ["json"] }
serde = { version = "1", features = ["derive"] }
serde_json = { version = "1", features = ["preserve_order"] }
shlex = "1.3.0"
strum = "0.27.2"

View File

@@ -382,6 +382,7 @@ impl ChatWidget<'_> {
);
self.add_to_history(HistoryCell::new_active_exec_command(command));
}
EventMsg::ExecCommandOutputDelta(_) => {}
EventMsg::PatchApplyBegin(PatchApplyBeginEvent {
call_id: _,
auto_approved,

View File

@@ -545,24 +545,17 @@ impl HistoryCell {
} else {
for (idx, PlanItemArg { step, status }) in plan.into_iter().enumerate() {
let num = idx + 1;
let (icon, style): (&str, Style) = match status {
StepStatus::Completed => ("", Style::default().fg(Color::Green)),
StepStatus::InProgress => (
"",
Style::default()
.fg(Color::Yellow)
.add_modifier(Modifier::BOLD),
),
StepStatus::Pending => ("", Style::default().fg(Color::Gray)),
let icon_span: Span = match status {
StepStatus::Completed => Span::from("").fg(Color::Green),
StepStatus::InProgress => Span::from("").fg(Color::Yellow).bold(),
StepStatus::Pending => Span::from("").fg(Color::Gray),
};
let prefix = vec![
Span::raw(format!("{num:>2}. [")),
Span::styled(icon.to_string(), style),
Span::raw("] "),
];
let mut spans = prefix;
spans.push(Span::raw(step));
lines.push(Line::from(spans));
lines.push(Line::from(vec![
format!("{num:>2}. [").into(),
icon_span,
"] ".into(),
step.into(),
]));
}
}

View File

@@ -216,18 +216,18 @@ where
{
let mut fg = Color::Reset;
let mut bg = Color::Reset;
let mut modifier = Modifier::empty();
let mut last_modifier = Modifier::empty();
for span in content {
let mut next_modifier = modifier;
next_modifier.insert(span.style.add_modifier);
next_modifier.remove(span.style.sub_modifier);
if next_modifier != modifier {
let mut modifier = Modifier::empty();
modifier.insert(span.style.add_modifier);
modifier.remove(span.style.sub_modifier);
if modifier != last_modifier {
let diff = ModifierDiff {
from: modifier,
to: next_modifier,
from: last_modifier,
to: modifier,
};
diff.queue(&mut writer)?;
modifier = next_modifier;
last_modifier = modifier;
}
let next_fg = span.style.fg.unwrap_or(Color::Reset);
let next_bg = span.style.bg.unwrap_or(Color::Reset);
@@ -250,3 +250,37 @@ where
SetAttribute(crossterm::style::Attribute::Reset),
)
}
#[cfg(test)]
mod tests {
    #![allow(clippy::unwrap_used)]
    use super::*;

    /// `write_spans` must emit a Bold attribute before a bold span, switch
    /// back to NormalIntensity for the following unstyled span, and finish
    /// with the color/attribute reset sequence.
    #[test]
    fn writes_bold_then_regular_spans() {
        use ratatui::style::Stylize;
        let spans = ["A".bold(), "B".into()];

        let mut actual: Vec<u8> = Vec::new();
        write_spans(&mut actual, spans.iter()).unwrap();

        // Build the expected byte stream with the same crossterm commands.
        let mut expected: Vec<u8> = Vec::new();
        queue!(
            expected,
            SetAttribute(crossterm::style::Attribute::Bold),
            Print("A"),
            SetAttribute(crossterm::style::Attribute::NormalIntensity),
            Print("B"),
            SetForegroundColor(CColor::Reset),
            SetBackgroundColor(CColor::Reset),
            SetAttribute(crossterm::style::Attribute::Reset),
        )
        .unwrap();

        // Compare as strings for a readable diff on failure.
        assert_eq!(
            String::from_utf8(actual).unwrap(),
            String::from_utf8(expected).unwrap()
        );
    }
}

View File

@@ -41,6 +41,11 @@ mod text_formatting;
mod tui;
mod user_approval_widget;
#[cfg(not(debug_assertions))]
mod updates;
#[cfg(not(debug_assertions))]
use color_eyre::owo_colors::OwoColorize;
pub use cli::Cli;
pub async fn run_main(
@@ -139,6 +144,38 @@ pub async fn run_main(
.with(tui_layer)
.try_init();
#[allow(clippy::print_stderr)]
#[cfg(not(debug_assertions))]
if let Some(latest_version) = updates::get_upgrade_version(&config) {
let current_version = env!("CARGO_PKG_VERSION");
let exe = std::env::current_exe()?;
let managed_by_npm = std::env::var_os("CODEX_MANAGED_BY_NPM").is_some();
eprintln!(
"{} {current_version} -> {latest_version}.",
"✨⬆️ Update available!".bold().cyan()
);
if managed_by_npm {
let npm_cmd = "npm install -g @openai/codex@latest";
eprintln!("Run {} to update.", npm_cmd.cyan().on_black());
} else if cfg!(target_os = "macos")
&& (exe.starts_with("/opt/homebrew") || exe.starts_with("/usr/local"))
{
let brew_cmd = "brew upgrade codex";
eprintln!("Run {} to update.", brew_cmd.cyan().on_black());
} else {
eprintln!(
"See {} for the latest releases and installation options.",
"https://github.com/openai/codex/releases/latest"
.cyan()
.on_black()
);
}
eprintln!("");
}
let show_login_screen = should_show_login_screen(&config);
if show_login_screen {
std::io::stdout()

137
codex-rs/tui/src/updates.rs Normal file
View File

@@ -0,0 +1,137 @@
#![cfg(any(not(debug_assertions), test))]
use chrono::DateTime;
use chrono::Duration;
use chrono::Utc;
use serde::Deserialize;
use serde::Serialize;
use std::path::Path;
use std::path::PathBuf;
use codex_core::config::Config;
pub fn get_upgrade_version(config: &Config) -> Option<String> {
let version_file = version_filepath(config);
let info = read_version_info(&version_file).ok();
if match &info {
None => true,
Some(info) => info.last_checked_at < Utc::now() - Duration::hours(20),
} {
// Refresh the cached latest version in the background so TUI startup
// isnt blocked by a network call. The UI reads the previously cached
// value (if any) for this run; the next run shows the banner if needed.
tokio::spawn(async move {
check_for_update(&version_file)
.await
.inspect_err(|e| tracing::error!("Failed to update version: {e}"))
});
}
info.and_then(|info| {
let current_version = env!("CARGO_PKG_VERSION");
if is_newer(&info.latest_version, current_version).unwrap_or(false) {
Some(info.latest_version)
} else {
None
}
})
}
/// Cached contents of the `version.json` file: the most recently fetched
/// release version plus when the check happened.
#[derive(Serialize, Deserialize, Debug, Clone)]
struct VersionInfo {
    /// Latest published version with the `rust-v` tag prefix stripped,
    /// e.g. "0.11.0".
    latest_version: String,
    // ISO-8601 timestamp (RFC3339) of the last successful update check.
    last_checked_at: DateTime<Utc>,
}
/// Subset of the GitHub "latest release" API response that we deserialize.
#[derive(Deserialize, Debug, Clone)]
struct ReleaseInfo {
    /// Git tag of the release, e.g. `rust-v0.11.0`.
    tag_name: String,
}
const VERSION_FILENAME: &str = "version.json";
const LATEST_RELEASE_URL: &str = "https://api.github.com/repos/openai/codex/releases/latest";
/// Path of the cached update-check file (`version.json`) inside the
/// configured `codex_home` directory.
fn version_filepath(config: &Config) -> PathBuf {
    config.codex_home.join(VERSION_FILENAME)
}
/// Reads and deserializes the cached `VersionInfo` from `version_file`,
/// failing if the file is missing or not valid JSON.
fn read_version_info(version_file: &Path) -> anyhow::Result<VersionInfo> {
    let json = std::fs::read_to_string(version_file)?;
    let info = serde_json::from_str(&json)?;
    Ok(info)
}
/// Fetches the latest release tag from GitHub and caches it, together with
/// the current timestamp, as a single JSON line in `version_file` (creating
/// parent directories as needed).
///
/// # Errors
/// Fails on network/HTTP errors, when the tag does not carry the expected
/// `rust-v` prefix, or on filesystem errors while writing the cache.
async fn check_for_update(version_file: &Path) -> anyhow::Result<()> {
    let ReleaseInfo {
        tag_name: latest_tag_name,
    } = reqwest::Client::new()
        .get(LATEST_RELEASE_URL)
        // GitHub's REST API rejects requests without a User-Agent header.
        .header(
            "User-Agent",
            format!(
                "codex/{} (+https://github.com/openai/codex)",
                env!("CARGO_PKG_VERSION")
            ),
        )
        .send()
        .await?
        .error_for_status()?
        .json::<ReleaseInfo>()
        .await?;
    // Release tags are expected to look like `rust-v1.2.3`; cache only the
    // bare version string.
    let info = VersionInfo {
        latest_version: latest_tag_name
            .strip_prefix("rust-v")
            .ok_or_else(|| anyhow::anyhow!("Failed to parse latest tag name '{latest_tag_name}'"))?
            .into(),
        last_checked_at: Utc::now(),
    };
    let json_line = format!("{}\n", serde_json::to_string(&info)?);
    if let Some(parent) = version_file.parent() {
        tokio::fs::create_dir_all(parent).await?;
    }
    tokio::fs::write(version_file, json_line).await?;
    Ok(())
}
/// Returns `Some(true)` iff `latest` is strictly newer than `current`.
/// Yields `None` when either string is not a plain `major.minor.patch`
/// version (e.g. pre-release tags such as `1.0.0-rc.1`).
fn is_newer(latest: &str, current: &str) -> Option<bool> {
    let latest = parse_version(latest)?;
    let current = parse_version(current)?;
    Some(latest > current)
}

/// Parses the first three dot-separated numeric components of `v` into a
/// `(major, minor, patch)` tuple. Surrounding whitespace is ignored, as are
/// any components after the third; fewer than three components (or a
/// non-numeric one) yields `None`.
fn parse_version(v: &str) -> Option<(u64, u64, u64)> {
    let mut components = v.trim().split('.');
    let mut next_num = || components.next()?.parse::<u64>().ok();
    Some((next_num()?, next_num()?, next_num()?))
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn prerelease_version_is_not_considered_newer() {
assert_eq!(is_newer("0.11.0-beta.1", "0.11.0"), None);
assert_eq!(is_newer("1.0.0-rc.1", "1.0.0"), None);
}
#[test]
fn plain_semver_comparisons_work() {
assert_eq!(is_newer("0.11.1", "0.11.0"), Some(true));
assert_eq!(is_newer("0.11.0", "0.11.1"), Some(false));
assert_eq!(is_newer("1.0.0", "0.9.9"), Some(true));
assert_eq!(is_newer("0.9.9", "1.0.0"), Some(false));
}
#[test]
fn whitespace_is_ignored() {
assert_eq!(parse_version(" 1.2.3 \n"), Some((1, 2, 3)));
assert_eq!(is_newer(" 1.2.3 ", "1.2.2"), Some(true));
}
}