Compare commits

...

7 Commits

Author SHA1 Message Date
starr-openai
b79227baf3 Thread executor attachment through tool handlers 2026-04-01 20:46:18 -07:00
starr-openai
adbec5ab37 Inject attached executor into tool handlers 2026-04-01 19:13:25 -07:00
starr-openai
b190f2ae57 Polish handler declarations 2026-04-01 18:56:23 -07:00
starr-openai
263c04fec3 Polish static-turn executor wiring 2026-04-01 18:52:54 -07:00
starr-openai
d622a4b061 Simplify static-turn executor wiring 2026-04-01 18:49:14 -07:00
starr-openai
f984e97d6e codex: pass attached executor capability to tools 2026-04-01 18:25:11 -07:00
starr-openai
bd9c85eedf codex: preserve no-executor mode and gate js_repl 2026-04-01 17:35:14 -07:00
24 changed files with 908 additions and 239 deletions

View File

@@ -941,6 +941,7 @@ impl TurnContext {
sandbox_policy: self.sandbox_policy.get(),
windows_sandbox_level: self.windows_sandbox_level,
})
.with_executor_attachment(self.environment.executor_attachment())
.with_unified_exec_shell_mode(self.tools_config.unified_exec_shell_mode.clone())
.with_web_search_config(self.tools_config.web_search_config.clone())
.with_allow_login_shell(self.tools_config.allow_login_shell)
@@ -1395,6 +1396,7 @@ impl Session {
sandbox_policy: session_configuration.sandbox_policy.get(),
windows_sandbox_level: session_configuration.windows_sandbox_level,
})
.with_executor_attachment(environment.executor_attachment())
.with_unified_exec_shell_mode_for_session(
user_shell,
shell_zsh_path,
@@ -5472,6 +5474,7 @@ async fn spawn_review_thread(
sess.services.shell_zsh_path.as_ref(),
sess.services.main_execve_wrapper_exe.as_ref(),
)
.with_executor_attachment(parent_turn_context.environment.executor_attachment())
.with_web_search_config(/*web_search_config*/ None)
.with_allow_login_shell(config.permissions.allow_login_shell)
.with_agent_roles(config.agent_roles.clone());

View File

@@ -77,8 +77,8 @@ pub(crate) async fn run_codex_thread_interactive(
config,
auth_manager,
models_manager,
environment_manager: Arc::new(EnvironmentManager::new(
parent_ctx.environment.exec_server_url().map(str::to_owned),
environment_manager: Arc::new(EnvironmentManager::from_environment(
parent_ctx.environment.as_ref(),
)),
skills_manager: Arc::clone(&parent_session.services.skills_manager),
plugins_manager: Arc::clone(&parent_session.services.plugins_manager),

View File

@@ -5259,7 +5259,12 @@ async fn unified_exec_rejects_escalated_permissions_when_policy_not_on_request()
let turn_context = Arc::new(turn_context_raw);
let tracker = Arc::new(tokio::sync::Mutex::new(TurnDiffTracker::new()));
let handler = UnifiedExecHandler;
let handler = UnifiedExecHandler::new(
turn_context
.environment
.executor_attachment()
.expect("test environment has an executor attachment"),
);
let resp = handler
.handle(ToolInvocation {
session: Arc::clone(&session),

View File

@@ -207,7 +207,12 @@ async fn guardian_allows_unified_exec_additional_permissions_requests_past_polic
let turn_context = Arc::new(turn_context_raw);
let tracker = Arc::new(tokio::sync::Mutex::new(TurnDiffTracker::new()));
let handler = UnifiedExecHandler;
let handler = UnifiedExecHandler::new(
turn_context
.environment
.executor_attachment()
.expect("test environment has an executor attachment"),
);
let resp = handler
.handle(ToolInvocation {
session: Arc::clone(&session),

View File

@@ -31,6 +31,7 @@ use crate::tools::spec::JsonSchema;
use async_trait::async_trait;
use codex_apply_patch::ApplyPatchAction;
use codex_apply_patch::ApplyPatchFileChange;
use codex_exec_server::ExecutorAttachment;
use codex_protocol::models::FileSystemPermissions;
use codex_protocol::models::PermissionProfile;
use codex_sandboxing::policy_transforms::effective_file_system_sandbox_policy;
@@ -40,7 +41,17 @@ use codex_utils_absolute_path::AbsolutePathBuf;
use std::collections::BTreeSet;
use std::sync::Arc;
pub struct ApplyPatchHandler;
/// Tool handler for `apply_patch` invocations.
pub struct ApplyPatchHandler {
    // Executor capability used to run the patch application command.
    executor_attachment: Arc<ExecutorAttachment>,
}

impl ApplyPatchHandler {
    /// Creates a handler bound to the given executor attachment.
    pub fn new(executor_attachment: Arc<ExecutorAttachment>) -> Self {
        Self {
            executor_attachment,
        }
    }
}
const APPLY_PATCH_LARK_GRAMMAR: &str = include_str!("tool_apply_patch.lark");
@@ -213,7 +224,8 @@ impl ToolHandler for ApplyPatchHandler {
};
let mut orchestrator = ToolOrchestrator::new();
let mut runtime = ApplyPatchRuntime::new();
let mut runtime =
ApplyPatchRuntime::new(Arc::clone(&self.executor_attachment));
let tool_ctx = ToolCtx {
session: session.clone(),
turn: turn.clone(),
@@ -292,6 +304,12 @@ pub(crate) async fn intercept_apply_patch(
Ok(Some(FunctionToolOutput::from_text(content, Some(true))))
}
InternalApplyPatchInvocation::DelegateToExec(apply) => {
let Some(executor_attachment) = turn.environment.executor_attachment() else {
return Err(FunctionCallError::RespondToModel(
"apply_patch interception requires an executor attachment".to_string(),
));
};
let changes = convert_apply_patch_to_protocol(&apply.action);
let emitter = ToolEmitter::apply_patch(changes.clone(), apply.auto_approved);
let event_ctx = ToolEventCtx::new(
@@ -315,7 +333,7 @@ pub(crate) async fn intercept_apply_patch(
};
let mut orchestrator = ToolOrchestrator::new();
let mut runtime = ApplyPatchRuntime::new();
let mut runtime = ApplyPatchRuntime::new(executor_attachment);
let tool_ctx = ToolCtx {
session: session.clone(),
turn: turn.clone(),

View File

@@ -3,8 +3,12 @@ use std::ffi::OsStr;
use std::fs::FileType;
use std::path::Path;
use std::path::PathBuf;
use std::sync::Arc;
use async_trait::async_trait;
use codex_exec_server::ExecutorAttachment;
use codex_exec_server::ReadDirectoryEntry;
use codex_utils_absolute_path::AbsolutePathBuf;
use codex_utils_string::take_bytes_at_char_boundary;
use serde::Deserialize;
use tokio::fs;
@@ -17,7 +21,17 @@ use crate::tools::handlers::parse_arguments;
use crate::tools::registry::ToolHandler;
use crate::tools::registry::ToolKind;
pub struct ListDirHandler;
/// Tool handler that lists directory contents, either from the local
/// filesystem or through the attached executor's remote filesystem API.
pub struct ListDirHandler {
    // Executor capability consulted to decide local vs. remote listing.
    executor_attachment: Arc<ExecutorAttachment>,
}

impl ListDirHandler {
    /// Creates a handler bound to the given executor attachment.
    pub fn new(executor_attachment: Arc<ExecutorAttachment>) -> Self {
        Self {
            executor_attachment,
        }
    }
}
const MAX_ENTRY_LENGTH: usize = 500;
const INDENTATION_SPACES: usize = 2;
@@ -99,7 +113,8 @@ impl ToolHandler for ListDirHandler {
));
}
let entries = list_dir_slice(&path, offset, limit, depth).await?;
let entries =
list_dir_slice(&self.executor_attachment, &path, offset, limit, depth).await?;
let mut output = Vec::with_capacity(entries.len() + 1);
output.push(format!("Absolute path: {}", path.display()));
output.extend(entries);
@@ -108,13 +123,21 @@ impl ToolHandler for ListDirHandler {
}
async fn list_dir_slice(
executor_attachment: &ExecutorAttachment,
path: &Path,
offset: usize,
limit: usize,
depth: usize,
) -> Result<Vec<String>, FunctionCallError> {
let mut entries = Vec::new();
collect_entries(path, Path::new(""), depth, &mut entries).await?;
collect_entries(
executor_attachment,
path,
Path::new(""),
depth,
&mut entries,
)
.await?;
if entries.is_empty() {
return Ok(Vec::new());
@@ -147,6 +170,7 @@ async fn list_dir_slice(
}
async fn collect_entries(
executor_attachment: &ExecutorAttachment,
dir_path: &Path,
relative_prefix: &Path,
depth: usize,
@@ -156,41 +180,18 @@ async fn collect_entries(
queue.push_back((dir_path.to_path_buf(), relative_prefix.to_path_buf(), depth));
while let Some((current_dir, prefix, remaining_depth)) = queue.pop_front() {
let mut read_dir = fs::read_dir(&current_dir).await.map_err(|err| {
FunctionCallError::RespondToModel(format!("failed to read directory: {err}"))
})?;
let mut dir_entries = Vec::new();
while let Some(entry) = read_dir.next_entry().await.map_err(|err| {
FunctionCallError::RespondToModel(format!("failed to read directory: {err}"))
})? {
let file_type = entry.file_type().await.map_err(|err| {
FunctionCallError::RespondToModel(format!("failed to inspect entry: {err}"))
})?;
let file_name = entry.file_name();
let relative_path = if prefix.as_os_str().is_empty() {
PathBuf::from(&file_name)
} else {
prefix.join(&file_name)
};
let display_name = format_entry_component(&file_name);
let display_depth = prefix.components().count();
let sort_key = format_entry_name(&relative_path);
let kind = DirEntryKind::from(&file_type);
dir_entries.push((
entry.path(),
relative_path,
kind,
DirEntry {
name: sort_key,
display_name,
depth: display_depth,
kind,
},
));
if executor_attachment.exec_server_url().is_some() {
collect_remote_dir_entries(
executor_attachment,
&current_dir,
&prefix,
&mut dir_entries,
)
.await?;
} else {
collect_local_dir_entries(&current_dir, &prefix, &mut dir_entries).await?;
}
dir_entries.sort_unstable_by(|a, b| a.3.name.cmp(&b.3.name));
@@ -206,6 +207,96 @@ async fn collect_entries(
Ok(())
}
/// Reads one directory level from the local filesystem and appends each entry
/// to `dir_entries` as (absolute path, path relative to the listing root,
/// kind, display record).
async fn collect_local_dir_entries(
    current_dir: &Path,
    prefix: &Path,
    dir_entries: &mut Vec<(PathBuf, PathBuf, DirEntryKind, DirEntry)>,
) -> Result<(), FunctionCallError> {
    let mut read_dir = fs::read_dir(current_dir).await.map_err(|err| {
        FunctionCallError::RespondToModel(format!("failed to read directory: {err}"))
    })?;
    while let Some(entry) = read_dir.next_entry().await.map_err(|err| {
        FunctionCallError::RespondToModel(format!("failed to read directory: {err}"))
    })? {
        let file_type = entry.file_type().await.map_err(|err| {
            FunctionCallError::RespondToModel(format!("failed to inspect entry: {err}"))
        })?;
        let file_name = entry.file_name();
        // Path of the entry relative to the root being listed.
        let relative_path = relative_child_path(prefix, &file_name);
        let kind = DirEntryKind::from(&file_type);
        dir_entries.push((
            entry.path(),
            relative_path.clone(),
            kind,
            to_dir_entry(relative_path, &file_name, kind),
        ));
    }
    Ok(())
}
/// Reads one directory level through the attached executor's remote
/// filesystem API, appending entries in the same tuple shape as
/// `collect_local_dir_entries` so both paths sort and render uniformly.
async fn collect_remote_dir_entries(
    executor_attachment: &ExecutorAttachment,
    current_dir: &Path,
    prefix: &Path,
    dir_entries: &mut Vec<(PathBuf, PathBuf, DirEntryKind, DirEntry)>,
) -> Result<(), FunctionCallError> {
    // The remote API takes an absolute path; non-absolute input is an error.
    let current_dir = AbsolutePathBuf::from_absolute_path(current_dir).map_err(|err| {
        FunctionCallError::RespondToModel(format!("failed to inspect directory path: {err}"))
    })?;
    let entries = executor_attachment
        .get_filesystem()
        .read_directory(&current_dir)
        .await
        .map_err(|err| {
            FunctionCallError::RespondToModel(format!("failed to read directory: {err}"))
        })?;
    for entry in entries {
        let file_name = OsStr::new(entry.file_name.as_str());
        let relative_path = relative_child_path(prefix, file_name);
        let kind = DirEntryKind::from(&entry);
        dir_entries.push((
            // Absolute path of the entry, rebuilt from the directory path.
            current_dir
                .join(entry.file_name.as_str())
                .map_err(|err| {
                    FunctionCallError::RespondToModel(format!(
                        "failed to inspect entry path: {err}"
                    ))
                })?
                .into_path_buf(),
            relative_path.clone(),
            kind,
            to_dir_entry(relative_path, file_name, kind),
        ));
    }
    Ok(())
}
/// Builds the path of `file_name` relative to `prefix`; an empty prefix
/// yields the bare file name.
fn relative_child_path(prefix: &Path, file_name: &OsStr) -> PathBuf {
    match prefix.as_os_str().is_empty() {
        true => PathBuf::from(file_name),
        false => prefix.join(file_name),
    }
}
/// Converts a raw listing entry into the `DirEntry` record used for sorting
/// and display.
fn to_dir_entry(relative_path: PathBuf, file_name: &OsStr, kind: DirEntryKind) -> DirEntry {
    // Depth is the number of parent components above this entry; top-level
    // entries have no parent components and get depth 0.
    let depth = match relative_path.parent() {
        Some(parent) => parent.components().count(),
        None => 0,
    };
    DirEntry {
        name: format_entry_name(&relative_path),
        display_name: format_entry_component(file_name),
        depth,
        kind,
    }
}
fn format_entry_name(path: &Path) -> String {
let normalized = path.to_string_lossy().replace("\\", "/");
if normalized.len() > MAX_ENTRY_LENGTH {
@@ -266,6 +357,20 @@ impl From<&FileType> for DirEntryKind {
}
}
impl From<&ReadDirectoryEntry> for DirEntryKind {
    /// Maps the remote entry flags onto the local entry kind.
    ///
    /// The remote directory API currently exposes directory/file booleans, but
    /// not a symlink bit, so remote listings preserve entries but cannot render
    /// the local "@" suffix.
    fn from(entry: &ReadDirectoryEntry) -> Self {
        match (entry.is_directory, entry.is_file) {
            (true, _) => DirEntryKind::Directory,
            (false, true) => DirEntryKind::File,
            (false, false) => DirEntryKind::Other,
        }
    }
}
#[cfg(test)]
#[path = "list_dir_tests.rs"]
mod tests;

View File

@@ -1,7 +1,16 @@
use super::*;
use codex_exec_server::Environment;
use codex_exec_server::ExecutorAttachment;
use pretty_assertions::assert_eq;
use std::sync::Arc;
use tempfile::tempdir;
/// Builds the executor attachment backing the default test `Environment`.
fn test_executor_attachment() -> Arc<ExecutorAttachment> {
    let environment = Environment::default();
    environment
        .executor_attachment()
        .expect("default environment should have an executor attachment")
}
#[tokio::test]
async fn lists_directory_entries() {
let temp = tempdir().expect("create tempdir");
@@ -34,8 +43,13 @@ async fn lists_directory_entries() {
symlink(dir_path.join("entry.txt"), &link_path).expect("create symlink");
}
let executor_attachment = test_executor_attachment();
let entries = list_dir_slice(
dir_path, /*offset*/ 1, /*limit*/ 20, /*depth*/ 3,
executor_attachment.as_ref(),
dir_path,
/*offset*/ 1,
/*limit*/ 20,
/*depth*/ 3,
)
.await
.expect("list directory");
@@ -70,8 +84,13 @@ async fn errors_when_offset_exceeds_entries() {
.await
.expect("create sub dir");
let executor_attachment = test_executor_attachment();
let err = list_dir_slice(
dir_path, /*offset*/ 10, /*limit*/ 1, /*depth*/ 2,
executor_attachment.as_ref(),
dir_path,
/*offset*/ 10,
/*limit*/ 1,
/*depth*/ 2,
)
.await
.expect_err("offset exceeds entries");
@@ -99,8 +118,13 @@ async fn respects_depth_parameter() {
.await
.expect("write deeper");
let executor_attachment = test_executor_attachment();
let entries_depth_one = list_dir_slice(
dir_path, /*offset*/ 1, /*limit*/ 10, /*depth*/ 1,
executor_attachment.as_ref(),
dir_path,
/*offset*/ 1,
/*limit*/ 10,
/*depth*/ 1,
)
.await
.expect("list depth 1");
@@ -110,7 +134,11 @@ async fn respects_depth_parameter() {
);
let entries_depth_two = list_dir_slice(
dir_path, /*offset*/ 1, /*limit*/ 20, /*depth*/ 2,
executor_attachment.as_ref(),
dir_path,
/*offset*/ 1,
/*limit*/ 20,
/*depth*/ 2,
)
.await
.expect("list depth 2");
@@ -125,7 +153,11 @@ async fn respects_depth_parameter() {
);
let entries_depth_three = list_dir_slice(
dir_path, /*offset*/ 1, /*limit*/ 30, /*depth*/ 3,
executor_attachment.as_ref(),
dir_path,
/*offset*/ 1,
/*limit*/ 30,
/*depth*/ 3,
)
.await
.expect("list depth 3");
@@ -158,8 +190,13 @@ async fn paginates_in_sorted_order() {
.await
.expect("write b child");
let executor_attachment = test_executor_attachment();
let first_page = list_dir_slice(
dir_path, /*offset*/ 1, /*limit*/ 2, /*depth*/ 2,
executor_attachment.as_ref(),
dir_path,
/*offset*/ 1,
/*limit*/ 2,
/*depth*/ 2,
)
.await
.expect("list page one");
@@ -173,7 +210,11 @@ async fn paginates_in_sorted_order() {
);
let second_page = list_dir_slice(
dir_path, /*offset*/ 3, /*limit*/ 2, /*depth*/ 2,
executor_attachment.as_ref(),
dir_path,
/*offset*/ 3,
/*limit*/ 2,
/*depth*/ 2,
)
.await
.expect("list page two");
@@ -197,9 +238,16 @@ async fn handles_large_limit_without_overflow() {
.await
.expect("write gamma");
let entries = list_dir_slice(dir_path, /*offset*/ 2, usize::MAX, /*depth*/ 1)
.await
.expect("list without overflow");
let executor_attachment = test_executor_attachment();
let entries = list_dir_slice(
executor_attachment.as_ref(),
dir_path,
/*offset*/ 2,
usize::MAX,
/*depth*/ 1,
)
.await
.expect("list without overflow");
assert_eq!(
entries,
vec!["beta.txt".to_string(), "gamma.txt".to_string(),]
@@ -218,8 +266,13 @@ async fn indicates_truncated_results() {
.expect("write file");
}
let executor_attachment = test_executor_attachment();
let entries = list_dir_slice(
dir_path, /*offset*/ 1, /*limit*/ 25, /*depth*/ 1,
executor_attachment.as_ref(),
dir_path,
/*offset*/ 1,
/*limit*/ 25,
/*depth*/ 1,
)
.await
.expect("list directory");
@@ -242,8 +295,13 @@ async fn truncation_respects_sorted_order() -> anyhow::Result<()> {
tokio::fs::write(nested.join("child.txt"), b"child").await?;
tokio::fs::write(deeper.join("grandchild.txt"), b"deep").await?;
let executor_attachment = test_executor_attachment();
let entries_depth_three = list_dir_slice(
dir_path, /*offset*/ 1, /*limit*/ 3, /*depth*/ 3,
executor_attachment.as_ref(),
dir_path,
/*offset*/ 1,
/*limit*/ 3,
/*depth*/ 3,
)
.await?;
assert_eq!(

View File

@@ -27,6 +27,7 @@ use crate::unified_exec::UnifiedExecContext;
use crate::unified_exec::UnifiedExecProcessManager;
use crate::unified_exec::WriteStdinRequest;
use async_trait::async_trait;
use codex_exec_server::ExecutorAttachment;
use codex_features::Feature;
use codex_otel::SessionTelemetry;
use codex_otel::metrics::names::TOOL_CALL_UNIFIED_EXEC_METRIC;
@@ -35,7 +36,17 @@ use serde::Deserialize;
use std::path::PathBuf;
use std::sync::Arc;
pub struct UnifiedExecHandler;
/// Tool handler backing the `exec_command` and `write_stdin` unified-exec
/// tools.
pub struct UnifiedExecHandler {
    // Executor capability threaded into each unified-exec invocation context.
    executor_attachment: Arc<ExecutorAttachment>,
}

impl UnifiedExecHandler {
    /// Creates a handler bound to the given executor attachment.
    pub fn new(executor_attachment: Arc<ExecutorAttachment>) -> Self {
        Self {
            executor_attachment,
        }
    }
}
#[derive(Debug, Deserialize)]
pub(crate) struct ExecCommandArgs {
@@ -179,7 +190,12 @@ impl ToolHandler for UnifiedExecHandler {
};
let manager: &UnifiedExecProcessManager = &session.services.unified_exec_manager;
let context = UnifiedExecContext::new(session.clone(), turn.clone(), call_id.clone());
let context = UnifiedExecContext::new(
session.clone(),
turn.clone(),
call_id.clone(),
Arc::clone(&self.executor_attachment),
);
let response = match tool_name.as_str() {
"exec_command" => {

View File

@@ -17,8 +17,17 @@ use crate::tools::context::ToolInvocation;
use crate::tools::context::ToolPayload;
use crate::tools::registry::ToolHandler;
use crate::turn_diff_tracker::TurnDiffTracker;
use codex_exec_server::Environment;
use tokio::sync::Mutex;
/// Constructs a `UnifiedExecHandler` backed by the default environment's
/// executor attachment, for use in these tests.
fn unified_exec_handler() -> UnifiedExecHandler {
    let attachment = Environment::default()
        .executor_attachment()
        .expect("default environment has an executor attachment");
    UnifiedExecHandler::new(attachment)
}
#[test]
fn test_get_command_uses_default_shell_when_unspecified() -> anyhow::Result<()> {
let json = r#"{"cmd": "echo hello"}"#;
@@ -202,7 +211,7 @@ async fn exec_command_pre_tool_use_payload_uses_raw_command() {
arguments: serde_json::json!({ "cmd": "printf exec command" }).to_string(),
};
let (session, turn) = make_session_and_context().await;
let handler = UnifiedExecHandler;
let handler = unified_exec_handler();
assert_eq!(
handler.pre_tool_use_payload(&ToolInvocation {
@@ -226,7 +235,7 @@ async fn exec_command_pre_tool_use_payload_skips_write_stdin() {
arguments: serde_json::json!({ "chars": "echo hi" }).to_string(),
};
let (session, turn) = make_session_and_context().await;
let handler = UnifiedExecHandler;
let handler = unified_exec_handler();
assert_eq!(
handler.pre_tool_use_payload(&ToolInvocation {
@@ -263,8 +272,10 @@ fn exec_command_post_tool_use_payload_uses_output_for_noninteractive_one_shot_co
]),
};
let handler = unified_exec_handler();
assert_eq!(
UnifiedExecHandler.post_tool_use_payload("call-43", &payload, &output),
handler.post_tool_use_payload("call-43", &payload, &output),
Some(crate::tools::registry::PostToolUsePayload {
command: "echo three".to_string(),
tool_response: serde_json::json!("three"),
@@ -293,8 +304,10 @@ fn exec_command_post_tool_use_payload_skips_interactive_exec() {
]),
};
let handler = unified_exec_handler();
assert_eq!(
UnifiedExecHandler.post_tool_use_payload("call-44", &payload, &output),
handler.post_tool_use_payload("call-44", &payload, &output),
None
);
}
@@ -320,8 +333,10 @@ fn exec_command_post_tool_use_payload_skips_running_sessions() {
]),
};
let handler = unified_exec_handler();
assert_eq!(
UnifiedExecHandler.post_tool_use_payload("call-45", &payload, &output),
handler.post_tool_use_payload("call-45", &payload, &output),
None
);
}

View File

@@ -9,6 +9,7 @@ use codex_utils_absolute_path::AbsolutePathBuf;
use codex_utils_image::PromptImageMode;
use codex_utils_image::load_for_prompt_bytes;
use serde::Deserialize;
use std::sync::Arc;
use crate::function_tool::FunctionCallError;
use crate::original_image_detail::can_request_original_image_detail;
@@ -20,8 +21,19 @@ use crate::tools::context::ToolPayload;
use crate::tools::handlers::parse_arguments;
use crate::tools::registry::ToolHandler;
use crate::tools::registry::ToolKind;
use codex_exec_server::ExecutorAttachment;
pub struct ViewImageHandler;
/// Tool handler for `view_image`: reads image metadata and bytes through the
/// attached executor's filesystem.
pub struct ViewImageHandler {
    // Filesystem access for metadata and file reads goes through this
    // attachment rather than the turn environment.
    executor_attachment: Arc<ExecutorAttachment>,
}

impl ViewImageHandler {
    /// Creates a handler bound to the given executor attachment.
    pub fn new(executor_attachment: Arc<ExecutorAttachment>) -> Self {
        Self {
            executor_attachment,
        }
    }
}
const VIEW_IMAGE_UNSUPPORTED_MESSAGE: &str =
"view_image is not allowed because you do not support image inputs";
@@ -94,8 +106,8 @@ impl ToolHandler for ViewImageHandler {
FunctionCallError::RespondToModel(format!("unable to resolve image path: {error}"))
})?;
let metadata = turn
.environment
let metadata = self
.executor_attachment
.get_filesystem()
.get_metadata(&abs_path)
.await
@@ -112,8 +124,8 @@ impl ToolHandler for ViewImageHandler {
abs_path.display()
)));
}
let file_bytes = turn
.environment
let file_bytes = self
.executor_attachment
.get_filesystem()
.read_file(&abs_path)
.await

View File

@@ -4,12 +4,17 @@
//! decision to avoid re-prompting, builds the self-invocation command for
//! `codex --codex-run-as-apply-patch`, and runs under the current
//! `SandboxAttempt` with a minimal environment.
use crate::error::CodexErr;
use crate::error::SandboxErr;
use crate::exec::ExecCapturePolicy;
use crate::exec::ExecToolCallOutput;
use crate::exec::StreamOutput;
use crate::exec::is_likely_sandbox_denied;
use crate::guardian::GuardianApprovalRequest;
use crate::guardian::review_approval_request;
use crate::guardian::routes_approval_to_guardian;
use crate::sandboxing::ExecOptions;
use crate::sandboxing::ExecRequest;
use crate::sandboxing::execute_env;
use crate::tools::sandboxing::Approvable;
use crate::tools::sandboxing::ApprovalCtx;
@@ -22,6 +27,10 @@ use crate::tools::sandboxing::ToolRuntime;
use crate::tools::sandboxing::with_cached_approval;
use codex_apply_patch::ApplyPatchAction;
use codex_apply_patch::CODEX_CORE_APPLY_PATCH_ARG1;
use codex_exec_server::ExecOutputStream as ExecutorOutputStream;
use codex_exec_server::ExecParams as ExecutorExecParams;
use codex_exec_server::ExecutorAttachment;
use codex_exec_server::ProcessId;
use codex_protocol::models::PermissionProfile;
use codex_protocol::protocol::AskForApproval;
use codex_protocol::protocol::FileChange;
@@ -31,7 +40,13 @@ use codex_sandboxing::SandboxablePreference;
use codex_utils_absolute_path::AbsolutePathBuf;
use futures::future::BoxFuture;
use std::collections::HashMap;
use std::io;
use std::path::PathBuf;
use std::sync::Arc;
use std::time::Duration;
use std::time::Instant;
const EXEC_TIMEOUT_EXIT_CODE: i32 = 124;
#[derive(Debug)]
pub struct ApplyPatchRequest {
@@ -44,12 +59,15 @@ pub struct ApplyPatchRequest {
pub timeout_ms: Option<u64>,
}
#[derive(Default)]
pub struct ApplyPatchRuntime;
pub struct ApplyPatchRuntime {
executor_attachment: Arc<ExecutorAttachment>,
}
impl ApplyPatchRuntime {
pub fn new() -> Self {
Self
pub fn new(executor_attachment: Arc<ExecutorAttachment>) -> Self {
Self {
executor_attachment,
}
}
fn build_guardian_review_request(
@@ -116,6 +134,141 @@ impl ApplyPatchRuntime {
tx_event: ctx.session.get_tx_event(),
})
}
/// Runs a prepared exec request, dispatching to the remote executor when an
/// exec-server URL is attached and to local sandboxed execution otherwise.
///
/// Stamps the measured wall-clock duration onto the output so both paths
/// report consistent timing (the remote path fills in `Duration::ZERO`).
async fn execute_request(
    &self,
    env: ExecRequest,
    ctx: &ToolCtx,
) -> Result<ExecToolCallOutput, CodexErr> {
    let start = Instant::now();
    // Bind mutably up front instead of the previous `let out = …; let mut
    // out = out;` shuffle — same behavior, one binding.
    let mut out = if self.executor_attachment.exec_server_url().is_some() {
        self.execute_request_remote(env, ctx).await?
    } else {
        execute_env(env, Self::stdout_stream(ctx)).await?
    };
    out.duration = start.elapsed();
    Ok(out)
}
/// Runs the apply-patch command on the remote exec backend and streams its
/// output until the process closes, fails, or hits the expiration deadline.
async fn execute_request_remote(
    &self,
    env: ExecRequest,
    ctx: &ToolCtx,
) -> Result<ExecToolCallOutput, CodexErr> {
    let started = self
        .executor_attachment
        .get_exec_backend()
        .start(ExecutorExecParams {
            // Keyed by call id so concurrent apply-patch calls get distinct
            // remote process ids.
            process_id: ProcessId::new(format!("apply-patch-{}", ctx.call_id)),
            argv: env.command.clone(),
            cwd: env.cwd.clone(),
            env: env.env.clone(),
            tty: false,
            arg0: env.arg0.clone(),
        })
        .await
        .map_err(|err| CodexErr::Io(io::Error::other(err)))?;
    let process = started.process;
    let mut wake_rx = process.subscribe_wake();
    // Sequence cursor for incremental reads; None reads from the beginning.
    let mut after_seq = None;
    let mut stdout = Vec::new();
    let mut stderr = Vec::new();
    let mut aggregated_output = Vec::new();
    let mut exit_code = None;
    let mut timed_out = false;
    let expiration = env.expiration.clone();
    let capture_policy = env.capture_policy;
    // Only the shell-tool capture policy enforces a deadline; otherwise this
    // future never resolves and the select! arm below stays pending.
    let expiration_wait = async move {
        if matches!(capture_policy, ExecCapturePolicy::ShellTool) {
            expiration.wait().await;
        } else {
            std::future::pending::<()>().await;
        }
    };
    tokio::pin!(expiration_wait);
    loop {
        // Non-blocking poll (wait_ms = 0); blocking happens in the select!
        // below on the wake channel.
        let response = process
            .read(after_seq, /*max_bytes*/ None, /*wait_ms*/ Some(0))
            .await
            .map_err(|err| CodexErr::Io(io::Error::other(err)))?;
        for chunk in response.chunks {
            let bytes = chunk.chunk.into_inner();
            match chunk.stream {
                ExecutorOutputStream::Stdout | ExecutorOutputStream::Pty => {
                    stdout.extend_from_slice(&bytes);
                }
                ExecutorOutputStream::Stderr => {
                    stderr.extend_from_slice(&bytes);
                }
            }
            // Aggregated output interleaves all streams in arrival order.
            aggregated_output.extend_from_slice(&bytes);
        }
        if let Some(message) = response.failure {
            return Err(CodexErr::Io(io::Error::other(message)));
        }
        if response.exited {
            exit_code = response.exit_code;
        }
        if response.closed {
            break;
        }
        // Advance the cursor to next_seq - 1 for the next incremental read;
        // checked_sub keeps it None when next_seq is 0.
        after_seq = response.next_seq.checked_sub(1);
        tokio::select! {
            wake_result = wake_rx.changed() => {
                if wake_result.is_err() {
                    return Err(CodexErr::Io(io::Error::other(
                        "exec-server wake channel closed",
                    )));
                }
            }
            _ = &mut expiration_wait, if !timed_out => {
                // Deadline hit: terminate the remote process and report the
                // timeout exit code (124).
                process
                    .terminate()
                    .await
                    .map_err(|err| CodexErr::Io(io::Error::other(err)))?;
                timed_out = true;
                exit_code = Some(EXEC_TIMEOUT_EXIT_CODE);
                break;
            }
        }
    }
    let output = ExecToolCallOutput {
        // -1 signals that the process closed without reporting an exit code.
        exit_code: exit_code.unwrap_or(-1),
        stdout: StreamOutput::new(String::from_utf8_lossy(&stdout).to_string()),
        stderr: StreamOutput::new(String::from_utf8_lossy(&stderr).to_string()),
        aggregated_output: StreamOutput::new(
            String::from_utf8_lossy(&aggregated_output).to_string(),
        ),
        // The caller (execute_request) overwrites this with the measured
        // wall-clock duration.
        duration: Duration::ZERO,
        timed_out,
    };
    if timed_out {
        return Err(CodexErr::Sandbox(SandboxErr::Timeout {
            output: Box::new(output),
        }));
    }
    // A failure that looks like a sandbox denial is surfaced as
    // SandboxErr::Denied, mirroring the local execution path.
    if is_likely_sandbox_denied(env.sandbox, &output) {
        return Err(CodexErr::Sandbox(SandboxErr::Denied {
            output: Box::new(output),
            network_policy_decision: None,
        }));
    }
    Ok(output)
}
}
impl Sandboxable for ApplyPatchRuntime {
@@ -223,7 +376,8 @@ impl ToolRuntime<ApplyPatchRequest, ExecToolCallOutput> for ApplyPatchRuntime {
let env = attempt
.env_for(command, options, /*network*/ None)
.map_err(|err| ToolError::Codex(err.into()))?;
let out = execute_env(env, Self::stdout_stream(ctx))
let out = self
.execute_request(env, ctx)
.await
.map_err(ToolError::Codex)?;
Ok(out)

View File

@@ -1,13 +1,20 @@
use super::*;
use codex_exec_server::Environment;
use codex_protocol::protocol::GranularApprovalConfig;
use pretty_assertions::assert_eq;
use std::collections::HashMap;
#[cfg(not(target_os = "windows"))]
use std::path::PathBuf;
#[test]
fn wants_no_sandbox_approval_granular_respects_sandbox_flag() {
let runtime = ApplyPatchRuntime::new();
#[tokio::test]
async fn wants_no_sandbox_approval_granular_respects_sandbox_flag() {
let runtime = ApplyPatchRuntime::new(
Environment::create(/*exec_server_url*/ None)
.await
.expect("create environment")
.executor_attachment()
.expect("default environment should have an executor attachment"),
);
assert!(runtime.wants_no_sandbox_approval(AskForApproval::OnRequest));
assert!(
!runtime.wants_no_sandbox_approval(AskForApproval::Granular(GranularApprovalConfig {

View File

@@ -37,6 +37,7 @@ use crate::unified_exec::NoopSpawnLifecycle;
use crate::unified_exec::UnifiedExecError;
use crate::unified_exec::UnifiedExecProcess;
use crate::unified_exec::UnifiedExecProcessManager;
use codex_exec_server::ExecutorAttachment;
use codex_network_proxy::NetworkProxy;
use codex_protocol::models::PermissionProfile;
use codex_protocol::protocol::ReviewDecision;
@@ -44,6 +45,7 @@ use codex_sandboxing::SandboxablePreference;
use futures::future::BoxFuture;
use std::collections::HashMap;
use std::path::PathBuf;
use std::sync::Arc;
/// Request payload used by the unified-exec runtime after approvals and
/// sandbox preferences have been resolved for the current turn.
@@ -79,14 +81,20 @@ pub struct UnifiedExecApprovalKey {
/// unified-exec side while delegating process startup to the manager.
pub struct UnifiedExecRuntime<'a> {
    // Shared unified-exec process manager (borrowed for the turn).
    manager: &'a UnifiedExecProcessManager,
    // Attached executor; also consulted (via exec_server_url) to reject
    // zsh-fork spawning when an exec server is configured.
    executor_attachment: Arc<ExecutorAttachment>,
    // Shell mode selected for commands launched by this runtime.
    shell_mode: UnifiedExecShellMode,
}
impl<'a> UnifiedExecRuntime<'a> {
/// Creates a runtime bound to the shared unified-exec process manager.
pub fn new(manager: &'a UnifiedExecProcessManager, shell_mode: UnifiedExecShellMode) -> Self {
pub fn new(
manager: &'a UnifiedExecProcessManager,
executor_attachment: Arc<ExecutorAttachment>,
shell_mode: UnifiedExecShellMode,
) -> Self {
Self {
manager,
executor_attachment,
shell_mode,
}
}
@@ -239,7 +247,7 @@ impl<'a> ToolRuntime<UnifiedExecRequest, UnifiedExecProcess> for UnifiedExecRunt
.await?
{
Some(prepared) => {
if ctx.turn.environment.exec_server_url().is_some() {
if self.executor_attachment.exec_server_url().is_some() {
return Err(ToolError::Rejected(
"unified_exec zsh-fork is not supported when exec_server_url is configured".to_string(),
));
@@ -251,7 +259,7 @@ impl<'a> ToolRuntime<UnifiedExecRequest, UnifiedExecProcess> for UnifiedExecRunt
&prepared.exec_request,
req.tty,
prepared.spawn_lifecycle,
ctx.turn.environment.as_ref(),
self.executor_attachment.as_ref(),
)
.await
.map_err(|err| match err {
@@ -287,7 +295,7 @@ impl<'a> ToolRuntime<UnifiedExecRequest, UnifiedExecProcess> for UnifiedExecRunt
&exec_env,
req.tty,
Box::new(NoopSpawnLifecycle),
ctx.turn.environment.as_ref(),
self.executor_attachment.as_ref(),
)
.await
.map_err(|err| match err {

View File

@@ -21,6 +21,7 @@ use crate::tools::handlers::request_permissions_tool_description;
use crate::tools::handlers::request_user_input_tool_description;
use crate::tools::registry::ToolRegistryBuilder;
use crate::tools::registry::tool_handler_key;
use codex_exec_server::ExecutorAttachment;
use codex_features::Feature;
use codex_features::Features;
use codex_protocol::config_types::WebSearchConfig;
@@ -86,6 +87,7 @@ use serde::Serialize;
use std::collections::BTreeMap;
use std::collections::HashMap;
use std::path::PathBuf;
use std::sync::Arc;
pub type JsonSchema = codex_tools::JsonSchema;
@@ -171,6 +173,7 @@ pub(crate) struct ToolsConfig {
pub experimental_supported_tools: Vec<String>,
pub agent_jobs_tools: bool,
pub agent_jobs_worker_tools: bool,
pub executor_attachment: Option<Arc<ExecutorAttachment>>,
}
pub(crate) struct ToolsConfigParams<'a> {
@@ -305,9 +308,18 @@ impl ToolsConfig {
experimental_supported_tools: model_info.experimental_supported_tools.clone(),
agent_jobs_tools: include_agent_jobs,
agent_jobs_worker_tools,
executor_attachment: None,
}
}
pub fn with_executor_attachment(
mut self,
executor_attachment: Option<Arc<ExecutorAttachment>>,
) -> Self {
self.executor_attachment = executor_attachment;
self
}
pub fn with_agent_roles(mut self, agent_roles: BTreeMap<String, AgentRoleConfig>) -> Self {
self.agent_roles = agent_roles;
self
@@ -390,8 +402,13 @@ pub(crate) fn build_specs(
app_tools: Option<HashMap<String, ToolInfo>>,
dynamic_tools: &[DynamicToolSpec],
) -> ToolRegistryBuilder {
let mut config = config.clone();
if config.executor_attachment.is_none() {
config.executor_attachment =
codex_exec_server::Environment::default().executor_attachment();
}
build_specs_with_discoverable_tools(
config,
&config,
mcp_tools,
app_tools,
/*discoverable_tools*/ None,
@@ -436,16 +453,11 @@ pub(crate) fn build_specs_with_discoverable_tools(
use crate::tools::handlers::multi_agents_v2::SendMessageHandler as SendMessageHandlerV2;
use crate::tools::handlers::multi_agents_v2::SpawnAgentHandler as SpawnAgentHandlerV2;
use crate::tools::handlers::multi_agents_v2::WaitAgentHandler as WaitAgentHandlerV2;
use std::sync::Arc;
let mut builder = ToolRegistryBuilder::new();
let shell_handler = Arc::new(ShellHandler);
let unified_exec_handler = Arc::new(UnifiedExecHandler);
let plan_handler = Arc::new(PlanHandler);
let apply_patch_handler = Arc::new(ApplyPatchHandler);
let dynamic_tool_handler = Arc::new(DynamicToolHandler);
let view_image_handler = Arc::new(ViewImageHandler);
let mcp_handler = Arc::new(McpHandler);
let mcp_resource_handler = Arc::new(McpResourceHandler);
let shell_command_handler = Arc::new(ShellCommandHandler::from(config.shell_command_backend));
@@ -493,66 +505,70 @@ pub(crate) fn build_specs_with_discoverable_tools(
builder.register_handler(WAIT_TOOL_NAME, code_mode_wait_handler);
}
match &config.shell_type {
ConfigShellToolType::Default => {
push_tool_spec(
&mut builder,
create_shell_tool(ShellToolOptions {
exec_permission_approvals_enabled,
}),
/*supports_parallel_tool_calls*/ true,
config.code_mode_enabled,
);
if let Some(executor_attachment) = config.executor_attachment.as_ref() {
let unified_exec_handler =
Arc::new(UnifiedExecHandler::new(Arc::clone(executor_attachment)));
match &config.shell_type {
ConfigShellToolType::Default => {
push_tool_spec(
&mut builder,
create_shell_tool(ShellToolOptions {
exec_permission_approvals_enabled,
}),
/*supports_parallel_tool_calls*/ true,
config.code_mode_enabled,
);
}
ConfigShellToolType::Local => {
push_tool_spec(
&mut builder,
ToolSpec::LocalShell {},
/*supports_parallel_tool_calls*/ true,
config.code_mode_enabled,
);
}
ConfigShellToolType::UnifiedExec => {
push_tool_spec(
&mut builder,
create_exec_command_tool(CommandToolOptions {
allow_login_shell: config.allow_login_shell,
exec_permission_approvals_enabled,
}),
/*supports_parallel_tool_calls*/ true,
config.code_mode_enabled,
);
push_tool_spec(
&mut builder,
create_write_stdin_tool(),
/*supports_parallel_tool_calls*/ false,
config.code_mode_enabled,
);
builder.register_handler("exec_command", unified_exec_handler.clone());
builder.register_handler("write_stdin", unified_exec_handler);
}
ConfigShellToolType::Disabled => {
// Do nothing.
}
ConfigShellToolType::ShellCommand => {
push_tool_spec(
&mut builder,
create_shell_command_tool(CommandToolOptions {
allow_login_shell: config.allow_login_shell,
exec_permission_approvals_enabled,
}),
/*supports_parallel_tool_calls*/ true,
config.code_mode_enabled,
);
}
}
ConfigShellToolType::Local => {
push_tool_spec(
&mut builder,
ToolSpec::LocalShell {},
/*supports_parallel_tool_calls*/ true,
config.code_mode_enabled,
);
}
ConfigShellToolType::UnifiedExec => {
push_tool_spec(
&mut builder,
create_exec_command_tool(CommandToolOptions {
allow_login_shell: config.allow_login_shell,
exec_permission_approvals_enabled,
}),
/*supports_parallel_tool_calls*/ true,
config.code_mode_enabled,
);
push_tool_spec(
&mut builder,
create_write_stdin_tool(),
/*supports_parallel_tool_calls*/ false,
config.code_mode_enabled,
);
builder.register_handler("exec_command", unified_exec_handler.clone());
builder.register_handler("write_stdin", unified_exec_handler);
}
ConfigShellToolType::Disabled => {
// Do nothing.
}
ConfigShellToolType::ShellCommand => {
push_tool_spec(
&mut builder,
create_shell_command_tool(CommandToolOptions {
allow_login_shell: config.allow_login_shell,
exec_permission_approvals_enabled,
}),
/*supports_parallel_tool_calls*/ true,
config.code_mode_enabled,
);
}
}
if config.shell_type != ConfigShellToolType::Disabled {
// Always register shell aliases so older prompts remain compatible.
builder.register_handler("shell", shell_handler.clone());
builder.register_handler("container.exec", shell_handler.clone());
builder.register_handler("local_shell", shell_handler);
builder.register_handler("shell_command", shell_command_handler);
if config.shell_type != ConfigShellToolType::Disabled {
// Always register shell aliases so older prompts remain compatible.
builder.register_handler("shell", shell_handler.clone());
builder.register_handler("container.exec", shell_handler.clone());
builder.register_handler("local_shell", shell_handler);
builder.register_handler("shell_command", shell_command_handler);
}
}
if mcp_tools.is_some() {
@@ -587,7 +603,7 @@ pub(crate) fn build_specs_with_discoverable_tools(
);
builder.register_handler("update_plan", plan_handler);
if config.js_repl_enabled {
if config.executor_attachment.is_some() && config.js_repl_enabled {
push_tool_spec(
&mut builder,
create_js_repl_tool(),
@@ -661,7 +677,10 @@ pub(crate) fn build_specs_with_discoverable_tools(
builder.register_handler(TOOL_SUGGEST_TOOL_NAME, tool_suggest_handler);
}
if let Some(apply_patch_tool_type) = &config.apply_patch_tool_type {
if let Some(executor_attachment) = config.executor_attachment.as_ref()
&& let Some(apply_patch_tool_type) = &config.apply_patch_tool_type
{
let apply_patch_handler = Arc::new(ApplyPatchHandler::new(Arc::clone(executor_attachment)));
match apply_patch_tool_type {
ApplyPatchToolType::Freeform => {
push_tool_spec(
@@ -683,12 +702,13 @@ pub(crate) fn build_specs_with_discoverable_tools(
builder.register_handler("apply_patch", apply_patch_handler);
}
if config
.experimental_supported_tools
.iter()
.any(|tool| tool == "list_dir")
if let Some(executor_attachment) = config.executor_attachment.as_ref()
&& config
.experimental_supported_tools
.iter()
.any(|tool| tool == "list_dir")
{
let list_dir_handler = Arc::new(ListDirHandler);
let list_dir_handler = Arc::new(ListDirHandler::new(Arc::clone(executor_attachment)));
push_tool_spec(
&mut builder,
create_list_dir_tool(),
@@ -763,15 +783,18 @@ pub(crate) fn build_specs_with_discoverable_tools(
);
}
push_tool_spec(
&mut builder,
create_view_image_tool(ViewImageToolOptions {
can_request_original_image_detail: config.can_request_original_image_detail,
}),
/*supports_parallel_tool_calls*/ true,
config.code_mode_enabled,
);
builder.register_handler("view_image", view_image_handler);
if let Some(executor_attachment) = config.executor_attachment.as_ref() {
let view_image_handler = Arc::new(ViewImageHandler::new(Arc::clone(executor_attachment)));
push_tool_spec(
&mut builder,
create_view_image_tool(ViewImageToolOptions {
can_request_original_image_detail: config.can_request_original_image_detail,
}),
/*supports_parallel_tool_calls*/ true,
config.code_mode_enabled,
);
builder.register_handler("view_image", view_image_handler);
}
if config.collab_tools {
if config.multi_agent_v2 {

View File

@@ -987,6 +987,54 @@ fn js_repl_enabled_adds_tools() {
assert_contains_tool_names(&tools, &["js_repl", "js_repl_reset"]);
}
#[test]
fn no_attached_executor_hides_executor_backed_tools() {
    // With no executor attachment, every tool that needs an executor (shell,
    // exec_command/write_stdin, apply_patch, list_dir, view_image, js_repl)
    // must be omitted, while executor-free tools stay available.
    let model_info = model_info_from_models_json("gpt-5-codex");
    let mut features = Features::with_defaults();
    // Enable executor-backed features so their absence below is meaningful,
    // i.e. the tools are hidden by the missing attachment, not by the feature flags.
    features.enable(Feature::UnifiedExec);
    features.enable(Feature::JsRepl);
    let available_models = Vec::new();
    let mut tools_config = ToolsConfig::new(&ToolsConfigParams {
        model_info: &model_info,
        available_models: &available_models,
        features: &features,
        web_search_mode: Some(WebSearchMode::Live),
        session_source: SessionSource::Cli,
        sandbox_policy: &SandboxPolicy::DangerFullAccess,
        windows_sandbox_level: WindowsSandboxLevel::Disabled,
    })
    .with_executor_attachment(None);
    // list_dir is also gated on an attachment; opt in to prove it stays hidden.
    tools_config
        .experimental_supported_tools
        .push("list_dir".to_string());
    let (tools, _) = build_specs_with_discoverable_tools(
        &tools_config,
        Some(std::collections::HashMap::new()),
        /*app_tools*/ None,
        /*discoverable_tools*/ None,
        &[],
    )
    .build();
    if let Some(shell_tool) = shell_tool_name(&tools_config) {
        assert_lacks_tool_name(&tools, shell_tool);
    }
    for absent in [
        "exec_command",
        "write_stdin",
        "apply_patch",
        "list_dir",
        VIEW_IMAGE_TOOL_NAME,
        "js_repl",
        "js_repl_reset",
    ] {
        assert_lacks_tool_name(&tools, absent);
    }
    // Tools that do not touch an executor must survive the no-executor mode.
    assert_contains_tool_names(&tools, &["update_plan", "request_user_input", "web_search"]);
}
#[test]
fn image_generation_tools_require_feature_and_supported_model() {
let config = test_config();

View File

@@ -28,6 +28,7 @@ use std::path::PathBuf;
use std::sync::Arc;
use std::sync::Weak;
use codex_exec_server::ExecutorAttachment;
use codex_network_proxy::NetworkProxy;
use codex_protocol::models::PermissionProfile;
use rand::Rng;
@@ -73,14 +74,21 @@ pub(crate) struct UnifiedExecContext {
pub session: Arc<Session>,
pub turn: Arc<TurnContext>,
pub call_id: String,
pub executor_attachment: Arc<ExecutorAttachment>,
}
impl UnifiedExecContext {
pub fn new(session: Arc<Session>, turn: Arc<TurnContext>, call_id: String) -> Self {
pub fn new(
session: Arc<Session>,
turn: Arc<TurnContext>,
call_id: String,
executor_attachment: Arc<ExecutorAttachment>,
) -> Self {
Self {
session,
turn,
call_id,
executor_attachment,
}
}
}

View File

@@ -98,12 +98,21 @@ async fn exec_command_with_tty(
&request,
tty,
Box::new(NoopSpawnLifecycle),
turn.environment.as_ref(),
turn.environment
.executor_attachment()
.expect("test turn has executor attachment")
.as_ref(),
)
.await?,
);
let context =
UnifiedExecContext::new(Arc::clone(session), Arc::clone(turn), "call".to_string());
let context = UnifiedExecContext::new(
Arc::clone(session),
Arc::clone(turn),
"call".to_string(),
turn.environment
.executor_attachment()
.expect("test turn has executor attachment"),
);
let started_at = Instant::now();
let process_started_alive = !process.has_exited() && process.exit_code().is_none();
if process_started_alive {
@@ -507,13 +516,16 @@ async fn completed_pipe_commands_preserve_exit_code() -> anyhow::Result<()> {
);
let environment = codex_exec_server::Environment::default();
let executor_attachment = environment
.executor_attachment()
.expect("default environment has an executor attachment");
let process = UnifiedExecProcessManager::default()
.open_session_with_exec_env(
/*process_id*/ 1234,
&request,
/*tty*/ false,
Box::new(NoopSpawnLifecycle),
&environment,
executor_attachment.as_ref(),
)
.await?;
@@ -540,6 +552,10 @@ async fn unified_exec_uses_remote_exec_server_when_configured() -> anyhow::Resul
};
let remote_test_env = remote_test_env().await?;
let environment = remote_test_env.environment();
let executor_attachment = environment
.executor_attachment()
.expect("remote environment has an executor attachment");
let (_, turn) = make_session_and_context().await;
let request = test_exec_request(
&turn,
@@ -555,7 +571,7 @@ async fn unified_exec_uses_remote_exec_server_when_configured() -> anyhow::Resul
&request,
/*tty*/ true,
Box::new(NoopSpawnLifecycle),
remote_test_env.environment(),
executor_attachment.as_ref(),
)
.await?;
@@ -594,6 +610,10 @@ async fn remote_exec_server_rejects_inherited_fd_launches() -> anyhow::Result<()
let remote_test_env = remote_test_env().await?;
let (_, mut turn) = make_session_and_context().await;
turn.environment = Arc::new(remote_test_env.environment().clone());
let executor_attachment = turn
.environment
.executor_attachment()
.expect("remote environment has an executor attachment");
let request = test_exec_request(
&turn,
@@ -611,7 +631,7 @@ async fn remote_exec_server_rejects_inherited_fd_launches() -> anyhow::Result<()
Box::new(TestSpawnLifecycle {
inherited_fds: vec![42],
}),
turn.environment.as_ref(),
executor_attachment.as_ref(),
)
.await
.expect_err("expected inherited fd rejection");

View File

@@ -49,6 +49,7 @@ use crate::unified_exec::process::OutputBuffer;
use crate::unified_exec::process::OutputHandles;
use crate::unified_exec::process::SpawnLifecycleHandle;
use crate::unified_exec::process::UnifiedExecProcess;
use codex_exec_server::ExecutorAttachment;
use codex_utils_output_truncation::approx_token_count;
const UNIFIED_EXEC_ENV: [(&str, &str); 10] = [
@@ -585,7 +586,7 @@ impl UnifiedExecProcessManager {
env: &ExecRequest,
tty: bool,
mut spawn_lifecycle: SpawnLifecycleHandle,
environment: &codex_exec_server::Environment,
executor_attachment: &ExecutorAttachment,
) -> Result<UnifiedExecProcess, UnifiedExecError> {
let (program, args) = env
.command
@@ -593,14 +594,14 @@ impl UnifiedExecProcessManager {
.ok_or(UnifiedExecError::MissingCommandLine)?;
let inherited_fds = spawn_lifecycle.inherited_fds();
if environment.exec_server_url().is_some() {
if executor_attachment.exec_server_url().is_some() {
if !inherited_fds.is_empty() {
return Err(UnifiedExecError::create_process(
"remote exec-server does not support inherited file descriptors".to_string(),
));
}
let started = environment
let started = executor_attachment
.get_exec_backend()
.start(codex_exec_server::ExecParams {
process_id: exec_server_process_id(process_id).into(),
@@ -656,6 +657,7 @@ impl UnifiedExecProcessManager {
let mut orchestrator = ToolOrchestrator::new();
let mut runtime = UnifiedExecRuntime::new(
self,
Arc::clone(&context.executor_attachment),
context.turn.tools_config.unified_exec_shell_mode.clone(),
);
let exec_approval_requirement = context

View File

@@ -132,6 +132,8 @@ pub async fn test_env() -> Result<TestEnv> {
let environment = codex_exec_server::Environment::create(Some(websocket_url)).await?;
let cwd = remote_aware_cwd_path();
environment
.executor_attachment()
.expect("remote test environment has an executor attachment")
.get_filesystem()
.create_directory(
&absolute_path(&cwd)?,
@@ -478,9 +480,9 @@ impl TestCodexBuilder {
test_env: TestEnv,
) -> anyhow::Result<TestCodex> {
let auth = self.auth.clone();
let environment_manager = Arc::new(codex_exec_server::EnvironmentManager::new(
test_env.exec_server_url().map(str::to_owned),
));
let environment_manager = Arc::new(
codex_exec_server::EnvironmentManager::from_environment(test_env.environment()),
);
let thread_manager = if config.model_catalog.is_some() {
ThreadManager::new(
&config,
@@ -663,7 +665,11 @@ impl TestCodex {
}
pub fn fs(&self) -> Arc<dyn ExecutorFileSystem> {
self._test_env.environment().get_filesystem()
self._test_env
.environment()
.executor_attachment()
.expect("test environment has an executor attachment")
.get_filesystem()
}
pub async fn submit_turn(&self, prompt: &str) -> Result<()> {

View File

@@ -15,7 +15,11 @@ async fn remote_test_env_can_connect_and_use_filesystem() -> Result<()> {
};
let test_env = test_env().await?;
let file_system = test_env.environment().get_filesystem();
let file_system = test_env
.environment()
.executor_attachment()
.expect("remote test environment has an executor attachment")
.get_filesystem();
let file_path = remote_test_file_path();
let file_path_abs = absolute_path(file_path.clone())?;

View File

@@ -14,20 +14,84 @@ use crate::remote_process::RemoteProcess;
pub const CODEX_EXEC_SERVER_URL_ENV_VAR: &str = "CODEX_EXEC_SERVER_URL";
/// Exec backend access for a concrete executor attachment.
///
/// Implementations should return a backend bound to the same environment
/// instance so process execution and filesystem operations stay on one
/// transport.
pub trait ExecutorEnvironment: Send + Sync {
    /// Returns a shared handle to this environment's process-execution backend.
    fn get_exec_backend(&self) -> Arc<dyn ExecBackend>;
}
/// Executor and filesystem transports attached to one environment.
#[derive(Clone)]
pub struct ExecutorAttachment {
    // `Some(url)` when backed by a remote exec server; `None` for local execution.
    exec_server_url: Option<String>,
    // Backend used to spawn and drive processes.
    exec_backend: Arc<dyn ExecBackend>,
    // Filesystem operations; for remote attachments this shares the same
    // client/transport as `exec_backend` (see `new_remote`).
    file_system: Arc<dyn ExecutorFileSystem>,
}
// Manual `Debug` impl: only the URL is printed; the trait-object transports
// are elided via `finish_non_exhaustive()`.
impl std::fmt::Debug for ExecutorAttachment {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("ExecutorAttachment")
            .field("exec_server_url", &self.exec_server_url)
            .finish_non_exhaustive()
    }
}
impl ExecutorAttachment {
    /// Attachment whose process execution and filesystem both run on the local host.
    fn new_local(exec_backend: Arc<dyn ExecBackend>) -> Self {
        let file_system: Arc<dyn ExecutorFileSystem> = Arc::new(LocalFileSystem);
        Self {
            exec_server_url: None,
            exec_backend,
            file_system,
        }
    }

    /// Attachment backed by a remote exec server; both transports share `client`.
    fn new_remote(exec_server_url: String, client: ExecServerClient) -> Self {
        let exec_backend: Arc<dyn ExecBackend> = Arc::new(RemoteProcess::new(client.clone()));
        let file_system: Arc<dyn ExecutorFileSystem> = Arc::new(RemoteFileSystem::new(client));
        Self {
            exec_server_url: Some(exec_server_url),
            exec_backend,
            file_system,
        }
    }

    /// URL of the remote exec server, or `None` for a local attachment.
    pub fn exec_server_url(&self) -> Option<&str> {
        self.exec_server_url.as_deref()
    }

    /// Shared handle to the process-execution backend.
    pub fn get_exec_backend(&self) -> Arc<dyn ExecBackend> {
        Arc::clone(&self.exec_backend)
    }

    /// Shared handle to the filesystem transport.
    pub fn get_filesystem(&self) -> Arc<dyn ExecutorFileSystem> {
        Arc::clone(&self.file_system)
    }
}
impl ExecutorEnvironment for ExecutorAttachment {
    /// Hands out the same backend as the inherent accessor, read straight
    /// from the attachment's field.
    fn get_exec_backend(&self) -> Arc<dyn ExecBackend> {
        Arc::clone(&self.exec_backend)
    }
}
#[derive(Debug, Default)]
pub struct EnvironmentManager {
exec_server_url: Option<String>,
executor_mode: ExecutorMode,
current_environment: OnceCell<Arc<Environment>>,
}
impl EnvironmentManager {
pub fn new(exec_server_url: Option<String>) -> Self {
Self {
exec_server_url: normalize_exec_server_url(exec_server_url),
executor_mode: parse_executor_mode(exec_server_url),
current_environment: OnceCell::new(),
}
}
pub fn from_environment(environment: &Environment) -> Self {
Self {
executor_mode: environment.executor_mode.clone(),
current_environment: OnceCell::new(),
}
}
@@ -37,14 +101,14 @@ impl EnvironmentManager {
}
pub fn exec_server_url(&self) -> Option<&str> {
self.exec_server_url.as_deref()
self.executor_mode.remote_exec_server_url()
}
pub async fn current(&self) -> Result<Arc<Environment>, ExecServerError> {
self.current_environment
.get_or_try_init(|| async {
Ok(Arc::new(
Environment::create(self.exec_server_url.clone()).await?,
Environment::create_with_mode(self.executor_mode.clone()).await?,
))
})
.await
@@ -52,11 +116,34 @@ impl EnvironmentManager {
}
}
/// How commands are executed for this environment: on the local host, via a
/// remote exec server, or not at all.
#[derive(Clone, Debug, Default, Eq, PartialEq)]
enum ExecutorMode {
    #[default]
    Local,
    Remote {
        url: String,
    },
    None,
}

impl ExecutorMode {
    /// The exec-server URL, when this mode targets a remote executor.
    fn remote_exec_server_url(&self) -> Option<&str> {
        if let Self::Remote { url } = self {
            Some(url.as_str())
        } else {
            None
        }
    }

    /// True unless the environment was explicitly configured with no executor.
    fn has_attached_executor(&self) -> bool {
        match self {
            Self::Local | Self::Remote { .. } => true,
            Self::None => false,
        }
    }
}
/// Session/turn environment plus an optional executor attachment.
#[derive(Clone)]
pub struct Environment {
exec_server_url: Option<String>,
remote_exec_server_client: Option<ExecServerClient>,
exec_backend: Arc<dyn ExecBackend>,
executor_mode: ExecutorMode,
executor_attachment: Option<Arc<ExecutorAttachment>>,
}
impl Default for Environment {
@@ -70,9 +157,10 @@ impl Default for Environment {
}
Self {
exec_server_url: None,
remote_exec_server_client: None,
exec_backend: Arc::new(local_process),
executor_mode: ExecutorMode::Local,
executor_attachment: Some(Arc::new(ExecutorAttachment::new_local(Arc::new(
local_process,
)))),
}
}
}
@@ -80,76 +168,70 @@ impl Default for Environment {
impl std::fmt::Debug for Environment {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("Environment")
.field("exec_server_url", &self.exec_server_url)
.field("executor_mode", &self.executor_mode)
.finish_non_exhaustive()
}
}
impl Environment {
pub async fn create(exec_server_url: Option<String>) -> Result<Self, ExecServerError> {
let exec_server_url = normalize_exec_server_url(exec_server_url);
let remote_exec_server_client = if let Some(url) = &exec_server_url {
Some(
ExecServerClient::connect_websocket(RemoteExecServerConnectArgs {
websocket_url: url.clone(),
client_name: "codex-environment".to_string(),
connect_timeout: std::time::Duration::from_secs(5),
initialize_timeout: std::time::Duration::from_secs(5),
})
.await?,
)
} else {
Self::create_with_mode(parse_executor_mode(exec_server_url)).await
}
async fn create_with_mode(executor_mode: ExecutorMode) -> Result<Self, ExecServerError> {
let executor_attachment = if let Some(url) = executor_mode.remote_exec_server_url() {
let client = ExecServerClient::connect_websocket(RemoteExecServerConnectArgs {
websocket_url: url.to_string(),
client_name: "codex-environment".to_string(),
connect_timeout: std::time::Duration::from_secs(5),
initialize_timeout: std::time::Duration::from_secs(5),
})
.await?;
Some(Arc::new(ExecutorAttachment::new_remote(
url.to_string(),
client,
)))
} else if matches!(executor_mode, ExecutorMode::None) {
None
} else {
let local_process = LocalProcess::default();
local_process
.initialize()
.map_err(|err| ExecServerError::Protocol(err.message))?;
local_process
.initialized()
.map_err(ExecServerError::Protocol)?;
Some(Arc::new(ExecutorAttachment::new_local(Arc::new(
local_process,
))))
};
let exec_backend: Arc<dyn ExecBackend> =
if let Some(client) = remote_exec_server_client.clone() {
Arc::new(RemoteProcess::new(client))
} else {
let local_process = LocalProcess::default();
local_process
.initialize()
.map_err(|err| ExecServerError::Protocol(err.message))?;
local_process
.initialized()
.map_err(ExecServerError::Protocol)?;
Arc::new(local_process)
};
Ok(Self {
exec_server_url,
remote_exec_server_client,
exec_backend,
executor_mode,
executor_attachment,
})
}
pub fn exec_server_url(&self) -> Option<&str> {
self.exec_server_url.as_deref()
self.executor_mode.remote_exec_server_url()
}
pub fn get_exec_backend(&self) -> Arc<dyn ExecBackend> {
Arc::clone(&self.exec_backend)
pub fn has_attached_executor(&self) -> bool {
self.executor_mode.has_attached_executor()
}
pub fn get_filesystem(&self) -> Arc<dyn ExecutorFileSystem> {
if let Some(client) = self.remote_exec_server_client.clone() {
Arc::new(RemoteFileSystem::new(client))
} else {
Arc::new(LocalFileSystem)
}
pub fn executor_attachment(&self) -> Option<Arc<ExecutorAttachment>> {
self.executor_attachment.clone()
}
}
/// Normalizes a raw exec-server URL setting: missing and whitespace-only
/// values both become `None`; otherwise the trimmed URL is kept.
fn normalize_exec_server_url(exec_server_url: Option<String>) -> Option<String> {
    match exec_server_url {
        None => None,
        Some(raw) => {
            let trimmed = raw.trim();
            if trimmed.is_empty() {
                None
            } else {
                Some(trimmed.to_string())
            }
        }
    }
}
impl ExecutorEnvironment for Environment {
fn get_exec_backend(&self) -> Arc<dyn ExecBackend> {
Arc::clone(&self.exec_backend)
/// Maps a raw exec-server URL setting onto an `ExecutorMode`.
///
/// Missing or blank values mean "run locally"; the literal string "none"
/// (case-insensitive) disables the executor entirely; anything else is
/// treated as a remote exec-server URL.
fn parse_executor_mode(exec_server_url: Option<String>) -> ExecutorMode {
    let Some(raw) = exec_server_url else {
        return ExecutorMode::Local;
    };
    let value = raw.trim();
    if value.is_empty() {
        ExecutorMode::Local
    } else if value.eq_ignore_ascii_case("none") {
        ExecutorMode::None
    } else {
        ExecutorMode::Remote {
            url: value.to_string(),
        }
    }
}
@@ -159,6 +241,8 @@ mod tests {
use super::Environment;
use super::EnvironmentManager;
use super::ExecutorMode;
use super::parse_executor_mode;
use crate::ProcessId;
use pretty_assertions::assert_eq;
@@ -169,14 +253,46 @@ mod tests {
.expect("create environment");
assert_eq!(environment.exec_server_url(), None);
assert!(environment.remote_exec_server_client.is_none());
assert!(environment.has_attached_executor());
assert_eq!(environment.executor_mode, ExecutorMode::Local);
assert!(environment.executor_attachment().is_some());
}
#[test]
fn environment_manager_normalizes_empty_url() {
let manager = EnvironmentManager::new(Some(String::new()));
assert_eq!(manager.exec_server_url(), None);
assert_eq!(manager.executor_mode, ExecutorMode::Local);
}
#[test]
fn environment_manager_preserves_no_executor_setting() {
    // An explicit "none" URL must survive construction as ExecutorMode::None
    // rather than being treated as a remote endpoint.
    let no_executor_url = Some(String::from("none"));
    let manager = EnvironmentManager::new(no_executor_url);
    assert_eq!(manager.executor_mode, ExecutorMode::None);
}
#[test]
fn parse_executor_mode_preserves_no_executor_semantics() {
    // Table of raw URL inputs and the executor mode each must map to.
    let cases = [
        (None, ExecutorMode::Local),
        (Some(String::new()), ExecutorMode::Local),
        (Some("none".to_string()), ExecutorMode::None),
        (Some("NONE".to_string()), ExecutorMode::None),
        (
            Some("ws://localhost:1234".to_string()),
            ExecutorMode::Remote {
                url: "ws://localhost:1234".to_string(),
            },
        ),
    ];
    for (input, expected) in cases {
        assert_eq!(parse_executor_mode(input), expected);
    }
}
#[tokio::test]
@@ -194,6 +310,8 @@ mod tests {
let environment = Environment::default();
let response = environment
.executor_attachment()
.expect("default environment has executor attachment")
.get_exec_backend()
.start(crate::ExecParams {
process_id: ProcessId::from("default-env-proc"),
@@ -208,4 +326,25 @@ mod tests {
assert_eq!(response.process.process_id().as_str(), "default-env-proc");
}
#[tokio::test]
async fn no_executor_environment_disables_executor_attachment() {
    // Passing "none" opts out of any executor: no remote URL, no attachment.
    let environment = Environment::create(Some("none".to_string()))
        .await
        .expect("create environment");
    assert_eq!(environment.exec_server_url(), None);
    assert!(!environment.has_attached_executor());
    assert_eq!(environment.executor_mode, ExecutorMode::None);
    assert!(environment.executor_attachment().is_none());
}
#[tokio::test]
async fn no_executor_environment_has_no_executor_attachment() {
    // NOTE(review): this duplicates the final assertion of
    // `no_executor_environment_disables_executor_attachment` — consider merging.
    let environment = Environment::create(Some("none".to_string()))
        .await
        .expect("create environment");
    assert!(environment.executor_attachment().is_none());
}
}

View File

@@ -34,6 +34,7 @@ pub use codex_app_server_protocol::FsWriteFileResponse;
pub use environment::CODEX_EXEC_SERVER_URL_ENV_VAR;
pub use environment::Environment;
pub use environment::EnvironmentManager;
pub use environment::ExecutorAttachment;
pub use environment::ExecutorEnvironment;
pub use file_system::CopyOptions;
pub use file_system::CreateDirectoryOptions;

View File

@@ -31,13 +31,19 @@ async fn create_process_context(use_remote: bool) -> Result<ProcessContext> {
let server = exec_server().await?;
let environment = Environment::create(Some(server.websocket_url().to_string())).await?;
Ok(ProcessContext {
backend: environment.get_exec_backend(),
backend: environment
.executor_attachment()
.expect("remote environment has an executor attachment")
.get_exec_backend(),
server: Some(server),
})
} else {
let environment = Environment::create(/*exec_server_url*/ None).await?;
Ok(ProcessContext {
backend: environment.get_exec_backend(),
backend: environment
.executor_attachment()
.expect("local environment has an executor attachment")
.get_exec_backend(),
server: None,
})
}

View File

@@ -32,13 +32,19 @@ async fn create_file_system_context(use_remote: bool) -> Result<FileSystemContex
let server = exec_server().await?;
let environment = Environment::create(Some(server.websocket_url().to_string())).await?;
Ok(FileSystemContext {
file_system: environment.get_filesystem(),
file_system: environment
.executor_attachment()
.expect("remote environment has an executor attachment")
.get_filesystem(),
_server: Some(server),
})
} else {
let environment = Environment::create(/*exec_server_url*/ None).await?;
Ok(FileSystemContext {
file_system: environment.get_filesystem(),
file_system: environment
.executor_attachment()
.expect("local environment has an executor attachment")
.get_filesystem(),
_server: None,
})
}