Compare commits

..

1 Commit

Author SHA1 Message Date
gt-oai
9ca636bd4f Fix parallelism flake 2026-02-04 18:08:06 +00:00
45 changed files with 536 additions and 1193 deletions

View File

@@ -100,7 +100,6 @@ jobs:
- name: bazel test //...
env:
BUILDBUDDY_API_KEY: ${{ secrets.BUILDBUDDY_API_KEY }}
CODEX_BWRAP_ENABLE_FFI: ${{ contains(matrix.target, 'unknown-linux') && '1' || '0' }}
shell: bash
run: |
bazel $BAZEL_STARTUP_ARGS --bazelrc=.github/workflows/ci.bazelrc test //... \

View File

@@ -99,9 +99,6 @@ jobs:
USE_SCCACHE: ${{ startsWith(matrix.runner, 'windows') && 'false' || 'true' }}
CARGO_INCREMENTAL: "0"
SCCACHE_CACHE_SIZE: 10G
# Keep cargo-based CI independent of system bwrap build deps.
# The bwrap FFI path is validated in Bazel workflows.
CODEX_BWRAP_ENABLE_FFI: "0"
strategy:
fail-fast: false
@@ -470,9 +467,6 @@ jobs:
USE_SCCACHE: ${{ startsWith(matrix.runner, 'windows') && 'false' || 'true' }}
CARGO_INCREMENTAL: "0"
SCCACHE_CACHE_SIZE: 10G
# Keep cargo-based CI independent of system bwrap build deps.
# The bwrap FFI path is validated in Bazel workflows.
CODEX_BWRAP_ENABLE_FFI: "0"
strategy:
fail-fast: false
@@ -508,6 +502,7 @@ jobs:
steps:
- uses: actions/checkout@v6
# Some integration tests rely on DotSlash being installed.
# See https://github.com/openai/codex/pull/7617.
- name: Install DotSlash

View File

@@ -65,8 +65,6 @@ jobs:
defaults:
run:
working-directory: codex-rs
env:
CODEX_BWRAP_ENABLE_FFI: ${{ contains(matrix.target, 'unknown-linux') && '1' || '0' }}
strategy:
fail-fast: false
@@ -91,13 +89,6 @@ jobs:
steps:
- uses: actions/checkout@v6
- name: Install Linux bwrap build dependencies
if: ${{ runner.os == 'Linux' }}
shell: bash
run: |
set -euo pipefail
sudo apt-get update -y
sudo DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends pkg-config libcap-dev
- name: Install UBSan runtime (musl)
if: ${{ matrix.target == 'x86_64-unknown-linux-musl' || matrix.target == 'aarch64-unknown-linux-musl' }}
shell: bash

1
codex-rs/Cargo.lock generated
View File

@@ -1797,6 +1797,7 @@ dependencies = [
"serde_json",
"tempfile",
"tokio",
"which",
]
[[package]]

View File

@@ -55,7 +55,7 @@ members = [
resolver = "2"
[workspace.package]
version = "0.97.0-alpha.1"
version = "0.0.0"
# Track the edition for all workspace crates in one place. Individual
# crates can still override this value, but keeping it here means new
# crates created with `cargo new -w ...` automatically inherit the 2024

View File

@@ -1484,7 +1484,6 @@ impl CodexMessageProcessor {
let outgoing = self.outgoing.clone();
let req_id = request_id;
let sandbox_cwd = self.config.cwd.clone();
let use_linux_sandbox_bwrap = self.config.features.enabled(Feature::UseLinuxSandboxBwrap);
tokio::spawn(async move {
match codex_core::exec::process_exec_tool_call(
@@ -1492,7 +1491,6 @@ impl CodexMessageProcessor {
&effective_policy,
sandbox_cwd.as_path(),
&codex_linux_sandbox_exe,
use_linux_sandbox_bwrap,
None,
)
.await

View File

@@ -227,19 +227,16 @@ async fn run_command_under_sandbox(
.await?
}
SandboxType::Landlock => {
use codex_core::features::Feature;
#[expect(clippy::expect_used)]
let codex_linux_sandbox_exe = config
.codex_linux_sandbox_exe
.expect("codex-linux-sandbox executable not found");
let use_bwrap_sandbox = config.features.enabled(Feature::UseLinuxSandboxBwrap);
spawn_command_under_linux_sandbox(
codex_linux_sandbox_exe,
command,
cwd,
config.sandbox_policy.get(),
sandbox_policy_cwd.as_path(),
use_bwrap_sandbox,
stdio_policy,
env,
)

View File

@@ -20,7 +20,8 @@ use std::time::Duration;
use std::time::Instant;
use tokio::time::timeout;
const CLOUD_REQUIREMENTS_TIMEOUT: Duration = Duration::from_secs(15);
/// This blocks codex startup, so must be short.
const CLOUD_REQUIREMENTS_TIMEOUT: Duration = Duration::from_secs(5);
#[async_trait]
trait RequirementsFetcher: Send + Sync {

View File

@@ -29,13 +29,4 @@ The public interface of this crate is intentionally small and uniform:
- Output: `Vec<ResponseItem>`.
- `CompactClient::compact_input(&CompactionInput, extra_headers)` wraps the JSON encoding and retry/telemetry wiring.
- **Memory trace summarize endpoint**
- Input: `MemoryTraceSummarizeInput` (re-exported as `codex_api::MemoryTraceSummarizeInput`):
- `model: String`.
- `traces: Vec<MemoryTrace>`.
- `MemoryTrace` includes `id`, `metadata.source_path`, and normalized `items`.
- `reasoning: Option<Reasoning>`.
- Output: `Vec<MemoryTraceSummaryOutput>`.
- `MemoriesClient::trace_summarize_input(&MemoryTraceSummarizeInput, extra_headers)` wraps JSON encoding and retry/telemetry wiring.
All HTTP details (URLs, headers, retry/backoff policies, SSE framing) are encapsulated in `codex-api` and `codex-client`. Callers construct prompts/inputs using protocol types and work with typed streams of `ResponseEvent` or compacted `ResponseItem` values.

View File

@@ -6,7 +6,6 @@ use codex_protocol::openai_models::ReasoningEffort as ReasoningEffortConfig;
use codex_protocol::protocol::RateLimitSnapshot;
use codex_protocol::protocol::TokenUsage;
use futures::Stream;
use serde::Deserialize;
use serde::Serialize;
use serde_json::Value;
use std::pin::Pin;
@@ -38,33 +37,6 @@ pub struct CompactionInput<'a> {
pub instructions: &'a str,
}
/// Canonical input payload for the memory trace summarize endpoint.
#[derive(Debug, Clone, Serialize)]
pub struct MemoryTraceSummarizeInput {
pub model: String,
pub traces: Vec<MemoryTrace>,
#[serde(skip_serializing_if = "Option::is_none")]
pub reasoning: Option<Reasoning>,
}
#[derive(Debug, Clone, Serialize)]
pub struct MemoryTrace {
pub id: String,
pub metadata: MemoryTraceMetadata,
pub items: Vec<Value>,
}
#[derive(Debug, Clone, Serialize)]
pub struct MemoryTraceMetadata {
pub source_path: String,
}
#[derive(Debug, Clone, Deserialize, PartialEq, Eq)]
pub struct MemoryTraceSummaryOutput {
pub trace_summary: String,
pub memory_summary: String,
}
#[derive(Debug)]
pub enum ResponseEvent {
Created,

View File

@@ -1,108 +0,0 @@
use crate::auth::AuthProvider;
use crate::common::MemoryTraceSummarizeInput;
use crate::common::MemoryTraceSummaryOutput;
use crate::endpoint::session::EndpointSession;
use crate::error::ApiError;
use crate::provider::Provider;
use codex_client::HttpTransport;
use codex_client::RequestTelemetry;
use http::HeaderMap;
use http::Method;
use serde::Deserialize;
use serde_json::to_value;
use std::sync::Arc;
pub struct MemoriesClient<T: HttpTransport, A: AuthProvider> {
session: EndpointSession<T, A>,
}
impl<T: HttpTransport, A: AuthProvider> MemoriesClient<T, A> {
pub fn new(transport: T, provider: Provider, auth: A) -> Self {
Self {
session: EndpointSession::new(transport, provider, auth),
}
}
pub fn with_telemetry(self, request: Option<Arc<dyn RequestTelemetry>>) -> Self {
Self {
session: self.session.with_request_telemetry(request),
}
}
fn path() -> &'static str {
"memories/trace_summarize"
}
pub async fn trace_summarize(
&self,
body: serde_json::Value,
extra_headers: HeaderMap,
) -> Result<Vec<MemoryTraceSummaryOutput>, ApiError> {
let resp = self
.session
.execute(Method::POST, Self::path(), extra_headers, Some(body))
.await?;
let parsed: TraceSummarizeResponse =
serde_json::from_slice(&resp.body).map_err(|e| ApiError::Stream(e.to_string()))?;
Ok(parsed.output)
}
pub async fn trace_summarize_input(
&self,
input: &MemoryTraceSummarizeInput,
extra_headers: HeaderMap,
) -> Result<Vec<MemoryTraceSummaryOutput>, ApiError> {
let body = to_value(input).map_err(|e| {
ApiError::Stream(format!(
"failed to encode memory trace summarize input: {e}"
))
})?;
self.trace_summarize(body, extra_headers).await
}
}
#[derive(Debug, Deserialize)]
struct TraceSummarizeResponse {
output: Vec<MemoryTraceSummaryOutput>,
}
#[cfg(test)]
mod tests {
use super::*;
use async_trait::async_trait;
use codex_client::Request;
use codex_client::Response;
use codex_client::StreamResponse;
use codex_client::TransportError;
#[derive(Clone, Default)]
struct DummyTransport;
#[async_trait]
impl HttpTransport for DummyTransport {
async fn execute(&self, _req: Request) -> Result<Response, TransportError> {
Err(TransportError::Build("execute should not run".to_string()))
}
async fn stream(&self, _req: Request) -> Result<StreamResponse, TransportError> {
Err(TransportError::Build("stream should not run".to_string()))
}
}
#[derive(Clone, Default)]
struct DummyAuth;
impl AuthProvider for DummyAuth {
fn bearer_token(&self) -> Option<String> {
None
}
}
#[test]
fn path_is_memories_trace_summarize() {
assert_eq!(
MemoriesClient::<DummyTransport, DummyAuth>::path(),
"memories/trace_summarize"
);
}
}

View File

@@ -1,6 +1,5 @@
pub mod aggregate;
pub mod compact;
pub mod memories;
pub mod models;
pub mod responses;
pub mod responses_websocket;

View File

@@ -15,10 +15,6 @@ pub use codex_client::TransportError;
pub use crate::auth::AuthProvider;
pub use crate::common::CompactionInput;
pub use crate::common::MemoryTrace;
pub use crate::common::MemoryTraceMetadata;
pub use crate::common::MemoryTraceSummarizeInput;
pub use crate::common::MemoryTraceSummaryOutput;
pub use crate::common::Prompt;
pub use crate::common::ResponseAppendWsRequest;
pub use crate::common::ResponseCreateWsRequest;
@@ -28,7 +24,6 @@ pub use crate::common::ResponsesApiRequest;
pub use crate::common::create_text_param_for_request;
pub use crate::endpoint::aggregate::AggregateStreamExt;
pub use crate::endpoint::compact::CompactClient;
pub use crate::endpoint::memories::MemoriesClient;
pub use crate::endpoint::models::ModelsClient;
pub use crate::endpoint::responses::ResponsesClient;
pub use crate::endpoint::responses::ResponsesOptions;

View File

@@ -71,7 +71,7 @@ impl CliConfigOverrides {
}
};
Ok((canonicalize_override_key(key), value))
Ok((key.to_string(), value))
})
.collect()
}
@@ -88,14 +88,6 @@ impl CliConfigOverrides {
}
}
fn canonicalize_override_key(key: &str) -> String {
if key == "use_linux_sandbox_bwrap" {
"features.use_linux_sandbox_bwrap".to_string()
} else {
key.to_string()
}
}
/// Apply a single override onto `root`, creating intermediate objects as
/// necessary.
fn apply_single_override(root: &mut Value, path: &str, value: Value) {
@@ -180,16 +172,6 @@ mod tests {
assert_eq!(arr.len(), 3);
}
#[test]
fn canonicalizes_use_linux_sandbox_bwrap_alias() {
let overrides = CliConfigOverrides {
raw_overrides: vec!["use_linux_sandbox_bwrap=true".to_string()],
};
let parsed = overrides.parse_overrides().expect("parse_overrides");
assert_eq!(parsed[0].0.as_str(), "features.use_linux_sandbox_bwrap");
assert_eq!(parsed[0].1.as_bool(), Some(true));
}
#[test]
fn parses_inline_table() {
let v = parse_toml_value("{a = 1, b = 2}").expect("parse");

View File

@@ -235,9 +235,6 @@
"unified_exec": {
"type": "boolean"
},
"use_linux_sandbox_bwrap": {
"type": "boolean"
},
"web_search": {
"type": "boolean"
},
@@ -1256,9 +1253,6 @@
"unified_exec": {
"type": "boolean"
},
"use_linux_sandbox_bwrap": {
"type": "boolean"
},
"web_search": {
"type": "boolean"
},

View File

@@ -7,10 +7,6 @@ use crate::api_bridge::map_api_error;
use crate::auth::UnauthorizedRecovery;
use codex_api::CompactClient as ApiCompactClient;
use codex_api::CompactionInput as ApiCompactionInput;
use codex_api::MemoriesClient as ApiMemoriesClient;
use codex_api::MemoryTrace as ApiMemoryTrace;
use codex_api::MemoryTraceSummarizeInput as ApiMemoryTraceSummarizeInput;
use codex_api::MemoryTraceSummaryOutput as ApiMemoryTraceSummaryOutput;
use codex_api::Prompt as ApiPrompt;
use codex_api::RequestTelemetry;
use codex_api::ReqwestTransport;
@@ -187,55 +183,6 @@ impl ModelClient {
instructions: &instructions,
};
let extra_headers = self.build_subagent_headers();
client
.compact_input(&payload, extra_headers)
.await
.map_err(map_api_error)
}
/// Builds memory summaries for each provided normalized trace.
///
/// This is a unary call (no streaming) to `/v1/memories/trace_summarize`.
pub async fn summarize_memory_traces(
&self,
traces: Vec<ApiMemoryTrace>,
) -> Result<Vec<ApiMemoryTraceSummaryOutput>> {
if traces.is_empty() {
return Ok(Vec::new());
}
let auth_manager = self.state.auth_manager.clone();
let auth = match auth_manager.as_ref() {
Some(manager) => manager.auth().await,
None => None,
};
let api_provider = self
.state
.provider
.to_api_provider(auth.as_ref().map(CodexAuth::internal_auth_mode))?;
let api_auth = auth_provider_from_auth(auth, &self.state.provider)?;
let transport = ReqwestTransport::new(build_reqwest_client());
let request_telemetry = self.build_request_telemetry();
let client = ApiMemoriesClient::new(transport, api_provider, api_auth)
.with_telemetry(Some(request_telemetry));
let payload = ApiMemoryTraceSummarizeInput {
model: self.state.model_info.slug.clone(),
traces,
reasoning: self.state.effort.map(|effort| Reasoning {
effort: Some(effort),
summary: None,
}),
};
client
.trace_summarize_input(&payload, self.build_subagent_headers())
.await
.map_err(map_api_error)
}
fn build_subagent_headers(&self) -> ApiHeaderMap {
let mut extra_headers = ApiHeaderMap::new();
if let SessionSource::SubAgent(sub) = &self.state.session_source {
let subagent = match sub {
@@ -248,7 +195,10 @@ impl ModelClient {
extra_headers.insert("x-openai-subagent", val);
}
}
extra_headers
client
.compact_input(&payload, extra_headers)
.await
.map_err(map_api_error)
}
}

View File

@@ -508,7 +508,6 @@ pub(crate) struct TurnContext {
pub(crate) windows_sandbox_level: WindowsSandboxLevel,
pub(crate) shell_environment_policy: ShellEnvironmentPolicy,
pub(crate) tools_config: ToolsConfig,
pub(crate) features: Features,
pub(crate) ghost_snapshot: GhostSnapshotConfig,
pub(crate) final_output_json_schema: Option<Value>,
pub(crate) codex_linux_sandbox_exe: Option<PathBuf>,
@@ -767,7 +766,6 @@ impl Session {
windows_sandbox_level: session_configuration.windows_sandbox_level,
shell_environment_policy: per_turn_config.shell_environment_policy.clone(),
tools_config,
features: per_turn_config.features.clone(),
ghost_snapshot: per_turn_config.ghost_snapshot.clone(),
final_output_json_schema: None,
codex_linux_sandbox_exe: per_turn_config.codex_linux_sandbox_exe.clone(),
@@ -1038,7 +1036,6 @@ impl Session {
sandbox_policy: session_configuration.sandbox_policy.get().clone(),
codex_linux_sandbox_exe: config.codex_linux_sandbox_exe.clone(),
sandbox_cwd: session_configuration.cwd.clone(),
use_linux_sandbox_bwrap: config.features.enabled(Feature::UseLinuxSandboxBwrap),
};
let cancel_token = sess.mcp_startup_cancellation_token().await;
@@ -1288,9 +1285,6 @@ impl Session {
sandbox_policy: per_turn_config.sandbox_policy.get().clone(),
codex_linux_sandbox_exe: per_turn_config.codex_linux_sandbox_exe.clone(),
sandbox_cwd: per_turn_config.cwd.clone(),
use_linux_sandbox_bwrap: per_turn_config
.features
.enabled(Feature::UseLinuxSandboxBwrap),
};
if let Err(e) = self
.services
@@ -2373,7 +2367,6 @@ impl Session {
sandbox_policy: turn_context.sandbox_policy.clone(),
codex_linux_sandbox_exe: turn_context.codex_linux_sandbox_exe.clone(),
sandbox_cwd: turn_context.cwd.clone(),
use_linux_sandbox_bwrap: turn_context.features.enabled(Feature::UseLinuxSandboxBwrap),
};
let cancel_token = self.reset_mcp_startup_cancellation_token().await;
@@ -3350,7 +3343,6 @@ async fn spawn_review_thread(
session_source,
transport_manager,
tools_config,
features: parent_turn_context.features.clone(),
ghost_snapshot: parent_turn_context.ghost_snapshot.clone(),
developer_instructions: None,
user_instructions: None,
@@ -3579,35 +3571,19 @@ pub(crate) async fn run_turn(
// Note that pending_input would be something like a message the user
// submitted through the UI while the model was running. Though the UI
// may support this, the model might not.
let pending_response_items = sess
let pending_input = sess
.get_pending_input()
.await
.into_iter()
.map(ResponseItem::from)
.collect::<Vec<ResponseItem>>();
if !pending_response_items.is_empty() {
for response_item in pending_response_items {
if let Some(TurnItem::UserMessage(user_message)) = parse_turn_item(&response_item) {
// todo(aibrahim): move pending input to be UserInput only to keep TextElements. context: https://github.com/openai/codex/pull/10656#discussion_r2765522480
sess.record_user_prompt_and_emit_turn_item(
turn_context.as_ref(),
&user_message.content,
response_item,
)
.await;
} else {
sess.record_conversation_items(
&turn_context,
std::slice::from_ref(&response_item),
)
.await;
}
}
}
// Construct the input that we will send to the model.
let sampling_request_input: Vec<ResponseItem> = { sess.clone_history().await.for_prompt() };
let sampling_request_input: Vec<ResponseItem> = {
sess.record_conversation_items(&turn_context, &pending_input)
.await;
sess.clone_history().await.for_prompt()
};
let sampling_request_input_messages = sampling_request_input
.iter()

View File

@@ -72,8 +72,8 @@ const DEFAULT_PROJECT_ROOT_MARKERS: &[&str] = &[".git"];
/// configuration layers in the following order, but a constraint defined in an
/// earlier layer cannot be overridden by a later layer:
///
/// - cloud: managed cloud requirements
/// - admin: managed preferences (*)
/// - cloud: managed cloud requirements
/// - system `/etc/codex/requirements.toml`
///
/// For backwards compatibility, we also load from
@@ -107,11 +107,6 @@ pub async fn load_config_layers_state(
) -> io::Result<ConfigLayerStack> {
let mut config_requirements_toml = ConfigRequirementsWithSources::default();
if let Some(requirements) = cloud_requirements.get().await {
config_requirements_toml
.merge_unset_fields(RequirementSource::CloudRequirements, requirements);
}
#[cfg(target_os = "macos")]
macos::load_managed_admin_requirements_toml(
&mut config_requirements_toml,
@@ -121,6 +116,11 @@ pub async fn load_config_layers_state(
)
.await?;
if let Some(requirements) = cloud_requirements.get().await {
config_requirements_toml
.merge_unset_fields(RequirementSource::CloudRequirements, requirements);
}
// Honor /etc/codex/requirements.toml.
if cfg!(unix) {
load_requirements_toml(

View File

@@ -487,59 +487,6 @@ enforce_residency = "us"
Ok(())
}
#[cfg(target_os = "macos")]
#[tokio::test]
async fn cloud_requirements_take_precedence_over_mdm_requirements() -> anyhow::Result<()> {
use base64::Engine;
let tmp = tempdir()?;
let state = load_config_layers_state(
tmp.path(),
Some(AbsolutePathBuf::try_from(tmp.path())?),
&[] as &[(String, TomlValue)],
LoaderOverrides {
macos_managed_config_requirements_base64: Some(
base64::prelude::BASE64_STANDARD.encode(
r#"
allowed_approval_policies = ["on-request"]
"#
.as_bytes(),
),
),
..LoaderOverrides::default()
},
CloudRequirementsLoader::new(async {
Some(ConfigRequirementsToml {
allowed_approval_policies: Some(vec![AskForApproval::Never]),
allowed_sandbox_modes: None,
mcp_servers: None,
rules: None,
enforce_residency: None,
})
}),
)
.await?;
assert_eq!(
state.requirements().approval_policy.value(),
AskForApproval::Never
);
assert_eq!(
state
.requirements()
.approval_policy
.can_set(&AskForApproval::OnRequest),
Err(ConstraintError::InvalidValue {
field_name: "approval_policy",
candidate: "OnRequest".into(),
allowed: "[Never]".into(),
requirement_source: RequirementSource::CloudRequirements,
})
);
Ok(())
}
#[tokio::test(flavor = "current_thread")]
async fn cloud_requirements_are_not_overwritten_by_system_requirements() -> anyhow::Result<()> {
let tmp = tempdir()?;

View File

@@ -43,7 +43,6 @@ pub async fn list_accessible_connectors_from_mcp_tools(
sandbox_policy: SandboxPolicy::ReadOnly,
codex_linux_sandbox_exe: config.codex_linux_sandbox_exe.clone(),
sandbox_cwd: env::current_dir().unwrap_or_else(|_| PathBuf::from("/")),
use_linux_sandbox_bwrap: config.features.enabled(Feature::UseLinuxSandboxBwrap),
};
mcp_connection_manager

View File

@@ -140,7 +140,6 @@ pub async fn process_exec_tool_call(
sandbox_policy: &SandboxPolicy,
sandbox_cwd: &Path,
codex_linux_sandbox_exe: &Option<PathBuf>,
use_linux_sandbox_bwrap: bool,
stdout_stream: Option<StdoutStream>,
) -> Result<ExecToolCallOutput> {
let windows_sandbox_level = params.windows_sandbox_level;
@@ -185,15 +184,14 @@ pub async fn process_exec_tool_call(
let manager = SandboxManager::new();
let exec_env = manager
.transform(crate::sandboxing::SandboxTransformRequest {
.transform(
spec,
policy: sandbox_policy,
sandbox: sandbox_type,
sandbox_policy_cwd: sandbox_cwd,
codex_linux_sandbox_exe: codex_linux_sandbox_exe.as_ref(),
use_linux_sandbox_bwrap,
sandbox_policy,
sandbox_type,
sandbox_cwd,
codex_linux_sandbox_exe.as_ref(),
windows_sandbox_level,
})
)
.map_err(CodexErr::from)?;
// Route through the sandboxing module for a single, unified execution path.
@@ -1110,7 +1108,6 @@ mod tests {
&SandboxPolicy::DangerFullAccess,
cwd.as_path(),
&None,
false,
None,
)
.await;

View File

@@ -89,8 +89,6 @@ pub enum Feature {
WebSearchCached,
/// Gate the execpolicy enforcement for shell/unified exec.
ExecPolicy,
/// Use the bubblewrap-based Linux sandbox pipeline.
UseLinuxSandboxBwrap,
/// Allow the model to request approval and propose exec rules.
RequestRule,
/// Enable Windows sandbox (restricted token) on Windows.
@@ -467,12 +465,6 @@ pub const FEATURES: &[FeatureSpec] = &[
stage: Stage::UnderDevelopment,
default_enabled: true,
},
FeatureSpec {
id: Feature::UseLinuxSandboxBwrap,
key: "use_linux_sandbox_bwrap",
stage: Stage::UnderDevelopment,
default_enabled: false,
},
FeatureSpec {
id: Feature::RequestRule,
key: "request_rule",

View File

@@ -6,34 +6,26 @@ use std::path::Path;
use std::path::PathBuf;
use tokio::process::Child;
/// Spawn a shell tool command under the Linux sandbox helper
/// (codex-linux-sandbox), which currently uses bubblewrap for filesystem
/// isolation plus seccomp for network restrictions.
/// Spawn a shell tool command under the Linux Landlock+seccomp sandbox helper
/// (codex-linux-sandbox).
///
/// Unlike macOS Seatbelt where we directly embed the policy text, the Linux
/// helper accepts a list of `--sandbox-permission`/`-s` flags mirroring the
/// public CLI. We convert the internal [`SandboxPolicy`] representation into
/// the equivalent CLI options.
#[allow(clippy::too_many_arguments)]
pub async fn spawn_command_under_linux_sandbox<P>(
codex_linux_sandbox_exe: P,
command: Vec<String>,
command_cwd: PathBuf,
sandbox_policy: &SandboxPolicy,
sandbox_policy_cwd: &Path,
use_bwrap_sandbox: bool,
stdio_policy: StdioPolicy,
env: HashMap<String, String>,
) -> std::io::Result<Child>
where
P: AsRef<Path>,
{
let args = create_linux_sandbox_command_args(
command,
sandbox_policy,
sandbox_policy_cwd,
use_bwrap_sandbox,
);
let args = create_linux_sandbox_command_args(command, sandbox_policy, sandbox_policy_cwd);
let arg0 = Some("codex-linux-sandbox");
spawn_child_async(
codex_linux_sandbox_exe.as_ref().to_path_buf(),
@@ -48,14 +40,10 @@ where
}
/// Converts the sandbox policy into the CLI invocation for `codex-linux-sandbox`.
///
/// The helper performs the actual sandboxing (bubblewrap + seccomp) after
/// parsing these arguments. See `docs/linux_sandbox.md` for the Linux semantics.
pub(crate) fn create_linux_sandbox_command_args(
command: Vec<String>,
sandbox_policy: &SandboxPolicy,
sandbox_policy_cwd: &Path,
use_bwrap_sandbox: bool,
) -> Vec<String> {
#[expect(clippy::expect_used)]
let sandbox_policy_cwd = sandbox_policy_cwd
@@ -72,42 +60,13 @@ pub(crate) fn create_linux_sandbox_command_args(
sandbox_policy_cwd,
"--sandbox-policy".to_string(),
sandbox_policy_json,
// Separator so that command arguments starting with `-` are not parsed as
// options of the helper itself.
"--".to_string(),
];
if use_bwrap_sandbox {
linux_cmd.push("--use-bwrap-sandbox".to_string());
}
// Separator so that command arguments starting with `-` are not parsed as
// options of the helper itself.
linux_cmd.push("--".to_string());
// Append the original tool command.
linux_cmd.extend(command);
linux_cmd
}
#[cfg(test)]
mod tests {
use super::*;
use pretty_assertions::assert_eq;
#[test]
fn bwrap_flags_are_feature_gated() {
let command = vec!["/bin/true".to_string()];
let cwd = Path::new("/tmp");
let policy = SandboxPolicy::ReadOnly;
let with_bwrap = create_linux_sandbox_command_args(command.clone(), &policy, cwd, true);
assert_eq!(
with_bwrap.contains(&"--use-bwrap-sandbox".to_string()),
true
);
let without_bwrap = create_linux_sandbox_command_args(command, &policy, cwd, false);
assert_eq!(
without_bwrap.contains(&"--use-bwrap-sandbox".to_string()),
false
);
}
}

View File

@@ -161,5 +161,4 @@ pub use codex_protocol::models::ResponseItem;
pub use compact::content_items_to_text;
pub use event_mapping::parse_turn_item;
pub mod compact;
pub mod memory_trace;
pub mod otel_init;

View File

@@ -167,7 +167,6 @@ pub async fn collect_mcp_snapshot(config: &Config) -> McpListToolsResponseEvent
sandbox_policy: SandboxPolicy::ReadOnly,
codex_linux_sandbox_exe: config.codex_linux_sandbox_exe.clone(),
sandbox_cwd: env::current_dir().unwrap_or_else(|_| PathBuf::from("/")),
use_linux_sandbox_bwrap: config.features.enabled(Feature::UseLinuxSandboxBwrap),
};
mcp_connection_manager

View File

@@ -313,8 +313,6 @@ pub struct SandboxState {
pub sandbox_policy: SandboxPolicy,
pub codex_linux_sandbox_exe: Option<PathBuf>,
pub sandbox_cwd: PathBuf,
#[serde(default)]
pub use_linux_sandbox_bwrap: bool,
}
/// A thin wrapper around a set of running [`RmcpClient`] instances.

View File

@@ -1,292 +0,0 @@
use std::path::Path;
use std::path::PathBuf;
use crate::ModelClient;
use crate::error::CodexErr;
use crate::error::Result;
use codex_api::MemoryTrace as ApiMemoryTrace;
use codex_api::MemoryTraceMetadata as ApiMemoryTraceMetadata;
use serde_json::Map;
use serde_json::Value;
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct BuiltTraceMemory {
pub trace_id: String,
pub source_path: PathBuf,
pub trace_summary: String,
pub memory_summary: String,
}
struct PreparedTrace {
trace_id: String,
source_path: PathBuf,
payload: ApiMemoryTrace,
}
/// Loads raw trace files, normalizes trace items, and builds memory summaries.
///
/// The request/response wiring mirrors the memory trace summarize E2E flow:
/// `/v1/memories/trace_summarize` with one output object per input trace.
pub async fn build_memories_from_trace_files(
client: &ModelClient,
trace_paths: &[PathBuf],
) -> Result<Vec<BuiltTraceMemory>> {
if trace_paths.is_empty() {
return Ok(Vec::new());
}
let mut prepared = Vec::with_capacity(trace_paths.len());
for (index, path) in trace_paths.iter().enumerate() {
prepared.push(prepare_trace(index + 1, path).await?);
}
let traces = prepared.iter().map(|trace| trace.payload.clone()).collect();
let output = client.summarize_memory_traces(traces).await?;
if output.len() != prepared.len() {
return Err(CodexErr::InvalidRequest(format!(
"unexpected memory summarize output length: expected {}, got {}",
prepared.len(),
output.len()
)));
}
Ok(prepared
.into_iter()
.zip(output)
.map(|(trace, summary)| BuiltTraceMemory {
trace_id: trace.trace_id,
source_path: trace.source_path,
trace_summary: summary.trace_summary,
memory_summary: summary.memory_summary,
})
.collect())
}
async fn prepare_trace(index: usize, path: &Path) -> Result<PreparedTrace> {
let text = load_trace_text(path).await?;
let items = load_trace_items(path, &text)?;
let trace_id = build_trace_id(index, path);
let source_path = path.to_path_buf();
Ok(PreparedTrace {
trace_id: trace_id.clone(),
source_path: source_path.clone(),
payload: ApiMemoryTrace {
id: trace_id,
metadata: ApiMemoryTraceMetadata {
source_path: source_path.display().to_string(),
},
items,
},
})
}
async fn load_trace_text(path: &Path) -> Result<String> {
let raw = tokio::fs::read(path).await?;
Ok(decode_trace_bytes(&raw))
}
fn decode_trace_bytes(raw: &[u8]) -> String {
if let Some(without_bom) = raw.strip_prefix(&[0xEF, 0xBB, 0xBF])
&& let Ok(text) = String::from_utf8(without_bom.to_vec())
{
return text;
}
if let Ok(text) = String::from_utf8(raw.to_vec()) {
return text;
}
raw.iter().map(|b| char::from(*b)).collect()
}
fn load_trace_items(path: &Path, text: &str) -> Result<Vec<Value>> {
if let Ok(Value::Array(items)) = serde_json::from_str::<Value>(text) {
let dict_items = items
.into_iter()
.filter(serde_json::Value::is_object)
.collect::<Vec<_>>();
if dict_items.is_empty() {
return Err(CodexErr::InvalidRequest(format!(
"no object items found in trace file: {}",
path.display()
)));
}
return normalize_trace_items(dict_items, path);
}
let mut parsed_items = Vec::new();
for line in text.lines() {
let line = line.trim();
if line.is_empty() || (!line.starts_with('{') && !line.starts_with('[')) {
continue;
}
let Ok(obj) = serde_json::from_str::<Value>(line) else {
continue;
};
match obj {
Value::Object(_) => parsed_items.push(obj),
Value::Array(inner) => {
parsed_items.extend(inner.into_iter().filter(serde_json::Value::is_object))
}
_ => {}
}
}
if parsed_items.is_empty() {
return Err(CodexErr::InvalidRequest(format!(
"no JSON items parsed from trace file: {}",
path.display()
)));
}
normalize_trace_items(parsed_items, path)
}
fn normalize_trace_items(items: Vec<Value>, path: &Path) -> Result<Vec<Value>> {
let mut normalized = Vec::new();
for item in items {
let Value::Object(obj) = item else {
continue;
};
if let Some(payload) = obj.get("payload") {
if obj.get("type").and_then(Value::as_str) != Some("response_item") {
continue;
}
match payload {
Value::Object(payload_item) => {
if is_allowed_trace_item(payload_item) {
normalized.push(Value::Object(payload_item.clone()));
}
}
Value::Array(payload_items) => {
for payload_item in payload_items {
if let Value::Object(payload_item) = payload_item
&& is_allowed_trace_item(payload_item)
{
normalized.push(Value::Object(payload_item.clone()));
}
}
}
_ => {}
}
continue;
}
if is_allowed_trace_item(&obj) {
normalized.push(Value::Object(obj));
}
}
if normalized.is_empty() {
return Err(CodexErr::InvalidRequest(format!(
"no valid trace items after normalization: {}",
path.display()
)));
}
Ok(normalized)
}
fn is_allowed_trace_item(item: &Map<String, Value>) -> bool {
let Some(item_type) = item.get("type").and_then(Value::as_str) else {
return false;
};
if item_type == "message" {
return matches!(
item.get("role").and_then(Value::as_str),
Some("assistant" | "system" | "developer" | "user")
);
}
true
}
fn build_trace_id(index: usize, path: &Path) -> String {
let stem = path
.file_stem()
.map(|stem| stem.to_string_lossy().into_owned())
.filter(|stem| !stem.is_empty())
.unwrap_or_else(|| "trace".to_string());
format!("trace_{index}_{stem}")
}
#[cfg(test)]
mod tests {
    use super::*;
    use pretty_assertions::assert_eq;
    use tempfile::tempdir;
    // Normalization unwraps `response_item` envelopes (both single-object and
    // array payloads), drops messages with `role: "tool"`, drops items whose
    // wrapper `type` is unknown, and passes bare items through untouched.
    #[test]
    fn normalize_trace_items_handles_payload_wrapper_and_message_role_filtering() {
        // Input mixes: an envelope with an object payload, an envelope with an
        // array payload (containing a "tool" message that must be filtered),
        // an unknown wrapper type (dropped entirely), and a bare message.
        let items = vec![
            serde_json::json!({
                "type": "response_item",
                "payload": {"type": "message", "role": "assistant", "content": []}
            }),
            serde_json::json!({
                "type": "response_item",
                "payload": [
                    {"type": "message", "role": "user", "content": []},
                    {"type": "message", "role": "tool", "content": []},
                    {"type": "function_call", "name": "shell", "arguments": "{}", "call_id": "c1"}
                ]
            }),
            serde_json::json!({
                "type": "not_response_item",
                "payload": {"type": "message", "role": "assistant", "content": []}
            }),
            serde_json::json!({
                "type": "message",
                "role": "developer",
                "content": []
            }),
        ];
        let normalized = normalize_trace_items(items, Path::new("trace.json")).expect("normalize");
        // Note: the "tool" message and the unknown wrapper are absent; the
        // developer message survives because bare items are kept as-is.
        let expected = vec![
            serde_json::json!({"type": "message", "role": "assistant", "content": []}),
            serde_json::json!({"type": "message", "role": "user", "content": []}),
            serde_json::json!({"type": "function_call", "name": "shell", "arguments": "{}", "call_id": "c1"}),
            serde_json::json!({"type": "message", "role": "developer", "content": []}),
        ];
        assert_eq!(normalized, expected);
    }
    // JSONL input may mix one-object-per-line and one-array-per-line; both
    // are flattened, with the same envelope/role filtering as above.
    #[test]
    fn load_trace_items_supports_jsonl_arrays_and_objects() {
        let text = r#"
        {"type":"response_item","payload":{"type":"message","role":"assistant","content":[]}}
        [{"type":"message","role":"user","content":[]},{"type":"message","role":"tool","content":[]}]
        "#;
        let loaded = load_trace_items(Path::new("trace.jsonl"), text).expect("load");
        let expected = vec![
            serde_json::json!({"type":"message","role":"assistant","content":[]}),
            serde_json::json!({"type":"message","role":"user","content":[]}),
        ];
        assert_eq!(loaded, expected);
    }
    // Files saved as "UTF-8 with signature" begin with a byte-order mark
    // (EF BB BF); `load_trace_text` must strip it so parsing sees clean JSON.
    #[tokio::test]
    async fn load_trace_text_decodes_utf8_sig() {
        let dir = tempdir().expect("tempdir");
        let path = dir.path().join("trace.json");
        // Raw bytes: BOM followed by `[{"type":"message","role":"user","content":[]}]`.
        tokio::fs::write(
            &path,
            [
                0xEF, 0xBB, 0xBF, b'[', b'{', b'"', b't', b'y', b'p', b'e', b'"', b':', b'"', b'm',
                b'e', b's', b's', b'a', b'g', b'e', b'"', b',', b'"', b'r', b'o', b'l', b'e', b'"',
                b':', b'"', b'u', b's', b'e', b'r', b'"', b',', b'"', b'c', b'o', b'n', b't', b'e',
                b'n', b't', b'"', b':', b'[', b']', b'}', b']',
            ],
        )
        .await
        .expect("write");
        let text = load_trace_text(&path).await.expect("decode");
        // If the BOM were not stripped, the text would start with U+FEFF.
        assert!(text.starts_with('['));
    }
}

View File

@@ -51,19 +51,6 @@ pub struct ExecEnv {
pub arg0: Option<String>,
}
/// Bundled arguments for sandbox transformation.
///
/// This keeps call sites self-documenting when several fields are optional.
pub(crate) struct SandboxTransformRequest<'a> {
pub spec: CommandSpec,
pub policy: &'a SandboxPolicy,
pub sandbox: SandboxType,
pub sandbox_policy_cwd: &'a Path,
pub codex_linux_sandbox_exe: Option<&'a PathBuf>,
pub use_linux_sandbox_bwrap: bool,
pub windows_sandbox_level: WindowsSandboxLevel,
}
pub enum SandboxPreference {
Auto,
Require,
@@ -117,17 +104,13 @@ impl SandboxManager {
pub(crate) fn transform(
&self,
request: SandboxTransformRequest<'_>,
mut spec: CommandSpec,
policy: &SandboxPolicy,
sandbox: SandboxType,
sandbox_policy_cwd: &Path,
codex_linux_sandbox_exe: Option<&PathBuf>,
windows_sandbox_level: WindowsSandboxLevel,
) -> Result<ExecEnv, SandboxTransformError> {
let SandboxTransformRequest {
mut spec,
policy,
sandbox,
sandbox_policy_cwd,
codex_linux_sandbox_exe,
use_linux_sandbox_bwrap,
windows_sandbox_level,
} = request;
let mut env = spec.env;
if !policy.has_full_network_access() {
env.insert(
@@ -158,12 +141,8 @@ impl SandboxManager {
SandboxType::LinuxSeccomp => {
let exe = codex_linux_sandbox_exe
.ok_or(SandboxTransformError::MissingLinuxSandboxExecutable)?;
let mut args = create_linux_sandbox_command_args(
command.clone(),
policy,
sandbox_policy_cwd,
use_linux_sandbox_bwrap,
);
let mut args =
create_linux_sandbox_command_args(command.clone(), policy, sandbox_policy_cwd);
let mut full_command = Vec::with_capacity(1 + args.len());
full_command.push(exe.to_string_lossy().to_string());
full_command.append(&mut args);

View File

@@ -8,7 +8,6 @@ retry without sandbox on denial (no reapproval thanks to caching).
use crate::error::CodexErr;
use crate::error::SandboxErr;
use crate::exec::ExecToolCallOutput;
use crate::features::Feature;
use crate::sandboxing::SandboxManager;
use crate::tools::sandboxing::ApprovalCtx;
use crate::tools::sandboxing::ExecApprovalRequirement;
@@ -98,14 +97,12 @@ impl ToolOrchestrator {
// Platform-specific flag gating is handled by SandboxManager::select_initial
// via crate::safety::get_platform_sandbox(..).
let use_linux_sandbox_bwrap = turn_ctx.features.enabled(Feature::UseLinuxSandboxBwrap);
let initial_attempt = SandboxAttempt {
sandbox: initial_sandbox,
policy: &turn_ctx.sandbox_policy,
manager: &self.sandbox,
sandbox_cwd: &turn_ctx.cwd,
codex_linux_sandbox_exe: turn_ctx.codex_linux_sandbox_exe.as_ref(),
use_linux_sandbox_bwrap,
windows_sandbox_level: turn_ctx.windows_sandbox_level,
};
@@ -157,7 +154,6 @@ impl ToolOrchestrator {
manager: &self.sandbox,
sandbox_cwd: &turn_ctx.cwd,
codex_linux_sandbox_exe: None,
use_linux_sandbox_bwrap,
windows_sandbox_level: turn_ctx.windows_sandbox_level,
};

View File

@@ -274,7 +274,6 @@ pub(crate) struct SandboxAttempt<'a> {
pub(crate) manager: &'a SandboxManager,
pub(crate) sandbox_cwd: &'a Path,
pub codex_linux_sandbox_exe: Option<&'a std::path::PathBuf>,
pub use_linux_sandbox_bwrap: bool,
pub windows_sandbox_level: codex_protocol::config_types::WindowsSandboxLevel,
}
@@ -283,16 +282,14 @@ impl<'a> SandboxAttempt<'a> {
&self,
spec: CommandSpec,
) -> Result<crate::sandboxing::ExecEnv, SandboxTransformError> {
self.manager
.transform(crate::sandboxing::SandboxTransformRequest {
spec,
policy: self.policy,
sandbox: self.sandbox,
sandbox_policy_cwd: self.sandbox_cwd,
codex_linux_sandbox_exe: self.codex_linux_sandbox_exe,
use_linux_sandbox_bwrap: self.use_linux_sandbox_bwrap,
windows_sandbox_level: self.windows_sandbox_level,
})
self.manager.transform(
spec,
self.policy,
self.sandbox,
self.sandbox_cwd,
self.codex_linux_sandbox_exe,
self.windows_sandbox_level,
)
}
}

View File

@@ -44,7 +44,7 @@ async fn run_test_cmd(tmp: TempDir, cmd: Vec<&str>) -> Result<ExecToolCallOutput
let policy = SandboxPolicy::new_read_only_policy();
process_exec_tool_call(params, &policy, tmp.path(), &None, false, None).await
process_exec_tool_call(params, &policy, tmp.path(), &None, None).await
}
/// Command succeeds with exit code 0 normally

View File

@@ -125,11 +125,6 @@ async fn injected_user_input_triggers_follow_up_request_with_deltas() {
let _ = gate_completed_tx.send(());
let _ = wait_for_event(&codex, |event| {
matches!(event, EventMsg::UserMessage(message) if message.message == "second prompt")
})
.await;
wait_for_event(&codex, |event| matches!(event, EventMsg::TurnComplete(_))).await;
let requests = server.requests().await;

View File

@@ -149,7 +149,6 @@ async fn shell_tools_run_in_parallel() -> anyhow::Result<()> {
let shell_args = json!({
"command": "sleep 0.3",
// Avoid user-specific shell startup cost (e.g. zsh profile scripts) in timing assertions.
"login": false,
"timeout_ms": 1_000,
});
@@ -187,6 +186,7 @@ async fn mixed_parallel_tools_run_in_parallel() -> anyhow::Result<()> {
.to_string();
let shell_args = serde_json::to_string(&json!({
"command": "sleep 0.3",
"login": false,
"timeout_ms": 1_000,
}))?;

View File

@@ -95,7 +95,6 @@ impl EscalateServer {
&sandbox_state.sandbox_policy,
&sandbox_state.sandbox_cwd,
&sandbox_state.codex_linux_sandbox_exe,
sandbox_state.use_linux_sandbox_bwrap,
None,
)
.await?;

View File

@@ -126,7 +126,6 @@ impl ExecTool {
sandbox_policy: SandboxPolicy::ReadOnly,
codex_linux_sandbox_exe: None,
sandbox_cwd: PathBuf::from(&params.workdir),
use_linux_sandbox_bwrap: false,
});
let escalate_server = EscalateServer::new(
self.bash_path.clone(),

View File

@@ -91,7 +91,6 @@ where
sandbox_policy: SandboxPolicy::ReadOnly,
codex_linux_sandbox_exe,
sandbox_cwd: sandbox_cwd.as_ref().to_path_buf(),
use_linux_sandbox_bwrap: false,
};
send_sandbox_state_update(sandbox_state, service).await
}
@@ -119,7 +118,6 @@ where
},
codex_linux_sandbox_exe,
sandbox_cwd: writable_folder.as_ref().to_path_buf(),
use_linux_sandbox_bwrap: false,
};
send_sandbox_state_update(sandbox_state, service).await
}

View File

@@ -50,7 +50,6 @@ async fn spawn_command_under_sandbox(
command_cwd,
sandbox_policy,
sandbox_cwd,
false,
stdio_policy,
env,
)

View File

@@ -23,6 +23,7 @@ landlock = { workspace = true }
libc = { workspace = true }
seccompiler = { workspace = true }
serde_json = { workspace = true }
which = "8.0.0"
[target.'cfg(target_os = "linux")'.dev-dependencies]
pretty_assertions = { workspace = true }

View File

@@ -6,28 +6,3 @@ This crate is responsible for producing:
- a lib crate that exposes the business logic of the executable as `run_main()` so that
- the `codex-exec` CLI can check if its arg0 is `codex-linux-sandbox` and, if so, execute as if it were `codex-linux-sandbox`
- this should also be true of the `codex` multitool CLI
On Linux, the bubblewrap pipeline uses the vendored bubblewrap path compiled
into this binary.
**Current Behavior**
- Legacy Landlock + mount protections remain available as the legacy pipeline.
- The bubblewrap pipeline is standardized on the vendored path.
- During rollout, the bubblewrap pipeline is gated by the temporary feature
flag `use_linux_sandbox_bwrap` (CLI `-c` alias for
`features.use_linux_sandbox_bwrap`; legacy remains default when off).
- When enabled, the bubblewrap pipeline applies `PR_SET_NO_NEW_PRIVS` and a
seccomp network filter in-process.
- When enabled, the filesystem is read-only by default via `--ro-bind / /`.
- When enabled, writable roots are layered with `--bind <root> <root>`.
- When enabled, protected subpaths under writable roots (for example `.git`,
resolved `gitdir:`, and `.codex`) are re-applied as read-only via `--ro-bind`.
- When enabled, symlink-in-path and non-existent protected paths inside
writable roots are blocked by mounting `/dev/null` on the symlink or first
missing component.
- When enabled, the helper isolates the PID namespace via `--unshare-pid`.
- When enabled, it mounts a fresh `/proc` via `--proc /proc` by default, but
you can skip this in restrictive container environments with `--no-proc`.
**Notes**
- The CLI surface still uses legacy names like `codex debug landlock`.

View File

@@ -44,6 +44,45 @@ pub(crate) fn create_bwrap_command_args(
sandbox_policy: &SandboxPolicy,
cwd: &Path,
options: BwrapOptions,
bwrap_path: Option<&Path>,
) -> Result<Vec<String>> {
if sandbox_policy.has_full_disk_write_access() {
return Ok(command);
}
let bwrap_path = match bwrap_path {
Some(path) => {
if path.exists() {
path.to_path_buf()
} else {
return Err(CodexErr::UnsupportedOperation(format!(
"bubblewrap (bwrap) not found at configured path: {}",
path.display()
)));
}
}
None => which::which("bwrap").map_err(|err| {
CodexErr::UnsupportedOperation(format!("bubblewrap (bwrap) not found on PATH: {err}"))
})?,
};
let mut args = Vec::new();
args.push(path_to_string(&bwrap_path));
args.extend(create_bwrap_flags(command, sandbox_policy, cwd, options)?);
Ok(args)
}
/// Doc-hidden helper that builds bubblewrap arguments without a program path.
///
/// This is intended for experiments where we call a build-time bubblewrap
/// `main` symbol via FFI rather than exec'ing the `bwrap` binary. The caller
/// is responsible for providing a suitable `argv[0]`.
#[doc(hidden)]
pub(crate) fn create_bwrap_command_args_vendored(
command: Vec<String>,
sandbox_policy: &SandboxPolicy,
cwd: &Path,
options: BwrapOptions,
) -> Result<Vec<String>> {
if sandbox_policy.has_full_disk_write_access() {
return Ok(command);

View File

@@ -1,7 +1,3 @@
//! In-process Linux sandbox primitives: `no_new_privs` and seccomp.
//!
//! Filesystem restrictions are enforced by bubblewrap in `linux_run_main`.
//! Landlock helpers remain available here as legacy/backup utilities.
use std::collections::BTreeMap;
use std::path::Path;
@@ -12,7 +8,6 @@ use codex_core::protocol::SandboxPolicy;
use codex_utils_absolute_path::AbsolutePathBuf;
use landlock::ABI;
#[allow(unused_imports)]
use landlock::Access;
use landlock::AccessFs;
use landlock::CompatLevel;
@@ -32,24 +27,11 @@ use seccompiler::apply_filter;
/// Apply sandbox policies inside this thread so only the child inherits
/// them, not the entire CLI process.
///
/// This function is responsible for:
/// - enabling `PR_SET_NO_NEW_PRIVS` when restrictions apply, and
/// - installing the network seccomp filter when network access is disabled.
///
/// Filesystem restrictions are intentionally handled by bubblewrap.
pub(crate) fn apply_sandbox_policy_to_current_thread(
sandbox_policy: &SandboxPolicy,
cwd: &Path,
apply_landlock_fs: bool,
) -> Result<()> {
// `PR_SET_NO_NEW_PRIVS` is required for seccomp, but it also prevents
// setuid privilege elevation. Many `bwrap` deployments rely on setuid, so
// we avoid this unless we need seccomp or we are explicitly using the
// legacy Landlock filesystem pipeline.
if !sandbox_policy.has_full_network_access()
|| (apply_landlock_fs && !sandbox_policy.has_full_disk_write_access())
{
if !sandbox_policy.has_full_disk_write_access() || !sandbox_policy.has_full_network_access() {
set_no_new_privs()?;
}
@@ -57,7 +39,7 @@ pub(crate) fn apply_sandbox_policy_to_current_thread(
install_network_seccomp_filter_on_current_thread()?;
}
if apply_landlock_fs && !sandbox_policy.has_full_disk_write_access() {
if !sandbox_policy.has_full_disk_write_access() {
let writable_roots = sandbox_policy
.get_writable_roots_with_cwd(cwd)
.into_iter()
@@ -72,7 +54,6 @@ pub(crate) fn apply_sandbox_policy_to_current_thread(
Ok(())
}
/// Enable `PR_SET_NO_NEW_PRIVS` so seccomp can be applied safely.
fn set_no_new_privs() -> Result<()> {
let result = unsafe { libc::prctl(libc::PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0) };
if result != 0 {
@@ -87,9 +68,6 @@ fn set_no_new_privs() -> Result<()> {
///
/// # Errors
/// Returns [`CodexErr::Sandbox`] variants when the ruleset fails to apply.
///
/// Note: this is currently unused because filesystem sandboxing is performed
/// via bubblewrap. It is kept for reference and potential fallback use.
fn install_filesystem_landlock_rules_on_current_thread(
writable_roots: Vec<AbsolutePathBuf>,
) -> Result<()> {
@@ -120,9 +98,6 @@ fn install_filesystem_landlock_rules_on_current_thread(
/// Installs a seccomp filter that blocks outbound network access except for
/// AF_UNIX domain sockets.
///
/// The filter is applied to the current thread so only the sandboxed child
/// inherits it.
fn install_network_seccomp_filter_on_current_thread() -> std::result::Result<(), SandboxErr> {
// Build rule map.
let mut rules: BTreeMap<i64, Vec<SeccompRule>> = BTreeMap::new();

View File

@@ -1,16 +1,13 @@
use clap::Parser;
use std::ffi::CString;
use std::fs::File;
use std::io::Read;
use std::os::fd::FromRawFd;
use std::path::Path;
use std::path::PathBuf;
use crate::bwrap::BwrapOptions;
use crate::bwrap::create_bwrap_command_args;
use crate::bwrap::create_bwrap_command_args_vendored;
use crate::landlock::apply_sandbox_policy_to_current_thread;
use crate::vendored_bwrap::exec_vendored_bwrap;
use crate::vendored_bwrap::run_vendored_bwrap_main;
#[derive(Debug, Parser)]
/// CLI surface for the Linux sandbox helper.
@@ -32,6 +29,18 @@ pub struct LandlockCommand {
#[arg(long = "use-bwrap-sandbox", hide = true, default_value_t = false)]
pub use_bwrap_sandbox: bool,
/// Optional explicit path to the `bwrap` binary to use.
///
/// When provided, this implies bubblewrap opt-in and avoids PATH lookups.
#[arg(long = "bwrap-path", hide = true)]
pub bwrap_path: Option<PathBuf>,
/// Experimental: call a build-time bubblewrap `main()` via FFI.
///
/// This is opt-in and only works when the build script compiles bwrap.
#[arg(long = "use-vendored-bwrap", hide = true, default_value_t = false)]
pub use_vendored_bwrap: bool,
/// Internal: apply seccomp and `no_new_privs` in the already-sandboxed
/// process, then exec the user command.
///
@@ -63,10 +72,13 @@ pub fn run_main() -> ! {
sandbox_policy_cwd,
sandbox_policy,
use_bwrap_sandbox,
bwrap_path,
use_vendored_bwrap,
apply_seccomp_then_exec,
no_proc,
command,
} = LandlockCommand::parse();
let use_bwrap_sandbox = use_bwrap_sandbox || bwrap_path.is_some() || use_vendored_bwrap;
if command.is_empty() {
panic!("No command specified to execute.");
@@ -75,192 +87,74 @@ pub fn run_main() -> ! {
// Inner stage: apply seccomp/no_new_privs after bubblewrap has already
// established the filesystem view.
if apply_seccomp_then_exec {
if let Err(e) =
apply_sandbox_policy_to_current_thread(&sandbox_policy, &sandbox_policy_cwd, false)
if let Err(e) = apply_sandbox_policy_to_current_thread(&sandbox_policy, &sandbox_policy_cwd)
{
panic!("error applying Linux sandbox restrictions: {e:?}");
}
exec_or_panic(command);
}
if sandbox_policy.has_full_disk_write_access() {
if let Err(e) =
apply_sandbox_policy_to_current_thread(&sandbox_policy, &sandbox_policy_cwd, false)
let command = if sandbox_policy.has_full_disk_write_access() {
if let Err(e) = apply_sandbox_policy_to_current_thread(&sandbox_policy, &sandbox_policy_cwd)
{
panic!("error applying Linux sandbox restrictions: {e:?}");
}
exec_or_panic(command);
}
if use_bwrap_sandbox {
command
} else if use_bwrap_sandbox {
// Outer stage: bubblewrap first, then re-enter this binary in the
// sandboxed environment to apply seccomp. This path never falls back
// to legacy Landlock on failure.
// sandboxed environment to apply seccomp.
let inner = build_inner_seccomp_command(
&sandbox_policy_cwd,
&sandbox_policy,
use_bwrap_sandbox,
bwrap_path.as_deref(),
command,
);
run_bwrap_with_proc_fallback(&sandbox_policy_cwd, &sandbox_policy, inner, !no_proc);
}
// Legacy path: Landlock enforcement only, when bwrap sandboxing is not enabled.
if let Err(e) =
apply_sandbox_policy_to_current_thread(&sandbox_policy, &sandbox_policy_cwd, true)
{
panic!("error applying legacy Linux sandbox restrictions: {e:?}");
}
exec_or_panic(command);
}
fn run_bwrap_with_proc_fallback(
sandbox_policy_cwd: &Path,
sandbox_policy: &codex_core::protocol::SandboxPolicy,
inner: Vec<String>,
mount_proc: bool,
) -> ! {
let mut mount_proc = mount_proc;
if mount_proc && !preflight_proc_mount_support(sandbox_policy_cwd, sandbox_policy) {
eprintln!("codex-linux-sandbox: bwrap could not mount /proc; retrying with --no-proc");
mount_proc = false;
}
let options = BwrapOptions { mount_proc };
let argv = build_bwrap_argv(inner, sandbox_policy, sandbox_policy_cwd, options);
exec_vendored_bwrap(argv);
}
/// Assemble the full bubblewrap argv for the given inner command.
///
/// Builds the bubblewrap flags via `create_bwrap_command_args`, injects
/// `--argv0 codex-linux-sandbox` immediately before the `--` command
/// separator, and prefixes `bwrap` as argv[0].
///
/// Panics when the flags cannot be built or when the generated argv lacks a
/// `--` separator — both indicate a bug in flag generation, not user error.
fn build_bwrap_argv(
    inner: Vec<String>,
    sandbox_policy: &codex_core::protocol::SandboxPolicy,
    sandbox_policy_cwd: &Path,
    options: BwrapOptions,
) -> Vec<String> {
    let mut args = create_bwrap_command_args(inner, sandbox_policy, sandbox_policy_cwd, options)
        .unwrap_or_else(|err| panic!("error building bubblewrap command: {err:?}"));
    let command_separator_index = args
        .iter()
        .position(|arg| arg == "--")
        .unwrap_or_else(|| panic!("bubblewrap argv is missing command separator '--'"));
    // `splice` with an empty range inserts without removing anything, so the
    // `--argv0` pair lands right before `--`.
    args.splice(
        command_separator_index..command_separator_index,
        ["--argv0".to_string(), "codex-linux-sandbox".to_string()],
    );
    let mut argv = vec!["bwrap".to_string()];
    argv.extend(args);
    argv
}
fn preflight_proc_mount_support(
sandbox_policy_cwd: &Path,
sandbox_policy: &codex_core::protocol::SandboxPolicy,
) -> bool {
let preflight_command = vec![resolve_true_command()];
let preflight_argv = build_bwrap_argv(
preflight_command,
sandbox_policy,
sandbox_policy_cwd,
BwrapOptions { mount_proc: true },
);
let stderr = run_bwrap_in_child_capture_stderr(preflight_argv);
!is_proc_mount_failure(stderr.as_str())
}
/// Choose a `true` executable for the bubblewrap preflight.
///
/// Prefers a well-known absolute path so the preflight does not depend on the
/// sandboxed PATH; falls back to the bare name when neither path exists.
fn resolve_true_command() -> String {
    ["/usr/bin/true", "/bin/true"]
        .into_iter()
        .find(|candidate| Path::new(candidate).exists())
        .map(String::from)
        .unwrap_or_else(|| "true".to_string())
}
/// Run a short-lived bubblewrap preflight in a child process and capture stderr.
///
/// Strategy:
/// - This is used only by `preflight_proc_mount_support`, which runs `/bin/true`
/// under bubblewrap with `--proc /proc`.
/// - The goal is to detect environments where mounting `/proc` fails (for
/// example, restricted containers), so we can retry the real run with
/// `--no-proc`.
/// - We capture stderr from that preflight to match known mount-failure text.
/// We do not stream it because this is a one-shot probe with a trivial
/// command, and reads are bounded to a fixed max size.
fn run_bwrap_in_child_capture_stderr(argv: Vec<String>) -> String {
const MAX_PREFLIGHT_STDERR_BYTES: u64 = 64 * 1024;
let mut pipe_fds = [0; 2];
let pipe_res = unsafe { libc::pipe2(pipe_fds.as_mut_ptr(), libc::O_CLOEXEC) };
if pipe_res < 0 {
let err = std::io::Error::last_os_error();
panic!("failed to create stderr pipe for bubblewrap: {err}");
}
let read_fd = pipe_fds[0];
let write_fd = pipe_fds[1];
let pid = unsafe { libc::fork() };
if pid < 0 {
let err = std::io::Error::last_os_error();
panic!("failed to fork for bubblewrap: {err}");
}
if pid == 0 {
// Child: redirect stderr to the pipe, then run bubblewrap.
unsafe {
close_fd_or_panic(read_fd, "close read end in bubblewrap child");
if libc::dup2(write_fd, libc::STDERR_FILENO) < 0 {
let err = std::io::Error::last_os_error();
panic!("failed to redirect stderr for bubblewrap: {err}");
let options = BwrapOptions {
mount_proc: !no_proc,
};
if use_vendored_bwrap {
let mut argv0 = bwrap_path
.as_deref()
.map(|path| path.to_string_lossy().to_string())
.unwrap_or_else(|| "bwrap".to_string());
if argv0.is_empty() {
argv0 = "bwrap".to_string();
}
close_fd_or_panic(write_fd, "close write end in bubblewrap child");
let mut argv = vec![argv0];
argv.extend(
create_bwrap_command_args_vendored(
inner,
&sandbox_policy,
&sandbox_policy_cwd,
options,
)
.unwrap_or_else(|err| {
panic!("error building build-time bubblewrap command: {err:?}")
}),
);
exec_vendored_bwrap(argv);
}
ensure_bwrap_available(bwrap_path.as_deref());
create_bwrap_command_args(
inner,
&sandbox_policy,
&sandbox_policy_cwd,
options,
bwrap_path.as_deref(),
)
.unwrap_or_else(|err| panic!("error building bubblewrap command: {err:?}"))
} else {
// Legacy path: Landlock enforcement only.
if let Err(e) = apply_sandbox_policy_to_current_thread(&sandbox_policy, &sandbox_policy_cwd)
{
panic!("error applying legacy Linux sandbox restrictions: {e:?}");
}
command
};
let exit_code = run_vendored_bwrap_main(&argv);
std::process::exit(exit_code);
}
// Parent: close the write end and read stderr while the child runs.
close_fd_or_panic(write_fd, "close write end in bubblewrap parent");
// SAFETY: `read_fd` is a valid owned fd in the parent.
let mut read_file = unsafe { File::from_raw_fd(read_fd) };
let mut stderr_bytes = Vec::new();
let mut limited_reader = (&mut read_file).take(MAX_PREFLIGHT_STDERR_BYTES);
if let Err(err) = limited_reader.read_to_end(&mut stderr_bytes) {
panic!("failed to read bubblewrap stderr: {err}");
}
let mut status: libc::c_int = 0;
let wait_res = unsafe { libc::waitpid(pid, &mut status as *mut libc::c_int, 0) };
if wait_res < 0 {
let err = std::io::Error::last_os_error();
panic!("waitpid failed for bubblewrap child: {err}");
}
String::from_utf8_lossy(&stderr_bytes).into_owned()
}
/// Close an owned file descriptor, panicking with context on failure.
///
/// Close errors are surfaced loudly (rather than ignored) because this runs
/// during low-level sandbox setup, where a leaked or half-closed fd can mask
/// the root cause of a later failure.
fn close_fd_or_panic(fd: libc::c_int, context: &str) {
    // SAFETY: the caller passes an fd it owns and does not use it afterwards.
    if unsafe { libc::close(fd) } < 0 {
        let err = std::io::Error::last_os_error();
        panic!("{context}: {err}");
    }
}
fn is_proc_mount_failure(stderr: &str) -> bool {
stderr.contains("Can't mount proc")
&& stderr.contains("/newroot/proc")
&& stderr.contains("Invalid argument")
exec_or_panic(command);
}
/// Build the inner command that applies seccomp after bubblewrap.
@@ -268,6 +162,7 @@ fn build_inner_seccomp_command(
sandbox_policy_cwd: &Path,
sandbox_policy: &codex_core::protocol::SandboxPolicy,
use_bwrap_sandbox: bool,
bwrap_path: Option<&Path>,
command: Vec<String>,
) -> Vec<String> {
let current_exe = match std::env::current_exe() {
@@ -290,6 +185,10 @@ fn build_inner_seccomp_command(
inner.push("--use-bwrap-sandbox".to_string());
inner.push("--apply-seccomp-then-exec".to_string());
}
if let Some(bwrap_path) = bwrap_path {
inner.push("--bwrap-path".to_string());
inner.push(bwrap_path.to_string_lossy().to_string());
}
inner.push("--".to_string());
inner.extend(command);
inner
@@ -318,52 +217,32 @@ fn exec_or_panic(command: Vec<String>) -> ! {
panic!("Failed to execvp {}: {err}", command[0].as_str());
}
#[cfg(test)]
mod tests {
use super::*;
use codex_core::protocol::SandboxPolicy;
use pretty_assertions::assert_eq;
#[test]
fn detects_proc_mount_invalid_argument_failure() {
let stderr = "bwrap: Can't mount proc on /newroot/proc: Invalid argument";
assert_eq!(is_proc_mount_failure(stderr), true);
}
#[test]
fn ignores_non_proc_mount_errors() {
let stderr = "bwrap: Can't bind mount /dev/null: Operation not permitted";
assert_eq!(is_proc_mount_failure(stderr), false);
}
#[test]
fn inserts_bwrap_argv0_before_command_separator() {
let argv = build_bwrap_argv(
vec!["/bin/true".to_string()],
&SandboxPolicy::ReadOnly,
Path::new("/"),
BwrapOptions { mount_proc: true },
);
assert_eq!(
argv,
vec![
"bwrap".to_string(),
"--new-session".to_string(),
"--die-with-parent".to_string(),
"--ro-bind".to_string(),
"/".to_string(),
"/".to_string(),
"--dev-bind".to_string(),
"/dev/null".to_string(),
"/dev/null".to_string(),
"--unshare-pid".to_string(),
"--proc".to_string(),
"/proc".to_string(),
"--argv0".to_string(),
"codex-linux-sandbox".to_string(),
"--".to_string(),
"/bin/true".to_string(),
]
/// Ensure the `bwrap` binary is available when the sandbox needs it.
fn ensure_bwrap_available(bwrap_path: Option<&Path>) {
if let Some(path) = bwrap_path {
if path.exists() {
return;
}
panic!(
"bubblewrap (bwrap) is required for Linux filesystem sandboxing but was not found at the configured path: {}\n\
Install it and retry. Examples:\n\
- Debian/Ubuntu: apt-get install bubblewrap\n\
- Fedora/RHEL: dnf install bubblewrap\n\
- Arch: pacman -S bubblewrap\n\
If you are running the Codex Node package, ensure bwrap is installed on the host system.",
path.display()
);
}
if which::which("bwrap").is_ok() {
return;
}
panic!(
"bubblewrap (bwrap) is required for Linux filesystem sandboxing but was not found on PATH.\n\
Install it and retry. Examples:\n\
- Debian/Ubuntu: apt-get install bubblewrap\n\
- Fedora/RHEL: dnf install bubblewrap\n\
- Arch: pacman -S bubblewrap\n\
If you are running the Codex Node package, ensure bwrap is installed on the host system."
);
}

View File

@@ -0,0 +1,339 @@
#![allow(dead_code)]
use std::ffi::CString;
use std::os::unix::ffi::OsStrExt;
use std::path::Path;
use codex_core::error::CodexErr;
use codex_core::error::Result;
use codex_core::protocol::SandboxPolicy;
use codex_core::protocol::WritableRoot;
use codex_utils_absolute_path::AbsolutePathBuf;
/// Apply read-only bind mounts for protected subpaths before Landlock.
///
/// This unshares mount namespaces (and user namespaces for non-root) so the
/// read-only remounts do not affect the host, then bind-mounts each protected
/// target onto itself and remounts it read-only.
///
/// # Errors
/// Returns an error when a protected path is missing, a namespace cannot be
/// unshared, or a mount/remount syscall fails.
pub(crate) fn apply_read_only_mounts(sandbox_policy: &SandboxPolicy, cwd: &Path) -> Result<()> {
    let writable_roots = sandbox_policy.get_writable_roots_with_cwd(cwd);
    let mount_targets = collect_read_only_mount_targets(&writable_roots)?;
    if mount_targets.is_empty() {
        return Ok(());
    }
    // Capture the privilege level *before* entering any user namespace: once
    // `write_user_namespace_maps` maps our euid to 0, `geteuid()` reports
    // root inside the namespace, so re-checking `is_running_as_root()` after
    // the unshare would wrongly skip the capability drop below.
    let started_as_root = is_running_as_root();
    // Root can unshare the mount namespace directly; non-root needs a user
    // namespace to gain capabilities for remounting.
    if started_as_root {
        unshare_mount_namespace()?;
    } else {
        let original_euid = unsafe { libc::geteuid() };
        let original_egid = unsafe { libc::getegid() };
        unshare_user_and_mount_namespaces()?;
        write_user_namespace_maps(original_euid, original_egid)?;
    }
    make_mounts_private()?;
    for target in mount_targets {
        // Bind and remount read-only works for both files and directories.
        bind_mount_read_only(target.as_path())?;
    }
    // Drop the capabilities acquired from the user namespace so the
    // sandboxed command cannot remount or create new bind mounts.
    if !started_as_root {
        drop_caps()?;
    }
    Ok(())
}
/// Collect read-only mount targets, resolving worktree `.git` pointer files.
///
/// Every protected subpath must exist; a missing one is reported as an error
/// so the protection is never silently skipped. For `.git` pointer files the
/// referenced gitdir is appended as an extra (deduplicated) target.
fn collect_read_only_mount_targets(
    writable_roots: &[WritableRoot],
) -> Result<Vec<AbsolutePathBuf>> {
    let mut targets: Vec<AbsolutePathBuf> = Vec::new();
    for subpath in writable_roots
        .iter()
        .flat_map(|root| root.read_only_subpaths.iter())
    {
        // The policy expects these paths to exist; surface actionable errors
        // rather than silently skipping protections.
        if !subpath.as_path().exists() {
            return Err(CodexErr::UnsupportedOperation(format!(
                "Sandbox expected to protect {path}, but it does not exist. Ensure the repository contains this path or create it before running Codex.",
                path = subpath.as_path().display()
            )));
        }
        targets.push(subpath.clone());
        // Worktrees and submodules store `.git` as a pointer file; add the
        // referenced gitdir as an extra read-only target.
        if is_git_pointer_file(subpath) {
            let gitdir = resolve_gitdir_from_file(subpath)?;
            let already_present = targets
                .iter()
                .any(|existing| existing.as_path() == gitdir.as_path());
            if !already_present {
                targets.push(gitdir);
            }
        }
    }
    Ok(targets)
}
/// Detect a `.git` pointer file used by worktrees and submodules.
///
/// A pointer is a regular *file* named `.git` (a normal repository has a
/// `.git` directory instead).
fn is_git_pointer_file(path: &AbsolutePathBuf) -> bool {
    let path = path.as_path();
    path.is_file() && path.file_name().is_some_and(|name| name == ".git")
}
/// Resolve a worktree `.git` pointer file to its gitdir path.
///
/// The pointer file contains a single `gitdir: <path>` line; `<path>` may be
/// relative to the directory containing `.git`. Errors are returned when the
/// file does not match that shape, the target is empty, the parent directory
/// cannot be determined, or the resolved gitdir does not exist.
fn resolve_gitdir_from_file(dot_git: &AbsolutePathBuf) -> Result<AbsolutePathBuf> {
    let pointer_text = std::fs::read_to_string(dot_git.as_path())?;
    let Some((_, raw_target)) = pointer_text.trim().split_once(':') else {
        return Err(CodexErr::UnsupportedOperation(format!(
            "Expected {path} to contain a gitdir pointer, but it did not match `gitdir: <path>`.",
            path = dot_git.as_path().display()
        )));
    };
    // `gitdir: <path>` may be relative to the directory containing `.git`.
    let raw_target = raw_target.trim();
    if raw_target.is_empty() {
        return Err(CodexErr::UnsupportedOperation(format!(
            "Expected {path} to contain a gitdir pointer, but it was empty.",
            path = dot_git.as_path().display()
        )));
    }
    let Some(pointer_dir) = dot_git.as_path().parent() else {
        return Err(CodexErr::UnsupportedOperation(format!(
            "Unable to resolve parent directory for {path}.",
            path = dot_git.as_path().display()
        )));
    };
    let gitdir_path = AbsolutePathBuf::resolve_path_against_base(raw_target, pointer_dir)?;
    if !gitdir_path.as_path().exists() {
        return Err(CodexErr::UnsupportedOperation(format!(
            "Resolved gitdir path {path} does not exist.",
            path = gitdir_path.as_path().display()
        )));
    }
    Ok(gitdir_path)
}
/// Unshare the mount namespace so mount changes are isolated to the sandboxed process.
fn unshare_mount_namespace() -> Result<()> {
    // SAFETY: unshare(2) only detaches this process's namespace; no pointers
    // or shared memory are involved.
    if unsafe { libc::unshare(libc::CLONE_NEWNS) } == 0 {
        Ok(())
    } else {
        Err(std::io::Error::last_os_error().into())
    }
}
/// Unshare user + mount namespaces so the process can remount read-only without privileges.
fn unshare_user_and_mount_namespaces() -> Result<()> {
    // SAFETY: unshare(2) only detaches this process's namespaces; no pointers
    // or shared memory are involved.
    if unsafe { libc::unshare(libc::CLONE_NEWUSER | libc::CLONE_NEWNS) } == 0 {
        Ok(())
    } else {
        Err(std::io::Error::last_os_error().into())
    }
}
/// True when the effective uid (as seen in the current user namespace) is root.
fn is_running_as_root() -> bool {
    // SAFETY: geteuid(2) takes no arguments and cannot fail.
    let euid = unsafe { libc::geteuid() };
    euid == 0
}
// Header argument for the raw capset(2) syscall. Layout mirrors the kernel's
// `__user_cap_header_struct`; per capset(2), `pid == 0` targets the caller.
#[repr(C)]
struct CapUserHeader {
    version: u32,
    pid: i32,
}
// One capability set for capset(2) (kernel `__user_cap_data_struct`). The
// version-3 ABI passes an array of two: capability bits 0-31 and 32-63.
#[repr(C)]
struct CapUserData {
    effective: u32,
    permitted: u32,
    inheritable: u32,
}
// `_LINUX_CAPABILITY_VERSION_3` from `linux/capability.h`.
const LINUX_CAPABILITY_VERSION_3: u32 = 0x2008_0522;
/// Map the provided uid/gid to root inside the user namespace.
///
/// Per user_namespaces(7), an unprivileged process must write "deny" to
/// `/proc/self/setgroups` before it is allowed to write `gid_map`.
fn write_user_namespace_maps(uid: libc::uid_t, gid: libc::gid_t) -> Result<()> {
    write_proc_file("/proc/self/setgroups", "deny\n")?;
    write_proc_file("/proc/self/uid_map", format!("0 {uid} 1\n"))?;
    write_proc_file("/proc/self/gid_map", format!("0 {gid} 1\n"))
}
/// Drop all capabilities in the current user namespace.
///
/// Clears the effective, permitted, and inheritable sets so the sandboxed
/// command cannot regain mount privileges granted by the user namespace.
fn drop_caps() -> Result<()> {
    let mut header = CapUserHeader {
        version: LINUX_CAPABILITY_VERSION_3,
        pid: 0,
    };
    // Version 3 of the capset ABI takes two 32-bit sets (bits 0-31, 32-63);
    // all zero clears every capability.
    let cleared_sets = [
        CapUserData {
            effective: 0,
            permitted: 0,
            inheritable: 0,
        },
        CapUserData {
            effective: 0,
            permitted: 0,
            inheritable: 0,
        },
    ];
    // Use syscall directly to avoid libc capability symbols that are missing
    // on musl.
    // SAFETY: header and data match the capset(2) version-3 ABI layout.
    let rc = unsafe { libc::syscall(libc::SYS_capset, &mut header, cleared_sets.as_ptr()) };
    if rc == 0 {
        Ok(())
    } else {
        Err(std::io::Error::last_os_error().into())
    }
}
/// Write a small procfs file, returning a sandbox error on failure.
fn write_proc_file(path: &str, contents: impl AsRef<[u8]>) -> Result<()> {
    std::fs::write(path, contents).map_err(Into::into)
}
/// Ensure mounts are private so remounting does not propagate outside the namespace.
///
/// Equivalent to `mount --make-rprivate /`: re-marks the whole tree as
/// private propagation before any bind mounts are created.
fn make_mounts_private() -> Result<()> {
    let root = CString::new("/").map_err(|_| {
        CodexErr::UnsupportedOperation("Sandbox mount path contains NUL byte: /".to_string())
    })?;
    // SAFETY: all pointer arguments are either null (permitted by mount(2)
    // for a propagation-type change) or point to a valid NUL-terminated path.
    let rc = unsafe {
        libc::mount(
            std::ptr::null(),
            root.as_ptr(),
            std::ptr::null(),
            libc::MS_REC | libc::MS_PRIVATE,
            std::ptr::null(),
        )
    };
    if rc == 0 {
        Ok(())
    } else {
        Err(std::io::Error::last_os_error().into())
    }
}
/// Bind-mount a path onto itself and remount read-only.
///
/// Two mount(2) calls are required: a plain `MS_BIND` creates the self bind
/// mount, and a second `MS_BIND | MS_REMOUNT | MS_RDONLY` pass flips it to
/// read-only. This works for both files and directories.
fn bind_mount_read_only(path: &Path) -> Result<()> {
    let c_path = CString::new(path.as_os_str().as_bytes()).map_err(|_| {
        CodexErr::UnsupportedOperation(format!(
            "Sandbox mount path contains NUL byte: {path}",
            path = path.display()
        ))
    })?;
    for flags in [
        libc::MS_BIND,
        libc::MS_BIND | libc::MS_REMOUNT | libc::MS_RDONLY,
    ] {
        // SAFETY: source and target point to the same valid NUL-terminated
        // path; fstype and data may be null for bind/remount operations.
        let rc = unsafe {
            libc::mount(
                c_path.as_ptr(),
                c_path.as_ptr(),
                std::ptr::null(),
                flags,
                std::ptr::null(),
            )
        };
        if rc != 0 {
            return Err(std::io::Error::last_os_error().into());
        }
    }
    Ok(())
}
#[cfg(test)]
mod tests {
    use super::*;
    use pretty_assertions::assert_eq;

    /// Extract the message from an `UnsupportedOperation` error, panicking on
    /// any other variant.
    fn unsupported_message(err: CodexErr) -> String {
        match err {
            CodexErr::UnsupportedOperation(message) => message,
            other => panic!("unexpected error: {other:?}"),
        }
    }

    /// Build a `WritableRoot` at `root` protecting the given subpaths.
    fn writable_root(
        root: &std::path::Path,
        read_only_subpaths: Vec<AbsolutePathBuf>,
    ) -> WritableRoot {
        WritableRoot {
            root: AbsolutePathBuf::try_from(root).expect("root"),
            read_only_subpaths,
        }
    }

    #[test]
    fn collect_read_only_mount_targets_errors_on_missing_path() {
        let tempdir = tempfile::tempdir().expect("tempdir");
        let missing_path = tempdir.path().join("missing");
        let missing = AbsolutePathBuf::try_from(missing_path.as_path()).expect("missing path");
        let roots = [writable_root(tempdir.path(), vec![missing])];
        let err =
            collect_read_only_mount_targets(&roots).expect_err("expected missing path error");
        assert_eq!(
            unsupported_message(err),
            format!(
                "Sandbox expected to protect {path}, but it does not exist. Ensure the repository contains this path or create it before running Codex.",
                path = missing_path.display()
            )
        );
    }

    #[test]
    fn collect_read_only_mount_targets_adds_gitdir_for_pointer_file() {
        let tempdir = tempfile::tempdir().expect("tempdir");
        let gitdir = tempdir.path().join("actual-gitdir");
        std::fs::create_dir_all(&gitdir).expect("create gitdir");
        // A `.git` pointer file redirects to the real git directory; both paths
        // should end up protected.
        let dot_git = tempdir.path().join(".git");
        std::fs::write(&dot_git, format!("gitdir: {}\n", gitdir.display()))
            .expect("write gitdir pointer");
        let subpath = AbsolutePathBuf::try_from(dot_git.as_path()).expect("dot git");
        let roots = [writable_root(tempdir.path(), vec![subpath])];
        let targets = collect_read_only_mount_targets(&roots).expect("collect targets");
        let target_paths: Vec<_> = targets.iter().map(|t| t.as_path()).collect();
        assert_eq!(target_paths, vec![dot_git.as_path(), gitdir.as_path()]);
    }

    #[test]
    fn collect_read_only_mount_targets_errors_on_invalid_gitdir_pointer() {
        let tempdir = tempfile::tempdir().expect("tempdir");
        let dot_git = tempdir.path().join(".git");
        std::fs::write(&dot_git, "not-a-pointer\n").expect("write invalid pointer");
        let subpath = AbsolutePathBuf::try_from(dot_git.as_path()).expect("dot git");
        let roots = [writable_root(tempdir.path(), vec![subpath])];
        let err =
            collect_read_only_mount_targets(&roots).expect_err("expected invalid pointer error");
        assert_eq!(
            unsupported_message(err),
            format!(
                "Expected {path} to contain a gitdir pointer, but it did not match `gitdir: <path>`.",
                path = dot_git.display()
            )
        );
    }
}

View File

@@ -13,35 +13,22 @@ mod imp {
fn bwrap_main(argc: libc::c_int, argv: *const *const c_char) -> libc::c_int;
}
fn argv_to_cstrings(argv: &[String]) -> Vec<CString> {
/// Execute the build-time bubblewrap `main` function with the given argv.
pub(crate) fn exec_vendored_bwrap(argv: Vec<String>) -> ! {
let mut cstrings: Vec<CString> = Vec::with_capacity(argv.len());
for arg in argv {
for arg in &argv {
match CString::new(arg.as_str()) {
Ok(value) => cstrings.push(value),
Err(err) => panic!("failed to convert argv to CString: {err}"),
}
}
cstrings
}
/// Run the build-time bubblewrap `main` function and return its exit code.
///
/// On success, bubblewrap will `execve` into the target program and this
/// function will never return. A return value therefore implies failure.
pub(crate) fn run_vendored_bwrap_main(argv: &[String]) -> libc::c_int {
let cstrings = argv_to_cstrings(argv);
let mut argv_ptrs: Vec<*const c_char> = cstrings.iter().map(|arg| arg.as_ptr()).collect();
argv_ptrs.push(std::ptr::null());
// SAFETY: We provide a null-terminated argv vector whose pointers
// remain valid for the duration of the call.
unsafe { bwrap_main(cstrings.len() as libc::c_int, argv_ptrs.as_ptr()) }
}
/// Execute the build-time bubblewrap `main` function with the given argv.
pub(crate) fn exec_vendored_bwrap(argv: Vec<String>) -> ! {
let exit_code = run_vendored_bwrap_main(&argv);
let exit_code = unsafe { bwrap_main(cstrings.len() as libc::c_int, argv_ptrs.as_ptr()) };
std::process::exit(exit_code);
}
}
@@ -49,7 +36,7 @@ mod imp {
#[cfg(not(vendored_bwrap_available))]
mod imp {
/// Panics with a clear error when the build-time bwrap path is not enabled.
pub(crate) fn run_vendored_bwrap_main(_argv: &[String]) -> libc::c_int {
pub(crate) fn exec_vendored_bwrap(_argv: Vec<String>) -> ! {
panic!(
"build-time bubblewrap is not available in this build.\n\
Rebuild codex-linux-sandbox on Linux with CODEX_BWRAP_ENABLE_FFI=1.\n\
@@ -62,13 +49,6 @@ Notes:\n\
- bubblewrap sources expected at codex-rs/vendor/bubblewrap (default)"
);
}
/// Panics with a clear error when the build-time bwrap path is not enabled.
pub(crate) fn exec_vendored_bwrap(_argv: Vec<String>) -> ! {
let _ = run_vendored_bwrap_main(&[]);
unreachable!("run_vendored_bwrap_main should always panic in this configuration")
}
}
pub(crate) use imp::exec_vendored_bwrap;
pub(crate) use imp::run_vendored_bwrap_main;

View File

@@ -2,7 +2,6 @@
#![allow(clippy::unwrap_used)]
use codex_core::config::types::ShellEnvironmentPolicy;
use codex_core::error::CodexErr;
use codex_core::error::Result;
use codex_core::error::SandboxErr;
use codex_core::exec::ExecParams;
use codex_core::exec::process_exec_tool_call;
@@ -33,8 +32,6 @@ const NETWORK_TIMEOUT_MS: u64 = 2_000;
#[cfg(target_arch = "aarch64")]
const NETWORK_TIMEOUT_MS: u64 = 10_000;
const BWRAP_UNAVAILABLE_ERR: &str = "build-time bubblewrap is not available in this build.";
fn create_env_from_core_vars() -> HashMap<String, String> {
let policy = ShellEnvironmentPolicy::default();
create_env(&policy, None)
@@ -50,24 +47,12 @@ async fn run_cmd(cmd: &[&str], writable_roots: &[PathBuf], timeout_ms: u64) {
}
}
#[expect(clippy::expect_used)]
#[expect(clippy::expect_used, clippy::unwrap_used)]
async fn run_cmd_output(
cmd: &[&str],
writable_roots: &[PathBuf],
timeout_ms: u64,
) -> codex_core::exec::ExecToolCallOutput {
run_cmd_result_with_writable_roots(cmd, writable_roots, timeout_ms, false)
.await
.expect("sandboxed command should execute")
}
#[expect(clippy::expect_used)]
async fn run_cmd_result_with_writable_roots(
cmd: &[&str],
writable_roots: &[PathBuf],
timeout_ms: u64,
use_bwrap_sandbox: bool,
) -> Result<codex_core::exec::ExecToolCallOutput> {
let cwd = std::env::current_dir().expect("cwd should exist");
let sandbox_cwd = cwd.clone();
let params = ExecParams {
@@ -101,48 +86,10 @@ async fn run_cmd_result_with_writable_roots(
&sandbox_policy,
sandbox_cwd.as_path(),
&codex_linux_sandbox_exe,
use_bwrap_sandbox,
None,
)
.await
}
fn is_bwrap_unavailable_output(output: &codex_core::exec::ExecToolCallOutput) -> bool {
output.stderr.text.contains(BWRAP_UNAVAILABLE_ERR)
}
async fn should_skip_bwrap_tests() -> bool {
match run_cmd_result_with_writable_roots(
&["bash", "-lc", "true"],
&[],
NETWORK_TIMEOUT_MS,
true,
)
.await
{
Ok(output) => is_bwrap_unavailable_output(&output),
Err(CodexErr::Sandbox(SandboxErr::Denied { output })) => {
is_bwrap_unavailable_output(&output)
}
// Probe timeouts are not actionable for the bwrap-specific assertions below;
// skip rather than fail the whole suite.
Err(CodexErr::Sandbox(SandboxErr::Timeout { .. })) => true,
Err(err) => panic!("bwrap availability probe failed unexpectedly: {err:?}"),
}
}
fn expect_denied(
result: Result<codex_core::exec::ExecToolCallOutput>,
context: &str,
) -> codex_core::exec::ExecToolCallOutput {
match result {
Ok(output) => {
assert_ne!(output.exit_code, 0, "{context}: expected nonzero exit code");
output
}
Err(CodexErr::Sandbox(SandboxErr::Denied { output })) => *output,
Err(err) => panic!("{context}: {err:?}"),
}
.unwrap()
}
#[tokio::test]
@@ -245,7 +192,6 @@ async fn assert_network_blocked(cmd: &[&str]) {
&sandbox_policy,
sandbox_cwd.as_path(),
&codex_linux_sandbox_exe,
false,
None,
)
.await;
@@ -296,90 +242,6 @@ async fn sandbox_blocks_nc() {
assert_network_blocked(&["nc", "-z", "127.0.0.1", "80"]).await;
}
#[tokio::test]
async fn sandbox_blocks_git_and_codex_writes_inside_writable_root() {
if should_skip_bwrap_tests().await {
eprintln!("skipping bwrap test: vendored bwrap was not built in this environment");
return;
}
let tmpdir = tempfile::tempdir().expect("tempdir");
let dot_git = tmpdir.path().join(".git");
let dot_codex = tmpdir.path().join(".codex");
std::fs::create_dir_all(&dot_git).expect("create .git");
std::fs::create_dir_all(&dot_codex).expect("create .codex");
let git_target = dot_git.join("config");
let codex_target = dot_codex.join("config.toml");
let git_output = expect_denied(
run_cmd_result_with_writable_roots(
&[
"bash",
"-lc",
&format!("echo denied > {}", git_target.to_string_lossy()),
],
&[tmpdir.path().to_path_buf()],
LONG_TIMEOUT_MS,
true,
)
.await,
".git write should be denied under bubblewrap",
);
let codex_output = expect_denied(
run_cmd_result_with_writable_roots(
&[
"bash",
"-lc",
&format!("echo denied > {}", codex_target.to_string_lossy()),
],
&[tmpdir.path().to_path_buf()],
LONG_TIMEOUT_MS,
true,
)
.await,
".codex write should be denied under bubblewrap",
);
assert_ne!(git_output.exit_code, 0);
assert_ne!(codex_output.exit_code, 0);
}
#[tokio::test]
async fn sandbox_blocks_codex_symlink_replacement_attack() {
if should_skip_bwrap_tests().await {
eprintln!("skipping bwrap test: vendored bwrap was not built in this environment");
return;
}
use std::os::unix::fs::symlink;
let tmpdir = tempfile::tempdir().expect("tempdir");
let decoy = tmpdir.path().join("decoy-codex");
std::fs::create_dir_all(&decoy).expect("create decoy dir");
let dot_codex = tmpdir.path().join(".codex");
symlink(&decoy, &dot_codex).expect("create .codex symlink");
let codex_target = dot_codex.join("config.toml");
let codex_output = expect_denied(
run_cmd_result_with_writable_roots(
&[
"bash",
"-lc",
&format!("echo denied > {}", codex_target.to_string_lossy()),
],
&[tmpdir.path().to_path_buf()],
LONG_TIMEOUT_MS,
true,
)
.await,
".codex symlink replacement should be denied",
);
assert_ne!(codex_output.exit_code, 0);
}
#[tokio::test]
async fn sandbox_blocks_ssh() {
// Force ssh to attempt a real TCP connection but fail quickly. `BatchMode`