Compare commits

..

19 Commits

Author SHA1 Message Date
viyatb-oai
3fc1dd287d fix(linux-sandbox): tolerate runfiles codex carveout sources
Co-authored-by: Codex noreply@openai.com
2026-04-13 22:51:02 -07:00
viyatb-oai
badb1beb36 fix(linux-sandbox): mask fragile bwrap carveout paths
Co-authored-by: Codex noreply@openai.com
2026-04-13 22:41:34 -07:00
viyatb-oai
4179952552 fix(linux-sandbox): clean bwrap mountpoint placeholders
Co-authored-by: Codex noreply@openai.com
2026-04-13 22:29:17 -07:00
viyatb-oai
6270d61439 chore: merge origin/main into bwrap regression branch
Co-authored-by: Codex noreply@openai.com
2026-04-10 23:16:04 -07:00
viyatb-oai
356035c871 fix(linux-sandbox): avoid expect in landlock test helper
Co-authored-by: Codex noreply@openai.com
2026-04-10 23:11:18 -07:00
viyatb-oai
906cff498c Merge remote-tracking branch 'origin/main' into codex/viyatb/0118-bwrap-regression 2026-04-10 11:28:32 -07:00
viyatb-oai
46f62ffbb1 Revert "fix(app-server): unload threads from shared processor"
This reverts commit 20e063a18d.
2026-04-10 11:28:21 -07:00
viyatb-oai
20e063a18d fix(app-server): unload threads from shared processor
Allow thread teardown to run from shared connection handlers by making unload_thread_without_subscribers take &self. The method only uses interior-mutability fields and cloned handles, so it does not require mutable access to the processor.

Co-authored-by: Codex noreply@openai.com
2026-04-10 09:43:08 -07:00
viyatb-oai
361dae22b2 fix(linux-sandbox): deny missing read-only subtrees
Mask the first missing component of read-only carveouts under writable roots so sandboxed processes cannot create protected paths when the final leaf does not exist yet.

Co-authored-by: Codex noreply@openai.com
2026-04-10 09:10:26 -07:00
viyatb-oai
3b0c2edb7f Revert "fix(app-server): unload threads from shared processor"
This reverts commit 83b54acd91.
2026-04-10 09:07:55 -07:00
viyatb-oai
83b54acd91 fix(app-server): unload threads from shared processor
Allow thread teardown to run from shared connection handlers by making unload_thread_without_subscribers take &self. The method only uses interior-mutability fields and cloned handles, so it does not need mutable access to the processor.

Co-authored-by: Codex noreply@openai.com
2026-04-10 09:05:02 -07:00
viyatb-oai
ec5a7fd714 Merge remote-tracking branch 'origin/main' into codex/viyatb/0118-bwrap-regression
# Conflicts:
#	codex-rs/linux-sandbox/tests/suite/landlock.rs
2026-04-10 08:44:40 -07:00
viyatb-oai
f45dabf46e Merge remote-tracking branch 'origin/main' into codex/viyatb/0118-bwrap-regression
# Conflicts:
#	codex-rs/core/src/config/mod.rs
2026-04-06 20:26:41 -07:00
viyatb-oai
17e52b756d test: revert unrelated app-server change 2026-04-06 13:25:06 -07:00
viyatb-oai
f93598ef4b test: fix bwrap test helper path 2026-04-06 12:49:37 -07:00
viyatb-oai
3c69faf447 test: fix bwrap CI regressions 2026-04-06 12:34:36 -07:00
viyatb-oai
1a2c4fd308 test: fix linux bwrap test clippy allowance 2026-04-06 12:27:54 -07:00
viyatb-oai
b82d0b6748 test: annotate linux bwrap test arguments 2026-04-06 12:27:54 -07:00
viyatb-oai
709b9c075f Fix missing .codex Linux bwrap startup
Co-authored-by: Codex noreply@openai.com
2026-04-06 12:27:54 -07:00
14 changed files with 519 additions and 813 deletions

View File

@@ -1,71 +0,0 @@
# Secure-profile devcontainer image: Ubuntu base with build tooling,
# Node.js + the Codex CLI, and the firewall helper scripts baked in.
FROM mcr.microsoft.com/devcontainers/base:ubuntu-24.04
# Build-time knobs: timezone, Node.js major version, pinned Rust toolchain,
# and which Codex npm release to install.
ARG TZ
ARG DEBIAN_FRONTEND=noninteractive
ARG NODE_MAJOR=22
ARG RUST_TOOLCHAIN=1.92.0
ARG CODEX_NPM_VERSION=latest
ENV TZ="$TZ"
# pipefail so a failure anywhere in a piped RUN command aborts the build.
SHELL ["/bin/bash", "-o", "pipefail", "-c"]
# Core build/dev tooling, plus the networking utilities (dnsutils, iproute2,
# ipset, iptables, aggregate) required by init-firewall.sh at runtime.
# apt lists are cleaned in the same layer to keep the image small.
RUN apt-get update \
&& apt-get install -y --no-install-recommends \
build-essential \
curl \
git \
ca-certificates \
pkg-config \
clang \
musl-tools \
libssl-dev \
libsqlite3-dev \
just \
python3 \
python3-pip \
jq \
less \
man-db \
unzip \
ripgrep \
fzf \
fd-find \
zsh \
dnsutils \
iproute2 \
ipset \
iptables \
aggregate \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists/*
# Node.js from NodeSource, then the Codex CLI and pnpm (via corepack).
RUN curl -fsSL "https://deb.nodesource.com/setup_${NODE_MAJOR}.x" | bash - \
&& apt-get update \
&& apt-get install -y --no-install-recommends nodejs \
&& npm install -g corepack@latest "@openai/codex@${CODEX_NPM_VERSION}" \
&& corepack enable \
&& corepack prepare pnpm@10.28.2 --activate \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists/*
# Firewall and container-lifecycle scripts used by the secure profile.
COPY .devcontainer/init-firewall.sh /usr/local/bin/init-firewall.sh
COPY .devcontainer/post_install.py /opt/post_install.py
COPY .devcontainer/post-start.sh /opt/post_start.sh
# Firewall script is root-only (500); post_install.py is handed to vscode.
RUN chmod 500 /usr/local/bin/init-firewall.sh \
&& chmod 755 /opt/post_start.sh \
&& chmod 644 /opt/post_install.py \
&& chown vscode:vscode /opt/post_install.py
# Shared shell-history volume mount point and the workspace directory.
RUN install -d -m 0775 -o vscode -g vscode /commandhistory /workspace \
&& touch /commandhistory/.bash_history /commandhistory/.zsh_history \
&& chown vscode:vscode /commandhistory/.bash_history /commandhistory/.zsh_history
# Everything below runs as the unprivileged container user.
USER vscode
ENV PATH="/home/vscode/.cargo/bin:${PATH}"
WORKDIR /workspace
# Per-user Rust toolchain with clippy/rustfmt plus musl cross targets.
RUN curl -sSf https://sh.rustup.rs | sh -s -- -y --profile minimal --default-toolchain "${RUST_TOOLCHAIN}" \
&& rustup component add clippy rustfmt rust-src \
&& rustup target add x86_64-unknown-linux-musl aarch64-unknown-linux-musl

View File

@@ -1,36 +1,10 @@
# Containerized Development
We provide two container paths:
- `devcontainer.json` keeps the existing Codex contributor setup for working on this repository.
- `devcontainer.secure.json` adds a customer-oriented profile with stricter outbound network controls.
## Codex contributor profile
Use `devcontainer.json` when you are developing Codex itself. This is the same lightweight arm64 container that already exists in the repo.
## Secure customer profile
Use `devcontainer.secure.json` when you want a stricter runtime profile for running Codex inside a project container:
- installs the Codex CLI plus common build tools
- enables firewall startup with an allowlist-driven outbound policy
- blocks IPv6 by default so the allowlist cannot be bypassed over AAAA routes
- requires `NET_ADMIN` and `NET_RAW` so the firewall can be installed at startup
This profile keeps the stricter networking isolated to the customer path instead of changing the default Codex contributor container.
Start it from the CLI with:
```bash
devcontainer up --workspace-folder . --config .devcontainer/devcontainer.secure.json
```
In VS Code, choose **Dev Containers: Open Folder in Container...** and select `.devcontainer/devcontainer.secure.json`.
We provide the following options to facilitate Codex development in a container. This is particularly useful for verifying the Linux build when working on a macOS host.
## Docker
To build the contributor image locally for x64 and then run it with the repo mounted under `/workspace`:
To build the Docker image locally for x64 and then run it with the repo mounted under `/workspace`:
```shell
CODEX_DOCKER_IMAGE_NAME=codex-linux-dev
@@ -40,6 +14,17 @@ docker run --platform=linux/amd64 --rm -it -e CARGO_TARGET_DIR=/workspace/codex-
Note that `/workspace/target` will contain the binaries built for your host platform, so we include `-e CARGO_TARGET_DIR=/workspace/codex-rs/target-amd64` in the `docker run` command so that the binaries built inside your container are written to a separate directory.
For arm64, specify `--platform=linux/arm64` instead for both `docker build` and `docker run`.
For arm64, specify `--platform=linux/arm64` instead for both `docker build` and `docker run`.
Currently, the contributor `Dockerfile` works for both x64 and arm64 Linux, though you need to run `rustup target add x86_64-unknown-linux-musl` yourself to install the musl toolchain for x64.
Currently, the `Dockerfile` works for both x64 and arm64 Linux, though you need to run `rustup target add x86_64-unknown-linux-musl` yourself to install the musl toolchain for x64.
## VS Code
VS Code recognizes the `devcontainer.json` file and gives you the option to develop Codex in a container. Currently, `devcontainer.json` builds and runs the `arm64` flavor of the container.
From the integrated terminal in VS Code, you can build either flavor of the `arm64` build (GNU or musl):
```shell
cargo build --target aarch64-unknown-linux-musl
cargo build --target aarch64-unknown-linux-gnu
```

View File

@@ -1,76 +0,0 @@
{
"$schema": "https://raw.githubusercontent.com/devcontainers/spec/main/schemas/devContainer.schema.json",
"name": "Codex (Secure)",
"build": {
"dockerfile": "Dockerfile.secure",
"context": "..",
"args": {
"TZ": "${localEnv:TZ:UTC}",
"NODE_MAJOR": "22",
"RUST_TOOLCHAIN": "1.92.0",
"CODEX_NPM_VERSION": "latest"
}
},
"runArgs": [
"--cap-add=NET_ADMIN",
"--cap-add=NET_RAW"
],
"init": true,
"updateRemoteUserUID": true,
"remoteUser": "vscode",
"workspaceMount": "source=${localWorkspaceFolder},target=/workspace,type=bind,consistency=delegated",
"workspaceFolder": "/workspace",
"mounts": [
"source=codex-commandhistory-${devcontainerId},target=/commandhistory,type=volume",
"source=codex-home-${devcontainerId},target=/home/vscode/.codex,type=volume",
"source=codex-gh-${devcontainerId},target=/home/vscode/.config/gh,type=volume",
"source=codex-cargo-registry-${devcontainerId},target=/home/vscode/.cargo/registry,type=volume",
"source=codex-cargo-git-${devcontainerId},target=/home/vscode/.cargo/git,type=volume",
"source=codex-rustup-${devcontainerId},target=/home/vscode/.rustup,type=volume",
"source=${localEnv:HOME}/.gitconfig,target=/home/vscode/.gitconfig,type=bind,readonly"
],
"containerEnv": {
"RUST_BACKTRACE": "1",
"CODEX_UNSAFE_ALLOW_NO_SANDBOX": "1",
"CODEX_ENABLE_FIREWALL": "1",
"CODEX_INCLUDE_GITHUB_META_RANGES": "1",
"OPENAI_ALLOWED_DOMAINS": "api.openai.com auth.openai.com github.com api.github.com codeload.github.com raw.githubusercontent.com objects.githubusercontent.com crates.io index.crates.io static.crates.io static.rust-lang.org registry.npmjs.org pypi.org files.pythonhosted.org",
"CARGO_TARGET_DIR": "/workspace/.cache/cargo-target",
"GIT_CONFIG_GLOBAL": "/home/vscode/.gitconfig.local",
"COREPACK_ENABLE_DOWNLOAD_PROMPT": "0",
"PYTHONDONTWRITEBYTECODE": "1",
"PIP_DISABLE_PIP_VERSION_CHECK": "1"
},
"remoteEnv": {
"OPENAI_API_KEY": "${localEnv:OPENAI_API_KEY}"
},
"postCreateCommand": "python3 /opt/post_install.py",
"postStartCommand": "bash /opt/post_start.sh",
"waitFor": "postStartCommand",
"customizations": {
"vscode": {
"settings": {
"terminal.integrated.defaultProfile.linux": "zsh",
"terminal.integrated.profiles.linux": {
"bash": {
"path": "bash",
"icon": "terminal-bash"
},
"zsh": {
"path": "zsh"
}
},
"files.trimTrailingWhitespace": true,
"files.insertFinalNewline": true,
"files.trimFinalNewlines": true
},
"extensions": [
"openai.chatgpt",
"rust-lang.rust-analyzer",
"tamasfe.even-better-toml",
"vadimcn.vscode-lldb",
"ms-azuretools.vscode-docker"
]
}
}
}

View File

@@ -1,170 +0,0 @@
#!/usr/bin/env bash
# init-firewall.sh — install a default-deny egress firewall that only
# permits traffic to an allowlist of resolved domains (plus DNS/localhost).
set -euo pipefail
IFS=$'\n\t'
# Allowlist file provisioned by post-start.sh: one domain per line,
# '#' comments and blank lines are ignored.
allowed_domains_file="/etc/codex/allowed_domains.txt"
include_github_meta_ranges="${CODEX_INCLUDE_GITHUB_META_RANGES:-1}"
if [ -f "$allowed_domains_file" ]; then
# Strip comment/blank lines and load the remainder into an array.
mapfile -t allowed_domains < <(sed '/^\s*#/d;/^\s*$/d' "$allowed_domains_file")
else
# Fallback default when no allowlist file was provisioned.
allowed_domains=("api.openai.com")
fi
# Refuse to bring up a firewall with an empty allowlist.
if [ "${#allowed_domains[@]}" -eq 0 ]; then
echo "ERROR: No allowed domains configured"
exit 1
fi
# add_ipv4_cidr_to_allowlist SOURCE CIDR
# Validate that CIDR looks like an IPv4 range and add it to the
# allowed-domains ipset; abort the whole script on a malformed entry.
add_ipv4_cidr_to_allowlist() {
local source="$1"
local cidr="$2"
if [[ "$cidr" =~ ^[0-9]{1,3}(\.[0-9]{1,3}){3}/[0-9]{1,2}$ ]]; then
# -exist makes re-adding a known range a no-op.
ipset add allowed-domains "$cidr" -exist
else
echo "ERROR: Invalid ${source} CIDR range: $cidr"
exit 1
fi
}
configure_ipv6_default_deny() {
# IPv6 is denied outright: otherwise the IPv4 allowlist could be
# bypassed via AAAA records.
if ! command -v ip6tables >/dev/null 2>&1; then
echo "ERROR: ip6tables is required to enforce IPv6 default-deny policy"
exit 1
fi
# Flush every IPv6 chain; the nat table may be absent, so ignore failures.
ip6tables -F
ip6tables -X
ip6tables -t mangle -F
ip6tables -t mangle -X
ip6tables -t nat -F 2>/dev/null || true
ip6tables -t nat -X 2>/dev/null || true
# Permit only loopback and already-established flows.
ip6tables -A INPUT -i lo -j ACCEPT
ip6tables -A OUTPUT -o lo -j ACCEPT
ip6tables -A INPUT -m state --state ESTABLISHED,RELATED -j ACCEPT
ip6tables -A OUTPUT -m state --state ESTABLISHED,RELATED -j ACCEPT
# Default-deny everything else over IPv6.
ip6tables -P INPUT DROP
ip6tables -P FORWARD DROP
ip6tables -P OUTPUT DROP
echo "IPv6 firewall policy configured (default-deny)"
}
# Preserve docker-managed DNS NAT rules before clearing tables.
docker_dns_rules="$(iptables-save -t nat | grep "127\\.0\\.0\\.11" || true)"
# Flush all IPv4 chains and drop any allowlist set left from a prior run.
iptables -F
iptables -X
iptables -t nat -F
iptables -t nat -X
iptables -t mangle -F
iptables -t mangle -X
ipset destroy allowed-domains 2>/dev/null || true
# Re-create Docker's embedded-DNS NAT chains and replay the saved rules.
if [ -n "$docker_dns_rules" ]; then
echo "Restoring Docker DNS NAT rules"
iptables -t nat -N DOCKER_OUTPUT 2>/dev/null || true
iptables -t nat -N DOCKER_POSTROUTING 2>/dev/null || true
while IFS= read -r rule; do
[ -z "$rule" ] && continue
# NOTE(review): $rule is intentionally unquoted so iptables receives
# each saved token as a separate argument.
iptables -t nat $rule
done <<< "$docker_dns_rules"
fi
# Allow DNS resolution and localhost communication.
iptables -A OUTPUT -p udp --dport 53 -j ACCEPT
iptables -A OUTPUT -p tcp --dport 53 -j ACCEPT
iptables -A INPUT -p udp --sport 53 -j ACCEPT
iptables -A INPUT -p tcp --sport 53 -j ACCEPT
iptables -A INPUT -i lo -j ACCEPT
iptables -A OUTPUT -o lo -j ACCEPT
# Resolve each allowed domain to IPv4 addresses and add them to the set.
ipset create allowed-domains hash:net
for domain in "${allowed_domains[@]}"; do
echo "Resolving $domain"
ips="$(dig +short A "$domain" | sed '/^\s*$/d')"
if [ -z "$ips" ]; then
echo "ERROR: Failed to resolve $domain"
exit 1
fi
while IFS= read -r ip; do
# Reject CNAME chains / garbage: only dotted-quad IPv4 is accepted.
if [[ ! "$ip" =~ ^[0-9]{1,3}(\.[0-9]{1,3}){3}$ ]]; then
echo "ERROR: Invalid IPv4 address from DNS for $domain: $ip"
exit 1
fi
ipset add allowed-domains "$ip" -exist
done <<< "$ips"
done
# Optionally widen the allowlist with GitHub's published IPv4 meta ranges.
if [ "$include_github_meta_ranges" = "1" ]; then
echo "Fetching GitHub meta ranges"
github_meta="$(curl -fsSL --connect-timeout 10 https://api.github.com/meta)"
if ! echo "$github_meta" | jq -e '.web and .api and .git' >/dev/null; then
echo "ERROR: GitHub meta response missing expected fields"
exit 1
fi
while IFS= read -r cidr; do
[ -z "$cidr" ] && continue
if [[ "$cidr" == *:* ]]; then
# Current policy enforces IPv4-only ipset entries.
continue
fi
add_ipv4_cidr_to_allowlist "GitHub" "$cidr"
done < <(echo "$github_meta" | jq -r '((.web // []) + (.api // []) + (.git // []))[]' | sort -u)
fi
# Permit traffic to/from the host's subnet (derived from the default
# gateway, assumed /24).
host_ip="$(ip route | awk '/default/ {print $3; exit}')"
if [ -z "$host_ip" ]; then
echo "ERROR: Failed to detect host IP"
exit 1
fi
host_network="$(echo "$host_ip" | sed 's/\.[0-9]*$/.0\/24/')"
iptables -A INPUT -s "$host_network" -j ACCEPT
iptables -A OUTPUT -d "$host_network" -j ACCEPT
# Default-deny, then allow established flows and allowlisted destinations.
iptables -P INPUT DROP
iptables -P FORWARD DROP
iptables -P OUTPUT DROP
iptables -A INPUT -m state --state ESTABLISHED,RELATED -j ACCEPT
iptables -A OUTPUT -m state --state ESTABLISHED,RELATED -j ACCEPT
iptables -A OUTPUT -m set --match-set allowed-domains dst -j ACCEPT
# Reject rather than silently drop to make policy failures obvious.
iptables -A INPUT -j REJECT --reject-with icmp-admin-prohibited
iptables -A OUTPUT -j REJECT --reject-with icmp-admin-prohibited
iptables -A FORWARD -j REJECT --reject-with icmp-admin-prohibited
configure_ipv6_default_deny
echo "Firewall configuration complete"
# Smoke tests: a non-allowlisted host must be unreachable, allowlisted
# hosts must be reachable, and IPv6 egress must be blocked.
if curl --connect-timeout 5 https://example.com >/dev/null 2>&1; then
echo "ERROR: Firewall verification failed - was able to reach https://example.com"
exit 1
fi
if ! curl --connect-timeout 5 https://api.openai.com >/dev/null 2>&1; then
echo "ERROR: Firewall verification failed - unable to reach https://api.openai.com"
exit 1
fi
if [ "$include_github_meta_ranges" = "1" ] && ! curl --connect-timeout 5 https://api.github.com/zen >/dev/null 2>&1; then
echo "ERROR: Firewall verification failed - unable to reach https://api.github.com"
exit 1
fi
if curl --connect-timeout 5 -6 https://example.com >/dev/null 2>&1; then
echo "ERROR: Firewall verification failed - was able to reach https://example.com over IPv6"
exit 1
fi
echo "Firewall verification passed"

View File

@@ -1,36 +0,0 @@
#!/usr/bin/env bash
# post-start.sh — on container start, validate the domain allowlist and
# hand it to init-firewall.sh (unless the firewall is disabled).
set -euo pipefail
# CODEX_ENABLE_FIREWALL=1 (the default) selects strict mode; any other
# value leaves networking permissive and exits successfully.
if [ "${CODEX_ENABLE_FIREWALL:-1}" != "1" ]; then
echo "[devcontainer] Firewall mode: permissive (CODEX_ENABLE_FIREWALL=${CODEX_ENABLE_FIREWALL:-unset})."
exit 0
fi
echo "[devcontainer] Firewall mode: strict"
# Split OPENAI_ALLOWED_DOMAINS on commas/spaces, drop blanks, dedupe.
domains_raw="${OPENAI_ALLOWED_DOMAINS:-api.openai.com}"
mapfile -t domains < <(printf '%s\n' "$domains_raw" | tr ', ' '\n\n' | sed '/^$/d' | sort -u)
if [ "${#domains[@]}" -eq 0 ]; then
echo "[devcontainer] No allowed domains configured."
exit 1
fi
# Validate every domain before it reaches the root-owned allowlist file.
tmp_file="$(mktemp)"
for domain in "${domains[@]}"; do
if [[ ! "$domain" =~ ^[a-zA-Z0-9][a-zA-Z0-9.-]*\.[a-zA-Z]{2,}$ ]]; then
echo "[devcontainer] Invalid domain in OPENAI_ALLOWED_DOMAINS: $domain"
rm -f "$tmp_file"
exit 1
fi
printf '%s\n' "$domain" >> "$tmp_file"
done
# Install the allowlist read-only under /etc/codex for init-firewall.sh.
sudo install -d -m 0755 /etc/codex
sudo cp "$tmp_file" /etc/codex/allowed_domains.txt
sudo chown root:root /etc/codex/allowed_domains.txt
sudo chmod 0444 /etc/codex/allowed_domains.txt
rm -f "$tmp_file"
echo "[devcontainer] Applying firewall policy for domains: ${domains[*]}"
# Run the firewall installer as root, preserving the GitHub-ranges toggle.
sudo --preserve-env=CODEX_INCLUDE_GITHUB_META_RANGES /usr/local/bin/init-firewall.sh

View File

@@ -1,113 +0,0 @@
#!/usr/bin/env python3
"""Post-install configuration for the Codex devcontainer."""
from __future__ import annotations
import os
import subprocess
import sys
from pathlib import Path
def ensure_history_files() -> None:
    """Make sure the shared shell-history files exist under /commandhistory."""
    history_dir = Path("/commandhistory")
    history_dir.mkdir(parents=True, exist_ok=True)
    # Create both history files idempotently so shells can append on login.
    for name in (".bash_history", ".zsh_history"):
        (history_dir / name).touch(exist_ok=True)
def fix_directory_ownership() -> None:
    """Chown mounted volume directories to the current user when needed.

    Named volumes start out root-owned on first use; reclaim each one for
    the container user's uid/gid. Failures are reported as warnings rather
    than aborting post-install.
    """
    uid, gid = os.getuid(), os.getgid()
    candidates = [
        Path.home() / ".codex",
        Path.home() / ".config" / "gh",
        Path.home() / ".cargo",
        Path.home() / ".rustup",
        Path("/commandhistory"),
    ]
    for path in candidates:
        if not path.exists():
            continue
        # Skip paths that are already owned by the current user.
        info = path.stat()
        if (info.st_uid, info.st_gid) == (uid, gid):
            continue
        try:
            subprocess.run(
                ["sudo", "chown", "-R", f"{uid}:{gid}", str(path)],
                check=True,
                capture_output=True,
                text=True,
            )
            print(f"[post_install] fixed ownership: {path}", file=sys.stderr)
        except subprocess.CalledProcessError as err:
            # Best-effort: log the chown failure and keep going.
            print(
                f"[post_install] warning: could not fix ownership of {path}: {err.stderr.strip()}",
                file=sys.stderr,
            )
def setup_git_config() -> None:
    """Write container-local git configuration files into the user's home.

    Creates ~/.gitignore_global and ~/.gitconfig.local; the local config
    includes the bind-mounted host ~/.gitconfig when present and sets the
    global excludes file plus merge/diff display preferences.
    """
    home = Path.home()
    host_gitconfig = home / ".gitconfig"
    local_gitconfig = home / ".gitconfig.local"
    gitignore_global = home / ".gitignore_global"
    gitignore_global.write_text(
        """# Codex
.codex/
# Rust
/target/
# Node
node_modules/
# Python
__pycache__/
*.pyc
# Editors
.vscode/
.idea/
# macOS
.DS_Store
""",
        encoding="utf-8",
    )
    # Only include the host gitconfig when the bind mount provided one.
    if host_gitconfig.exists():
        include_line = f"[include]\n path = {host_gitconfig}\n\n"
    else:
        include_line = ""
    local_gitconfig.write_text(
        f"""# Container-local git configuration
{include_line}[core]
excludesfile = {gitignore_global}
[merge]
conflictstyle = diff3
[diff]
colorMoved = default
""",
        encoding="utf-8",
    )
def main() -> None:
    """Run every post-install step in order, logging progress to stderr."""
    print("[post_install] configuring devcontainer...", file=sys.stderr)
    for step in (ensure_history_files, fix_directory_ownership, setup_git_config):
        step()
    print("[post_install] complete", file=sys.stderr)


if __name__ == "__main__":
    main()

View File

@@ -5,14 +5,6 @@ use codex_utils_absolute_path::AbsolutePathBuf;
use std::collections::HashMap;
use std::future::Future;
use std::io;
#[cfg(target_os = "linux")]
use std::net::Ipv4Addr;
#[cfg(target_os = "linux")]
use std::net::TcpListener;
#[cfg(target_os = "linux")]
use std::net::TcpStream;
#[cfg(target_os = "linux")]
use std::os::fd::AsRawFd;
use std::path::Path;
use std::path::PathBuf;
use std::process::ExitStatus;
@@ -435,34 +427,6 @@ fn unix_sock_body() {
recvd
);
let sent = libc::sendto(
fds[0],
msg.as_ptr() as *const libc::c_void,
msg.len(),
0,
std::ptr::null(),
0,
);
assert!(
sent >= 0,
"sendto(NULL, 0) failed: {}",
io::Error::last_os_error()
);
let recvd = libc::recvfrom(
fds[1],
buf.as_mut_ptr() as *mut libc::c_void,
buf.len(),
0,
std::ptr::null_mut(),
std::ptr::null_mut(),
);
assert!(
recvd >= 0,
"recvfrom() after sendto(NULL, 0) failed: {}",
io::Error::last_os_error()
);
assert_eq!(&buf[..(recvd as usize)], &msg[..]);
// Also exercise AF_UNIX stream socketpair quickly to ensure AF_UNIX in general works.
let mut sfds = [0i32; 2];
let sr = libc::socketpair(libc::AF_UNIX, libc::SOCK_STREAM, 0, sfds.as_mut_ptr());
@@ -496,113 +460,23 @@ fn unix_sock_body() {
#[tokio::test]
async fn allow_unix_socketpair_recvfrom() {
let result = run_code_under_sandbox(
run_code_under_sandbox(
"allow_unix_socketpair_recvfrom",
&SandboxPolicy::new_read_only_policy(),
|| async { unix_sock_body() },
)
.await
.expect("should be able to reexec");
assert_sandbox_reexec_succeeded(result);
}
const IN_SANDBOX_ENV_VAR: &str = "IN_SANDBOX";
#[cfg(target_os = "linux")]
const INHERITED_CONNECTED_SOCKET_FD_ENV_VAR: &str = "INHERITED_CONNECTED_SOCKET_FD";
#[tokio::test]
#[cfg(target_os = "linux")]
async fn inherited_connected_tcp_socket_cannot_send_after_sandbox_exec() {
let mut sandbox_env = HashMap::new();
let mut connected_socket_guards: Option<(TcpStream, TcpStream)> = None;
if std::env::var(IN_SANDBOX_ENV_VAR).is_err() {
sandbox_env = match linux_sandbox_test_env().await {
Some(env) => env,
None => return,
};
let listener =
TcpListener::bind((Ipv4Addr::LOCALHOST, 0)).expect("bind local TCP listener");
let stream =
TcpStream::connect(listener.local_addr().expect("listener address")).expect("connect");
let inherited_fd = stream.as_raw_fd();
clear_cloexec(inherited_fd);
sandbox_env.insert(
INHERITED_CONNECTED_SOCKET_FD_ENV_VAR.to_string(),
inherited_fd.to_string(),
);
let accepted_stream = listener.accept().expect("accept connection").0;
connected_socket_guards = Some((stream, accepted_stream));
}
let result = run_code_under_sandbox_with_env(
"inherited_connected_tcp_socket_cannot_send_after_sandbox_exec",
&SandboxPolicy::new_read_only_policy(),
sandbox_env,
|| async { inherited_connected_tcp_socket_send_body() },
)
.await
.expect("should be able to reexec");
drop(connected_socket_guards);
assert_sandbox_reexec_succeeded(result);
}
#[cfg(target_os = "linux")]
#[expect(clippy::expect_used)]
fn inherited_connected_tcp_socket_send_body() {
let fd = std::env::var(INHERITED_CONNECTED_SOCKET_FD_ENV_VAR)
.expect("inherited fd env var should be set")
.parse::<libc::c_int>()
.expect("inherited fd should parse");
let msg = b"should_not_escape";
let sent = unsafe {
libc::sendto(
fd,
msg.as_ptr() as *const libc::c_void,
msg.len(),
0,
std::ptr::null(),
0,
)
};
assert!(
sent < 0,
"sendto(NULL, 0) on inherited connected TCP fd unexpectedly wrote {sent} bytes"
);
}
#[cfg(target_os = "linux")]
fn clear_cloexec(fd: libc::c_int) {
let flags = unsafe { libc::fcntl(fd, libc::F_GETFD) };
assert!(flags >= 0, "F_GETFD failed: {}", io::Error::last_os_error());
let result = unsafe { libc::fcntl(fd, libc::F_SETFD, flags & !libc::FD_CLOEXEC) };
assert!(
result >= 0,
"F_SETFD failed: {}",
io::Error::last_os_error()
);
}
pub async fn run_code_under_sandbox<F, Fut>(
test_selector: &str,
policy: &SandboxPolicy,
child_body: F,
) -> io::Result<Option<ExitStatus>>
where
F: FnOnce() -> Fut + Send + 'static,
Fut: Future<Output = ()> + Send + 'static,
{
run_code_under_sandbox_with_env(test_selector, policy, HashMap::new(), child_body).await
}
#[expect(clippy::expect_used)]
pub async fn run_code_under_sandbox_with_env<F, Fut>(
test_selector: &str,
policy: &SandboxPolicy,
mut env: HashMap<String, String>,
child_body: F,
) -> io::Result<Option<ExitStatus>>
where
F: FnOnce() -> Fut + Send + 'static,
Fut: Future<Output = ()> + Send + 'static,
@@ -621,14 +495,13 @@ where
// Your existing launcher:
let command_cwd = std::env::current_dir().expect("should be able to get current dir");
let sandbox_cwd = command_cwd.clone();
env.insert(IN_SANDBOX_ENV_VAR.to_string(), "1".to_string());
let mut child = spawn_command_under_sandbox(
cmds,
command_cwd,
policy,
sandbox_cwd.as_path(),
stdio_policy,
env,
HashMap::from([("IN_SANDBOX".into(), "1".into())]),
)
.await?;
@@ -640,9 +513,3 @@ where
Ok(None)
}
}
/// Panics when the sandboxed child ran and exited unsuccessfully.
///
/// `None` means the child body executed in-process (no re-exec happened),
/// which is treated as trivially fine.
fn assert_sandbox_reexec_succeeded(status: Option<ExitStatus>) {
    match status {
        None => {}
        Some(status) => {
            assert!(status.success(), "sandboxed child exited with {status:?}");
        }
    }
}

View File

@@ -83,6 +83,7 @@ impl BwrapNetworkMode {
pub(crate) struct BwrapArgs {
pub args: Vec<String>,
pub preserved_files: Vec<File>,
pub cleanup_mount_points: Vec<PathBuf>,
}
/// Wrap a command with bubblewrap so the filesystem is read-only by default,
@@ -104,6 +105,7 @@ pub(crate) fn create_bwrap_command_args(
Ok(BwrapArgs {
args: command,
preserved_files: Vec::new(),
cleanup_mount_points: Vec::new(),
})
} else {
Ok(create_bwrap_flags_full_filesystem(command, options))
@@ -143,6 +145,7 @@ fn create_bwrap_flags_full_filesystem(command: Vec<String>, options: BwrapOption
BwrapArgs {
args,
preserved_files: Vec::new(),
cleanup_mount_points: Vec::new(),
}
}
@@ -157,6 +160,7 @@ fn create_bwrap_flags(
let BwrapArgs {
args: filesystem_args,
preserved_files,
cleanup_mount_points,
} = create_filesystem_args(file_system_sandbox_policy, sandbox_policy_cwd)?;
let normalized_command_cwd = normalize_command_cwd_for_bwrap(command_cwd);
let mut args = Vec::new();
@@ -188,6 +192,7 @@ fn create_bwrap_flags(
Ok(BwrapArgs {
args,
preserved_files,
cleanup_mount_points,
})
}
@@ -295,6 +300,7 @@ fn create_filesystem_args(
args
};
let mut preserved_files = Vec::new();
let mut cleanup_mount_points = Vec::new();
let mut allowed_write_paths = Vec::with_capacity(writable_roots.len());
for writable_root in &writable_roots {
let root = writable_root.root.as_path();
@@ -331,6 +337,7 @@ fn create_filesystem_args(
append_unreadable_root_args(
&mut args,
&mut preserved_files,
&mut cleanup_mount_points,
unreadable_root,
&allowed_write_paths,
)?;
@@ -366,7 +373,12 @@ fn create_filesystem_args(
}
read_only_subpaths.sort_by_key(|path| path_depth(path));
for subpath in read_only_subpaths {
append_read_only_subpath_args(&mut args, &subpath, &allowed_write_paths);
append_read_only_subpath_args(
&mut args,
&mut cleanup_mount_points,
&subpath,
&allowed_write_paths,
);
}
let mut nested_unreadable_roots: Vec<PathBuf> = unreadable_roots
.iter()
@@ -382,6 +394,7 @@ fn create_filesystem_args(
append_unreadable_root_args(
&mut args,
&mut preserved_files,
&mut cleanup_mount_points,
&unreadable_root,
&allowed_write_paths,
)?;
@@ -403,6 +416,7 @@ fn create_filesystem_args(
append_unreadable_root_args(
&mut args,
&mut preserved_files,
&mut cleanup_mount_points,
&unreadable_root,
&allowed_write_paths,
)?;
@@ -411,6 +425,7 @@ fn create_filesystem_args(
Ok(BwrapArgs {
args,
preserved_files,
cleanup_mount_points,
})
}
@@ -496,6 +511,7 @@ fn append_mount_target_parent_dir_args(args: &mut Vec<String>, mount_target: &Pa
fn append_read_only_subpath_args(
args: &mut Vec<String>,
cleanup_mount_points: &mut Vec<PathBuf>,
subpath: &Path,
allowed_write_paths: &[PathBuf],
) {
@@ -512,17 +528,36 @@ fn append_read_only_subpath_args(
}
if !subpath.exists() {
// Mask the first missing component so the process cannot create a
// protected subtree under a writable root before reaching the leaf.
if let Some(first_missing_component) = find_first_non_existent_component(subpath)
&& is_within_allowed_write_paths(&first_missing_component, allowed_write_paths)
{
args.push("--ro-bind".to_string());
args.push("/dev/null".to_string());
args.push(path_to_string(&first_missing_component));
append_bwrap_mount_point_read_only_bind_args(
args,
cleanup_mount_points,
&first_missing_component,
);
}
return;
}
if is_within_allowed_write_paths(subpath, allowed_write_paths) {
if subpath.file_name().is_some_and(|name| name == ".codex") && subpath.is_dir() {
let subpath = path_to_string(subpath);
args.push("--dir".to_string());
args.push(subpath.clone());
args.push("--ro-bind-try".to_string());
args.push(subpath.clone());
args.push(subpath.clone());
args.push("--remount-ro".to_string());
args.push(subpath);
return;
}
if fs::canonicalize(subpath).is_err() {
append_bwrap_mount_point_read_only_bind_args(args, cleanup_mount_points, subpath);
return;
}
args.push("--ro-bind".to_string());
args.push(path_to_string(subpath));
args.push(path_to_string(subpath));
@@ -532,6 +567,7 @@ fn append_read_only_subpath_args(
fn append_unreadable_root_args(
args: &mut Vec<String>,
preserved_files: &mut Vec<File>,
cleanup_mount_points: &mut Vec<PathBuf>,
unreadable_root: &Path,
allowed_write_paths: &[PathBuf],
) -> Result<()> {
@@ -551,9 +587,11 @@ fn append_unreadable_root_args(
if let Some(first_missing_component) = find_first_non_existent_component(unreadable_root)
&& is_within_allowed_write_paths(&first_missing_component, allowed_write_paths)
{
args.push("--ro-bind".to_string());
args.push("/dev/null".to_string());
args.push(path_to_string(&first_missing_component));
append_bwrap_mount_point_read_only_bind_args(
args,
cleanup_mount_points,
&first_missing_component,
);
}
return Ok(());
}
@@ -602,18 +640,38 @@ fn append_existing_unreadable_path_args(
return Ok(());
}
args.push("--perms".to_string());
args.push("000".to_string());
append_empty_file_read_only_bind_args(args, preserved_files, unreadable_root)?;
Ok(())
}
fn append_empty_file_read_only_bind_args(
args: &mut Vec<String>,
preserved_files: &mut Vec<File>,
mount_target: &Path,
) -> Result<()> {
if preserved_files.is_empty() {
preserved_files.push(File::open("/dev/null")?);
}
let null_fd = preserved_files[0].as_raw_fd().to_string();
args.push("--perms".to_string());
args.push("000".to_string());
args.push("--ro-bind-data".to_string());
args.push(null_fd);
args.push(path_to_string(unreadable_root));
args.push(path_to_string(mount_target));
Ok(())
}
/// Masks `mount_target` by read-only binding `/dev/null` over it and
/// records the placeholder in `cleanup_mount_points` so the caller can
/// remove it once the sandboxed process exits.
fn append_bwrap_mount_point_read_only_bind_args(
    args: &mut Vec<String>,
    cleanup_mount_points: &mut Vec<PathBuf>,
    mount_target: &Path,
) {
    let target = path_to_string(mount_target);
    args.extend(["--ro-bind".to_string(), "/dev/null".to_string(), target]);
    cleanup_mount_points.push(mount_target.to_path_buf());
}
/// Returns true when `path` is under any allowed writable root.
fn is_within_allowed_write_paths(path: &Path, allowed_write_paths: &[PathBuf]) -> bool {
allowed_write_paths
@@ -1054,33 +1112,122 @@ mod tests {
Path::new("/"),
)
.expect("bwrap fs args");
assert_eq!(args.preserved_files.len(), 0);
assert_eq!(args.cleanup_mount_points, vec![PathBuf::from("/.codex")]);
assert_eq!(
args.args,
vec![
&args.args[..8],
[
// Start from a read-only view of the full filesystem.
"--ro-bind".to_string(),
"/".to_string(),
"/".to_string(),
"--ro-bind",
"/",
"/",
// Recreate a writable /dev inside the sandbox.
"--dev".to_string(),
"/dev".to_string(),
"--dev",
"/dev",
// Make the writable root itself writable again.
"--bind".to_string(),
"/".to_string(),
"/".to_string(),
// Mask the default protected .codex subpath under that writable
// root. Because the root is `/` in this test, the carveout path
// appears as `/.codex`.
"--ro-bind".to_string(),
"/dev/null".to_string(),
"/.codex".to_string(),
// Rebind /dev after the root bind so device nodes remain
// writable/usable inside the writable root.
"--bind".to_string(),
"/dev".to_string(),
"/dev".to_string(),
"--bind",
"/",
"/",
]
);
let codex_mask_index = args
.args
.windows(3)
.position(|window| window == ["--ro-bind", "/dev/null", "/.codex"])
.expect("missing protected .codex should be masked under bwrap");
let dev_rebind_index = args
.args
.windows(3)
.position(|window| window == ["--bind", "/dev", "/dev"])
.expect("expected /dev to be rebound after the writable root");
assert!(codex_mask_index < dev_rebind_index);
}
#[test]
// A read-only carveout whose path does not exist yet must be masked at the
// FIRST missing path component (so the sandboxed process cannot create the
// protected subtree), and that placeholder must be registered for cleanup.
fn masks_first_missing_component_for_nested_read_only_subpaths() {
let temp_dir = TempDir::new().expect("temp dir");
// `<tmp>/missing/protected` does not exist; `<tmp>/missing` is the first
// non-existent component.
let protected_path = temp_dir.path().join("missing").join("protected");
let first_missing_component = temp_dir.path().join("missing");
// Policy: writable temp dir with a read-only carveout for the (missing)
// protected path.
let policy = FileSystemSandboxPolicy::restricted(vec![
FileSystemSandboxEntry {
path: FileSystemPath::Special {
value: FileSystemSpecialPath::Minimal,
},
access: FileSystemAccessMode::Read,
},
FileSystemSandboxEntry {
path: FileSystemPath::Path {
path: AbsolutePathBuf::try_from(temp_dir.path()).expect("absolute temp dir"),
},
access: FileSystemAccessMode::Write,
},
FileSystemSandboxEntry {
path: FileSystemPath::Path {
path: AbsolutePathBuf::try_from(protected_path.as_path())
.expect("absolute protected path"),
},
access: FileSystemAccessMode::Read,
},
]);
let args = create_filesystem_args(&policy, temp_dir.path()).expect("filesystem args");
// The /dev/null placeholder must be scheduled for post-exit cleanup.
assert!(
args.cleanup_mount_points.contains(&first_missing_component),
"missing protected subtree should be registered for cleanup: {:#?}",
args.cleanup_mount_points
);
let first_missing_component = path_to_string(&first_missing_component);
let protected_path = path_to_string(&protected_path);
// No empty-file preservation should be needed for this mask.
assert_eq!(args.preserved_files.len(), 0);
// Mask present at the first missing component...
assert!(
args.args.windows(3).any(|window| {
window == ["--ro-bind", "/dev/null", first_missing_component.as_str()]
}),
"missing protected subtree should be masked at first missing component: {:#?}",
args.args
);
// ...and NOT at the unreachable leaf below it.
assert!(
!args
.args
.windows(3)
.any(|window| window == ["--ro-bind", "/dev/null", protected_path.as_str()]),
"mask should target the first missing component, not the unreachable leaf: {:#?}",
args.args
);
}
#[test]
fn existing_dot_codex_uses_try_bind_fallback_without_cleanup() {
    let tmp = TempDir::new().expect("temp dir");
    // The workspace already contains a real `.codex` directory, so bwrap must
    // not register it as a placeholder to delete afterwards.
    let dot_codex_path = tmp.path().join(".codex");
    std::fs::create_dir(&dot_codex_path).expect("create .codex");
    let policy = FileSystemSandboxPolicy::restricted(vec![FileSystemSandboxEntry {
        path: FileSystemPath::Path {
            path: AbsolutePathBuf::try_from(tmp.path()).expect("absolute temp dir"),
        },
        access: FileSystemAccessMode::Write,
    }]);
    let args = create_filesystem_args(&policy, tmp.path()).expect("filesystem args");
    assert!(!args.cleanup_mount_points.contains(&dot_codex_path));
    let codex = path_to_string(&dot_codex_path);
    // Generic window search over the generated bwrap argv.
    let contains = |needle: &[&str]| args.args.windows(needle.len()).any(|window| window == needle);
    // Expected fallback sequence: ensure the dir exists, try a read-only
    // self-bind, then remount it read-only.
    assert!(contains(&["--dir", codex.as_str()]));
    assert!(contains(&["--ro-bind-try", codex.as_str(), codex.as_str()]));
    assert!(contains(&["--remount-ro", codex.as_str()]));
}
#[test]

View File

@@ -0,0 +1,162 @@
use std::ffi::OsStr;
use std::fs;
use std::os::unix::ffi::OsStrExt;
use std::path::Path;
use std::path::PathBuf;
/// Bookkeeping for one bwrap mount-point placeholder shared across processes.
///
/// Produced by [`register_bwrap_mount_points`] and consumed by
/// [`cleanup_bwrap_mount_points`].
#[derive(Debug)]
pub(crate) struct BwrapMountPointRegistration {
    /// Path bwrap mounts over; removed on cleanup only if left empty.
    mount_point: PathBuf,
    /// This process's marker file (named after its PID) inside `marker_dir`.
    marker_file: PathBuf,
    /// Shared directory holding one marker per process using `mount_point`.
    marker_dir: PathBuf,
}
/// Record this process as a user of each mount point by dropping a PID-named
/// marker file into a shared, per-mount-point marker directory.
///
/// Registration is best-effort: mount points whose marker directory or marker
/// file cannot be created are silently skipped.
pub(crate) fn register_bwrap_mount_points(
    mount_points: &[PathBuf],
) -> Vec<BwrapMountPointRegistration> {
    // BTreeSet both deduplicates and sorts, matching sort() + dedup().
    let unique: std::collections::BTreeSet<PathBuf> = mount_points.iter().cloned().collect();
    unique
        .into_iter()
        .filter_map(|mount_point| {
            let marker_dir = bwrap_mount_point_marker_dir(&mount_point);
            fs::create_dir_all(&marker_dir).ok()?;
            let marker_file = marker_dir.join(std::process::id().to_string());
            fs::write(&marker_file, b"").ok()?;
            Some(BwrapMountPointRegistration {
                mount_point,
                marker_file,
                marker_dir,
            })
        })
        .collect()
}
/// Drop this process's markers and, when no other process still holds a
/// marker, remove the empty mount-point placeholder and the marker directory.
///
/// All filesystem operations are best-effort; failures are ignored.
pub(crate) fn cleanup_bwrap_mount_points(registrations: &[BwrapMountPointRegistration]) {
    for registration in registrations {
        // Remove our own marker before checking for other live users.
        let _ = fs::remove_file(&registration.marker_file);
        if !has_active_bwrap_mount_point_markers(&registration.marker_dir) {
            remove_empty_bwrap_mount_point(&registration.mount_point);
            let _ = fs::remove_dir(&registration.marker_dir);
        }
    }
}
/// Return true when any marker in `marker_dir` belongs to a live process.
///
/// Stale markers (dead or unparsable PIDs) encountered before the first live
/// one are pruned as a side effect. An unreadable directory counts as "no
/// active markers".
fn has_active_bwrap_mount_point_markers(marker_dir: &Path) -> bool {
    let entries = match fs::read_dir(marker_dir) {
        Ok(entries) => entries,
        Err(_) => return false,
    };
    for entry in entries.flatten() {
        let marker_file = entry.path();
        if marker_pid_is_active(marker_file.file_name()) {
            return true;
        }
        // The owning process is gone; garbage-collect its marker.
        let _ = fs::remove_file(marker_file);
    }
    false
}
/// Return true when `pid` (a marker file name) refers to a live process.
///
/// Marker files are named after the PID of the process that created them, so
/// anything that does not parse as a strictly positive PID is treated as
/// stale. `kill(pid, 0)` probes liveness without delivering a signal; EPERM
/// still means the process exists (it is just owned by another user).
fn marker_pid_is_active(pid: Option<&OsStr>) -> bool {
    let Some(pid) = pid.and_then(OsStr::to_str) else {
        return false;
    };
    let Ok(pid) = pid.parse::<i32>() else {
        return false;
    };
    // Reject pid <= 0: per POSIX, kill(0, 0) signals the caller's own process
    // group and kill(-n, 0) a process group, both of which can succeed. A
    // stray marker named "0" or "-1" would otherwise look permanently active
    // and block mount-point cleanup forever.
    if pid <= 0 {
        return false;
    }
    let kill_res = unsafe { libc::kill(pid, 0) };
    kill_res == 0 || std::io::Error::last_os_error().raw_os_error() == Some(libc::EPERM)
}
/// Compute the shared marker directory for `mount_point`:
/// `<tmp>/codex-bwrap-mountpoints/<fnv1a-hash-of-path>`.
fn bwrap_mount_point_marker_dir(mount_point: &Path) -> PathBuf {
    let mut marker_dir = std::env::temp_dir();
    marker_dir.push("codex-bwrap-mountpoints");
    // Hash the raw path bytes so arbitrary paths map to flat directory names.
    marker_dir.push(hash_os_str(mount_point.as_os_str()));
    marker_dir
}
/// Hash the raw bytes of `value` with 64-bit FNV-1a and render the result as
/// a fixed-width (16 hex digit, zero-padded) lowercase string.
fn hash_os_str(value: &OsStr) -> String {
    const FNV_OFFSET_BASIS: u64 = 0xcbf29ce484222325;
    const FNV_PRIME: u64 = 0x100000001b3;
    // FNV-1a: xor the byte in first, then multiply by the prime.
    let hash = value
        .as_bytes()
        .iter()
        .fold(FNV_OFFSET_BASIS, |acc, byte| {
            (acc ^ u64::from(*byte)).wrapping_mul(FNV_PRIME)
        });
    format!("{hash:016x}")
}
/// Best-effort removal of a bind-mount placeholder.
///
/// Only deletes what bwrap itself could have created as a mount point: a
/// zero-length regular file or a completely empty directory. Anything else
/// (non-empty paths, symlinks, unreadable paths) is left untouched.
fn remove_empty_bwrap_mount_point(mount_point: &Path) {
    // symlink_metadata avoids following symlinks when classifying the path.
    let metadata = match fs::symlink_metadata(mount_point) {
        Ok(metadata) => metadata,
        Err(_) => return,
    };
    if metadata.is_file() {
        if metadata.len() == 0 {
            let _ = fs::remove_file(mount_point);
        }
        return;
    }
    if metadata.is_dir() {
        let is_empty = fs::read_dir(mount_point)
            .map(|mut entries| entries.next().is_none())
            .unwrap_or(false);
        if is_empty {
            let _ = fs::remove_dir(mount_point);
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    // Zero-length files and empty directories are the placeholders bwrap
    // leaves behind for bind mounts, so cleanup must delete both kinds.
    #[test]
    fn cleanup_bwrap_mount_points_removes_empty_mount_points() {
        let temp_dir = tempfile::TempDir::new().expect("tempdir");
        let empty_file = temp_dir.path().join("empty-file");
        let empty_dir = temp_dir.path().join("empty-dir");
        std::fs::write(&empty_file, "").expect("create empty file");
        std::fs::create_dir(&empty_dir).expect("create empty dir");
        let registrations = register_bwrap_mount_points(&[empty_file.clone(), empty_dir.clone()]);
        cleanup_bwrap_mount_points(&registrations);
        assert!(!empty_file.exists());
        assert!(!empty_dir.exists());
    }
    // Paths with real content must survive cleanup even when registered.
    #[test]
    fn cleanup_bwrap_mount_points_keeps_non_empty_paths() {
        let temp_dir = tempfile::TempDir::new().expect("tempdir");
        let non_empty_file = temp_dir.path().join("non-empty-file");
        let non_empty_dir = temp_dir.path().join("non-empty-dir");
        std::fs::write(&non_empty_file, "content").expect("create non-empty file");
        std::fs::create_dir(&non_empty_dir).expect("create non-empty dir");
        std::fs::write(non_empty_dir.join("child"), "").expect("create child");
        let registrations =
            register_bwrap_mount_points(&[non_empty_file.clone(), non_empty_dir.clone()]);
        cleanup_bwrap_mount_points(&registrations);
        assert!(non_empty_file.exists());
        assert!(non_empty_dir.exists());
    }
    // A marker named "1" (PID 1 is always alive) simulates a concurrent
    // sandbox sharing the mount point: cleanup must defer while it exists
    // and succeed once the marker is removed.
    #[test]
    fn cleanup_bwrap_mount_points_defers_when_another_sandbox_is_active() {
        let temp_dir = tempfile::TempDir::new().expect("tempdir");
        let empty_file = temp_dir.path().join("empty-file");
        std::fs::write(&empty_file, "").expect("create empty file");
        let registrations = register_bwrap_mount_points(std::slice::from_ref(&empty_file));
        let active_marker = registrations[0].marker_dir.join("1");
        std::fs::write(&active_marker, "").expect("create active marker");
        cleanup_bwrap_mount_points(&registrations);
        assert!(empty_file.exists());
        std::fs::remove_file(active_marker).expect("remove active marker");
        // Re-register so cleanup sees a marker directory with only our own
        // (now removable) marker in it.
        let registrations = register_bwrap_mount_points(std::slice::from_ref(&empty_file));
        cleanup_bwrap_mount_points(&registrations);
        assert!(!empty_file.exists());
    }
}

View File

@@ -1,108 +0,0 @@
//! File descriptor hygiene before entering the sandboxed command.
use std::io::ErrorKind;
const ESCALATE_SOCKET_ENV_VAR: &str = "CODEX_ESCALATE_SOCKET";
/// Close helper-inherited descriptors unless they are standard input/output/error,
/// already close-on-exec, or known helper IPC.
///
/// The sandboxed command can still create allowed local IPC after exec, but it
/// must not inherit an already-connected network socket from the launcher.
pub(crate) fn close_inherited_exec_fds() {
    // The escalation IPC socket named by CODEX_ESCALATE_SOCKET (if any) is
    // the one inherited descriptor that must survive.
    let preserved_fd = inherited_fd_to_preserve();
    let fds = match non_stdio_fds_from_proc() {
        Ok(fds) => fds,
        // /proc/self/fd is unavailable (NotFound): fall back to marking every
        // fd above stderr close-on-exec instead of closing individually.
        Err(err) if err.kind() == ErrorKind::NotFound => {
            mark_inherited_exec_fds_cloexec(preserved_fd);
            return;
        }
        // Any other enumeration failure is unexpected and fatal.
        Err(err) => panic!("failed to enumerate inherited file descriptors: {err}"),
    };
    for fd in fds {
        if Some(fd) == preserved_fd {
            continue;
        }
        close_fd_if_inheritable(fd);
    }
}
/// Resolve the fd named by `CODEX_ESCALATE_SOCKET`, if it is a valid
/// descriptor number strictly above stderr. Stdio fds are never preserved
/// this way because they are never closed in the first place.
fn inherited_fd_to_preserve() -> Option<libc::c_int> {
    let raw = std::env::var(ESCALATE_SOCKET_ENV_VAR).ok()?;
    let fd: libc::c_int = raw.parse().ok()?;
    if fd > libc::STDERR_FILENO { Some(fd) } else { None }
}
/// Mark every fd above stderr close-on-exec via close_range(2), carving out
/// `preserved_fd` when it falls inside the sweepable range.
fn mark_inherited_exec_fds_cloexec(preserved_fd: Option<libc::c_int>) {
    // First fd eligible for sweeping: stderr + 1.
    let start = (libc::STDERR_FILENO + 1) as libc::c_uint;
    // No preserved fd, or it lies below the sweep range: cover everything in
    // one call.
    let Some(preserved_fd) = preserved_fd
        .and_then(|fd| u32::try_from(fd).ok())
        .filter(|fd| *fd >= start)
    else {
        mark_fd_range_cloexec(start, u32::MAX);
        return;
    };
    // Sweep the two ranges flanking the preserved fd. The comparisons guard
    // against an empty lower range (preserved_fd == start) and against u32
    // overflow at the top of the fd space (preserved_fd == u32::MAX).
    if preserved_fd > start {
        mark_fd_range_cloexec(start, preserved_fd - 1);
    }
    if preserved_fd < u32::MAX {
        mark_fd_range_cloexec(preserved_fd + 1, u32::MAX);
    }
}
/// Set FD_CLOEXEC on every open fd in `[first, last]` using close_range(2).
///
/// Panics on failure: leaking a connected socket into the sandboxed command
/// would defeat the fd-hygiene guarantee this module provides.
fn mark_fd_range_cloexec(first: libc::c_uint, last: libc::c_uint) {
    // SAFETY: close_range takes three plain integers; no pointers or borrowed
    // memory cross the syscall boundary.
    let result = unsafe {
        libc::syscall(
            libc::SYS_close_range,
            first,
            last,
            libc::CLOSE_RANGE_CLOEXEC,
        )
    };
    if result != 0 {
        let err = std::io::Error::last_os_error();
        panic!("failed to mark inherited file descriptors close-on-exec: {err}");
    }
}
/// List every open fd above stderr by reading /proc/self/fd.
///
/// Each directory entry there is named after an open descriptor number;
/// entries that do not parse as integers are skipped. Errors from opening or
/// iterating the directory are propagated to the caller.
fn non_stdio_fds_from_proc() -> std::io::Result<Vec<libc::c_int>> {
    let mut fds = Vec::new();
    for entry in std::fs::read_dir("/proc/self/fd")? {
        let name = entry?.file_name();
        let Some(fd) = name.to_str().and_then(|name| name.parse::<libc::c_int>().ok()) else {
            continue;
        };
        if fd > libc::STDERR_FILENO {
            fds.push(fd);
        }
    }
    Ok(fds)
}
/// Close `fd` unless it is already marked close-on-exec.
fn close_fd_if_inheritable(fd: libc::c_int) {
    // Read the fd flags first so close-on-exec descriptors can be skipped.
    let flags = unsafe { libc::fcntl(fd, libc::F_GETFD) };
    if flags == -1 {
        let err = std::io::Error::last_os_error();
        // EBADF: the fd is not open (possibly closed concurrently) — that is
        // the end state we want, so only other errors are fatal.
        if err.raw_os_error() != Some(libc::EBADF) {
            panic!("failed to inspect inherited file descriptor {fd}: {err}");
        }
        return;
    }
    // Close-on-exec fds will be closed by exec itself; leave them alone.
    if flags & libc::FD_CLOEXEC != 0 {
        return;
    }
    let result = unsafe { libc::close(fd) };
    if result != 0 {
        let err = std::io::Error::last_os_error();
        // Tolerate EBADF here too for the same concurrent-close reason.
        if err.raw_os_error() != Some(libc::EBADF) {
            panic!("failed to close inherited file descriptor {fd}: {err}");
        }
    }
}

View File

@@ -190,6 +190,7 @@ fn install_network_seccomp_filter_on_current_thread(
deny_syscall(&mut rules, libc::SYS_getpeername);
deny_syscall(&mut rules, libc::SYS_getsockname);
deny_syscall(&mut rules, libc::SYS_shutdown);
deny_syscall(&mut rules, libc::SYS_sendto);
deny_syscall(&mut rules, libc::SYS_sendmmsg);
// NOTE: allowing recvfrom allows some tools like: `cargo clippy`
// to run with their socketpair + child processes for sub-proc
@@ -208,27 +209,8 @@ fn install_network_seccomp_filter_on_current_thread(
libc::AF_UNIX as u64,
)?])?;
// Allow send()/self-pipe wakeups on already-connected sockets, but
// continue denying sendto() when it names a destination address.
let sendto_dest_addr_rule = SeccompRule::new(vec![SeccompCondition::new(
4, // fifth argument (dest_addr)
SeccompCmpArgLen::Qword,
SeccompCmpOp::Ne,
0,
)?])?;
let sendto_addrlen_rule = SeccompRule::new(vec![SeccompCondition::new(
5, // sixth argument (addrlen)
SeccompCmpArgLen::Dword,
SeccompCmpOp::Ne,
0,
)?])?;
rules.insert(libc::SYS_socket, vec![unix_only_rule.clone()]);
rules.insert(libc::SYS_socketpair, vec![unix_only_rule]);
rules.insert(
libc::SYS_sendto,
vec![sendto_dest_addr_rule, sendto_addrlen_rule],
);
}
NetworkSeccompMode::ProxyRouted => {
// In proxy-routed mode we allow IP sockets in the isolated

View File

@@ -6,7 +6,7 @@
#[cfg(target_os = "linux")]
mod bwrap;
#[cfg(target_os = "linux")]
mod fd_cleanup;
mod bwrap_mount_cleanup;
#[cfg(target_os = "linux")]
mod landlock;
#[cfg(target_os = "linux")]

View File

@@ -10,7 +10,8 @@ use std::path::PathBuf;
use crate::bwrap::BwrapNetworkMode;
use crate::bwrap::BwrapOptions;
use crate::bwrap::create_bwrap_command_args;
use crate::fd_cleanup::close_inherited_exec_fds;
use crate::bwrap_mount_cleanup::cleanup_bwrap_mount_points;
use crate::bwrap_mount_cleanup::register_bwrap_mount_points;
use crate::landlock::apply_sandbox_policy_to_current_thread;
use crate::launcher::exec_bwrap;
use crate::launcher::preferred_bwrap_supports_argv0;
@@ -437,7 +438,13 @@ fn run_bwrap_with_proc_fallback(
options,
);
apply_inner_command_argv0(&mut bwrap_args.args);
exec_bwrap(bwrap_args.args, bwrap_args.preserved_files);
let cleanup_mount_points = register_bwrap_mount_points(&bwrap_args.cleanup_mount_points);
if cleanup_mount_points.is_empty() {
exec_bwrap(bwrap_args.args, bwrap_args.preserved_files);
}
let exit_code = run_bwrap_in_child_inherit_stdio(bwrap_args);
cleanup_bwrap_mount_points(&cleanup_mount_points);
std::process::exit(exit_code);
}
fn bwrap_network_mode(
@@ -474,6 +481,7 @@ fn build_bwrap_argv(
crate::bwrap::BwrapArgs {
args: argv,
preserved_files: bwrap_args.preserved_files,
cleanup_mount_points: bwrap_args.cleanup_mount_points,
}
}
@@ -574,6 +582,7 @@ fn resolve_true_command() -> String {
/// command, and reads are bounded to a fixed max size.
fn run_bwrap_in_child_capture_stderr(bwrap_args: crate::bwrap::BwrapArgs) -> String {
const MAX_PREFLIGHT_STDERR_BYTES: u64 = 64 * 1024;
let cleanup_mount_points = register_bwrap_mount_points(&bwrap_args.cleanup_mount_points);
let mut pipe_fds = [0; 2];
let pipe_res = unsafe { libc::pipe2(pipe_fds.as_mut_ptr(), libc::O_CLOEXEC) };
@@ -584,11 +593,7 @@ fn run_bwrap_in_child_capture_stderr(bwrap_args: crate::bwrap::BwrapArgs) -> Str
let read_fd = pipe_fds[0];
let write_fd = pipe_fds[1];
let pid = unsafe { libc::fork() };
if pid < 0 {
let err = std::io::Error::last_os_error();
panic!("failed to fork for bubblewrap: {err}");
}
let pid = fork_bwrap_or_panic();
if pid == 0 {
// Child: redirect stderr to the pipe, then run bubblewrap.
@@ -615,16 +620,56 @@ fn run_bwrap_in_child_capture_stderr(bwrap_args: crate::bwrap::BwrapArgs) -> Str
panic!("failed to read bubblewrap stderr: {err}");
}
let mut status: libc::c_int = 0;
let wait_res = unsafe { libc::waitpid(pid, &mut status as *mut libc::c_int, 0) };
if wait_res < 0 {
let err = std::io::Error::last_os_error();
panic!("waitpid failed for bubblewrap child: {err}");
}
let _ = wait_for_bwrap_child(pid);
cleanup_bwrap_mount_points(&cleanup_mount_points);
String::from_utf8_lossy(&stderr_bytes).into_owned()
}
/// Fork, exec bubblewrap in the child with inherited stdio, and return the
/// child's exit code to the parent. Forking (rather than exec'ing in place)
/// lets the parent perform mount-point cleanup after the command exits.
fn run_bwrap_in_child_inherit_stdio(bwrap_args: crate::bwrap::BwrapArgs) -> i32 {
    let pid = fork_bwrap_or_panic();
    if pid == 0 {
        // Child: exec_bwrap is expected to replace this process image
        // (exec) — control should not continue past it in the child.
        exec_bwrap(bwrap_args.args, bwrap_args.preserved_files);
    }
    // Parent: block until the sandboxed command finishes.
    wait_for_bwrap_child(pid)
}
/// fork(2), panicking on failure.
///
/// Returns the child PID in the parent and 0 in the child, mirroring the raw
/// fork(2) contract.
fn fork_bwrap_or_panic() -> libc::pid_t {
    match unsafe { libc::fork() } {
        pid if pid < 0 => {
            let err = std::io::Error::last_os_error();
            panic!("failed to fork for bubblewrap: {err}");
        }
        pid => pid,
    }
}
/// Wait for the bubblewrap child `pid` and translate its wait status into an
/// exit code. Retries on EINTR; panics on any other waitpid failure.
fn wait_for_bwrap_child(pid: libc::pid_t) -> i32 {
    let mut status: libc::c_int = 0;
    loop {
        let wait_res = unsafe { libc::waitpid(pid, &mut status, 0) };
        if wait_res == pid {
            break wait_status_to_exit_code(status);
        }
        if wait_res >= 0 {
            // Unexpected non-error return for a different pid; keep waiting.
            continue;
        }
        let err = std::io::Error::last_os_error();
        // A signal interrupted the wait; retry rather than give up.
        if err.raw_os_error() != Some(libc::EINTR) {
            panic!("waitpid failed for bubblewrap child: {err}");
        }
    }
}
/// Convert a waitpid status into a shell-style exit code: the child's own
/// exit status, or 128 + signal number when it was killed by a signal.
fn wait_status_to_exit_code(status: libc::c_int) -> i32 {
    if libc::WIFEXITED(status) {
        return libc::WEXITSTATUS(status);
    }
    if libc::WIFSIGNALED(status) {
        return 128 + libc::WTERMSIG(status);
    }
    // Neither exited nor signaled (e.g. stopped): report a generic failure.
    1
}
/// Close an owned file descriptor and panic with context on failure.
///
/// We use explicit close() checks here (instead of ignoring return codes)
@@ -730,8 +775,6 @@ fn exec_or_panic(command: Vec<String>) -> ! {
let mut c_args_ptrs: Vec<*const libc::c_char> = c_args.iter().map(|arg| arg.as_ptr()).collect();
c_args_ptrs.push(std::ptr::null());
close_inherited_exec_fds();
unsafe {
libc::execvp(c_command.as_ptr(), c_args_ptrs.as_ptr());
}

View File

@@ -21,6 +21,7 @@ use codex_protocol::protocol::SandboxPolicy;
use codex_utils_absolute_path::AbsolutePathBuf;
use pretty_assertions::assert_eq;
use std::collections::HashMap;
use std::path::Path;
use std::path::PathBuf;
use tempfile::NamedTempFile;
@@ -43,6 +44,18 @@ const NETWORK_TIMEOUT_MS: u64 = 10_000;
const BWRAP_UNAVAILABLE_ERR: &str = "build-time bubblewrap is not available in this build.";
#[expect(clippy::expect_used)]
/// Absolute path to the codex-linux-sandbox helper binary built by Cargo.
/// A relative CARGO_BIN_EXE path is resolved against the current directory.
fn codex_linux_sandbox_exe() -> PathBuf {
    let exe = PathBuf::from(env!("CARGO_BIN_EXE_codex-linux-sandbox"));
    if exe.is_absolute() {
        return exe;
    }
    std::env::current_dir()
        .expect("cwd should exist")
        .join(exe)
}
fn create_env_from_core_vars() -> HashMap<String, String> {
let policy = ShellEnvironmentPolicy::default();
create_env(&policy, /*thread_id*/ None)
@@ -75,12 +88,33 @@ async fn run_cmd_output(
.expect("sandboxed command should execute")
}
#[expect(clippy::expect_used)]
/// Run `cmd` sandboxed with the given writable roots, using the test
/// process's own current directory as the working directory. Thin wrapper
/// over the `_in_cwd` variant.
async fn run_cmd_result_with_writable_roots(
    cmd: &[&str],
    writable_roots: &[PathBuf],
    timeout_ms: u64,
    use_legacy_landlock: bool,
    network_access: bool,
) -> Result<codex_protocol::exec_output::ExecToolCallOutput> {
    let current = std::env::current_dir().expect("cwd should exist");
    run_cmd_result_with_writable_roots_in_cwd(
        cmd,
        writable_roots,
        current.as_path(),
        timeout_ms,
        use_legacy_landlock,
        network_access,
    )
    .await
}
async fn run_cmd_result_with_writable_roots_in_cwd(
cmd: &[&str],
writable_roots: &[PathBuf],
cwd: &Path,
timeout_ms: u64,
use_legacy_landlock: bool,
network_access: bool,
) -> Result<codex_protocol::exec_output::ExecToolCallOutput> {
let sandbox_policy = SandboxPolicy::WorkspaceWrite {
writable_roots: writable_roots
@@ -97,11 +131,12 @@ async fn run_cmd_result_with_writable_roots(
};
let file_system_sandbox_policy = FileSystemSandboxPolicy::from(&sandbox_policy);
let network_sandbox_policy = NetworkSandboxPolicy::from(&sandbox_policy);
run_cmd_result_with_policies(
run_cmd_result_with_policies_in_cwd(
cmd,
sandbox_policy,
file_system_sandbox_policy,
network_sandbox_policy,
cwd,
timeout_ms,
use_legacy_landlock,
)
@@ -117,7 +152,29 @@ async fn run_cmd_result_with_policies(
timeout_ms: u64,
use_legacy_landlock: bool,
) -> Result<codex_protocol::exec_output::ExecToolCallOutput> {
let cwd = AbsolutePathBuf::current_dir().expect("cwd should exist");
let cwd = std::env::current_dir().expect("cwd should exist");
run_cmd_result_with_policies_in_cwd(
cmd,
sandbox_policy,
file_system_sandbox_policy,
network_sandbox_policy,
cwd.as_path(),
timeout_ms,
use_legacy_landlock,
)
.await
}
async fn run_cmd_result_with_policies_in_cwd(
cmd: &[&str],
sandbox_policy: SandboxPolicy,
file_system_sandbox_policy: FileSystemSandboxPolicy,
network_sandbox_policy: NetworkSandboxPolicy,
cwd: &Path,
timeout_ms: u64,
use_legacy_landlock: bool,
) -> Result<codex_protocol::exec_output::ExecToolCallOutput> {
let cwd = AbsolutePathBuf::from_absolute_path(cwd)?;
let sandbox_cwd = cwd.clone();
let params = ExecParams {
command: cmd.iter().copied().map(str::to_owned).collect(),
@@ -132,8 +189,7 @@ async fn run_cmd_result_with_policies(
justification: None,
arg0: None,
};
let sandbox_program = env!("CARGO_BIN_EXE_codex-linux-sandbox");
let codex_linux_sandbox_exe = Some(PathBuf::from(sandbox_program));
let codex_linux_sandbox_exe = Some(codex_linux_sandbox_exe());
process_exec_tool_call(
params,
@@ -258,6 +314,45 @@ async fn bwrap_populates_minimal_dev_nodes() {
assert_eq!(output.exit_code, 0);
}
#[tokio::test]
async fn bwrap_dev_nodes_work_and_missing_workspace_dot_codex_stays_blocked() {
    if should_skip_bwrap_tests().await {
        eprintln!("skipping bwrap test: bwrap sandbox prerequisites are unavailable");
        return;
    }
    let tmpdir = tempfile::tempdir().expect("tempdir");
    let writable_roots = vec![tmpdir.path().to_path_buf()];
    // The shell script verifies, in order:
    //  1. /dev/null accepts writes (`: >/dev/null`),
    //  2. creating .codex in the writable workspace is blocked — exit 42
    //     would signal that the forbidden mkdir succeeded,
    //  3. /dev/zero produces real zero bytes (dumped as hex by od).
    let output = run_cmd_result_with_writable_roots_in_cwd(
        &[
            "bash",
            "-lc",
            concat!(
                ": >/dev/null && ",
                "if mkdir .codex 2>/dev/null; then exit 42; fi && ",
                "head -c 8 /dev/zero | od -An -tx1"
            ),
        ],
        &writable_roots,
        tmpdir.path(),
        LONG_TIMEOUT_MS,
        /*use_legacy_landlock*/ false,
        /*network_access*/ true,
    )
    .await
    .expect("sandboxed command should execute");
    assert_eq!(output.exit_code, 0);
    // od renders the eight zero bytes as eight "00" tokens.
    assert_eq!(
        output.stdout.text.split_whitespace().collect::<Vec<_>>(),
        vec!["00", "00", "00", "00", "00", "00", "00", "00"]
    );
    assert!(
        !tmpdir.path().join(".codex").exists(),
        "bwrap-created .codex mountpoint should be cleaned up after command exit"
    );
}
#[tokio::test]
async fn bwrap_preserves_writable_dev_shm_bind_mount() {
if should_skip_bwrap_tests().await {
@@ -394,8 +489,7 @@ async fn assert_network_blocked(cmd: &[&str]) {
};
let sandbox_policy = SandboxPolicy::new_read_only_policy();
let sandbox_program = env!("CARGO_BIN_EXE_codex-linux-sandbox");
let codex_linux_sandbox_exe: Option<PathBuf> = Some(PathBuf::from(sandbox_program));
let codex_linux_sandbox_exe: Option<PathBuf> = Some(codex_linux_sandbox_exe());
let result = process_exec_tool_call(
params,
&sandbox_policy,
@@ -554,7 +648,7 @@ async fn sandbox_blocks_explicit_split_policy_carveouts_under_bwrap() {
let blocked_target = blocked.join("secret.txt");
// These tests bypass the usual legacy-policy bridge, so explicitly keep
// the sandbox helper binary and minimal runtime paths readable.
let sandbox_helper_dir = PathBuf::from(env!("CARGO_BIN_EXE_codex-linux-sandbox"))
let sandbox_helper_dir = codex_linux_sandbox_exe()
.parent()
.expect("sandbox helper should have a parent")
.to_path_buf();
@@ -627,7 +721,7 @@ async fn sandbox_reenables_writable_subpaths_under_unreadable_parents() {
let allowed_target = allowed.join("note.txt");
// These tests bypass the usual legacy-policy bridge, so explicitly keep
// the sandbox helper binary and minimal runtime paths readable.
let sandbox_helper_dir = PathBuf::from(env!("CARGO_BIN_EXE_codex-linux-sandbox"))
let sandbox_helper_dir = codex_linux_sandbox_exe()
.parent()
.expect("sandbox helper should have a parent")
.to_path_buf();