Compare commits

...

11 Commits

Author SHA1 Message Date
viyatb-oai
acb1767588 Gate network proxy behind feature flag 2025-12-21 12:03:20 -08:00
viyatb-oai
a575da87c4 Remove network_proxy.prompt_on_block; always prompt when proxy enabled 2025-12-21 11:08:14 -08:00
viyatb-oai
e47d02ab27 Config surface
New config key: [network_proxy.policy].allow_unix_sockets (string array), stored/edited via network_proxy.rs.
Entries support:
SSH_AUTH_SOCK / ${SSH_AUTH_SOCK}
preset aliases: ssh-agent, ssh_auth_sock, ssh_auth_socket
Entries are resolved at runtime to canonical absolute socket paths before generating Seatbelt rules.
macOS Seatbelt integration

seatbelt.rs now:
allows only loopback proxy ports (localhost:<port>) + explicitly allowed unix socket paths
does not emit per-domain (remote tcp ...) rules (those break under sandbox-exec)
Unix socket allowlist resolution is done via network_proxy::resolve_unix_socket_allowlist(...).
Prompt-on-deny UX (TUI)

When an exec approval happens and the command appears to need the SSH agent socket (ssh/scp/sftp/ssh-add, or git with ssh-style remotes), and the socket isn’t already allowed:
TUI shows an approval modal for the unix socket.
Allow for session: writes the resolved socket path to config (and removes it on exit, like session domain approvals).
Allow always: writes SSH_AUTH_SOCK to allow_unix_sockets for portability across restarts.
2025-12-19 23:57:18 -08:00
viyatb-oai
73430c462f some more improvements 2025-12-19 15:47:50 -08:00
viyatb-oai
f7648cf2b5 remove major func signature changes 2025-12-19 08:56:32 -08:00
viyatb-oai
78a5014430 reduce double prompting 2025-12-18 13:08:46 -08:00
viyatb-oai
4672cddaf2 merge 2025-12-18 00:53:18 -08:00
viyatb-oai
9942765058 final changes v0 2025-12-17 16:14:13 -08:00
viyatb-oai
235c76e972 final working integration 2025-12-16 19:19:33 -08:00
viyatb-oai
05e6729875 network proxy sandbox integration 2025-12-15 23:26:03 -08:00
viyatb-oai
e2b5c918ad security tests 2025-11-13 16:32:06 -08:00
55 changed files with 6585 additions and 860 deletions

1831
codex-rs/Cargo.lock generated

File diff suppressed because it is too large Load Diff

View File

@@ -26,6 +26,7 @@ members = [
"login",
"mcp-server",
"mcp-types",
"network-proxy",
"ollama",
"process-hardening",
"protocol",
@@ -83,6 +84,7 @@ codex-lmstudio = { path = "lmstudio" }
codex-login = { path = "login" }
codex-mcp-server = { path = "mcp-server" }
codex-ollama = { path = "ollama" }
codex-network-proxy = { path = "network-proxy" }
codex-otel = { path = "otel" }
codex-process-hardening = { path = "process-hardening" }
codex-protocol = { path = "protocol" }
@@ -135,6 +137,7 @@ env_logger = "0.11.5"
escargot = "0.5"
eventsource-stream = "0.2.3"
futures = { version = "0.3", default-features = false }
globset = "0.4"
http = "1.3.1"
icu_decimal = "2.1"
icu_locale_core = "2.1"

View File

@@ -659,6 +659,7 @@ mod tests {
parsed_cmd: vec![ParsedCommand::Unknown {
cmd: "echo hello".to_string(),
}],
network_preflight_only: false,
};
let request = ServerRequest::ExecCommandApproval {
request_id: RequestId::Integer(7),
@@ -680,7 +681,8 @@ mod tests {
"type": "unknown",
"cmd": "echo hello"
}
]
],
"networkPreflightOnly": false
}
}),
serde_json::to_value(&request)?,

View File

@@ -227,6 +227,8 @@ pub struct ExecCommandApprovalParams {
pub cwd: PathBuf,
pub reason: Option<String>,
pub parsed_cmd: Vec<ParsedCommand>,
#[serde(default)]
pub network_preflight_only: bool,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]

View File

@@ -182,6 +182,8 @@ pub(crate) async fn apply_bespoke_event_handling(
reason,
proposed_execpolicy_amendment,
parsed_cmd,
network_preflight_only,
..
}) => match api_version {
ApiVersion::V1 => {
let params = ExecCommandApprovalParams {
@@ -191,6 +193,7 @@ pub(crate) async fn apply_bespoke_event_handling(
cwd,
reason,
parsed_cmd,
network_preflight_only,
};
let rx = outgoing
.send_request(ServerRequestPayload::ExecCommandApproval(params))

View File

@@ -1172,7 +1172,15 @@ impl CodexMessageProcessor {
}
let cwd = params.cwd.unwrap_or_else(|| self.config.cwd.clone());
let env = create_env(&self.config.shell_environment_policy);
let effective_policy = params
.sandbox_policy
.map(|policy| policy.to_core())
.unwrap_or_else(|| self.config.sandbox_policy.clone());
let env = create_env(
&self.config.shell_environment_policy,
&effective_policy,
&self.config.network_proxy,
);
let timeout_ms = params
.timeout_ms
.and_then(|timeout_ms| u64::try_from(timeout_ms).ok());
@@ -1186,11 +1194,6 @@ impl CodexMessageProcessor {
arg0: None,
};
let effective_policy = params
.sandbox_policy
.map(|policy| policy.to_core())
.unwrap_or_else(|| self.config.sandbox_policy.clone());
let codex_linux_sandbox_exe = self.config.codex_linux_sandbox_exe.clone();
let outgoing = self.outgoing.clone();
let req_id = request_id;

View File

@@ -274,6 +274,7 @@ async fn test_send_user_turn_changes_approval_policy_behavior() -> Result<()> {
parsed_cmd: vec![ParsedCommand::Unknown {
cmd: "python3 -c 'print(42)'".to_string()
}],
network_preflight_only: false,
},
params
);

View File

@@ -30,6 +30,7 @@ codex-exec = { workspace = true }
codex-execpolicy = { workspace = true }
codex-login = { workspace = true }
codex-mcp-server = { workspace = true }
codex-network-proxy = { workspace = true }
codex-process-hardening = { workspace = true }
codex-protocol = { workspace = true }
codex-responses-api-proxy = { workspace = true }

View File

@@ -130,7 +130,11 @@ async fn run_command_under_sandbox(
let sandbox_policy_cwd = cwd.clone();
let stdio_policy = StdioPolicy::Inherit;
let env = create_env(&config.shell_environment_policy);
let env = create_env(
&config.shell_environment_policy,
&config.sandbox_policy,
&config.network_proxy,
);
// Special-case Windows sandbox: execute and exit the process to emulate inherited stdio.
if let SandboxType::Windows = sandbox_type {

View File

@@ -21,6 +21,7 @@ use codex_exec::Cli as ExecCli;
use codex_exec::Command as ExecCommand;
use codex_exec::ReviewArgs;
use codex_execpolicy::ExecPolicyCheckCommand;
use codex_network_proxy::Args as NetworkProxyArgs;
use codex_responses_api_proxy::Args as ResponsesApiProxyArgs;
use codex_tui::AppExitInfo;
use codex_tui::Cli as TuiCli;
@@ -98,6 +99,9 @@ enum Subcommand {
/// [experimental] Run the app server or related tooling.
AppServer(AppServerCommand),
/// Run the Codex network proxy.
Proxy(NetworkProxyArgs),
/// Generate shell completion scripts.
Completion(CompletionCommand),
@@ -469,6 +473,9 @@ async fn cli_main(codex_linux_sandbox_exe: Option<PathBuf>) -> anyhow::Result<()
);
codex_exec::run_main(exec_cli, codex_linux_sandbox_exe).await?;
}
Some(Subcommand::Proxy(proxy_args)) => {
codex_network_proxy::run_main(proxy_args).await?;
}
Some(Subcommand::McpServer) => {
codex_mcp_server::run_main(codex_linux_sandbox_exe, root_config_overrides).await?;
}

View File

@@ -81,6 +81,7 @@ use crate::config::Constrained;
use crate::config::ConstraintError;
use crate::config::ConstraintResult;
use crate::config::GhostSnapshotConfig;
use crate::config::types::NetworkProxyConfig;
use crate::config::types::ShellEnvironmentPolicy;
use crate::context_manager::ContextManager;
use crate::environment_context::EnvironmentContext;
@@ -368,6 +369,7 @@ pub(crate) struct TurnContext {
pub(crate) approval_policy: AskForApproval,
pub(crate) sandbox_policy: SandboxPolicy,
pub(crate) shell_environment_policy: ShellEnvironmentPolicy,
pub(crate) network_proxy: NetworkProxyConfig,
pub(crate) tools_config: ToolsConfig,
pub(crate) ghost_snapshot: GhostSnapshotConfig,
pub(crate) final_output_json_schema: Option<Value>,
@@ -391,6 +393,7 @@ impl TurnContext {
}
}
#[allow(dead_code)]
#[derive(Clone)]
pub(crate) struct SessionConfiguration {
/// Provider identifier ("openai", "openrouter", ...).
@@ -530,6 +533,7 @@ impl Session {
approval_policy: session_configuration.approval_policy.value(),
sandbox_policy: session_configuration.sandbox_policy.clone(),
shell_environment_policy: per_turn_config.shell_environment_policy.clone(),
network_proxy: per_turn_config.network_proxy.clone(),
tools_config,
ghost_snapshot: per_turn_config.ghost_snapshot.clone(),
final_output_json_schema: None,
@@ -1098,6 +1102,7 @@ impl Session {
cwd: PathBuf,
reason: Option<String>,
proposed_execpolicy_amendment: Option<ExecPolicyAmendment>,
network_preflight_only: bool,
) -> ReviewDecision {
let sub_id = turn_context.sub_id.clone();
// Add the tx_approve callback to the map before sending the request.
@@ -1126,6 +1131,7 @@ impl Session {
reason,
proposed_execpolicy_amendment,
parsed_cmd,
network_preflight_only,
});
self.send_event(turn_context, event).await;
rx_approve.await.unwrap_or_default()
@@ -1659,6 +1665,9 @@ async fn submission_loop(sess: Arc<Session>, config: Arc<Config>, rx_sub: Receiv
Op::PatchApproval { id, decision } => {
handlers::patch_approval(&sess, id, decision).await;
}
Op::NetworkApprovalCache { host, decision } => {
handlers::network_approval_cache(&sess, host, decision).await;
}
Op::AddToHistory { text } => {
handlers::add_to_history(&sess, &config, text).await;
}
@@ -1722,6 +1731,7 @@ mod handlers {
use crate::features::Feature;
use crate::mcp::auth::compute_auth_statuses;
use crate::mcp::collect_mcp_snapshot_from_manager;
use crate::network_proxy;
use crate::review_prompts::resolve_review_request;
use crate::tasks::CompactTask;
use crate::tasks::RegularTask;
@@ -1905,6 +1915,21 @@ mod handlers {
}
}
/// Record a user's decision about a blocked network host in the session's
/// tool-approval store, so later proxy blocks for the same host need not
/// re-prompt.
///
/// NOTE(review): `network_proxy::cache_network_approval` performs the same
/// Approved/ApprovedForSession filtering internally, so the early return here
/// is redundant (harmless, but it does avoid taking the lock for denials).
pub async fn network_approval_cache(
    sess: &Arc<Session>,
    host: String,
    decision: ReviewDecision,
) {
    // Only affirmative decisions are cached; denials fall through so the
    // user is asked again next time.
    if !matches!(
        decision,
        ReviewDecision::Approved | ReviewDecision::ApprovedForSession
    ) {
        return;
    }
    let mut store = sess.services.tool_approvals.lock().await;
    network_proxy::cache_network_approval(&mut store, &host, decision);
}
pub async fn add_to_history(sess: &Arc<Session>, config: &Arc<Config>, text: String) {
let id = sess.conversation_id;
let config = Arc::clone(config);
@@ -2182,6 +2207,7 @@ async fn spawn_review_thread(
approval_policy: parent_turn_context.approval_policy,
sandbox_policy: parent_turn_context.sandbox_policy.clone(),
shell_environment_policy: parent_turn_context.shell_environment_policy.clone(),
network_proxy: parent_turn_context.network_proxy.clone(),
cwd: parent_turn_context.cwd.clone(),
final_output_json_schema: None,
codex_linux_sandbox_exe: parent_turn_context.codex_linux_sandbox_exe.clone(),

View File

@@ -282,6 +282,7 @@ async fn handle_exec_approval(
event.cwd,
event.reason,
event.proposed_execpolicy_amendment,
event.network_preflight_only,
);
let decision = await_approval_with_cancel(
approval_fut,

View File

@@ -2,6 +2,9 @@ use crate::auth::AuthCredentialsStoreMode;
use crate::config::types::DEFAULT_OTEL_ENVIRONMENT;
use crate::config::types::History;
use crate::config::types::McpServerConfig;
use crate::config::types::NetworkProxyConfig;
use crate::config::types::NetworkProxyConfigToml;
use crate::config::types::NetworkProxyMode;
use crate::config::types::Notice;
use crate::config::types::Notifications;
use crate::config::types::OtelConfig;
@@ -87,6 +90,11 @@ pub(crate) fn test_config() -> Config {
.expect("load default test config")
}
pub fn default_config_path() -> std::io::Result<PathBuf> {
let codex_home = find_codex_home()?;
Ok(codex_home.join(CONFIG_TOML_FILE))
}
/// Application configuration loaded from disk and merged with overrides.
#[derive(Debug, Clone, PartialEq)]
pub struct Config {
@@ -122,6 +130,8 @@ pub struct Config {
pub forced_auto_mode_downgraded_on_windows: bool,
pub shell_environment_policy: ShellEnvironmentPolicy,
/// Network proxy settings for routing sandboxed network access.
pub network_proxy: NetworkProxyConfig,
/// When `true`, `AgentReasoning` events emitted by the backend will be
/// suppressed from the frontend output. This can reduce visual noise when
@@ -563,6 +573,9 @@ pub struct ConfigToml {
/// Sandbox configuration to apply if `sandbox` is `WorkspaceWrite`.
pub sandbox_workspace_write: Option<SandboxWorkspaceWrite>,
/// Network proxy settings for sandboxed network access.
pub network_proxy: Option<NetworkProxyConfigToml>,
/// Optional external command to spawn for end-user notifications.
#[serde(default)]
pub notify: Option<Vec<String>>,
@@ -725,6 +738,143 @@ impl From<ConfigToml> for UserSavedConfig {
}
}
/// Baseline network-proxy settings used when no `[network_proxy]` table is
/// present in the user's config. The proxy is disabled by default; endpoint
/// URLs point at the local proxy's conventional ports.
fn default_network_proxy_config() -> NetworkProxyConfig {
    // Seed the bypass list with the built-in loopback/private-range entries.
    let no_proxy = default_no_proxy_entries()
        .into_iter()
        .map(ToString::to_string)
        .collect();
    NetworkProxyConfig {
        enabled: false,
        proxy_url: "http://127.0.0.1:3128".to_string(),
        admin_url: "http://127.0.0.1:8080".to_string(),
        mode: NetworkProxyMode::Full,
        no_proxy,
        poll_interval_ms: 1000,
        mitm_ca_cert_path: None,
    }
}
/// Merge the user's `[network_proxy]` TOML table (if any) over the built-in
/// defaults, returning the fully-resolved runtime configuration.
///
/// Blank/whitespace-only URL overrides are ignored so a stray empty string in
/// config cannot erase the default endpoints; the built-in `no_proxy` entries
/// are always re-added after user overrides are applied.
fn resolve_network_proxy_config(cfg: &ConfigToml, codex_home: &Path) -> NetworkProxyConfig {
    let mut resolved = default_network_proxy_config();
    let Some(network_proxy) = cfg.network_proxy.clone() else {
        return resolved;
    };
    // Resolved up front (relative paths anchored at `codex_home`), applied last.
    let mitm_ca_cert_path = resolve_network_proxy_mitm_ca_path(&network_proxy, codex_home);
    if let Some(enabled) = network_proxy.enabled {
        resolved.enabled = enabled;
    }
    if let Some(proxy_url) = network_proxy.proxy_url {
        let trimmed = proxy_url.trim();
        if !trimmed.is_empty() {
            resolved.proxy_url = trimmed.to_string();
        }
    }
    if let Some(admin_url) = network_proxy.admin_url {
        let trimmed = admin_url.trim();
        if !trimmed.is_empty() {
            resolved.admin_url = trimmed.to_string();
        }
    }
    if let Some(mode) = network_proxy.mode {
        resolved.mode = mode;
    }
    if let Some(no_proxy) = network_proxy.no_proxy {
        resolved.no_proxy = normalize_no_proxy_entries(no_proxy);
    }
    // Loopback/private-range defaults must survive even a user-provided list.
    ensure_default_no_proxy_entries(&mut resolved.no_proxy);
    // Non-positive poll intervals are rejected; the default stands.
    if let Some(poll_interval_ms) = network_proxy.poll_interval_ms
        && poll_interval_ms > 0
    {
        resolved.poll_interval_ms = poll_interval_ms;
    }
    resolved.mitm_ca_cert_path = mitm_ca_cert_path;
    resolved
}
/// Determine the CA certificate path for MITM TLS inspection.
///
/// Returns `None` unless a `[network_proxy.mitm]` table exists with
/// `enabled = true`; a missing `ca_cert_path` falls back to the default
/// location under `codex_home`.
fn resolve_network_proxy_mitm_ca_path(
    network_proxy: &NetworkProxyConfigToml,
    codex_home: &Path,
) -> Option<PathBuf> {
    let mitm = network_proxy.mitm.as_ref()?;
    // MITM must be explicitly enabled; absent counts as disabled.
    if mitm.enabled != Some(true) {
        return None;
    }
    let ca_cert_path = match mitm.ca_cert_path.clone() {
        Some(path) => path,
        None => PathBuf::from("network_proxy/mitm/ca.pem"),
    };
    // An explicitly-empty path disables the feature rather than resolving "".
    if ca_cert_path.as_os_str().is_empty() {
        return None;
    }
    Some(resolve_network_proxy_path(&ca_cert_path, codex_home))
}
/// Resolve a config-supplied path: expand a leading `~`, then anchor any
/// remaining relative path at `codex_home`.
fn resolve_network_proxy_path(path: &Path, codex_home: &Path) -> PathBuf {
    let expanded = expand_tilde_path(path);
    if expanded.is_relative() {
        codex_home.join(expanded)
    } else {
        expanded
    }
}
fn expand_tilde_path(path: &Path) -> PathBuf {
let path_str = path.to_string_lossy();
if path_str == "~" {
return home_dir().unwrap_or_else(|| path.to_path_buf());
}
if let Some(rest) = path_str.strip_prefix("~/")
&& let Some(home) = home_dir()
{
return home.join(rest);
}
path.to_path_buf()
}
/// Trim and case-insensitively de-duplicate user-supplied `no_proxy` entries,
/// dropping blanks and preserving the first occurrence's original casing and
/// relative order.
fn normalize_no_proxy_entries(entries: Vec<String>) -> Vec<String> {
    // Track lowercased keys in a set: O(n) overall instead of the O(n^2)
    // `out.iter().any(eq_ignore_ascii_case)` scan per entry.
    let mut seen = std::collections::HashSet::new();
    let mut out: Vec<String> = Vec::new();
    for entry in entries {
        let trimmed = entry.trim();
        if trimmed.is_empty() {
            continue;
        }
        // `insert` returns false for duplicates, skipping them.
        if seen.insert(trimmed.to_ascii_lowercase()) {
            out.push(trimmed.to_string());
        }
    }
    out
}
/// Append any built-in `no_proxy` default that the list does not already
/// contain (compared case-insensitively), preserving existing entries.
fn ensure_default_no_proxy_entries(entries: &mut Vec<String>) {
    for default_entry in default_no_proxy_entries() {
        let already_present = entries
            .iter()
            .any(|existing| existing.eq_ignore_ascii_case(default_entry));
        if !already_present {
            entries.push(default_entry.to_string());
        }
    }
}
/// Hosts and networks that must always bypass the proxy: loopback addresses,
/// mDNS `.local` names, and the RFC 1918 / link-local private ranges.
fn default_no_proxy_entries() -> [&'static str; 9] {
    const DEFAULTS: [&str; 9] = [
        "localhost",
        "127.0.0.1",
        "::1",
        "*.local",
        ".local",
        "169.254.0.0/16",
        "10.0.0.0/8",
        "172.16.0.0/12",
        "192.168.0.0/16",
    ];
    DEFAULTS
}
#[derive(Deserialize, Debug, Clone, PartialEq, Eq)]
pub struct ProjectConfig {
pub trust_level: Option<TrustLevel>,
@@ -1047,6 +1197,11 @@ impl Config {
|| config_profile.sandbox_mode.is_some()
|| cfg.sandbox_mode.is_some();
let mut network_proxy = resolve_network_proxy_config(&cfg, &codex_home);
if !features.enabled(Feature::NetworkProxy) {
network_proxy.enabled = false;
}
let mut model_providers = built_in_model_providers();
// Merge user-defined providers into the built-in list.
for (key, provider) in cfg.model_providers.into_iter() {
@@ -1168,6 +1323,7 @@ impl Config {
did_user_set_custom_approval_policy_or_sandbox_mode,
forced_auto_mode_downgraded_on_windows,
shell_environment_policy,
network_proxy,
notify: cfg.notify,
user_instructions,
base_instructions,
@@ -1403,6 +1559,51 @@ persistence = "none"
);
}
/// `[network_proxy].enabled` in config must be inert unless the experimental
/// `network_proxy` feature flag is also turned on.
#[test]
fn network_proxy_is_rollout_gated_by_feature_flag() -> std::io::Result<()> {
    // Proxy enabled in config but no feature flag: must resolve to disabled.
    let cfg_without_feature_flag = r#"
[network_proxy]
enabled = true
"#;
    let parsed_without_feature_flag =
        toml::from_str::<ConfigToml>(cfg_without_feature_flag).expect("TOML parse should work");
    let cwd_temp_dir = TempDir::new().unwrap();
    // A fake `.git` file makes the temp cwd look like a project root.
    std::fs::write(cwd_temp_dir.path().join(".git"), "gitdir: nowhere")?;
    let codex_home_temp_dir = TempDir::new().unwrap();
    let config_without_feature_flag = Config::load_from_base_config_with_overrides(
        parsed_without_feature_flag,
        ConfigOverrides {
            cwd: Some(cwd_temp_dir.path().to_path_buf()),
            ..Default::default()
        },
        codex_home_temp_dir.path().to_path_buf(),
    )?;
    assert!(!config_without_feature_flag.network_proxy.enabled);
    // Same config plus the feature flag: the proxy may now be enabled.
    let cfg_with_feature_flag = r#"
[features]
network_proxy = true
[network_proxy]
enabled = true
"#;
    let parsed_with_feature_flag =
        toml::from_str::<ConfigToml>(cfg_with_feature_flag).expect("TOML parse should work");
    let cwd_temp_dir = TempDir::new().unwrap();
    std::fs::write(cwd_temp_dir.path().join(".git"), "gitdir: nowhere")?;
    let codex_home_temp_dir = TempDir::new().unwrap();
    let config_with_feature_flag = Config::load_from_base_config_with_overrides(
        parsed_with_feature_flag,
        ConfigOverrides {
            cwd: Some(cwd_temp_dir.path().to_path_buf()),
            ..Default::default()
        },
        codex_home_temp_dir.path().to_path_buf(),
    )?;
    assert!(config_with_feature_flag.network_proxy.enabled);
    Ok(())
}
#[test]
fn tui_config_missing_notifications_field_defaults_to_enabled() {
let cfg = r#"
@@ -2959,6 +3160,7 @@ model_verbosity = "high"
did_user_set_custom_approval_policy_or_sandbox_mode: true,
forced_auto_mode_downgraded_on_windows: false,
shell_environment_policy: ShellEnvironmentPolicy::default(),
network_proxy: default_network_proxy_config(),
user_instructions: None,
notify: None,
cwd: fixture.cwd(),
@@ -3034,6 +3236,7 @@ model_verbosity = "high"
did_user_set_custom_approval_policy_or_sandbox_mode: true,
forced_auto_mode_downgraded_on_windows: false,
shell_environment_policy: ShellEnvironmentPolicy::default(),
network_proxy: default_network_proxy_config(),
user_instructions: None,
notify: None,
cwd: fixture.cwd(),
@@ -3124,6 +3327,7 @@ model_verbosity = "high"
did_user_set_custom_approval_policy_or_sandbox_mode: true,
forced_auto_mode_downgraded_on_windows: false,
shell_environment_policy: ShellEnvironmentPolicy::default(),
network_proxy: default_network_proxy_config(),
user_instructions: None,
notify: None,
cwd: fixture.cwd(),
@@ -3200,6 +3404,7 @@ model_verbosity = "high"
did_user_set_custom_approval_policy_or_sandbox_mode: true,
forced_auto_mode_downgraded_on_windows: false,
shell_environment_policy: ShellEnvironmentPolicy::default(),
network_proxy: default_network_proxy_config(),
user_instructions: None,
notify: None,
cwd: fixture.cwd(),

View File

@@ -435,6 +435,47 @@ impl From<SandboxWorkspaceWrite> for codex_app_server_protocol::SandboxSettings
}
}
/// Enforcement mode for the network proxy; serialized as lowercase strings
/// ("limited" / "full") in config and admin API payloads.
#[derive(Deserialize, Serialize, Debug, Clone, Copy, PartialEq, Eq)]
#[serde(rename_all = "lowercase")]
#[derive(Default)]
pub enum NetworkProxyMode {
    Limited,
    #[default]
    Full,
}
/// Raw `[network_proxy]` table as written in config.toml. Every field is
/// optional so resolution can fill in defaults for anything omitted.
#[derive(Deserialize, Debug, Clone, PartialEq, Default)]
pub struct NetworkProxyConfigToml {
    pub enabled: Option<bool>,
    pub proxy_url: Option<String>,
    pub admin_url: Option<String>,
    pub mode: Option<NetworkProxyMode>,
    pub no_proxy: Option<Vec<String>>,
    pub poll_interval_ms: Option<i64>,
    /// Optional `[network_proxy.mitm]` sub-table.
    #[serde(default)]
    pub mitm: Option<NetworkProxyMitmConfigToml>,
}
/// Raw `[network_proxy.mitm]` sub-table controlling TLS interception
/// (CA certificate/key locations and inspection limits).
#[derive(Deserialize, Debug, Clone, PartialEq, Default)]
pub struct NetworkProxyMitmConfigToml {
    pub enabled: Option<bool>,
    pub inspect: Option<bool>,
    pub max_body_bytes: Option<i64>,
    pub ca_cert_path: Option<PathBuf>,
    pub ca_key_path: Option<PathBuf>,
}
/// Fully-resolved network proxy settings: built-in defaults merged with the
/// user's `[network_proxy]` overrides.
#[derive(Debug, Clone, PartialEq)]
pub struct NetworkProxyConfig {
    pub enabled: bool,
    /// Proxy endpoint URL, e.g. "http://127.0.0.1:3128".
    pub proxy_url: String,
    /// Admin/control endpoint URL, e.g. "http://127.0.0.1:8080".
    pub admin_url: String,
    pub mode: NetworkProxyMode,
    /// Hosts/patterns/CIDRs that bypass the proxy.
    pub no_proxy: Vec<String>,
    pub poll_interval_ms: i64,
    /// Resolved absolute path to the MITM CA certificate, when MITM is enabled.
    pub mitm_ca_cert_path: Option<PathBuf>,
}
#[derive(Deserialize, Debug, Clone, PartialEq, Default)]
#[serde(rename_all = "kebab-case")]
pub enum ShellEnvironmentPolicyInherit {

View File

@@ -1,9 +1,14 @@
use crate::config::types::EnvironmentVariablePattern;
use crate::config::types::NetworkProxyConfig;
use crate::config::types::ShellEnvironmentPolicy;
use crate::config::types::ShellEnvironmentPolicyInherit;
use crate::network_proxy;
use crate::protocol::SandboxPolicy;
use std::collections::HashMap;
use std::collections::HashSet;
const DEFAULT_SOCKS_PROXY_PORT: u16 = 8081;
/// Construct an environment map based on the rules in the specified policy. The
/// resulting map can be passed directly to `Command::envs()` after calling
/// `env_clear()` to ensure no unintended variables are leaked to the spawned
@@ -11,8 +16,16 @@ use std::collections::HashSet;
///
/// The derivation follows the algorithm documented in the struct-level comment
/// for [`ShellEnvironmentPolicy`].
pub fn create_env(policy: &ShellEnvironmentPolicy) -> HashMap<String, String> {
populate_env(std::env::vars(), policy)
pub fn create_env(
policy: &ShellEnvironmentPolicy,
sandbox_policy: &SandboxPolicy,
network_proxy: &NetworkProxyConfig,
) -> HashMap<String, String> {
let mut env_map = populate_env(std::env::vars(), policy);
if should_apply_network_proxy(network_proxy, sandbox_policy) {
apply_network_proxy_env(&mut env_map, network_proxy);
}
env_map
}
fn populate_env<I>(vars: I, policy: &ShellEnvironmentPolicy) -> HashMap<String, String>
@@ -68,11 +81,245 @@ where
env_map
}
/// Decide whether proxy environment variables should be injected: the proxy
/// must be enabled AND the sandbox policy must actually permit outbound
/// network access.
fn should_apply_network_proxy(
    network_proxy: &NetworkProxyConfig,
    sandbox_policy: &SandboxPolicy,
) -> bool {
    network_proxy.enabled
        && match sandbox_policy {
            SandboxPolicy::ReadOnly => false,
            SandboxPolicy::DangerFullAccess => true,
            SandboxPolicy::WorkspaceWrite { network_access, .. } => *network_access,
        }
}
/// A resolved proxy target: bare hostname (no scheme) plus TCP port.
#[derive(Clone, Debug)]
struct ProxyEndpoint {
    host: String,
    port: u16,
}
/// The pair of endpoints env injection may advertise — an HTTP CONNECT proxy
/// and/or a SOCKS5 proxy. Either may be absent.
#[derive(Default)]
struct ProxyEndpoints {
    http: Option<ProxyEndpoint>,
    socks: Option<ProxyEndpoint>,
}
/// Build the ordered list of `KEY=VALUE` proxy environment entries for the
/// resolved endpoints. The emission order is deterministic — the unit test
/// below asserts the exact sequence — so do not reorder casually.
fn proxy_env_entries(
    network_proxy: &NetworkProxyConfig,
    endpoints: &ProxyEndpoints,
) -> Vec<String> {
    let mut entries = Vec::new();
    // NO_PROXY first, in both upper- and lower-case for tool compatibility.
    let no_proxy = normalize_no_proxy_value(&network_proxy.no_proxy);
    if !no_proxy.is_empty() {
        entries.push(format!("NO_PROXY={no_proxy}"));
        entries.push(format!("no_proxy={no_proxy}"));
    }
    let http_proxy_url = endpoints
        .http
        .as_ref()
        .map(|endpoint| format_proxy_url("http", endpoint));
    // socks5h: hostname resolution is delegated to the proxy itself.
    let socks_proxy_url = endpoints
        .socks
        .as_ref()
        .map(|endpoint| format_proxy_url("socks5h", endpoint));
    let socks_host_port = endpoints
        .socks
        .as_ref()
        .map(|endpoint| format_host_port(&endpoint.host, endpoint.port));
    if let Some(http_proxy_url) = http_proxy_url.as_ref() {
        // Generic variables honored by curl, requests, and most HTTP clients.
        for key in ["HTTP_PROXY", "HTTPS_PROXY", "http_proxy", "https_proxy"] {
            entries.push(format!("{key}={http_proxy_url}"));
        }
        // Package-manager-specific variants (yarn / npm).
        for key in [
            "YARN_HTTP_PROXY",
            "YARN_HTTPS_PROXY",
            "npm_config_http_proxy",
            "npm_config_https_proxy",
            "npm_config_proxy",
        ] {
            entries.push(format!("{key}={http_proxy_url}"));
        }
        entries.push("ELECTRON_GET_USE_PROXY=true".to_string());
    }
    if let Some(socks_proxy_url) = socks_proxy_url.as_ref() {
        entries.push(format!("ALL_PROXY={socks_proxy_url}"));
        entries.push(format!("all_proxy={socks_proxy_url}"));
    }
    if let Some(socks_host_port) = socks_host_port.as_ref() {
        // macOS `nc` supports SOCKS (-X 5); used to tunnel ssh-family tools.
        #[cfg(target_os = "macos")]
        entries.push(format!(
            "GIT_SSH_COMMAND=ssh -o ProxyCommand='nc -X 5 -x {socks_host_port} %h %p'"
        ));
        if let Some(socks_proxy_url) = socks_proxy_url.as_ref() {
            entries.push(format!("FTP_PROXY={socks_proxy_url}"));
            entries.push(format!("ftp_proxy={socks_proxy_url}"));
        }
        entries.push(format!("RSYNC_PROXY={socks_host_port}"));
    }
    // Docker prefers the HTTP endpoint, falling back to SOCKS exposed as http.
    let docker_proxy = endpoints.http.as_ref().or(endpoints.socks.as_ref());
    if let Some(endpoint) = docker_proxy {
        let docker_proxy_url = format_proxy_url("http", endpoint);
        entries.push(format!("DOCKER_HTTP_PROXY={docker_proxy_url}"));
        entries.push(format!("DOCKER_HTTPS_PROXY={docker_proxy_url}"));
    }
    if let Some(endpoint) = endpoints.http.as_ref() {
        // gcloud takes proxy settings split into type / address / port.
        // NOTE(review): address is hard-coded to "localhost" rather than
        // endpoint.host — confirm this is intentional for non-loopback proxies.
        entries.push("CLOUDSDK_PROXY_TYPE=https".to_string());
        entries.push("CLOUDSDK_PROXY_ADDRESS=localhost".to_string());
        let port = endpoint.port;
        entries.push(format!("CLOUDSDK_PROXY_PORT={port}"));
    }
    if let Some(socks_proxy_url) = socks_proxy_url.as_ref() {
        entries.push(format!("GRPC_PROXY={socks_proxy_url}"));
        entries.push(format!("grpc_proxy={socks_proxy_url}"));
    }
    entries
}
/// Parse `proxy_url` into the HTTP and/or SOCKS endpoints used for env
/// injection.
///
/// A `socks…://` URL yields only a SOCKS endpoint; any other scheme yields an
/// HTTP endpoint. An empty URL, unparsable host/port, or out-of-range port
/// yields no endpoints at all.
fn resolve_proxy_endpoints(network_proxy: &NetworkProxyConfig) -> ProxyEndpoints {
    let proxy_url = network_proxy.proxy_url.trim();
    if proxy_url.is_empty() {
        return ProxyEndpoints::default();
    }
    let Some((host, port)) = network_proxy::proxy_host_port(proxy_url) else {
        return ProxyEndpoints::default();
    };
    let Some(port) = normalize_proxy_port(port) else {
        return ProxyEndpoints::default();
    };
    let (host, is_loopback) = normalize_proxy_host(&host);
    // Any scheme beginning with "socks" (socks5, socks5h, …) counts as SOCKS.
    let is_socks = proxy_url_scheme(proxy_url)
        .map(|scheme| scheme.to_ascii_lowercase().starts_with("socks"))
        .unwrap_or(false);
    let http = if is_socks {
        None
    } else {
        Some(ProxyEndpoint {
            host: host.clone(),
            port,
        })
    };
    let mut socks = if is_socks {
        Some(ProxyEndpoint { host, port })
    } else {
        None
    };
    // assumes a loopback HTTP proxy also exposes a SOCKS listener on
    // DEFAULT_SOCKS_PROXY_PORT (8081) — TODO confirm against the proxy binary.
    if socks.is_none() && is_loopback {
        socks = Some(ProxyEndpoint {
            host: "localhost".to_string(),
            port: DEFAULT_SOCKS_PROXY_PORT,
        });
    }
    ProxyEndpoints { http, socks }
}
/// Return the scheme portion of a URL-ish string (the text before "://"),
/// or `None` when no scheme separator is present.
fn proxy_url_scheme(proxy_url: &str) -> Option<&str> {
    let separator = proxy_url.find("://")?;
    Some(&proxy_url[..separator])
}
/// Collapse all loopback spellings to "localhost" so downstream env vars are
/// consistent regardless of how the proxy URL was written. Returns the
/// normalized host plus whether it is a loopback address.
fn normalize_proxy_host(host: &str) -> (String, bool) {
    match host {
        "127.0.0.1" | "::1" => ("localhost".to_string(), true),
        other if other.eq_ignore_ascii_case("localhost") => ("localhost".to_string(), true),
        other => (other.to_string(), false),
    }
}
/// Convert a raw port from config/URL parsing into a valid TCP port,
/// rejecting zero, negatives, and values above `u16::MAX`.
fn normalize_proxy_port(port: i64) -> Option<u16> {
    // `try_from` handles the range check; only port 0 needs filtering out.
    // Avoids the manual range test + truncating `as` cast.
    u16::try_from(port).ok().filter(|&p| p != 0)
}
fn format_proxy_url(scheme: &str, endpoint: &ProxyEndpoint) -> String {
let host = &endpoint.host;
let port = endpoint.port;
if endpoint.host.contains(':') {
format!("{scheme}://[{host}]:{port}")
} else {
format!("{scheme}://{host}:{port}")
}
}
/// Join host and port, bracketing IPv6 literals (which contain ':') so the
/// port separator stays unambiguous.
fn format_host_port(host: &str, port: u16) -> String {
    let needs_brackets = host.contains(':');
    match needs_brackets {
        true => format!("[{host}]:{port}"),
        false => format!("{host}:{port}"),
    }
}
/// Insert proxy-related variables into `env_map` for the configured proxy.
///
/// Generic `KEY=VALUE` entries are applied first; `GRADLE_OPTS` is handled
/// separately because an existing user value must be appended to, not
/// replaced.
fn apply_network_proxy_env(
    env_map: &mut HashMap<String, String>,
    network_proxy: &NetworkProxyConfig,
) {
    let endpoints = resolve_proxy_endpoints(network_proxy);
    for entry in proxy_env_entries(network_proxy, &endpoints) {
        if let Some((key, value)) = entry.split_once('=') {
            env_map.insert(key.to_string(), value.to_string());
        }
    }
    if let Some(endpoint) = endpoints.http {
        let host = &endpoint.host;
        let port = endpoint.port;
        let gradle_opts = format!(
            "-Dhttp.proxyHost={host} -Dhttp.proxyPort={port} -Dhttps.proxyHost={host} -Dhttps.proxyPort={port}"
        );
        match env_map.get_mut("GRADLE_OPTS") {
            Some(existing) => {
                // Don't stomp proxy flags the user already set themselves.
                if !existing.contains("http.proxyHost") && !existing.contains("https.proxyHost") {
                    if !existing.ends_with(' ') {
                        existing.push(' ');
                    }
                    existing.push_str(&gradle_opts);
                }
            }
            None => {
                env_map.insert("GRADLE_OPTS".to_string(), gradle_opts);
            }
        }
    }
    // Export the MITM CA certificate path for TLS-inspecting setups, if enabled.
    network_proxy::apply_mitm_ca_env_if_enabled(env_map, network_proxy);
}
/// Join `no_proxy` entries into a single comma-separated value, trimming each
/// entry and dropping blanks. (Unlike the config-side normalizer, this does
/// not de-duplicate.)
fn normalize_no_proxy_value(entries: &[String]) -> String {
    let kept: Vec<&str> = entries
        .iter()
        .map(|entry| entry.trim())
        .filter(|trimmed| !trimmed.is_empty())
        .collect();
    kept.join(",")
}
#[cfg(test)]
mod tests {
use super::*;
use crate::config::types::NetworkProxyMode;
use crate::config::types::ShellEnvironmentPolicyInherit;
use maplit::hashmap;
use pretty_assertions::assert_eq;
fn make_vars(pairs: &[(&str, &str)]) -> Vec<(String, String)> {
pairs
@@ -191,4 +438,54 @@ mod tests {
};
assert_eq!(result, expected);
}
#[test]
fn proxy_env_entries_are_deterministic() {
let network_proxy = NetworkProxyConfig {
enabled: true,
proxy_url: "http://localhost:3128".to_string(),
admin_url: "http://localhost:8080".to_string(),
mode: NetworkProxyMode::Full,
no_proxy: vec!["localhost".to_string(), "127.0.0.1".to_string()],
poll_interval_ms: 1000,
mitm_ca_cert_path: None,
};
let endpoints = resolve_proxy_endpoints(&network_proxy);
let entries = proxy_env_entries(&network_proxy, &endpoints);
let mut expected = vec![
"NO_PROXY=localhost,127.0.0.1".to_string(),
"no_proxy=localhost,127.0.0.1".to_string(),
"HTTP_PROXY=http://localhost:3128".to_string(),
"HTTPS_PROXY=http://localhost:3128".to_string(),
"http_proxy=http://localhost:3128".to_string(),
"https_proxy=http://localhost:3128".to_string(),
"YARN_HTTP_PROXY=http://localhost:3128".to_string(),
"YARN_HTTPS_PROXY=http://localhost:3128".to_string(),
"npm_config_http_proxy=http://localhost:3128".to_string(),
"npm_config_https_proxy=http://localhost:3128".to_string(),
"npm_config_proxy=http://localhost:3128".to_string(),
"ELECTRON_GET_USE_PROXY=true".to_string(),
"ALL_PROXY=socks5h://localhost:8081".to_string(),
"all_proxy=socks5h://localhost:8081".to_string(),
];
#[cfg(target_os = "macos")]
expected.push(
"GIT_SSH_COMMAND=ssh -o ProxyCommand='nc -X 5 -x localhost:8081 %h %p'".to_string(),
);
expected.extend([
"FTP_PROXY=socks5h://localhost:8081".to_string(),
"ftp_proxy=socks5h://localhost:8081".to_string(),
"RSYNC_PROXY=localhost:8081".to_string(),
"DOCKER_HTTP_PROXY=http://localhost:3128".to_string(),
"DOCKER_HTTPS_PROXY=http://localhost:3128".to_string(),
"CLOUDSDK_PROXY_TYPE=https".to_string(),
"CLOUDSDK_PROXY_ADDRESS=localhost".to_string(),
"CLOUDSDK_PROXY_PORT=3128".to_string(),
"GRPC_PROXY=socks5h://localhost:8081".to_string(),
"grpc_proxy=socks5h://localhost:8081".to_string(),
]);
assert_eq!(entries, expected);
}
}

View File

@@ -85,6 +85,8 @@ pub enum Feature {
ShellSnapshot,
/// Experimental TUI v2 (viewport) implementation.
Tui2,
/// Route subprocess network access through the Codex network proxy and surface approvals.
NetworkProxy,
}
impl Feature {
@@ -405,4 +407,10 @@ pub const FEATURES: &[FeatureSpec] = &[
stage: Stage::Experimental,
default_enabled: false,
},
FeatureSpec {
id: Feature::NetworkProxy,
key: "network_proxy",
stage: Stage::Experimental,
default_enabled: false,
},
];

View File

@@ -70,6 +70,7 @@ pub use conversation_manager::NewConversation;
pub use auth::AuthManager;
pub use auth::CodexAuth;
pub mod default_client;
pub mod network_proxy;
pub mod project_doc;
mod rollout;
pub(crate) mod safety;

View File

@@ -0,0 +1,900 @@
use crate::config;
use crate::config::types::NetworkProxyConfig;
use crate::config::types::NetworkProxyMode;
use crate::protocol::ReviewDecision;
use crate::protocol::SandboxPolicy;
use crate::tools::sandboxing::ApprovalStore;
use anyhow::Context;
use anyhow::Result;
use anyhow::anyhow;
use codex_client::CodexHttpClient;
use serde::Deserialize;
use serde::Serialize;
use shlex::split as shlex_split;
use std::collections::HashMap;
use std::collections::HashSet;
use std::path::Path;
use std::path::PathBuf;
use toml_edit::Array as TomlArray;
use toml_edit::DocumentMut;
use toml_edit::InlineTable;
use toml_edit::Item as TomlItem;
use toml_edit::Table as TomlTable;
use wildmatch::WildMatchPattern;
const NETWORK_PROXY_TABLE: &str = "network_proxy";
const NETWORK_PROXY_POLICY_TABLE: &str = "policy";
const ALLOWED_DOMAINS_KEY: &str = "allowed_domains";
const DENIED_DOMAINS_KEY: &str = "denied_domains";
const ALLOW_UNIX_SOCKETS_KEY: &str = "allow_unix_sockets";
/// One request the proxy blocked, as reported by the admin `/blocked`
/// endpoint.
#[derive(Debug, Clone, Deserialize)]
pub struct NetworkProxyBlockedRequest {
    /// Destination host that was blocked.
    pub host: String,
    /// Proxy-provided reason string for the block.
    pub reason: String,
    /// Originating tool-call id, when the proxy could attribute one.
    #[serde(default)]
    pub call_id: Option<String>,
    pub client: Option<String>,
    pub method: Option<String>,
    pub mode: Option<NetworkProxyMode>,
    pub protocol: String,
    /// Timestamp of the blocked attempt — units not shown here; confirm
    /// seconds vs. milliseconds against the proxy implementation.
    pub timestamp: i64,
}
/// Wire shape of the admin `/blocked` response body.
#[derive(Debug, Deserialize)]
struct BlockedResponse {
    blocked: Vec<NetworkProxyBlockedRequest>,
}
/// Request body for the admin `/mode` endpoint.
#[derive(Serialize)]
struct ModeUpdate {
    mode: NetworkProxyMode,
}
/// Fetch the proxy's list of recently blocked requests from its admin
/// `/blocked` endpoint.
///
/// # Errors
/// Fails when the request cannot be sent, the proxy returns a non-success
/// status, or the response body is not valid JSON.
pub async fn fetch_blocked(
    client: &CodexHttpClient,
    admin_url: &str,
) -> Result<Vec<NetworkProxyBlockedRequest>> {
    // Trailing slashes in admin_url are tolerated.
    let url = format!("{}/blocked", admin_url.trim_end_matches('/'));
    let payload: BlockedResponse = client
        .get(url)
        .send()
        .await
        .context("network proxy /blocked request failed")?
        .error_for_status()
        .context("network proxy /blocked returned error")?
        .json()
        .await
        .context("network proxy /blocked returned invalid JSON")?;
    Ok(payload.blocked)
}
/// Session-approval cache key for a host; hosts are normalized to trimmed,
/// ASCII-lowercase form so lookups are case-insensitive.
#[derive(Debug, Clone, Serialize)]
pub(crate) struct NetworkApprovalKey {
    host: String,
}

impl NetworkApprovalKey {
    /// Returns `None` for empty/whitespace-only hosts.
    fn new(host: &str) -> Option<Self> {
        let host = host.trim();
        if host.is_empty() {
            return None;
        }
        Some(Self {
            host: host.to_ascii_lowercase(),
        })
    }
}

/// Record `decision` for `host` in the session approval store.
///
/// Only affirmative decisions (`Approved`, `ApprovedForSession`) are cached;
/// other decisions and empty hosts are ignored. Returns `true` when an entry
/// was stored.
pub(crate) fn cache_network_approval(
    store: &mut ApprovalStore,
    host: &str,
    decision: ReviewDecision,
) -> bool {
    if !matches!(
        decision,
        ReviewDecision::Approved | ReviewDecision::ApprovedForSession
    ) {
        return false;
    }
    let Some(key) = NetworkApprovalKey::new(host) else {
        return false;
    };
    store.put(key, decision);
    true
}
/// Switch the proxy's enforcement mode via the `/mode` admin endpoint.
///
/// # Errors
/// Fails when the request cannot be sent or the proxy returns a non-success
/// status.
pub async fn set_mode(
    client: &CodexHttpClient,
    admin_url: &str,
    mode: NetworkProxyMode,
) -> Result<()> {
    let base = admin_url.trim_end_matches('/');
    let url = format!("{base}/mode");
    let request = ModeUpdate { mode };
    client
        .post(url)
        .json(&request)
        .send()
        .await
        .context("network proxy /mode request failed")?
        .error_for_status()
        .context("network proxy /mode returned error")?;
    Ok(())
}

/// Ask the proxy to reload its policy via the `/reload` admin endpoint.
///
/// # Errors
/// Fails when the request cannot be sent or the proxy returns a non-success
/// status.
pub async fn reload(client: &CodexHttpClient, admin_url: &str) -> Result<()> {
    let base = admin_url.trim_end_matches('/');
    let url = format!("{base}/reload");
    client
        .post(url)
        .send()
        .await
        .context("network proxy /reload request failed")?
        .error_for_status()
        .context("network proxy /reload returned error")?;
    Ok(())
}
/// Persistently add `host` to the allow list (and drop it from the deny list).
/// Returns `true` when the config file changed.
pub fn add_allowed_domain(config_path: &Path, host: &str) -> Result<bool> {
    update_domain_list(config_path, host, DomainListKind::Allow)
}

/// Persistently add `host` to the deny list (and drop it from the allow list).
/// Returns `true` when the config file changed.
pub fn add_denied_domain(config_path: &Path, host: &str) -> Result<bool> {
    update_domain_list(config_path, host, DomainListKind::Deny)
}

/// Persistently add `socket` to `allow_unix_sockets`. Returns `true` when the
/// config file changed.
pub fn add_allowed_unix_socket(config_path: &Path, socket: &str) -> Result<bool> {
    update_unix_socket_list(config_path, socket, UnixSocketListKind::Allow)
}

/// Persistently remove `socket` from `allow_unix_sockets`. Returns `true`
/// when the config file changed.
pub fn remove_allowed_unix_socket(config_path: &Path, socket: &str) -> Result<bool> {
    update_unix_socket_list(config_path, socket, UnixSocketListKind::Remove)
}
/// Membership of a host on the persisted allow/deny lists. Both flags can be
/// `true` at once if the config lists the host in both arrays.
#[derive(Debug, Clone, Copy)]
pub struct DomainState {
    pub allowed: bool,
    pub denied: bool,
}

/// Read the persisted allow/deny membership for `host`.
///
/// This is an exact, ASCII case-insensitive list lookup — no wildcard
/// matching (contrast with [`preflight_host`]).
///
/// # Errors
/// Fails when `host` is empty or the config cannot be read/parsed.
pub fn domain_state(config_path: &Path, host: &str) -> Result<DomainState> {
    let host = host.trim();
    if host.is_empty() {
        return Err(anyhow!("host is empty"));
    }
    let policy = load_network_policy(config_path)?;
    Ok(DomainState {
        allowed: list_contains(&policy.allowed_domains, host),
        denied: list_contains(&policy.denied_domains, host),
    })
}
/// Reconcile both persisted domain lists with `state`, adding or removing
/// `host` from each list as needed, and write the file back only when
/// something actually changed. Returns `true` when the file was rewritten.
///
/// # Errors
/// Fails when `host` is empty or the config cannot be read/parsed/written.
pub fn set_domain_state(config_path: &Path, host: &str, state: DomainState) -> Result<bool> {
    let host = host.trim();
    if host.is_empty() {
        return Err(anyhow!("host is empty"));
    }
    let mut doc = load_document(config_path)?;
    let policy = ensure_policy_table(&mut doc);
    let mut changed = false;
    // The inner blocks scope each mutable array borrow of `policy`.
    {
        let allowed = ensure_array(policy, ALLOWED_DOMAINS_KEY);
        if state.allowed {
            changed |= add_domain(allowed, host);
        } else {
            changed |= remove_domain(allowed, host);
        }
    }
    {
        let denied = ensure_array(policy, DENIED_DOMAINS_KEY);
        if state.denied {
            changed |= add_domain(denied, host);
        } else {
            changed |= remove_domain(denied, host);
        }
    }
    if changed {
        write_document(config_path, &doc)?;
    }
    Ok(changed)
}
/// Whether `socket_path` is covered by the persisted unix-socket allowlist.
///
/// The path is canonicalized when possible (falling back to the raw path)
/// and matched with `Path::starts_with`, mirroring the component-wise
/// semantics of the Seatbelt `subpath` rules generated from the same list.
pub fn unix_socket_allowed(config_path: &Path, socket_path: &Path) -> Result<bool> {
    let policy = load_network_policy(config_path)?;
    let allowed = resolve_unix_socket_allowlist(&policy.allow_unix_sockets);
    if allowed.is_empty() {
        return Ok(false);
    }
    let canonical_socket = socket_path
        .canonicalize()
        .unwrap_or_else(|_| socket_path.to_path_buf());
    Ok(allowed
        .iter()
        .any(|allowed_path| canonical_socket.starts_with(allowed_path)))
}
/// Whether exec commands should be preflight-checked against network policy.
///
/// True only when the proxy is enabled *and* the sandbox policy grants
/// network access: always for `DangerFullAccess`, per its `network_access`
/// flag for `WorkspaceWrite`, never for `ReadOnly`.
pub fn should_preflight_network(
    network_proxy: &NetworkProxyConfig,
    sandbox_policy: &SandboxPolicy,
) -> bool {
    if !network_proxy.enabled {
        return false;
    }
    match sandbox_policy {
        SandboxPolicy::WorkspaceWrite { network_access, .. } => *network_access,
        SandboxPolicy::DangerFullAccess => true,
        SandboxPolicy::ReadOnly => false,
    }
}
/// Run the domain preflight for `command`, but only when
/// [`should_preflight_network`] says the proxy is active for this sandbox.
/// Reads the policy from the default config path.
pub fn preflight_blocked_host_if_enabled(
    network_proxy: &NetworkProxyConfig,
    sandbox_policy: &SandboxPolicy,
    command: &[String],
) -> Result<Option<PreflightMatch>> {
    if !should_preflight_network(network_proxy, sandbox_policy) {
        return Ok(None);
    }
    let config_path = config::default_config_path()?;
    preflight_blocked_host(&config_path, command)
}

/// Like [`preflight_blocked_host_if_enabled`], but shaped as a synthetic
/// [`NetworkProxyBlockedRequest`] (protocol "preflight", timestamp 0) so the
/// result can flow through the same approval path as proxy-reported blocks.
pub fn preflight_blocked_request_if_enabled(
    network_proxy: &NetworkProxyConfig,
    sandbox_policy: &SandboxPolicy,
    command: &[String],
) -> Result<Option<NetworkProxyBlockedRequest>> {
    match preflight_blocked_host_if_enabled(network_proxy, sandbox_policy, command)? {
        Some(hit) => Ok(Some(NetworkProxyBlockedRequest {
            host: hit.host,
            reason: hit.reason,
            call_id: None,
            client: None,
            method: None,
            mode: Some(network_proxy.mode),
            protocol: "preflight".to_string(),
            timestamp: 0,
        })),
        None => Ok(None),
    }
}
/// Result of the unix-socket preflight: a socket the command appears to need
/// that is not yet on the allowlist.
#[derive(Debug, Clone)]
pub struct UnixSocketPreflightMatch {
    /// Socket path that needs to be allowed (canonicalized when possible).
    pub socket_path: PathBuf,
    /// Suggested config entry to add for a persistent allow (e.g. `$SSH_AUTH_SOCK`).
    pub suggested_allow_entry: String,
    /// Why the preflight flagged it; currently always "not_allowed_unix_socket".
    pub reason: String,
}

/// macOS-only preflight: detect commands that look like they need the SSH
/// agent socket while it is not yet on the allowlist, so the UI can prompt
/// before the Seatbelt sandbox silently blocks it.
///
/// Returns `None` on non-macOS targets, when network preflighting is
/// disabled, when the command does not appear to need the agent, or when the
/// socket is already allowed.
pub fn preflight_blocked_unix_socket_if_enabled(
    network_proxy: &NetworkProxyConfig,
    sandbox_policy: &SandboxPolicy,
    command: &[String],
) -> Result<Option<UnixSocketPreflightMatch>> {
    if !cfg!(target_os = "macos") {
        return Ok(None);
    }
    if !should_preflight_network(network_proxy, sandbox_policy) {
        return Ok(None);
    }
    let Some(socket_path) = ssh_auth_sock_if_needed(command) else {
        return Ok(None);
    };
    let config_path = config::default_config_path()?;
    if unix_socket_allowed(&config_path, &socket_path)? {
        return Ok(None);
    }
    Ok(Some(UnixSocketPreflightMatch {
        socket_path,
        // `$SSH_AUTH_SOCK` is suggested (rather than the resolved path) so the
        // persisted entry stays valid across agent restarts.
        suggested_allow_entry: "$SSH_AUTH_SOCK".to_string(),
        reason: "not_allowed_unix_socket".to_string(),
    }))
}
/// Point common TLS trust-store environment variables at the proxy's MITM CA
/// certificate so HTTPS clients inside the sandbox trust the proxy.
///
/// Only fills in variables the caller has not already set, so existing
/// values win. No-op when no MITM CA certificate path is configured.
pub fn apply_mitm_ca_env_if_enabled(
    env_map: &mut HashMap<String, String>,
    network_proxy: &NetworkProxyConfig,
) {
    let ca_value = match network_proxy.mitm_ca_cert_path.as_ref() {
        Some(path) => path.to_string_lossy().to_string(),
        None => return,
    };
    // Trust-store variables honored by common tools (curl, git, Python,
    // Node, pip, npm) plus codex-specific passthroughs.
    const CA_ENV_KEYS: [&str; 10] = [
        "SSL_CERT_FILE",
        "CURL_CA_BUNDLE",
        "GIT_SSL_CAINFO",
        "REQUESTS_CA_BUNDLE",
        "NODE_EXTRA_CA_CERTS",
        "PIP_CERT",
        "NPM_CONFIG_CAFILE",
        "npm_config_cafile",
        "CODEX_PROXY_CERT",
        "PROXY_CA_CERT_PATH",
    ];
    for key in CA_ENV_KEYS {
        env_map
            .entry(key.to_string())
            .or_insert_with(|| ca_value.clone());
    }
}
/// Extract the `(host, port)` pair from a proxy URL such as
/// `http://user:pass@127.0.0.1:3128/`.
///
/// Returns `None` when the input is empty, has no explicit positive port, or
/// has an empty host. Bracketed IPv6 literals (`[::1]:8080`) are supported;
/// the brackets are stripped from the returned host.
pub fn proxy_host_port(proxy_url: &str) -> Option<(String, i64)> {
    let input = proxy_url.trim();
    if input.is_empty() {
        return None;
    }
    // Drop the scheme ("http://", "socks5://", ...) when present.
    let after_scheme = match input.split_once("://") {
        Some((_, rest)) => rest,
        None => input,
    };
    // Keep only the authority (before any path), then drop userinfo.
    let mut authority = after_scheme.split('/').next().unwrap_or("");
    if let Some((_, tail)) = authority.rsplit_once('@') {
        authority = tail;
    }
    if authority.is_empty() {
        return None;
    }
    // Separate host from port, handling `[v6]:port` specially.
    let (host, port_text) = if let Some(rest) = authority.strip_prefix('[') {
        let close = rest.find(']')?;
        (&rest[..close], rest[close + 1..].strip_prefix(':')?)
    } else {
        authority.rsplit_once(':')?
    };
    if host.is_empty() {
        return None;
    }
    let port = port_text.parse::<i64>().ok().filter(|p| *p > 0)?;
    Some((host.to_string(), port))
}
/// A host that a preflight policy check decided to block.
#[derive(Debug, Clone)]
pub struct PreflightMatch {
    pub host: String,
    /// "denied" for an explicit deny-list hit; "not_allowed" when the host is
    /// absent from the allow list (which is default-deny when empty).
    pub reason: String,
}

/// Check every host referenced by `command` against the persisted policy.
///
/// Hosts are discovered heuristically from URLs passed to known network
/// tools (see `extract_hosts_from_command`). Returns the first host that is
/// explicitly denied or not present on the allow list; note an empty allow
/// list blocks every discovered host.
pub fn preflight_blocked_host(
    config_path: &Path,
    command: &[String],
) -> Result<Option<PreflightMatch>> {
    let policy = load_network_policy(config_path)?;
    let hosts = extract_hosts_from_command(command);
    for host in hosts {
        // Deny list wins over the allow list for each host.
        if policy
            .denied_domains
            .iter()
            .any(|pattern| host_matches(pattern, &host))
        {
            return Ok(Some(PreflightMatch {
                host,
                reason: "denied".to_string(),
            }));
        }
        if policy.allowed_domains.is_empty()
            || !policy
                .allowed_domains
                .iter()
                .any(|pattern| host_matches(pattern, &host))
        {
            return Ok(Some(PreflightMatch {
                host,
                reason: "not_allowed".to_string(),
            }));
        }
    }
    Ok(None)
}
/// Evaluate a single `host` against the persisted policy.
///
/// Returns `Some("denied")` for a deny-list hit, `Some("not_allowed")` when
/// the host is absent from the allow list (an empty allow list blocks every
/// host), and `None` when the host is permitted. Mirrors the per-host logic
/// of [`preflight_blocked_host`].
///
/// # Errors
/// Fails when `host` is empty or the config cannot be read/parsed.
pub fn preflight_host(config_path: &Path, host: &str) -> Result<Option<String>> {
    let host = host.trim();
    if host.is_empty() {
        return Err(anyhow!("host is empty"));
    }
    let policy = load_network_policy(config_path)?;
    if policy
        .denied_domains
        .iter()
        .any(|pattern| host_matches(pattern, host))
    {
        return Ok(Some("denied".to_string()));
    }
    if policy.allowed_domains.is_empty()
        || !policy
            .allowed_domains
            .iter()
            .any(|pattern| host_matches(pattern, host))
    {
        return Ok(Some("not_allowed".to_string()));
    }
    Ok(None)
}
/// Which persisted domain list an update targets.
#[derive(Copy, Clone)]
enum DomainListKind {
    Allow,
    Deny,
}

/// Add `host` to the chosen list and remove it from the opposite list so the
/// two stay mutually exclusive. Writes the file only when something changed;
/// returns `true` in that case.
///
/// # Errors
/// Fails when `host` is empty or the config cannot be read/parsed/written.
fn update_domain_list(config_path: &Path, host: &str, list: DomainListKind) -> Result<bool> {
    let host = host.trim();
    if host.is_empty() {
        return Err(anyhow!("host is empty"));
    }
    let mut doc = load_document(config_path)?;
    let policy = ensure_policy_table(&mut doc);
    let (target_key, other_key) = match list {
        DomainListKind::Allow => (ALLOWED_DOMAINS_KEY, DENIED_DOMAINS_KEY),
        DomainListKind::Deny => (DENIED_DOMAINS_KEY, ALLOWED_DOMAINS_KEY),
    };
    // Scoped blocks keep the two mutable array borrows of `policy` disjoint.
    let mut changed = {
        let target = ensure_array(policy, target_key);
        add_domain(target, host)
    };
    let removed = {
        let other = ensure_array(policy, other_key);
        remove_domain(other, host)
    };
    if removed {
        changed = true;
    }
    if changed {
        write_document(config_path, &doc)?;
    }
    Ok(changed)
}
/// Whether a unix-socket allowlist update adds or removes an entry.
#[derive(Copy, Clone)]
enum UnixSocketListKind {
    Allow,
    Remove,
}

/// Add or remove `socket` in the persisted `allow_unix_sockets` array.
/// Writes the file only when something changed; returns `true` in that case.
/// Entries are compared ASCII case-insensitively (via `add_domain` /
/// `remove_domain`), matching the domain-list behavior.
///
/// # Errors
/// Fails when `socket` is empty or the config cannot be read/parsed/written.
fn update_unix_socket_list(
    config_path: &Path,
    socket: &str,
    action: UnixSocketListKind,
) -> Result<bool> {
    let socket = socket.trim();
    if socket.is_empty() {
        return Err(anyhow!("socket is empty"));
    }
    let mut doc = load_document(config_path)?;
    let policy = ensure_policy_table(&mut doc);
    let list = ensure_array(policy, ALLOW_UNIX_SOCKETS_KEY);
    let changed = match action {
        UnixSocketListKind::Allow => add_domain(list, socket),
        UnixSocketListKind::Remove => remove_domain(list, socket),
    };
    if changed {
        write_document(config_path, &doc)?;
    }
    Ok(changed)
}

/// Parse the config file as an editable TOML document; a missing file yields
/// an empty document so first-time writes work.
///
/// # Errors
/// Fails when the file exists but cannot be read or parsed.
fn load_document(path: &Path) -> Result<DocumentMut> {
    if !path.exists() {
        return Ok(DocumentMut::new());
    }
    let raw = std::fs::read_to_string(path)
        .with_context(|| format!("failed to read network proxy config at {}", path.display()))?;
    raw.parse::<DocumentMut>()
        .with_context(|| format!("failed to parse network proxy config at {}", path.display()))
}
/// Deserialization root for the subset of the config file this module reads.
#[derive(Default, Deserialize)]
struct NetworkPolicyConfig {
    #[serde(default, rename = "network_proxy")]
    network_proxy: NetworkProxySection,
}

/// The `[network_proxy]` table; only its `policy` sub-table is used here.
#[derive(Default, Deserialize)]
struct NetworkProxySection {
    #[serde(default)]
    policy: NetworkPolicy,
}

/// `[network_proxy.policy]`: domain allow/deny lists plus unix-socket and
/// local-binding allowances. snake_case and camelCase key spellings are both
/// accepted via serde aliases.
#[derive(Default, Deserialize)]
pub(crate) struct NetworkPolicy {
    #[serde(default, rename = "allowed_domains", alias = "allowedDomains")]
    allowed_domains: Vec<String>,
    #[serde(default, rename = "denied_domains", alias = "deniedDomains")]
    denied_domains: Vec<String>,
    #[serde(default, rename = "allow_unix_sockets", alias = "allowUnixSockets")]
    pub(crate) allow_unix_sockets: Vec<String>,
    #[serde(default, rename = "allow_local_binding", alias = "allowLocalBinding")]
    pub(crate) allow_local_binding: bool,
}
/// Resolve configured unix-socket entries (literal paths, `$VAR` references,
/// and presets — see `resolve_unix_socket_entry`) into a deduplicated,
/// sorted list of absolute paths suitable for Seatbelt `subpath` rules.
/// Blank entries and entries that do not resolve are skipped.
pub(crate) fn resolve_unix_socket_allowlist(entries: &[String]) -> Vec<PathBuf> {
    let mut resolved = Vec::new();
    let mut seen = HashSet::new();
    for entry in entries {
        let entry = entry.trim();
        if entry.is_empty() {
            continue;
        }
        for candidate in resolve_unix_socket_entry(entry) {
            // `seen` deduplicates while preserving first-occurrence order;
            // the final sort makes the output deterministic regardless.
            if !seen.insert(candidate.clone()) {
                continue;
            }
            resolved.push(candidate);
        }
    }
    resolved.sort();
    resolved
}
/// Expand one configured allowlist entry into candidate socket path strings,
/// then validate each via `parse_unix_socket_candidate`.
///
/// Supported spellings:
/// - preset aliases `ssh-agent` / `ssh_auth_sock` / `ssh_auth_socket`, which
///   read `$SSH_AUTH_SOCK`
/// - `$VAR` or `${VAR}` environment references
/// - anything else is taken as a literal path
fn resolve_unix_socket_entry(entry: &str) -> Vec<PathBuf> {
    // Presets are intentionally simple: they resolve to a path (or set of paths)
    // and are ultimately translated into Seatbelt `subpath` rules.
    let entry = entry.trim();
    if entry.is_empty() {
        return Vec::new();
    }
    let mut candidates: Vec<String> = Vec::new();
    match entry {
        "ssh-agent" | "ssh_auth_sock" | "ssh_auth_socket" => {
            if let Some(value) = std::env::var_os("SSH_AUTH_SOCK") {
                candidates.push(value.to_string_lossy().to_string());
            }
        }
        _ => {
            if let Some(var) = entry.strip_prefix('$') {
                candidates.extend(resolve_env_unix_socket(var));
            } else if entry.starts_with("${") && entry.ends_with('}') {
                candidates.extend(resolve_env_unix_socket(&entry[2..entry.len() - 1]));
            } else {
                candidates.push(entry.to_string());
            }
        }
    }
    candidates
        .into_iter()
        .filter_map(|candidate| parse_unix_socket_candidate(&candidate))
        .collect()
}
/// Read environment variable `var` as a single-element candidate list.
/// Returns an empty list for a blank name or an unset variable; non-UTF-8
/// values are lossily converted.
fn resolve_env_unix_socket(var: &str) -> Vec<String> {
    let name = var.trim();
    if name.is_empty() {
        return Vec::new();
    }
    match std::env::var_os(name) {
        Some(value) => vec![value.to_string_lossy().into_owned()],
        None => Vec::new(),
    }
}
/// Validate one candidate socket spelling into an absolute path.
///
/// Accepts `unix://<path>`, `unix:<path>`, or a bare path; rejects blank and
/// relative candidates. The result is canonicalized when possible (falling
/// back to the literal path) so later prefix comparisons are stable.
fn parse_unix_socket_candidate(candidate: &str) -> Option<PathBuf> {
    let raw = candidate.trim();
    if raw.is_empty() {
        return None;
    }
    // `unix://` must be tried before `unix:` since it shares the prefix.
    let stripped = raw
        .strip_prefix("unix://")
        .or_else(|| raw.strip_prefix("unix:"))
        .unwrap_or(raw);
    let path = PathBuf::from(stripped);
    if !path.is_absolute() {
        return None;
    }
    Some(path.canonicalize().unwrap_or(path))
}
/// Load `[network_proxy.policy]` from `config_path`; a missing file yields
/// the default (empty) policy rather than an error.
///
/// # Errors
/// Fails when the file exists but cannot be read or parsed as TOML.
pub(crate) fn load_network_policy(config_path: &Path) -> Result<NetworkPolicy> {
    if !config_path.exists() {
        return Ok(NetworkPolicy::default());
    }
    let raw = std::fs::read_to_string(config_path).with_context(|| {
        format!(
            "failed to read network proxy config at {}",
            config_path.display()
        )
    })?;
    let config: NetworkPolicyConfig = toml::from_str(&raw).with_context(|| {
        format!(
            "failed to parse network proxy config at {}",
            config_path.display()
        )
    })?;
    Ok(config.network_proxy.policy)
}
/// Exact, ASCII case-insensitive membership test for a string list.
fn list_contains(domains: &[String], host: &str) -> bool {
    for entry in domains {
        if entry.eq_ignore_ascii_case(host) {
            return true;
        }
    }
    false
}
/// Case-insensitive wildcard match of `host` against a policy `pattern`.
///
/// Uses `*`/`?` glob semantics; additionally a `*.example.com` pattern also
/// matches the apex `example.com` itself. Blank patterns never match.
fn host_matches(pattern: &str, host: &str) -> bool {
    let pattern = pattern.trim();
    if pattern.is_empty() {
        return false;
    }
    let matcher: WildMatchPattern<'*', '?'> = WildMatchPattern::new_case_insensitive(pattern);
    if matcher.matches(host) {
        return true;
    }
    if let Some(apex) = pattern.strip_prefix("*.") {
        return apex.eq_ignore_ascii_case(host);
    }
    false
}

/// Collect every hostname referenced by `command`, including commands nested
/// inside a `sh -c`-style script. Order is unspecified (set-backed).
fn extract_hosts_from_command(command: &[String]) -> Vec<String> {
    let mut hosts = HashSet::new();
    extract_hosts_from_tokens(command, &mut hosts);
    for tokens in extract_shell_script_commands(command) {
        extract_hosts_from_tokens(&tokens, &mut hosts);
    }
    hosts.into_iter().collect()
}
/// If `command` looks like it needs the SSH agent, return the current
/// `$SSH_AUTH_SOCK` path (absolute, canonicalized when possible).
///
/// Triggers on `ssh`/`scp`/`sftp`/`ssh-add` directly, and on `git` whose
/// arguments mention an `ssh://` URL or an scp-style remote
/// (`git@host:path`). Returns `None` when the command does not need the
/// agent or `SSH_AUTH_SOCK` is unset/unusable.
fn ssh_auth_sock_if_needed(command: &[String]) -> Option<PathBuf> {
    let Some(cmd0) = command.first() else {
        return None;
    };
    // Compare on the basename so absolute paths like /usr/bin/ssh match.
    let cmd = std::path::Path::new(cmd0)
        .file_name()
        .and_then(|name| name.to_str())
        .unwrap_or("");
    let needs_sock = match cmd {
        "ssh" | "scp" | "sftp" | "ssh-add" => true,
        "git" => command
            .iter()
            .skip(1)
            .any(|arg| arg.contains("ssh://") || looks_like_scp_host(arg)),
        _ => false,
    };
    if !needs_sock {
        return None;
    }
    let sock = std::env::var_os("SSH_AUTH_SOCK")?;
    let sock = sock.to_string_lossy().to_string();
    parse_unix_socket_candidate(&sock)
}
/// Heuristic for scp-style remotes such as `git@github.com:owner/repo.git`:
/// contains `@` and `:` but no `://` scheme, and is not an option flag.
fn looks_like_scp_host(value: &str) -> bool {
    // e.g. git@github.com:owner/repo.git
    let candidate = value.trim();
    if candidate.is_empty() || candidate.starts_with('-') {
        return false;
    }
    let has_userinfo = candidate.contains('@');
    let has_colon = candidate.contains(':');
    let has_scheme = candidate.contains("://");
    has_userinfo && has_colon && !has_scheme
}
/// Scan one command's tokens for URL-bearing arguments of known network
/// tools and insert the referenced hostnames into `hosts`.
///
/// Only a fixed set of tools is inspected (curl/wget/git/gh/ssh/scp/rsync
/// and common package managers); `python -m pip ...` is treated as `pip`.
/// Any other command contributes nothing.
fn extract_hosts_from_tokens(tokens: &[String], hosts: &mut HashSet<String>) {
    let (cmd0, args) = match tokens.split_first() {
        Some((cmd0, args)) => (cmd0.as_str(), args),
        None => return,
    };
    // Compare on the basename so absolute tool paths match too.
    let cmd = std::path::Path::new(cmd0)
        .file_name()
        .and_then(|name| name.to_str())
        .unwrap_or("");
    let (_tool, tool_args) = match cmd {
        "curl" | "wget" | "git" | "gh" | "ssh" | "scp" | "rsync" => (cmd, args),
        "npm" | "yarn" | "pnpm" | "pip" | "pip3" | "pipx" | "cargo" | "go" => (cmd, args),
        "python" | "python3"
            if matches!(
                (args.first(), args.get(1)),
                (Some(flag), Some(module)) if flag == "-m" && module == "pip"
            ) =>
        {
            ("pip", &args[2..])
        }
        _ => return,
    };
    if tool_args.is_empty() {
        return;
    }
    for arg in tool_args {
        if let Some(host) = extract_host_from_url(arg) {
            hosts.insert(host);
        }
    }
}

/// When `command` is `bash|zsh|sh -c/-lc <script>`, split the script into its
/// constituent simple commands (split on `&&`, `||`, `;`, `|`) for host
/// extraction. Falls back to whitespace splitting when the script cannot be
/// shell-lexed. Returns an empty list for any other command shape.
fn extract_shell_script_commands(command: &[String]) -> Vec<Vec<String>> {
    let Some(cmd0) = command.first() else {
        return Vec::new();
    };
    let cmd = std::path::Path::new(cmd0)
        .file_name()
        .and_then(|name| name.to_str())
        .unwrap_or("");
    if !matches!(cmd, "bash" | "zsh" | "sh") {
        return Vec::new();
    }
    let Some(flag) = command.get(1) else {
        return Vec::new();
    };
    if !matches!(flag.as_str(), "-lc" | "-c") {
        return Vec::new();
    }
    let Some(script) = command.get(2) else {
        return Vec::new();
    };
    let tokens = shlex_split(script)
        .unwrap_or_else(|| script.split_whitespace().map(ToString::to_string).collect());
    split_shell_tokens_into_commands(&tokens)
}
/// Partition a flat token stream into simple commands, treating shell
/// connectives (`&&`, `||`, `;`, `|`) as separators. Empty segments
/// (consecutive separators) are dropped.
fn split_shell_tokens_into_commands(tokens: &[String]) -> Vec<Vec<String>> {
    let mut commands: Vec<Vec<String>> = Vec::new();
    let mut pending: Vec<String> = Vec::new();
    for token in tokens {
        if is_shell_separator(token) {
            // Flush the accumulated command, skipping empty runs.
            if !pending.is_empty() {
                commands.push(std::mem::take(&mut pending));
            }
        } else {
            pending.push(token.clone());
        }
    }
    if !pending.is_empty() {
        commands.push(pending);
    }
    commands
}

/// Whether `token` is one of the shell connectives we split commands on.
fn is_shell_separator(token: &str) -> bool {
    ["&&", "||", ";", "|"].contains(&token)
}
/// Pull a hostname out of a URL-shaped token, if it carries one of the
/// recognized schemes; surrounding quotes/punctuation are stripped first.
fn extract_host_from_url(value: &str) -> Option<String> {
    const SCHEMES: [&str; 5] = ["http://", "https://", "ssh://", "git://", "git+ssh://"];
    let cleaned = value
        .trim()
        .trim_matches(|c: char| matches!(c, '"' | '\'' | '(' | ')' | ';' | ','));
    if cleaned.is_empty() {
        return None;
    }
    SCHEMES
        .iter()
        .find_map(|scheme| cleaned.strip_prefix(scheme))
        .and_then(normalize_host)
}

/// Reduce the post-scheme portion of a URL to a bare hostname: keep the
/// authority (before the first `/`), strip userinfo (`user@`) and port
/// (`:443`), then trim stray punctuation. Returns `None` if nothing remains.
fn normalize_host(value: &str) -> Option<String> {
    let authority = value.split('/').next().unwrap_or("");
    let after_user = match authority.rsplit_once('@') {
        Some((_, tail)) => tail,
        None => authority,
    };
    let bare = match after_user.split_once(':') {
        Some((head, _)) => head,
        None => after_user,
    };
    let host = bare.trim_matches(|c: char| matches!(c, '.' | ',' | ';'));
    if host.is_empty() {
        None
    } else {
        Some(host.to_string())
    }
}
/// Serialize `doc` back to `path`, creating parent directories as needed and
/// guaranteeing a trailing newline.
///
/// # Errors
/// Fails when the directory cannot be created or the file cannot be written.
fn write_document(path: &Path, doc: &DocumentMut) -> Result<()> {
    if let Some(parent) = path.parent() {
        std::fs::create_dir_all(parent)
            .with_context(|| format!("failed to create {}", parent.display()))?;
    }
    let mut output = doc.to_string();
    if !output.ends_with('\n') {
        output.push('\n');
    }
    std::fs::write(path, output)
        .with_context(|| format!("failed to write network proxy config at {}", path.display()))?;
    Ok(())
}
/// Get or create the top-level `[network_proxy]` table, marked non-implicit
/// so it is emitted even when otherwise empty.
fn ensure_network_proxy_table(doc: &mut DocumentMut) -> &mut TomlTable {
    let entry = doc
        .entry(NETWORK_PROXY_TABLE)
        .or_insert_with(|| TomlItem::Table(TomlTable::new()));
    let table = ensure_table_for_write(entry);
    table.set_implicit(false);
    table
}

/// Get or create `[network_proxy.policy]` underneath the proxy table.
fn ensure_policy_table(doc: &mut DocumentMut) -> &mut TomlTable {
    let network_proxy = ensure_network_proxy_table(doc);
    let entry = network_proxy
        .entry(NETWORK_PROXY_POLICY_TABLE)
        .or_insert_with(|| TomlItem::Table(TomlTable::new()));
    let table = ensure_table_for_write(entry);
    table.set_implicit(false);
    table
}

/// Coerce an arbitrary TOML item into a mutable standard table: inline
/// tables are converted (preserving their entries), anything else is
/// replaced with an empty table. The loop re-inspects the slot after each
/// rewrite and exits once it holds a standard table.
fn ensure_table_for_write(item: &mut TomlItem) -> &mut TomlTable {
    loop {
        match item {
            TomlItem::Table(table) => return table,
            TomlItem::Value(value) => {
                if let Some(inline) = value.as_inline_table() {
                    *item = TomlItem::Table(table_from_inline(inline));
                } else {
                    *item = TomlItem::Table(TomlTable::new());
                }
            }
            _ => {
                *item = TomlItem::Table(TomlTable::new());
            }
        }
    }
}

/// Convert an inline table (`{ a = 1 }`) into an explicit standard table,
/// copying every entry.
fn table_from_inline(inline: &InlineTable) -> TomlTable {
    let mut table = TomlTable::new();
    table.set_implicit(false);
    for (key, value) in inline.iter() {
        table.insert(key, TomlItem::Value(value.clone()));
    }
    table
}
/// Get or create the array at `key`, replacing any existing non-array value
/// with an empty array so the caller always receives a mutable array.
fn ensure_array<'a>(table: &'a mut TomlTable, key: &str) -> &'a mut TomlArray {
    let entry = table
        .entry(key)
        .or_insert_with(|| TomlItem::Value(TomlArray::new().into()));
    if entry.as_array().is_none() {
        *entry = TomlItem::Value(TomlArray::new().into());
    }
    match entry {
        TomlItem::Value(value) => value
            .as_array_mut()
            .unwrap_or_else(|| unreachable!("array should exist after normalization")),
        _ => unreachable!("array should be a value after normalization"),
    }
}

/// Append `host` unless an equal string entry (ASCII case-insensitive)
/// already exists. Returns `true` when the array changed.
fn add_domain(array: &mut TomlArray, host: &str) -> bool {
    if array
        .iter()
        .filter_map(|item| item.as_str())
        .any(|existing| existing.eq_ignore_ascii_case(host))
    {
        return false;
    }
    array.push(host);
    true
}
fn remove_domain(array: &mut TomlArray, host: &str) -> bool {
let mut removed = false;
let mut updated = TomlArray::new();
for item in array.iter() {
let should_remove = item
.as_str()
.is_some_and(|value| value.eq_ignore_ascii_case(host));
if should_remove {
removed = true;
} else {
updated.push(item.clone());
}
}
if removed {
*array = updated;
}
removed
}

View File

@@ -118,7 +118,7 @@ impl SandboxManager {
let mut seatbelt_env = HashMap::new();
seatbelt_env.insert(CODEX_SANDBOX_ENV_VAR.to_string(), "seatbelt".to_string());
let mut args =
create_seatbelt_command_args(command.clone(), policy, sandbox_policy_cwd);
create_seatbelt_command_args(command.clone(), policy, sandbox_policy_cwd, &env);
let mut full_command = Vec::with_capacity(1 + args.len());
full_command.push(MACOS_PATH_TO_SEATBELT_EXECUTABLE.to_string());
full_command.append(&mut args);

View File

@@ -1,18 +1,29 @@
#![cfg(target_os = "macos")]
use std::collections::BTreeSet;
use std::collections::HashMap;
use std::ffi::CStr;
use std::path::Path;
use std::path::PathBuf;
use tokio::process::Child;
use crate::config;
use crate::network_proxy;
use crate::protocol::SandboxPolicy;
use crate::spawn::CODEX_SANDBOX_ENV_VAR;
use crate::spawn::StdioPolicy;
use crate::spawn::spawn_child_async;
const MACOS_SEATBELT_BASE_POLICY: &str = include_str!("seatbelt_base_policy.sbpl");
const MACOS_SEATBELT_NETWORK_POLICY: &str = include_str!("seatbelt_network_policy.sbpl");
const MACOS_SEATBELT_NETWORK_POLICY_BASE: &str = include_str!("seatbelt_network_policy.sbpl");
const PROXY_ENV_KEYS: &[&str] = &[
"HTTP_PROXY",
"HTTPS_PROXY",
"ALL_PROXY",
"http_proxy",
"https_proxy",
"all_proxy",
];
/// When working with `sandbox-exec`, only consider `sandbox-exec` in `/usr/bin`
/// to defend against an attacker trying to inject a malicious version on the
@@ -28,7 +39,7 @@ pub async fn spawn_command_under_seatbelt(
stdio_policy: StdioPolicy,
mut env: HashMap<String, String>,
) -> std::io::Result<Child> {
let args = create_seatbelt_command_args(command, sandbox_policy, sandbox_policy_cwd);
let args = create_seatbelt_command_args(command, sandbox_policy, sandbox_policy_cwd, &env);
let arg0 = None;
env.insert(CODEX_SANDBOX_ENV_VAR.to_string(), "seatbelt".to_string());
spawn_child_async(
@@ -43,10 +54,133 @@ pub async fn spawn_command_under_seatbelt(
.await
}
/// Whether `host` names the local loopback interface: `localhost` (any
/// case) or one of the literal loopback IPs.
fn is_loopback_host(host: &str) -> bool {
    matches!(
        host.to_ascii_lowercase().as_str(),
        "localhost" | "127.0.0.1" | "::1"
    )
}
/// Loopback proxy ports discovered from the process environment.
#[derive(Default)]
struct ProxyPorts {
    /// Ports of HTTP(S) proxies bound to loopback, deduplicated and ascending.
    http: Vec<u16>,
    /// Ports of SOCKS proxies bound to loopback, deduplicated and ascending.
    socks: Vec<u16>,
    /// Some proxy env var was present at all, even if unparsable.
    has_proxy_env: bool,
    /// At least one proxy env var pointed at a non-loopback host.
    has_non_loopback_proxy_env: bool,
}

/// Parse the standard proxy environment variables (`HTTP_PROXY`,
/// `HTTPS_PROXY`, `ALL_PROXY` and lowercase variants) into [`ProxyPorts`].
///
/// URLs without a usable host:port are skipped; URLs pointing at
/// non-loopback hosts are skipped but flagged. A URL's scheme decides the
/// bucket: anything starting with "socks" is SOCKS, everything else
/// (including scheme-less URLs) counts as HTTP.
fn proxy_ports_from_env(env: &HashMap<String, String>) -> ProxyPorts {
    // BTreeSets give deduplicated, sorted output.
    let mut http_ports = BTreeSet::new();
    let mut socks_ports = BTreeSet::new();
    let mut has_proxy_env = false;
    let mut has_non_loopback_proxy_env = false;
    for key in PROXY_ENV_KEYS {
        let Some(proxy_url) = env.get(*key) else {
            continue;
        };
        has_proxy_env = true;
        let Some((host, port)) = network_proxy::proxy_host_port(proxy_url) else {
            continue;
        };
        let Some(port) = normalize_proxy_port(port) else {
            continue;
        };
        if !is_loopback_host(&host) {
            has_non_loopback_proxy_env = true;
            continue;
        }
        let scheme = proxy_url_scheme(proxy_url).unwrap_or("http");
        if scheme.to_ascii_lowercase().starts_with("socks") {
            socks_ports.insert(port);
        } else {
            http_ports.insert(port);
        }
    }
    ProxyPorts {
        http: http_ports.into_iter().collect(),
        socks: socks_ports.into_iter().collect(),
        has_proxy_env,
        has_non_loopback_proxy_env,
    }
}
/// Scheme portion of a proxy URL, when one is present.
fn proxy_url_scheme(proxy_url: &str) -> Option<&str> {
    let (scheme, _) = proxy_url.split_once("://")?;
    Some(scheme)
}

/// Clamp a parsed port into the valid non-zero `u16` range.
fn normalize_proxy_port(port: i64) -> Option<u16> {
    u16::try_from(port).ok().filter(|port| *port != 0)
}

/// Escape a string for embedding inside a double-quoted SBPL literal.
fn escape_sbpl_string(value: &str) -> String {
    let mut escaped = String::with_capacity(value.len());
    for ch in value.chars() {
        match ch {
            // Backslashes and quotes are the only characters SBPL strings
            // need escaped here.
            '\\' => escaped.push_str("\\\\"),
            '"' => escaped.push_str("\\\""),
            other => escaped.push(other),
        }
    }
    escaped
}
/// Build the Seatbelt network-rule text for a proxy-restricted sandbox.
///
/// On macOS, `sandbox-exec` only accepts `localhost` or `*` in network
/// address filters, so instead of per-domain rules we allow only the
/// loopback proxy ports (plus explicitly allowed unix sockets) and let the
/// network proxy itself enforce per-domain policy and prompting.
fn build_network_policy(policy: &network_proxy::NetworkPolicy, proxy_ports: &ProxyPorts) -> String {
    let mut network_rules = String::from("; Network\n");
    // With no proxy env vars there is nothing to funnel traffic through, so
    // fall back to unrestricted network access.
    if !proxy_ports.has_proxy_env {
        network_rules.push_str("(allow network*)\n");
        return format!("{network_rules}{MACOS_SEATBELT_NETWORK_POLICY_BASE}");
    }
    if policy.allow_local_binding {
        network_rules.push_str("(allow network-bind (local ip \"localhost:*\"))\n");
        network_rules.push_str("(allow network-inbound (local ip \"localhost:*\"))\n");
        network_rules.push_str("(allow network-outbound (local ip \"localhost:*\"))\n");
    }
    // Explicitly allowed unix sockets (e.g. the SSH agent) become subpath
    // rules; an empty allowlist simply contributes nothing.
    for socket_path in network_proxy::resolve_unix_socket_allowlist(&policy.allow_unix_sockets) {
        let escaped = escape_sbpl_string(&socket_path.to_string_lossy());
        network_rules.push_str(&format!("(allow network* (subpath \"{escaped}\"))\n"));
    }
    // HTTP and SOCKS proxy ports receive identical loopback-only rules, so
    // emit them from one loop (HTTP ports first, matching prior output).
    for port in proxy_ports.http.iter().chain(proxy_ports.socks.iter()) {
        network_rules.push_str(&format!(
            "(allow network-bind (local ip \"localhost:{port}\"))\n"
        ));
        network_rules.push_str(&format!(
            "(allow network-inbound (local ip \"localhost:{port}\"))\n"
        ));
        network_rules.push_str(&format!(
            "(allow network-outbound (remote ip \"localhost:{port}\"))\n"
        ));
    }
    if proxy_ports.has_non_loopback_proxy_env {
        network_rules
            .push_str("; NOTE: Non-loopback proxies are not supported under `sandbox-exec`.\n");
    }
    format!("{network_rules}{MACOS_SEATBELT_NETWORK_POLICY_BASE}")
}
pub(crate) fn create_seatbelt_command_args(
command: Vec<String>,
sandbox_policy: &SandboxPolicy,
sandbox_policy_cwd: &Path,
env: &HashMap<String, String>,
) -> Vec<String> {
let (file_write_policy, file_write_dir_params) = {
if sandbox_policy.has_full_disk_write_access() {
@@ -113,9 +247,14 @@ pub(crate) fn create_seatbelt_command_args(
// TODO(mbolin): apply_patch calls must also honor the SandboxPolicy.
let network_policy = if sandbox_policy.has_full_network_access() {
MACOS_SEATBELT_NETWORK_POLICY
let proxy_ports = proxy_ports_from_env(env);
let policy = config::default_config_path()
.ok()
.and_then(|path| network_proxy::load_network_policy(&path).ok())
.unwrap_or_default();
build_network_policy(&policy, &proxy_ports)
} else {
""
String::new()
};
let full_policy = format!(
@@ -168,17 +307,49 @@ mod tests {
use crate::protocol::SandboxPolicy;
use crate::seatbelt::MACOS_PATH_TO_SEATBELT_EXECUTABLE;
use pretty_assertions::assert_eq;
use serial_test::serial;
use std::fs;
use std::path::Path;
use std::path::PathBuf;
use std::process::Command;
use tempfile::TempDir;
/// Test guard that points `CODEX_HOME` at a temp directory and restores the
/// previous value (or unsets the variable) on drop. Tests using it must run
/// serially (`#[serial]`) because process environment is global state.
struct CodexHomeGuard {
    // Prior value of CODEX_HOME, if it was set.
    previous: Option<String>,
}
impl CodexHomeGuard {
    /// Capture the current `CODEX_HOME` and replace it with `path`.
    fn new(path: &Path) -> Self {
        let previous = std::env::var("CODEX_HOME").ok();
        // SAFETY: these tests execute serially, and we restore the original value in Drop.
        unsafe {
            std::env::set_var("CODEX_HOME", path);
        }
        Self { previous }
    }
}
impl Drop for CodexHomeGuard {
    fn drop(&mut self) {
        // SAFETY: these tests execute serially, and we restore the original value before other
        // tests run.
        unsafe {
            if let Some(previous) = self.previous.take() {
                std::env::set_var("CODEX_HOME", previous);
            } else {
                std::env::remove_var("CODEX_HOME");
            }
        }
    }
}
#[test]
#[serial]
fn create_seatbelt_args_with_read_only_git_and_codex_subpaths() {
// Create a temporary workspace with two writable roots: one containing
// top-level .git and .codex directories and one without them.
let tmp = TempDir::new().expect("tempdir");
let _codex_home_guard = CodexHomeGuard::new(tmp.path());
let PopulatedTmp {
vulnerable_root,
vulnerable_root_canonical,
@@ -189,6 +360,7 @@ mod tests {
} = populate_tmpdir(tmp.path());
let cwd = tmp.path().join("cwd");
fs::create_dir_all(&cwd).expect("create cwd");
let env = std::collections::HashMap::new();
// Build a policy that only includes the two test roots as writable and
// does not automatically include defaults TMPDIR or /tmp.
@@ -217,7 +389,7 @@ mod tests {
.iter()
.map(std::string::ToString::to_string)
.collect();
let args = create_seatbelt_command_args(shell_command.clone(), &policy, &cwd);
let args = create_seatbelt_command_args(shell_command.clone(), &policy, &cwd, &env);
// Build the expected policy text using a raw string for readability.
// Note that the policy includes:
@@ -280,6 +452,7 @@ mod tests {
.current_dir(&cwd)
.output()
.expect("execute seatbelt command");
let stderr = String::from_utf8_lossy(&output.stderr).into_owned();
assert_eq!(
"sandbox_mode = \"read-only\"\n",
String::from_utf8_lossy(&fs::read(&config_toml).expect("read config.toml")),
@@ -290,8 +463,14 @@ mod tests {
"command to write {} should fail under seatbelt",
&config_toml.display()
);
if stderr.starts_with("sandbox-exec: sandbox_apply:") {
// Some environments (including Codex's own test harness) run the process under a
// Seatbelt sandbox already, which prevents nested `sandbox-exec` usage. In that case,
// we can still validate policy generation but cannot validate enforcement.
return;
}
assert_eq!(
String::from_utf8_lossy(&output.stderr),
stderr,
format!("bash: {}: Operation not permitted\n", config_toml.display()),
);
@@ -308,7 +487,8 @@ mod tests {
.iter()
.map(std::string::ToString::to_string)
.collect();
let write_hooks_file_args = create_seatbelt_command_args(shell_command_git, &policy, &cwd);
let write_hooks_file_args =
create_seatbelt_command_args(shell_command_git, &policy, &cwd, &env);
let output = Command::new(MACOS_PATH_TO_SEATBELT_EXECUTABLE)
.args(&write_hooks_file_args)
.current_dir(&cwd)
@@ -345,7 +525,7 @@ mod tests {
.map(std::string::ToString::to_string)
.collect();
let write_allowed_file_args =
create_seatbelt_command_args(shell_command_allowed, &policy, &cwd);
create_seatbelt_command_args(shell_command_allowed, &policy, &cwd, &env);
let output = Command::new(MACOS_PATH_TO_SEATBELT_EXECUTABLE)
.args(&write_allowed_file_args)
.current_dir(&cwd)
@@ -365,10 +545,12 @@ mod tests {
}
#[test]
#[serial]
fn create_seatbelt_args_for_cwd_as_git_repo() {
// Create a temporary workspace with two writable roots: one containing
// top-level .git and .codex directories and one without them.
let tmp = TempDir::new().expect("tempdir");
let _codex_home_guard = CodexHomeGuard::new(tmp.path());
let PopulatedTmp {
vulnerable_root,
vulnerable_root_canonical,
@@ -376,6 +558,7 @@ mod tests {
dot_codex_canonical,
..
} = populate_tmpdir(tmp.path());
let env = std::collections::HashMap::new();
// Build a policy that does not specify any writable_roots, but does
// use the default ones (cwd and TMPDIR) and verifies the `.git` and
@@ -400,8 +583,12 @@ mod tests {
.iter()
.map(std::string::ToString::to_string)
.collect();
let args =
create_seatbelt_command_args(shell_command.clone(), &policy, vulnerable_root.as_path());
let args = create_seatbelt_command_args(
shell_command.clone(),
&policy,
vulnerable_root.as_path(),
&env,
);
let tmpdir_env_var = std::env::var("TMPDIR")
.ok()
@@ -470,6 +657,45 @@ mod tests {
assert_eq!(expected_args, args);
}
#[test]
#[serial]
fn create_seatbelt_args_with_proxy_allowlist() {
    // Isolate CODEX_HOME in a temp dir so no real user config can leak into
    // the generated policy.
    let tmp = TempDir::new().expect("tempdir");
    let _codex_home_guard = CodexHomeGuard::new(tmp.path());
    let policy = SandboxPolicy::DangerFullAccess;
    let cwd = std::env::current_dir().expect("getcwd");
    // A loopback HTTP proxy in the command env should surface in the seatbelt
    // policy as rules scoped to that single localhost port.
    let env = std::collections::HashMap::from([(
        "HTTP_PROXY".to_string(),
        "http://127.0.0.1:3128".to_string(),
    )]);
    let args = create_seatbelt_command_args(vec!["true".to_string()], &policy, &cwd, &env);
    // NOTE(review): args[1] is assumed to be the policy text argument — confirm
    // against create_seatbelt_command_args's argument layout.
    let policy_text = &args[1];
    assert!(
        policy_text.contains("(allow network-bind (local ip \"localhost:3128\"))"),
        "expected seatbelt policy to allow local proxy binding"
    );
    assert!(
        policy_text.contains("(allow network-inbound (local ip \"localhost:3128\"))"),
        "expected seatbelt policy to allow local proxy inbound"
    );
    assert!(
        policy_text.contains("(allow network-outbound (remote ip \"localhost:3128\"))"),
        "expected seatbelt policy to allow local proxy outbound"
    );
    // sandbox-exec only accepts `localhost` or `*` in network address filters,
    // so per-domain rules and numeric loopback hosts must never be emitted.
    assert!(
        !policy_text.contains("(remote tcp"),
        "`sandbox-exec` network addresses only support `localhost` or `*`, so we must not emit host allowlists"
    );
    assert!(
        !policy_text.contains("127.0.0.1:3128"),
        "seatbelt policy must not include numeric loopback hosts (it will fail to parse)"
    );
    // The allowlist must be port-scoped, not a blanket localhost grant.
    assert!(
        !policy_text.contains("localhost:*"),
        "proxy-restricted policy should not allow all localhost ports"
    );
}
struct PopulatedTmp {
/// Path containing a .git and .codex subfolder.
/// For the purposes of this test, we consider this a "vulnerable" root

View File

@@ -1,8 +1,7 @@
; when network access is enabled, these policies are added after those in seatbelt_base_policy.sbpl
; network allow rules are injected by codex-core based on proxy settings.
; Ref https://source.chromium.org/chromium/chromium/src/+/main:sandbox/policy/mac/network.sb;drc=f8f264d5e4e7509c913f4c60c2639d15905a07e4
(allow network-outbound)
(allow network-inbound)
(allow system-socket)
(allow mach-lookup

View File

@@ -21,7 +21,6 @@ use crate::protocol::EventMsg;
use crate::protocol::ExecCommandBeginEvent;
use crate::protocol::ExecCommandEndEvent;
use crate::protocol::ExecCommandSource;
use crate::protocol::SandboxPolicy;
use crate::protocol::TaskStartedEvent;
use crate::sandboxing::ExecEnv;
use crate::sandboxing::SandboxPermissions;
@@ -96,9 +95,11 @@ impl SessionTask for UserShellCommandTask {
let exec_env = ExecEnv {
command: command.clone(),
cwd: cwd.clone(),
env: create_env(&turn_context.shell_environment_policy),
// TODO(zhao-oai): Now that we have ExecExpiration::Cancellation, we
// should use that instead of an "arbitrarily large" timeout here.
env: create_env(
&turn_context.shell_environment_policy,
&turn_context.sandbox_policy,
&turn_context.network_proxy,
),
expiration: USER_SHELL_TIMEOUT_MS.into(),
sandbox: SandboxType::None,
sandbox_permissions: SandboxPermissions::UseDefault,
@@ -112,8 +113,7 @@ impl SessionTask for UserShellCommandTask {
tx_event: session.get_tx_event(),
});
let sandbox_policy = SandboxPolicy::DangerFullAccess;
let exec_result = execute_exec_env(exec_env, &sandbox_policy, stdout_stream)
let exec_result = execute_exec_env(exec_env, &turn_context.sandbox_policy, stdout_stream)
.or_cancel(&cancellation_token)
.await;

View File

@@ -9,6 +9,7 @@ use crate::exec_env::create_env;
use crate::exec_policy::create_exec_approval_requirement_for_command;
use crate::function_tool::FunctionCallError;
use crate::is_safe_command::is_known_safe_command;
use crate::network_proxy;
use crate::protocol::ExecCommandSource;
use crate::shell::Shell;
use crate::tools::context::ToolInvocation;
@@ -22,6 +23,7 @@ use crate::tools::registry::ToolHandler;
use crate::tools::registry::ToolKind;
use crate::tools::runtimes::shell::ShellRequest;
use crate::tools::runtimes::shell::ShellRuntime;
use crate::tools::sandboxing::ExecApprovalRequirement;
use crate::tools::sandboxing::ToolCtx;
pub struct ShellHandler;
@@ -34,8 +36,12 @@ impl ShellHandler {
command: params.command,
cwd: turn_context.resolve_path(params.workdir.clone()),
expiration: params.timeout_ms.into(),
env: create_env(&turn_context.shell_environment_policy),
sandbox_permissions: params.sandbox_permissions.unwrap_or_default(),
env: create_env(
&turn_context.shell_environment_policy,
&turn_context.sandbox_policy,
&turn_context.network_proxy,
),
justification: params.justification,
arg0: None,
}
@@ -60,8 +66,12 @@ impl ShellCommandHandler {
command,
cwd: turn_context.resolve_path(params.workdir.clone()),
expiration: params.timeout_ms.into(),
env: create_env(&turn_context.shell_environment_policy),
sandbox_permissions: params.sandbox_permissions.unwrap_or_default(),
env: create_env(
&turn_context.shell_environment_policy,
&turn_context.sandbox_policy,
&turn_context.network_proxy,
),
justification: params.justification,
arg0: None,
}
@@ -252,7 +262,7 @@ impl ShellHandler {
emitter.begin(event_ctx).await;
let features = session.features();
let exec_approval_requirement = create_exec_approval_requirement_for_command(
let mut exec_approval_requirement = create_exec_approval_requirement_for_command(
&turn.exec_policy,
&features,
&exec_params.command,
@@ -261,6 +271,31 @@ impl ShellHandler {
exec_params.sandbox_permissions,
)
.await;
let network_preflight_blocked = match network_proxy::preflight_blocked_host_if_enabled(
&turn.network_proxy,
&turn.sandbox_policy,
&exec_params.command,
) {
Ok(Some(_)) => true,
Ok(None) => false,
Err(err) => {
tracing::debug!(error = %err, "network proxy preflight failed");
false
}
};
let mut network_preflight_only = false;
if network_preflight_blocked
&& matches!(
exec_approval_requirement,
ExecApprovalRequirement::Skip { .. }
)
{
exec_approval_requirement = ExecApprovalRequirement::NeedsApproval {
reason: Some("Network access requires approval.".to_string()),
proposed_execpolicy_amendment: None,
};
network_preflight_only = true;
}
let req = ShellRequest {
command: exec_params.command.clone(),
@@ -269,6 +304,7 @@ impl ShellHandler {
env: exec_params.env.clone(),
sandbox_permissions: exec_params.sandbox_permissions,
justification: exec_params.justification.clone(),
network_preflight_only,
exec_approval_requirement,
};
let mut orchestrator = ToolOrchestrator::new();
@@ -371,7 +407,11 @@ mod tests {
let expected_command = session.user_shell().derive_exec_args(&command, true);
let expected_cwd = turn_context.resolve_path(workdir.clone());
let expected_env = create_env(&turn_context.shell_environment_policy);
let expected_env = create_env(
&turn_context.shell_environment_policy,
&turn_context.sandbox_policy,
&turn_context.network_proxy,
);
let params = ShellCommandToolCallParams {
command,

View File

@@ -119,6 +119,7 @@ impl Approvable<ApplyPatchRequest> for ApplyPatchRuntime {
cwd,
Some(reason),
None,
false,
)
.await
} else if user_explicitly_approved {

View File

@@ -32,6 +32,7 @@ pub struct ShellRequest {
pub env: std::collections::HashMap<String, String>,
pub sandbox_permissions: SandboxPermissions,
pub justification: Option<String>,
pub network_preflight_only: bool,
pub exec_approval_requirement: ExecApprovalRequirement,
}
@@ -106,6 +107,7 @@ impl Approvable<ShellRequest> for ShellRuntime {
req.exec_approval_requirement
.proposed_execpolicy_amendment()
.cloned(),
req.network_preflight_only,
)
.await
})

View File

@@ -36,6 +36,7 @@ pub struct UnifiedExecRequest {
pub env: HashMap<String, String>,
pub sandbox_permissions: SandboxPermissions,
pub justification: Option<String>,
pub network_preflight_only: bool,
pub exec_approval_requirement: ExecApprovalRequirement,
}
@@ -57,6 +58,7 @@ impl UnifiedExecRequest {
env: HashMap<String, String>,
sandbox_permissions: SandboxPermissions,
justification: Option<String>,
network_preflight_only: bool,
exec_approval_requirement: ExecApprovalRequirement,
) -> Self {
Self {
@@ -65,6 +67,7 @@ impl UnifiedExecRequest {
env,
sandbox_permissions,
justification,
network_preflight_only,
exec_approval_requirement,
}
}
@@ -124,6 +127,7 @@ impl Approvable<UnifiedExecRequest> for UnifiedExecRuntime<'_> {
req.exec_approval_requirement
.proposed_execpolicy_amendment()
.cloned(),
req.network_preflight_only,
)
.await
})

View File

@@ -15,6 +15,7 @@ use crate::codex::Session;
use crate::codex::TurnContext;
use crate::exec_env::create_env;
use crate::exec_policy::create_exec_approval_requirement_for_command;
use crate::network_proxy;
use crate::protocol::BackgroundEventEvent;
use crate::protocol::EventMsg;
use crate::sandboxing::ExecEnv;
@@ -480,11 +481,27 @@ impl UnifiedExecSessionManager {
justification: Option<String>,
context: &UnifiedExecContext,
) -> Result<UnifiedExecSession, UnifiedExecError> {
let env = apply_unified_exec_env(create_env(&context.turn.shell_environment_policy));
let env = apply_unified_exec_env(create_env(
&context.turn.shell_environment_policy,
&context.turn.sandbox_policy,
&context.turn.network_proxy,
));
let features = context.session.features();
let mut orchestrator = ToolOrchestrator::new();
let mut runtime = UnifiedExecRuntime::new(self);
let exec_approval_requirement = create_exec_approval_requirement_for_command(
let network_preflight_blocked = match network_proxy::preflight_blocked_host_if_enabled(
&context.turn.network_proxy,
&context.turn.sandbox_policy,
command,
) {
Ok(Some(_)) => true,
Ok(None) => false,
Err(err) => {
tracing::debug!(error = %err, "network proxy preflight failed");
false
}
};
let mut exec_approval_requirement = create_exec_approval_requirement_for_command(
&context.turn.exec_policy,
&features,
command,
@@ -493,12 +510,27 @@ impl UnifiedExecSessionManager {
sandbox_permissions,
)
.await;
let mut network_preflight_only = false;
if network_preflight_blocked
&& matches!(
exec_approval_requirement,
crate::tools::sandboxing::ExecApprovalRequirement::Skip { .. }
)
{
exec_approval_requirement =
crate::tools::sandboxing::ExecApprovalRequirement::NeedsApproval {
reason: Some("Network access requires approval.".to_string()),
proposed_execpolicy_amendment: None,
};
network_preflight_only = true;
}
let req = UnifiedExecToolRequest::new(
command.to_vec(),
cwd,
env,
sandbox_permissions,
justification,
network_preflight_only,
exec_approval_requirement,
);
let tool_ctx = ToolCtx {

View File

@@ -1,4 +1,5 @@
#![cfg(target_os = "linux")]
use codex_core::config::types::NetworkProxyConfig;
use codex_core::config::types::ShellEnvironmentPolicy;
use codex_core::error::CodexErr;
use codex_core::error::SandboxErr;
@@ -29,25 +30,16 @@ const NETWORK_TIMEOUT_MS: u64 = 2_000;
#[cfg(target_arch = "aarch64")]
const NETWORK_TIMEOUT_MS: u64 = 10_000;
fn create_env_from_core_vars() -> HashMap<String, String> {
fn create_env_from_core_vars(sandbox_policy: &SandboxPolicy) -> HashMap<String, String> {
let policy = ShellEnvironmentPolicy::default();
create_env(&policy)
let network_proxy = NetworkProxyConfig::default();
create_env(&policy, sandbox_policy, &network_proxy)
}
#[expect(clippy::print_stdout, clippy::expect_used, clippy::unwrap_used)]
async fn run_cmd(cmd: &[&str], writable_roots: &[PathBuf], timeout_ms: u64) {
let cwd = std::env::current_dir().expect("cwd should exist");
let sandbox_cwd = cwd.clone();
let params = ExecParams {
command: cmd.iter().copied().map(str::to_owned).collect(),
cwd,
expiration: timeout_ms.into(),
env: create_env_from_core_vars(),
sandbox_permissions: SandboxPermissions::UseDefault,
justification: None,
arg0: None,
};
let sandbox_policy = SandboxPolicy::WorkspaceWrite {
writable_roots: writable_roots
.iter()
@@ -60,6 +52,15 @@ async fn run_cmd(cmd: &[&str], writable_roots: &[PathBuf], timeout_ms: u64) {
exclude_tmpdir_env_var: true,
exclude_slash_tmp: true,
};
let params = ExecParams {
command: cmd.iter().copied().map(str::to_owned).collect(),
cwd,
expiration: timeout_ms.into(),
env: create_env_from_core_vars(&sandbox_policy),
sandbox_permissions: SandboxPermissions::UseDefault,
justification: None,
arg0: None,
};
let sandbox_program = env!("CARGO_BIN_EXE_codex-linux-sandbox");
let codex_linux_sandbox_exe = Some(PathBuf::from(sandbox_program));
let res = process_exec_tool_call(

View File

@@ -181,6 +181,7 @@ async fn run_codex_tool_session_inner(
reason: _,
proposed_execpolicy_amendment: _,
parsed_cmd,
..
}) => {
handle_exec_approval_request(
command,

View File

@@ -0,0 +1,43 @@
[package]
name = "codex-network-proxy"
edition = "2024"
version = { workspace = true }
[[bin]]
name = "codex-network-proxy"
path = "src/main.rs"
[lib]
name = "codex_network_proxy"
path = "src/lib.rs"
[lints]
workspace = true
[features]
default = ["mitm"]
mitm = [
"tokio-rustls",
"rustls",
"rustls-native-certs",
"rustls-pemfile",
"rcgen",
]
[dependencies]
anyhow = { workspace = true }
clap = { workspace = true, features = ["derive"] }
codex-core = { workspace = true }
globset = { workspace = true }
hyper = { version = "0.14", features = ["full"] }
rcgen = { version = "0.13", features = ["pem", "x509-parser"], optional = true }
rustls = { version = "0.21", optional = true }
rustls-native-certs = { version = "0.6", optional = true }
rustls-pemfile = { version = "1", optional = true }
serde = { workspace = true, features = ["derive"] }
serde_json = { workspace = true }
tokio = { workspace = true, features = ["full"] }
tokio-rustls = { version = "0.24", optional = true }
toml = { workspace = true }
tracing = { workspace = true }
tracing-subscriber = { workspace = true, features = ["fmt"] }

View File

@@ -0,0 +1,108 @@
use crate::config::NetworkMode;
use crate::responses::json_response;
use crate::responses::text_response;
use crate::state::AppState;
use anyhow::Result;
use hyper::Body;
use hyper::Method;
use hyper::Request;
use hyper::Response;
use hyper::Server;
use hyper::StatusCode;
use hyper::body::to_bytes;
use hyper::service::make_service_fn;
use hyper::service::service_fn;
use serde::Deserialize;
use serde_json::json;
use std::convert::Infallible;
use std::net::SocketAddr;
use std::sync::Arc;
use tracing::error;
use tracing::info;
/// Serve the admin/control HTTP API (health, config, patterns, blocked queue,
/// mode switching, reload) on `addr` until the server terminates.
///
/// Each accepted connection gets its own service closure sharing `state`.
pub async fn run_admin_api(state: Arc<AppState>, addr: SocketAddr) -> Result<()> {
    let service_factory = make_service_fn(move |_conn: &hyper::server::conn::AddrStream| {
        let per_conn_state = state.clone();
        async move {
            let svc = service_fn(move |req| handle_admin_request(req, per_conn_state.clone()));
            Ok::<_, Infallible>(svc)
        }
    });
    let server = Server::bind(&addr).serve(service_factory);
    info!(addr = %addr, "admin API listening");
    server.await?;
    Ok(())
}
/// Route a single admin-API request.
///
/// Endpoints:
/// - `GET  /health`   -> plain "ok"
/// - `GET  /config`   -> current proxy config as JSON
/// - `GET  /patterns` -> `{"allowed": [...], "denied": [...]}`
/// - `GET  /blocked`  -> blocked-request queue (drained by the state layer)
/// - `POST /mode`     -> `{"mode": "limited"|"full"}` switches network mode
/// - `POST /reload`   -> forces a config reload
///
/// All failures surface as HTTP status codes; the function itself never errors.
async fn handle_admin_request(
    req: Request<Body>,
    state: Arc<AppState>,
) -> Result<Response<Body>, Infallible> {
    let method = req.method().clone();
    let path = req.uri().path().to_string();
    let response = match (method, path.as_str()) {
        (Method::GET, "/health") => Response::new(Body::from("ok")),
        (Method::GET, "/config") => match state.current_cfg().await {
            Ok(cfg) => json_response(&cfg),
            Err(err) => {
                error!(error = %err, "failed to load config");
                text_response(StatusCode::INTERNAL_SERVER_ERROR, "error")
            }
        },
        (Method::GET, "/patterns") => match state.current_patterns().await {
            Ok((allow, deny)) => json_response(&json!({"allowed": allow, "denied": deny})),
            Err(err) => {
                error!(error = %err, "failed to load patterns");
                text_response(StatusCode::INTERNAL_SERVER_ERROR, "error")
            }
        },
        // NOTE(review): drain_blocked appears to consume queued entries, so
        // repeated polls would only see new blocks — confirm in AppState.
        (Method::GET, "/blocked") => match state.drain_blocked().await {
            Ok(blocked) => json_response(&json!({ "blocked": blocked })),
            Err(err) => {
                error!(error = %err, "failed to read blocked queue");
                text_response(StatusCode::INTERNAL_SERVER_ERROR, "error")
            }
        },
        (Method::POST, "/mode") => {
            // Body is read fully; malformed or missing bodies are client errors.
            let body = match to_bytes(req.into_body()).await {
                Ok(bytes) => bytes,
                Err(err) => {
                    error!(error = %err, "failed to read mode body");
                    return Ok(text_response(StatusCode::BAD_REQUEST, "invalid body"));
                }
            };
            if body.is_empty() {
                return Ok(text_response(StatusCode::BAD_REQUEST, "missing body"));
            }
            let update: ModeUpdate = match serde_json::from_slice(&body) {
                Ok(update) => update,
                Err(err) => {
                    error!(error = %err, "failed to parse mode update");
                    return Ok(text_response(StatusCode::BAD_REQUEST, "invalid json"));
                }
            };
            match state.set_network_mode(update.mode).await {
                Ok(()) => json_response(&json!({"status": "ok", "mode": update.mode})),
                Err(err) => {
                    error!(error = %err, "mode update failed");
                    text_response(StatusCode::INTERNAL_SERVER_ERROR, "mode update failed")
                }
            }
        }
        (Method::POST, "/reload") => match state.force_reload().await {
            Ok(()) => json_response(&json!({"status": "reloaded"})),
            Err(err) => {
                error!(error = %err, "reload failed");
                text_response(StatusCode::INTERNAL_SERVER_ERROR, "reload failed")
            }
        },
        _ => text_response(StatusCode::NOT_FOUND, "not found"),
    };
    Ok(response)
}
/// JSON body accepted by `POST /mode`: `{"mode": "limited" | "full"}`.
#[derive(Deserialize)]
struct ModeUpdate {
    mode: NetworkMode,
}

View File

@@ -0,0 +1,203 @@
use anyhow::Context;
use anyhow::Result;
use codex_core::config::default_config_path;
use serde::Deserialize;
use serde::Serialize;
use std::net::IpAddr;
use std::net::SocketAddr;
use std::path::PathBuf;
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Config {
#[serde(default)]
pub network_proxy: NetworkProxyConfig,
}
impl Default for Config {
fn default() -> Self {
Self {
network_proxy: NetworkProxyConfig::default(),
}
}
}
/// Settings under `[network_proxy]` in the Codex config file.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct NetworkProxyConfig {
    /// Master switch; when false the proxy feature is inert.
    #[serde(default)]
    pub enabled: bool,
    /// URL the HTTP proxy listens on (default `http://127.0.0.1:3128`).
    #[serde(default = "default_proxy_url")]
    pub proxy_url: String,
    /// URL of the admin/control API (default `http://127.0.0.1:8080`).
    #[serde(default = "default_admin_url")]
    pub admin_url: String,
    /// Limited (read-only methods) vs. full network access.
    #[serde(default)]
    pub mode: NetworkMode,
    /// Domain and unix-socket allow/deny lists.
    #[serde(default)]
    pub policy: NetworkPolicy,
    /// TLS-interception settings.
    #[serde(default)]
    pub mitm: MitmConfig,
}

// Manual impl is required: the URL fields have non-trivial defaults, so a
// derived Default would not mirror the serde field-level defaults above.
impl Default for NetworkProxyConfig {
    fn default() -> Self {
        Self {
            enabled: false,
            proxy_url: default_proxy_url(),
            admin_url: default_admin_url(),
            mode: NetworkMode::default(),
            policy: NetworkPolicy::default(),
            mitm: MitmConfig::default(),
        }
    }
}
/// Allow/deny lists under `[network_proxy.policy]`.
///
/// Every field's manual default (empty vec / `false`) equals the type's own
/// `Default`, so the hand-written impl is replaced by `#[derive(Default)]`
/// with identical behavior.
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
pub struct NetworkPolicy {
    /// Domains the proxy may forward to.
    #[serde(default, rename = "allowed_domains", alias = "allowedDomains")]
    pub allowed_domains: Vec<String>,
    /// Domains that are always blocked.
    #[serde(default, rename = "denied_domains", alias = "deniedDomains")]
    pub denied_domains: Vec<String>,
    /// Unix socket entries (paths or presets such as SSH_AUTH_SOCK) that may
    /// be proxied; resolution happens elsewhere.
    #[serde(default, rename = "allow_unix_sockets", alias = "allowUnixSockets")]
    pub allow_unix_sockets: Vec<String>,
    /// NOTE(review): presumably permits sandboxed local port binding —
    /// confirm where this flag is consumed.
    #[serde(default, rename = "allow_local_binding", alias = "allowLocalBinding")]
    pub allow_local_binding: bool,
}
/// Network access mode for proxied traffic.
///
/// `Full` is the default. Deriving `Default` with the `#[default]` variant
/// attribute replaces the previous manual impl with identical behavior.
#[derive(Debug, Clone, Copy, Default, Serialize, Deserialize, PartialEq, Eq)]
#[serde(rename_all = "lowercase")]
pub enum NetworkMode {
    /// Restricted mode: only read-only methods are allowed by the HTTP
    /// handlers (GET, HEAD, OPTIONS per their log messages) and CONNECT
    /// requires MITM.
    Limited,
    /// Unrestricted mode.
    #[default]
    Full,
}
/// TLS interception (MITM) settings under `[network_proxy.mitm]`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct MitmConfig {
    /// Enable TLS interception for CONNECT tunnels.
    #[serde(default)]
    pub enabled: bool,
    /// NOTE(review): presumably enables body inspection/logging while
    /// intercepting — confirm against the mitm module.
    #[serde(default)]
    pub inspect: bool,
    /// Cap on captured body bytes (default 4096).
    #[serde(default = "default_mitm_max_body_bytes")]
    pub max_body_bytes: usize,
    /// CA certificate path. Relative by default; presumably resolved against
    /// CODEX_HOME by the consumer — verify at call sites.
    #[serde(default = "default_ca_cert_path")]
    pub ca_cert_path: PathBuf,
    /// CA private key path (same resolution caveat as `ca_cert_path`).
    #[serde(default = "default_ca_key_path")]
    pub ca_key_path: PathBuf,
}

// Manual impl mirrors the serde field defaults above; not derivable because
// several defaults are non-trivial.
impl Default for MitmConfig {
    fn default() -> Self {
        Self {
            enabled: false,
            inspect: false,
            max_body_bytes: default_mitm_max_body_bytes(),
            ca_cert_path: default_ca_cert_path(),
            ca_key_path: default_ca_key_path(),
        }
    }
}
/// Default HTTP proxy listen URL (loopback only).
fn default_proxy_url() -> String {
    String::from("http://127.0.0.1:3128")
}
/// Default admin API listen URL (loopback only).
fn default_admin_url() -> String {
    String::from("http://127.0.0.1:8080")
}
/// Default MITM CA certificate location (relative path).
fn default_ca_cert_path() -> PathBuf {
    "network_proxy/mitm/ca.pem".into()
}
/// Default MITM CA private-key location (relative path).
fn default_ca_key_path() -> PathBuf {
    "network_proxy/mitm/ca.key".into()
}
/// Default cap on inspected body bytes under MITM: 4 KiB.
fn default_mitm_max_body_bytes() -> usize {
    4 * 1024
}
/// Resolved listen addresses for the proxy's servers.
pub struct RuntimeConfig {
    /// HTTP proxy (CONNECT + plain forwarding) listener.
    pub http_addr: SocketAddr,
    /// SOCKS5 listener (fixed to 127.0.0.1:8081 by `resolve_runtime`).
    pub socks_addr: SocketAddr,
    /// Admin/control API listener.
    pub admin_addr: SocketAddr,
}
/// Locate the Codex `config.toml` via codex-core's default resolution,
/// attaching context so failures are attributable.
pub fn default_codex_config_path() -> Result<PathBuf> {
    default_config_path().context("failed to resolve Codex config path")
}
/// Derive concrete listen addresses from the proxy/admin URLs in `cfg`.
///
/// The SOCKS5 listener is currently hard-wired to 127.0.0.1:8081.
pub fn resolve_runtime(cfg: &Config) -> RuntimeConfig {
    RuntimeConfig {
        http_addr: resolve_addr(&cfg.network_proxy.proxy_url, 3128),
        socks_addr: SocketAddr::from(([127, 0, 0, 1], 8081)),
        admin_addr: resolve_addr(&cfg.network_proxy.admin_url, 8080),
    }
}
/// Turn a proxy URL into a bindable socket address.
///
/// `localhost` is rewritten to 127.0.0.1; any host that is not a literal IP
/// address falls back to loopback with the parsed port, so the proxy never
/// binds a non-local interface.
fn resolve_addr(url: &str, default_port: u16) -> SocketAddr {
    let (raw_host, port) = parse_host_port(url, default_port);
    let host = if raw_host.eq_ignore_ascii_case("localhost") {
        "127.0.0.1"
    } else {
        raw_host
    };
    match host.parse::<IpAddr>() {
        Ok(ip) => SocketAddr::new(ip, port),
        Err(_) => SocketAddr::from(([127, 0, 0, 1], port)),
    }
}

/// Extract `(host, port)` from a URL-ish string, borrowing the host slice.
///
/// Handles optional scheme, userinfo, path suffix, and bracketed IPv6
/// literals. An empty input yields `("127.0.0.1", default_port)`; a suffix
/// after ':' that does not parse as a u16 is ignored in favor of the default.
fn parse_host_port(url: &str, default_port: u16) -> (&str, u16) {
    let trimmed = url.trim();
    if trimmed.is_empty() {
        return ("127.0.0.1", default_port);
    }
    // Strip "scheme://", then anything after the first '/', then "user@".
    let after_scheme = match trimmed.split_once("://") {
        Some((_, rest)) => rest,
        None => trimmed,
    };
    let mut host_port = after_scheme.split('/').next().unwrap_or(after_scheme);
    if let Some((_, rest)) = host_port.rsplit_once('@') {
        host_port = rest;
    }
    // Bracketed IPv6 literal: "[::1]:8080" or "[::1]".
    if let Some(rest) = host_port.strip_prefix('[') {
        if let Some(end) = rest.find(']') {
            let host = &rest[..end];
            let port = rest[end + 1..]
                .strip_prefix(':')
                .and_then(|p| p.parse::<u16>().ok())
                .unwrap_or(default_port);
            return (host, port);
        }
    }
    // "host:port" — only honor the suffix when it parses as a port number.
    match host_port.rsplit_once(':') {
        Some((host, port_str)) => match port_str.parse::<u16>() {
            Ok(port) => (host, port),
            Err(_) => (host_port, default_port),
        },
        None => (host_port, default_port),
    }
}

View File

@@ -0,0 +1,446 @@
use crate::config::NetworkMode;
use crate::mitm;
use crate::policy::normalize_host;
use crate::responses::blocked_text;
use crate::responses::json_blocked;
use crate::responses::text_response;
use crate::state::AppState;
use crate::state::BlockedRequest;
use anyhow::Result;
use hyper::Body;
use hyper::Method;
use hyper::Request;
use hyper::Response;
use hyper::Server;
use hyper::StatusCode;
use hyper::Uri;
use hyper::body::to_bytes;
use hyper::header::HOST;
use hyper::header::HeaderName;
use hyper::service::make_service_fn;
use hyper::service::service_fn;
use std::collections::HashSet;
use std::convert::Infallible;
use std::net::SocketAddr;
use std::sync::Arc;
use tokio::io::copy_bidirectional;
use tokio::net::TcpStream;
use tracing::error;
use tracing::info;
use tracing::warn;
/// Bind the HTTP proxy listener on `addr` and serve until the server exits.
///
/// Each connection's service closure captures the peer address so handlers
/// can attribute requests to a client.
pub async fn run_http_proxy(state: Arc<AppState>, addr: SocketAddr) -> Result<()> {
    let service_factory = make_service_fn(move |conn: &hyper::server::conn::AddrStream| {
        let per_conn_state = state.clone();
        let peer = conn.remote_addr();
        async move {
            let svc =
                service_fn(move |req| handle_proxy_request(req, per_conn_state.clone(), peer));
            Ok::<_, Infallible>(svc)
        }
    });
    let server = Server::bind(&addr).serve(service_factory);
    info!(addr = %addr, "HTTP proxy listening");
    server.await?;
    Ok(())
}
/// Top-level dispatcher: CONNECT requests become tunnels, everything else is
/// forwarded as plain HTTP. Never fails — errors surface as HTTP responses.
async fn handle_proxy_request(
    req: Request<Body>,
    state: Arc<AppState>,
    client_addr: SocketAddr,
) -> Result<Response<Body>, Infallible> {
    if req.method() == Method::CONNECT {
        Ok(handle_connect(req, state, client_addr).await)
    } else {
        Ok(handle_http_forward(req, state, client_addr).await)
    }
}
/// Handle an HTTP CONNECT request: enforce host policy, then either tunnel
/// bytes verbatim or hand the upgraded connection to the MITM layer.
///
/// In `Limited` mode with MITM unavailable, CONNECT is refused outright,
/// because an opaque TLS tunnel cannot be method-filtered.
async fn handle_connect(
    req: Request<Body>,
    state: Arc<AppState>,
    client_addr: SocketAddr,
) -> Response<Body> {
    let authority = match req.uri().authority() {
        Some(auth) => auth.as_str().to_string(),
        None => return text_response(StatusCode::BAD_REQUEST, "missing authority"),
    };
    let (authority_host, target_port) = split_authority(&authority);
    let host = normalize_host(&authority_host);
    if host.is_empty() {
        return text_response(StatusCode::BAD_REQUEST, "invalid host");
    }
    // Host policy: blocked hosts are recorded (best-effort — result ignored)
    // for later inspection via the admin API and refused before any upstream
    // I/O happens.
    match state.host_blocked(&host).await {
        Ok((true, reason)) => {
            let _ = state
                .record_blocked(BlockedRequest::new(
                    host.clone(),
                    reason.clone(),
                    Some(client_addr.to_string()),
                    Some("CONNECT".to_string()),
                    None,
                    "http-connect".to_string(),
                ))
                .await;
            warn!(client = %client_addr, host = %host, reason = %reason, "CONNECT blocked");
            return blocked_text(&reason);
        }
        Ok((false, _)) => {
            info!(client = %client_addr, host = %host, "CONNECT allowed");
        }
        Err(err) => {
            error!(error = %err, "failed to evaluate host");
            return text_response(StatusCode::INTERNAL_SERVER_ERROR, "error");
        }
    }
    let mode = match state.network_mode().await {
        Ok(mode) => mode,
        Err(err) => {
            error!(error = %err, "failed to read network mode");
            return text_response(StatusCode::INTERNAL_SERVER_ERROR, "error");
        }
    };
    let mitm_state = match state.mitm_state().await {
        Ok(state) => state,
        Err(err) => {
            error!(error = %err, "failed to load MITM state");
            return text_response(StatusCode::INTERNAL_SERVER_ERROR, "error");
        }
    };
    if mode == NetworkMode::Limited && mitm_state.is_none() {
        let _ = state
            .record_blocked(BlockedRequest::new(
                host.clone(),
                "mitm_required".to_string(),
                Some(client_addr.to_string()),
                Some("CONNECT".to_string()),
                Some(NetworkMode::Limited),
                "http-connect".to_string(),
            ))
            .await;
        warn!(
            client = %client_addr,
            host = %host,
            mode = "limited",
            allowed_methods = "GET, HEAD, OPTIONS",
            "CONNECT blocked; MITM required for read-only HTTPS in limited mode"
        );
        return blocked_text("mitm_required");
    }
    // Reply 200 first (below), then service the upgraded byte stream on a
    // background task once the client finishes the upgrade handshake.
    let on_upgrade = hyper::upgrade::on(req);
    tokio::spawn(async move {
        match on_upgrade.await {
            Ok(upgraded) => {
                if let Some(mitm_state) = mitm_state {
                    info!(client = %client_addr, host = %host, mode = ?mode, "CONNECT MITM enabled");
                    if let Err(err) =
                        mitm::mitm_tunnel(upgraded, &host, target_port, mode, mitm_state).await
                    {
                        warn!(error = %err, "MITM tunnel error");
                    }
                    return;
                }
                // No MITM: blind byte-for-byte tunnel to the upstream.
                let mut upgraded = upgraded;
                match TcpStream::connect(&authority).await {
                    Ok(mut server_stream) => {
                        if let Err(err) =
                            copy_bidirectional(&mut upgraded, &mut server_stream).await
                        {
                            warn!(error = %err, "tunnel error");
                        }
                    }
                    Err(err) => {
                        warn!(error = %err, "failed to connect to upstream");
                    }
                }
            }
            Err(err) => warn!(error = %err, "upgrade failed"),
        }
    });
    Response::builder()
        .status(StatusCode::OK)
        .body(Body::empty())
        .unwrap_or_else(|_| Response::new(Body::empty()))
}
/// Forward a plain (non-CONNECT) HTTP request to its upstream after policy
/// checks, or divert it to a unix-domain socket when the `x-unix-socket`
/// header is present.
///
/// Order of enforcement: method policy, unix-socket allowlist (if requested),
/// host allow/deny, then method policy again for regular hosts — each refusal
/// is recorded for the admin API where applicable.
async fn handle_http_forward(
    req: Request<Body>,
    state: Arc<AppState>,
    client_addr: SocketAddr,
) -> Response<Body> {
    let (parts, body) = req.into_parts();
    let method_allowed = match state.method_allowed(&parts.method).await {
        Ok(allowed) => allowed,
        Err(err) => {
            error!(error = %err, "failed to evaluate method policy");
            return text_response(StatusCode::INTERNAL_SERVER_ERROR, "error");
        }
    };
    // Clients opt into unix-socket forwarding via a custom header carrying
    // the target socket path.
    let unix_socket = parts
        .headers
        .get("x-unix-socket")
        .and_then(|v| v.to_str().ok())
        .map(|v| v.to_string());
    if let Some(socket_path) = unix_socket {
        if !method_allowed {
            warn!(
                client = %client_addr,
                method = %parts.method,
                mode = "limited",
                allowed_methods = "GET, HEAD, OPTIONS",
                "unix socket blocked by method policy"
            );
            return json_blocked("unix-socket", "method_not_allowed");
        }
        // Unix-socket proxying is only implemented for macOS (see
        // proxy_via_unix_socket's cfg gates).
        if !cfg!(target_os = "macos") {
            warn!(path = %socket_path, "unix socket proxy unsupported on this platform");
            return text_response(StatusCode::NOT_IMPLEMENTED, "unix sockets unsupported");
        }
        match state.is_unix_socket_allowed(&socket_path).await {
            Ok(true) => {
                info!(client = %client_addr, path = %socket_path, "unix socket allowed");
                match proxy_via_unix_socket(Request::from_parts(parts, body), &socket_path).await {
                    Ok(resp) => return resp,
                    Err(err) => {
                        warn!(error = %err, "unix socket proxy failed");
                        return text_response(StatusCode::BAD_GATEWAY, "unix socket proxy failed");
                    }
                }
            }
            Ok(false) => {
                warn!(client = %client_addr, path = %socket_path, "unix socket blocked");
                return json_blocked("unix-socket", "not_allowed");
            }
            Err(err) => {
                warn!(error = %err, "unix socket check failed");
                return text_response(StatusCode::INTERNAL_SERVER_ERROR, "error");
            }
        }
    }
    // Prefer the Host header; fall back to the request URI's authority
    // (absolute-form proxy requests).
    let host_header = parts
        .headers
        .get(HOST)
        .and_then(|v| v.to_str().ok())
        .map(|v| v.to_string())
        .or_else(|| parts.uri.authority().map(|a| a.as_str().to_string()));
    let authority = match host_header {
        Some(h) => h,
        None => return text_response(StatusCode::BAD_REQUEST, "missing host"),
    };
    let authority = authority.trim().to_string();
    let host = normalize_host(&authority);
    if host.is_empty() {
        return text_response(StatusCode::BAD_REQUEST, "invalid host");
    }
    match state.host_blocked(&host).await {
        Ok((true, reason)) => {
            // Best-effort audit record; failure to record does not change the
            // block decision.
            let _ = state
                .record_blocked(BlockedRequest::new(
                    host.clone(),
                    reason.clone(),
                    Some(client_addr.to_string()),
                    Some(parts.method.to_string()),
                    None,
                    "http".to_string(),
                ))
                .await;
            warn!(client = %client_addr, host = %host, reason = %reason, "request blocked");
            return json_blocked(&host, &reason);
        }
        Ok((false, _)) => {}
        Err(err) => {
            error!(error = %err, "failed to evaluate host");
            return text_response(StatusCode::INTERNAL_SERVER_ERROR, "error");
        }
    }
    if !method_allowed {
        let _ = state
            .record_blocked(BlockedRequest::new(
                host.clone(),
                "method_not_allowed".to_string(),
                Some(client_addr.to_string()),
                Some(parts.method.to_string()),
                Some(NetworkMode::Limited),
                "http".to_string(),
            ))
            .await;
        warn!(
            client = %client_addr,
            host = %host,
            method = %parts.method,
            mode = "limited",
            allowed_methods = "GET, HEAD, OPTIONS",
            "request blocked by method policy"
        );
        return json_blocked(&host, "method_not_allowed");
    }
    info!(
        client = %client_addr,
        host = %host,
        method = %parts.method,
        "request allowed"
    );
    let uri = match build_forward_uri(&authority, &parts.uri) {
        Ok(uri) => uri,
        Err(err) => {
            warn!(error = %err, "failed to build upstream uri");
            return text_response(StatusCode::BAD_REQUEST, "invalid uri");
        }
    };
    // NOTE(review): the full body is buffered in memory with no size cap —
    // consider streaming or limiting if large uploads are expected.
    let body_bytes = match to_bytes(body).await {
        Ok(bytes) => bytes,
        Err(err) => {
            warn!(error = %err, "failed to read body");
            return text_response(StatusCode::BAD_GATEWAY, "failed to read body");
        }
    };
    let mut builder = Request::builder()
        .method(parts.method)
        .uri(uri)
        .version(parts.version);
    // Copy end-to-end headers only; hop-by-hop headers and the internal
    // x-unix-socket marker must not reach the upstream.
    let hop_headers = hop_by_hop_headers();
    for (name, value) in parts.headers.iter() {
        let name_str = name.as_str().to_ascii_lowercase();
        if hop_headers.contains(name_str.as_str())
            || name == &HeaderName::from_static("x-unix-socket")
        {
            continue;
        }
        builder = builder.header(name, value);
    }
    let forwarded_req = match builder.body(Body::from(body_bytes)) {
        Ok(req) => req,
        Err(err) => {
            warn!(error = %err, "failed to build request");
            return text_response(StatusCode::BAD_GATEWAY, "invalid request");
        }
    };
    match state.client.request(forwarded_req).await {
        Ok(resp) => filter_response(resp),
        Err(err) => {
            warn!(error = %err, "upstream request failed");
            text_response(StatusCode::BAD_GATEWAY, "upstream failure")
        }
    }
}
/// Rebuild the absolute upstream URI from the Host-header authority plus the
/// original request's path and query.
fn build_forward_uri(authority: &str, uri: &Uri) -> Result<Uri> {
    let formatted = format!("http://{}{}", authority, path_and_query(uri));
    Ok(formatted.parse()?)
}
/// Re-emit an upstream response with hop-by-hop headers stripped; the body is
/// passed through unchanged.
fn filter_response(resp: Response<Body>) -> Response<Body> {
    let hop = hop_by_hop_headers();
    let mut out = Response::builder().status(resp.status());
    for (name, value) in resp.headers() {
        let lowered = name.as_str().to_ascii_lowercase();
        if !hop.contains(lowered.as_str()) {
            out = out.header(name, value);
        }
    }
    out.body(resp.into_body())
        .unwrap_or_else(|_| Response::new(Body::from("proxy error")))
}
/// The request's path+query as an owned string, defaulting to "/" when absent.
fn path_and_query(uri: &Uri) -> String {
    match uri.path_and_query() {
        Some(pq) => pq.as_str().to_string(),
        None => String::from("/"),
    }
}
/// Connection-scoped header names (RFC 7230 §6.1 plus the legacy
/// `proxy-connection`) that a proxy must not forward.
fn hop_by_hop_headers() -> HashSet<&'static str> {
    HashSet::from([
        "connection",
        "proxy-connection",
        "keep-alive",
        "proxy-authenticate",
        "proxy-authorization",
        "te",
        "trailer",
        "transfer-encoding",
        "upgrade",
    ])
}
/// Split a CONNECT authority into `(host, port)`, defaulting the port to 443.
///
/// Bracketed IPv6 literals ("[::1]:8443") are handled; a missing or
/// unparsable port yields 443.
fn split_authority(authority: &str) -> (String, u16) {
    if let Some(rest) = authority.strip_prefix('[') {
        if let Some(end) = rest.find(']') {
            let port = rest[end + 1..]
                .strip_prefix(':')
                .and_then(|p| p.parse::<u16>().ok())
                .unwrap_or(443);
            return (rest[..end].to_string(), port);
        }
    }
    match authority.split_once(':') {
        Some((host, port_str)) => {
            let port = port_str.parse::<u16>().unwrap_or(443);
            (host.to_string(), port)
        }
        None => (authority.to_string(), 443),
    }
}
/// Replay an HTTP request over a local unix-domain socket (macOS only).
///
/// The target socket path comes from the caller (the `x-unix-socket` header,
/// which is stripped here along with hop-by-hop headers before forwarding).
/// On non-macOS targets this always returns an error.
async fn proxy_via_unix_socket(req: Request<Body>, socket_path: &str) -> Result<Response<Body>> {
    #[cfg(target_os = "macos")]
    {
        use hyper::client::conn::Builder as ConnBuilder;
        use tokio::net::UnixStream;
        // Keep only path+query: a unix-socket peer has no meaningful authority.
        let path = path_and_query(req.uri());
        let (parts, body) = req.into_parts();
        // Buffer the body so it can be attached to the rebuilt request.
        let body_bytes = to_bytes(body).await?;
        let mut builder = Request::builder()
            .method(parts.method)
            .uri(path)
            .version(parts.version);
        let hop_headers = hop_by_hop_headers();
        for (name, value) in parts.headers.iter() {
            let name_str = name.as_str().to_ascii_lowercase();
            if hop_headers.contains(name_str.as_str())
                || name == &HeaderName::from_static("x-unix-socket")
            {
                continue;
            }
            builder = builder.header(name, value);
        }
        let req = builder.body(Body::from(body_bytes))?;
        let stream = UnixStream::connect(socket_path).await?;
        // hyper's low-level client: `sender` issues requests while `conn`
        // drives the connection on a detached background task.
        let (mut sender, conn) = ConnBuilder::new().handshake(stream).await?;
        tokio::spawn(async move {
            if let Err(err) = conn.await {
                warn!(error = %err, "unix socket connection error");
            }
        });
        Ok(sender.send_request(req).await?)
    }
    #[cfg(not(target_os = "macos"))]
    {
        // Silence unused-variable warnings on the non-macOS build.
        let _ = req;
        let _ = socket_path;
        Err(anyhow::anyhow!("unix sockets not supported"))
    }
}

View File

@@ -0,0 +1,17 @@
use anyhow::Context;
use anyhow::Result;
use codex_core::config::find_codex_home;
use std::fs;
/// Creates the on-disk layout the network proxy expects under `CODEX_HOME`
/// (`network_proxy/` plus its `mitm/` subdirectory) and prints the result.
pub fn run_init() -> Result<()> {
    let codex_home = find_codex_home().context("failed to resolve CODEX_HOME")?;
    let proxy_root = codex_home.join("network_proxy");
    let mitm_dir = proxy_root.join("mitm");
    for dir in [&proxy_root, &mitm_dir] {
        fs::create_dir_all(dir)
            .with_context(|| format!("failed to create {}", dir.display()))?;
    }
    println!("ensured {}", mitm_dir.display());
    Ok(())
}

View File

@@ -0,0 +1,58 @@
mod admin;
mod config;
mod http_proxy;
mod init;
mod mitm;
mod policy;
mod responses;
mod socks5;
mod state;
use crate::state::AppState;
use anyhow::Result;
use clap::Parser;
use clap::Subcommand;
use std::net::SocketAddr;
use std::sync::Arc;
use tracing::warn;
// Command-line interface of the `codex-network-proxy` binary.
// NOTE: plain `//` comments are used deliberately — clap turns `///` doc
// comments into user-visible help text, which would change CLI output.
#[derive(Debug, Clone, Parser)]
#[command(name = "codex-network-proxy", about = "Codex network sandbox proxy")]
pub struct Args {
    // Optional subcommand; when absent the proxy listeners are started.
    #[command(subcommand)]
    pub command: Option<Command>,
}
// Subcommands of the proxy binary (the variant doc comment below is clap help
// text and is part of the CLI surface — do not edit casually).
#[derive(Debug, Clone, Subcommand)]
pub enum Command {
    /// Initialize the Codex network proxy directories (e.g. MITM cert paths).
    Init,
}
/// Library entry point: handles the `init` subcommand, otherwise boots the
/// three proxy listeners (HTTP, SOCKS5, admin API) and runs until one fails.
pub async fn run_main(args: Args) -> Result<()> {
    tracing_subscriber::fmt::init();
    // `codex-network-proxy init` only prepares directories, then exits.
    if let Some(Command::Init) = args.command {
        init::run_init()?;
        return Ok(());
    }
    // Unix-socket forwarding is implemented only on macOS; warn up front so
    // blocked requests on other platforms are explainable.
    if cfg!(not(target_os = "macos")) {
        warn!("allowUnixSockets is macOS-only; requests will be rejected on this platform");
    }
    let cfg_path = config::default_codex_config_path()?;
    // AppState watches cfg_path and hot-reloads policy on modification.
    let state = Arc::new(AppState::new(cfg_path).await?);
    let runtime = config::resolve_runtime(&state.current_cfg().await?);
    let http_addr: SocketAddr = runtime.http_addr;
    let socks_addr: SocketAddr = runtime.socks_addr;
    let admin_addr: SocketAddr = runtime.admin_addr;
    let http_task = http_proxy::run_http_proxy(state.clone(), http_addr);
    let socks_task = socks5::run_socks5(state.clone(), socks_addr);
    let admin_task = admin::run_admin_api(state.clone(), admin_addr);
    // All three listeners run forever; try_join! propagates the first error.
    tokio::try_join!(http_task, socks_task, admin_task)?;
    Ok(())
}

View File

@@ -0,0 +1,8 @@
use anyhow::Result;
use clap::Parser;
use codex_network_proxy::Args;
/// Binary entry point: parse the CLI and hand off to the library runner.
#[tokio::main]
async fn main() -> Result<()> {
    let args = Args::parse();
    codex_network_proxy::run_main(args).await
}

View File

@@ -0,0 +1,665 @@
#[cfg(feature = "mitm")]
mod imp {
use crate::config::MitmConfig;
use crate::config::NetworkMode;
use crate::policy::method_allowed;
use crate::policy::normalize_host;
use crate::responses::text_response;
use anyhow::Context;
use anyhow::Result;
use anyhow::anyhow;
use hyper::Body;
use hyper::Method;
use hyper::Request;
use hyper::Response;
use hyper::StatusCode;
use hyper::Uri;
use hyper::Version;
use hyper::body::HttpBody;
use hyper::header::HOST;
use hyper::server::conn::Http;
use hyper::service::service_fn;
use rcgen::BasicConstraints;
use rcgen::Certificate;
use rcgen::CertificateParams;
use rcgen::DistinguishedName;
use rcgen::DnType;
use rcgen::ExtendedKeyUsagePurpose;
use rcgen::IsCa;
use rcgen::KeyPair;
use rcgen::KeyUsagePurpose;
use rcgen::SanType;
use rustls::Certificate as RustlsCertificate;
use rustls::ClientConfig;
use rustls::PrivateKey;
use rustls::RootCertStore;
use rustls::ServerConfig;
use std::collections::HashSet;
use std::convert::Infallible;
use std::fs;
use std::io::Cursor;
use std::net::IpAddr;
use std::path::Path;
use std::sync::Arc;
use tokio::net::TcpStream;
use tokio_rustls::TlsAcceptor;
use tokio_rustls::TlsConnector;
use tracing::info;
use tracing::warn;
/// HTTP version negotiated with the client (via ALPN) inside the MITM tunnel.
#[derive(Clone, Copy, Debug)]
enum MitmProtocol {
    Http1,
    Http2,
}
/// The origin server a MITM'd CONNECT tunnel points at.
struct MitmTarget {
    host: String,
    port: u16,
}
impl MitmTarget {
    /// Authority string suitable for the Host header; the port is omitted
    /// when it is the HTTPS default (443).
    fn authority(&self) -> String {
        match self.port {
            443 => self.host.clone(),
            port => format!("{}:{}", self.host, port),
        }
    }
}
/// Metadata attached to the log line emitted after a request body is inspected.
struct RequestLogContext {
    host: String,
    method: Method,
    path: String,
}
/// Metadata attached to the log line emitted after a response body is inspected.
struct ResponseLogContext {
    host: String,
    method: Method,
    path: String,
    status: StatusCode,
}
/// Long-lived TLS interception state: the CA used to mint per-host leaf
/// certificates, the shared upstream TLS client config, and inspection knobs.
pub struct MitmState {
    // CA private key used to sign per-host leaf certificates.
    ca_key: KeyPair,
    // Parsed CA certificate (issuer of every minted leaf cert).
    ca_cert: Certificate,
    // Upstream TLS config built from the platform's native root store.
    client_config: Arc<ClientConfig>,
    // When true, request/response bodies are streamed through a logger.
    inspect: bool,
    // Byte count past which an inspected body is flagged as truncated in logs.
    max_body_bytes: usize,
}
impl MitmState {
    /// Loads (or creates) the MITM CA described by `cfg` and prepares the
    /// shared upstream TLS client configuration.
    pub fn new(cfg: &MitmConfig) -> Result<Self> {
        let (ca_cert_pem, ca_key_pem) = load_or_create_ca(cfg)?;
        let ca_key = KeyPair::from_pem(&ca_key_pem).context("failed to parse CA key")?;
        // Round-trip the PEM through CertificateParams so rcgen can later
        // sign leaf certificates with this CA.
        let ca_params = CertificateParams::from_ca_cert_pem(&ca_cert_pem)
            .context("failed to parse CA cert")?;
        let ca_cert = ca_params
            .self_signed(&ca_key)
            .context("failed to reconstruct CA cert")?;
        let client_config = build_client_config()?;
        Ok(Self {
            ca_key,
            ca_cert,
            client_config,
            inspect: cfg.inspect,
            max_body_bytes: cfg.max_body_bytes,
        })
    }
    /// Mints a fresh leaf certificate for `host`, signed by the MITM CA, and
    /// wraps it in a server TLS config.
    /// NOTE(review): a new certificate is generated on every call (i.e. per
    /// tunnel); consider caching per host if handshake cost becomes an issue.
    pub fn server_config_for_host(&self, host: &str) -> Result<Arc<ServerConfig>> {
        let (certs, key) = issue_host_certificate(host, &self.ca_cert, &self.ca_key)?;
        let mut config = ServerConfig::builder()
            .with_safe_defaults()
            .with_no_client_auth()
            .with_single_cert(certs, key)
            .context("failed to build server TLS config")?;
        // Only HTTP/1.1 is offered to the client, so the h2 branch in
        // mitm_tunnel is effectively a safety net.
        config.alpn_protocols = vec![b"http/1.1".to_vec()];
        Ok(Arc::new(config))
    }
    /// Shared TLS client config used for all upstream connections.
    pub fn client_config(&self) -> Arc<ClientConfig> {
        Arc::clone(&self.client_config)
    }
    /// Whether request/response bodies should be logged.
    pub fn inspect_enabled(&self) -> bool {
        self.inspect
    }
    /// Logging threshold after which a body is marked truncated.
    pub fn max_body_bytes(&self) -> usize {
        self.max_body_bytes
    }
}
/// Terminates the client's TLS on an upgraded CONNECT stream using a freshly
/// minted certificate for `host`, then serves the HTTP requests flowing
/// through the tunnel, forwarding each via `handle_mitm_request`.
pub async fn mitm_tunnel(
    stream: hyper::upgrade::Upgraded,
    host: &str,
    port: u16,
    mode: NetworkMode,
    state: Arc<MitmState>,
) -> Result<()> {
    let server_config = state.server_config_for_host(host)?;
    let acceptor = TlsAcceptor::from(server_config);
    let tls_stream = acceptor
        .accept(stream)
        .await
        .context("client TLS handshake failed")?;
    // Select the serving protocol from ALPN. The server config only
    // advertises http/1.1, so the Http2 arm is not expected in practice.
    let protocol = match tls_stream.get_ref().1.alpn_protocol() {
        Some(proto) if proto == b"h2" => MitmProtocol::Http2,
        _ => MitmProtocol::Http1,
    };
    info!(
        host = %host,
        port = port,
        protocol = ?protocol,
        mode = ?mode,
        inspect = state.inspect_enabled(),
        max_body_bytes = state.max_body_bytes(),
        "MITM TLS established"
    );
    let target = Arc::new(MitmTarget {
        host: host.to_string(),
        port,
    });
    // One service per tunnel; each request captures the target and state.
    let service = {
        let state = state.clone();
        let target = target.clone();
        service_fn(move |req| handle_mitm_request(req, target.clone(), mode, state.clone()))
    };
    let mut http = Http::new();
    match protocol {
        MitmProtocol::Http2 => {
            http.http2_only(true);
        }
        MitmProtocol::Http1 => {
            http.http1_only(true);
        }
    }
    // Serve until the client closes the tunnel or an error occurs.
    http.serve_connection(tls_stream, service)
        .await
        .context("MITM HTTP handling failed")?;
    Ok(())
}
/// Per-request service entry inside the MITM tunnel. Never fails: upstream
/// errors are converted into a 502 response so hyper keeps the connection
/// alive for subsequent requests.
async fn handle_mitm_request(
    req: Request<Body>,
    target: Arc<MitmTarget>,
    mode: NetworkMode,
    state: Arc<MitmState>,
) -> Result<Response<Body>, Infallible> {
    let forwarded = forward_request(req, target.as_ref(), mode, state.as_ref()).await;
    let response = forwarded.unwrap_or_else(|err| {
        warn!(error = %err, host = %target.host, "MITM upstream request failed");
        text_response(StatusCode::BAD_GATEWAY, "mitm upstream error")
    });
    Ok(response)
}
/// Rewrites one tunneled request into origin-form HTTP/1.1, enforces the
/// method policy, optionally tees the body through an inspection logger, and
/// forwards it to the real origin over TLS.
async fn forward_request(
    req: Request<Body>,
    target: &MitmTarget,
    mode: NetworkMode,
    state: &MitmState,
) -> Result<Response<Body>> {
    // A nested CONNECT inside an already-established tunnel is never valid.
    if req.method() == Method::CONNECT {
        return Ok(text_response(
            StatusCode::METHOD_NOT_ALLOWED,
            "CONNECT not supported inside MITM",
        ));
    }
    let (parts, body) = req.into_parts();
    let request_version = parts.version;
    let method = parts.method.clone();
    let inspect = state.inspect_enabled();
    let max_body_bytes = state.max_body_bytes();
    // Refuse requests whose Host/authority disagrees with the CONNECT target
    // (prevents smuggling another host through an approved tunnel).
    if let Some(request_host) = extract_request_host(&parts) {
        let normalized = normalize_host(&request_host);
        if !normalized.is_empty() && normalized != target.host {
            warn!(
                target = %target.host,
                request_host = %normalized,
                "MITM host mismatch"
            );
            return Ok(text_response(StatusCode::BAD_REQUEST, "host mismatch"));
        }
    }
    let path = path_and_query(&parts.uri);
    let uri = build_origin_form_uri(&path)?;
    let authority = target.authority();
    // Limited mode only permits read-only methods (GET/HEAD/OPTIONS).
    if !method_allowed(mode, &method) {
        warn!(
            host = %authority,
            method = %method,
            path = %path,
            mode = ?mode,
            allowed_methods = "GET, HEAD, OPTIONS",
            "MITM blocked by method policy"
        );
        return Ok(text_response(StatusCode::FORBIDDEN, "method not allowed"));
    }
    // Upstream is always spoken as HTTP/1.1 regardless of the client version.
    let mut builder = Request::builder()
        .method(method.clone())
        .uri(uri)
        .version(Version::HTTP_11);
    let hop_headers = hop_by_hop_headers();
    for (name, value) in parts.headers.iter() {
        let name_str = name.as_str().to_ascii_lowercase();
        // Strip hop-by-hop headers and the original Host (re-added below).
        if hop_headers.contains(name_str.as_str()) || name == &HOST {
            continue;
        }
        builder = builder.header(name, value);
    }
    builder = builder.header(HOST, authority.as_str());
    // When inspecting, pipe the request body through stream_body so its size
    // is logged; otherwise pass it through untouched.
    let body = if inspect {
        let (tx, out_body) = Body::channel();
        let ctx = RequestLogContext {
            host: authority.clone(),
            method: method.clone(),
            path: path.clone(),
        };
        tokio::spawn(async move {
            stream_body(body, tx, max_body_bytes, ctx).await;
        });
        out_body
    } else {
        body
    };
    let upstream_req = builder
        .body(body)
        .context("failed to build upstream request")?;
    let upstream_resp = send_upstream_request(upstream_req, target, state).await?;
    respond_with_inspection(
        upstream_resp,
        request_version,
        inspect,
        max_body_bytes,
        &method,
        &path,
        &authority,
    )
    .await
}
/// Opens a fresh TCP + TLS connection to the real origin and sends `req`
/// over it. A new connection is established per request; the connection task
/// is detached and only logs on failure.
async fn send_upstream_request(
    req: Request<Body>,
    target: &MitmTarget,
    state: &MitmState,
) -> Result<Response<Body>> {
    let upstream = TcpStream::connect((target.host.as_str(), target.port))
        .await
        .context("failed to connect to upstream")?;
    // rustls needs an IP variant for IP literals; DNS names go through
    // ServerName::try_from.
    let server_name = match target.host.parse::<IpAddr>() {
        Ok(ip) => rustls::ServerName::IpAddress(ip),
        Err(_) => rustls::ServerName::try_from(target.host.as_str())
            .map_err(|_| anyhow!("invalid server name"))?,
    };
    let connector = TlsConnector::from(state.client_config());
    let tls_stream = connector
        .connect(server_name, upstream)
        .await
        .context("upstream TLS handshake failed")?;
    let (mut sender, conn) = hyper::client::conn::Builder::new()
        .handshake(tls_stream)
        .await
        .context("upstream HTTP handshake failed")?;
    // Drive the connection in the background for the lifetime of the request.
    tokio::spawn(async move {
        if let Err(err) = conn.await {
            warn!(error = %err, "MITM upstream connection error");
        }
    });
    let resp = sender
        .send_request(req)
        .await
        .context("upstream request failed")?;
    Ok(resp)
}
/// Rebuilds the upstream response for the client: restores the client's HTTP
/// version, strips hop-by-hop headers, and (when inspection is on) tees the
/// body through `stream_body` so its size and status get logged.
async fn respond_with_inspection(
    resp: Response<Body>,
    request_version: Version,
    inspect: bool,
    max_body_bytes: usize,
    method: &Method,
    path: &str,
    authority: &str,
) -> Result<Response<Body>> {
    let (parts, body) = resp.into_parts();
    let mut builder = Response::builder()
        .status(parts.status)
        .version(request_version);
    let hop_headers = hop_by_hop_headers();
    for (name, value) in parts.headers.iter() {
        if hop_headers.contains(name.as_str().to_ascii_lowercase().as_str()) {
            continue;
        }
        builder = builder.header(name, value);
    }
    // Mirror of the request-side inspection: log body length without
    // altering the bytes delivered to the client.
    let body = if inspect {
        let (tx, out_body) = Body::channel();
        let ctx = ResponseLogContext {
            host: authority.to_string(),
            method: method.clone(),
            path: path.to_string(),
            status: parts.status,
        };
        tokio::spawn(async move {
            stream_body(body, tx, max_body_bytes, ctx).await;
        });
        out_body
    } else {
        body
    };
    Ok(builder
        .body(body)
        .unwrap_or_else(|_| Response::new(Body::from("proxy error"))))
}
/// Relays `body` chunk-by-chunk into `tx`, counting bytes as they pass.
/// Bodies are NOT cut off at `max_body_bytes`: every byte is still forwarded;
/// exceeding the limit only marks the resulting log line as truncated.
/// Trailers are forwarded best-effort, and `ctx` logs once the stream ends.
async fn stream_body<T>(
    mut body: Body,
    mut tx: hyper::body::Sender,
    max_body_bytes: usize,
    ctx: T,
) where
    T: BodyLoggable,
{
    let mut len: usize = 0;
    let mut truncated = false;
    while let Some(chunk) = body.data().await {
        match chunk {
            Ok(bytes) => {
                len = len.saturating_add(bytes.len());
                if len > max_body_bytes {
                    truncated = true;
                }
                // Receiver hung up: stop relaying but still log what was seen.
                if tx.send_data(bytes).await.is_err() {
                    break;
                }
            }
            Err(err) => {
                warn!(error = %err, "MITM body stream error");
                break;
            }
        }
    }
    if let Ok(Some(trailers)) = body.trailers().await {
        let _ = tx.send_trailers(trailers).await;
    }
    ctx.log(len, truncated);
}
/// Emits the final inspection log line for a streamed body; `len` is the
/// total byte count seen and `truncated` whether it exceeded the limit.
trait BodyLoggable {
    fn log(self, len: usize, truncated: bool);
}
// Request-side inspection log: host/method/path plus body size.
impl BodyLoggable for RequestLogContext {
    fn log(self, len: usize, truncated: bool) {
        info!(
            host = %self.host,
            method = %self.method,
            path = %self.path,
            body_len = len,
            truncated = truncated,
            "MITM inspected request body"
        );
    }
}
// Response-side inspection log: adds the upstream status code.
impl BodyLoggable for ResponseLogContext {
    fn log(self, len: usize, truncated: bool) {
        info!(
            host = %self.host,
            method = %self.method,
            path = %self.path,
            status = %self.status,
            body_len = len,
            truncated = truncated,
            "MITM inspected response body"
        );
    }
}
/// Best-effort request host: the Host header when present and valid UTF-8,
/// otherwise the URI authority (absolute-form requests), otherwise None.
fn extract_request_host(parts: &hyper::http::request::Parts) -> Option<String> {
    let from_header = parts
        .headers
        .get(HOST)
        .and_then(|value| value.to_str().ok())
        .map(str::to_string);
    from_header.or_else(|| parts.uri.authority().map(|a| a.as_str().to_string()))
}
/// Returns the origin-form request target of `uri`, defaulting to "/" when
/// the URI has no path-and-query component.
fn path_and_query(uri: &Uri) -> String {
    match uri.path_and_query() {
        Some(pq) => pq.as_str().to_string(),
        None => "/".to_string(),
    }
}
/// Parses an origin-form request target (path plus optional query) into a `Uri`.
fn build_origin_form_uri(path: &str) -> Result<Uri> {
    let uri: Uri = path.parse().context("invalid request path")?;
    Ok(uri)
}
/// Header names (lowercase) that are connection-scoped per RFC 7230 §6.1 and
/// must not be copied between the two sides of the MITM.
fn hop_by_hop_headers() -> HashSet<&'static str> {
    let mut names = HashSet::new();
    for name in [
        "connection",
        "proxy-connection",
        "keep-alive",
        "proxy-authenticate",
        "proxy-authorization",
        "te",
        "trailer",
        "transfer-encoding",
        "upgrade",
    ] {
        names.insert(name);
    }
    names
}
/// Builds the upstream TLS client config from the OS-native root store.
/// Individual unparsable roots are skipped with a warning; an entirely empty
/// store is a hard error since no upstream could ever be verified.
fn build_client_config() -> Result<Arc<ClientConfig>> {
    let mut roots = RootCertStore::empty();
    let certs = rustls_native_certs::load_native_certs()
        .map_err(|err| anyhow!("failed to load native certs: {err}"))?;
    for cert in certs {
        if roots.add(&RustlsCertificate(cert.0)).is_err() {
            warn!("skipping invalid root cert");
        }
    }
    if roots.is_empty() {
        return Err(anyhow!("no root certificates available"));
    }
    let mut config = ClientConfig::builder()
        .with_safe_defaults()
        .with_root_certificates(roots)
        .with_no_client_auth();
    // Keep upstream on HTTP/1.1 to match the single-request conn handling.
    config.alpn_protocols = vec![b"http/1.1".to_vec()];
    Ok(Arc::new(config))
}
/// Issues a short-lived ECDSA P-256 leaf certificate for `host` (DNS name or
/// IP literal goes into the SAN accordingly), signed by the MITM CA, and
/// converts it into the rustls cert-chain + key pair form.
fn issue_host_certificate(
    host: &str,
    ca_cert: &Certificate,
    ca_key: &KeyPair,
) -> Result<(Vec<RustlsCertificate>, PrivateKey)> {
    // IP literals need an IpAddress SAN; rcgen's Vec<String> ctor only
    // produces DNS SANs, so the IP case starts from empty params.
    let mut params = if let Ok(ip) = host.parse::<IpAddr>() {
        let mut params = CertificateParams::new(Vec::new())
            .map_err(|err| anyhow!("failed to create cert params: {err}"))?;
        params.subject_alt_names.push(SanType::IpAddress(ip));
        params
    } else {
        CertificateParams::new(vec![host.to_string()])
            .map_err(|err| anyhow!("failed to create cert params: {err}"))?
    };
    params.extended_key_usages = vec![ExtendedKeyUsagePurpose::ServerAuth];
    params.key_usages = vec![
        KeyUsagePurpose::DigitalSignature,
        KeyUsagePurpose::KeyEncipherment,
    ];
    let key_pair = KeyPair::generate_for(&rcgen::PKCS_ECDSA_P256_SHA256)
        .map_err(|err| anyhow!("failed to generate host key pair: {err}"))?;
    let cert = params
        .signed_by(&key_pair, ca_cert, ca_key)
        .map_err(|err| anyhow!("failed to sign host cert: {err}"))?;
    // Round-trip through PEM to hand rustls its own DER types.
    let cert_pem = cert.pem();
    let key_pem = key_pair.serialize_pem();
    let certs = certs_from_pem(&cert_pem)?;
    let key = private_key_from_pem(&key_pem)?;
    Ok((certs, key))
}
/// Returns the MITM CA as (cert PEM, key PEM): loads an existing pair from
/// the configured paths, or generates and persists a new one. It is an error
/// for only one of the two files to exist — that likely indicates a broken
/// or tampered install, so nothing is overwritten.
fn load_or_create_ca(cfg: &MitmConfig) -> Result<(String, String)> {
    let cert_path = &cfg.ca_cert_path;
    let key_path = &cfg.ca_key_path;
    if cert_path.exists() || key_path.exists() {
        if !cert_path.exists() || !key_path.exists() {
            return Err(anyhow!("both ca_cert_path and ca_key_path must exist"));
        }
        let cert_pem = fs::read_to_string(cert_path)
            .with_context(|| format!("failed to read CA cert {}", cert_path.display()))?;
        let key_pem = fs::read_to_string(key_path)
            .with_context(|| format!("failed to read CA key {}", key_path.display()))?;
        return Ok((cert_pem, key_pem));
    }
    if let Some(parent) = cert_path.parent() {
        fs::create_dir_all(parent)
            .with_context(|| format!("failed to create {}", parent.display()))?;
    }
    if let Some(parent) = key_path.parent() {
        fs::create_dir_all(parent)
            .with_context(|| format!("failed to create {}", parent.display()))?;
    }
    let (cert_pem, key_pem) = generate_ca()?;
    // The cert is public (0o644); only the key needs to be private (0o600).
    write_private_file(cert_path, cert_pem.as_bytes(), 0o644)?;
    write_private_file(key_path, key_pem.as_bytes(), 0o600)?;
    info!(
        cert_path = %cert_path.display(),
        key_path = %key_path.display(),
        "generated MITM CA"
    );
    Ok((cert_pem, key_pem))
}
/// Generates a fresh self-signed ECDSA P-256 CA ("network_proxy MITM CA")
/// with cert-signing key usage, returned as (cert PEM, key PEM).
fn generate_ca() -> Result<(String, String)> {
    let mut params = CertificateParams::default();
    params.is_ca = IsCa::Ca(BasicConstraints::Unconstrained);
    params.key_usages = vec![
        KeyUsagePurpose::KeyCertSign,
        KeyUsagePurpose::DigitalSignature,
        KeyUsagePurpose::KeyEncipherment,
    ];
    let mut dn = DistinguishedName::new();
    dn.push(DnType::CommonName, "network_proxy MITM CA");
    params.distinguished_name = dn;
    let key_pair = KeyPair::generate_for(&rcgen::PKCS_ECDSA_P256_SHA256)
        .map_err(|err| anyhow!("failed to generate CA key pair: {err}"))?;
    let cert = params
        .self_signed(&key_pair)
        .map_err(|err| anyhow!("failed to generate CA cert: {err}"))?;
    let cert_pem = cert.pem();
    let key_pem = key_pair.serialize_pem();
    Ok((cert_pem, key_pem))
}
/// Parses every certificate in a PEM bundle into rustls DER certs; an empty
/// bundle is an error.
fn certs_from_pem(pem: &str) -> Result<Vec<RustlsCertificate>> {
    let parsed =
        rustls_pemfile::certs(&mut Cursor::new(pem)).context("failed to parse cert PEM")?;
    if parsed.is_empty() {
        return Err(anyhow!("no certificates found"));
    }
    Ok(parsed.into_iter().map(RustlsCertificate).collect())
}
/// Extracts a private key from PEM, trying PKCS#8 first and then legacy RSA.
/// When a section contains several keys, the last one wins.
fn private_key_from_pem(pem: &str) -> Result<PrivateKey> {
    let pkcs8 = rustls_pemfile::pkcs8_private_keys(&mut Cursor::new(pem))
        .context("failed to parse pkcs8 key")?;
    if let Some(key) = pkcs8.into_iter().last() {
        return Ok(PrivateKey(key));
    }
    let rsa = rustls_pemfile::rsa_private_keys(&mut Cursor::new(pem))
        .context("failed to parse rsa key")?;
    if let Some(key) = rsa.into_iter().last() {
        return Ok(PrivateKey(key));
    }
    Err(anyhow!("no private key found"))
}
/// Writes `contents` to `path`, then clamps its permission bits to `mode`
/// (a no-op on platforms without unix mode bits).
fn write_private_file(path: &Path, contents: &[u8], mode: u32) -> Result<()> {
    fs::write(path, contents).with_context(|| format!("failed to write {}", path.display()))?;
    set_permissions(path, mode)
}
/// Applies `mode` (e.g. 0o600) to `path` so private key material is not
/// world-readable.
#[cfg(unix)]
fn set_permissions(path: &Path, mode: u32) -> Result<()> {
    use std::os::unix::fs::PermissionsExt;
    fs::set_permissions(path, fs::Permissions::from_mode(mode))
        .with_context(|| format!("failed to set permissions on {}", path.display()))?;
    Ok(())
}
/// Non-unix platforms have no mode bits; succeed without doing anything.
#[cfg(not(unix))]
fn set_permissions(_path: &Path, _mode: u32) -> Result<()> {
    Ok(())
}
}
#[cfg(not(feature = "mitm"))]
mod imp {
use crate::config::MitmConfig;
use crate::config::NetworkMode;
use anyhow::Result;
use anyhow::anyhow;
use hyper::upgrade::Upgraded;
use std::sync::Arc;
/// Placeholder MITM state for builds without the `mitm` feature. `new` always
/// fails, so a value can never be constructed and the accessor defaults below
/// are effectively unreachable.
#[derive(Debug)]
pub struct MitmState;
#[allow(dead_code)]
impl MitmState {
    /// Always fails: MITM support was compiled out.
    pub fn new(_cfg: &MitmConfig) -> Result<Self> {
        Err(anyhow!("MITM feature disabled at build time"))
    }
    /// Inspection is never available in the stub.
    pub fn inspect_enabled(&self) -> bool {
        false
    }
    /// No body-logging threshold in the stub.
    pub fn max_body_bytes(&self) -> usize {
        0
    }
}
/// Stub tunnel entry point for builds without the `mitm` feature: always an
/// error, keeping call sites compilable.
pub async fn mitm_tunnel(
    _stream: Upgraded,
    _host: &str,
    _port: u16,
    _mode: NetworkMode,
    _state: Arc<MitmState>,
) -> Result<()> {
    Err(anyhow!("MITM feature disabled at build time"))
}
}
pub use imp::*;

View File

@@ -0,0 +1,31 @@
use crate::config::NetworkMode;
use hyper::Method;
use std::net::IpAddr;
/// Policy gate for HTTP methods: full mode permits everything; limited mode
/// permits only the read-only methods GET, HEAD and OPTIONS.
pub fn method_allowed(mode: NetworkMode, method: &Method) -> bool {
    match mode {
        NetworkMode::Full => true,
        NetworkMode::Limited => [Method::GET, Method::HEAD, Method::OPTIONS].contains(method),
    }
}
/// Returns true when `host` names the local machine: an IP literal in a
/// loopback range, or the special "localhost" name. Per RFC 6761 every
/// subdomain of localhost (e.g. "foo.localhost") also resolves to loopback,
/// so those are treated as loopback too — the previous version missed them,
/// letting "foo.localhost" bypass the local-address check. A single trailing
/// dot (fully-qualified form) is ignored for both names and IP literals.
pub fn is_loopback_host(host: &str) -> bool {
    let host = host.to_ascii_lowercase();
    // Accept FQDN forms like "localhost." or "127.0.0.1.".
    let host = host.strip_suffix('.').unwrap_or(&host);
    if host == "localhost" || host.ends_with(".localhost") {
        return true;
    }
    if let Ok(ip) = host.parse::<IpAddr>() {
        return ip.is_loopback();
    }
    false
}
/// Canonicalizes a host for policy matching: trims whitespace, lowercases,
/// unwraps a bracketed IPv6 literal ("[::1]:443" -> "::1"), keeps bare IP
/// literals intact, and otherwise strips a trailing ":port" suffix.
///
/// Fix: the previous version blindly split on ':', which mangled unbracketed
/// IPv6 literals ("::1" became "", "fe80::1" became "fe80") — those could
/// then never match deny/allow policy entries.
pub fn normalize_host(host: &str) -> String {
    let host = host.trim();
    // Bracketed IPv6, optionally with a port: take what's inside the brackets.
    if host.starts_with('[') {
        if let Some(end) = host.find(']') {
            return host[1..end].to_ascii_lowercase();
        }
    }
    // A bare IP literal (including unbracketed IPv6) must not be split on
    // ':' — every colon is part of the address, not a port separator.
    if host.parse::<IpAddr>().is_ok() {
        return host.to_ascii_lowercase();
    }
    host.split(':').next().unwrap_or("").to_ascii_lowercase()
}

View File

@@ -0,0 +1,65 @@
use hyper::Body;
use hyper::Response;
use hyper::StatusCode;
use serde::Serialize;
use serde_json::json;
/// 403 JSON response for a blocked request; the machine-readable reason is
/// carried both in the payload and in the `x-proxy-error` header.
pub fn json_blocked(host: &str, reason: &str) -> Response<Body> {
    let payload = json!({"status":"blocked","host":host,"reason":reason}).to_string();
    Response::builder()
        .status(StatusCode::FORBIDDEN)
        .header("content-type", "application/json")
        .header("x-proxy-error", blocked_header_value(reason))
        .body(Body::from(payload))
        .unwrap_or_else(|_| Response::new(Body::from("blocked")))
}
/// 403 plain-text variant of a blocked response, for non-JSON clients.
pub fn blocked_text(reason: &str) -> Response<Body> {
    let message = blocked_message(reason).to_string();
    Response::builder()
        .status(StatusCode::FORBIDDEN)
        .header("content-type", "text/plain")
        .header("x-proxy-error", blocked_header_value(reason))
        .body(Body::from(message))
        .unwrap_or_else(|_| Response::new(Body::from("blocked")))
}
/// Generic plain-text response with the given status; if the builder fails
/// the body is still delivered via a bare 200 response.
pub fn text_response(status: StatusCode, body: &str) -> Response<Body> {
    let fallback = body.to_string();
    Response::builder()
        .status(status)
        .header("content-type", "text/plain")
        .body(Body::from(body.to_string()))
        .unwrap_or_else(move |_| Response::new(Body::from(fallback)))
}
/// 200 JSON response serialized from `value`; serialization failures degrade
/// to an empty JSON object instead of an error.
pub fn json_response<T: Serialize>(value: &T) -> Response<Body> {
    let body = serde_json::to_string(value).unwrap_or_else(|_| "{}".to_string());
    Response::builder()
        .status(StatusCode::OK)
        .header("content-type", "application/json")
        .body(Body::from(body))
        .unwrap_or_else(|_| Response::new(Body::from("{}")))
}
/// Maps an internal block reason to the stable `x-proxy-error` header value.
/// Both allowlist-related reasons collapse to the same header value.
fn blocked_header_value(reason: &str) -> &'static str {
    if reason == "not_allowed" || reason == "not_allowed_local" {
        return "blocked-by-allowlist";
    }
    match reason {
        "denied" => "blocked-by-denylist",
        "method_not_allowed" => "blocked-by-method-policy",
        "mitm_required" => "blocked-by-mitm-required",
        _ => "blocked-by-policy",
    }
}
/// Human-readable explanation of a block, keyed by the internal reason code;
/// unknown reasons get a generic policy message.
fn blocked_message(reason: &str) -> &'static str {
    match reason {
        "denied" => "Codex blocked this request: domain denied by policy.",
        "not_allowed" => "Codex blocked this request: domain not in allowlist.",
        "not_allowed_local" => "Codex blocked this request: local addresses not allowed.",
        "method_not_allowed" => "Codex blocked this request: method not allowed in limited mode.",
        "mitm_required" => "Codex blocked this request: MITM required for limited HTTPS.",
        _ => "Codex blocked this request by network policy.",
    }
}

View File

@@ -0,0 +1,192 @@
use crate::config::NetworkMode;
use crate::policy::normalize_host;
use crate::state::AppState;
use crate::state::BlockedRequest;
use anyhow::Result;
use anyhow::anyhow;
use std::net::SocketAddr;
use std::sync::Arc;
use tokio::io::AsyncReadExt;
use tokio::io::AsyncWriteExt;
use tokio::io::copy_bidirectional;
use tokio::net::TcpListener;
use tokio::net::TcpStream;
use tracing::error;
use tracing::info;
use tracing::warn;
/// Accept loop for the SOCKS5 listener. Logs the effective mode once at
/// startup (limited mode rejects every SOCKS connection), then spawns one
/// task per client; per-session errors are logged, never fatal.
pub async fn run_socks5(state: Arc<AppState>, addr: SocketAddr) -> Result<()> {
    let listener = TcpListener::bind(addr).await?;
    info!(addr = %addr, "SOCKS5 proxy listening");
    match state.network_mode().await {
        Ok(NetworkMode::Limited) => {
            info!(
                mode = "limited",
                "SOCKS5 is blocked in limited mode; set mode=\"full\" to allow SOCKS5"
            );
        }
        Ok(NetworkMode::Full) => {}
        Err(err) => {
            warn!(error = %err, "failed to read network mode");
        }
    }
    loop {
        let (stream, peer_addr) = listener.accept().await?;
        let state = state.clone();
        tokio::spawn(async move {
            if let Err(err) = handle_socks5_client(stream, peer_addr, state).await {
                warn!(error = %err, "SOCKS5 session ended with error");
            }
        });
    }
}
/// Handles one SOCKS5 (RFC 1928) session: greeting, CONNECT request parsing,
/// policy checks, then a bidirectional relay to the target.
///
/// Protocol notes: only CONNECT (0x01) with IPv4 or domain targets is
/// supported; IPv6 targets are rejected with 0x08 (address type not
/// supported). NOTE(review): the client's offered auth methods are read but
/// ignored and no-auth (0x00) is always selected — a client that did not
/// offer 0x00 should per RFC receive 0xFF; confirm whether that matters here.
async fn handle_socks5_client(
    mut stream: TcpStream,
    peer_addr: SocketAddr,
    state: Arc<AppState>,
) -> Result<()> {
    // Greeting: VER, NMETHODS, then NMETHODS method bytes.
    let mut header = [0u8; 2];
    stream.read_exact(&mut header).await?;
    if header[0] != 0x05 {
        return Err(anyhow!("invalid SOCKS version"));
    }
    let nmethods = header[1] as usize;
    let mut methods = vec![0u8; nmethods];
    stream.read_exact(&mut methods).await?;
    // Always select "no authentication required".
    stream.write_all(&[0x05, 0x00]).await?;
    // Request: VER, CMD, RSV, ATYP.
    let mut req_header = [0u8; 4];
    stream.read_exact(&mut req_header).await?;
    if req_header[0] != 0x05 {
        return Err(anyhow!("invalid SOCKS request version"));
    }
    let cmd = req_header[1];
    if cmd != 0x01 {
        // 0x07 = command not supported (only CONNECT is handled).
        stream
            .write_all(&[0x05, 0x07, 0x00, 0x01, 0, 0, 0, 0, 0, 0])
            .await?;
        return Err(anyhow!("unsupported SOCKS command"));
    }
    let atyp = req_header[3];
    let host = match atyp {
        // ATYP 0x01: IPv4, four raw bytes.
        0x01 => {
            let mut addr = [0u8; 4];
            stream.read_exact(&mut addr).await?;
            format!("{}.{}.{}.{}", addr[0], addr[1], addr[2], addr[3])
        }
        // ATYP 0x03: domain name, length-prefixed.
        0x03 => {
            let mut len_buf = [0u8; 1];
            stream.read_exact(&mut len_buf).await?;
            let len = len_buf[0] as usize;
            let mut domain = vec![0u8; len];
            stream.read_exact(&mut domain).await?;
            String::from_utf8_lossy(&domain).to_string()
        }
        // ATYP 0x04 (IPv6) is not supported: reply 0x08.
        0x04 => {
            stream
                .write_all(&[0x05, 0x08, 0x00, 0x01, 0, 0, 0, 0, 0, 0])
                .await?;
            return Err(anyhow!("ipv6 not supported"));
        }
        _ => {
            stream
                .write_all(&[0x05, 0x08, 0x00, 0x01, 0, 0, 0, 0, 0, 0])
                .await?;
            return Err(anyhow!("unknown address type"));
        }
    };
    let mut port_buf = [0u8; 2];
    stream.read_exact(&mut port_buf).await?;
    let port = u16::from_be_bytes(port_buf);
    let normalized_host = normalize_host(&host);
    // Mode gate first: limited mode can't express per-method policy for raw
    // TCP, so every SOCKS connection is refused (reply 0x02: not allowed).
    match state.network_mode().await {
        Ok(NetworkMode::Limited) => {
            let _ = state
                .record_blocked(BlockedRequest::new(
                    normalized_host.clone(),
                    "method_not_allowed".to_string(),
                    Some(peer_addr.to_string()),
                    None,
                    Some(NetworkMode::Limited),
                    "socks5".to_string(),
                ))
                .await;
            warn!(
                client = %peer_addr,
                host = %normalized_host,
                mode = "limited",
                allowed_methods = "GET, HEAD, OPTIONS",
                "SOCKS blocked by method policy"
            );
            stream
                .write_all(&[0x05, 0x02, 0x00, 0x01, 0, 0, 0, 0, 0, 0])
                .await?;
            return Ok(());
        }
        Ok(NetworkMode::Full) => {}
        Err(err) => {
            // Policy evaluation failure: 0x01 = general SOCKS server failure.
            error!(error = %err, "failed to evaluate method policy");
            stream
                .write_all(&[0x05, 0x01, 0x00, 0x01, 0, 0, 0, 0, 0, 0])
                .await?;
            return Ok(());
        }
    }
    // Then the host allow/deny policy.
    match state.host_blocked(&normalized_host).await {
        Ok((true, reason)) => {
            let _ = state
                .record_blocked(BlockedRequest::new(
                    normalized_host.clone(),
                    reason.clone(),
                    Some(peer_addr.to_string()),
                    None,
                    None,
                    "socks5".to_string(),
                ))
                .await;
            warn!(client = %peer_addr, host = %normalized_host, reason = %reason, "SOCKS blocked");
            stream
                .write_all(&[0x05, 0x02, 0x00, 0x01, 0, 0, 0, 0, 0, 0])
                .await?;
            return Ok(());
        }
        Ok((false, _)) => {
            info!(
                client = %peer_addr,
                host = %normalized_host,
                port = port,
                "SOCKS allowed"
            );
        }
        Err(err) => {
            error!(error = %err, "failed to evaluate host");
            stream
                .write_all(&[0x05, 0x01, 0x00, 0x01, 0, 0, 0, 0, 0, 0])
                .await?;
            return Ok(());
        }
    }
    // Connect to the original (un-normalized) host so ports are preserved.
    let target = format!("{host}:{port}");
    let mut upstream = match TcpStream::connect(&target).await {
        Ok(stream) => stream,
        Err(err) => {
            // 0x04 = host unreachable.
            warn!(error = %err, "SOCKS connect failed");
            stream
                .write_all(&[0x05, 0x04, 0x00, 0x01, 0, 0, 0, 0, 0, 0])
                .await?;
            return Ok(());
        }
    };
    // Success reply with a zeroed BND address, then relay until either side
    // closes; relay errors are intentionally ignored.
    stream
        .write_all(&[0x05, 0x00, 0x00, 0x01, 0, 0, 0, 0, 0, 0])
        .await?;
    let _ = copy_bidirectional(&mut stream, &mut upstream).await;
    Ok(())
}

View File

@@ -0,0 +1,350 @@
use crate::config::Config;
use crate::config::MitmConfig;
use crate::config::NetworkMode;
use crate::mitm::MitmState;
use crate::policy::is_loopback_host;
use crate::policy::method_allowed;
use anyhow::Context;
use anyhow::Result;
use anyhow::anyhow;
use globset::GlobBuilder;
use globset::GlobSet;
use globset::GlobSetBuilder;
use hyper::Client;
use hyper::Method;
use hyper::client::HttpConnector;
use serde::Serialize;
use std::collections::HashSet;
use std::collections::VecDeque;
use std::path::Path;
use std::path::PathBuf;
use std::sync::Arc;
use std::time::SystemTime;
use std::time::UNIX_EPOCH;
use tokio::sync::RwLock;
use tracing::info;
use tracing::warn;
const MAX_BLOCKED_EVENTS: usize = 200;
/// One blocked-request event, kept in a bounded in-memory ring and exposed
/// (serialized) through the admin API.
#[derive(Clone, Debug, Serialize)]
pub struct BlockedRequest {
    // Normalized target host that was refused.
    pub host: String,
    // Machine-readable reason code ("denied", "not_allowed", ...).
    pub reason: String,
    // Client socket address, when known.
    pub client: Option<String>,
    // HTTP method, when the block happened at the HTTP layer.
    pub method: Option<String>,
    // Network mode in effect, when relevant to the block.
    pub mode: Option<NetworkMode>,
    // Which listener produced the event (e.g. "socks5").
    pub protocol: String,
    // Unix timestamp (seconds) when the event was recorded.
    pub timestamp: i64,
}
impl BlockedRequest {
    /// Builds an event stamped with the current unix time.
    pub fn new(
        host: String,
        reason: String,
        client: Option<String>,
        method: Option<String>,
        mode: Option<NetworkMode>,
        protocol: String,
    ) -> Self {
        Self {
            host,
            reason,
            client,
            method,
            mode,
            protocol,
            timestamp: unix_timestamp(),
        }
    }
}
/// Snapshot of everything derived from one read of the config file; replaced
/// wholesale on reload (only the blocked-event ring survives a reload).
#[derive(Clone)]
struct ConfigState {
    // Parsed configuration.
    cfg: Config,
    // File mtime at load, used to detect changes.
    mtime: Option<SystemTime>,
    // Compiled allowlist patterns.
    allow_set: GlobSet,
    // Compiled denylist patterns.
    deny_set: GlobSet,
    // MITM state, present only when enabled and compiled in.
    mitm: Option<Arc<MitmState>>,
    // Path the config was loaded from (watched for changes).
    cfg_path: PathBuf,
    // Ring buffer of recent blocked-request events.
    blocked: VecDeque<BlockedRequest>,
}
/// Shared application state: an HTTP client for plain forwarding plus the
/// hot-reloadable config snapshot behind an async RwLock.
#[derive(Clone)]
pub struct AppState {
    pub(crate) client: Client<HttpConnector>,
    state: Arc<RwLock<ConfigState>>,
}
impl AppState {
    /// Loads the initial config snapshot from `cfg_path`.
    pub async fn new(cfg_path: PathBuf) -> Result<Self> {
        let cfg_state = build_config_state(cfg_path)?;
        let client = Client::new();
        Ok(Self {
            client,
            state: Arc::new(RwLock::new(cfg_state)),
        })
    }
    /// Current config (reloading first if the file changed on disk).
    pub async fn current_cfg(&self) -> Result<Config> {
        self.reload_if_needed().await?;
        let guard = self.state.read().await;
        Ok(guard.cfg.clone())
    }
    /// Current raw (allowlist, denylist) pattern strings.
    pub async fn current_patterns(&self) -> Result<(Vec<String>, Vec<String>)> {
        self.reload_if_needed().await?;
        let guard = self.state.read().await;
        Ok((
            guard.cfg.network_proxy.policy.allowed_domains.clone(),
            guard.cfg.network_proxy.policy.denied_domains.clone(),
        ))
    }
    /// Rebuilds the snapshot from disk, logging policy diffs. On failure the
    /// previous snapshot stays in effect; the blocked-event ring is carried
    /// over across successful reloads.
    pub async fn force_reload(&self) -> Result<()> {
        let mut guard = self.state.write().await;
        let previous_cfg = guard.cfg.clone();
        let blocked = guard.blocked.clone();
        let cfg_path = guard.cfg_path.clone();
        match build_config_state(cfg_path.clone()) {
            Ok(mut new_state) => {
                log_policy_changes(&previous_cfg, &new_state.cfg);
                new_state.blocked = blocked;
                *guard = new_state;
                info!(path = %cfg_path.display(), "reloaded config");
                Ok(())
            }
            Err(err) => {
                warn!(error = %err, path = %cfg_path.display(), "failed to reload config; keeping previous config");
                Err(err)
            }
        }
    }
    /// Policy verdict for `host` as (blocked, reason). Precedence: the
    /// denylist wins; then loopback hosts are blocked unless local binding is
    /// allowed or the host is explicitly allowlisted; finally anything not on
    /// the allowlist is blocked — an empty allowlist therefore blocks all.
    pub async fn host_blocked(&self, host: &str) -> Result<(bool, String)> {
        self.reload_if_needed().await?;
        let guard = self.state.read().await;
        if guard.deny_set.is_match(host) {
            return Ok((true, "denied".to_string()));
        }
        let is_loopback = is_loopback_host(host);
        if is_loopback
            && !guard.cfg.network_proxy.policy.allow_local_binding
            && !guard.allow_set.is_match(host)
        {
            return Ok((true, "not_allowed_local".to_string()));
        }
        if guard.cfg.network_proxy.policy.allowed_domains.is_empty()
            || !guard.allow_set.is_match(host)
        {
            return Ok((true, "not_allowed".to_string()));
        }
        Ok((false, String::new()))
    }
    /// Appends a blocked event, evicting the oldest past MAX_BLOCKED_EVENTS.
    pub async fn record_blocked(&self, entry: BlockedRequest) -> Result<()> {
        self.reload_if_needed().await?;
        let mut guard = self.state.write().await;
        guard.blocked.push_back(entry);
        while guard.blocked.len() > MAX_BLOCKED_EVENTS {
            guard.blocked.pop_front();
        }
        Ok(())
    }
    /// Returns and clears all recorded blocked events (oldest first).
    pub async fn drain_blocked(&self) -> Result<Vec<BlockedRequest>> {
        self.reload_if_needed().await?;
        let mut guard = self.state.write().await;
        let blocked = std::mem::take(&mut guard.blocked);
        Ok(blocked.into_iter().collect())
    }
    /// Whether `path` appears verbatim in the allow_unix_sockets list
    /// (exact string match — no globbing or canonicalization here).
    pub async fn is_unix_socket_allowed(&self, path: &str) -> Result<bool> {
        self.reload_if_needed().await?;
        let guard = self.state.read().await;
        Ok(guard
            .cfg
            .network_proxy
            .policy
            .allow_unix_sockets
            .iter()
            .any(|p| p == path))
    }
    /// Whether `method` passes the current mode's method policy.
    pub async fn method_allowed(&self, method: &Method) -> Result<bool> {
        self.reload_if_needed().await?;
        let guard = self.state.read().await;
        Ok(method_allowed(guard.cfg.network_proxy.mode, method))
    }
    /// Current network mode (full or limited).
    pub async fn network_mode(&self) -> Result<NetworkMode> {
        self.reload_if_needed().await?;
        let guard = self.state.read().await;
        Ok(guard.cfg.network_proxy.mode)
    }
    /// Overrides the network mode in memory only. NOTE(review): the change
    /// is not persisted to the config file, so the next on-disk reload
    /// silently reverts it — confirm this is intended.
    pub async fn set_network_mode(&self, mode: NetworkMode) -> Result<()> {
        self.reload_if_needed().await?;
        let mut guard = self.state.write().await;
        guard.cfg.network_proxy.mode = mode;
        info!(mode = ?mode, "updated network mode");
        Ok(())
    }
    /// MITM state, when enabled in config and compiled in.
    pub async fn mitm_state(&self) -> Result<Option<Arc<MitmState>>> {
        self.reload_if_needed().await?;
        let guard = self.state.read().await;
        Ok(guard.mitm.clone())
    }
    /// Reloads when the config file's mtime moved forward or the file is
    /// missing. NOTE(review): while the file is absent this reloads on every
    /// call (mtime stays None) — harmless but wasteful.
    async fn reload_if_needed(&self) -> Result<()> {
        let needs_reload = {
            let guard = self.state.read().await;
            if !guard.cfg_path.exists() {
                true
            } else {
                let metadata = std::fs::metadata(&guard.cfg_path).ok();
                match (metadata.and_then(|m| m.modified().ok()), guard.mtime) {
                    (Some(new_mtime), Some(old_mtime)) => new_mtime > old_mtime,
                    (Some(_), None) => true,
                    _ => false,
                }
            }
        };
        if !needs_reload {
            return Ok(());
        }
        self.force_reload().await
    }
}
/// Builds a full config snapshot from `cfg_path`: a missing file falls back
/// to defaults, relative MITM paths are anchored next to the config file,
/// glob sets are compiled, and MITM state is constructed when enabled.
fn build_config_state(cfg_path: PathBuf) -> Result<ConfigState> {
    let mut cfg = if cfg_path.exists() {
        load_config_from_path(&cfg_path).with_context(|| {
            format!(
                "failed to load config from {}",
                cfg_path.as_path().display()
            )
        })?
    } else {
        Config::default()
    };
    resolve_mitm_paths(&mut cfg, &cfg_path);
    // Captured before compiling so a later touch triggers a reload.
    let mtime = cfg_path.metadata().and_then(|m| m.modified()).ok();
    let deny_set = compile_globset(&cfg.network_proxy.policy.denied_domains)?;
    let allow_set = compile_globset(&cfg.network_proxy.policy.allowed_domains)?;
    let mitm = if cfg.network_proxy.mitm.enabled {
        build_mitm_state(&cfg.network_proxy.mitm)?
    } else {
        None
    };
    Ok(ConfigState {
        cfg,
        mtime,
        allow_set,
        deny_set,
        mitm,
        cfg_path,
        blocked: VecDeque::new(),
    })
}
/// Anchors relative MITM CA paths next to the config file, so a bare
/// filename in the config resolves deterministically regardless of CWD.
fn resolve_mitm_paths(cfg: &mut Config, cfg_path: &Path) {
    let anchor = cfg_path.parent().unwrap_or_else(|| Path::new("."));
    let mitm = &mut cfg.network_proxy.mitm;
    if mitm.ca_cert_path.is_relative() {
        let resolved = anchor.join(&mitm.ca_cert_path);
        mitm.ca_cert_path = resolved;
    }
    if mitm.ca_key_path.is_relative() {
        let resolved = anchor.join(&mitm.ca_key_path);
        mitm.ca_key_path = resolved;
    }
}
/// Constructs MITM state from the config when the binary was built with the
/// `mitm` feature; otherwise logs a warning and returns `None`.
///
/// The parameter is named `_cfg` because it is only read in the
/// feature-enabled build.
fn build_mitm_state(_cfg: &MitmConfig) -> Result<Option<Arc<MitmState>>> {
    #[cfg(feature = "mitm")]
    {
        return Ok(Some(Arc::new(MitmState::new(_cfg)?)));
    }
    #[cfg(not(feature = "mitm"))]
    {
        // The config asked for MITM but this build cannot provide it.
        warn!("MITM enabled in config but binary built without mitm feature");
        Ok(None)
    }
}
/// Compiles domain patterns into a single case-insensitive `GlobSet`.
///
/// Every `*.example.com` wildcard also gets its apex (`example.com`) added
/// as a candidate, and duplicates are skipped so each glob compiles once.
fn compile_globset(patterns: &[String]) -> Result<GlobSet> {
    let mut builder = GlobSetBuilder::new();
    let mut seen = HashSet::new();
    for pattern in patterns {
        // `strip_prefix` yields the apex candidate only for wildcard entries.
        let apex = pattern.strip_prefix("*.");
        for candidate in std::iter::once(pattern.as_str()).chain(apex) {
            if !seen.insert(candidate.to_string()) {
                continue;
            }
            let glob = GlobBuilder::new(candidate)
                .case_insensitive(true)
                .build()
                .with_context(|| format!("invalid glob pattern: {candidate}"))?;
            builder.add(glob);
        }
    }
    Ok(builder.build()?)
}
/// Logs entry-level diffs between the previous and next network policies
/// for both the allowlist and the denylist.
fn log_policy_changes(previous: &Config, next: &Config) {
    let lists = [
        (
            "allowlist",
            &previous.network_proxy.policy.allowed_domains,
            &next.network_proxy.policy.allowed_domains,
        ),
        (
            "denylist",
            &previous.network_proxy.policy.denied_domains,
            &next.network_proxy.policy.denied_domains,
        ),
    ];
    for (list_name, prev_entries, next_entries) in lists {
        log_domain_list_changes(list_name, prev_entries, next_entries);
    }
}
/// Logs which entries were added to / removed from a domain list.
///
/// Comparison is case-insensitive (entries are lowercased), and each
/// distinct entry is reported at most once even if it appears repeatedly.
fn log_domain_list_changes(list_name: &str, previous: &[String], next: &[String]) {
    let lowercase_set = |entries: &[String]| -> HashSet<String> {
        entries.iter().map(|e| e.to_ascii_lowercase()).collect()
    };
    let previous_set = lowercase_set(previous);
    let next_set = lowercase_set(next);

    let mut reported = HashSet::new();
    for entry in next {
        let key = entry.to_ascii_lowercase();
        if reported.insert(key.clone()) && !previous_set.contains(&key) {
            info!(list = list_name, entry = %entry, "config entry added");
        }
    }
    // Reuse the de-duplication set for the removal pass.
    reported.clear();
    for entry in previous {
        let key = entry.to_ascii_lowercase();
        if reported.insert(key.clone()) && !next_set.contains(&key) {
            info!(list = list_name, entry = %entry, "config entry removed");
        }
    }
}
/// Seconds since the Unix epoch, or 0 if the system clock reads before it.
fn unix_timestamp() -> i64 {
    match SystemTime::now().duration_since(UNIX_EPOCH) {
        Ok(elapsed) => elapsed.as_secs() as i64,
        Err(_) => 0,
    }
}
/// Reads and parses the TOML config at `path`.
///
/// Both failure modes attach the file path as context and preserve the
/// underlying error as the source via `with_context`. Previously the parse
/// branch used `anyhow!("unable to parse config: {err}")`, which omitted the
/// path and flattened the TOML error's source chain, inconsistent with the
/// read branch.
fn load_config_from_path(path: &Path) -> Result<Config> {
    let raw = std::fs::read_to_string(path)
        .with_context(|| format!("unable to read config file {}", path.display()))?;
    toml::from_str(&raw)
        .with_context(|| format!("unable to parse config file {}", path.display()))
}

View File

@@ -9,6 +9,10 @@ use serde::Deserialize;
use serde::Serialize;
use ts_rs::TS;
/// Serde `skip_serializing_if` helper: true when the flag is unset, so the
/// field can be omitted from serialized output.
fn is_false(value: &bool) -> bool {
    !value
}
/// Proposed execpolicy change to allow commands starting with this prefix.
///
/// The `command` tokens form the prefix that would be added as an execpolicy
@@ -57,6 +61,8 @@ pub struct ExecApprovalRequestEvent {
#[ts(optional)]
pub proposed_execpolicy_amendment: Option<ExecPolicyAmendment>,
pub parsed_cmd: Vec<ParsedCommand>,
#[serde(default, skip_serializing_if = "is_false")]
pub network_preflight_only: bool,
}
#[derive(Debug, Clone, Deserialize, Serialize, JsonSchema, TS)]

View File

@@ -167,6 +167,14 @@ pub enum Op {
decision: ElicitationAction,
},
/// Cache a network approval decision for this session.
NetworkApprovalCache {
/// The host that was approved.
host: String,
/// The user's decision in response to the request.
decision: ReviewDecision,
},
/// Append an entry to the persistent cross-session message history.
///
/// Note the entry is not guaranteed to be logged if the user has

View File

@@ -22,11 +22,15 @@ use crate::update_action::UpdateAction;
use codex_ansi_escape::ansi_escape_line;
use codex_core::AuthManager;
use codex_core::ConversationManager;
use codex_core::config;
use codex_core::config::Config;
use codex_core::config::edit::ConfigEdit;
use codex_core::config::edit::ConfigEditsBuilder;
use codex_core::config::types::NetworkProxyConfig;
use codex_core::default_client::create_client;
#[cfg(target_os = "windows")]
use codex_core::features::Feature;
use codex_core::network_proxy;
use codex_core::openai_models::model_presets::HIDE_GPT_5_1_CODEX_MAX_MIGRATION_PROMPT_CONFIG;
use codex_core::openai_models::model_presets::HIDE_GPT5_1_MIGRATION_PROMPT_CONFIG;
use codex_core::openai_models::models_manager::ModelsManager;
@@ -34,6 +38,7 @@ use codex_core::protocol::EventMsg;
use codex_core::protocol::FinalOutput;
use codex_core::protocol::ListSkillsResponseEvent;
use codex_core::protocol::Op;
use codex_core::protocol::ReviewDecision;
use codex_core::protocol::SessionSource;
use codex_core::protocol::SkillErrorInfo;
use codex_core::protocol::TokenUsage;
@@ -52,6 +57,8 @@ use ratatui::text::Line;
use ratatui::widgets::Paragraph;
use ratatui::widgets::Wrap;
use std::collections::BTreeMap;
use std::collections::HashMap;
use std::collections::HashSet;
use std::path::Path;
use std::path::PathBuf;
use std::sync::Arc;
@@ -72,6 +79,10 @@ pub struct AppExitInfo {
pub update_action: Option<UpdateAction>,
}
/// Resolves the path to the Codex config file, wrapping resolution failures
/// with a short, user-facing context message.
fn codex_config_path() -> Result<PathBuf> {
    config::default_config_path().wrap_err("failed to resolve Codex config path")
}
fn session_summary(
token_usage: TokenUsage,
conversation_id: Option<ConversationId>,
@@ -304,6 +315,11 @@ pub(crate) struct App {
// One-shot suppression of the next world-writable scan after user confirmation.
skip_world_writable_scan_once: bool,
network_proxy_pending: HashSet<String>,
network_proxy_session_restore: HashMap<String, network_proxy::DomainState>,
unix_socket_pending: HashSet<String>,
unix_socket_session_restore: HashSet<String>,
}
impl App {
@@ -434,8 +450,19 @@ impl App {
pending_update_action: None,
suppress_shutdown_complete: false,
skip_world_writable_scan_once: false,
network_proxy_pending: HashSet::new(),
network_proxy_session_restore: HashMap::new(),
unix_socket_pending: HashSet::new(),
unix_socket_session_restore: HashSet::new(),
};
if app.config.network_proxy.enabled && app.config.sandbox_policy.has_full_network_access() {
Self::spawn_network_proxy_poller(
app.config.network_proxy.clone(),
app.app_event_tx.clone(),
);
}
// On startup, if Agent mode (workspace-write) or ReadOnly is active, warn about world-writable dirs on Windows.
#[cfg(target_os = "windows")]
{
@@ -485,6 +512,9 @@ impl App {
app.handle_tui_event(tui, event).await?
}
} {}
if let Err(err) = app.restore_network_proxy_approvals().await {
tracing::error!(error = %err, "failed to restore network proxy approvals");
}
tui.terminal.clear()?;
Ok(AppExitInfo {
token_usage: app.token_usage(),
@@ -544,6 +574,7 @@ impl App {
.await;
match event {
AppEvent::NewSession => {
let session_allow = self.chat_widget.network_session_allow();
let summary = session_summary(
self.chat_widget.token_usage(),
self.chat_widget.conversation_id(),
@@ -563,6 +594,7 @@ impl App {
model_family: model_family.clone(),
};
self.chat_widget = ChatWidget::new(init, self.server.clone());
self.chat_widget.set_network_session_allow(session_allow);
self.current_model = model_family.get_model_slug().to_string();
if let Some(summary) = summary {
let mut lines: Vec<Line<'static>> = vec![summary.usage_line.clone().into()];
@@ -575,6 +607,7 @@ impl App {
tui.frame_requester().schedule_frame();
}
AppEvent::OpenResumePicker => {
let session_allow = self.chat_widget.network_session_allow();
match crate::resume_picker::run_resume_picker(
tui,
&self.config.codex_home,
@@ -617,6 +650,7 @@ impl App {
resumed.conversation,
resumed.session_configured,
);
self.chat_widget.set_network_session_allow(session_allow);
self.current_model = model_family.get_model_slug().to_string();
if let Some(summary) = summary {
let mut lines: Vec<Line<'static>> =
@@ -1088,6 +1122,78 @@ impl App {
"E X E C".to_string(),
));
}
ApprovalRequest::Network { request } => {
let mut lines = Vec::new();
if !request.host.trim().is_empty() {
lines.push(Line::from(vec![
"Host: ".into(),
request.host.clone().bold(),
]));
}
if !request.reason.trim().is_empty() {
lines.push(Line::from(vec![
"Reason: ".into(),
request.reason.clone().into(),
]));
}
if let Some(method) = request.method.as_ref().filter(|value| !value.is_empty())
{
lines.push(Line::from(vec![
"Method: ".into(),
method.to_string().into(),
]));
}
if cfg!(debug_assertions) {
if !request.protocol.trim().is_empty() {
lines.push(Line::from(vec![
"Protocol: ".into(),
request.protocol.clone().into(),
]));
}
if let Some(mode) = request.mode {
let label = match mode {
codex_core::config::types::NetworkProxyMode::Limited => "limited",
codex_core::config::types::NetworkProxyMode::Full => "full",
};
lines.push(Line::from(vec!["Mode: ".into(), label.into()]));
}
}
if let Some(client) = request.client.as_ref().filter(|value| !value.is_empty())
{
lines.push(Line::from(vec![
"Client: ".into(),
client.to_string().dim(),
]));
}
let _ = tui.enter_alt_screen();
self.overlay = Some(Overlay::new_static_with_lines(lines, "N E T".to_string()));
}
ApprovalRequest::UnixSocket { request } => {
let mut lines = Vec::new();
if !request.label.trim().is_empty() {
lines.push(Line::from(vec![
"Resource: ".into(),
request.label.clone().bold(),
]));
}
if !request.socket_path.trim().is_empty() {
lines.push(Line::from(vec![
"Socket: ".into(),
request.socket_path.clone().dim(),
]));
}
if !request.allow_entry.trim().is_empty() {
lines.push(Line::from(vec![
"Allow entry: ".into(),
request.allow_entry.clone().dim(),
]));
}
let _ = tui.enter_alt_screen();
self.overlay = Some(Overlay::new_static_with_lines(
lines,
"S O C K E T".to_string(),
));
}
ApprovalRequest::McpElicitation {
server_name,
message,
@@ -1106,6 +1212,234 @@ impl App {
));
}
},
AppEvent::NetworkProxyApprovalRequest(request) => {
if !self.config.network_proxy.enabled {
return Ok(true);
}
let host = request.host.trim().to_string();
if host.is_empty() || self.network_proxy_pending.contains(&host) {
return Ok(true);
}
let reason = request.reason.trim();
if reason.eq_ignore_ascii_case("not_allowed")
&& let Ok(config_path) = codex_config_path()
{
match network_proxy::preflight_host(&config_path, &host) {
Ok(None) => {
self.chat_widget.resume_pending_exec_approval();
return Ok(true);
}
Ok(Some(_)) => {}
Err(err) => {
tracing::debug!(
error = %err,
"network proxy preflight host check failed"
);
}
}
}
if reason.eq_ignore_ascii_case("denied") {
self.chat_widget.add_error_message(format!(
"Network access to {host} is denied by the denylist."
));
return Ok(true);
}
if self.chat_widget.is_network_session_allowed(&host) {
self.chat_widget.resume_pending_exec_approval();
return Ok(true);
}
self.network_proxy_pending.insert(host);
self.chat_widget.on_network_approval_request(request);
}
AppEvent::UnixSocketApprovalRequest(request) => {
if !self.config.network_proxy.enabled {
return Ok(true);
}
let socket_path = request.socket_path.trim().to_string();
if socket_path.is_empty() || self.unix_socket_pending.contains(&socket_path) {
return Ok(true);
}
if let Ok(config_path) = codex_config_path() {
let socket = std::path::Path::new(&socket_path);
match network_proxy::unix_socket_allowed(&config_path, socket) {
Ok(true) => {
self.chat_widget.resume_pending_exec_approval();
return Ok(true);
}
Ok(false) => {}
Err(err) => {
tracing::debug!(error = %err, "unix socket preflight check failed");
}
}
}
self.unix_socket_pending.insert(socket_path);
self.chat_widget.on_unix_socket_approval_request(request);
}
AppEvent::NetworkProxyDecision { host, decision } => {
let host = host.trim().to_string();
if host.is_empty() {
return Ok(true);
}
self.network_proxy_pending.remove(&host);
let client = create_client();
let admin_url = self.config.network_proxy.admin_url.clone();
let config_path = codex_config_path()?;
let mut reload_needed = false;
let mut should_resume_exec = matches!(
decision,
crate::app_event::NetworkProxyDecision::AllowSession
| crate::app_event::NetworkProxyDecision::AllowAlways
);
match decision {
crate::app_event::NetworkProxyDecision::AllowSession => {
let original = match network_proxy::domain_state(&config_path, &host) {
Ok(state) => Some(state),
Err(err) => {
self.chat_widget.add_error_message(format!(
"Failed to read network policy for {host}: {err}"
));
should_resume_exec = false;
None
}
};
let allow_state = network_proxy::DomainState {
allowed: true,
denied: false,
};
if let Some(original) = original {
match network_proxy::set_domain_state(&config_path, &host, allow_state)
{
Ok(changed) => {
reload_needed |= changed;
self.network_proxy_session_restore
.entry(host.clone())
.or_insert(original);
self.chat_widget.add_network_session_allow(host.clone());
self.chat_widget.submit_op(Op::NetworkApprovalCache {
host: host.clone(),
decision: ReviewDecision::ApprovedForSession,
});
}
Err(err) => {
self.chat_widget.add_error_message(format!(
"Failed to allow {host} for this session: {err}"
));
should_resume_exec = false;
}
}
}
}
crate::app_event::NetworkProxyDecision::AllowAlways => {
let allow_state = network_proxy::DomainState {
allowed: true,
denied: false,
};
match network_proxy::set_domain_state(&config_path, &host, allow_state) {
Ok(changed) => {
reload_needed |= changed;
self.chat_widget.add_network_session_allow(host.clone());
}
Err(err) => {
self.chat_widget.add_error_message(format!(
"Failed to add {host} to allowlist: {err}"
));
}
}
}
crate::app_event::NetworkProxyDecision::Deny => {
let deny_state = network_proxy::DomainState {
allowed: false,
denied: true,
};
match network_proxy::set_domain_state(&config_path, &host, deny_state) {
Ok(changed) => {
reload_needed |= changed;
}
Err(err) => {
self.chat_widget.add_error_message(format!(
"Failed to add {host} to denylist: {err}"
));
}
}
}
}
if reload_needed && let Err(err) = network_proxy::reload(&client, &admin_url).await
{
self.chat_widget.add_error_message(format!(
"Failed to reload network proxy after policy update: {err}"
));
}
if should_resume_exec {
self.chat_widget.resume_pending_exec_approval();
} else {
self.chat_widget.reject_pending_exec_approval();
}
}
AppEvent::UnixSocketDecision {
socket_path,
allow_entry,
decision,
} => {
let socket_path = socket_path.trim().to_string();
if socket_path.is_empty() {
return Ok(true);
}
self.unix_socket_pending.remove(&socket_path);
let config_path = codex_config_path()?;
let mut should_resume_exec = matches!(
decision,
crate::app_event::UnixSocketDecision::AllowSession
| crate::app_event::UnixSocketDecision::AllowAlways
);
match decision {
crate::app_event::UnixSocketDecision::AllowSession => {
match network_proxy::add_allowed_unix_socket(&config_path, &socket_path) {
Ok(changed) => {
if changed {
self.unix_socket_session_restore.insert(socket_path.clone());
}
self.chat_widget
.add_unix_socket_session_allow(socket_path.clone());
}
Err(err) => {
self.chat_widget.add_error_message(format!(
"Failed to allow Unix socket {socket_path} for this session: {err}"
));
should_resume_exec = false;
}
}
}
crate::app_event::UnixSocketDecision::AllowAlways => {
if allow_entry.trim().is_empty() {
self.chat_widget.add_error_message(
"Failed to allow Unix socket permanently: missing allow entry"
.to_string(),
);
should_resume_exec = false;
} else {
match network_proxy::add_allowed_unix_socket(&config_path, &allow_entry)
{
Ok(_) => {
self.chat_widget
.add_unix_socket_session_allow(socket_path.clone());
}
Err(err) => {
self.chat_widget.add_error_message(format!(
"Failed to add Unix socket allow entry {allow_entry}: {err}"
));
should_resume_exec = false;
}
}
}
}
crate::app_event::UnixSocketDecision::Deny => {}
}
if should_resume_exec {
self.chat_widget.resume_pending_exec_approval();
} else {
self.chat_widget.reject_pending_exec_approval();
}
}
}
Ok(true)
}
@@ -1132,6 +1466,49 @@ impl App {
self.chat_widget.token_usage()
}
/// Rolls back session-scoped network approvals on exit.
///
/// Domain states that were temporarily overwritten for this session are
/// restored to their original values, triggering a proxy reload if anything
/// changed. Unix socket allowlist entries added for the session are then
/// removed best-effort. Previously the domain restores were drained into a
/// second `HashMap` and iterated from that copy; `std::mem::take` hands over
/// the original map directly.
async fn restore_network_proxy_approvals(&mut self) -> Result<()> {
    if self.network_proxy_session_restore.is_empty()
        && self.unix_socket_session_restore.is_empty()
    {
        return Ok(());
    }
    let config_path = codex_config_path()?;
    let client = create_client();
    let admin_url = self.config.network_proxy.admin_url.clone();
    let mut reload_needed = false;
    // Take ownership of the pending restores so the loop does not borrow self.
    for (host, state) in std::mem::take(&mut self.network_proxy_session_restore) {
        match network_proxy::set_domain_state(&config_path, &host, state) {
            Ok(changed) => {
                reload_needed |= changed;
            }
            Err(err) => {
                tracing::error!(
                    error = %err,
                    host = host,
                    "failed to restore network policy"
                );
            }
        }
    }
    if reload_needed {
        network_proxy::reload(&client, &admin_url)
            .await
            .map_err(|err| color_eyre::eyre::eyre!(err))?;
    }
    // Best-effort cleanup: a failed removal must not abort shutdown.
    for socket_path in std::mem::take(&mut self.unix_socket_session_restore) {
        let _ = network_proxy::remove_allowed_unix_socket(&config_path, &socket_path);
    }
    Ok(())
}
fn on_update_reasoning_effort(&mut self, effort: Option<ReasoningEffortConfig>) {
self.chat_widget.set_reasoning_effort(effort);
self.config.model_reasoning_effort = effort;
@@ -1197,6 +1574,35 @@ impl App {
};
}
/// Spawns a background task that polls the proxy admin endpoint for blocked
/// requests and forwards each one to the app as an approval event.
///
/// Does nothing when the proxy is disabled. A non-positive configured
/// interval falls back to 1000 ms. Poll failures are logged at debug level
/// and retried on the next tick.
fn spawn_network_proxy_poller(network_proxy: NetworkProxyConfig, tx: AppEventSender) {
    if !network_proxy.enabled {
        return;
    }
    let poll_interval_ms = if network_proxy.poll_interval_ms > 0 {
        network_proxy.poll_interval_ms
    } else {
        1000
    };
    // The interval is already in milliseconds; construct the Duration
    // directly instead of round-tripping through f64 seconds.
    let poll_interval = Duration::from_millis(poll_interval_ms as u64);
    let admin_url = network_proxy.admin_url;
    tokio::spawn(async move {
        let client = create_client();
        loop {
            match network_proxy::fetch_blocked(&client, &admin_url).await {
                Ok(blocked) => {
                    for request in blocked {
                        tx.send(AppEvent::NetworkProxyApprovalRequest(request));
                    }
                }
                Err(err) => {
                    tracing::debug!(error = %err, "network proxy poll failed");
                }
            }
            tokio::time::sleep(poll_interval).await;
        }
    });
}
#[cfg(target_os = "windows")]
fn spawn_world_writable_scan(
cwd: PathBuf,
@@ -1283,6 +1689,10 @@ mod tests {
pending_update_action: None,
suppress_shutdown_complete: false,
skip_world_writable_scan_once: false,
network_proxy_pending: HashSet::new(),
network_proxy_session_restore: HashMap::new(),
unix_socket_pending: HashSet::new(),
unix_socket_session_restore: HashSet::new(),
}
}
@@ -1323,6 +1733,10 @@ mod tests {
pending_update_action: None,
suppress_shutdown_complete: false,
skip_world_writable_scan_once: false,
network_proxy_pending: HashSet::new(),
network_proxy_session_restore: HashMap::new(),
unix_socket_pending: HashSet::new(),
unix_socket_session_restore: HashSet::new(),
},
rx,
op_rx,

View File

@@ -1,6 +1,7 @@
use std::path::PathBuf;
use codex_common::approval_presets::ApprovalPreset;
use codex_core::network_proxy::NetworkProxyBlockedRequest;
use codex_core::protocol::ConversationPathResponseEvent;
use codex_core::protocol::Event;
use codex_core::protocol::RateLimitSnapshot;
@@ -171,6 +172,25 @@ pub(crate) enum AppEvent {
/// Open the approval popup.
FullScreenApprovalRequest(ApprovalRequest),
/// Prompt for a blocked network request from the proxy.
NetworkProxyApprovalRequest(NetworkProxyBlockedRequest),
/// Prompt to allow a Unix socket path inside the sandbox (macOS only).
UnixSocketApprovalRequest(UnixSocketApprovalRequest),
/// User decision for a blocked network request.
NetworkProxyDecision {
host: String,
decision: NetworkProxyDecision,
},
/// User decision for a Unix socket approval request.
UnixSocketDecision {
socket_path: String,
allow_entry: String,
decision: UnixSocketDecision,
},
/// Open the feedback note entry overlay after the user selects a category.
OpenFeedbackNote {
category: FeedbackCategory,
@@ -183,6 +203,27 @@ pub(crate) enum AppEvent {
},
}
/// User decision for a blocked network request prompt.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub(crate) enum NetworkProxyDecision {
    /// Allow the host for the remainder of this session only.
    AllowSession,
    /// Allow the host and persist it in the allowlist.
    AllowAlways,
    /// Deny the host and persist it in the denylist.
    Deny,
}
/// Parameters for a Unix socket approval prompt.
#[derive(Debug, Clone)]
pub(crate) struct UnixSocketApprovalRequest {
    // Human-readable name of the resource, shown in the prompt title.
    pub label: String,
    // Filesystem path of the socket being requested.
    pub socket_path: String,
    // The config allowlist entry written when the user picks "Allow always".
    pub allow_entry: String,
}
/// User decision for a Unix socket approval prompt.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub(crate) enum UnixSocketDecision {
    /// Allow the socket for the remainder of this session only.
    AllowSession,
    /// Allow the socket and persist the allow entry in the config.
    AllowAlways,
    /// Reject access; nothing is persisted.
    Deny,
}
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub(crate) enum FeedbackCategory {
BadResult,

View File

@@ -2,6 +2,9 @@ use std::collections::HashMap;
use std::path::PathBuf;
use crate::app_event::AppEvent;
use crate::app_event::NetworkProxyDecision;
use crate::app_event::UnixSocketApprovalRequest;
use crate::app_event::UnixSocketDecision;
use crate::app_event_sender::AppEventSender;
use crate::bottom_pane::BottomPaneView;
use crate::bottom_pane::CancellationEvent;
@@ -16,8 +19,10 @@ use crate::key_hint::KeyBinding;
use crate::render::highlight::highlight_bash_to_lines;
use crate::render::renderable::ColumnRenderable;
use crate::render::renderable::Renderable;
use codex_core::config::types::NetworkProxyMode;
use codex_core::features::Feature;
use codex_core::features::Features;
use codex_core::network_proxy::NetworkProxyBlockedRequest;
use codex_core::protocol::ElicitationAction;
use codex_core::protocol::ExecPolicyAmendment;
use codex_core::protocol::FileChange;
@@ -51,6 +56,12 @@ pub(crate) enum ApprovalRequest {
cwd: PathBuf,
changes: HashMap<PathBuf, FileChange>,
},
Network {
request: NetworkProxyBlockedRequest,
},
UnixSocket {
request: UnixSocketApprovalRequest,
},
McpElicitation {
server_name: String,
request_id: RequestId,
@@ -119,6 +130,13 @@ impl ApprovalOverlay {
patch_options(),
"Would you like to make the following edits?".to_string(),
),
ApprovalVariant::Network { preflight_only, .. } => (
network_options(*preflight_only),
"Allow network access to this domain?".to_string(),
),
ApprovalVariant::UnixSocket { label, .. } => {
(unix_socket_options(), format!("Allow access to {label}?"))
}
ApprovalVariant::McpElicitation { server_name, .. } => (
elicitation_options(),
format!("{server_name} needs your approval."),
@@ -174,6 +192,19 @@ impl ApprovalOverlay {
(ApprovalVariant::ApplyPatch { id, .. }, ApprovalDecision::Review(decision)) => {
self.handle_patch_decision(id, decision.clone());
}
(ApprovalVariant::Network { host, .. }, ApprovalDecision::Network(decision)) => {
self.handle_network_decision(host, *decision);
}
(
ApprovalVariant::UnixSocket {
socket_path,
allow_entry,
..
},
ApprovalDecision::UnixSocket(decision),
) => {
self.handle_unix_socket_decision(socket_path, allow_entry, *decision);
}
(
ApprovalVariant::McpElicitation {
server_name,
@@ -207,6 +238,31 @@ impl ApprovalOverlay {
}));
}
/// Records the user's network decision as a history cell, then forwards the
/// decision event to the app layer for policy/config handling.
fn handle_network_decision(&self, host: &str, decision: NetworkProxyDecision) {
    let host = host.to_string();
    let cell = history_cell::new_network_approval_decision_cell(host.clone(), decision);
    self.app_event_tx.send(AppEvent::InsertHistoryCell(cell));
    self.app_event_tx
        .send(AppEvent::NetworkProxyDecision { host, decision });
}
/// Records the user's Unix socket decision as a history cell, then forwards
/// it (with the config allow-entry string) to the app layer.
fn handle_unix_socket_decision(
    &self,
    socket_path: &str,
    allow_entry: &str,
    decision: UnixSocketDecision,
) {
    let socket_path = socket_path.to_string();
    let cell =
        history_cell::new_unix_socket_approval_decision_cell(socket_path.clone(), decision);
    self.app_event_tx.send(AppEvent::InsertHistoryCell(cell));
    self.app_event_tx.send(AppEvent::UnixSocketDecision {
        socket_path,
        allow_entry: allow_entry.to_string(),
        decision,
    });
}
fn handle_elicitation_decision(
&self,
server_name: &str,
@@ -286,6 +342,20 @@ impl BottomPaneView for ApprovalOverlay {
ApprovalVariant::ApplyPatch { id, .. } => {
self.handle_patch_decision(id, ReviewDecision::Abort);
}
ApprovalVariant::Network { host, .. } => {
self.handle_network_decision(host, NetworkProxyDecision::Deny);
}
ApprovalVariant::UnixSocket {
socket_path,
allow_entry,
..
} => {
self.handle_unix_socket_decision(
socket_path,
allow_entry,
UnixSocketDecision::Deny,
);
}
ApprovalVariant::McpElicitation {
server_name,
request_id,
@@ -386,6 +456,87 @@ impl From<ApprovalRequest> for ApprovalRequestState {
header: Box::new(ColumnRenderable::with(header)),
}
}
ApprovalRequest::Network { request } => {
let mut header: Vec<Line<'static>> = Vec::new();
let host = request.host.trim().to_string();
if !host.is_empty() {
header.push(Line::from(vec!["Host: ".into(), host.clone().bold()]));
}
let reason = request.reason.trim().to_string();
if !reason.is_empty() {
let reason_label = network_reason_label(&reason);
header.push(Line::from(vec!["Reason: ".into(), reason_label.into()]));
if let Some(hint) = network_reason_hint(&reason) {
header.push(Line::from(vec!["Hint: ".into(), hint.dim()]));
}
}
if let Some(method) = request
.method
.as_ref()
.filter(|value| !value.is_empty())
.cloned()
{
header.push(Line::from(vec!["Method: ".into(), method.into()]));
}
if cfg!(debug_assertions) {
let protocol = request.protocol.trim().to_string();
if !protocol.is_empty() {
header.push(Line::from(vec!["Protocol: ".into(), protocol.into()]));
}
if let Some(mode) = request.mode {
let label = match mode {
NetworkProxyMode::Limited => "limited",
NetworkProxyMode::Full => "full",
};
header.push(Line::from(vec!["Mode: ".into(), label.into()]));
}
}
if let Some(client) = request
.client
.as_ref()
.filter(|value| !value.is_empty())
.cloned()
{
header.push(Line::from(vec!["Client: ".into(), client.dim()]));
}
let preflight_only = request.protocol.trim().eq_ignore_ascii_case("preflight");
Self {
variant: ApprovalVariant::Network {
host,
preflight_only,
},
header: Box::new(Paragraph::new(header).wrap(Wrap { trim: false })),
}
}
ApprovalRequest::UnixSocket { request } => {
let mut header: Vec<Line<'static>> = Vec::new();
if !request.label.trim().is_empty() {
header.push(Line::from(vec![
"Resource: ".into(),
request.label.clone().bold(),
]));
}
if !request.socket_path.trim().is_empty() {
header.push(Line::from(vec![
"Socket: ".into(),
request.socket_path.clone().dim(),
]));
}
if !request.allow_entry.trim().is_empty() {
header.push(Line::from(vec![
"Allow entry: ".into(),
request.allow_entry.clone().dim(),
]));
}
Self {
variant: ApprovalVariant::UnixSocket {
socket_path: request.socket_path,
allow_entry: request.allow_entry,
label: request.label,
},
header: Box::new(Paragraph::new(header).wrap(Wrap { trim: false })),
}
}
ApprovalRequest::McpElicitation {
server_name,
request_id,
@@ -409,6 +560,26 @@ impl From<ApprovalRequest> for ApprovalRequestState {
}
}
/// Maps a proxy block-reason code to a human-readable label, passing
/// unrecognized codes through unchanged.
fn network_reason_label(reason: &str) -> String {
    let label = match reason {
        "not_allowed" => "Domain not in allowlist",
        "not_allowed_local" => "Loopback blocked by policy",
        "denied" => "Domain denied by denylist",
        "method_not_allowed" => "Method blocked by network mode",
        "mitm_required" => "MITM required for limited HTTPS",
        other => other,
    };
    label.to_string()
}
/// Returns a remediation hint for reason codes that have one; `None` for
/// everything else.
fn network_reason_hint(reason: &str) -> Option<&'static str> {
    if reason == "not_allowed_local" {
        Some("Allow loopback or add the host to the allowlist.")
    } else if reason == "method_not_allowed" {
        Some("Switch to full mode or enable MITM to allow this method.")
    } else if reason == "mitm_required" {
        Some("Enable MITM or switch to full mode for HTTPS tunneling.")
    } else {
        None
    }
}
#[derive(Clone)]
enum ApprovalVariant {
Exec {
@@ -419,6 +590,15 @@ enum ApprovalVariant {
ApplyPatch {
id: String,
},
Network {
host: String,
preflight_only: bool,
},
UnixSocket {
socket_path: String,
allow_entry: String,
label: String,
},
McpElicitation {
server_name: String,
request_id: RequestId,
@@ -428,6 +608,8 @@ enum ApprovalVariant {
#[derive(Clone)]
enum ApprovalDecision {
Review(ReviewDecision),
Network(NetworkProxyDecision),
UnixSocket(UnixSocketDecision),
McpElicitation(ElicitationAction),
}
@@ -503,6 +685,62 @@ fn patch_options() -> Vec<ApprovalOption> {
]
}
/// Builds the option list for a network approval prompt.
///
/// During a preflight-only check the first option reads "Allow for session"
/// and additionally answers to `y`; otherwise it is a plain "Allow".
fn network_options(preflight_only: bool) -> Vec<ApprovalOption> {
    let session_label = if preflight_only {
        "Allow for session"
    } else {
        "Allow"
    };
    let mut session_shortcuts = vec![key_hint::plain(KeyCode::Char('s'))];
    if preflight_only {
        session_shortcuts.push(key_hint::plain(KeyCode::Char('y')));
    }
    vec![
        ApprovalOption {
            label: session_label.to_string(),
            decision: ApprovalDecision::Network(NetworkProxyDecision::AllowSession),
            display_shortcut: None,
            additional_shortcuts: session_shortcuts,
        },
        ApprovalOption {
            label: "Allow always (add to allowlist)".to_string(),
            decision: ApprovalDecision::Network(NetworkProxyDecision::AllowAlways),
            display_shortcut: None,
            additional_shortcuts: vec![key_hint::plain(KeyCode::Char('a'))],
        },
        ApprovalOption {
            label: "Deny (add to denylist)".to_string(),
            decision: ApprovalDecision::Network(NetworkProxyDecision::Deny),
            display_shortcut: Some(key_hint::plain(KeyCode::Esc)),
            additional_shortcuts: vec![key_hint::plain(KeyCode::Char('n'))],
        },
    ]
}
/// Builds the option list for a Unix socket approval prompt: allow for the
/// session, allow permanently (allowlist), or deny.
fn unix_socket_options() -> Vec<ApprovalOption> {
    let mut options = Vec::with_capacity(3);
    options.push(ApprovalOption {
        label: "Allow for session".to_string(),
        decision: ApprovalDecision::UnixSocket(UnixSocketDecision::AllowSession),
        display_shortcut: None,
        additional_shortcuts: vec![key_hint::plain(KeyCode::Char('s'))],
    });
    options.push(ApprovalOption {
        label: "Allow always (add to allowlist)".to_string(),
        decision: ApprovalDecision::UnixSocket(UnixSocketDecision::AllowAlways),
        display_shortcut: None,
        additional_shortcuts: vec![key_hint::plain(KeyCode::Char('a'))],
    });
    options.push(ApprovalOption {
        label: "Deny".to_string(),
        decision: ApprovalDecision::UnixSocket(UnixSocketDecision::Deny),
        display_shortcut: Some(key_hint::plain(KeyCode::Esc)),
        additional_shortcuts: vec![key_hint::plain(KeyCode::Char('n'))],
    });
    options
}
fn elicitation_options() -> Vec<ApprovalOption> {
vec![
ApprovalOption {

View File

@@ -13,6 +13,8 @@ use codex_core::features::FEATURES;
use codex_core::features::Feature;
use codex_core::git_info::current_branch_name;
use codex_core::git_info::local_git_branches;
use codex_core::network_proxy;
use codex_core::network_proxy::NetworkProxyBlockedRequest;
use codex_core::openai_models::model_family::ModelFamily;
use codex_core::openai_models::models_manager::ModelsManager;
use codex_core::project_doc::DEFAULT_PROJECT_DOC_FILENAME;
@@ -45,6 +47,7 @@ use codex_core::protocol::McpToolCallEndEvent;
use codex_core::protocol::Op;
use codex_core::protocol::PatchApplyBeginEvent;
use codex_core::protocol::RateLimitSnapshot;
use codex_core::protocol::ReviewDecision;
use codex_core::protocol::ReviewRequest;
use codex_core::protocol::ReviewTarget;
use codex_core::protocol::SkillsListEntry;
@@ -85,6 +88,7 @@ use tokio::task::JoinHandle;
use tracing::debug;
use crate::app_event::AppEvent;
use crate::app_event::UnixSocketApprovalRequest;
use crate::app_event_sender::AppEventSender;
use crate::bottom_pane::ApprovalRequest;
use crate::bottom_pane::BetaFeatureItem;
@@ -353,6 +357,9 @@ pub(crate) struct ChatWidget {
pre_review_token_info: Option<Option<TokenUsageInfo>>,
// Whether to add a final message separator after the last message
needs_final_message_separator: bool,
pending_exec_approval: Option<PendingExecApproval>,
network_proxy_session_allow: HashSet<String>,
unix_socket_session_allow: HashSet<String>,
last_rendered_width: std::cell::Cell<Option<usize>>,
// Feedback sink for /feedback
@@ -361,6 +368,17 @@ pub(crate) struct ChatWidget {
current_rollout_path: Option<PathBuf>,
}
/// The sandbox resource whose approval is blocking a parked exec request.
enum PendingExecBlock {
    // Waiting on network approval for this host.
    NetworkHost(String),
    // Waiting on approval for this Unix socket path.
    UnixSocket(String),
}
/// An exec approval request parked while a network/Unix-socket prompt is
/// resolved; it is replayed or rejected once the user decides.
struct PendingExecApproval {
    // Approval id used when submitting the final `Op::ExecApproval`.
    id: String,
    // The original exec approval event, replayed on resume.
    event: ExecApprovalRequestEvent,
    // Which resource blocked the exec (drives the rejection message).
    block: PendingExecBlock,
}
struct UserMessage {
text: String,
image_paths: Vec<PathBuf>,
@@ -846,6 +864,22 @@ impl ChatWidget {
);
}
/// Routes a blocked-network approval request either to the deferred queue
/// or to immediate handling, depending on widget state.
pub(crate) fn on_network_approval_request(&mut self, request: NetworkProxyBlockedRequest) {
    let immediate = request.clone();
    self.defer_or_handle(
        |q| q.push_network_approval(request),
        |s| s.handle_network_approval_request(immediate),
    );
}
/// Routes a Unix socket approval request either to the deferred queue or
/// to immediate handling, depending on widget state.
pub(crate) fn on_unix_socket_approval_request(&mut self, request: UnixSocketApprovalRequest) {
    let immediate = request.clone();
    self.defer_or_handle(
        |q| q.push_unix_socket_approval(request),
        |s| s.handle_unix_socket_approval_request(immediate),
    );
}
fn on_elicitation_request(&mut self, ev: ElicitationRequestEvent) {
let ev2 = ev.clone();
self.defer_or_handle(
@@ -1188,6 +1222,47 @@ impl ChatWidget {
}
/// Entry point for an exec approval request from core.
///
/// Before showing the normal exec approval UI, runs two preflight checks on
/// the command: (1) does it appear to need an un-approved Unix socket, and
/// (2) does it reach a host the network proxy would block. Either case parks
/// the request in `pending_exec_approval` and raises the corresponding
/// approval prompt instead; a denylisted host rejects the exec outright.
pub(crate) fn handle_exec_approval_now(&mut self, id: String, ev: ExecApprovalRequestEvent) {
    // Unix socket preflight: park the exec and prompt for socket access.
    if self.pending_exec_approval.is_none()
        && let Some(request) = self.preflight_unix_socket_request(&ev.command)
    {
        self.pending_exec_approval = Some(PendingExecApproval {
            id: id.clone(),
            event: ev,
            block: PendingExecBlock::UnixSocket(request.socket_path.clone()),
        });
        self.app_event_tx
            .send(AppEvent::UnixSocketApprovalRequest(request));
        return;
    }
    // Network preflight: either reject outright (denylist) or park the exec
    // and prompt for the host.
    if self.pending_exec_approval.is_none()
        && let Some(mut request) = self.preflight_network_request(&ev.command)
    {
        if request.reason.trim().eq_ignore_ascii_case("denied") {
            self.add_error_message(format!(
                "Exec canceled because network access to {} is denied by the denylist.",
                request.host
            ));
            self.submit_op(Op::ExecApproval {
                id,
                decision: ReviewDecision::Denied,
            });
            return;
        }
        // Tag the request with the approval id so the decision can be
        // correlated back to this exec.
        request.call_id = Some(id.clone());
        self.pending_exec_approval = Some(PendingExecApproval {
            id,
            event: ev,
            block: PendingExecBlock::NetworkHost(request.host.clone()),
        });
        self.app_event_tx
            .send(AppEvent::NetworkProxyApprovalRequest(request));
        return;
    }
    // No preflight block applies: show the regular exec approval UI.
    self.show_exec_approval(id, ev);
}
fn show_exec_approval(&mut self, id: String, ev: ExecApprovalRequestEvent) {
self.flush_answer_stream_with_separator();
let command = shlex::try_join(ev.command.iter().map(String::as_str))
.unwrap_or_else(|_| ev.command.join(" "));
@@ -1204,6 +1279,124 @@ impl ChatWidget {
self.request_redraw();
}
/// Resume an exec approval that was parked behind a preflight prompt,
/// after the user allowed the blocked resource.
pub(crate) fn resume_pending_exec_approval(&mut self) {
    let Some(pending) = self.pending_exec_approval.take() else {
        return;
    };
    if pending.event.network_preflight_only {
        // Preflight-only requests have no separate exec modal: approve
        // the exec directly.
        self.submit_op(Op::ExecApproval {
            id: pending.id,
            decision: ReviewDecision::Approved,
        });
    } else {
        // Re-run the full approval flow; a different preflight check may
        // still fire before the exec modal is shown.
        self.handle_exec_approval_now(pending.id, pending.event);
    }
}
pub(crate) fn reject_pending_exec_approval(&mut self) {
if let Some(pending) = self.pending_exec_approval.take() {
let reason = match pending.block {
PendingExecBlock::NetworkHost(host) => {
format!("Exec canceled because network access to {host} was denied.")
}
PendingExecBlock::UnixSocket(socket_path) => {
format!("Exec canceled because Unix socket access to {socket_path} was denied.")
}
};
self.add_to_history(history_cell::new_error_event(reason));
self.submit_op(Op::ExecApproval {
id: pending.id,
decision: ReviewDecision::Denied,
});
}
}
/// Add a host to the session-scoped network allowlist. Hosts are
/// normalized (trimmed, lowercased); empty entries are ignored.
pub(crate) fn add_network_session_allow(&mut self, host: String) {
    let normalized = host.trim().to_ascii_lowercase();
    if !normalized.is_empty() {
        self.network_proxy_session_allow.insert(normalized);
    }
}
/// Add a socket path to the session-scoped Unix socket allowlist.
/// Paths are trimmed; empty entries are ignored.
pub(crate) fn add_unix_socket_session_allow(&mut self, socket_path: String) {
    let trimmed = socket_path.trim().to_string();
    if !trimmed.is_empty() {
        self.unix_socket_session_allow.insert(trimmed);
    }
}
/// Snapshot of the session-scoped network allowlist.
pub(crate) fn network_session_allow(&self) -> HashSet<String> {
    self.network_proxy_session_allow
        .iter()
        .cloned()
        .collect()
}
/// Replace the session-scoped network allowlist wholesale.
pub(crate) fn set_network_session_allow(&mut self, allowed: HashSet<String>) {
    self.network_proxy_session_allow = allowed;
}
/// Whether `host` was already allowed for this session. The host is
/// normalized (trimmed, lowercased) before lookup; empty hosts never match.
pub(crate) fn is_network_session_allowed(&self, host: &str) -> bool {
    let candidate = host.trim().to_ascii_lowercase();
    !candidate.is_empty() && self.network_proxy_session_allow.contains(&candidate)
}
/// Whether `socket_path` was already allowed for this session. The path is
/// trimmed before lookup; empty paths never match.
fn is_unix_socket_session_allowed(&self, socket_path: &str) -> bool {
    let trimmed = socket_path.trim();
    !trimmed.is_empty() && self.unix_socket_session_allow.contains(trimmed)
}
/// Run the network-proxy preflight for `command` and return a blocked
/// request that still needs user approval, if any. Preflight errors are
/// logged at debug level and treated as "no block"; hosts already allowed
/// for this session are filtered out.
fn preflight_network_request(&self, command: &[String]) -> Option<NetworkProxyBlockedRequest> {
    let request = network_proxy::preflight_blocked_request_if_enabled(
        &self.config.network_proxy,
        &self.config.sandbox_policy,
        command,
    )
    .unwrap_or_else(|err| {
        tracing::debug!(error = %err, "network proxy preflight failed");
        None
    })?;
    if self.is_network_session_allowed(&request.host) {
        None
    } else {
        Some(request)
    }
}
/// Run the Unix-socket preflight for `command` and return an approval
/// request for a blocked socket, if any. Preflight errors are logged at
/// debug level and treated as "no block"; sockets already allowed for this
/// session are filtered out.
fn preflight_unix_socket_request(
    &self,
    command: &[String],
) -> Option<UnixSocketApprovalRequest> {
    let blocked = network_proxy::preflight_blocked_unix_socket_if_enabled(
        &self.config.network_proxy,
        &self.config.sandbox_policy,
        command,
    )
    .unwrap_or_else(|err| {
        tracing::debug!(error = %err, "unix socket preflight failed");
        None
    })?;
    let socket_path = blocked.socket_path.to_string_lossy().into_owned();
    if self.is_unix_socket_session_allowed(&socket_path) {
        return None;
    }
    Some(UnixSocketApprovalRequest {
        label: "SSH agent socket".to_string(),
        socket_path,
        allow_entry: blocked.suggested_allow_entry,
    })
}
pub(crate) fn handle_apply_patch_approval_now(
&mut self,
id: String,
@@ -1226,6 +1419,27 @@ impl ChatWidget {
});
}
/// Show the network approval prompt in the bottom pane and emit a desktop
/// notification for the blocked host.
pub(crate) fn handle_network_approval_request(&mut self, request: NetworkProxyBlockedRequest) {
    self.flush_answer_stream_with_separator();
    self.notify(Notification::NetworkApprovalRequested {
        host: request.host.clone(),
    });
    self.bottom_pane
        .push_approval_request(ApprovalRequest::Network { request }, &self.config.features);
    self.request_redraw();
}
/// Show the Unix socket approval prompt in the bottom pane.
///
/// NOTE(review): unlike `handle_network_approval_request`, no desktop
/// notification is emitted here — confirm that is intentional.
pub(crate) fn handle_unix_socket_approval_request(
    &mut self,
    request: UnixSocketApprovalRequest,
) {
    // Flush any in-flight streamed answer so the modal does not interleave
    // with partial output.
    self.flush_answer_stream_with_separator();
    self.bottom_pane.push_approval_request(
        ApprovalRequest::UnixSocket { request },
        &self.config.features,
    );
    self.request_redraw();
}
pub(crate) fn handle_elicitation_request_now(&mut self, ev: ElicitationRequestEvent) {
self.flush_answer_stream_with_separator();
@@ -1423,6 +1637,9 @@ impl ChatWidget {
is_review_mode: false,
pre_review_token_info: None,
needs_final_message_separator: false,
pending_exec_approval: None,
network_proxy_session_allow: HashSet::new(),
unix_socket_session_allow: HashSet::new(),
last_rendered_width: std::cell::Cell::new(None),
feedback,
current_rollout_path: None,
@@ -1509,6 +1726,9 @@ impl ChatWidget {
is_review_mode: false,
pre_review_token_info: None,
needs_final_message_separator: false,
pending_exec_approval: None,
network_proxy_session_allow: HashSet::new(),
unix_socket_session_allow: HashSet::new(),
last_rendered_width: std::cell::Cell::new(None),
feedback,
current_rollout_path: None,
@@ -3469,6 +3689,7 @@ enum Notification {
AgentTurnComplete { response: String },
ExecApprovalRequested { command: String },
EditApprovalRequested { cwd: PathBuf, changes: Vec<PathBuf> },
NetworkApprovalRequested { host: String },
ElicitationRequested { server_name: String },
}
@@ -3493,6 +3714,9 @@ impl Notification {
}
)
}
Notification::NetworkApprovalRequested { host } => {
format!("Network approval requested: {}", truncate_text(host, 40))
}
Notification::ElicitationRequested { server_name } => {
format!("Approval requested by {server_name}")
}
@@ -3504,6 +3728,7 @@ impl Notification {
Notification::AgentTurnComplete { .. } => "agent-turn-complete",
Notification::ExecApprovalRequested { .. }
| Notification::EditApprovalRequested { .. }
| Notification::NetworkApprovalRequested { .. }
| Notification::ElicitationRequested { .. } => "approval-requested",
}
}

View File

@@ -1,5 +1,6 @@
use std::collections::VecDeque;
use codex_core::network_proxy::NetworkProxyBlockedRequest;
use codex_core::protocol::ApplyPatchApprovalRequestEvent;
use codex_core::protocol::ExecApprovalRequestEvent;
use codex_core::protocol::ExecCommandBeginEvent;
@@ -9,12 +10,16 @@ use codex_core::protocol::McpToolCallEndEvent;
use codex_core::protocol::PatchApplyEndEvent;
use codex_protocol::approvals::ElicitationRequestEvent;
use crate::app_event::UnixSocketApprovalRequest;
use super::ChatWidget;
#[derive(Debug)]
pub(crate) enum QueuedInterrupt {
ExecApproval(String, ExecApprovalRequestEvent),
ApplyPatchApproval(String, ApplyPatchApprovalRequestEvent),
NetworkApproval(NetworkProxyBlockedRequest),
UnixSocketApproval(UnixSocketApprovalRequest),
Elicitation(ElicitationRequestEvent),
ExecBegin(ExecCommandBeginEvent),
ExecEnd(ExecCommandEndEvent),
@@ -53,6 +58,16 @@ impl InterruptManager {
.push_back(QueuedInterrupt::ApplyPatchApproval(id, ev));
}
/// Queue a network approval request to be handled after the current
/// interrupt completes.
pub(crate) fn push_network_approval(&mut self, request: NetworkProxyBlockedRequest) {
    let interrupt = QueuedInterrupt::NetworkApproval(request);
    self.queue.push_back(interrupt);
}
/// Queue a Unix socket approval request to be handled after the current
/// interrupt completes.
pub(crate) fn push_unix_socket_approval(&mut self, request: UnixSocketApprovalRequest) {
    let interrupt = QueuedInterrupt::UnixSocketApproval(request);
    self.queue.push_back(interrupt);
}
/// Queue an elicitation request to be handled after the current
/// interrupt completes.
pub(crate) fn push_elicitation(&mut self, ev: ElicitationRequestEvent) {
    let interrupt = QueuedInterrupt::Elicitation(ev);
    self.queue.push_back(interrupt);
}
@@ -84,6 +99,12 @@ impl InterruptManager {
QueuedInterrupt::ApplyPatchApproval(id, ev) => {
chat.handle_apply_patch_approval_now(id, ev)
}
QueuedInterrupt::NetworkApproval(request) => {
chat.handle_network_approval_request(request)
}
QueuedInterrupt::UnixSocketApproval(request) => {
chat.handle_unix_socket_approval_request(request)
}
QueuedInterrupt::Elicitation(ev) => chat.handle_elicitation_request_now(ev),
QueuedInterrupt::ExecBegin(ev) => chat.handle_exec_begin_now(ev),
QueuedInterrupt::ExecEnd(ev) => chat.handle_exec_end_now(ev),

View File

@@ -405,6 +405,9 @@ fn make_chatwidget_manual(
is_review_mode: false,
pre_review_token_info: None,
needs_final_message_separator: false,
pending_exec_approval: None,
network_proxy_session_allow: std::collections::HashSet::new(),
unix_socket_session_allow: std::collections::HashSet::new(),
last_rendered_width: std::cell::Cell::new(None),
feedback: codex_feedback::CodexFeedback::new(),
current_rollout_path: None,
@@ -724,6 +727,7 @@ fn exec_approval_emits_proposed_command_and_decision_history() {
),
proposed_execpolicy_amendment: None,
parsed_cmd: vec![],
network_preflight_only: false,
};
chat.handle_codex_event(Event {
id: "sub-short".into(),
@@ -768,6 +772,7 @@ fn exec_approval_decision_truncates_multiline_and_long_commands() {
),
proposed_execpolicy_amendment: None,
parsed_cmd: vec![],
network_preflight_only: false,
};
chat.handle_codex_event(Event {
id: "sub-multi".into(),
@@ -818,6 +823,7 @@ fn exec_approval_decision_truncates_multiline_and_long_commands() {
reason: None,
proposed_execpolicy_amendment: None,
parsed_cmd: vec![],
network_preflight_only: false,
};
chat.handle_codex_event(Event {
id: "sub-long".into(),
@@ -2282,6 +2288,7 @@ fn approval_modal_exec_snapshot() -> anyhow::Result<()> {
"world".into(),
])),
parsed_cmd: vec![],
network_preflight_only: false,
};
chat.handle_codex_event(Event {
id: "sub-approve".into(),
@@ -2335,6 +2342,7 @@ fn approval_modal_exec_without_reason_snapshot() -> anyhow::Result<()> {
"world".into(),
])),
parsed_cmd: vec![],
network_preflight_only: false,
};
chat.handle_codex_event(Event {
id: "sub-approve-noreason".into(),
@@ -2556,6 +2564,7 @@ fn status_widget_and_approval_modal_snapshot() {
"hello world".into(),
])),
parsed_cmd: vec![],
network_preflight_only: false,
};
chat.handle_codex_event(Event {
id: "sub-approve-exec".into(),

View File

@@ -1,3 +1,5 @@
use crate::app_event::NetworkProxyDecision;
use crate::app_event::UnixSocketDecision;
use crate::diff_render::create_diff_summary;
use crate::diff_render::display_path_for;
use crate::exec_cell::CommandOutput;
@@ -534,6 +536,94 @@ pub fn new_approval_decision_cell(
))
}
/// History cell summarizing the user's decision on a network approval
/// prompt (allow for session / allow always / deny).
///
/// Fix: the decision-marker spans were empty strings (`"".green()` /
/// `"".red()`), which renders no marker at all ahead of the summary —
/// restore the conventional check/cross glyphs. NOTE(review): if the
/// glyphs were deliberately blank, revert only the `symbol` literals.
pub fn new_network_approval_decision_cell(
    host: String,
    decision: NetworkProxyDecision,
) -> Box<dyn HistoryCell> {
    let host_span = Span::from(host).dim();
    let (symbol, summary): (Span<'static>, Vec<Span<'static>>) = match decision {
        NetworkProxyDecision::AllowSession => (
            "✓".green(),
            vec![
                "You ".into(),
                "approved".bold(),
                " network access to ".into(),
                host_span,
                " for this session".bold(),
            ],
        ),
        NetworkProxyDecision::AllowAlways => (
            "✓".green(),
            vec![
                "You ".into(),
                "approved".bold(),
                " network access to ".into(),
                host_span,
                " permanently".bold(),
            ],
        ),
        NetworkProxyDecision::Deny => (
            "✗".red(),
            vec![
                "You ".into(),
                "denied".bold(),
                " network access to ".into(),
                host_span,
            ],
        ),
    };
    Box::new(PrefixedWrappedHistoryCell::new(
        Line::from(summary),
        symbol,
        " ",
    ))
}
/// History cell summarizing the user's decision on a Unix socket approval
/// prompt (allow for session / allow always / deny).
///
/// Fix: the decision-marker spans were empty strings (`"".green()` /
/// `"".red()`), which renders no marker at all ahead of the summary —
/// restore the conventional check/cross glyphs. NOTE(review): if the
/// glyphs were deliberately blank, revert only the `symbol` literals.
pub fn new_unix_socket_approval_decision_cell(
    socket_path: String,
    decision: UnixSocketDecision,
) -> Box<dyn HistoryCell> {
    let socket_span = Span::from(socket_path).dim();
    let (symbol, summary): (Span<'static>, Vec<Span<'static>>) = match decision {
        UnixSocketDecision::AllowSession => (
            "✓".green(),
            vec![
                "You ".into(),
                "approved".bold(),
                " Unix socket access to ".into(),
                socket_span,
                " for this session".bold(),
            ],
        ),
        UnixSocketDecision::AllowAlways => (
            "✓".green(),
            vec![
                "You ".into(),
                "approved".bold(),
                " Unix socket access to ".into(),
                socket_span,
                " permanently".bold(),
            ],
        ),
        UnixSocketDecision::Deny => (
            "✗".red(),
            vec![
                "You ".into(),
                "denied".bold(),
                " Unix socket access to ".into(),
                socket_span,
            ],
        ),
    };
    Box::new(PrefixedWrappedHistoryCell::new(
        Line::from(summary),
        symbol,
        " ",
    ))
}
/// Cyan history cell line showing the current review status.
pub(crate) fn new_review_status_line(message: String) -> PlainHistoryCell {
PlainHistoryCell {

View File

@@ -349,6 +349,48 @@ This is reasonable to use if Codex is running in an environment that provides it
Though using this option may also be necessary if you try to use Codex in environments where its native sandboxing mechanisms are unsupported, such as older Linux kernels or on Windows.
### network_proxy
Codex can route subprocess network traffic through an external proxy (for example, the `network_proxy` sandbox proxy) and surface approval prompts when requests are blocked by policy.
```toml
[features]
network_proxy = true
[network_proxy]
enabled = true
proxy_url = "http://127.0.0.1:3128"
admin_url = "http://127.0.0.1:8080"
mode = "limited" # "limited" or "full" (default: "full")
no_proxy = [
"localhost",
"127.0.0.1",
"::1",
"*.local",
".local",
"169.254.0.0/16",
"10.0.0.0/8",
"172.16.0.0/12",
"192.168.0.0/16",
]
poll_interval_ms = 1000
```
Notes:
- Proxy settings are injected only when sandbox network access is enabled (or full access mode). If the sandbox blocks network access, requests are blocked at the OS layer.
- Network proxy integration is rollout-gated behind `[features].network_proxy = true`.
- `proxy_url` is used for HTTP proxy envs (`HTTP_PROXY`, `HTTPS_PROXY`, `http_proxy`, `https_proxy`, plus npm/yarn variants). Docker and Cloud SDK proxy envs are derived from the HTTP proxy when present.
- When `proxy_url` points at localhost, Codex also assumes a SOCKS5 proxy on `localhost:8081` for `ALL_PROXY`, `GRPC_PROXY`, `FTP_PROXY`, `RSYNC_PROXY`, and (macOS only) `GIT_SSH_COMMAND`.
- `no_proxy` entries bypass the proxy; defaults include localhost + private network ranges. Use sparingly because bypassed traffic is not filtered by the proxy policy.
- `[network_proxy.policy]` can optionally allow localhost binding or Unix socket access (macOS only) when proxy-restricted network access is active.
- When enabled, Codex polls the proxy admin API (`/blocked`) and surfaces a prompt to allow for the session, allow always (add to allowlist), or deny (add to denylist). Allow/deny decisions update `~/.codex/config.toml` under `[network_proxy.policy]`, then Codex calls `/reload`.
- On macOS, `network_proxy.policy.allow_unix_sockets` is useful for local IPC that relies on Unix domain sockets (most commonly the SSH agent). Entries can be:
- absolute socket paths (or directories containing sockets),
- `$SSH_AUTH_SOCK` / `${SSH_AUTH_SOCK}`,
- the preset `ssh-agent` (alias: `ssh_auth_sock`, `ssh_auth_socket`).
When approvals are enabled, Codex may prompt to allow the SSH agent socket before running commands that appear to require it (e.g. `ssh`, `scp`, `sftp`, `ssh-add`, or `git` over SSH).
### tools.\*
These `[tools]` configuration options are deprecated. Use `[features]` instead (see [Feature flags](#feature-flags)).
@@ -928,67 +970,82 @@ Valid values:
## Config reference
| Key | Type / Values | Notes |
| ------------------------------------------------ | ----------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------- |
| `model` | string | Model to use (e.g., `gpt-5.1-codex-max`). |
| `model_provider` | string | Provider id from `model_providers` (default: `openai`). |
| `model_context_window` | number | Context window tokens. |
| `tool_output_token_limit` | number | Token budget for stored function/tool outputs in history (default: 2,560 tokens). |
| `approval_policy` | `untrusted` \| `on-failure` \| `on-request` \| `never` | When to prompt for approval. |
| `sandbox_mode` | `read-only` \| `workspace-write` \| `danger-full-access` | OS sandbox policy. |
| `sandbox_workspace_write.writable_roots` | array<string> | Extra writable roots in workspacewrite. |
| `sandbox_workspace_write.network_access` | boolean | Allow network in workspacewrite (default: false). |
| `sandbox_workspace_write.exclude_tmpdir_env_var` | boolean | Exclude `$TMPDIR` from writable roots (default: false). |
| `sandbox_workspace_write.exclude_slash_tmp` | boolean | Exclude `/tmp` from writable roots (default: false). |
| `notify` | array<string> | External program for notifications. |
| `tui.animations` | boolean | Enable terminal animations (welcome screen, shimmer, spinner). Defaults to true; set to `false` to disable visual motion. |
| `instructions` | string | Currently ignored; use `experimental_instructions_file` or `AGENTS.md`. |
| `features.<feature-flag>` | boolean | See [feature flags](#feature-flags) for details |
| `ghost_snapshot.disable_warnings` | boolean | Disable every warnings around ghost snapshot (large files, directory, ...) |
| `ghost_snapshot.ignore_large_untracked_files` | number | Exclude untracked files larger than this many bytes from ghost snapshots (default: 10 MiB). Set to `0` to disable. |
| `ghost_snapshot.ignore_large_untracked_dirs` | number | Ignore untracked directories with at least this many files (default: 200). Set to `0` to disable. |
| `mcp_servers.<id>.command` | string | MCP server launcher command (stdio servers only). |
| `mcp_servers.<id>.args` | array<string> | MCP server args (stdio servers only). |
| `mcp_servers.<id>.env` | map<string,string> | MCP server env vars (stdio servers only). |
| `mcp_servers.<id>.url` | string | MCP server url (streamable http servers only). |
| `mcp_servers.<id>.bearer_token_env_var` | string | environment variable containing a bearer token to use for auth (streamable http servers only). |
| `mcp_servers.<id>.enabled` | boolean | When false, Codex skips starting the server (default: true). |
| `mcp_servers.<id>.startup_timeout_sec` | number | Startup timeout in seconds (default: 10). Timeout is applied both for initializing MCP server and initially listing tools. |
| `mcp_servers.<id>.tool_timeout_sec` | number | Per-tool timeout in seconds (default: 60). Accepts fractional values; omit to use the default. |
| `mcp_servers.<id>.enabled_tools` | array<string> | Restrict the server to the listed tool names. |
| `mcp_servers.<id>.disabled_tools` | array<string> | Remove the listed tool names after applying `enabled_tools`, if any. |
| `model_providers.<id>.name` | string | Display name. |
| `model_providers.<id>.base_url` | string | API base URL. |
| `model_providers.<id>.env_key` | string | Env var for API key. |
| `model_providers.<id>.wire_api` | `chat` \| `responses` | Protocol used (default: `chat`). |
| `model_providers.<id>.query_params` | map<string,string> | Extra query params (e.g., Azure `api-version`). |
| `model_providers.<id>.http_headers` | map<string,string> | Additional static headers. |
| `model_providers.<id>.env_http_headers` | map<string,string> | Headers sourced from env vars. |
| `model_providers.<id>.request_max_retries` | number | Perprovider HTTP retry count (default: 4). |
| `model_providers.<id>.stream_max_retries` | number | SSE stream retry count (default: 5). |
| `model_providers.<id>.stream_idle_timeout_ms` | number | SSE idle timeout (ms) (default: 300000). |
| `project_doc_max_bytes` | number | Max bytes to read from `AGENTS.md`. |
| `profile` | string | Active profile name. |
| `profiles.<name>.*` | various | Profilescoped overrides of the same keys. |
| `history.persistence` | `save-all` \| `none` | History file persistence (default: `save-all`). |
| `history.max_bytes` | number | Maximum size of `history.jsonl` in bytes; when exceeded, history is compacted to ~80% of this limit by dropping oldest entries. |
| `file_opener` | `vscode` \| `vscode-insiders` \| `windsurf` \| `cursor` \| `none` | URI scheme for clickable citations (default: `vscode`). |
| `tui` | table | TUIspecific options. |
| `tui.notifications` | boolean \| array<string> | Enable desktop notifications in the tui (default: true). |
| `hide_agent_reasoning` | boolean | Hide model reasoning events. |
| `check_for_update_on_startup` | boolean | Check for Codex updates on startup (default: true). Set to `false` only if updates are centrally managed. |
| `show_raw_agent_reasoning` | boolean | Show raw reasoning (when available). |
| `model_reasoning_effort` | `minimal` \| `low` \| `medium` \| `high`\|`xhigh` | Responses API reasoning effort. |
| `model_reasoning_summary` | `auto` \| `concise` \| `detailed` \| `none` | Reasoning summaries. |
| `model_verbosity` | `low` \| `medium` \| `high` | GPT5 text verbosity (Responses API). |
| `model_supports_reasoning_summaries` | boolean | Forceenable reasoning summaries. |
| `model_reasoning_summary_format` | `none` \| `experimental` | Force reasoning summary format. |
| `chatgpt_base_url` | string | Base URL for ChatGPT auth flow. |
| `experimental_instructions_file` | string (path) | Replace builtin instructions (experimental). |
| `experimental_use_exec_command_tool` | boolean | Use experimental exec command tool. |
| `projects.<path>.trust_level` | string | Mark project/worktree as trusted (only `"trusted"` is recognized). |
| `tools.web_search` | boolean | Enable web search tool (deprecated) (default: false). |
| `tools.view_image` | boolean | Enable or disable the `view_image` tool so Codex can attach local image files from the workspace (default: true). |
| `forced_login_method` | `chatgpt` \| `api` | Only allow Codex to be used with ChatGPT or API keys. |
| `forced_chatgpt_workspace_id` | string (uuid) | Only allow Codex to be used with the specified ChatGPT workspace. |
| `cli_auth_credentials_store` | `file` \| `keyring` \| `auto` | Where to store CLI login credentials (default: `file`). |
| Key | Type / Values | Notes |
| -------------------------------------------------- | ----------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------- |
| `model` | string | Model to use (e.g., `gpt-5.1-codex-max`). |
| `model_provider` | string | Provider id from `model_providers` (default: `openai`). |
| `model_context_window` | number | Context window tokens. |
| `tool_output_token_limit` | number | Token budget for stored function/tool outputs in history (default: 2,560 tokens). |
| `approval_policy` | `untrusted` \| `on-failure` \| `on-request` \| `never` | When to prompt for approval. |
| `sandbox_mode` | `read-only` \| `workspace-write` \| `danger-full-access` | OS sandbox policy. |
| `sandbox_workspace_write.writable_roots` | array<string> | Extra writable roots in workspace-write. |
| `sandbox_workspace_write.network_access` | boolean | Allow network in workspace-write (default: false). |
| `sandbox_workspace_write.exclude_tmpdir_env_var` | boolean | Exclude `$TMPDIR` from writable roots (default: false). |
| `sandbox_workspace_write.exclude_slash_tmp` | boolean | Exclude `/tmp` from writable roots (default: false). |
| `network_proxy.enabled` | boolean | Enable proxy environment injection + admin polling (default: false). |
| `network_proxy.proxy_url` | string | Proxy URL used for HTTP proxy envs (`HTTP_PROXY`/`HTTPS_PROXY`); when it points at localhost, Codex also assumes SOCKS5 on `localhost:8081` for `ALL_PROXY` (default: `http://127.0.0.1:3128`). |
| `network_proxy.admin_url` | string | Proxy admin API base URL (default: `http://127.0.0.1:8080`). |
| `network_proxy.mode` | `limited` \| `full` | Default proxy mode for policy hints (default: `full`). |
| `network_proxy.no_proxy` | array<string> | Hosts/IPs that bypass the proxy (default includes localhost + private network ranges). |
| `network_proxy.poll_interval_ms` | number | Admin poll interval in ms (default: 1000). |
| `network_proxy.policy.allowed_domains` | array<string> | Allowlist of domain patterns (denylist takes precedence). |
| `network_proxy.policy.denied_domains` | array<string> | Denylist of domain patterns (takes precedence over allowlist). |
| `network_proxy.policy.allow_local_binding` | boolean | Allow localhost binding when proxy-restricted (macOS only, default: false). |
| `network_proxy.policy.allow_unix_sockets` | array<string> | Allow Unix socket paths when proxy-restricted (macOS only, default: []). Supports `$SSH_AUTH_SOCK` and the `ssh-agent` preset. |
| `network_proxy.mitm.enabled` | boolean | Enable HTTPS MITM for read-only enforcement in limited mode (default: false). |
| `network_proxy.mitm.inspect` | boolean | Enable body inspection in MITM mode (default: false). |
| `network_proxy.mitm.max_body_bytes` | number | Max body bytes to buffer when inspection is enabled (default: 4096). |
| `network_proxy.mitm.ca_cert_path` | string (path) | CA cert path (default: `network_proxy/mitm/ca.pem` under `$CODEX_HOME`). |
| `network_proxy.mitm.ca_key_path` | string (path) | CA key path (default: `network_proxy/mitm/ca.key` under `$CODEX_HOME`). |
| `notify` | array<string> | External program for notifications. |
| `tui.animations` | boolean | Enable terminal animations (welcome screen, shimmer, spinner). Defaults to true; set to `false` to disable visual motion. |
| `instructions` | string | Currently ignored; use `experimental_instructions_file` or `AGENTS.md`. |
| `features.<feature-flag>` | boolean | See [feature flags](#feature-flags) for details |
| `ghost_snapshot.disable_warnings` | boolean | Disable every warnings around ghost snapshot (large files, directory, ...) |
| `ghost_snapshot.ignore_large_untracked_files` | number | Exclude untracked files larger than this many bytes from ghost snapshots (default: 10 MiB). Set to `0` to disable. |
| `ghost_snapshot.ignore_large_untracked_dirs` | number | Ignore untracked directories with at least this many files (default: 200). Set to `0` to disable. |
| `mcp_servers.<id>.command` | string | MCP server launcher command (stdio servers only). |
| `mcp_servers.<id>.args` | array<string> | MCP server args (stdio servers only). |
| `mcp_servers.<id>.env` | map<string,string> | MCP server env vars (stdio servers only). |
| `mcp_servers.<id>.url` | string | MCP server url (streamable http servers only). |
| `mcp_servers.<id>.bearer_token_env_var` | string | environment variable containing a bearer token to use for auth (streamable http servers only). |
| `mcp_servers.<id>.enabled` | boolean | When false, Codex skips starting the server (default: true). |
| `mcp_servers.<id>.startup_timeout_sec` | number | Startup timeout in seconds (default: 10). Timeout is applied both for initializing MCP server and initially listing tools. |
| `mcp_servers.<id>.tool_timeout_sec` | number | Per-tool timeout in seconds (default: 60). Accepts fractional values; omit to use the default. |
| `mcp_servers.<id>.enabled_tools` | array<string> | Restrict the server to the listed tool names. |
| `mcp_servers.<id>.disabled_tools` | array<string> | Remove the listed tool names after applying `enabled_tools`, if any. |
| `model_providers.<id>.name` | string | Display name. |
| `model_providers.<id>.base_url` | string | API base URL. |
| `model_providers.<id>.env_key` | string | Env var for API key. |
| `model_providers.<id>.wire_api` | `chat` \| `responses` | Protocol used (default: `chat`). |
| `model_providers.<id>.query_params` | map<string,string> | Extra query params (e.g., Azure `api-version`). |
| `model_providers.<id>.http_headers` | map<string,string> | Additional static headers. |
| `model_providers.<id>.env_http_headers` | map<string,string> | Headers sourced from env vars. |
| `model_providers.<id>.request_max_retries` | number | Per-provider HTTP retry count (default: 4). |
| `model_providers.<id>.stream_max_retries` | number | SSE stream retry count (default: 5). |
| `model_providers.<id>.stream_idle_timeout_ms` | number | SSE idle timeout (ms) (default: 300000). |
| `project_doc_max_bytes` | number | Max bytes to read from `AGENTS.md`. |
| `profile` | string | Active profile name. |
| `profiles.<name>.*` | various | Profile-scoped overrides of the same keys. |
| `history.persistence` | `save-all` \| `none` | History file persistence (default: `save-all`). |
| `history.max_bytes` | number | Maximum size of `history.jsonl` in bytes; when exceeded, history is compacted to ~80% of this limit by dropping oldest entries. |
| `file_opener` | `vscode` \| `vscode-insiders` \| `windsurf` \| `cursor` \| `none` | URI scheme for clickable citations (default: `vscode`). |
| `tui` | table | TUI-specific options. |
| `tui.notifications` | boolean \| array<string> | Enable desktop notifications in the tui (default: true). |
| `hide_agent_reasoning` | boolean | Hide model reasoning events. |
| `check_for_update_on_startup` | boolean | Check for Codex updates on startup (default: true). Set to `false` only if updates are centrally managed. |
| `show_raw_agent_reasoning` | boolean | Show raw reasoning (when available). |
| `model_reasoning_effort` | `minimal` \| `low` \| `medium` \| `high`\|`xhigh` | Responses API reasoning effort. |
| `model_reasoning_summary` | `auto` \| `concise` \| `detailed` \| `none` | Reasoning summaries. |
| `model_verbosity` | `low` \| `medium` \| `high` | GPT-5 text verbosity (Responses API). |
| `model_supports_reasoning_summaries` | boolean | Force-enable reasoning summaries. |
| `model_reasoning_summary_format` | `none` \| `experimental` | Force reasoning summary format. |
| `chatgpt_base_url` | string | Base URL for ChatGPT auth flow. |
| `experimental_instructions_file` | string (path) | Replace builtin instructions (experimental). |
| `experimental_use_exec_command_tool` | boolean | Use experimental exec command tool. |
| `projects.<path>.trust_level` | string | Mark project/worktree as trusted (only `"trusted"` is recognized). |
| `tools.web_search` | boolean | Enable web search tool (deprecated) (default: false). |
| `tools.view_image` | boolean | Enable or disable the `view_image` tool so Codex can attach local image files from the workspace (default: true). |
| `forced_login_method` | `chatgpt` \| `api` | Only allow Codex to be used with ChatGPT or API keys. |
| `forced_chatgpt_workspace_id` | string (uuid) | Only allow Codex to be used with the specified ChatGPT workspace. |
| `cli_auth_credentials_store` | `file` \| `keyring` \| `auto` | Where to store CLI login credentials (default: `file`). |

View File

@@ -99,6 +99,48 @@ exclude_tmpdir_env_var = false
# Exclude /tmp from writable roots. Default: false
exclude_slash_tmp = false
################################################################################
# Network Proxy (optional)
################################################################################
[network_proxy]
# Enable proxy env injection + approval prompts for blocked domains. Default: false
# NOTE: This is rollout-gated behind `[features].network_proxy = true`.
enabled = false
# HTTP/HTTPS proxy URL. Default: "http://127.0.0.1:3128"
proxy_url = "http://127.0.0.1:3128"
# Admin API for the proxy (for /blocked, /reload, /mode). Default: "http://127.0.0.1:8080"
admin_url = "http://127.0.0.1:8080"
# limited | full (default: full)
mode = "full"
# Hosts/IPs that bypass the proxy. Default includes localhost + private networks.
no_proxy = [
"localhost",
"127.0.0.1",
"::1",
"*.local",
".local",
"169.254.0.0/16",
"10.0.0.0/8",
"172.16.0.0/12",
"192.168.0.0/16",
]
# Poll interval (ms) for checking proxy /blocked. Default: 1000
poll_interval_ms = 1000
[network_proxy.policy]
# Allow localhost binds inside the sandbox (macOS only). Default: false
allow_local_binding = false
# Allow Unix socket paths inside the sandbox (macOS only). Default: []
# Common values:
# - "$SSH_AUTH_SOCK" (recommended) or "${SSH_AUTH_SOCK}"
# - "ssh-agent" (alias: "ssh_auth_sock", "ssh_auth_socket")
# - an absolute socket path like "/private/tmp/..." (or a directory containing sockets)
allow_unix_sockets = []
# Optional domain allow/deny lists (denylist wins)
allowed_domains = []
denied_domains = []
################################################################################
# Shell Environment Policy for spawned processes
################################################################################
@@ -218,6 +260,7 @@ rmcp_client = false
apply_patch_freeform = false
view_image_tool = true
web_search_request = false
network_proxy = false
ghost_commit = false
enable_experimental_windows_sandbox = false
skills = false

View File

@@ -0,0 +1,103 @@
# Codex Network Proxy Design
This document describes the Codex network proxy that runs outside the sandbox and enforces an allow-only network policy for sandboxed subprocesses. The proxy is a single binary with HTTP proxying, SOCKS5, and an admin API. Codex owns the policy state in `~/.codex/config.toml`; the proxy reads that configuration and applies it at the network edge.
## Goals
1. Enforce allow-only network access with denylist precedence.
2. Support wildcard domain patterns, including apex match for `*.domain.tld`.
3. Allow two modes: **limited** (read-only) and **full** (all methods).
4. Provide optional **MITM** to enforce read-only on HTTPS.
5. Allow hot-reloaded configuration via admin API.
6. Provide clear audit logging of allow/deny decisions and policy changes.
7. Enable a single binary with HTTP proxy, SOCKS5 proxy, and admin API.
## Non-Goals
- Enterprise policy distribution or centralized multi-tenant orchestration.
- Deep packet inspection beyond the supported HTTP/HTTPS interception modes.
- Perfect protocol coverage for all network traffic types.
## Architecture
```mermaid
flowchart LR
subgraph Sandbox["Codex (sandboxed)"]
Tools["commands / tools<br/>curl, git, python"]
SocksClients["SOCKS clients"]
end
subgraph Proxy["codex-network-proxy (host process)"]
HttpProxy["HTTP Proxy :3128<br/>CONNECT tunnel<br/>MITM (optional)"]
SocksProxy["SOCKS5 Proxy :8081"]
Admin["Admin API :8080<br/>/health /config /blocked<br/>/reload /mode"]
end
Config["~/.codex/config.toml<br/>[network_proxy.*]"]
Tools -->|HTTP| HttpProxy
SocksClients -->|SOCKS5| SocksProxy
Admin -->|reads + reloads| Config
```
## Configuration Model
The proxy reads `~/.codex/config.toml`:
- `[network_proxy]` for endpoints, mode, and toggles.
- `[network_proxy.policy]` for `allowed_domains` / `denied_domains` (and, on macOS, optional local IPC allowances).
- `[network_proxy.mitm]` for MITM CA paths and inspection settings.
- Codex integration is rollout-gated behind `[features].network_proxy = true`.
Codex is the source of truth. Approval actions update the config and trigger a proxy reload.
## Enforcement Model
- **Allow/deny precedence:** denylist wins; allowlist is required for access.
- **Limited mode:** only GET/HEAD/OPTIONS are permitted. HTTPS requires MITM to enforce method constraints; otherwise CONNECT is blocked with a clear reason.
- **Full mode:** all methods allowed; CONNECT tunneling is permitted without MITM.
## macOS Sandbox Integration (Seatbelt)
On macOS, Codex uses Seatbelt (`sandbox-exec`) for OS-level enforcement.
Key points:
- **Per-domain gating happens in the proxy**, not in Seatbelt: Seatbelt network rules are intentionally limited to loopback proxy ports (e.g. `localhost:3128` / `localhost:8081`) so all outbound traffic is forced through the proxy, which then applies the allow/deny policy and prompts.
- **Local IPC is deny-by-default** when proxy-restricted network access is active. Some tools rely on Unix domain sockets (e.g. the SSH agent). These are blocked unless explicitly allowed via:
- `network_proxy.policy.allow_unix_sockets` (absolute socket paths, `$SSH_AUTH_SOCK`, or the `ssh-agent` preset), and/or
- `network_proxy.policy.allow_local_binding` (if you need to bind/listen on localhost ports).
When approvals are enabled, Codex can preflight commands that appear to require the SSH agent and prompt to allow the SSH agent socket before running.
## Logging and Auditability
The proxy logs:
- Allow/deny decisions (host, client, reason).
- Policy updates (allowlist/denylist adds/removes).
- Mode changes and config reloads.
- MITM lifecycle events (CA generated, TLS established).
## Decision to Make: Preflight Strictness
Codex performs a preflight check before running some commands. Preflight currently scans CLI args for URLs on known network tools (curl, git, etc.) and shell `-c` snippets.
We need to decide how strict preflight should be:
Option A: **Heuristic preflight (current)**
- Pros: catches obvious `curl https://...` style commands early.
- Cons: misses dynamic URLs inside scripts; can still over-prompt on shell snippets.
Option B: **Strict preflight**
- Only preflight when a URL argument is present in the command.
- For everything else, rely on the proxy `/blocked` prompt at connect time.
- Pros: fewer false positives, clearer user experience.
- Cons: fewer early prompts; approvals shift to runtime events.
Decision: **TBD**. We should choose a configuration flag (`network_proxy.preflight_mode = "heuristic" | "strict"`) and default based on observed UX.
## Open Items
- Finalize preflight strictness and expose a config toggle if needed.
- Confirm documentation for MITM trust steps and CA injection into sandboxed commands.

View File

@@ -0,0 +1,103 @@
# Codex Network Proxy Quickstart (Local)
This is a compact guide to build and validate the Codex network proxy locally.
## Build
From the Codex repo:
```bash
cd codex-rs
cargo build -p codex-network-proxy
```
For MITM support:
```bash
cargo build -p codex-network-proxy --features mitm
```
## Configure
Add this to `~/.codex/config.toml`:
```toml
[features]
network_proxy = true
[network_proxy]
enabled = true
proxy_url = "http://127.0.0.1:3128"
admin_url = "http://127.0.0.1:8080"
mode = "limited" # or "full"
poll_interval_ms = 1000
[network_proxy.policy]
allowed_domains = ["example.com", "*.github.com"]
denied_domains = ["metadata.google.internal", "169.254.*"]
# macOS only: allow specific local IPC when proxy-restricted.
allow_local_binding = false
# Example: allow SSH agent socket for git/ssh.
allow_unix_sockets = ["$SSH_AUTH_SOCK"]
[network_proxy.mitm]
enabled = false
```
## Run the proxy
```bash
cd codex-rs
cargo run -p codex-network-proxy -- proxy
```
With MITM:
```bash
cargo run -p codex-network-proxy --features mitm -- proxy
```
## Test with curl
HTTP/HTTPS via proxy:
```bash
export HTTP_PROXY="http://127.0.0.1:3128"
export HTTPS_PROXY="http://127.0.0.1:3128"
curl -sS https://example.com
```
Enforcing limited mode on HTTPS traffic requires MITM. If MITM is on, trust the generated CA:
```bash
security add-trusted-cert -d -r trustRoot \
-k ~/Library/Keychains/login.keychain-db \
~/.codex/network_proxy/mitm/ca.pem
```
Or pass the CA directly:
```bash
curl --cacert ~/.codex/network_proxy/mitm/ca.pem -sS https://example.com
```
## Admin endpoints
Reload config after edits:
```bash
curl -fsS -X POST http://127.0.0.1:8080/reload
```
Switch modes:
```bash
curl -fsS -X POST http://127.0.0.1:8080/mode -d '{"mode":"full"}'
```
## Codex integration sanity check
1) Start the proxy.
2) Launch Codex with `[features].network_proxy = true` and `network_proxy.enabled = true` set in config.
3) Run a network command (e.g., `curl https://example.com`).
4) Confirm you see the allow/deny prompt and that the proxy logs reflect the decision.

View File

@@ -54,6 +54,43 @@ approval_policy = "never"
sandbox_mode = "read-only"
```
### Network proxy approvals
Codex can optionally route outbound network traffic through a proxy and prompt you when new domains are blocked by policy.
Key behaviors:
- The OS sandbox is still the first line of defense. If network access is disabled, outbound requests are blocked at the OS level.
- When network is enabled and both `[features].network_proxy = true` and `network_proxy.enabled = true`, Codex polls the proxy admin API (`/blocked`) and immediately notifies you about blocked domains.
- For exec commands that include HTTP/HTTPS URLs, Codex preflights the host against the proxy config and prompts before running the command.
- Approvals update `~/.codex/config.toml` under `[network_proxy.policy]` and trigger a proxy reload.
- You can choose to:
- **Deny** the request (adds the domain to the denylist).
- **Allow for session** (temporary allow that is reverted on exit).
- **Allow always** (adds the domain to the allowlist).
Network access is controlled through a proxy server running outside the sandbox:
- **Domain restrictions:** Only approved domains can be accessed (denylist takes precedence).
- **User confirmation:** New domain requests trigger permission prompts.
- **Custom proxy support:** Advanced users can implement custom rules on outgoing traffic.
- **Comprehensive coverage:** Restrictions apply to all scripts, programs, and subprocesses spawned by Codex.
`NO_PROXY` is supported via `[network_proxy].no_proxy` and is passed to subprocesses as both `NO_PROXY` and `no_proxy`. Defaults include localhost and private network ranges; any entries in that list bypass the proxy and are not filtered by proxy policy.
On macOS, `[network_proxy.policy]` can also allow localhost binding or Unix socket paths when proxy-restricted network access is active. These settings influence the Seatbelt profile.
Unix sockets are deny-by-default. If you run tools that rely on local IPC (most commonly the SSH agent via `SSH_AUTH_SOCK`), you can allow them via:
```toml
[network_proxy.policy]
allow_unix_sockets = ["$SSH_AUTH_SOCK"]
```
When approvals are enabled, Codex may prompt to allow the SSH agent socket before running commands that appear to require it (for example `ssh`, `scp`, `sftp`, `ssh-add`, or `git` over SSH). “Allow always” records `$SSH_AUTH_SOCK`; “Allow for session” records the resolved socket path and is removed when Codex exits.
When MITM is enabled in the proxy config, Codex injects common CA environment variables (for example `SSL_CERT_FILE`, `CURL_CA_BUNDLE`, `GIT_SSL_CAINFO`, `REQUESTS_CA_BUNDLE`, `NODE_EXTRA_CA_CERTS`, `PIP_CERT`, and `NPM_CONFIG_CAFILE`) pointing at the proxy CA cert to reduce per-tool configuration.
### Sandbox mechanics by platform
The mechanism Codex uses to enforce the sandbox policy depends on your OS: