Compare commits

...

5 Commits

16 changed files with 532 additions and 8 deletions

View File

@@ -11019,6 +11019,16 @@
"type": "null"
}
]
},
"network": {
"anyOf": [
{
"$ref": "#/definitions/v2/NetworkRequirements"
},
{
"type": "null"
}
]
}
},
"type": "object"
@@ -12348,6 +12358,84 @@
],
"type": "string"
},
"NetworkRequirements": {
"properties": {
"allowLocalBinding": {
"type": [
"boolean",
"null"
]
},
"allowUnixSockets": {
"items": {
"type": "string"
},
"type": [
"array",
"null"
]
},
"allowUpstreamProxy": {
"type": [
"boolean",
"null"
]
},
"allowedDomains": {
"items": {
"type": "string"
},
"type": [
"array",
"null"
]
},
"dangerouslyAllowNonLoopbackAdmin": {
"type": [
"boolean",
"null"
]
},
"dangerouslyAllowNonLoopbackProxy": {
"type": [
"boolean",
"null"
]
},
"deniedDomains": {
"items": {
"type": "string"
},
"type": [
"array",
"null"
]
},
"enabled": {
"type": [
"boolean",
"null"
]
},
"httpPort": {
"format": "uint16",
"minimum": 0.0,
"type": [
"integer",
"null"
]
},
"socksPort": {
"format": "uint16",
"minimum": 0.0,
"type": [
"integer",
"null"
]
}
},
"type": "object"
},
"OverriddenMetadata": {
"properties": {
"effectiveValue": true,

View File

@@ -48,6 +48,94 @@
"type": "null"
}
]
},
"network": {
"anyOf": [
{
"$ref": "#/definitions/NetworkRequirements"
},
{
"type": "null"
}
]
}
},
"type": "object"
},
"NetworkRequirements": {
"properties": {
"allowLocalBinding": {
"type": [
"boolean",
"null"
]
},
"allowUnixSockets": {
"items": {
"type": "string"
},
"type": [
"array",
"null"
]
},
"allowUpstreamProxy": {
"type": [
"boolean",
"null"
]
},
"allowedDomains": {
"items": {
"type": "string"
},
"type": [
"array",
"null"
]
},
"dangerouslyAllowNonLoopbackAdmin": {
"type": [
"boolean",
"null"
]
},
"dangerouslyAllowNonLoopbackProxy": {
"type": [
"boolean",
"null"
]
},
"deniedDomains": {
"items": {
"type": "string"
},
"type": [
"array",
"null"
]
},
"enabled": {
"type": [
"boolean",
"null"
]
},
"httpPort": {
"format": "uint16",
"minimum": 0.0,
"type": [
"integer",
"null"
]
},
"socksPort": {
"format": "uint16",
"minimum": 0.0,
"type": [
"integer",
"null"
]
}
},
"type": "object"

View File

@@ -3,7 +3,8 @@
// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually.
import type { WebSearchMode } from "../WebSearchMode";
import type { AskForApproval } from "./AskForApproval";
import type { NetworkRequirements } from "./NetworkRequirements";
import type { ResidencyRequirement } from "./ResidencyRequirement";
import type { SandboxMode } from "./SandboxMode";
export type ConfigRequirements = { allowedApprovalPolicies: Array<AskForApproval> | null, allowedSandboxModes: Array<SandboxMode> | null, allowedWebSearchModes: Array<WebSearchMode> | null, enforceResidency: ResidencyRequirement | null, };
export type ConfigRequirements = { allowedApprovalPolicies: Array<AskForApproval> | null, allowedSandboxModes: Array<SandboxMode> | null, allowedWebSearchModes: Array<WebSearchMode> | null, enforceResidency: ResidencyRequirement | null, network: NetworkRequirements | null, };

View File

@@ -0,0 +1,5 @@
// GENERATED CODE! DO NOT MODIFY BY HAND!
// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually.
export type NetworkRequirements = { enabled: boolean | null, httpPort: number | null, socksPort: number | null, allowUpstreamProxy: boolean | null, dangerouslyAllowNonLoopbackProxy: boolean | null, dangerouslyAllowNonLoopbackAdmin: boolean | null, allowedDomains: Array<string> | null, deniedDomains: Array<string> | null, allowUnixSockets: Array<string> | null, allowLocalBinding: boolean | null, };

View File

@@ -91,6 +91,7 @@ export type { Model } from "./Model";
export type { ModelListParams } from "./ModelListParams";
export type { ModelListResponse } from "./ModelListResponse";
export type { NetworkAccess } from "./NetworkAccess";
export type { NetworkRequirements } from "./NetworkRequirements";
export type { OverriddenMetadata } from "./OverriddenMetadata";
export type { PatchApplyStatus } from "./PatchApplyStatus";
export type { PatchChangeKind } from "./PatchChangeKind";

View File

@@ -535,6 +535,23 @@ pub struct ConfigRequirements {
pub allowed_sandbox_modes: Option<Vec<SandboxMode>>,
pub allowed_web_search_modes: Option<Vec<WebSearchMode>>,
pub enforce_residency: Option<ResidencyRequirement>,
pub network: Option<NetworkRequirements>,
}
/// Network-related requirements loaded from `requirements.toml` and/or MDM and
/// surfaced over the app-server protocol (camelCase on the wire).
///
/// Every field is optional: `None` means "no requirement stated", which is
/// distinct from an explicit `false`/empty value set by the requirements author.
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct NetworkRequirements {
    /// Whether the network proxy feature is enabled.
    pub enabled: Option<bool>,
    /// HTTP proxy port (presumably the listen port — confirm against the proxy implementation).
    pub http_port: Option<u16>,
    /// SOCKS proxy port (presumably the listen port — confirm against the proxy implementation).
    pub socks_port: Option<u16>,
    pub allow_upstream_proxy: Option<bool>,
    // NOTE(review): "dangerously" prefix mirrors the TOML field names; exact
    // semantics live in the proxy, not visible here.
    pub dangerously_allow_non_loopback_proxy: Option<bool>,
    pub dangerously_allow_non_loopback_admin: Option<bool>,
    /// Domain allow-list; entries may include wildcard patterns (e.g. `*.openai.com` in tests).
    pub allowed_domains: Option<Vec<String>>,
    /// Domain deny-list.
    pub denied_domains: Option<Vec<String>>,
    /// Unix socket paths that are permitted — TODO confirm exact matching semantics.
    pub allow_unix_sockets: Option<Vec<String>>,
    pub allow_local_binding: Option<bool>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq, JsonSchema, TS)]

View File

@@ -116,7 +116,7 @@ Example (from OpenAI's official VSCode extension):
- `config/read` — fetch the effective config on disk after resolving config layering.
- `config/value/write` — write a single config key/value to the user's config.toml on disk.
- `config/batchWrite` — apply multiple config edits atomically to the user's config.toml on disk.
- `configRequirements/read` — fetch the loaded requirements allow-lists (`allowedApprovalPolicies`, `allowedSandboxModes`, `allowedWebSearchModes`) and `enforceResidency` from `requirements.toml` and/or MDM (or `null` if none are configured).
- `configRequirements/read` — fetch loaded requirements constraints from `requirements.toml` and/or MDM (or `null` if none are configured), including allow-lists (`allowedApprovalPolicies`, `allowedSandboxModes`, `allowedWebSearchModes`), `enforceResidency`, and `network` constraints.
### Example: Start or resume a thread

View File

@@ -9,6 +9,7 @@ use codex_app_server_protocol::ConfigValueWriteParams;
use codex_app_server_protocol::ConfigWriteErrorCode;
use codex_app_server_protocol::ConfigWriteResponse;
use codex_app_server_protocol::JSONRPCErrorError;
use codex_app_server_protocol::NetworkRequirements;
use codex_app_server_protocol::SandboxMode;
use codex_core::config::ConfigService;
use codex_core::config::ConfigServiceError;
@@ -129,6 +130,7 @@ fn map_requirements_toml_to_api(requirements: ConfigRequirementsToml) -> ConfigR
enforce_residency: requirements
.enforce_residency
.map(map_residency_requirement_to_api),
network: requirements.network.map(map_network_requirements_to_api),
}
}
@@ -149,6 +151,23 @@ fn map_residency_requirement_to_api(
}
}
/// Convert the core TOML representation of network requirements into the
/// app-server protocol type.
///
/// The two structs share identical field names, so this is a straight
/// structural copy; full destructuring keeps the mapping exhaustive — adding a
/// field to the TOML type becomes a compile error here instead of a silent drop.
fn map_network_requirements_to_api(
    network: codex_core::config_loader::NetworkRequirementsToml,
) -> NetworkRequirements {
    let codex_core::config_loader::NetworkRequirementsToml {
        enabled,
        http_port,
        socks_port,
        allow_upstream_proxy,
        dangerously_allow_non_loopback_proxy,
        dangerously_allow_non_loopback_admin,
        allowed_domains,
        denied_domains,
        allow_unix_sockets,
        allow_local_binding,
    } = network;
    NetworkRequirements {
        enabled,
        http_port,
        socks_port,
        allow_upstream_proxy,
        dangerously_allow_non_loopback_proxy,
        dangerously_allow_non_loopback_admin,
        allowed_domains,
        denied_domains,
        allow_unix_sockets,
        allow_local_binding,
    }
}
fn map_error(err: ConfigServiceError) -> JSONRPCErrorError {
if let Some(code) = err.write_error_code() {
return config_write_error(code, err.to_string());
@@ -174,6 +193,7 @@ fn config_write_error(code: ConfigWriteErrorCode, message: impl Into<String>) ->
#[cfg(test)]
mod tests {
use super::*;
use codex_core::config_loader::NetworkRequirementsToml as CoreNetworkRequirementsToml;
use codex_protocol::protocol::AskForApproval as CoreAskForApproval;
use pretty_assertions::assert_eq;
@@ -194,6 +214,18 @@ mod tests {
mcp_servers: None,
rules: None,
enforce_residency: Some(CoreResidencyRequirement::Us),
network: Some(CoreNetworkRequirementsToml {
enabled: Some(true),
http_port: Some(8080),
socks_port: Some(1080),
allow_upstream_proxy: Some(false),
dangerously_allow_non_loopback_proxy: Some(false),
dangerously_allow_non_loopback_admin: Some(false),
allowed_domains: Some(vec!["api.openai.com".to_string()]),
denied_domains: Some(vec!["example.com".to_string()]),
allow_unix_sockets: Some(vec!["/tmp/proxy.sock".to_string()]),
allow_local_binding: Some(true),
}),
};
let mapped = map_requirements_toml_to_api(requirements);
@@ -217,6 +249,21 @@ mod tests {
mapped.enforce_residency,
Some(codex_app_server_protocol::ResidencyRequirement::Us),
);
assert_eq!(
mapped.network,
Some(NetworkRequirements {
enabled: Some(true),
http_port: Some(8080),
socks_port: Some(1080),
allow_upstream_proxy: Some(false),
dangerously_allow_non_loopback_proxy: Some(false),
dangerously_allow_non_loopback_admin: Some(false),
allowed_domains: Some(vec!["api.openai.com".to_string()]),
denied_domains: Some(vec!["example.com".to_string()]),
allow_unix_sockets: Some(vec!["/tmp/proxy.sock".to_string()]),
allow_local_binding: Some(true),
}),
);
}
#[test]
@@ -228,6 +275,7 @@ mod tests {
mcp_servers: None,
rules: None,
enforce_residency: None,
network: None,
};
let mapped = map_requirements_toml_to_api(requirements);

View File

@@ -385,6 +385,7 @@ mod tests {
mcp_servers: None,
rules: None,
enforce_residency: None,
network: None,
})
);
}
@@ -426,6 +427,7 @@ mod tests {
mcp_servers: None,
rules: None,
enforce_residency: None,
network: None,
})
);
}
@@ -470,6 +472,7 @@ mod tests {
mcp_servers: None,
rules: None,
enforce_residency: None,
network: None,
})
);
assert_eq!(fetcher.request_count.load(Ordering::SeqCst), 2);

View File

@@ -1653,6 +1653,7 @@ impl Config {
mcp_servers,
exec_policy: _,
enforce_residency,
network: _network_requirements,
} = requirements;
apply_requirement_constrained_value(
@@ -4379,6 +4380,7 @@ model_verbosity = "high"
mcp_servers: None,
rules: None,
enforce_residency: None,
network: None,
};
let requirement_source = crate::config_loader::RequirementSource::Unknown;
let requirement_source_for_error = requirement_source.clone();
@@ -4936,6 +4938,7 @@ mcp_oauth_callback_port = 5678
mcp_servers: None,
rules: None,
enforce_residency: None,
network: None,
};
let config = ConfigBuilder::default()

View File

@@ -81,6 +81,7 @@ pub struct ConfigRequirements {
pub mcp_servers: Option<Sourced<BTreeMap<String, McpServerRequirement>>>,
pub(crate) exec_policy: Option<Sourced<RequirementsExecPolicy>>,
pub enforce_residency: ConstrainedWithSource<Option<ResidencyRequirement>>,
pub network: Option<Sourced<NetworkRequirementsToml>>,
}
impl Default for ConfigRequirements {
@@ -101,6 +102,7 @@ impl Default for ConfigRequirements {
mcp_servers: None,
exec_policy: None,
enforce_residency: ConstrainedWithSource::new(Constrained::allow_any(None), None),
network: None,
}
}
}
@@ -123,6 +125,20 @@ pub struct McpServerRequirement {
pub identity: McpServerIdentity,
}
/// Raw `[network]` table as deserialized from `requirements.toml`.
///
/// All fields are optional: an absent field means "not specified by the
/// requirements author". `Default` yields an entirely-unset table.
#[derive(Deserialize, Debug, Clone, Default, PartialEq, Eq)]
pub struct NetworkRequirementsToml {
    // Whether the network proxy feature is enabled.
    pub enabled: Option<bool>,
    // HTTP proxy port (presumably listen port — confirm against proxy).
    pub http_port: Option<u16>,
    // SOCKS proxy port (presumably listen port — confirm against proxy).
    pub socks_port: Option<u16>,
    pub allow_upstream_proxy: Option<bool>,
    pub dangerously_allow_non_loopback_proxy: Option<bool>,
    pub dangerously_allow_non_loopback_admin: Option<bool>,
    // Domain allow-list; wildcard entries like `*.openai.com` appear in tests.
    pub allowed_domains: Option<Vec<String>>,
    // Domain deny-list.
    pub denied_domains: Option<Vec<String>>,
    // Permitted Unix socket paths — TODO confirm exact matching semantics.
    pub allow_unix_sockets: Option<Vec<String>>,
    pub allow_local_binding: Option<bool>,
}
#[derive(Deserialize, Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
#[serde(rename_all = "lowercase")]
pub enum WebSearchModeRequirement {
@@ -170,6 +186,7 @@ pub struct ConfigRequirementsToml {
pub mcp_servers: Option<BTreeMap<String, McpServerRequirement>>,
pub rules: Option<RequirementsExecPolicyToml>,
pub enforce_residency: Option<ResidencyRequirement>,
pub network: Option<NetworkRequirementsToml>,
}
/// Value paired with the requirement source it came from, for better error
@@ -202,6 +219,7 @@ pub struct ConfigRequirementsWithSources {
pub mcp_servers: Option<Sourced<BTreeMap<String, McpServerRequirement>>>,
pub rules: Option<Sourced<RequirementsExecPolicyToml>>,
pub enforce_residency: Option<Sourced<ResidencyRequirement>>,
pub network: Option<Sourced<NetworkRequirementsToml>>,
}
impl ConfigRequirementsWithSources {
@@ -236,6 +254,7 @@ impl ConfigRequirementsWithSources {
mcp_servers,
rules,
enforce_residency,
network,
}
);
}
@@ -248,6 +267,7 @@ impl ConfigRequirementsWithSources {
mcp_servers,
rules,
enforce_residency,
network,
} = self;
ConfigRequirementsToml {
allowed_approval_policies: allowed_approval_policies.map(|sourced| sourced.value),
@@ -256,6 +276,7 @@ impl ConfigRequirementsWithSources {
mcp_servers: mcp_servers.map(|sourced| sourced.value),
rules: rules.map(|sourced| sourced.value),
enforce_residency: enforce_residency.map(|sourced| sourced.value),
network: network.map(|sourced| sourced.value),
}
}
}
@@ -301,6 +322,7 @@ impl ConfigRequirementsToml {
&& self.mcp_servers.is_none()
&& self.rules.is_none()
&& self.enforce_residency.is_none()
&& self.network.is_none()
}
}
@@ -315,6 +337,7 @@ impl TryFrom<ConfigRequirementsWithSources> for ConfigRequirements {
mcp_servers,
rules,
enforce_residency,
network,
} = toml;
let approval_policy = match allowed_approval_policies {
@@ -478,6 +501,7 @@ impl TryFrom<ConfigRequirementsWithSources> for ConfigRequirements {
mcp_servers,
exec_policy,
enforce_residency,
network,
})
}
}
@@ -506,6 +530,7 @@ mod tests {
mcp_servers,
rules,
enforce_residency,
network,
} = toml;
ConfigRequirementsWithSources {
allowed_approval_policies: allowed_approval_policies
@@ -518,6 +543,7 @@ mod tests {
rules: rules.map(|value| Sourced::new(value, RequirementSource::Unknown)),
enforce_residency: enforce_residency
.map(|value| Sourced::new(value, RequirementSource::Unknown)),
network: network.map(|value| Sourced::new(value, RequirementSource::Unknown)),
}
}
@@ -547,6 +573,7 @@ mod tests {
mcp_servers: None,
rules: None,
enforce_residency: Some(enforce_residency),
network: None,
};
target.merge_unset_fields(source.clone(), other);
@@ -566,6 +593,7 @@ mod tests {
mcp_servers: None,
rules: None,
enforce_residency: Some(Sourced::new(enforce_residency, enforce_source)),
network: None,
}
);
}
@@ -597,6 +625,7 @@ mod tests {
mcp_servers: None,
rules: None,
enforce_residency: None,
network: None,
}
);
Ok(())
@@ -636,6 +665,7 @@ mod tests {
mcp_servers: None,
rules: None,
enforce_residency: None,
network: None,
}
);
Ok(())
@@ -951,6 +981,50 @@ mod tests {
Ok(())
}
#[test]
fn network_requirements_are_preserved_as_constraints_with_source() -> Result<()> {
let toml_str = r#"
[network]
enabled = true
allow_upstream_proxy = false
allowed_domains = ["api.example.com", "*.openai.com"]
denied_domains = ["blocked.example.com"]
allow_unix_sockets = ["/tmp/example.sock"]
allow_local_binding = false
"#;
let source = RequirementSource::CloudRequirements;
let mut requirements_with_sources = ConfigRequirementsWithSources::default();
requirements_with_sources.merge_unset_fields(source.clone(), from_str(toml_str)?);
let requirements = ConfigRequirements::try_from(requirements_with_sources)?;
let sourced_network = requirements
.network
.expect("network requirements should be preserved as constraints");
assert_eq!(sourced_network.source, source);
assert_eq!(sourced_network.value.enabled, Some(true));
assert_eq!(sourced_network.value.allow_upstream_proxy, Some(false));
assert_eq!(
sourced_network.value.allowed_domains.as_ref(),
Some(&vec![
"api.example.com".to_string(),
"*.openai.com".to_string()
])
);
assert_eq!(
sourced_network.value.denied_domains.as_ref(),
Some(&vec!["blocked.example.com".to_string()])
);
assert_eq!(
sourced_network.value.allow_unix_sockets.as_ref(),
Some(&vec!["/tmp/example.sock".to_string()])
);
assert_eq!(sourced_network.value.allow_local_binding, Some(false));
Ok(())
}
#[test]
fn deserialize_mcp_server_requirements() -> Result<()> {
let toml_str = r#"

View File

@@ -37,6 +37,7 @@ pub use config_requirements::ConfigRequirementsToml;
pub use config_requirements::ConstrainedWithSource;
pub use config_requirements::McpServerIdentity;
pub use config_requirements::McpServerRequirement;
pub use config_requirements::NetworkRequirementsToml;
pub use config_requirements::RequirementSource;
pub use config_requirements::ResidencyRequirement;
pub use config_requirements::SandboxModeRequirement;

View File

@@ -568,6 +568,7 @@ allowed_approval_policies = ["on-request"]
mcp_servers: None,
rules: None,
enforce_residency: None,
network: None,
})
}),
)
@@ -615,6 +616,7 @@ allowed_approval_policies = ["on-request"]
mcp_servers: None,
rules: None,
enforce_residency: None,
network: None,
},
);
load_requirements_toml(&mut config_requirements_toml, &requirements_file).await?;
@@ -651,6 +653,7 @@ async fn load_config_layers_includes_cloud_requirements() -> anyhow::Result<()>
mcp_servers: None,
rules: None,
enforce_residency: None,
network: None,
};
let expected = requirements.clone();
let cloud_requirements = CloudRequirementsLoader::new(async move { Some(requirements) });

View File

@@ -49,6 +49,7 @@ mod mcp_tool_call;
mod mentions;
mod message_history;
mod model_provider_info;
mod network_proxy;
pub mod parse_command;
pub mod path_utils;
pub mod personality_migration;

View File

@@ -0,0 +1,72 @@
use anyhow::Context;
use anyhow::Result;
use std::path::Path;
use std::process::Stdio;
use tokio::process::Child;
use tokio::process::Command;
use tokio::time::Duration;
use tokio::time::timeout;
/// Handle to a spawned `codex-network-proxy` child process.
///
/// The wrapper owns the child's lifetime: callers start it via
/// [`ManagedNetworkProxy::maybe_start`] and terminate it via
/// [`ManagedNetworkProxy::shutdown`].
pub(crate) struct ManagedNetworkProxy {
    // Spawned proxy process; all stdio is detached (null) at spawn time.
    child: Child,
}

impl ManagedNetworkProxy {
    /// Spawn the proxy process if `enabled`; returns `Ok(None)` when disabled.
    ///
    /// The binary name comes from `CODEX_NETWORK_PROXY_BIN` (defaulting to
    /// `codex-network-proxy`); `CODEX_HOME` is forwarded so the proxy reads
    /// the same configuration root as this process.
    ///
    /// # Errors
    /// Fails if the process cannot be spawned, or if it exits within ~250ms of
    /// startup (treated as a failed launch).
    pub(crate) async fn maybe_start(codex_home: &Path, enabled: bool) -> Result<Option<Self>> {
        if !enabled {
            return Ok(None);
        }
        let mut child = Command::new(network_proxy_binary())
            .env("CODEX_HOME", codex_home)
            .stdin(Stdio::null())
            .stdout(Stdio::null())
            .stderr(Stdio::null())
            .spawn()
            .context("failed to spawn codex-network-proxy process")?;
        // If the proxy process exits immediately, treat startup as failed so callers can
        // log once and proceed without assuming policy enforcement is active.
        // A timeout (`Err` from `timeout`) means the child is still running — success.
        if let Ok(status_result) = timeout(Duration::from_millis(250), child.wait()).await {
            let status = status_result.context("failed to wait for codex-network-proxy")?;
            anyhow::bail!("codex-network-proxy exited early with status {status}");
        }
        Ok(Some(Self { child }))
    }

    /// Terminate the proxy if it is still running.
    ///
    /// Idempotent: returns `Ok(())` immediately when the child already exited.
    /// Otherwise sends the kill signal and waits for the process to be reaped;
    /// the wait result is deliberately ignored since the process is going away
    /// either way.
    pub(crate) async fn shutdown(&mut self) -> Result<()> {
        if self
            .child
            .try_wait()
            .context("failed to check codex-network-proxy state")?
            .is_some()
        {
            return Ok(());
        }
        self.child
            .start_kill()
            .context("failed to signal codex-network-proxy for shutdown")?;
        let _ = self.child.wait().await;
        Ok(())
    }
}
/// Resolve the proxy executable name: `CODEX_NETWORK_PROXY_BIN` wins when set,
/// otherwise the conventional `codex-network-proxy` on `PATH`.
fn network_proxy_binary() -> String {
    match std::env::var("CODEX_NETWORK_PROXY_BIN") {
        Ok(configured) => configured,
        Err(_) => "codex-network-proxy".to_string(),
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    // With the feature disabled, no child process may be spawned at all.
    #[tokio::test]
    async fn maybe_start_returns_none_when_disabled() {
        let home = tempfile::tempdir().expect("create codex home");
        let started = ManagedNetworkProxy::maybe_start(home.path(), false)
            .await
            .expect("startup should succeed");
        assert!(started.is_none());
    }
}

View File

@@ -14,6 +14,7 @@ use crate::error::Result as CodexResult;
use crate::file_watcher::FileWatcher;
use crate::file_watcher::FileWatcherEvent;
use crate::models_manager::manager::ModelsManager;
use crate::network_proxy::ManagedNetworkProxy;
use crate::protocol::Event;
use crate::protocol::EventMsg;
use crate::protocol::SessionConfiguredEvent;
@@ -36,12 +37,62 @@ use tempfile::TempDir;
use tokio::runtime::Handle;
#[cfg(any(test, feature = "test-support"))]
use tokio::runtime::RuntimeFlavor;
use tokio::sync::Mutex;
use tokio::sync::RwLock;
use tokio::sync::broadcast;
use tracing::warn;
const THREAD_CREATED_CHANNEL_CAPACITY: usize = 1024;
/// Process-wide proxy runtime owned by a single ThreadManagerState.
///
/// This keeps proxy lifecycle in one place:
/// - one startup gate (prevents duplicate binds),
/// - one proxy handle.
struct NetworkProxyRuntime {
    // Config root forwarded to the proxy process via CODEX_HOME.
    codex_home: PathBuf,
    // The running proxy, if any. `None` both before startup and after shutdown.
    proxy: Mutex<Option<ManagedNetworkProxy>>,
    // Held for the duration of a startup attempt so concurrent callers cannot
    // spawn two proxies (see `ensure_started`).
    start_gate: Mutex<()>,
}

impl NetworkProxyRuntime {
    /// Create an idle runtime; nothing is spawned until `ensure_started`.
    fn new(codex_home: PathBuf) -> Self {
        Self {
            codex_home,
            proxy: Mutex::new(None),
            start_gate: Mutex::new(()),
        }
    }

    /// Start the proxy at most once. No-op when `enabled` is false or a proxy
    /// is already running.
    ///
    /// # Errors
    /// Returns `CodexErr::Fatal` when the proxy process fails to launch.
    async fn ensure_started(&self, enabled: bool) -> CodexResult<()> {
        if !enabled {
            return Ok(());
        }
        // Serialize startup attempts so concurrent thread starts cannot race and bind twice.
        let _gate = self.start_gate.lock().await;
        if self.proxy.lock().await.is_some() {
            return Ok(());
        }
        let proxy = ManagedNetworkProxy::maybe_start(self.codex_home.as_path(), enabled)
            .await
            .map_err(|err| CodexErr::Fatal(format!("failed to start network proxy: {err:#}")))?;
        *self.proxy.lock().await = proxy;
        Ok(())
    }

    /// Stop and drop the running proxy, if any. Safe to call repeatedly.
    async fn shutdown(&self) -> CodexResult<()> {
        if let Some(mut proxy) = self.proxy.lock().await.take() {
            proxy
                .shutdown()
                .await
                .map_err(|err| CodexErr::Fatal(format!("failed to stop network proxy: {err:#}")))?;
        }
        Ok(())
    }
}
fn build_file_watcher(codex_home: PathBuf, skills_manager: Arc<SkillsManager>) -> Arc<FileWatcher> {
#[cfg(any(test, feature = "test-support"))]
if let Ok(handle) = Handle::try_current()
@@ -109,6 +160,7 @@ pub(crate) struct ThreadManagerState {
models_manager: Arc<ModelsManager>,
skills_manager: Arc<SkillsManager>,
file_watcher: Arc<FileWatcher>,
network_proxy_runtime: NetworkProxyRuntime,
session_source: SessionSource,
#[cfg(any(test, feature = "test-support"))]
#[allow(dead_code)]
@@ -125,6 +177,7 @@ impl ThreadManager {
let (thread_created_tx, _) = broadcast::channel(THREAD_CREATED_CHANNEL_CAPACITY);
let skills_manager = Arc::new(SkillsManager::new(codex_home.clone()));
let file_watcher = build_file_watcher(codex_home.clone(), Arc::clone(&skills_manager));
let network_proxy_runtime = NetworkProxyRuntime::new(codex_home.clone());
Self {
state: Arc::new(ThreadManagerState {
threads: Arc::new(RwLock::new(HashMap::new())),
@@ -132,6 +185,7 @@ impl ThreadManager {
models_manager: Arc::new(ModelsManager::new(codex_home, auth_manager.clone())),
skills_manager,
file_watcher,
network_proxy_runtime,
auth_manager,
session_source,
#[cfg(any(test, feature = "test-support"))]
@@ -165,6 +219,7 @@ impl ThreadManager {
let (thread_created_tx, _) = broadcast::channel(THREAD_CREATED_CHANNEL_CAPACITY);
let skills_manager = Arc::new(SkillsManager::new(codex_home.clone()));
let file_watcher = build_file_watcher(codex_home.clone(), Arc::clone(&skills_manager));
let network_proxy_runtime = NetworkProxyRuntime::new(codex_home.clone());
Self {
state: Arc::new(ThreadManagerState {
threads: Arc::new(RwLock::new(HashMap::new())),
@@ -176,6 +231,7 @@ impl ThreadManager {
)),
skills_manager,
file_watcher,
network_proxy_runtime,
auth_manager,
session_source: SessionSource::Exec,
#[cfg(any(test, feature = "test-support"))]
@@ -301,16 +357,12 @@ impl ThreadManager {
/// as `Arc<CodexThread>`, it is possible that other references to it exist elsewhere.
/// Returns the thread if the thread was found and removed.
pub async fn remove_thread(&self, thread_id: &ThreadId) -> Option<Arc<CodexThread>> {
self.state.threads.write().await.remove(thread_id)
self.state.remove_thread(thread_id).await
}
/// Closes all threads open in this ThreadManager
pub async fn remove_and_close_all_threads(&self) -> CodexResult<()> {
for thread in self.state.threads.read().await.values() {
thread.submit(Op::Shutdown).await?;
}
self.state.threads.write().await.clear();
Ok(())
self.state.remove_and_close_all_threads().await
}
/// Fork an existing thread by taking messages up to the given position (not including
@@ -434,6 +486,11 @@ impl ThreadManagerState {
session_source: SessionSource,
dynamic_tools: Vec<codex_protocol::dynamic_tools::DynamicToolSpec>,
) -> CodexResult<NewThread> {
// Requirements currently control whether proxy wiring is active for this process.
let proxy_enabled = proxy_enabled_from_requirements(&config);
if let Err(err) = self.ensure_network_proxy_started(proxy_enabled).await {
warn!("failed to start network proxy singleton: {err:#}");
}
self.file_watcher.register_config(&config);
let CodexSpawnOk {
codex, thread_id, ..
@@ -485,6 +542,46 @@ impl ThreadManagerState {
    /// Broadcast a newly-created thread id to subscribers; a send error (no
    /// active receivers) is intentionally ignored.
    pub(crate) fn notify_thread_created(&self, thread_id: ThreadId) {
        let _ = self.thread_created_tx.send(thread_id);
    }
    /// Start the process-wide network proxy (at most once) when `enabled`.
    pub(crate) async fn ensure_network_proxy_started(&self, enabled: bool) -> CodexResult<()> {
        self.network_proxy_runtime.ensure_started(enabled).await
    }

    /// Stop the process-wide network proxy, if one is running.
    pub(crate) async fn shutdown_network_proxy(&self) -> CodexResult<()> {
        self.network_proxy_runtime.shutdown().await
    }
    /// Shut down every open thread, clear the thread map, then stop the shared
    /// network proxy.
    ///
    /// Proxy-shutdown failures are logged rather than propagated so thread
    /// teardown always completes; a failed `Op::Shutdown` submit, however, is
    /// returned to the caller.
    pub(crate) async fn remove_and_close_all_threads(&self) -> CodexResult<()> {
        // Clone thread handles first so we don't hold the map lock while awaiting shutdown.
        let threads = self
            .threads
            .read()
            .await
            .values()
            .cloned()
            .collect::<Vec<_>>();
        for thread in threads {
            thread.submit(Op::Shutdown).await?;
        }
        self.threads.write().await.clear();
        if let Err(err) = self.shutdown_network_proxy().await {
            warn!("failed to stop network proxy singleton: {err:#}");
        }
        Ok(())
    }
}
/// Decide whether proxy wiring should be active for this process.
///
/// The mere presence of a `[network]` block in the loaded requirements enables
/// the proxy; an explicit `enabled = false` is honored as an opt-out so control
/// stays with the requirements author. No block at all means no proxy.
fn proxy_enabled_from_requirements(config: &Config) -> bool {
    match config.config_layer_stack.requirements_toml().network.as_ref() {
        Some(network) => network.enabled.unwrap_or(true),
        None => false,
    }
}
/// Return a prefix of `items` obtained by cutting strictly before the nth user message
@@ -614,4 +711,26 @@ mod tests {
serde_json::to_value(&expected).unwrap()
);
}
    // The runtime must not spawn anything when the network feature is off.
    #[tokio::test]
    async fn network_proxy_runtime_noops_when_network_disabled() {
        let codex_home = tempfile::tempdir().expect("create codex_home tempdir");
        let runtime = NetworkProxyRuntime::new(codex_home.path().to_path_buf());
        runtime
            .ensure_started(false)
            .await
            .expect("disabled network should skip startup");
        assert!(runtime.proxy.lock().await.is_none());
    }

    // Shutting down before any proxy was started is a harmless no-op.
    #[tokio::test]
    async fn network_proxy_runtime_shutdown_noops_without_proxy() {
        let codex_home = tempfile::tempdir().expect("create codex_home tempdir");
        let runtime = NetworkProxyRuntime::new(codex_home.path().to_path_buf());
        runtime
            .shutdown()
            .await
            .expect("shutdown without active proxy should succeed");
        assert!(runtime.proxy.lock().await.is_none());
    }
}