Compare commits

...

8 Commits

Author SHA1 Message Date
gt-oai
761a37d5c1 blocking debug 2026-01-31 01:42:50 +00:00
gt-oai
e04cfc3601 Make cloud requirements load fail-closed 2026-01-31 00:28:03 +00:00
gt-oai
149f3aa27a Add enforce_residency to requirements (#10263)
Add `enforce_residency` to requirements.toml and thread it through to a
header on `default_client`.
2026-01-31 00:26:25 +00:00
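For reference, a minimal standalone sketch of how the new key deserializes — stand-in types rather than the crate's real `ConfigRequirementsToml`, assuming the `serde` (with derive) and `toml` crates:

```rust
use serde::Deserialize;

#[derive(Deserialize, Debug, Clone, Copy, PartialEq, Eq)]
#[serde(rename_all = "lowercase")]
enum ResidencyRequirement {
    Us,
}

#[derive(Deserialize, Debug)]
struct Requirements {
    // Other requirement fields (allowed_approval_policies, ...) are omitted here.
    enforce_residency: Option<ResidencyRequirement>,
}

fn main() -> Result<(), toml::de::Error> {
    // `enforce_residency = "us"` is the shape exercised by the tests in this diff.
    let requirements: Requirements = toml::from_str(r#"enforce_residency = "us""#)?;
    assert_eq!(requirements.enforce_residency, Some(ResidencyRequirement::Us));
    Ok(())
}
```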
gt-oai
a046481ad9 Wire up cloud reqs in exec, app-server (#10241)
We're already fetching cloud requirements in the TUI in
https://github.com/openai/codex/pull/10167.

This adds the same fetching to the exec and app-server binaries as well.
2026-01-30 23:53:41 +00:00
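A simplified sketch of the loader pattern those binaries share, loosely modeled on `CloudRequirementsLoader`: the one-time fetch is wrapped in a cloneable, memoized future so every config build awaits the same result. Stand-in types; assumes the `futures` and `tokio` crates:

```rust
use futures::future::{BoxFuture, FutureExt, Shared};
use std::future::Future;

#[derive(Clone)]
struct RequirementsLoader {
    // Shared, boxed future: the fetch runs once and its result is cloned to all callers.
    fut: Shared<BoxFuture<'static, Option<String>>>,
}

impl RequirementsLoader {
    fn new<F>(fut: F) -> Self
    where
        F: Future<Output = Option<String>> + Send + 'static,
    {
        Self { fut: fut.boxed().shared() }
    }

    async fn get(&self) -> Option<String> {
        self.fut.clone().await
    }
}

#[tokio::main]
async fn main() {
    let loader = RequirementsLoader::new(async {
        // The real loader fetches requirements.toml from the backend; a literal stands in here.
        Some("allowed_approval_policies = [\"never\"]".to_string())
    });
    // Every consumer (exec, app-server, TUI) awaits the same memoized fetch.
    let (first, second) = tokio::join!(loader.get(), loader.get());
    assert_eq!(first, second);
}
```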
Michael Bolin
10ea117ee1 chore: implement Mul for TruncationPolicy (#10272)
Codex thought this was a good idea while working on
https://github.com/openai/codex/pull/10192.
2026-01-30 15:50:20 -08:00
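The `TruncationPolicy` internals aren't shown in this compare view, so the following is a purely hypothetical sketch of what implementing `std::ops::Mul` for such a policy type can look like (the field is invented for illustration):

```rust
use std::ops::Mul;

#[derive(Debug, Clone, Copy, PartialEq)]
struct TruncationPolicy {
    max_tokens: usize, // hypothetical field, for illustration only
}

impl Mul<usize> for TruncationPolicy {
    type Output = TruncationPolicy;

    // Scaling a policy multiplies its budget.
    fn mul(self, rhs: usize) -> TruncationPolicy {
        TruncationPolicy { max_tokens: self.max_tokens * rhs }
    }
}

fn main() {
    let policy = TruncationPolicy { max_tokens: 1_000 };
    assert_eq!((policy * 4).max_tokens, 4_000);
}
```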
Eric Traut
8d142fd63d Validate CODEX_HOME before resolving (#10249)
Summary
- require `CODEX_HOME` to point to an existing directory before
canonicalizing and surface clear errors otherwise
- share the same helper logic in both `core` and `rmcp-client` and add
unit tests that cover missing, non-directory, valid, and default paths

This addresses #9222
2026-01-30 15:46:33 -08:00
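A minimal sketch of the described check — not the actual `codex-utils-home-dir` helper, whose error types and wording may differ — assuming the `dirs` crate:

```rust
use dirs::home_dir; // assumes the `dirs` crate, as the previous helper used
use std::env;
use std::io::{Error, ErrorKind};
use std::path::PathBuf;

fn find_codex_home() -> std::io::Result<PathBuf> {
    match env::var("CODEX_HOME") {
        // Set and non-empty: it must already exist and be a directory before canonicalizing.
        Ok(val) if !val.is_empty() => {
            let path = PathBuf::from(&val);
            if !path.is_dir() {
                return Err(Error::new(
                    ErrorKind::NotFound,
                    format!("CODEX_HOME is not an existing directory: {val}"),
                ));
            }
            path.canonicalize()
        }
        // Unset: default to ~/.codex without verifying that it exists.
        _ => {
            let mut p = home_dir().ok_or_else(|| {
                Error::new(ErrorKind::NotFound, "Could not find home directory")
            })?;
            p.push(".codex");
            Ok(p)
        }
    }
}

fn main() -> std::io::Result<()> {
    println!("{}", find_codex_home()?.display());
    Ok(())
}
```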
Yuvraj Angad Singh
13e85b1549 fix: update file search directory when session CWD changes (#9279)
## Summary

Fixes #9041

- Adds an `update_search_dir()` method to `FileSearchManager` so the search
directory can be updated after initialization
- Calls this method whenever the session CWD changes: new session, resume,
or fork

## Problem

The FileSearchManager was created once with the initial search_dir and
never updated. When a user:

1. Starts Codex in a non-git directory (e.g., /tmp/random)
2. Resumes or forks a session from a different workspace
3. Uses the @filename lookup

The lookup still searched the original directory, so no matches were
returned even when files existed in the current workspace.

## Solution

Update FileSearchManager.search_dir whenever the session working
directory changes:
- AppEvent::NewSession: Use current config CWD
- SessionSelection::Resume: Use resumed session CWD
- SessionSelection::Fork: Use forked session CWD

## Test plan

- [ ] Start Codex in /tmp/test-dir (non-git)
- [ ] Resume a session from a project with actual files
- [ ] Verify @filename returns matches from the resumed session
directory

---------

Co-authored-by: Eric Traut <etraut@openai.com>
2026-01-30 14:59:20 -08:00
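An illustrative stand-in for the fix (not the real `FileSearchManager` API): the manager gains a setter, and the app calls it whenever the session working directory changes:

```rust
use std::path::{Path, PathBuf};

struct FileSearchManager {
    search_dir: PathBuf,
}

impl FileSearchManager {
    fn new(search_dir: PathBuf) -> Self {
        Self { search_dir }
    }

    /// Point subsequent @filename searches at a new root directory.
    fn update_search_dir(&mut self, search_dir: PathBuf) {
        self.search_dir = search_dir;
    }

    fn search(&self, query: &str) -> Vec<PathBuf> {
        // The real implementation walks `self.search_dir`; elided here.
        println!("searching for {query:?} under {}", self.search_dir.display());
        Vec::new()
    }
}

// Called for NewSession / Resume / Fork, per the fix description.
fn on_session_cwd_changed(manager: &mut FileSearchManager, new_cwd: &Path) {
    manager.update_search_dir(new_cwd.to_path_buf());
}

fn main() {
    let mut manager = FileSearchManager::new(PathBuf::from("/tmp/test-dir"));
    manager.search("readme");
    on_session_cwd_changed(&mut manager, Path::new("/home/user/project"));
    manager.search("readme");
}
```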
sayan-oai
31d1e49340 fix: dont auto-enable web_search for azure (#10266)
We're seeing issues with Azure after default-enabling web search: #10071,
#10257.

We need to work with Azure on an API-side fix; for now, this turns off the
default-enabling of web_search for Azure.

The diff is large because some logic was moved so it could be reused.
2026-01-30 22:52:37 +00:00
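A sketch of the resulting default, mirroring the behavior of `resolve_web_search_mode_for_turn` in this diff with stand-in enums: an explicit setting always wins, Azure Responses endpoints fall back to disabled, and otherwise the sandbox policy decides between live and cached search:

```rust
#[derive(Debug, Clone, Copy, PartialEq)]
enum WebSearchMode {
    Disabled,
    Cached,
    Live,
}

#[derive(Debug, Clone, Copy, PartialEq)]
enum SandboxPolicy {
    ReadOnly,
    DangerFullAccess,
}

fn resolve_web_search_mode_for_turn(
    explicit_mode: Option<WebSearchMode>,
    is_azure_responses_endpoint: bool,
    sandbox_policy: SandboxPolicy,
) -> WebSearchMode {
    if let Some(mode) = explicit_mode {
        return mode; // an explicit user/config value is always respected
    }
    if is_azure_responses_endpoint {
        return WebSearchMode::Disabled; // don't default-enable on Azure
    }
    match sandbox_policy {
        SandboxPolicy::DangerFullAccess => WebSearchMode::Live,
        _ => WebSearchMode::Cached,
    }
}

fn main() {
    assert_eq!(
        resolve_web_search_mode_for_turn(None, true, SandboxPolicy::DangerFullAccess),
        WebSearchMode::Disabled
    );
    assert_eq!(
        resolve_web_search_mode_for_turn(None, false, SandboxPolicy::ReadOnly),
        WebSearchMode::Cached
    );
}
```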
42 changed files with 950 additions and 387 deletions

17
codex-rs/Cargo.lock generated
View File

@@ -1088,6 +1088,7 @@ dependencies = [
"codex-arg0",
"codex-backend-client",
"codex-chatgpt",
"codex-cloud-requirements",
"codex-common",
"codex-core",
"codex-execpolicy",
@@ -1296,6 +1297,7 @@ dependencies = [
name = "codex-cloud-requirements"
version = "0.0.0"
dependencies = [
"anyhow",
"async-trait",
"base64",
"codex-backend-client",
@@ -1305,6 +1307,7 @@ dependencies = [
"pretty_assertions",
"serde_json",
"tempfile",
"thiserror 2.0.17",
"tokio",
"toml 0.9.5",
"tracing",
@@ -1400,6 +1403,7 @@ dependencies = [
"codex-state",
"codex-utils-absolute-path",
"codex-utils-cargo-bin",
"codex-utils-home-dir",
"codex-utils-pty",
"codex-utils-readiness",
"codex-utils-string",
@@ -1407,7 +1411,6 @@ dependencies = [
"core-foundation 0.9.4",
"core_test_support",
"ctor 0.6.3",
"dirs",
"dunce",
"encoding_rs",
"env-flags",
@@ -1487,6 +1490,7 @@ dependencies = [
"assert_cmd",
"clap",
"codex-arg0",
"codex-cloud-requirements",
"codex-common",
"codex-core",
"codex-protocol",
@@ -1842,7 +1846,7 @@ dependencies = [
"codex-keyring-store",
"codex-protocol",
"codex-utils-cargo-bin",
"dirs",
"codex-utils-home-dir",
"futures",
"keyring",
"mcp-types",
@@ -2007,6 +2011,15 @@ dependencies = [
"thiserror 2.0.17",
]
[[package]]
name = "codex-utils-home-dir"
version = "0.0.0"
dependencies = [
"dirs",
"pretty_assertions",
"tempfile",
]
[[package]]
name = "codex-utils-image"
version = "0.0.0"

View File

@@ -43,6 +43,7 @@ members = [
"utils/cache",
"utils/image",
"utils/json-to-toml",
"utils/home-dir",
"utils/pty",
"utils/readiness",
"utils/string",
@@ -102,6 +103,7 @@ codex-utils-cache = { path = "utils/cache" }
codex-utils-cargo-bin = { path = "utils/cargo-bin" }
codex-utils-image = { path = "utils/image" }
codex-utils-json-to-toml = { path = "utils/json-to-toml" }
codex-utils-home-dir = { path = "utils/home-dir" }
codex-utils-pty = { path = "utils/pty" }
codex-utils-readiness = { path = "utils/readiness" }
codex-utils-string = { path = "utils/string" }

View File

@@ -497,6 +497,14 @@ pub struct ConfigReadResponse {
pub struct ConfigRequirements {
pub allowed_approval_policies: Option<Vec<AskForApproval>>,
pub allowed_sandbox_modes: Option<Vec<SandboxMode>>,
pub enforce_residency: Option<ResidencyRequirement>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq, JsonSchema, TS)]
#[serde(rename_all = "lowercase")]
#[ts(export_to = "v2/")]
pub enum ResidencyRequirement {
Us,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]

View File

@@ -19,6 +19,7 @@ workspace = true
anyhow = { workspace = true }
async-trait = { workspace = true }
codex-arg0 = { workspace = true }
codex-cloud-requirements = { workspace = true }
codex-common = { workspace = true, features = ["cli"] }
codex-core = { workspace = true }
codex-backend-client = { workspace = true }

View File

@@ -103,7 +103,7 @@ Example (from OpenAI's official VSCode extension):
- `config/read` — fetch the effective config on disk after resolving config layering.
- `config/value/write` — write a single config key/value to the user's config.toml on disk.
- `config/batchWrite` — apply multiple config edits atomically to the user's config.toml on disk.
- `configRequirements/read` — fetch the loaded requirements allow-lists from `requirements.toml` and/or MDM (or `null` if none are configured).
- `configRequirements/read` — fetch the loaded requirements allow-lists and `enforceResidency` from `requirements.toml` and/or MDM (or `null` if none are configured).
### Example: Start or resume a thread

View File

@@ -152,6 +152,7 @@ use codex_core::config::ConfigService;
use codex_core::config::edit::ConfigEdit;
use codex_core::config::edit::ConfigEditsBuilder;
use codex_core::config::types::McpServerTransportConfig;
use codex_core::config_loader::CloudRequirementsLoader;
use codex_core::default_client::get_codex_user_agent;
use codex_core::error::CodexErr;
use codex_core::exec::ExecParams;
@@ -263,6 +264,7 @@ pub(crate) struct CodexMessageProcessor {
codex_linux_sandbox_exe: Option<PathBuf>,
config: Arc<Config>,
cli_overrides: Vec<(String, TomlValue)>,
cloud_requirements: CloudRequirementsLoader,
conversation_listeners: HashMap<Uuid, oneshot::Sender<()>>,
listener_thread_ids_by_subscription: HashMap<Uuid, ThreadId>,
active_login: Arc<Mutex<Option<ActiveLogin>>>,
@@ -281,6 +283,17 @@ pub(crate) enum ApiVersion {
V2,
}
pub(crate) struct CodexMessageProcessorArgs {
pub(crate) auth_manager: Arc<AuthManager>,
pub(crate) thread_manager: Arc<ThreadManager>,
pub(crate) outgoing: Arc<OutgoingMessageSender>,
pub(crate) codex_linux_sandbox_exe: Option<PathBuf>,
pub(crate) config: Arc<Config>,
pub(crate) cli_overrides: Vec<(String, TomlValue)>,
pub(crate) cloud_requirements: CloudRequirementsLoader,
pub(crate) feedback: CodexFeedback,
}
impl CodexMessageProcessor {
async fn load_thread(
&self,
@@ -305,15 +318,17 @@ impl CodexMessageProcessor {
Ok((thread_id, thread))
}
pub fn new(
auth_manager: Arc<AuthManager>,
thread_manager: Arc<ThreadManager>,
outgoing: Arc<OutgoingMessageSender>,
codex_linux_sandbox_exe: Option<PathBuf>,
config: Arc<Config>,
cli_overrides: Vec<(String, TomlValue)>,
feedback: CodexFeedback,
) -> Self {
pub fn new(args: CodexMessageProcessorArgs) -> Self {
let CodexMessageProcessorArgs {
auth_manager,
thread_manager,
outgoing,
codex_linux_sandbox_exe,
config,
cli_overrides,
cloud_requirements,
feedback,
} = args;
Self {
auth_manager,
thread_manager,
@@ -321,6 +336,7 @@ impl CodexMessageProcessor {
codex_linux_sandbox_exe,
config,
cli_overrides,
cloud_requirements,
conversation_listeners: HashMap::new(),
listener_thread_ids_by_subscription: HashMap::new(),
active_login: Arc::new(Mutex::new(None)),
@@ -333,7 +349,10 @@ impl CodexMessageProcessor {
}
async fn load_latest_config(&self) -> Result<Config, JSONRPCErrorError> {
Config::load_with_cli_overrides(self.cli_overrides.clone())
codex_core::config::ConfigBuilder::default()
.cli_overrides(self.cli_overrides.clone())
.cloud_requirements(self.cloud_requirements.clone())
.build()
.await
.map_err(|err| JSONRPCErrorError {
code: INTERNAL_ERROR_CODE,
@@ -1519,6 +1538,7 @@ impl CodexMessageProcessor {
&self.cli_overrides,
Some(request_overrides),
typesafe_overrides,
&self.cloud_requirements,
)
.await
{
@@ -1603,6 +1623,7 @@ impl CodexMessageProcessor {
&self.cli_overrides,
config,
typesafe_overrides,
&self.cloud_requirements,
)
.await
{
@@ -2350,6 +2371,7 @@ impl CodexMessageProcessor {
request_overrides,
typesafe_overrides,
history_cwd,
&self.cloud_requirements,
)
.await
{
@@ -2542,6 +2564,7 @@ impl CodexMessageProcessor {
request_overrides,
typesafe_overrides,
history_cwd,
&self.cloud_requirements,
)
.await
{
@@ -3336,6 +3359,7 @@ impl CodexMessageProcessor {
request_overrides,
typesafe_overrides,
history_cwd,
&self.cloud_requirements,
)
.await
{
@@ -3524,6 +3548,7 @@ impl CodexMessageProcessor {
request_overrides,
typesafe_overrides,
history_cwd,
&self.cloud_requirements,
)
.await
{
@@ -4794,6 +4819,7 @@ async fn derive_config_from_params(
cli_overrides: &[(String, TomlValue)],
request_overrides: Option<HashMap<String, serde_json::Value>>,
typesafe_overrides: ConfigOverrides,
cloud_requirements: &CloudRequirementsLoader,
) -> std::io::Result<Config> {
let merged_cli_overrides = cli_overrides
.iter()
@@ -4806,7 +4832,11 @@ async fn derive_config_from_params(
)
.collect::<Vec<_>>();
Config::load_with_cli_overrides_and_harness_overrides(merged_cli_overrides, typesafe_overrides)
codex_core::config::ConfigBuilder::default()
.cli_overrides(merged_cli_overrides)
.harness_overrides(typesafe_overrides)
.cloud_requirements(cloud_requirements.clone())
.build()
.await
}
@@ -4815,6 +4845,7 @@ async fn derive_config_for_cwd(
request_overrides: Option<HashMap<String, serde_json::Value>>,
typesafe_overrides: ConfigOverrides,
cwd: Option<PathBuf>,
cloud_requirements: &CloudRequirementsLoader,
) -> std::io::Result<Config> {
let merged_cli_overrides = cli_overrides
.iter()
@@ -4831,6 +4862,7 @@ async fn derive_config_for_cwd(
.cli_overrides(merged_cli_overrides)
.harness_overrides(typesafe_overrides)
.fallback_cwd(cwd)
.cloud_requirements(cloud_requirements.clone())
.build()
.await
}

View File

@@ -12,8 +12,10 @@ use codex_app_server_protocol::JSONRPCErrorError;
use codex_app_server_protocol::SandboxMode;
use codex_core::config::ConfigService;
use codex_core::config::ConfigServiceError;
use codex_core::config_loader::CloudRequirementsLoader;
use codex_core::config_loader::ConfigRequirementsToml;
use codex_core::config_loader::LoaderOverrides;
use codex_core::config_loader::ResidencyRequirement as CoreResidencyRequirement;
use codex_core::config_loader::SandboxModeRequirement as CoreSandboxModeRequirement;
use serde_json::json;
use std::path::PathBuf;
@@ -29,9 +31,15 @@ impl ConfigApi {
codex_home: PathBuf,
cli_overrides: Vec<(String, TomlValue)>,
loader_overrides: LoaderOverrides,
cloud_requirements: CloudRequirementsLoader,
) -> Self {
Self {
service: ConfigService::new(codex_home, cli_overrides, loader_overrides),
service: ConfigService::new(
codex_home,
cli_overrides,
loader_overrides,
cloud_requirements,
),
}
}
@@ -84,6 +92,9 @@ fn map_requirements_toml_to_api(requirements: ConfigRequirementsToml) -> ConfigR
.filter_map(map_sandbox_mode_requirement_to_api)
.collect()
}),
enforce_residency: requirements
.enforce_residency
.map(map_residency_requirement_to_api),
}
}
@@ -96,6 +107,14 @@ fn map_sandbox_mode_requirement_to_api(mode: CoreSandboxModeRequirement) -> Opti
}
}
fn map_residency_requirement_to_api(
residency: CoreResidencyRequirement,
) -> codex_app_server_protocol::ResidencyRequirement {
match residency {
CoreResidencyRequirement::Us => codex_app_server_protocol::ResidencyRequirement::Us,
}
}
fn map_error(err: ConfigServiceError) -> JSONRPCErrorError {
if let Some(code) = err.write_error_code() {
return config_write_error(code, err.to_string());
@@ -137,6 +156,7 @@ mod tests {
]),
mcp_servers: None,
rules: None,
enforce_residency: Some(CoreResidencyRequirement::Us),
};
let mapped = map_requirements_toml_to_api(requirements);
@@ -152,5 +172,9 @@ mod tests {
mapped.allowed_sandbox_modes,
Some(vec![SandboxMode::ReadOnly]),
);
assert_eq!(
mapped.enforce_residency,
Some(codex_app_server_protocol::ResidencyRequirement::Us),
);
}
}

View File

@@ -1,8 +1,11 @@
#![deny(clippy::print_stdout, clippy::print_stderr)]
use codex_cloud_requirements::cloud_requirements_loader;
use codex_common::CliConfigOverrides;
use codex_core::AuthManager;
use codex_core::config::Config;
use codex_core::config::ConfigBuilder;
use codex_core::config_loader::CloudRequirementsLoader;
use codex_core::config_loader::ConfigLayerStackOrdering;
use codex_core::config_loader::LoaderOverrides;
use std::io::ErrorKind;
@@ -10,6 +13,7 @@ use std::io::Result as IoResult;
use std::path::PathBuf;
use crate::message_processor::MessageProcessor;
use crate::message_processor::MessageProcessorArgs;
use crate::outgoing_message::OutgoingMessage;
use crate::outgoing_message::OutgoingMessageSender;
use codex_app_server_protocol::ConfigLayerSource;
@@ -204,11 +208,32 @@ pub async fn run_main(
format!("error parsing -c overrides: {e}"),
)
})?;
let cloud_requirements = match ConfigBuilder::default()
.cli_overrides(cli_kv_overrides.clone())
.loader_overrides(loader_overrides.clone())
.build()
.await
{
Ok(config) => {
let auth_manager = AuthManager::shared(
config.codex_home.clone(),
false,
config.cli_auth_credentials_store_mode,
);
cloud_requirements_loader(auth_manager, config.chatgpt_base_url)
}
Err(err) => {
warn!(error = %err, "Failed to preload config for cloud requirements");
// TODO(gt): Make cloud requirements preload failures blocking once we can fail-closed.
CloudRequirementsLoader::default()
}
};
let loader_overrides_for_config_api = loader_overrides.clone();
let mut config_warnings = Vec::new();
let config = match ConfigBuilder::default()
.cli_overrides(cli_kv_overrides.clone())
.loader_overrides(loader_overrides)
.cloud_requirements(cloud_requirements.clone())
.build()
.await
{
@@ -290,15 +315,16 @@ pub async fn run_main(
let outgoing_message_sender = OutgoingMessageSender::new(outgoing_tx);
let cli_overrides: Vec<(String, TomlValue)> = cli_kv_overrides.clone();
let loader_overrides = loader_overrides_for_config_api;
let mut processor = MessageProcessor::new(
outgoing_message_sender,
let mut processor = MessageProcessor::new(MessageProcessorArgs {
outgoing: outgoing_message_sender,
codex_linux_sandbox_exe,
std::sync::Arc::new(config),
config: std::sync::Arc::new(config),
cli_overrides,
loader_overrides,
feedback.clone(),
cloud_requirements: cloud_requirements.clone(),
feedback: feedback.clone(),
config_warnings,
);
});
let mut thread_created_rx = processor.thread_created_receiver();
async move {
let mut listen_for_threads = true;

View File

@@ -2,6 +2,7 @@ use std::path::PathBuf;
use std::sync::Arc;
use crate::codex_message_processor::CodexMessageProcessor;
use crate::codex_message_processor::CodexMessageProcessorArgs;
use crate::config_api::ConfigApi;
use crate::error_code::INVALID_REQUEST_ERROR_CODE;
use crate::outgoing_message::OutgoingMessageSender;
@@ -31,10 +32,12 @@ use codex_core::auth::ExternalAuthRefreshReason;
use codex_core::auth::ExternalAuthRefresher;
use codex_core::auth::ExternalAuthTokens;
use codex_core::config::Config;
use codex_core::config_loader::CloudRequirementsLoader;
use codex_core::config_loader::LoaderOverrides;
use codex_core::default_client::SetOriginatorError;
use codex_core::default_client::USER_AGENT_SUFFIX;
use codex_core::default_client::get_codex_user_agent;
use codex_core::default_client::set_default_client_residency_requirement;
use codex_core::default_client::set_default_originator;
use codex_feedback::CodexFeedback;
use codex_protocol::ThreadId;
@@ -102,22 +105,36 @@ pub(crate) struct MessageProcessor {
outgoing: Arc<OutgoingMessageSender>,
codex_message_processor: CodexMessageProcessor,
config_api: ConfigApi,
config: Arc<Config>,
initialized: bool,
config_warnings: Vec<ConfigWarningNotification>,
}
pub(crate) struct MessageProcessorArgs {
pub(crate) outgoing: OutgoingMessageSender,
pub(crate) codex_linux_sandbox_exe: Option<PathBuf>,
pub(crate) config: Arc<Config>,
pub(crate) cli_overrides: Vec<(String, TomlValue)>,
pub(crate) loader_overrides: LoaderOverrides,
pub(crate) cloud_requirements: CloudRequirementsLoader,
pub(crate) feedback: CodexFeedback,
pub(crate) config_warnings: Vec<ConfigWarningNotification>,
}
impl MessageProcessor {
/// Create a new `MessageProcessor`, retaining a handle to the outgoing
/// `Sender` so handlers can enqueue messages to be written to stdout.
pub(crate) fn new(
outgoing: OutgoingMessageSender,
codex_linux_sandbox_exe: Option<PathBuf>,
config: Arc<Config>,
cli_overrides: Vec<(String, TomlValue)>,
loader_overrides: LoaderOverrides,
feedback: CodexFeedback,
config_warnings: Vec<ConfigWarningNotification>,
) -> Self {
pub(crate) fn new(args: MessageProcessorArgs) -> Self {
let MessageProcessorArgs {
outgoing,
codex_linux_sandbox_exe,
config,
cli_overrides,
loader_overrides,
cloud_requirements,
feedback,
config_warnings,
} = args;
let outgoing = Arc::new(outgoing);
let auth_manager = AuthManager::shared(
config.codex_home.clone(),
@@ -133,21 +150,28 @@ impl MessageProcessor {
auth_manager.clone(),
SessionSource::VSCode,
));
let codex_message_processor = CodexMessageProcessor::new(
let codex_message_processor = CodexMessageProcessor::new(CodexMessageProcessorArgs {
auth_manager,
thread_manager,
outgoing.clone(),
outgoing: outgoing.clone(),
codex_linux_sandbox_exe,
Arc::clone(&config),
cli_overrides.clone(),
config: Arc::clone(&config),
cli_overrides: cli_overrides.clone(),
cloud_requirements: cloud_requirements.clone(),
feedback,
});
let config_api = ConfigApi::new(
config.codex_home.clone(),
cli_overrides,
loader_overrides,
cloud_requirements,
);
let config_api = ConfigApi::new(config.codex_home.clone(), cli_overrides, loader_overrides);
Self {
outgoing,
codex_message_processor,
config_api,
config,
initialized: false,
config_warnings,
}
@@ -220,6 +244,7 @@ impl MessageProcessor {
}
}
}
set_default_client_residency_requirement(self.config.enforce_residency.value());
let user_agent_suffix = format!("{name}; {version}");
if let Ok(mut suffix) = USER_AGENT_SUFFIX.lock() {
*suffix = Some(user_agent_suffix);

View File

@@ -14,10 +14,12 @@ codex-core = { workspace = true }
codex-otel = { workspace = true }
codex-protocol = { workspace = true }
tokio = { workspace = true, features = ["sync", "time"] }
thiserror = { workspace = true }
toml = { workspace = true }
tracing = { workspace = true }
[dev-dependencies]
anyhow = { workspace = true }
base64 = { workspace = true }
pretty_assertions = { workspace = true }
serde_json = { workspace = true }

View File

@@ -3,9 +3,7 @@
//! This crate fetches `requirements.toml` data from the backend as an alternative to loading it
//! from the local filesystem. It only applies to Enterprise ChatGPT customers.
//!
//! Today, fetching is best-effort: on error or timeout, Codex continues without cloud requirements.
//! We expect to tighten this so that Enterprise ChatGPT customers must successfully fetch these
//! requirements before Codex will run.
//! Enterprise ChatGPT customers must successfully fetch these requirements before Codex will run.
use async_trait::async_trait;
use codex_backend_client::Client as BackendClient;
@@ -14,21 +12,73 @@ use codex_core::auth::CodexAuth;
use codex_core::config_loader::CloudRequirementsLoader;
use codex_core::config_loader::ConfigRequirementsToml;
use codex_protocol::account::PlanType;
use std::io;
use std::sync::Arc;
use std::time::Duration;
use std::time::Instant;
use thiserror::Error;
use tokio::time::timeout;
/// This blocks Codex startup, so it must be short.
const CLOUD_REQUIREMENTS_TIMEOUT: Duration = Duration::from_secs(5);
#[derive(Debug, Error, Clone, PartialEq, Eq)]
enum CloudRequirementsError {
#[error("cloud requirements user error: {0}")]
User(CloudRequirementsUserError),
#[error("cloud requirements network error: {0}")]
Network(CloudRequirementsNetworkError),
}
impl From<CloudRequirementsUserError> for CloudRequirementsError {
fn from(err: CloudRequirementsUserError) -> Self {
CloudRequirementsError::User(err)
}
}
impl From<CloudRequirementsNetworkError> for CloudRequirementsError {
fn from(err: CloudRequirementsNetworkError) -> Self {
CloudRequirementsError::Network(err)
}
}
impl From<CloudRequirementsError> for io::Error {
fn from(err: CloudRequirementsError) -> Self {
let kind = match &err {
CloudRequirementsError::User(_) => io::ErrorKind::InvalidData,
CloudRequirementsError::Network(CloudRequirementsNetworkError::Timeout { .. }) => {
io::ErrorKind::TimedOut
}
CloudRequirementsError::Network(_) => io::ErrorKind::Other,
};
io::Error::new(kind, err)
}
}
#[derive(Debug, Error, Clone, PartialEq, Eq)]
enum CloudRequirementsUserError {
#[error("failed to parse requirements TOML: {message}")]
InvalidToml { message: String },
}
#[derive(Debug, Error, Clone, PartialEq, Eq)]
enum CloudRequirementsNetworkError {
#[error("backend client initialization failed: {message}")]
BackendClient { message: String },
#[error("request failed: {message}")]
Request { message: String },
#[error("cloud requirements response missing contents")]
MissingContents,
#[error("timed out after {timeout_ms}ms")]
Timeout { timeout_ms: u64 },
#[error("cloud requirements task failed: {message}")]
Task { message: String },
}
#[async_trait]
trait RequirementsFetcher: Send + Sync {
/// Returns requirements as a TOML string.
///
/// TODO(gt): For now, returns an Option. But when we want to make this fail-closed, return a
/// Result.
async fn fetch_requirements(&self, auth: &CodexAuth) -> Option<String>;
async fn fetch_requirements(&self, auth: &CodexAuth) -> Result<String, CloudRequirementsError>;
}
struct BackendRequirementsFetcher {
@@ -43,7 +93,7 @@ impl BackendRequirementsFetcher {
#[async_trait]
impl RequirementsFetcher for BackendRequirementsFetcher {
async fn fetch_requirements(&self, auth: &CodexAuth) -> Option<String> {
async fn fetch_requirements(&self, auth: &CodexAuth) -> Result<String, CloudRequirementsError> {
let client = BackendClient::from_auth(self.base_url.clone(), auth)
.inspect_err(|err| {
tracing::warn!(
@@ -51,20 +101,28 @@ impl RequirementsFetcher for BackendRequirementsFetcher {
"Failed to construct backend client for cloud requirements"
);
})
.ok()?;
.map_err(|err| CloudRequirementsNetworkError::BackendClient {
message: err.to_string(),
})
.map_err(CloudRequirementsError::from)?;
let response = client
.get_config_requirements_file()
.await
.inspect_err(|err| tracing::warn!(error = %err, "Failed to fetch cloud requirements"))
.ok()?;
.map_err(|err| CloudRequirementsNetworkError::Request {
message: err.to_string(),
})
.map_err(CloudRequirementsError::from)?;
let Some(contents) = response.contents else {
tracing::warn!("Cloud requirements response missing contents");
return None;
return Err(CloudRequirementsError::from(
CloudRequirementsNetworkError::MissingContents,
));
};
Some(contents)
Ok(contents)
}
}
@@ -87,29 +145,50 @@ impl CloudRequirementsService {
}
}
async fn fetch_with_timeout(&self) -> Option<ConfigRequirementsToml> {
async fn fetch_with_timeout(
&self,
) -> Result<Option<ConfigRequirementsToml>, CloudRequirementsError> {
let _timer =
codex_otel::start_global_timer("codex.cloud_requirements.fetch.duration_ms", &[]);
let started_at = Instant::now();
let result = timeout(self.timeout, self.fetch())
.await
.inspect_err(|_| {
tracing::warn!("Timed out waiting for cloud requirements; continuing without them");
})
.ok()?;
let result = timeout(self.timeout, self.fetch()).await.map_err(|_| {
CloudRequirementsNetworkError::Timeout {
timeout_ms: self.timeout.as_millis() as u64,
}
})?;
let elapsed_ms = started_at.elapsed().as_millis();
match result.as_ref() {
Some(requirements) => {
Ok(Some(requirements)) => {
tracing::info!(
elapsed_ms = started_at.elapsed().as_millis(),
elapsed_ms,
status = "success",
requirements = ?requirements,
"Cloud requirements load completed"
);
println!(
"cloud_requirements status=success elapsed_ms={elapsed_ms} value={requirements:?}"
);
}
None => {
Ok(None) => {
tracing::info!(
elapsed_ms = started_at.elapsed().as_millis(),
"Cloud requirements load completed (none)"
elapsed_ms,
status = "none",
requirements = %"none",
"Cloud requirements load completed"
);
println!("cloud_requirements status=none elapsed_ms={elapsed_ms} value=none");
}
Err(err) => {
tracing::warn!(
elapsed_ms,
status = "error",
requirements = %"none",
error = %err,
"Cloud requirements load failed"
);
println!(
"cloud_requirements status=error elapsed_ms={elapsed_ms} value=none error={err}"
);
}
}
@@ -117,17 +196,19 @@ impl CloudRequirementsService {
result
}
async fn fetch(&self) -> Option<ConfigRequirementsToml> {
let auth = self.auth_manager.auth().await?;
async fn fetch(&self) -> Result<Option<ConfigRequirementsToml>, CloudRequirementsError> {
let auth = match self.auth_manager.auth().await {
Some(auth) => auth,
None => return Ok(None),
};
if !(auth.is_chatgpt_auth() && auth.account_plan_type() == Some(PlanType::Enterprise)) {
return None;
return Ok(None);
}
let contents = self.fetcher.fetch_requirements(&auth).await?;
parse_cloud_requirements(&contents)
.inspect_err(|err| tracing::warn!(error = %err, "Failed to parse cloud requirements"))
.ok()
.flatten()
.map_err(CloudRequirementsError::from)
}
}
@@ -143,20 +224,28 @@ pub fn cloud_requirements_loader(
let task = tokio::spawn(async move { service.fetch_with_timeout().await });
CloudRequirementsLoader::new(async move {
task.await
.map_err(|err| {
CloudRequirementsError::from(CloudRequirementsNetworkError::Task {
message: err.to_string(),
})
})
.and_then(std::convert::identity)
.map_err(io::Error::from)
.inspect_err(|err| tracing::warn!(error = %err, "Cloud requirements task failed"))
.ok()
.flatten()
})
}
fn parse_cloud_requirements(
contents: &str,
) -> Result<Option<ConfigRequirementsToml>, toml::de::Error> {
) -> Result<Option<ConfigRequirementsToml>, CloudRequirementsUserError> {
if contents.trim().is_empty() {
return Ok(None);
}
let requirements: ConfigRequirementsToml = toml::from_str(contents)?;
let requirements: ConfigRequirementsToml =
toml::from_str(contents).map_err(|err| CloudRequirementsUserError::InvalidToml {
message: err.to_string(),
})?;
if requirements.is_empty() {
Ok(None)
} else {
@@ -167,6 +256,7 @@ fn parse_cloud_requirements(
#[cfg(test)]
mod tests {
use super::*;
use anyhow::Result;
use base64::Engine;
use base64::engine::general_purpose::URL_SAFE_NO_PAD;
use codex_core::auth::AuthCredentialsStoreMode;
@@ -177,28 +267,28 @@ mod tests {
use std::path::Path;
use tempfile::tempdir;
fn write_auth_json(codex_home: &Path, value: serde_json::Value) -> std::io::Result<()> {
fn write_auth_json(codex_home: &Path, value: serde_json::Value) -> Result<()> {
std::fs::write(codex_home.join("auth.json"), serde_json::to_string(&value)?)?;
Ok(())
}
fn auth_manager_with_api_key() -> Arc<AuthManager> {
let tmp = tempdir().expect("tempdir");
fn auth_manager_with_api_key() -> Result<Arc<AuthManager>> {
let tmp = tempdir()?;
let auth_json = json!({
"OPENAI_API_KEY": "sk-test-key",
"tokens": null,
"last_refresh": null,
});
write_auth_json(tmp.path(), auth_json).expect("write auth");
Arc::new(AuthManager::new(
write_auth_json(tmp.path(), auth_json)?;
Ok(Arc::new(AuthManager::new(
tmp.path().to_path_buf(),
false,
AuthCredentialsStoreMode::File,
))
)))
}
fn auth_manager_with_plan(plan_type: &str) -> Arc<AuthManager> {
let tmp = tempdir().expect("tempdir");
fn auth_manager_with_plan(plan_type: &str) -> Result<Arc<AuthManager>> {
let tmp = tempdir()?;
let header = json!({ "alg": "none", "typ": "JWT" });
let auth_payload = json!({
"chatgpt_plan_type": plan_type,
@@ -209,8 +299,8 @@ mod tests {
"email": "user@example.com",
"https://api.openai.com/auth": auth_payload,
});
let header_b64 = URL_SAFE_NO_PAD.encode(serde_json::to_vec(&header).expect("header"));
let payload_b64 = URL_SAFE_NO_PAD.encode(serde_json::to_vec(&payload).expect("payload"));
let header_b64 = URL_SAFE_NO_PAD.encode(serde_json::to_vec(&header)?);
let payload_b64 = URL_SAFE_NO_PAD.encode(serde_json::to_vec(&payload)?);
let signature_b64 = URL_SAFE_NO_PAD.encode(b"sig");
let fake_jwt = format!("{header_b64}.{payload_b64}.{signature_b64}");
@@ -223,26 +313,31 @@ mod tests {
},
"last_refresh": null,
});
write_auth_json(tmp.path(), auth_json).expect("write auth");
Arc::new(AuthManager::new(
write_auth_json(tmp.path(), auth_json)?;
Ok(Arc::new(AuthManager::new(
tmp.path().to_path_buf(),
false,
AuthCredentialsStoreMode::File,
))
)))
}
fn parse_for_fetch(contents: Option<&str>) -> Option<ConfigRequirementsToml> {
contents.and_then(|contents| parse_cloud_requirements(contents).ok().flatten())
fn parse_for_fetch(
contents: Option<&str>,
) -> Result<Option<ConfigRequirementsToml>, CloudRequirementsUserError> {
contents.map(parse_cloud_requirements).unwrap_or(Ok(None))
}
struct StaticFetcher {
contents: Option<String>,
result: Result<String, CloudRequirementsError>,
}
#[async_trait::async_trait]
impl RequirementsFetcher for StaticFetcher {
async fn fetch_requirements(&self, _auth: &CodexAuth) -> Option<String> {
self.contents.clone()
async fn fetch_requirements(
&self,
_auth: &CodexAuth,
) -> Result<String, CloudRequirementsError> {
self.result.clone()
}
}
@@ -250,87 +345,115 @@ mod tests {
#[async_trait::async_trait]
impl RequirementsFetcher for PendingFetcher {
async fn fetch_requirements(&self, _auth: &CodexAuth) -> Option<String> {
async fn fetch_requirements(
&self,
_auth: &CodexAuth,
) -> Result<String, CloudRequirementsError> {
pending::<()>().await;
None
Ok(String::new())
}
}
#[tokio::test]
async fn fetch_cloud_requirements_skips_non_chatgpt_auth() {
let auth_manager = auth_manager_with_api_key();
async fn fetch_cloud_requirements_skips_non_chatgpt_auth() -> Result<()> {
let service = CloudRequirementsService::new(
auth_manager,
Arc::new(StaticFetcher { contents: None }),
auth_manager_with_api_key()?,
Arc::new(StaticFetcher {
result: Ok(String::new()),
}),
CLOUD_REQUIREMENTS_TIMEOUT,
);
let result = service.fetch().await;
assert!(result.is_none());
assert_eq!(service.fetch().await, Ok(None));
Ok(())
}
#[tokio::test]
async fn fetch_cloud_requirements_skips_non_enterprise_plan() {
let auth_manager = auth_manager_with_plan("pro");
async fn fetch_cloud_requirements_skips_non_enterprise_plan() -> Result<()> {
let service = CloudRequirementsService::new(
auth_manager,
Arc::new(StaticFetcher { contents: None }),
auth_manager_with_plan("pro")?,
Arc::new(StaticFetcher {
result: Ok(String::new()),
}),
CLOUD_REQUIREMENTS_TIMEOUT,
);
let result = service.fetch().await;
assert!(result.is_none());
assert_eq!(service.fetch().await, Ok(None));
Ok(())
}
#[tokio::test]
async fn fetch_cloud_requirements_handles_missing_contents() {
let result = parse_for_fetch(None);
assert!(result.is_none());
}
#[tokio::test]
async fn fetch_cloud_requirements_handles_empty_contents() {
let result = parse_for_fetch(Some(" "));
assert!(result.is_none());
}
#[tokio::test]
async fn fetch_cloud_requirements_handles_invalid_toml() {
let result = parse_for_fetch(Some("not = ["));
assert!(result.is_none());
}
#[tokio::test]
async fn fetch_cloud_requirements_ignores_empty_requirements() {
let result = parse_for_fetch(Some("# comment"));
assert!(result.is_none());
}
#[tokio::test]
async fn fetch_cloud_requirements_parses_valid_toml() {
let result = parse_for_fetch(Some("allowed_approval_policies = [\"never\"]"));
async fn fetch_cloud_requirements_returns_missing_contents_error() -> Result<()> {
let service = CloudRequirementsService::new(
auth_manager_with_plan("enterprise")?,
Arc::new(StaticFetcher {
result: Err(CloudRequirementsError::Network(
CloudRequirementsNetworkError::MissingContents,
)),
}),
CLOUD_REQUIREMENTS_TIMEOUT,
);
assert_eq!(
result,
Some(ConfigRequirementsToml {
service.fetch().await,
Err(CloudRequirementsError::Network(
CloudRequirementsNetworkError::MissingContents
))
);
Ok(())
}
#[tokio::test]
async fn fetch_cloud_requirements_handles_empty_contents() -> Result<()> {
assert_eq!(parse_for_fetch(Some(" ")), Ok(None));
Ok(())
}
#[tokio::test]
async fn fetch_cloud_requirements_handles_invalid_toml() -> Result<()> {
assert!(matches!(
parse_for_fetch(Some("not = [")),
Err(CloudRequirementsUserError::InvalidToml { .. })
));
Ok(())
}
#[tokio::test]
async fn fetch_cloud_requirements_ignores_empty_requirements() -> Result<()> {
assert_eq!(parse_for_fetch(Some("# comment")), Ok(None));
Ok(())
}
#[tokio::test]
async fn fetch_cloud_requirements_parses_valid_toml() -> Result<()> {
assert_eq!(
parse_for_fetch(Some("allowed_approval_policies = [\"never\"]")),
Ok(Some(ConfigRequirementsToml {
allowed_approval_policies: Some(vec![AskForApproval::Never]),
allowed_sandbox_modes: None,
mcp_servers: None,
rules: None,
})
enforce_residency: None,
}))
);
Ok(())
}
#[tokio::test(start_paused = true)]
async fn fetch_cloud_requirements_times_out() {
let auth_manager = auth_manager_with_plan("enterprise");
async fn fetch_cloud_requirements_times_out() -> Result<()> {
let service = CloudRequirementsService::new(
auth_manager,
auth_manager_with_plan("enterprise")?,
Arc::new(PendingFetcher),
CLOUD_REQUIREMENTS_TIMEOUT,
);
let handle = tokio::spawn(async move { service.fetch_with_timeout().await });
tokio::time::advance(CLOUD_REQUIREMENTS_TIMEOUT + Duration::from_millis(1)).await;
let result = handle.await.expect("cloud requirements task");
assert!(result.is_none());
assert_eq!(
handle.await?,
Err(CloudRequirementsError::Network(
CloudRequirementsNetworkError::Timeout {
timeout_ms: CLOUD_REQUIREMENTS_TIMEOUT.as_millis() as u64,
}
))
);
Ok(())
}
}

View File

@@ -33,6 +33,7 @@ pub use crate::endpoint::responses_websocket::ResponsesWebsocketConnection;
pub use crate::error::ApiError;
pub use crate::provider::Provider;
pub use crate::provider::WireApi;
pub use crate::provider::is_azure_responses_wire_base_url;
pub use crate::requests::ChatRequest;
pub use crate::requests::ChatRequestBuilder;
pub use crate::requests::ResponsesRequest;

View File

@@ -95,16 +95,7 @@ impl Provider {
}
pub fn is_azure_responses_endpoint(&self) -> bool {
if self.wire != WireApi::Responses {
return false;
}
if self.name.eq_ignore_ascii_case("azure") {
return true;
}
self.base_url.to_ascii_lowercase().contains("openai.azure.")
|| matches_azure_responses_base_url(&self.base_url)
is_azure_responses_wire_base_url(self.wire.clone(), &self.name, Some(&self.base_url))
}
pub fn websocket_url_for_path(&self, path: &str) -> Result<Url, url::ParseError> {
@@ -121,6 +112,23 @@ impl Provider {
}
}
pub fn is_azure_responses_wire_base_url(wire: WireApi, name: &str, base_url: Option<&str>) -> bool {
if wire != WireApi::Responses {
return false;
}
if name.eq_ignore_ascii_case("azure") {
return true;
}
let Some(base_url) = base_url else {
return false;
};
let base = base_url.to_ascii_lowercase();
base.contains("openai.azure.") || matches_azure_responses_base_url(&base)
}
fn matches_azure_responses_base_url(base_url: &str) -> bool {
const AZURE_MARKERS: [&str; 5] = [
"cognitiveservices.azure.",
@@ -129,6 +137,54 @@ fn matches_azure_responses_base_url(base_url: &str) -> bool {
"azurefd.",
"windows.net/openai",
];
let base = base_url.to_ascii_lowercase();
AZURE_MARKERS.iter().any(|marker| base.contains(marker))
AZURE_MARKERS.iter().any(|marker| base_url.contains(marker))
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn detects_azure_responses_base_urls() {
let positive_cases = [
"https://foo.openai.azure.com/openai",
"https://foo.openai.azure.us/openai/deployments/bar",
"https://foo.cognitiveservices.azure.cn/openai",
"https://foo.aoai.azure.com/openai",
"https://foo.openai.azure-api.net/openai",
"https://foo.z01.azurefd.net/",
];
for base_url in positive_cases {
assert!(
is_azure_responses_wire_base_url(WireApi::Responses, "test", Some(base_url)),
"expected {base_url} to be detected as Azure"
);
}
assert!(is_azure_responses_wire_base_url(
WireApi::Responses,
"Azure",
Some("https://example.com")
));
let negative_cases = [
"https://api.openai.com/v1",
"https://example.com/openai",
"https://myproxy.azurewebsites.net/openai",
];
for base_url in negative_cases {
assert!(
!is_azure_responses_wire_base_url(WireApi::Responses, "test", Some(base_url)),
"expected {base_url} not to be detected as Azure"
);
}
assert!(!is_azure_responses_wire_base_url(
WireApi::Chat,
"Azure",
Some("https://foo.openai.azure.com/openai")
));
}
}

View File

@@ -39,11 +39,11 @@ codex-protocol = { workspace = true }
codex-rmcp-client = { workspace = true }
codex-state = { workspace = true }
codex-utils-absolute-path = { workspace = true }
codex-utils-home-dir = { workspace = true }
codex-utils-pty = { workspace = true }
codex-utils-readiness = { workspace = true }
codex-utils-string = { workspace = true }
codex-windows-sandbox = { package = "codex-windows-sandbox", path = "../windows-sandbox-rs" }
dirs = { workspace = true }
dunce = { workspace = true }
encoding_rs = { workspace = true }
env-flags = { workspace = true }

View File

@@ -635,6 +635,7 @@ impl Session {
per_turn_config.model_personality = session_configuration.personality;
per_turn_config.web_search_mode = Some(resolve_web_search_mode_for_turn(
per_turn_config.web_search_mode,
session_configuration.provider.is_azure_responses_endpoint(),
session_configuration.sandbox_policy.get(),
));
per_turn_config.features = config.features.clone();

View File

@@ -24,6 +24,7 @@ use crate::config_loader::ConfigRequirements;
use crate::config_loader::LoaderOverrides;
use crate::config_loader::McpServerIdentity;
use crate::config_loader::McpServerRequirement;
use crate::config_loader::ResidencyRequirement;
use crate::config_loader::Sourced;
use crate::config_loader::load_config_layers_state;
use crate::features::Feature;
@@ -57,7 +58,6 @@ use codex_protocol::openai_models::ReasoningEffort;
use codex_rmcp_client::OAuthCredentialsStoreMode;
use codex_utils_absolute_path::AbsolutePathBuf;
use codex_utils_absolute_path::AbsolutePathBufGuard;
use dirs::home_dir;
use schemars::JsonSchema;
use serde::Deserialize;
use serde::Serialize;
@@ -141,6 +141,11 @@ pub struct Config {
pub sandbox_policy: Constrained<SandboxPolicy>,
/// enforce_residency means web traffic cannot be routed outside of a
/// particular geography. HTTP clients should direct their requests
/// using backend-specific headers or URLs to enforce this.
pub enforce_residency: Constrained<Option<ResidencyRequirement>>,
/// True if the user passed in an override or set a value in config.toml
/// for either of approval_policy or sandbox_mode.
pub did_user_set_custom_approval_policy_or_sandbox_mode: bool,
@@ -367,7 +372,7 @@ pub struct ConfigBuilder {
cli_overrides: Option<Vec<(String, TomlValue)>>,
harness_overrides: Option<ConfigOverrides>,
loader_overrides: Option<LoaderOverrides>,
cloud_requirements: Option<CloudRequirementsLoader>,
cloud_requirements: CloudRequirementsLoader,
fallback_cwd: Option<PathBuf>,
}
@@ -393,7 +398,7 @@ impl ConfigBuilder {
}
pub fn cloud_requirements(mut self, cloud_requirements: CloudRequirementsLoader) -> Self {
self.cloud_requirements = Some(cloud_requirements);
self.cloud_requirements = cloud_requirements;
self
}
@@ -524,7 +529,7 @@ pub async fn load_config_as_toml_with_cli_overrides(
Some(cwd.clone()),
&cli_overrides,
LoaderOverrides::default(),
None,
CloudRequirementsLoader::default(),
)
.await?;
@@ -628,7 +633,7 @@ pub async fn load_global_mcp_servers(
cwd,
&cli_overrides,
LoaderOverrides::default(),
None,
CloudRequirementsLoader::default(),
)
.await?;
let merged_toml = config_layer_stack.effective_config();
@@ -1246,11 +1251,15 @@ fn resolve_web_search_mode(
pub(crate) fn resolve_web_search_mode_for_turn(
explicit_mode: Option<WebSearchMode>,
is_azure_responses_endpoint: bool,
sandbox_policy: &SandboxPolicy,
) -> WebSearchMode {
if let Some(mode) = explicit_mode {
return mode;
}
if is_azure_responses_endpoint {
return WebSearchMode::Disabled;
}
if matches!(sandbox_policy, SandboxPolicy::DangerFullAccess) {
WebSearchMode::Live
} else {
@@ -1513,6 +1522,7 @@ impl Config {
sandbox_policy: mut constrained_sandbox_policy,
mcp_servers,
exec_policy: _,
enforce_residency,
} = requirements;
constrained_approval_policy
@@ -1535,6 +1545,7 @@ impl Config {
cwd: resolved_cwd,
approval_policy: constrained_approval_policy,
sandbox_policy: constrained_sandbox_policy,
enforce_residency,
did_user_set_custom_approval_policy_or_sandbox_mode,
forced_auto_mode_downgraded_on_windows,
shell_environment_policy,
@@ -1749,27 +1760,12 @@ fn toml_uses_deprecated_instructions_file(value: &TomlValue) -> bool {
/// specified by the `CODEX_HOME` environment variable. If not set, defaults to
/// `~/.codex`.
///
/// - If `CODEX_HOME` is set, the value will be canonicalized and this
/// function will Err if the path does not exist.
/// - If `CODEX_HOME` is set, the value must exist and be a directory. The
/// value will be canonicalized and this function will Err otherwise.
/// - If `CODEX_HOME` is not set, this function does not verify that the
/// directory exists.
pub fn find_codex_home() -> std::io::Result<PathBuf> {
// Honor the `CODEX_HOME` environment variable when it is set to allow users
// (and tests) to override the default location.
if let Ok(val) = std::env::var("CODEX_HOME")
&& !val.is_empty()
{
return PathBuf::from(val).canonicalize();
}
let mut p = home_dir().ok_or_else(|| {
std::io::Error::new(
std::io::ErrorKind::NotFound,
"Could not find home directory",
)
})?;
p.push(".codex");
Ok(p)
codex_utils_home_dir::find_codex_home()
}
/// Returns the path to the folder where Codex logs are stored. Does not verify
@@ -2347,14 +2343,14 @@ trust_level = "trusted"
#[test]
fn web_search_mode_for_turn_defaults_to_cached_when_unset() {
let mode = resolve_web_search_mode_for_turn(None, &SandboxPolicy::ReadOnly);
let mode = resolve_web_search_mode_for_turn(None, false, &SandboxPolicy::ReadOnly);
assert_eq!(mode, WebSearchMode::Cached);
}
#[test]
fn web_search_mode_for_turn_defaults_to_live_for_danger_full_access() {
let mode = resolve_web_search_mode_for_turn(None, &SandboxPolicy::DangerFullAccess);
let mode = resolve_web_search_mode_for_turn(None, false, &SandboxPolicy::DangerFullAccess);
assert_eq!(mode, WebSearchMode::Live);
}
@@ -2363,12 +2359,20 @@ trust_level = "trusted"
fn web_search_mode_for_turn_prefers_explicit_value() {
let mode = resolve_web_search_mode_for_turn(
Some(WebSearchMode::Cached),
false,
&SandboxPolicy::DangerFullAccess,
);
assert_eq!(mode, WebSearchMode::Cached);
}
#[test]
fn web_search_mode_for_turn_disables_for_azure_responses_endpoint() {
let mode = resolve_web_search_mode_for_turn(None, true, &SandboxPolicy::DangerFullAccess);
assert_eq!(mode, WebSearchMode::Disabled);
}
#[test]
fn profile_legacy_toggles_override_base() -> std::io::Result<()> {
let codex_home = TempDir::new()?;
@@ -2631,9 +2635,14 @@ profile = "project"
};
let cwd = AbsolutePathBuf::try_from(codex_home.path())?;
let config_layer_stack =
load_config_layers_state(codex_home.path(), Some(cwd), &Vec::new(), overrides, None)
.await?;
let config_layer_stack = load_config_layers_state(
codex_home.path(),
Some(cwd),
&Vec::new(),
overrides,
CloudRequirementsLoader::default(),
)
.await?;
let cfg = deserialize_config_toml_with_base(
config_layer_stack.effective_config(),
codex_home.path(),
@@ -2760,7 +2769,7 @@ profile = "project"
Some(cwd),
&[("model".to_string(), TomlValue::String("cli".to_string()))],
overrides,
None,
CloudRequirementsLoader::default(),
)
.await?;
@@ -3770,6 +3779,7 @@ model_verbosity = "high"
model_provider: fixture.openai_provider.clone(),
approval_policy: Constrained::allow_any(AskForApproval::Never),
sandbox_policy: Constrained::allow_any(SandboxPolicy::new_read_only_policy()),
enforce_residency: Constrained::allow_any(None),
did_user_set_custom_approval_policy_or_sandbox_mode: true,
forced_auto_mode_downgraded_on_windows: false,
shell_environment_policy: ShellEnvironmentPolicy::default(),
@@ -3854,6 +3864,7 @@ model_verbosity = "high"
model_provider: fixture.openai_chat_completions_provider.clone(),
approval_policy: Constrained::allow_any(AskForApproval::UnlessTrusted),
sandbox_policy: Constrained::allow_any(SandboxPolicy::new_read_only_policy()),
enforce_residency: Constrained::allow_any(None),
did_user_set_custom_approval_policy_or_sandbox_mode: true,
forced_auto_mode_downgraded_on_windows: false,
shell_environment_policy: ShellEnvironmentPolicy::default(),
@@ -3953,6 +3964,7 @@ model_verbosity = "high"
model_provider: fixture.openai_provider.clone(),
approval_policy: Constrained::allow_any(AskForApproval::OnFailure),
sandbox_policy: Constrained::allow_any(SandboxPolicy::new_read_only_policy()),
enforce_residency: Constrained::allow_any(None),
did_user_set_custom_approval_policy_or_sandbox_mode: true,
forced_auto_mode_downgraded_on_windows: false,
shell_environment_policy: ShellEnvironmentPolicy::default(),
@@ -4038,6 +4050,7 @@ model_verbosity = "high"
model_provider: fixture.openai_provider.clone(),
approval_policy: Constrained::allow_any(AskForApproval::OnFailure),
sandbox_policy: Constrained::allow_any(SandboxPolicy::new_read_only_policy()),
enforce_residency: Constrained::allow_any(None),
did_user_set_custom_approval_policy_or_sandbox_mode: true,
forced_auto_mode_downgraded_on_windows: false,
shell_environment_policy: ShellEnvironmentPolicy::default(),

View File

@@ -2,6 +2,7 @@ use super::CONFIG_TOML_FILE;
use super::ConfigToml;
use crate::config::edit::ConfigEdit;
use crate::config::edit::ConfigEditsBuilder;
use crate::config_loader::CloudRequirementsLoader;
use crate::config_loader::ConfigLayerEntry;
use crate::config_loader::ConfigLayerStack;
use crate::config_loader::ConfigLayerStackOrdering;
@@ -109,6 +110,7 @@ pub struct ConfigService {
codex_home: PathBuf,
cli_overrides: Vec<(String, TomlValue)>,
loader_overrides: LoaderOverrides,
cloud_requirements: CloudRequirementsLoader,
}
impl ConfigService {
@@ -116,11 +118,13 @@ impl ConfigService {
codex_home: PathBuf,
cli_overrides: Vec<(String, TomlValue)>,
loader_overrides: LoaderOverrides,
cloud_requirements: CloudRequirementsLoader,
) -> Self {
Self {
codex_home,
cli_overrides,
loader_overrides,
cloud_requirements,
}
}
@@ -129,6 +133,7 @@ impl ConfigService {
codex_home,
cli_overrides: Vec::new(),
loader_overrides: LoaderOverrides::default(),
cloud_requirements: CloudRequirementsLoader::default(),
}
}
@@ -146,6 +151,7 @@ impl ConfigService {
.cli_overrides(self.cli_overrides.clone())
.loader_overrides(self.loader_overrides.clone())
.fallback_cwd(Some(cwd.to_path_buf()))
.cloud_requirements(self.cloud_requirements.clone())
.build()
.await
.map_err(|err| {
@@ -376,7 +382,7 @@ impl ConfigService {
cwd,
&self.cli_overrides,
self.loader_overrides.clone(),
None,
self.cloud_requirements.clone(),
)
.await
}
@@ -814,6 +820,7 @@ remote_compaction = true
managed_preferences_base64: None,
macos_managed_config_requirements_base64: None,
},
CloudRequirementsLoader::default(),
);
let response = service
@@ -896,6 +903,7 @@ remote_compaction = true
managed_preferences_base64: None,
macos_managed_config_requirements_base64: None,
},
CloudRequirementsLoader::default(),
);
let result = service
@@ -1000,6 +1008,7 @@ remote_compaction = true
managed_preferences_base64: None,
macos_managed_config_requirements_base64: None,
},
CloudRequirementsLoader::default(),
);
let error = service
@@ -1048,6 +1057,7 @@ remote_compaction = true
managed_preferences_base64: None,
macos_managed_config_requirements_base64: None,
},
CloudRequirementsLoader::default(),
);
let response = service
@@ -1095,6 +1105,7 @@ remote_compaction = true
managed_preferences_base64: None,
macos_managed_config_requirements_base64: None,
},
CloudRequirementsLoader::default(),
);
let result = service

View File

@@ -4,25 +4,29 @@ use futures::future::FutureExt;
use futures::future::Shared;
use std::fmt;
use std::future::Future;
use std::io;
use std::sync::Arc;
#[derive(Clone)]
pub struct CloudRequirementsLoader {
// TODO(gt): This should return a Result once we can fail-closed.
fut: Shared<BoxFuture<'static, Option<ConfigRequirementsToml>>>,
fut: Shared<BoxFuture<'static, Arc<io::Result<Option<ConfigRequirementsToml>>>>>,
}
impl CloudRequirementsLoader {
pub fn new<F>(fut: F) -> Self
where
F: Future<Output = Option<ConfigRequirementsToml>> + Send + 'static,
F: Future<Output = io::Result<Option<ConfigRequirementsToml>>> + Send + 'static,
{
Self {
fut: fut.boxed().shared(),
fut: fut.map(Arc::new).boxed().shared(),
}
}
pub async fn get(&self) -> Option<ConfigRequirementsToml> {
self.fut.clone().await
pub async fn get(&self) -> io::Result<Option<ConfigRequirementsToml>> {
match self.fut.clone().await.as_ref() {
Ok(requirements) => Ok(requirements.clone()),
Err(err) => Err(io::Error::new(err.kind(), err.to_string())),
}
}
}
@@ -32,6 +36,12 @@ impl fmt::Debug for CloudRequirementsLoader {
}
}
impl Default for CloudRequirementsLoader {
fn default() -> Self {
Self::new(async { Ok(None) })
}
}
#[cfg(test)]
mod tests {
use super::*;
@@ -46,11 +56,11 @@ mod tests {
let counter_clone = Arc::clone(&counter);
let loader = CloudRequirementsLoader::new(async move {
counter_clone.fetch_add(1, Ordering::SeqCst);
Some(ConfigRequirementsToml::default())
Ok(Some(ConfigRequirementsToml::default()))
});
let (first, second) = tokio::join!(loader.get(), loader.get());
assert_eq!(first, second);
assert_eq!(first.as_ref().ok(), second.as_ref().ok());
assert_eq!(counter.load(Ordering::SeqCst), 1);
}
}

View File

@@ -52,6 +52,7 @@ pub struct ConfigRequirements {
pub sandbox_policy: Constrained<SandboxPolicy>,
pub mcp_servers: Option<Sourced<BTreeMap<String, McpServerRequirement>>>,
pub(crate) exec_policy: Option<Sourced<RequirementsExecPolicy>>,
pub enforce_residency: Constrained<Option<ResidencyRequirement>>,
}
impl Default for ConfigRequirements {
@@ -61,6 +62,7 @@ impl Default for ConfigRequirements {
sandbox_policy: Constrained::allow_any(SandboxPolicy::ReadOnly),
mcp_servers: None,
exec_policy: None,
enforce_residency: Constrained::allow_any(None),
}
}
}
@@ -84,6 +86,7 @@ pub struct ConfigRequirementsToml {
pub allowed_sandbox_modes: Option<Vec<SandboxModeRequirement>>,
pub mcp_servers: Option<BTreeMap<String, McpServerRequirement>>,
pub rules: Option<RequirementsExecPolicyToml>,
pub enforce_residency: Option<ResidencyRequirement>,
}
/// Value paired with the requirement source it came from, for better error
@@ -114,6 +117,7 @@ pub struct ConfigRequirementsWithSources {
pub allowed_sandbox_modes: Option<Sourced<Vec<SandboxModeRequirement>>>,
pub mcp_servers: Option<Sourced<BTreeMap<String, McpServerRequirement>>>,
pub rules: Option<Sourced<RequirementsExecPolicyToml>>,
pub enforce_residency: Option<Sourced<ResidencyRequirement>>,
}
impl ConfigRequirementsWithSources {
@@ -146,6 +150,7 @@ impl ConfigRequirementsWithSources {
allowed_sandbox_modes,
mcp_servers,
rules,
enforce_residency,
}
);
}
@@ -156,12 +161,14 @@ impl ConfigRequirementsWithSources {
allowed_sandbox_modes,
mcp_servers,
rules,
enforce_residency,
} = self;
ConfigRequirementsToml {
allowed_approval_policies: allowed_approval_policies.map(|sourced| sourced.value),
allowed_sandbox_modes: allowed_sandbox_modes.map(|sourced| sourced.value),
mcp_servers: mcp_servers.map(|sourced| sourced.value),
rules: rules.map(|sourced| sourced.value),
enforce_residency: enforce_residency.map(|sourced| sourced.value),
}
}
}
@@ -193,12 +200,19 @@ impl From<SandboxMode> for SandboxModeRequirement {
}
}
#[derive(Deserialize, Debug, Clone, Copy, PartialEq, Eq)]
#[serde(rename_all = "lowercase")]
pub enum ResidencyRequirement {
Us,
}
impl ConfigRequirementsToml {
pub fn is_empty(&self) -> bool {
self.allowed_approval_policies.is_none()
&& self.allowed_sandbox_modes.is_none()
&& self.mcp_servers.is_none()
&& self.rules.is_none()
&& self.enforce_residency.is_none()
}
}
@@ -211,6 +225,7 @@ impl TryFrom<ConfigRequirementsWithSources> for ConfigRequirements {
allowed_sandbox_modes,
mcp_servers,
rules,
enforce_residency,
} = toml;
let approval_policy: Constrained<AskForApproval> = match allowed_approval_policies {
@@ -298,11 +313,33 @@ impl TryFrom<ConfigRequirementsWithSources> for ConfigRequirements {
None => None,
};
let enforce_residency: Constrained<Option<ResidencyRequirement>> = match enforce_residency {
Some(Sourced {
value: residency,
source: requirement_source,
}) => {
let required = Some(residency);
Constrained::new(required, move |candidate| {
if candidate == &required {
Ok(())
} else {
Err(ConstraintError::InvalidValue {
field_name: "enforce_residency",
candidate: format!("{candidate:?}"),
allowed: format!("{required:?}"),
requirement_source: requirement_source.clone(),
})
}
})?
}
None => Constrained::allow_any(None),
};
Ok(ConfigRequirements {
approval_policy,
sandbox_policy,
mcp_servers,
exec_policy,
enforce_residency,
})
}
}
@@ -329,6 +366,7 @@ mod tests {
allowed_sandbox_modes,
mcp_servers,
rules,
enforce_residency,
} = toml;
ConfigRequirementsWithSources {
allowed_approval_policies: allowed_approval_policies
@@ -337,6 +375,8 @@ mod tests {
.map(|value| Sourced::new(value, RequirementSource::Unknown)),
mcp_servers: mcp_servers.map(|value| Sourced::new(value, RequirementSource::Unknown)),
rules: rules.map(|value| Sourced::new(value, RequirementSource::Unknown)),
enforce_residency: enforce_residency
.map(|value| Sourced::new(value, RequirementSource::Unknown)),
}
}
@@ -350,6 +390,8 @@ mod tests {
SandboxModeRequirement::WorkspaceWrite,
SandboxModeRequirement::DangerFullAccess,
];
let enforce_residency = ResidencyRequirement::Us;
let enforce_source = source.clone();
// Intentionally constructed without `..Default::default()` so adding a new field to
// `ConfigRequirementsToml` forces this test to be updated.
@@ -358,6 +400,7 @@ mod tests {
allowed_sandbox_modes: Some(allowed_sandbox_modes.clone()),
mcp_servers: None,
rules: None,
enforce_residency: Some(enforce_residency),
};
target.merge_unset_fields(source.clone(), other);
@@ -372,6 +415,7 @@ mod tests {
allowed_sandbox_modes: Some(Sourced::new(allowed_sandbox_modes, source)),
mcp_servers: None,
rules: None,
enforce_residency: Some(Sourced::new(enforce_residency, enforce_source)),
}
);
}
@@ -401,6 +445,7 @@ mod tests {
allowed_sandbox_modes: None,
mcp_servers: None,
rules: None,
enforce_residency: None,
}
);
Ok(())
@@ -438,6 +483,7 @@ mod tests {
allowed_sandbox_modes: None,
mcp_servers: None,
rules: None,
enforce_residency: None,
}
);
Ok(())

View File

@@ -37,6 +37,7 @@ pub use config_requirements::ConfigRequirementsToml;
pub use config_requirements::McpServerIdentity;
pub use config_requirements::McpServerRequirement;
pub use config_requirements::RequirementSource;
pub use config_requirements::ResidencyRequirement;
pub use config_requirements::SandboxModeRequirement;
pub use config_requirements::Sourced;
pub use diagnostics::ConfigError;
@@ -101,7 +102,7 @@ pub async fn load_config_layers_state(
cwd: Option<AbsolutePathBuf>,
cli_overrides: &[(String, TomlValue)],
overrides: LoaderOverrides,
cloud_requirements: Option<CloudRequirementsLoader>, // TODO(gt): Once exec and app-server are wired up, we can remove the option.
cloud_requirements: CloudRequirementsLoader,
) -> io::Result<ConfigLayerStack> {
let mut config_requirements_toml = ConfigRequirementsWithSources::default();
@@ -114,9 +115,7 @@ pub async fn load_config_layers_state(
)
.await?;
if let Some(loader) = cloud_requirements
&& let Some(requirements) = loader.get().await
{
if let Some(requirements) = cloud_requirements.get().await? {
config_requirements_toml
.merge_unset_fields(RequirementSource::CloudRequirements, requirements);
}

View File

@@ -69,7 +69,7 @@ async fn returns_config_error_for_invalid_user_config_toml() {
Some(cwd),
&[] as &[(String, TomlValue)],
LoaderOverrides::default(),
None,
CloudRequirementsLoader::default(),
)
.await
.expect_err("expected error");
@@ -99,7 +99,7 @@ async fn returns_config_error_for_invalid_managed_config_toml() {
Some(cwd),
&[] as &[(String, TomlValue)],
overrides,
None,
CloudRequirementsLoader::default(),
)
.await
.expect_err("expected error");
@@ -188,7 +188,7 @@ extra = true
Some(cwd),
&[] as &[(String, TomlValue)],
overrides,
None,
CloudRequirementsLoader::default(),
)
.await
.expect("load config");
@@ -225,7 +225,7 @@ async fn returns_empty_when_all_layers_missing() {
Some(cwd),
&[] as &[(String, TomlValue)],
overrides,
None,
CloudRequirementsLoader::default(),
)
.await
.expect("load layers");
@@ -323,7 +323,7 @@ flag = false
Some(cwd),
&[] as &[(String, TomlValue)],
overrides,
None,
CloudRequirementsLoader::default(),
)
.await
.expect("load config");
@@ -363,7 +363,7 @@ allowed_sandbox_modes = ["read-only"]
),
),
},
None,
CloudRequirementsLoader::default(),
)
.await?;
@@ -424,7 +424,7 @@ allowed_approval_policies = ["never"]
),
),
},
None,
CloudRequirementsLoader::default(),
)
.await?;
@@ -451,6 +451,7 @@ async fn load_requirements_toml_produces_expected_constraints() -> anyhow::Resul
&requirements_file,
r#"
allowed_approval_policies = ["never", "on-request"]
enforce_residency = "us"
"#,
)
.await?;
@@ -465,7 +466,6 @@ allowed_approval_policies = ["never", "on-request"]
.cloned(),
Some(vec![AskForApproval::Never, AskForApproval::OnRequest])
);
let config_requirements: ConfigRequirements = config_requirements_toml.try_into()?;
assert_eq!(
config_requirements.approval_policy.value(),
@@ -480,6 +480,10 @@ allowed_approval_policies = ["never", "on-request"]
.can_set(&AskForApproval::OnFailure)
.is_err()
);
assert_eq!(
config_requirements.enforce_residency.value(),
Some(crate::config_loader::ResidencyRequirement::Us)
);
Ok(())
}
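Read alongside the test above, a small stand-alone sketch of the parsing step; the struct and enum here are hypothetical stand-ins (only the TOML keys, the "us" value, and the single-variant residency enum come from this diff), and it assumes toml plus serde with the derive feature as dependencies.

use serde::Deserialize;

// Hypothetical stand-ins for ConfigRequirementsToml / ResidencyRequirement.
#[derive(Debug, Deserialize, PartialEq)]
#[serde(rename_all = "lowercase")]
enum Residency {
    Us,
}

#[derive(Debug, Deserialize)]
struct Requirements {
    allowed_approval_policies: Option<Vec<String>>,
    enforce_residency: Option<Residency>,
}

fn main() {
    let raw = r#"
allowed_approval_policies = ["never", "on-request"]
enforce_residency = "us"
"#;
    let parsed: Requirements = toml::from_str(raw).expect("parse requirements.toml");
    assert_eq!(parsed.enforce_residency, Some(Residency::Us));
    assert_eq!(
        parsed.allowed_approval_policies,
        Some(vec!["never".to_string(), "on-request".to_string()])
    );
}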
@@ -503,6 +507,7 @@ allowed_approval_policies = ["on-request"]
allowed_sandbox_modes: None,
mcp_servers: None,
rules: None,
enforce_residency: None,
},
);
load_requirements_toml(&mut config_requirements_toml, &requirements_file).await?;
@@ -537,16 +542,17 @@ async fn load_config_layers_includes_cloud_requirements() -> anyhow::Result<()>
allowed_sandbox_modes: None,
mcp_servers: None,
rules: None,
enforce_residency: None,
};
let expected = requirements.clone();
let cloud_requirements = CloudRequirementsLoader::new(async move { Some(requirements) });
let cloud_requirements = CloudRequirementsLoader::new(async move { Ok(Some(requirements)) });
let layers = load_config_layers_state(
&codex_home,
Some(cwd),
&[] as &[(String, TomlValue)],
LoaderOverrides::default(),
Some(cloud_requirements),
cloud_requirements,
)
.await?;
@@ -599,7 +605,7 @@ async fn project_layers_prefer_closest_cwd() -> std::io::Result<()> {
Some(cwd),
&[] as &[(String, TomlValue)],
LoaderOverrides::default(),
None,
CloudRequirementsLoader::default(),
)
.await?;
@@ -731,7 +737,7 @@ async fn project_layer_is_added_when_dot_codex_exists_without_config_toml() -> s
Some(cwd),
&[] as &[(String, TomlValue)],
LoaderOverrides::default(),
None,
CloudRequirementsLoader::default(),
)
.await?;
@@ -769,7 +775,7 @@ async fn codex_home_is_not_loaded_as_project_layer_from_home_dir() -> std::io::R
Some(cwd),
&[] as &[(String, TomlValue)],
LoaderOverrides::default(),
None,
CloudRequirementsLoader::default(),
)
.await?;
@@ -819,7 +825,7 @@ async fn codex_home_within_project_tree_is_not_double_loaded() -> std::io::Resul
Some(cwd),
&[] as &[(String, TomlValue)],
LoaderOverrides::default(),
None,
CloudRequirementsLoader::default(),
)
.await?;
@@ -888,7 +894,7 @@ async fn project_layers_disabled_when_untrusted_or_unknown() -> std::io::Result<
Some(cwd.clone()),
&[] as &[(String, TomlValue)],
LoaderOverrides::default(),
None,
CloudRequirementsLoader::default(),
)
.await?;
let project_layers_untrusted: Vec<_> = layers_untrusted
@@ -926,7 +932,7 @@ async fn project_layers_disabled_when_untrusted_or_unknown() -> std::io::Result<
Some(cwd),
&[] as &[(String, TomlValue)],
LoaderOverrides::default(),
None,
CloudRequirementsLoader::default(),
)
.await?;
let project_layers_unknown: Vec<_> = layers_unknown
@@ -987,7 +993,7 @@ async fn invalid_project_config_ignored_when_untrusted_or_unknown() -> std::io::
Some(cwd.clone()),
&[] as &[(String, TomlValue)],
LoaderOverrides::default(),
None,
CloudRequirementsLoader::default(),
)
.await?;
let project_layers: Vec<_> = layers
@@ -1043,7 +1049,7 @@ async fn cli_overrides_with_relative_paths_do_not_break_trust_check() -> std::io
Some(cwd),
&cli_overrides,
LoaderOverrides::default(),
None,
CloudRequirementsLoader::default(),
)
.await?;
@@ -1085,7 +1091,7 @@ async fn project_root_markers_supports_alternate_markers() -> std::io::Result<()
Some(cwd),
&[] as &[(String, TomlValue)],
LoaderOverrides::default(),
None,
CloudRequirementsLoader::default(),
)
.await?;

View File

@@ -266,7 +266,7 @@ impl ContextManager {
}
fn process_item(&self, item: &ResponseItem, policy: TruncationPolicy) -> ResponseItem {
let policy_with_serialization_budget = policy.mul(1.2);
let policy_with_serialization_budget = policy * 1.2;
match item {
ResponseItem::FunctionCallOutput { call_id, output } => {
let truncated =

View File

@@ -1,6 +1,8 @@
use crate::config_loader::ResidencyRequirement;
use crate::spawn::CODEX_SANDBOX_ENV_VAR;
use codex_client::CodexHttpClient;
pub use codex_client::CodexRequestBuilder;
use reqwest::header::HeaderMap;
use reqwest::header::HeaderValue;
use std::sync::LazyLock;
use std::sync::Mutex;
@@ -24,6 +26,7 @@ use std::sync::RwLock;
pub static USER_AGENT_SUFFIX: LazyLock<Mutex<Option<String>>> = LazyLock::new(|| Mutex::new(None));
pub const DEFAULT_ORIGINATOR: &str = "codex_cli_rs";
pub const CODEX_INTERNAL_ORIGINATOR_OVERRIDE_ENV_VAR: &str = "CODEX_INTERNAL_ORIGINATOR_OVERRIDE";
pub const RESIDENCY_HEADER_NAME: &str = "x-openai-internal-codex-residency";
#[derive(Debug, Clone)]
pub struct Originator {
@@ -31,6 +34,8 @@ pub struct Originator {
pub header_value: HeaderValue,
}
static ORIGINATOR: LazyLock<RwLock<Option<Originator>>> = LazyLock::new(|| RwLock::new(None));
static REQUIREMENTS_RESIDENCY: LazyLock<RwLock<Option<ResidencyRequirement>>> =
LazyLock::new(|| RwLock::new(None));
#[derive(Debug)]
pub enum SetOriginatorError {
@@ -74,6 +79,14 @@ pub fn set_default_originator(value: String) -> Result<(), SetOriginatorError> {
Ok(())
}
pub fn set_default_client_residency_requirement(enforce_residency: Option<ResidencyRequirement>) {
let Ok(mut guard) = REQUIREMENTS_RESIDENCY.write() else {
tracing::warn!("Failed to acquire requirements residency lock");
return;
};
*guard = enforce_residency;
}
pub fn originator() -> Originator {
if let Ok(guard) = ORIGINATOR.read()
&& let Some(originator) = guard.as_ref()
@@ -166,10 +179,17 @@ pub fn create_client() -> CodexHttpClient {
}
pub fn build_reqwest_client() -> reqwest::Client {
use reqwest::header::HeaderMap;
let mut headers = HeaderMap::new();
headers.insert("originator", originator().header_value);
if let Ok(guard) = REQUIREMENTS_RESIDENCY.read()
&& let Some(requirement) = guard.as_ref()
&& !headers.contains_key(RESIDENCY_HEADER_NAME)
{
let value = match requirement {
ResidencyRequirement::Us => HeaderValue::from_static("us"),
};
headers.insert(RESIDENCY_HEADER_NAME, value);
}
let ua = get_codex_user_agent();
let mut builder = reqwest::Client::builder()
@@ -214,6 +234,8 @@ mod tests {
async fn test_create_client_sets_default_headers() {
skip_if_no_network!();
set_default_client_residency_requirement(Some(ResidencyRequirement::Us));
use wiremock::Mock;
use wiremock::MockServer;
use wiremock::ResponseTemplate;
@@ -256,6 +278,13 @@ mod tests {
.get("user-agent")
.expect("user-agent header missing");
assert_eq!(ua_header.to_str().unwrap(), expected_ua);
let residency_header = headers
.get(RESIDENCY_HEADER_NAME)
.expect("residency header missing");
assert_eq!(residency_header.to_str().unwrap(), "us");
set_default_client_residency_requirement(None);
}
#[test]
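A condensed, self-contained sketch of the residency-header plumbing added above, using simplified stand-ins for the statics; the header name and the "us" value are the ones from this diff, reqwest is assumed as a dependency, and everything else is illustrative.

use reqwest::header::{HeaderMap, HeaderValue};
use std::sync::{LazyLock, RwLock};

const RESIDENCY_HEADER_NAME: &str = "x-openai-internal-codex-residency";

#[derive(Clone, Copy)]
enum ResidencyRequirement {
    Us,
}

static REQUIREMENTS_RESIDENCY: LazyLock<RwLock<Option<ResidencyRequirement>>> =
    LazyLock::new(|| RwLock::new(None));

fn set_residency(requirement: Option<ResidencyRequirement>) {
    if let Ok(mut guard) = REQUIREMENTS_RESIDENCY.write() {
        *guard = requirement;
    }
}

fn default_headers() -> HeaderMap {
    let mut headers = HeaderMap::new();
    // Only attach the residency header when a requirement was registered and the
    // header has not already been set by something else.
    if !headers.contains_key(RESIDENCY_HEADER_NAME) {
        if let Ok(guard) = REQUIREMENTS_RESIDENCY.read() {
            if let Some(ResidencyRequirement::Us) = *guard {
                headers.insert(RESIDENCY_HEADER_NAME, HeaderValue::from_static("us"));
            }
        }
    }
    headers
}

fn main() {
    set_residency(Some(ResidencyRequirement::Us));
    let headers = default_headers();
    let value = headers
        .get(RESIDENCY_HEADER_NAME)
        .expect("residency header missing");
    assert_eq!(value.to_str().unwrap(), "us");
}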

View File

@@ -9,6 +9,7 @@ use crate::auth::AuthMode;
use crate::error::EnvVarError;
use codex_api::Provider as ApiProvider;
use codex_api::WireApi as ApiWireApi;
use codex_api::is_azure_responses_wire_base_url;
use codex_api::provider::RetryConfig as ApiRetryConfig;
use http::HeaderMap;
use http::header::HeaderName;
@@ -170,6 +171,15 @@ impl ModelProviderInfo {
})
}
pub(crate) fn is_azure_responses_endpoint(&self) -> bool {
let wire = match self.wire_api {
WireApi::Responses => ApiWireApi::Responses,
WireApi::Chat => ApiWireApi::Chat,
};
is_azure_responses_wire_base_url(wire, &self.name, self.base_url.as_deref())
}
/// If `env_key` is Some, returns the API key for this provider if present
/// (and non-empty) in the environment. If `env_key` is required but
/// cannot be found, returns an error.
@@ -432,87 +442,4 @@ env_http_headers = { "X-Example-Env-Header" = "EXAMPLE_ENV_VAR" }
let provider: ModelProviderInfo = toml::from_str(azure_provider_toml).unwrap();
assert_eq!(expected_provider, provider);
}
#[test]
fn detects_azure_responses_base_urls() {
let positive_cases = [
"https://foo.openai.azure.com/openai",
"https://foo.openai.azure.us/openai/deployments/bar",
"https://foo.cognitiveservices.azure.cn/openai",
"https://foo.aoai.azure.com/openai",
"https://foo.openai.azure-api.net/openai",
"https://foo.z01.azurefd.net/",
];
for base_url in positive_cases {
let provider = ModelProviderInfo {
name: "test".into(),
base_url: Some(base_url.into()),
env_key: None,
env_key_instructions: None,
experimental_bearer_token: None,
wire_api: WireApi::Responses,
query_params: None,
http_headers: None,
env_http_headers: None,
request_max_retries: None,
stream_max_retries: None,
stream_idle_timeout_ms: None,
requires_openai_auth: false,
supports_websockets: false,
};
let api = provider.to_api_provider(None).expect("api provider");
assert!(
api.is_azure_responses_endpoint(),
"expected {base_url} to be detected as Azure"
);
}
let named_provider = ModelProviderInfo {
name: "Azure".into(),
base_url: Some("https://example.com".into()),
env_key: None,
env_key_instructions: None,
experimental_bearer_token: None,
wire_api: WireApi::Responses,
query_params: None,
http_headers: None,
env_http_headers: None,
request_max_retries: None,
stream_max_retries: None,
stream_idle_timeout_ms: None,
requires_openai_auth: false,
supports_websockets: false,
};
let named_api = named_provider.to_api_provider(None).expect("api provider");
assert!(named_api.is_azure_responses_endpoint());
let negative_cases = [
"https://api.openai.com/v1",
"https://example.com/openai",
"https://myproxy.azurewebsites.net/openai",
];
for base_url in negative_cases {
let provider = ModelProviderInfo {
name: "test".into(),
base_url: Some(base_url.into()),
env_key: None,
env_key_instructions: None,
experimental_bearer_token: None,
wire_api: WireApi::Responses,
query_params: None,
http_headers: None,
env_http_headers: None,
request_max_retries: None,
stream_max_retries: None,
stream_idle_timeout_ms: None,
requires_openai_auth: false,
supports_websockets: false,
};
let api = provider.to_api_provider(None).expect("api provider");
assert!(
!api.is_azure_responses_endpoint(),
"expected {base_url} not to be detected as Azure"
);
}
}
}
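The removed test moves this coverage behind is_azure_responses_wire_base_url in codex-api. As a rough illustration only, a heuristic reconstructed from the positive and negative fixtures above might look like the sketch below; the real check also considers the wire API and may differ in its details.

/// Rough approximation inferred from the removed test fixtures, not the actual
/// codex-api implementation.
fn looks_like_azure_responses(provider_name: &str, base_url: Option<&str>) -> bool {
    if provider_name.eq_ignore_ascii_case("azure") {
        return true;
    }
    let Some(url) = base_url else {
        return false;
    };
    let azure_suffixes = [
        ".openai.azure.com",
        ".openai.azure.us",
        ".cognitiveservices.azure.cn",
        ".aoai.azure.com",
        ".openai.azure-api.net",
        ".azurefd.net",
    ];
    // Host component of "https://host/path...".
    url.split('/')
        .nth(2)
        .map(|host| azure_suffixes.iter().any(|suffix| host.ends_with(suffix)))
        .unwrap_or(false)
}

fn main() {
    assert!(looks_like_azure_responses(
        "test",
        Some("https://foo.openai.azure.com/openai")
    ));
    assert!(looks_like_azure_responses("Azure", Some("https://example.com")));
    assert!(!looks_like_azure_responses(
        "test",
        Some("https://myproxy.azurewebsites.net/openai")
    ));
}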

View File

@@ -10,6 +10,7 @@ use tracing::warn;
use crate::config::Config;
use crate::config::types::SkillsConfig;
use crate::config_loader::CloudRequirementsLoader;
use crate::config_loader::LoaderOverrides;
use crate::config_loader::load_config_layers_state;
use crate::skills::SkillLoadOutcome;
@@ -88,7 +89,7 @@ impl SkillsManager {
Some(cwd_abs),
&cli_overrides,
LoaderOverrides::default(),
None,
CloudRequirementsLoader::default(),
)
.await
{

View File

@@ -34,18 +34,6 @@ impl From<TruncationPolicyConfig> for TruncationPolicy {
}
impl TruncationPolicy {
/// Scale the underlying budget by `multiplier`, rounding up to avoid under-budgeting.
pub fn mul(self, multiplier: f64) -> Self {
match self {
TruncationPolicy::Bytes(bytes) => {
TruncationPolicy::Bytes((bytes as f64 * multiplier).ceil() as usize)
}
TruncationPolicy::Tokens(tokens) => {
TruncationPolicy::Tokens((tokens as f64 * multiplier).ceil() as usize)
}
}
}
/// Returns a token budget derived from this policy.
///
/// - For `Tokens`, this is the explicit token limit.
@@ -73,6 +61,21 @@ impl TruncationPolicy {
}
}
impl std::ops::Mul<f64> for TruncationPolicy {
type Output = Self;
fn mul(self, multiplier: f64) -> Self::Output {
match self {
TruncationPolicy::Bytes(bytes) => {
TruncationPolicy::Bytes((bytes as f64 * multiplier).ceil() as usize)
}
TruncationPolicy::Tokens(tokens) => {
TruncationPolicy::Tokens((tokens as f64 * multiplier).ceil() as usize)
}
}
}
}
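A self-contained restatement of the operator's rounding behavior, using a stand-in enum so the arithmetic can be checked in isolation; the 1.2 factor mirrors the serialization budget applied in process_item earlier in this diff.

// Stand-in enum so the rounding can be exercised without the crate's types.
#[derive(Debug, PartialEq)]
enum Policy {
    Bytes(usize),
    Tokens(usize),
}

impl std::ops::Mul<f64> for Policy {
    type Output = Self;

    fn mul(self, multiplier: f64) -> Self::Output {
        // Round up so scaling never under-budgets, matching the impl above.
        match self {
            Policy::Bytes(bytes) => Policy::Bytes((bytes as f64 * multiplier).ceil() as usize),
            Policy::Tokens(tokens) => Policy::Tokens((tokens as f64 * multiplier).ceil() as usize),
        }
    }
}

fn main() {
    assert_eq!(Policy::Bytes(1000) * 1.2, Policy::Bytes(1200));
    assert_eq!(Policy::Tokens(7) * 1.5, Policy::Tokens(11)); // 10.5 rounds up to 11
}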
pub(crate) fn formatted_truncate_text(content: &str, policy: TruncationPolicy) -> String {
if content.len() <= policy.byte_budget() {
return content.to_string();

View File

@@ -80,5 +80,5 @@ mod unstable_features_warning;
mod user_notification;
mod user_shell_cmd;
mod view_image;
mod web_search_cached;
mod web_search;
mod websocket_fallback;

View File

@@ -1,5 +1,7 @@
#![allow(clippy::unwrap_used)]
use codex_core::WireApi;
use codex_core::built_in_model_providers;
use codex_core::features::Feature;
use codex_core::protocol::SandboxPolicy;
use codex_protocol::config_types::WebSearchMode;
@@ -25,6 +27,15 @@ fn find_web_search_tool(body: &Value) -> &Value {
.expect("tools should include a web_search tool")
}
#[allow(clippy::expect_used)]
fn has_web_search_tool(body: &Value) -> bool {
body["tools"]
.as_array()
.expect("request body should include tools array")
.iter()
.any(|tool| tool.get("type").and_then(Value::as_str) == Some("web_search"))
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn web_search_mode_cached_sets_external_web_access_false() {
skip_if_no_network!();
@@ -174,3 +185,45 @@ async fn web_search_mode_updates_between_turns_with_sandbox_policy() {
"danger-full-access policy should default web_search to live"
);
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn web_search_mode_defaults_to_disabled_for_azure_responses() {
skip_if_no_network!();
let server = start_mock_server().await;
let sse = sse_completed("resp-1");
let resp_mock = responses::mount_sse_once(&server, sse).await;
let mut builder = test_codex()
.with_model("gpt-5-codex")
.with_config(|config| {
let base_url = config.model_provider.base_url.clone();
let mut provider = built_in_model_providers()["openai"].clone();
provider.name = "Azure".to_string();
provider.base_url = base_url;
provider.wire_api = WireApi::Responses;
config.model_provider_id = provider.name.clone();
config.model_provider = provider;
config.web_search_mode = None;
config.features.disable(Feature::WebSearchCached);
config.features.disable(Feature::WebSearchRequest);
});
let test = builder
.build(&server)
.await
.expect("create test Codex conversation");
test.submit_turn_with_policy(
"hello azure default web search",
SandboxPolicy::DangerFullAccess,
)
.await
.expect("submit turn");
let body = resp_mock.single_request().body_json();
assert_eq!(
has_web_search_tool(&body),
false,
"azure responses requests should disable web_search by default"
);
}
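For reference, the has_web_search_tool check used by this test can be exercised on its own; a minimal sketch with serde_json follows (the original helper expects the tools array rather than defaulting to false, and the sample bodies here are illustrative).

use serde_json::{Value, json};

// Stand-alone restatement of the helper above.
fn has_web_search_tool(body: &Value) -> bool {
    body["tools"]
        .as_array()
        .map(|tools| {
            tools
                .iter()
                .any(|tool| tool.get("type").and_then(Value::as_str) == Some("web_search"))
        })
        .unwrap_or(false)
}

fn main() {
    let with_search = json!({ "tools": [{ "type": "web_search" }] });
    let without_search = json!({ "tools": [{ "type": "function", "name": "example_tool" }] });
    assert!(has_web_search_tool(&with_search));
    assert!(!has_web_search_tool(&without_search));
}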

View File

@@ -241,7 +241,7 @@ async fn load_exec_policy() -> anyhow::Result<Policy> {
cwd,
&cli_overrides,
overrides,
None,
codex_core::config_loader::CloudRequirementsLoader::default(),
)
.await?;

View File

@@ -19,6 +19,7 @@ workspace = true
anyhow = { workspace = true }
clap = { workspace = true, features = ["derive"] }
codex-arg0 = { workspace = true }
codex-cloud-requirements = { workspace = true }
codex-common = { workspace = true, features = [
"cli",
"elapsed",

View File

@@ -13,6 +13,7 @@ pub mod exec_events;
pub use cli::Cli;
pub use cli::Command;
pub use cli::ReviewArgs;
use codex_cloud_requirements::cloud_requirements_loader;
use codex_common::oss::ensure_oss_provider_ready;
use codex_common::oss::get_default_model_for_oss_provider;
use codex_common::oss::ollama_chat_deprecation_notice;
@@ -24,6 +25,7 @@ use codex_core::OLLAMA_OSS_PROVIDER_ID;
use codex_core::ThreadManager;
use codex_core::auth::enforce_login_restrictions;
use codex_core::config::Config;
use codex_core::config::ConfigBuilder;
use codex_core::config::ConfigOverrides;
use codex_core::config::find_codex_home;
use codex_core::config::load_config_as_toml_with_cli_overrides;
@@ -64,6 +66,7 @@ use uuid::Uuid;
use crate::cli::Command as ExecCommand;
use crate::event_processor::CodexStatus;
use crate::event_processor::EventProcessor;
use codex_core::default_client::set_default_client_residency_requirement;
use codex_core::default_client::set_default_originator;
use codex_core::find_thread_path_by_id_str;
use codex_core::find_thread_path_by_name_str;
@@ -159,41 +162,52 @@ pub async fn run_main(cli: Cli, codex_linux_sandbox_exe: Option<PathBuf>) -> any
// we load config.toml here to determine project state.
#[allow(clippy::print_stderr)]
let config_toml = {
let codex_home = match find_codex_home() {
Ok(codex_home) => codex_home,
Err(err) => {
eprintln!("Error finding codex home: {err}");
std::process::exit(1);
}
};
match load_config_as_toml_with_cli_overrides(
&codex_home,
&config_cwd,
cli_kv_overrides.clone(),
)
.await
{
Ok(config_toml) => config_toml,
Err(err) => {
let config_error = err
.get_ref()
.and_then(|err| err.downcast_ref::<ConfigLoadError>())
.map(ConfigLoadError::config_error);
if let Some(config_error) = config_error {
eprintln!(
"Error loading config.toml:\n{}",
format_config_error_with_source(config_error)
);
} else {
eprintln!("Error loading config.toml: {err}");
}
std::process::exit(1);
}
let codex_home = match find_codex_home() {
Ok(codex_home) => codex_home,
Err(err) => {
eprintln!("Error finding codex home: {err}");
std::process::exit(1);
}
};
#[allow(clippy::print_stderr)]
let config_toml = match load_config_as_toml_with_cli_overrides(
&codex_home,
&config_cwd,
cli_kv_overrides.clone(),
)
.await
{
Ok(config_toml) => config_toml,
Err(err) => {
let config_error = err
.get_ref()
.and_then(|err| err.downcast_ref::<ConfigLoadError>())
.map(ConfigLoadError::config_error);
if let Some(config_error) = config_error {
eprintln!(
"Error loading config.toml:\n{}",
format_config_error_with_source(config_error)
);
} else {
eprintln!("Error loading config.toml: {err}");
}
std::process::exit(1);
}
};
let cloud_auth_manager = AuthManager::shared(
codex_home.clone(),
false,
config_toml.cli_auth_credentials_store.unwrap_or_default(),
);
let chatgpt_base_url = config_toml
.chatgpt_base_url
.clone()
.unwrap_or_else(|| "https://chatgpt.com/backend-api/".to_string());
// TODO(gt): Make cloud requirements failures blocking once we can fail-closed.
let cloud_requirements = cloud_requirements_loader(cloud_auth_manager, chatgpt_base_url);
let model_provider = if oss {
let resolved = resolve_oss_provider(
oss_provider.as_deref(),
@@ -246,8 +260,13 @@ pub async fn run_main(cli: Cli, codex_linux_sandbox_exe: Option<PathBuf>) -> any
additional_writable_roots: add_dir,
};
let config =
Config::load_with_cli_overrides_and_harness_overrides(cli_kv_overrides, overrides).await?;
let config = ConfigBuilder::default()
.cli_overrides(cli_kv_overrides)
.harness_overrides(overrides)
.cloud_requirements(cloud_requirements)
.build()
.await?;
set_default_client_residency_requirement(config.enforce_residency.value());
if let Err(err) = enforce_login_restrictions(&config) {
eprintln!("{err}");

View File

@@ -11,6 +11,7 @@ use codex_core::config::CONFIG_TOML_FILE;
use codex_core::config::Constrained;
use codex_core::config::ConstraintError;
use codex_core::config::find_codex_home;
use codex_core::config_loader::CloudRequirementsLoader;
use codex_core::config_loader::ConfigLayerStack;
use codex_core::config_loader::ConfigLayerStackOrdering;
use codex_core::config_loader::LoaderOverrides;
@@ -31,10 +32,15 @@ pub(crate) async fn build_config_state() -> Result<ConfigState> {
let codex_home = find_codex_home().context("failed to resolve CODEX_HOME")?;
let cli_overrides = Vec::new();
let overrides = LoaderOverrides::default();
let config_layer_stack =
load_config_layers_state(&codex_home, None, &cli_overrides, overrides, None)
.await
.context("failed to load Codex config")?;
let config_layer_stack = load_config_layers_state(
&codex_home,
None,
&cli_overrides,
overrides,
CloudRequirementsLoader::default(),
)
.await
.context("failed to load Codex config")?;
let cfg_path = codex_home.join(CONFIG_TOML_FILE);

View File

@@ -15,7 +15,7 @@ axum = { workspace = true, default-features = false, features = [
] }
codex-keyring-store = { workspace = true }
codex-protocol = { workspace = true }
dirs = { workspace = true }
codex-utils-home-dir = { workspace = true }
futures = { workspace = true, default-features = false, features = ["std"] }
keyring = { workspace = true, features = ["crypto-rust"] }
mcp-types = { path = "../mcp-types" }

View File

@@ -1,33 +0,0 @@
use dirs::home_dir;
use std::path::PathBuf;
/// This was copied from codex-core but codex-core depends on this crate.
/// TODO: move this to a shared crate lower in the dependency tree.
///
///
/// Returns the path to the Codex configuration directory, which can be
/// specified by the `CODEX_HOME` environment variable. If not set, defaults to
/// `~/.codex`.
///
/// - If `CODEX_HOME` is set, the value will be canonicalized and this
/// function will Err if the path does not exist.
/// - If `CODEX_HOME` is not set, this function does not verify that the
/// directory exists.
pub(crate) fn find_codex_home() -> std::io::Result<PathBuf> {
// Honor the `CODEX_HOME` environment variable when it is set to allow users
// (and tests) to override the default location.
if let Ok(val) = std::env::var("CODEX_HOME")
&& !val.is_empty()
{
return PathBuf::from(val).canonicalize();
}
let mut p = home_dir().ok_or_else(|| {
std::io::Error::new(
std::io::ErrorKind::NotFound,
"Could not find home directory",
)
})?;
p.push(".codex");
Ok(p)
}

View File

@@ -1,5 +1,4 @@
mod auth_status;
mod find_codex_home;
mod logging_client_handler;
mod oauth;
mod perform_oauth_login;

View File

@@ -48,7 +48,7 @@ use codex_keyring_store::KeyringStore;
use rmcp::transport::auth::AuthorizationManager;
use tokio::sync::Mutex;
use crate::find_codex_home::find_codex_home;
use codex_utils_home_dir::find_codex_home;
const KEYRING_SERVICE: &str = "Codex MCP Credentials";
const REFRESH_SKEW_MILLIS: u64 = 30_000;

View File

@@ -1370,10 +1370,7 @@ impl App {
self.shutdown_current_thread().await;
self.config = resume_config;
tui.set_notification_method(self.config.tui_notification_method);
self.file_search = FileSearchManager::new(
self.config.cwd.clone(),
self.app_event_tx.clone(),
);
self.file_search.update_search_dir(self.config.cwd.clone());
let init = self.chatwidget_init_for_forked_or_resumed_thread(
tui,
self.config.clone(),

View File

@@ -38,6 +38,17 @@ impl FileSearchManager {
}
}
/// Updates the directory used for file searches.
/// This should be called when the session's CWD changes on resume.
/// Drops the current session so it will be recreated with the new directory on next query.
pub fn update_search_dir(&mut self, new_dir: PathBuf) {
self.search_dir = new_dir;
#[expect(clippy::unwrap_used)]
let mut st = self.state.lock().unwrap();
st.session.take();
st.latest_query.clear();
}
/// Call whenever the user edits the `@` token.
pub fn on_user_query(&self, query: String) {
#[expect(clippy::unwrap_used)]

View File

@@ -27,6 +27,7 @@ use codex_core::config::resolve_oss_provider;
use codex_core::config_loader::CloudRequirementsLoader;
use codex_core::config_loader::ConfigLoadError;
use codex_core::config_loader::format_config_error_with_source;
use codex_core::default_client::set_default_client_residency_requirement;
use codex_core::find_thread_path_by_id_str;
use codex_core::find_thread_path_by_name_str;
use codex_core::path_utils;
@@ -276,6 +277,7 @@ pub async fn run_main(
cloud_requirements.clone(),
)
.await;
set_default_client_residency_requirement(config.enforce_residency.value());
if let Some(warning) = add_dir_warning_message(&cli.add_dir, config.sandbox_policy.get()) {
#[allow(clippy::print_stderr)]

View File

@@ -0,0 +1,6 @@
load("//:defs.bzl", "codex_rust_crate")
codex_rust_crate(
name = "home-dir",
crate_name = "codex_utils_home_dir",
)

View File

@@ -0,0 +1,15 @@
[package]
name = "codex-utils-home-dir"
version.workspace = true
edition.workspace = true
license.workspace = true
[lints]
workspace = true
[dependencies]
dirs = { workspace = true }
[dev-dependencies]
pretty_assertions = { workspace = true }
tempfile = { workspace = true }

View File

@@ -0,0 +1,128 @@
use dirs::home_dir;
use std::path::PathBuf;
/// Returns the path to the Codex configuration directory, which can be
/// specified by the `CODEX_HOME` environment variable. If not set, defaults to
/// `~/.codex`.
///
/// - If `CODEX_HOME` is set, the value must exist and be a directory. The
/// value will be canonicalized and this function will Err otherwise.
/// - If `CODEX_HOME` is not set, this function does not verify that the
/// directory exists.
pub fn find_codex_home() -> std::io::Result<PathBuf> {
let codex_home_env = std::env::var("CODEX_HOME")
.ok()
.filter(|val| !val.is_empty());
find_codex_home_from_env(codex_home_env.as_deref())
}
fn find_codex_home_from_env(codex_home_env: Option<&str>) -> std::io::Result<PathBuf> {
// Honor the `CODEX_HOME` environment variable when it is set to allow users
// (and tests) to override the default location.
match codex_home_env {
Some(val) => {
let path = PathBuf::from(val);
let metadata = std::fs::metadata(&path).map_err(|err| match err.kind() {
std::io::ErrorKind::NotFound => std::io::Error::new(
std::io::ErrorKind::NotFound,
format!("CODEX_HOME points to {val:?}, but that path does not exist"),
),
_ => std::io::Error::new(
err.kind(),
format!("failed to read CODEX_HOME {val:?}: {err}"),
),
})?;
if !metadata.is_dir() {
Err(std::io::Error::new(
std::io::ErrorKind::InvalidInput,
format!("CODEX_HOME points to {val:?}, but that path is not a directory"),
))
} else {
path.canonicalize().map_err(|err| {
std::io::Error::new(
err.kind(),
format!("failed to canonicalize CODEX_HOME {val:?}: {err}"),
)
})
}
}
None => {
let mut p = home_dir().ok_or_else(|| {
std::io::Error::new(
std::io::ErrorKind::NotFound,
"Could not find home directory",
)
})?;
p.push(".codex");
Ok(p)
}
}
}
#[cfg(test)]
mod tests {
use super::find_codex_home_from_env;
use dirs::home_dir;
use pretty_assertions::assert_eq;
use std::fs;
use std::io::ErrorKind;
use tempfile::TempDir;
#[test]
fn find_codex_home_env_missing_path_is_fatal() {
let temp_home = TempDir::new().expect("temp home");
let missing = temp_home.path().join("missing-codex-home");
let missing_str = missing
.to_str()
.expect("missing codex home path should be valid utf-8");
let err = find_codex_home_from_env(Some(missing_str)).expect_err("missing CODEX_HOME");
assert_eq!(err.kind(), ErrorKind::NotFound);
assert!(
err.to_string().contains("CODEX_HOME"),
"unexpected error: {err}"
);
}
#[test]
fn find_codex_home_env_file_path_is_fatal() {
let temp_home = TempDir::new().expect("temp home");
let file_path = temp_home.path().join("codex-home.txt");
fs::write(&file_path, "not a directory").expect("write temp file");
let file_str = file_path
.to_str()
.expect("file codex home path should be valid utf-8");
let err = find_codex_home_from_env(Some(file_str)).expect_err("file CODEX_HOME");
assert_eq!(err.kind(), ErrorKind::InvalidInput);
assert!(
err.to_string().contains("not a directory"),
"unexpected error: {err}"
);
}
#[test]
fn find_codex_home_env_valid_directory_canonicalizes() {
let temp_home = TempDir::new().expect("temp home");
let temp_str = temp_home
.path()
.to_str()
.expect("temp codex home path should be valid utf-8");
let resolved = find_codex_home_from_env(Some(temp_str)).expect("valid CODEX_HOME");
let expected = temp_home
.path()
.canonicalize()
.expect("canonicalize temp home");
assert_eq!(resolved, expected);
}
#[test]
fn find_codex_home_without_env_uses_default_home_dir() {
let resolved = find_codex_home_from_env(None).expect("default CODEX_HOME");
let mut expected = home_dir().expect("home dir");
expected.push(".codex");
assert_eq!(resolved, expected);
}
}
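A possible call-site sketch for the new crate, assuming codex-utils-home-dir as a dependency and that find_codex_home is exported from the crate root shown above.

use codex_utils_home_dir::find_codex_home;

fn main() -> std::io::Result<()> {
    // Errors if CODEX_HOME is set but missing or not a directory; otherwise falls
    // back to ~/.codex without checking that it exists.
    let codex_home = find_codex_home()?;
    println!("using codex home at {}", codex_home.display());
    Ok(())
}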