Merge branch 'main' into var-expansion

Authored by brianm-openai on 2026-02-02 16:44:47 -08:00; committed by GitHub.
735 changed files with 100049 additions and 1700 deletions

@@ -26,6 +26,7 @@ use crate::config_loader::LoaderOverrides;
 use crate::config_loader::McpServerIdentity;
 use crate::config_loader::McpServerRequirement;
 use crate::config_loader::RealEnv;
+use crate::config_loader::ResidencyRequirement;
 use crate::config_loader::Sourced;
 use crate::config_loader::expand_key_for_matching_with_env;
 use crate::config_loader::load_config_layers_state;
@@ -60,7 +61,6 @@ use codex_protocol::openai_models::ReasoningEffort;
 use codex_rmcp_client::OAuthCredentialsStoreMode;
 use codex_utils_absolute_path::AbsolutePathBuf;
 use codex_utils_absolute_path::AbsolutePathBufGuard;
-use dirs::home_dir;
 use schemars::JsonSchema;
 use serde::Deserialize;
 use serde::Serialize;
@@ -136,13 +136,18 @@ pub struct Config {
     pub model_provider: ModelProviderInfo,
     /// Optionally specify the personality of the model
-    pub model_personality: Option<Personality>,
+    pub personality: Option<Personality>,
     /// Approval policy for executing commands.
     pub approval_policy: Constrained<AskForApproval>,
     pub sandbox_policy: Constrained<SandboxPolicy>,
+    /// enforce_residency means web traffic cannot be routed outside of a
+    /// particular geography. HTTP clients should direct their requests
+    /// using backend-specific headers or URLs to enforce this.
+    pub enforce_residency: Constrained<Option<ResidencyRequirement>>,
     /// True if the user passed in an override or set a value in config.toml
     /// for either of approval_policy or sandbox_mode.
     pub did_user_set_custom_approval_policy_or_sandbox_mode: bool,
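The new `enforce_residency` field reuses the same `Constrained<_>` wrapper as `approval_policy` and `sandbox_policy`, so a residency requirement can be imposed by policy rather than user config. Below is a self-contained sketch of that pattern, assuming a hypothetical `Residency` enum and a simplified `Constrained`; the crate's real `Constrained` API is not shown in this diff beyond the `Constrained::allow_any(...)` constructor used in the test fixtures further down.

// Sketch only: `Residency` stands in for `ResidencyRequirement`, whose
// variants are not shown in this diff.
#[derive(Debug)]
enum Residency {
    UsOnly,
    EuOnly,
}

struct Constrained<T> {
    value: T,
    // false would mean the value was imposed by policy and is locked.
    user_overridable: bool,
}

impl<T> Constrained<T> {
    // Mirrors the `Constrained::allow_any(...)` constructor that the test
    // fixtures later in this diff use.
    fn allow_any(value: T) -> Self {
        Self { value, user_overridable: true }
    }
}

fn main() {
    let enforce_residency: Constrained<Option<Residency>> =
        Constrained::allow_any(Some(Residency::EuOnly));
    // Per the doc comment above, an HTTP client would use the requirement
    // to pick a region-pinned base URL or a backend-specific header.
    match &enforce_residency.value {
        Some(Residency::EuOnly) => println!("route requests via an EU endpoint"),
        Some(Residency::UsOnly) => println!("route requests via a US endpoint"),
        None => println!("no residency constraint"),
    }
    println!("user overridable: {}", enforce_residency.user_overridable);
}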
@@ -369,7 +374,7 @@ pub struct ConfigBuilder {
     cli_overrides: Option<Vec<(String, TomlValue)>>,
     harness_overrides: Option<ConfigOverrides>,
     loader_overrides: Option<LoaderOverrides>,
-    cloud_requirements: Option<CloudRequirementsLoader>,
+    cloud_requirements: CloudRequirementsLoader,
     fallback_cwd: Option<PathBuf>,
 }
@@ -395,7 +400,7 @@ impl ConfigBuilder {
     }
     pub fn cloud_requirements(mut self, cloud_requirements: CloudRequirementsLoader) -> Self {
-        self.cloud_requirements = Some(cloud_requirements);
+        self.cloud_requirements = cloud_requirements;
         self
     }
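This pair of hunks drops the `Option` wrapper around `cloud_requirements`: because `CloudRequirementsLoader` implements `Default`, the builder can hold a concrete value, and call sites pass `CloudRequirementsLoader::default()` instead of `None`, as the hunks below show. A minimal sketch of the refactor with stand-in types (`Loader` and `Builder` are illustrative, not the crate's types):

#[derive(Default)]
struct Loader {
    sources: Vec<String>,
}

#[derive(Default)]
struct Builder {
    // Before: loader: Option<Loader>, set via Some(loader) and defaulted
    // at build time. After: a concrete value that starts at its Default.
    loader: Loader,
}

impl Builder {
    fn loader(mut self, loader: Loader) -> Self {
        self.loader = loader; // no Some(...) wrapping, no unwrap_or_default later
        self
    }
}

fn main() {
    // Callers that used to pass None now take the default explicitly,
    // which is the None -> CloudRequirementsLoader::default() change
    // repeated in the hunks below.
    let explicit = Builder::default().loader(Loader { sources: vec!["cloud".into()] });
    let implicit = Builder::default();
    println!("{} vs {}", explicit.loader.sources.len(), implicit.loader.sources.len());
}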
@@ -526,7 +531,7 @@ pub async fn load_config_as_toml_with_cli_overrides(
         Some(cwd.clone()),
         &cli_overrides,
         LoaderOverrides::default(),
-        None,
+        CloudRequirementsLoader::default(),
     )
     .await?;
@@ -630,7 +635,7 @@ pub async fn load_global_mcp_servers(
         cwd,
         &cli_overrides,
         LoaderOverrides::default(),
-        None,
+        CloudRequirementsLoader::default(),
     )
     .await?;
     let merged_toml = config_layer_stack.effective_config();
@@ -1025,9 +1030,8 @@ pub struct ConfigToml {
     /// Override to force-enable reasoning summaries for the configured model.
     pub model_supports_reasoning_summaries: Option<bool>,
     /// EXPERIMENTAL
     /// Optionally specify a personality for the model
-    pub model_personality: Option<Personality>,
+    pub personality: Option<Personality>,
     /// Base URL for requests to ChatGPT (as opposed to the OpenAI API).
     pub chatgpt_base_url: Option<String>,
@@ -1324,7 +1328,7 @@ pub struct ConfigOverrides {
     pub codex_linux_sandbox_exe: Option<PathBuf>,
     pub base_instructions: Option<String>,
     pub developer_instructions: Option<String>,
-    pub model_personality: Option<Personality>,
+    pub personality: Option<Personality>,
     pub compact_prompt: Option<String>,
     pub include_apply_patch_tool: Option<bool>,
     pub show_raw_agent_reasoning: Option<bool>,
@@ -1431,7 +1435,7 @@ impl Config {
             codex_linux_sandbox_exe,
             base_instructions,
             developer_instructions,
-            model_personality,
+            personality,
             compact_prompt,
             include_apply_patch_tool: include_apply_patch_tool_override,
             show_raw_agent_reasoning,
@@ -1628,9 +1632,14 @@ impl Config {
             Self::try_read_non_empty_file(model_instructions_path, "model instructions file")?;
         let base_instructions = base_instructions.or(file_base_instructions);
         let developer_instructions = developer_instructions.or(cfg.developer_instructions);
-        let model_personality = model_personality
-            .or(config_profile.model_personality)
-            .or(cfg.model_personality);
+        let personality = personality
+            .or(config_profile.personality)
+            .or(cfg.personality)
+            .or_else(|| {
+                features
+                    .enabled(Feature::Personality)
+                    .then_some(Personality::Friendly)
+            });
         let experimental_compact_prompt_path = config_profile
             .experimental_compact_prompt_file
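The rewritten chain gives `personality` an explicit precedence: harness/CLI override, then profile, then config.toml, then a feature-gated default. `Option::or` evaluates its argument eagerly while `or_else` takes a closure, so only the final fallback, which consults the feature flag, is lazy. A self-contained sketch of the same chain with stand-in types (`Features` here is hypothetical; the real call is `features.enabled(Feature::Personality)`):

#[derive(Debug, Clone, Copy, PartialEq)]
enum Personality {
    Friendly,
    Concise,
}

struct Features {
    personality_enabled: bool,
}

impl Features {
    fn enabled(&self) -> bool {
        self.personality_enabled
    }
}

fn resolve(
    override_value: Option<Personality>,
    profile: Option<Personality>,
    config_toml: Option<Personality>,
    features: &Features,
) -> Option<Personality> {
    // First Some(_) wins; the feature-gated default only applies when
    // nothing was set anywhere, mirroring the chain in the hunk above.
    override_value
        .or(profile)
        .or(config_toml)
        .or_else(|| features.enabled().then_some(Personality::Friendly))
}

fn main() {
    let features = Features { personality_enabled: true };
    // Profile beats config.toml; both beat the feature default.
    assert_eq!(
        resolve(None, Some(Personality::Concise), Some(Personality::Friendly), &features),
        Some(Personality::Concise)
    );
    // Nothing set anywhere: the enabled feature supplies Friendly.
    assert_eq!(resolve(None, None, None, &features), Some(Personality::Friendly));
}

This fallback also explains the fixture changes at the end of the diff, where expected configs now carry `personality: Some(Personality::Friendly)` instead of `model_personality: None`.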
@@ -1653,6 +1662,7 @@ impl Config {
             sandbox_policy: mut constrained_sandbox_policy,
             mcp_servers,
             exec_policy: _,
+            enforce_residency,
         } = requirements;
         constrained_approval_policy
@@ -1675,13 +1685,14 @@ impl Config {
             cwd: resolved_cwd,
             approval_policy: constrained_approval_policy,
             sandbox_policy: constrained_sandbox_policy,
+            enforce_residency,
             did_user_set_custom_approval_policy_or_sandbox_mode,
             forced_auto_mode_downgraded_on_windows,
             shell_environment_policy,
             notify: cfg.notify,
             user_instructions,
             base_instructions,
-            model_personality,
+            personality,
             developer_instructions,
             compact_prompt,
             // The config.toml omits "_mode" because it's a config file. However, "_mode"
@@ -1889,27 +1900,12 @@ fn toml_uses_deprecated_instructions_file(value: &TomlValue) -> bool {
 /// specified by the `CODEX_HOME` environment variable. If not set, defaults to
 /// `~/.codex`.
 ///
-/// - If `CODEX_HOME` is set, the value will be canonicalized and this
-///   function will Err if the path does not exist.
+/// - If `CODEX_HOME` is set, the value must exist and be a directory. The
+///   value will be canonicalized and this function will Err otherwise.
 /// - If `CODEX_HOME` is not set, this function does not verify that the
 ///   directory exists.
 pub fn find_codex_home() -> std::io::Result<PathBuf> {
-    // Honor the `CODEX_HOME` environment variable when it is set to allow users
-    // (and tests) to override the default location.
-    if let Ok(val) = std::env::var("CODEX_HOME")
-        && !val.is_empty()
-    {
-        return PathBuf::from(val).canonicalize();
-    }
-
-    let mut p = home_dir().ok_or_else(|| {
-        std::io::Error::new(
-            std::io::ErrorKind::NotFound,
-            "Could not find home directory",
-        )
-    })?;
-    p.push(".codex");
-    Ok(p)
+    codex_utils_home_dir::find_codex_home()
 }
 /// Returns the path to the folder where Codex logs are stored. Does not verify
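The deleted body is replaced by a call into the `codex_utils_home_dir` crate, and the doc comment tightens the contract: `CODEX_HOME` must name an existing directory, not merely an existing path. The following is a sketch of that documented contract only, not the actual `codex_utils_home_dir` implementation, using the `dirs::home_dir` helper the old body relied on (requires the `dirs` crate):

use std::io;
use std::path::PathBuf;

fn find_codex_home_sketch() -> io::Result<PathBuf> {
    if let Ok(val) = std::env::var("CODEX_HOME") {
        if !val.is_empty() {
            // canonicalize() already errors if the path does not exist; the
            // directory check enforces the rest of the documented contract.
            let path = PathBuf::from(val).canonicalize()?;
            if !path.is_dir() {
                return Err(io::Error::other("CODEX_HOME is not a directory"));
            }
            return Ok(path);
        }
    }
    // CODEX_HOME unset or empty: default to ~/.codex without verifying that
    // the directory exists.
    let mut home = dirs::home_dir()
        .ok_or_else(|| io::Error::new(io::ErrorKind::NotFound, "Could not find home directory"))?;
    home.push(".codex");
    Ok(home)
}

fn main() -> io::Result<()> {
    println!("codex home: {}", find_codex_home_sketch()?.display());
    Ok(())
}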
@@ -2780,9 +2776,14 @@ profile = "project"
         };
         let cwd = AbsolutePathBuf::try_from(codex_home.path())?;
-        let config_layer_stack =
-            load_config_layers_state(codex_home.path(), Some(cwd), &Vec::new(), overrides, None)
-                .await?;
+        let config_layer_stack = load_config_layers_state(
+            codex_home.path(),
+            Some(cwd),
+            &Vec::new(),
+            overrides,
+            CloudRequirementsLoader::default(),
+        )
+        .await?;
         let cfg = deserialize_config_toml_with_base(
             config_layer_stack.effective_config(),
             codex_home.path(),
@@ -2909,7 +2910,7 @@ profile = "project"
             Some(cwd),
             &[("model".to_string(), TomlValue::String("cli".to_string()))],
             overrides,
-            None,
+            CloudRequirementsLoader::default(),
         )
         .await?;
@@ -3919,6 +3920,7 @@ model_verbosity = "high"
             model_provider: fixture.openai_provider.clone(),
             approval_policy: Constrained::allow_any(AskForApproval::Never),
             sandbox_policy: Constrained::allow_any(SandboxPolicy::new_read_only_policy()),
+            enforce_residency: Constrained::allow_any(None),
             did_user_set_custom_approval_policy_or_sandbox_mode: true,
             forced_auto_mode_downgraded_on_windows: false,
             shell_environment_policy: ShellEnvironmentPolicy::default(),
@@ -3946,7 +3948,7 @@ model_verbosity = "high"
             model_reasoning_summary: ReasoningSummary::Detailed,
             model_supports_reasoning_summaries: None,
             model_verbosity: None,
-            model_personality: None,
+            personality: Some(Personality::Friendly),
             chatgpt_base_url: "https://chatgpt.com/backend-api/".to_string(),
             base_instructions: None,
             developer_instructions: None,
@@ -4003,6 +4005,7 @@ model_verbosity = "high"
             model_provider: fixture.openai_chat_completions_provider.clone(),
             approval_policy: Constrained::allow_any(AskForApproval::UnlessTrusted),
             sandbox_policy: Constrained::allow_any(SandboxPolicy::new_read_only_policy()),
+            enforce_residency: Constrained::allow_any(None),
             did_user_set_custom_approval_policy_or_sandbox_mode: true,
             forced_auto_mode_downgraded_on_windows: false,
             shell_environment_policy: ShellEnvironmentPolicy::default(),
@@ -4030,7 +4033,7 @@ model_verbosity = "high"
             model_reasoning_summary: ReasoningSummary::default(),
             model_supports_reasoning_summaries: None,
             model_verbosity: None,
-            model_personality: None,
+            personality: Some(Personality::Friendly),
             chatgpt_base_url: "https://chatgpt.com/backend-api/".to_string(),
             base_instructions: None,
             developer_instructions: None,
@@ -4102,6 +4105,7 @@ model_verbosity = "high"
             model_provider: fixture.openai_provider.clone(),
             approval_policy: Constrained::allow_any(AskForApproval::OnFailure),
             sandbox_policy: Constrained::allow_any(SandboxPolicy::new_read_only_policy()),
+            enforce_residency: Constrained::allow_any(None),
             did_user_set_custom_approval_policy_or_sandbox_mode: true,
             forced_auto_mode_downgraded_on_windows: false,
             shell_environment_policy: ShellEnvironmentPolicy::default(),
@@ -4129,7 +4133,7 @@ model_verbosity = "high"
             model_reasoning_summary: ReasoningSummary::default(),
             model_supports_reasoning_summaries: None,
             model_verbosity: None,
-            model_personality: None,
+            personality: Some(Personality::Friendly),
             chatgpt_base_url: "https://chatgpt.com/backend-api/".to_string(),
             base_instructions: None,
             developer_instructions: None,
@@ -4187,6 +4191,7 @@ model_verbosity = "high"
             model_provider: fixture.openai_provider.clone(),
             approval_policy: Constrained::allow_any(AskForApproval::OnFailure),
             sandbox_policy: Constrained::allow_any(SandboxPolicy::new_read_only_policy()),
+            enforce_residency: Constrained::allow_any(None),
             did_user_set_custom_approval_policy_or_sandbox_mode: true,
             forced_auto_mode_downgraded_on_windows: false,
             shell_environment_policy: ShellEnvironmentPolicy::default(),
@@ -4214,7 +4219,7 @@ model_verbosity = "high"
             model_reasoning_summary: ReasoningSummary::Detailed,
             model_supports_reasoning_summaries: None,
             model_verbosity: Some(Verbosity::High),
-            model_personality: None,
+            personality: Some(Personality::Friendly),
             chatgpt_base_url: "https://chatgpt.com/backend-api/".to_string(),
             base_instructions: None,
             developer_instructions: None,