Compare commits

...

4 Commits

Author SHA1 Message Date
Dylan Hurd
d5d0f265a0 rebase 2026-03-02 11:50:05 -07:00
Dylan Hurd
4c163c482d feat(config): Plan mode instructions 2026-03-02 11:50:05 -07:00
Eric Traut
7709bf32a3 Fix project trust config parsing so CLI overrides work (#13090)
Fixes #13076

This PR fixes a bug that causes command-line config overrides for MCP
subtables to not be merged correctly.

Summary
- make project trust loading go through the dedicated struct so CLI
overrides can update trusted project-local MCP transports

---------

Co-authored-by: jif-oai <jif@openai.com>
2026-03-02 11:10:38 -07:00
Michael Bolin
3241c1c6cc fix: use https://git.savannah.gnu.org/git/bash instead of https://github.com/bolinfest/bash (#13057)
Historically, we cloned the Bash repo from
https://github.com/bminor/bash, but for whatever reason, it was removed
at some point.

I had a local clone of it, so I pushed it to
https://github.com/bolinfest/bash so that we could continue running our
CI job. I did this in https://github.com/openai/codex/pull/9563, and as
you can see, I did not tamper with the commit hash we used as the basis
of this build.

Using a personal fork is not great, so this PR changes the CI job to use
what appears to be considered the source of truth for Bash, which is
https://git.savannah.gnu.org/git/bash.git.

Though in testing this out, it appears this Git server does not support
the combination of `git clone --depth 1
https://git.savannah.gnu.org/git/bash` and `git fetch --depth 1 origin
a8a1c2fac029404d3f42cd39f5a20f24b6e4fe4b`, as it fails with the
following error:

```
error: Server does not allow request for unadvertised object a8a1c2fac029404d3f42cd39f5a20f24b6e4fe4b
```

so unfortunately this means that we have to do a full clone instead of a
shallow clone in our CI jobs, which will be a bit slower.

Also updated `codex-rs/shell-escalation/README.md` to reflect this
change.
2026-03-02 09:09:54 -08:00
19 changed files with 170 additions and 14 deletions

View File

@@ -146,9 +146,8 @@ jobs:
shell: bash
run: |
set -euo pipefail
git clone --depth 1 https://github.com/bolinfest/bash /tmp/bash
git clone https://git.savannah.gnu.org/git/bash /tmp/bash
cd /tmp/bash
git fetch --depth 1 origin a8a1c2fac029404d3f42cd39f5a20f24b6e4fe4b
git checkout a8a1c2fac029404d3f42cd39f5a20f24b6e4fe4b
git apply "${GITHUB_WORKSPACE}/shell-tool-mcp/patches/bash-exec-wrapper.patch"
./configure --without-bash-malloc
@@ -188,9 +187,8 @@ jobs:
shell: bash
run: |
set -euo pipefail
git clone --depth 1 https://github.com/bolinfest/bash /tmp/bash
git clone https://git.savannah.gnu.org/git/bash /tmp/bash
cd /tmp/bash
git fetch --depth 1 origin a8a1c2fac029404d3f42cd39f5a20f24b6e4fe4b
git checkout a8a1c2fac029404d3f42cd39f5a20f24b6e4fe4b
git apply "${GITHUB_WORKSPACE}/shell-tool-mcp/patches/bash-exec-wrapper.patch"
./configure --without-bash-malloc

View File

@@ -189,6 +189,7 @@ impl MessageProcessor {
.features
.enabled(codex_core::features::Feature::DefaultModeRequestUserInput),
},
config.plan_mode_developer_instructions.clone(),
));
let cloud_requirements = Arc::new(RwLock::new(cloud_requirements));
let codex_message_processor = CodexMessageProcessor::new(CodexMessageProcessorArgs {

View File

@@ -499,6 +499,9 @@
"personality": {
"$ref": "#/definitions/Personality"
},
"plan_mode_developer_instructions": {
"type": "string"
},
"plan_mode_reasoning_effort": {
"$ref": "#/definitions/ReasoningEffort"
},
@@ -2038,6 +2041,9 @@
],
"description": "Optionally specify a personality for the model"
},
"plan_mode_developer_instructions": {
"type": "string"
},
"plan_mode_reasoning_effort": {
"$ref": "#/definitions/ReasoningEffort"
},

View File

@@ -8130,6 +8130,7 @@ mod tests {
auth_manager.clone(),
None,
CollaborationModesConfig::default(),
None,
));
let model = ModelsManager::get_model_offline_for_tests(config.model.as_deref());
let model_info =
@@ -8217,6 +8218,7 @@ mod tests {
auth_manager.clone(),
None,
CollaborationModesConfig::default(),
None,
));
let agent_control = AgentControl::default();
let exec_policy = ExecPolicyManager::default();
@@ -8385,6 +8387,7 @@ mod tests {
auth_manager.clone(),
None,
CollaborationModesConfig::default(),
None,
));
let agent_control = AgentControl::default();
let exec_policy = ExecPolicyManager::default();

View File

@@ -413,6 +413,10 @@ pub struct Config {
/// Plan preset. The `none` value means "no reasoning" (not "inherit the
/// global default").
pub plan_mode_reasoning_effort: Option<ReasoningEffort>,
/// Optional Plan-mode-specific developer instructions override.
///
/// When unset, Plan mode uses the built-in Plan preset instructions.
pub plan_mode_developer_instructions: Option<String>,
/// Optional value to use for `reasoning.summary` when making a request
/// using the Responses API. When unset, the model catalog default is used.
@@ -1166,6 +1170,7 @@ pub struct ConfigToml {
pub model_reasoning_effort: Option<ReasoningEffort>,
pub plan_mode_reasoning_effort: Option<ReasoningEffort>,
pub plan_mode_developer_instructions: Option<String>,
pub model_reasoning_summary: Option<ReasoningSummary>,
/// Optional verbosity control for GPT-5 models (Responses API `text.verbosity`).
pub model_verbosity: Option<Verbosity>,
@@ -2165,6 +2170,9 @@ impl Config {
plan_mode_reasoning_effort: config_profile
.plan_mode_reasoning_effort
.or(cfg.plan_mode_reasoning_effort),
plan_mode_developer_instructions: config_profile
.plan_mode_developer_instructions
.or(cfg.plan_mode_developer_instructions),
model_reasoning_summary: config_profile
.model_reasoning_summary
.or(cfg.model_reasoning_summary),
@@ -4917,6 +4925,7 @@ model_verbosity = "high"
model_reasoning_effort: Some(ReasoningEffort::High),
plan_mode_reasoning_effort: None,
model_reasoning_summary: Some(ReasoningSummary::Detailed),
plan_mode_developer_instructions: None,
model_supports_reasoning_summaries: None,
model_catalog: None,
model_verbosity: None,
@@ -5045,6 +5054,7 @@ model_verbosity = "high"
model_reasoning_effort: None,
plan_mode_reasoning_effort: None,
model_reasoning_summary: None,
plan_mode_developer_instructions: None,
model_supports_reasoning_summaries: None,
model_catalog: None,
model_verbosity: None,
@@ -5171,6 +5181,7 @@ model_verbosity = "high"
model_reasoning_effort: None,
plan_mode_reasoning_effort: None,
model_reasoning_summary: None,
plan_mode_developer_instructions: None,
model_supports_reasoning_summaries: None,
model_catalog: None,
model_verbosity: None,
@@ -5283,6 +5294,7 @@ model_verbosity = "high"
model_reasoning_effort: Some(ReasoningEffort::High),
plan_mode_reasoning_effort: None,
model_reasoning_summary: Some(ReasoningSummary::Detailed),
plan_mode_developer_instructions: None,
model_supports_reasoning_summaries: None,
model_catalog: None,
model_verbosity: Some(Verbosity::High),

View File

@@ -25,6 +25,7 @@ pub struct ConfigProfile {
pub sandbox_mode: Option<SandboxMode>,
pub model_reasoning_effort: Option<ReasoningEffort>,
pub plan_mode_reasoning_effort: Option<ReasoningEffort>,
pub plan_mode_developer_instructions: Option<String>,
pub model_reasoning_summary: Option<ReasoningSummary>,
pub model_verbosity: Option<Verbosity>,
/// Optional path to a JSON model catalog (applied on startup only).

View File

@@ -6,7 +6,6 @@ mod macos;
mod tests;
use crate::config::ConfigToml;
use crate::config::deserialize_config_toml_with_base;
use crate::config_loader::layer_io::LoadedConfigLayers;
use crate::git_info::resolve_root_git_project_for_trust;
use codex_app_server_protocol::ConfigLayerSource;
@@ -576,6 +575,11 @@ struct ProjectTrustContext {
user_config_file: AbsolutePathBuf,
}
#[derive(Deserialize)]
struct ProjectTrustConfigToml {
projects: Option<std::collections::HashMap<String, crate::config::ProjectConfig>>,
}
struct ProjectTrustDecision {
trust_level: Option<TrustLevel>,
trust_key: String,
@@ -666,10 +670,16 @@ async fn project_trust_context(
config_base_dir: &Path,
user_config_file: &AbsolutePathBuf,
) -> io::Result<ProjectTrustContext> {
let config_toml = deserialize_config_toml_with_base(merged_config.clone(), config_base_dir)?;
let project_trust_config: ProjectTrustConfigToml = {
let _guard = AbsolutePathBufGuard::new(config_base_dir);
merged_config
.clone()
.try_into()
.map_err(|err| std::io::Error::new(std::io::ErrorKind::InvalidData, err))?
};
let project_root = find_project_root(cwd, project_root_markers).await?;
let projects = config_toml.projects.unwrap_or_default();
let projects = project_trust_config.projects.unwrap_or_default();
let project_root_key = project_root.as_path().to_string_lossy().to_string();
let repo_root = resolve_root_git_project_for_trust(cwd.as_path());

View File

@@ -1114,6 +1114,91 @@ async fn project_layers_disabled_when_untrusted_or_unknown() -> std::io::Result<
Ok(())
}
#[tokio::test]
async fn cli_override_can_update_project_local_mcp_server_when_project_is_trusted()
-> std::io::Result<()> {
let tmp = tempdir()?;
let project_root = tmp.path().join("project");
let nested = project_root.join("child");
let dot_codex = project_root.join(".codex");
let codex_home = tmp.path().join("home");
tokio::fs::create_dir_all(&nested).await?;
tokio::fs::create_dir_all(&dot_codex).await?;
tokio::fs::create_dir_all(&codex_home).await?;
tokio::fs::write(project_root.join(".git"), "gitdir: here").await?;
tokio::fs::write(
dot_codex.join(CONFIG_TOML_FILE),
r#"
[mcp_servers.sentry]
url = "https://mcp.sentry.dev/mcp"
enabled = false
"#,
)
.await?;
make_config_for_test(&codex_home, &project_root, TrustLevel::Trusted, None).await?;
let config = ConfigBuilder::default()
.codex_home(codex_home)
.cli_overrides(vec![(
"mcp_servers.sentry.enabled".to_string(),
TomlValue::Boolean(true),
)])
.fallback_cwd(Some(nested))
.build()
.await?;
let server = config
.mcp_servers
.get()
.get("sentry")
.expect("trusted project MCP server should load");
assert!(server.enabled);
Ok(())
}
#[tokio::test]
async fn cli_override_for_disabled_project_local_mcp_server_returns_invalid_transport()
-> std::io::Result<()> {
let tmp = tempdir()?;
let project_root = tmp.path().join("project");
let nested = project_root.join("child");
let dot_codex = project_root.join(".codex");
let codex_home = tmp.path().join("home");
tokio::fs::create_dir_all(&nested).await?;
tokio::fs::create_dir_all(&dot_codex).await?;
tokio::fs::create_dir_all(&codex_home).await?;
tokio::fs::write(project_root.join(".git"), "gitdir: here").await?;
tokio::fs::write(
dot_codex.join(CONFIG_TOML_FILE),
r#"
[mcp_servers.sentry]
url = "https://mcp.sentry.dev/mcp"
enabled = false
"#,
)
.await?;
let err = ConfigBuilder::default()
.codex_home(codex_home)
.cli_overrides(vec![(
"mcp_servers.sentry.enabled".to_string(),
TomlValue::Boolean(true),
)])
.fallback_cwd(Some(nested))
.build()
.await
.expect_err("untrusted project layer should not provide MCP transport");
assert!(
err.to_string().contains("invalid transport")
&& err.to_string().contains("mcp_servers.sentry"),
"unexpected error: {err}"
);
Ok(())
}
#[tokio::test]
async fn invalid_project_config_ignored_when_untrusted_or_unknown() -> std::io::Result<()> {
let tmp = tempdir()?;

View File

@@ -23,17 +23,25 @@ pub struct CollaborationModesConfig {
pub(crate) fn builtin_collaboration_mode_presets(
collaboration_modes_config: CollaborationModesConfig,
plan_mode_developer_instructions: Option<&str>,
) -> Vec<CollaborationModeMask> {
vec![plan_preset(), default_preset(collaboration_modes_config)]
vec![
plan_preset(plan_mode_developer_instructions),
default_preset(collaboration_modes_config),
]
}
fn plan_preset() -> CollaborationModeMask {
fn plan_preset(plan_mode_developer_instructions: Option<&str>) -> CollaborationModeMask {
CollaborationModeMask {
name: ModeKind::Plan.display_name().to_string(),
mode: Some(ModeKind::Plan),
model: None,
reasoning_effort: Some(Some(ReasoningEffort::Medium)),
developer_instructions: Some(Some(COLLABORATION_MODE_PLAN.to_string())),
developer_instructions: Some(Some(
plan_mode_developer_instructions
.unwrap_or(COLLABORATION_MODE_PLAN)
.to_string(),
)),
}
}
@@ -109,17 +117,27 @@ mod tests {
#[test]
fn preset_names_use_mode_display_names() {
assert_eq!(plan_preset().name, ModeKind::Plan.display_name());
assert_eq!(plan_preset(None).name, ModeKind::Plan.display_name());
assert_eq!(
default_preset(CollaborationModesConfig::default()).name,
ModeKind::Default.display_name()
);
assert_eq!(
plan_preset().reasoning_effort,
plan_preset(None).reasoning_effort,
Some(Some(ReasoningEffort::Medium))
);
}
#[test]
fn plan_preset_uses_configured_developer_instructions_override() {
let override_instructions = "Use plan override.";
let plan = plan_preset(Some(override_instructions));
assert_eq!(
plan.developer_instructions,
Some(Some(override_instructions.to_string()))
);
}
#[test]
fn default_mode_instructions_replace_mode_names_placeholder() {
let default_instructions = default_preset(CollaborationModesConfig {

View File

@@ -61,6 +61,7 @@ pub struct ModelsManager {
etag: RwLock<Option<String>>,
cache_manager: ModelsCacheManager,
provider: ModelProviderInfo,
plan_mode_developer_instructions: Option<String>,
}
impl ModelsManager {
@@ -74,6 +75,7 @@ impl ModelsManager {
auth_manager: Arc<AuthManager>,
model_catalog: Option<ModelsResponse>,
collaboration_modes_config: CollaborationModesConfig,
plan_mode_developer_instructions: Option<String>,
) -> Self {
let cache_path = codex_home.join(MODEL_CACHE_FILE);
let cache_manager = ModelsCacheManager::new(cache_path, DEFAULT_MODEL_CACHE_TTL);
@@ -96,6 +98,7 @@ impl ModelsManager {
etag: RwLock::new(None),
cache_manager,
provider: ModelProviderInfo::create_openai_provider(),
plan_mode_developer_instructions,
}
}
@@ -121,7 +124,10 @@ impl ModelsManager {
&self,
collaboration_modes_config: CollaborationModesConfig,
) -> Vec<CollaborationModeMask> {
builtin_collaboration_mode_presets(collaboration_modes_config)
builtin_collaboration_mode_presets(
collaboration_modes_config,
self.plan_mode_developer_instructions.as_deref(),
)
}
/// Attempt to list models without blocking, using the current cached state.
@@ -394,6 +400,7 @@ impl ModelsManager {
etag: RwLock::new(None),
cache_manager,
provider,
plan_mode_developer_instructions: None,
}
}
@@ -521,6 +528,7 @@ mod tests {
auth_manager,
None,
CollaborationModesConfig::default(),
None,
);
let known_slug = manager
.get_remote_models()
@@ -559,6 +567,7 @@ mod tests {
models: vec![remote_model("gpt-overlay", "Overlay", 0)],
}),
CollaborationModesConfig::default(),
None,
);
let model_info = manager
@@ -587,6 +596,7 @@ mod tests {
auth_manager,
None,
CollaborationModesConfig::default(),
None,
);
let known_slug = manager
.get_remote_models()
@@ -618,6 +628,7 @@ mod tests {
auth_manager,
None,
CollaborationModesConfig::default(),
None,
);
let known_slug = manager
.get_remote_models()

View File

@@ -87,5 +87,6 @@ pub fn all_model_presets() -> &'static Vec<ModelPreset> {
pub fn builtin_collaboration_mode_presets() -> Vec<CollaborationModeMask> {
collaboration_mode_presets::builtin_collaboration_mode_presets(
collaboration_mode_presets::CollaborationModesConfig::default(),
None,
)
}

View File

@@ -150,6 +150,7 @@ impl ThreadManager {
session_source: SessionSource,
model_catalog: Option<ModelsResponse>,
collaboration_modes_config: CollaborationModesConfig,
plan_mode_developer_instructions: Option<String>,
) -> Self {
let (thread_created_tx, _) = broadcast::channel(THREAD_CREATED_CHANNEL_CAPACITY);
let plugins_manager = Arc::new(PluginsManager::new(codex_home.clone()));
@@ -168,6 +169,7 @@ impl ThreadManager {
auth_manager.clone(),
model_catalog,
collaboration_modes_config,
plan_mode_developer_instructions,
)),
skills_manager,
plugins_manager,

View File

@@ -702,6 +702,7 @@ async fn prefers_apikey_when_config_prefers_apikey_even_with_chatgpt_tokens() {
.features
.enabled(Feature::DefaultModeRequestUserInput),
},
config.plan_mode_developer_instructions.clone(),
);
let NewThread { thread: codex, .. } = thread_manager
.start_thread(config)

View File

@@ -18,6 +18,7 @@ async fn offline_model_info_without_tool_output_override() {
auth_manager,
None,
CollaborationModesConfig::default(),
None,
);
let model_info = manager.get_model_info("gpt-5.1", &config).await;
@@ -41,6 +42,7 @@ async fn offline_model_info_with_tool_output_override() {
auth_manager,
None,
CollaborationModesConfig::default(),
None,
);
let model_info = manager.get_model_info("gpt-5.1-codex", &config).await;

View File

@@ -410,6 +410,7 @@ pub async fn run_main(cli: Cli, arg0_paths: Arg0DispatchPaths) -> anyhow::Result
.features
.enabled(codex_core::features::Feature::DefaultModeRequestUserInput),
},
config.plan_mode_developer_instructions.clone(),
));
let default_model = thread_manager
.get_models_manager()

View File

@@ -68,6 +68,7 @@ impl MessageProcessor {
.features
.enabled(codex_core::features::Feature::DefaultModeRequestUserInput),
},
config.plan_mode_developer_instructions.clone(),
));
Self {
outgoing,

View File

@@ -20,7 +20,7 @@ decision to the shell-escalation protocol over a shared file descriptor (specifi
We carry a small patch to `execute_cmd.c` (see `patches/bash-exec-wrapper.patch`) that adds support for `EXEC_WRAPPER`. The original commit message is “add support for BASH_EXEC_WRAPPER” and the patch applies cleanly to `a8a1c2fac029404d3f42cd39f5a20f24b6e4fe4b` from https://github.com/bminor/bash. To rebuild manually:
```bash
git clone https://github.com/bminor/bash
git clone https://git.savannah.gnu.org/git/bash
git checkout a8a1c2fac029404d3f42cd39f5a20f24b6e4fe4b
git apply /path/to/patches/bash-exec-wrapper.patch
./configure --without-bash-malloc

View File

@@ -1521,6 +1521,7 @@ impl App {
.features
.enabled(codex_core::features::Feature::DefaultModeRequestUserInput),
},
config.plan_mode_developer_instructions.clone(),
));
let mut model = thread_manager
.get_models_manager()

View File

@@ -1645,6 +1645,7 @@ async fn make_chatwidget_manual(
auth_manager.clone(),
None,
CollaborationModesConfig::default(),
None,
));
let reasoning_effort = None;
let base_mode = CollaborationMode {
@@ -1773,6 +1774,7 @@ fn set_chatgpt_auth(chat: &mut ChatWidget) {
chat.auth_manager.clone(),
None,
CollaborationModesConfig::default(),
None,
));
}