Compare commits

...

4 Commits

Author SHA1 Message Date
jif-oai
7971a90ec0 Merge branch 'main' into jif/profile-agents 2026-03-20 17:40:34 +00:00
jif-oai
8ba14dbc8e Merge branch 'main' into jif/profile-agents 2026-03-20 14:42:27 +00:00
jif-oai
481f765d71 nit fix 2026-03-19 16:36:19 +00:00
jif-oai
7b985beced feat: drop profile support for multi-agents 2026-03-19 16:08:04 +00:00
5 changed files with 90 additions and 364 deletions

View File

@@ -2,9 +2,10 @@
//!
//! Roles are selected at spawn time and are loaded with the same config machinery as
//! `config.toml`. This module resolves built-in and user-defined role files, inserts the role as a
//! high-precedence layer, and preserves the caller's current profile/provider unless the role
//! explicitly takes ownership of model selection. It does not decide when to spawn a sub-agent or
//! which role to use; the multi-agent tool handler owns that orchestration.
//! high-precedence layer, and preserves the caller's current effective model/provider selection
//! unless the role explicitly takes ownership of those fields. Sub-agents do not support config
//! profiles. It does not decide when to spawn a sub-agent or which role to use; the multi-agent
//! tool handler owns that orchestration.
use crate::config::AgentRoleConfig;
use crate::config::Config;
@@ -30,11 +31,8 @@ const AGENT_TYPE_UNAVAILABLE_ERROR: &str = "agent type is currently not availabl
/// Applies a named role layer to `config` while preserving caller-owned model selection.
///
/// The role layer is inserted at session-flag precedence so it can override persisted config, but
/// the caller's current `profile` and `model_provider` remain sticky runtime choices unless the
/// role explicitly sets `profile`, explicitly sets `model_provider`, or rewrites the active
/// profile's `model_provider` in place. Rebuilding the config without those overrides would make a
/// spawned agent silently fall back to the default provider, which is the bug this preservation
/// logic avoids.
/// the caller's current effective `model`, `model_reasoning_effort`, and `model_provider` remain
/// sticky runtime choices unless the role explicitly sets those fields.
pub(crate) async fn apply_role_to_config(
config: &mut Config,
role_name: Option<&str>,
@@ -63,15 +61,8 @@ async fn apply_role_to_config_inner(
return Ok(());
};
let role_layer_toml = load_role_layer_toml(config, config_file, is_built_in, role_name).await?;
let (preserve_current_profile, preserve_current_provider) =
preservation_policy(config, &role_layer_toml);
*config = reload::build_next_config(
config,
role_layer_toml,
preserve_current_profile,
preserve_current_provider,
)?;
let reload_overrides = reload::build_reload_overrides(config, &role_layer_toml);
*config = reload::build_next_config(config, role_layer_toml, reload_overrides)?;
Ok(())
}
@@ -103,12 +94,22 @@ async fn load_role_layer_toml(
};
deserialize_config_toml_with_base(role_config_toml.clone(), role_config_base)?;
ensure_role_does_not_use_profiles(&role_config_toml)?;
Ok(resolve_relative_paths_in_config_toml(
role_config_toml,
role_config_base,
)?)
}
/// Rejects role layers that try to use config profiles.
///
/// Sub-agent roles must not set a top-level `profile` key or define any
/// `[profiles.*]` tables; returns an error naming both forbidden forms if
/// either key is present in the role's TOML layer.
fn ensure_role_does_not_use_profiles(role_config_toml: &TomlValue) -> anyhow::Result<()> {
    let mentions_profiles = ["profile", "profiles"]
        .iter()
        .any(|key| role_config_toml.get(key).is_some());
    match mentions_profiles {
        true => Err(anyhow!(
            "sub-agent roles do not support `profile` or `[profiles.*]`"
        )),
        false => Ok(()),
    }
}
pub(crate) fn resolve_role_config<'a>(
config: &'a Config,
role_name: &str,
@@ -119,69 +120,33 @@ pub(crate) fn resolve_role_config<'a>(
.or_else(|| built_in::configs().get(role_name))
}
/// Decides whether the caller's current `profile` and `model_provider`
/// survive the reload that applies a role layer.
///
/// Returns `(preserve_current_profile, preserve_current_provider)`:
/// - the profile is preserved only when the role selects neither a `profile`
///   nor a `model_provider` of its own;
/// - the provider is preserved only when the profile is preserved AND the
///   role does not rewrite `model_provider` inside the currently active
///   profile's `[profiles.<name>]` table.
fn preservation_policy(config: &Config, role_layer_toml: &TomlValue) -> (bool, bool) {
    let role_selects_provider = role_layer_toml.get("model_provider").is_some();
    let role_selects_profile = role_layer_toml.get("profile").is_some();

    // True when the role layer contains `[profiles.<active>]` with a
    // `model_provider` key, i.e. it edits the active profile's provider in place.
    let role_updates_active_profile_provider = match config.active_profile.as_deref() {
        None => false,
        Some(active_profile) => role_layer_toml
            .get("profiles")
            .and_then(TomlValue::as_table)
            .and_then(|profiles| profiles.get(active_profile))
            .and_then(TomlValue::as_table)
            .is_some_and(|profile| profile.contains_key("model_provider")),
    };

    let preserve_current_profile = !role_selects_profile && !role_selects_provider;
    let preserve_current_provider =
        preserve_current_profile && !role_updates_active_profile_provider;
    (preserve_current_profile, preserve_current_provider)
}
mod reload {
use super::*;
pub(super) fn build_next_config(
config: &Config,
role_layer_toml: TomlValue,
preserve_current_profile: bool,
preserve_current_provider: bool,
reload_overrides: ConfigOverrides,
) -> anyhow::Result<Config> {
let active_profile_name = preserve_current_profile
.then_some(config.active_profile.as_deref())
.flatten();
let config_layer_stack =
build_config_layer_stack(config, &role_layer_toml, active_profile_name)?;
let config_layer_stack = build_config_layer_stack(config, &role_layer_toml)?;
let mut merged_config = deserialize_effective_config(config, &config_layer_stack)?;
if preserve_current_profile {
merged_config.profile = None;
}
merged_config.profile = None;
let mut next_config = Config::load_config_with_layer_stack(
merged_config,
reload_overrides(config, preserve_current_provider),
reload_overrides,
config.codex_home.clone(),
config_layer_stack,
)?;
if preserve_current_profile {
next_config.active_profile = config.active_profile.clone();
}
next_config.active_profile = None;
Ok(next_config)
}
fn build_config_layer_stack(
config: &Config,
role_layer_toml: &TomlValue,
active_profile_name: Option<&str>,
) -> anyhow::Result<ConfigLayerStack> {
let mut layers = existing_layers(config);
if let Some(resolved_profile_layer) =
resolved_profile_layer(config, &layers, role_layer_toml, active_profile_name)?
{
insert_layer(&mut layers, resolved_profile_layer);
}
insert_layer(&mut layers, role_layer(role_layer_toml.clone()));
Ok(ConfigLayerStack::new(
layers,
@@ -190,34 +155,6 @@ mod reload {
)?)
}
fn resolved_profile_layer(
config: &Config,
existing_layers: &[ConfigLayerEntry],
role_layer_toml: &TomlValue,
active_profile_name: Option<&str>,
) -> anyhow::Result<Option<ConfigLayerEntry>> {
let Some(active_profile_name) = active_profile_name else {
return Ok(None);
};
let mut layers = existing_layers.to_vec();
insert_layer(&mut layers, role_layer(role_layer_toml.clone()));
let merged_config = deserialize_effective_config(
config,
&ConfigLayerStack::new(
layers,
config.config_layer_stack.requirements().clone(),
config.config_layer_stack.requirements_toml().clone(),
)?,
)?;
let resolved_profile =
merged_config.get_config_profile(Some(active_profile_name.to_string()))?;
Ok(Some(ConfigLayerEntry::new(
ConfigLayerSource::SessionFlags,
TomlValue::try_from(resolved_profile)?,
)))
}
fn deserialize_effective_config(
config: &Config,
config_layer_stack: &ConfigLayerStack,
@@ -250,9 +187,23 @@ mod reload {
ConfigLayerEntry::new(ConfigLayerSource::SessionFlags, role_layer_toml)
}
fn reload_overrides(config: &Config, preserve_current_provider: bool) -> ConfigOverrides {
pub(super) fn build_reload_overrides(
config: &Config,
role_layer_toml: &TomlValue,
) -> ConfigOverrides {
let preserve_current_provider = role_layer_toml.get("model_provider").is_none();
ConfigOverrides {
cwd: Some(config.cwd.clone()),
model: role_layer_toml
.get("model")
.is_none()
.then(|| config.model.clone())
.flatten(),
model_reasoning_effort: role_layer_toml
.get("model_reasoning_effort")
.is_none()
.then_some(config.model_reasoning_effort)
.flatten(),
model_provider: preserve_current_provider.then(|| config.model_provider_id.clone()),
codex_linux_sandbox_exe: config.codex_linux_sandbox_exe.clone(),
main_execve_wrapper_exe: config.main_execve_wrapper_exe.clone(),

View File

@@ -1,11 +1,8 @@
use super::*;
use crate::config::CONFIG_TOML_FILE;
use crate::config::ConfigBuilder;
use crate::config_loader::ConfigLayerStackOrdering;
use crate::plugins::PluginsManager;
use crate::skills::SkillsManager;
use codex_protocol::config_types::ReasoningSummary;
use codex_protocol::config_types::Verbosity;
use codex_protocol::openai_models::ReasoningEffort;
use pretty_assertions::assert_eq;
use std::fs;
@@ -194,33 +191,10 @@ async fn apply_role_preserves_unspecified_keys() {
}
#[tokio::test]
async fn apply_role_preserves_active_profile_and_model_provider() {
let home = TempDir::new().expect("create temp dir");
tokio::fs::write(
home.path().join(CONFIG_TOML_FILE),
r#"
[model_providers.test-provider]
name = "Test Provider"
base_url = "https://example.com/v1"
env_key = "TEST_PROVIDER_API_KEY"
wire_api = "responses"
[profiles.test-profile]
model_provider = "test-provider"
"#,
)
.await
.expect("write config.toml");
let mut config = ConfigBuilder::default()
.codex_home(home.path().to_path_buf())
.harness_overrides(ConfigOverrides {
config_profile: Some("test-profile".to_string()),
..Default::default()
})
.fallback_cwd(Some(home.path().to_path_buf()))
.build()
.await
.expect("load config");
async fn apply_role_preserves_runtime_model_overrides() {
let (home, mut config) = test_config_with_cli_overrides(Vec::new()).await;
config.model = Some("runtime-model".to_string());
config.model_reasoning_effort = Some(ReasoningEffort::High);
let role_path = write_role_config(
&home,
"empty-role.toml",
@@ -240,251 +214,7 @@ model_provider = "test-provider"
.await
.expect("custom role should apply");
assert_eq!(config.active_profile.as_deref(), Some("test-profile"));
assert_eq!(config.model_provider_id, "test-provider");
assert_eq!(config.model_provider.name, "Test Provider");
}
#[tokio::test]
// Top-level role settings must win over values the caller's preserved profile
// supplies: the profile stays active, but its four model knobs are overridden.
async fn apply_role_top_level_profile_settings_override_preserved_profile() {
// Temporary CODEX_HOME with a base profile that sets all four model knobs.
let home = TempDir::new().expect("create temp dir");
tokio::fs::write(
home.path().join(CONFIG_TOML_FILE),
r#"
[profiles.base-profile]
model = "profile-model"
model_reasoning_effort = "low"
model_reasoning_summary = "concise"
model_verbosity = "low"
"#,
)
.await
.expect("write config.toml");
// Load config with `base-profile` selected, as a caller would at runtime.
let mut config = ConfigBuilder::default()
.codex_home(home.path().to_path_buf())
.harness_overrides(ConfigOverrides {
config_profile: Some("base-profile".to_string()),
..Default::default()
})
.fallback_cwd(Some(home.path().to_path_buf()))
.build()
.await
.expect("load config");
// Role layer sets the same four knobs at top level (no `profile` key).
let role_path = write_role_config(
&home,
"top-level-profile-settings-role.toml",
r#"developer_instructions = "Stay focused"
model = "role-model"
model_reasoning_effort = "high"
model_reasoning_summary = "detailed"
model_verbosity = "high"
"#,
)
.await;
// Register the role under the name "custom" so it can be applied by name.
config.agent_roles.insert(
"custom".to_string(),
AgentRoleConfig {
description: None,
config_file: Some(role_path),
nickname_candidates: None,
},
);
apply_role_to_config(&mut config, Some("custom"))
.await
.expect("custom role should apply");
// Profile preserved; every top-level role value overrides the profile's.
assert_eq!(config.active_profile.as_deref(), Some("base-profile"));
assert_eq!(config.model.as_deref(), Some("role-model"));
assert_eq!(config.model_reasoning_effort, Some(ReasoningEffort::High));
assert_eq!(
config.model_reasoning_summary,
Some(ReasoningSummary::Detailed)
);
assert_eq!(config.model_verbosity, Some(Verbosity::High));
}
#[tokio::test]
// A role that explicitly sets `profile` takes over profile selection: the
// role's profile (and its provider) replaces the caller's active profile.
async fn apply_role_uses_role_profile_instead_of_current_profile() {
// Config declares two providers and a profile pointing at each.
let home = TempDir::new().expect("create temp dir");
tokio::fs::write(
home.path().join(CONFIG_TOML_FILE),
r#"
[model_providers.base-provider]
name = "Base Provider"
base_url = "https://base.example.com/v1"
env_key = "BASE_PROVIDER_API_KEY"
wire_api = "responses"
[model_providers.role-provider]
name = "Role Provider"
base_url = "https://role.example.com/v1"
env_key = "ROLE_PROVIDER_API_KEY"
wire_api = "responses"
[profiles.base-profile]
model_provider = "base-provider"
[profiles.role-profile]
model_provider = "role-provider"
"#,
)
.await
.expect("write config.toml");
// Caller starts on `base-profile`.
let mut config = ConfigBuilder::default()
.codex_home(home.path().to_path_buf())
.harness_overrides(ConfigOverrides {
config_profile: Some("base-profile".to_string()),
..Default::default()
})
.fallback_cwd(Some(home.path().to_path_buf()))
.build()
.await
.expect("load config");
// Role explicitly selects `role-profile`.
let role_path = write_role_config(
&home,
"profile-role.toml",
"developer_instructions = \"Stay focused\"\nprofile = \"role-profile\"",
)
.await;
config.agent_roles.insert(
"custom".to_string(),
AgentRoleConfig {
description: None,
config_file: Some(role_path),
nickname_candidates: None,
},
);
apply_role_to_config(&mut config, Some("custom"))
.await
.expect("custom role should apply");
// The role's profile wins, bringing its provider along.
assert_eq!(config.active_profile.as_deref(), Some("role-profile"));
assert_eq!(config.model_provider_id, "role-provider");
assert_eq!(config.model_provider.name, "Role Provider");
}
#[tokio::test]
// A role that explicitly sets `model_provider` takes over provider selection:
// the caller's profile is dropped and the role's provider becomes active.
async fn apply_role_uses_role_model_provider_instead_of_current_profile_provider() {
// Config declares both providers; only `base-profile` references one.
let home = TempDir::new().expect("create temp dir");
tokio::fs::write(
home.path().join(CONFIG_TOML_FILE),
r#"
[model_providers.base-provider]
name = "Base Provider"
base_url = "https://base.example.com/v1"
env_key = "BASE_PROVIDER_API_KEY"
wire_api = "responses"
[model_providers.role-provider]
name = "Role Provider"
base_url = "https://role.example.com/v1"
env_key = "ROLE_PROVIDER_API_KEY"
wire_api = "responses"
[profiles.base-profile]
model_provider = "base-provider"
"#,
)
.await
.expect("write config.toml");
// Caller starts on `base-profile` (provider: base-provider).
let mut config = ConfigBuilder::default()
.codex_home(home.path().to_path_buf())
.harness_overrides(ConfigOverrides {
config_profile: Some("base-profile".to_string()),
..Default::default()
})
.fallback_cwd(Some(home.path().to_path_buf()))
.build()
.await
.expect("load config");
// Role sets `model_provider` directly (no profile key).
let role_path = write_role_config(
&home,
"provider-role.toml",
"developer_instructions = \"Stay focused\"\nmodel_provider = \"role-provider\"",
)
.await;
config.agent_roles.insert(
"custom".to_string(),
AgentRoleConfig {
description: None,
config_file: Some(role_path),
nickname_candidates: None,
},
);
apply_role_to_config(&mut config, Some("custom"))
.await
.expect("custom role should apply");
// Profile is not preserved when the role owns provider selection.
assert_eq!(config.active_profile, None);
assert_eq!(config.model_provider_id, "role-provider");
assert_eq!(config.model_provider.name, "Role Provider");
}
#[tokio::test]
// A role that rewrites `[profiles.<active>]` in place keeps the caller's
// profile active but applies the role's edits to that profile's fields.
async fn apply_role_uses_active_profile_model_provider_update() {
// Config declares both providers; `base-profile` points at base-provider.
let home = TempDir::new().expect("create temp dir");
tokio::fs::write(
home.path().join(CONFIG_TOML_FILE),
r#"
[model_providers.base-provider]
name = "Base Provider"
base_url = "https://base.example.com/v1"
env_key = "BASE_PROVIDER_API_KEY"
wire_api = "responses"
[model_providers.role-provider]
name = "Role Provider"
base_url = "https://role.example.com/v1"
env_key = "ROLE_PROVIDER_API_KEY"
wire_api = "responses"
[profiles.base-profile]
model_provider = "base-provider"
model_reasoning_effort = "low"
"#,
)
.await
.expect("write config.toml");
// Caller starts on `base-profile`.
let mut config = ConfigBuilder::default()
.codex_home(home.path().to_path_buf())
.harness_overrides(ConfigOverrides {
config_profile: Some("base-profile".to_string()),
..Default::default()
})
.fallback_cwd(Some(home.path().to_path_buf()))
.build()
.await
.expect("load config");
// Role edits the active profile's provider and reasoning effort in place.
let role_path = write_role_config(
&home,
"profile-edit-role.toml",
r#"developer_instructions = "Stay focused"
[profiles.base-profile]
model_provider = "role-provider"
model_reasoning_effort = "high"
"#,
)
.await;
config.agent_roles.insert(
"custom".to_string(),
AgentRoleConfig {
description: None,
config_file: Some(role_path),
nickname_candidates: None,
},
);
apply_role_to_config(&mut config, Some("custom"))
.await
.expect("custom role should apply");
// Profile stays active; the role's in-place edits take effect.
assert_eq!(config.active_profile.as_deref(), Some("base-profile"));
assert_eq!(config.model_provider_id, "role-provider");
assert_eq!(config.model_provider.name, "Role Provider");
// NOTE(review): this test never sets `model = "runtime-model"` anywhere in
// its fixtures, so the following assertion cannot hold as written — it looks
// spliced in from a different test (`apply_role_preserves_runtime_model_overrides`).
// Confirm against the original diff before relying on it.
assert_eq!(config.model.as_deref(), Some("runtime-model"));
assert_eq!(config.model_reasoning_effort, Some(ReasoningEffort::High));
}

View File

@@ -1945,6 +1945,7 @@ fn add_additional_file_system_writes(
#[derive(Default, Debug, Clone)]
pub struct ConfigOverrides {
pub model: Option<String>,
pub model_reasoning_effort: Option<ReasoningEffort>,
pub review_model: Option<String>,
pub cwd: Option<PathBuf>,
pub approval_policy: Option<AskForApproval>,
@@ -2143,6 +2144,7 @@ impl Config {
// Destructure ConfigOverrides fully to ensure all overrides are applied.
let ConfigOverrides {
model,
model_reasoning_effort: override_model_reasoning_effort,
review_model: override_review_model,
cwd,
approval_policy: approval_policy_override,
@@ -2759,9 +2761,9 @@ impl Config {
.or(show_raw_agent_reasoning)
.unwrap_or(false),
guardian_developer_instructions,
model_reasoning_effort: config_profile
model_reasoning_effort: override_model_reasoning_effort.or(config_profile
.model_reasoning_effort
.or(cfg.model_reasoning_effort),
.or(cfg.model_reasoning_effort)),
plan_mode_reasoning_effort: config_profile
.plan_mode_reasoning_effort
.or(cfg.plan_mode_reasoning_effort),

View File

@@ -22,6 +22,7 @@ use codex_protocol::models::ContentItem;
use codex_protocol::models::FunctionCallOutputBody;
use codex_protocol::models::ResponseInputItem;
use codex_protocol::models::ResponseItem;
use codex_protocol::openai_models::ReasoningEffort;
use codex_protocol::protocol::InitialHistory;
use codex_protocol::protocol::RolloutItem;
use pretty_assertions::assert_eq;
@@ -212,6 +213,47 @@ async fn spawn_agent_uses_explorer_role_and_preserves_approval_policy() {
assert_eq!(snapshot.model_provider_id, "ollama");
}
#[tokio::test]
// Spawning an explorer agent with explicit `model` and `reasoning_effort`
// arguments must carry both values through to the spawned thread's config.
async fn spawn_agent_explorer_role_preserves_requested_model_and_reasoning_settings() {
// Minimal mirror of the handler's JSON result; only `agent_id` is needed.
#[derive(Debug, Deserialize)]
struct SpawnAgentResult {
agent_id: String,
}
let (mut session, turn) = make_session_and_context().await;
let manager = thread_manager();
// Route agent control through the test thread manager so the spawned
// thread can be inspected below.
session.services.agent_control = manager.agent_control();
// Invoke the tool with explicit model/reasoning_effort overrides.
let invocation = invocation(
Arc::new(session),
Arc::new(turn),
"spawn_agent",
function_payload(json!({
"message": "inspect this repo",
"agent_type": "explorer",
"model": "gpt-5.2",
"reasoning_effort": "low",
})),
);
let output = SpawnAgentHandler
.handle(invocation)
.await
.expect("spawn_agent should succeed");
// Parse the handler's text output to recover the spawned agent's id.
let (content, _) = expect_text_output(output);
let result: SpawnAgentResult =
serde_json::from_str(&content).expect("spawn_agent result should be json");
let agent_id = agent_id(&result.agent_id).expect("agent_id should be valid");
// Snapshot the spawned thread's effective config.
let snapshot = manager
.get_thread(agent_id)
.await
.expect("spawned agent thread should exist")
.config_snapshot()
.await;
// Both requested overrides survive role application.
assert_eq!(snapshot.model, "gpt-5.2");
assert_eq!(snapshot.reasoning_effort, Some(ReasoningEffort::Low));
}
#[tokio::test]
async fn spawn_agent_errors_when_manager_dropped() {
let (session, turn) = make_session_and_context().await;

View File

@@ -336,6 +336,7 @@ pub async fn run_main(cli: Cli, arg0_paths: Arg0DispatchPaths) -> anyhow::Result
// Load configuration and determine approval policy
let overrides = ConfigOverrides {
model,
model_reasoning_effort: None,
review_model: None,
config_profile,
// Default to never ask for approvals in headless mode. Feature flags can override.